summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--doc/source/deploy/cleaning.rst147
-rw-r--r--doc/source/deploy/drivers.rst19
-rw-r--r--doc/source/deploy/install-guide.rst272
-rw-r--r--doc/source/deploy/upgrade-guide.rst36
-rw-r--r--doc/source/deploy/user-guide.rst2
-rw-r--r--doc/source/dev/dev-quickstart.rst6
-rw-r--r--doc/source/drivers/ilo.rst8
-rw-r--r--doc/source/drivers/seamicro.rst4
-rw-r--r--doc/source/images/states.pngbin102484 -> 147495 bytes
-rw-r--r--doc/source/index.rst2
-rw-r--r--driver-requirements.txt4
-rw-r--r--etc/apache2/ironic2
-rw-r--r--etc/ironic/ironic.conf.sample68
-rw-r--r--ironic/api/controllers/root.py4
-rw-r--r--ironic/api/controllers/v1/__init__.py4
-rw-r--r--ironic/api/controllers/v1/chassis.py14
-rw-r--r--ironic/api/controllers/v1/driver.py12
-rw-r--r--ironic/api/controllers/v1/node.py171
-rw-r--r--ironic/api/controllers/v1/port.py81
-rw-r--r--ironic/api/controllers/v1/utils.py46
-rw-r--r--ironic/api/expose.py24
-rw-r--r--ironic/api/middleware/auth_token.py2
-rw-r--r--ironic/common/exception.py7
-rw-r--r--ironic/common/glance_service/base_image_service.py31
-rw-r--r--ironic/common/grub_conf.template10
-rw-r--r--ironic/common/image_service.py5
-rw-r--r--ironic/common/pxe_utils.py40
-rw-r--r--ironic/common/states.py2
-rw-r--r--ironic/common/utils.py20
-rw-r--r--ironic/conductor/manager.py17
-rw-r--r--ironic/db/sqlalchemy/alembic/versions/2fb93ffd2af1_increase_node_name_length.py42
-rw-r--r--ironic/db/sqlalchemy/models.py2
-rw-r--r--ironic/dhcp/base.py14
-rw-r--r--ironic/dhcp/neutron.py34
-rw-r--r--ironic/dhcp/none.py7
-rw-r--r--ironic/drivers/base.py3
-rw-r--r--ironic/drivers/modules/agent.py80
-rw-r--r--ironic/drivers/modules/agent_base_vendor.py41
-rw-r--r--ironic/drivers/modules/agent_client.py11
-rw-r--r--ironic/drivers/modules/agent_config.template2
-rw-r--r--ironic/drivers/modules/amt/vendor.py13
-rw-r--r--ironic/drivers/modules/boot.ipxe2
-rw-r--r--ironic/drivers/modules/deploy_utils.py58
-rw-r--r--ironic/drivers/modules/drac/client.py42
-rw-r--r--ironic/drivers/modules/drac/power.py11
-rw-r--r--ironic/drivers/modules/elilo_efi_pxe_config.template4
-rw-r--r--ironic/drivers/modules/ilo/common.py68
-rw-r--r--ironic/drivers/modules/ilo/deploy.py120
-rw-r--r--ironic/drivers/modules/ilo/inspect.py119
-rw-r--r--ironic/drivers/modules/ilo/power.py11
-rw-r--r--ironic/drivers/modules/ipmitool.py69
-rw-r--r--ironic/drivers/modules/ipxe_config.template2
-rw-r--r--ironic/drivers/modules/iscsi_deploy.py103
-rw-r--r--ironic/drivers/modules/pxe.py94
-rw-r--r--ironic/drivers/modules/pxe_config.template2
-rw-r--r--ironic/drivers/modules/snmp.py2
-rw-r--r--ironic/drivers/utils.py27
-rw-r--r--ironic/locale/ironic-log-error.pot236
-rw-r--r--ironic/locale/ironic-log-info.pot205
-rw-r--r--ironic/locale/ironic-log-warning.pot174
-rw-r--r--ironic/locale/ironic.pot1289
-rw-r--r--ironic/locale/pt_BR/LC_MESSAGES/ironic-log-critical.po25
-rw-r--r--ironic/openstack/common/service.py32
-rw-r--r--ironic/openstack/common/versionutils.py11
-rw-r--r--ironic/tests/__init__.py11
-rw-r--r--ironic/tests/api/test_acl.py9
-rw-r--r--ironic/tests/api/utils.py10
-rw-r--r--ironic/tests/api/v1/test_chassis.py22
-rw-r--r--ironic/tests/api/v1/test_nodes.py233
-rw-r--r--ironic/tests/api/v1/test_ports.py77
-rw-r--r--ironic/tests/api/v1/test_root.py4
-rw-r--r--ironic/tests/api/v1/test_utils.py111
-rw-r--r--ironic/tests/conductor/test_manager.py54
-rw-r--r--ironic/tests/db/sqlalchemy/test_migrations.py29
-rw-r--r--ironic/tests/db/test_chassis.py49
-rw-r--r--ironic/tests/db/test_conductor.py14
-rw-r--r--ironic/tests/db/test_nodes.py21
-rw-r--r--ironic/tests/db/test_ports.py4
-rw-r--r--ironic/tests/db/utils.py20
-rw-r--r--ironic/tests/dhcp/test_factory.py35
-rw-r--r--ironic/tests/dhcp/test_neutron.py33
-rw-r--r--ironic/tests/drivers/agent_pxe_config.template2
-rw-r--r--ironic/tests/drivers/amt/test_vendor.py58
-rw-r--r--ironic/tests/drivers/drac/test_client.py38
-rw-r--r--ironic/tests/drivers/drac/test_power.py17
-rw-r--r--ironic/tests/drivers/elilo_efi_pxe_config.template16
-rw-r--r--ironic/tests/drivers/ilo/test_common.py70
-rw-r--r--ironic/tests/drivers/ilo/test_deploy.py262
-rw-r--r--ironic/tests/drivers/ilo/test_inspect.py180
-rw-r--r--ironic/tests/drivers/ilo/test_power.py14
-rw-r--r--ironic/tests/drivers/ipxe_config.template21
-rw-r--r--ironic/tests/drivers/pxe_config.template2
-rw-r--r--ironic/tests/drivers/test_agent.py90
-rw-r--r--ironic/tests/drivers/test_agent_base_vendor.py111
-rw-r--r--ironic/tests/drivers/test_agent_client.py32
-rw-r--r--ironic/tests/drivers/test_deploy_utils.py114
-rw-r--r--ironic/tests/drivers/test_ipmitool.py80
-rw-r--r--ironic/tests/drivers/test_iscsi_deploy.py135
-rw-r--r--ironic/tests/drivers/test_pxe.py132
-rw-r--r--ironic/tests/drivers/test_seamicro.py3
-rw-r--r--ironic/tests/drivers/test_ssh.py2
-rw-r--r--ironic/tests/drivers/test_utils.py21
-rw-r--r--ironic/tests/objects/utils.py3
-rw-r--r--ironic/tests/stubs.py12
-rw-r--r--ironic/tests/test_disk_partitioner.py46
-rw-r--r--ironic/tests/test_driver_factory.py3
-rw-r--r--ironic/tests/test_exception.py29
-rw-r--r--ironic/tests/test_glance_service.py26
-rw-r--r--ironic/tests/test_hash_ring.py2
-rw-r--r--ironic/tests/test_image_service.py102
-rw-r--r--ironic/tests/test_images.py288
-rw-r--r--ironic/tests/test_keystone.py26
-rw-r--r--ironic/tests/test_pxe_utils.py121
-rw-r--r--ironic/tests/test_swift.py10
-rw-r--r--ironic/tests/test_utils.py68
-rw-r--r--requirements.txt26
-rw-r--r--test-requirements.txt4
-rw-r--r--tools/config/oslo.config.generator.rc2
-rwxr-xr-xtools/states_to_dot.py2
-rw-r--r--tox.ini2
120 files changed, 4712 insertions, 2211 deletions
diff --git a/doc/source/deploy/cleaning.rst b/doc/source/deploy/cleaning.rst
new file mode 100644
index 000000000..8897bf46b
--- /dev/null
+++ b/doc/source/deploy/cleaning.rst
@@ -0,0 +1,147 @@
+.. _cleaning:
+
+=============
+Node Cleaning
+=============
+
+Overview
+========
+When hardware is recycled from one workload to another, Ironic performs
+cleaning on the node to ensure it's ready for another workload. This ensures
+the tenant will get a consistent baremetal node deployed every time.
+
+Ironic implements cleaning by collecting a list of steps to perform on a node
+from each Power, Deploy, and Management driver assigned to the node. These
+steps are then arranged by priority and executed on the node when it is moved
+to cleaning state, if cleaning is enabled.
+
+Typically, nodes move to cleaning state when moving from active -> available.
+Nodes also traverse cleaning when going from manageable -> available. For a
+full understanding of all state transitions into cleaning, please see
+:ref:`states`.
+
+Ironic added support for cleaning nodes in the Kilo release.
+
+
+Enabling Cleaning
+=================
+To enable cleaning, ensure your ironic.conf is set as follows: ::
+
+ [conductor]
+ clean_nodes=true
+
+This will enable the default set of steps, based on your hardware and Ironic
+drivers. If you're using an agent_* driver, this includes, by default, erasing
+all of the previous tenant's data.
+
+
+In-Band vs Out-of-Band
+======================
+Ironic uses two main methods to perform actions on a node: in-band and
+out-of-band. Ironic supports using both methods to clean a node.
+
+In-Band
+-------
+In-band steps are performed by Ironic making API calls to a ramdisk running
+on the node using a Deploy driver. Currently, only the ironic-python-agent
+ramdisk used with an agent_* driver supports in-band cleaning. By default,
+ironic-python-agent ships with a minimal cleaning configuration, only erasing
+disks. However, with this ramdisk, you can add your own cleaning steps and/or
+override default cleaning steps with a custom Hardware Manager.
+
+There is currently no support for in-band cleaning using the Ironic pxe
+ramdisk.
+
+Out-of-Band
+-----------
+Out-of-band are actions performed by your management controller, such as IPMI,
+iLO, or DRAC. Out-of-band steps will be performed by Ironic using a Power or
+Management driver. Which steps are performed depends on the driver and hardware.
+
+
+FAQ
+===
+
+How are cleaning steps ordered?
+-------------------------------
+Cleaning steps are ordered by integer priority, where a larger integer is a
+higher priority. In case of a conflict between priorities across drivers,
+the following resolution order is used: Power, Management, Deploy.
+
+How do I skip a cleaning step?
+------------------------------
+Cleaning steps with a priority of 0 or None are skipped.
+
+How do I change the priority of a cleaning step?
+------------------------------------------------
+Most out-of-band cleaning steps have an explicit configuration option for
+priority.
+
+Changing the priority of an in-band (ironic-python-agent) cleaning step
+currently requires use of a custom HardwareManager. The only exception is
+erase_devices, which can have its priority set in ironic.conf. For instance,
+to disable erase_devices, you'd use the following config::
+
+ [agent]
+ agent_erase_devices_priority=0
+
+
+What cleaning step is running?
+------------------------------
+To check what cleaning step the node is performing or attempted to perform and
+failed, either query the node endpoint for the node or run ``ironic node-show
+$node_ident`` and look in the ``driver_internal_info`` field. The ``clean_steps``
+field will contain a list of all remaining steps with their priority, and the
+first one listed is the step currently in progress or that the node failed
+before going into cleanfail state.
+
+Should I disable cleaning?
+--------------------------
+Cleaning is recommended for Ironic deployments, however, there are some
+tradeoffs to having it enabled. For instance, Ironic cannot deploy a new
+instance to a node that is currently cleaning, and cleaning can be a time
+consuming process. To mitigate this, we suggest using disks with support for
+cryptographic ATA Security Erase, as typically the erase_devices step in the
+deploy driver takes the longest time to complete of all cleaning steps.
+
+Why can't I power on/off a node while it's cleaning?
+----------------------------------------------------
+During cleaning, nodes may be performing actions that shouldn't be
+interrupted, such as BIOS or Firmware updates. As a result, operators are
+forbidden from changing power state via the Ironic API while a node is
+cleaning.
+
+
+Troubleshooting
+===============
+If cleaning fails on a node, the node will be put into cleanfail state and
+placed in maintenance mode, to prevent Ironic from taking actions on the
+node.
+
+Nodes in cleanfail will not be powered off, as the node might be in a state
+such that powering it off could damage the node or remove useful information
+about the nature of the cleaning failure.
+
+A cleanfail node can be moved to manageable state, where it cannot be
+scheduled by Nova and you can safely attempt to fix the node. To move a node
+from cleanfail to manageable: ``ironic node-set-provision-state manage``.
+You can now take actions on the node, such as replacing a bad disk drive.
+
+Strategies for determining why a cleaning step failed include checking the
+Ironic conductor logs, viewing logs on the still-running ironic-python-agent
+(if an in-band step failed), or performing general hardware troubleshooting on
+the node.
+
+When the node is repaired, you can move the node back to available state, to
+allow it to be scheduled by Nova.
+
+::
+
+ # First, move it out of maintenance mode
+ ironic node-set-maintenance $node_ident false
+
+ # Now, make the node available for scheduling by Nova
+ ironic node-set-provision-state $node_ident provide
+
+The node will begin cleaning from the start, and move to available state
+when complete.
diff --git a/doc/source/deploy/drivers.rst b/doc/source/deploy/drivers.rst
index 3f73f1916..8f005bfa8 100644
--- a/doc/source/deploy/drivers.rst
+++ b/doc/source/deploy/drivers.rst
@@ -4,13 +4,30 @@
Enabling Drivers
=================
+Ironic-Python-Agent (agent)
+---------------------------
+
+To enable IPA add the appropriate Ironic agent driver to the ``enabled_drivers``
+line of the ironic.conf file.
+
+Several variants are currently supported; they are:
+ * agent_ilo
+ * agent_ipmitool
+ * agent_pyghmi
+ * agent_ssh
+ * agent_vbox
+
+.. note:: Starting with the Kilo release IPA ramdisk may also be used with Ironic PXE drivers.
+
+For more information see the `ironic-python-agent GitHub repo <https://github.com/openstack/ironic-python-agent/>`_
+
DRAC
----
DRAC with PXE deploy
^^^^^^^^^^^^^^^^^^^^
-- Add ``pxe_drac`` to the list of ``enabled_drivers in``
+- Add ``pxe_drac`` to the list of ``enabled_drivers`` in
``/etc/ironic/ironic.conf``
- Install openwsman-python package
diff --git a/doc/source/deploy/install-guide.rst b/doc/source/deploy/install-guide.rst
index c02e42828..871e75313 100644
--- a/doc/source/deploy/install-guide.rst
+++ b/doc/source/deploy/install-guide.rst
@@ -4,8 +4,9 @@
Bare Metal Service Installation Guide
=====================================
-This document pertains to the Juno (2014.2) release of OpenStack. Users of
-earlier releases may encounter some differences in configuration of services.
+This document pertains to the Kilo (2015.1) release of OpenStack Ironic. Users
+of earlier releases may encounter differences, and are encouraged to look at
+earlier versions of this document for guidance.
Service Overview
@@ -14,23 +15,33 @@ Service Overview
The Bare Metal Service is a collection of components that provides support to
manage and provision physical machines.
-Also known as the ``ironic`` project, the Bare Metal Service interacts with
-several other OpenStack services such as:
+Also known as the ``Ironic`` project, the Bare Metal Service may, depending
+upon configuration, interact with several other OpenStack services. This
+includes:
-- the Identity Service (keystone) for request authentication and to
+- the Telemetry (Ceilometer) for consuming the IPMI metrics
+- the Identity Service (Keystone) for request authentication and to
locate other OpenStack services
-- the Image Service (glance) from which to retrieve images
-- the Networking Service (neutron) for DHCP and network configuration
-- the Compute Service (nova), which leverages the Bare Metal Service to
- manage compute instances on bare metal.
+- the Image Service (Glance) from which to retrieve images and image meta-data
+- the Networking Service (Neutron) for DHCP and network configuration
+- the Compute Service (Nova) works with Ironic and acts as a user-facing API
+ for instance management, while Ironic provides the admin/operator API for
+ hardware management. Nova also provides scheduling facilities (matching
+ flavors <-> images <-> hardware), tenant quotas, IP assignment, and other
+ services which Ironic does not, in and of itself, provide.
+
+- the Block Storage (Cinder) will provide volumes, but this aspect is not yet available.
The Bare Metal Service includes the following components:
-- ironic-api. A RESTful API that processes application requests by sending
+- ironic-api: A RESTful API that processes application requests by sending
them to the ironic-conductor over RPC.
-- ironic-conductor. Adds/edits/deletes nodes; powers on/off nodes with
+- ironic-conductor: Adds/edits/deletes nodes; powers on/off nodes with
ipmi or ssh; provisions/deploys/decommissions bare metal nodes.
-- Ironic client. A command-line interface (CLI) for interacting with
+- ironic-python-agent: A python service which is run in a temporary ramdisk to
+ provide ironic-conductor service(s) with remote access and in-band hardware
+ control.
+- python-ironicclient: A command-line interface (CLI) for interacting with
the Bare Metal Service.
Additionally, the Bare Metal Service has certain external dependencies, which are
@@ -44,6 +55,20 @@ very similar to other OpenStack Services:
- A queue. A central hub for passing messages. It should use the same
implementation as that of the Compute Service (typically RabbitMQ).
+Optionally, one may wish to utilize the following associated projects for
+additional functionality:
+
+- ironic-discoverd_; An associated service which performs in-band hardware
+ introspection by PXE booting unregistered hardware into a "discovery ramdisk".
+- diskimage-builder_; May be used to customize machine images, and to create
+  deploy and discovery ramdisks, if necessary.
+.. _ironic-discoverd: https://github.com/stackforge/ironic-discoverd
+.. _diskimage-builder: https://github.com/openstack/diskimage-builder
+
+
+.. todo: include coreos-image-builder reference here, once the split is done
+
+
Install and Configure Prerequisites
===================================
@@ -644,16 +669,17 @@ node(s) where ``ironic-conductor`` is running.
echo 'r ^([^/]) /tftpboot/\1' > /tftpboot/map-file
echo 'r ^(/tftpboot/) /tftpboot/\2' >> /tftpboot/map-file
+#. Enable tftp map file, modify ``/etc/xinetd.d/tftp`` as below and restart xinetd
+ service::
+
+ server_args = -v -v -v -v -v --map-file /tftpboot/map-file /tftpboot
+
.. [1] On **Fedora/RHEL** the ``syslinux-tftpboot`` package already install
the library modules and PXE image at ``/tftpboot``. If the TFTP server
is configured to listen to a different directory you should copy the
contents of ``/tftpboot`` to the configured directory
.. [2] http://www.syslinux.org/wiki/index.php/Library_modules
-#. Enable tftp map file, modify ``/etc/xinetd.d/tftp`` as below and restart xinetd
- service::
-
- server_args = -v -v -v -v -v --map-file /tftpboot/map-file /tftpboot
PXE UEFI Setup
--------------
@@ -888,7 +914,7 @@ The boot modes can be configured in Ironic in the following way:
the ``boot_mode`` set appropriately in ``properties/capabilities``. It will
filter out rest of the nodes.
- The above facility for matching in Nova can be used in heterogenous
+ The above facility for matching in Nova can be used in heterogeneous
environments where there is a mix of ``uefi`` and ``bios`` machines, and
operator wants to provide a choice to the user regarding boot modes. If
the flavor doesn't contain ``boot_mode`` and ``boot_mode`` is configured for
@@ -896,6 +922,54 @@ The boot modes can be configured in Ironic in the following way:
either ``bios`` or ``uefi`` machine.
+Local boot with partition images
+================================
+
+Starting with the Kilo release, Ironic supports local boot with partition
+images, meaning that after the deployment the node's subsequent reboots
+won't happen via PXE or Virtual Media. Instead, it will boot from a
+local boot loader installed on the disk.
+
+It's important to note that in order for this to work the image being
+deployed with Ironic **must** contain ``grub2`` installed within it.
+
+Enabling the local boot is different when Ironic is used with Nova and
+without it. The following sections will describe both methods.
+
+
+Enabling local boot with Nova
+-----------------------------
+
+To enable local boot we need to set a capability on the Ironic node, e.g::
+
+ ironic node-update <node-uuid> add properties/capabilities="boot_option:local"
+
+
+Nodes having ``boot_option`` set to ``local`` may be requested by adding
+an ``extra_spec`` to the Nova flavor, e.g::
+
+ nova flavor-key baremetal set capabilities:boot_option="local"
+
+
+.. note::
+ If the node is configured to use ``UEFI``, Ironic will create an ``EFI
+ partition`` on the disk and switch the partition table format to
+ ``gpt``. The ``EFI partition`` will be used later by the boot loader
+ (which is installed from the deploy ramdisk).
+
+
+Enabling local boot without Nova
+--------------------------------
+
+Since adding ``capabilities`` to the node's properties is only used by
+the Nova scheduler to perform more advanced scheduling of instances,
+we need a way to enable local boot when Nova is not present. To do that
+we can simply specify the capability via the ``instance_info`` attribute
+of the node, e.g::
+
+ ironic node-update <node-uuid> add instance_info/capabilities='{"boot_option": "local"}'
+
+
Enrollment
==========
@@ -1050,8 +1124,59 @@ by an operator. There are two kinds of inspection supported by Ironic:
being run on a separate host from the ironic-conductor service, or is using
non-standard port.
+ In order to ensure that ports in Ironic are synchronized with NIC ports on
+ the node, the following settings in the ironic-discoverd configuration file
+ must be set::
+
+ [discoverd]
+ add_ports = all
+ keep_ports = present
+
+ (requires ironic-discoverd of version 1.1.0 or higher). Note that in this
+ case an operator is responsible for deleting ports that can't be actually
+ used by Ironic, see `bug 1405131
+ <https://bugs.launchpad.net/ironic/+bug/1405131>`_ for explanation.
+
.. _ironic-discoverd: https://github.com/stackforge/ironic-discoverd
+
+Specifying the disk for deployment
+==================================
+
+Starting with the Kilo release, Ironic supports passing hints to the
+deploy ramdisk about which disk it should pick for the deployment. In
+Linux when a server has more than one SATA, SCSI or IDE disk controller,
+the order in which their corresponding device nodes are added is arbitrary
+[`link`_], resulting in devices like ``/dev/sda`` and ``/dev/sdb`` to
+switch around between reboots. Therefore, to guarantee that a specific
+disk is always chosen for the deployment, Ironic introduced root device
+hints.
+
+The list of supported hints is:
+
+* model (STRING): device identifier
+* vendor (STRING): device vendor
+* serial (STRING): disk serial number
+* wwn (STRING): unique storage identifier
+* size (INT): size of the device in GiB
+
+To associate one or more hints with a node, update the node's properties
+with a ``root_device`` key, e.g::
+
+ ironic node-update <node-uuid> add properties/root_device='{"wwn": "0x4000cca77fc4dba1"}'
+
+
+That will guarantee that Ironic will pick the disk device that has the
+``wwn`` equal to the specified wwn value, or fail the deployment if it
+can not be found.
+
+.. note::
+ If multiple hints are specified, a device must satisfy all the hints.
+
+
+.. _`link`: https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Storage_Administration_Guide/persistent_naming.html
+
+
Using Ironic as a standalone service
====================================
@@ -1097,7 +1222,7 @@ There are however some limitations for different drivers:
* If you're using one of the drivers that use agent deploy method (namely,
``agent_ilo``, ``agent_ipmitool``, ``agent_pyghmi``, ``agent_ssh`` or
``agent_vbox``) you have to know MD5 checksum for your instance image. To
- compute it, you can use the follwoing command::
+ compute it, you can use the following command::
md5sum image.qcow2
ed82def8730f394fb85aef8a208635f6 image.qcow2
@@ -1201,6 +1326,117 @@ For iLO drivers, fields that should be provided are:
about that and does not redownload the content.
+Other references
+----------------
+
+* `Enabling local boot without Nova`_
+
+
+Enabling the configuration drive (configdrive)
+==============================================
+
+Starting with the Kilo release, Ironic supports exposing a configuration
+drive image to the instances.
+
+The configuration drive is usually used in conjunction with Nova, but
+Ironic also offers a standalone way of using it. The following sections
+will describe both methods.
+
+
+When used with Nova
+-------------------
+
+To enable the configuration drive when deploying an instance, pass
+``--config-drive true`` parameter to the ``nova boot`` command, e.g::
+
+ nova boot --config-drive true --flavor baremetal --image test-image instance-1
+
+It's also possible to enable the configuration drive automatically on
+all instances by configuring the ``Nova Compute service`` to always
+create a configuration drive by setting the following option in the
+``/etc/nova/nova.conf`` file, e.g::
+
+ [DEFAULT]
+ ...
+
+ force_config_drive=always
+
+
+When used standalone
+--------------------
+
+When used without Nova, the operator needs to create a configuration drive
+and provide the file or HTTP URL to Ironic.
+
+For the format of the configuration drive, Ironic expects a ``gzipped``
+and ``base64`` encoded ISO 9660 [*]_ file with a ``config-2`` label. The
+`Ironic client <https://github.com/openstack/python-ironicclient>`_
+can generate a configuration drive in the expected format. Just pass a
+directory path containing the files that will be injected into it via the
+``--config-drive`` parameter of the ``node-set-provision-state`` command,
+e.g::
+
+ ironic node-set-provision-state --config-drive /dir/configdrive_files $node_identifier active
+
+
+Accessing the configuration drive data
+--------------------------------------
+
+When the configuration drive is enabled, Ironic will create a partition on the
+instance disk and write the configuration drive image onto it. The
+configuration drive must be mounted before use. This is performed
+automatically by many tools, such as cloud-init and cloudbase-init. To mount
+it manually on a Linux distribution that supports accessing devices by labels,
+simply run the following::
+
+ mkdir -p /mnt/config
+ mount /dev/disk/by-label/config-2 /mnt/config
+
+
+If the guest OS doesn't support accessing devices by labels, you can use
+other tools such as ``blkid`` to identify which device corresponds to
+the configuration drive and mount it, e.g::
+
+ CONFIG_DEV=$(blkid -t LABEL="config-2" -odevice)
+ mkdir -p /mnt/config
+ mount $CONFIG_DEV /mnt/config
+
+
+.. [*] A config drive could also be a data block with a VFAT filesystem
+ on it instead of ISO 9660. But it's unlikely that it would be needed
+ since ISO 9660 is widely supported across operating systems.
+
+
+Cloud-init integration
+----------------------
+
+The configuration drive can be especially
+useful when used with ``cloud-init`` [`link
+<http://cloudinit.readthedocs.org/en/latest/topics/datasources.html#config-drive>`_],
+but in order to use it we should follow some rules:
+
+* ``Cloud-init`` expects a specific format to the data. For
+ more information about the expected file layout see [`link
+ <http://docs.openstack.org/user-guide/content/enable_config_drive.html#config_drive_contents>`_].
+
+
+* Since Ironic uses a disk partition as the configuration drive,
+ it will only work with ``cloud-init`` version **>= 0.7.5** [`link
+ <http://bazaar.launchpad.net/~cloud-init-dev/cloud-init/trunk/view/head:/ChangeLog>`_].
+
+
+* ``Cloud-init`` has a collection of data source modules, so when
+ building the image with `disk-image-builder`_ we have to define
+ ``DIB_CLOUD_INIT_DATASOURCES`` environment variable and set the
+ appropriate sources to enable the configuration drive, e.g::
+
+ DIB_CLOUD_INIT_DATASOURCES="ConfigDrive, OpenStack" disk-image-create -o fedora-cloud-image fedora baremetal
+
+ See [`link
+ <http://docs.openstack.org/developer/diskimage-builder/elements/cloud-init-datasources/README.html>`_]
+ for more information.
+
+
Troubleshooting
===============
diff --git a/doc/source/deploy/upgrade-guide.rst b/doc/source/deploy/upgrade-guide.rst
new file mode 100644
index 000000000..2ce9e0952
--- /dev/null
+++ b/doc/source/deploy/upgrade-guide.rst
@@ -0,0 +1,36 @@
+.. _upgrade-guide:
+
+=====================================
+Bare Metal Service Upgrade Guide
+=====================================
+
+This document outlines various steps and notes for operators to consider when
+upgrading their Ironic-driven clouds from previous versions of OpenStack.
+
+The Ironic service is tightly coupled with the Ironic driver that is shipped
+with Nova. Currently, some special considerations must be taken into account
+when upgrading your cloud from previous versions of OpenStack.
+
+Upgrading from Juno to Kilo
+===========================
+
+When upgrading a cloud from Juno to Kilo, users must ensure the Nova
+service is upgraded prior to upgrading the Ironic service. Additionally,
+users need to set a special config flag in Nova prior to upgrading to ensure
+the newer version of Nova is not attempting to take advantage of new Ironic
+features until the Ironic service has been upgraded. The steps for upgrading
+your Nova and Ironic services are as follows:
+
+- Edit nova.conf and ensure force_config_drive=False is set in the [DEFAULT]
+ group. Restart nova-compute if necessary.
+- Install new Nova code, run database migrations
+- Install new python-ironicclient code.
+- Restart Nova services.
+- Install new Ironic code, run database migrations, restart Ironic services.
+- Edit nova.conf and set force_config_drive to your liking, restarting
+ nova-compute if necessary.
+
+Note that during the period between Nova's upgrade and Ironic's upgrade,
+instances can still be provisioned to nodes; however, any attempt by users
+to specify a config drive for an instance will cause an error until Ironic's
+upgrade has completed.
diff --git a/doc/source/deploy/user-guide.rst b/doc/source/deploy/user-guide.rst
index 1a417d120..59d290a4e 100644
--- a/doc/source/deploy/user-guide.rst
+++ b/doc/source/deploy/user-guide.rst
@@ -74,7 +74,7 @@ The Ironic service is composed of the following components:
exposed via the API service. The Conductor and API services communicate
via RPC.
-#. various Drivers that support heterogenous hardware
+#. various Drivers that support heterogeneous hardware
#. a Message Queue
diff --git a/doc/source/dev/dev-quickstart.rst b/doc/source/dev/dev-quickstart.rst
index 01b27d644..223052651 100644
--- a/doc/source/dev/dev-quickstart.rst
+++ b/doc/source/dev/dev-quickstart.rst
@@ -366,11 +366,11 @@ Source credentials, create a key, and spawn an instance::
.. note::
Because devstack create multiple networks, we need to pass an additional parameter
- `--nic net-id` to the nova boot command when using the admin account, for example:
+ ``--nic net-id`` to the nova boot command when using the admin account, for example::
- net_id=$(neutron net-list | egrep "$PRIVATE_NETWORK_NAME"'[^-]' | awk '{ print $2 }')
+ net_id=$(neutron net-list | egrep "$PRIVATE_NETWORK_NAME"'[^-]' | awk '{ print $2 }')
- nova boot --flavor baremetal --nic net-id=$net_id --image $image --key-name default testing
+ nova boot --flavor baremetal --nic net-id=$net_id --image $image --key-name default testing
As the demo tenant, you should now see a Nova instance building::
diff --git a/doc/source/drivers/ilo.rst b/doc/source/drivers/ilo.rst
index 63c3c1a71..0cfe092ed 100644
--- a/doc/source/drivers/ilo.rst
+++ b/doc/source/drivers/ilo.rst
@@ -7,7 +7,7 @@ iLO drivers
Overview
========
iLO drivers enable to take advantage of features of iLO management engine in
-HP Proliant servers. iLO drivers are targetted for HP Proliant Gen 8 systems
+HP Proliant servers. iLO drivers are targeted for HP Proliant Gen 8 systems
and above which have iLO 4 management engine. [1]_
For more detailed and up-to-date information (like tested platforms, known
@@ -40,9 +40,9 @@ Prerequisites
managing HP Proliant hardware.
Install ``proliantutils`` [2]_ module on the Ironic conductor node. Minimum
- version required is 2.0.1.::
+ version required is 2.1.0.::
- $ pip install "proliantutils>=2.0.1"
+ $ pip install "proliantutils>=2.1.0"
* ``ipmitool`` command must be present on the service node(s) where
``ironic-conductor`` is running. On most distros, this is provided as part
@@ -554,7 +554,7 @@ The boot modes can be configured in Ironic in the following way:
the ``boot_mode`` set appropriately in ``properties/capabilities``. It will
filter out rest of the nodes.
- The above facility for matching in Nova can be used in heterogenous
+ The above facility for matching in Nova can be used in heterogeneous
environments where there is a mix of ``uefi`` and ``bios`` machines, and
operator wants to provide a choice to the user regarding boot modes. If the
flavor doesn't contain ``boot_mode`` then Nova scheduler will not consider
diff --git a/doc/source/drivers/seamicro.rst b/doc/source/drivers/seamicro.rst
index 74cf4343c..a7e643eb8 100644
--- a/doc/source/drivers/seamicro.rst
+++ b/doc/source/drivers/seamicro.rst
@@ -17,9 +17,9 @@ Prerequisites
for managing SeaMicro Fabric Compute systems.
Install ``python-seamicroclient`` [1]_ module on the Ironic conductor node.
- Minimum version required is 0.2.1.::
+ Minimum version required is 0.4.0.::
- $ pip install "python-seamicroclient>=0.2.1"
+ $ pip install "python-seamicroclient>=0.4.0"
Drivers
=======
diff --git a/doc/source/images/states.png b/doc/source/images/states.png
index 79b07c86e..ec01e19d2 100644
--- a/doc/source/images/states.png
+++ b/doc/source/images/states.png
Binary files differ
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 492c0cd72..7ea533fdb 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -57,7 +57,9 @@ Overview
deploy/user-guide
deploy/install-guide
+ deploy/upgrade-guide
deploy/drivers
+ deploy/cleaning
Commands
--------
diff --git a/driver-requirements.txt b/driver-requirements.txt
index da39fd2a0..842c51799 100644
--- a/driver-requirements.txt
+++ b/driver-requirements.txt
@@ -5,11 +5,11 @@
# These are available on pypi
ironic-discoverd>=1.0.0
-proliantutils>=2.0.1
+proliantutils>=2.1.0
pyghmi
pysnmp
python-scciclient
-python-seamicroclient
+python-seamicroclient>=0.4.0
# The drac and amt driver import a python module called "pywsman", however,
# this does not exist on pypi.
diff --git a/etc/apache2/ironic b/etc/apache2/ironic
index 195ec5c3c..290b3c548 100644
--- a/etc/apache2/ironic
+++ b/etc/apache2/ironic
@@ -17,7 +17,7 @@
Listen 6385
<VirtualHost *:6385>
- WSGIDaemonProcess ironic user=stack group=stack threads=10
+ WSGIDaemonProcess ironic user=stack group=stack threads=10 display-name=%{GROUP}
WSGIScriptAlias / /opt/stack/ironic/ironic/api/app.wsgi
SetEnv APACHE_RUN_USER stack
diff --git a/etc/ironic/ironic.conf.sample b/etc/ironic/ironic.conf.sample
index 9618ecfaf..ccf368f0d 100644
--- a/etc/ironic/ironic.conf.sample
+++ b/etc/ironic/ironic.conf.sample
@@ -360,6 +360,11 @@
# set to 0, will not run during cleaning. (integer value)
#agent_erase_devices_priority=<None>
+# Whether Ironic will manage TFTP files for the deploy
+# ramdisks. If set to False, you will need to configure your
+# own TFTP server that allows booting the deploy ramdisks.
+# (boolean value)
+#manage_tftp=true
#
# Options defined in ironic.drivers.modules.agent_base_vendor
@@ -516,12 +521,10 @@
# driver that node is managed by; see the individual driver's
# documentation for details. NOTE: The introduction of the
# cleaning operation causes instance deletion to take
-# significantly longer. While this provides a better and more
-# secure user experience, it does impact the service behavior,
-# and as such IS DISABLED BY DEFAULT until consuming services
-# (eg, Nova) have been updated to accomodate the additional
-# time for deletion. (boolean value)
-#clean_nodes=false
+# significantly longer. In an environment where all tenants
+# are trusted (eg, because there is only one tenant), this
+# option could be safely disabled. (boolean value)
+#clean_nodes=true
[console]
@@ -826,8 +829,9 @@
# (integer value)
#glance_num_retries=0
-# Default protocol to use when connecting to glance. Set to
-# https for SSL. (string value)
+# Authentication strategy to use when connecting to glance.
+# Only "keystone" and "noauth" are currently supported by
+# ironic. (string value)
#auth_strategy=keystone
@@ -1192,6 +1196,23 @@
#cleaning_network_uuid=<None>
+[oslo_concurrency]
+
+#
+# Options defined in oslo.concurrency
+#
+
+# Enables or disables inter-process locks. (boolean value)
+#disable_process_locking=false
+
+# Directory to use for lock files. For security, the
+# specified directory should only be writable by the user
+# running the processes that need locking. Defaults to
+# environment variable OSLO_LOCK_PATH. If external locks are
+# used, a lock path must be set. (string value)
+#lock_path=<None>
+
+
[oslo_messaging_amqp]
#
@@ -1376,11 +1397,42 @@
# value)
#rabbit_ha_queues=false
+# Number of seconds after which the Rabbit broker is
+# considered down if heartbeat's keep-alive fails (0 disable
+# the heartbeat). (integer value)
+#heartbeat_timeout_threshold=60
+
+# How often times during the heartbeat_timeout_threshold we
+# check the heartbeat. (integer value)
+#heartbeat_rate=2
+
# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
# (boolean value)
#fake_rabbit=false
+[oslo_policy]
+
+#
+# Options defined in oslo.policy
+#
+
+# The JSON file that defines policies. (string value)
+#policy_file=policy.json
+
+# Default rule. Enforced when a requested rule is not found.
+# (string value)
+#policy_default_rule=default
+
+# Directories where policy configuration files are stored.
+# They can be relative to any directory in the search path
+# defined by the config_dir option, or absolute paths. The
+# file defined by policy_file must exist for these directories
+# to be searched. Missing or empty directories are ignored.
+# (multi valued)
+#policy_dirs=policy.d
+
+
[pxe]
#
diff --git a/ironic/api/controllers/root.py b/ironic/api/controllers/root.py
index 9ae3a6a0c..c3ba09775 100644
--- a/ironic/api/controllers/root.py
+++ b/ironic/api/controllers/root.py
@@ -19,11 +19,11 @@
import pecan
from pecan import rest
from wsme import types as wtypes
-import wsmeext.pecan as wsme_pecan
from ironic.api.controllers import base
from ironic.api.controllers import link
from ironic.api.controllers import v1
+from ironic.api import expose
class Version(base.APIBase):
@@ -79,7 +79,7 @@ class RootController(rest.RestController):
v1 = v1.Controller()
- @wsme_pecan.wsexpose(Root)
+ @expose.expose(Root)
def get(self):
# NOTE: The reason why convert() it's being called for every
# request is because we need to get the host url from
diff --git a/ironic/api/controllers/v1/__init__.py b/ironic/api/controllers/v1/__init__.py
index 3a9b77e36..b2f432fde 100644
--- a/ironic/api/controllers/v1/__init__.py
+++ b/ironic/api/controllers/v1/__init__.py
@@ -26,7 +26,6 @@ import pecan
from pecan import rest
from webob import exc
from wsme import types as wtypes
-import wsmeext.pecan as wsme_pecan
from ironic.api.controllers import base
from ironic.api.controllers import link
@@ -34,6 +33,7 @@ from ironic.api.controllers.v1 import chassis
from ironic.api.controllers.v1 import driver
from ironic.api.controllers.v1 import node
from ironic.api.controllers.v1 import port
+from ironic.api import expose
from ironic.common.i18n import _
BASE_VERSION = 1
@@ -158,7 +158,7 @@ class Controller(rest.RestController):
chassis = chassis.ChassisController()
drivers = driver.DriversController()
- @wsme_pecan.wsexpose(V1)
+ @expose.expose(V1)
def get(self):
# NOTE: The reason why convert() it's being called for every
# request is because we need to get the host url from
diff --git a/ironic/api/controllers/v1/chassis.py b/ironic/api/controllers/v1/chassis.py
index a5134ff64..c6f42a7b1 100644
--- a/ironic/api/controllers/v1/chassis.py
+++ b/ironic/api/controllers/v1/chassis.py
@@ -19,7 +19,6 @@ import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
-import wsmeext.pecan as wsme_pecan
from ironic.api.controllers import base
from ironic.api.controllers import link
@@ -27,6 +26,7 @@ from ironic.api.controllers.v1 import collection
from ironic.api.controllers.v1 import node
from ironic.api.controllers.v1 import types
from ironic.api.controllers.v1 import utils as api_utils
+from ironic.api import expose
from ironic.common import exception
from ironic import objects
@@ -164,7 +164,7 @@ class ChassisController(rest.RestController):
sort_key=sort_key,
sort_dir=sort_dir)
- @wsme_pecan.wsexpose(ChassisCollection, types.uuid,
+ @expose.expose(ChassisCollection, types.uuid,
int, wtypes.text, wtypes.text)
def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'):
"""Retrieve a list of chassis.
@@ -176,7 +176,7 @@ class ChassisController(rest.RestController):
"""
return self._get_chassis_collection(marker, limit, sort_key, sort_dir)
- @wsme_pecan.wsexpose(ChassisCollection, types.uuid, int,
+ @expose.expose(ChassisCollection, types.uuid, int,
wtypes.text, wtypes.text)
def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'):
"""Retrieve a list of chassis with detail.
@@ -196,7 +196,7 @@ class ChassisController(rest.RestController):
return self._get_chassis_collection(marker, limit, sort_key, sort_dir,
expand, resource_url)
- @wsme_pecan.wsexpose(Chassis, types.uuid)
+ @expose.expose(Chassis, types.uuid)
def get_one(self, chassis_uuid):
"""Retrieve information about the given chassis.
@@ -206,7 +206,7 @@ class ChassisController(rest.RestController):
chassis_uuid)
return Chassis.convert_with_links(rpc_chassis)
- @wsme_pecan.wsexpose(Chassis, body=Chassis, status_code=201)
+ @expose.expose(Chassis, body=Chassis, status_code=201)
def post(self, chassis):
"""Create a new chassis.
@@ -220,7 +220,7 @@ class ChassisController(rest.RestController):
return Chassis.convert_with_links(new_chassis)
@wsme.validate(types.uuid, [ChassisPatchType])
- @wsme_pecan.wsexpose(Chassis, types.uuid, body=[ChassisPatchType])
+ @expose.expose(Chassis, types.uuid, body=[ChassisPatchType])
def patch(self, chassis_uuid, patch):
"""Update an existing chassis.
@@ -250,7 +250,7 @@ class ChassisController(rest.RestController):
rpc_chassis.save()
return Chassis.convert_with_links(rpc_chassis)
- @wsme_pecan.wsexpose(None, types.uuid, status_code=204)
+ @expose.expose(None, types.uuid, status_code=204)
def delete(self, chassis_uuid):
"""Delete a chassis.
diff --git a/ironic/api/controllers/v1/driver.py b/ironic/api/controllers/v1/driver.py
index 49146d03b..89cbe40cf 100644
--- a/ironic/api/controllers/v1/driver.py
+++ b/ironic/api/controllers/v1/driver.py
@@ -17,10 +17,10 @@ import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
-import wsmeext.pecan as wsme_pecan
from ironic.api.controllers import base
from ironic.api.controllers import link
+from ironic.api import expose
from ironic.common import exception
from ironic.common.i18n import _
@@ -114,7 +114,7 @@ class DriverPassthruController(rest.RestController):
'methods': ['GET']
}
- @wsme_pecan.wsexpose(wtypes.text, wtypes.text)
+ @expose.expose(wtypes.text, wtypes.text)
def methods(self, driver_name):
"""Retrieve information about vendor methods of the given driver.
@@ -132,7 +132,7 @@ class DriverPassthruController(rest.RestController):
return _VENDOR_METHODS[driver_name]
- @wsme_pecan.wsexpose(wtypes.text, wtypes.text, wtypes.text,
+ @expose.expose(wtypes.text, wtypes.text, wtypes.text,
body=wtypes.text)
def _default(self, driver_name, method, data=None):
"""Call a driver API extension.
@@ -166,7 +166,7 @@ class DriversController(rest.RestController):
'properties': ['GET'],
}
- @wsme_pecan.wsexpose(DriverList)
+ @expose.expose(DriverList)
def get_all(self):
"""Retrieve a list of drivers."""
# FIXME(deva): formatting of the auto-generated REST API docs
@@ -176,7 +176,7 @@ class DriversController(rest.RestController):
driver_list = pecan.request.dbapi.get_active_driver_dict()
return DriverList.convert_with_links(driver_list)
- @wsme_pecan.wsexpose(Driver, wtypes.text)
+ @expose.expose(Driver, wtypes.text)
def get_one(self, driver_name):
"""Retrieve a single driver."""
# NOTE(russell_h): There is no way to make this more efficient than
@@ -191,7 +191,7 @@ class DriversController(rest.RestController):
raise exception.DriverNotFound(driver_name=driver_name)
- @wsme_pecan.wsexpose(wtypes.text, wtypes.text)
+ @expose.expose(wtypes.text, wtypes.text)
def properties(self, driver_name):
"""Retrieve property information of the given driver.
diff --git a/ironic/api/controllers/v1/node.py b/ironic/api/controllers/v1/node.py
index 50f750e39..ce48e0916 100644
--- a/ironic/api/controllers/v1/node.py
+++ b/ironic/api/controllers/v1/node.py
@@ -23,7 +23,6 @@ import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
-import wsmeext.pecan as wsme_pecan
from ironic.api.controllers import base
from ironic.api.controllers import link
@@ -31,10 +30,10 @@ from ironic.api.controllers.v1 import collection
from ironic.api.controllers.v1 import port
from ironic.api.controllers.v1 import types
from ironic.api.controllers.v1 import utils as api_utils
+from ironic.api import expose
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import states as ir_states
-from ironic.common import utils
from ironic import objects
from ironic.openstack.common import log
@@ -57,6 +56,13 @@ _VENDOR_METHODS = {}
def hide_fields_in_newer_versions(obj):
+ # if requested version is < 1.3, hide driver_internal_info
+ if pecan.request.version.minor < 3:
+ obj.driver_internal_info = wsme.Unset
+
+ if not api_utils.allow_node_logical_names():
+ obj.name = wsme.Unset
+
# if requested version is < 1.6, hide inspection_*_at fields
if pecan.request.version.minor < 6:
obj.inspection_finished_at = wsme.Unset
@@ -70,12 +76,6 @@ def assert_juno_provision_state_name(obj):
obj.provision_state = ir_states.NOSTATE
-def hide_driver_internal_info(obj):
- # if requested version is < 1.3, hide driver_internal_info
- if pecan.request.version.minor < 3:
- obj.driver_internal_info = wsme.Unset
-
-
def check_allow_management_verbs(verb):
# v1.4 added the MANAGEABLE state and two verbs to move nodes into
# and out of that state. Reject requests to do this in older versions
@@ -87,45 +87,6 @@ def check_allow_management_verbs(verb):
raise exception.NotAcceptable()
-def allow_logical_names():
- try:
- # v1.5 added logical name aliases
- if pecan.request.version.minor < 5:
- return False
- # ignore check if we're not in a pecan context
- except AttributeError:
- pass
- return True
-
-
-def is_valid_name(name):
- return utils.is_hostname_safe(name) and (not uuidutils.is_uuid_like(name))
-
-
-def _get_rpc_node(node_ident):
- """Get the RPC node from the node uuid or logical name.
-
- :param node_ident: the UUID or logical name of a node.
-
- :returns: The RPC Node.
- :raises: InvalidUuidOrName if the name or uuid provided is not valid.
- :raises: NodeNotFound if the node is not found.
-
- """
- # Check to see if the node_ident is a valid UUID. If it is, treat it
- # as a UUID.
- if uuidutils.is_uuid_like(node_ident):
- return objects.Node.get_by_uuid(pecan.request.context, node_ident)
-
- # If it was not UUID-like, but it is name-like, and we allow names,
- # check for nodes by that name
- if allow_logical_names() and utils.is_hostname_safe(node_ident):
- return objects.Node.get_by_name(pecan.request.context, node_ident)
-
- # It's not a valid uuid, or it's not a valid name, or we don't allow names
- raise exception.InvalidUuidOrName(name=node_ident)
-
-
class NodePatchType(types.JsonPatchType):
@staticmethod
@@ -162,7 +123,7 @@ class BootDeviceController(rest.RestController):
boot devices.
"""
- rpc_node = _get_rpc_node(node_ident)
+ rpc_node = api_utils.get_rpc_node(node_ident)
topic = pecan.request.rpcapi.get_topic_for(rpc_node)
if supported:
return pecan.request.rpcapi.get_supported_boot_devices(
@@ -171,7 +132,7 @@ class BootDeviceController(rest.RestController):
return pecan.request.rpcapi.get_boot_device(pecan.request.context,
rpc_node.uuid, topic)
- @wsme_pecan.wsexpose(None, types.uuid_or_name, wtypes.text, types.boolean,
+ @expose.expose(None, types.uuid_or_name, wtypes.text, types.boolean,
status_code=204)
def put(self, node_ident, boot_device, persistent=False):
"""Set the boot device for a node.
@@ -186,7 +147,7 @@ class BootDeviceController(rest.RestController):
Default: False.
"""
- rpc_node = _get_rpc_node(node_ident)
+ rpc_node = api_utils.get_rpc_node(node_ident)
topic = pecan.request.rpcapi.get_topic_for(rpc_node)
pecan.request.rpcapi.set_boot_device(pecan.request.context,
rpc_node.uuid,
@@ -194,7 +155,7 @@ class BootDeviceController(rest.RestController):
persistent=persistent,
topic=topic)
- @wsme_pecan.wsexpose(wtypes.text, types.uuid_or_name)
+ @expose.expose(wtypes.text, types.uuid_or_name)
def get(self, node_ident):
"""Get the current boot device for a node.
@@ -209,7 +170,7 @@ class BootDeviceController(rest.RestController):
"""
return self._get_boot_device(node_ident)
- @wsme_pecan.wsexpose(wtypes.text, types.uuid_or_name)
+ @expose.expose(wtypes.text, types.uuid_or_name)
def supported(self, node_ident):
"""Get a list of the supported boot devices.
@@ -246,13 +207,13 @@ class ConsoleInfo(base.APIBase):
class NodeConsoleController(rest.RestController):
- @wsme_pecan.wsexpose(ConsoleInfo, types.uuid_or_name)
+ @expose.expose(ConsoleInfo, types.uuid_or_name)
def get(self, node_ident):
"""Get connection information about the console.
:param node_ident: UUID or logical name of a node.
"""
- rpc_node = _get_rpc_node(node_ident)
+ rpc_node = api_utils.get_rpc_node(node_ident)
topic = pecan.request.rpcapi.get_topic_for(rpc_node)
try:
console = pecan.request.rpcapi.get_console_information(
@@ -264,7 +225,7 @@ class NodeConsoleController(rest.RestController):
return ConsoleInfo(console_enabled=console_state, console_info=console)
- @wsme_pecan.wsexpose(None, types.uuid_or_name, types.boolean,
+ @expose.expose(None, types.uuid_or_name, types.boolean,
status_code=202)
def put(self, node_ident, enabled):
"""Start and stop the node console.
@@ -273,7 +234,7 @@ class NodeConsoleController(rest.RestController):
:param enabled: Boolean value; whether to enable or disable the
console.
"""
- rpc_node = _get_rpc_node(node_ident)
+ rpc_node = api_utils.get_rpc_node(node_ident)
topic = pecan.request.rpcapi.get_topic_for(rpc_node)
pecan.request.rpcapi.set_console_mode(pecan.request.context,
rpc_node.uuid, enabled, topic)
@@ -341,7 +302,7 @@ class NodeStatesController(rest.RestController):
console = NodeConsoleController()
"""Expose console as a sub-element of states"""
- @wsme_pecan.wsexpose(NodeStates, types.uuid_or_name)
+ @expose.expose(NodeStates, types.uuid_or_name)
def get(self, node_ident):
"""List the states of the node.
@@ -350,10 +311,10 @@ class NodeStatesController(rest.RestController):
# NOTE(lucasagomes): All these state values come from the
# DB. Ironic counts with a periodic task that verify the current
# power states of the nodes and update the DB accordingly.
- rpc_node = _get_rpc_node(node_ident)
+ rpc_node = api_utils.get_rpc_node(node_ident)
return NodeStates.convert(rpc_node)
- @wsme_pecan.wsexpose(None, types.uuid_or_name, wtypes.text,
+ @expose.expose(None, types.uuid_or_name, wtypes.text,
status_code=202)
def power(self, node_ident, target):
"""Set the power state of the node.
@@ -363,12 +324,12 @@ class NodeStatesController(rest.RestController):
:raises: ClientSideError (HTTP 409) if a power operation is
already in progress.
:raises: InvalidStateRequested (HTTP 400) if the requested target
- state is not valid.
+ state is not valid or if the node is in CLEANING state.
"""
# TODO(lucasagomes): Test if it's able to transition to the
# target state from the current one
- rpc_node = _get_rpc_node(node_ident)
+ rpc_node = api_utils.get_rpc_node(node_ident)
topic = pecan.request.rpcapi.get_topic_for(rpc_node)
if target not in [ir_states.POWER_ON,
@@ -378,6 +339,12 @@ class NodeStatesController(rest.RestController):
action=target, node=node_ident,
state=rpc_node.power_state)
+ # Don't change power state for nodes in cleaning
+ elif rpc_node.provision_state == ir_states.CLEANING:
+ raise exception.InvalidStateRequested(
+ action=target, node=node_ident,
+ state=rpc_node.provision_state)
+
pecan.request.rpcapi.change_node_power_state(pecan.request.context,
rpc_node.uuid, target,
topic)
@@ -385,7 +352,7 @@ class NodeStatesController(rest.RestController):
url_args = '/'.join([node_ident, 'states'])
pecan.response.location = link.build_url('nodes', url_args)
- @wsme_pecan.wsexpose(None, types.uuid_or_name, wtypes.text,
+ @expose.expose(None, types.uuid_or_name, wtypes.text,
wtypes.text, status_code=202)
def provision(self, node_ident, target, configdrive=None):
"""Asynchronous trigger the provisioning of the node.
@@ -411,7 +378,7 @@ class NodeStatesController(rest.RestController):
not allow the requested state transition.
"""
check_allow_management_verbs(target)
- rpc_node = _get_rpc_node(node_ident)
+ rpc_node = api_utils.get_rpc_node(node_ident)
topic = pecan.request.rpcapi.get_topic_for(rpc_node)
# Normally, we let the task manager recognize and deal with
@@ -424,6 +391,11 @@ class NodeStatesController(rest.RestController):
raise exception.NodeLocked(node=rpc_node.uuid,
host=rpc_node.reservation)
+ if (target in (ir_states.ACTIVE, ir_states.REBUILD)
+ and rpc_node.maintenance):
+ raise exception.NodeInMaintenance(op=_('provisioning'),
+ node=rpc_node.uuid)
+
m = ir_states.machine.copy()
m.initialize(rpc_node.provision_state)
if not m.is_valid_event(ir_states.VERBS.get(target, target)):
@@ -621,9 +593,6 @@ class Node(base.APIBase):
# the user, it's internal only.
node.chassis_id = wtypes.Unset
- if not allow_logical_names():
- node.name = wsme.Unset
-
node.links = [link.Link.make_link('self', url, 'nodes',
node.uuid),
link.Link.make_link('bookmark', url, 'nodes',
@@ -635,7 +604,6 @@ class Node(base.APIBase):
def convert_with_links(cls, rpc_node, expand=True):
node = Node(**rpc_node.as_dict())
assert_juno_provision_state_name(node)
- hide_driver_internal_info(node)
hide_fields_in_newer_versions(node)
return cls._convert_with_links(node, pecan.request.host_url,
expand,
@@ -702,7 +670,7 @@ class NodeVendorPassthruController(rest.RestController):
'methods': ['GET']
}
- @wsme_pecan.wsexpose(wtypes.text, types.uuid_or_name)
+ @expose.expose(wtypes.text, types.uuid_or_name)
def methods(self, node_ident):
"""Retrieve information about vendor methods of the given node.
@@ -712,7 +680,7 @@ class NodeVendorPassthruController(rest.RestController):
:raises: NodeNotFound if the node is not found.
"""
# Raise an exception if node is not found
- rpc_node = _get_rpc_node(node_ident)
+ rpc_node = api_utils.get_rpc_node(node_ident)
if rpc_node.driver not in _VENDOR_METHODS:
topic = pecan.request.rpcapi.get_topic_for(rpc_node)
@@ -722,7 +690,7 @@ class NodeVendorPassthruController(rest.RestController):
return _VENDOR_METHODS[rpc_node.driver]
- @wsme_pecan.wsexpose(wtypes.text, types.uuid_or_name, wtypes.text,
+ @expose.expose(wtypes.text, types.uuid_or_name, wtypes.text,
body=wtypes.text)
def _default(self, node_ident, method, data=None):
"""Call a vendor extension.
@@ -732,7 +700,7 @@ class NodeVendorPassthruController(rest.RestController):
:param data: body of data to supply to the specified method.
"""
# Raise an exception if node is not found
- rpc_node = _get_rpc_node(node_ident)
+ rpc_node = api_utils.get_rpc_node(node_ident)
topic = pecan.request.rpcapi.get_topic_for(rpc_node)
# Raise an exception if method is not specified
@@ -753,7 +721,7 @@ class NodeVendorPassthruController(rest.RestController):
class NodeMaintenanceController(rest.RestController):
def _set_maintenance(self, node_ident, maintenance_mode, reason=None):
- rpc_node = _get_rpc_node(node_ident)
+ rpc_node = api_utils.get_rpc_node(node_ident)
rpc_node.maintenance = maintenance_mode
rpc_node.maintenance_reason = reason
@@ -765,7 +733,7 @@ class NodeMaintenanceController(rest.RestController):
pecan.request.rpcapi.update_node(pecan.request.context,
rpc_node, topic=topic)
- @wsme_pecan.wsexpose(None, types.uuid_or_name, wtypes.text,
+ @expose.expose(None, types.uuid_or_name, wtypes.text,
status_code=202)
def put(self, node_ident, reason=None):
"""Put the node in maintenance mode.
@@ -776,7 +744,7 @@ class NodeMaintenanceController(rest.RestController):
"""
self._set_maintenance(node_ident, True, reason=reason)
- @wsme_pecan.wsexpose(None, types.uuid_or_name, status_code=202)
+ @expose.expose(None, types.uuid_or_name, status_code=202)
def delete(self, node_ident):
"""Remove the node from maintenance mode.
@@ -869,7 +837,7 @@ class NodesController(rest.RestController):
except exception.InstanceNotFound:
return []
- @wsme_pecan.wsexpose(NodeCollection, types.uuid, types.uuid,
+ @expose.expose(NodeCollection, types.uuid, types.uuid,
types.boolean, types.boolean, types.uuid, int, wtypes.text,
wtypes.text)
def get_all(self, chassis_uuid=None, instance_uuid=None, associated=None,
@@ -896,7 +864,7 @@ class NodesController(rest.RestController):
associated, maintenance, marker,
limit, sort_key, sort_dir)
- @wsme_pecan.wsexpose(NodeCollection, types.uuid, types.uuid,
+ @expose.expose(NodeCollection, types.uuid, types.uuid,
types.boolean, types.boolean, types.uuid, int, wtypes.text,
wtypes.text)
def detail(self, chassis_uuid=None, instance_uuid=None, associated=None,
@@ -931,7 +899,7 @@ class NodesController(rest.RestController):
limit, sort_key, sort_dir, expand,
resource_url)
- @wsme_pecan.wsexpose(wtypes.text, types.uuid_or_name, types.uuid)
+ @expose.expose(wtypes.text, types.uuid_or_name, types.uuid)
def validate(self, node=None, node_uuid=None):
"""Validate the driver interfaces, using the node's UUID or name.
@@ -944,16 +912,17 @@ class NodesController(rest.RestController):
if node:
# We're invoking this interface using positional notation, or
# explicitly using 'node'. Try and determine which one.
- if not allow_logical_names() and not uuidutils.is_uuid_like(node):
+ if (not api_utils.allow_node_logical_names() and
+ not uuidutils.is_uuid_like(node)):
raise exception.NotAcceptable()
- rpc_node = _get_rpc_node(node_uuid or node)
+ rpc_node = api_utils.get_rpc_node(node_uuid or node)
topic = pecan.request.rpcapi.get_topic_for(rpc_node)
return pecan.request.rpcapi.validate_driver_interfaces(
pecan.request.context, rpc_node.uuid, topic)
- @wsme_pecan.wsexpose(Node, types.uuid_or_name)
+ @expose.expose(Node, types.uuid_or_name)
def get_one(self, node_ident):
"""Retrieve information about the given node.
@@ -962,10 +931,10 @@ class NodesController(rest.RestController):
if self.from_chassis:
raise exception.OperationNotPermitted
- rpc_node = _get_rpc_node(node_ident)
+ rpc_node = api_utils.get_rpc_node(node_ident)
return Node.convert_with_links(rpc_node)
- @wsme_pecan.wsexpose(Node, body=Node, status_code=201)
+ @expose.expose(Node, body=Node, status_code=201)
def post(self, node):
"""Create a new node.
@@ -993,9 +962,9 @@ class NodesController(rest.RestController):
# Verify that if we're creating a new node with a 'name' set
# that it is a valid name
if node.name:
- if not allow_logical_names():
+ if not api_utils.allow_node_logical_names():
raise exception.NotAcceptable()
- if not is_valid_name(node.name):
+ if not api_utils.is_valid_node_name(node.name):
msg = _("Cannot create node with invalid name %(name)s")
raise wsme.exc.ClientSideError(msg % {'name': node.name},
status_code=400)
@@ -1008,7 +977,7 @@ class NodesController(rest.RestController):
return Node.convert_with_links(new_node)
@wsme.validate(types.uuid, [NodePatchType])
- @wsme_pecan.wsexpose(Node, types.uuid_or_name, body=[NodePatchType])
+ @expose.expose(Node, types.uuid_or_name, body=[NodePatchType])
def patch(self, node_ident, patch):
"""Update an existing node.
@@ -1018,11 +987,21 @@ class NodesController(rest.RestController):
if self.from_chassis:
raise exception.OperationNotPermitted
- rpc_node = _get_rpc_node(node_ident)
+ rpc_node = api_utils.get_rpc_node(node_ident)
# Check if node is transitioning state, although nodes in some states
# can be updated.
- if ((rpc_node.target_power_state or rpc_node.target_provision_state)
+ if (rpc_node.provision_state == ir_states.CLEANING and
+ patch == [{'op': 'remove', 'path': '/instance_uuid'}]):
+ # Allow node.instance_uuid removal during cleaning, but not other
+ # operations.
+ # TODO(JoshNang) remove node.instance_uuid when removing
+ # instance_info and stop removing node.instance_uuid in the Nova
+ # Ironic driver. Bug: 1436568
+ LOG.debug('Removing instance uuid %(instance)s from node %(node)s',
+ {'instance': rpc_node.instance_uuid,
+ 'node': rpc_node.uuid})
+ elif ((rpc_node.target_power_state or rpc_node.target_provision_state)
and rpc_node.provision_state not in
ir_states.UPDATE_ALLOWED_STATES):
msg = _("Node %s can not be updated while a state transition "
@@ -1032,9 +1011,9 @@ class NodesController(rest.RestController):
# Verify that if we're patching 'name' that it is a valid
name = api_utils.get_patch_value(patch, '/name')
if name:
- if not allow_logical_names():
+ if not api_utils.allow_node_logical_names():
raise exception.NotAcceptable()
- if not is_valid_name(name):
+ if not api_utils.is_valid_node_name(name):
msg = _("Node %(node)s: Cannot change name to invalid "
"name '%(name)s'")
raise wsme.exc.ClientSideError(msg % {'node': node_ident,
@@ -1077,12 +1056,22 @@ class NodesController(rest.RestController):
e.code = 400
raise e
+ # NOTE(lucasagomes): If it's changing the driver and the console
+ # is enabled we prevent updating it because the new driver will
+ # not be able to stop a console started by the previous one.
+ delta = rpc_node.obj_what_changed()
+ if 'driver' in delta and rpc_node.console_enabled:
+ raise wsme.exc.ClientSideError(
+ _("Node %s can not update the driver while the console is "
+ "enabled. Please stop the console first.") % node_ident,
+ status_code=409)
+
new_node = pecan.request.rpcapi.update_node(
pecan.request.context, rpc_node, topic)
return Node.convert_with_links(new_node)
- @wsme_pecan.wsexpose(None, types.uuid_or_name, status_code=204)
+ @expose.expose(None, types.uuid_or_name, status_code=204)
def delete(self, node_ident):
"""Delete a node.
@@ -1091,7 +1080,7 @@ class NodesController(rest.RestController):
if self.from_chassis:
raise exception.OperationNotPermitted
- rpc_node = _get_rpc_node(node_ident)
+ rpc_node = api_utils.get_rpc_node(node_ident)
try:
topic = pecan.request.rpcapi.get_topic_for(rpc_node)
diff --git a/ironic/api/controllers/v1/port.py b/ironic/api/controllers/v1/port.py
index eaf5aacfb..9ceaafb1f 100644
--- a/ironic/api/controllers/v1/port.py
+++ b/ironic/api/controllers/v1/port.py
@@ -15,17 +15,18 @@
import datetime
+from oslo_utils import uuidutils
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
-import wsmeext.pecan as wsme_pecan
from ironic.api.controllers import base
from ironic.api.controllers import link
from ironic.api.controllers.v1 import collection
from ironic.api.controllers.v1 import types
from ironic.api.controllers.v1 import utils as api_utils
+from ironic.api import expose
from ironic.common import exception
from ironic.common.i18n import _
from ironic import objects
@@ -175,12 +176,12 @@ class PortsController(rest.RestController):
'detail': ['GET'],
}
- def _get_ports_collection(self, node_uuid, address, marker, limit,
+ def _get_ports_collection(self, node_ident, address, marker, limit,
sort_key, sort_dir, expand=False,
resource_url=None):
- if self.from_nodes and not node_uuid:
+ if self.from_nodes and not node_ident:
raise exception.MissingParameterValue(_(
- "Node id not specified."))
+ "Node identifier not specified."))
limit = api_utils.validate_limit(limit)
sort_dir = api_utils.validate_sort_dir(sort_dir)
@@ -190,12 +191,12 @@ class PortsController(rest.RestController):
marker_obj = objects.Port.get_by_uuid(pecan.request.context,
marker)
- if node_uuid:
+ if node_ident:
# FIXME(comstud): Since all we need is the node ID, we can
# make this more efficient by only querying
# for that column. This will get cleaned up
# as we move to the object interface.
- node = objects.Node.get_by_uuid(pecan.request.context, node_uuid)
+ node = api_utils.get_rpc_node(node_ident)
ports = objects.Port.list_by_node_id(pecan.request.context,
node.id, limit, marker_obj,
sort_key=sort_key,
@@ -227,13 +228,20 @@ class PortsController(rest.RestController):
except exception.PortNotFound:
return []
- @wsme_pecan.wsexpose(PortCollection, types.uuid, types.macaddress,
- types.uuid, int, wtypes.text, wtypes.text)
- def get_all(self, node_uuid=None, address=None, marker=None, limit=None,
- sort_key='id', sort_dir='asc'):
+ @expose.expose(PortCollection, types.uuid_or_name, types.uuid,
+ types.macaddress, types.uuid, int, wtypes.text,
+ wtypes.text)
+ def get_all(self, node=None, node_uuid=None, address=None, marker=None,
+ limit=None, sort_key='id', sort_dir='asc'):
"""Retrieve a list of ports.
- :param node_uuid: UUID of a node, to get only ports for that node.
+ Note that the 'node_uuid' interface is deprecated in favour
+ of the 'node' interface.
+
+ :param node: UUID or name of a node, to get only ports for that
+ node.
+ :param node_uuid: UUID of a node, to get only ports for that
+ node.
:param address: MAC address of a port, to get the port which has
this MAC address.
:param marker: pagination marker for large data sets.
@@ -241,16 +249,31 @@ class PortsController(rest.RestController):
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
"""
- return self._get_ports_collection(node_uuid, address, marker, limit,
- sort_key, sort_dir)
-
- @wsme_pecan.wsexpose(PortCollection, types.uuid, types.macaddress,
- types.uuid, int, wtypes.text, wtypes.text)
- def detail(self, node_uuid=None, address=None, marker=None, limit=None,
- sort_key='id', sort_dir='asc'):
+ if not node_uuid and node:
+ # We're invoking this interface using positional notation, or
+ # explicitly using 'node'. Try and determine which one.
+ # Make sure only one interface, node or node_uuid is used
+ if (not api_utils.allow_node_logical_names() and
+ not uuidutils.is_uuid_like(node)):
+ raise exception.NotAcceptable()
+
+ return self._get_ports_collection(node_uuid or node, address, marker,
+ limit, sort_key, sort_dir)
+
+ @expose.expose(PortCollection, types.uuid_or_name, types.uuid,
+ types.macaddress, types.uuid, int, wtypes.text,
+ wtypes.text)
+ def detail(self, node=None, node_uuid=None, address=None, marker=None,
+ limit=None, sort_key='id', sort_dir='asc'):
"""Retrieve a list of ports with detail.
- :param node_uuid: UUID of a node, to get only ports for that node.
+ Note that the 'node_uuid' interface is deprecated in favour
+ of the 'node' interface.
+
+ :param node: UUID or name of a node, to get only ports for that
+ node.
+ :param node_uuid: UUID of a node, to get only ports for that
+ node.
:param address: MAC address of a port, to get the port which has
this MAC address.
:param marker: pagination marker for large data sets.
@@ -258,6 +281,14 @@ class PortsController(rest.RestController):
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
"""
+ if not node_uuid and node:
+ # We're invoking this interface using positional notation, or
+ # explicitly using 'node'. Try and determine which one.
+ # Make sure only one interface, node or node_uuid is used
+ if (not api_utils.allow_node_logical_names() and
+ not uuidutils.is_uuid_like(node)):
+ raise exception.NotAcceptable()
+
# NOTE(lucasagomes): /detail should only work against collections
parent = pecan.request.path.split('/')[:-1][-1]
if parent != "ports":
@@ -265,11 +296,11 @@ class PortsController(rest.RestController):
expand = True
resource_url = '/'.join(['ports', 'detail'])
- return self._get_ports_collection(node_uuid, address, marker, limit,
- sort_key, sort_dir, expand,
+ return self._get_ports_collection(node_uuid or node, address, marker,
+ limit, sort_key, sort_dir, expand,
resource_url)
- @wsme_pecan.wsexpose(Port, types.uuid)
+ @expose.expose(Port, types.uuid)
def get_one(self, port_uuid):
"""Retrieve information about the given port.
@@ -281,7 +312,7 @@ class PortsController(rest.RestController):
rpc_port = objects.Port.get_by_uuid(pecan.request.context, port_uuid)
return Port.convert_with_links(rpc_port)
- @wsme_pecan.wsexpose(Port, body=Port, status_code=201)
+ @expose.expose(Port, body=Port, status_code=201)
def post(self, port):
"""Create a new port.
@@ -298,7 +329,7 @@ class PortsController(rest.RestController):
return Port.convert_with_links(new_port)
@wsme.validate(types.uuid, [PortPatchType])
- @wsme_pecan.wsexpose(Port, types.uuid, body=[PortPatchType])
+ @expose.expose(Port, types.uuid, body=[PortPatchType])
def patch(self, port_uuid, patch):
"""Update an existing port.
@@ -341,7 +372,7 @@ class PortsController(rest.RestController):
return Port.convert_with_links(new_port)
- @wsme_pecan.wsexpose(None, types.uuid, status_code=204)
+ @expose.expose(None, types.uuid, status_code=204)
def delete(self, port_uuid):
"""Delete a port.
diff --git a/ironic/api/controllers/v1/utils.py b/ironic/api/controllers/v1/utils.py
index 0c06a6edf..6132e120f 100644
--- a/ironic/api/controllers/v1/utils.py
+++ b/ironic/api/controllers/v1/utils.py
@@ -15,9 +15,15 @@
import jsonpatch
from oslo_config import cfg
+from oslo_utils import uuidutils
+import pecan
import wsme
+from ironic.common import exception
from ironic.common.i18n import _
+from ironic.common import utils
+from ironic import objects
+
CONF = cfg.CONF
@@ -56,3 +62,43 @@ def get_patch_value(patch, path):
for p in patch:
if p['path'] == path:
return p['value']
+
+
+def allow_node_logical_names():
+ # v1.5 added logical name aliases
+ return pecan.request.version.minor >= 5
+
+
+def get_rpc_node(node_ident):
+ """Get the RPC node from the node uuid or logical name.
+
+ :param node_ident: the UUID or logical name of a node.
+
+ :returns: The RPC Node.
+ :raises: InvalidUuidOrName if the name or uuid provided is not valid.
+ :raises: NodeNotFound if the node is not found.
+ """
+ # Check to see if the node_ident is a valid UUID. If it is, treat it
+ # as a UUID.
+ if uuidutils.is_uuid_like(node_ident):
+ return objects.Node.get_by_uuid(pecan.request.context, node_ident)
+
+ # We can refer to nodes by their name, if the client supports it
+ if allow_node_logical_names():
+ if utils.is_hostname_safe(node_ident):
+ return objects.Node.get_by_name(pecan.request.context, node_ident)
+ raise exception.InvalidUuidOrName(name=node_ident)
+
+ # Ensure we raise the same exception as we did for the Juno release
+ raise exception.NodeNotFound(node=node_ident)
+
+
+def is_valid_node_name(name):
+ """Determine if the provided name is a valid node name.
+
+ Check to see that the provided node name is valid, and isn't a UUID.
+
+ :param: name: the node name to check.
+ :returns: True if the name is valid, False otherwise.
+ """
+ return utils.is_hostname_safe(name) and (not uuidutils.is_uuid_like(name))
diff --git a/ironic/api/expose.py b/ironic/api/expose.py
new file mode 100644
index 000000000..46d4649a6
--- /dev/null
+++ b/ironic/api/expose.py
@@ -0,0 +1,24 @@
+#
+# Copyright 2015 Rackspace, Inc
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import wsmeext.pecan as wsme_pecan
+
+
+def expose(*args, **kwargs):
+ """Ensure that only JSON, and not XML, is supported."""
+ if 'rest_content_types' not in kwargs:
+ kwargs['rest_content_types'] = ('json',)
+ return wsme_pecan.wsexpose(*args, **kwargs)
diff --git a/ironic/api/middleware/auth_token.py b/ironic/api/middleware/auth_token.py
index 539b0359d..053862ce6 100644
--- a/ironic/api/middleware/auth_token.py
+++ b/ironic/api/middleware/auth_token.py
@@ -32,6 +32,8 @@ class AuthTokenMiddleware(auth_token.AuthProtocol):
"""
def __init__(self, app, conf, public_api_routes=[]):
+ # TODO(mrda): Remove .xml and ensure that doesn't result in a
+ # 401 Authentication Required instead of 404 Not Found
route_pattern_tpl = '%s(\.json|\.xml)?$'
try:
diff --git a/ironic/common/exception.py b/ironic/common/exception.py
index 71f0bd86e..6b62c380a 100644
--- a/ironic/common/exception.py
+++ b/ironic/common/exception.py
@@ -88,6 +88,13 @@ class IronicException(Exception):
super(IronicException, self).__init__(message)
+ def __str__(self):
+ """Encode to utf-8 then wsme api can consume it as well."""
+ if not six.PY3:
+ return unicode(self.args[0]).encode('utf-8')
+
+ return self.args[0]
+
def format_message(self):
if self.__class__.__name__.endswith('_Remote'):
return self.args[0]
diff --git a/ironic/common/glance_service/base_image_service.py b/ironic/common/glance_service/base_image_service.py
index 1b646d587..9a97cbbf0 100644
--- a/ironic/common/glance_service/base_image_service.py
+++ b/ironic/common/glance_service/base_image_service.py
@@ -22,6 +22,7 @@ import sys
import time
from glanceclient import client
+from glanceclient import exc as glance_exc
from oslo_config import cfg
import sendfile
import six.moves.urllib.parse as urlparse
@@ -36,23 +37,23 @@ CONF = cfg.CONF
def _translate_image_exception(image_id, exc_value):
- if isinstance(exc_value, (exception.Forbidden,
- exception.Unauthorized)):
+ if isinstance(exc_value, (glance_exc.Forbidden,
+ glance_exc.Unauthorized)):
return exception.ImageNotAuthorized(image_id=image_id)
- if isinstance(exc_value, exception.NotFound):
+ if isinstance(exc_value, glance_exc.NotFound):
return exception.ImageNotFound(image_id=image_id)
- if isinstance(exc_value, exception.BadRequest):
+ if isinstance(exc_value, glance_exc.BadRequest):
return exception.Invalid(exc_value)
return exc_value
def _translate_plain_exception(exc_value):
- if isinstance(exc_value, (exception.Forbidden,
- exception.Unauthorized)):
+ if isinstance(exc_value, (glance_exc.Forbidden,
+ glance_exc.Unauthorized)):
return exception.NotAuthorized(exc_value)
- if isinstance(exc_value, exception.NotFound):
+ if isinstance(exc_value, glance_exc.NotFound):
return exception.NotFound(exc_value)
- if isinstance(exc_value, exception.BadRequest):
+ if isinstance(exc_value, glance_exc.BadRequest):
return exception.Invalid(exc_value)
return exc_value
@@ -109,13 +110,13 @@ class BaseImageService(object):
:raises: GlanceConnectionFailed
"""
- retry_excs = (exception.ServiceUnavailable,
- exception.InvalidEndpoint,
- exception.CommunicationError)
- image_excs = (exception.Forbidden,
- exception.Unauthorized,
- exception.NotFound,
- exception.BadRequest)
+ retry_excs = (glance_exc.ServiceUnavailable,
+ glance_exc.InvalidEndpoint,
+ glance_exc.CommunicationError)
+ image_excs = (glance_exc.Forbidden,
+ glance_exc.Unauthorized,
+ glance_exc.NotFound,
+ glance_exc.BadRequest)
num_attempts = 1 + CONF.glance.glance_num_retries
for attempt in range(1, num_attempts + 1):
diff --git a/ironic/common/grub_conf.template b/ironic/common/grub_conf.template
index 746a43d97..2a979d2d6 100644
--- a/ironic/common/grub_conf.template
+++ b/ironic/common/grub_conf.template
@@ -1,4 +1,8 @@
-menuentry "install" {
-linux {{ linux }} {{ kernel_params }} --
-initrd {{ initrd }}
+set default=0
+set timeout=5
+set hidden_timeout_quiet=false
+
+menuentry "boot_partition" {
+linuxefi {{ linux }} {{ kernel_params }} --
+initrdefi {{ initrd }}
}
diff --git a/ironic/common/image_service.py b/ironic/common/image_service.py
index 57ab194d7..e4bef4ae3 100644
--- a/ironic/common/image_service.py
+++ b/ironic/common/image_service.py
@@ -65,8 +65,9 @@ glance_opts = [
'glance.'),
cfg.StrOpt('auth_strategy',
default='keystone',
- help='Default protocol to use when connecting to glance. '
- 'Set to https for SSL.'),
+ help='Authentication strategy to use when connecting to '
+ 'glance. Only "keystone" and "noauth" are currently '
+ 'supported by ironic.'),
]
CONF.register_opts(glance_opts, group='glance')
diff --git a/ironic/common/pxe_utils.py b/ironic/common/pxe_utils.py
index 3a214c3da..09528a218 100644
--- a/ironic/common/pxe_utils.py
+++ b/ironic/common/pxe_utils.py
@@ -23,6 +23,7 @@ from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import utils
+from ironic.drivers.modules import deploy_utils
from ironic.drivers import utils as driver_utils
from ironic.openstack.common import fileutils
from ironic.openstack.common import log as logging
@@ -79,12 +80,20 @@ def _link_mac_pxe_configs(task):
:param task: A TaskManager instance.
"""
- pxe_config_file_path = get_pxe_config_file_path(task.node.uuid)
- for mac in driver_utils.get_node_mac_addresses(task):
- mac_path = _get_pxe_mac_path(mac)
+
+ def create_link(mac_path):
utils.unlink_without_raise(mac_path)
utils.create_link_without_raise(pxe_config_file_path, mac_path)
+ pxe_config_file_path = get_pxe_config_file_path(task.node.uuid)
+ for mac in driver_utils.get_node_mac_addresses(task):
+ create_link(_get_pxe_mac_path(mac))
+ # TODO(lucasagomes): Backward compatibility with :hexraw,
+ # to be removed in M.
+ # see: https://bugs.launchpad.net/ironic/+bug/1441710
+ if CONF.pxe.ipxe_enabled:
+ create_link(_get_pxe_mac_path(mac, delimiter=''))
+
def _link_ip_address_pxe_configs(task):
"""Link each IP address with the PXE configuration file.
@@ -109,17 +118,20 @@ def _link_ip_address_pxe_configs(task):
ip_address_path)
-def _get_pxe_mac_path(mac):
+def _get_pxe_mac_path(mac, delimiter=None):
"""Convert a MAC address into a PXE config file name.
:param mac: A MAC address string in the format xx:xx:xx:xx:xx:xx.
+ :param delimiter: The MAC address delimiter. Defaults to dash ('-').
:returns: the path to the config file.
"""
- if CONF.pxe.ipxe_enabled:
- mac_file_name = mac.replace(':', '').lower()
- else:
- mac_file_name = "01-" + mac.replace(":", "-").lower()
+ if delimiter is None:
+ delimiter = '-'
+
+ mac_file_name = mac.replace(':', delimiter).lower()
+ if not CONF.pxe.ipxe_enabled:
+ mac_file_name = '01-' + mac_file_name
return os.path.join(get_root_dir(), PXE_CFG_DIR_NAME, mac_file_name)
@@ -191,7 +203,7 @@ def create_pxe_config(task, pxe_options, template=None):
pxe_config = _build_pxe_config(pxe_options, template)
utils.write_to_file(pxe_config_file_path, pxe_config)
- if driver_utils.get_node_capability(task.node, 'boot_mode') == 'uefi':
+ if deploy_utils.get_boot_mode_for_deploy(task.node) == 'uefi':
_link_ip_address_pxe_configs(task)
else:
_link_mac_pxe_configs(task)
@@ -205,7 +217,7 @@ def clean_up_pxe_config(task):
"""
LOG.debug("Cleaning up PXE config for node %s", task.node.uuid)
- if driver_utils.get_node_capability(task.node, 'boot_mode') == 'uefi':
+ if deploy_utils.get_boot_mode_for_deploy(task.node) == 'uefi':
api = dhcp_factory.DHCPFactory().provider
ip_addresses = api.get_ip_addresses(task)
if not ip_addresses:
@@ -220,6 +232,12 @@ def clean_up_pxe_config(task):
else:
for mac in driver_utils.get_node_mac_addresses(task):
utils.unlink_without_raise(_get_pxe_mac_path(mac))
+ # TODO(lucasagomes): Backward compatibility with :hexraw,
+ # to be removed in M.
+ # see: https://bugs.launchpad.net/ironic/+bug/1441710
+ if CONF.pxe.ipxe_enabled:
+ utils.unlink_without_raise(_get_pxe_mac_path(mac,
+ delimiter=''))
utils.rmtree_without_raise(os.path.join(get_root_dir(),
task.node.uuid))
@@ -252,7 +270,7 @@ def dhcp_options_for_instance(task):
dhcp_opts.append({'opt_name': 'bootfile-name',
'opt_value': ipxe_script_url})
else:
- if driver_utils.get_node_capability(task.node, 'boot_mode') == 'uefi':
+ if deploy_utils.get_boot_mode_for_deploy(task.node) == 'uefi':
boot_file = CONF.pxe.uefi_pxe_bootfile_name
else:
boot_file = CONF.pxe.pxe_bootfile_name
diff --git a/ironic/common/states.py b/ironic/common/states.py
index 9baf695cf..7ebd052b6 100644
--- a/ironic/common/states.py
+++ b/ironic/common/states.py
@@ -150,7 +150,7 @@ INSPECTFAIL = 'inspect failed'
""" Node inspection failed. """
-UPDATE_ALLOWED_STATES = (DEPLOYFAIL, INSPECTING, INSPECTFAIL)
+UPDATE_ALLOWED_STATES = (DEPLOYFAIL, INSPECTING, INSPECTFAIL, CLEANFAIL)
"""Transitional states in which we allow updating a node."""
diff --git a/ironic/common/utils.py b/ironic/common/utils.py
index 6d5c1b0a1..3633f826c 100644
--- a/ironic/common/utils.py
+++ b/ironic/common/utils.py
@@ -184,16 +184,26 @@ def is_hostname_safe(hostname):
* http://en.wikipedia.org/wiki/Hostname
* http://tools.ietf.org/html/rfc952
* http://tools.ietf.org/html/rfc1123
-
- Also allow "." because what kind of hostname doesn't allow that.
+ Allowing for hostnames, and hostnames + domains.
:param hostname: The hostname to be validated.
:returns: True if valid. False if not.
"""
- m = '^[a-z0-9]([a-z0-9\-\.]{0,61}[a-z0-9])?$'
- return (isinstance(hostname, six.string_types) and
- (re.match(m, hostname) is not None))
+ if not isinstance(hostname, six.string_types) or len(hostname) > 255:
+ return False
+
+ # Periods on the end of a hostname are ok, but complicates the
+ # regex so we'll do this manually
+ if hostname.endswith('.'):
+ hostname = hostname[:-1]
+
+ host = '[a-z0-9]([a-z0-9\-]{0,61}[a-z0-9])?'
+ domain = '[a-z0-9\-_]{0,62}[a-z0-9]'
+
+ m = '^' + host + '(\.' + domain + ')*$'
+
+ return re.match(m, hostname) is not None
def validate_and_normalize_mac(address):
diff --git a/ironic/conductor/manager.py b/ironic/conductor/manager.py
index 64f19e1bd..3322741ab 100644
--- a/ironic/conductor/manager.py
+++ b/ironic/conductor/manager.py
@@ -169,7 +169,7 @@ conductor_opts = [
help='Timeout (seconds) for waiting for node inspection. '
'0 - unlimited.'),
cfg.BoolOpt('clean_nodes',
- default=False,
+ default=True,
help='Cleaning is a configurable set of steps, such as '
'erasing disk drives, that are performed on the node '
'to ensure it is in a baseline state and ready to be '
@@ -182,11 +182,9 @@ conductor_opts = [
'driver\'s documentation for details. '
'NOTE: The introduction of the cleaning operation '
'causes instance deletion to take significantly '
- 'longer. While this provides a better and more '
- 'secure user experience, it does impact the service '
- 'behavior, and as such IS DISABLED BY DEFAULT until '
- 'consuming services (eg, Nova) have been updated to '
- 'accomodate the additional time for deletion.'),
+ 'longer. In an environment where all tenants are '
+ 'trusted (eg, because there is only one tenant), '
+ 'this option could be safely disabled.'),
]
CONF = cfg.CONF
CONF.register_opts(conductor_opts, 'conductor')
@@ -717,7 +715,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
# Infer the image type to make sure the deploy driver
# validates only the necessary variables for different
# image types.
- # NOTE(sirushtim): The iwdi variable can be None. It's upto
+ # NOTE(sirushtim): The iwdi variable can be None. It's up to
# the deploy driver to validate this.
iwdi = images.is_whole_disk_image(context, node.instance_info)
driver_internal_info['is_whole_disk_image'] = iwdi
@@ -1072,7 +1070,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
node.maintenance or node.reservation is not None):
continue
- with task_manager.acquire(context, node_id) as task:
+ with task_manager.acquire(context, node_uuid) as task:
if (task.node.provision_state == states.DEPLOYWAIT or
task.node.maintenance):
continue
@@ -1155,7 +1153,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
# Node is mapped here, but not updated by this conductor last
try:
- with task_manager.acquire(admin_context, node_id) as task:
+ with task_manager.acquire(admin_context, node_uuid) as task:
# NOTE(deva): now that we have the lock, check again to
# avoid racing with deletes and other state changes
node = task.node
@@ -1986,6 +1984,7 @@ def do_sync_power_state(task, count):
{'node': node.uuid, 'state': power_state})
node.power_state = power_state
node.save()
+ return 0
# If the node is now in the expected state, reset the counter
# otherwise, if we've exceeded the retry limit, stop here
diff --git a/ironic/db/sqlalchemy/alembic/versions/2fb93ffd2af1_increase_node_name_length.py b/ironic/db/sqlalchemy/alembic/versions/2fb93ffd2af1_increase_node_name_length.py
new file mode 100644
index 000000000..e690d6168
--- /dev/null
+++ b/ironic/db/sqlalchemy/alembic/versions/2fb93ffd2af1_increase_node_name_length.py
@@ -0,0 +1,42 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""increase-node-name-length
+
+Revision ID: 2fb93ffd2af1
+Revises: 4f399b21ae71
+Create Date: 2015-03-18 17:08:11.470791
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '2fb93ffd2af1'
+down_revision = '4f399b21ae71'
+
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+
+def upgrade():
+ op.alter_column('nodes', 'name',
+ existing_type=mysql.VARCHAR(length=63),
+ type_=sa.String(length=255),
+ existing_nullable=True)
+
+
+def downgrade():
+ op.alter_column('nodes', 'name',
+ existing_type=sa.String(length=255),
+ type_=mysql.VARCHAR(length=63),
+ existing_nullable=True)
diff --git a/ironic/db/sqlalchemy/models.py b/ironic/db/sqlalchemy/models.py
index ecc908885..f9c67317a 100644
--- a/ironic/db/sqlalchemy/models.py
+++ b/ironic/db/sqlalchemy/models.py
@@ -154,7 +154,7 @@ class Node(Base):
# filter on it more efficiently, even though it is
# user-settable, and would otherwise be in node.properties.
instance_uuid = Column(String(36), nullable=True)
- name = Column(String(63), nullable=True)
+ name = Column(String(255), nullable=True)
chassis_id = Column(Integer, ForeignKey('chassis.id'), nullable=True)
power_state = Column(String(15), nullable=True)
target_power_state = Column(String(15), nullable=True)
diff --git a/ironic/dhcp/base.py b/ironic/dhcp/base.py
index 624a8901c..a40a3258a 100644
--- a/ironic/dhcp/base.py
+++ b/ironic/dhcp/base.py
@@ -27,7 +27,7 @@ class BaseDHCP(object):
"""Base class for DHCP provider APIs."""
@abc.abstractmethod
- def update_port_dhcp_opts(self, port_id, dhcp_options):
+ def update_port_dhcp_opts(self, port_id, dhcp_options, token=None):
"""Update one or more DHCP options on the specified port.
:param port_id: designate which port these attributes
@@ -42,21 +42,24 @@ class BaseDHCP(object):
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
+ :param token: An optional authentication token.
:raises: FailedToUpdateDHCPOptOnPort
"""
@abc.abstractmethod
- def update_port_address(self, port_id, address):
+ def update_port_address(self, port_id, address, token=None):
"""Update a port's MAC address.
:param port_id: port id.
:param address: new MAC address.
+ :param token: An optional authentication token.
+
:raises: FailedToUpdateMacOnPort
"""
@abc.abstractmethod
- def update_dhcp_opts(self, task, options):
+ def update_dhcp_opts(self, task, options, vifs=None):
"""Send or update the DHCP BOOT options for this node.
:param task: A TaskManager instance.
@@ -70,6 +73,11 @@ class BaseDHCP(object):
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
+ :param vifs: a dict of Neutron port dicts to update DHCP options on.
+ The keys should be Ironic port UUIDs, and the values should be
+ Neutron port UUIDs
+ If the value is None, will get the list of ports from the Ironic
+ port objects.
:raises: FailedToUpdateDHCPOptOnPort
"""
diff --git a/ironic/dhcp/neutron.py b/ironic/dhcp/neutron.py
index ec5f15be8..c4ee54342 100644
--- a/ironic/dhcp/neutron.py
+++ b/ironic/dhcp/neutron.py
@@ -27,7 +27,6 @@ from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import keystone
from ironic.common import network
-from ironic.conductor import manager
from ironic.dhcp import base
from ironic.drivers.modules import ssh
from ironic.openstack.common import log as logging
@@ -310,25 +309,20 @@ class NeutronDHCPApi(base.BaseDHCP):
try:
port = neutron_client.create_port(body)
except neutron_client_exc.ConnectionFailed as e:
+ self._rollback_cleaning_ports(task)
msg = (_('Could not create cleaning port on network %(net)s '
'from %(node)s. %(exc)s') %
{'net': CONF.neutron.cleaning_network_uuid,
'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
- return manager.cleaning_error_handler(task, msg)
+ raise exception.NodeCleaningFailure(msg)
if not port.get('port') or not port['port'].get('id'):
- # Rollback changes
- try:
- self.delete_cleaning_ports(task)
- except Exception:
- # Log the error, but continue to cleaning error handler
- LOG.exception(_LE('Failed to rollback cleaning port '
- 'changes for node %s') % task.node.uuid)
+ self._rollback_cleaning_ports(task)
msg = (_('Failed to create cleaning ports for node '
'%(node)s') % task.node.uuid)
LOG.error(msg)
- return manager.cleaning_error_handler(task, msg)
+ raise exception.NodeCleaningFailure(msg)
# Match return value of get_node_vif_ids()
ports[ironic_port.uuid] = port['port']['id']
return ports
@@ -351,7 +345,7 @@ class NeutronDHCPApi(base.BaseDHCP):
{'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
- return manager.cleaning_error_handler(task, msg)
+ raise exception.NodeCleaningFailure(msg)
# Iterate the list of Neutron port dicts, remove the ones we added
for neutron_port in ports.get('ports', []):
@@ -367,4 +361,20 @@ class NeutronDHCPApi(base.BaseDHCP):
'node': task.node.uuid,
'exc': e})
LOG.exception(msg)
- return manager.cleaning_error_handler(task, msg)
+ raise exception.NodeCleaningFailure(msg)
+
+ def _rollback_cleaning_ports(self, task):
+ """Attempts to delete any ports created by cleaning
+
+ Purposefully will not raise any exceptions so error handling can
+ continue.
+
+ :param task: a TaskManager instance.
+ """
+ try:
+ self.delete_cleaning_ports(task)
+ except Exception:
+ # Log the error, but let the caller invoke the
+ # manager.cleaning_error_handler().
+ LOG.exception(_LE('Failed to rollback cleaning port '
+ 'changes for node %s') % task.node.uuid)
diff --git a/ironic/dhcp/none.py b/ironic/dhcp/none.py
index a0da7010d..bfe6c2d69 100644
--- a/ironic/dhcp/none.py
+++ b/ironic/dhcp/none.py
@@ -18,13 +18,14 @@ from ironic.dhcp import base
class NoneDHCPApi(base.BaseDHCP):
"""No-op DHCP API."""
- def update_port_dhcp_opts(self, port_id, dhcp_options):
+
+ def update_port_dhcp_opts(self, port_id, dhcp_options, token=None):
pass
- def update_dhcp_opts(self, task, options):
+ def update_dhcp_opts(self, task, options, vifs=None):
pass
- def update_port_address(self, port_id, address):
+ def update_port_address(self, port_id, address, token=None):
pass
def get_ip_addresses(self, task):
diff --git a/ironic/drivers/base.py b/ironic/drivers/base.py
index 3399a63b9..e0685d0e4 100644
--- a/ironic/drivers/base.py
+++ b/ironic/drivers/base.py
@@ -367,6 +367,9 @@ class PowerInterface(BaseInterface):
def reboot(self, task):
"""Perform a hard reboot of the task's node.
+ Drivers are expected to properly handle case when node is powered off
+ by powering it on.
+
:param task: a TaskManager instance containing the node to act on.
:raises: MissingParameterValue if a required parameter is missing.
"""
diff --git a/ironic/drivers/modules/agent.py b/ironic/drivers/modules/agent.py
index d2f07eaa2..6a590c8c4 100644
--- a/ironic/drivers/modules/agent.py
+++ b/ironic/drivers/modules/agent.py
@@ -57,7 +57,14 @@ agent_opts = [
'Python Agent ramdisk. If unset, will use the priority '
'set in the ramdisk (defaults to 10 for the '
'GenericHardwareManager). If set to 0, will not run '
- 'during cleaning.')
+ 'during cleaning.'),
+ cfg.BoolOpt('manage_tftp',
+ default=True,
+ help='Whether Ironic will manage TFTP files for the deploy '
+ 'ramdisks. If set to False, you will need to configure '
+ 'your own TFTP server that allows booting the deploy '
+ 'ramdisks.'
+ ),
]
CONF = cfg.CONF
@@ -97,7 +104,9 @@ def build_agent_options(node):
keystone.get_service_url()).rstrip('/')
agent_config_opts = {
'ipa-api-url': ironic_api,
- 'ipa-driver-name': node.driver
+ 'ipa-driver-name': node.driver,
+ # NOTE: The below entry is a temporary workaround for bug/1433812
+ 'coreos.configdrive': 0,
}
root_device = deploy_utils.parse_root_device_hints(node)
if root_device:
@@ -194,17 +203,19 @@ def build_instance_info_for_deploy(task):
def _prepare_pxe_boot(task):
"""Prepare the files required for PXE booting the agent."""
- pxe_info = _get_tftp_image_info(task.node)
- pxe_options = _build_pxe_config_options(task.node, pxe_info)
- pxe_utils.create_pxe_config(task,
- pxe_options,
- CONF.agent.agent_pxe_config_template)
- _cache_tftp_images(task.context, task.node, pxe_info)
+ if CONF.agent.manage_tftp:
+ pxe_info = _get_tftp_image_info(task.node)
+ pxe_options = _build_pxe_config_options(task.node, pxe_info)
+ pxe_utils.create_pxe_config(task,
+ pxe_options,
+ CONF.agent.agent_pxe_config_template)
+ _cache_tftp_images(task.context, task.node, pxe_info)
def _do_pxe_boot(task, ports=None):
"""Reboot the node into the PXE ramdisk.
+ :param task: a TaskManager instance
:param ports: a list of Neutron port dicts to update DHCP options on. If
None, will get the list of ports from the Ironic port objects.
"""
@@ -217,13 +228,13 @@ def _do_pxe_boot(task, ports=None):
def _clean_up_pxe(task):
"""Clean up left over PXE and DHCP files."""
- pxe_info = _get_tftp_image_info(task.node)
- for label in pxe_info:
- path = pxe_info[label][1]
- utils.unlink_without_raise(path)
- AgentTFTPImageCache().clean_up()
-
- pxe_utils.clean_up_pxe_config(task)
+ if CONF.agent.manage_tftp:
+ pxe_info = _get_tftp_image_info(task.node)
+ for label in pxe_info:
+ path = pxe_info[label][1]
+ utils.unlink_without_raise(path)
+ AgentTFTPImageCache().clean_up()
+ pxe_utils.clean_up_pxe_config(task)
class AgentDeploy(base.DeployInterface):
@@ -248,10 +259,11 @@ class AgentDeploy(base.DeployInterface):
"""
node = task.node
params = {}
- params['driver_info.deploy_kernel'] = node.driver_info.get(
- 'deploy_kernel')
- params['driver_info.deploy_ramdisk'] = node.driver_info.get(
- 'deploy_ramdisk')
+ if CONF.agent.manage_tftp:
+ params['driver_info.deploy_kernel'] = node.driver_info.get(
+ 'deploy_kernel')
+ params['driver_info.deploy_ramdisk'] = node.driver_info.get(
+ 'deploy_ramdisk')
image_source = node.instance_info.get('image_source')
params['instance_info.image_source'] = image_source
error_msg = _('Node %s failed to validate deploy image info. Some '
@@ -264,6 +276,15 @@ class AgentDeploy(base.DeployInterface):
"image_source's image_checksum must be provided in "
"instance_info for node %s") % node.uuid)
+ is_whole_disk_image = node.driver_internal_info.get(
+ 'is_whole_disk_image')
+ # TODO(sirushtim): Remove once IPA has support for partition images.
+ if is_whole_disk_image is False:
+ raise exception.InvalidParameterValue(_(
+ "Node %(node)s is configured to use the %(driver)s driver "
+ "which currently does not support deploying partition "
+ "images.") % {'node': node.uuid, 'driver': node.driver})
+
# Validate the root device hints
deploy_utils.parse_root_device_hints(node)
@@ -348,7 +369,7 @@ class AgentDeploy(base.DeployInterface):
:returns: A list of clean step dictionaries
"""
steps = deploy_utils.agent_get_clean_steps(task)
- if CONF.agent.agent_erase_devices_priority:
+ if CONF.agent.agent_erase_devices_priority is not None:
for step in steps:
if (step.get('step') == 'erase_devices' and
step.get('interface') == 'deploy'):
@@ -368,29 +389,44 @@ class AgentDeploy(base.DeployInterface):
return deploy_utils.agent_execute_clean_step(task, step)
def prepare_cleaning(self, task):
- """Boot into the agent to prepare for cleaning."""
+ """Boot into the agent to prepare for cleaning.
+
+ :param task: a TaskManager object containing the node
+ :raises NodeCleaningFailure: if the previous cleaning ports cannot
+ be removed or if new cleaning ports cannot be created
+ :returns: states.CLEANING to signify an asynchronous prepare
+ """
provider = dhcp_factory.DHCPFactory()
# If we have left over ports from a previous cleaning, remove them
if getattr(provider.provider, 'delete_cleaning_ports', None):
+ # Allow to raise if it fails, is caught and handled in conductor
provider.provider.delete_cleaning_ports(task)
# Create cleaning ports if necessary
ports = None
if getattr(provider.provider, 'create_cleaning_ports', None):
+ # Allow to raise if it fails, is caught and handled in conductor
ports = provider.provider.create_cleaning_ports(task)
+
_prepare_pxe_boot(task)
_do_pxe_boot(task, ports)
# Tell the conductor we are waiting for the agent to boot.
return states.CLEANING
def tear_down_cleaning(self, task):
- """Clean up the PXE and DHCP files after cleaning."""
+ """Clean up the PXE and DHCP files after cleaning.
+
+ :param task: a TaskManager object containing the node
+ :raises NodeCleaningFailure: if the cleaning ports cannot be
+ removed
+ """
manager_utils.node_power_action(task, states.POWER_OFF)
_clean_up_pxe(task)
# If we created cleaning ports, delete them
provider = dhcp_factory.DHCPFactory()
if getattr(provider.provider, 'delete_cleaning_ports', None):
+ # Allow to raise if it fails, is caught and handled in conductor
provider.provider.delete_cleaning_ports(task)
diff --git a/ironic/drivers/modules/agent_base_vendor.py b/ironic/drivers/modules/agent_base_vendor.py
index da8cc4aa9..f09aa4aff 100644
--- a/ironic/drivers/modules/agent_base_vendor.py
+++ b/ironic/drivers/modules/agent_base_vendor.py
@@ -147,10 +147,10 @@ class BaseAgentVendor(base.VendorInterface):
we restart cleaning.
"""
command = self._get_completed_cleaning_command(task)
- LOG.debug('Cleaning command status for node %(node)s on step %(step)s '
- '(command)%', {'node': task.node.uuid,
- 'step': task.node.clean_step,
- 'command': command})
+ LOG.debug('Cleaning command status for node %(node)s on step %(step)s:'
+ ' %(command)s', {'node': task.node.uuid,
+ 'step': task.node.clean_step,
+ 'command': command})
if not command:
# Command is not done yet
@@ -163,7 +163,7 @@ class BaseAgentVendor(base.VendorInterface):
'err': command.get('command_error'),
'step': task.node.clean_step})
LOG.error(msg)
- manager.cleaning_error_handler(task, msg)
+ return manager.cleaning_error_handler(task, msg)
elif command.get('command_status') == 'CLEAN_VERSION_MISMATCH':
# Restart cleaning, agent must have rebooted to new version
try:
@@ -175,7 +175,7 @@ class BaseAgentVendor(base.VendorInterface):
'err': command.get('command_error'),
'step': task.node.clean_step})
LOG.exception(msg)
- manager.cleaning_error_handler(task, msg)
+ return manager.cleaning_error_handler(task, msg)
self._notify_conductor_resume_clean(task)
elif command.get('command_status') == 'SUCCEEDED':
@@ -187,7 +187,7 @@ class BaseAgentVendor(base.VendorInterface):
'err': command.get('command_status'),
'step': task.node.clean_step})
LOG.error(msg)
- manager.cleaning_error_handler(task, msg)
+ return manager.cleaning_error_handler(task, msg)
@base.passthru(['POST'])
def heartbeat(self, task, **kwargs):
@@ -223,7 +223,12 @@ class BaseAgentVendor(base.VendorInterface):
# TODO(jimrollenhagen) improve error messages here
msg = _('Failed checking if deploy is done.')
try:
- if node.provision_state == states.DEPLOYWAIT:
+ if node.maintenance:
+ # this shouldn't happen often, but skip the rest if it does.
+ LOG.debug('Heartbeat from node %(node)s in maintenance mode; '
+ 'not taking any action.', {'node': node.uuid})
+ return
+ elif node.provision_state == states.DEPLOYWAIT:
msg = _('Node failed to get image for deploy.')
self.continue_deploy(task, **kwargs)
elif (node.provision_state == states.DEPLOYING and
@@ -308,8 +313,19 @@ class BaseAgentVendor(base.VendorInterface):
last_command = commands[-1]
+ if last_command['command_name'] != 'execute_clean_step':
+ # catches race condition where execute_clean_step is still
+ # processing so the command hasn't started yet
+ return
+
+ last_step = last_command['command_result'].get('clean_step')
if last_command['command_status'] == 'RUNNING':
return
+ elif (last_command['command_status'] == 'SUCCEEDED' and
+ last_step != task.node.clean_step):
+ # A previous clean_step was running, the new command has not yet
+ # started.
+ return
else:
return last_command
@@ -431,7 +447,7 @@ class BaseAgentVendor(base.VendorInterface):
task.process_event('done')
LOG.info(_LI('Deployment to node %s done'), task.node.uuid)
- def configure_local_boot(self, task, root_uuid,
+ def configure_local_boot(self, task, root_uuid=None,
efi_system_part_uuid=None):
"""Helper method to configure local boot on the node.
@@ -441,14 +457,17 @@ class BaseAgentVendor(base.VendorInterface):
:param task: a TaskManager object containing the node
:param root_uuid: The UUID of the root partition. This is used
- for identifying the partition which contains the image deployed.
+ for identifying the partition which contains the image deployed
+ or None in case of whole disk images which we expect to already
+ have a bootloader installed.
:param efi_system_part_uuid: The UUID of the efi system partition.
This is used only in uefi boot mode.
:raises: InstanceDeployFailure if bootloader installation failed or
on encountering error while setting the boot device on the node.
"""
node = task.node
- if not node.driver_internal_info.get('is_whole_disk_image'):
+ if not node.driver_internal_info.get(
+ 'is_whole_disk_image') and root_uuid:
result = self._client.install_bootloader(
node, root_uuid=root_uuid,
efi_system_part_uuid=efi_system_part_uuid)
diff --git a/ironic/drivers/modules/agent_client.py b/ironic/drivers/modules/agent_client.py
index 280b3741a..6f715dca1 100644
--- a/ironic/drivers/modules/agent_client.py
+++ b/ironic/drivers/modules/agent_client.py
@@ -73,7 +73,16 @@ class AgentClient(object):
headers=headers)
# TODO(russellhaering): real error handling
- return response.json()
+ try:
+ return response.json()
+ except ValueError:
+ msg = _(
+ 'Unable to decode response as JSON.\n'
+ 'Request URL: %(url)s\nRequest body: "%(body)s"\n'
+ 'Response: "%(response)s"'
+ ) % ({'response': response.text, 'body': body, 'url': url})
+ LOG.error(msg)
+ raise exception.IronicException(msg)
def get_commands_status(self, node):
url = self._get_command_url(node)
diff --git a/ironic/drivers/modules/agent_config.template b/ironic/drivers/modules/agent_config.template
index a23342df5..5c219cacb 100644
--- a/ironic/drivers/modules/agent_config.template
+++ b/ironic/drivers/modules/agent_config.template
@@ -2,4 +2,4 @@ default deploy
label deploy
kernel {{ pxe_options.deployment_aki_path }}
-append initrd={{ pxe_options.deployment_ari_path }} text {{ pxe_options.pxe_append_params }} ipa-api-url={{ pxe_options['ipa-api-url'] }} ipa-driver-name={{ pxe_options['ipa-driver-name'] }}{% if pxe_options.root_device %} root_device={{ pxe_options.root_device }}{% endif %}
+append initrd={{ pxe_options.deployment_ari_path }} text {{ pxe_options.pxe_append_params }} ipa-api-url={{ pxe_options['ipa-api-url'] }} ipa-driver-name={{ pxe_options['ipa-driver-name'] }}{% if pxe_options.root_device %} root_device={{ pxe_options.root_device }}{% endif %} coreos.configdrive=0
diff --git a/ironic/drivers/modules/amt/vendor.py b/ironic/drivers/modules/amt/vendor.py
index d1b5a962f..eccf8cbcc 100644
--- a/ironic/drivers/modules/amt/vendor.py
+++ b/ironic/drivers/modules/amt/vendor.py
@@ -17,6 +17,7 @@ AMT Vendor Methods
from ironic.common import boot_devices
from ironic.conductor import task_manager
from ironic.drivers import base
+from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules import pxe
@@ -25,6 +26,14 @@ class AMTPXEVendorPassthru(pxe.VendorPassthru):
@base.passthru(['POST'])
@task_manager.require_exclusive_lock
def pass_deploy_info(self, task, **kwargs):
- task.driver.management.ensure_next_boot_device(task.node,
- boot_devices.PXE)
+ if iscsi_deploy.get_boot_option(task.node) == "netboot":
+ task.driver.management.ensure_next_boot_device(task.node,
+ boot_devices.PXE)
super(AMTPXEVendorPassthru, self).pass_deploy_info(task, **kwargs)
+
+ @task_manager.require_exclusive_lock
+ def continue_deploy(self, task, **kwargs):
+ if iscsi_deploy.get_boot_option(task.node) == "netboot":
+ task.driver.management.ensure_next_boot_device(task.node,
+ boot_devices.PXE)
+ super(AMTPXEVendorPassthru, self).continue_deploy(task, **kwargs)
diff --git a/ironic/drivers/modules/boot.ipxe b/ironic/drivers/modules/boot.ipxe
index 25a0ea8dc..3567dc029 100644
--- a/ironic/drivers/modules/boot.ipxe
+++ b/ironic/drivers/modules/boot.ipxe
@@ -1,7 +1,7 @@
#!ipxe
# load the MAC-specific file or fail if it's not found
-chain --autofree pxelinux.cfg/${mac:hexraw} || goto error_no_config
+chain --autofree pxelinux.cfg/${mac:hexhyp} || goto error_no_config
:error_no_config
echo PXE boot failed. No configuration found for MAC ${mac}
diff --git a/ironic/drivers/modules/deploy_utils.py b/ironic/drivers/modules/deploy_utils.py
index 1591ea297..e6c2c48ef 100644
--- a/ironic/drivers/modules/deploy_utils.py
+++ b/ironic/drivers/modules/deploy_utils.py
@@ -73,10 +73,6 @@ LOG = logging.getLogger(__name__)
VALID_ROOT_DEVICE_HINTS = set(('size', 'model', 'wwn', 'serial', 'vendor'))
-def _get_agent_client():
- return agent_client.AgentClient()
-
-
# All functions are called from deploy() directly or indirectly.
# They are split for stub-out.
@@ -755,8 +751,14 @@ def _iscsi_setup_and_handle_errors(address, port, iqn, lun,
delete_iscsi(address, port, iqn)
-def notify_deploy_complete(address):
- """Notifies the completion of deployment to the baremetal node.
+def notify_ramdisk_to_proceed(address):
+ """Notifies the ramdisk waiting for instructions from Ironic.
+
+ DIB ramdisk (from init script) makes vendor passthrus and listens
+ on port 10000 for Ironic to notify back the completion of the task.
+ This method connects to port 10000 of the bare metal running the
+ ramdisk and then sends some data to notify the ramdisk to proceed
+ with its next task.
:param address: The IP address of the node.
"""
@@ -897,7 +899,7 @@ def agent_get_clean_steps(task):
:raises: NodeCleaningFailure if the agent returns invalid results
:returns: A list of clean step dictionaries
"""
- client = _get_agent_client()
+ client = agent_client.AgentClient()
ports = objects.Port.list_by_node_id(
task.context, task.node.id)
result = client.get_clean_steps(task.node, ports).get('command_result')
@@ -908,10 +910,10 @@ def agent_get_clean_steps(task):
'get_clean_steps for node %(node)s returned invalid result:'
' %(result)s') % ({'node': task.node.uuid, 'result': result}))
- driver_info = task.node.driver_internal_info
- driver_info['hardware_manager_version'] = result[
+ driver_internal_info = task.node.driver_internal_info
+ driver_internal_info['hardware_manager_version'] = result[
'hardware_manager_version']
- task.node.driver_internal_info = driver_info
+ task.node.driver_internal_info = driver_internal_info
task.node.save()
# Clean steps looks like {'HardwareManager': [{step1},{steps2}..]..}
@@ -935,7 +937,7 @@ def agent_execute_clean_step(task, step):
:raises: NodeCleaningFailure if the agent does not return a command status
:returns: states.CLEANING to signify the step will be completed async
"""
- client = _get_agent_client()
+ client = agent_client.AgentClient()
ports = objects.Port.list_by_node_id(
task.context, task.node.id)
result = client.execute_clean_step(step, task.node, ports)
@@ -968,8 +970,7 @@ def try_set_boot_device(task, device, persistent=True):
manager_utils.node_set_boot_device(task, device,
persistent=persistent)
except exception.IPMIFailure:
- if driver_utils.get_node_capability(task.node,
- 'boot_mode') == 'uefi':
+ if get_boot_mode_for_deploy(task.node) == 'uefi':
LOG.warning(_LW("ipmitool is unable to set boot device while "
"the node %s is in UEFI boot mode. Please set "
"the boot device manually.") % task.node.uuid)
@@ -1040,3 +1041,34 @@ def is_secure_boot_requested(node):
sec_boot = capabilities.get('secure_boot', 'false').lower()
return sec_boot == 'true'
+
+
+def get_boot_mode_for_deploy(node):
+ """Returns the boot mode that would be used for deploy.
+
+ This method returns boot mode to be used for deploy.
+ It returns 'uefi' if 'secure_boot' is set to 'true' in
+ 'instance_info/capabilities' of node.
+ Otherwise it returns value of 'boot_mode' in 'properties/capabilities'
+ of node if set. If that is not set, it returns boot mode in
+ 'instance_info/deploy_boot_mode' for the node.
+ It would return None if boot mode is present neither in 'capabilities' of
+ node 'properties' nor in node's 'instance_info' (which could also be None).
+
+ :param node: an ironic node object.
+ :returns: 'bios', 'uefi' or None
+ """
+
+ if is_secure_boot_requested(node):
+ LOG.debug('Deploy boot mode is uefi for %s.', node.uuid)
+ return 'uefi'
+
+ boot_mode = driver_utils.get_node_capability(node, 'boot_mode')
+ if boot_mode is None:
+ instance_info = node.instance_info
+ boot_mode = instance_info.get('deploy_boot_mode')
+
+ LOG.debug('Deploy boot mode is %(boot_mode)s for %(node)s.',
+ {'boot_mode': boot_mode, 'node': node.uuid})
+
+ return boot_mode.lower() if boot_mode else boot_mode
diff --git a/ironic/drivers/modules/drac/client.py b/ironic/drivers/modules/drac/client.py
index 3b4b88929..3b40bd8d6 100644
--- a/ironic/drivers/modules/drac/client.py
+++ b/ironic/drivers/modules/drac/client.py
@@ -15,15 +15,20 @@
Wrapper for pywsman.Client
"""
+import time
from xml.etree import ElementTree
from oslo_utils import importutils
from ironic.common import exception
+from ironic.common.i18n import _LW
from ironic.drivers.modules.drac import common as drac_common
+from ironic.openstack.common import log as logging
pywsman = importutils.try_import('pywsman')
+LOG = logging.getLogger(__name__)
+
_SOAP_ENVELOPE_URI = 'http://www.w3.org/2003/05/soap-envelope'
# Filter Dialects, see (Section 2.3.1):
@@ -36,6 +41,9 @@ RET_SUCCESS = '0'
RET_ERROR = '2'
RET_CREATED = '4096'
+RETRY_COUNT = 5
+RETRY_DELAY = 5
+
def get_wsman_client(node):
"""Return a DRAC client object.
@@ -53,6 +61,29 @@ def get_wsman_client(node):
return client
+def retry_on_empty_response(client, action, *args, **kwargs):
+ """Wrapper to retry an action on failure."""
+
+ func = getattr(client, action)
+ for i in range(RETRY_COUNT):
+ response = func(*args, **kwargs)
+ if response:
+ return response
+ else:
+ LOG.warning(_LW('Empty response on calling %(action)s on client. '
+ 'Last error (cURL error code): %(last_error)s, '
+ 'fault string: "%(fault_string)s" '
+ 'response_code: %(response_code)s. '
+ 'Retry attempt %(count)d') %
+ {'action': action,
+ 'last_error': client.last_error(),
+ 'fault_string': client.fault_string(),
+ 'response_code': client.response_code(),
+ 'count': i + 1})
+
+ time.sleep(RETRY_DELAY)
+
+
class Client(object):
def __init__(self, drac_host, drac_port, drac_path, drac_protocol,
@@ -96,15 +127,16 @@ class Client(object):
options.set_flags(pywsman.FLAG_ENUMERATION_OPTIMIZATION)
options.set_max_elements(100)
- doc = self.client.enumerate(options, filter_, resource_uri)
+ doc = retry_on_empty_response(self.client, 'enumerate',
+ options, filter_, resource_uri)
root = self._get_root(doc)
final_xml = root
find_query = './/{%s}Body' % _SOAP_ENVELOPE_URI
insertion_point = final_xml.find(find_query)
while doc.context() is not None:
- doc = self.client.pull(options, None, resource_uri,
- str(doc.context()))
+ doc = retry_on_empty_response(self.client, 'pull', options, None,
+ resource_uri, str(doc.context()))
root = self._get_root(doc)
for result in root.findall(find_query):
for child in list(result):
@@ -160,7 +192,9 @@ class Client(object):
for name, value in properties.items():
options.add_property(name, value)
- doc = self.client.invoke(options, resource_uri, method, xml_doc)
+ doc = retry_on_empty_response(self.client, 'invoke', options,
+ resource_uri, method, xml_doc)
+
root = self._get_root(doc)
return_value = drac_common.find_xml(root, 'ReturnValue',
diff --git a/ironic/drivers/modules/drac/power.py b/ironic/drivers/modules/drac/power.py
index 41c5aa3e0..1ea374234 100644
--- a/ironic/drivers/modules/drac/power.py
+++ b/ironic/drivers/modules/drac/power.py
@@ -135,7 +135,7 @@ class DracPower(base.PowerInterface):
with unexpected return value.
"""
- return _set_power_state(task.node, power_state)
+ _set_power_state(task.node, power_state)
@task_manager.require_exclusive_lock
def reboot(self, task):
@@ -148,4 +148,11 @@ class DracPower(base.PowerInterface):
:raises: DracUnexpectedReturnValue if the client received a response
with unexpected return value.
"""
- return _set_power_state(task.node, states.REBOOT)
+
+ current_power_state = _get_power_state(task.node)
+ if current_power_state == states.POWER_ON:
+ target_power_state = states.REBOOT
+ else:
+ target_power_state = states.POWER_ON
+
+ _set_power_state(task.node, target_power_state)
diff --git a/ironic/drivers/modules/elilo_efi_pxe_config.template b/ironic/drivers/modules/elilo_efi_pxe_config.template
index 88e1831d9..3aea37e3f 100644
--- a/ironic/drivers/modules/elilo_efi_pxe_config.template
+++ b/ironic/drivers/modules/elilo_efi_pxe_config.template
@@ -3,7 +3,7 @@ default=deploy
image={{pxe_options.deployment_aki_path}}
label=deploy
initrd={{pxe_options.deployment_ari_path}}
- append="selinux=0 disk={{ pxe_options.disk }} iscsi_target_iqn={{ pxe_options.iscsi_target_iqn }} deployment_id={{ pxe_options.deployment_id }} deployment_key={{ pxe_options.deployment_key }} ironic_api_url={{ pxe_options.ironic_api_url }} troubleshoot=0 text {{ pxe_options.pxe_append_params|default("", true) }} ip=%I:{{pxe_options.tftp_server}}:%G:%M:%H::on {% if pxe_options.root_device %}root_device={{ pxe_options.root_device }}{% endif %} ipa-api-url={{ pxe_options['ipa-api-url'] }} ipa-driver-name={{ pxe_options['ipa-driver-name'] }} boot_option={{ pxe_options.boot_option }} boot_mode={{ pxe_options['boot_mode'] }}"
+ append="selinux=0 disk={{ pxe_options.disk }} iscsi_target_iqn={{ pxe_options.iscsi_target_iqn }} deployment_id={{ pxe_options.deployment_id }} deployment_key={{ pxe_options.deployment_key }} ironic_api_url={{ pxe_options.ironic_api_url }} troubleshoot=0 text {{ pxe_options.pxe_append_params|default("", true) }} ip=%I:{{pxe_options.tftp_server}}:%G:%M:%H::on {% if pxe_options.root_device %}root_device={{ pxe_options.root_device }}{% endif %} ipa-api-url={{ pxe_options['ipa-api-url'] }} ipa-driver-name={{ pxe_options['ipa-driver-name'] }} boot_option={{ pxe_options.boot_option }} boot_mode={{ pxe_options['boot_mode'] }} coreos.configdrive=0"
image={{pxe_options.aki_path}}
@@ -13,4 +13,4 @@ image={{pxe_options.aki_path}}
image=chain.c32
label=boot_whole_disk
- append mbr:{{ DISK_IDENTIFIER }}
+ append="mbr:{{ DISK_IDENTIFIER }}"
diff --git a/ironic/drivers/modules/ilo/common.py b/ironic/drivers/modules/ilo/common.py
index 034a84c1e..1d1363d0d 100644
--- a/ironic/drivers/modules/ilo/common.py
+++ b/ironic/drivers/modules/ilo/common.py
@@ -30,7 +30,7 @@ from ironic.common.i18n import _LI
from ironic.common import images
from ironic.common import swift
from ironic.common import utils
-from ironic.drivers import utils as driver_utils
+from ironic.drivers.modules import deploy_utils
from ironic.openstack.common import log as logging
ilo_client = importutils.try_import('proliantutils.ilo.client')
@@ -75,15 +75,6 @@ CONSOLE_PROPERTIES = {
'console_port': _("node's UDP port to connect to. Only required for "
"console access.")
}
-INSPECT_PROPERTIES = {
- 'inspect_ports': _("Comma-separated values of ethernet ports "
- "to be identified for creating node "
- "ports. Valid values may be "
- "inspect_ports = '1,2,...n' or "
- "inspect_ports = 'all' or "
- "inspect_ports = 'none'. "
- "Required only for inspection.")
-}
CLEAN_PROPERTIES = {
'ilo_change_password': _("new password for iLO. Required if the clean "
"step 'reset_ilo_credential' is enabled.")
@@ -141,11 +132,6 @@ def parse_driver_info(node):
except ValueError:
not_integers.append(param)
- for param in INSPECT_PROPERTIES:
- value = info.get(param)
- if value:
- d_info[param] = value
-
if not_integers:
raise exception.InvalidParameterValue(_(
"The following iLO parameters from the node's driver_info "
@@ -346,42 +332,58 @@ def set_boot_mode(node, boot_mode):
def update_boot_mode(task):
- """Update 'boot_mode' capability value of node's 'capabilities' property.
+ """Update instance_info with boot mode to be used for deploy.
- This method updates the 'boot_mode' capability in node's 'capabilities'
- property if not set.
- It also sets the boot mode to be used in the next boot.
+ This method updates instance_info with boot mode to be used for
+ deploy if node properties['capabilities'] do not have boot_mode.
+ It sets the boot mode on the node.
:param task: Task object.
:raises: IloOperationError if setting boot mode failed.
"""
+
node = task.node
+ boot_mode = deploy_utils.get_boot_mode_for_deploy(node)
- boot_mode = driver_utils.get_node_capability(node, 'boot_mode')
if boot_mode is not None:
LOG.debug("Node %(uuid)s boot mode is being set to %(boot_mode)s",
{'uuid': node.uuid, 'boot_mode': boot_mode})
set_boot_mode(node, boot_mode)
return
- ilo_object = get_ilo_object(task.node)
+ LOG.debug("Check pending boot mode for node %s.", node.uuid)
+ ilo_object = get_ilo_object(node)
try:
- p_boot_mode = ilo_object.get_pending_boot_mode()
- if p_boot_mode == 'UNKNOWN':
- # NOTE(faizan) ILO will return this in remote cases and mostly on
- # the nodes which supports UEFI. Such nodes mostly comes with UEFI
- # as default boot mode. So we will try setting bootmode to UEFI
- # and if it fails then we fall back to BIOS boot mode.
- ilo_object.set_pending_boot_mode('UEFI')
- p_boot_mode = 'UEFI'
+ boot_mode = ilo_object.get_pending_boot_mode()
except ilo_error.IloCommandNotSupportedError:
- p_boot_mode = DEFAULT_BOOT_MODE
+ boot_mode = 'legacy'
- driver_utils.rm_node_capability(task, 'boot_mode')
+ if boot_mode != 'UNKNOWN':
+ boot_mode = BOOT_MODE_ILO_TO_GENERIC[boot_mode.lower()]
- driver_utils.add_node_capability(task, 'boot_mode',
- BOOT_MODE_ILO_TO_GENERIC[p_boot_mode.lower()])
+ if boot_mode == 'UNKNOWN':
+ # NOTE(faizan) ILO will return this in remote cases and mostly on
+ # the nodes which support UEFI. Such nodes mostly come with UEFI
+ # as the default boot mode. So we will try setting boot mode to UEFI
+ # and if it fails then we fall back to BIOS boot mode.
+ try:
+ boot_mode = 'uefi'
+ ilo_object.set_pending_boot_mode(
+ BOOT_MODE_GENERIC_TO_ILO[boot_mode].upper())
+ except ilo_error.IloError as ilo_exception:
+ operation = _("Setting %s as boot mode") % boot_mode
+ raise exception.IloOperationError(operation=operation,
+ error=ilo_exception)
+
+ LOG.debug("Node %(uuid)s boot mode is being set to %(boot_mode)s "
+ "as pending boot mode is unknown.",
+ {'uuid': node.uuid, 'boot_mode': boot_mode})
+
+ instance_info = node.instance_info
+ instance_info['deploy_boot_mode'] = boot_mode
+ node.instance_info = instance_info
+ node.save()
def setup_vmedia_for_boot(task, boot_iso, parameters=None):
diff --git a/ironic/drivers/modules/ilo/deploy.py b/ironic/drivers/modules/ilo/deploy.py
index 50b3b8621..86dc0c729 100644
--- a/ironic/drivers/modules/ilo/deploy.py
+++ b/ironic/drivers/modules/ilo/deploy.py
@@ -160,7 +160,7 @@ def _get_boot_iso(task, root_uuid):
# Option 3 - Create boot_iso from kernel/ramdisk, upload to Swift
# and provide its name.
deploy_iso_uuid = deploy_info['ilo_deploy_iso']
- boot_mode = driver_utils.get_node_capability(task.node, 'boot_mode')
+ boot_mode = deploy_utils.get_boot_mode_for_deploy(task.node)
boot_iso_object_name = _get_boot_iso_object_name(task.node)
kernel_params = CONF.pxe.pxe_append_params
container = CONF.ilo.swift_ilo_container
@@ -264,11 +264,13 @@ def _reboot_into(task, iso, ramdisk_options):
"""
ilo_common.setup_vmedia_for_boot(task, iso, ramdisk_options)
- # In secure boot mode, node will reboot twice internally to
- # enable/disable secure boot. Any one-time boot settings would
- # be lost. Hence setting persistent=True.
- manager_utils.node_set_boot_device(task, boot_devices.CDROM,
- persistent=True)
+ # In UEFI boot mode, upon inserting virtual CDROM, one has to reset the
+ # system to see it as a valid boot device in persistent boot devices.
+ # But virtual CDROM device is always available for one-time boot.
+ # During enable/disable of secure boot settings, iLO internally resets
+ # the server twice. But it retains one time boot settings across internal
+ # resets. Hence no impact of this change for secure boot deploy.
+ manager_utils.node_set_boot_device(task, boot_devices.CDROM)
manager_utils.node_power_action(task, states.REBOOT)
@@ -298,7 +300,7 @@ def _disable_secure_boot(task):
cur_sec_state = ilo_common.get_secure_boot_mode(task)
except exception.IloOperationNotSupported:
LOG.debug('Secure boot mode is not supported for node %s',
- task.node.uuid)
+ task.node.uuid)
return False
if cur_sec_state:
@@ -330,13 +332,17 @@ def _prepare_node_for_deploy(task):
if _disable_secure_boot(task):
change_boot_mode = False
- # Set boot_mode capability to uefi for secure boot
- if deploy_utils.is_secure_boot_requested(task.node):
- LOG.debug('Secure boot deploy requested for node %s', task.node.uuid)
- _enable_uefi_capability(task)
-
if change_boot_mode:
ilo_common.update_boot_mode(task)
+ else:
+ # Need to update boot mode that will be used during deploy, if one is
+ # not provided.
+ # Since secure boot was disabled, we are in 'uefi' boot mode.
+ if deploy_utils.get_boot_mode_for_deploy(task.node) is None:
+ instance_info = task.node.instance_info
+ instance_info['deploy_boot_mode'] = 'uefi'
+ task.node.instance_info = instance_info
+ task.node.save()
def _update_secure_boot_mode(task, mode):
@@ -361,15 +367,6 @@ def _update_secure_boot_mode(task, mode):
{'mode': mode, 'node': task.node.uuid})
-def _enable_uefi_capability(task):
- """Adds capability boot_mode='uefi' into Node property.
-
- :param task: a TaskManager instance containing the node to act on.
- """
- driver_utils.rm_node_capability(task, 'boot_mode')
- driver_utils.add_node_capability(task, 'boot_mode', 'uefi')
-
-
class IloVirtualMediaIscsiDeploy(base.DeployInterface):
def get_properties(self):
@@ -613,7 +610,7 @@ class IloVirtualMediaAgentDeploy(base.DeployInterface):
class IloVirtualMediaAgentVendorInterface(agent.AgentVendorInterface):
- """Interface for vendor passthru rateled actions."""
+ """Interface for vendor passthru related actions."""
def reboot_to_instance(self, task, **kwargs):
node = task.node
@@ -622,6 +619,10 @@ class IloVirtualMediaAgentVendorInterface(agent.AgentVendorInterface):
error = self.check_deploy_success(node)
if error is None:
+ # Set boot mode
+ ilo_common.update_boot_mode(task)
+
+ # Need to enable secure boot, if being requested
_update_secure_boot_mode(task, True)
super(IloVirtualMediaAgentVendorInterface,
@@ -641,8 +642,21 @@ class IloPXEDeploy(pxe.PXEDeploy):
environment for the node
:param task: a TaskManager instance containing the node to act on.
+ :raises: IloOperationError, if some operation on iLO failed.
+ :raises: InvalidParameterValue, if some information is invalid.
"""
ilo_common.update_boot_mode(task)
+
+ # Check if 'boot_option' is compatible with 'boot_mode' and image.
+ # Whole disk image deploy is not supported in UEFI boot mode if
+ # 'boot_option' is not 'local'.
+ # If boot_mode is not set in the node properties/capabilities then
+ # PXEDeploy.validate() would pass.
+ # Boot mode gets updated in prepare stage. It is possible that the
+ # deploy boot mode is 'uefi' after call to update_boot_mode().
+ # Hence a re-check is required here.
+ pxe.validate_boot_option_for_uefi(task.node)
+
super(IloPXEDeploy, self).prepare(task)
def deploy(self, task):
@@ -715,6 +729,8 @@ class VendorPassthru(agent_base_vendor.BaseAgentVendor):
"""
if method == 'pass_deploy_info':
iscsi_deploy.get_deploy_info(task.node, **kwargs)
+ elif method == 'pass_bootloader_install_info':
+ iscsi_deploy.validate_pass_bootloader_info_input(task, kwargs)
def _configure_vmedia_boot(self, task, root_uuid):
"""Configure vmedia boot for the node."""
@@ -724,9 +740,9 @@ class VendorPassthru(agent_base_vendor.BaseAgentVendor):
LOG.error(_LE("Cannot get boot ISO for node %s"), node.uuid)
return
- # In secure boot mode, node will reboot twice internally to
- # enable/disable secure boot. Any one-time boot settings would
- # be lost. Hence setting persistent=True.
+ # Upon deploy completion, some distros' cloud images reboot the system
+ # as part of their configuration. Hence the boot device should be
+ # persistent and not one-time.
ilo_common.setup_vmedia_for_boot(task, boot_iso)
manager_utils.node_set_boot_device(task,
boot_devices.CDROM,
@@ -739,6 +755,29 @@ class VendorPassthru(agent_base_vendor.BaseAgentVendor):
@base.passthru(['POST'])
@task_manager.require_exclusive_lock
+ def pass_bootloader_install_info(self, task, **kwargs):
+ """Accepts the results of bootloader installation.
+
+ This method acts as a vendor passthru and accepts the result of
+ bootloader installation. If the bootloader installation was
+ successful, then it notifies the baremetal to proceed to reboot
+ and makes the instance active. If bootloader installation failed,
+ then it sets provisioning as failed and powers off the node.
+
+ :param task: A TaskManager object.
+ :param kwargs: The arguments sent with vendor passthru. The expected
+ kwargs are::
+ 'key': The deploy key for authorization
+ 'status': 'SUCCEEDED' or 'FAILED'
+ 'error': The error message if status == 'FAILED'
+ 'address': The IP address of the ramdisk
+ """
+ task.process_event('resume')
+ iscsi_deploy.validate_bootloader_install_status(task, kwargs)
+ iscsi_deploy.finish_deploy(task, kwargs['address'])
+
+ @base.passthru(['POST'])
+ @task_manager.require_exclusive_lock
def pass_deploy_info(self, task, **kwargs):
"""Continues the iSCSI deployment from where ramdisk left off.
@@ -767,30 +806,36 @@ class VendorPassthru(agent_base_vendor.BaseAgentVendor):
'root uuid', uuid_dict.get('disk identifier'))
try:
- # For iscsi_ilo driver, we boot from disk everytime if the image
- # deployed is a whole disk image.
- if iscsi_deploy.get_boot_option(node) == "local" or iwdi:
- manager_utils.node_set_boot_device(task, boot_devices.DISK,
- persistent=True)
- else:
- self._configure_vmedia_boot(task, root_uuid_or_disk_id)
-
# Set boot mode
ilo_common.update_boot_mode(task)
# Need to enable secure boot, if being requested
_update_secure_boot_mode(task, True)
- deploy_utils.notify_deploy_complete(kwargs.get('address'))
+ # For iscsi_ilo driver, we boot from disk every time if the image
+ # deployed is a whole disk image.
+ if iscsi_deploy.get_boot_option(node) == "local" or iwdi:
+ manager_utils.node_set_boot_device(task, boot_devices.DISK,
+ persistent=True)
- LOG.info(_LI('Deployment to node %s done'), node.uuid)
- task.process_event('done')
+ # Ask the ramdisk to install bootloader and
+ # wait for the call-back through the vendor passthru
+ # 'pass_bootloader_install_info', if it's not a whole
+ # disk image.
+ if not iwdi:
+ deploy_utils.notify_ramdisk_to_proceed(kwargs['address'])
+ task.process_event('wait')
+ return
+ else:
+ self._configure_vmedia_boot(task, root_uuid_or_disk_id)
except Exception as e:
LOG.error(_LE('Deploy failed for instance %(instance)s. '
'Error: %(error)s'),
{'instance': node.instance_uuid, 'error': e})
msg = _('Failed to continue iSCSI deployment.')
deploy_utils.set_failed_state(task, msg)
+ else:
+ iscsi_deploy.finish_deploy(task, kwargs.get('address'))
@task_manager.require_exclusive_lock
def continue_deploy(self, task, **kwargs):
@@ -812,10 +857,11 @@ class VendorPassthru(agent_base_vendor.BaseAgentVendor):
ilo_common.cleanup_vmedia_boot(task)
+ iwdi = node.driver_internal_info.get('is_whole_disk_image')
uuid_dict = iscsi_deploy.do_agent_iscsi_deploy(task, self._client)
root_uuid = uuid_dict.get('root uuid')
- if iscsi_deploy.get_boot_option(node) == "local":
+ if iscsi_deploy.get_boot_option(node) == "local" or iwdi:
efi_system_part_uuid = uuid_dict.get(
'efi system partition uuid')
self.configure_local_boot(
diff --git a/ironic/drivers/modules/ilo/inspect.py b/ironic/drivers/modules/ilo/inspect.py
index a2756ce07..459e34bfe 100644
--- a/ironic/drivers/modules/ilo/inspect.py
+++ b/ironic/drivers/modules/ilo/inspect.py
@@ -170,7 +170,7 @@ def _update_capabilities(node, new_capabilities):
# occur in malformed capabilities like:
# properties/capabilities='boot_mode:bios,boot_option'.
msg = (_("Node %(node)s has invalid capabilities string "
- "%(capabilities), unable to modify the node "
+ "%(capabilities)s, unable to modify the node "
"properties['capabilities'] string")
% {'node': node.uuid, 'capabilities': node_capabilities})
raise exception.InvalidParameterValue(msg)
@@ -186,78 +186,6 @@ def _update_capabilities(node, new_capabilities):
for key, value in six.iteritems(cap_dict)])
-def _get_macs_for_desired_ports(node, macs):
- """Get the dict of MACs which are desired by the operator.
-
- Get the MACs for desired ports.
- Returns a dictionary of MACs associated with the ports specified
- in the node's driver_info/inspect_ports.
-
- The driver_info field is expected to be populated with
- comma-separated port numbers like driver_info/inspect_ports='1,2'.
- In this case the inspection is expected to create ironic ports
- only for these two ports.
- The proliantutils is expected to return key value pair for each
- MAC address like:
- {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
-
- Possible scenarios:
- 'inspect_ports' == 'all' : creates ports for all inspected MACs
- 'inspect_ports' == <valid_port_numbers>: creates ports for
- requested port numbers.
- 'inspect_ports' == <mix_of_valid_invalid> : raise error for
- invalid inputs.
- 'inspect_ports' == 'none' : doesn't do any action with the
- inspected mac addresses.
-
- This method is not called if 'inspect_ports' == 'none', hence the
- scenario is not covered under this method.
-
- :param node: a node object.
- :param macs: a dictionary of MAC addresses returned by the hardware
- with inspection.
- :returns: a dictionary of port numbers and MAC addresses with only
- the MACs requested by operator in
- node.driver_info['inspect_ports']
- :raises: HardwareInspectionFailure for the non-existing ports
- requested in node.driver_info['inspect_ports']
-
- """
- driver_info = node.driver_info
- desired_macs = str(driver_info.get('inspect_ports'))
-
- # If the operator has given 'all' just return all the macs
- # returned by inspection.
- if desired_macs.lower() == 'all':
- to_be_created_macs = macs
- else:
- to_be_created_macs = {}
- # The list should look like ['Port 1', 'Port 2'] as
- # iLO returns port numbers like this.
- desired_macs_list = [
- 'Port %s' % port_number
- for port_number in (desired_macs.split(','))]
-
- # Check if the given input is valid or not. Return all the
- # requested macs.
- non_existing_ports = []
- for port_number in desired_macs_list:
- mac_address = macs.get(port_number)
- if mac_address:
- to_be_created_macs[port_number] = mac_address
- else:
- non_existing_ports.append(port_number)
-
- # It is possible that operator has given a wrong input by mistake.
- if non_existing_ports:
- error = (_("Could not find requested ports %(ports)s on the "
- "node %(node)s")
- % {'ports': non_existing_ports, 'node': node.uuid})
- raise exception.HardwareInspectionFailure(error=error)
-
- return to_be_created_macs
-
-
def _get_capabilities(node, ilo_object):
"""inspects hardware and gets additional capabilities.
@@ -281,9 +209,7 @@ def _get_capabilities(node, ilo_object):
class IloInspect(base.InspectInterface):
def get_properties(self):
- d = ilo_common.REQUIRED_PROPERTIES.copy()
- d.update(ilo_common.INSPECT_PROPERTIES)
- return d
+ return ilo_common.REQUIRED_PROPERTIES
def validate(self, task):
"""Check that 'driver_info' contains required ILO credentials.
@@ -295,33 +221,18 @@ class IloInspect(base.InspectInterface):
:raises: InvalidParameterValue if required iLO parameters
are not valid.
:raises: MissingParameterValue if a required parameter is missing.
- :raises: InvalidParameterValue if invalid input provided.
-
"""
node = task.node
- driver_info = ilo_common.parse_driver_info(node)
- if 'inspect_ports' not in driver_info:
- raise exception.MissingParameterValue(_(
- "Missing 'inspect_ports' parameter in node's driver_info."))
- value = driver_info['inspect_ports']
- if (value.lower() != 'all' and value.lower() != 'none'
- and not all(s.isdigit() for s in value.split(','))):
- raise exception.InvalidParameterValue(_(
- "inspect_ports can accept either comma separated "
- "port numbers, or a single port number, or 'all' "
- "or 'none'. %(value)s given for node %(node)s "
- "driver_info['inspect_ports']")
- % {'value': value, 'node': node})
+ ilo_common.parse_driver_info(node)
def inspect_hardware(self, task):
"""Inspect hardware to get the hardware properties.
Inspects hardware to get the essential and additional hardware
properties. It fails if any of the essential properties
- are not received from the node or if 'inspect_ports' is
- not provided in driver_info.
- It doesn't fail if node fails to return any capabilities as
- the capabilities differ from hardware to hardware mostly.
+ are not received from the node. It doesn't fail if node fails
+ to return any capabilities as the capabilities differ from hardware
+ to hardware mostly.
:param task: a TaskManager instance.
:raises: HardwareInspectionFailure if essential properties
@@ -371,22 +282,8 @@ class IloInspect(base.InspectInterface):
task.node.save()
- # Get the desired node inputs from the driver_info and create ports
- # as requested. It doesn't delete the ports because there is
- # no way for the operator to know which all MACs are associated
- # with the node and which are not. The proliantutils can
- # return only embedded NICs mac addresses and not the STANDUP NIC
- # cards. The port creation code is not excercised if
- # 'inspect_ports' == 'none'.
-
- driver_info = task.node.driver_info
- if (driver_info['inspect_ports']).lower() != 'none':
- macs_input_given = (
- _get_macs_for_desired_ports(task.node, result['macs']))
-
- if macs_input_given:
- # Create ports only for the requested ports.
- _create_ports_if_not_exist(task.node, macs_input_given)
+ # Create ports for the nics detected.
+ _create_ports_if_not_exist(task.node, result['macs'])
LOG.debug(("Node properties for %(node)s are updated as "
"%(properties)s"),
diff --git a/ironic/drivers/modules/ilo/power.py b/ironic/drivers/modules/ilo/power.py
index 97e1c23e3..8b2760118 100644
--- a/ironic/drivers/modules/ilo/power.py
+++ b/ironic/drivers/modules/ilo/power.py
@@ -60,8 +60,15 @@ def _attach_boot_iso(task):
:param task: a TaskManager instance containing the node to act on.
"""
i_info = task.node.instance_info
-
- if 'ilo_boot_iso' in i_info:
+ node_state = task.node.provision_state
+
+ # NOTE: On instance rebuild, ilo_boot_iso will be present in
+ # instance_info but the node will be in DEPLOYING state.
+ # In such a scenario, the ilo_boot_iso shouldn't be
+ # attached to the node while powering on the node (the node
+ # should boot from deploy ramdisk instead, which will already
+ # be attached by the deploy driver).
+ if 'ilo_boot_iso' in i_info and node_state == states.ACTIVE:
ilo_common.setup_vmedia_for_boot(task, i_info['ilo_boot_iso'])
manager_utils.node_set_boot_device(task, boot_devices.CDROM)
diff --git a/ironic/drivers/modules/ipmitool.py b/ironic/drivers/modules/ipmitool.py
index 2826dd1e7..ddebd68ef 100644
--- a/ironic/drivers/modules/ipmitool.py
+++ b/ironic/drivers/modules/ipmitool.py
@@ -111,6 +111,13 @@ ipmitool_command_options = {
'dual_bridge': ['ipmitool', '-m', '0', '-b', '0', '-t', '0',
'-B', '0', '-T', '0', '-h']}
+# Note(TheJulia): This string is hardcoded in ipmitool's lanplus driver
+# and is substituted in return for the error code received from the IPMI
+# controller. As of 1.8.15, no internationalization support appears to
+# be in ipmitool which means the string should always be returned in this
+# form regardless of locale.
+IPMITOOL_RETRYABLE_FAILURES = ['insufficient resources for session']
+
def _check_option_support(options):
"""Checks if the specific ipmitool options are supported on host.
@@ -335,32 +342,68 @@ def _exec_ipmitool(driver_info, command):
args.append(driver_info[name])
# specify retry timing more precisely, if supported
+ num_tries = max(
+ (CONF.ipmi.retry_timeout // CONF.ipmi.min_command_interval), 1)
+
if _is_option_supported('timing'):
- num_tries = max(
- (CONF.ipmi.retry_timeout // CONF.ipmi.min_command_interval), 1)
args.append('-R')
args.append(str(num_tries))
args.append('-N')
args.append(str(CONF.ipmi.min_command_interval))
- # 'ipmitool' command will prompt password if there is no '-f' option,
- # we set it to '\0' to write a password file to support empty password
- with _make_password_file(driver_info['password'] or '\0') as pw_file:
- args.append('-f')
- args.append(pw_file)
- args.extend(command.split(" "))
+ end_time = (time.time() + CONF.ipmi.retry_timeout)
+
+ while True:
+ num_tries = num_tries - 1
# NOTE(deva): ensure that no communications are sent to a BMC more
# often than once every min_command_interval seconds.
time_till_next_poll = CONF.ipmi.min_command_interval - (
time.time() - LAST_CMD_TIME.get(driver_info['address'], 0))
if time_till_next_poll > 0:
time.sleep(time_till_next_poll)
- try:
- out, err = utils.execute(*args)
- finally:
- LAST_CMD_TIME[driver_info['address']] = time.time()
- return out, err
+ # Resetting the list that will be utilized so the password arguments
+ # from any previous execution are preserved.
+ cmd_args = args[:]
+ # 'ipmitool' command will prompt password if there is no '-f'
+ # option, we set it to '\0' to write a password file to support
+ # empty password
+ with _make_password_file(
+ driver_info['password'] or '\0'
+ ) as pw_file:
+ cmd_args.append('-f')
+ cmd_args.append(pw_file)
+ cmd_args.extend(command.split(" "))
+ try:
+ out, err = utils.execute(*cmd_args)
+ return out, err
+ except processutils.ProcessExecutionError as e:
+ with excutils.save_and_reraise_exception() as ctxt:
+ err_list = [x for x in IPMITOOL_RETRYABLE_FAILURES
+ if x in e.message]
+ if ((time.time() > end_time) or
+ (num_tries == 0) or
+ not err_list):
+ LOG.error(_LE('IPMI Error while attempting '
+ '"%(cmd)s" for node %(node)s. '
+ 'Error: %(error)s'),
+ {
+ 'node': driver_info['uuid'],
+ 'cmd': e.cmd,
+ 'error': e
+ })
+ else:
+ ctxt.reraise = False
+ LOG.warning(_LW('IPMI Error encountered, retrying '
+ '"%(cmd)s" for node %(node)s. '
+ 'Error: %(error)s'),
+ {
+ 'node': driver_info['uuid'],
+ 'cmd': e.cmd,
+ 'error': e
+ })
+ finally:
+ LAST_CMD_TIME[driver_info['address']] = time.time()
def _sleep_time(iter):
diff --git a/ironic/drivers/modules/ipxe_config.template b/ironic/drivers/modules/ipxe_config.template
index bd5647841..062776cc8 100644
--- a/ironic/drivers/modules/ipxe_config.template
+++ b/ironic/drivers/modules/ipxe_config.template
@@ -5,7 +5,7 @@ dhcp
goto deploy
:deploy
-kernel {{ pxe_options.deployment_aki_path }} selinux=0 disk={{ pxe_options.disk }} iscsi_target_iqn={{ pxe_options.iscsi_target_iqn }} deployment_id={{ pxe_options.deployment_id }} deployment_key={{ pxe_options.deployment_key }} ironic_api_url={{ pxe_options.ironic_api_url }} troubleshoot=0 text {{ pxe_options.pxe_append_params|default("", true) }} boot_option={{ pxe_options.boot_option }} ip=${ip}:${next-server}:${gateway}:${netmask} BOOTIF=${mac} {% if pxe_options.root_device %}root_device={{ pxe_options.root_device }}{% endif %} ipa-api-url={{ pxe_options['ipa-api-url'] }} ipa-driver-name={{ pxe_options['ipa-driver-name'] }}
+kernel {{ pxe_options.deployment_aki_path }} selinux=0 disk={{ pxe_options.disk }} iscsi_target_iqn={{ pxe_options.iscsi_target_iqn }} deployment_id={{ pxe_options.deployment_id }} deployment_key={{ pxe_options.deployment_key }} ironic_api_url={{ pxe_options.ironic_api_url }} troubleshoot=0 text {{ pxe_options.pxe_append_params|default("", true) }} boot_option={{ pxe_options.boot_option }} ip=${ip}:${next-server}:${gateway}:${netmask} BOOTIF=${mac} {% if pxe_options.root_device %}root_device={{ pxe_options.root_device }}{% endif %} ipa-api-url={{ pxe_options['ipa-api-url'] }} ipa-driver-name={{ pxe_options['ipa-driver-name'] }} coreos.configdrive=0
initrd {{ pxe_options.deployment_ari_path }}
boot
diff --git a/ironic/drivers/modules/iscsi_deploy.py b/ironic/drivers/modules/iscsi_deploy.py
index 000e76130..ac7315d33 100644
--- a/ironic/drivers/modules/iscsi_deploy.py
+++ b/ironic/drivers/modules/iscsi_deploy.py
@@ -22,9 +22,13 @@ from six.moves.urllib import parse
from ironic.common import exception
from ironic.common.glance_service import service_utils as glance_service_utils
from ironic.common.i18n import _
+from ironic.common.i18n import _LE
+from ironic.common.i18n import _LI
from ironic.common import image_service as service
from ironic.common import keystone
+from ironic.common import states
from ironic.common import utils
+from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import image_cache
from ironic.drivers import utils as driver_utils
@@ -416,9 +420,9 @@ def _get_boot_mode(node):
:param node: A single Node.
:returns: A string representing the boot mode type. Defaults to 'bios'.
"""
- boot_mode = driver_utils.get_node_capability(node, 'boot_mode')
+ boot_mode = deploy_utils.get_boot_mode_for_deploy(node)
if boot_mode:
- return boot_mode.lower()
+ return boot_mode
return "bios"
@@ -443,14 +447,23 @@ def build_deploy_ramdisk_options(node):
node.instance_info = i_info
node.save()
+ # XXX(jroll) DIB relies on boot_option=local to decide whether or not to
+ # lay down a bootloader. Hack this for now; fix it for real in Liberty.
+ # See also bug #1441556.
+ boot_option = get_boot_option(node)
+ if node.driver_internal_info.get('is_whole_disk_image'):
+ boot_option = 'netboot'
+
deploy_options = {
'deployment_id': node['uuid'],
'deployment_key': deploy_key,
'iscsi_target_iqn': "iqn-%s" % node.uuid,
'ironic_api_url': ironic_api,
'disk': CONF.pxe.disk_devices,
- 'boot_option': get_boot_option(node),
+ 'boot_option': boot_option,
'boot_mode': _get_boot_mode(node),
+ # NOTE: The below entry is a temporary workaround for bug/1433812
+ 'coreos.configdrive': 0,
}
root_device = deploy_utils.parse_root_device_hints(node)
@@ -537,3 +550,87 @@ def validate(task):
# Validate the root device hints
deploy_utils.parse_root_device_hints(node)
+
+
+def validate_pass_bootloader_info_input(task, input_params):
+ """Validates the input sent with bootloader install info passthru.
+
+ This method validates the input sent with bootloader install info
+ passthru.
+
+ :param task: A TaskManager object.
+ :param input_params: A dictionary of params sent as input to passthru.
+ :raises: InvalidParameterValue, if deploy key passed doesn't match the
+ one stored in instance_info.
+ :raises: MissingParameterValue, if some input is missing.
+ """
+ params = {'address': input_params.get('address'),
+ 'key': input_params.get('key'),
+ 'status': input_params.get('status')}
+ msg = _("Some mandatory input missing in 'pass_bootloader_info' "
+ "vendor passthru from ramdisk.")
+ deploy_utils.check_for_missing_params(params, msg)
+
+ deploy_key = task.node.instance_info['deploy_key']
+ if deploy_key != input_params.get('key'):
+ raise exception.InvalidParameterValue(
+ _("Deploy key %(key_sent)s does not match "
+ "with %(expected_key)s") %
+ {'key_sent': input_params.get('key'), 'expected_key': deploy_key})
+
+
+def validate_bootloader_install_status(task, input_params):
+ """Validate if bootloader was installed.
+
+ This method first validates if deploy key sent in vendor passthru
+ was correct one, and then validates whether bootloader installation
+ was successful or not.
+
+ :param task: A TaskManager object.
+ :param input_params: A dictionary of params sent as input to passthru.
+ :raises: InstanceDeployFailure, if bootloader installation was
+ reported from ramdisk as failure.
+ """
+ if input_params['status'] != 'SUCCEEDED':
+ msg = (_('Failed to install bootloader on node %(node)s. '
+ 'Error: %(error)s.') %
+ {'node': task.node.uuid, 'error': input_params.get('error')})
+ LOG.error(msg)
+ deploy_utils.set_failed_state(task, msg)
+ raise exception.InstanceDeployFailure(msg)
+
+
+def finish_deploy(task, address):
+ """Notifies the ramdisk to reboot the node and makes the instance active.
+
+ This method notifies the ramdisk to proceed to reboot and then
+ makes the instance active.
+
+ :param task: a TaskManager object.
+ :param address: The IP address of the bare metal node.
+ :raises: InstanceDeployFailure, if notifying ramdisk failed.
+ """
+ node = task.node
+ try:
+ deploy_utils.notify_ramdisk_to_proceed(address)
+ except Exception as e:
+ LOG.error(_LE('Deploy failed for instance %(instance)s. '
+ 'Error: %(error)s'),
+ {'instance': node.instance_uuid, 'error': e})
+ msg = (_('Failed to notify ramdisk to reboot after bootloader '
+ 'installation. Error: %s') % e)
+ deploy_utils.set_failed_state(task, msg)
+ raise exception.InstanceDeployFailure(msg)
+
+ # TODO(lucasagomes): When deploying a node with the DIB ramdisk
+ # Ironic will not power control the node at the end of the deployment,
+ # it's the DIB ramdisk that reboots the node. But, for the SSH driver
+ # some changes like setting the boot device only gets applied when the
+ # machine is powered off and on again. So the code below is enforcing
+ # it. For Liberty we need to change the DIB ramdisk so that Ironic
+ # always controls the power state of the node for all drivers.
+ if get_boot_option(node) == "local" and 'ssh' in node.driver:
+ manager_utils.node_power_action(task, states.REBOOT)
+
+ LOG.info(_LI('Deployment to node %s done'), node.uuid)
+ task.process_event('done')
diff --git a/ironic/drivers/modules/pxe.py b/ironic/drivers/modules/pxe.py
index 7fdfcde65..9ab3fc32c 100644
--- a/ironic/drivers/modules/pxe.py
+++ b/ironic/drivers/modules/pxe.py
@@ -28,7 +28,6 @@ from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _
from ironic.common.i18n import _LE
-from ironic.common.i18n import _LI
from ironic.common.i18n import _LW
from ironic.common import image_service as service
from ironic.common import keystone
@@ -186,6 +185,11 @@ def _build_pxe_config_options(node, pxe_info, ctx):
template.
"""
is_whole_disk_image = node.driver_internal_info.get('is_whole_disk_image')
+ if is_whole_disk_image:
+ # These are dummy values to satisfy elilo.
+ # image and initrd fields in elilo config cannot be blank.
+ kernel = 'no_kernel'
+ ramdisk = 'no_ramdisk'
if CONF.pxe.ipxe_enabled:
deploy_kernel = '/'.join([CONF.pxe.http_url, node.uuid,
@@ -206,13 +210,11 @@ def _build_pxe_config_options(node, pxe_info, ctx):
'deployment_aki_path': deploy_kernel,
'deployment_ari_path': deploy_ramdisk,
'pxe_append_params': CONF.pxe.pxe_append_params,
- 'tftp_server': CONF.pxe.tftp_server
+ 'tftp_server': CONF.pxe.tftp_server,
+ 'aki_path': kernel,
+ 'ari_path': ramdisk
}
- if not is_whole_disk_image:
- pxe_options.update({'aki_path': kernel,
- 'ari_path': ramdisk})
-
deploy_ramdisk_options = iscsi_deploy.build_deploy_ramdisk_options(node)
pxe_options.update(deploy_ramdisk_options)
@@ -230,6 +232,30 @@ def _get_token_file_path(node_uuid):
return os.path.join(CONF.pxe.tftp_root, 'token-' + node_uuid)
+def validate_boot_option_for_uefi(node):
+ """In uefi boot mode, validate if the boot option is compatible.
+
+ This method raises exception if whole disk image being deployed
+ in UEFI boot mode without 'boot_option' being set to 'local'.
+
+ :param node: a single Node.
+ :raises: InvalidParameterValue
+ """
+
+ boot_mode = deploy_utils.get_boot_mode_for_deploy(node)
+ boot_option = iscsi_deploy.get_boot_option(node)
+ if (boot_mode == 'uefi' and
+ node.driver_internal_info.get('is_whole_disk_image') and
+ boot_option != 'local'):
+ LOG.error(_LE("Whole disk image with netboot is not supported in UEFI "
+ "boot mode."))
+ raise exception.InvalidParameterValue(_(
+ "Conflict: Whole disk image being used for deploy, but "
+ "cannot be used with node %(node_uuid)s configured to use "
+ "UEFI boot with netboot option") %
+ {'node_uuid': node.uuid})
+
+
@image_cache.cleanup(priority=25)
class TFTPImageCache(image_cache.ImageCache):
def __init__(self, image_service=None):
@@ -327,7 +353,7 @@ class PXEDeploy(base.DeployInterface):
driver_utils.validate_boot_mode_capability(node)
driver_utils.validate_boot_option_capability(node)
- boot_mode = driver_utils.get_node_capability(node, 'boot_mode')
+ boot_mode = deploy_utils.get_boot_mode_for_deploy(task.node)
if CONF.pxe.ipxe_enabled:
if not CONF.pxe.http_url or not CONF.pxe.http_root:
@@ -343,6 +369,10 @@ class PXEDeploy(base.DeployInterface):
"%(node_uuid)s configured to use UEFI boot") %
{'node_uuid': node.uuid})
+ # Check if 'boot_option' is compatible with 'boot_mode' of uefi and
+ # image being deployed
+ validate_boot_option_for_uefi(task.node)
+
d_info = _parse_deploy_info(node)
iscsi_deploy.validate(task)
@@ -417,7 +447,7 @@ class PXEDeploy(base.DeployInterface):
pxe_options = _build_pxe_config_options(task.node, pxe_info,
task.context)
- if driver_utils.get_node_capability(task.node, 'boot_mode') == 'uefi':
+ if deploy_utils.get_boot_mode_for_deploy(task.node) == 'uefi':
pxe_config_template = CONF.pxe.uefi_pxe_config_template
else:
pxe_config_template = CONF.pxe.pxe_config_template
@@ -455,7 +485,7 @@ class PXEDeploy(base.DeployInterface):
task.node.uuid)
deploy_utils.switch_pxe_config(
pxe_config_path, root_uuid_or_disk_id,
- driver_utils.get_node_capability(task.node, 'boot_mode'),
+ deploy_utils.get_boot_mode_for_deploy(task.node),
iwdi)
def clean_up(self, task):
@@ -513,6 +543,7 @@ class VendorPassthru(agent_base_vendor.BaseAgentVendor):
Valid methods:
* pass_deploy_info
+ * pass_bootloader_install_info
:param task: a TaskManager instance containing the node to act on.
:param method: method to be validated.
@@ -522,6 +553,30 @@ class VendorPassthru(agent_base_vendor.BaseAgentVendor):
if method == 'pass_deploy_info':
driver_utils.validate_boot_option_capability(task.node)
iscsi_deploy.get_deploy_info(task.node, **kwargs)
+ elif method == 'pass_bootloader_install_info':
+ iscsi_deploy.validate_pass_bootloader_info_input(task, kwargs)
+
+ @base.passthru(['POST'])
+ @task_manager.require_exclusive_lock
+ def pass_bootloader_install_info(self, task, **kwargs):
+ """Accepts the results of bootloader installation.
+
+ This method acts as a vendor passthru and accepts the result of
+ the bootloader installation. If bootloader installation was
+ successful, then it notifies the bare metal to proceed to reboot
+ and makes the instance active. If the bootloader installation failed,
+ then it sets provisioning as failed and powers off the node.
+ :param task: A TaskManager object.
+ :param kwargs: The arguments sent with vendor passthru. The expected
+ kwargs are::
+ 'key': The deploy key for authorization
+ 'status': 'SUCCEEDED' or 'FAILED'
+ 'error': The error message if status == 'FAILED'
+ 'address': The IP address of the ramdisk
+ """
+ task.process_event('resume')
+ iscsi_deploy.validate_bootloader_install_status(task, kwargs)
+ iscsi_deploy.finish_deploy(task, kwargs['address'])
@base.passthru(['POST'])
@task_manager.require_exclusive_lock
@@ -556,25 +611,34 @@ class VendorPassthru(agent_base_vendor.BaseAgentVendor):
try:
if iscsi_deploy.get_boot_option(node) == "local":
deploy_utils.try_set_boot_device(task, boot_devices.DISK)
+
# If it's going to boot from the local disk, get rid of
# the PXE configuration files used for the deployment
pxe_utils.clean_up_pxe_config(task)
+
+ # Ask the ramdisk to install bootloader and
+ # wait for the call-back through the vendor passthru
+ # 'pass_bootloader_install_info', if it's not a
+ # whole disk image.
+ if not is_whole_disk_image:
+ deploy_utils.notify_ramdisk_to_proceed(kwargs['address'])
+ task.process_event('wait')
+ return
else:
pxe_config_path = pxe_utils.get_pxe_config_file_path(node.uuid)
- node_cap = driver_utils.get_node_capability(node, 'boot_mode')
+ boot_mode = deploy_utils.get_boot_mode_for_deploy(node)
deploy_utils.switch_pxe_config(pxe_config_path,
root_uuid_or_disk_id,
- node_cap, is_whole_disk_image)
+ boot_mode, is_whole_disk_image)
- deploy_utils.notify_deploy_complete(kwargs['address'])
- LOG.info(_LI('Deployment to node %s done'), node.uuid)
- task.process_event('done')
except Exception as e:
LOG.error(_LE('Deploy failed for instance %(instance)s. '
'Error: %(error)s'),
{'instance': node.instance_uuid, 'error': e})
msg = _('Failed to continue iSCSI deployment.')
deploy_utils.set_failed_state(task, msg)
+ else:
+ iscsi_deploy.finish_deploy(task, kwargs.get('address'))
@task_manager.require_exclusive_lock
def continue_deploy(self, task, **kwargs):
@@ -617,7 +681,7 @@ class VendorPassthru(agent_base_vendor.BaseAgentVendor):
root_uuid_or_disk_id = uuid_dict.get(
'root uuid', uuid_dict.get('disk identifier'))
pxe_config_path = pxe_utils.get_pxe_config_file_path(node.uuid)
- boot_mode = driver_utils.get_node_capability(node, 'boot_mode')
+ boot_mode = deploy_utils.get_boot_mode_for_deploy(node)
deploy_utils.switch_pxe_config(pxe_config_path,
root_uuid_or_disk_id,
boot_mode, is_whole_disk_image)
diff --git a/ironic/drivers/modules/pxe_config.template b/ironic/drivers/modules/pxe_config.template
index 334a6ac81..242182f6e 100644
--- a/ironic/drivers/modules/pxe_config.template
+++ b/ironic/drivers/modules/pxe_config.template
@@ -2,7 +2,7 @@ default deploy
label deploy
kernel {{ pxe_options.deployment_aki_path }}
-append initrd={{ pxe_options.deployment_ari_path }} selinux=0 disk={{ pxe_options.disk }} iscsi_target_iqn={{ pxe_options.iscsi_target_iqn }} deployment_id={{ pxe_options.deployment_id }} deployment_key={{ pxe_options.deployment_key }} ironic_api_url={{ pxe_options.ironic_api_url }} troubleshoot=0 text {{ pxe_options.pxe_append_params|default("", true) }} boot_option={{ pxe_options.boot_option }} {% if pxe_options.root_device %}root_device={{ pxe_options.root_device }}{% endif %} ipa-api-url={{ pxe_options['ipa-api-url'] }} ipa-driver-name={{ pxe_options['ipa-driver-name'] }} boot_mode={{ pxe_options['boot_mode'] }}
+append initrd={{ pxe_options.deployment_ari_path }} selinux=0 disk={{ pxe_options.disk }} iscsi_target_iqn={{ pxe_options.iscsi_target_iqn }} deployment_id={{ pxe_options.deployment_id }} deployment_key={{ pxe_options.deployment_key }} ironic_api_url={{ pxe_options.ironic_api_url }} troubleshoot=0 text {{ pxe_options.pxe_append_params|default("", true) }} boot_option={{ pxe_options.boot_option }} {% if pxe_options.root_device %}root_device={{ pxe_options.root_device }}{% endif %} ipa-api-url={{ pxe_options['ipa-api-url'] }} ipa-driver-name={{ pxe_options['ipa-driver-name'] }} boot_mode={{ pxe_options['boot_mode'] }} coreos.configdrive=0
ipappend 3
diff --git a/ironic/drivers/modules/snmp.py b/ironic/drivers/modules/snmp.py
index 10ab43166..7a252c691 100644
--- a/ironic/drivers/modules/snmp.py
+++ b/ironic/drivers/modules/snmp.py
@@ -118,7 +118,7 @@ class SNMPClient(object):
"""
if self.version == SNMP_V3:
# Handling auth/encryption credentials is not (yet) supported.
- # This version supports a security name analagous to community.
+ # This version supports a security name analogous to community.
return cmdgen.UsmUserData(self.security)
else:
mp_model = 1 if self.version == SNMP_V2C else 0
diff --git a/ironic/drivers/utils.py b/ironic/drivers/utils.py
index be2887f67..7caa6cabe 100644
--- a/ironic/drivers/utils.py
+++ b/ironic/drivers/utils.py
@@ -144,33 +144,6 @@ def get_node_capability(node, capability):
"Format should be 'key:val'."), node_capability)
-def rm_node_capability(task, capability):
- """Remove 'capability' from node's 'capabilities' property.
-
- :param task: Task object.
- :param capability: Capability key.
-
- """
- node = task.node
- properties = node.properties
- capabilities = properties.get('capabilities')
-
- if not capabilities:
- return
-
- caps = []
- for cap in capabilities.split(','):
- parts = cap.split(':')
- if len(parts) == 2 and parts[0] and parts[1]:
- if parts[0] == capability:
- continue
- caps.append(cap)
- new_cap_str = ",".join(caps)
- properties['capabilities'] = new_cap_str if new_cap_str else None
- node.properties = properties
- node.save()
-
-
def add_node_capability(task, capability, value):
"""Add 'capability' to node's 'capabilities' property.
diff --git a/ironic/locale/ironic-log-error.pot b/ironic/locale/ironic-log-error.pot
index ec19ab67e..1e63a60d5 100644
--- a/ironic/locale/ironic-log-error.pot
+++ b/ironic/locale/ironic-log-error.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: ironic 2015.1.dev31\n"
+"Project-Id-Version: ironic 2015.1.dev139\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-02-18 06:15+0000\n"
+"POT-Creation-Date: 2015-04-08 06:27+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -26,40 +26,48 @@ msgstr ""
msgid "Exception in string format operation"
msgstr ""
-#: ironic/common/images.py:132
+#: ironic/common/images.py:148
#, python-format
msgid "vfat image creation failed. Error: %s"
msgstr ""
-#: ironic/common/images.py:200
+#: ironic/common/images.py:218 ironic/common/images.py:284
msgid "Creating the filesystem root failed."
msgstr ""
-#: ironic/common/images.py:214
+#: ironic/common/images.py:233 ironic/common/images.py:310
msgid "Creating ISO image failed."
msgstr ""
-#: ironic/common/service.py:89
+#: ironic/common/images.py:540
+msgid "mounting the deploy iso failed."
+msgstr ""
+
+#: ironic/common/images.py:554
+msgid "examining the deploy iso failed."
+msgstr ""
+
+#: ironic/common/service.py:92
#, python-format
msgid "Service error occurred when stopping the RPC server. Error: %s"
msgstr ""
-#: ironic/common/service.py:94
+#: ironic/common/service.py:97
#, python-format
msgid "Service error occurred when cleaning up the RPC manager. Error: %s"
msgstr ""
-#: ironic/common/utils.py:398
+#: ironic/common/utils.py:401
#, python-format
msgid "Could not remove tmpdir: %s"
msgstr ""
-#: ironic/common/utils.py:429
+#: ironic/common/utils.py:432
#, python-format
msgid "Failed to make file system. File system %s is not supported."
msgstr ""
-#: ironic/common/utils.py:433
+#: ironic/common/utils.py:436
#, python-format
msgid "Failed to create a file system in %(path)s. Error: %(error)s"
msgstr ""
@@ -71,7 +79,7 @@ msgid ""
"attempt %(attempt)s of %(num_attempts)s failed."
msgstr ""
-#: ironic/conductor/manager.py:224
+#: ironic/conductor/manager.py:256
#, python-format
msgid ""
"Conductor %s cannot be started because no drivers were loaded. This "
@@ -79,110 +87,128 @@ msgid ""
"option."
msgstr ""
-#: ironic/conductor/manager.py:1029
+#: ironic/conductor/manager.py:799
+#, python-format
+msgid "Error in tear_down of node %(node)s: %(err)s"
+msgstr ""
+
+#: ironic/conductor/manager.py:1284
#, python-format
msgid "Failed to stop console while deleting the node %(node)s: %(err)s."
msgstr ""
-#: ironic/conductor/manager.py:1510
+#: ironic/conductor/manager.py:1911
#, python-format
msgid "Unexpected state %(state)s returned while deploying node %(node)s."
msgstr ""
-#: ironic/conductor/manager.py:1637
+#: ironic/conductor/manager.py:2011
#, python-format
msgid ""
"Failed to change power state of node %(node)s to '%(state)s'. Attempts "
"left: %(left)s."
msgstr ""
-#: ironic/dhcp/neutron.py:124
+#: ironic/conductor/manager.py:2043
+#, python-format
+msgid "Failed to inspect node %(node)s: %(err)s"
+msgstr ""
+
+#: ironic/dhcp/neutron.py:128
#, python-format
msgid "Failed to update Neutron port %s."
msgstr ""
-#: ironic/dhcp/neutron.py:139
+#: ironic/dhcp/neutron.py:143
#, python-format
msgid "Failed to update MAC address on Neutron port %s."
msgstr ""
-#: ironic/dhcp/neutron.py:206
+#: ironic/dhcp/neutron.py:216
#, python-format
msgid "Failed to Get IP address on Neutron port %s."
msgstr ""
-#: ironic/dhcp/neutron.py:222
+#: ironic/dhcp/neutron.py:232
#, python-format
msgid "Neutron returned invalid IPv4 address %s."
msgstr ""
-#: ironic/dhcp/neutron.py:226
+#: ironic/dhcp/neutron.py:236
#, python-format
msgid "No IP address assigned to Neutron port %s."
msgstr ""
-#: ironic/drivers/base.py:407
+#: ironic/dhcp/neutron.py:379
+#, python-format
+msgid "Failed to rollback cleaning port changes for node %s"
+msgstr ""
+
+#: ironic/drivers/base.py:511
#, python-format
msgid "vendor_passthru failed with method %s"
msgstr ""
-#: ironic/drivers/modules/agent_base_vendor.py:168
+#: ironic/drivers/modules/agent.py:188
#, python-format
-msgid "Async exception for %(node)s: %(msg)s"
+msgid ""
+"Agent deploy supports only HTTP(S) URLs as instance_info['image_source']."
+" Either %s is not a valid HTTP(S) URL or is not reachable."
msgstr ""
-#: ironic/drivers/modules/agent_base_vendor.py:282
+#: ironic/drivers/modules/agent_base_vendor.py:374
#, python-format
msgid "Could not find matching node for the provided MACs %s."
msgstr ""
-#: ironic/drivers/modules/deploy_utils.py:354
+#: ironic/drivers/modules/deploy_utils.py:449
#, python-format
msgid ""
"Failed to erase beginning of disk for node %(node)s. Command: "
"%(command)s. Error: %(error)s."
msgstr ""
-#: ironic/drivers/modules/deploy_utils.py:366
+#: ironic/drivers/modules/deploy_utils.py:461
#, python-format
msgid ""
"Failed to get disk block count for node %(node)s. Command: %(command)s. "
"Error: %(error)s."
msgstr ""
-#: ironic/drivers/modules/deploy_utils.py:379
+#: ironic/drivers/modules/deploy_utils.py:474
#, python-format
msgid ""
"Failed to erase the end of the disk on node %(node)s. Command: "
"%(command)s. Error: %(error)s."
msgstr ""
-#: ironic/drivers/modules/deploy_utils.py:530
-msgid "Failed to detect root device UUID."
+#: ironic/drivers/modules/deploy_utils.py:646
+#, python-format
+msgid "Failed to detect %s"
msgstr ""
-#: ironic/drivers/modules/deploy_utils.py:572
-#: ironic/drivers/modules/deploy_utils.py:578
+#: ironic/drivers/modules/deploy_utils.py:741
+#: ironic/drivers/modules/deploy_utils.py:747
#, python-format
msgid "Deploy to address %s failed."
msgstr ""
-#: ironic/drivers/modules/deploy_utils.py:573
+#: ironic/drivers/modules/deploy_utils.py:742
#, python-format
msgid "Command: %s"
msgstr ""
-#: ironic/drivers/modules/deploy_utils.py:574
+#: ironic/drivers/modules/deploy_utils.py:743
#, python-format
msgid "StdOut: %r"
msgstr ""
-#: ironic/drivers/modules/deploy_utils.py:575
+#: ironic/drivers/modules/deploy_utils.py:744
#, python-format
msgid "StdErr: %r"
msgstr ""
-#: ironic/drivers/modules/deploy_utils.py:659
+#: ironic/drivers/modules/deploy_utils.py:832
#, python-format
msgid ""
"Node %s failed to power off while handling deploy failure. This may be a "
@@ -190,56 +216,74 @@ msgid ""
"maintenance mode until the problem is resolved."
msgstr ""
-#: ironic/drivers/modules/ipminative.py:263
+#: ironic/drivers/modules/discoverd.py:160
+#, python-format
+msgid ""
+"Exception during contacting ironic-discoverd for inspection of node "
+"%(node)s: %(err)s"
+msgstr ""
+
+#: ironic/drivers/modules/discoverd.py:191
+#, python-format
+msgid ""
+"Unexpected exception while getting inspection status for node %s, will "
+"retry later"
+msgstr ""
+
+#: ironic/drivers/modules/discoverd.py:197
+#, python-format
+msgid "Inspection failed for node %(uuid)s with error: %(err)s"
+msgstr ""
+
+#: ironic/drivers/modules/ipminative.py:268
#, python-format
msgid ""
"IPMI get sensor data failed for node %(node_id)s with the following "
"error: %(error)s"
msgstr ""
-#: ironic/drivers/modules/ipminative.py:414
+#: ironic/drivers/modules/ipminative.py:419
#, python-format
msgid ""
"IPMI set boot device failed for node %(node_id)s with the following "
"error: %(error)s"
msgstr ""
-#: ironic/drivers/modules/ipminative.py:449
+#: ironic/drivers/modules/ipminative.py:454
#, python-format
msgid ""
"IPMI get boot device failed for node %(node_id)s with the following "
"error: %(error)s"
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:413
+#: ironic/drivers/modules/ipmitool.py:424
#, python-format
msgid ""
"IPMI power %(state)s timed out after %(tries)s retries on node "
"%(node_id)s."
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:573
+#: ironic/drivers/modules/ipmitool.py:584
#, python-format
msgid "IPMI \"raw bytes\" failed for node %(node_id)s with error: %(error)s."
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:879
+#: ironic/drivers/modules/ipmitool.py:890
#, python-format
msgid "IPMI \"bmc reset\" failed for node %(node_id)s with error: %(error)s."
msgstr ""
-#: ironic/drivers/modules/iscsi_deploy.py:266
+#: ironic/drivers/modules/iscsi_deploy.py:610 ironic/drivers/modules/pxe.py:635
+#: ironic/drivers/modules/ilo/deploy.py:833
#, python-format
-msgid "Error returned from deploy ramdisk: %s"
+msgid "Deploy failed for instance %(instance)s. Error: %(error)s"
msgstr ""
-#: ironic/drivers/modules/iscsi_deploy.py:279 ironic/drivers/modules/pxe.py:515
-#: ironic/drivers/modules/ilo/deploy.py:525
-#, python-format
-msgid "Deploy failed for instance %(instance)s. Error: %(error)s"
+#: ironic/drivers/modules/pxe.py:250
+msgid "Whole disk image with netboot is not supported in UEFI boot mode."
msgstr ""
-#: ironic/drivers/modules/pxe.py:328
+#: ironic/drivers/modules/pxe.py:365
msgid "UEFI boot mode is not supported with iPXE boot enabled."
msgstr ""
@@ -281,8 +325,8 @@ msgstr ""
#: ironic/drivers/modules/virtualbox.py:160
#, python-format
msgid ""
-"Failed while creating a VirtualMachine object for node %(node)s. Error: "
-"%(error)s."
+"Failed while creating a VirtualMachine object for node %(node_id)s. "
+"Error: %(error)s."
msgstr ""
#: ironic/drivers/modules/virtualbox.py:176
@@ -305,6 +349,42 @@ msgstr ""
msgid "'set_boot_device' failed for node %(node_id)s with error: %(error)s"
msgstr ""
+#: ironic/drivers/modules/amt/common.py:105
+#, python-format
+msgid "Call to AMT with URI %(uri)s failed: got Fault %(fault)s"
+msgstr ""
+
+#: ironic/drivers/modules/amt/common.py:129
+#, python-format
+msgid ""
+"Call to AMT with URI %(uri)s and method %(method)s failed: return value "
+"was %(value)s"
+msgstr ""
+
+#: ironic/drivers/modules/amt/management.py:62
+#, python-format
+msgid ""
+"Failed to set boot device %(boot_device)s for node %(node_id)s with "
+"error: %(error)s."
+msgstr ""
+
+#: ironic/drivers/modules/amt/management.py:95
+#, python-format
+msgid "Failed to enable boot config for node %(node_id)s with error: %(error)s."
+msgstr ""
+
+#: ironic/drivers/modules/amt/power.py:112
+#, python-format
+msgid ""
+"Failed to set power state %(state)s for node %(node_id)s with error: "
+"%(error)s."
+msgstr ""
+
+#: ironic/drivers/modules/amt/power.py:136
+#, python-format
+msgid "Failed to get power state for node %(node_id)s with error: %(error)s."
+msgstr ""
+
#: ironic/drivers/modules/drac/management.py:82
#, python-format
msgid ""
@@ -361,59 +441,67 @@ msgid ""
"%(target_power_state)s. Reason: %(error)s."
msgstr ""
-#: ironic/drivers/modules/ilo/common.py:412
+#: ironic/drivers/modules/ilo/common.py:450
#, python-format
msgid "Error while deleting %(object_name)s from %(container)s. Error: %(error)s"
msgstr ""
-#: ironic/drivers/modules/ilo/common.py:422
+#: ironic/drivers/modules/ilo/common.py:460
#, python-format
msgid ""
"Error while ejecting virtual media %(device)s from node %(uuid)s. Error: "
"%(error)s"
msgstr ""
-#: ironic/drivers/modules/ilo/deploy.py:109
+#: ironic/drivers/modules/ilo/deploy.py:122
#, python-format
msgid ""
-"Unable to find boot_iso in Glance, required to deploy node %(node)s in "
-"UEFI boot mode."
+"Virtual media deploy accepts only Glance images or HTTP(S) URLs as "
+"instance_info['ilo_boot_iso']. Either %s is not a valid HTTP(S) URL or is"
+" not reachable."
msgstr ""
-#: ironic/drivers/modules/ilo/deploy.py:115
+#: ironic/drivers/modules/ilo/deploy.py:149
#, python-format
msgid ""
-"Unable to find 'kernel_id' and 'ramdisk_id' in Glance image %(image)s for"
-" generating boot ISO for %(node)s"
+"Unable to find kernel or ramdisk for image %(image)s to generate boot ISO"
+" for %(node)s"
msgstr ""
-#: ironic/drivers/modules/ilo/deploy.py:156
+#: ironic/drivers/modules/ilo/deploy.py:197
#, python-format
msgid "Failed to clean up boot ISO for %(node)s.Error: %(error)s."
msgstr ""
-#: ironic/drivers/modules/ilo/deploy.py:509
+#: ironic/drivers/modules/ilo/deploy.py:741
#, python-format
msgid "Cannot get boot ISO for node %s"
msgstr ""
-#: ironic/drivers/modules/ilo/power.py:85
+#: ironic/drivers/modules/ilo/power.py:92
#, python-format
msgid "iLO get_power_state failed for node %(node_id)s with error: %(error)s."
msgstr ""
-#: ironic/drivers/modules/ilo/power.py:157
+#: ironic/drivers/modules/ilo/power.py:164
#, python-format
msgid ""
"iLO set_power_state failed to set state to %(tstate)s for node "
"%(node_id)s with error: %(error)s"
msgstr ""
-#: ironic/drivers/modules/ilo/power.py:170
+#: ironic/drivers/modules/ilo/power.py:177
#, python-format
msgid "iLO failed to change state to %(tstate)s within %(timeout)s sec"
msgstr ""
+#: ironic/drivers/modules/irmc/management.py:60
+#, python-format
+msgid ""
+"SCCI get sensor data failed for node %(node_id)s with the following "
+"error: %(error)s"
+msgstr ""
+
#: ironic/drivers/modules/irmc/power.py:65
#, python-format
msgid ""
@@ -431,16 +519,6 @@ msgstr ""
msgid "Unable to instantiate unregistered object type %(objtype)s"
msgstr ""
-#: ironic/openstack/common/excutils.py:76
-#, python-format
-msgid "Original exception being dropped: %s"
-msgstr ""
-
-#: ironic/openstack/common/excutils.py:105
-#, python-format
-msgid "Unexpected exception occurred %d time(s)... retrying."
-msgstr ""
-
#: ironic/openstack/common/loopingcall.py:95
msgid "in fixed duration looping call"
msgstr ""
@@ -454,21 +532,11 @@ msgstr ""
msgid "Error during %(full_task_name)s: %(e)s"
msgstr ""
-#: ironic/openstack/common/policy.py:563 ironic/openstack/common/policy.py:843
-#, python-format
-msgid "Failed to understand rule %s"
-msgstr ""
-
-#: ironic/openstack/common/policy.py:573
-#, python-format
-msgid "No handler for matches of kind %s"
-msgstr ""
-
-#: ironic/openstack/common/service.py:269
+#: ironic/openstack/common/service.py:276
msgid "Unhandled exception"
msgstr ""
-#: ironic/tests/db/sqlalchemy/test_migrations.py:174
+#: ironic/tests/db/sqlalchemy/test_migrations.py:168
#, python-format
msgid "Failed to migrate to version %(version)s on engine %(engine)s"
msgstr ""
diff --git a/ironic/locale/ironic-log-info.pot b/ironic/locale/ironic-log-info.pot
index 1316e7925..7472171e5 100644
--- a/ironic/locale/ironic-log-info.pot
+++ b/ironic/locale/ironic-log-info.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: ironic 2015.1.dev15\n"
+"Project-Id-Version: ironic 2015.1.dev139\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-02-13 06:14+0000\n"
+"POT-Creation-Date: 2015-04-08 06:27+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -31,115 +31,235 @@ msgstr ""
msgid "Loaded the following drivers: %s"
msgstr ""
-#: ironic/common/service.py:80
+#: ironic/common/service.py:83
#, python-format
msgid "Created RPC server for service %(service)s on host %(host)s."
msgstr ""
-#: ironic/common/service.py:98
+#: ironic/common/service.py:101
#, python-format
msgid "Stopped RPC server for service %(service)s on host %(host)s."
msgstr ""
-#: ironic/conductor/manager.py:260
+#: ironic/common/service.py:106
#, python-format
-msgid "Successfuly started conductor with hostname %(hostname)s."
+msgid ""
+"Got signal SIGUSR1. Not deregistering on next shutdown of service "
+"%(service)s on host %(host)s."
+msgstr ""
+
+#: ironic/conductor/manager.py:292
+#, python-format
+msgid "Successfully started conductor with hostname %(hostname)s."
msgstr ""
-#: ironic/conductor/manager.py:279
+#: ironic/conductor/manager.py:313
#, python-format
msgid "Successfully stopped conductor with hostname %(hostname)s."
msgstr ""
-#: ironic/conductor/manager.py:827
+#: ironic/conductor/manager.py:319
+#, python-format
+msgid "Not deregistering conductor with hostname %(hostname)s."
+msgstr ""
+
+#: ironic/conductor/manager.py:807
+#, python-format
+msgid "Successfully unprovisioned node %(node)s with instance %(instance)s."
+msgstr ""
+
+#: ironic/conductor/manager.py:877
+#, python-format
+msgid ""
+"Cleaning is disabled, node %s has been successfully moved to AVAILABLE "
+"state."
+msgstr ""
+
+#: ironic/conductor/manager.py:936
+#, python-format
+msgid "Executing %(state)s on node %(node)s, remaining steps: %(steps)s"
+msgstr ""
+
+#: ironic/conductor/manager.py:946
+#, python-format
+msgid "Executing %(step)s on node %(node)s"
+msgstr ""
+
+#: ironic/conductor/manager.py:965
+#, python-format
+msgid ""
+"Clean step %(step)s on node %(node)s being executed asynchronously, "
+"waiting for driver."
+msgstr ""
+
+#: ironic/conductor/manager.py:975
+#, python-format
+msgid "Node %(node)s finished clean step %(step)s"
+msgstr ""
+
+#: ironic/conductor/manager.py:990
+#, python-format
+msgid "Node %s cleaning complete"
+msgstr ""
+
+#: ironic/conductor/manager.py:1085
#, python-format
msgid ""
"During sync_power_state, node %(node)s was not found and presumed deleted"
" by another process."
msgstr ""
-#: ironic/conductor/manager.py:831
+#: ironic/conductor/manager.py:1089
#, python-format
msgid ""
"During sync_power_state, node %(node)s was already locked by another "
"process. Skip."
msgstr ""
-#: ironic/conductor/manager.py:1026
+#: ironic/conductor/manager.py:1288
#, python-format
msgid "Successfully deleted node %(node)s."
msgstr ""
-#: ironic/conductor/manager.py:1095
+#: ironic/conductor/manager.py:1307
#, python-format
-msgid "No console action was triggered because the console is already %s"
+msgid ""
+"Successfully deleted port %(port)s. The node associated with the port was"
+" %(node)s"
msgstr ""
-#: ironic/conductor/manager.py:1497
+#: ironic/conductor/manager.py:1378
#, python-format
-msgid "Successfully deployed node %(node)s with instance %(instance)s."
+msgid "No console action was triggered because the console is already %s"
msgstr ""
-#: ironic/conductor/manager.py:1526
+#: ironic/conductor/manager.py:1905
#, python-format
-msgid "Successfully unprovisioned node %(node)s with instance %(instance)s."
+msgid "Successfully deployed node %(node)s with instance %(instance)s."
msgstr ""
-#: ironic/conductor/manager.py:1601
+#: ironic/conductor/manager.py:1981
#, python-format
msgid ""
"During sync_power_state, node %(node)s has no previous known state. "
"Recording current state '%(state)s'."
msgstr ""
+#: ironic/conductor/manager.py:2056
+#, python-format
+msgid "Successfully inspected node %(node)s"
+msgstr ""
+
#: ironic/conductor/utils.py:124
#, python-format
-msgid "Succesfully set node %(node)s power state to %(state)s."
+msgid "Successfully set node %(node)s power state to %(state)s."
msgstr ""
-#: ironic/drivers/modules/image_cache.py:128
+#: ironic/drivers/modules/agent_base_vendor.py:448
+#: ironic/drivers/modules/iscsi_deploy.py:628
+#, python-format
+msgid "Deployment to node %s done"
+msgstr ""
+
+#: ironic/drivers/modules/discoverd.py:71
+#, python-format
+msgid ""
+"Inspection via ironic-discoverd is disabled in configuration for driver "
+"%s. To enable, change [discoverd] enabled = True."
+msgstr ""
+
+#: ironic/drivers/modules/discoverd.py:169
+#, python-format
+msgid "Node %s was sent to inspection to ironic-discoverd"
+msgstr ""
+
+#: ironic/drivers/modules/discoverd.py:204
+#, python-format
+msgid "Inspection finished successfully for node %s"
+msgstr ""
+
+#: ironic/drivers/modules/image_cache.py:138
#, python-format
msgid "Master cache miss for image %(uuid)s, starting download"
msgstr ""
-#: ironic/drivers/modules/image_cache.py:269
+#: ironic/drivers/modules/image_cache.py:279
#, python-format
msgid ""
"After cleaning up cache dir %(dir)s cache size %(actual)d is still larger"
" than threshold %(expected)d"
msgstr ""
-#: ironic/drivers/modules/iscsi_deploy.py:272
+#: ironic/drivers/modules/amt/management.py:67
#, python-format
-msgid "Continuing deployment for node %(node)s, params %(params)s"
+msgid "Successfully set boot device %(boot_device)s for node %(node_id)s"
msgstr ""
-#: ironic/drivers/modules/pxe.py:478 ironic/drivers/modules/ilo/deploy.py:518
+#: ironic/drivers/modules/amt/management.py:99
#, python-format
-msgid "Deployment to node %s done"
+msgid "Successfully enabled boot config for node %(node_id)s."
+msgstr ""
+
+#: ironic/drivers/modules/amt/power.py:117
+#, python-format
+msgid "Power state set to %(state)s for node %(node_id)s"
msgstr ""
-#: ironic/drivers/modules/ilo/common.py:293
+#: ironic/drivers/modules/ilo/common.py:300
#, python-format
msgid "Attached virtual media %s successfully."
msgstr ""
-#: ironic/drivers/modules/ilo/common.py:311
+#: ironic/drivers/modules/ilo/common.py:318
#, python-format
msgid "Node %(uuid)s pending boot mode is %(boot_mode)s."
msgstr ""
-#: ironic/drivers/modules/ilo/common.py:323
+#: ironic/drivers/modules/ilo/common.py:330
#, python-format
msgid "Node %(uuid)s boot mode is set to %(boot_mode)s."
msgstr ""
-#: ironic/drivers/modules/ilo/common.py:371
+#: ironic/drivers/modules/ilo/common.py:410
#, python-format
msgid "Setting up node %s to boot from virtual media"
msgstr ""
-#: ironic/openstack/common/eventlet_backdoor.py:140
+#: ironic/drivers/modules/ilo/deploy.py:362
+#, python-format
+msgid "Changed secure boot to %(mode)s for node %(node)s"
+msgstr ""
+
+#: ironic/drivers/modules/ilo/inspect.py:57
+#, python-format
+msgid "Port created for MAC address %(address)s for node %(node)s"
+msgstr ""
+
+#: ironic/drivers/modules/ilo/inspect.py:254
+#, python-format
+msgid "The node %s is not powered on. Powering on the node for inspection."
+msgstr ""
+
+#: ironic/drivers/modules/ilo/inspect.py:293
+#, python-format
+msgid "Node %s inspected."
+msgstr ""
+
+#: ironic/drivers/modules/ilo/inspect.py:296
+#, python-format
+msgid ""
+"The node %s was powered on for inspection. Powered off the node as "
+"inspection completed."
+msgstr ""
+
+#: ironic/drivers/modules/ilo/management.py:252
+#, python-format
+msgid ""
+"Missing 'ilo_change_password' parameter in driver_info. Clean step "
+"'reset_ilo_credential' is not performed on node %s."
+msgstr ""
+
+#: ironic/openstack/common/eventlet_backdoor.py:146
#, python-format
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
msgstr ""
@@ -154,59 +274,54 @@ msgstr ""
msgid "Skipping periodic task %(task)s because it is disabled"
msgstr ""
-#: ironic/openstack/common/policy.py:275
-#, python-format
-msgid "Can not find policy directory: %s"
-msgstr ""
-
-#: ironic/openstack/common/service.py:174
+#: ironic/openstack/common/service.py:173
#, python-format
msgid "Caught %s, exiting"
msgstr ""
-#: ironic/openstack/common/service.py:232
+#: ironic/openstack/common/service.py:239
msgid "Parent process has died unexpectedly, exiting"
msgstr ""
-#: ironic/openstack/common/service.py:263
+#: ironic/openstack/common/service.py:270
#, python-format
msgid "Child caught %s, exiting"
msgstr ""
-#: ironic/openstack/common/service.py:302
+#: ironic/openstack/common/service.py:309
msgid "Forking too fast, sleeping"
msgstr ""
-#: ironic/openstack/common/service.py:321
+#: ironic/openstack/common/service.py:328
#, python-format
msgid "Started child %d"
msgstr ""
-#: ironic/openstack/common/service.py:331
+#: ironic/openstack/common/service.py:338
#, python-format
msgid "Starting %d workers"
msgstr ""
-#: ironic/openstack/common/service.py:348
+#: ironic/openstack/common/service.py:355
#, python-format
msgid "Child %(pid)d killed by signal %(sig)d"
msgstr ""
-#: ironic/openstack/common/service.py:352
+#: ironic/openstack/common/service.py:359
#, python-format
msgid "Child %(pid)s exited with status %(code)d"
msgstr ""
-#: ironic/openstack/common/service.py:391
+#: ironic/openstack/common/service.py:398
#, python-format
msgid "Caught %s, stopping children"
msgstr ""
-#: ironic/openstack/common/service.py:400
+#: ironic/openstack/common/service.py:413
msgid "Wait called after thread killed. Cleaning up."
msgstr ""
-#: ironic/openstack/common/service.py:416
+#: ironic/openstack/common/service.py:429
#, python-format
msgid "Waiting on %d children to exit"
msgstr ""
diff --git a/ironic/locale/ironic-log-warning.pot b/ironic/locale/ironic-log-warning.pot
index 5c1982c39..c1c0126b8 100644
--- a/ironic/locale/ironic-log-warning.pot
+++ b/ironic/locale/ironic-log-warning.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: ironic 2015.1.dev31\n"
+"Project-Id-Version: ironic 2015.1.dev139\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-02-18 06:15+0000\n"
+"POT-Creation-Date: 2015-04-08 06:27+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -29,100 +29,100 @@ msgid ""
"expected format: %(line)s"
msgstr ""
-#: ironic/common/utils.py:445
+#: ironic/common/utils.py:448
#, python-format
msgid "Failed to unlink %(path)s, error: %(e)s"
msgstr ""
-#: ironic/common/utils.py:454
+#: ironic/common/utils.py:457
#, python-format
msgid "Failed to remove dir %(path)s, error: %(e)s"
msgstr ""
-#: ironic/common/utils.py:470
+#: ironic/common/utils.py:473
#, python-format
msgid "Failed to create symlink from %(source)s to %(link)s, error: %(e)s"
msgstr ""
-#: ironic/common/utils.py:484
+#: ironic/common/utils.py:487
#, python-format
msgid ""
"Failed to remove trailing character. Returning original object. Supplied "
"object is not a string: %s,"
msgstr ""
-#: ironic/conductor/manager.py:249
+#: ironic/conductor/manager.py:281
#, python-format
msgid ""
"A conductor with hostname %(hostname)s was previously registered. "
"Updating registration"
msgstr ""
-#: ironic/conductor/manager.py:315
+#: ironic/conductor/manager.py:353
msgid "Conductor could not connect to database while heartbeating."
msgstr ""
-#: ironic/conductor/manager.py:446
+#: ironic/conductor/manager.py:484
msgid ""
"Drivers implementing their own version of vendor_passthru() has been "
"deprecated. Please update the code to use the @passthru decorator."
msgstr ""
-#: ironic/conductor/manager.py:539
+#: ironic/conductor/manager.py:577
msgid ""
"Drivers implementing their own version of driver_vendor_passthru() has "
"been deprecated. Please update the code to use the @driver_passthru "
"decorator."
msgstr ""
-#: ironic/conductor/manager.py:1164
+#: ironic/conductor/manager.py:1440
#, python-format
msgid ""
"No VIF found for instance %(instance)s port %(port)s when attempting to "
"update port MAC address."
msgstr ""
-#: ironic/conductor/manager.py:1221
+#: ironic/conductor/manager.py:1492
#, python-format
msgid ""
"get_sensors_data is not implemented for driver %(driver)s, node_uuid is "
"%(node)s"
msgstr ""
-#: ironic/conductor/manager.py:1225
+#: ironic/conductor/manager.py:1496
#, python-format
msgid ""
"During get_sensors_data, could not parse sensor data for node %(node)s. "
"Error: %(err)s."
msgstr ""
-#: ironic/conductor/manager.py:1229
+#: ironic/conductor/manager.py:1500
#, python-format
msgid ""
"During get_sensors_data, could not get sensor data for node %(node)s. "
"Error: %(err)s."
msgstr ""
-#: ironic/conductor/manager.py:1233
+#: ironic/conductor/manager.py:1504
#, python-format
msgid ""
"During send_sensor_data, node %(node)s was not found and presumed deleted"
" by another process."
msgstr ""
-#: ironic/conductor/manager.py:1237
+#: ironic/conductor/manager.py:1508
#, python-format
msgid "Failed to get sensor data for node %(node)s. Error: %(error)s"
msgstr ""
-#: ironic/conductor/manager.py:1378
+#: ironic/conductor/manager.py:1779
#, python-format
msgid ""
"No free conductor workers available to perform an action on node "
"%(node)s, setting node's power state back to %(power_state)s."
msgstr ""
-#: ironic/conductor/manager.py:1406
+#: ironic/conductor/manager.py:1807
#, python-format
msgid ""
"No free conductor workers available to perform an action on node "
@@ -130,41 +130,36 @@ msgid ""
"target_provision_state to %(tgt_prov_state)s."
msgstr ""
-#: ironic/conductor/manager.py:1474
+#: ironic/conductor/manager.py:1875
#, python-format
msgid "Error while uploading the configdrive for %(node)s to Swift"
msgstr ""
-#: ironic/conductor/manager.py:1484
+#: ironic/conductor/manager.py:1885
#, python-format
msgid "Error while preparing to deploy to node %(node)s: %(err)s"
msgstr ""
-#: ironic/conductor/manager.py:1493
+#: ironic/conductor/manager.py:1894
#, python-format
msgid "Error in deploy of node %(node)s: %(err)s"
msgstr ""
-#: ironic/conductor/manager.py:1525
-#, python-format
-msgid "Error in tear_down of node %(node)s: %(err)s"
-msgstr ""
-
-#: ironic/conductor/manager.py:1598
+#: ironic/conductor/manager.py:1971
#, python-format
msgid ""
"During sync_power_state, could not get power state for node %(node)s. "
"Error: %(err)s."
msgstr ""
-#: ironic/conductor/manager.py:1625
+#: ironic/conductor/manager.py:1999
#, python-format
msgid ""
"During sync_power_state, node %(node)s state '%(actual)s' does not match "
"expected state. Changing hardware state to '%(state)s'."
msgstr ""
-#: ironic/conductor/manager.py:1643
+#: ironic/conductor/manager.py:2017
#, python-format
msgid ""
"During sync_power_state, node %(node)s state does not match expected "
@@ -188,24 +183,24 @@ msgstr ""
msgid "Driver returns ERROR power state for node %s."
msgstr ""
-#: ironic/db/sqlalchemy/api.py:583
+#: ironic/db/sqlalchemy/api.py:584
#, python-format
msgid "Cleared reservations held by %(hostname)s: %(nodes)s"
msgstr ""
-#: ironic/dhcp/neutron.py:179
+#: ironic/dhcp/neutron.py:189
#, python-format
msgid ""
"Some errors were encountered when updating the DHCP BOOT options for node"
" %(node)s on the following ports: %(ports)s."
msgstr ""
-#: ironic/dhcp/neutron.py:243
+#: ironic/dhcp/neutron.py:253
#, python-format
msgid "No VIFs found for node %(node)s when attempting to get port IP address."
msgstr ""
-#: ironic/dhcp/neutron.py:272
+#: ironic/dhcp/neutron.py:282
#, python-format
msgid ""
"Some errors were encountered on node %(node)s while retrieving IP address"
@@ -217,12 +212,12 @@ msgstr ""
msgid "Ignoring malformed capability '%s'. Format should be 'key:val'."
msgstr ""
-#: ironic/drivers/modules/agent_base_vendor.py:259
+#: ironic/drivers/modules/agent_base_vendor.py:351
#, python-format
msgid "Malformed MAC: %s"
msgstr ""
-#: ironic/drivers/modules/agent_base_vendor.py:302
+#: ironic/drivers/modules/agent_base_vendor.py:394
#, python-format
msgid "MAC address %s not found in database"
msgstr ""
@@ -239,6 +234,13 @@ msgstr ""
msgid "No console pid found for node %s while trying to stop shellinabox console."
msgstr ""
+#: ironic/drivers/modules/deploy_utils.py:974
+#, python-format
+msgid ""
+"ipmitool is unable to set boot device while the node %s is in UEFI boot "
+"mode. Please set the boot device manually."
+msgstr ""
+
#: ironic/drivers/modules/iboot.py:113
#, python-format
msgid ""
@@ -246,86 +248,100 @@ msgid ""
"get_relays() failed."
msgstr ""
-#: ironic/drivers/modules/image_cache.py:194
+#: ironic/drivers/modules/image_cache.py:204
#, python-format
msgid ""
"Cache clean up was unable to reclaim %(required)d MiB of disk space, "
"still %(left)d MiB required"
msgstr ""
-#: ironic/drivers/modules/image_cache.py:221
-#: ironic/drivers/modules/image_cache.py:260
+#: ironic/drivers/modules/image_cache.py:231
+#: ironic/drivers/modules/image_cache.py:270
#, python-format
msgid "Unable to delete file %(name)s from master image cache: %(exc)s"
msgstr ""
-#: ironic/drivers/modules/ipminative.py:133
+#: ironic/drivers/modules/ipminative.py:138
#, python-format
msgid ""
"IPMI power on failed for node %(node_id)s with the following error: "
"%(error)s"
msgstr ""
-#: ironic/drivers/modules/ipminative.py:163
+#: ironic/drivers/modules/ipminative.py:168
#, python-format
msgid ""
"IPMI power off failed for node %(node_id)s with the following error: "
"%(error)s"
msgstr ""
-#: ironic/drivers/modules/ipminative.py:195
+#: ironic/drivers/modules/ipminative.py:200
#, python-format
msgid ""
"IPMI power reboot failed for node %(node_id)s with the following error: "
"%(error)s"
msgstr ""
-#: ironic/drivers/modules/ipminative.py:230
+#: ironic/drivers/modules/ipminative.py:235
#, python-format
msgid ""
"IPMI get power state failed for node %(node_id)s with the following "
"error: %(error)s"
msgstr ""
-#: ironic/drivers/modules/ipminative.py:244
+#: ironic/drivers/modules/ipminative.py:249
#, python-format
msgid ""
"IPMI get power state for node %(node_id)s returns the following details: "
"%(detail)s"
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:402
+#: ironic/drivers/modules/ipmitool.py:413
#, python-format
msgid "IPMI power %(state)s failed for node %(node)s."
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:467
+#: ironic/drivers/modules/ipmitool.py:478
#, python-format
msgid "IPMI power status failed for node %(node_id)s with error: %(error)s."
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:742
+#: ironic/drivers/modules/ipmitool.py:753
#, python-format
msgid ""
"IPMI set boot device failed for node %(node)s when executing \"ipmitool "
"%(cmd)s\". Error: %(error)s"
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:773
+#: ironic/drivers/modules/ipmitool.py:784
#, python-format
msgid ""
"IPMI get boot device failed for node %(node)s when executing \"ipmitool "
"%(cmd)s\". Error: %(error)s"
msgstr ""
-#: ironic/drivers/modules/pxe.py:282
+#: ironic/drivers/modules/pxe.py:132
#, python-format
msgid ""
-"ipmitool is unable to set boot device while the node %s is in UEFI boot "
-"mode. Please set the boot device manually."
+"The \"%(old_param)s\" parameter is deprecated. Please update the node "
+"%(node)s to use \"%(new_param)s\" instead."
+msgstr ""
+
+#: ironic/drivers/modules/pxe.py:474
+#, python-format
+msgid ""
+"The UUID for the root partition can't be found, unable to switch the pxe "
+"config from deployment mode to service (boot) mode for node %(node)s"
+msgstr ""
+
+#: ironic/drivers/modules/pxe.py:479
+#, python-format
+msgid ""
+"The disk id for the whole disk image can't be found, unable to switch the"
+" pxe config from deployment mode to service (boot) mode for node %(node)s"
msgstr ""
-#: ironic/drivers/modules/pxe.py:429
+#: ironic/drivers/modules/pxe.py:504
#, python-format
msgid "Could not get image info to clean up images for node %(node)s: %(err)s"
msgstr ""
@@ -364,6 +380,46 @@ msgid ""
" support this operation"
msgstr ""
+#: ironic/drivers/modules/amt/power.py:179
+#, python-format
+msgid ""
+"AMT failed to set power state %(state)s after %(tries)s retries on node "
+"%(node_id)s."
+msgstr ""
+
+#: ironic/drivers/modules/amt/power.py:189
+#, python-format
+msgid ""
+"AMT set power state %(state)s for node %(node)s - Attempt %(attempt)s "
+"times of %(max_attempt)s failed."
+msgstr ""
+
+#: ironic/drivers/modules/drac/client.py:73
+#, python-format
+msgid ""
+"Empty response on calling %(action)s on client. Last error (cURL error "
+"code): %(last_error)s, fault string: \"%(fault_string)s\" response_code: "
+"%(response_code)s. Retry attempt %(count)d"
+msgstr ""
+
+#: ironic/drivers/modules/ilo/deploy.py:456
+#: ironic/drivers/modules/ilo/deploy.py:536
+#, python-format
+msgid "Secure boot mode is not supported for node %s"
+msgstr ""
+
+#: ironic/drivers/modules/ilo/inspect.py:60
+#, python-format
+msgid "Port already exists for MAC address %(address)s for node %(node)s"
+msgstr ""
+
+#: ironic/drivers/modules/ilo/management.py:98
+#, python-format
+msgid ""
+"'%(step)s' clean step is not supported on node %(uuid)s. Skipping the "
+"clean step."
+msgstr ""
+
#: ironic/nova/scheduler/ironic_host_manager.py:35
msgid ""
"This class (ironic.nova.scheduler.ironic_host_manager.IronicHostManager) "
@@ -381,22 +437,10 @@ msgstr ""
#: ironic/openstack/common/loopingcall.py:87
#, python-format
-msgid "task %(func_name)s run outlasted interval by %(delay).2f sec"
-msgstr ""
-
-#: ironic/openstack/common/network_utils.py:149
-msgid "tcp_keepidle not available on your system"
-msgstr ""
-
-#: ironic/openstack/common/network_utils.py:156
-msgid "tcp_keepintvl not available on your system"
-msgstr ""
-
-#: ironic/openstack/common/network_utils.py:163
-msgid "tcp_keepknt not available on your system"
+msgid "task %(func_name)r run outlasted interval by %(delay).2f sec"
msgstr ""
-#: ironic/openstack/common/service.py:356
+#: ironic/openstack/common/service.py:363
#, python-format
msgid "pid %d not in child list"
msgstr ""
diff --git a/ironic/locale/ironic.pot b/ironic/locale/ironic.pot
index bfd7b3f3e..38f3bff0e 100644
--- a/ironic/locale/ironic.pot
+++ b/ironic/locale/ironic.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: ironic 2015.1.dev31\n"
+"Project-Id-Version: ironic 2015.1.dev139\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-02-18 06:14+0000\n"
+"POT-Creation-Date: 2015-04-08 06:27+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -17,96 +17,105 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 1.3\n"
-#: ironic/api/controllers/base.py:92
-msgid "Invalid value for X-OpenStack-Ironic-API-Version header."
+#: ironic/api/controllers/base.py:102
+#, python-format
+msgid "Invalid value for %s header"
msgstr ""
-#: ironic/api/controllers/v1/__init__.py:164
+#: ironic/api/controllers/v1/__init__.py:173
#, python-format
msgid ""
"Mutually exclusive versions requested. Version %(ver)s requested but not "
-"supported by this service."
+"supported by this service. The supported version range is: [%(min)s, "
+"%(max)s]."
msgstr ""
-#: ironic/api/controllers/v1/__init__.py:170
+#: ironic/api/controllers/v1/__init__.py:180
#, python-format
msgid ""
-"Unsupported minor version requested. This API service supports the "
-"following version range: [%(min)s, %(max)s]."
+"Version %(ver)s was requested but the minor version is not supported by "
+"this service. The supported version range is: [%(min)s, %(max)s]."
msgstr ""
#: ironic/api/controllers/v1/driver.py:146
-#: ironic/api/controllers/v1/node.py:716
+#: ironic/api/controllers/v1/node.py:703
msgid "Method not specified"
msgstr ""
-#: ironic/api/controllers/v1/node.py:423
+#: ironic/api/controllers/v1/node.py:402
#, python-format
msgid "Adding a config drive is only supported when setting provision state to %s"
msgstr ""
-#: ironic/api/controllers/v1/node.py:446
+#: ironic/api/controllers/v1/node.py:428
#, python-format
msgid "The requested action \"%(action)s\" could not be understood."
msgstr ""
-#: ironic/api/controllers/v1/node.py:801
+#: ironic/api/controllers/v1/node.py:788
msgid "Chassis id not specified."
msgstr ""
-#: ironic/api/controllers/v1/node.py:975
+#: ironic/api/controllers/v1/node.py:963
#, python-format
msgid "Cannot create node with invalid name %(name)s"
msgstr ""
-#: ironic/api/controllers/v1/node.py:1003
+#: ironic/api/controllers/v1/node.py:1002
#, python-format
msgid "Node %s can not be updated while a state transition is in progress."
msgstr ""
-#: ironic/api/controllers/v1/node.py:1013
+#: ironic/api/controllers/v1/node.py:1012
#, python-format
msgid "Node %(node)s: Cannot change name to invalid name '%(name)s'"
msgstr ""
-#: ironic/api/controllers/v1/port.py:182
-msgid "Node id not specified."
+#: ironic/api/controllers/v1/node.py:1060
+#, python-format
+msgid ""
+"Node %s can not update the driver while the console is enabled. Please "
+"stop the console first."
msgstr ""
-#: ironic/api/controllers/v1/types.py:171
+#: ironic/api/controllers/v1/port.py:183
+msgid "Node identifier not specified."
+msgstr ""
+
+#: ironic/api/controllers/v1/types.py:173
#, python-format
msgid "%s is not JSON serializable"
msgstr ""
-#: ironic/api/controllers/v1/types.py:222
+#: ironic/api/controllers/v1/types.py:224
#, python-format
msgid "'%s' is an internal attribute and can not be updated"
msgstr ""
-#: ironic/api/controllers/v1/types.py:226
+#: ironic/api/controllers/v1/types.py:228
#, python-format
msgid "'%s' is a mandatory attribute and can not be removed"
msgstr ""
-#: ironic/api/controllers/v1/types.py:231
+#: ironic/api/controllers/v1/types.py:233
msgid "'add' and 'replace' operations needs value"
msgstr ""
-#: ironic/api/controllers/v1/utils.py:32
+#: ironic/api/controllers/v1/utils.py:38
msgid "Limit must be positive"
msgstr ""
-#: ironic/api/controllers/v1/utils.py:39
+#: ironic/api/controllers/v1/utils.py:45
#, python-format
msgid "Invalid sort direction: %s. Acceptable values are 'asc' or 'desc'"
msgstr ""
-#: ironic/api/controllers/v1/utils.py:49
+#: ironic/api/controllers/v1/utils.py:55
#, python-format
msgid "Adding a new attribute (%s) to the root of the resource is not allowed"
msgstr ""
-#: ironic/api/middleware/auth_token.py:41
+#: ironic/api/middleware/auth_token.py:43
#, python-format
msgid "Cannot compile public API routes: %s"
msgstr ""
@@ -134,510 +143,551 @@ msgstr ""
msgid "An unknown exception occurred."
msgstr ""
-#: ironic/common/exception.py:99
+#: ironic/common/exception.py:106
msgid "Not authorized."
msgstr ""
-#: ironic/common/exception.py:104
+#: ironic/common/exception.py:111
msgid "Operation not permitted."
msgstr ""
-#: ironic/common/exception.py:108
+#: ironic/common/exception.py:115
msgid "Unacceptable parameters."
msgstr ""
-#: ironic/common/exception.py:113
+#: ironic/common/exception.py:120
msgid "Conflict."
msgstr ""
-#: ironic/common/exception.py:118
+#: ironic/common/exception.py:125
msgid "Resource temporarily unavailable, please retry."
msgstr ""
-#: ironic/common/exception.py:124
+#: ironic/common/exception.py:131
msgid "Request not acceptable."
msgstr ""
-#: ironic/common/exception.py:129
+#: ironic/common/exception.py:136
msgid "Invalid resource state."
msgstr ""
-#: ironic/common/exception.py:133
+#: ironic/common/exception.py:140
#, python-format
msgid "A node with UUID %(uuid)s already exists."
msgstr ""
-#: ironic/common/exception.py:137
+#: ironic/common/exception.py:144
#, python-format
msgid "A port with MAC address %(mac)s already exists."
msgstr ""
-#: ironic/common/exception.py:141
+#: ironic/common/exception.py:148
#, python-format
msgid "A chassis with UUID %(uuid)s already exists."
msgstr ""
-#: ironic/common/exception.py:145
+#: ironic/common/exception.py:152
#, python-format
msgid "A port with UUID %(uuid)s already exists."
msgstr ""
-#: ironic/common/exception.py:149
+#: ironic/common/exception.py:156
#, python-format
msgid ""
"Instance %(instance_uuid)s is already associated with a node, it cannot "
"be associated with this other node %(node)s"
msgstr ""
-#: ironic/common/exception.py:154
+#: ironic/common/exception.py:161
#, python-format
msgid "A node with name %(name)s already exists."
msgstr ""
-#: ironic/common/exception.py:158
+#: ironic/common/exception.py:165
#, python-format
msgid "Expected a uuid but received %(uuid)s."
msgstr ""
-#: ironic/common/exception.py:162
+#: ironic/common/exception.py:169
#, python-format
msgid "Expected a logical name or uuid but received %(name)s."
msgstr ""
-#: ironic/common/exception.py:166
+#: ironic/common/exception.py:173
#, python-format
msgid "Expected a logical name but received %(name)s."
msgstr ""
-#: ironic/common/exception.py:170
+#: ironic/common/exception.py:177
#, python-format
msgid "Expected an uuid or int but received %(identity)s."
msgstr ""
-#: ironic/common/exception.py:174
+#: ironic/common/exception.py:181
#, python-format
msgid "Expected a MAC address but received %(mac)s."
msgstr ""
-#: ironic/common/exception.py:178
+#: ironic/common/exception.py:185
#, python-format
msgid ""
"The requested action \"%(action)s\" can not be performed on node "
"\"%(node)s\" while it is in state \"%(state)s\"."
msgstr ""
-#: ironic/common/exception.py:183
+#: ironic/common/exception.py:190
#, python-format
msgid "Couldn't apply patch '%(patch)s'. Reason: %(reason)s"
msgstr ""
-#: ironic/common/exception.py:187
+#: ironic/common/exception.py:194
#, python-format
msgid "Failed to deploy instance: %(reason)s"
msgstr ""
-#: ironic/common/exception.py:191 ironic/common/exception.py:195
+#: ironic/common/exception.py:198 ironic/common/exception.py:202
#, python-format
msgid "Image %(image_id)s is unacceptable: %(reason)s"
msgstr ""
-#: ironic/common/exception.py:201 ironic/common/exception.py:205
+#: ironic/common/exception.py:208 ironic/common/exception.py:212
#, python-format
msgid "%(err)s"
msgstr ""
-#: ironic/common/exception.py:209
+#: ironic/common/exception.py:216
msgid "Resource already exists."
msgstr ""
-#: ironic/common/exception.py:213
+#: ironic/common/exception.py:220
msgid "Resource could not be found."
msgstr ""
-#: ironic/common/exception.py:218
+#: ironic/common/exception.py:225
#, python-format
msgid "Failed to load DHCP provider %(dhcp_provider_name)s."
msgstr ""
-#: ironic/common/exception.py:222
+#: ironic/common/exception.py:229
#, python-format
msgid "Could not find the following driver(s): %(driver_name)s."
msgstr ""
-#: ironic/common/exception.py:226
+#: ironic/common/exception.py:233
#, python-format
msgid "Image %(image_id)s could not be found."
msgstr ""
-#: ironic/common/exception.py:230
+#: ironic/common/exception.py:237
#, python-format
msgid "No valid host was found. Reason: %(reason)s"
msgstr ""
-#: ironic/common/exception.py:234
+#: ironic/common/exception.py:241
#, python-format
msgid "Instance %(instance)s could not be found."
msgstr ""
-#: ironic/common/exception.py:238
+#: ironic/common/exception.py:245
#, python-format
msgid "Node %(node)s could not be found."
msgstr ""
-#: ironic/common/exception.py:242
+#: ironic/common/exception.py:249
#, python-format
msgid "Node %(node)s is associated with instance %(instance)s."
msgstr ""
-#: ironic/common/exception.py:246
+#: ironic/common/exception.py:253
#, python-format
msgid "Port %(port)s could not be found."
msgstr ""
-#: ironic/common/exception.py:250
+#: ironic/common/exception.py:257
#, python-format
msgid "Update DHCP options on port: %(port_id)s failed."
msgstr ""
-#: ironic/common/exception.py:254
+#: ironic/common/exception.py:261
#, python-format
msgid "Retrieve IP address on port: %(port_id)s failed."
msgstr ""
-#: ironic/common/exception.py:258
+#: ironic/common/exception.py:265
#, python-format
msgid "Invalid IPv4 address %(ip_address)s."
msgstr ""
-#: ironic/common/exception.py:262
+#: ironic/common/exception.py:269
#, python-format
msgid "Update MAC address on port: %(port_id)s failed."
msgstr ""
-#: ironic/common/exception.py:266
+#: ironic/common/exception.py:273
#, python-format
msgid "Chassis %(chassis)s could not be found."
msgstr ""
-#: ironic/common/exception.py:270
+#: ironic/common/exception.py:277
#, python-format
msgid "Conductor %(conductor)s cannot be started because no drivers were loaded."
msgstr ""
-#: ironic/common/exception.py:275
+#: ironic/common/exception.py:282
#, python-format
msgid "Conductor %(conductor)s could not be found."
msgstr ""
-#: ironic/common/exception.py:279
+#: ironic/common/exception.py:286
#, python-format
msgid "Conductor %(conductor)s already registered."
msgstr ""
-#: ironic/common/exception.py:283
+#: ironic/common/exception.py:290
#, python-format
msgid "Failed to set node power state to %(pstate)s."
msgstr ""
-#: ironic/common/exception.py:287
+#: ironic/common/exception.py:294
msgid "An exclusive lock is required, but the current context has a shared lock."
msgstr ""
-#: ironic/common/exception.py:292
+#: ironic/common/exception.py:299
#, python-format
msgid "Failed to toggle maintenance-mode flag for node %(node)s: %(reason)s"
msgstr ""
-#: ironic/common/exception.py:297
+#: ironic/common/exception.py:304
#, python-format
msgid "Console access is not enabled on node %(node)s"
msgstr ""
-#: ironic/common/exception.py:301
+#: ironic/common/exception.py:308
#, python-format
msgid ""
"The %(op)s operation can't be performed on node %(node)s because it's in "
"maintenance mode."
msgstr ""
-#: ironic/common/exception.py:306
+#: ironic/common/exception.py:313
#, python-format
msgid ""
"Can not change instance association while node %(node)s is in power state"
" %(pstate)s."
msgstr ""
-#: ironic/common/exception.py:311
+#: ironic/common/exception.py:318
#, python-format
msgid ""
"Cannot complete the requested action because chassis %(chassis)s contains"
" nodes."
msgstr ""
-#: ironic/common/exception.py:316
+#: ironic/common/exception.py:323
#, python-format
msgid "IPMI call failed: %(cmd)s."
msgstr ""
-#: ironic/common/exception.py:320
+#: ironic/common/exception.py:327
+msgid "Failed to connect to AMT service."
+msgstr ""
+
+#: ironic/common/exception.py:331
+#, python-format
+msgid "AMT call failed: %(cmd)s."
+msgstr ""
+
+#: ironic/common/exception.py:335
#, python-format
msgid "Failed to establish SSH connection to host %(host)s."
msgstr ""
-#: ironic/common/exception.py:324
+#: ironic/common/exception.py:339
#, python-format
msgid "Failed to execute command via SSH: %(cmd)s."
msgstr ""
-#: ironic/common/exception.py:328
+#: ironic/common/exception.py:343
#, python-format
msgid "Unsupported object type %(objtype)s"
msgstr ""
-#: ironic/common/exception.py:332
+#: ironic/common/exception.py:347
#, python-format
msgid "Cannot call %(method)s on orphaned %(objtype)s object"
msgstr ""
-#: ironic/common/exception.py:336
+#: ironic/common/exception.py:351
#, python-format
-msgid "Driver %(driver)s does not support %(extension)s."
+msgid ""
+"Driver %(driver)s does not support %(extension)s (disabled or not "
+"implemented)."
msgstr ""
-#: ironic/common/exception.py:340
+#: ironic/common/exception.py:356
#, python-format
msgid "Version %(objver)s of %(objname)s is not supported"
msgstr ""
-#: ironic/common/exception.py:344
+#: ironic/common/exception.py:360
#, python-format
msgid "Connection to glance host %(host)s:%(port)s failed: %(reason)s"
msgstr ""
-#: ironic/common/exception.py:349
+#: ironic/common/exception.py:365
#, python-format
msgid "Not authorized for image %(image_id)s."
msgstr ""
-#: ironic/common/exception.py:353
+#: ironic/common/exception.py:369
#, python-format
msgid "Invalid image href %(image_href)s."
msgstr ""
-#: ironic/common/exception.py:357
+#: ironic/common/exception.py:373
+#, python-format
+msgid "Validation of image href %(image_href)s failed, reason: %(reason)s"
+msgstr ""
+
+#: ironic/common/exception.py:378
+#, python-format
+msgid "Failed to download image %(image_href)s, reason: %(reason)s"
+msgstr ""
+
+#: ironic/common/exception.py:382
msgid "Not authorized in Keystone."
msgstr ""
-#: ironic/common/exception.py:370
+#: ironic/common/exception.py:395
#, python-format
msgid ""
"Service type %(service_type)s with endpoint type %(endpoint_type)s not "
"found in keystone service catalog."
msgstr ""
-#: ironic/common/exception.py:375
+#: ironic/common/exception.py:400
msgid "Connection failed"
msgstr ""
-#: ironic/common/exception.py:379
+#: ironic/common/exception.py:404
msgid "Requested OpenStack Images API is forbidden"
msgstr ""
-#: ironic/common/exception.py:387
+#: ironic/common/exception.py:412
msgid "The provided endpoint is invalid"
msgstr ""
-#: ironic/common/exception.py:391
+#: ironic/common/exception.py:416
msgid "Unable to communicate with the server."
msgstr ""
-#: ironic/common/exception.py:407
+#: ironic/common/exception.py:432
#, python-format
msgid "Could not find config at %(path)s"
msgstr ""
-#: ironic/common/exception.py:411
+#: ironic/common/exception.py:436
#, python-format
msgid ""
"Node %(node)s is locked by host %(host)s, please retry after the current "
"operation is completed."
msgstr ""
-#: ironic/common/exception.py:416
+#: ironic/common/exception.py:441
#, python-format
msgid "Node %(node)s found not to be locked on release"
msgstr ""
-#: ironic/common/exception.py:420
+#: ironic/common/exception.py:445
msgid ""
"Requested action cannot be performed due to lack of free conductor "
"workers."
msgstr ""
-#: ironic/common/exception.py:430
+#: ironic/common/exception.py:455
#, python-format
msgid "Invalid configuration file. %(error_msg)s"
msgstr ""
-#: ironic/common/exception.py:434
+#: ironic/common/exception.py:459
#, python-format
msgid "Driver %(driver)s could not be loaded. Reason: %(reason)s."
msgstr ""
-#: ironic/common/exception.py:442
+#: ironic/common/exception.py:467
#, python-format
msgid "Could not find pid in pid file %(pid_path)s"
msgstr ""
-#: ironic/common/exception.py:446
+#: ironic/common/exception.py:471
#, python-format
msgid "Console subprocess failed to start. %(error)s"
msgstr ""
-#: ironic/common/exception.py:450
+#: ironic/common/exception.py:475
#, python-format
msgid "Failed to create the password file. %(error)s"
msgstr ""
-#: ironic/common/exception.py:458
+#: ironic/common/exception.py:483
#, python-format
msgid "%(operation)s failed, error: %(error)s"
msgstr ""
-#: ironic/common/exception.py:466
+#: ironic/common/exception.py:487
+#, python-format
+msgid "%(operation)s not supported. error: %(error)s"
+msgstr ""
+
+#: ironic/common/exception.py:495
#, python-format
msgid ""
"DRAC client failed. Last error (cURL error code): %(last_error)s, fault "
"string: \"%(fault_string)s\" response_code: %(response_code)s"
msgstr ""
-#: ironic/common/exception.py:473
+#: ironic/common/exception.py:502
#, python-format
msgid "DRAC operation failed. Message: %(message)s"
msgstr ""
-#: ironic/common/exception.py:477
+#: ironic/common/exception.py:506
#, python-format
msgid ""
"DRAC operation yielded return value %(actual_return_value)s that is "
"neither error nor expected %(expected_return_value)s"
msgstr ""
-#: ironic/common/exception.py:482
+#: ironic/common/exception.py:511
#, python-format
msgid ""
"Another job with ID %(job_id)s is already created to configure "
"%(target)s. Wait until existing job is completed or is canceled"
msgstr ""
-#: ironic/common/exception.py:488
+#: ironic/common/exception.py:517
#, python-format
msgid ""
"Invalid filter dialect '%(invalid_filter)s'. Supported options are "
"%(supported)s"
msgstr ""
-#: ironic/common/exception.py:493
+#: ironic/common/exception.py:522
#, python-format
msgid "Failed to get sensor data for node %(node)s. Error: %(error)s"
msgstr ""
-#: ironic/common/exception.py:498
+#: ironic/common/exception.py:527
#, python-format
msgid "Failed to parse sensor data for node %(node)s. Error: %(error)s"
msgstr ""
-#: ironic/common/exception.py:503
+#: ironic/common/exception.py:532
#, python-format
msgid ""
"Disk volume where '%(path)s' is located doesn't have enough disk space. "
"Required %(required)d MiB, only %(actual)d MiB available space present."
msgstr ""
-#: ironic/common/exception.py:509
+#: ironic/common/exception.py:538
#, python-format
msgid "Creating %(image_type)s image failed: %(error)s"
msgstr ""
-#: ironic/common/exception.py:513
+#: ironic/common/exception.py:542
#, python-format
msgid "Swift operation '%(operation)s' failed: %(error)s"
msgstr ""
-#: ironic/common/exception.py:517
+#: ironic/common/exception.py:546
#, python-format
msgid "SNMP operation '%(operation)s' failed: %(error)s"
msgstr ""
-#: ironic/common/exception.py:521
+#: ironic/common/exception.py:550
#, python-format
msgid "Failed to create a file system. File system %(fs)s is not supported."
msgstr ""
-#: ironic/common/exception.py:526
+#: ironic/common/exception.py:555
#, python-format
msgid "iRMC %(operation)s failed. Reason: %(error)s"
msgstr ""
-#: ironic/common/exception.py:530
+#: ironic/common/exception.py:559
#, python-format
msgid "VirtualBox operation '%(operation)s' failed. Error: %(error)s"
msgstr ""
-#: ironic/common/fsm.py:84
+#: ironic/common/exception.py:564
+#, python-format
+msgid "Failed to inspect hardware. Reason: %(error)s"
+msgstr ""
+
+#: ironic/common/exception.py:568
+#, python-format
+msgid "Failed to clean node %(node)s: %(reason)s"
+msgstr ""
+
+#: ironic/common/fsm.py:94
#, python-format
msgid "State '%s' already defined"
msgstr ""
-#: ironic/common/fsm.py:87
+#: ironic/common/fsm.py:97
msgid "On enter callback must be callable"
msgstr ""
-#: ironic/common/fsm.py:90
+#: ironic/common/fsm.py:100
msgid "On exit callback must be callable"
msgstr ""
-#: ironic/common/fsm.py:92
+#: ironic/common/fsm.py:102
#, python-format
msgid "Target state '%s' does not exist"
msgstr ""
-#: ironic/common/fsm.py:108
+#: ironic/common/fsm.py:106
+#, python-format
+msgid "Target state '%s' is not a 'stable' state"
+msgstr ""
+
+#: ironic/common/fsm.py:122
#, python-format
msgid ""
"Can not add a transition on event '%(event)s' that starts in a undefined "
"state '%(state)s'"
msgstr ""
-#: ironic/common/fsm.py:113
+#: ironic/common/fsm.py:127
#, python-format
msgid ""
"Can not add a transition on event '%(event)s' that ends in a undefined "
"state '%(state)s'"
msgstr ""
-#: ironic/common/fsm.py:124
+#: ironic/common/fsm.py:138
msgid "Can only process events after being initialized (not before)"
msgstr ""
-#: ironic/common/fsm.py:128
+#: ironic/common/fsm.py:142
#, python-format
msgid "Can not transition from terminal state '%(state)s' on event '%(event)s'"
msgstr ""
-#: ironic/common/fsm.py:133
+#: ironic/common/fsm.py:147
#, python-format
msgid ""
"Can not transition from state '%(state)s' on event '%(event)s' (no "
"defined transition)"
msgstr ""
-#: ironic/common/fsm.py:171
+#: ironic/common/fsm.py:185
#, python-format
msgid "Can not start from an undefined state '%s'"
msgstr ""
-#: ironic/common/fsm.py:174
+#: ironic/common/fsm.py:188
#, python-format
msgid "Can not start from a terminal state '%s'"
msgstr ""
@@ -655,20 +705,49 @@ msgstr ""
msgid "The driver '%s' is unknown."
msgstr ""
-#: ironic/common/images.py:257
+#: ironic/common/image_service.py:137
+#, python-format
+msgid "Got HTTP code %s instead of 200 in response to HEAD request."
+msgstr ""
+
+#: ironic/common/image_service.py:159
+#, python-format
+msgid "Got HTTP code %s instead of 200 in response to GET request."
+msgstr ""
+
+#: ironic/common/image_service.py:181
+msgid ""
+"Cannot determine image size as there is no Content-Length header "
+"specified in response to HEAD request."
+msgstr ""
+
+#: ironic/common/image_service.py:204
+msgid "Specified image file not found."
+msgstr ""
+
+#: ironic/common/image_service.py:280
+#, python-format
+msgid "Image download protocol %s is not supported."
+msgstr ""
+
+#: ironic/common/images.py:357
msgid "'qemu-img info' parsing failed."
msgstr ""
-#: ironic/common/images.py:263
+#: ironic/common/images.py:363
#, python-format
msgid "fmt=%(fmt)s backed by: %(backing_file)s"
msgstr ""
-#: ironic/common/images.py:278
+#: ironic/common/images.py:378
#, python-format
msgid "Converted to raw, but format is now %s"
msgstr ""
+#: ironic/common/images.py:561
+msgid "Deploy iso didn't contain efiboot.img or grub.cfg"
+msgstr ""
+
#: ironic/common/keystone.py:52
msgid "Keystone API endpoint is missing"
msgstr ""
@@ -682,7 +761,7 @@ msgstr ""
msgid "No Keystone service catalog loaded"
msgstr ""
-#: ironic/common/pxe_utils.py:100
+#: ironic/common/pxe_utils.py:102
#, python-format
msgid "Failed to get IP address for any port on node %s."
msgstr ""
@@ -711,7 +790,7 @@ msgstr ""
msgid "post object"
msgstr ""
-#: ironic/common/utils.py:117
+#: ironic/common/utils.py:116
msgid "Invalid private key"
msgstr ""
@@ -748,92 +827,144 @@ msgid ""
"swift_store_multiple_containers_seed."
msgstr ""
-#: ironic/conductor/manager.py:338
+#: ironic/conductor/manager.py:376
msgid "Invalid method call: update_node can not change node state."
msgstr ""
-#: ironic/conductor/manager.py:463 ironic/conductor/manager.py:555
+#: ironic/conductor/manager.py:501 ironic/conductor/manager.py:593
#: ironic/drivers/utils.py:84
#, python-format
msgid "No handler for method %s"
msgstr ""
-#: ironic/conductor/manager.py:468 ironic/conductor/manager.py:560
+#: ironic/conductor/manager.py:506 ironic/conductor/manager.py:598
#, python-format
msgid "The method %(method)s does not support HTTP %(http)s"
msgstr ""
-#: ironic/conductor/manager.py:657
+#: ironic/conductor/manager.py:695
msgid "provisioning"
msgstr ""
-#: ironic/conductor/manager.py:664
+#: ironic/conductor/manager.py:730
#, python-format
msgid "RPC do_node_deploy failed to validate deploy or power info. Error: %(msg)s"
msgstr ""
-#: ironic/conductor/manager.py:724
+#: ironic/conductor/manager.py:777
#, python-format
msgid ""
"Failed to validate power driver interface. Can not delete instance. "
"Error: %(msg)s"
msgstr ""
-#: ironic/conductor/manager.py:994
+#: ironic/conductor/manager.py:802
+#, python-format
+msgid "Failed to tear down. Error: %s"
+msgstr ""
+
+#: ironic/conductor/manager.py:851
+#, python-format
+msgid ""
+"Cannot continue cleaning on %(node)s, node is in %(state)s state, should "
+"be %(clean_state)s"
+msgstr ""
+
+#: ironic/conductor/manager.py:887
+#, python-format
+msgid ""
+"Failed to validate power driver interface. Can not clean node %(node)s. "
+"Error: %(msg)s"
+msgstr ""
+
+#: ironic/conductor/manager.py:897
+#, python-format
+msgid "Failed to prepare node %(node)s for cleaning: %(e)s"
+msgstr ""
+
+#: ironic/conductor/manager.py:929
+#, python-format
+msgid "Node %(node)s got an invalid last step for %(state)s: %(step)s."
+msgstr ""
+
+#: ironic/conductor/manager.py:951
+#, python-format
+msgid "Node %(node)s failed step %(step)s: %(exc)s"
+msgstr ""
+
+#: ironic/conductor/manager.py:970
+#, python-format
+msgid ""
+"While executing step %(step)s on node %(node)s, step returned invalid "
+"value: %(val)s"
+msgstr ""
+
+#: ironic/conductor/manager.py:986
+#, python-format
+msgid "Failed to tear down from cleaning for node %s"
+msgstr ""
+
+#: ironic/conductor/manager.py:1249
msgid "not supported"
msgstr ""
-#: ironic/conductor/manager.py:1022
+#: ironic/conductor/manager.py:1277
#, python-format
msgid "Node %s can't be deleted because it's not powered off"
msgstr ""
-#: ironic/conductor/manager.py:1101
+#: ironic/conductor/manager.py:1377
msgid "enabled"
msgstr ""
-#: ironic/conductor/manager.py:1101
+#: ironic/conductor/manager.py:1377
msgid "disabled"
msgstr ""
-#: ironic/conductor/manager.py:1124
+#: ironic/conductor/manager.py:1400
msgid "enabling"
msgstr ""
-#: ironic/conductor/manager.py:1124
+#: ironic/conductor/manager.py:1400
msgid "disabling"
msgstr ""
-#: ironic/conductor/manager.py:1125
+#: ironic/conductor/manager.py:1401
#, python-format
msgid "Error %(op)s the console on node %(node)s. Reason: %(error)s"
msgstr ""
-#: ironic/conductor/manager.py:1376 ironic/conductor/manager.py:1404
+#: ironic/conductor/manager.py:1660
+#, python-format
+msgid ""
+"RPC inspect_hardware failed to validate inspection or power info. Error: "
+"%(msg)s"
+msgstr ""
+
+#: ironic/conductor/manager.py:1692
+msgid "timeout reached while inspecting the node"
+msgstr ""
+
+#: ironic/conductor/manager.py:1777 ironic/conductor/manager.py:1805
msgid "No free conductor workers available"
msgstr ""
-#: ironic/conductor/manager.py:1476
+#: ironic/conductor/manager.py:1877
#, python-format
msgid "Failed to upload the configdrive to Swift. Error: %s"
msgstr ""
-#: ironic/conductor/manager.py:1486
+#: ironic/conductor/manager.py:1887
#, python-format
msgid "Failed to prepare to deploy. Error: %s"
msgstr ""
-#: ironic/conductor/manager.py:1494
+#: ironic/conductor/manager.py:1895
#, python-format
msgid "Failed to deploy. Error: %s"
msgstr ""
-#: ironic/conductor/manager.py:1528
-#, python-format
-msgid "Failed to tear down. Error: %s"
-msgstr ""
-
-#: ironic/conductor/manager.py:1548
+#: ironic/conductor/manager.py:1921
#, python-format
msgid ""
"During sync_power_state, max retries exceeded for node %(node)s, node "
@@ -841,11 +972,16 @@ msgid ""
"state to '%(actual)s' Switching node to maintenance mode."
msgstr ""
-#: ironic/conductor/manager.py:1594
+#: ironic/conductor/manager.py:1967
msgid "Power driver returned ERROR state while trying to sync power state."
msgstr ""
-#: ironic/conductor/rpcapi.py:105
+#: ironic/conductor/manager.py:2059
+#, python-format
+msgid "During inspection, driver returned unexpected state %(state)s"
+msgstr ""
+
+#: ironic/conductor/rpcapi.py:108
#, python-format
msgid "No conductor service registered which supports driver %s."
msgstr ""
@@ -871,69 +1007,98 @@ msgid ""
"aborting. More info may be found in the log file."
msgstr ""
-#: ironic/db/sqlalchemy/api.py:338
+#: ironic/db/sqlalchemy/api.py:334
msgid "Cannot overwrite UUID for an existing Node."
msgstr ""
-#: ironic/db/sqlalchemy/api.py:425
+#: ironic/db/sqlalchemy/api.py:431
msgid "Cannot overwrite UUID for an existing Port."
msgstr ""
-#: ironic/db/sqlalchemy/api.py:488
+#: ironic/db/sqlalchemy/api.py:489
msgid "Cannot overwrite UUID for an existing Chassis."
msgstr ""
-#: ironic/dhcp/neutron.py:70
+#: ironic/dhcp/neutron.py:74
msgid "Neutron auth_strategy should be either \"noauth\" or \"keystone\"."
msgstr ""
-#: ironic/dhcp/neutron.py:161
+#: ironic/dhcp/neutron.py:171
#, python-format
msgid ""
"No VIFs found for node %(node)s when attempting to update DHCP BOOT "
"options."
msgstr ""
-#: ironic/dhcp/neutron.py:175
+#: ironic/dhcp/neutron.py:185
#, python-format
msgid "Failed to set DHCP BOOT options for any port on node %s."
msgstr ""
-#: ironic/drivers/agent.py:103 ironic/drivers/fake.py:197
-#: ironic/drivers/pxe.py:231
+#: ironic/dhcp/neutron.py:297
+msgid "Valid cleaning network UUID not provided"
+msgstr ""
+
+#: ironic/dhcp/neutron.py:313
+#, python-format
+msgid "Could not create cleaning port on network %(net)s from %(node)s. %(exc)s"
+msgstr ""
+
+#: ironic/dhcp/neutron.py:322
+#, python-format
+msgid "Failed to create cleaning ports for node %(node)s"
+msgstr ""
+
+#: ironic/dhcp/neutron.py:343
+#, python-format
+msgid ""
+"Could not get cleaning network vif for %(node)s from Neutron, possible "
+"network issue. %(exc)s"
+msgstr ""
+
+#: ironic/dhcp/neutron.py:357
+#, python-format
+msgid ""
+"Could not remove cleaning ports on network %(net)s from %(node)s, "
+"possible network issue. %(exc)s"
+msgstr ""
+
+#: ironic/drivers/agent.py:103 ironic/drivers/fake.py:205
+#: ironic/drivers/pxe.py:244
msgid "Unable to import pyremotevbox library"
msgstr ""
-#: ironic/drivers/drac.py:34 ironic/drivers/fake.py:159
+#: ironic/drivers/drac.py:35 ironic/drivers/fake.py:166
+#: ironic/drivers/fake.py:233 ironic/drivers/pxe.py:264
msgid "Unable to import pywsman library"
msgstr ""
-#: ironic/drivers/fake.py:96
+#: ironic/drivers/fake.py:102
msgid "Unable to import pyghmi IPMI library"
msgstr ""
-#: ironic/drivers/fake.py:110 ironic/drivers/pxe.py:114
+#: ironic/drivers/fake.py:116 ironic/drivers/pxe.py:126
msgid "Unable to import seamicroclient library"
msgstr ""
-#: ironic/drivers/fake.py:134 ironic/drivers/pxe.py:142
+#: ironic/drivers/fake.py:140 ironic/drivers/pxe.py:154
msgid "Unable to import iboot library"
msgstr ""
-#: ironic/drivers/fake.py:146 ironic/drivers/ilo.py:43 ironic/drivers/ilo.py:66
-#: ironic/drivers/pxe.py:162
+#: ironic/drivers/fake.py:152 ironic/drivers/ilo.py:43 ironic/drivers/ilo.py:67
+#: ironic/drivers/pxe.py:174
msgid "Unable to import proliantutils library"
msgstr ""
-#: ironic/drivers/fake.py:173 ironic/drivers/pxe.py:184
+#: ironic/drivers/fake.py:180 ironic/drivers/pxe.py:197
msgid "Unable to import pysnmp library"
msgstr ""
-#: ironic/drivers/fake.py:185 ironic/drivers/pxe.py:207
+#: ironic/drivers/fake.py:192 ironic/drivers/pxe.py:220
msgid "Unable to import python-scciclient library"
msgstr ""
-#: ironic/drivers/pxe.py:91
+#: ironic/drivers/pxe.py:101
msgid "Unable to import pyghmi library"
msgstr ""
@@ -948,62 +1113,136 @@ msgid ""
"%(valid_values)s."
msgstr ""
-#: ironic/drivers/modules/agent.py:182
+#: ironic/drivers/modules/agent.py:71 ironic/drivers/modules/pxe.py:102
+msgid "UUID (from Glance) of the deployment kernel. Required."
+msgstr ""
+
+#: ironic/drivers/modules/agent.py:73
+msgid ""
+"UUID (from Glance) of the ramdisk with agent that is used at deploy time."
+" Required."
+msgstr ""
+
+#: ironic/drivers/modules/agent.py:260
#, python-format
msgid "Node %s failed to validate deploy image info. Some parameters were missing"
msgstr ""
-#: ironic/drivers/modules/agent.py:336
+#: ironic/drivers/modules/agent.py:266
+#, python-format
+msgid ""
+"image_source's image_checksum must be provided in instance_info for node "
+"%s"
+msgstr ""
+
+#: ironic/drivers/modules/agent.py:274
+#, python-format
+msgid ""
+"Node %(node)s is configured to use the %(driver)s driver which currently "
+"does not support deploying partition images."
+msgstr ""
+
+#: ironic/drivers/modules/agent.py:483
#, python-format
msgid "node %(node)s command status errored: %(error)s"
msgstr ""
-#: ironic/drivers/modules/agent_base_vendor.py:119
+#: ironic/drivers/modules/agent_base_vendor.py:124
msgid "Missing parameter version"
msgstr ""
-#: ironic/drivers/modules/agent_base_vendor.py:122
+#: ironic/drivers/modules/agent_base_vendor.py:127
#, python-format
msgid "Unknown lookup payload version: %s"
msgstr ""
-#: ironic/drivers/modules/agent_base_vendor.py:149
+#: ironic/drivers/modules/agent_base_vendor.py:160
+#, python-format
+msgid "Agent returned error for clean step %(step)s on node %(node)s : %(err)s."
+msgstr ""
+
+#: ironic/drivers/modules/agent_base_vendor.py:172
+#, python-format
+msgid "Could not restart cleaning on node %(node)s: %(err)s."
+msgstr ""
+
+#: ironic/drivers/modules/agent_base_vendor.py:184
+#, python-format
+msgid ""
+"Agent returned unknown status for clean step %(step)s on node %(node)s : "
+"%(err)s."
+msgstr ""
+
+#: ironic/drivers/modules/agent_base_vendor.py:215
msgid "For heartbeat operation, \"agent_url\" must be specified."
msgstr ""
-#: ironic/drivers/modules/agent_base_vendor.py:158
+#: ironic/drivers/modules/agent_base_vendor.py:224
msgid "Failed checking if deploy is done."
msgstr ""
-#: ironic/drivers/modules/agent_base_vendor.py:161
+#: ironic/drivers/modules/agent_base_vendor.py:232
msgid "Node failed to get image for deploy."
msgstr ""
-#: ironic/drivers/modules/agent_base_vendor.py:165
+#: ironic/drivers/modules/agent_base_vendor.py:236
msgid "Node failed to move to active state."
msgstr ""
-#: ironic/drivers/modules/agent_base_vendor.py:245
+#: ironic/drivers/modules/agent_base_vendor.py:249
+#, python-format
+msgid "Asynchronous exception for node %(node)s: %(msg)s exception: %(e)s"
+msgstr ""
+
+#: ironic/drivers/modules/agent_base_vendor.py:337
#, python-format
msgid "Malformed network interfaces lookup: %s"
msgstr ""
-#: ironic/drivers/modules/agent_base_vendor.py:274
+#: ironic/drivers/modules/agent_base_vendor.py:366
#, python-format
msgid "No ports matching the given MAC addresses %sexist in the database."
msgstr ""
-#: ironic/drivers/modules/agent_base_vendor.py:322
+#: ironic/drivers/modules/agent_base_vendor.py:414
#, python-format
msgid ""
"Ports matching mac addresses match multiple nodes. MACs: %(macs)s. Port "
"ids: %(port_ids)s"
msgstr ""
+#: ironic/drivers/modules/agent_base_vendor.py:443
+#, python-format
+msgid "Error rebooting node %(node)s. Error: %(error)s"
+msgstr ""
+
+#: ironic/drivers/modules/agent_base_vendor.py:475
+#, python-format
+msgid ""
+"Failed to install a bootloader when deploying node %(node)s. Error: "
+"%(error)s"
+msgstr ""
+
+#: ironic/drivers/modules/agent_base_vendor.py:484
+#, python-format
+msgid ""
+"Failed to change the boot device to %(boot_dev)s when deploying node "
+"%(node)s. Error: %(error)s"
+msgstr ""
+
#: ironic/drivers/modules/agent_client.py:48
msgid "Agent driver requires agent_url in driver_internal_info"
msgstr ""
+#: ironic/drivers/modules/agent_client.py:79
+#, python-format
+msgid ""
+"Unable to decode response as JSON.\n"
+"Request URL: %(url)s\n"
+"Request body: \"%(body)s\"\n"
+"Response: \"%(response)s\""
+msgstr ""
+
#: ironic/drivers/modules/console_utils.py:89
#, python-format
msgid ""
@@ -1037,74 +1276,125 @@ msgstr ""
msgid "Could not stop the console for node '%(node)s'. Reason: %(err)s."
msgstr ""
-#: ironic/drivers/modules/deploy_utils.py:117
+#: ironic/drivers/modules/deploy_utils.py:129
#, python-format
msgid ""
"iSCSI connection was not seen by the file system after attempting to "
"verify %d times."
msgstr ""
-#: ironic/drivers/modules/deploy_utils.py:140
+#: ironic/drivers/modules/deploy_utils.py:152
#, python-format
msgid ""
"iSCSI connection did not become active after attempting to verify %d "
"times."
msgstr ""
-#: ironic/drivers/modules/deploy_utils.py:406
+#: ironic/drivers/modules/deploy_utils.py:303
+#, python-format
+msgid ""
+"Unable to stat device %(dev)s after attempting to verify %(attempts)d "
+"times."
+msgstr ""
+
+#: ironic/drivers/modules/deploy_utils.py:501
#, python-format
msgid ""
"Can't download the configdrive content for node %(node)s from '%(url)s'. "
"Reason: %(reason)s"
msgstr ""
-#: ironic/drivers/modules/deploy_utils.py:415
+#: ironic/drivers/modules/deploy_utils.py:510
#, python-format
msgid ""
"Config drive for node %s is not base64 encoded or the content is "
"malformed."
msgstr ""
-#: ironic/drivers/modules/deploy_utils.py:418
+#: ironic/drivers/modules/deploy_utils.py:513
#, python-format
msgid " Downloaded from \"%s\"."
msgstr ""
-#: ironic/drivers/modules/deploy_utils.py:431
+#: ironic/drivers/modules/deploy_utils.py:526
#, python-format
msgid ""
"Encountered error while decompressing and writing config drive for node "
"%(node)s. Error: %(exc)s"
msgstr ""
-#: ironic/drivers/modules/deploy_utils.py:468
+#: ironic/drivers/modules/deploy_utils.py:597
#, python-format
-msgid "Parent device '%s' not found"
+msgid "Root device '%s' not found"
msgstr ""
-#: ironic/drivers/modules/deploy_utils.py:496
+#: ironic/drivers/modules/deploy_utils.py:607
#, python-format
-msgid "Root device '%s' not found"
+msgid "'%(partition)s' device '%(part_device)s' not found"
msgstr ""
-#: ironic/drivers/modules/deploy_utils.py:505
+#: ironic/drivers/modules/deploy_utils.py:735
#, python-format
-msgid "'%(partition)s' device '%(part_device)s' not found"
+msgid "Parent device '%s' not found"
msgstr ""
-#: ironic/drivers/modules/deploy_utils.py:613
+#: ironic/drivers/modules/deploy_utils.py:786
#, python-format
msgid "%(error_msg)s. Missing are: %(missing_info)s"
msgstr ""
-#: ironic/drivers/modules/deploy_utils.py:701
+#: ironic/drivers/modules/deploy_utils.py:875
#, python-format
msgid ""
"Error parsing capabilities from Node %s instance_info field. A dictionary"
-" or a dictionary string is expected."
+" or a \"jsonified\" dictionary is expected."
+msgstr ""
+
+#: ironic/drivers/modules/deploy_utils.py:909
+#, python-format
+msgid "get_clean_steps for node %(node)s returned invalid result: %(result)s"
+msgstr ""
+
+#: ironic/drivers/modules/deploy_utils.py:945
+#, python-format
+msgid "Agent on node %(node)s returned bad command result: %(result)s"
+msgstr ""
+
+#: ironic/drivers/modules/deploy_utils.py:1002
+#, python-format
+msgid ""
+"The hints \"%(invalid_hints)s\" are invalid. Valid hints are: "
+"\"%(valid_hints)s\""
+msgstr ""
+
+#: ironic/drivers/modules/deploy_utils.py:1012
+msgid "Root device hint \"size\" is not an integer value."
+msgstr ""
+
+#: ironic/drivers/modules/discoverd.py:78
+msgid "ironic-discoverd support is disabled"
+msgstr ""
+
+#: ironic/drivers/modules/discoverd.py:82
+msgid "ironic-discoverd Python module not found"
+msgstr ""
+
+#: ironic/drivers/modules/discoverd.py:88
+#, python-format
+msgid "ironic-discoverd version is too old: required >= 1.0.0, got %s"
+msgstr ""
+
+#: ironic/drivers/modules/discoverd.py:166
+#, python-format
+msgid "Failed to start inspection: %s"
+msgstr ""
+
+#: ironic/drivers/modules/discoverd.py:200
+#, python-format
+msgid "ironic-discoverd inspection failed: %s"
msgstr ""
-#: ironic/drivers/modules/fake.py:48 ironic/drivers/modules/ipminative.py:338
+#: ironic/drivers/modules/fake.py:48 ironic/drivers/modules/ipminative.py:343
#, python-format
msgid "set_power_state called with an invalid power state: %s."
msgstr ""
@@ -1130,11 +1420,12 @@ msgstr ""
msgid "Test if the value of bar is meow"
msgstr ""
-#: ironic/drivers/modules/fake.py:163 ironic/drivers/modules/ipminative.py:404
-#: ironic/drivers/modules/ipmitool.py:723
+#: ironic/drivers/modules/fake.py:163 ironic/drivers/modules/ipminative.py:409
+#: ironic/drivers/modules/ipmitool.py:734
#: ironic/drivers/modules/seamicro.py:571 ironic/drivers/modules/ssh.py:644
#: ironic/drivers/modules/virtualbox.py:338
-#: ironic/drivers/modules/ilo/management.py:132
+#: ironic/drivers/modules/ilo/management.py:198
+#: ironic/drivers/modules/irmc/management.py:142
#, python-format
msgid "Invalid boot device %s specified."
msgstr ""
@@ -1177,100 +1468,100 @@ msgstr ""
msgid "Cannot get power status for node '%(node)s'. iBoot get_relays() failed."
msgstr ""
-#: ironic/drivers/modules/iboot.py:185 ironic/drivers/modules/ipmitool.py:642
+#: ironic/drivers/modules/iboot.py:185 ironic/drivers/modules/ipmitool.py:653
#: ironic/drivers/modules/snmp.py:672 ironic/drivers/modules/ssh.py:561
#, python-format
msgid "set_power_state called with invalid power state %s."
msgstr ""
-#: ironic/drivers/modules/ipminative.py:63
+#: ironic/drivers/modules/ipminative.py:68
msgid "IP of the node's BMC. Required."
msgstr ""
-#: ironic/drivers/modules/ipminative.py:64
+#: ironic/drivers/modules/ipminative.py:69
msgid "IPMI password. Required."
msgstr ""
-#: ironic/drivers/modules/ipminative.py:65
+#: ironic/drivers/modules/ipminative.py:70
msgid "IPMI username. Required."
msgstr ""
-#: ironic/drivers/modules/ipminative.py:68
-#: ironic/drivers/modules/ipmitool.py:96 ironic/drivers/modules/seamicro.py:80
-#: ironic/drivers/modules/ilo/common.py:72
+#: ironic/drivers/modules/ipminative.py:73
+#: ironic/drivers/modules/ipmitool.py:95 ironic/drivers/modules/seamicro.py:80
+#: ironic/drivers/modules/ilo/common.py:75
msgid "node's UDP port to connect to. Only required for console access."
msgstr ""
-#: ironic/drivers/modules/ipminative.py:92
-#: ironic/drivers/modules/ipmitool.py:212
+#: ironic/drivers/modules/ipminative.py:97
+#: ironic/drivers/modules/ipmitool.py:223
#, python-format
msgid "Missing the following IPMI credentials in node's driver_info: %s."
msgstr ""
-#: ironic/drivers/modules/ipminative.py:110
-#: ironic/drivers/modules/ipmitool.py:232
+#: ironic/drivers/modules/ipminative.py:115
+#: ironic/drivers/modules/ipmitool.py:243
msgid "IPMI terminal port is not an integer."
msgstr ""
-#: ironic/drivers/modules/ipminative.py:494
-#: ironic/drivers/modules/ipmitool.py:940
+#: ironic/drivers/modules/ipminative.py:499
+#: ironic/drivers/modules/ipmitool.py:951
msgid "Missing 'ipmi_terminal_port' parameter in node's driver_info."
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:70
+#: ironic/drivers/modules/ipmitool.py:69
msgid "IP address or hostname of the node. Required."
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:73
+#: ironic/drivers/modules/ipmitool.py:72
msgid "password. Optional."
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:74
+#: ironic/drivers/modules/ipmitool.py:73
#, python-format
msgid "privilege level; default is ADMINISTRATOR. One of %s. Optional."
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:76
+#: ironic/drivers/modules/ipmitool.py:75
msgid "username; default is NULL user. Optional."
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:77
+#: ironic/drivers/modules/ipmitool.py:76
msgid ""
"bridging_type; default is \"no\". One of \"single\", \"dual\", \"no\". "
"Optional."
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:79
+#: ironic/drivers/modules/ipmitool.py:78
msgid ""
"transit channel for bridged request. Required only if ipmi_bridging is "
"set to \"dual\"."
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:81
+#: ironic/drivers/modules/ipmitool.py:80
msgid ""
"transit address for bridged request. Required only if ipmi_bridging is "
"set to \"dual\"."
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:83
+#: ironic/drivers/modules/ipmitool.py:82
msgid ""
"destination channel for bridged request. Required only if ipmi_bridging "
"is set to \"single\" or \"dual\"."
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:86
+#: ironic/drivers/modules/ipmitool.py:85
msgid ""
"destination address for bridged request. Required only if ipmi_bridging "
"is set to \"single\" or \"dual\"."
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:89
+#: ironic/drivers/modules/ipmitool.py:88
msgid ""
"local IPMB address for bridged requests. Used only if ipmi_bridging is "
"set to \"single\" or \"dual\". Optional."
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:243
+#: ironic/drivers/modules/ipmitool.py:254
#, python-format
msgid ""
"Value for ipmi_bridging is provided as %s, but IPMI bridging is not "
@@ -1278,152 +1569,206 @@ msgid ""
"is > 1.8.11"
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:266
+#: ironic/drivers/modules/ipmitool.py:277
#, python-format
msgid "%(param)s not provided"
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:269
+#: ironic/drivers/modules/ipmitool.py:280
#, python-format
msgid ""
"Invalid value for ipmi_bridging: %(bridging_type)s, the valid value can "
"be one of: %(bridging_types)s"
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:277
+#: ironic/drivers/modules/ipmitool.py:288
#, python-format
msgid ""
"Invalid privilege level value:%(priv_level)s, the valid value can be one "
"of %(valid_levels)s"
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:507
+#: ironic/drivers/modules/ipmitool.py:518
#, python-format
msgid "parse ipmi sensor data failed, unknown sensor type data: %(sensors_data)s"
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:545
+#: ironic/drivers/modules/ipmitool.py:556
#, python-format
msgid ""
"parse ipmi sensor data failed, get nothing with input data: "
"%(sensors_data)s"
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:587
-#: ironic/drivers/modules/ipmitool.py:678
-#: ironic/drivers/modules/ipmitool.py:830
-#: ironic/drivers/modules/ipmitool.py:922
+#: ironic/drivers/modules/ipmitool.py:598
+#: ironic/drivers/modules/ipmitool.py:689
+#: ironic/drivers/modules/ipmitool.py:841
+#: ironic/drivers/modules/ipmitool.py:933
msgid ""
"Unable to locate usable ipmitool command in the system path when checking"
" ipmitool version"
msgstr ""
-#: ironic/drivers/modules/ipmitool.py:906
+#: ironic/drivers/modules/ipmitool.py:917
msgid "Parameter raw_bytes (string of bytes) was not specified."
msgstr ""
-#: ironic/drivers/modules/iscsi_deploy.py:119
+#: ironic/drivers/modules/iscsi_deploy.py:127
msgid ""
"Cannot validate iSCSI deploy. Some parameters were missing in node's "
"instance_info"
msgstr ""
-#: ironic/drivers/modules/iscsi_deploy.py:131
+#: ironic/drivers/modules/iscsi_deploy.py:136
#, python-format
msgid ""
"Cannot validate parameter for iSCSI deploy. Invalid parameter %(param)s. "
"Reason: %(reason)s"
msgstr ""
-#: ironic/drivers/modules/iscsi_deploy.py:137
+#: ironic/drivers/modules/iscsi_deploy.py:142
#, python-format
-msgid "'%s' is not an integer value."
+msgid "%s is not an integer value."
+msgstr ""
+
+#: ironic/drivers/modules/iscsi_deploy.py:149
+msgid "Cannot deploy whole disk image with swap or ephemeral size set"
msgstr ""
-#: ironic/drivers/modules/iscsi_deploy.py:166
+#: ironic/drivers/modules/iscsi_deploy.py:182
#, python-format
msgid ""
"Root partition is too small for requested image. Image size: %(image_mb)d"
" MB, Root size: %(root_mb)d MB"
msgstr ""
-#: ironic/drivers/modules/iscsi_deploy.py:221
+#: ironic/drivers/modules/iscsi_deploy.py:236
msgid "Deploy key does not match"
msgstr ""
-#: ironic/drivers/modules/iscsi_deploy.py:237
+#: ironic/drivers/modules/iscsi_deploy.py:257
#, python-format
msgid "Parameters %s were not passed to ironic for deploy."
msgstr ""
-#: ironic/drivers/modules/iscsi_deploy.py:268
-msgid "Failure in deploy ramdisk."
+#: ironic/drivers/modules/iscsi_deploy.py:304
+#, python-format
+msgid "Error returned from deploy ramdisk: %s"
msgstr ""
-#: ironic/drivers/modules/iscsi_deploy.py:282 ironic/drivers/modules/pxe.py:518
-#: ironic/drivers/modules/ilo/deploy.py:528
-msgid "Failed to continue iSCSI deployment."
+#: ironic/drivers/modules/iscsi_deploy.py:325
+#, python-format
+msgid "Deploy failed for instance %(instance)s. Error: %(error)s"
msgstr ""
-#: ironic/drivers/modules/iscsi_deploy.py:389
+#: ironic/drivers/modules/iscsi_deploy.py:333
+#, python-format
+msgid ""
+"Couldn't determine the UUID of the root partition or the disk identifier "
+"after deploying node %s"
+msgstr ""
+
+#: ironic/drivers/modules/iscsi_deploy.py:371
+#, python-format
+msgid ""
+"Failed to start the iSCSI target to deploy the node %(node)s. Error: "
+"%(error)s"
+msgstr ""
+
+#: ironic/drivers/modules/iscsi_deploy.py:495
#, python-format
msgid "Failed to connect to Glance to get the properties of the image %s"
msgstr ""
-#: ironic/drivers/modules/iscsi_deploy.py:393
+#: ironic/drivers/modules/iscsi_deploy.py:499
#, python-format
-msgid "Image %s not found in Glance"
+msgid "Image %s can not be found."
msgstr ""
-#: ironic/drivers/modules/iscsi_deploy.py:403
+#: ironic/drivers/modules/iscsi_deploy.py:511
#, python-format
msgid "Image %(image)s is missing the following properties: %(properties)s"
msgstr ""
-#: ironic/drivers/modules/iscsi_deploy.py:423 ironic/drivers/modules/ssh.py:508
+#: ironic/drivers/modules/iscsi_deploy.py:531 ironic/drivers/modules/ssh.py:508
#, python-format
msgid "Node %s does not have any port associated with it."
msgstr ""
-#: ironic/drivers/modules/iscsi_deploy.py:432
+#: ironic/drivers/modules/iscsi_deploy.py:540
#, python-format
msgid ""
"Couldn't get the URL of the Ironic API service from the configuration "
"file or keystone catalog. Keystone error: %s"
msgstr ""
-#: ironic/drivers/modules/pxe.py:100
-msgid "UUID (from Glance) of the deployment kernel. Required."
+#: ironic/drivers/modules/iscsi_deploy.py:563
+msgid ""
+"Some mandatory input missing in 'pass_bootloader_info' vendor passthru "
+"from ramdisk."
+msgstr ""
+
+#: ironic/drivers/modules/iscsi_deploy.py:570
+#, python-format
+msgid "Deploy key %(key_sent)s does not match with %(expected_key)s"
+msgstr ""
+
+#: ironic/drivers/modules/iscsi_deploy.py:588
+#, python-format
+msgid "Failed to install bootloader on node %(node)s. Error: %(error)s."
msgstr ""
-#: ironic/drivers/modules/pxe.py:102
+#: ironic/drivers/modules/iscsi_deploy.py:613
+#, python-format
+msgid ""
+"Failed to notify ramdisk to reboot after bootloader installation. Error: "
+"%s"
+msgstr ""
+
+#: ironic/drivers/modules/pxe.py:104
msgid "UUID (from Glance) of the ramdisk that is mounted at boot time. Required."
msgstr ""
-#: ironic/drivers/modules/pxe.py:124
+#: ironic/drivers/modules/pxe.py:106
+msgid ""
+"DEPRECATED: Use deploy_kernel instead. UUID (from Glance) of the "
+"deployment kernel. Required."
+msgstr ""
+
+#: ironic/drivers/modules/pxe.py:109
+msgid ""
+"DEPRECATED: Use deploy_ramdisk instead. UUID (from Glance) of the ramdisk"
+" that is mounted at boot time. Required."
+msgstr ""
+
+#: ironic/drivers/modules/pxe.py:147
msgid ""
"Cannot validate PXE bootloader. Some parameters were missing in node's "
"driver_info"
msgstr ""
-#: ironic/drivers/modules/pxe.py:315
+#: ironic/drivers/modules/pxe.py:252
#, python-format
msgid ""
-"Local boot is requested, but can't be used with node %s because it's "
-"configured to use UEFI boot"
+"Conflict: Whole disk image being used for deploy, but cannot be used with"
+" node %(node_uuid)s configured to use UEFI boot with netboot option"
msgstr ""
-#: ironic/drivers/modules/pxe.py:323
+#: ironic/drivers/modules/pxe.py:360
msgid "iPXE boot is enabled but no HTTP URL or HTTP root was specified."
msgstr ""
-#: ironic/drivers/modules/pxe.py:330
+#: ironic/drivers/modules/pxe.py:367
#, python-format
msgid ""
"Conflict: iPXE is enabled, but cannot be used with node%(node_uuid)s "
"configured to use UEFI boot"
msgstr ""
+#: ironic/drivers/modules/pxe.py:638 ironic/drivers/modules/ilo/deploy.py:836
+msgid "Failed to continue iSCSI deployment."
+msgstr ""
+
#: ironic/drivers/modules/seamicro.py:68
msgid "API endpoint. Required."
msgstr ""
@@ -1667,6 +2012,46 @@ msgstr ""
msgid "'set_power_state' called with invalid power state '%s'"
msgstr ""
+#: ironic/drivers/modules/amt/common.py:37
+msgid "IP address or host name of the node. Required."
+msgstr ""
+
+#: ironic/drivers/modules/amt/common.py:38
+msgid "Password. Required."
+msgstr ""
+
+#: ironic/drivers/modules/amt/common.py:39
+msgid "Username to log into AMT system. Required."
+msgstr ""
+
+#: ironic/drivers/modules/amt/common.py:42
+msgid ""
+"Protocol used for AMT endpoint. one of http, https; default is \"http\". "
+"Optional."
+msgstr ""
+
+#: ironic/drivers/modules/amt/common.py:159
+#, python-format
+msgid "AMT driver requires the following to be set in node's driver_info: %s."
+msgstr ""
+
+#: ironic/drivers/modules/amt/common.py:167
+#, python-format
+msgid "Invalid protocol %s."
+msgstr ""
+
+#: ironic/drivers/modules/amt/management.py:144
+#, python-format
+msgid ""
+"set_boot_device called with invalid device %(device)s for node "
+"%(node_id)s."
+msgstr ""
+
+#: ironic/drivers/modules/amt/power.py:165
+#, python-format
+msgid "Unsupported target_state: %s"
+msgstr ""
+
#: ironic/drivers/modules/drac/common.py:26
msgid "IP address or hostname of the DRAC card. Required."
msgstr ""
@@ -1722,138 +2107,222 @@ msgid ""
"%s"
msgstr ""
-#: ironic/drivers/modules/ilo/common.py:62
+#: ironic/drivers/modules/ilo/common.py:65
msgid "IP address or hostname of the iLO. Required."
msgstr ""
-#: ironic/drivers/modules/ilo/common.py:63
+#: ironic/drivers/modules/ilo/common.py:66
msgid "username for the iLO with administrator privileges. Required."
msgstr ""
-#: ironic/drivers/modules/ilo/common.py:65
+#: ironic/drivers/modules/ilo/common.py:68
msgid "password for ilo_username. Required."
msgstr ""
-#: ironic/drivers/modules/ilo/common.py:68
+#: ironic/drivers/modules/ilo/common.py:71
msgid "port to be used for iLO operations. Optional."
msgstr ""
-#: ironic/drivers/modules/ilo/common.py:69
+#: ironic/drivers/modules/ilo/common.py:72
msgid "timeout (in seconds) for iLO operations. Optional."
msgstr ""
-#: ironic/drivers/modules/ilo/common.py:108
+#: ironic/drivers/modules/ilo/common.py:79
+msgid ""
+"new password for iLO. Required if the clean step 'reset_ilo_credential' "
+"is enabled."
+msgstr ""
+
+#: ironic/drivers/modules/ilo/common.py:115
#, python-format
msgid ""
"The following required iLO parameters are missing from the node's "
"driver_info: %s"
msgstr ""
-#: ironic/drivers/modules/ilo/common.py:129
+#: ironic/drivers/modules/ilo/common.py:136
#, python-format
msgid ""
"The following iLO parameters from the node's driver_info should be "
"integers: %s"
msgstr ""
-#: ironic/drivers/modules/ilo/common.py:177
+#: ironic/drivers/modules/ilo/common.py:184
msgid "iLO license check"
msgstr ""
-#: ironic/drivers/modules/ilo/common.py:289
+#: ironic/drivers/modules/ilo/common.py:296
#, python-format
msgid "Inserting virtual media %s"
msgstr ""
-#: ironic/drivers/modules/ilo/common.py:319
+#: ironic/drivers/modules/ilo/common.py:326
+#: ironic/drivers/modules/ilo/common.py:375
#, python-format
msgid "Setting %s as boot mode"
msgstr ""
-#: ironic/drivers/modules/ilo/deploy.py:47
+#: ironic/drivers/modules/ilo/common.py:479
+#, python-format
+msgid "Get secure boot mode for node %s."
+msgstr ""
+
+#: ironic/drivers/modules/ilo/common.py:512
+#, python-format
+msgid "Setting secure boot to %(flag)s for node %(node)s."
+msgstr ""
+
+#: ironic/drivers/modules/ilo/deploy.py:61
msgid "UUID (from Glance) of the deployment ISO. Required."
msgstr ""
-#: ironic/drivers/modules/ilo/deploy.py:177
+#: ironic/drivers/modules/ilo/deploy.py:218
msgid ""
"Error validating iLO virtual media deploy. Some parameters were missing "
"in node's driver_info"
msgstr ""
-#: ironic/drivers/modules/ilo/deploy.py:446
+#: ironic/drivers/modules/ilo/deploy.py:695
msgid "Missing 'console_port' parameter in node's driver_info."
msgstr ""
-#: ironic/drivers/modules/ilo/management.py:100
+#: ironic/drivers/modules/ilo/inspect.py:101
+#, python-format
+msgid "Server didn't return the key(s): %(key)s"
+msgstr ""
+
+#: ironic/drivers/modules/ilo/inspect.py:106
+#, python-format
+msgid ""
+"Essential properties are expected to be in dictionary format, received "
+"%(properties)s from node %(node)s."
+msgstr ""
+
+#: ironic/drivers/modules/ilo/inspect.py:112
+#, python-format
+msgid "The node %s didn't return 'properties' as the key with inspection."
+msgstr ""
+
+#: ironic/drivers/modules/ilo/inspect.py:118
+#, python-format
+msgid "Node %(node)s didn't return MACs %(macs)s in dictionary format."
+msgstr ""
+
+#: ironic/drivers/modules/ilo/inspect.py:123
+#, python-format
+msgid "The node %s didn't return 'macs' as the key with inspection."
+msgstr ""
+
+#: ironic/drivers/modules/ilo/inspect.py:172
+#, python-format
+msgid ""
+"Node %(node)s has invalid capabilities string %(capabilities)s, unable to"
+" modify the node properties['capabilities'] string"
+msgstr ""
+
+#: ironic/drivers/modules/ilo/inspect.py:180
+#, python-format
+msgid ""
+"The expected format of capabilities from inspection is dictionary while "
+"node %(node)s returned %(capabilities)s."
+msgstr ""
+
+#: ironic/drivers/modules/ilo/inspect.py:249
+#, python-format
+msgid "Inspecting hardware (get_power_state) on %s"
+msgstr ""
+
+#: ironic/drivers/modules/ilo/management.py:91
+#, python-format
+msgid "Clean step '%s' not found. 'proliantutils' package needs to be updated."
+msgstr ""
+
+#: ironic/drivers/modules/ilo/management.py:102
+#, python-format
+msgid "Clean step %(step)s failed on node %(node)s with error: %(err)s"
+msgstr ""
+
+#: ironic/drivers/modules/ilo/management.py:166
msgid "Get boot device"
msgstr ""
-#: ironic/drivers/modules/ilo/management.py:143
+#: ironic/drivers/modules/ilo/management.py:209
#, python-format
msgid "Setting %s as boot device"
msgstr ""
-#: ironic/drivers/modules/ilo/power.py:88
+#: ironic/drivers/modules/ilo/power.py:95
msgid "iLO get_power_status"
msgstr ""
-#: ironic/drivers/modules/ilo/power.py:152
+#: ironic/drivers/modules/ilo/power.py:159
#: ironic/drivers/modules/irmc/power.py:60
#, python-format
msgid "_set_power_state called with invalid power state '%s'"
msgstr ""
-#: ironic/drivers/modules/ilo/power.py:161
+#: ironic/drivers/modules/ilo/power.py:168
msgid "iLO set_power_state"
msgstr ""
-#: ironic/drivers/modules/irmc/common.py:46
+#: ironic/drivers/modules/irmc/common.py:50
msgid "IP address or hostname of the iRMC. Required."
msgstr ""
-#: ironic/drivers/modules/irmc/common.py:47
+#: ironic/drivers/modules/irmc/common.py:51
msgid "Username for the iRMC with administrator privileges. Required."
msgstr ""
-#: ironic/drivers/modules/irmc/common.py:49
+#: ironic/drivers/modules/irmc/common.py:53
msgid "Password for irmc_username. Required."
msgstr ""
-#: ironic/drivers/modules/irmc/common.py:52
+#: ironic/drivers/modules/irmc/common.py:56
msgid ""
"Port to be used for iRMC operations; either 80 or 443. The default value "
"is 443. Optional."
msgstr ""
-#: ironic/drivers/modules/irmc/common.py:54
+#: ironic/drivers/modules/irmc/common.py:58
msgid ""
"Authentication method for iRMC operations; either 'basic' or 'digest'. "
-"The default value is 'digest'. Optional."
+"The default value is 'basic'. Optional."
msgstr ""
-#: ironic/drivers/modules/irmc/common.py:57
+#: ironic/drivers/modules/irmc/common.py:61
msgid ""
"Timeout (in seconds) for iRMC operations. The default value is 60. "
"Optional."
msgstr ""
-#: ironic/drivers/modules/irmc/common.py:82
+#: ironic/drivers/modules/irmc/common.py:63
+msgid ""
+"Sensor data retrieval method; either 'ipmitool' or 'scci'. The default "
+"value is 'ipmitool'. Optional."
+msgstr ""
+
+#: ironic/drivers/modules/irmc/common.py:89
#, python-format
msgid "Missing the following iRMC parameters in node's driver_info: %s."
msgstr ""
-#: ironic/drivers/modules/irmc/common.py:96
-#: ironic/drivers/modules/irmc/common.py:99
-#, python-format
-msgid "'%s' has unsupported value."
+#: ironic/drivers/modules/irmc/common.py:103
+msgid "'irmc_auth_method' has unsupported value."
msgstr ""
-#: ironic/drivers/modules/irmc/common.py:102
-#, python-format
-msgid "'%s' is not integer type."
+#: ironic/drivers/modules/irmc/common.py:106
+msgid "'irmc_port' has unsupported value."
msgstr ""
-#: ironic/drivers/modules/irmc/common.py:104
+#: ironic/drivers/modules/irmc/common.py:109
+msgid "'irmc_client_timeout' is not integer type."
+msgstr ""
+
+#: ironic/drivers/modules/irmc/common.py:112
+msgid "'irmc_sensor_method' has unsupported value."
+msgstr ""
+
+#: ironic/drivers/modules/irmc/common.py:114
#, python-format
msgid ""
"The following type errors were encountered while parsing driver_info:\n"
@@ -1900,18 +2369,6 @@ msgstr ""
msgid "An object of class %s is required here"
msgstr ""
-#: ironic/openstack/common/cliutils.py:271
-#, python-format
-msgid "No %(name)s with a name or ID of '%(name_or_id)s' exists."
-msgstr ""
-
-#: ironic/openstack/common/cliutils.py:279
-#, python-format
-msgid ""
-"Multiple %(name)s matches found for '%(name_or_id)s', use an ID to be "
-"more specific."
-msgstr ""
-
#: ironic/openstack/common/gettextutils.py:301
msgid "Message objects do not support addition."
msgstr ""
@@ -1922,16 +2379,17 @@ msgid ""
"characters. Please use unicode() or translate() instead."
msgstr ""
-#: ironic/openstack/common/imageutils.py:75
+#: ironic/openstack/common/imageutils.py:76
#, python-format
msgid "Invalid input value \"%s\"."
msgstr ""
-#: ironic/openstack/common/imageutils.py:104
+#: ironic/openstack/common/imageutils.py:105
msgid "Snapshot list encountered but no header found!"
msgstr ""
#: ironic/openstack/common/log.py:298
+#: ironic/openstack/common/versionutils.py:241
#, python-format
msgid "Deprecated: %s"
msgstr ""
@@ -1947,6 +2405,7 @@ msgid "syslog facility must be one of: %s"
msgstr ""
#: ironic/openstack/common/log.py:715
+#: ironic/openstack/common/versionutils.py:259
#, python-format
msgid "Fatal call to deprecated config: %(msg)s"
msgstr ""
@@ -1956,225 +2415,27 @@ msgstr ""
msgid "Unexpected argument for periodic task creation: %(arg)s."
msgstr ""
-#: ironic/openstack/common/policy.py:111
-msgid "The JSON file that defines policies."
-msgstr ""
-
-#: ironic/openstack/common/policy.py:114
-msgid "Default rule. Enforced when a requested rule is not found."
-msgstr ""
-
-#: ironic/openstack/common/policy.py:118
-msgid ""
-"Directories where policy configuration files are stored. They can be "
-"relative to any directory in the search path defined by the config_dir "
-"option, or absolute paths. The file defined by policy_file must exist for"
-" these directories to be searched."
-msgstr ""
-
-#: ironic/openstack/common/policy.py:142
-#, python-format
-msgid "Policy doesn't allow %s to be performed."
-msgstr ""
-
-#: ironic/openstack/common/policy.py:239
-#, python-format
-msgid "Rules must be an instance of dict or Rules, got %s instead"
-msgstr ""
-
-#: ironic/openstack/common/strutils.py:114
-#, python-format
-msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s"
-msgstr ""
-
-#: ironic/openstack/common/strutils.py:219
-#, python-format
-msgid "Invalid unit system: \"%s\""
-msgstr ""
-
-#: ironic/openstack/common/strutils.py:228
-#, python-format
-msgid "Invalid string format: %s"
-msgstr ""
-
-#: ironic/openstack/common/versionutils.py:88
+#: ironic/openstack/common/versionutils.py:108
#, python-format
msgid ""
"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and "
"may be removed in %(remove_in)s."
msgstr ""
-#: ironic/openstack/common/versionutils.py:92
+#: ironic/openstack/common/versionutils.py:112
#, python-format
msgid ""
"%(what)s is deprecated as of %(as_of)s and may be removed in "
"%(remove_in)s. It will not be superseded."
msgstr ""
-#: ironic/openstack/common/versionutils.py:96
+#: ironic/openstack/common/versionutils.py:116
#, python-format
msgid "%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s."
msgstr ""
-#: ironic/openstack/common/versionutils.py:99
+#: ironic/openstack/common/versionutils.py:119
#, python-format
msgid "%(what)s is deprecated as of %(as_of)s. It will not be superseded."
msgstr ""
-#: ironic/openstack/common/apiclient/base.py:224
-#: ironic/openstack/common/apiclient/base.py:381
-#, python-format
-msgid "No %(name)s matching %(args)s."
-msgstr ""
-
-#: ironic/openstack/common/apiclient/client.py:233
-msgid "Cannot find endpoint or token for request"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/client.py:356
-#, python-format
-msgid ""
-"Invalid %(api_name)s client version '%(version)s'. Must be one of: "
-"%(version_map)s"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:41
-#, python-format
-msgid "Missing arguments: %s"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:74
-#, python-format
-msgid "Authentication failed. Missing options: %s"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:83
-#, python-format
-msgid "AuthSystemNotFound: %s"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:106
-#, python-format
-msgid "AmbiguousEndpoints: %s"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:114
-msgid "HTTP Error"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:134
-msgid "HTTP Redirection"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:142
-msgid "HTTP Client Error"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:151
-msgid "HTTP Server Error"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:161
-msgid "Multiple Choices"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:170
-msgid "Bad Request"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:180
-msgid "Unauthorized"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:189
-msgid "Payment Required"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:199
-msgid "Forbidden"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:209
-msgid "Not Found"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:219
-msgid "Method Not Allowed"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:229
-msgid "Not Acceptable"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:238
-msgid "Proxy Authentication Required"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:247
-msgid "Request Timeout"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:257
-msgid "Conflict"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:267
-msgid "Gone"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:277
-msgid "Length Required"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:287
-msgid "Precondition Failed"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:296
-msgid "Request Entity Too Large"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:313
-msgid "Request-URI Too Long"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:323
-msgid "Unsupported Media Type"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:333
-msgid "Requested Range Not Satisfiable"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:342
-msgid "Expectation Failed"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:352
-msgid "Unprocessable Entity"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:361
-msgid "Internal Server Error"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:372
-msgid "Not Implemented"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:382
-msgid "Bad Gateway"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:391
-msgid "Service Unavailable"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:401
-msgid "Gateway Timeout"
-msgstr ""
-
-#: ironic/openstack/common/apiclient/exceptions.py:410
-msgid "HTTP Version Not Supported"
-msgstr ""
-
diff --git a/ironic/locale/pt_BR/LC_MESSAGES/ironic-log-critical.po b/ironic/locale/pt_BR/LC_MESSAGES/ironic-log-critical.po
new file mode 100644
index 000000000..712adbb10
--- /dev/null
+++ b/ironic/locale/pt_BR/LC_MESSAGES/ironic-log-critical.po
@@ -0,0 +1,25 @@
+# Translations template for ironic.
+# Copyright (C) 2015 ORGANIZATION
+# This file is distributed under the same license as the ironic project.
+#
+# Translators:
+# Lucas Alvares Gomes <lucasagomes@gmail.com>, 2015
+msgid ""
+msgstr ""
+"Project-Id-Version: Ironic\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2015-04-08 06:27+0000\n"
+"PO-Revision-Date: 2015-03-30 09:01+0000\n"
+"Last-Translator: Lucas Alvares Gomes <lucasagomes@gmail.com>\n"
+"Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/"
+"ironic/language/pt_BR/)\n"
+"Language: pt_BR\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=2; plural=(n > 1);\n"
+
+#: ironic/conductor/manager.py:297
+msgid "Failed to start keepalive"
+msgstr "Falha ao inicar o keep alive"
diff --git a/ironic/openstack/common/service.py b/ironic/openstack/common/service.py
index e4eed8a2e..694edf271 100644
--- a/ironic/openstack/common/service.py
+++ b/ironic/openstack/common/service.py
@@ -199,18 +199,30 @@ class ServiceWrapper(object):
class ProcessLauncher(object):
- def __init__(self):
- """Constructor."""
+ _signal_handlers_set = set()
+
+ @classmethod
+ def _handle_class_signals(cls, *args, **kwargs):
+ for handler in cls._signal_handlers_set:
+ handler(*args, **kwargs)
+ def __init__(self, wait_interval=0.01):
+ """Constructor.
+
+ :param wait_interval: The interval to sleep for between checks
+ of child process exit.
+ """
self.children = {}
self.sigcaught = None
self.running = True
+ self.wait_interval = wait_interval
rfd, self.writepipe = os.pipe()
self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
self.handle_signal()
def handle_signal(self):
- _set_signals_handler(self._handle_signal)
+ self._signal_handlers_set.add(self._handle_signal)
+ _set_signals_handler(self._handle_class_signals)
def _handle_signal(self, signo, frame):
self.sigcaught = signo
@@ -329,8 +341,8 @@ class ProcessLauncher(object):
def _wait_child(self):
try:
- # Block while any of child processes have exited
- pid, status = os.waitpid(0, 0)
+ # Don't block if no child processes have exited
+ pid, status = os.waitpid(0, os.WNOHANG)
if not pid:
return None
except OSError as exc:
@@ -359,6 +371,10 @@ class ProcessLauncher(object):
while self.running:
wrap = self._wait_child()
if not wrap:
+ # Yield to other threads if no children have exited
+ # Sleep for a short time to avoid excessive CPU usage
+ # (see bug #1095346)
+ eventlet.greenthread.sleep(self.wait_interval)
continue
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
@@ -383,8 +399,14 @@ class ProcessLauncher(object):
if not _is_sighup_and_daemon(self.sigcaught):
break
+ cfg.CONF.reload_config_files()
+ for service in set(
+ [wrap.service for wrap in self.children.values()]):
+ service.reset()
+
for pid in self.children:
os.kill(pid, signal.SIGHUP)
+
self.running = True
self.sigcaught = None
except eventlet.greenlet.GreenletExit:
diff --git a/ironic/openstack/common/versionutils.py b/ironic/openstack/common/versionutils.py
index dce07a82e..83bc04eac 100644
--- a/ironic/openstack/common/versionutils.py
+++ b/ironic/openstack/common/versionutils.py
@@ -17,6 +17,7 @@
Helpers for comparing version strings.
"""
+import copy
import functools
import inspect
import logging
@@ -32,13 +33,19 @@ LOG = logging.getLogger(__name__)
CONF = cfg.CONF
-opts = [
+deprecated_opts = [
cfg.BoolOpt('fatal_deprecations',
default=False,
help='Enables or disables fatal status of deprecations.'),
]
+def list_opts():
+ """Entry point for oslo.config-generator.
+ """
+ return [(None, copy.deepcopy(deprecated_opts))]
+
+
class deprecated(object):
"""A decorator to mark callables as deprecated.
@@ -232,7 +239,7 @@ def report_deprecated_feature(logger, msg, *args, **kwargs):
fatal deprecations.
"""
stdmsg = _("Deprecated: %s") % msg
- CONF.register_opts(opts)
+ CONF.register_opts(deprecated_opts)
if CONF.fatal_deprecations:
logger.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
diff --git a/ironic/tests/__init__.py b/ironic/tests/__init__.py
index 7e8390833..918237d30 100644
--- a/ironic/tests/__init__.py
+++ b/ironic/tests/__init__.py
@@ -32,14 +32,3 @@ eventlet.monkey_patch(os=False)
# The code below enables nosetests to work with i18n _() blocks
import six.moves.builtins as __builtin__
setattr(__builtin__, '_', lambda x: x)
-
-# NOTE(viktors): Ironic unittests patches timeutils from oslo_utils. At the
-# same time oslo.db uses oslo.utils not oslo_utils till 1.5.0
-# release, so timeutils in oslo.db code at and leave not
-# patched, so time comparison fails in Ironic tests. To avoid
-# this we have oslo_db use timeutils from oslo_utils in tests.
-# TODO(viktors): Remove this workaround when Ironic will use oslo.db 1.5.0
-from oslo_db.sqlalchemy import models
-from oslo_utils import timeutils
-
-models.timeutils = timeutils
diff --git a/ironic/tests/api/test_acl.py b/ironic/tests/api/test_acl.py
index 0b60387d8..8de194ffc 100644
--- a/ironic/tests/api/test_acl.py
+++ b/ironic/tests/api/test_acl.py
@@ -88,7 +88,10 @@ class TestACL(base.FunctionalTest):
self.assertEqual(200, response.status_int)
def test_public_api_with_path_extensions(self):
- for route in ('/v1/', '/v1.json', '/v1.xml'):
- response = self.get_json(route,
+ routes = {'/v1/': 200,
+ '/v1.json': 200,
+ '/v1.xml': 404}
+ for url in routes:
+ response = self.get_json(url,
path_prefix='', expect_errors=True)
- self.assertEqual(200, response.status_int)
+ self.assertEqual(routes[url], response.status_int)
diff --git a/ironic/tests/api/utils.py b/ironic/tests/api/utils.py
index ee029717f..76d229b0a 100644
--- a/ironic/tests/api/utils.py
+++ b/ironic/tests/api/utils.py
@@ -94,3 +94,13 @@ def chassis_post_data(**kw):
chassis = utils.get_test_chassis(**kw)
internal = chassis_controller.ChassisPatchType.internal_attrs()
return remove_internal(chassis, internal)
+
+
+def post_get_test_node(**kw):
+ # NOTE(lucasagomes): When creating a node via API (POST)
+ # we have to use chassis_uuid
+ node = node_post_data(**kw)
+ chassis = utils.get_test_chassis()
+ node['chassis_id'] = None
+ node['chassis_uuid'] = kw.get('chassis_uuid', chassis['uuid'])
+ return node
diff --git a/ironic/tests/api/v1/test_chassis.py b/ironic/tests/api/v1/test_chassis.py
index 71607b270..a45e79e5a 100644
--- a/ironic/tests/api/v1/test_chassis.py
+++ b/ironic/tests/api/v1/test_chassis.py
@@ -21,6 +21,7 @@ import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
+import six
from six.moves.urllib import parse as urlparse
from wsme import types as wtypes
@@ -77,16 +78,16 @@ class TestListChassis(api_base.FunctionalTest):
ch_list = []
for id_ in range(5):
chassis = obj_utils.create_test_chassis(
- self.context, id=id_, uuid=uuidutils.generate_uuid())
+ self.context, uuid=uuidutils.generate_uuid())
ch_list.append(chassis.uuid)
data = self.get_json('/chassis')
self.assertEqual(len(ch_list), len(data['chassis']))
uuids = [n['uuid'] for n in data['chassis']]
- self.assertEqual(ch_list.sort(), uuids.sort())
+ six.assertCountEqual(self, ch_list, uuids)
def test_links(self):
uuid = uuidutils.generate_uuid()
- obj_utils.create_test_chassis(self.context, id=1, uuid=uuid)
+ obj_utils.create_test_chassis(self.context, uuid=uuid)
data = self.get_json('/chassis/%s' % uuid)
self.assertIn('links', data.keys())
self.assertEqual(2, len(data['links']))
@@ -97,7 +98,7 @@ class TestListChassis(api_base.FunctionalTest):
def test_collection_links(self):
for id in range(5):
- obj_utils.create_test_chassis(self.context, id=id,
+ obj_utils.create_test_chassis(self.context,
uuid=uuidutils.generate_uuid())
data = self.get_json('/chassis/?limit=3')
self.assertEqual(3, len(data['chassis']))
@@ -108,7 +109,7 @@ class TestListChassis(api_base.FunctionalTest):
def test_collection_links_default_limit(self):
cfg.CONF.set_override('max_limit', 3, 'api')
for id_ in range(5):
- obj_utils.create_test_chassis(self.context, id=id_,
+ obj_utils.create_test_chassis(self.context,
uuid=uuidutils.generate_uuid())
data = self.get_json('/chassis')
self.assertEqual(3, len(data['chassis']))
@@ -186,8 +187,7 @@ class TestPatch(api_base.FunctionalTest):
def test_replace_multi(self):
extra = {"foo1": "bar1", "foo2": "bar2", "foo3": "bar3"}
chassis = obj_utils.create_test_chassis(self.context, extra=extra,
- uuid=uuidutils.generate_uuid(),
- id=1)
+ uuid=uuidutils.generate_uuid())
new_value = 'new value'
response = self.patch_json('/chassis/%s' % chassis.uuid,
[{'path': '/extra/foo2',
@@ -201,8 +201,7 @@ class TestPatch(api_base.FunctionalTest):
def test_remove_singular(self):
chassis = obj_utils.create_test_chassis(self.context, extra={'a': 'b'},
- uuid=uuidutils.generate_uuid(),
- id=1)
+ uuid=uuidutils.generate_uuid())
response = self.patch_json('/chassis/%s' % chassis.uuid,
[{'path': '/description', 'op': 'remove'}])
self.assertEqual('application/json', response.content_type)
@@ -218,8 +217,7 @@ class TestPatch(api_base.FunctionalTest):
extra = {"foo1": "bar1", "foo2": "bar2", "foo3": "bar3"}
chassis = obj_utils.create_test_chassis(self.context, extra=extra,
description="foobar",
- uuid=uuidutils.generate_uuid(),
- id=1)
+ uuid=uuidutils.generate_uuid())
# Removing one item from the collection
response = self.patch_json('/chassis/%s' % chassis.uuid,
@@ -343,7 +341,7 @@ class TestPost(api_base.FunctionalTest):
def test_post_nodes_subresource(self):
chassis = obj_utils.create_test_chassis(self.context)
- ndict = apiutils.node_post_data(chassis_id=None)
+ ndict = apiutils.node_post_data()
ndict['chassis_uuid'] = chassis.uuid
response = self.post_json('/chassis/nodes', ndict,
expect_errors=True)
diff --git a/ironic/tests/api/v1/test_nodes.py b/ironic/tests/api/v1/test_nodes.py
index 4f79e6aff..dbb95daaf 100644
--- a/ironic/tests/api/v1/test_nodes.py
+++ b/ironic/tests/api/v1/test_nodes.py
@@ -22,7 +22,6 @@ import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
-import pecan
from six.moves.urllib import parse as urlparse
from testtools.matchers import HasLength
from wsme import types as wtypes
@@ -30,6 +29,7 @@ from wsme import types as wtypes
from ironic.api.controllers import base as api_base
from ironic.api.controllers import v1 as api_v1
from ironic.api.controllers.v1 import node as api_node
+from ironic.api.controllers.v1 import utils as api_utils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common import states
@@ -38,89 +38,13 @@ from ironic import objects
from ironic.tests.api import base as test_api_base
from ironic.tests.api import utils as test_api_utils
from ironic.tests import base
-from ironic.tests.db import utils as dbutils
from ironic.tests.objects import utils as obj_utils
-# NOTE(lucasagomes): When creating a node via API (POST)
-# we have to use chassis_uuid
-def post_get_test_node(**kw):
- node = test_api_utils.node_post_data(**kw)
- chassis = dbutils.get_test_chassis()
- node['chassis_id'] = None
- node['chassis_uuid'] = kw.get('chassis_uuid', chassis['uuid'])
- return node
-
-
-class TestTopLevelFunctions(base.TestCase):
-
- def setUp(self):
- super(TestTopLevelFunctions, self).setUp()
- self.valid_name = 'my-host'
- self.valid_uuid = uuidutils.generate_uuid()
- self.invalid_name = 'Mr Plow'
- self.invalid_uuid = '636-555-3226-'
- self.node = post_get_test_node()
-
- def test_is_valid_name(self):
- self.assertTrue(api_node.is_valid_name(self.valid_name))
- self.assertFalse(api_node.is_valid_name(self.invalid_name))
- self.assertFalse(api_node.is_valid_name(self.valid_uuid))
- self.assertFalse(api_node.is_valid_name(self.invalid_uuid))
-
- @mock.patch.object(pecan, 'request')
- @mock.patch.object(api_node, 'allow_logical_names')
- @mock.patch.object(objects.Node, 'get_by_uuid')
- @mock.patch.object(objects.Node, 'get_by_name')
- def test__get_rpc_node_expect_uuid(self, mock_gbn, mock_gbu, mock_aln,
- mock_pr):
- mock_aln.return_value = True
- self.node['uuid'] = self.valid_uuid
- mock_gbu.return_value = self.node
- self.assertEqual(self.node, api_node._get_rpc_node(self.valid_uuid))
- self.assertEqual(1, mock_gbu.call_count)
- self.assertEqual(0, mock_gbn.call_count)
-
- @mock.patch.object(pecan, 'request')
- @mock.patch.object(api_v1.node, 'allow_logical_names')
- @mock.patch.object(objects.Node, 'get_by_uuid')
- @mock.patch.object(objects.Node, 'get_by_name')
- def test__get_rpc_node_expect_name(self, mock_gbn, mock_gbu, mock_aln,
- mock_pr):
- mock_aln.return_value = True
- self.node['name'] = self.valid_name
- mock_gbn.return_value = self.node
- self.assertEqual(self.node, api_node._get_rpc_node(self.valid_name))
- self.assertEqual(0, mock_gbu.call_count)
- self.assertEqual(1, mock_gbn.call_count)
-
- @mock.patch.object(pecan, 'request')
- @mock.patch.object(api_v1.node, 'allow_logical_names')
- @mock.patch.object(objects.Node, 'get_by_uuid')
- @mock.patch.object(objects.Node, 'get_by_name')
- def test__get_rpc_node_invalid_name(self, mock_gbn, mock_gbu,
- mock_aln, mock_pr):
- mock_aln.return_value = True
- self.assertRaises(exception.InvalidUuidOrName,
- api_node._get_rpc_node,
- self.invalid_name)
-
- @mock.patch.object(pecan, 'request')
- @mock.patch.object(api_v1.node, 'allow_logical_names')
- @mock.patch.object(objects.Node, 'get_by_uuid')
- @mock.patch.object(objects.Node, 'get_by_name')
- def test__get_rpc_node_invalid_uuid(self, mock_gbn, mock_gbu,
- mock_aln, mock_pr):
- mock_aln.return_value = True
- self.assertRaises(exception.InvalidUuidOrName,
- api_node._get_rpc_node,
- self.invalid_uuid)
-
-
class TestNodeObject(base.TestCase):
def test_node_init(self):
- node_dict = test_api_utils.node_post_data(chassis_id=None)
+ node_dict = test_api_utils.node_post_data()
del node_dict['instance_uuid']
node = api_node.Node(**node_dict)
self.assertEqual(wtypes.Unset, node.instance_uuid)
@@ -159,7 +83,8 @@ class TestListNodes(test_api_base.FunctionalTest):
self.assertEqual([], data['nodes'])
def test_one(self):
- node = obj_utils.create_test_node(self.context)
+ node = obj_utils.create_test_node(self.context,
+ chassis_id=self.chassis.id)
data = self.get_json('/nodes',
headers={api_base.Version.string: str(api_v1.MAX_VER)})
self.assertIn('instance_uuid', data['nodes'][0])
@@ -184,7 +109,8 @@ class TestListNodes(test_api_base.FunctionalTest):
self.assertNotIn('chassis_id', data['nodes'][0])
def test_get_one(self):
- node = obj_utils.create_test_node(self.context)
+ node = obj_utils.create_test_node(self.context,
+ chassis_id=self.chassis.id)
data = self.get_json('/nodes/%s' % node.uuid,
headers={api_base.Version.string: str(api_v1.MAX_VER)})
self.assertEqual(node.uuid, data['uuid'])
@@ -205,7 +131,8 @@ class TestListNodes(test_api_base.FunctionalTest):
self.assertNotIn('chassis_id', data)
def test_detail(self):
- node = obj_utils.create_test_node(self.context)
+ node = obj_utils.create_test_node(self.context,
+ chassis_id=self.chassis.id)
data = self.get_json('/nodes/detail',
headers={api_base.Version.string: str(api_v1.MAX_VER)})
self.assertEqual(node.uuid, data['nodes'][0]["uuid"])
@@ -244,23 +171,7 @@ class TestListNodes(test_api_base.FunctionalTest):
headers={api_base.Version.string: "1.2"})
self.assertEqual(states.AVAILABLE, data['provision_state'])
- def test_hide_fields_in_newer_versions(self):
- some_time = datetime.datetime(2015, 3, 18, 19, 20)
- node = obj_utils.create_test_node(self.context,
- inspection_started_at=some_time)
- data = self.get_json('/nodes/%s' % node.uuid,
- headers={api_base.Version.string: str(api_v1.MIN_VER)})
- self.assertNotIn('inspection_finished_at', data)
- self.assertNotIn('inspection_started_at', data)
-
- data = self.get_json('/nodes/%s' % node.uuid,
- headers={api_base.Version.string: "1.6"})
- started = timeutils.parse_isotime(
- data['inspection_started_at']).replace(tzinfo=None)
- self.assertEqual(some_time, started)
- self.assertEqual(None, data['inspection_finished_at'])
-
- def test_hide_driver_internal_info(self):
+ def test_hide_fields_in_newer_versions_driver_internal(self):
node = obj_utils.create_test_node(self.context,
driver_internal_info={"foo": "bar"})
data = self.get_json('/nodes/%s' % node.uuid,
@@ -271,7 +182,7 @@ class TestListNodes(test_api_base.FunctionalTest):
headers={api_base.Version.string: "1.3"})
self.assertEqual({"foo": "bar"}, data['driver_internal_info'])
- def test_unset_logical_names(self):
+ def test_hide_fields_in_newer_versions_name(self):
node = obj_utils.create_test_node(self.context,
name="fish")
data = self.get_json('/nodes/%s' % node.uuid,
@@ -282,6 +193,22 @@ class TestListNodes(test_api_base.FunctionalTest):
headers={api_base.Version.string: "1.5"})
self.assertEqual('fish', data['name'])
+ def test_hide_fields_in_newer_versions_inspection(self):
+ some_time = datetime.datetime(2015, 3, 18, 19, 20)
+ node = obj_utils.create_test_node(self.context,
+ inspection_started_at=some_time)
+ data = self.get_json('/nodes/%s' % node.uuid,
+ headers={api_base.Version.string: str(api_v1.MIN_VER)})
+ self.assertNotIn('inspection_finished_at', data)
+ self.assertNotIn('inspection_started_at', data)
+
+ data = self.get_json('/nodes/%s' % node.uuid,
+ headers={api_base.Version.string: "1.6"})
+ started = timeutils.parse_isotime(
+ data['inspection_started_at']).replace(tzinfo=None)
+ self.assertEqual(some_time, started)
+ self.assertEqual(None, data['inspection_finished_at'])
+
def test_many(self):
nodes = []
for id in range(5):
@@ -530,7 +457,8 @@ class TestListNodes(test_api_base.FunctionalTest):
node = obj_utils.create_test_node(
self.context,
uuid=uuidutils.generate_uuid(),
- instance_uuid=uuidutils.generate_uuid())
+ instance_uuid=uuidutils.generate_uuid(),
+ chassis_id=self.chassis.id)
instance_uuid = node.instance_uuid
data = self.get_json('/nodes/detail?instance_uuid=%s' % instance_uuid)
@@ -734,9 +662,11 @@ class TestPatch(test_api_base.FunctionalTest):
def setUp(self):
super(TestPatch, self).setUp()
self.chassis = obj_utils.create_test_chassis(self.context)
- self.node = obj_utils.create_test_node(self.context, name='node-57')
+ self.node = obj_utils.create_test_node(self.context, name='node-57',
+ chassis_id=self.chassis.id)
self.node_no_name = obj_utils.create_test_node(self.context,
- uuid='deadbeef-0000-1111-2222-333333333333')
+ uuid='deadbeef-0000-1111-2222-333333333333',
+ chassis_id=self.chassis.id)
p = mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for')
self.mock_gtf = p.start()
self.mock_gtf.return_value = 'test-topic'
@@ -777,7 +707,7 @@ class TestPatch(test_api_base.FunctionalTest):
'value': 'aaaaaaaa-1111-bbbb-2222-cccccccccccc',
'op': 'replace'}],
expect_errors=True)
- self.assertEqual(400, response.status_code)
+ self.assertEqual(404, response.status_code)
self.assertFalse(self.mock_update_node.called)
def test_update_ok_by_name(self):
@@ -945,6 +875,35 @@ class TestPatch(test_api_base.FunctionalTest):
self.assertEqual(400, response.status_code)
self.assertTrue(response.json['error_message'])
+ def test_remove_instance_uuid_cleaning(self):
+ node = obj_utils.create_test_node(
+ self.context,
+ uuid=uuidutils.generate_uuid(),
+ provision_state=states.CLEANING,
+ target_provision_state=states.AVAILABLE)
+ self.mock_update_node.return_value = node
+ response = self.patch_json('/nodes/%s' % node.uuid,
+ [{'op': 'remove',
+ 'path': '/instance_uuid'}])
+ self.assertEqual('application/json', response.content_type)
+ self.assertEqual(200, response.status_code)
+ self.mock_update_node.assert_called_once_with(
+ mock.ANY, mock.ANY, 'test-topic')
+
+ def test_add_state_in_cleaning(self):
+ node = obj_utils.create_test_node(
+ self.context,
+ uuid=uuidutils.generate_uuid(),
+ provision_state=states.CLEANING,
+ target_provision_state=states.AVAILABLE)
+ self.mock_update_node.return_value = node
+ response = self.patch_json('/nodes/%s' % node.uuid,
+ [{'path': '/extra/foo', 'value': 'bar',
+ 'op': 'add'}], expect_errors=True)
+ self.assertEqual('application/json', response.content_type)
+ self.assertEqual(409, response.status_code)
+ self.assertTrue(response.json['error_message'])
+
def test_remove_mandatory_field(self):
response = self.patch_json('/nodes/%s' % self.node.uuid,
[{'path': '/driver', 'op': 'remove'}],
@@ -1134,6 +1093,21 @@ class TestPatch(test_api_base.FunctionalTest):
self.assertEqual(409, response.status_code)
self.assertTrue(response.json['error_message'])
+ @mock.patch.object(api_utils, 'get_rpc_node')
+ def test_patch_update_drive_console_enabled(self, mock_rpc_node):
+ self.node.console_enabled = True
+ mock_rpc_node.return_value = self.node
+
+ response = self.patch_json('/nodes/%s' % self.node.uuid,
+ [{'path': '/driver',
+ 'value': 'foo',
+ 'op': 'add'}],
+ expect_errors=True)
+ mock_rpc_node.assert_called_once_with(self.node.uuid)
+ self.assertEqual('application/json', response.content_type)
+ self.assertEqual(409, response.status_code)
+ self.assertTrue(response.json['error_message'])
+
class TestPost(test_api_base.FunctionalTest):
@@ -1147,7 +1121,7 @@ class TestPost(test_api_base.FunctionalTest):
@mock.patch.object(timeutils, 'utcnow')
def test_create_node(self, mock_utcnow):
- ndict = post_get_test_node()
+ ndict = test_api_utils.post_get_test_node()
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.post_json('/nodes', ndict)
@@ -1172,7 +1146,7 @@ class TestPost(test_api_base.FunctionalTest):
# as Unset).
with mock.patch.object(self.dbapi, 'create_node',
wraps=self.dbapi.create_node) as cn_mock:
- ndict = post_get_test_node(extra={'foo': 123})
+ ndict = test_api_utils.post_get_test_node(extra={'foo': 123})
self.post_json('/nodes', ndict)
result = self.get_json('/nodes/%s' % ndict['uuid'])
self.assertEqual(ndict['extra'], result['extra'])
@@ -1184,7 +1158,7 @@ class TestPost(test_api_base.FunctionalTest):
kwargs = {attr_name: {'str': 'foo', 'int': 123, 'float': 0.1,
'bool': True, 'list': [1, 2], 'none': None,
'dict': {'cat': 'meow'}}}
- ndict = post_get_test_node(**kwargs)
+ ndict = test_api_utils.post_get_test_node(**kwargs)
self.post_json('/nodes', ndict)
result = self.get_json('/nodes/%s' % ndict['uuid'])
self.assertEqual(ndict[attr_name], result[attr_name])
@@ -1310,7 +1284,7 @@ class TestPost(test_api_base.FunctionalTest):
self.assertEqual(403, response.status_int)
def test_create_node_no_mandatory_field_driver(self):
- ndict = post_get_test_node()
+ ndict = test_api_utils.post_get_test_node()
del ndict['driver']
response = self.post_json('/nodes', ndict, expect_errors=True)
self.assertEqual(400, response.status_int)
@@ -1318,7 +1292,7 @@ class TestPost(test_api_base.FunctionalTest):
self.assertTrue(response.json['error_message'])
def test_create_node_invalid_driver(self):
- ndict = post_get_test_node()
+ ndict = test_api_utils.post_get_test_node()
self.mock_gtf.side_effect = exception.NoValidHost('Fake Error')
response = self.post_json('/nodes', ndict, expect_errors=True)
self.assertEqual(400, response.status_int)
@@ -1326,7 +1300,7 @@ class TestPost(test_api_base.FunctionalTest):
self.assertTrue(response.json['error_message'])
def test_create_node_no_chassis_uuid(self):
- ndict = post_get_test_node()
+ ndict = test_api_utils.post_get_test_node()
del ndict['chassis_uuid']
response = self.post_json('/nodes', ndict)
self.assertEqual('application/json', response.content_type)
@@ -1338,7 +1312,8 @@ class TestPost(test_api_base.FunctionalTest):
expected_location)
def test_create_node_with_chassis_uuid(self):
- ndict = post_get_test_node(chassis_uuid=self.chassis.uuid)
+ ndict = test_api_utils.post_get_test_node(
+ chassis_uuid=self.chassis.uuid)
response = self.post_json('/nodes', ndict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(201, response.status_int)
@@ -1351,7 +1326,7 @@ class TestPost(test_api_base.FunctionalTest):
expected_location)
def test_create_node_chassis_uuid_not_found(self):
- ndict = post_get_test_node(
+ ndict = test_api_utils.post_get_test_node(
chassis_uuid='1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e')
response = self.post_json('/nodes', ndict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
@@ -1359,7 +1334,7 @@ class TestPost(test_api_base.FunctionalTest):
self.assertTrue(response.json['error_message'])
def test_create_node_with_internal_field(self):
- ndict = post_get_test_node()
+ ndict = test_api_utils.post_get_test_node()
ndict['reservation'] = 'fake'
response = self.post_json('/nodes', ndict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
@@ -1392,7 +1367,6 @@ class TestDelete(test_api_base.FunctionalTest):
def setUp(self):
super(TestDelete, self).setUp()
- self.chassis = obj_utils.create_test_chassis(self.context)
p = mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for')
self.mock_gtf = p.start()
self.mock_gtf.return_value = 'test-topic'
@@ -1436,7 +1410,7 @@ class TestDelete(test_api_base.FunctionalTest):
response = self.delete('/nodes/%s' % node.name,
expect_errors=True)
- self.assertEqual(400, response.status_int)
+ self.assertEqual(404, response.status_int)
self.assertFalse(mock_gbn.called)
@mock.patch.object(objects.Node, 'get_by_name')
@@ -1508,7 +1482,6 @@ class TestPut(test_api_base.FunctionalTest):
def setUp(self):
super(TestPut, self).setUp()
- self.chassis = obj_utils.create_test_chassis(self.context)
self.node = obj_utils.create_test_node(self.context,
provision_state=states.AVAILABLE, name='node-39')
p = mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for')
@@ -1547,7 +1520,7 @@ class TestPut(test_api_base.FunctionalTest):
response = self.put_json('/nodes/%s/states/power' % self.node.name,
{'target': states.POWER_ON},
expect_errors=True)
- self.assertEqual(400, response.status_code)
+ self.assertEqual(404, response.status_code)
def test_power_state_by_name(self):
response = self.put_json('/nodes/%s/states/power' % self.node.name,
@@ -1570,6 +1543,13 @@ class TestPut(test_api_base.FunctionalTest):
{'target': 'not-supported'}, expect_errors=True)
self.assertEqual(400, ret.status_code)
+ def test_power_change_during_cleaning(self):
+ self.node.provision_state = states.CLEANING
+ self.node.save()
+ ret = self.put_json('/nodes/%s/states/power' % self.node.uuid,
+ {'target': states.POWER_OFF}, expect_errors=True)
+ self.assertEqual(400, ret.status_code)
+
def test_provision_invalid_state_request(self):
ret = self.put_json('/nodes/%s/states/provision' % self.node.uuid,
{'target': 'not-supported'}, expect_errors=True)
@@ -1592,7 +1572,7 @@ class TestPut(test_api_base.FunctionalTest):
ret = self.put_json('/nodes/%s/states/provision' % self.node.name,
{'target': states.ACTIVE},
expect_errors=True)
- self.assertEqual(400, ret.status_code)
+ self.assertEqual(404, ret.status_code)
def test_provision_by_name(self):
ret = self.put_json('/nodes/%s/states/provision' % self.node.name,
@@ -1775,7 +1755,7 @@ class TestPut(test_api_base.FunctionalTest):
ret = self.put_json('/nodes/%s/states/console' % self.node.name,
{'enabled': "true"},
expect_errors=True)
- self.assertEqual(400, ret.status_code)
+ self.assertEqual(404, ret.status_code)
@mock.patch.object(rpcapi.ConductorAPI, 'set_console_mode')
def test_set_console_by_name(self, mock_scm):
@@ -1833,17 +1813,14 @@ class TestPut(test_api_base.FunctionalTest):
True, 'test-topic')
def test_provision_node_in_maintenance_fail(self):
- with mock.patch.object(rpcapi.ConductorAPI, 'do_node_deploy') as dnd:
- self.node.maintenance = True
- self.node.save()
- dnd.side_effect = exception.NodeInMaintenance(op='provisioning',
- node=self.node.uuid)
+ self.node.maintenance = True
+ self.node.save()
- ret = self.put_json('/nodes/%s/states/provision' % self.node.uuid,
- {'target': states.ACTIVE},
- expect_errors=True)
- self.assertEqual(400, ret.status_code)
- self.assertTrue(ret.json['error_message'])
+ ret = self.put_json('/nodes/%s/states/provision' % self.node.uuid,
+ {'target': states.ACTIVE},
+ expect_errors=True)
+ self.assertEqual(400, ret.status_code)
+ self.assertTrue(ret.json['error_message'])
@mock.patch.object(rpcapi.ConductorAPI, 'set_boot_device')
def test_set_boot_device(self, mock_sbd):
diff --git a/ironic/tests/api/v1/test_ports.py b/ironic/tests/api/v1/test_ports.py
index 8a2c1201b..fd5a16376 100644
--- a/ironic/tests/api/v1/test_ports.py
+++ b/ironic/tests/api/v1/test_ports.py
@@ -21,11 +21,14 @@ import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
+import six
from six.moves.urllib import parse as urlparse
from testtools.matchers import HasLength
from wsme import types as wtypes
+from ironic.api.controllers import base as api_controller
from ironic.api.controllers.v1 import port as api_port
+from ironic.api.controllers.v1 import utils as api_utils
from ironic.common import exception
from ironic.conductor import rpcapi
from ironic.tests.api import base as api_base
@@ -109,7 +112,7 @@ class TestListPorts(api_base.FunctionalTest):
self.assertEqual(len(ports), len(data['ports']))
uuids = [n['uuid'] for n in data['ports']]
- self.assertEqual(ports.sort(), uuids.sort())
+ six.assertCountEqual(self, ports, uuids)
def test_links(self):
uuid = uuidutils.generate_uuid()
@@ -180,6 +183,78 @@ class TestListPorts(api_base.FunctionalTest):
self.assertEqual('application/json', response.content_type)
self.assertIn(invalid_address, response.json['error_message'])
+ @mock.patch.object(api_utils, 'get_rpc_node')
+ def test_get_all_by_node_name_ok(self, mock_get_rpc_node):
+ # GET /v1/ports specifying node_name - success
+ mock_get_rpc_node.return_value = self.node
+ for i in range(5):
+ if i < 3:
+ node_id = self.node.id
+ else:
+ node_id = 100000 + i
+ obj_utils.create_test_port(self.context,
+ node_id=node_id,
+ uuid=uuidutils.generate_uuid(),
+ address='52:54:00:cf:2d:3%s' % i)
+ data = self.get_json("/ports?node=%s" % 'test-node',
+ headers={api_controller.Version.string: '1.5'})
+ self.assertEqual(3, len(data['ports']))
+
+ @mock.patch.object(api_utils, 'get_rpc_node')
+ def test_get_all_by_node_uuid_and_name(self, mock_get_rpc_node):
+ # GET /v1/ports specifying node and uuid - should only use node_uuid
+ mock_get_rpc_node.return_value = self.node
+ obj_utils.create_test_port(self.context, node_id=self.node.id)
+ self.get_json('/ports/detail?node_uuid=%s&node=%s' %
+ (self.node.uuid, 'node-name'))
+ mock_get_rpc_node.assert_called_once_with(self.node.uuid)
+
+ @mock.patch.object(api_utils, 'get_rpc_node')
+ def test_get_all_by_node_name_not_supported(self, mock_get_rpc_node):
+ # GET /v1/ports specifying node_name - name not supported
+ mock_get_rpc_node.side_effect = exception.InvalidUuidOrName(
+ name=self.node.uuid)
+ for i in range(3):
+ obj_utils.create_test_port(self.context,
+ node_id=self.node.id,
+ uuid=uuidutils.generate_uuid(),
+ address='52:54:00:cf:2d:3%s' % i)
+ data = self.get_json("/ports?node=%s" % 'test-node',
+ expect_errors=True)
+ self.assertEqual(0, mock_get_rpc_node.call_count)
+ self.assertEqual(406, data.status_int)
+
+ @mock.patch.object(api_utils, 'get_rpc_node')
+ def test_detail_by_node_name_ok(self, mock_get_rpc_node):
+ # GET /v1/ports/detail specifying node_name - success
+ mock_get_rpc_node.return_value = self.node
+ port = obj_utils.create_test_port(self.context, node_id=self.node.id)
+ data = self.get_json('/ports/detail?node=%s' % 'test-node',
+ headers={api_controller.Version.string: '1.5'})
+ self.assertEqual(port.uuid, data['ports'][0]['uuid'])
+ self.assertEqual(self.node.uuid, data['ports'][0]['node_uuid'])
+
+ @mock.patch.object(api_utils, 'get_rpc_node')
+ def test_detail_by_node_name_not_supported(self, mock_get_rpc_node):
+ # GET /v1/ports/detail specifying node_name - name not supported
+ mock_get_rpc_node.side_effect = exception.InvalidUuidOrName(
+ name=self.node.uuid)
+ obj_utils.create_test_port(self.context, node_id=self.node.id)
+ data = self.get_json('/ports/detail?node=%s' % 'test-node',
+ expect_errors=True)
+ self.assertEqual(0, mock_get_rpc_node.call_count)
+ self.assertEqual(406, data.status_int)
+
+ @mock.patch.object(api_port.PortsController, '_get_ports_collection')
+ def test_detail_with_incorrect_api_usage(self, mock_gpc):
+ # GET /v1/ports/detail specifying node and node_uuid. In this case
+ # we expect the node_uuid interface to be used.
+ self.get_json('/ports/detail?node=%s&node_uuid=%s' %
+ ('test-node', self.node.uuid))
+ mock_gpc.assert_called_once_with(self.node.uuid, mock.ANY, mock.ANY,
+ mock.ANY, mock.ANY, mock.ANY,
+ mock.ANY, mock.ANY)
+
@mock.patch.object(rpcapi.ConductorAPI, 'update_port')
class TestPatch(api_base.FunctionalTest):
diff --git a/ironic/tests/api/v1/test_root.py b/ironic/tests/api/v1/test_root.py
index 7228663f1..35913e60b 100644
--- a/ironic/tests/api/v1/test_root.py
+++ b/ironic/tests/api/v1/test_root.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import mock
from webob import exc as webob_exc
from ironic.api.controllers import v1 as v1_api
@@ -25,7 +26,8 @@ class TestV1Routing(api_base.FunctionalTest):
def test_route_checks_version(self):
self.get_json('/')
- self._check_version.assert_called_once()
+ self._check_version.assert_called_once_with(mock.ANY,
+ mock.ANY)
class TestCheckVersions(test_base.TestCase):
diff --git a/ironic/tests/api/v1/test_utils.py b/ironic/tests/api/v1/test_utils.py
index dda776b97..f55435884 100644
--- a/ironic/tests/api/v1/test_utils.py
+++ b/ironic/tests/api/v1/test_utils.py
@@ -13,13 +13,18 @@
# License for the specific language governing permissions and limitations
# under the License.
+import mock
+from oslo_config import cfg
+from oslo_utils import uuidutils
+import pecan
import wsme
from ironic.api.controllers.v1 import utils
+from ironic.common import exception
+from ironic import objects
+from ironic.tests.api import utils as test_api_utils
from ironic.tests import base
-from oslo_config import cfg
-
CONF = cfg.CONF
@@ -47,3 +52,105 @@ class TestApiUtils(base.TestCase):
self.assertRaises(wsme.exc.ClientSideError,
utils.validate_sort_dir,
'fake-sort')
+
+
+class TestNodeIdent(base.TestCase):
+
+ def setUp(self):
+ super(TestNodeIdent, self).setUp()
+ self.valid_name = 'my-host'
+ self.valid_uuid = uuidutils.generate_uuid()
+ self.invalid_name = 'Mr Plow'
+ self.invalid_uuid = '636-555-3226-'
+ self.node = test_api_utils.post_get_test_node()
+
+ @mock.patch.object(pecan, 'request')
+ def test_allow_node_logical_names_pre_name(self, mock_pecan_req):
+ mock_pecan_req.version.minor = 1
+ self.assertFalse(utils.allow_node_logical_names())
+
+ @mock.patch.object(pecan, 'request')
+ def test_allow_node_logical_names_post_name(self, mock_pecan_req):
+ mock_pecan_req.version.minor = 5
+ self.assertTrue(utils.allow_node_logical_names())
+
+ def test_is_valid_node_name(self):
+ self.assertTrue(utils.is_valid_node_name(self.valid_name))
+ self.assertFalse(utils.is_valid_node_name(self.invalid_name))
+ self.assertFalse(utils.is_valid_node_name(self.valid_uuid))
+ self.assertFalse(utils.is_valid_node_name(self.invalid_uuid))
+
+ @mock.patch.object(pecan, 'request')
+ @mock.patch.object(utils, 'allow_node_logical_names')
+ @mock.patch.object(objects.Node, 'get_by_uuid')
+ @mock.patch.object(objects.Node, 'get_by_name')
+ def test_get_rpc_node_expect_uuid(self, mock_gbn, mock_gbu, mock_anln,
+ mock_pr):
+ mock_anln.return_value = True
+ self.node['uuid'] = self.valid_uuid
+ mock_gbu.return_value = self.node
+ self.assertEqual(self.node, utils.get_rpc_node(self.valid_uuid))
+ self.assertEqual(1, mock_gbu.call_count)
+ self.assertEqual(0, mock_gbn.call_count)
+
+ @mock.patch.object(pecan, 'request')
+ @mock.patch.object(utils, 'allow_node_logical_names')
+ @mock.patch.object(objects.Node, 'get_by_uuid')
+ @mock.patch.object(objects.Node, 'get_by_name')
+ def test_get_rpc_node_expect_name(self, mock_gbn, mock_gbu, mock_anln,
+ mock_pr):
+ mock_anln.return_value = True
+ self.node['name'] = self.valid_name
+ mock_gbn.return_value = self.node
+ self.assertEqual(self.node, utils.get_rpc_node(self.valid_name))
+ self.assertEqual(0, mock_gbu.call_count)
+ self.assertEqual(1, mock_gbn.call_count)
+
+ @mock.patch.object(pecan, 'request')
+ @mock.patch.object(utils, 'allow_node_logical_names')
+ @mock.patch.object(objects.Node, 'get_by_uuid')
+ @mock.patch.object(objects.Node, 'get_by_name')
+ def test_get_rpc_node_invalid_name(self, mock_gbn, mock_gbu,
+ mock_anln, mock_pr):
+ mock_anln.return_value = True
+ self.assertRaises(exception.InvalidUuidOrName,
+ utils.get_rpc_node,
+ self.invalid_name)
+
+ @mock.patch.object(pecan, 'request')
+ @mock.patch.object(utils, 'allow_node_logical_names')
+ @mock.patch.object(objects.Node, 'get_by_uuid')
+ @mock.patch.object(objects.Node, 'get_by_name')
+ def test_get_rpc_node_invalid_uuid(self, mock_gbn, mock_gbu,
+ mock_anln, mock_pr):
+ mock_anln.return_value = True
+ self.assertRaises(exception.InvalidUuidOrName,
+ utils.get_rpc_node,
+ self.invalid_uuid)
+
+ @mock.patch.object(pecan, 'request')
+ @mock.patch.object(utils, 'allow_node_logical_names')
+ @mock.patch.object(objects.Node, 'get_by_uuid')
+ @mock.patch.object(objects.Node, 'get_by_name')
+ def test_get_rpc_node_by_uuid_no_logical_name(self, mock_gbn, mock_gbu,
+ mock_anln, mock_pr):
+ # allow_node_logical_names() should have no effect
+ mock_anln.return_value = False
+ self.node['uuid'] = self.valid_uuid
+ mock_gbu.return_value = self.node
+ self.assertEqual(self.node, utils.get_rpc_node(self.valid_uuid))
+ self.assertEqual(1, mock_gbu.call_count)
+ self.assertEqual(0, mock_gbn.call_count)
+
+ @mock.patch.object(pecan, 'request')
+ @mock.patch.object(utils, 'allow_node_logical_names')
+ @mock.patch.object(objects.Node, 'get_by_uuid')
+ @mock.patch.object(objects.Node, 'get_by_name')
+ def test_get_rpc_node_by_name_no_logical_name(self, mock_gbn, mock_gbu,
+ mock_anln, mock_pr):
+ mock_anln.return_value = False
+ self.node['name'] = self.valid_name
+ mock_gbn.return_value = self.node
+ self.assertRaises(exception.NodeNotFound,
+ utils.get_rpc_node,
+ self.valid_name)
diff --git a/ironic/tests/conductor/test_manager.py b/ironic/tests/conductor/test_manager.py
index e2dd06568..47a6e77f7 100644
--- a/ironic/tests/conductor/test_manager.py
+++ b/ironic/tests/conductor/test_manager.py
@@ -1696,7 +1696,7 @@ class DoNodeCleanTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
node.refresh()
# Assert that the node was moved to available without cleaning
- mock_validate.assert_not_called()
+ self.assertFalse(mock_validate.called)
self.assertEqual(states.AVAILABLE, node.provision_state)
self.assertEqual(states.NOSTATE, node.target_provision_state)
self.assertEqual({}, node.clean_step)
@@ -1725,9 +1725,9 @@ class DoNodeCleanTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
self.service._worker_pool.waitall()
node.refresh()
- mock_validate.assert_called_once()
+ mock_validate.assert_called_once_with(task)
mock_next_step.assert_called_once_with(mock.ANY, [], {})
- mock_steps.assert_called_once()
+ mock_steps.assert_called_once_with(task)
# Check that state didn't change
self.assertEqual(states.CLEANING, node.provision_state)
@@ -1806,7 +1806,7 @@ class DoNodeCleanTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
# Cleaning should be complete without calling additional steps
self.assertEqual(states.AVAILABLE, node.provision_state)
self.assertEqual({}, node.clean_step)
- mock_execute.assert_not_called()
+ self.assertFalse(mock_execute.called)
@mock.patch('ironic.drivers.modules.fake.FakePower.execute_clean_step')
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step')
@@ -1869,7 +1869,7 @@ class DoNodeCleanTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
self.assertEqual({}, node.clean_step)
self.assertIsNotNone(node.last_error)
self.assertTrue(node.maintenance)
- mock_execute.assert_not_called()
+ self.assertFalse(mock_execute.called)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step')
def test__do_next_clean_step_fail(self, mock_execute):
@@ -1897,7 +1897,6 @@ class DoNodeCleanTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
self.assertEqual({}, node.clean_step)
self.assertIsNotNone(node.last_error)
self.assertTrue(node.maintenance)
- mock_execute.assert_not_called()
mock_execute.assert_called_once_with(mock.ANY, self.clean_steps[0])
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step')
@@ -1923,7 +1922,7 @@ class DoNodeCleanTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
# Cleaning should be complete without calling additional steps
self.assertEqual(states.AVAILABLE, node.provision_state)
self.assertEqual({}, node.clean_step)
- mock_execute.assert_not_called()
+ self.assertFalse(mock_execute.called)
@mock.patch('ironic.drivers.modules.fake.FakePower.execute_clean_step')
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step')
@@ -2841,7 +2840,7 @@ class ManagerSyncPowerStatesTestCase(_CommonMixIn, tests_db_base.DbTestCase):
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
get_node_mock.assert_called_once_with(self.context, self.node.id)
- acquire_mock.assert_called_once_with(self.context, self.node.id)
+ acquire_mock.assert_called_once_with(self.context, self.node.uuid)
self.assertFalse(sync_mock.called)
def test_node_in_deploywait_on_acquire(self, get_nodeinfo_mock,
@@ -2853,7 +2852,7 @@ class ManagerSyncPowerStatesTestCase(_CommonMixIn, tests_db_base.DbTestCase):
task = self._create_task(
node_attrs=dict(provision_state=states.DEPLOYWAIT,
target_provision_state=states.ACTIVE,
- id=self.node.id))
+ uuid=self.node.uuid))
acquire_mock.side_effect = self._get_acquire_side_effect(task)
self.service._sync_power_states(self.context)
@@ -2863,7 +2862,7 @@ class ManagerSyncPowerStatesTestCase(_CommonMixIn, tests_db_base.DbTestCase):
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
get_node_mock.assert_called_once_with(self.context, self.node.id)
- acquire_mock.assert_called_once_with(self.context, self.node.id)
+ acquire_mock.assert_called_once_with(self.context, self.node.uuid)
self.assertFalse(sync_mock.called)
def test_node_in_maintenance_on_acquire(self, get_nodeinfo_mock,
@@ -2873,7 +2872,7 @@ class ManagerSyncPowerStatesTestCase(_CommonMixIn, tests_db_base.DbTestCase):
get_node_mock.return_value = self.node
mapped_mock.return_value = True
task = self._create_task(
- node_attrs=dict(maintenance=True, id=self.node.id))
+ node_attrs=dict(maintenance=True, uuid=self.node.uuid))
acquire_mock.side_effect = self._get_acquire_side_effect(task)
self.service._sync_power_states(self.context)
@@ -2883,7 +2882,7 @@ class ManagerSyncPowerStatesTestCase(_CommonMixIn, tests_db_base.DbTestCase):
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
get_node_mock.assert_called_once_with(self.context, self.node.id)
- acquire_mock.assert_called_once_with(self.context, self.node.id)
+ acquire_mock.assert_called_once_with(self.context, self.node.uuid)
self.assertFalse(sync_mock.called)
def test_node_disappears_on_acquire(self, get_nodeinfo_mock,
@@ -2902,7 +2901,7 @@ class ManagerSyncPowerStatesTestCase(_CommonMixIn, tests_db_base.DbTestCase):
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
get_node_mock.assert_called_once_with(self.context, self.node.id)
- acquire_mock.assert_called_once_with(self.context, self.node.id)
+ acquire_mock.assert_called_once_with(self.context, self.node.uuid)
self.assertFalse(sync_mock.called)
def test_single_node(self, get_nodeinfo_mock, get_node_mock,
@@ -2910,7 +2909,7 @@ class ManagerSyncPowerStatesTestCase(_CommonMixIn, tests_db_base.DbTestCase):
get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
get_node_mock.return_value = self.node
mapped_mock.return_value = True
- task = self._create_task(node_attrs=dict(id=self.node.id))
+ task = self._create_task(node_attrs=dict(uuid=self.node.uuid))
acquire_mock.side_effect = self._get_acquire_side_effect(task)
self.service._sync_power_states(self.context)
@@ -2920,7 +2919,7 @@ class ManagerSyncPowerStatesTestCase(_CommonMixIn, tests_db_base.DbTestCase):
mapped_mock.assert_called_once_with(self.node.uuid,
self.node.driver)
get_node_mock.assert_called_once_with(self.context, self.node.id)
- acquire_mock.assert_called_once_with(self.context, self.node.id)
+ acquire_mock.assert_called_once_with(self.context, self.node.uuid)
sync_mock.assert_called_once_with(task, mock.ANY)
def test__sync_power_state_multiple_nodes(self, get_nodeinfo_mock,
@@ -2957,16 +2956,16 @@ class ManagerSyncPowerStatesTestCase(_CommonMixIn, tests_db_base.DbTestCase):
mapped_map[n.uuid] = False if i == 2 else True
get_node_map[n.uuid] = n
- tasks = [self._create_task(node_attrs=dict(id=1)),
+ tasks = [self._create_task(node_attrs=dict(uuid=nodes[0].uuid)),
exception.NodeLocked(node=7, host='fake'),
exception.NodeNotFound(node=8, host='fake'),
self._create_task(
- node_attrs=dict(id=9,
+ node_attrs=dict(uuid=nodes[8].uuid,
provision_state=states.DEPLOYWAIT,
target_provision_state=states.ACTIVE)),
self._create_task(
- node_attrs=dict(id=10, maintenance=True)),
- self._create_task(node_attrs=dict(id=11))]
+ node_attrs=dict(uuid=nodes[9].uuid, maintenance=True)),
+ self._create_task(node_attrs=dict(uuid=nodes[10].uuid))]
def _get_node_side_effect(ctxt, node_id):
if node_id == 6:
@@ -2994,7 +2993,7 @@ class ManagerSyncPowerStatesTestCase(_CommonMixIn, tests_db_base.DbTestCase):
for x in nodes[:1] + nodes[2:]]
self.assertEqual(get_node_calls,
get_node_mock.call_args_list)
- acquire_calls = [mock.call(self.context, x.id)
+ acquire_calls = [mock.call(self.context, x.uuid)
for x in nodes[:1] + nodes[6:]]
self.assertEqual(acquire_calls, acquire_mock.call_args_list)
sync_calls = [mock.call(tasks[0], mock.ANY),
@@ -3339,20 +3338,19 @@ class ManagerTestProperties(tests_db_base.DbTestCase):
def test_driver_properties_fake_ilo(self):
expected = ['ilo_address', 'ilo_username', 'ilo_password',
- 'client_port', 'client_timeout', 'inspect_ports',
- 'ilo_change_password']
+ 'client_port', 'client_timeout', 'ilo_change_password']
self._check_driver_properties("fake_ilo", expected)
def test_driver_properties_ilo_iscsi(self):
expected = ['ilo_address', 'ilo_username', 'ilo_password',
'client_port', 'client_timeout', 'ilo_deploy_iso',
- 'console_port', 'inspect_ports', 'ilo_change_password']
+ 'console_port', 'ilo_change_password']
self._check_driver_properties("iscsi_ilo", expected)
def test_driver_properties_agent_ilo(self):
expected = ['ilo_address', 'ilo_username', 'ilo_password',
'client_port', 'client_timeout', 'ilo_deploy_iso',
- 'console_port', 'inspect_ports', 'ilo_change_password']
+ 'console_port', 'ilo_change_password']
self._check_driver_properties("agent_ilo", expected)
def test_driver_properties_fail(self):
@@ -3437,7 +3435,7 @@ class ManagerSyncLocalStateTestCase(_CommonMixIn, tests_db_base.DbTestCase):
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver)
get_authtoken_mock.assert_called_once_with()
- acquire_mock.assert_called_once_with(self.context, self.node.id)
+ acquire_mock.assert_called_once_with(self.context, self.node.uuid)
# assert spawn_after has been called
self.task.spawn_after.assert_called_once_with(
self.service._spawn_worker,
@@ -3471,7 +3469,7 @@ class ManagerSyncLocalStateTestCase(_CommonMixIn, tests_db_base.DbTestCase):
# assert acquire() gets called 2 times only instead of 3. When
# NoFreeConductorWorker is raised the loop should be broken
- expected = [mock.call(self.context, self.node.id)] * 2
+ expected = [mock.call(self.context, self.node.uuid)] * 2
self.assertEqual(expected, acquire_mock.call_args_list)
# Only one auth token needed for all runs
@@ -3504,7 +3502,7 @@ class ManagerSyncLocalStateTestCase(_CommonMixIn, tests_db_base.DbTestCase):
self.assertEqual(expected, mapped_mock.call_args_list)
# assert acquire() gets called 3 times
- expected = [mock.call(self.context, self.node.id)] * 3
+ expected = [mock.call(self.context, self.node.uuid)] * 3
self.assertEqual(expected, acquire_mock.call_args_list)
# Only one auth token needed for all runs
@@ -3539,7 +3537,7 @@ class ManagerSyncLocalStateTestCase(_CommonMixIn, tests_db_base.DbTestCase):
mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver)
# assert acquire() gets called only once because of the worker limit
- acquire_mock.assert_called_once_with(self.context, self.node.id)
+ acquire_mock.assert_called_once_with(self.context, self.node.uuid)
# Only one auth token needed for all runs
get_authtoken_mock.assert_called_once_with()
diff --git a/ironic/tests/db/sqlalchemy/test_migrations.py b/ironic/tests/db/sqlalchemy/test_migrations.py
index b6ba0e095..37b539e02 100644
--- a/ironic/tests/db/sqlalchemy/test_migrations.py
+++ b/ironic/tests/db/sqlalchemy/test_migrations.py
@@ -15,15 +15,9 @@
# under the License.
"""
-Tests for database migrations. This test case reads the configuration
-file test_migrations.conf for database connection settings
-to use in the tests. For each connection found in the config file,
-the test case runs a series of test cases to ensure that migrations work
-properly.
-
-There are also "opportunistic" tests for both mysql and postgresql in here,
-which allows testing against all 3 databases (sqlite in memory, mysql, pg) in
-a properly configured unit test environment.
+Tests for database migrations. There are "opportunistic" tests for both mysql
+and postgresql in here, which allows testing against these databases in a
+properly configured unit test environment.
For the opportunistic testing you need to set up a db named 'openstack_citest'
with user 'openstack_citest' and password 'openstack_citest' on localhost.
@@ -314,12 +308,8 @@ class MigrationCheckersMixin(object):
'instance_uuid': instance_uuid}
nodes.insert().values(data).execute()
data['uuid'] = uuidutils.generate_uuid()
- # TODO(viktors): Remove check on sqlalchemy.exc.IntegrityError, when
- # Ironic will use oslo_db 0.4.0 or higher.
- # See bug #1214341 for details.
- self.assertRaises(
- (sqlalchemy.exc.IntegrityError, db_exc.DBDuplicateEntry),
- nodes.insert().execute, data)
+ self.assertRaises(db_exc.DBDuplicateEntry,
+ nodes.insert().execute, data)
def _check_242cc6a923b3(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
@@ -374,6 +364,15 @@ class MigrationCheckersMixin(object):
self.assertIsInstance(nodes.c.clean_step.type,
sqlalchemy.types.String)
+ def _check_2fb93ffd2af1(self, engine, data):
+ nodes = db_utils.get_table(engine, 'nodes')
+ bigstring = 'a' * 255
+ uuid = uuidutils.generate_uuid()
+ data = {'uuid': uuid, 'name': bigstring}
+ nodes.insert().execute(data)
+ node = nodes.select(nodes.c.uuid == uuid).execute().first()
+ self.assertEqual(bigstring, node['name'])
+
def test_upgrade_and_version(self):
with patch_with_engine(self.engine):
self.migration_api.upgrade('head')
diff --git a/ironic/tests/db/test_chassis.py b/ironic/tests/db/test_chassis.py
index de81e756e..480d4e6cd 100644
--- a/ironic/tests/db/test_chassis.py
+++ b/ironic/tests/db/test_chassis.py
@@ -25,40 +25,36 @@ from ironic.tests.db import utils
class DbChassisTestCase(base.DbTestCase):
- def _create_test_chassis(self, **kwargs):
- ch = utils.get_test_chassis(**kwargs)
- self.dbapi.create_chassis(ch)
- return ch
+ def setUp(self):
+ super(DbChassisTestCase, self).setUp()
+ self.chassis = utils.create_test_chassis()
def test_get_chassis_list(self):
- uuids = []
+ uuids = [self.chassis.uuid]
for i in range(1, 6):
- n = utils.get_test_chassis(id=i, uuid=uuidutils.generate_uuid())
- self.dbapi.create_chassis(n)
- uuids.append(six.text_type(n['uuid']))
+ ch = utils.create_test_chassis(uuid=uuidutils.generate_uuid())
+ uuids.append(six.text_type(ch.uuid))
res = self.dbapi.get_chassis_list()
res_uuids = [r.uuid for r in res]
- self.assertEqual(uuids.sort(), res_uuids.sort())
+ six.assertCountEqual(self, uuids, res_uuids)
def test_get_chassis_by_id(self):
- ch = self._create_test_chassis()
- chassis = self.dbapi.get_chassis_by_id(ch['id'])
+ chassis = self.dbapi.get_chassis_by_id(self.chassis.id)
- self.assertEqual(ch['uuid'], chassis.uuid)
+ self.assertEqual(self.chassis.uuid, chassis.uuid)
def test_get_chassis_by_uuid(self):
- ch = self._create_test_chassis()
- chassis = self.dbapi.get_chassis_by_uuid(ch['uuid'])
+ chassis = self.dbapi.get_chassis_by_uuid(self.chassis.uuid)
- self.assertEqual(ch['id'], chassis.id)
+ self.assertEqual(self.chassis.id, chassis.id)
def test_get_chassis_that_does_not_exist(self):
self.assertRaises(exception.ChassisNotFound,
self.dbapi.get_chassis_by_id, 666)
def test_update_chassis(self):
- ch = self._create_test_chassis()
- res = self.dbapi.update_chassis(ch['id'], {'description': 'hello'})
+ res = self.dbapi.update_chassis(self.chassis.id,
+ {'description': 'hello'})
self.assertEqual('hello', res.description)
@@ -67,32 +63,27 @@ class DbChassisTestCase(base.DbTestCase):
self.dbapi.update_chassis, 666, {'description': ''})
def test_update_chassis_uuid(self):
- ch = self._create_test_chassis()
self.assertRaises(exception.InvalidParameterValue,
- self.dbapi.update_chassis, ch['id'],
+ self.dbapi.update_chassis, self.chassis.id,
{'uuid': 'hello'})
def test_destroy_chassis(self):
- ch = self._create_test_chassis()
- self.dbapi.destroy_chassis(ch['id'])
+ self.dbapi.destroy_chassis(self.chassis.id)
self.assertRaises(exception.ChassisNotFound,
- self.dbapi.get_chassis_by_id, ch['id'])
+ self.dbapi.get_chassis_by_id, self.chassis.id)
def test_destroy_chassis_that_does_not_exist(self):
self.assertRaises(exception.ChassisNotFound,
self.dbapi.destroy_chassis, 666)
def test_destroy_chassis_with_nodes(self):
- ch = self._create_test_chassis()
- utils.create_test_node(chassis_id=ch['id'])
+ utils.create_test_node(chassis_id=self.chassis.id)
self.assertRaises(exception.ChassisNotEmpty,
- self.dbapi.destroy_chassis, ch['id'])
+ self.dbapi.destroy_chassis, self.chassis.id)
def test_create_chassis_already_exists(self):
- uuid = uuidutils.generate_uuid()
- self._create_test_chassis(id=1, uuid=uuid)
self.assertRaises(exception.ChassisAlreadyExists,
- self._create_test_chassis,
- id=2, uuid=uuid)
+ utils.create_test_chassis,
+ uuid=self.chassis.uuid)
diff --git a/ironic/tests/db/test_conductor.py b/ironic/tests/db/test_conductor.py
index d93aad120..1ff182615 100644
--- a/ironic/tests/db/test_conductor.py
+++ b/ironic/tests/db/test_conductor.py
@@ -64,7 +64,7 @@ class DbConductorTestCase(base.DbTestCase):
self.dbapi.unregister_conductor,
c.hostname)
- @mock.patch.object(timeutils, 'utcnow')
+ @mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_touch_conductor(self, mock_utcnow):
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
@@ -110,7 +110,7 @@ class DbConductorTestCase(base.DbTestCase):
self.assertEqual('hostname2', node2.reservation)
self.assertIsNone(node3.reservation)
- @mock.patch.object(timeutils, 'utcnow')
+ @mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_active_driver_dict_one_host_no_driver(self, mock_utcnow):
h = 'fake-host'
expected = {}
@@ -120,7 +120,7 @@ class DbConductorTestCase(base.DbTestCase):
result = self.dbapi.get_active_driver_dict()
self.assertEqual(expected, result)
- @mock.patch.object(timeutils, 'utcnow')
+ @mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_active_driver_dict_one_host_one_driver(self, mock_utcnow):
h = 'fake-host'
d = 'fake-driver'
@@ -131,7 +131,7 @@ class DbConductorTestCase(base.DbTestCase):
result = self.dbapi.get_active_driver_dict()
self.assertEqual(expected, result)
- @mock.patch.object(timeutils, 'utcnow')
+ @mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_active_driver_dict_one_host_many_drivers(self, mock_utcnow):
h = 'fake-host'
d1 = 'driver-one'
@@ -143,7 +143,7 @@ class DbConductorTestCase(base.DbTestCase):
result = self.dbapi.get_active_driver_dict()
self.assertEqual(expected, result)
- @mock.patch.object(timeutils, 'utcnow')
+ @mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_active_driver_dict_many_hosts_one_driver(self, mock_utcnow):
h1 = 'host-one'
h2 = 'host-two'
@@ -156,7 +156,7 @@ class DbConductorTestCase(base.DbTestCase):
result = self.dbapi.get_active_driver_dict()
self.assertEqual(expected, result)
- @mock.patch.object(timeutils, 'utcnow')
+ @mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_active_driver_dict_many_hosts_and_drivers(self, mock_utcnow):
h1 = 'host-one'
h2 = 'host-two'
@@ -172,7 +172,7 @@ class DbConductorTestCase(base.DbTestCase):
result = self.dbapi.get_active_driver_dict()
self.assertEqual(expected, result)
- @mock.patch.object(timeutils, 'utcnow')
+ @mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_active_driver_dict_with_old_conductor(self, mock_utcnow):
past = datetime.datetime(2000, 1, 1, 0, 0)
present = past + datetime.timedelta(minutes=2)
diff --git a/ironic/tests/db/test_nodes.py b/ironic/tests/db/test_nodes.py
index 35c542e5e..be44943b4 100644
--- a/ironic/tests/db/test_nodes.py
+++ b/ironic/tests/db/test_nodes.py
@@ -33,9 +33,6 @@ class DbNodeTestCase(base.DbTestCase):
def test_create_node(self):
utils.create_test_node()
- def test_create_node_nullable_chassis_id(self):
- utils.create_test_node(chassis_id=None)
-
def test_create_node_already_exists(self):
utils.create_test_node()
self.assertRaises(exception.NodeAlreadyExists,
@@ -139,7 +136,7 @@ class DbNodeTestCase(base.DbTestCase):
res = self.dbapi.get_node_list(filters={'maintenance': False})
self.assertEqual([node1.id], [r.id for r in res])
- @mock.patch.object(timeutils, 'utcnow')
+ @mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_nodeinfo_list_provision(self, mock_utcnow):
past = datetime.datetime(2000, 1, 1, 0, 0)
next = past + datetime.timedelta(minutes=8)
@@ -164,7 +161,7 @@ class DbNodeTestCase(base.DbTestCase):
states.DEPLOYWAIT})
self.assertEqual([node2.id], [r[0] for r in res])
- @mock.patch.object(timeutils, 'utcnow')
+ @mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_nodeinfo_list_inspection(self, mock_utcnow):
past = datetime.datetime(2000, 1, 1, 0, 0)
next = past + datetime.timedelta(minutes=8)
@@ -197,13 +194,11 @@ class DbNodeTestCase(base.DbTestCase):
uuids.append(six.text_type(node['uuid']))
res = self.dbapi.get_node_list()
res_uuids = [r.uuid for r in res]
- self.assertEqual(uuids.sort(), res_uuids.sort())
+ six.assertCountEqual(self, uuids, res_uuids)
def test_get_node_list_with_filters(self):
- ch1 = utils.get_test_chassis(id=1, uuid=uuidutils.generate_uuid())
- ch2 = utils.get_test_chassis(id=2, uuid=uuidutils.generate_uuid())
- self.dbapi.create_chassis(ch1)
- self.dbapi.create_chassis(ch2)
+ ch1 = utils.create_test_chassis(uuid=uuidutils.generate_uuid())
+ ch2 = utils.create_test_chassis(uuid=uuidutils.generate_uuid())
node1 = utils.create_test_node(driver='driver-one',
instance_uuid=uuidutils.generate_uuid(),
@@ -359,7 +354,7 @@ class DbNodeTestCase(base.DbTestCase):
node2.id,
{'instance_uuid': new_i_uuid})
- @mock.patch.object(timeutils, 'utcnow')
+ @mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_update_node_provision(self, mock_utcnow):
mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = mocked_time
@@ -383,7 +378,7 @@ class DbNodeTestCase(base.DbTestCase):
self.assertIsNone(res['provision_updated_at'])
self.assertIsNone(res['inspection_started_at'])
- @mock.patch.object(timeutils, 'utcnow')
+ @mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_update_node_inspection_started_at(self, mock_utcnow):
mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = mocked_time
@@ -395,7 +390,7 @@ class DbNodeTestCase(base.DbTestCase):
timeutils.normalize_time(result))
self.assertIsNone(res['inspection_finished_at'])
- @mock.patch.object(timeutils, 'utcnow')
+ @mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_update_node_inspection_finished_at(self, mock_utcnow):
mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = mocked_time
diff --git a/ironic/tests/db/test_ports.py b/ironic/tests/db/test_ports.py
index afb1284e4..947de11a1 100644
--- a/ironic/tests/db/test_ports.py
+++ b/ironic/tests/db/test_ports.py
@@ -50,9 +50,11 @@ class DbPortTestCase(base.DbTestCase):
port = db_utils.create_test_port(uuid=uuidutils.generate_uuid(),
address='52:54:00:cf:2d:4%s' % i)
uuids.append(six.text_type(port.uuid))
+ # Also add the uuid for the port created in setUp()
+ uuids.append(six.text_type(self.port.uuid))
res = self.dbapi.get_port_list()
res_uuids = [r.uuid for r in res]
- self.assertEqual(uuids.sort(), res_uuids.sort())
+ six.assertCountEqual(self, uuids, res_uuids)
def test_get_ports_by_node_id(self):
res = self.dbapi.get_ports_by_node_id(self.node.id)
diff --git a/ironic/tests/db/utils.py b/ironic/tests/db/utils.py
index 677187e80..dbb8e8d95 100644
--- a/ironic/tests/db/utils.py
+++ b/ironic/tests/db/utils.py
@@ -149,6 +149,7 @@ def get_test_agent_driver_info():
def get_test_agent_driver_internal_info():
return {
'agent_url': 'http://127.0.0.1/foo',
+ 'is_whole_disk_image': True,
}
@@ -187,7 +188,7 @@ def get_test_node(**kw):
'id': kw.get('id', 123),
'name': kw.get('name', None),
'uuid': kw.get('uuid', '1be26c0b-03f2-4d2e-ae87-c02d7f33c123'),
- 'chassis_id': kw.get('chassis_id', 42),
+ 'chassis_id': kw.get('chassis_id', None),
'conductor_affinity': kw.get('conductor_affinity', None),
'power_state': kw.get('power_state', states.NOSTATE),
'target_power_state': kw.get('target_power_state', states.NOSTATE),
@@ -272,6 +273,23 @@ def get_test_chassis(**kw):
}
+def create_test_chassis(**kw):
+ """Create test chassis entry in DB and return Chassis DB object.
+
+ Function to be used to create test Chassis objects in the database.
+
+ :param kw: kwargs with overriding values for chassis's attributes.
+ :returns: Test Chassis DB object.
+
+ """
+ chassis = get_test_chassis(**kw)
+ # Let DB generate ID if it isn't specified explicitly
+ if 'id' not in kw:
+ del chassis['id']
+ dbapi = db_api.get_instance()
+ return dbapi.create_chassis(chassis)
+
+
def get_test_conductor(**kw):
return {
'id': kw.get('id', 6),
diff --git a/ironic/tests/dhcp/test_factory.py b/ironic/tests/dhcp/test_factory.py
index 37ce4c040..850479f8f 100644
--- a/ironic/tests/dhcp/test_factory.py
+++ b/ironic/tests/dhcp/test_factory.py
@@ -13,10 +13,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+import inspect
+
import mock
from ironic.common import dhcp_factory
from ironic.common import exception
+from ironic.dhcp import base as base_class
from ironic.dhcp import neutron
from ironic.dhcp import none
from ironic.tests import base
@@ -68,3 +71,35 @@ class TestDHCPFactory(base.TestCase):
group='dhcp')
self.assertRaises(exception.DHCPNotFound, dhcp_factory.DHCPFactory)
+
+
+class CompareBasetoModules(base.TestCase):
+
+ def test_drivers_match_dhcp_base(self):
+ def _get_public_apis(inst):
+ methods = {}
+ for (name, value) in inspect.getmembers(inst, inspect.ismethod):
+ if name.startswith("_"):
+ continue
+ methods[name] = value
+ return methods
+
+ def _compare_classes(baseclass, driverclass):
+
+ basemethods = _get_public_apis(baseclass)
+ implmethods = _get_public_apis(driverclass)
+
+ for name in basemethods:
+ baseargs = inspect.getargspec(basemethods[name])
+ implargs = inspect.getargspec(implmethods[name])
+ self.assertEqual(
+ baseargs,
+ implargs,
+ "%s args of %s don't match base %s" % (
+ name,
+ driverclass,
+ baseclass)
+ )
+
+ _compare_classes(base_class.BaseDHCP, none.NoneDHCPApi)
+ _compare_classes(base_class.BaseDHCP, neutron.NeutronDHCPApi)
diff --git a/ironic/tests/dhcp/test_neutron.py b/ironic/tests/dhcp/test_neutron.py
index 4cb4402f6..11a716fa9 100644
--- a/ironic/tests/dhcp/test_neutron.py
+++ b/ironic/tests/dhcp/test_neutron.py
@@ -377,23 +377,25 @@ class TestNeutron(db_base.DbTestCase):
'network_id': '00000000-0000-0000-0000-000000000000',
'admin_state_up': True, 'mac_address': self.ports[0].address}})
- @mock.patch('ironic.conductor.manager.cleaning_error_handler')
+ @mock.patch.object(neutron.NeutronDHCPApi, '_rollback_cleaning_ports')
@mock.patch.object(client.Client, 'create_port')
- def test_create_cleaning_ports_fail(self, create_mock, error_mock):
- # Check that if creating a port fails, the node goes to cleanfail
+ def test_create_cleaning_ports_fail(self, create_mock, rollback_mock):
+ # Check that a port-creation failure raises NodeCleaningFailure
+ # and rolls back any cleaning ports already created
create_mock.side_effect = neutron_client_exc.ConnectionFailed
api = dhcp_factory.DHCPFactory().provider
with task_manager.acquire(self.context, self.node.uuid) as task:
- api.create_cleaning_ports(task)
- error_mock.assert_called_once_with(task, mock.ANY)
+ self.assertRaises(exception.NodeCleaningFailure,
+ api.create_cleaning_ports,
+ task)
create_mock.assert_called_once_with({'port': {
'network_id': '00000000-0000-0000-0000-000000000000',
'admin_state_up': True, 'mac_address': self.ports[0].address}})
+ rollback_mock.assert_called_once_with(task)
- @mock.patch('ironic.conductor.manager.cleaning_error_handler')
@mock.patch.object(client.Client, 'create_port')
- def test_create_cleaning_ports_bad_config(self, create_mock, error_mock):
+ def test_create_cleaning_ports_bad_config(self, create_mock):
+ # Check an error is raised if the cleaning network is not set
self.config(cleaning_network_uuid=None, group='neutron')
api = dhcp_factory.DHCPFactory().provider
@@ -417,32 +419,31 @@ class TestNeutron(db_base.DbTestCase):
network_id='00000000-0000-0000-0000-000000000000')
delete_mock.assert_called_once_with(self.neutron_port['id'])
- @mock.patch('ironic.conductor.manager.cleaning_error_handler')
@mock.patch.object(client.Client, 'list_ports')
- def test_delete_cleaning_ports_list_fail(self, list_mock, error_mock):
+ def test_delete_cleaning_ports_list_fail(self, list_mock):
# Check that if listing ports fails, the node goes to cleanfail
list_mock.side_effect = neutron_client_exc.ConnectionFailed
api = dhcp_factory.DHCPFactory().provider
with task_manager.acquire(self.context, self.node.uuid) as task:
- api.delete_cleaning_ports(task)
+ self.assertRaises(exception.NodeCleaningFailure,
+ api.delete_cleaning_ports,
+ task)
list_mock.assert_called_once_with(
network_id='00000000-0000-0000-0000-000000000000')
- error_mock.assert_called_once_with(task, mock.ANY)
- @mock.patch('ironic.conductor.manager.cleaning_error_handler')
@mock.patch.object(client.Client, 'delete_port')
@mock.patch.object(client.Client, 'list_ports')
- def test_delete_cleaning_ports_delete_fail(self, list_mock, delete_mock,
- error_mock):
+ def test_delete_cleaning_ports_delete_fail(self, list_mock, delete_mock):
# Check that if deleting ports fails, the node goes to cleanfail
list_mock.return_value = {'ports': [self.neutron_port]}
delete_mock.side_effect = neutron_client_exc.ConnectionFailed
api = dhcp_factory.DHCPFactory().provider
with task_manager.acquire(self.context, self.node.uuid) as task:
- api.delete_cleaning_ports(task)
+ self.assertRaises(exception.NodeCleaningFailure,
+ api.delete_cleaning_ports,
+ task)
list_mock.assert_called_once_with(
network_id='00000000-0000-0000-0000-000000000000')
delete_mock.assert_called_once_with(self.neutron_port['id'])
- error_mock.assert_called_once_with(task, mock.ANY)
diff --git a/ironic/tests/drivers/agent_pxe_config.template b/ironic/tests/drivers/agent_pxe_config.template
index 414703a1c..7b26d58cf 100644
--- a/ironic/tests/drivers/agent_pxe_config.template
+++ b/ironic/tests/drivers/agent_pxe_config.template
@@ -2,4 +2,4 @@ default deploy
label deploy
kernel /tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/deploy_kernel
-append initrd=/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/deploy_ramdisk text test_param ipa-api-url=http://192.168.122.184:6385 ipa-driver-name=agent_ipmitool root_device=vendor=fake,size=123
+append initrd=/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/deploy_ramdisk text test_param ipa-api-url=http://192.168.122.184:6385 ipa-driver-name=agent_ipmitool root_device=vendor=fake,size=123 coreos.configdrive=0
diff --git a/ironic/tests/drivers/amt/test_vendor.py b/ironic/tests/drivers/amt/test_vendor.py
index 1d1742918..a6ddfc769 100644
--- a/ironic/tests/drivers/amt/test_vendor.py
+++ b/ironic/tests/drivers/amt/test_vendor.py
@@ -37,7 +37,8 @@ class AMTPXEVendorPassthruTestCase(db_base.DbTestCase):
driver='pxe_amt', driver_info=INFO_DICT)
def test_vendor_routes(self):
- expected = ['heartbeat', 'pass_deploy_info']
+ expected = ['heartbeat', 'pass_deploy_info',
+ 'pass_bootloader_install_info']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
vendor_routes = task.driver.vendor.vendor_routes
@@ -54,13 +55,64 @@ class AMTPXEVendorPassthruTestCase(db_base.DbTestCase):
@mock.patch.object(amt_mgmt.AMTManagement, 'ensure_next_boot_device')
@mock.patch.object(pxe.VendorPassthru, 'pass_deploy_info')
- def test_vendorpassthru_pass_deploy_info(self, mock_pxe_vendorpassthru,
- mock_ensure):
+ def test_vendorpassthru_pass_deploy_info_netboot(self,
+ mock_pxe_vendorpassthru,
+ mock_ensure):
kwargs = {'address': '123456'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.provision_state = states.DEPLOYWAIT
task.node.target_provision_state = states.ACTIVE
+ task.node.instance_info['capabilities'] = {
+ "boot_option": "netboot"
+ }
task.driver.vendor.pass_deploy_info(task, **kwargs)
mock_ensure.assert_called_with(task.node, boot_devices.PXE)
mock_pxe_vendorpassthru.assert_called_once_with(task, **kwargs)
+
+ @mock.patch.object(amt_mgmt.AMTManagement, 'ensure_next_boot_device')
+ @mock.patch.object(pxe.VendorPassthru, 'pass_deploy_info')
+ def test_vendorpassthru_pass_deploy_info_localboot(self,
+ mock_pxe_vendorpassthru,
+ mock_ensure):
+ kwargs = {'address': '123456'}
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.provision_state = states.DEPLOYWAIT
+ task.node.target_provision_state = states.ACTIVE
+ task.node.instance_info['capabilities'] = {"boot_option": "local"}
+ task.driver.vendor.pass_deploy_info(task, **kwargs)
+ self.assertFalse(mock_ensure.called)
+ mock_pxe_vendorpassthru.assert_called_once_with(task, **kwargs)
+
+ @mock.patch.object(amt_mgmt.AMTManagement, 'ensure_next_boot_device')
+ @mock.patch.object(pxe.VendorPassthru, 'continue_deploy')
+ def test_vendorpassthru_continue_deploy_netboot(self,
+ mock_pxe_vendorpassthru,
+ mock_ensure):
+ kwargs = {'address': '123456'}
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.provision_state = states.DEPLOYWAIT
+ task.node.target_provision_state = states.ACTIVE
+ task.node.instance_info['capabilities'] = {
+ "boot_option": "netboot"
+ }
+ task.driver.vendor.continue_deploy(task, **kwargs)
+ mock_ensure.assert_called_with(task.node, boot_devices.PXE)
+ mock_pxe_vendorpassthru.assert_called_once_with(task, **kwargs)
+
+ @mock.patch.object(amt_mgmt.AMTManagement, 'ensure_next_boot_device')
+ @mock.patch.object(pxe.VendorPassthru, 'continue_deploy')
+ def test_vendorpassthru_continue_deploy_localboot(self,
+ mock_pxe_vendorpassthru,
+ mock_ensure):
+ kwargs = {'address': '123456'}
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.provision_state = states.DEPLOYWAIT
+ task.node.target_provision_state = states.ACTIVE
+ task.node.instance_info['capabilities'] = {"boot_option": "local"}
+ task.driver.vendor.continue_deploy(task, **kwargs)
+ self.assertFalse(mock_ensure.called)
+ mock_pxe_vendorpassthru.assert_called_once_with(task, **kwargs)
diff --git a/ironic/tests/drivers/drac/test_client.py b/ironic/tests/drivers/drac/test_client.py
index a5df57bd6..247f34253 100644
--- a/ironic/tests/drivers/drac/test_client.py
+++ b/ironic/tests/drivers/drac/test_client.py
@@ -15,6 +15,7 @@
Test class for DRAC client wrapper.
"""
+import time
from xml.etree import ElementTree
import mock
@@ -51,6 +52,25 @@ class DracClientTestCase(base.TestCase):
None, self.resource_uri)
mock_xml.context.assert_called_once_with()
+ @mock.patch.object(time, 'sleep', lambda seconds: None)
+ def test_wsman_enumerate_retry(self, mock_client_pywsman):
+ mock_xml = test_utils.mock_wsman_root('<test></test>')
+ mock_pywsman_client = mock_client_pywsman.Client.return_value
+ mock_pywsman_client.enumerate.side_effect = [None, mock_xml]
+
+ client = drac_client.Client(**INFO_DICT)
+ client.wsman_enumerate(self.resource_uri)
+
+ mock_options = mock_client_pywsman.ClientOptions.return_value
+ mock_options.set_flags.assert_called_once_with(
+ mock_client_pywsman.FLAG_ENUMERATION_OPTIMIZATION)
+ mock_options.set_max_elements.assert_called_once_with(100)
+ mock_pywsman_client.enumerate.assert_has_calls([
+ mock.call(mock_options, None, self.resource_uri),
+ mock.call(mock_options, None, self.resource_uri)
+ ])
+ mock_xml.context.assert_called_once_with()
+
def test_wsman_enumerate_with_additional_pull(self, mock_client_pywsman):
mock_root = mock.Mock()
mock_root.string.side_effect = [test_utils.build_soap_xml(
@@ -118,6 +138,24 @@ class DracClientTestCase(base.TestCase):
mock_pywsman_client.invoke.assert_called_once_with(mock_options,
self.resource_uri, method_name, None)
+ @mock.patch.object(time, 'sleep', lambda seconds: None)
+ def test_wsman_invoke_retry(self, mock_client_pywsman):
+ result_xml = test_utils.build_soap_xml(
+ [{'ReturnValue': drac_client.RET_SUCCESS}], self.resource_uri)
+ mock_xml = test_utils.mock_wsman_root(result_xml)
+ mock_pywsman_client = mock_client_pywsman.Client.return_value
+ mock_pywsman_client.invoke.side_effect = [None, mock_xml]
+
+ method_name = 'method'
+ client = drac_client.Client(**INFO_DICT)
+ client.wsman_invoke(self.resource_uri, method_name)
+
+ mock_options = mock_client_pywsman.ClientOptions.return_value
+ mock_pywsman_client.invoke.assert_has_calls([
+ mock.call(mock_options, self.resource_uri, method_name, None),
+ mock.call(mock_options, self.resource_uri, method_name, None)
+ ])
+
def test_wsman_invoke_with_selectors(self, mock_client_pywsman):
result_xml = test_utils.build_soap_xml(
[{'ReturnValue': drac_client.RET_SUCCESS}], self.resource_uri)
diff --git a/ironic/tests/drivers/drac/test_power.py b/ironic/tests/drivers/drac/test_power.py
index a3fd5fc06..796368d24 100644
--- a/ironic/tests/drivers/drac/test_power.py
+++ b/ironic/tests/drivers/drac/test_power.py
@@ -137,7 +137,6 @@ class DracPowerTestCase(base.DbTestCase):
@mock.patch.object(drac_power, '_set_power_state')
def test_set_power_state(self, mock_set_power_state):
- mock_set_power_state.return_value = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.power.set_power_state(task, states.POWER_ON)
@@ -145,10 +144,22 @@ class DracPowerTestCase(base.DbTestCase):
states.POWER_ON)
@mock.patch.object(drac_power, '_set_power_state')
- def test_reboot(self, mock_set_power_state):
- mock_set_power_state.return_value = states.REBOOT
+ @mock.patch.object(drac_power, '_get_power_state')
+ def test_reboot(self, mock_get_power_state, mock_set_power_state):
+ mock_get_power_state.return_value = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.power.reboot(task)
mock_set_power_state.assert_called_once_with(task.node,
states.REBOOT)
+
+ @mock.patch.object(drac_power, '_set_power_state')
+ @mock.patch.object(drac_power, '_get_power_state')
+ def test_reboot_in_power_off(self, mock_get_power_state,
+ mock_set_power_state):
+ mock_get_power_state.return_value = states.POWER_OFF
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.driver.power.reboot(task)
+ mock_set_power_state.assert_called_once_with(task.node,
+ states.POWER_ON)
diff --git a/ironic/tests/drivers/elilo_efi_pxe_config.template b/ironic/tests/drivers/elilo_efi_pxe_config.template
new file mode 100644
index 000000000..0dca09d8c
--- /dev/null
+++ b/ironic/tests/drivers/elilo_efi_pxe_config.template
@@ -0,0 +1,16 @@
+default=deploy
+
+image=/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/deploy_kernel
+ label=deploy
+ initrd=/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/deploy_ramdisk
+ append="selinux=0 disk=cciss/c0d0,sda,hda,vda iscsi_target_iqn=iqn-1be26c0b-03f2-4d2e-ae87-c02d7f33c123 deployment_id=1be26c0b-03f2-4d2e-ae87-c02d7f33c123 deployment_key=0123456789ABCDEFGHIJKLMNOPQRSTUV ironic_api_url=http://192.168.122.184:6385 troubleshoot=0 text test_param ip=%I::%G:%M:%H::on root_device=vendor=fake,size=123 ipa-api-url=http://192.168.122.184:6385 ipa-driver-name=pxe_ssh boot_option=netboot boot_mode=uefi coreos.configdrive=0"
+
+
+image=/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/kernel
+ label=boot_partition
+ initrd=/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/ramdisk
+ append="root={{ ROOT }} ro text test_param ip=%I::%G:%M:%H::on"
+
+image=chain.c32
+ label=boot_whole_disk
+ append="mbr:{{ DISK_IDENTIFIER }}"
diff --git a/ironic/tests/drivers/ilo/test_common.py b/ironic/tests/drivers/ilo/test_common.py
index fddd57ba7..a66b4b721 100644
--- a/ironic/tests/drivers/ilo/test_common.py
+++ b/ironic/tests/drivers/ilo/test_common.py
@@ -27,7 +27,6 @@ from ironic.common import swift
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers.modules.ilo import common as ilo_common
-from ironic.drivers import utils as driver_utils
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
@@ -300,42 +299,39 @@ class IloCommonMethodsTestCase(db_base.DbTestCase):
get_pending_boot_mode_mock.assert_called_once_with()
@mock.patch.object(ilo_common, 'set_boot_mode')
- @mock.patch.object(driver_utils, 'get_node_capability')
- def test_update_boot_mode_avbl(self,
- node_capability_mock,
- set_boot_mode_mock):
- node_capability_mock.return_value = 'uefi'
+ def test_update_boot_mode_instance_info_exists(self,
+ set_boot_mode_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
+ task.node.instance_info['deploy_boot_mode'] = 'bios'
ilo_common.update_boot_mode(task)
- node_capability_mock.assert_called_once_with(task.node,
- 'boot_mode')
- set_boot_mode_mock.assert_called_once_with(task.node, 'uefi')
+ set_boot_mode_mock.assert_called_once_with(task.node, 'bios')
+
+ @mock.patch.object(ilo_common, 'set_boot_mode')
+ def test_update_boot_mode_capabilities_exist(self,
+ set_boot_mode_mock):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.properties['capabilities'] = 'boot_mode:bios'
+ ilo_common.update_boot_mode(task)
+ set_boot_mode_mock.assert_called_once_with(task.node, 'bios')
- @mock.patch.object(driver_utils, 'rm_node_capability')
- @mock.patch.object(driver_utils, 'add_node_capability')
@mock.patch.object(ilo_common, 'get_ilo_object')
- def test_update_boot_mode(self, get_ilo_object_mock,
- add_node_capability_mock,
- rm_node_capability_mock):
+ def test_update_boot_mode(self, get_ilo_object_mock):
ilo_mock_obj = get_ilo_object_mock.return_value
- ilo_mock_obj.get_pending_boot_mode.return_value = 'legacy'
+ ilo_mock_obj.get_pending_boot_mode.return_value = 'LEGACY'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
ilo_common.update_boot_mode(task)
get_ilo_object_mock.assert_called_once_with(task.node)
ilo_mock_obj.get_pending_boot_mode.assert_called_once_with()
- rm_node_capability_mock.assert_called_once_with(task, 'boot_mode')
- add_node_capability_mock.assert_called_once_with(task,
- 'boot_mode',
- 'bios')
+ self.assertEqual('bios',
+ task.node.instance_info['deploy_boot_mode'])
- @mock.patch.object(driver_utils, 'add_node_capability')
@mock.patch.object(ilo_common, 'get_ilo_object')
def test_update_boot_mode_unknown(self,
- get_ilo_object_mock,
- add_node_capability_mock):
+ get_ilo_object_mock):
ilo_mock_obj = get_ilo_object_mock.return_value
ilo_mock_obj.get_pending_boot_mode.return_value = 'UNKNOWN'
set_pending_boot_mode_mock = ilo_mock_obj.set_pending_boot_mode
@@ -346,15 +342,28 @@ class IloCommonMethodsTestCase(db_base.DbTestCase):
get_ilo_object_mock.assert_called_once_with(task.node)
ilo_mock_obj.get_pending_boot_mode.assert_called_once_with()
set_pending_boot_mode_mock.assert_called_once_with('UEFI')
- add_node_capability_mock.assert_called_once_with(task,
- 'boot_mode',
- 'uefi')
+ self.assertEqual('uefi',
+ task.node.instance_info['deploy_boot_mode'])
+
+ @mock.patch.object(ilo_common, 'get_ilo_object')
+ def test_update_boot_mode_unknown_except(self,
+ get_ilo_object_mock):
+ ilo_mock_obj = get_ilo_object_mock.return_value
+ ilo_mock_obj.get_pending_boot_mode.return_value = 'UNKNOWN'
+ set_pending_boot_mode_mock = ilo_mock_obj.set_pending_boot_mode
+ exc = ilo_error.IloError('error')
+ set_pending_boot_mode_mock.side_effect = exc
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ self.assertRaises(exception.IloOperationError,
+ ilo_common.update_boot_mode, task)
+ get_ilo_object_mock.assert_called_once_with(task.node)
+ ilo_mock_obj.get_pending_boot_mode.assert_called_once_with()
- @mock.patch.object(driver_utils, 'add_node_capability')
@mock.patch.object(ilo_common, 'get_ilo_object')
def test_update_boot_mode_legacy(self,
- get_ilo_object_mock,
- add_node_capability_mock):
+ get_ilo_object_mock):
ilo_mock_obj = get_ilo_object_mock.return_value
exc = ilo_error.IloCommandNotSupportedError('error')
ilo_mock_obj.get_pending_boot_mode.side_effect = exc
@@ -364,9 +373,8 @@ class IloCommonMethodsTestCase(db_base.DbTestCase):
ilo_common.update_boot_mode(task)
get_ilo_object_mock.assert_called_once_with(task.node)
ilo_mock_obj.get_pending_boot_mode.assert_called_once_with()
- add_node_capability_mock.assert_called_once_with(task,
- 'boot_mode',
- 'bios')
+ self.assertEqual('bios',
+ task.node.instance_info['deploy_boot_mode'])
@mock.patch.object(ilo_common, 'set_boot_mode')
def test_update_boot_mode_prop_boot_mode_exist(self,
diff --git a/ironic/tests/drivers/ilo/test_deploy.py b/ironic/tests/drivers/ilo/test_deploy.py
index 32ea63734..f44e532b9 100644
--- a/ironic/tests/drivers/ilo/test_deploy.py
+++ b/ironic/tests/drivers/ilo/test_deploy.py
@@ -124,26 +124,29 @@ class IloDeployPrivateMethodsTestCase(db_base.DbTestCase):
boot_iso_expected = 'boot-iso-uuid'
self.assertEqual(boot_iso_expected, boot_iso_actual)
- @mock.patch.object(driver_utils, 'get_node_capability')
+ @mock.patch.object(deploy_utils, 'get_boot_mode_for_deploy')
@mock.patch.object(images, 'get_image_properties')
@mock.patch.object(ilo_deploy, '_parse_deploy_info')
- def test__get_boot_iso_uefi_no_glance_image(self, deploy_info_mock,
- image_props_mock, get_node_cap_mock):
+ def test__get_boot_iso_uefi_no_glance_image(self,
+ deploy_info_mock,
+ image_props_mock,
+ boot_mode_mock):
deploy_info_mock.return_value = {'image_source': 'image-uuid',
'ilo_deploy_iso': 'deploy_iso_uuid'}
image_props_mock.return_value = {'boot_iso': None,
'kernel_id': None,
'ramdisk_id': None}
- get_node_cap_mock.return_value = 'uefi'
+ properties = {'capabilities': 'boot_mode:uefi'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
+ task.node.properties = properties
boot_iso_result = ilo_deploy._get_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
image_props_mock.assert_called_once_with(
task.context, 'image-uuid',
['boot_iso', 'kernel_id', 'ramdisk_id'])
- get_node_cap_mock.assert_not_called(task.node, 'boot_mode')
+ self.assertFalse(boot_mode_mock.called)
self.assertIsNone(boot_iso_result)
@mock.patch.object(tempfile, 'NamedTemporaryFile')
@@ -250,8 +253,7 @@ class IloDeployPrivateMethodsTestCase(db_base.DbTestCase):
ilo_deploy._reboot_into(task, 'iso', opts)
setup_vmedia_mock.assert_called_once_with(task, 'iso', opts)
set_boot_device_mock.assert_called_once_with(task,
- boot_devices.CDROM,
- persistent=True)
+ boot_devices.CDROM)
node_power_action_mock.assert_called_once_with(task, states.REBOOT)
@mock.patch.object(ilo_deploy, '_reboot_into')
@@ -271,39 +273,26 @@ class IloDeployPrivateMethodsTestCase(db_base.DbTestCase):
@mock.patch.object(deploy_utils, 'is_secure_boot_requested')
@mock.patch.object(ilo_common, 'set_secure_boot_mode')
- def test__update_secure_boot_passed_true(self,
- func_set_secure_boot_mode,
- func_is_secure_boot_requested):
+ def test__update_secure_boot_mode_passed_true(self,
+ func_set_secure_boot_mode,
+ func_is_secure_boot_req):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- func_is_secure_boot_requested.return_value = True
+ func_is_secure_boot_req.return_value = True
ilo_deploy._update_secure_boot_mode(task, True)
func_set_secure_boot_mode.assert_called_once_with(task, True)
@mock.patch.object(deploy_utils, 'is_secure_boot_requested')
@mock.patch.object(ilo_common, 'set_secure_boot_mode')
- def test__update_secure_boot_passed_False(self,
- func_set_secure_boot_mode,
- func_is_secure_boot_requested):
+ def test__update_secure_boot_mode_passed_False(self,
+ func_set_secure_boot_mode,
+ func_is_secure_boot_req):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- func_is_secure_boot_requested.return_value = False
+ func_is_secure_boot_req.return_value = False
ilo_deploy._update_secure_boot_mode(task, False)
self.assertFalse(func_set_secure_boot_mode.called)
- @mock.patch.object(driver_utils, 'add_node_capability')
- @mock.patch.object(driver_utils, 'rm_node_capability')
- def test__enable_uefi_capability(self, func_rm_node_capability,
- func_add_node_capability):
- with task_manager.acquire(self.context, self.node.uuid,
- shared=False) as task:
- ilo_deploy._enable_uefi_capability(task)
- func_rm_node_capability.assert_called_once_with(task,
- 'boot_mode')
- func_add_node_capability.assert_called_once_with(task,
- 'boot_mode',
- 'uefi')
-
@mock.patch.object(ilo_common, 'set_secure_boot_mode')
@mock.patch.object(ilo_common, 'get_secure_boot_mode')
def test__disable_secure_boot_false(self,
@@ -330,81 +319,103 @@ class IloDeployPrivateMethodsTestCase(db_base.DbTestCase):
func_set_secure_boot_mode.assert_called_once_with(task, False)
self.assertTrue(returned_state)
+ @mock.patch.object(ilo_deploy.LOG, 'debug')
@mock.patch.object(ilo_deploy, 'exception')
@mock.patch.object(ilo_common, 'get_secure_boot_mode')
def test__disable_secure_boot_exception(self,
func_get_secure_boot_mode,
- exception_mock):
+ exception_mock,
+ mock_log):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
exception_mock.IloOperationNotSupported = Exception
func_get_secure_boot_mode.side_effect = Exception
returned_state = ilo_deploy._disable_secure_boot(task)
func_get_secure_boot_mode.assert_called_once_with(task)
+ self.assertTrue(mock_log.called)
self.assertFalse(returned_state)
@mock.patch.object(ilo_common, 'update_boot_mode')
- @mock.patch.object(deploy_utils, 'is_secure_boot_requested')
@mock.patch.object(ilo_deploy, '_disable_secure_boot')
@mock.patch.object(manager_utils, 'node_power_action')
def test__prepare_node_for_deploy(self,
func_node_power_action,
func_disable_secure_boot,
- func_is_secure_boot_requested,
func_update_boot_mode):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_disable_secure_boot.return_value = False
- func_is_secure_boot_requested.return_value = False
ilo_deploy._prepare_node_for_deploy(task)
func_node_power_action.assert_called_once_with(task,
states.POWER_OFF)
func_disable_secure_boot.assert_called_once_with(task)
- func_is_secure_boot_requested.assert_called_once_with(task.node)
func_update_boot_mode.assert_called_once_with(task)
+ bootmode = driver_utils.get_node_capability(task.node, "boot_mode")
+ self.assertIsNone(bootmode)
@mock.patch.object(ilo_common, 'update_boot_mode')
- @mock.patch.object(deploy_utils, 'is_secure_boot_requested')
@mock.patch.object(ilo_deploy, '_disable_secure_boot')
@mock.patch.object(manager_utils, 'node_power_action')
def test__prepare_node_for_deploy_sec_boot_on(self,
func_node_power_action,
func_disable_secure_boot,
- func_is_secure_boot_req,
func_update_boot_mode):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_disable_secure_boot.return_value = True
- func_is_secure_boot_req.return_value = False
ilo_deploy._prepare_node_for_deploy(task)
func_node_power_action.assert_called_once_with(task,
states.POWER_OFF)
func_disable_secure_boot.assert_called_once_with(task)
- func_is_secure_boot_req.assert_called_once_with(task.node)
self.assertFalse(func_update_boot_mode.called)
+ ret_boot_mode = task.node.instance_info['deploy_boot_mode']
+ self.assertEqual('uefi', ret_boot_mode)
+ bootmode = driver_utils.get_node_capability(task.node, "boot_mode")
+ self.assertIsNone(bootmode)
@mock.patch.object(ilo_common, 'update_boot_mode')
- @mock.patch.object(ilo_deploy, '_enable_uefi_capability')
- @mock.patch.object(deploy_utils, 'is_secure_boot_requested')
@mock.patch.object(ilo_deploy, '_disable_secure_boot')
@mock.patch.object(manager_utils, 'node_power_action')
- def test__prepare_node_for_deploy_sec_boot_req(self,
- func_node_power_action,
- func_disable_secure_boot,
- func_is_secure_boot_req,
- func_enable_uefi_cap,
- func_update_boot_mode):
+ def test__prepare_node_for_deploy_inst_info(self,
+ func_node_power_action,
+ func_disable_secure_boot,
+ func_update_boot_mode):
+ instance_info = {'capabilities': '{"secure_boot": "true"}'}
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ func_disable_secure_boot.return_value = False
+ task.node.instance_info = instance_info
+ ilo_deploy._prepare_node_for_deploy(task)
+ func_node_power_action.assert_called_once_with(task,
+ states.POWER_OFF)
+ func_disable_secure_boot.assert_called_once_with(task)
+ func_update_boot_mode.assert_called_once_with(task)
+ bootmode = driver_utils.get_node_capability(task.node, "boot_mode")
+ self.assertIsNone(bootmode)
+ deploy_boot_mode = task.node.instance_info.get('deploy_boot_mode')
+ self.assertIsNone(deploy_boot_mode)
+
+ @mock.patch.object(ilo_common, 'update_boot_mode')
+ @mock.patch.object(ilo_deploy, '_disable_secure_boot')
+ @mock.patch.object(manager_utils, 'node_power_action')
+ def test__prepare_node_for_deploy_sec_boot_on_inst_info(self,
+ func_node_power_action,
+ func_disable_secure_boot,
+ func_update_boot_mode):
+ instance_info = {'capabilities': '{"secure_boot": "true"}'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_disable_secure_boot.return_value = True
- func_is_secure_boot_req.return_value = True
+ task.node.instance_info = instance_info
ilo_deploy._prepare_node_for_deploy(task)
func_node_power_action.assert_called_once_with(task,
states.POWER_OFF)
func_disable_secure_boot.assert_called_once_with(task)
- func_is_secure_boot_req.assert_called_once_with(task.node)
- func_enable_uefi_cap.assert_called_once_with(task)
self.assertFalse(func_update_boot_mode.called)
+ bootmode = driver_utils.get_node_capability(task.node, "boot_mode")
+ self.assertIsNone(bootmode)
+ deploy_boot_mode = task.node.instance_info.get('deploy_boot_mode')
+ self.assertIsNone(deploy_boot_mode)
class IloVirtualMediaIscsiDeployTestCase(db_base.DbTestCase):
@@ -739,6 +750,18 @@ class VendorPassthruTestCase(db_base.DbTestCase):
get_deploy_info_mock.assert_called_once_with(task.node,
foo='bar')
+ @mock.patch.object(iscsi_deploy, 'validate_pass_bootloader_info_input',
+ autospec=True)
+ def test_validate_pass_bootloader_install_info(self,
+ validate_mock):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ kwargs = {'address': '1.2.3.4', 'key': 'fake-key',
+ 'status': 'SUCCEEDED', 'error': ''}
+ task.driver.vendor.validate(
+ task, method='pass_bootloader_install_info', **kwargs)
+ validate_mock.assert_called_once_with(task, kwargs)
+
@mock.patch.object(iscsi_deploy, 'get_deploy_info')
def test_validate_heartbeat(self, get_deploy_info_mock):
with task_manager.acquire(self.context, self.node.uuid,
@@ -747,7 +770,22 @@ class VendorPassthruTestCase(db_base.DbTestCase):
vendor.validate(task, method='heartbeat', foo='bar')
self.assertFalse(get_deploy_info_mock.called)
- @mock.patch.object(deploy_utils, 'notify_deploy_complete')
+ @mock.patch.object(iscsi_deploy, 'validate_bootloader_install_status',
+ autospec=True)
+ @mock.patch.object(iscsi_deploy, 'finish_deploy', autospec=True)
+ def test_pass_bootloader_install_info(self, finish_deploy_mock,
+ validate_input_mock):
+ kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
+ self.node.provision_state = states.DEPLOYWAIT
+ self.node.target_provision_state = states.ACTIVE
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.driver.vendor.pass_bootloader_install_info(task, **kwargs)
+ finish_deploy_mock.assert_called_once_with(task, '123456')
+ validate_input_mock.assert_called_once_with(task, kwargs)
+
+ @mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed')
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode')
@mock.patch.object(ilo_common, 'update_boot_mode')
@mock.patch.object(manager_utils, 'node_set_boot_device')
@@ -760,7 +798,7 @@ class VendorPassthruTestCase(db_base.DbTestCase):
setup_vmedia_mock, set_boot_device_mock,
func_update_boot_mode,
func_update_secure_boot_mode,
- notify_deploy_complete_mock):
+ notify_ramdisk_to_proceed_mock):
kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
continue_deploy_mock.return_value = {'root uuid': 'root-uuid'}
get_boot_iso_mock.return_value = 'boot-iso'
@@ -786,7 +824,7 @@ class VendorPassthruTestCase(db_base.DbTestCase):
self.assertEqual('boot-iso',
task.node.instance_info['ilo_boot_iso'])
- notify_deploy_complete_mock.assert_called_once_with('123456')
+ notify_ramdisk_to_proceed_mock.assert_called_once_with('123456')
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot')
def test_pass_deploy_info_bad(self, cleanup_vmedia_boot_mock):
@@ -805,16 +843,19 @@ class VendorPassthruTestCase(db_base.DbTestCase):
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
self.assertFalse(cleanup_vmedia_boot_mock.called)
+ @mock.patch.object(ilo_deploy, '_update_secure_boot_mode', autospec=True)
+ @mock.patch.object(ilo_common, 'update_boot_mode', autospec=True)
@mock.patch.object(manager_utils, 'node_power_action')
@mock.patch.object(iscsi_deploy, 'continue_deploy')
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot')
@mock.patch.object(ilo_deploy, '_get_boot_iso')
def test_pass_deploy_info_create_boot_iso_fail(self, get_iso_mock,
- cleanup_vmedia_boot_mock, continue_deploy_mock, node_power_mock):
+ cleanup_vmedia_boot_mock, continue_deploy_mock, node_power_mock,
+ update_boot_mode_mock, update_secure_boot_mode_mock):
kwargs = {'address': '123456'}
continue_deploy_mock.return_value = {'root uuid': 'root-uuid'}
get_iso_mock.side_effect = exception.ImageCreationFailed(
- image_type='iso', error="error")
+ image_type='iso', error="error")
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
@@ -824,6 +865,8 @@ class VendorPassthruTestCase(db_base.DbTestCase):
task.driver.vendor.pass_deploy_info(task, **kwargs)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
+ update_boot_mode_mock.assert_called_once_with(task)
+ update_secure_boot_mode_mock.assert_called_once_with(task, True)
continue_deploy_mock.assert_called_once_with(task, **kwargs)
get_iso_mock.assert_called_once_with(task, 'root-uuid')
node_power_mock.assert_called_once_with(task, states.POWER_OFF)
@@ -831,22 +874,57 @@ class VendorPassthruTestCase(db_base.DbTestCase):
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertIsNotNone(task.node.last_error)
- @mock.patch.object(deploy_utils, 'notify_deploy_complete')
+ @mock.patch.object(iscsi_deploy, 'finish_deploy', autospec=True)
+ @mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
+ autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device')
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode')
@mock.patch.object(ilo_common, 'update_boot_mode')
@mock.patch.object(iscsi_deploy, 'continue_deploy')
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot')
- def _test_pass_deploy_info_localboot(self, cleanup_vmedia_boot_mock,
- continue_deploy_mock,
- func_update_boot_mode,
- func_update_secure_boot_mode,
- set_boot_device_mock,
- notify_deploy_complete_mock):
+ def test_pass_deploy_info_boot_option_local(
+ self, cleanup_vmedia_boot_mock, continue_deploy_mock,
+ func_update_boot_mode, func_update_secure_boot_mode,
+ set_boot_device_mock, notify_ramdisk_to_proceed_mock,
+ finish_deploy_mock):
+ kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
+ continue_deploy_mock.return_value = {'root uuid': '<some-uuid>'}
+
+ self.node.instance_info = {'capabilities': '{"boot_option": "local"}'}
+ self.node.provision_state = states.DEPLOYWAIT
+ self.node.target_provision_state = states.ACTIVE
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ vendor = task.driver.vendor
+ vendor.pass_deploy_info(task, **kwargs)
+ cleanup_vmedia_boot_mock.assert_called_once_with(task)
+ continue_deploy_mock.assert_called_once_with(task, **kwargs)
+ set_boot_device_mock.assert_called_once_with(task,
+ boot_devices.DISK,
+ persistent=True)
+ func_update_boot_mode.assert_called_once_with(task)
+ func_update_secure_boot_mode.assert_called_once_with(task, True)
+ notify_ramdisk_to_proceed_mock.assert_called_once_with('123456')
+ self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ self.assertEqual(states.ACTIVE, task.node.target_provision_state)
+ self.assertFalse(finish_deploy_mock.called)
+
+ @mock.patch.object(iscsi_deploy, 'finish_deploy', autospec=True)
+ @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
+ @mock.patch.object(ilo_deploy, '_update_secure_boot_mode', autospec=True)
+ @mock.patch.object(ilo_common, 'update_boot_mode', autospec=True)
+ @mock.patch.object(iscsi_deploy, 'continue_deploy', autospec=True)
+ @mock.patch.object(ilo_common, 'cleanup_vmedia_boot', autospec=True)
+ def _test_pass_deploy_info_whole_disk_image(
+ self, cleanup_vmedia_boot_mock, continue_deploy_mock,
+ func_update_boot_mode, func_update_secure_boot_mode,
+ set_boot_device_mock, notify_ramdisk_to_proceed_mock):
kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
continue_deploy_mock.return_value = {'root uuid': '<some-uuid>'}
+ self.node.driver_internal_info = {'is_whole_disk_image': True}
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
@@ -862,19 +940,15 @@ class VendorPassthruTestCase(db_base.DbTestCase):
persistent=True)
func_update_boot_mode.assert_called_once_with(task)
func_update_secure_boot_mode.assert_called_once_with(task, True)
- notify_deploy_complete_mock.assert_called_once_with('123456')
- self.assertEqual(states.ACTIVE, task.node.provision_state)
- self.assertEqual(states.NOSTATE, task.node.target_provision_state)
+ iscsi_deploy.finish_deploy.assert_called_once_with(task, '123456')
- def test_pass_deploy_info_boot_option_local(self):
+ def test_pass_deploy_info_whole_disk_image_local(self):
self.node.instance_info = {'capabilities': '{"boot_option": "local"}'}
self.node.save()
- self._test_pass_deploy_info_localboot()
+ self._test_pass_deploy_info_whole_disk_image()
def test_pass_deploy_info_whole_disk_image(self):
- self.node.driver_internal_info = {'is_whole_disk_image': True}
- self.node.save()
- self._test_pass_deploy_info_localboot()
+ self._test_pass_deploy_info_whole_disk_image()
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode')
@mock.patch.object(ilo_common, 'update_boot_mode')
@@ -953,6 +1027,34 @@ class VendorPassthruTestCase(db_base.DbTestCase):
'configure_local_boot')
@mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy')
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot')
+ def test_continue_deploy_whole_disk_image(
+ self, cleanup_vmedia_boot_mock, do_agent_iscsi_deploy_mock,
+ configure_local_boot_mock, reboot_and_finish_deploy_mock,
+ boot_mode_cap_mock, update_secure_boot_mock):
+ self.node.provision_state = states.DEPLOYWAIT
+ self.node.target_provision_state = states.DEPLOYING
+ self.node.driver_internal_info = {'is_whole_disk_image': True}
+ self.node.save()
+ do_agent_iscsi_deploy_mock.return_value = {
+ 'disk identifier': 'some-disk-id'}
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.driver.vendor.continue_deploy(task)
+ cleanup_vmedia_boot_mock.assert_called_once_with(task)
+ do_agent_iscsi_deploy_mock.assert_called_once_with(task,
+ mock.ANY)
+ configure_local_boot_mock.assert_called_once_with(
+ task, root_uuid=None, efi_system_part_uuid=None)
+ reboot_and_finish_deploy_mock.assert_called_once_with(task)
+
+ @mock.patch.object(ilo_deploy, '_update_secure_boot_mode')
+ @mock.patch.object(ilo_common, 'update_boot_mode')
+ @mock.patch.object(agent_base_vendor.BaseAgentVendor,
+ 'reboot_and_finish_deploy')
+ @mock.patch.object(agent_base_vendor.BaseAgentVendor,
+ 'configure_local_boot')
+ @mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy')
+ @mock.patch.object(ilo_common, 'cleanup_vmedia_boot')
def test_continue_deploy_localboot_uefi(self, cleanup_vmedia_boot_mock,
do_agent_iscsi_deploy_mock,
configure_local_boot_mock,
@@ -1003,10 +1105,25 @@ class IloPXEDeployTestCase(db_base.DbTestCase):
pxe_prepare_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
+ task.node.properties['capabilities'] = 'boot_mode:uefi'
task.driver.deploy.prepare(task)
update_boot_mode_mock.assert_called_once_with(task)
pxe_prepare_mock.assert_called_once_with(task)
+ @mock.patch.object(pxe.PXEDeploy, 'prepare')
+ @mock.patch.object(ilo_common, 'update_boot_mode')
+ def test_prepare_uefi_whole_disk_image_fail(self,
+ update_boot_mode_mock,
+ pxe_prepare_mock):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.properties['capabilities'] = 'boot_mode:uefi'
+ task.node.driver_internal_info['is_whole_disk_image'] = True
+ self.assertRaises(exception.InvalidParameterValue,
+ task.driver.deploy.prepare, task)
+ update_boot_mode_mock.assert_called_once_with(task)
+ self.assertFalse(pxe_prepare_mock.called)
+
@mock.patch.object(pxe.PXEDeploy, 'deploy')
@mock.patch.object(manager_utils, 'node_set_boot_device')
def test_deploy_boot_mode_exists(self, set_persistent_mock,
@@ -1027,7 +1144,8 @@ class IloPXEVendorPassthruTestCase(db_base.DbTestCase):
driver='pxe_ilo', driver_info=INFO_DICT)
def test_vendor_routes(self):
- expected = ['heartbeat', 'pass_deploy_info']
+ expected = ['heartbeat', 'pass_deploy_info',
+ 'pass_bootloader_install_info']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
vendor_routes = task.driver.vendor.vendor_routes
@@ -1067,8 +1185,10 @@ class IloVirtualMediaAgentVendorInterfaceTestCase(db_base.DbTestCase):
@mock.patch.object(agent.AgentVendorInterface, 'reboot_to_instance')
@mock.patch.object(agent.AgentVendorInterface, 'check_deploy_success')
+ @mock.patch.object(ilo_common, 'update_boot_mode')
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode')
def test_reboot_to_instance(self, func_update_secure_boot_mode,
+ func_update_boot_mode,
check_deploy_success_mock,
agent_reboot_to_instance_mock):
kwargs = {'address': '123456'}
@@ -1077,14 +1197,17 @@ class IloVirtualMediaAgentVendorInterfaceTestCase(db_base.DbTestCase):
shared=False) as task:
task.driver.vendor.reboot_to_instance(task, **kwargs)
check_deploy_success_mock.called_once_with(task.node)
+ func_update_boot_mode.assert_called_once_with(task)
func_update_secure_boot_mode.assert_called_once_with(task, True)
agent_reboot_to_instance_mock.assert_called_once_with(task,
**kwargs)
@mock.patch.object(agent.AgentVendorInterface, 'reboot_to_instance')
@mock.patch.object(agent.AgentVendorInterface, 'check_deploy_success')
+ @mock.patch.object(ilo_common, 'update_boot_mode')
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode')
def test_reboot_to_instance_deploy_fail(self, func_update_secure_boot_mode,
+ func_update_boot_mode,
check_deploy_success_mock,
agent_reboot_to_instance_mock):
kwargs = {'address': '123456'}
@@ -1093,6 +1216,7 @@ class IloVirtualMediaAgentVendorInterfaceTestCase(db_base.DbTestCase):
shared=False) as task:
task.driver.vendor.reboot_to_instance(task, **kwargs)
check_deploy_success_mock.called_once_with(task.node)
+ self.assertFalse(func_update_boot_mode.called)
self.assertFalse(func_update_secure_boot_mode.called)
agent_reboot_to_instance_mock.assert_called_once_with(task,
**kwargs)
diff --git a/ironic/tests/drivers/ilo/test_inspect.py b/ironic/tests/drivers/ilo/test_inspect.py
index 129f52b0f..9b7461a7b 100644
--- a/ironic/tests/drivers/ilo/test_inspect.py
+++ b/ironic/tests/drivers/ilo/test_inspect.py
@@ -48,78 +48,30 @@ class IloInspectTestCase(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
properties = ilo_common.REQUIRED_PROPERTIES.copy()
- properties.update(ilo_common.INSPECT_PROPERTIES)
self.assertEqual(properties,
task.driver.inspect.get_properties())
@mock.patch.object(ilo_common, 'parse_driver_info')
- def test_validate_inspect_ports_valid_with_comma(self, driver_info_mock):
+ def test_validate(self, driver_info_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- driver_info_mock.return_value = {'inspect_ports': '1,2'}
task.driver.inspect.validate(task)
driver_info_mock.assert_called_once_with(task.node)
- @mock.patch.object(ilo_common, 'parse_driver_info')
- def test_validate_inspect_ports_valid_None(self, driver_info_mock):
- with task_manager.acquire(self.context, self.node.uuid,
- shared=False) as task:
- driver_info_mock.return_value = {'inspect_ports': 'None'}
- task.driver.inspect.validate(task)
- driver_info_mock.assert_called_once_with(task.node)
-
- @mock.patch.object(ilo_common, 'parse_driver_info')
- def test_validate_inspect_ports_valid_all(self, driver_info_mock):
- with task_manager.acquire(self.context, self.node.uuid,
- shared=False) as task:
- driver_info_mock.return_value = {'inspect_ports': 'all'}
- task.driver.inspect.validate(task)
- driver_info_mock.assert_called_once_with(task.node)
-
- @mock.patch.object(ilo_common, 'parse_driver_info')
- def test_validate_inspect_ports_valid_single(self, driver_info_mock):
- with task_manager.acquire(self.context, self.node.uuid,
- shared=False) as task:
- driver_info_mock.return_value = {'inspect_ports': '1'}
- task.driver.inspect.validate(task)
- driver_info_mock.assert_called_once_with(task.node)
-
- @mock.patch.object(ilo_common, 'parse_driver_info')
- def test_validate_inspect_ports_invalid(self, driver_info_mock):
- with task_manager.acquire(self.context, self.node.uuid,
- shared=False) as task:
- driver_info_mock.return_value = {'inspect_ports': 'abc'}
- self.assertRaises(exception.InvalidParameterValue,
- task.driver.inspect.validate, task)
- driver_info_mock.assert_called_once_with(task.node)
-
- @mock.patch.object(ilo_common, 'parse_driver_info')
- def test_validate_inspect_ports_missing(self, driver_info_mock):
- with task_manager.acquire(self.context, self.node.uuid,
- shared=False) as task:
- driver_info_mock.return_value = {'xyz': 'abc'}
- self.assertRaises(exception.MissingParameterValue,
- task.driver.inspect.validate, task)
- driver_info_mock.assert_called_once_with(task.node)
-
@mock.patch.object(ilo_inspect, '_get_capabilities')
@mock.patch.object(ilo_inspect, '_create_ports_if_not_exist')
- @mock.patch.object(ilo_inspect, '_get_macs_for_desired_ports')
@mock.patch.object(ilo_inspect, '_get_essential_properties')
@mock.patch.object(ilo_power.IloPower, 'get_power_state')
@mock.patch.object(ilo_common, 'get_ilo_object')
def test_inspect_essential_ok(self, get_ilo_object_mock,
power_mock,
get_essential_mock,
- desired_macs_mock,
create_port_mock,
get_capabilities_mock):
ilo_object_mock = get_ilo_object_mock.return_value
properties = {'memory_mb': '512', 'local_gb': '10',
'cpus': '1', 'cpu_arch': 'x86_64'}
macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
- desired_macs_mock.return_value = {'Port 1': 'aa:aa:aa:aa:aa:aa',
- 'Port 2': 'bb:bb:bb:bb:bb:bb'}
capabilities = ''
result = {'properties': properties, 'macs': macs}
get_essential_mock.return_value = result
@@ -127,7 +79,6 @@ class IloInspectTestCase(db_base.DbTestCase):
power_mock.return_value = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- task.node.driver_info = {'inspect_ports': 'all'}
task.driver.inspect.inspect_hardware(task)
self.assertEqual(properties, task.node.properties)
power_mock.assert_called_once_with(task)
@@ -139,7 +90,6 @@ class IloInspectTestCase(db_base.DbTestCase):
@mock.patch.object(ilo_inspect, '_get_capabilities')
@mock.patch.object(ilo_inspect, '_create_ports_if_not_exist')
- @mock.patch.object(ilo_inspect, '_get_macs_for_desired_ports')
@mock.patch.object(ilo_inspect, '_get_essential_properties')
@mock.patch.object(conductor_utils, 'node_power_action')
@mock.patch.object(ilo_power.IloPower, 'get_power_state')
@@ -148,15 +98,12 @@ class IloInspectTestCase(db_base.DbTestCase):
power_mock,
set_power_mock,
get_essential_mock,
- desired_macs_mock,
create_port_mock,
get_capabilities_mock):
ilo_object_mock = get_ilo_object_mock.return_value
properties = {'memory_mb': '512', 'local_gb': '10',
'cpus': '1', 'cpu_arch': 'x86_64'}
macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
- desired_macs_mock.return_value = {'Port 1': 'aa:aa:aa:aa:aa:aa',
- 'Port 2': 'bb:bb:bb:bb:bb:bb'}
capabilities = ''
result = {'properties': properties, 'macs': macs}
get_essential_mock.return_value = result
@@ -164,7 +111,6 @@ class IloInspectTestCase(db_base.DbTestCase):
power_mock.return_value = states.POWER_OFF
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- task.node.driver_info = {'inspect_ports': 'all'}
task.driver.inspect.inspect_hardware(task)
self.assertEqual(properties, task.node.properties)
power_mock.assert_called_once_with(task)
@@ -177,22 +123,18 @@ class IloInspectTestCase(db_base.DbTestCase):
@mock.patch.object(ilo_inspect, '_get_capabilities')
@mock.patch.object(ilo_inspect, '_create_ports_if_not_exist')
- @mock.patch.object(ilo_inspect, '_get_macs_for_desired_ports')
@mock.patch.object(ilo_inspect, '_get_essential_properties')
@mock.patch.object(ilo_power.IloPower, 'get_power_state')
@mock.patch.object(ilo_common, 'get_ilo_object')
def test_inspect_essential_capabilities_ok(self, get_ilo_object_mock,
power_mock,
get_essential_mock,
- desired_macs_mock,
create_port_mock,
get_capabilities_mock):
ilo_object_mock = get_ilo_object_mock.return_value
properties = {'memory_mb': '512', 'local_gb': '10',
'cpus': '1', 'cpu_arch': 'x86_64'}
macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
- desired_macs_mock.return_value = {'Port 1': 'aa:aa:aa:aa:aa:aa',
- 'Port 2': 'bb:bb:bb:bb:bb:bb'}
capability_str = 'BootMode:uefi'
capabilities = {'BootMode': 'uefi'}
result = {'properties': properties, 'macs': macs}
@@ -201,7 +143,6 @@ class IloInspectTestCase(db_base.DbTestCase):
power_mock.return_value = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- task.node.driver_info = {'inspect_ports': 'all'}
task.driver.inspect.inspect_hardware(task)
expected_properties = {'memory_mb': '512', 'local_gb': '10',
'cpus': '1', 'cpu_arch': 'x86_64',
@@ -216,14 +157,12 @@ class IloInspectTestCase(db_base.DbTestCase):
@mock.patch.object(ilo_inspect, '_get_capabilities')
@mock.patch.object(ilo_inspect, '_create_ports_if_not_exist')
- @mock.patch.object(ilo_inspect, '_get_macs_for_desired_ports')
@mock.patch.object(ilo_inspect, '_get_essential_properties')
@mock.patch.object(ilo_power.IloPower, 'get_power_state')
@mock.patch.object(ilo_common, 'get_ilo_object')
def test_inspect_essential_capabilities_exist_ok(self, get_ilo_object_mock,
power_mock,
get_essential_mock,
- desired_macs_mock,
create_port_mock,
get_capabilities_mock):
ilo_object_mock = get_ilo_object_mock.return_value
@@ -231,8 +170,6 @@ class IloInspectTestCase(db_base.DbTestCase):
'cpus': '1', 'cpu_arch': 'x86_64',
'somekey': 'somevalue'}
macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
- desired_macs_mock.return_value = {'Port 1': 'aa:aa:aa:aa:aa:aa',
- 'Port 2': 'bb:bb:bb:bb:bb:bb'}
result = {'properties': properties, 'macs': macs}
capabilities = {'BootMode': 'uefi'}
get_essential_mock.return_value = result
@@ -240,7 +177,6 @@ class IloInspectTestCase(db_base.DbTestCase):
power_mock.return_value = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- task.node.driver_info = {'inspect_ports': 'all'}
task.node.properties = {'capabilities': 'foo:bar'}
expected_capabilities = ('BootMode:uefi,'
'foo:bar')
@@ -260,75 +196,6 @@ class IloInspectTestCase(db_base.DbTestCase):
ilo_object_mock)
create_port_mock.assert_called_once_with(task.node, macs)
- @mock.patch.object(ilo_inspect, '_get_capabilities')
- @mock.patch.object(ilo_inspect, '_create_ports_if_not_exist')
- @mock.patch.object(ilo_inspect, '_get_macs_for_desired_ports')
- @mock.patch.object(ilo_inspect, '_get_essential_properties')
- @mock.patch.object(ilo_power.IloPower, 'get_power_state')
- @mock.patch.object(ilo_common, 'get_ilo_object')
- def test_inspect_hardware_port_desired(self, get_ilo_object_mock,
- power_mock,
- get_essential_mock,
- desired_macs_mock,
- create_port_mock,
- get_capabilities_mock):
- ilo_object_mock = get_ilo_object_mock.return_value
- properties = {'memory_mb': '512', 'local_gb': '10',
- 'cpus': '1', 'cpu_arch': 'x86_64'}
- macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
- result = {'properties': properties, 'macs': macs}
- macs_input_given = {'Port 1': 'aa:aa:aa:aa:aa:aa'}
- desired_macs_mock.return_value = macs_input_given
- capabilities = ''
- get_essential_mock.return_value = result
- get_capabilities_mock.return_value = capabilities
- power_mock.return_value = states.POWER_ON
- with task_manager.acquire(self.context, self.node.uuid,
- shared=False) as task:
- task.node.driver_info = {'inspect_ports': '1'}
- task.driver.inspect.inspect_hardware(task)
- power_mock.assert_called_once_with(task)
- get_essential_mock.assert_called_once_with(task.node,
- ilo_object_mock)
- self.assertEqual(task.node.properties, result['properties'])
- get_capabilities_mock.assert_called_once_with(task.node,
- ilo_object_mock)
- create_port_mock.assert_called_once_with(task.node,
- macs_input_given)
-
- @mock.patch.object(ilo_inspect, '_get_capabilities')
- @mock.patch.object(ilo_inspect, '_create_ports_if_not_exist')
- @mock.patch.object(ilo_inspect, '_get_macs_for_desired_ports')
- @mock.patch.object(ilo_inspect, '_get_essential_properties')
- @mock.patch.object(ilo_power.IloPower, 'get_power_state')
- @mock.patch.object(ilo_common, 'get_ilo_object')
- def test_inspect_hardware_port_desired_none(self, get_ilo_object_mock,
- power_mock,
- get_essential_mock,
- desired_macs_mock,
- create_port_mock,
- get_capabilities_mock):
- ilo_object_mock = get_ilo_object_mock.return_value
- properties = {'memory_mb': '512', 'local_gb': '10',
- 'cpus': '1', 'cpu_arch': 'x86_64'}
- macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
- result = {'properties': properties, 'macs': macs}
- macs_input_given = {'Port 1': 'aa:aa:aa:aa:aa:aa'}
- capabilities = ''
- get_capabilities_mock.return_value = capabilities
- desired_macs_mock.return_value = macs_input_given
- get_essential_mock.return_value = result
- power_mock.return_value = states.POWER_ON
- with task_manager.acquire(self.context, self.node.uuid,
- shared=False) as task:
- task.node.driver_info = {'inspect_ports': 'none'}
- task.driver.inspect.inspect_hardware(task)
- power_mock.assert_called_once_with(task)
- get_essential_mock.assert_called_once_with(task.node,
- ilo_object_mock)
- self.assertEqual(task.node.properties, result['properties'])
- create_port_mock.assert_not_called()
-
class TestInspectPrivateMethods(db_base.DbTestCase):
@@ -347,7 +214,7 @@ class TestInspectPrivateMethods(db_base.DbTestCase):
port_dict1 = {'address': 'aa:aa:aa:aa:aa:aa', 'node_id': node_id}
port_dict2 = {'address': 'bb:bb:bb:bb:bb:bb', 'node_id': node_id}
ilo_inspect._create_ports_if_not_exist(self.node, macs)
- instance_mock.assert_called_once()
+ instance_mock.assert_called_once_with()
self.assertTrue(log_mock.called)
db_obj.create_port.assert_any_call(port_dict1)
db_obj.create_port.assert_any_call(port_dict2)
@@ -361,7 +228,7 @@ class TestInspectPrivateMethods(db_base.DbTestCase):
dbapi_mock.create_port.side_effect = exception.MACAlreadyExists('f')
macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
ilo_inspect._create_ports_if_not_exist(self.node, macs)
- instance_mock.assert_called_once()
+ instance_mock.assert_called_once_with()
self.assertTrue(log_mock.called)
def test__get_essential_properties_ok(self):
@@ -523,44 +390,3 @@ class TestInspectPrivateMethods(db_base.DbTestCase):
set2 = set(cap_returned.split(','))
self.assertEqual(set1, set2)
self.assertIsInstance(cap_returned, str)
-
- def test__get_macs_for_desired_ports(self):
- driver_info_mock = {'inspect_ports': '1,2'}
- self.node.driver_info = driver_info_mock
- macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
- expected_macs = {'Port 1': 'aa:aa:aa:aa:aa:aa',
- 'Port 2': 'bb:bb:bb:bb:bb:bb'}
- macs_out = (
- ilo_inspect._get_macs_for_desired_ports(self.node,
- macs))
- self.assertEqual(expected_macs, macs_out)
-
- def test__get_macs_for_desired_ports_few(self):
- driver_info_mock = {'inspect_ports': '1,2'}
- self.node.driver_info = driver_info_mock
- macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb',
- 'Port 3': 'cc:cc:cc:cc:cc:cc', 'Port 4': 'dd:dd:dd:dd:dd:dd'}
- expected_macs = {'Port 1': 'aa:aa:aa:aa:aa:aa',
- 'Port 2': 'bb:bb:bb:bb:bb:bb'}
- macs_out = (
- ilo_inspect._get_macs_for_desired_ports(self.node,
- macs))
- self.assertEqual(expected_macs, macs_out)
-
- def test__get_macs_for_desired_ports_one(self):
- driver_info_mock = {'inspect_ports': '1'}
- self.node.driver_info = driver_info_mock
- macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
- expected_macs = {'Port 1': 'aa:aa:aa:aa:aa:aa'}
- macs_out = (
- ilo_inspect._get_macs_for_desired_ports(self.node,
- macs))
- self.assertEqual(expected_macs, macs_out)
-
- def test__get_macs_for_desired_ports_none(self):
- driver_info_mock = {}
- self.node.driver_info = driver_info_mock
- macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
- self.assertRaises(exception.HardwareInspectionFailure,
- ilo_inspect._get_macs_for_desired_ports,
- self.node, macs)
diff --git a/ironic/tests/drivers/ilo/test_power.py b/ironic/tests/drivers/ilo/test_power.py
index 2ed8d97fb..a7f87f450 100644
--- a/ironic/tests/drivers/ilo/test_power.py
+++ b/ironic/tests/drivers/ilo/test_power.py
@@ -137,12 +137,26 @@ class IloPowerInternalMethodsTestCase(db_base.DbTestCase):
get_ilo_object_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
+ task.node.provision_state = states.ACTIVE
task.node.instance_info['ilo_boot_iso'] = 'boot-iso'
ilo_power._attach_boot_iso(task)
setup_vmedia_mock.assert_called_once_with(task, 'boot-iso')
set_boot_device_mock.assert_called_once_with(task,
boot_devices.CDROM)
+ @mock.patch.object(manager_utils, 'node_set_boot_device')
+ @mock.patch.object(ilo_common, 'setup_vmedia_for_boot')
+ def test__attach_boot_iso_on_rebuild(self, setup_vmedia_mock,
+ set_boot_device_mock,
+ get_ilo_object_mock):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.node.provision_state = states.DEPLOYING
+ task.node.instance_info['ilo_boot_iso'] = 'boot-iso'
+ ilo_power._attach_boot_iso(task)
+ self.assertFalse(setup_vmedia_mock.called)
+ self.assertFalse(set_boot_device_mock.called)
+
class IloPowerTestCase(db_base.DbTestCase):
diff --git a/ironic/tests/drivers/ipxe_config.template b/ironic/tests/drivers/ipxe_config.template
new file mode 100644
index 000000000..bc803d4a7
--- /dev/null
+++ b/ironic/tests/drivers/ipxe_config.template
@@ -0,0 +1,21 @@
+#!ipxe
+
+dhcp
+
+goto deploy
+
+:deploy
+kernel http://1.2.3.4:1234/deploy_kernel selinux=0 disk=cciss/c0d0,sda,hda,vda iscsi_target_iqn=iqn-1be26c0b-03f2-4d2e-ae87-c02d7f33c123 deployment_id=1be26c0b-03f2-4d2e-ae87-c02d7f33c123 deployment_key=0123456789ABCDEFGHIJKLMNOPQRSTUV ironic_api_url=http://192.168.122.184:6385 troubleshoot=0 text test_param boot_option=netboot ip=${ip}:${next-server}:${gateway}:${netmask} BOOTIF=${mac} root_device=vendor=fake,size=123 ipa-api-url=http://192.168.122.184:6385 ipa-driver-name=pxe_ssh coreos.configdrive=0
+
+initrd http://1.2.3.4:1234/deploy_ramdisk
+boot
+
+:boot_partition
+kernel http://1.2.3.4:1234/kernel root={{ ROOT }} ro text test_param
+initrd http://1.2.3.4:1234/ramdisk
+boot
+
+:boot_whole_disk
+kernel chain.c32
+append mbr:{{ DISK_IDENTIFIER }}
+boot
diff --git a/ironic/tests/drivers/pxe_config.template b/ironic/tests/drivers/pxe_config.template
index d77f17e6c..936a9620e 100644
--- a/ironic/tests/drivers/pxe_config.template
+++ b/ironic/tests/drivers/pxe_config.template
@@ -2,7 +2,7 @@ default deploy
label deploy
kernel /tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/deploy_kernel
-append initrd=/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/deploy_ramdisk selinux=0 disk=cciss/c0d0,sda,hda,vda iscsi_target_iqn=iqn-1be26c0b-03f2-4d2e-ae87-c02d7f33c123 deployment_id=1be26c0b-03f2-4d2e-ae87-c02d7f33c123 deployment_key=0123456789ABCDEFGHIJKLMNOPQRSTUV ironic_api_url=http://192.168.122.184:6385 troubleshoot=0 text test_param boot_option=netboot root_device=vendor=fake,size=123 ipa-api-url=http://192.168.122.184:6385 ipa-driver-name=pxe_ssh boot_mode=bios
+append initrd=/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/deploy_ramdisk selinux=0 disk=cciss/c0d0,sda,hda,vda iscsi_target_iqn=iqn-1be26c0b-03f2-4d2e-ae87-c02d7f33c123 deployment_id=1be26c0b-03f2-4d2e-ae87-c02d7f33c123 deployment_key=0123456789ABCDEFGHIJKLMNOPQRSTUV ironic_api_url=http://192.168.122.184:6385 troubleshoot=0 text test_param boot_option=netboot root_device=vendor=fake,size=123 ipa-api-url=http://192.168.122.184:6385 ipa-driver-name=pxe_ssh boot_mode=bios coreos.configdrive=0
ipappend 3
diff --git a/ironic/tests/drivers/test_agent.py b/ironic/tests/drivers/test_agent.py
index 7e5ed3133..bb9675f3f 100644
--- a/ironic/tests/drivers/test_agent.py
+++ b/ironic/tests/drivers/test_agent.py
@@ -48,6 +48,7 @@ class TestAgentMethods(db_base.DbTestCase):
options = agent.build_agent_options(self.node)
self.assertEqual('api-url', options['ipa-api-url'])
self.assertEqual('fake_agent', options['ipa-driver-name'])
+ self.assertEqual(0, options['coreos.configdrive'])
@mock.patch.object(keystone, 'get_service_url')
def test_build_agent_options_keystone(self, get_url_mock):
@@ -57,6 +58,7 @@ class TestAgentMethods(db_base.DbTestCase):
options = agent.build_agent_options(self.node)
self.assertEqual('api-url', options['ipa-api-url'])
self.assertEqual('fake_agent', options['ipa-driver-name'])
+ self.assertEqual(0, options['coreos.configdrive'])
def test_build_agent_options_root_device_hints(self):
self.config(api_url='api-url', group='conductor')
@@ -162,6 +164,14 @@ class TestAgentDeploy(db_base.DbTestCase):
self.assertIn('driver_info.deploy_ramdisk', str(e))
self.assertIn('driver_info.deploy_kernel', str(e))
+ def test_validate_driver_info_manage_tftp_false(self):
+ self.config(manage_tftp=False, group='agent')
+ self.node.driver_info = {}
+ self.node.save()
+ with task_manager.acquire(
+ self.context, self.node['uuid'], shared=False) as task:
+ self.driver.validate(task)
+
def test_validate_instance_info_missing_params(self):
self.node.instance_info = {}
self.node.save()
@@ -183,6 +193,13 @@ class TestAgentDeploy(db_base.DbTestCase):
self.assertRaises(exception.MissingParameterValue,
self.driver.validate, task)
+ def test_validate_agent_fail_partition_image(self):
+ with task_manager.acquire(
+ self.context, self.node['uuid'], shared=False) as task:
+ task.node.driver_internal_info['is_whole_disk_image'] = False
+ self.assertRaises(exception.InvalidParameterValue,
+ self.driver.validate, task)
+
def test_validate_invalid_root_device_hints(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
@@ -190,6 +207,37 @@ class TestAgentDeploy(db_base.DbTestCase):
self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate, task)
+ @mock.patch.object(agent, '_cache_tftp_images')
+ @mock.patch.object(pxe_utils, 'create_pxe_config')
+ @mock.patch.object(agent, '_build_pxe_config_options')
+ @mock.patch.object(agent, '_get_tftp_image_info')
+ def test__prepare_pxe_boot(self, pxe_info_mock, options_mock,
+ create_mock, cache_mock):
+ with task_manager.acquire(
+ self.context, self.node['uuid'], shared=False) as task:
+ agent._prepare_pxe_boot(task)
+ pxe_info_mock.assert_called_once_with(task.node)
+ options_mock.assert_called_once_with(task.node, mock.ANY)
+ create_mock.assert_called_once_with(
+ task, mock.ANY, CONF.agent.agent_pxe_config_template)
+ cache_mock.assert_called_once_with(task.context, task.node,
+ mock.ANY)
+
+ @mock.patch.object(agent, '_cache_tftp_images')
+ @mock.patch.object(pxe_utils, 'create_pxe_config')
+ @mock.patch.object(agent, '_build_pxe_config_options')
+ @mock.patch.object(agent, '_get_tftp_image_info')
+ def test__prepare_pxe_boot_manage_tftp_false(
+ self, pxe_info_mock, options_mock, create_mock, cache_mock):
+ self.config(manage_tftp=False, group='agent')
+ with task_manager.acquire(
+ self.context, self.node['uuid'], shared=False) as task:
+ agent._prepare_pxe_boot(task)
+ self.assertFalse(pxe_info_mock.called)
+ self.assertFalse(options_mock.called)
+ self.assertFalse(create_mock.called)
+ self.assertFalse(cache_mock.called)
+
@mock.patch.object(dhcp_factory.DHCPFactory, 'update_dhcp')
@mock.patch('ironic.conductor.utils.node_set_boot_device')
@mock.patch('ironic.conductor.utils.node_power_action')
@@ -212,6 +260,36 @@ class TestAgentDeploy(db_base.DbTestCase):
power_mock.assert_called_once_with(task, states.POWER_OFF)
self.assertEqual(driver_return, states.DELETED)
+ @mock.patch.object(pxe_utils, 'clean_up_pxe_config')
+ @mock.patch.object(agent, 'AgentTFTPImageCache')
+ @mock.patch('ironic.common.utils.unlink_without_raise')
+ @mock.patch.object(agent, '_get_tftp_image_info')
+ def test__clean_up_pxe(self, info_mock, unlink_mock, cache_mock,
+ clean_mock):
+ info_mock.return_value = {'label': ['fake1', 'fake2']}
+ with task_manager.acquire(
+ self.context, self.node['uuid'], shared=False) as task:
+ agent._clean_up_pxe(task)
+ info_mock.assert_called_once_with(task.node)
+ unlink_mock.assert_called_once_with('fake2')
+ clean_mock.assert_called_once_with(task)
+
+ @mock.patch.object(pxe_utils, 'clean_up_pxe_config')
+ @mock.patch.object(agent.AgentTFTPImageCache, 'clean_up')
+ @mock.patch('ironic.common.utils.unlink_without_raise')
+ @mock.patch.object(agent, '_get_tftp_image_info')
+ def test__clean_up_pxe_manage_tftp_false(
+ self, info_mock, unlink_mock, cache_mock, clean_mock):
+ self.config(manage_tftp=False, group='agent')
+ info_mock.return_value = {'label': ['fake1', 'fake2']}
+ with task_manager.acquire(
+ self.context, self.node['uuid'], shared=False) as task:
+ agent._clean_up_pxe(task)
+ self.assertFalse(info_mock.called)
+ self.assertFalse(unlink_mock.called)
+ self.assertFalse(cache_mock.called)
+ self.assertFalse(clean_mock.called)
+
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.delete_cleaning_ports')
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.create_cleaning_ports')
@mock.patch('ironic.drivers.modules.agent._do_pxe_boot')
@@ -226,8 +304,8 @@ class TestAgentDeploy(db_base.DbTestCase):
self.driver.prepare_cleaning(task))
prepare_mock.assert_called_once_with(task)
boot_mock.assert_called_once_with(task, ports)
- create_mock.assert_called_once()
- delete_mock.assert_called_once()
+ create_mock.assert_called_once_with(task)
+ delete_mock.assert_called_once_with(task)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.delete_cleaning_ports')
@mock.patch('ironic.drivers.modules.agent._clean_up_pxe')
@@ -238,7 +316,7 @@ class TestAgentDeploy(db_base.DbTestCase):
self.assertIsNone(self.driver.tear_down_cleaning(task))
power_mock.assert_called_once_with(task, states.POWER_OFF)
cleanup_mock.assert_called_once_with(task)
- neutron_mock.assert_called_once()
+ neutron_mock.assert_called_once_with(task)
@mock.patch('ironic.drivers.modules.deploy_utils.agent_get_clean_steps')
def test_get_clean_steps(self, mock_get_clean_steps):
@@ -254,10 +332,11 @@ class TestAgentDeploy(db_base.DbTestCase):
@mock.patch('ironic.drivers.modules.deploy_utils.agent_get_clean_steps')
def test_get_clean_steps_config_priority(self, mock_get_clean_steps):
# Test that we can override the priority of get clean steps
- self.config(agent_erase_devices_priority=20, group='agent')
+ # Use 0 because it is an edge case (false-y) and used in devstack
+ self.config(agent_erase_devices_priority=0, group='agent')
mock_steps = [{'priority': 10, 'interface': 'deploy',
'step': 'erase_devices'}]
- expected_steps = [{'priority': 20, 'interface': 'deploy',
+ expected_steps = [{'priority': 0, 'interface': 'deploy',
'step': 'erase_devices'}]
mock_get_clean_steps.return_value = mock_steps
with task_manager.acquire(self.context, self.node.uuid) as task:
@@ -399,6 +478,7 @@ class TestAgentVendor(db_base.DbTestCase):
'deployment_ari_path': 'fake-node/deploy_ramdisk',
'ipa-api-url': 'api-url',
'ipa-driver-name': u'fake_agent',
+ 'coreos.configdrive': 0,
'pxe_append_params': 'foo bar'}
if root_device_hints:
diff --git a/ironic/tests/drivers/test_agent_base_vendor.py b/ironic/tests/drivers/test_agent_base_vendor.py
index 31596abc1..e672df85b 100644
--- a/ironic/tests/drivers/test_agent_base_vendor.py
+++ b/ironic/tests/drivers/test_agent_base_vendor.py
@@ -284,6 +284,29 @@ class TestBaseAgentVendor(db_base.DbTestCase):
'1be26c0b-03f2-4d2e-ae87-c02d7f33c123: Failed checking if deploy '
'is done. exception: LlamaException')
+ @mock.patch.object(agent_base_vendor.BaseAgentVendor, 'continue_deploy')
+ @mock.patch.object(agent_base_vendor.BaseAgentVendor, 'reboot_to_instance')
+ @mock.patch.object(agent_base_vendor.BaseAgentVendor,
+ '_notify_conductor_resume_clean')
+ def test_heartbeat_noops_maintenance_mode(self, ncrc_mock, rti_mock,
+ cd_mock):
+ """Ensures that heartbeat() no-ops for a maintenance node."""
+ kwargs = {
+ 'agent_url': 'http://127.0.0.1:9999/bar'
+ }
+ self.node.maintenance = True
+ for state in (states.AVAILABLE, states.DEPLOYWAIT, states.DEPLOYING,
+ states.CLEANING):
+ self.node.provision_state = state
+ self.node.save()
+ with task_manager.acquire(
+ self.context, self.node['uuid'], shared=True) as task:
+ self.passthru.heartbeat(task, **kwargs)
+
+ self.assertEqual(0, ncrc_mock.call_count)
+ self.assertEqual(0, rti_mock.call_count)
+ self.assertEqual(0, cd_mock.call_count)
+
def test_vendor_passthru_vendor_routes(self):
expected = ['heartbeat']
with task_manager.acquire(self.context, self.node.uuid,
@@ -336,7 +359,9 @@ class TestBaseAgentVendor(db_base.DbTestCase):
'command_status': 'SUCCESS', 'command_error': None}
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
- self.passthru.configure_local_boot(task, 'some-root-uuid')
+ task.node.driver_internal_info['is_whole_disk_image'] = False
+ self.passthru.configure_local_boot(task,
+ root_uuid='some-root-uuid')
try_set_boot_device_mock.assert_called_once_with(
task, boot_devices.DISK)
install_bootloader_mock.assert_called_once_with(
@@ -351,6 +376,7 @@ class TestBaseAgentVendor(db_base.DbTestCase):
'command_status': 'SUCCESS', 'command_error': None}
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
+ task.node.driver_internal_info['is_whole_disk_image'] = False
self.passthru.configure_local_boot(
task, root_uuid='some-root-uuid',
efi_system_part_uuid='efi-system-part-uuid')
@@ -360,6 +386,29 @@ class TestBaseAgentVendor(db_base.DbTestCase):
task.node, root_uuid='some-root-uuid',
efi_system_part_uuid='efi-system-part-uuid')
+ @mock.patch.object(deploy_utils, 'try_set_boot_device')
+ @mock.patch.object(agent_client.AgentClient, 'install_bootloader')
+ def test_configure_local_boot_whole_disk_image(
+ self, install_bootloader_mock, try_set_boot_device_mock):
+ with task_manager.acquire(self.context, self.node['uuid'],
+ shared=False) as task:
+ self.passthru.configure_local_boot(task)
+ self.assertFalse(install_bootloader_mock.called)
+ try_set_boot_device_mock.assert_called_once_with(
+ task, boot_devices.DISK)
+
+ @mock.patch.object(deploy_utils, 'try_set_boot_device')
+ @mock.patch.object(agent_client.AgentClient, 'install_bootloader')
+ def test_configure_local_boot_no_root_uuid(
+ self, install_bootloader_mock, try_set_boot_device_mock):
+ with task_manager.acquire(self.context, self.node['uuid'],
+ shared=False) as task:
+ task.node.driver_internal_info['is_whole_disk_image'] = False
+ self.passthru.configure_local_boot(task)
+ self.assertFalse(install_bootloader_mock.called)
+ try_set_boot_device_mock.assert_called_once_with(
+ task, boot_devices.DISK)
+
@mock.patch.object(agent_client.AgentClient, 'install_bootloader')
def test_configure_local_boot_boot_loader_install_fail(
self, install_bootloader_mock):
@@ -370,9 +419,10 @@ class TestBaseAgentVendor(db_base.DbTestCase):
self.node.save()
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
+ task.node.driver_internal_info['is_whole_disk_image'] = False
self.assertRaises(exception.InstanceDeployFailure,
self.passthru.configure_local_boot,
- task, 'some-root-uuid')
+ task, root_uuid='some-root-uuid')
install_bootloader_mock.assert_called_once_with(
task.node, root_uuid='some-root-uuid',
efi_system_part_uuid=None)
@@ -391,9 +441,10 @@ class TestBaseAgentVendor(db_base.DbTestCase):
self.node.save()
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
+ task.node.driver_internal_info['is_whole_disk_image'] = False
self.assertRaises(exception.InstanceDeployFailure,
self.passthru.configure_local_boot,
- task, 'some-root-uuid')
+ task, root_uuid='some-root-uuid')
install_bootloader_mock.assert_called_once_with(
task.node, root_uuid='some-root-uuid',
efi_system_part_uuid=None)
@@ -406,8 +457,20 @@ class TestBaseAgentVendor(db_base.DbTestCase):
'_notify_conductor_resume_clean')
@mock.patch.object(agent_client.AgentClient, 'get_commands_status')
def test_continue_cleaning(self, status_mock, notify_mock):
+ # Test a successful execute clean step on the agent
+ self.node.clean_step = {
+ 'priority': 10,
+ 'interface': 'deploy',
+ 'step': 'erase_devices',
+ 'reboot_requested': False
+ }
+ self.node.save()
status_mock.return_value = [{
'command_status': 'SUCCEEDED',
+ 'command_name': 'execute_clean_step',
+ 'command_result': {
+ 'clean_step': self.node.clean_step
+ }
}]
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
@@ -417,20 +480,54 @@ class TestBaseAgentVendor(db_base.DbTestCase):
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'_notify_conductor_resume_clean')
@mock.patch.object(agent_client.AgentClient, 'get_commands_status')
+ def test_continue_cleaning_old_command(self, status_mock, notify_mock):
+ # Test when a second execute_clean_step happens to the agent, but
+ # the new step hasn't started yet.
+ self.node.clean_step = {
+ 'priority': 10,
+ 'interface': 'deploy',
+ 'step': 'erase_devices',
+ 'reboot_requested': False
+ }
+ self.node.save()
+ status_mock.return_value = [{
+ 'command_status': 'SUCCEEDED',
+ 'command_name': 'execute_clean_step',
+ 'command_result': {
+ 'priority': 20,
+ 'interface': 'deploy',
+ 'step': 'update_firmware',
+ 'reboot_requested': False
+ }
+ }]
+ with task_manager.acquire(self.context, self.node['uuid'],
+ shared=False) as task:
+ self.passthru.continue_cleaning(task)
+ self.assertFalse(notify_mock.called)
+
+ @mock.patch.object(agent_base_vendor.BaseAgentVendor,
+ '_notify_conductor_resume_clean')
+ @mock.patch.object(agent_client.AgentClient, 'get_commands_status')
def test_continue_cleaning_running(self, status_mock, notify_mock):
+ # Test that no action is taken while a clean step is executing
status_mock.return_value = [{
'command_status': 'RUNNING',
+ 'command_name': 'execute_clean_step',
+ 'command_result': {}
}]
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.passthru.continue_cleaning(task)
- notify_mock.assert_not_called()
+ self.assertFalse(notify_mock.called)
@mock.patch('ironic.conductor.manager.cleaning_error_handler')
@mock.patch.object(agent_client.AgentClient, 'get_commands_status')
def test_continue_cleaning_fail(self, status_mock, error_mock):
+ # Test the a failure puts the node in CLEANFAIL
status_mock.return_value = [{
'command_status': 'FAILED',
+ 'command_name': 'execute_clean_step',
+ 'command_result': {}
}]
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
@@ -443,8 +540,11 @@ class TestBaseAgentVendor(db_base.DbTestCase):
@mock.patch.object(agent_client.AgentClient, 'get_commands_status')
def test_continue_cleaning_clean_version_mismatch(
self, status_mock, notify_mock, steps_mock):
+ # Test that cleaning is restarted if there is a version mismatch
status_mock.return_value = [{
'command_status': 'CLEAN_VERSION_MISMATCH',
+ 'command_name': 'execute_clean_step',
+ 'command_result': {}
}]
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
@@ -455,8 +555,11 @@ class TestBaseAgentVendor(db_base.DbTestCase):
@mock.patch('ironic.conductor.manager.cleaning_error_handler')
@mock.patch.object(agent_client.AgentClient, 'get_commands_status')
def test_continue_cleaning_unknown(self, status_mock, error_mock):
+ # Test that unknown commands are treated as failures
status_mock.return_value = [{
'command_status': 'UNKNOWN',
+ 'command_name': 'execute_clean_step',
+ 'command_result': {}
}]
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
diff --git a/ironic/tests/drivers/test_agent_client.py b/ironic/tests/drivers/test_agent_client.py
index 80a0f69af..bfe91f83a 100644
--- a/ironic/tests/drivers/test_agent_client.py
+++ b/ironic/tests/drivers/test_agent_client.py
@@ -16,6 +16,7 @@ import json
import mock
import requests
+import six
from ironic.common import exception
from ironic.drivers.modules import agent_client
@@ -23,12 +24,12 @@ from ironic.tests import base
class MockResponse(object):
- def __init__(self, data):
- self.data = data
- self.text = json.dumps(data)
+ def __init__(self, text):
+ assert isinstance(text, six.string_types)
+ self.text = text
def json(self):
- return self.data
+ return json.loads(self.text)
class MockNode(object):
@@ -75,7 +76,8 @@ class TestAgentClient(base.TestCase):
def test__command(self):
response_data = {'status': 'ok'}
- self.client.session.post.return_value = MockResponse(response_data)
+ response_text = json.dumps(response_data)
+ self.client.session.post.return_value = MockResponse(response_text)
method = 'standby.run_image'
image_info = {'image_id': 'test_image'}
params = {'image_info': image_info}
@@ -92,6 +94,26 @@ class TestAgentClient(base.TestCase):
headers=headers,
params={'wait': 'false'})
+ def test__command_fail_json(self):
+ response_text = 'this be not json matey!'
+ self.client.session.post.return_value = MockResponse(response_text)
+ method = 'standby.run_image'
+ image_info = {'image_id': 'test_image'}
+ params = {'image_info': image_info}
+
+ url = self.client._get_command_url(self.node)
+ body = self.client._get_command_body(method, params)
+ headers = {'Content-Type': 'application/json'}
+
+ self.assertRaises(exception.IronicException,
+ self.client._command,
+ self.node, method, params)
+ self.client.session.post.assert_called_once_with(
+ url,
+ data=body,
+ headers=headers,
+ params={'wait': 'false'})
+
def test_get_commands_status(self):
with mock.patch.object(self.client.session, 'get') as mock_get:
res = mock.Mock()
diff --git a/ironic/tests/drivers/test_deploy_utils.py b/ironic/tests/drivers/test_deploy_utils.py
index dfbcc5532..fdb6653a3 100644
--- a/ironic/tests/drivers/test_deploy_utils.py
+++ b/ironic/tests/drivers/test_deploy_utils.py
@@ -37,6 +37,7 @@ from ironic.common import states
from ironic.common import utils as common_utils
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
+from ironic.drivers.modules import agent_client
from ironic.drivers.modules import deploy_utils as utils
from ironic.drivers.modules import image_cache
from ironic.tests import base as tests_base
@@ -180,7 +181,7 @@ image=kernel
image=chain.c32
label=boot_whole_disk
- append mbr:{{ DISK_IDENTIFIER }}
+ append="mbr:{{ DISK_IDENTIFIER }}"
"""
_UEFI_PXECONF_BOOT_PARTITION = """
@@ -198,7 +199,7 @@ image=kernel
image=chain.c32
label=boot_whole_disk
- append mbr:{{ DISK_IDENTIFIER }}
+ append="mbr:{{ DISK_IDENTIFIER }}"
"""
_UEFI_PXECONF_BOOT_WHOLE_DISK = """
@@ -216,7 +217,7 @@ image=kernel
image=chain.c32
label=boot_whole_disk
- append mbr:0x12345678
+ append="mbr:0x12345678"
"""
@@ -1019,13 +1020,18 @@ class WorkOnDiskTestCase(tests_base.TestCase):
self.swap_part = '/dev/fake-part1'
self.root_part = '/dev/fake-part2'
- self.mock_ibd = mock.patch.object(utils, 'is_block_device').start()
- self.mock_mp = mock.patch.object(utils, 'make_partitions').start()
- self.addCleanup(self.mock_ibd.stop)
- self.addCleanup(self.mock_mp.stop)
- self.mock_remlbl = mock.patch.object(utils,
- 'destroy_disk_metadata').start()
- self.addCleanup(self.mock_remlbl.stop)
+ self.mock_ibd_obj = mock.patch.object(
+ utils, 'is_block_device', autospec=True)
+ self.mock_ibd = self.mock_ibd_obj.start()
+ self.addCleanup(self.mock_ibd_obj.stop)
+ self.mock_mp_obj = mock.patch.object(
+ utils, 'make_partitions', autospec=True)
+ self.mock_mp = self.mock_mp_obj.start()
+ self.addCleanup(self.mock_mp_obj.stop)
+ self.mock_remlbl_obj = mock.patch.object(
+ utils, 'destroy_disk_metadata', autospec=True)
+ self.mock_remlbl = self.mock_remlbl_obj.start()
+ self.addCleanup(self.mock_remlbl_obj.stop)
self.mock_mp.return_value = {'swap': self.swap_part,
'root': self.root_part}
@@ -1043,7 +1049,7 @@ class WorkOnDiskTestCase(tests_base.TestCase):
boot_mode="bios")
def test_no_swap_partition(self):
- self.mock_ibd.side_effect = [True, False]
+ self.mock_ibd.side_effect = iter([True, False])
calls = [mock.call(self.root_part),
mock.call(self.swap_part)]
self.assertRaises(exception.InstanceDeployFailure,
@@ -1067,7 +1073,7 @@ class WorkOnDiskTestCase(tests_base.TestCase):
self.mock_mp.return_value = {'ephemeral': ephemeral_part,
'swap': swap_part,
'root': root_part}
- self.mock_ibd.side_effect = [True, True, False]
+ self.mock_ibd.side_effect = iter([True, True, False])
calls = [mock.call(root_part),
mock.call(swap_part),
mock.call(ephemeral_part)]
@@ -1095,7 +1101,7 @@ class WorkOnDiskTestCase(tests_base.TestCase):
self.mock_mp.return_value = {'swap': swap_part,
'configdrive': configdrive_part,
'root': root_part}
- self.mock_ibd.side_effect = [True, True, False]
+ self.mock_ibd.side_effect = iter([True, True, False])
calls = [mock.call(root_part),
mock.call(swap_part),
mock.call(configdrive_part)]
@@ -1453,7 +1459,7 @@ class ParseInstanceInfoCapabilitiesTestCase(tests_base.TestCase):
utils.parse_instance_info_capabilities, self.node)
def test_is_secure_boot_requested_true(self):
- self.node.instance_info = {'capabilities': {"secure_boot": "true"}}
+ self.node.instance_info = {'capabilities': {"secure_boot": "tRue"}}
self.assertTrue(utils.is_secure_boot_requested(self.node))
def test_is_secure_boot_requested_false(self):
@@ -1464,6 +1470,27 @@ class ParseInstanceInfoCapabilitiesTestCase(tests_base.TestCase):
self.node.instance_info = {'capabilities': {"secure_boot": "invalid"}}
self.assertFalse(utils.is_secure_boot_requested(self.node))
+ def test_get_boot_mode_for_deploy_using_capabilities(self):
+ properties = {'capabilities': 'boot_mode:uefi,cap2:value2'}
+ self.node.properties = properties
+
+ result = utils.get_boot_mode_for_deploy(self.node)
+ self.assertEqual('uefi', result)
+
+ def test_get_boot_mode_for_deploy_using_instance_info_cap(self):
+ instance_info = {'capabilities': {'secure_boot': 'True'}}
+ self.node.instance_info = instance_info
+
+ result = utils.get_boot_mode_for_deploy(self.node)
+ self.assertEqual('uefi', result)
+
+ def test_get_boot_mode_for_deploy_using_instance_info(self):
+ instance_info = {'deploy_boot_mode': 'bios'}
+ self.node.instance_info = instance_info
+
+ result = utils.get_boot_mode_for_deploy(self.node)
+ self.assertEqual('bios', result)
+
class TrySetBootDeviceTestCase(db_base.DbTestCase):
@@ -1526,7 +1553,9 @@ class AgentCleaningTestCase(db_base.DbTestCase):
def setUp(self):
super(AgentCleaningTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver='fake_agent')
- n = {'driver': 'fake_agent'}
+ n = {'driver': 'fake_agent',
+ 'driver_internal_info': {'agent_url': 'http://127.0.0.1:9999'}}
+
self.node = obj_utils.create_test_node(self.context, **n)
self.ports = [obj_utils.create_test_port(self.context,
node_id=self.node.id)]
@@ -1551,39 +1580,34 @@ class AgentCleaningTestCase(db_base.DbTestCase):
}
@mock.patch('ironic.objects.Port.list_by_node_id')
- @mock.patch('ironic.drivers.modules.deploy_utils._get_agent_client')
- def test_get_clean_steps(self, get_client_mock, list_ports_mock):
- client_mock = mock.Mock()
- client_mock.get_clean_steps.return_value = {
+ @mock.patch.object(agent_client.AgentClient, 'get_clean_steps')
+ def test_get_clean_steps(self, client_mock, list_ports_mock):
+ client_mock.return_value = {
'command_result': self.clean_steps}
- get_client_mock.return_value = client_mock
list_ports_mock.return_value = self.ports
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
response = utils.agent_get_clean_steps(task)
- client_mock.get_clean_steps.assert_called_once_with(task.node,
- self.ports)
+ client_mock.assert_called_once_with(task.node, self.ports)
self.assertEqual('1', task.node.driver_internal_info[
'hardware_manager_version'])
# Since steps are returned in dicts, they have non-deterministic
# ordering
self.assertEqual(2, len(response))
- self.assertTrue(self.clean_steps['clean_steps'][
- 'GenericHardwareManager'][0] in response)
- self.assertTrue(self.clean_steps['clean_steps'][
- 'SpecificHardwareManager'][0] in response)
+ self.assertIn(self.clean_steps['clean_steps'][
+ 'GenericHardwareManager'][0], response)
+ self.assertIn(self.clean_steps['clean_steps'][
+ 'SpecificHardwareManager'][0], response)
@mock.patch('ironic.objects.Port.list_by_node_id')
- @mock.patch('ironic.drivers.modules.deploy_utils._get_agent_client')
- def test_get_clean_steps_missing_steps(self, get_client_mock,
+ @mock.patch.object(agent_client.AgentClient, 'get_clean_steps')
+ def test_get_clean_steps_missing_steps(self, client_mock,
list_ports_mock):
- client_mock = mock.Mock()
del self.clean_steps['clean_steps']
- client_mock.get_clean_steps.return_value = {
+ client_mock.return_value = {
'command_result': self.clean_steps}
- get_client_mock.return_value = client_mock
list_ports_mock.return_value = self.ports
with task_manager.acquire(
@@ -1591,16 +1615,13 @@ class AgentCleaningTestCase(db_base.DbTestCase):
self.assertRaises(exception.NodeCleaningFailure,
utils.agent_get_clean_steps,
task)
- client_mock.get_clean_steps.assert_called_once_with(task.node,
- self.ports)
+ client_mock.assert_called_once_with(task.node, self.ports)
@mock.patch('ironic.objects.Port.list_by_node_id')
- @mock.patch('ironic.drivers.modules.deploy_utils._get_agent_client')
- def test_execute_clean_step(self, get_client_mock, list_ports_mock):
- client_mock = mock.Mock()
- client_mock.execute_clean_step.return_value = {
+ @mock.patch.object(agent_client.AgentClient, 'execute_clean_step')
+ def test_execute_clean_step(self, client_mock, list_ports_mock):
+ client_mock.return_value = {
'command_status': 'SUCCEEDED'}
- get_client_mock.return_value = client_mock
list_ports_mock.return_value = self.ports
with task_manager.acquire(
@@ -1611,13 +1632,10 @@ class AgentCleaningTestCase(db_base.DbTestCase):
self.assertEqual(states.CLEANING, response)
@mock.patch('ironic.objects.Port.list_by_node_id')
- @mock.patch('ironic.drivers.modules.deploy_utils._get_agent_client')
- def test_execute_clean_step_running(self, get_client_mock,
- list_ports_mock):
- client_mock = mock.Mock()
- client_mock.execute_clean_step.return_value = {
+ @mock.patch.object(agent_client.AgentClient, 'execute_clean_step')
+ def test_execute_clean_step_running(self, client_mock, list_ports_mock):
+ client_mock.return_value = {
'command_status': 'RUNNING'}
- get_client_mock.return_value = client_mock
list_ports_mock.return_value = self.ports
with task_manager.acquire(
@@ -1628,13 +1646,11 @@ class AgentCleaningTestCase(db_base.DbTestCase):
self.assertEqual(states.CLEANING, response)
@mock.patch('ironic.objects.Port.list_by_node_id')
- @mock.patch('ironic.drivers.modules.deploy_utils._get_agent_client')
- def test_execute_clean_step_version_mismatch(self, get_client_mock,
+ @mock.patch.object(agent_client.AgentClient, 'execute_clean_step')
+ def test_execute_clean_step_version_mismatch(self, client_mock,
list_ports_mock):
- client_mock = mock.Mock()
- client_mock.execute_clean_step.return_value = {
+ client_mock.return_value = {
'command_status': 'RUNNING'}
- get_client_mock.return_value = client_mock
list_ports_mock.return_value = self.ports
with task_manager.acquire(
diff --git a/ironic/tests/drivers/test_ipmitool.py b/ironic/tests/drivers/test_ipmitool.py
index a27060d0f..2e0097fba 100644
--- a/ironic/tests/drivers/test_ipmitool.py
+++ b/ironic/tests/drivers/test_ipmitool.py
@@ -840,6 +840,86 @@ class IPMIToolPrivateMethodTestCase(db_base.DbTestCase):
mock_support.assert_called_once_with('timing')
mock_pwf.assert_called_once_with(self.info['password'])
mock_exec.assert_called_once_with(*args)
+ self.assertEqual(1, mock_exec.call_count)
+
+ @mock.patch.object(ipmi, '_is_option_supported', autospec=True)
+ @mock.patch.object(utils, 'execute', autospec=True)
+ def test__exec_ipmitool_exception_retry(self,
+ mock_exec, mock_support, mock_sleep):
+
+ ipmi.LAST_CMD_TIME = {}
+ mock_support.return_value = False
+ mock_exec.side_effect = iter([
+ processutils.ProcessExecutionError(
+ stderr="insufficient resources for session"
+ ),
+ (None, None)
+ ])
+
+ # Directly set the configuration values such that
+ # the logic will cause _exec_ipmitool to retry twice.
+ self.config(min_command_interval=1, group='ipmi')
+ self.config(retry_timeout=2, group='ipmi')
+
+ ipmi._exec_ipmitool(self.info, 'A B C')
+
+ mock_support.assert_called_once_with('timing')
+ self.assertEqual(2, mock_exec.call_count)
+
+ @mock.patch.object(ipmi, '_is_option_supported', autospec=True)
+ @mock.patch.object(utils, 'execute', autospec=True)
+ def test__exec_ipmitool_exception_retries_exceeded(self,
+ mock_exec, mock_support, mock_sleep):
+
+ ipmi.LAST_CMD_TIME = {}
+ mock_support.return_value = False
+
+ mock_exec.side_effect = processutils.ProcessExecutionError(
+ stderr="insufficient resources for session"
+ )
+
+ # Directly set the configuration values such that
+ # the logic will cause _exec_ipmitool to timeout.
+ self.config(min_command_interval=1, group='ipmi')
+ self.config(retry_timeout=1, group='ipmi')
+
+ self.assertRaises(processutils.ProcessExecutionError,
+ ipmi._exec_ipmitool,
+ self.info, 'A B C')
+ mock_support.assert_called_once_with('timing')
+ self.assertEqual(1, mock_exec.call_count)
+
+ @mock.patch.object(ipmi, '_is_option_supported', autospec=True)
+ @mock.patch.object(utils, 'execute', autospec=True)
+ def test__exec_ipmitool_exception_non_retryable_failure(self,
+ mock_exec, mock_support, mock_sleep):
+
+ ipmi.LAST_CMD_TIME = {}
+ mock_support.return_value = False
+
+ # Return a retryable error, then an error that cannot
+ # be retried thus resulting in a single retry
+ # attempt by _exec_ipmitool.
+ mock_exec.side_effect = iter([
+ processutils.ProcessExecutionError(
+ stderr="insufficient resources for session"
+ ),
+ processutils.ProcessExecutionError(
+ stderr="Unknown"
+ ),
+ ])
+
+ # Directly set the configuration values such that
+ # the logic will cause _exec_ipmitool to retry up
+ # to 3 times.
+ self.config(min_command_interval=1, group='ipmi')
+ self.config(retry_timeout=3, group='ipmi')
+
+ self.assertRaises(processutils.ProcessExecutionError,
+ ipmi._exec_ipmitool,
+ self.info, 'A B C')
+ mock_support.assert_called_once_with('timing')
+ self.assertEqual(2, mock_exec.call_count)
@mock.patch.object(ipmi, '_exec_ipmitool', autospec=True)
def test__power_status_on(self, mock_exec, mock_sleep):
diff --git a/ironic/tests/drivers/test_iscsi_deploy.py b/ironic/tests/drivers/test_iscsi_deploy.py
index 01b4c853e..4d3803542 100644
--- a/ironic/tests/drivers/test_iscsi_deploy.py
+++ b/ironic/tests/drivers/test_iscsi_deploy.py
@@ -429,6 +429,7 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
'ironic_api_url': api_url,
'boot_option': expected_boot_option,
'boot_mode': expected_boot_mode,
+ 'coreos.configdrive': 0,
}
if expected_root_device:
@@ -486,6 +487,25 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
self._test_build_deploy_ramdisk_options(mock_alnum, fake_api_url,
expected_boot_option=expected)
+ @mock.patch.object(keystone, 'get_service_url', autospec=True)
+ @mock.patch.object(utils, 'random_alnum', autospec=True)
+ def test_build_deploy_ramdisk_options_whole_disk_image(self, mock_alnum,
+ mock_get_url):
+        """Tests the boot_option hack applied for whole disk images.
+
+        This hack is in place to fix bug #1441556.
+        """
+ self.node.instance_info = {'capabilities': '{"boot_option": "local"}'}
+ dii = self.node.driver_internal_info
+ dii['is_whole_disk_image'] = True
+ self.node.driver_internal_info = dii
+ self.node.save()
+ expected = 'netboot'
+ fake_api_url = 'http://127.0.0.1:6385'
+ self.config(api_url=fake_api_url, group='conductor')
+ self._test_build_deploy_ramdisk_options(mock_alnum, fake_api_url,
+ expected_boot_option=expected)
+
def test_get_boot_option(self):
self.node.instance_info = {'capabilities': '{"boot_option": "local"}'}
result = iscsi_deploy.get_boot_option(self.node)
@@ -777,3 +797,118 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
self.assertEqual(states.DEPLOYFAIL, self.node.provision_state)
self.assertEqual(states.ACTIVE, self.node.target_provision_state)
self.assertIsNotNone(self.node.last_error)
+
+ def test_validate_pass_bootloader_info_input(self):
+ params = {'key': 'some-random-key', 'address': '1.2.3.4',
+ 'error': '', 'status': 'SUCCEEDED'}
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.instance_info['deploy_key'] = 'some-random-key'
+ # Assert that the method doesn't raise
+ iscsi_deploy.validate_pass_bootloader_info_input(task, params)
+
+ def test_validate_pass_bootloader_info_missing_status(self):
+ params = {'key': 'some-random-key', 'address': '1.2.3.4'}
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ self.assertRaises(exception.MissingParameterValue,
+ iscsi_deploy.validate_pass_bootloader_info_input,
+ task, params)
+
+ def test_validate_pass_bootloader_info_missing_key(self):
+ params = {'status': 'SUCCEEDED', 'address': '1.2.3.4'}
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ self.assertRaises(exception.MissingParameterValue,
+ iscsi_deploy.validate_pass_bootloader_info_input,
+ task, params)
+
+ def test_validate_pass_bootloader_info_missing_address(self):
+ params = {'status': 'SUCCEEDED', 'key': 'some-random-key'}
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ self.assertRaises(exception.MissingParameterValue,
+ iscsi_deploy.validate_pass_bootloader_info_input,
+ task, params)
+
+ def test_validate_pass_bootloader_info_input_invalid_key(self):
+ params = {'key': 'some-other-key', 'address': '1.2.3.4',
+ 'status': 'SUCCEEDED'}
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.instance_info['deploy_key'] = 'some-random-key'
+ self.assertRaises(exception.InvalidParameterValue,
+ iscsi_deploy.validate_pass_bootloader_info_input,
+ task, params)
+
+ def test_validate_bootloader_install_status(self):
+ kwargs = {'key': 'abcdef', 'status': 'SUCCEEDED', 'error': ''}
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.instance_info['deploy_key'] = 'abcdef'
+ # Nothing much to assert except that it shouldn't raise.
+ iscsi_deploy.validate_bootloader_install_status(task, kwargs)
+
+ @mock.patch.object(deploy_utils, 'set_failed_state', autospec=True)
+ def test_validate_bootloader_install_status_install_failed(
+ self, set_fail_state_mock):
+ kwargs = {'key': 'abcdef', 'status': 'FAILED', 'error': 'some-error'}
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.provision_state = states.DEPLOYING
+ task.node.target_provision_state = states.ACTIVE
+ task.node.instance_info['deploy_key'] = 'abcdef'
+ self.assertRaises(exception.InstanceDeployFailure,
+ iscsi_deploy.validate_bootloader_install_status,
+ task, kwargs)
+ set_fail_state_mock.assert_called_once_with(task, mock.ANY)
+
+ @mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
+ autospec=True)
+ def test_finish_deploy(self, notify_mock):
+ self.node.provision_state = states.DEPLOYING
+ self.node.target_provision_state = states.ACTIVE
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ iscsi_deploy.finish_deploy(task, '1.2.3.4')
+ notify_mock.assert_called_once_with('1.2.3.4')
+ self.assertEqual(states.ACTIVE, task.node.provision_state)
+ self.assertEqual(states.NOSTATE, task.node.target_provision_state)
+
+ @mock.patch.object(deploy_utils, 'set_failed_state', autospec=True)
+ @mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
+ autospec=True)
+ def test_finish_deploy_notify_fails(self, notify_mock,
+ set_fail_state_mock):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ notify_mock.side_effect = RuntimeError()
+ self.assertRaises(exception.InstanceDeployFailure,
+ iscsi_deploy.finish_deploy, task, '1.2.3.4')
+ set_fail_state_mock.assert_called_once_with(task, mock.ANY)
+
+ @mock.patch.object(manager_utils, 'node_power_action')
+ @mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
+ autospec=True)
+ def test_finish_deploy_ssh_with_local_boot(self, notify_mock,
+ node_power_mock):
+ instance_info = dict(INST_INFO_DICT)
+ instance_info['capabilities'] = {'boot_option': 'local'}
+ n = {
+ 'uuid': uuidutils.generate_uuid(),
+ 'driver': 'fake_ssh',
+ 'instance_info': instance_info,
+ 'provision_state': states.DEPLOYING,
+ 'target_provision_state': states.ACTIVE,
+ }
+ mgr_utils.mock_the_extension_manager(driver="fake_ssh")
+ node = obj_utils.create_test_node(self.context, **n)
+
+ with task_manager.acquire(self.context, node.uuid,
+ shared=False) as task:
+ iscsi_deploy.finish_deploy(task, '1.2.3.4')
+ notify_mock.assert_called_once_with('1.2.3.4')
+ self.assertEqual(states.ACTIVE, task.node.provision_state)
+ self.assertEqual(states.NOSTATE, task.node.target_provision_state)
+ node_power_mock.assert_called_once_with(task, states.REBOOT)
diff --git a/ironic/tests/drivers/test_pxe.py b/ironic/tests/drivers/test_pxe.py
index a7a211f75..0c6168c86 100644
--- a/ironic/tests/drivers/test_pxe.py
+++ b/ironic/tests/drivers/test_pxe.py
@@ -198,7 +198,8 @@ class PXEPrivateMethodsTestCase(db_base.DbTestCase):
@mock.patch.object(iscsi_deploy, 'build_deploy_ramdisk_options')
@mock.patch.object(pxe_utils, '_build_pxe_config')
def _test_build_pxe_config_options(self, build_pxe_mock, deploy_opts_mock,
- ipxe_enabled=False):
+ whle_dsk_img=False,
+ ipxe_enabled=False):
self.config(pxe_append_params='test_param', group='pxe')
# NOTE: right '/' should be removed from url string
self.config(api_url='http://192.168.122.184:6385', group='conductor')
@@ -212,9 +213,11 @@ class PXEPrivateMethodsTestCase(db_base.DbTestCase):
'ironic_api_url': 'fake-api-url',
'boot_option': 'netboot',
'boot_mode': 'bios',
+ 'coreos.configdrive': 0,
}
deploy_opts_mock.return_value = fake_deploy_opts
+ self.node.driver_internal_info['is_whole_disk_image'] = whle_dsk_img
tftp_server = CONF.pxe.tftp_server
@@ -241,6 +244,10 @@ class PXEPrivateMethodsTestCase(db_base.DbTestCase):
'ramdisk')
root_dir = CONF.pxe.tftp_root
+ if whle_dsk_img:
+ ramdisk = 'no_ramdisk'
+ kernel = 'no_kernel'
+
expected_options = {
'ari_path': ramdisk,
'deployment_ari_path': deploy_ramdisk,
@@ -278,30 +285,36 @@ class PXEPrivateMethodsTestCase(db_base.DbTestCase):
self.assertEqual(expected_options, options)
def test__build_pxe_config_options(self):
- self._test_build_pxe_config_options(ipxe_enabled=False)
+ self._test_build_pxe_config_options(whle_dsk_img=True,
+ ipxe_enabled=False)
def test__build_pxe_config_options_ipxe(self):
- self._test_build_pxe_config_options(ipxe_enabled=True)
+ self._test_build_pxe_config_options(whle_dsk_img=True,
+ ipxe_enabled=True)
def test__build_pxe_config_options_without_is_whole_disk_image(self):
del self.node.driver_internal_info['is_whole_disk_image']
self.node.save()
- self._test_build_pxe_config_options(ipxe_enabled=False)
+ self._test_build_pxe_config_options(whle_dsk_img=False,
+ ipxe_enabled=False)
@mock.patch.object(iscsi_deploy, 'build_deploy_ramdisk_options')
@mock.patch.object(pxe_utils, '_build_pxe_config')
- def _test_build_pxe_config_options_whole_disk_image(self, build_pxe_mock,
- deploy_opts_mock, ipxe_enabled=False):
+ def test__build_pxe_config_options_whole_disk_image(self,
+ build_pxe_mock,
+ deploy_opts_mock,
+ ipxe_enabled=False):
self.config(pxe_append_params='test_param', group='pxe')
# NOTE: right '/' should be removed from url string
- self.config(api_url='http://192.168.122.184:6385/', group='conductor')
+ self.config(api_url='http://192.168.122.184:6385', group='conductor')
self.config(disk_devices='sda', group='pxe')
fake_deploy_opts = {'iscsi_target_iqn': 'fake-iqn',
'deployment_id': 'fake-deploy-id',
'deployment_key': 'fake-deploy-key',
'disk': 'fake-disk',
- 'ironic_api_url': 'fake-api-url'}
+ 'ironic_api_url': 'fake-api-url',
+ 'coreos.configdrive': 0}
deploy_opts_mock.return_value = fake_deploy_opts
@@ -329,6 +342,10 @@ class PXEPrivateMethodsTestCase(db_base.DbTestCase):
'pxe_append_params': 'test_param',
'deployment_aki_path': deploy_kernel,
'tftp_server': tftp_server,
+ 'aki_path': 'no_kernel',
+ 'ari_path': 'no_ramdisk',
+ 'ipa-api-url': CONF.conductor.api_url,
+ 'ipa-driver-name': self.node.driver,
}
expected_options.update(fake_deploy_opts)
@@ -401,6 +418,48 @@ class PXEPrivateMethodsTestCase(db_base.DbTestCase):
fake_pxe_info.values(),
True)
+ @mock.patch.object(pxe.LOG, 'error')
+ def test_validate_boot_option_for_uefi_exc(self, mock_log):
+ properties = {'capabilities': 'boot_mode:uefi'}
+ instance_info = {"boot_option": "netboot"}
+ self.node.properties = properties
+ self.node.instance_info['capabilities'] = instance_info
+ self.node.driver_internal_info['is_whole_disk_image'] = True
+ self.assertRaises(exception.InvalidParameterValue,
+ pxe.validate_boot_option_for_uefi,
+ self.node)
+ self.assertTrue(mock_log.called)
+
+ @mock.patch.object(pxe.LOG, 'error')
+ def test_validate_boot_option_for_uefi_noexc_one(self, mock_log):
+ properties = {'capabilities': 'boot_mode:uefi'}
+ instance_info = {"boot_option": "local"}
+ self.node.properties = properties
+ self.node.instance_info['capabilities'] = instance_info
+ self.node.driver_internal_info['is_whole_disk_image'] = True
+ pxe.validate_boot_option_for_uefi(self.node)
+ self.assertFalse(mock_log.called)
+
+ @mock.patch.object(pxe.LOG, 'error')
+ def test_validate_boot_option_for_uefi_noexc_two(self, mock_log):
+ properties = {'capabilities': 'boot_mode:bios'}
+ instance_info = {"boot_option": "local"}
+ self.node.properties = properties
+ self.node.instance_info['capabilities'] = instance_info
+ self.node.driver_internal_info['is_whole_disk_image'] = True
+ pxe.validate_boot_option_for_uefi(self.node)
+ self.assertFalse(mock_log.called)
+
+ @mock.patch.object(pxe.LOG, 'error')
+ def test_validate_boot_option_for_uefi_noexc_three(self, mock_log):
+ properties = {'capabilities': 'boot_mode:uefi'}
+ instance_info = {"boot_option": "local"}
+ self.node.properties = properties
+ self.node.instance_info['capabilities'] = instance_info
+ self.node.driver_internal_info['is_whole_disk_image'] = False
+ pxe.validate_boot_option_for_uefi(self.node)
+ self.assertFalse(mock_log.called)
+
class PXEDriverTestCase(db_base.DbTestCase):
@@ -484,6 +543,17 @@ class PXEDriverTestCase(db_base.DbTestCase):
self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate, task)
+ def test_validate_fail_invalid_config_uefi_whole_disk_image(self):
+ properties = {'capabilities': 'boot_mode:uefi,boot_option:netboot'}
+ instance_info = {"boot_option": "netboot"}
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.node.properties = properties
+ task.node.instance_info['capabilities'] = instance_info
+ task.node.driver_internal_info['is_whole_disk_image'] = True
+ self.assertRaises(exception.InvalidParameterValue,
+ task.driver.deploy.validate, task)
+
@mock.patch.object(base_image_service.BaseImageService, '_show')
def test_validate_fail_invalid_boot_option(self, mock_glance):
properties = {'capabilities': 'boot_option:foo,dog:wuff'}
@@ -611,6 +681,33 @@ class PXEDriverTestCase(db_base.DbTestCase):
address='123456', iqn='aaa-bbb',
key='fake-12345')
+ @mock.patch.object(iscsi_deploy, 'validate_pass_bootloader_info_input',
+ autospec=True)
+ def test_vendor_passthru_pass_bootloader_install_info(self,
+ validate_mock):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ kwargs = {'address': '1.2.3.4', 'key': 'fake-key',
+ 'status': 'SUCCEEDED', 'error': ''}
+ task.driver.vendor.validate(
+ task, method='pass_bootloader_install_info', **kwargs)
+ validate_mock.assert_called_once_with(task, kwargs)
+
+ @mock.patch.object(iscsi_deploy, 'validate_bootloader_install_status',
+ autospec=True)
+ @mock.patch.object(iscsi_deploy, 'finish_deploy', autospec=True)
+ def test_pass_bootloader_install_info(self, finish_deploy_mock,
+ validate_input_mock):
+ kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
+ self.node.provision_state = states.DEPLOYWAIT
+ self.node.target_provision_state = states.ACTIVE
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.driver.vendor.pass_bootloader_install_info(task, **kwargs)
+ finish_deploy_mock.assert_called_once_with(task, '123456')
+ validate_input_mock.assert_called_once_with(task, kwargs)
+
@mock.patch.object(pxe, '_get_image_info')
@mock.patch.object(pxe, '_cache_ramdisk_kernel')
@mock.patch.object(pxe, '_build_pxe_config_options')
@@ -831,7 +928,7 @@ class PXEDriverTestCase(db_base.DbTestCase):
@mock.patch.object(pxe_utils, 'clean_up_pxe_config')
@mock.patch.object(manager_utils, 'node_set_boot_device')
- @mock.patch.object(deploy_utils, 'notify_deploy_complete')
+ @mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed')
@mock.patch.object(deploy_utils, 'switch_pxe_config')
@mock.patch.object(iscsi_deploy, 'InstanceImageCache')
@mock.patch.object(deploy_utils, 'deploy_partition_image')
@@ -862,8 +959,6 @@ class PXEDriverTestCase(db_base.DbTestCase):
task, address='123456', iqn='aaa-bbb', key='fake-56789')
self.node.refresh()
- self.assertEqual(states.ACTIVE, self.node.provision_state)
- self.assertEqual(states.NOSTATE, self.node.target_provision_state)
self.assertEqual(states.POWER_ON, self.node.power_state)
self.assertIn('root_uuid_or_disk_id', self.node.driver_internal_info)
self.assertIsNone(self.node.last_error)
@@ -887,7 +982,7 @@ class PXEDriverTestCase(db_base.DbTestCase):
@mock.patch.object(pxe_utils, 'clean_up_pxe_config')
@mock.patch.object(manager_utils, 'node_set_boot_device')
- @mock.patch.object(deploy_utils, 'notify_deploy_complete')
+ @mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed')
@mock.patch.object(deploy_utils, 'switch_pxe_config')
@mock.patch.object(iscsi_deploy, 'InstanceImageCache')
@mock.patch.object(deploy_utils, 'deploy_disk_image')
@@ -923,8 +1018,6 @@ class PXEDriverTestCase(db_base.DbTestCase):
key='fake-56789')
self.node.refresh()
- self.assertEqual(states.ACTIVE, self.node.provision_state)
- self.assertEqual(states.NOSTATE, self.node.target_provision_state)
self.assertEqual(states.POWER_ON, self.node.power_state)
self.assertIsNone(self.node.last_error)
self.assertFalse(os.path.exists(token_path))
@@ -947,15 +1040,23 @@ class PXEDriverTestCase(db_base.DbTestCase):
def test_pass_deploy_info_deploy(self):
self._test_pass_deploy_info_deploy(False)
+ self.assertEqual(states.ACTIVE, self.node.provision_state)
+ self.assertEqual(states.NOSTATE, self.node.target_provision_state)
def test_pass_deploy_info_localboot(self):
self._test_pass_deploy_info_deploy(True)
+ self.assertEqual(states.DEPLOYWAIT, self.node.provision_state)
+ self.assertEqual(states.ACTIVE, self.node.target_provision_state)
def test_pass_deploy_info_whole_disk_image(self):
self._test_pass_deploy_info_whole_disk_image(False)
+ self.assertEqual(states.ACTIVE, self.node.provision_state)
+ self.assertEqual(states.NOSTATE, self.node.target_provision_state)
def test_pass_deploy_info_whole_disk_image_localboot(self):
self._test_pass_deploy_info_whole_disk_image(True)
+ self.assertEqual(states.ACTIVE, self.node.provision_state)
+ self.assertEqual(states.NOSTATE, self.node.target_provision_state)
def test_pass_deploy_info_invalid(self):
self.node.power_state = states.POWER_ON
@@ -986,7 +1087,8 @@ class PXEDriverTestCase(db_base.DbTestCase):
"pass_deploy_info was not called once.")
def test_vendor_routes(self):
- expected = ['heartbeat', 'pass_deploy_info']
+ expected = ['heartbeat', 'pass_deploy_info',
+ 'pass_bootloader_install_info']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
vendor_routes = task.driver.vendor.vendor_routes
diff --git a/ironic/tests/drivers/test_seamicro.py b/ironic/tests/drivers/test_seamicro.py
index ad4586828..e477cddd4 100644
--- a/ironic/tests/drivers/test_seamicro.py
+++ b/ironic/tests/drivers/test_seamicro.py
@@ -129,6 +129,7 @@ class SeaMicroValidateParametersTestCase(db_base.DbTestCase):
node)
+@mock.patch('eventlet.greenthread.sleep', lambda n: None)
class SeaMicroPrivateMethodsTestCase(db_base.DbTestCase):
def setUp(self):
@@ -144,8 +145,6 @@ class SeaMicroPrivateMethodsTestCase(db_base.DbTestCase):
self.config(action_timeout=0, group='seamicro')
self.config(max_retry=2, group='seamicro')
- self.patcher = mock.patch('eventlet.greenthread.sleep')
- self.mock_sleep = self.patcher.start()
self.info = seamicro._parse_driver_info(self.node)
@mock.patch.object(seamicro_client, "Client")
diff --git a/ironic/tests/drivers/test_ssh.py b/ironic/tests/drivers/test_ssh.py
index f70e21309..76e023675 100644
--- a/ironic/tests/drivers/test_ssh.py
+++ b/ironic/tests/drivers/test_ssh.py
@@ -283,7 +283,7 @@ class SSHPrivateMethodsTestCase(db_base.DbTestCase):
info)
get_hosts_name_mock.assert_called_once_with(self.sshclient, info)
- exec_ssh_mock.assert_not_called()
+ self.assertFalse(exec_ssh_mock.called)
@mock.patch.object(processutils, 'ssh_execute')
def test__get_power_status_exception(self, exec_ssh_mock):
diff --git a/ironic/tests/drivers/test_utils.py b/ironic/tests/drivers/test_utils.py
index 1ca6e90d4..cd14b464f 100644
--- a/ironic/tests/drivers/test_utils.py
+++ b/ironic/tests/drivers/test_utils.py
@@ -112,27 +112,6 @@ class UtilsTestCase(db_base.DbTestCase):
self.assertEqual('a:b,c:d,a:b',
task.node.properties['capabilities'])
- def test_rm_node_capability(self):
- with task_manager.acquire(self.context, self.node.uuid,
- shared=False) as task:
- task.node.properties['capabilities'] = 'a:b'
- driver_utils.rm_node_capability(task, 'a')
- self.assertIsNone(task.node.properties['capabilities'])
-
- def test_rm_node_capability_exists(self):
- with task_manager.acquire(self.context, self.node.uuid,
- shared=False) as task:
- task.node.properties['capabilities'] = 'a:b,c:d,x:y'
- self.assertIsNone(driver_utils.rm_node_capability(task, 'c'))
- self.assertEqual('a:b,x:y', task.node.properties['capabilities'])
-
- def test_rm_node_capability_non_existent(self):
- with task_manager.acquire(self.context, self.node.uuid,
- shared=False) as task:
- task.node.properties['capabilities'] = 'a:b'
- self.assertIsNone(driver_utils.rm_node_capability(task, 'x'))
- self.assertEqual('a:b', task.node.properties['capabilities'])
-
def test_validate_capability(self):
properties = {'capabilities': 'cat:meow,cap2:value2'}
self.node.properties = properties
diff --git a/ironic/tests/objects/utils.py b/ironic/tests/objects/utils.py
index 1eead9e8b..aa21c91fd 100644
--- a/ironic/tests/objects/utils.py
+++ b/ironic/tests/objects/utils.py
@@ -79,6 +79,9 @@ def get_test_chassis(ctxt, **kw):
that a create() could be used to commit it to the DB.
"""
db_chassis = db_utils.get_test_chassis(**kw)
+ # Let DB generate ID if it isn't specified explicitly
+ if 'id' not in kw:
+ del db_chassis['id']
chassis = objects.Chassis(ctxt)
for key in db_chassis:
setattr(chassis, key, db_chassis[key])
diff --git a/ironic/tests/stubs.py b/ironic/tests/stubs.py
index 7d43d2676..d20c1fd8a 100644
--- a/ironic/tests/stubs.py
+++ b/ironic/tests/stubs.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from ironic.common import exception
+from glanceclient import exc as glance_exc
NOW_GLANCE_FORMAT = "2010-10-11T10:30:22"
@@ -40,7 +40,7 @@ class StubGlanceClient(object):
index += 1
break
else:
- raise exception.BadRequest('Marker not found')
+ raise glance_exc.BadRequest('Marker not found')
return self._images[index:index + limit]
@@ -48,7 +48,7 @@ class StubGlanceClient(object):
for image in self._images:
if image.id == str(image_id):
return image
- raise exception.ImageNotFound(image_id)
+ raise glance_exc.NotFound(image_id)
def data(self, image_id):
self.get(image_id)
@@ -76,7 +76,7 @@ class StubGlanceClient(object):
for k, v in metadata.items():
setattr(self._images[i], k, v)
return self._images[i]
- raise exception.NotFound(image_id)
+ raise glance_exc.NotFound(image_id)
def delete(self, image_id):
for i, image in enumerate(self._images):
@@ -86,10 +86,10 @@ class StubGlanceClient(object):
# HTTPForbidden.
image_data = self._images[i]
if image_data.deleted:
- raise exception.Forbidden()
+ raise glance_exc.Forbidden()
image_data.deleted = True
return
- raise exception.NotFound(image_id)
+ raise glance_exc.NotFound(image_id)
class FakeImage(object):
diff --git a/ironic/tests/test_disk_partitioner.py b/ironic/tests/test_disk_partitioner.py
index 00cbef5e4..941c92b3c 100644
--- a/ironic/tests/test_disk_partitioner.py
+++ b/ironic/tests/test_disk_partitioner.py
@@ -47,8 +47,9 @@ class DiskPartitionerTestCase(base.TestCase):
self.assertThat(partitions, HasLength(3))
self.assertEqual(expected, partitions)
- @mock.patch.object(disk_partitioner.DiskPartitioner, '_exec')
- @mock.patch.object(utils, 'execute')
+ @mock.patch.object(disk_partitioner.DiskPartitioner, '_exec',
+ autospec=True)
+ @mock.patch.object(utils, 'execute', autospec=True)
def test_commit(self, mock_utils_exc, mock_disk_partitioner_exec):
dp = disk_partitioner.DiskPartitioner('/dev/fake')
fake_parts = [(1, {'bootable': False,
@@ -59,20 +60,22 @@ class DiskPartitionerTestCase(base.TestCase):
'fs_type': 'fake-fs-type',
'type': 'fake-type',
'size': 1})]
- with mock.patch.object(dp, 'get_partitions') as mock_gp:
+ with mock.patch.object(dp, 'get_partitions', autospec=True) as mock_gp:
mock_gp.return_value = fake_parts
mock_utils_exc.return_value = (None, None)
dp.commit()
- mock_disk_partitioner_exec.assert_called_once_with('mklabel', 'msdos',
+ mock_disk_partitioner_exec.assert_called_once_with(
+ mock.ANY, 'mklabel', 'msdos',
'mkpart', 'fake-type', 'fake-fs-type', '1', '2',
'mkpart', 'fake-type', 'fake-fs-type', '2', '3',
'set', '2', 'boot', 'on')
mock_utils_exc.assert_called_once_with('fuser', '/dev/fake',
run_as_root=True, check_exit_code=[0, 1])
- @mock.patch.object(disk_partitioner.DiskPartitioner, '_exec')
- @mock.patch.object(utils, 'execute')
+ @mock.patch.object(disk_partitioner.DiskPartitioner, '_exec',
+ autospec=True)
+ @mock.patch.object(utils, 'execute', autospec=True)
def test_commit_with_device_is_busy_once(self, mock_utils_exc,
mock_disk_partitioner_exec):
dp = disk_partitioner.DiskPartitioner('/dev/fake')
@@ -84,14 +87,15 @@ class DiskPartitionerTestCase(base.TestCase):
'fs_type': 'fake-fs-type',
'type': 'fake-type',
'size': 1})]
- fuser_outputs = [("/dev/fake: 10000 10001", None), (None, None)]
+ fuser_outputs = iter([("/dev/fake: 10000 10001", None), (None, None)])
- with mock.patch.object(dp, 'get_partitions') as mock_gp:
+ with mock.patch.object(dp, 'get_partitions', autospec=True) as mock_gp:
mock_gp.return_value = fake_parts
mock_utils_exc.side_effect = fuser_outputs
dp.commit()
- mock_disk_partitioner_exec.assert_called_once_with('mklabel', 'msdos',
+ mock_disk_partitioner_exec.assert_called_once_with(
+ mock.ANY, 'mklabel', 'msdos',
'mkpart', 'fake-type', 'fake-fs-type', '1', '2',
'mkpart', 'fake-type', 'fake-fs-type', '2', '3',
'set', '2', 'boot', 'on')
@@ -99,8 +103,9 @@ class DiskPartitionerTestCase(base.TestCase):
run_as_root=True, check_exit_code=[0, 1])
self.assertEqual(2, mock_utils_exc.call_count)
- @mock.patch.object(disk_partitioner.DiskPartitioner, '_exec')
- @mock.patch.object(utils, 'execute')
+ @mock.patch.object(disk_partitioner.DiskPartitioner, '_exec',
+ autospec=True)
+ @mock.patch.object(utils, 'execute', autospec=True)
def test_commit_with_device_is_always_busy(self, mock_utils_exc,
mock_disk_partitioner_exec):
dp = disk_partitioner.DiskPartitioner('/dev/fake')
@@ -113,12 +118,13 @@ class DiskPartitionerTestCase(base.TestCase):
'type': 'fake-type',
'size': 1})]
- with mock.patch.object(dp, 'get_partitions') as mock_gp:
+ with mock.patch.object(dp, 'get_partitions', autospec=True) as mock_gp:
mock_gp.return_value = fake_parts
mock_utils_exc.return_value = ("/dev/fake: 10000 10001", None)
self.assertRaises(exception.InstanceDeployFailure, dp.commit)
- mock_disk_partitioner_exec.assert_called_once_with('mklabel', 'msdos',
+ mock_disk_partitioner_exec.assert_called_once_with(
+ mock.ANY, 'mklabel', 'msdos',
'mkpart', 'fake-type', 'fake-fs-type', '1', '2',
'mkpart', 'fake-type', 'fake-fs-type', '2', '3',
'set', '2', 'boot', 'on')
@@ -126,8 +132,9 @@ class DiskPartitionerTestCase(base.TestCase):
run_as_root=True, check_exit_code=[0, 1])
self.assertEqual(20, mock_utils_exc.call_count)
- @mock.patch.object(disk_partitioner.DiskPartitioner, '_exec')
- @mock.patch.object(utils, 'execute')
+ @mock.patch.object(disk_partitioner.DiskPartitioner, '_exec',
+ autospec=True)
+ @mock.patch.object(utils, 'execute', autospec=True)
def test_commit_with_device_disconnected(self, mock_utils_exc,
mock_disk_partitioner_exec):
dp = disk_partitioner.DiskPartitioner('/dev/fake')
@@ -140,13 +147,14 @@ class DiskPartitionerTestCase(base.TestCase):
'type': 'fake-type',
'size': 1})]
- with mock.patch.object(dp, 'get_partitions') as mock_gp:
+ with mock.patch.object(dp, 'get_partitions', autospec=True) as mock_gp:
mock_gp.return_value = fake_parts
mock_utils_exc.return_value = (None, "Specified filename /dev/fake"
" does not exist.")
self.assertRaises(exception.InstanceDeployFailure, dp.commit)
- mock_disk_partitioner_exec.assert_called_once_with('mklabel', 'msdos',
+ mock_disk_partitioner_exec.assert_called_once_with(
+ mock.ANY, 'mklabel', 'msdos',
'mkpart', 'fake-type', 'fake-fs-type', '1', '2',
'mkpart', 'fake-type', 'fake-fs-type', '2', '3',
'set', '2', 'boot', 'on')
@@ -155,7 +163,7 @@ class DiskPartitionerTestCase(base.TestCase):
self.assertEqual(20, mock_utils_exc.call_count)
-@mock.patch.object(utils, 'execute')
+@mock.patch.object(utils, 'execute', autospec=True)
class ListPartitionsTestCase(base.TestCase):
def test_correct(self, execute_mock):
@@ -178,7 +186,7 @@ BYT;
'parted', '-s', '-m', '/dev/fake', 'unit', 'MiB', 'print',
use_standard_locale=True)
- @mock.patch.object(disk_partitioner.LOG, 'warn')
+ @mock.patch.object(disk_partitioner.LOG, 'warn', autospec=True)
def test_incorrect(self, log_mock, execute_mock):
output = """
BYT;
diff --git a/ironic/tests/test_driver_factory.py b/ironic/tests/test_driver_factory.py
index 6f5be3c09..1dee9d0de 100644
--- a/ironic/tests/test_driver_factory.py
+++ b/ironic/tests/test_driver_factory.py
@@ -52,7 +52,8 @@ class DriverLoadTestCase(base.TestCase):
self.assertRaises(exception.DriverLoadError,
driver_factory.DriverFactory._init_extension_manager)
- @mock.patch.object(dispatch.NameDispatchExtensionManager, 'names')
+ @mock.patch.object(dispatch.NameDispatchExtensionManager, 'names',
+ autospec=True)
def test_no_driver_load_error_if_driver_disabled(self, mock_em):
self.config(enabled_drivers=[])
with mock.patch.object(dispatch.NameDispatchExtensionManager,
diff --git a/ironic/tests/test_exception.py b/ironic/tests/test_exception.py
new file mode 100644
index 000000000..75e8c7cfb
--- /dev/null
+++ b/ironic/tests/test_exception.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2015 IBM, Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import six
+
+from ironic.common import exception
+from ironic.tests import base
+
+
+class TestIronicException(base.TestCase):
+ def test____init__(self):
+ expected = '\xc3\xa9\xe0\xaf\xb2\xe0\xbe\x84'
+ if six.PY3:
+ message = chr(233) + chr(0x0bf2) + chr(3972)
+ else:
+ message = unichr(233) + unichr(0x0bf2) + unichr(3972)
+ exc = exception.IronicException(message)
+ self.assertEqual(expected, exc.__str__())
diff --git a/ironic/tests/test_glance_service.py b/ironic/tests/test_glance_service.py
index dfe6d90b3..6c3276da6 100644
--- a/ironic/tests/test_glance_service.py
+++ b/ironic/tests/test_glance_service.py
@@ -18,9 +18,13 @@ import datetime
import filecmp
import os
import tempfile
+import time
+from glanceclient import exc as glance_exc
import mock
+from oslo_config import cfg
from oslo_context import context
+from oslo_serialization import jsonutils
import testtools
@@ -32,8 +36,6 @@ from ironic.tests import base
from ironic.tests import matchers
from ironic.tests import stubs
-from oslo_config import cfg
-from oslo_serialization import jsonutils
CONF = cfg.CONF
@@ -458,7 +460,8 @@ class TestGlanceImageService(base.TestCase):
self.assertEqual(self.NOW_DATETIME, image_meta['created_at'])
self.assertEqual(self.NOW_DATETIME, image_meta['updated_at'])
- def test_download_with_retries(self):
+ @mock.patch.object(time, 'sleep', autospec=True)
+ def test_download_with_retries(self, mock_sleep):
tries = [0]
class MyGlanceStubClient(stubs.StubGlanceClient):
@@ -466,7 +469,7 @@ class TestGlanceImageService(base.TestCase):
def get(self, image_id):
if tries[0] == 0:
tries[0] = 1
- raise exception.ServiceUnavailable('')
+ raise glance_exc.ServiceUnavailable('')
else:
return {}
@@ -487,6 +490,7 @@ class TestGlanceImageService(base.TestCase):
tries = [0]
self.config(glance_num_retries=1, group='glance')
stub_service.download(image_id, writer)
+ self.assertTrue(mock_sleep.called)
def test_download_file_url(self):
# NOTE: only in v2 API
@@ -533,7 +537,7 @@ class TestGlanceImageService(base.TestCase):
class MyGlanceStubClient(stubs.StubGlanceClient):
"""A client that raises a Forbidden exception."""
def get(self, image_id):
- raise exception.Forbidden(image_id)
+ raise glance_exc.Forbidden(image_id)
stub_client = MyGlanceStubClient()
stub_context = context.RequestContext(auth_token=True)
@@ -549,7 +553,7 @@ class TestGlanceImageService(base.TestCase):
class MyGlanceStubClient(stubs.StubGlanceClient):
"""A client that raises a HTTPForbidden exception."""
def get(self, image_id):
- raise exception.HTTPForbidden(image_id)
+ raise glance_exc.HTTPForbidden(image_id)
stub_client = MyGlanceStubClient()
stub_context = context.RequestContext(auth_token=True)
@@ -565,7 +569,7 @@ class TestGlanceImageService(base.TestCase):
class MyGlanceStubClient(stubs.StubGlanceClient):
"""A client that raises a NotFound exception."""
def get(self, image_id):
- raise exception.NotFound(image_id)
+ raise glance_exc.NotFound(image_id)
stub_client = MyGlanceStubClient()
stub_context = context.RequestContext(auth_token=True)
@@ -581,7 +585,7 @@ class TestGlanceImageService(base.TestCase):
class MyGlanceStubClient(stubs.StubGlanceClient):
"""A client that raises a HTTPNotFound exception."""
def get(self, image_id):
- raise exception.HTTPNotFound(image_id)
+ raise glance_exc.HTTPNotFound(image_id)
stub_client = MyGlanceStubClient()
stub_context = context.RequestContext(auth_token=True)
@@ -632,7 +636,7 @@ def _create_failing_glance_client(info):
def get(self, image_id):
info['num_calls'] += 1
if info['num_calls'] == 1:
- raise exception.ServiceUnavailable('')
+ raise glance_exc.ServiceUnavailable('')
return {}
return MyGlanceStubClient()
@@ -663,7 +667,7 @@ class TestGlanceSwiftTempURL(base.TestCase):
'id': '757274c4-2856-4bd2-bb20-9a4a231e187b'
}
- @mock.patch('swiftclient.utils.generate_temp_url')
+ @mock.patch('swiftclient.utils.generate_temp_url', autospec=True)
def test_swift_temp_url(self, tempurl_mock):
path = ('/v1/AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30'
@@ -686,7 +690,7 @@ class TestGlanceSwiftTempURL(base.TestCase):
key=CONF.glance.swift_temp_url_key,
method='GET')
- @mock.patch('swiftclient.utils.generate_temp_url')
+ @mock.patch('swiftclient.utils.generate_temp_url', autospec=True)
def test_swift_temp_url_multiple_containers(self, tempurl_mock):
self.config(swift_store_multiple_containers_seed=8,
diff --git a/ironic/tests/test_hash_ring.py b/ironic/tests/test_hash_ring.py
index 54a4ca0d3..8f483f774 100644
--- a/ironic/tests/test_hash_ring.py
+++ b/ironic/tests/test_hash_ring.py
@@ -36,7 +36,7 @@ class HashRingTestCase(base.TestCase):
# fake -> foo, bar, baz
# fake-again -> bar, baz, foo
- @mock.patch.object(hashlib, 'md5')
+ @mock.patch.object(hashlib, 'md5', autospec=True)
def test__hash2int_returns_int(self, mock_md5):
CONF.set_override('hash_partition_exponent', 0)
r1 = 32 * 'a'
diff --git a/ironic/tests/test_image_service.py b/ironic/tests/test_image_service.py
index c2e426f78..a5974d636 100644
--- a/ironic/tests/test_image_service.py
+++ b/ironic/tests/test_image_service.py
@@ -30,7 +30,7 @@ class HttpImageServiceTestCase(base.TestCase):
self.service = image_service.HttpImageService()
self.href = 'http://127.0.0.1:12345/fedora.qcow2'
- @mock.patch.object(requests, 'head')
+ @mock.patch.object(requests, 'head', autospec=True)
def test_validate_href(self, head_mock):
response = head_mock.return_value
response.status_code = 200
@@ -45,28 +45,28 @@ class HttpImageServiceTestCase(base.TestCase):
self.service.validate_href,
self.href)
- @mock.patch.object(requests, 'head')
+ @mock.patch.object(requests, 'head', autospec=True)
def test_validate_href_error_code(self, head_mock):
head_mock.return_value.status_code = 400
self.assertRaises(exception.ImageRefValidationFailed,
self.service.validate_href, self.href)
head_mock.assert_called_once_with(self.href)
- @mock.patch.object(requests, 'head')
+ @mock.patch.object(requests, 'head', autospec=True)
def test_validate_href_error(self, head_mock):
head_mock.side_effect = requests.ConnectionError()
self.assertRaises(exception.ImageRefValidationFailed,
self.service.validate_href, self.href)
head_mock.assert_called_once_with(self.href)
- @mock.patch.object(requests, 'head')
+ @mock.patch.object(requests, 'head', autospec=True)
def test_show(self, head_mock):
head_mock.return_value.status_code = 200
result = self.service.show(self.href)
head_mock.assert_called_with(self.href)
self.assertEqual({'size': 1, 'properties': {}}, result)
- @mock.patch.object(requests, 'head')
+ @mock.patch.object(requests, 'head', autospec=True)
def test_show_no_content_length(self, head_mock):
head_mock.return_value.status_code = 200
head_mock.return_value.headers = {}
@@ -74,8 +74,8 @@ class HttpImageServiceTestCase(base.TestCase):
self.service.show, self.href)
head_mock.assert_called_with(self.href)
- @mock.patch.object(shutil, 'copyfileobj')
- @mock.patch.object(requests, 'get')
+ @mock.patch.object(shutil, 'copyfileobj', autospec=True)
+ @mock.patch.object(requests, 'get', autospec=True)
def test_download_success(self, req_get_mock, shutil_mock):
response_mock = req_get_mock.return_value
response_mock.status_code = 200
@@ -88,15 +88,15 @@ class HttpImageServiceTestCase(base.TestCase):
)
req_get_mock.assert_called_once_with(self.href, stream=True)
- @mock.patch.object(requests, 'get',
+ @mock.patch.object(requests, 'get', autospec=True,
side_effect=requests.ConnectionError())
def test_download_fail_connerror(self, req_get_mock):
file_mock = mock.Mock(spec=file)
self.assertRaises(exception.ImageDownloadFailed,
self.service.download, self.href, file_mock)
- @mock.patch.object(shutil, 'copyfileobj')
- @mock.patch.object(requests, 'get')
+ @mock.patch.object(shutil, 'copyfileobj', autospec=True)
+ @mock.patch.object(requests, 'get', autospec=True)
def test_download_fail_ioerror(self, req_get_mock, shutil_mock):
response_mock = req_get_mock.return_value
response_mock.status_code = 200
@@ -115,31 +115,33 @@ class FileImageServiceTestCase(base.TestCase):
self.href = 'file:///home/user/image.qcow2'
self.href_path = '/home/user/image.qcow2'
- @mock.patch.object(os.path, 'isfile', return_value=True)
+ @mock.patch.object(os.path, 'isfile', return_value=True, autospec=True)
def test_validate_href(self, path_exists_mock):
self.service.validate_href(self.href)
path_exists_mock.assert_called_once_with(self.href_path)
- @mock.patch.object(os.path, 'isfile', return_value=False)
+ @mock.patch.object(os.path, 'isfile', return_value=False, autospec=True)
def test_validate_href_path_not_found_or_not_file(self, path_exists_mock):
self.assertRaises(exception.ImageRefValidationFailed,
self.service.validate_href, self.href)
path_exists_mock.assert_called_once_with(self.href_path)
- @mock.patch.object(os.path, 'getsize', return_value=42)
- @mock.patch.object(image_service.FileImageService, 'validate_href')
+ @mock.patch.object(os.path, 'getsize', return_value=42, autospec=True)
+ @mock.patch.object(image_service.FileImageService, 'validate_href',
+ autospec=True)
def test_show(self, _validate_mock, getsize_mock):
_validate_mock.return_value = self.href_path
result = self.service.show(self.href)
getsize_mock.assert_called_once_with(self.href_path)
- _validate_mock.assert_called_once_with(self.href)
+ _validate_mock.assert_called_once_with(mock.ANY, self.href)
self.assertEqual({'size': 42, 'properties': {}}, result)
- @mock.patch.object(os, 'link')
- @mock.patch.object(os, 'remove')
- @mock.patch.object(os, 'access', return_value=True)
- @mock.patch.object(os, 'stat')
- @mock.patch.object(image_service.FileImageService, 'validate_href')
+ @mock.patch.object(os, 'link', autospec=True)
+ @mock.patch.object(os, 'remove', autospec=True)
+ @mock.patch.object(os, 'access', return_value=True, autospec=True)
+ @mock.patch.object(os, 'stat', autospec=True)
+ @mock.patch.object(image_service.FileImageService, 'validate_href',
+ autospec=True)
def test_download_hard_link(self, _validate_mock, stat_mock, access_mock,
remove_mock, link_mock):
_validate_mock.return_value = self.href_path
@@ -147,18 +149,19 @@ class FileImageServiceTestCase(base.TestCase):
file_mock = mock.Mock(spec=file)
file_mock.name = 'file'
self.service.download(self.href, file_mock)
- _validate_mock.assert_called_once_with(self.href)
+ _validate_mock.assert_called_once_with(mock.ANY, self.href)
self.assertEqual(2, stat_mock.call_count)
access_mock.assert_called_once_with(self.href_path, os.R_OK | os.W_OK)
remove_mock.assert_called_once_with('file')
link_mock.assert_called_once_with(self.href_path, 'file')
- @mock.patch.object(sendfile, 'sendfile')
- @mock.patch.object(os.path, 'getsize', return_value=42)
- @mock.patch.object(__builtin__, 'open')
- @mock.patch.object(os, 'access', return_value=False)
- @mock.patch.object(os, 'stat')
- @mock.patch.object(image_service.FileImageService, 'validate_href')
+ @mock.patch.object(sendfile, 'sendfile', autospec=True)
+ @mock.patch.object(os.path, 'getsize', return_value=42, autospec=True)
+ @mock.patch.object(__builtin__, 'open', autospec=True)
+ @mock.patch.object(os, 'access', return_value=False, autospec=True)
+ @mock.patch.object(os, 'stat', autospec=True)
+ @mock.patch.object(image_service.FileImageService, 'validate_href',
+ autospec=True)
def test_download_copy(self, _validate_mock, stat_mock, access_mock,
open_mock, size_mock, copy_mock):
_validate_mock.return_value = self.href_path
@@ -167,7 +170,7 @@ class FileImageServiceTestCase(base.TestCase):
input_mock = mock.MagicMock(spec=file)
open_mock.return_value = input_mock
self.service.download(self.href, file_mock)
- _validate_mock.assert_called_once_with(self.href)
+ _validate_mock.assert_called_once_with(mock.ANY, self.href)
self.assertEqual(2, stat_mock.call_count)
access_mock.assert_called_once_with(self.href_path, os.R_OK | os.W_OK)
copy_mock.assert_called_once_with(file_mock.fileno(),
@@ -175,10 +178,11 @@ class FileImageServiceTestCase(base.TestCase):
0, 42)
size_mock.assert_called_once_with(self.href_path)
- @mock.patch.object(os, 'remove', side_effect=OSError)
- @mock.patch.object(os, 'access', return_value=True)
- @mock.patch.object(os, 'stat')
- @mock.patch.object(image_service.FileImageService, 'validate_href')
+ @mock.patch.object(os, 'remove', side_effect=OSError, autospec=True)
+ @mock.patch.object(os, 'access', return_value=True, autospec=True)
+ @mock.patch.object(os, 'stat', autospec=True)
+ @mock.patch.object(image_service.FileImageService, 'validate_href',
+ autospec=True)
def test_download_hard_link_fail(self, _validate_mock, stat_mock,
access_mock, remove_mock):
_validate_mock.return_value = self.href_path
@@ -187,16 +191,18 @@ class FileImageServiceTestCase(base.TestCase):
file_mock.name = 'file'
self.assertRaises(exception.ImageDownloadFailed,
self.service.download, self.href, file_mock)
- _validate_mock.assert_called_once_with(self.href)
+ _validate_mock.assert_called_once_with(mock.ANY, self.href)
self.assertEqual(2, stat_mock.call_count)
access_mock.assert_called_once_with(self.href_path, os.R_OK | os.W_OK)
- @mock.patch.object(sendfile, 'sendfile', side_effect=OSError)
- @mock.patch.object(os.path, 'getsize', return_value=42)
- @mock.patch.object(__builtin__, 'open')
- @mock.patch.object(os, 'access', return_value=False)
- @mock.patch.object(os, 'stat')
- @mock.patch.object(image_service.FileImageService, 'validate_href')
+ @mock.patch.object(sendfile, 'sendfile', side_effect=OSError,
+ autospec=True)
+ @mock.patch.object(os.path, 'getsize', return_value=42, autospec=True)
+ @mock.patch.object(__builtin__, 'open', autospec=True)
+ @mock.patch.object(os, 'access', return_value=False, autospec=True)
+ @mock.patch.object(os, 'stat', autospec=True)
+ @mock.patch.object(image_service.FileImageService, 'validate_href',
+ autospec=True)
def test_download_copy_fail(self, _validate_mock, stat_mock, access_mock,
open_mock, size_mock, copy_mock):
_validate_mock.return_value = self.href_path
@@ -206,7 +212,7 @@ class FileImageServiceTestCase(base.TestCase):
open_mock.return_value = input_mock
self.assertRaises(exception.ImageDownloadFailed,
self.service.download, self.href, file_mock)
- _validate_mock.assert_called_once_with(self.href)
+ _validate_mock.assert_called_once_with(mock.ANY, self.href)
self.assertEqual(2, stat_mock.call_count)
access_mock.assert_called_once_with(self.href_path, os.R_OK | os.W_OK)
size_mock.assert_called_once_with(self.href_path)
@@ -215,35 +221,37 @@ class FileImageServiceTestCase(base.TestCase):
class ServiceGetterTestCase(base.TestCase):
@mock.patch.object(glance_v1_service.GlanceImageService, '__init__',
- return_value=None)
+ return_value=None, autospec=True)
def test_get_glance_image_service(self, glance_service_mock):
image_href = 'image-uuid'
image_service.get_image_service(image_href, context=self.context)
- glance_service_mock.assert_called_once_with(None, 1, self.context)
+ glance_service_mock.assert_called_once_with(mock.ANY, None, 1,
+ self.context)
@mock.patch.object(glance_v1_service.GlanceImageService, '__init__',
- return_value=None)
+ return_value=None, autospec=True)
def test_get_glance_image_service_url(self, glance_service_mock):
image_href = 'glance://image-uuid'
image_service.get_image_service(image_href, context=self.context)
- glance_service_mock.assert_called_once_with(None, 1, self.context)
+ glance_service_mock.assert_called_once_with(mock.ANY, None, 1,
+ self.context)
@mock.patch.object(image_service.HttpImageService, '__init__',
- return_value=None)
+ return_value=None, autospec=True)
def test_get_http_image_service(self, http_service_mock):
image_href = 'http://127.0.0.1/image.qcow2'
image_service.get_image_service(image_href)
http_service_mock.assert_called_once_with()
@mock.patch.object(image_service.HttpImageService, '__init__',
- return_value=None)
+ return_value=None, autospec=True)
def test_get_https_image_service(self, http_service_mock):
image_href = 'https://127.0.0.1/image.qcow2'
image_service.get_image_service(image_href)
http_service_mock.assert_called_once_with()
@mock.patch.object(image_service.FileImageService, '__init__',
- return_value=None)
+ return_value=None, autospec=True)
def test_get_file_image_service(self, local_service_mock):
image_href = 'file:///home/user/image.qcow2'
image_service.get_image_service(image_href)
diff --git a/ironic/tests/test_images.py b/ironic/tests/test_images.py
index 9ebd2d4b8..b610d4b0a 100644
--- a/ironic/tests/test_images.py
+++ b/ironic/tests/test_images.py
@@ -40,17 +40,18 @@ class IronicImagesTestCase(base.TestCase):
class FakeImgInfo(object):
pass
- @mock.patch.object(imageutils, 'QemuImgInfo')
- @mock.patch.object(os.path, 'exists', return_value=False)
+ @mock.patch.object(imageutils, 'QemuImgInfo', autospec=True)
+ @mock.patch.object(os.path, 'exists', return_value=False, autospec=True)
def test_qemu_img_info_path_doesnt_exist(self, path_exists_mock,
qemu_img_info_mock):
images.qemu_img_info('noimg')
path_exists_mock.assert_called_once_with('noimg')
qemu_img_info_mock.assert_called_once_with()
- @mock.patch.object(utils, 'execute', return_value=('out', 'err'))
- @mock.patch.object(imageutils, 'QemuImgInfo')
- @mock.patch.object(os.path, 'exists', return_value=True)
+ @mock.patch.object(utils, 'execute', return_value=('out', 'err'),
+ autospec=True)
+ @mock.patch.object(imageutils, 'QemuImgInfo', autospec=True)
+ @mock.patch.object(os.path, 'exists', return_value=True, autospec=True)
def test_qemu_img_info_path_exists(self, path_exists_mock,
qemu_img_info_mock, execute_mock):
images.qemu_img_info('img')
@@ -59,15 +60,15 @@ class IronicImagesTestCase(base.TestCase):
'qemu-img', 'info', 'img')
qemu_img_info_mock.assert_called_once_with('out')
- @mock.patch.object(utils, 'execute')
+ @mock.patch.object(utils, 'execute', autospec=True)
def test_convert_image(self, execute_mock):
images.convert_image('source', 'dest', 'out_format')
execute_mock.assert_called_once_with('qemu-img', 'convert', '-O',
'out_format', 'source', 'dest',
run_as_root=False)
- @mock.patch.object(image_service, 'get_image_service')
- @mock.patch.object(__builtin__, 'open')
+ @mock.patch.object(image_service, 'get_image_service', autospec=True)
+ @mock.patch.object(__builtin__, 'open', autospec=True)
def test_fetch_no_image_service(self, open_mock, image_service_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'file'
@@ -81,7 +82,7 @@ class IronicImagesTestCase(base.TestCase):
image_service_mock.return_value.download.assert_called_once_with(
'image_href', 'file')
- @mock.patch.object(__builtin__, 'open')
+ @mock.patch.object(__builtin__, 'open', autospec=True)
def test_fetch_image_service(self, open_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'file'
@@ -94,8 +95,8 @@ class IronicImagesTestCase(base.TestCase):
image_service_mock.download.assert_called_once_with(
'image_href', 'file')
- @mock.patch.object(images, 'image_to_raw')
- @mock.patch.object(__builtin__, 'open')
+ @mock.patch.object(images, 'image_to_raw', autospec=True)
+ @mock.patch.object(__builtin__, 'open', autospec=True)
def test_fetch_image_service_force_raw(self, open_mock, image_to_raw_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'file'
@@ -111,7 +112,7 @@ class IronicImagesTestCase(base.TestCase):
image_to_raw_mock.assert_called_once_with(
'image_href', 'path', 'path.part')
- @mock.patch.object(images, 'qemu_img_info')
+ @mock.patch.object(images, 'qemu_img_info', autospec=True)
def test_image_to_raw_no_file_format(self, qemu_img_info_mock):
info = self.FakeImgInfo()
info.file_format = None
@@ -122,7 +123,7 @@ class IronicImagesTestCase(base.TestCase):
qemu_img_info_mock.assert_called_once_with('path_tmp')
self.assertIn("'qemu-img info' parsing failed.", str(e))
- @mock.patch.object(images, 'qemu_img_info')
+ @mock.patch.object(images, 'qemu_img_info', autospec=True)
def test_image_to_raw_backing_file_present(self, qemu_img_info_mock):
info = self.FakeImgInfo()
info.file_format = 'raw'
@@ -134,10 +135,10 @@ class IronicImagesTestCase(base.TestCase):
qemu_img_info_mock.assert_called_once_with('path_tmp')
self.assertIn("fmt=raw backed by: backing_file", str(e))
- @mock.patch.object(os, 'rename')
- @mock.patch.object(os, 'unlink')
- @mock.patch.object(images, 'convert_image')
- @mock.patch.object(images, 'qemu_img_info')
+ @mock.patch.object(os, 'rename', autospec=True)
+ @mock.patch.object(os, 'unlink', autospec=True)
+ @mock.patch.object(images, 'convert_image', autospec=True)
+ @mock.patch.object(images, 'qemu_img_info', autospec=True)
def test_image_to_raw(self, qemu_img_info_mock, convert_image_mock,
unlink_mock, rename_mock):
CONF.set_override('force_raw_images', True)
@@ -159,9 +160,9 @@ class IronicImagesTestCase(base.TestCase):
unlink_mock.assert_called_once_with('path_tmp')
rename_mock.assert_called_once_with('path.converted', 'path')
- @mock.patch.object(os, 'unlink')
- @mock.patch.object(images, 'convert_image')
- @mock.patch.object(images, 'qemu_img_info')
+ @mock.patch.object(os, 'unlink', autospec=True)
+ @mock.patch.object(images, 'convert_image', autospec=True)
+ @mock.patch.object(images, 'qemu_img_info', autospec=True)
def test_image_to_raw_not_raw_after_conversion(self, qemu_img_info_mock,
convert_image_mock,
unlink_mock):
@@ -179,8 +180,8 @@ class IronicImagesTestCase(base.TestCase):
'path.converted', 'raw')
unlink_mock.assert_called_once_with('path_tmp')
- @mock.patch.object(os, 'rename')
- @mock.patch.object(images, 'qemu_img_info')
+ @mock.patch.object(os, 'rename', autospec=True)
+ @mock.patch.object(images, 'qemu_img_info', autospec=True)
def test_image_to_raw_already_raw_format(self, qemu_img_info_mock,
rename_mock):
info = self.FakeImgInfo()
@@ -193,7 +194,7 @@ class IronicImagesTestCase(base.TestCase):
qemu_img_info_mock.assert_called_once_with('path_tmp')
rename_mock.assert_called_once_with('path_tmp', 'path')
- @mock.patch.object(image_service, 'get_image_service')
+ @mock.patch.object(image_service, 'get_image_service', autospec=True)
def test_download_size_no_image_service(self, image_service_mock):
images.download_size('context', 'image_href')
image_service_mock.assert_called_once_with('image_href',
@@ -206,7 +207,7 @@ class IronicImagesTestCase(base.TestCase):
images.download_size('context', 'image_href', image_service_mock)
image_service_mock.show.assert_called_once_with('image_href')
- @mock.patch.object(images, 'qemu_img_info')
+ @mock.patch.object(images, 'qemu_img_info', autospec=True)
def test_converted_size(self, qemu_img_info_mock):
info = self.FakeImgInfo()
info.virtual_size = 1
@@ -215,8 +216,8 @@ class IronicImagesTestCase(base.TestCase):
qemu_img_info_mock.assert_called_once_with('path')
self.assertEqual(1, size)
- @mock.patch.object(images, 'get_image_properties')
- @mock.patch.object(glance_utils, 'is_glance_image')
+ @mock.patch.object(images, 'get_image_properties', autospec=True)
+ @mock.patch.object(glance_utils, 'is_glance_image', autospec=True)
def test_is_whole_disk_image_no_img_src(self, mock_igi, mock_gip):
instance_info = {'image_source': ''}
iwdi = images.is_whole_disk_image('context', instance_info)
@@ -224,8 +225,8 @@ class IronicImagesTestCase(base.TestCase):
self.assertFalse(mock_igi.called)
self.assertFalse(mock_gip.called)
- @mock.patch.object(images, 'get_image_properties')
- @mock.patch.object(glance_utils, 'is_glance_image')
+ @mock.patch.object(images, 'get_image_properties', autospec=True)
+ @mock.patch.object(glance_utils, 'is_glance_image', autospec=True)
def test_is_whole_disk_image_partition_image(self, mock_igi, mock_gip):
mock_igi.return_value = True
mock_gip.return_value = {'kernel_id': 'kernel',
@@ -238,8 +239,8 @@ class IronicImagesTestCase(base.TestCase):
mock_igi.assert_called_once_with(image_source)
mock_gip.assert_called_once_with('context', image_source)
- @mock.patch.object(images, 'get_image_properties')
- @mock.patch.object(glance_utils, 'is_glance_image')
+ @mock.patch.object(images, 'get_image_properties', autospec=True)
+ @mock.patch.object(glance_utils, 'is_glance_image', autospec=True)
def test_is_whole_disk_image_whole_disk_image(self, mock_igi, mock_gip):
mock_igi.return_value = True
mock_gip.return_value = {}
@@ -251,8 +252,8 @@ class IronicImagesTestCase(base.TestCase):
mock_igi.assert_called_once_with(image_source)
mock_gip.assert_called_once_with('context', image_source)
- @mock.patch.object(images, 'get_image_properties')
- @mock.patch.object(glance_utils, 'is_glance_image')
+ @mock.patch.object(images, 'get_image_properties', autospec=True)
+ @mock.patch.object(glance_utils, 'is_glance_image', autospec=True)
def test_is_whole_disk_image_partition_non_glance(self, mock_igi,
mock_gip):
mock_igi.return_value = False
@@ -265,8 +266,8 @@ class IronicImagesTestCase(base.TestCase):
self.assertFalse(mock_gip.called)
mock_igi.assert_called_once_with(instance_info['image_source'])
- @mock.patch.object(images, 'get_image_properties')
- @mock.patch.object(glance_utils, 'is_glance_image')
+ @mock.patch.object(images, 'get_image_properties', autospec=True)
+ @mock.patch.object(glance_utils, 'is_glance_image', autospec=True)
def test_is_whole_disk_image_whole_disk_non_glance(self, mock_igi,
mock_gip):
mock_igi.return_value = False
@@ -280,10 +281,10 @@ class IronicImagesTestCase(base.TestCase):
class FsImageTestCase(base.TestCase):
- @mock.patch.object(shutil, 'copyfile')
- @mock.patch.object(os, 'makedirs')
- @mock.patch.object(os.path, 'dirname')
- @mock.patch.object(os.path, 'exists')
+ @mock.patch.object(shutil, 'copyfile', autospec=True)
+ @mock.patch.object(os, 'makedirs', autospec=True)
+ @mock.patch.object(os.path, 'dirname', autospec=True)
+ @mock.patch.object(os.path, 'exists', autospec=True)
def test__create_root_fs(self, path_exists_mock,
dirname_mock, mkdir_mock, cp_mock):
@@ -295,8 +296,8 @@ class FsImageTestCase(base.TestCase):
'a3': 'sub_dir/b3'}
path_exists_mock.side_effect = path_exists_mock_func
- dirname_mock.side_effect = ['root_dir', 'root_dir',
- 'root_dir/sub_dir', 'root_dir/sub_dir']
+ dirname_mock.side_effect = iter(
+ ['root_dir', 'root_dir', 'root_dir/sub_dir', 'root_dir/sub_dir'])
images._create_root_fs('root_dir', files_info)
cp_mock.assert_any_call('a1', 'root_dir/b1')
cp_mock.assert_any_call('a2', 'root_dir/b2')
@@ -308,13 +309,13 @@ class FsImageTestCase(base.TestCase):
dirname_mock.assert_any_call('root_dir/sub_dir/b3')
mkdir_mock.assert_called_once_with('root_dir/sub_dir')
- @mock.patch.object(images, '_create_root_fs')
- @mock.patch.object(utils, 'tempdir')
- @mock.patch.object(utils, 'write_to_file')
- @mock.patch.object(utils, 'dd')
- @mock.patch.object(utils, 'umount')
- @mock.patch.object(utils, 'mount')
- @mock.patch.object(utils, 'mkfs')
+ @mock.patch.object(images, '_create_root_fs', autospec=True)
+ @mock.patch.object(utils, 'tempdir', autospec=True)
+ @mock.patch.object(utils, 'write_to_file', autospec=True)
+ @mock.patch.object(utils, 'dd', autospec=True)
+ @mock.patch.object(utils, 'umount', autospec=True)
+ @mock.patch.object(utils, 'mount', autospec=True)
+ @mock.patch.object(utils, 'mkfs', autospec=True)
def test_create_vfat_image(self, mkfs_mock, mount_mock, umount_mock,
dd_mock, write_mock, tempdir_mock, create_root_fs_mock):
@@ -343,12 +344,12 @@ class FsImageTestCase(base.TestCase):
create_root_fs_mock.assert_called_once_with('tempdir', files_info)
umount_mock.assert_called_once_with('tempdir')
- @mock.patch.object(images, '_create_root_fs')
- @mock.patch.object(utils, 'tempdir')
- @mock.patch.object(utils, 'dd')
- @mock.patch.object(utils, 'umount')
- @mock.patch.object(utils, 'mount')
- @mock.patch.object(utils, 'mkfs')
+ @mock.patch.object(images, '_create_root_fs', autospec=True)
+ @mock.patch.object(utils, 'tempdir', autospec=True)
+ @mock.patch.object(utils, 'dd', autospec=True)
+ @mock.patch.object(utils, 'umount', autospec=True)
+ @mock.patch.object(utils, 'mount', autospec=True)
+ @mock.patch.object(utils, 'mkfs', autospec=True)
def test_create_vfat_image_always_umount(self, mkfs_mock, mount_mock,
umount_mock, dd_mock, tempdir_mock, create_root_fs_mock):
@@ -363,16 +364,16 @@ class FsImageTestCase(base.TestCase):
umount_mock.assert_called_once_with('tempdir')
- @mock.patch.object(utils, 'dd')
+ @mock.patch.object(utils, 'dd', autospec=True)
def test_create_vfat_image_dd_fails(self, dd_mock):
dd_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.ImageCreationFailed,
images.create_vfat_image, 'tgt_file')
- @mock.patch.object(utils, 'tempdir')
- @mock.patch.object(utils, 'dd')
- @mock.patch.object(utils, 'mkfs')
+ @mock.patch.object(utils, 'tempdir', autospec=True)
+ @mock.patch.object(utils, 'dd', autospec=True)
+ @mock.patch.object(utils, 'mkfs', autospec=True)
def test_create_vfat_image_mkfs_fails(self, mkfs_mock, dd_mock,
tempdir_mock):
@@ -384,12 +385,12 @@ class FsImageTestCase(base.TestCase):
self.assertRaises(exception.ImageCreationFailed,
images.create_vfat_image, 'tgt_file')
- @mock.patch.object(images, '_create_root_fs')
- @mock.patch.object(utils, 'tempdir')
- @mock.patch.object(utils, 'dd')
- @mock.patch.object(utils, 'umount')
- @mock.patch.object(utils, 'mount')
- @mock.patch.object(utils, 'mkfs')
+ @mock.patch.object(images, '_create_root_fs', autospec=True)
+ @mock.patch.object(utils, 'tempdir', autospec=True)
+ @mock.patch.object(utils, 'dd', autospec=True)
+ @mock.patch.object(utils, 'umount', autospec=True)
+ @mock.patch.object(utils, 'mount', autospec=True)
+ @mock.patch.object(utils, 'mkfs', autospec=True)
def test_create_vfat_image_umount_fails(self, mkfs_mock, mount_mock,
umount_mock, dd_mock, tempdir_mock, create_root_fs_mock):
@@ -401,7 +402,7 @@ class FsImageTestCase(base.TestCase):
self.assertRaises(exception.ImageCreationFailed,
images.create_vfat_image, 'tgt_file')
- @mock.patch.object(utils, 'umount')
+ @mock.patch.object(utils, 'umount', autospec=True)
def test__umount_without_raise(self, umount_mock):
umount_mock.side_effect = processutils.ProcessExecutionError
@@ -423,12 +424,15 @@ class FsImageTestCase(base.TestCase):
self.assertEqual(expected_cfg, cfg)
def test__generate_grub_cfg(self):
-
kernel_params = ['key1=value1', 'key2']
options = {'linux': '/vmlinuz', 'initrd': '/initrd'}
- expected_cfg = ("menuentry \"install\" {\n"
- "linux /vmlinuz key1=value1 key2 --\n"
- "initrd /initrd\n"
+ expected_cfg = ("set default=0\n"
+ "set timeout=5\n"
+ "set hidden_timeout_quiet=false\n"
+ "\n"
+ "menuentry \"boot_partition\" {\n"
+ "linuxefi /vmlinuz key1=value1 key2 --\n"
+ "initrdefi /initrd\n"
"}")
cfg = images._generate_cfg(kernel_params,
@@ -436,34 +440,34 @@ class FsImageTestCase(base.TestCase):
options)
self.assertEqual(expected_cfg, cfg)
- @mock.patch.object(os.path, 'relpath')
- @mock.patch.object(os, 'walk')
- @mock.patch.object(utils, 'mount')
+ @mock.patch.object(os.path, 'relpath', autospec=True)
+ @mock.patch.object(os, 'walk', autospec=True)
+ @mock.patch.object(utils, 'mount', autospec=True)
def test__mount_deploy_iso(self, mount_mock,
walk_mock, relpath_mock):
walk_mock.return_value = [('/tmpdir1/EFI/ubuntu', [], ['grub.cfg']),
('/tmpdir1/isolinux', [],
['efiboot.img', 'isolinux.bin',
'isolinux.cfg'])]
- relpath_mock.side_effect = ['EFI/ubuntu/grub.cfg',
- 'isolinux/efiboot.img']
+ relpath_mock.side_effect = iter(
+ ['EFI/ubuntu/grub.cfg', 'isolinux/efiboot.img'])
images._mount_deploy_iso('path/to/deployiso', 'tmpdir1')
mount_mock.assert_called_once_with('path/to/deployiso',
'tmpdir1', '-o', 'loop')
walk_mock.assert_called_once_with('tmpdir1')
- @mock.patch.object(images, '_umount_without_raise')
- @mock.patch.object(os.path, 'relpath')
- @mock.patch.object(os, 'walk')
- @mock.patch.object(utils, 'mount')
+ @mock.patch.object(images, '_umount_without_raise', autospec=True)
+ @mock.patch.object(os.path, 'relpath', autospec=True)
+ @mock.patch.object(os, 'walk', autospec=True)
+ @mock.patch.object(utils, 'mount', autospec=True)
def test__mount_deploy_iso_fail_no_efibootimg(self, mount_mock,
walk_mock, relpath_mock,
umount_mock):
walk_mock.return_value = [('/tmpdir1/EFI/ubuntu', [], ['grub.cfg']),
('/tmpdir1/isolinux', [],
['isolinux.bin', 'isolinux.cfg'])]
- relpath_mock.side_effect = ['EFI/ubuntu/grub.cfg']
+ relpath_mock.side_effect = iter(['EFI/ubuntu/grub.cfg'])
self.assertRaises(exception.ImageCreationFailed,
images._mount_deploy_iso,
@@ -473,10 +477,10 @@ class FsImageTestCase(base.TestCase):
walk_mock.assert_called_once_with('tmpdir1')
umount_mock.assert_called_once_with('tmpdir1')
- @mock.patch.object(images, '_umount_without_raise')
- @mock.patch.object(os.path, 'relpath')
- @mock.patch.object(os, 'walk')
- @mock.patch.object(utils, 'mount')
+ @mock.patch.object(images, '_umount_without_raise', autospec=True)
+ @mock.patch.object(os.path, 'relpath', autospec=True)
+ @mock.patch.object(os, 'walk', autospec=True)
+ @mock.patch.object(utils, 'mount', autospec=True)
def test__mount_deploy_iso_fails_no_grub_cfg(self, mount_mock,
walk_mock, relpath_mock,
umount_mock):
@@ -484,7 +488,7 @@ class FsImageTestCase(base.TestCase):
('/tmpdir1/isolinux', '',
['efiboot.img', 'isolinux.bin',
'isolinux.cfg'])]
- relpath_mock.side_effect = ['isolinux/efiboot.img']
+ relpath_mock.side_effect = iter(['isolinux/efiboot.img'])
self.assertRaises(exception.ImageCreationFailed,
images._mount_deploy_iso,
@@ -494,20 +498,20 @@ class FsImageTestCase(base.TestCase):
walk_mock.assert_called_once_with('tmpdir1')
umount_mock.assert_called_once_with('tmpdir1')
- @mock.patch.object(utils, 'mount')
+ @mock.patch.object(utils, 'mount', autospec=True)
def test__mount_deploy_iso_fail_with_ExecutionError(self, mount_mock):
mount_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.ImageCreationFailed,
images._mount_deploy_iso,
'path/to/deployiso', 'tmpdir1')
- @mock.patch.object(images, '_umount_without_raise')
- @mock.patch.object(images, '_create_root_fs')
- @mock.patch.object(utils, 'write_to_file')
- @mock.patch.object(utils, 'execute')
- @mock.patch.object(images, '_mount_deploy_iso')
- @mock.patch.object(utils, 'tempdir')
- @mock.patch.object(images, '_generate_cfg')
+ @mock.patch.object(images, '_umount_without_raise', autospec=True)
+ @mock.patch.object(images, '_create_root_fs', autospec=True)
+ @mock.patch.object(utils, 'write_to_file', autospec=True)
+ @mock.patch.object(utils, 'execute', autospec=True)
+ @mock.patch.object(images, '_mount_deploy_iso', autospec=True)
+ @mock.patch.object(utils, 'tempdir', autospec=True)
+ @mock.patch.object(images, '_generate_cfg', autospec=True)
def test_create_isolinux_image_for_uefi(self, gen_cfg_mock,
tempdir_mock, mount_mock, execute_mock,
write_to_file_mock,
@@ -524,7 +528,7 @@ class FsImageTestCase(base.TestCase):
cfg_file = 'tmpdir/isolinux/isolinux.cfg'
grubcfg = "grubcfg"
grub_file = 'tmpdir/relpath/to/grub.cfg'
- gen_cfg_mock.side_effect = [cfg, grubcfg]
+ gen_cfg_mock.side_effect = iter([cfg, grubcfg])
params = ['a=b', 'c']
isolinux_options = {'kernel': '/vmlinuz',
@@ -541,8 +545,8 @@ class FsImageTestCase(base.TestCase):
mock_file_handle.__enter__.return_value = 'tmpdir'
mock_file_handle1 = mock.MagicMock(spec=file)
mock_file_handle1.__enter__.return_value = 'mountdir'
- tempdir_mock.side_effect = [mock_file_handle,
- mock_file_handle1]
+ tempdir_mock.side_effect = iter(
+ [mock_file_handle, mock_file_handle1])
mount_mock.return_value = (uefi_path_info,
e_img_rel_path, grub_rel_path)
@@ -566,11 +570,11 @@ class FsImageTestCase(base.TestCase):
'-no-emul-boot', '-o', 'tgt_file', 'tmpdir')
umount_mock.assert_called_once_with('mountdir')
- @mock.patch.object(images, '_create_root_fs')
- @mock.patch.object(utils, 'write_to_file')
- @mock.patch.object(utils, 'tempdir')
- @mock.patch.object(utils, 'execute')
- @mock.patch.object(images, '_generate_cfg')
+ @mock.patch.object(images, '_create_root_fs', autospec=True)
+ @mock.patch.object(utils, 'write_to_file', autospec=True)
+ @mock.patch.object(utils, 'tempdir', autospec=True)
+ @mock.patch.object(utils, 'execute', autospec=True)
+ @mock.patch.object(images, '_generate_cfg', autospec=True)
def test_create_isolinux_image_for_bios(self, gen_cfg_mock,
execute_mock,
tempdir_mock, write_to_file_mock,
@@ -609,11 +613,11 @@ class FsImageTestCase(base.TestCase):
'4', '-boot-info-table', '-b', 'isolinux/isolinux.bin',
'-o', 'tgt_file', 'tmpdir')
- @mock.patch.object(images, '_umount_without_raise')
- @mock.patch.object(images, '_create_root_fs')
- @mock.patch.object(utils, 'tempdir')
- @mock.patch.object(utils, 'execute')
- @mock.patch.object(os, 'walk')
+ @mock.patch.object(images, '_umount_without_raise', autospec=True)
+ @mock.patch.object(images, '_create_root_fs', autospec=True)
+ @mock.patch.object(utils, 'tempdir', autospec=True)
+ @mock.patch.object(utils, 'execute', autospec=True)
+ @mock.patch.object(os, 'walk', autospec=True)
def test_create_isolinux_image_uefi_rootfs_fails(self, walk_mock,
utils_mock,
tempdir_mock,
@@ -624,8 +628,8 @@ class FsImageTestCase(base.TestCase):
mock_file_handle.__enter__.return_value = 'tmpdir'
mock_file_handle1 = mock.MagicMock(spec=file)
mock_file_handle1.__enter__.return_value = 'mountdir'
- tempdir_mock.side_effect = [mock_file_handle,
- mock_file_handle1]
+ tempdir_mock.side_effect = iter(
+ [mock_file_handle, mock_file_handle1])
create_root_fs_mock.side_effect = IOError
self.assertRaises(exception.ImageCreationFailed,
@@ -635,10 +639,10 @@ class FsImageTestCase(base.TestCase):
'path/to/ramdisk')
umount_mock.assert_called_once_with('mountdir')
- @mock.patch.object(images, '_create_root_fs')
- @mock.patch.object(utils, 'tempdir')
- @mock.patch.object(utils, 'execute')
- @mock.patch.object(os, 'walk')
+ @mock.patch.object(images, '_create_root_fs', autospec=True)
+ @mock.patch.object(utils, 'tempdir', autospec=True)
+ @mock.patch.object(utils, 'execute', autospec=True)
+ @mock.patch.object(os, 'walk', autospec=True)
def test_create_isolinux_image_bios_rootfs_fails(self, walk_mock,
utils_mock,
tempdir_mock,
@@ -650,13 +654,13 @@ class FsImageTestCase(base.TestCase):
'tgt_file', 'path/to/kernel',
'path/to/ramdisk')
- @mock.patch.object(images, '_umount_without_raise')
- @mock.patch.object(images, '_create_root_fs')
- @mock.patch.object(utils, 'write_to_file')
- @mock.patch.object(utils, 'tempdir')
- @mock.patch.object(utils, 'execute')
- @mock.patch.object(images, '_mount_deploy_iso')
- @mock.patch.object(images, '_generate_cfg')
+ @mock.patch.object(images, '_umount_without_raise', autospec=True)
+ @mock.patch.object(images, '_create_root_fs', autospec=True)
+ @mock.patch.object(utils, 'write_to_file', autospec=True)
+ @mock.patch.object(utils, 'tempdir', autospec=True)
+ @mock.patch.object(utils, 'execute', autospec=True)
+ @mock.patch.object(images, '_mount_deploy_iso', autospec=True)
+ @mock.patch.object(images, '_generate_cfg', autospec=True)
def test_create_isolinux_image_mkisofs_fails(self,
gen_cfg_mock,
mount_mock,
@@ -669,8 +673,8 @@ class FsImageTestCase(base.TestCase):
mock_file_handle.__enter__.return_value = 'tmpdir'
mock_file_handle1 = mock.MagicMock(spec=file)
mock_file_handle1.__enter__.return_value = 'mountdir'
- tempdir_mock.side_effect = [mock_file_handle,
- mock_file_handle1]
+ tempdir_mock.side_effect = iter(
+ [mock_file_handle, mock_file_handle1])
mount_mock.return_value = ({'a': 'a'}, 'b', 'c')
utils_mock.side_effect = processutils.ProcessExecutionError
@@ -679,13 +683,13 @@ class FsImageTestCase(base.TestCase):
'tgt_file', 'path/to/deployiso',
'path/to/kernel',
'path/to/ramdisk')
- umount_mock.assert_called_once_wth('mountdir')
+ umount_mock.assert_called_once_with('mountdir')
- @mock.patch.object(images, '_create_root_fs')
- @mock.patch.object(utils, 'write_to_file')
- @mock.patch.object(utils, 'tempdir')
- @mock.patch.object(utils, 'execute')
- @mock.patch.object(images, '_generate_cfg')
+ @mock.patch.object(images, '_create_root_fs', autospec=True)
+ @mock.patch.object(utils, 'write_to_file', autospec=True)
+ @mock.patch.object(utils, 'tempdir', autospec=True)
+ @mock.patch.object(utils, 'execute', autospec=True)
+ @mock.patch.object(images, '_generate_cfg', autospec=True)
def test_create_isolinux_image_bios_mkisofs_fails(self,
gen_cfg_mock,
utils_mock,
@@ -702,9 +706,9 @@ class FsImageTestCase(base.TestCase):
'tgt_file', 'path/to/kernel',
'path/to/ramdisk')
- @mock.patch.object(images, 'create_isolinux_image_for_uefi')
- @mock.patch.object(images, 'fetch')
- @mock.patch.object(utils, 'tempdir')
+ @mock.patch.object(images, 'create_isolinux_image_for_uefi', autospec=True)
+ @mock.patch.object(images, 'fetch', autospec=True)
+ @mock.patch.object(utils, 'tempdir', autospec=True)
def test_create_boot_iso_for_uefi(self, tempdir_mock, fetch_images_mock,
create_isolinux_mock):
mock_file_handle = mock.MagicMock(spec=file)
@@ -727,9 +731,9 @@ class FsImageTestCase(base.TestCase):
'tmpdir/deploy_iso-uuid', 'tmpdir/kernel-uuid',
'tmpdir/ramdisk-uuid', params)
- @mock.patch.object(images, 'create_isolinux_image_for_bios')
- @mock.patch.object(images, 'fetch')
- @mock.patch.object(utils, 'tempdir')
+ @mock.patch.object(images, 'create_isolinux_image_for_bios', autospec=True)
+ @mock.patch.object(images, 'fetch', autospec=True)
+ @mock.patch.object(utils, 'tempdir', autospec=True)
def test_create_boot_iso_for_bios(self, tempdir_mock, fetch_images_mock,
create_isolinux_mock):
mock_file_handle = mock.MagicMock(spec=file)
@@ -744,8 +748,12 @@ class FsImageTestCase(base.TestCase):
'tmpdir/kernel-uuid')
fetch_images_mock.assert_any_call('ctx', 'ramdisk-uuid',
'tmpdir/ramdisk-uuid')
- fetch_images_mock.assert_not_called_with('ctx', 'deploy_iso-uuid',
- 'tmpdir/deploy_iso-uuid')
+        # Note (NobodyCam): the original assert asserted that fetch_images
+        #                   was not called with parameters; this did not
+        #                   work, so I instead assert that there were only
+        #                   two calls to the mock, validating the above
+        #                   asserts.
+ self.assertEqual(2, fetch_images_mock.call_count)
params = ['root=UUID=root-uuid', 'kernel-params']
create_isolinux_mock.assert_called_once_with('output_file',
@@ -753,9 +761,9 @@ class FsImageTestCase(base.TestCase):
'tmpdir/ramdisk-uuid',
params)
- @mock.patch.object(images, 'create_isolinux_image_for_bios')
- @mock.patch.object(images, 'fetch')
- @mock.patch.object(utils, 'tempdir')
+ @mock.patch.object(images, 'create_isolinux_image_for_bios', autospec=True)
+ @mock.patch.object(images, 'fetch', autospec=True)
+ @mock.patch.object(utils, 'tempdir', autospec=True)
def test_create_boot_iso_for_bios_with_no_boot_mode(self, tempdir_mock,
fetch_images_mock,
create_isolinux_mock):
@@ -778,7 +786,7 @@ class FsImageTestCase(base.TestCase):
'tmpdir/ramdisk-uuid',
params)
- @mock.patch.object(image_service, 'get_image_service')
+ @mock.patch.object(image_service, 'get_image_service', autospec=True)
def test_get_glance_image_properties_no_such_prop(self,
image_service_mock):
@@ -796,7 +804,7 @@ class FsImageTestCase(base.TestCase):
'p2': 'v2',
'p3': None}, ret_val)
- @mock.patch.object(image_service, 'get_image_service')
+ @mock.patch.object(image_service, 'get_image_service', autospec=True)
def test_get_glance_image_properties_default_all(
self, image_service_mock):
@@ -812,7 +820,7 @@ class FsImageTestCase(base.TestCase):
self.assertEqual({'p1': 'v1',
'p2': 'v2'}, ret_val)
- @mock.patch.object(image_service, 'get_image_service')
+ @mock.patch.object(image_service, 'get_image_service', autospec=True)
def test_get_glance_image_properties_with_prop_subset(
self, image_service_mock):
@@ -830,7 +838,7 @@ class FsImageTestCase(base.TestCase):
self.assertEqual({'p1': 'v1',
'p3': 'v3'}, ret_val)
- @mock.patch.object(image_service, 'GlanceImageService')
+ @mock.patch.object(image_service, 'GlanceImageService', autospec=True)
def test_get_temp_url_for_glance_image(self, image_service_mock):
direct_url = 'swift+http://host/v1/AUTH_xx/con/obj'
diff --git a/ironic/tests/test_keystone.py b/ironic/tests/test_keystone.py
index b41a52f1b..7933ffe7e 100644
--- a/ironic/tests/test_keystone.py
+++ b/ironic/tests/test_keystone.py
@@ -46,8 +46,8 @@ class KeystoneTestCase(base.TestCase):
def test_failure_authorization(self):
self.assertRaises(exception.KeystoneFailure, keystone.get_service_url)
- @mock.patch.object(FakeCatalog, 'url_for')
- @mock.patch('keystoneclient.v2_0.client.Client')
+ @mock.patch.object(FakeCatalog, 'url_for', autospec=True)
+ @mock.patch('keystoneclient.v2_0.client.Client', autospec=True)
def test_get_url(self, mock_ks, mock_uf):
fake_url = 'http://127.0.0.1:6385'
mock_uf.return_value = fake_url
@@ -55,21 +55,21 @@ class KeystoneTestCase(base.TestCase):
res = keystone.get_service_url()
self.assertEqual(fake_url, res)
- @mock.patch.object(FakeCatalog, 'url_for')
- @mock.patch('keystoneclient.v2_0.client.Client')
+ @mock.patch.object(FakeCatalog, 'url_for', autospec=True)
+ @mock.patch('keystoneclient.v2_0.client.Client', autospec=True)
def test_url_not_found(self, mock_ks, mock_uf):
mock_uf.side_effect = ksexception.EndpointNotFound
mock_ks.return_value = FakeClient()
self.assertRaises(exception.CatalogNotFound, keystone.get_service_url)
- @mock.patch.object(FakeClient, 'has_service_catalog')
- @mock.patch('keystoneclient.v2_0.client.Client')
+ @mock.patch.object(FakeClient, 'has_service_catalog', autospec=True)
+ @mock.patch('keystoneclient.v2_0.client.Client', autospec=True)
def test_no_catalog(self, mock_ks, mock_hsc):
mock_hsc.return_value = False
mock_ks.return_value = FakeClient()
self.assertRaises(exception.KeystoneFailure, keystone.get_service_url)
- @mock.patch('keystoneclient.v2_0.client.Client')
+ @mock.patch('keystoneclient.v2_0.client.Client', autospec=True)
def test_unauthorized(self, mock_ks):
mock_ks.side_effect = ksexception.Unauthorized
self.assertRaises(exception.KeystoneUnauthorized,
@@ -80,7 +80,7 @@ class KeystoneTestCase(base.TestCase):
self.assertRaises(exception.KeystoneFailure,
keystone.get_service_url)
- @mock.patch('keystoneclient.v2_0.client.Client')
+ @mock.patch('keystoneclient.v2_0.client.Client', autospec=True)
def test_get_service_url_versionless_v2(self, mock_ks):
mock_ks.return_value = FakeClient()
self.config(group='keystone_authtoken', auth_uri='http://127.0.0.1')
@@ -91,7 +91,7 @@ class KeystoneTestCase(base.TestCase):
region_name='fake',
auth_url=expected_url)
- @mock.patch('keystoneclient.v3.client.Client')
+ @mock.patch('keystoneclient.v3.client.Client', autospec=True)
def test_get_service_url_versionless_v3(self, mock_ks):
mock_ks.return_value = FakeClient()
self.config(group='keystone_authtoken', auth_version='v3.0',
@@ -103,7 +103,7 @@ class KeystoneTestCase(base.TestCase):
region_name='fake',
auth_url=expected_url)
- @mock.patch('keystoneclient.v2_0.client.Client')
+ @mock.patch('keystoneclient.v2_0.client.Client', autospec=True)
def test_get_service_url_version_override(self, mock_ks):
mock_ks.return_value = FakeClient()
self.config(group='keystone_authtoken',
@@ -115,14 +115,14 @@ class KeystoneTestCase(base.TestCase):
region_name='fake',
auth_url=expected_url)
- @mock.patch('keystoneclient.v2_0.client.Client')
+ @mock.patch('keystoneclient.v2_0.client.Client', autospec=True)
def test_get_admin_auth_token(self, mock_ks):
fake_client = FakeClient()
fake_client.auth_token = '123456'
mock_ks.return_value = fake_client
self.assertEqual('123456', keystone.get_admin_auth_token())
- @mock.patch('keystoneclient.v2_0.client.Client')
+ @mock.patch('keystoneclient.v2_0.client.Client', autospec=True)
def test_get_region_name_v2(self, mock_ks):
mock_ks.return_value = FakeClient()
self.config(group='keystone', region_name='fake_region')
@@ -134,7 +134,7 @@ class KeystoneTestCase(base.TestCase):
region_name=expected_region,
auth_url=expected_url)
- @mock.patch('keystoneclient.v3.client.Client')
+ @mock.patch('keystoneclient.v3.client.Client', autospec=True)
def test_get_region_name_v3(self, mock_ks):
mock_ks.return_value = FakeClient()
self.config(group='keystone', region_name='fake_region')
diff --git a/ironic/tests/test_pxe_utils.py b/ironic/tests/test_pxe_utils.py
index f00a26116..1792d840a 100644
--- a/ironic/tests/test_pxe_utils.py
+++ b/ironic/tests/test_pxe_utils.py
@@ -66,6 +66,14 @@ class TestPXEUtils(db_base.DbTestCase):
}
self.agent_pxe_options.update(common_pxe_options)
+ self.ipxe_options = self.pxe_options.copy()
+ self.ipxe_options.update({
+ 'deployment_aki_path': 'http://1.2.3.4:1234/deploy_kernel',
+ 'deployment_ari_path': 'http://1.2.3.4:1234/deploy_ramdisk',
+ 'aki_path': 'http://1.2.3.4:1234/kernel',
+ 'ari_path': 'http://1.2.3.4:1234/ramdisk',
+ })
+
self.node = object_utils.create_test_node(self.context)
def test__build_pxe_config(self):
@@ -88,9 +96,39 @@ class TestPXEUtils(db_base.DbTestCase):
self.assertEqual(unicode(expected_template), rendered_template)
- @mock.patch('ironic.common.utils.create_link_without_raise')
- @mock.patch('ironic.common.utils.unlink_without_raise')
- @mock.patch('ironic.drivers.utils.get_node_mac_addresses')
+ def test__build_ipxe_config(self):
+ # NOTE(lucasagomes): iPXE is just an extension of the PXE driver,
+        # it doesn't have its own configuration option for template.
+ # More info:
+ # http://docs.openstack.org/developer/ironic/deploy/install-guide.html
+ self.config(
+ pxe_config_template='ironic/drivers/modules/ipxe_config.template',
+ group='pxe'
+ )
+ self.config(http_url='http://1.2.3.4:1234', group='pxe')
+ rendered_template = pxe_utils._build_pxe_config(
+ self.ipxe_options, CONF.pxe.pxe_config_template)
+
+ expected_template = open(
+ 'ironic/tests/drivers/ipxe_config.template').read().rstrip()
+
+ self.assertEqual(unicode(expected_template), rendered_template)
+
+ def test__build_elilo_config(self):
+ pxe_opts = self.pxe_options
+ pxe_opts['boot_mode'] = 'uefi'
+ rendered_template = pxe_utils._build_pxe_config(
+ pxe_opts, CONF.pxe.uefi_pxe_config_template)
+
+ expected_template = open(
+ 'ironic/tests/drivers/elilo_efi_pxe_config.template'
+ ).read().rstrip()
+
+ self.assertEqual(unicode(expected_template), rendered_template)
+
+ @mock.patch('ironic.common.utils.create_link_without_raise', autospec=True)
+ @mock.patch('ironic.common.utils.unlink_without_raise', autospec=True)
+ @mock.patch('ironic.drivers.utils.get_node_mac_addresses', autospec=True)
def test__write_mac_pxe_configs(self, get_macs_mock, unlink_mock,
create_link_mock):
macs = [
@@ -106,7 +144,7 @@ class TestPXEUtils(db_base.DbTestCase):
]
unlink_calls = [
mock.call('/tftpboot/pxelinux.cfg/01-00-11-22-33-44-55-66'),
- mock.call('/tftpboot/pxelinux.cfg/01-00-11-22-33-44-55-67')
+ mock.call('/tftpboot/pxelinux.cfg/01-00-11-22-33-44-55-67'),
]
with task_manager.acquire(self.context, self.node.uuid) as task:
pxe_utils._link_mac_pxe_configs(task)
@@ -114,9 +152,43 @@ class TestPXEUtils(db_base.DbTestCase):
unlink_mock.assert_has_calls(unlink_calls)
create_link_mock.assert_has_calls(create_link_calls)
- @mock.patch('ironic.common.utils.create_link_without_raise')
- @mock.patch('ironic.common.utils.unlink_without_raise')
- @mock.patch('ironic.common.dhcp_factory.DHCPFactory.provider')
+ @mock.patch('ironic.common.utils.create_link_without_raise', autospec=True)
+ @mock.patch('ironic.common.utils.unlink_without_raise', autospec=True)
+ @mock.patch('ironic.drivers.utils.get_node_mac_addresses', autospec=True)
+ def test__write_mac_ipxe_configs(self, get_macs_mock, unlink_mock,
+ create_link_mock):
+ self.config(ipxe_enabled=True, group='pxe')
+ macs = [
+ '00:11:22:33:44:55:66',
+ '00:11:22:33:44:55:67'
+ ]
+ get_macs_mock.return_value = macs
+ create_link_calls = [
+ mock.call(u'/httpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
+ '/httpboot/pxelinux.cfg/00-11-22-33-44-55-66'),
+ mock.call(u'/httpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
+ '/httpboot/pxelinux.cfg/00112233445566'),
+ mock.call(u'/httpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
+ '/httpboot/pxelinux.cfg/00-11-22-33-44-55-67'),
+ mock.call(u'/httpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
+ '/httpboot/pxelinux.cfg/00112233445567'),
+ ]
+ unlink_calls = [
+ mock.call('/httpboot/pxelinux.cfg/00-11-22-33-44-55-66'),
+ mock.call('/httpboot/pxelinux.cfg/00112233445566'),
+ mock.call('/httpboot/pxelinux.cfg/00-11-22-33-44-55-67'),
+ mock.call('/httpboot/pxelinux.cfg/00112233445567'),
+ ]
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ pxe_utils._link_mac_pxe_configs(task)
+
+ unlink_mock.assert_has_calls(unlink_calls)
+ create_link_mock.assert_has_calls(create_link_calls)
+
+ @mock.patch('ironic.common.utils.create_link_without_raise', autospec=True)
+ @mock.patch('ironic.common.utils.unlink_without_raise', autospec=True)
+ @mock.patch('ironic.common.dhcp_factory.DHCPFactory.provider',
+ autospec=True)
def test__link_ip_address_pxe_configs(self, provider_mock, unlink_mock,
create_link_mock):
ip_address = '10.10.0.1'
@@ -135,9 +207,9 @@ class TestPXEUtils(db_base.DbTestCase):
unlink_mock.assert_called_once_with('/tftpboot/0A0A0001.conf')
create_link_mock.assert_has_calls(create_link_calls)
- @mock.patch('ironic.common.utils.write_to_file')
- @mock.patch.object(pxe_utils, '_build_pxe_config')
- @mock.patch('ironic.openstack.common.fileutils.ensure_tree')
+ @mock.patch('ironic.common.utils.write_to_file', autospec=True)
+ @mock.patch.object(pxe_utils, '_build_pxe_config', autospec=True)
+ @mock.patch('ironic.openstack.common.fileutils.ensure_tree', autospec=True)
def test_create_pxe_config(self, ensure_tree_mock, build_mock,
write_mock):
build_mock.return_value = self.pxe_options
@@ -150,7 +222,7 @@ class TestPXEUtils(db_base.DbTestCase):
mock.call(os.path.join(CONF.pxe.tftp_root, self.node.uuid)),
mock.call(os.path.join(CONF.pxe.tftp_root, 'pxelinux.cfg'))
]
- ensure_tree_mock.has_calls(ensure_calls)
+ ensure_tree_mock.assert_has_calls(ensure_calls)
pxe_cfg_file_path = pxe_utils.get_pxe_config_file_path(self.node.uuid)
write_mock.assert_called_with(pxe_cfg_file_path, self.pxe_options)
@@ -179,7 +251,7 @@ class TestPXEUtils(db_base.DbTestCase):
self.config(ipxe_enabled=True, group='pxe')
self.config(http_root='/httpboot', group='pxe')
mac = '00:11:22:33:AA:BB:CC'
- self.assertEqual('/httpboot/pxelinux.cfg/00112233aabbcc',
+ self.assertEqual('/httpboot/pxelinux.cfg/00-11-22-33-aa-bb-cc',
pxe_utils._get_pxe_mac_path(mac))
def test__get_pxe_ip_address_path(self):
@@ -308,6 +380,27 @@ class TestPXEUtils(db_base.DbTestCase):
task.node.properties = properties
pxe_utils.clean_up_pxe_config(task)
- unlink_mock.assert_called_once_with('/tftpboot/0A0A0001.conf')
- rmtree_mock.assert_called_once_with(
+ unlink_mock.assert_called_once_with('/tftpboot/0A0A0001.conf')
+ rmtree_mock.assert_called_once_with(
+ os.path.join(CONF.pxe.tftp_root, self.node.uuid))
+
+ @mock.patch('ironic.common.utils.rmtree_without_raise')
+ @mock.patch('ironic.common.utils.unlink_without_raise')
+ @mock.patch('ironic.common.dhcp_factory.DHCPFactory.provider')
+ def test_clean_up_pxe_config_uefi_instance_info(self,
+ provider_mock, unlink_mock,
+ rmtree_mock):
+ ip_address = '10.10.0.1'
+ address = "aa:aa:aa:aa:aa:aa"
+ object_utils.create_test_port(self.context, node_id=self.node.id,
+ address=address)
+
+ provider_mock.get_ip_addresses.return_value = [ip_address]
+
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ task.node.instance_info['deploy_boot_mode'] = 'uefi'
+ pxe_utils.clean_up_pxe_config(task)
+
+ unlink_mock.assert_called_once_with('/tftpboot/0A0A0001.conf')
+ rmtree_mock.assert_called_once_with(
os.path.join(CONF.pxe.tftp_root, self.node.uuid))
diff --git a/ironic/tests/test_swift.py b/ironic/tests/test_swift.py
index 07af10656..9daa06ead 100644
--- a/ironic/tests/test_swift.py
+++ b/ironic/tests/test_swift.py
@@ -28,7 +28,7 @@ from ironic.tests import base
CONF = cfg.CONF
-@mock.patch.object(swift_client, 'Connection')
+@mock.patch.object(swift_client, 'Connection', autospec=True)
class SwiftTestCase(base.TestCase):
def setUp(self):
@@ -59,7 +59,7 @@ class SwiftTestCase(base.TestCase):
'auth_version': '2'}
connection_mock.assert_called_once_with(**params)
- @mock.patch.object(__builtin__, 'open')
+ @mock.patch.object(__builtin__, 'open', autospec=True)
def test_create_object(self, open_mock, connection_mock):
swiftapi = swift.SwiftAPI()
connection_obj_mock = connection_mock.return_value
@@ -77,7 +77,7 @@ class SwiftTestCase(base.TestCase):
'object', 'file-object', headers=None)
self.assertEqual('object-uuid', object_uuid)
- @mock.patch.object(__builtin__, 'open')
+ @mock.patch.object(__builtin__, 'open', autospec=True)
def test_create_object_create_container_fails(self, open_mock,
connection_mock):
swiftapi = swift.SwiftAPI()
@@ -89,7 +89,7 @@ class SwiftTestCase(base.TestCase):
connection_obj_mock.put_container.assert_called_once_with('container')
self.assertFalse(connection_obj_mock.put_object.called)
- @mock.patch.object(__builtin__, 'open')
+ @mock.patch.object(__builtin__, 'open', autospec=True)
def test_create_object_put_object_fails(self, open_mock, connection_mock):
swiftapi = swift.SwiftAPI()
mock_file_handle = mock.MagicMock(spec=file)
@@ -105,7 +105,7 @@ class SwiftTestCase(base.TestCase):
connection_obj_mock.put_object.assert_called_once_with('container',
'object', 'file-object', headers=None)
- @mock.patch.object(swift_utils, 'generate_temp_url')
+ @mock.patch.object(swift_utils, 'generate_temp_url', autospec=True)
def test_get_temp_url(self, gen_temp_url_mock, connection_mock):
swiftapi = swift.SwiftAPI()
connection_obj_mock = connection_mock.return_value
diff --git a/ironic/tests/test_utils.py b/ironic/tests/test_utils.py
index 044f32bee..4ea1cb2af 100644
--- a/ironic/tests/test_utils.py
+++ b/ironic/tests/test_utils.py
@@ -43,25 +43,25 @@ class BareMetalUtilsTestCase(base.TestCase):
self.assertEqual(100, len(s))
def test_unlink(self):
- with mock.patch.object(os, "unlink") as unlink_mock:
+ with mock.patch.object(os, "unlink", autospec=True) as unlink_mock:
unlink_mock.return_value = None
utils.unlink_without_raise("/fake/path")
unlink_mock.assert_called_once_with("/fake/path")
def test_unlink_ENOENT(self):
- with mock.patch.object(os, "unlink") as unlink_mock:
+ with mock.patch.object(os, "unlink", autospec=True) as unlink_mock:
unlink_mock.side_effect = OSError(errno.ENOENT)
utils.unlink_without_raise("/fake/path")
unlink_mock.assert_called_once_with("/fake/path")
def test_create_link(self):
- with mock.patch.object(os, "symlink") as symlink_mock:
+ with mock.patch.object(os, "symlink", autospec=True) as symlink_mock:
symlink_mock.return_value = None
utils.create_link_without_raise("/fake/source", "/fake/link")
symlink_mock.assert_called_once_with("/fake/source", "/fake/link")
def test_create_link_EEXIST(self):
- with mock.patch.object(os, "symlink") as symlink_mock:
+ with mock.patch.object(os, "symlink", autospec=True) as symlink_mock:
symlink_mock.side_effect = OSError(errno.EEXIST)
utils.create_link_without_raise("/fake/source", "/fake/link")
symlink_mock.assert_called_once_with("/fake/source", "/fake/link")
@@ -163,15 +163,15 @@ grep foo
os.unlink(tmpfilename)
os.unlink(tmpfilename2)
- @mock.patch.object(processutils, 'execute')
- @mock.patch.object(os.environ, 'copy', return_value={})
+ @mock.patch.object(processutils, 'execute', autospec=True)
+ @mock.patch.object(os.environ, 'copy', return_value={}, autospec=True)
def test_execute_use_standard_locale_no_env_variables(self, env_mock,
execute_mock):
utils.execute('foo', use_standard_locale=True)
execute_mock.assert_called_once_with('foo',
env_variables={'LC_ALL': 'C'})
- @mock.patch.object(processutils, 'execute')
+ @mock.patch.object(processutils, 'execute', autospec=True)
def test_execute_use_standard_locale_with_env_variables(self,
execute_mock):
utils.execute('foo', use_standard_locale=True,
@@ -180,7 +180,7 @@ grep foo
env_variables={'LC_ALL': 'C',
'foo': 'bar'})
- @mock.patch.object(processutils, 'execute')
+ @mock.patch.object(processutils, 'execute', autospec=True)
def test_execute_not_use_standard_locale(self, execute_mock):
utils.execute('foo', use_standard_locale=False,
env_variables={'foo': 'bar'})
@@ -188,14 +188,16 @@ grep foo
env_variables={'foo': 'bar'})
def test_execute_get_root_helper(self):
- with mock.patch.object(processutils, 'execute') as execute_mock:
+ with mock.patch.object(
+ processutils, 'execute', autospec=True) as execute_mock:
helper = utils._get_root_helper()
utils.execute('foo', run_as_root=True)
execute_mock.assert_called_once_with('foo', run_as_root=True,
root_helper=helper)
def test_execute_without_root_helper(self):
- with mock.patch.object(processutils, 'execute') as execute_mock:
+ with mock.patch.object(
+ processutils, 'execute', autospec=True) as execute_mock:
utils.execute('foo', run_as_root=False)
execute_mock.assert_called_once_with('foo', run_as_root=False)
@@ -226,7 +228,8 @@ class GenericUtilsTestCase(base.TestCase):
self.assertEqual("hello", utils.sanitize_hostname(hostname))
def test_read_cached_file(self):
- with mock.patch.object(os.path, "getmtime") as getmtime_mock:
+ with mock.patch.object(
+ os.path, "getmtime", autospec=True) as getmtime_mock:
getmtime_mock.return_value = 1
cache_data = {"data": 1123, "mtime": 1}
@@ -235,8 +238,10 @@ class GenericUtilsTestCase(base.TestCase):
getmtime_mock.assert_called_once_with(mock.ANY)
def test_read_modified_cached_file(self):
- with mock.patch.object(os.path, "getmtime") as getmtime_mock:
- with mock.patch.object(__builtin__, 'open') as open_mock:
+ with mock.patch.object(
+ os.path, "getmtime", autospec=True) as getmtime_mock:
+ with mock.patch.object(
+ __builtin__, 'open', autospec=True) as open_mock:
getmtime_mock.return_value = 2
fake_contents = "lorem ipsum"
fake_file = mock.Mock()
@@ -342,6 +347,7 @@ class GenericUtilsTestCase(base.TestCase):
self.assertFalse(utils.is_hostname_safe('-spam'))
self.assertFalse(utils.is_hostname_safe('spam-'))
self.assertTrue(utils.is_hostname_safe('spam-eggs'))
+ self.assertFalse(utils.is_hostname_safe('spam_eggs'))
self.assertFalse(utils.is_hostname_safe('spam eggs'))
self.assertTrue(utils.is_hostname_safe('spam.eggs'))
self.assertTrue(utils.is_hostname_safe('9spam'))
@@ -360,16 +366,28 @@ class GenericUtilsTestCase(base.TestCase):
# Need to ensure a binary response for success or fail
self.assertIsNotNone(utils.is_hostname_safe('spam'))
self.assertIsNotNone(utils.is_hostname_safe('-spam'))
+ self.assertTrue(utils.is_hostname_safe('www.rackspace.com'))
+ self.assertTrue(utils.is_hostname_safe('www.rackspace.com.'))
+ self.assertTrue(utils.is_hostname_safe('http._sctp.www.example.com'))
+ self.assertTrue(utils.is_hostname_safe('mail.pets_r_us.net'))
+ self.assertTrue(utils.is_hostname_safe('mail-server-15.my_host.org'))
+ self.assertFalse(utils.is_hostname_safe('www.nothere.com_'))
+ self.assertFalse(utils.is_hostname_safe('www.nothere_.com'))
+ self.assertFalse(utils.is_hostname_safe('www..nothere.com'))
+ long_str = 'a' * 63 + '.' + 'b' * 63 + '.' + 'c' * 63 + '.' + 'd' * 63
+ self.assertTrue(utils.is_hostname_safe(long_str))
+ self.assertFalse(utils.is_hostname_safe(long_str + '.'))
+ self.assertFalse(utils.is_hostname_safe('a' * 255))
def test_validate_and_normalize_mac(self):
mac = 'AA:BB:CC:DD:EE:FF'
- with mock.patch.object(utils, 'is_valid_mac') as m_mock:
+ with mock.patch.object(utils, 'is_valid_mac', autospec=True) as m_mock:
m_mock.return_value = True
self.assertEqual(mac.lower(),
utils.validate_and_normalize_mac(mac))
def test_validate_and_normalize_mac_invalid_format(self):
- with mock.patch.object(utils, 'is_valid_mac') as m_mock:
+ with mock.patch.object(utils, 'is_valid_mac', autospec=True) as m_mock:
m_mock.return_value = False
self.assertRaises(exception.InvalidMAC,
utils.validate_and_normalize_mac, 'invalid-mac')
@@ -394,7 +412,7 @@ class GenericUtilsTestCase(base.TestCase):
class MkfsTestCase(base.TestCase):
- @mock.patch.object(utils, 'execute')
+ @mock.patch.object(utils, 'execute', autospec=True)
def test_mkfs(self, execute_mock):
utils.mkfs('ext4', '/my/block/dev')
utils.mkfs('msdos', '/my/msdos/block/dev')
@@ -411,7 +429,7 @@ class MkfsTestCase(base.TestCase):
use_standard_locale=True)]
self.assertEqual(expected, execute_mock.call_args_list)
- @mock.patch.object(utils, 'execute')
+ @mock.patch.object(utils, 'execute', autospec=True)
def test_mkfs_with_label(self, execute_mock):
utils.mkfs('ext4', '/my/block/dev', 'ext4-vol')
utils.mkfs('msdos', '/my/msdos/block/dev', 'msdos-vol')
@@ -428,14 +446,14 @@ class MkfsTestCase(base.TestCase):
use_standard_locale=True)]
self.assertEqual(expected, execute_mock.call_args_list)
- @mock.patch.object(utils, 'execute',
+ @mock.patch.object(utils, 'execute', autospec=True,
side_effect=processutils.ProcessExecutionError(
stderr=os.strerror(errno.ENOENT)))
def test_mkfs_with_unsupported_fs(self, execute_mock):
self.assertRaises(exception.FileSystemNotSupported,
utils.mkfs, 'foo', '/my/block/dev')
- @mock.patch.object(utils, 'execute',
+ @mock.patch.object(utils, 'execute', autospec=True,
side_effect=processutils.ProcessExecutionError(
stderr='fake'))
def test_mkfs_with_unexpected_error(self, execute_mock):
@@ -453,13 +471,13 @@ class TempFilesTestCase(base.TestCase):
dirname = tempdir
self.assertFalse(os.path.exists(dirname))
- @mock.patch.object(shutil, 'rmtree')
- @mock.patch.object(tempfile, 'mkdtemp')
+ @mock.patch.object(shutil, 'rmtree', autospec=True)
+ @mock.patch.object(tempfile, 'mkdtemp', autospec=True)
def test_tempdir_mocked(self, mkdtemp_mock, rmtree_mock):
self.config(tempdir='abc')
mkdtemp_mock.return_value = 'temp-dir'
- kwargs = {'a': 'b'}
+ kwargs = {'dir': 'b'}
with utils.tempdir(**kwargs) as tempdir:
self.assertEqual('temp-dir', tempdir)
@@ -468,9 +486,9 @@ class TempFilesTestCase(base.TestCase):
mkdtemp_mock.assert_called_once_with(**kwargs)
rmtree_mock.assert_called_once_with(tempdir_created)
- @mock.patch.object(utils, 'LOG')
- @mock.patch.object(shutil, 'rmtree')
- @mock.patch.object(tempfile, 'mkdtemp')
+ @mock.patch.object(utils, 'LOG', autospec=True)
+ @mock.patch.object(shutil, 'rmtree', autospec=True)
+ @mock.patch.object(tempfile, 'mkdtemp', autospec=True)
def test_tempdir_mocked_error_on_rmtree(self, mkdtemp_mock, rmtree_mock,
log_mock):
diff --git a/requirements.txt b/requirements.txt
index 54ed1be48..5233bb45a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,7 +4,7 @@
pbr>=0.6,!=0.7,<1.0
SQLAlchemy>=0.9.7,<=0.9.99
alembic>=0.7.2
-eventlet>=0.16.1
+eventlet>=0.16.1,!=0.17.0
lxml>=2.3
WebOb>=1.2.3
greenlet>=0.3.2
@@ -16,25 +16,25 @@ python-neutronclient>=2.3.11,<3
python-glanceclient>=0.15.0
python-keystoneclient>=1.1.0
python-swiftclient>=2.2.0
-stevedore>=1.1.0 # Apache-2.0
+stevedore>=1.3.0,<1.4.0 # Apache-2.0
pysendfile==2.0.0
websockify>=0.6.0,<0.7
-oslo.concurrency>=1.4.1 # Apache-2.0
-oslo.config>=1.9.0 # Apache-2.0
-oslo.context>=0.2.0
-oslo.db>=1.5.0 # Apache-2.0
-oslo.rootwrap>=1.5.0
-oslo.i18n>=1.3.0 # Apache-2.0
-oslo.policy>=0.3.0 # Apache-2.0
-oslo.serialization>=1.2.0 # Apache-2.0
-oslo.utils>=1.2.0 # Apache-2.0
+oslo.concurrency>=1.8.0,<1.9.0 # Apache-2.0
+oslo.config>=1.9.3,<1.10.0 # Apache-2.0
+oslo.context>=0.2.0,<0.3.0 # Apache-2.0
+oslo.db>=1.7.0,<1.8.0 # Apache-2.0
+oslo.rootwrap>=1.6.0,<1.7.0 # Apache-2.0
+oslo.i18n>=1.5.0,<1.6.0 # Apache-2.0
+oslo.policy>=0.3.1,<0.4.0 # Apache-2.0
+oslo.serialization>=1.4.0,<1.5.0 # Apache-2.0
+oslo.utils>=1.4.0,<1.5.0 # Apache-2.0
pecan>=0.8.0
requests>=2.2.0,!=2.4.0
six>=1.9.0
jsonpatch>=1.1
WSME>=0.6
Jinja2>=2.6 # BSD License (3 clause)
-keystonemiddleware>=1.0.0
-oslo.messaging>=1.6.0 # Apache-2.0
+keystonemiddleware>=1.5.0
+oslo.messaging>=1.8.0,<1.9.0 # Apache-2.0
retrying>=1.2.3,!=1.3.0 # Apache-2.0
posix_ipc
diff --git a/test-requirements.txt b/test-requirements.txt
index 9506ee49b..917126967 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -8,7 +8,7 @@ fixtures>=0.3.14
mock>=1.0
Babel>=1.3
MySQL-python
-oslotest>=1.2.0 # Apache-2.0
+oslotest>=1.5.1,<1.6.0 # Apache-2.0
psycopg2
python-ironicclient>=0.2.1
python-subunit>=0.0.18
@@ -18,5 +18,5 @@ testtools>=0.9.36,!=1.2.0
# Doc requirements
sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3
sphinxcontrib-pecanwsme>=0.8
-oslosphinx>=2.2.0 # Apache-2.0
+oslosphinx>=2.5.0,<2.6.0 # Apache-2.0
diff --git a/tools/config/oslo.config.generator.rc b/tools/config/oslo.config.generator.rc
index e224cf753..84c865ab5 100644
--- a/tools/config/oslo.config.generator.rc
+++ b/tools/config/oslo.config.generator.rc
@@ -1,2 +1,2 @@
-export IRONIC_CONFIG_GENERATOR_EXTRA_LIBRARIES='oslo.db oslo.messaging keystonemiddleware.auth_token'
+export IRONIC_CONFIG_GENERATOR_EXTRA_LIBRARIES='oslo.db oslo.messaging keystonemiddleware.auth_token oslo.concurrency oslo.policy'
export IRONIC_CONFIG_GENERATOR_EXTRA_MODULES=
diff --git a/tools/states_to_dot.py b/tools/states_to_dot.py
index 75f46fc9a..4fa76fdcf 100755
--- a/tools/states_to_dot.py
+++ b/tools/states_to_dot.py
@@ -66,7 +66,7 @@ def main():
options.filename = 'states.%s' % options.format
source = states.machine
- graph_name = "Ironic states"
+ graph_name = '"Ironic states"'
g = pydot.Dot(graph_name=graph_name, rankdir='LR',
nodesep='0.25', overlap='false',
ranksep="0.5", splines='true',
diff --git a/tox.ini b/tox.ini
index 9b46e5ad3..5b83d7f5a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -43,6 +43,7 @@ commands =
bash tools/config/generate_sample.sh -b . -p ironic -o etc/ironic
[testenv:gendocs]
+setenv = PYTHONHASHSEED=0
sitepackages = False
envdir = {toxworkdir}/venv
commands =
@@ -50,7 +51,6 @@ commands =
[testenv:venv]
setenv = PYTHONHASHSEED=0
- LANGUAGE=en_US
commands = {posargs}
[flake8]