summaryrefslogtreecommitdiff
path: root/ironic/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'ironic/drivers')
-rw-r--r--ironic/drivers/generic.py4
-rw-r--r--ironic/drivers/modules/ansible/__init__.py0
-rw-r--r--ironic/drivers/modules/ansible/deploy.py619
-rw-r--r--ironic/drivers/modules/ansible/playbooks/add-ironic-nodes.yaml11
-rw-r--r--ironic/drivers/modules/ansible/playbooks/ansible.cfg35
-rw-r--r--ironic/drivers/modules/ansible/playbooks/callback_plugins/ironic_log.ini15
-rw-r--r--ironic/drivers/modules/ansible/playbooks/callback_plugins/ironic_log.py148
-rw-r--r--ironic/drivers/modules/ansible/playbooks/clean.yaml6
-rw-r--r--ironic/drivers/modules/ansible/playbooks/clean_steps.yaml19
-rw-r--r--ironic/drivers/modules/ansible/playbooks/deploy.yaml12
-rw-r--r--ironic/drivers/modules/ansible/playbooks/inventory1
-rw-r--r--ironic/drivers/modules/ansible/playbooks/library/facts_wwn.py64
-rw-r--r--ironic/drivers/modules/ansible/playbooks/library/root_hints.py97
-rw-r--r--ironic/drivers/modules/ansible/playbooks/library/stream_url.py118
-rw-r--r--ironic/drivers/modules/ansible/playbooks/roles/clean/defaults/main.yaml1
-rw-r--r--ironic/drivers/modules/ansible/playbooks/roles/clean/tasks/main.yaml6
-rw-r--r--ironic/drivers/modules/ansible/playbooks/roles/clean/tasks/shred.yaml8
-rw-r--r--ironic/drivers/modules/ansible/playbooks/roles/clean/tasks/wipe.yaml24
-rw-r--r--ironic/drivers/modules/ansible/playbooks/roles/clean/tasks/zap.yaml16
-rw-r--r--ironic/drivers/modules/ansible/playbooks/roles/configure/defaults/main.yaml1
-rw-r--r--ironic/drivers/modules/ansible/playbooks/roles/configure/tasks/grub.yaml79
-rw-r--r--ironic/drivers/modules/ansible/playbooks/roles/configure/tasks/main.yaml4
-rw-r--r--ironic/drivers/modules/ansible/playbooks/roles/configure/tasks/mounts.yaml8
-rwxr-xr-xironic/drivers/modules/ansible/playbooks/roles/deploy/files/partition_configdrive.sh110
-rw-r--r--ironic/drivers/modules/ansible/playbooks/roles/deploy/tasks/configdrive.yaml44
-rw-r--r--ironic/drivers/modules/ansible/playbooks/roles/deploy/tasks/download.yaml13
-rw-r--r--ironic/drivers/modules/ansible/playbooks/roles/deploy/tasks/main.yaml7
-rw-r--r--ironic/drivers/modules/ansible/playbooks/roles/deploy/tasks/write.yaml20
-rw-r--r--ironic/drivers/modules/ansible/playbooks/roles/discover/tasks/main.yaml13
-rw-r--r--ironic/drivers/modules/ansible/playbooks/roles/discover/tasks/roothints.yaml9
-rw-r--r--ironic/drivers/modules/ansible/playbooks/roles/prepare/tasks/main.yaml2
-rw-r--r--ironic/drivers/modules/ansible/playbooks/roles/prepare/tasks/parted.yaml45
-rw-r--r--ironic/drivers/modules/ansible/playbooks/roles/shutdown/tasks/main.yaml6
-rw-r--r--ironic/drivers/modules/ansible/playbooks/shutdown.yaml6
34 files changed, 1570 insertions, 1 deletion
diff --git a/ironic/drivers/generic.py b/ironic/drivers/generic.py
index 6e232831b..a651ac667 100644
--- a/ironic/drivers/generic.py
+++ b/ironic/drivers/generic.py
@@ -18,6 +18,7 @@ Generic hardware types.
from ironic.drivers import hardware_type
from ironic.drivers.modules import agent
+from ironic.drivers.modules.ansible import deploy as ansible_deploy
from ironic.drivers.modules import fake
from ironic.drivers.modules import inspector
from ironic.drivers.modules import iscsi_deploy
@@ -45,7 +46,8 @@ class GenericHardware(hardware_type.AbstractHardwareType):
@property
def supported_deploy_interfaces(self):
"""List of supported deploy interfaces."""
- return [iscsi_deploy.ISCSIDeploy, agent.AgentDeploy]
+ return [iscsi_deploy.ISCSIDeploy, agent.AgentDeploy,
+ ansible_deploy.AnsibleDeploy]
@property
def supported_inspect_interfaces(self):
diff --git a/ironic/drivers/modules/ansible/__init__.py b/ironic/drivers/modules/ansible/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ironic/drivers/modules/ansible/__init__.py
diff --git a/ironic/drivers/modules/ansible/deploy.py b/ironic/drivers/modules/ansible/deploy.py
new file mode 100644
index 000000000..af30290e2
--- /dev/null
+++ b/ironic/drivers/modules/ansible/deploy.py
@@ -0,0 +1,619 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Ansible deploy interface
+"""
+
+import json
+import os
+import shlex
+
+from ironic_lib import metrics_utils
+from ironic_lib import utils as irlib_utils
+from oslo_concurrency import processutils
+from oslo_log import log
+from oslo_utils import strutils
+from oslo_utils import units
+import retrying
+import six
+import six.moves.urllib.parse as urlparse
+import yaml
+
+from ironic.common import dhcp_factory
+from ironic.common import exception
+from ironic.common.i18n import _
+from ironic.common import images
+from ironic.common import states
+from ironic.common import utils
+from ironic.conductor import task_manager
+from ironic.conductor import utils as manager_utils
+from ironic.conf import CONF
+from ironic.drivers import base
+from ironic.drivers.modules import agent_base_vendor as agent_base
+from ironic.drivers.modules import deploy_utils
+
+
+LOG = log.getLogger(__name__)
+
+METRICS = metrics_utils.get_metrics_logger(__name__)
+
+DEFAULT_PLAYBOOKS = {
+ 'deploy': 'deploy.yaml',
+ 'shutdown': 'shutdown.yaml',
+ 'clean': 'clean.yaml'
+}
+DEFAULT_CLEAN_STEPS = 'clean_steps.yaml'
+
+OPTIONAL_PROPERTIES = {
+ 'ansible_deploy_username': _('Deploy ramdisk username for Ansible. '
+ 'This user must have passwordless sudo '
+ 'permissions. Default is "ansible". '
+ 'Optional.'),
+ 'ansible_deploy_key_file': _('Path to private key file. If not specified, '
+ 'default keys for user running '
+ 'ironic-conductor process will be used. '
+ 'Note that for keys with password, those '
+ 'must be pre-loaded into ssh-agent. '
+ 'Optional.'),
+ 'ansible_deploy_playbook': _('Name of the Ansible playbook used for '
+ 'deployment. Default is %s. Optional.'
+ ) % DEFAULT_PLAYBOOKS['deploy'],
+ 'ansible_shutdown_playbook': _('Name of the Ansible playbook used to '
+ 'power off the node in-band. '
+ 'Default is %s. Optional.'
+ ) % DEFAULT_PLAYBOOKS['shutdown'],
+ 'ansible_clean_playbook': _('Name of the Ansible playbook used for '
+ 'cleaning. Default is %s. Optional.'
+ ) % DEFAULT_PLAYBOOKS['clean'],
+ 'ansible_clean_steps_config': _('Name of the file with default cleaning '
+ 'steps configuration. Default is %s. '
+ 'Optional.'
+ ) % DEFAULT_CLEAN_STEPS
+}
+COMMON_PROPERTIES = OPTIONAL_PROPERTIES
+
+INVENTORY_FILE = os.path.join(CONF.ansible.playbooks_path, 'inventory')
+
+
+class PlaybookNotFound(exception.IronicException):
+ _msg_fmt = _('Failed to set ansible playbook for action %(action)s')
+
+
+def _parse_ansible_driver_info(node, action='deploy'):
+ user = node.driver_info.get('ansible_deploy_username', 'ansible')
+ key = node.driver_info.get('ansible_deploy_key_file')
+ playbook = node.driver_info.get('ansible_%s_playbook' % action,
+ DEFAULT_PLAYBOOKS.get(action))
+ if not playbook:
+ raise PlaybookNotFound(action=action)
+ return playbook, user, key
+
+
+def _get_configdrive_path(basename):
+ return os.path.join(CONF.tempdir, basename + '.cndrive')
+
+
+def _get_node_ip(task):
+ callback_url = task.node.driver_internal_info.get('agent_url', '')
+ return urlparse.urlparse(callback_url).netloc.split(':')[0]
+
+
+def _prepare_extra_vars(host_list, variables=None):
+ nodes_var = []
+ for node_uuid, ip, user, extra in host_list:
+ nodes_var.append(dict(name=node_uuid, ip=ip, user=user, extra=extra))
+ extra_vars = dict(nodes=nodes_var)
+ if variables:
+ extra_vars.update(variables)
+ return extra_vars
+
+
+def _run_playbook(name, extra_vars, key, tags=None, notags=None):
+ """Execute ansible-playbook."""
+ playbook = os.path.join(CONF.ansible.playbooks_path, name)
+ ironic_vars = {'ironic': extra_vars}
+ args = [CONF.ansible.ansible_playbook_script, playbook,
+ '-i', INVENTORY_FILE,
+ '-e', json.dumps(ironic_vars),
+ ]
+
+ if CONF.ansible.config_file_path:
+ env = ['env', 'ANSIBLE_CONFIG=%s' % CONF.ansible.config_file_path]
+ args = env + args
+
+ if tags:
+ args.append('--tags=%s' % ','.join(tags))
+
+ if notags:
+ args.append('--skip-tags=%s' % ','.join(notags))
+
+ if key:
+ args.append('--private-key=%s' % key)
+
+ verbosity = CONF.ansible.verbosity
+ if verbosity is None and CONF.debug:
+ verbosity = 4
+ if verbosity:
+ args.append('-' + 'v' * verbosity)
+
+ if CONF.ansible.ansible_extra_args:
+ args.extend(shlex.split(CONF.ansible.ansible_extra_args))
+
+ try:
+ out, err = utils.execute(*args)
+ return out, err
+ except processutils.ProcessExecutionError as e:
+ raise exception.InstanceDeployFailure(reason=e)
+
+
+def _calculate_memory_req(task):
+ image_source = task.node.instance_info['image_source']
+ image_size = images.download_size(task.context, image_source)
+ return image_size // units.Mi + CONF.ansible.extra_memory
+
+
+def _parse_partitioning_info(node):
+
+ info = node.instance_info
+ i_info = {'label': deploy_utils.get_disk_label(node) or 'msdos'}
+ is_gpt = i_info['label'] == 'gpt'
+ unit = 'MiB'
+ partitions = {}
+
+ def add_partition(name, start, end):
+ partitions[name] = {'number': len(partitions) + 1,
+ 'part_start': '%i%s' % (start, unit),
+ 'part_end': '%i%s' % (end, unit)}
+ if is_gpt:
+ partitions[name]['name'] = name
+
+ end = 1
+ if is_gpt:
+ # prepend 1MiB bios_grub partition for GPT so that grub(2) installs
+ start, end = end, end + 1
+ add_partition('bios', start, end)
+ partitions['bios']['flags'] = ['bios_grub']
+
+ ephemeral_mb = info['ephemeral_mb']
+ if ephemeral_mb:
+ start, end = end, end + ephemeral_mb
+ add_partition('ephemeral', start, end)
+ i_info['ephemeral_format'] = info['ephemeral_format']
+ i_info['preserve_ephemeral'] = (
+ 'yes' if info['preserve_ephemeral'] else 'no')
+
+ swap_mb = info['swap_mb']
+ if swap_mb:
+ start, end = end, end + swap_mb
+ add_partition('swap', start, end)
+
+ configdrive = info.get('configdrive')
+ if configdrive:
+ # pre-create 64MiB partition for configdrive
+ start, end = end, end + 64
+ add_partition('configdrive', start, end)
+
+ # NOTE(pas-ha) make the root partition last so that
+ # e.g. cloud-init can grow it on first start
+ start, end = end, end + info['root_mb']
+ add_partition('root', start, end)
+ if not is_gpt:
+ partitions['root']['flags'] = ['boot']
+ i_info['partitions'] = partitions
+ return {'partition_info': i_info}
+
+
+def _parse_root_device_hints(node):
+ """Convert string with hints to dict. """
+ root_device = node.properties.get('root_device')
+ if not root_device:
+ return {}
+ try:
+ parsed_hints = irlib_utils.parse_root_device_hints(root_device)
+ except ValueError as e:
+ raise exception.InvalidParameterValue(
+ _('Failed to validate the root device hints for node %(node)s. '
+ 'Error: %(error)s') % {'node': node.uuid, 'error': e})
+ root_device_hints = {}
+ advanced = {}
+ for hint, value in parsed_hints.items():
+ if isinstance(value, six.string_types):
+ if value.startswith('== '):
+ root_device_hints[hint] = int(value[3:])
+ elif value.startswith('s== '):
+ root_device_hints[hint] = urlparse.unquote(value[4:])
+ else:
+ advanced[hint] = value
+ else:
+ root_device_hints[hint] = value
+ if advanced:
+ raise exception.InvalidParameterValue(
+ _('Ansible-deploy does not support advanced root device hints '
+ 'based on oslo.utils operators. '
+ 'Present advanced hints for node %(node)s are %(hints)s.') % {
+ 'node': node.uuid, 'hints': advanced})
+ return root_device_hints
+
+
+def _add_ssl_image_options(image):
+ image['validate_certs'] = ('no' if CONF.ansible.image_store_insecure
+ else 'yes')
+ if CONF.ansible.image_store_cafile:
+ image['cafile'] = CONF.ansible.image_store_cafile
+ if CONF.ansible.image_store_certfile and CONF.ansible.image_store_keyfile:
+ image['client_cert'] = CONF.ansible.image_store_certfile
+ image['client_key'] = CONF.ansible.image_store_keyfile
+
+
+def _prepare_variables(task):
+ node = task.node
+ i_info = node.instance_info
+ image = {}
+ for i_key, i_value in i_info.items():
+ if i_key.startswith('image_'):
+ image[i_key[6:]] = i_value
+ image['mem_req'] = _calculate_memory_req(task)
+
+ checksum = image.get('checksum')
+ if checksum:
+ # NOTE(pas-ha) checksum can be in <algo>:<checksum> format
+ # as supported by various Ansible modules, mostly good for
+ # standalone Ironic case when instance_info is populated manually.
+ # With no <algo> we take that instance_info is populated from Glance,
+ # where API reports checksum as MD5 always.
+ if ':' not in checksum:
+ image['checksum'] = 'md5:%s' % checksum
+ _add_ssl_image_options(image)
+ variables = {'image': image}
+ configdrive = i_info.get('configdrive')
+ if configdrive:
+ if urlparse.urlparse(configdrive).scheme in ('http', 'https'):
+ cfgdrv_type = 'url'
+ cfgdrv_location = configdrive
+ else:
+ cfgdrv_location = _get_configdrive_path(node.uuid)
+ with open(cfgdrv_location, 'w') as f:
+ f.write(configdrive)
+ cfgdrv_type = 'file'
+ variables['configdrive'] = {'type': cfgdrv_type,
+ 'location': cfgdrv_location}
+
+ root_device_hints = _parse_root_device_hints(node)
+ if root_device_hints:
+ variables['root_device_hints'] = root_device_hints
+
+ return variables
+
+
+def _validate_clean_steps(steps, node_uuid):
+ missing = []
+ for step in steps:
+ name = step.get('name')
+ if not name:
+ missing.append({'name': 'undefined', 'field': 'name'})
+ continue
+ if 'interface' not in step:
+ missing.append({'name': name, 'field': 'interface'})
+ args = step.get('args', {})
+ for arg_name, arg in args.items():
+ if arg.get('required', False) and 'value' not in arg:
+ missing.append({'name': name,
+ 'field': '%s.value' % arg_name})
+ if missing:
+ err_string = ', '.join(
+ 'name %(name)s, field %(field)s' % i for i in missing)
+ msg = _("Malformed clean_steps file: %s") % err_string
+ LOG.error(msg)
+ raise exception.NodeCleaningFailure(node=node_uuid,
+ reason=msg)
+ if len(set(s['name'] for s in steps)) != len(steps):
+ msg = _("Cleaning steps do not have unique names.")
+ LOG.error(msg)
+ raise exception.NodeCleaningFailure(node=node_uuid,
+ reason=msg)
+
+
+def _get_clean_steps(node, interface=None, override_priorities=None):
+ """Get cleaning steps."""
+ clean_steps_file = node.driver_info.get('ansible_clean_steps_config',
+ DEFAULT_CLEAN_STEPS)
+ path = os.path.join(CONF.ansible.playbooks_path, clean_steps_file)
+ try:
+ with open(path) as f:
+ internal_steps = yaml.safe_load(f)
+ except Exception as e:
+ msg = _('Failed to load clean steps from file '
+ '%(file)s: %(exc)s') % {'file': path, 'exc': e}
+ raise exception.NodeCleaningFailure(node=node.uuid, reason=msg)
+
+ _validate_clean_steps(internal_steps, node.uuid)
+
+ steps = []
+ override = override_priorities or {}
+ for params in internal_steps:
+ name = params['name']
+ clean_if = params['interface']
+ if interface is not None and interface != clean_if:
+ continue
+ new_priority = override.get(name)
+ priority = (new_priority if new_priority is not None else
+ params.get('priority', 0))
+ args = {}
+ argsinfo = params.get('args', {})
+ for arg, arg_info in argsinfo.items():
+ args[arg] = arg_info.pop('value', None)
+ step = {
+ 'interface': clean_if,
+ 'step': name,
+ 'priority': priority,
+ 'abortable': False,
+ 'argsinfo': argsinfo,
+ 'args': args
+ }
+ steps.append(step)
+
+ return steps
+
+
+class AnsibleDeploy(agent_base.HeartbeatMixin, base.DeployInterface):
+ """Interface for deploy-related actions."""
+
+ def __init__(self):
+ super(AnsibleDeploy, self).__init__()
+ # NOTE(pas-ha) overriding agent creation as we won't be
+ # communicating with it, only processing heartbeats
+ self._client = None
+
+ def get_properties(self):
+ """Return the properties of the interface."""
+ props = COMMON_PROPERTIES.copy()
+ # NOTE(pas-ha) this is to get the deploy_forces_oob_reboot property
+ props.update(agent_base.VENDOR_PROPERTIES)
+ return props
+
+ @METRICS.timer('AnsibleDeploy.validate')
+ def validate(self, task):
+ """Validate the driver-specific Node deployment info."""
+ task.driver.boot.validate(task)
+
+ node = task.node
+ iwdi = node.driver_internal_info.get('is_whole_disk_image')
+ if not iwdi and deploy_utils.get_boot_option(node) == "netboot":
+ raise exception.InvalidParameterValue(_(
+ "Node %(node)s is configured to use the %(driver)s driver "
+ "which does not support netboot.") % {'node': node.uuid,
+ 'driver': node.driver})
+
+ params = {}
+ image_source = node.instance_info.get('image_source')
+ params['instance_info.image_source'] = image_source
+ error_msg = _('Node %s failed to validate deploy image info. Some '
+ 'parameters were missing') % node.uuid
+ deploy_utils.check_for_missing_params(params, error_msg)
+ # validate root device hints, proper exceptions are raised from there
+ _parse_root_device_hints(node)
+
+ def _ansible_deploy(self, task, node_address):
+ """Internal function for deployment to a node."""
+ node = task.node
+ LOG.debug('IP of node %(node)s is %(ip)s',
+ {'node': node.uuid, 'ip': node_address})
+ variables = _prepare_variables(task)
+ if not node.driver_internal_info.get('is_whole_disk_image'):
+ variables.update(_parse_partitioning_info(task.node))
+ playbook, user, key = _parse_ansible_driver_info(task.node)
+ node_list = [(node.uuid, node_address, user, node.extra)]
+ extra_vars = _prepare_extra_vars(node_list, variables=variables)
+
+ LOG.debug('Starting deploy on node %s', node.uuid)
+ # any caller should manage exceptions raised from here
+ _run_playbook(playbook, extra_vars, key)
+
+ @METRICS.timer('AnsibleDeploy.deploy')
+ @task_manager.require_exclusive_lock
+ def deploy(self, task):
+ """Perform a deployment to a node."""
+ manager_utils.node_power_action(task, states.REBOOT)
+ return states.DEPLOYWAIT
+
+ @METRICS.timer('AnsibleDeploy.tear_down')
+ @task_manager.require_exclusive_lock
+ def tear_down(self, task):
+ """Tear down a previous deployment on the task's node."""
+ manager_utils.node_power_action(task, states.POWER_OFF)
+ task.driver.network.unconfigure_tenant_networks(task)
+ return states.DELETED
+
+ @METRICS.timer('AnsibleDeploy.prepare')
+ def prepare(self, task):
+ """Prepare the deployment environment for this node."""
+ node = task.node
+ # TODO(pas-ha) investigate takeover scenario
+ if node.provision_state == states.DEPLOYING:
+ # adding network-driver dependent provisioning ports
+ manager_utils.node_power_action(task, states.POWER_OFF)
+ task.driver.network.add_provisioning_network(task)
+ if node.provision_state not in [states.ACTIVE, states.ADOPTING]:
+ node.instance_info = deploy_utils.build_instance_info_for_deploy(
+ task)
+ node.save()
+ boot_opt = deploy_utils.build_agent_options(node)
+ task.driver.boot.prepare_ramdisk(task, boot_opt)
+
+ @METRICS.timer('AnsibleDeploy.clean_up')
+ def clean_up(self, task):
+ """Clean up the deployment environment for this node."""
+ task.driver.boot.clean_up_ramdisk(task)
+ provider = dhcp_factory.DHCPFactory()
+ provider.clean_dhcp(task)
+ irlib_utils.unlink_without_raise(
+ _get_configdrive_path(task.node.uuid))
+
+ def take_over(self, task):
+ LOG.error("Ansible deploy does not support take over. "
+ "You must redeploy the node %s explicitly.",
+ task.node.uuid)
+
+ def get_clean_steps(self, task):
+ """Get the list of clean steps from the file.
+
+ :param task: a TaskManager object containing the node
+ :returns: A list of clean step dictionaries
+ """
+ new_priorities = {
+ 'erase_devices': CONF.deploy.erase_devices_priority,
+ 'erase_devices_metadata':
+ CONF.deploy.erase_devices_metadata_priority
+ }
+ return _get_clean_steps(task.node, interface='deploy',
+ override_priorities=new_priorities)
+
+ @METRICS.timer('AnsibleDeploy.execute_clean_step')
+ def execute_clean_step(self, task, step):
+ """Execute a clean step.
+
+ :param task: a TaskManager object containing the node
+ :param step: a clean step dictionary to execute
+ :returns: None
+ """
+ node = task.node
+ playbook, user, key = _parse_ansible_driver_info(
+ task.node, action='clean')
+ stepname = step['step']
+
+ node_address = _get_node_ip(task)
+
+ node_list = [(node.uuid, node_address, user, node.extra)]
+ extra_vars = _prepare_extra_vars(node_list)
+
+ LOG.debug('Starting cleaning step %(step)s on node %(node)s',
+ {'node': node.uuid, 'step': stepname})
+ step_tags = step['args'].get('tags', [])
+ try:
+ _run_playbook(playbook, extra_vars, key,
+ tags=step_tags)
+ except exception.InstanceDeployFailure as e:
+ LOG.error("Ansible failed cleaning step %(step)s "
+ "on node %(node)s.",
+ {'node': node.uuid, 'step': stepname})
+ manager_utils.cleaning_error_handler(task, six.text_type(e))
+ else:
+ LOG.info('Ansible completed cleaning step %(step)s '
+ 'on node %(node)s.',
+ {'node': node.uuid, 'step': stepname})
+
+ @METRICS.timer('AnsibleDeploy.prepare_cleaning')
+ def prepare_cleaning(self, task):
+ """Boot into the ramdisk to prepare for cleaning.
+
+ :param task: a TaskManager object containing the node
+ :raises NodeCleaningFailure: if the previous cleaning ports cannot
+ be removed or if new cleaning ports cannot be created
+ :returns: None or states.CLEANWAIT for async prepare.
+ """
+ node = task.node
+ manager_utils.set_node_cleaning_steps(task)
+ if not node.driver_internal_info['clean_steps']:
+ # no clean steps configured, nothing to do.
+ return
+ task.driver.network.add_cleaning_network(task)
+ boot_opt = deploy_utils.build_agent_options(node)
+ task.driver.boot.prepare_ramdisk(task, boot_opt)
+ manager_utils.node_power_action(task, states.REBOOT)
+ return states.CLEANWAIT
+
+ @METRICS.timer('AnsibleDeploy.tear_down_cleaning')
+ def tear_down_cleaning(self, task):
+ """Clean up the PXE and DHCP files after cleaning.
+
+ :param task: a TaskManager object containing the node
+ :raises NodeCleaningFailure: if the cleaning ports cannot be
+ removed
+ """
+ manager_utils.node_power_action(task, states.POWER_OFF)
+ task.driver.boot.clean_up_ramdisk(task)
+ task.driver.network.remove_cleaning_network(task)
+
+ @METRICS.timer('AnsibleDeploy.continue_deploy')
+ def continue_deploy(self, task):
+ # NOTE(pas-ha) the lock should be already upgraded in heartbeat,
+ # just setting its purpose for better logging
+ task.upgrade_lock(purpose='deploy')
+ task.process_event('resume')
+ # NOTE(pas-ha) this method is called from heartbeat processing only,
+ # so we are sure we need this particular method, not the general one
+ node_address = _get_node_ip(task)
+ self._ansible_deploy(task, node_address)
+ self.reboot_to_instance(task)
+
+ @METRICS.timer('AnsibleDeploy.reboot_to_instance')
+ def reboot_to_instance(self, task):
+ node = task.node
+ LOG.info('Ansible complete deploy on node %s', node.uuid)
+
+ LOG.debug('Rebooting node %s to instance', node.uuid)
+ manager_utils.node_set_boot_device(task, 'disk', persistent=True)
+ self.reboot_and_finish_deploy(task)
+ task.driver.boot.clean_up_ramdisk(task)
+
+ @METRICS.timer('AnsibleDeploy.reboot_and_finish_deploy')
+ def reboot_and_finish_deploy(self, task):
+ wait = CONF.ansible.post_deploy_get_power_state_retry_interval * 1000
+ attempts = CONF.ansible.post_deploy_get_power_state_retries + 1
+
+ @retrying.retry(
+ stop_max_attempt_number=attempts,
+ retry_on_result=lambda state: state != states.POWER_OFF,
+ wait_fixed=wait
+ )
+ def _wait_until_powered_off(task):
+ return task.driver.power.get_power_state(task)
+
+ node = task.node
+ oob_power_off = strutils.bool_from_string(
+ node.driver_info.get('deploy_forces_oob_reboot', False))
+ try:
+ if not oob_power_off:
+ try:
+ node_address = _get_node_ip(task)
+ playbook, user, key = _parse_ansible_driver_info(
+ node, action='shutdown')
+ node_list = [(node.uuid, node_address, user, node.extra)]
+ extra_vars = _prepare_extra_vars(node_list)
+ _run_playbook(playbook, extra_vars, key)
+ _wait_until_powered_off(task)
+ except Exception as e:
+ LOG.warning('Failed to soft power off node %(node_uuid)s '
+ 'in at least %(timeout)d seconds. '
+ 'Error: %(error)s',
+ {'node_uuid': node.uuid,
+ 'timeout': (wait * (attempts - 1)) / 1000,
+ 'error': e})
+ # NOTE(pas-ha) flush is a part of deploy playbook
+ # so if it finished successfully we can safely
+ # power off the node out-of-band
+ manager_utils.node_power_action(task, states.POWER_OFF)
+ else:
+ manager_utils.node_power_action(task, states.POWER_OFF)
+ task.driver.network.remove_provisioning_network(task)
+ task.driver.network.configure_tenant_networks(task)
+ manager_utils.node_power_action(task, states.POWER_ON)
+ except Exception as e:
+ msg = (_('Error rebooting node %(node)s after deploy. '
+ 'Error: %(error)s') %
+ {'node': node.uuid, 'error': e})
+ agent_base.log_and_raise_deployment_error(task, msg)
+
+ task.process_event('done')
+ LOG.info('Deployment to node %s done', task.node.uuid)
diff --git a/ironic/drivers/modules/ansible/playbooks/add-ironic-nodes.yaml b/ironic/drivers/modules/ansible/playbooks/add-ironic-nodes.yaml
new file mode 100644
index 000000000..568ff2830
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/add-ironic-nodes.yaml
@@ -0,0 +1,11 @@
+- hosts: conductor
+ gather_facts: no
+ tasks:
+ - add_host:
+ group: ironic
+ hostname: "{{ item.name }}"
+ ansible_host: "{{ item.ip }}"
+ ansible_user: "{{ item.user }}"
+ ironic_extra: "{{ item.extra | default({}) }}"
+ with_items: "{{ ironic.nodes }}"
+ tags: always
diff --git a/ironic/drivers/modules/ansible/playbooks/ansible.cfg b/ironic/drivers/modules/ansible/playbooks/ansible.cfg
new file mode 100644
index 000000000..cd524cd33
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/ansible.cfg
@@ -0,0 +1,35 @@
+[defaults]
+# retries through the ansible-deploy driver are not supported
+retry_files_enabled = False
+
+# this uses the supplied callback plugin to interleave ansible event logs
+# into the Ironic-conductor log as configured in the ironic configuration
+# file; see callback_plugins/ironic_log.ini for some options to set
+# (DevStack _needs_ some tweaks)
+callback_whitelist = ironic_log
+
+# For better security, bake SSH host keys into bootstrap image,
+# add those to ~/.ssh/known_hosts for the user running the ironic-conductor
+# service on all nodes where ironic-conductor and ansible-deploy are installed,
+# and set host_key_checking to True (or comment it out, True is the default)
+host_key_checking = False
+
+# uncomment if you have a problem with the ramdisk locale on ansible >= 2.1
+#module_set_locale=False
+
+# This sets the interval (in seconds) of Ansible internal processes polling
+# each other. Lower values improve performance with large playbooks at
+# the expense of extra CPU load. Higher values are more suitable for Ansible
+# usage in automation scenarios, when UI responsiveness is not required but
+# CPU usage might be a concern.
+# Default corresponds to the value hardcoded in Ansible ≤ 2.1:
+#internal_poll_interval = 0.001
+
+[ssh_connection]
+# pipelining greatly increases speed of deployment, disable it only when
+# your version of the ssh client on the ironic node or the server in the
+# bootstrap image does not support it, or if you cannot disable "requiretty"
+# for the passwordless sudoer user in the bootstrap image.
+# See Ansible documentation for more info:
+# http://docs.ansible.com/ansible/intro_configuration.html#pipelining
+pipelining = True
diff --git a/ironic/drivers/modules/ansible/playbooks/callback_plugins/ironic_log.ini b/ironic/drivers/modules/ansible/playbooks/callback_plugins/ironic_log.ini
new file mode 100644
index 000000000..4d1093398
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/callback_plugins/ironic_log.ini
@@ -0,0 +1,15 @@
+[ironic]
+# If Ironic's config is not in one of the default oslo_config locations,
+# specify the path to it here
+#config_file =
+
+# Force usage of journald
+#use_journal = True
+
+# Force usage of syslog
+#use_syslog = False
+
+# Force usage of given file to log to.
+# Useful for a testing system with only stderr logging
+# (e.g. DevStack deployed w/o systemd)
+#log_file =
diff --git a/ironic/drivers/modules/ansible/playbooks/callback_plugins/ironic_log.py b/ironic/drivers/modules/ansible/playbooks/callback_plugins/ironic_log.py
new file mode 100644
index 000000000..55fa5b834
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/callback_plugins/ironic_log.py
@@ -0,0 +1,148 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import ConfigParser
+import os
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import strutils
+import pbr.version
+
+
+CONF = cfg.CONF
+DOMAIN = 'ironic'
+VERSION = pbr.version.VersionInfo(DOMAIN).release_string()
+
+
+# find and parse callback config file
+def parse_callback_config():
+ basename = os.path.splitext(__file__)[0]
+ config = ConfigParser.ConfigParser()
+ callback_config = {'ironic_config': None,
+ 'ironic_log_file': None,
+ 'use_journal': True,
+ 'use_syslog': False}
+ try:
+ config.readfp(open(basename + ".ini"))
+ if config.has_option('ironic', 'config_file'):
+ callback_config['ironic_config'] = config.get(
+ 'ironic', 'config_file')
+ if config.has_option('ironic', 'log_file'):
+ callback_config['ironic_log_file'] = config.get(
+ 'ironic', 'log_file')
+ if config.has_option('ironic', 'use_journal'):
+ callback_config['use_journal'] = strutils.bool_from_string(
+ config.get('ironic', 'use_journal'))
+ if config.has_option('ironic', 'use_syslog'):
+ callback_config['use_syslog'] = strutils.bool_from_string(
+ config.get('ironic', 'use_syslog'))
+ except Exception:
+ pass
+ return callback_config
+
+
+def setup_log():
+
+ logging.register_options(CONF)
+
+ conf_kwargs = dict(args=[], project=DOMAIN, version=VERSION)
+ callback_config = parse_callback_config()
+
+ if callback_config['ironic_config']:
+ conf_kwargs['default_config_files'] = [
+ callback_config['ironic_config']]
+ CONF(**conf_kwargs)
+
+ if callback_config['use_journal']:
+ CONF.set_override('use_journal', True)
+ if callback_config['use_syslog']:
+ CONF.set_override('use_syslog', True)
+ if callback_config['ironic_log_file']:
+ CONF.set_override("log_file", callback_config['ironic_log_file'])
+
+ logging.setup(CONF, DOMAIN)
+
+
+class CallbackModule(object):
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'ironic_log'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+ setup_log()
+ self.log = logging.getLogger(__name__)
+ self.node = None
+ self.opts = {}
+
+ # NOTE(pas-ha) this method is required for Ansible>=2.4
+ # TODO(pas-ha) rewrite to support defining callback plugin options
+ # in ansible.cfg after we require Ansible >=2.4
+ def set_options(self, options):
+ self.opts = options
+
+ def runner_msg_dict(self, result):
+ self.node = result._host.get_name()
+ name = result._task.get_name()
+ res = str(result._result)
+ return dict(node=self.node, name=name, res=res)
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ # NOTE(pas-ha) I do not know (yet) how to obtain a ref to host
+ # until first task is processed
+ node = self.node or "Node"
+ name = task.get_name()
+ if name == 'setup':
+ self.log.debug("Processing task %(name)s.", dict(name=name))
+ else:
+ self.log.debug("Processing task %(name)s on node %(node)s.",
+ dict(name=name, node=node))
+
+ def v2_runner_on_failed(self, result, *args, **kwargs):
+ self.log.error(
+ "Ansible task %(name)s failed on node %(node)s: %(res)s",
+ self.runner_msg_dict(result))
+
+ def v2_runner_on_ok(self, result):
+ msg_dict = self.runner_msg_dict(result)
+ if msg_dict['name'] == 'setup':
+ self.log.info("Ansible task 'setup' complete on node %(node)s",
+ msg_dict)
+ else:
+ self.log.info("Ansible task %(name)s complete on node %(node)s: "
+ "%(res)s", msg_dict)
+
+ def v2_runner_on_unreachable(self, result):
+ self.log.error(
+ "Node %(node)s was unreachable for Ansible task %(name)s: %(res)s",
+ self.runner_msg_dict(result))
+
+ def v2_runner_on_async_poll(self, result):
+ self.log.debug("Polled ansible task %(name)s for complete "
+ "on node %(node)s: %(res)s",
+ self.runner_msg_dict(result))
+
+ def v2_runner_on_async_ok(self, result):
+ self.log.info("Async Ansible task %(name)s complete on node %(node)s: "
+ "%(res)s", self.runner_msg_dict(result))
+
+ def v2_runner_on_async_failed(self, result):
+ self.log.error("Async Ansible task %(name)s failed on node %(node)s: "
+ "%(res)s", self.runner_msg_dict(result))
+
+ def v2_runner_on_skipped(self, result):
+ self.log.debug(
+ "Ansible task %(name)s skipped on node %(node)s: %(res)s",
+ self.runner_msg_dict(result))
diff --git a/ironic/drivers/modules/ansible/playbooks/clean.yaml b/ironic/drivers/modules/ansible/playbooks/clean.yaml
new file mode 100644
index 000000000..04645cdc1
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/clean.yaml
@@ -0,0 +1,6 @@
+---
+- import_playbook: add-ironic-nodes.yaml
+
+- hosts: ironic
+ roles:
+ - clean
diff --git a/ironic/drivers/modules/ansible/playbooks/clean_steps.yaml b/ironic/drivers/modules/ansible/playbooks/clean_steps.yaml
new file mode 100644
index 000000000..b40481993
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/clean_steps.yaml
@@ -0,0 +1,19 @@
+- name: erase_devices_metadata
+ priority: 99
+ interface: deploy
+ args:
+ tags:
+ required: true
+ description: list of playbook tags used to erase partition table on disk devices
+ value:
+ - zap
+
+- name: erase_devices
+ priority: 10
+ interface: deploy
+ args:
+ tags:
+ required: true
+ description: list of playbook tags used to erase disk devices
+ value:
+ - shred
diff --git a/ironic/drivers/modules/ansible/playbooks/deploy.yaml b/ironic/drivers/modules/ansible/playbooks/deploy.yaml
new file mode 100644
index 000000000..3fbb60d22
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/deploy.yaml
@@ -0,0 +1,12 @@
+---
+- import_playbook: add-ironic-nodes.yaml
+
+- hosts: ironic
+ roles:
+ - discover
+ - prepare
+ - deploy
+ - configure
+ post_tasks:
+ - name: flush disk state
+ command: sync
diff --git a/ironic/drivers/modules/ansible/playbooks/inventory b/ironic/drivers/modules/ansible/playbooks/inventory
new file mode 100644
index 000000000..f6599ef67
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/inventory
@@ -0,0 +1 @@
+conductor ansible_connection=local
diff --git a/ironic/drivers/modules/ansible/playbooks/library/facts_wwn.py b/ironic/drivers/modules/ansible/playbooks/library/facts_wwn.py
new file mode 100644
index 000000000..7703f570c
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/library/facts_wwn.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+COLLECT_INFO = (('wwn', 'WWN'), ('serial', 'SERIAL_SHORT'),
+ ('wwn_with_extension', 'WWN_WITH_EXTENSION'),
+ ('wwn_vendor_extension', 'WWN_VENDOR_EXTENSION'))
+
+
+def get_devices_wwn(devices, module):
+ try:
+ import pyudev
+ # NOTE(pas-ha) creating context might fail if udev is missing
+ context = pyudev.Context()
+ except ImportError:
+ module.warn('Can not collect "wwn", "wwn_with_extension", '
+ '"wwn_vendor_extension" and "serial" when using '
+ 'root device hints because there\'s no UDEV python '
+ 'binds installed')
+ return {}
+
+ dev_dict = {}
+ for device in devices:
+ name = '/dev/' + device
+ try:
+ udev = pyudev.Device.from_device_file(context, name)
+ except (ValueError, EnvironmentError, pyudev.DeviceNotFoundError) as e:
+ module.warn('Device %(dev)s is inaccessible, skipping... '
+ 'Error: %(error)s' % {'dev': name, 'error': e})
+ continue
+
+ dev_dict[device] = {}
+ for key, udev_key in COLLECT_INFO:
+ dev_dict[device][key] = udev.get('ID_%s' % udev_key)
+
+ return {"ansible_facts": {"devices_wwn": dev_dict}}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ devices=dict(required=True, type='list'),
+ ),
+ supports_check_mode=True,
+ )
+
+ devices = module.params['devices']
+ data = get_devices_wwn(devices, module)
+ module.exit_json(**data)
+
+
+from ansible.module_utils.basic import * # noqa
+if __name__ == '__main__':
+ main()
diff --git a/ironic/drivers/modules/ansible/playbooks/library/root_hints.py b/ironic/drivers/modules/ansible/playbooks/library/root_hints.py
new file mode 100644
index 000000000..32473eb77
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/library/root_hints.py
@@ -0,0 +1,97 @@
+# -*- coding: utf-8 -*-
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+GIB = 1 << 30
+
+EXTRA_PARAMS = set(['wwn', 'serial', 'wwn_with_extension',
+ 'wwn_vendor_extension'])
+
+
+# NOTE: ansible reports device size as a float with 2-digit precision,
+# while Ironic requires the size in GiB; if we used the ansible size
+# value directly, devices larger than 1 TB could be rounded incorrectly
+def size_gib(device_info):
+ sectors = device_info.get('sectors')
+ sectorsize = device_info.get('sectorsize')
+ if sectors is None or sectorsize is None:
+ return '0'
+
+ return str((int(sectors) * int(sectorsize)) // GIB)
+
+
+def merge_devices_info(devices, devices_wwn):
+ merged_info = devices.copy()
+ for device in merged_info:
+ if device in devices_wwn:
+ merged_info[device].update(devices_wwn[device])
+
+ # replace size
+ merged_info[device]['size'] = size_gib(merged_info[device])
+
+ return merged_info
+
+
+def root_hint(hints, devices):
+ hint = None
+ name = hints.pop('name', None)
+ for device in devices:
+ for key in hints:
+ if hints[key] != devices[device].get(key):
+ break
+ else:
+ # If multiple hints are specified, a device must satisfy all
+ # the hints
+ dev_name = '/dev/' + device
+ if name is None or name == dev_name:
+ hint = dev_name
+ break
+
+ return hint
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ root_device_hints=dict(required=True, type='dict'),
+ ansible_devices=dict(required=True, type='dict'),
+ ansible_devices_wwn=dict(required=True, type='dict')
+ ),
+ supports_check_mode=True)
+
+ hints = module.params['root_device_hints']
+ devices = module.params['ansible_devices']
+ devices_wwn = module.params['ansible_devices_wwn']
+
+ if not devices_wwn:
+ extra = set(hints) & EXTRA_PARAMS
+ if extra:
+ module.fail_json(msg='Extra hints (supported by additional ansible'
+ ' module) are set but this information can not be'
+ ' collected. Extra hints: %s' % ', '.join(extra))
+
+ devices_info = merge_devices_info(devices, devices_wwn or {})
+ hint = root_hint(hints, devices_info)
+
+ if hint is None:
+ module.fail_json(msg='Root device hints are set, but none of the '
+ 'devices satisfy them. Collected devices info: %s'
+ % devices_info)
+
+ ret_data = {'ansible_facts': {'ironic_root_device': hint}}
+ module.exit_json(**ret_data)
+
+
+from ansible.module_utils.basic import * # noqa
+if __name__ == '__main__':
+ main()
diff --git a/ironic/drivers/modules/ansible/playbooks/library/stream_url.py b/ironic/drivers/modules/ansible/playbooks/library/stream_url.py
new file mode 100644
index 000000000..dd750d637
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/library/stream_url.py
@@ -0,0 +1,118 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import hashlib
+import string
+
+import requests
+
+# adapted from IPA
+DEFAULT_CHUNK_SIZE = 1024 * 1024 # 1MB
+
+
+class StreamingDownloader(object):
+
+ def __init__(self, url, chunksize, hash_algo=None, verify=True,
+ certs=None):
+ if hash_algo is not None:
+ self.hasher = hashlib.new(hash_algo)
+ else:
+ self.hasher = None
+ self.chunksize = chunksize
+ resp = requests.get(url, stream=True, verify=verify, certs=certs)
+ if resp.status_code != 200:
+ raise Exception('Invalid response code: %s' % resp.status_code)
+
+ self._request = resp
+
+ def __iter__(self):
+ for chunk in self._request.iter_content(chunk_size=self.chunksize):
+ if self.hasher is not None:
+ self.hasher.update(chunk)
+ yield chunk
+
+ def checksum(self):
+ if self.hasher is not None:
+ return self.hasher.hexdigest()
+
+
+def stream_to_dest(url, dest, chunksize, hash_algo, verify=True, certs=None):
+ downloader = StreamingDownloader(url, chunksize, hash_algo,
+ verify=verify, certs=certs)
+
+ with open(dest, 'wb+') as f:
+ for chunk in downloader:
+ f.write(chunk)
+
+ return downloader.checksum()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ url=dict(required=True, type='str'),
+ dest=dict(required=True, type='str'),
+ checksum=dict(required=False, type='str', default=''),
+ chunksize=dict(required=False, type='int',
+ default=DEFAULT_CHUNK_SIZE),
+ validate_certs=dict(required=False, type='bool', default=True),
+ client_cert=dict(required=False, type='str', default=''),
+ client_key=dict(required=False, type='str', default='')
+
+ ))
+
+ url = module.params['url']
+ dest = module.params['dest']
+ checksum = module.params['checksum']
+ chunksize = module.params['chunksize']
+ validate = module.params['validate_certs']
+ client_cert = module.params['client_cert']
+ client_key = module.params['client_key']
+ if client_cert:
+ certs = (client_cert, client_key) if client_key else client_cert
+ else:
+ certs = None
+
+ if checksum == '':
+ hash_algo, checksum = None, None
+ else:
+ try:
+ hash_algo, checksum = checksum.rsplit(':', 1)
+ except ValueError:
+ module.fail_json(msg='The checksum parameter has to be in format '
+ '"<algorithm>:<checksum>"')
+ checksum = checksum.lower()
+ if not all(c in string.hexdigits for c in checksum):
+ module.fail_json(msg='The checksum must be valid HEX number')
+
+ if hash_algo not in hashlib.algorithms_available:
+ module.fail_json(msg="%s checksums are not supported" % hash_algo)
+
+ try:
+ actual_checksum = stream_to_dest(
+ url, dest, chunksize, hash_algo, verify=validate, certs=certs)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+ else:
+ if hash_algo and actual_checksum != checksum:
+ module.fail_json(msg='Invalid dest checksum')
+ else:
+ module.exit_json(changed=True)
+
+
+# NOTE(pas-ha) Ansible's module_utils.basic is licensed under BSD (2 clause)
+from ansible.module_utils.basic import * # noqa
+if __name__ == '__main__':
+ main()
diff --git a/ironic/drivers/modules/ansible/playbooks/roles/clean/defaults/main.yaml b/ironic/drivers/modules/ansible/playbooks/roles/clean/defaults/main.yaml
new file mode 100644
index 000000000..225025487
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/roles/clean/defaults/main.yaml
@@ -0,0 +1 @@
+sectors_to_wipe: 1024
diff --git a/ironic/drivers/modules/ansible/playbooks/roles/clean/tasks/main.yaml b/ironic/drivers/modules/ansible/playbooks/roles/clean/tasks/main.yaml
new file mode 100644
index 000000000..587b8d277
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/roles/clean/tasks/main.yaml
@@ -0,0 +1,6 @@
+- import_tasks: zap.yaml
+ tags:
+ - zap
+- import_tasks: shred.yaml
+ tags:
+ - shred
diff --git a/ironic/drivers/modules/ansible/playbooks/roles/clean/tasks/shred.yaml b/ironic/drivers/modules/ansible/playbooks/roles/clean/tasks/shred.yaml
new file mode 100644
index 000000000..511229064
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/roles/clean/tasks/shred.yaml
@@ -0,0 +1,8 @@
+- name: clean block devices
+ become: yes
+ command: shred -f -z /dev/{{ item.key }}
+ async: 3600
+ poll: 30
+ with_dict: "{{ ansible_devices }}"
+ when:
+ - item.value.host
diff --git a/ironic/drivers/modules/ansible/playbooks/roles/clean/tasks/wipe.yaml b/ironic/drivers/modules/ansible/playbooks/roles/clean/tasks/wipe.yaml
new file mode 100644
index 000000000..877f8f3df
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/roles/clean/tasks/wipe.yaml
@@ -0,0 +1,24 @@
+- name: store start and end of disk
+ set_fact:
+ start_sectors:
+ - 0
+ end_sectors:
+ - "{{ (device.value.sectors | int) - sectors_to_wipe }}"
+ when:
+ - device.value.host
+
+- name: update start and end sectors with such for partitions
+ set_fact:
+ start_sectors: "{{ start_sectors + [item.value.start | int ] }}"
+ end_sectors: "{{ end_sectors + [ (item.value.start | int) + ( item.value.sectors | int) - sectors_to_wipe ] }}"
+ with_dict: "{{ device.value.partitions }}"
+ when:
+ - device.value.host
+
+- name: wipe starts and ends of disks and partitions
+ command: dd if=/dev/zero of=/dev/{{ device.key }} ibs={{ device.value.sectorsize }} obs={{ device.value.sectorsize }} count={{ sectors_to_wipe }} seek={{ item }}
+ with_flattened:
+ - "{{ start_sectors | map('int') | list | sort (reverse=True) }}"
+ - "{{ end_sectors | map('int') | list | sort (reverse=True) }}"
+ when:
+ - device.value.host
diff --git a/ironic/drivers/modules/ansible/playbooks/roles/clean/tasks/zap.yaml b/ironic/drivers/modules/ansible/playbooks/roles/clean/tasks/zap.yaml
new file mode 100644
index 000000000..d406d4daf
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/roles/clean/tasks/zap.yaml
@@ -0,0 +1,16 @@
+# NOTE(pas-ha) this is to ensure that partition metadata that might be stored
+# in the start or end of the partition itself also becomes unusable
+# and does not interfere with future partition scheme if new partitions
+# happen to fall on the same boundaries where old partitions were.
+# NOTE(pas-ha) loop_control works with Ansible >= 2.1
+- include_tasks: wipe.yaml
+ with_dict: "{{ ansible_devices }}"
+ loop_control:
+ loop_var: device
+
+- name: wipe general partition table metadata
+ become: yes
+ command: sgdisk -Z /dev/{{ item.key }}
+ with_dict: "{{ ansible_devices }}"
+ when:
+ - item.value.host
diff --git a/ironic/drivers/modules/ansible/playbooks/roles/configure/defaults/main.yaml b/ironic/drivers/modules/ansible/playbooks/roles/configure/defaults/main.yaml
new file mode 100644
index 000000000..9fdad71fb
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/roles/configure/defaults/main.yaml
@@ -0,0 +1 @@
+tmp_rootfs_mount: /tmp/rootfs
diff --git a/ironic/drivers/modules/ansible/playbooks/roles/configure/tasks/grub.yaml b/ironic/drivers/modules/ansible/playbooks/roles/configure/tasks/grub.yaml
new file mode 100644
index 000000000..2c40e8164
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/roles/configure/tasks/grub.yaml
@@ -0,0 +1,79 @@
+- name: discover grub-install command
+ find:
+ paths:
+ - "{{ tmp_rootfs_mount }}/usr/sbin"
+ pattern: "grub*-install"
+ register: grub_install_found
+
+- name: discover grub-mkconfig command
+ find:
+ paths:
+ - "{{ tmp_rootfs_mount }}/usr/sbin"
+ pattern: "grub*-mkconfig"
+ register: grub_config_found
+
+- name: find grub config file
+ find:
+ paths:
+ - "{{ tmp_rootfs_mount }}/boot"
+ pattern: "grub*.cfg"
+ recurse: yes
+ register: grub_file_found
+
+- name: test if all needed grub files were found
+ assert:
+ that:
+ - "{{ grub_install_found.matched > 0 }}"
+ - "{{ grub_config_found.matched > 0 }}"
+ - "{{ grub_file_found.matched > 0 }}"
+
+- name: set paths to grub commands
+ set_fact:
+ grub_install_cmd: "{{ grub_install_found.files[0].path | replace(tmp_rootfs_mount,'') }}"
+ grub_config_cmd: "{{ grub_config_found.files[0].path | replace(tmp_rootfs_mount,'') }}"
+ grub_config_file: "{{ grub_file_found.files[0].path | replace(tmp_rootfs_mount,'') }}"
+
+- name: make dirs for chroot
+ become: yes
+ file:
+ state: directory
+ path: "{{ tmp_rootfs_mount }}/{{ item }}"
+ with_items:
+ - dev
+ - sys
+ - proc
+
+- name: mount dirs for chroot
+ become: yes
+ command: mount -o bind /{{ item }} {{ tmp_rootfs_mount }}/{{ item }}
+ with_items:
+ - dev
+ - sys
+ - proc
+
+- block:
+ - name: get grub version string
+ become: yes
+ command: chroot {{ tmp_rootfs_mount }} /bin/sh -c '{{ grub_install_cmd }} --version'
+ register: grub_version_string
+ - name: install grub to disk
+ become: yes
+ command: chroot {{ tmp_rootfs_mount }} /bin/sh -c '{{ grub_install_cmd }} {{ ironic_root_device }}'
+ - name: preload lvm modules for grub2
+ become: yes
+ lineinfile:
+ dest: "{{ tmp_rootfs_mount }}/etc/default/grub"
+ state: present
+ line: GRUB_PRELOAD_MODULES=lvm
+ when: grub_version_string.stdout.split() | last | first == '2'
+ - name: create grub config
+ become: yes
+ command: chroot {{ tmp_rootfs_mount }} /bin/sh -c '{{ grub_config_cmd }} -o {{ grub_config_file }}'
+ always:
+ - name: unmount dirs for chroot
+ become: yes
+ command: umount {{ tmp_rootfs_mount }}/{{ item }}
+ with_items:
+ - dev
+ - sys
+ - proc
diff --git a/ironic/drivers/modules/ansible/playbooks/roles/configure/tasks/main.yaml b/ironic/drivers/modules/ansible/playbooks/roles/configure/tasks/main.yaml
new file mode 100644
index 000000000..9baa882a6
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/roles/configure/tasks/main.yaml
@@ -0,0 +1,4 @@
+- import_tasks: mounts.yaml
+ when: ironic.image.type | default('whole-disk-image') == 'partition'
+- import_tasks: grub.yaml
+ when: ironic.image.type | default('whole-disk-image') == 'partition'
diff --git a/ironic/drivers/modules/ansible/playbooks/roles/configure/tasks/mounts.yaml b/ironic/drivers/modules/ansible/playbooks/roles/configure/tasks/mounts.yaml
new file mode 100644
index 000000000..870fa9af8
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/roles/configure/tasks/mounts.yaml
@@ -0,0 +1,8 @@
+- name: create tmp mount point for root
+ file:
+ state: directory
+ path: "{{ tmp_rootfs_mount }}"
+
+- name: mount user image root
+ become: yes
+ command: mount {{ ironic_image_target }} {{ tmp_rootfs_mount }}
diff --git a/ironic/drivers/modules/ansible/playbooks/roles/deploy/files/partition_configdrive.sh b/ironic/drivers/modules/ansible/playbooks/roles/deploy/files/partition_configdrive.sh
new file mode 100755
index 000000000..056a8152c
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/roles/deploy/files/partition_configdrive.sh
@@ -0,0 +1,110 @@
+#!/bin/sh
+
+# Copyright 2013 Rackspace, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# NOTE(pas-ha) this is mostly copied over from Ironic Python Agent
+# compared to the original file in IPA, it includes some modifications.
+
+# TODO(pas-ha) rewrite this shell script to be a proper Ansible module
+
+# This should work with almost any image that uses MBR partitioning and
+# doesn't already have 3 or more partitions -- or else you'll no longer
+# be able to create extended partitions on the disk.
+
+# Takes one argument - block device
+
+log() {
+ echo "`basename $0`: $@"
+}
+
+fail() {
+ log "Error: $@"
+ exit 1
+}
+
+MAX_DISK_PARTITIONS=128
+MAX_MBR_SIZE_MB=2097152
+
+DEVICE="$1"
+
+[ -b $DEVICE ] || fail "(DEVICE) $DEVICE is not a block device"
+
+# We need to run partx -u to ensure all partitions are visible so the
+# following blkid command returns partitions just imaged to the device
+partx -u $DEVICE || fail "running partx -u $DEVICE"
+
+# todo(jayf): partx -u doesn't work in all cases, but partprobe fails in
+# devstack. We run both commands now as a temporary workaround for bug 1433812
+# long term, this should all be refactored into python and share code with
+# the other partition-modifying code in the agent.
+partprobe $DEVICE || true
+
+# Check for preexisting partition for configdrive
+EXISTING_PARTITION=`/sbin/blkid -l -o device $DEVICE -t LABEL=config-2`
+if [ -z $EXISTING_PARTITION ]; then
+ # Check if it is GPT partition and needs to be re-sized
+ if [ `partprobe $DEVICE print 2>&1 | grep "fix the GPT to use all of the space"` ]; then
+ log "Fixing GPT to use all of the space on device $DEVICE"
+ sgdisk -e $DEVICE || fail "move backup GPT data structures to the end of ${DEVICE}"
+
+ # Need to create new partition for config drive
+    # Not all images have partition numbers in sequential order. There are holes.
+ # These holes get filled up when a new partition is created.
+ TEMP_DIR="$(mktemp -d)"
+ EXISTING_PARTITION_LIST=$TEMP_DIR/existing_partitions
+ UPDATED_PARTITION_LIST=$TEMP_DIR/updated_partitions
+
+ gdisk -l $DEVICE | grep -A$MAX_DISK_PARTITIONS "Number Start" | grep -v "Number Start" > $EXISTING_PARTITION_LIST
+
+ # Create small partition at the end of the device
+ log "Adding configdrive partition to $DEVICE"
+ sgdisk -n 0:-64MB:0 $DEVICE || fail "creating configdrive on ${DEVICE}"
+
+ gdisk -l $DEVICE | grep -A$MAX_DISK_PARTITIONS "Number Start" | grep -v "Number Start" > $UPDATED_PARTITION_LIST
+
+ CONFIG_PARTITION_ID=`diff $EXISTING_PARTITION_LIST $UPDATED_PARTITION_LIST | tail -n1 |awk '{print $2}'`
+ ISO_PARTITION="${DEVICE}${CONFIG_PARTITION_ID}"
+ else
+ log "Working on MBR only device $DEVICE"
+
+ # get total disk size, to detect if that exceeds 2TB msdos limit
+ disksize_bytes=$(blockdev --getsize64 $DEVICE)
+ disksize_mb=$(( ${disksize_bytes%% *} / 1024 / 1024))
+
+ startlimit=-64MiB
+ endlimit=-0
+ if [ "$disksize_mb" -gt "$MAX_MBR_SIZE_MB" ]; then
+ # Create small partition at 2TB limit
+ startlimit=$(($MAX_MBR_SIZE_MB - 65))
+ endlimit=$(($MAX_MBR_SIZE_MB - 1))
+ fi
+
+ log "Adding configdrive partition to $DEVICE"
+ parted -a optimal -s -- $DEVICE mkpart primary fat32 $startlimit $endlimit || fail "creating configdrive on ${DEVICE}"
+
+ # Find partition we just created
+ # Dump all partitions, ignore empty ones, then get the last partition ID
+ ISO_PARTITION=`sfdisk --dump $DEVICE | grep -v ' 0,' | tail -n1 | awk -F ':' '{print $1}' | sed -e 's/\s*$//'` || fail "finding ISO partition created on ${DEVICE}"
+
+ # Wait for udev to pick up the partition
+ udevadm settle --exit-if-exists=$ISO_PARTITION
+ fi
+else
+ log "Existing configdrive found on ${DEVICE} at ${EXISTING_PARTITION}"
+ ISO_PARTITION=$EXISTING_PARTITION
+fi
+
+# Output the created/discovered partition for configdrive
+echo "configdrive $ISO_PARTITION"
diff --git a/ironic/drivers/modules/ansible/playbooks/roles/deploy/tasks/configdrive.yaml b/ironic/drivers/modules/ansible/playbooks/roles/deploy/tasks/configdrive.yaml
new file mode 100644
index 000000000..702797b64
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/roles/deploy/tasks/configdrive.yaml
@@ -0,0 +1,44 @@
+- name: download configdrive data
+ get_url:
+ url: "{{ ironic.configdrive.location }}"
+ dest: /tmp/{{ inventory_hostname }}.gz.base64
+ validate_certs: "{{ ironic.image.validate_certs|default(omit) }}"
+ async: 600
+ poll: 15
+ when: ironic.configdrive.type|default('') == 'url'
+
+- block:
+ - name: copy configdrive file to node
+ copy:
+ src: "{{ ironic.configdrive.location }}"
+ dest: /tmp/{{ inventory_hostname }}.gz.base64
+ - name: remove configdrive from conductor
+ delegate_to: conductor
+ file:
+ path: "{{ ironic.configdrive.location }}"
+ state: absent
+ when: ironic.configdrive.type|default('') == 'file'
+
+- name: unpack configdrive
+ shell: cat /tmp/{{ inventory_hostname }}.gz.base64 | base64 --decode | gunzip > /tmp/{{ inventory_hostname }}.cndrive
+
+- block:
+ - name: prepare config drive partition
+ become: yes
+ script: partition_configdrive.sh {{ ironic_root_device }}
+ register: configdrive_partition_output
+
+ - name: test the output of configdrive partitioner
+ assert:
+ that:
+ - "{{ (configdrive_partition_output.stdout_lines | last).split() | length == 2 }}"
+ - "{{ (configdrive_partition_output.stdout_lines | last).split() | first == 'configdrive' }}"
+
+ - name: store configdrive partition
+ set_fact:
+ ironic_configdrive_target: "{{ (configdrive_partition_output.stdout_lines | last).split() | last }}"
+ when: ironic_configdrive_target is undefined
+
+- name: write configdrive
+ become: yes
+ command: dd if=/tmp/{{ inventory_hostname }}.cndrive of={{ ironic_configdrive_target }} bs=64K oflag=direct
diff --git a/ironic/drivers/modules/ansible/playbooks/roles/deploy/tasks/download.yaml b/ironic/drivers/modules/ansible/playbooks/roles/deploy/tasks/download.yaml
new file mode 100644
index 000000000..87f2501db
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/roles/deploy/tasks/download.yaml
@@ -0,0 +1,13 @@
+- name: check that downloaded image will fit into memory
+ assert:
+ that: "{{ ansible_memfree_mb }} >= {{ ironic.image.mem_req }}"
+ msg: "The image size is too big, no free memory available"
+
+- name: download image with checksum validation
+ get_url:
+ url: "{{ ironic.image.url }}"
+ dest: /tmp/{{ inventory_hostname }}.img
+ checksum: "{{ ironic.image.checksum|default(omit) }}"
+ validate_certs: "{{ ironic.image.validate_certs|default(omit) }}"
+ async: 600
+ poll: 15
diff --git a/ironic/drivers/modules/ansible/playbooks/roles/deploy/tasks/main.yaml b/ironic/drivers/modules/ansible/playbooks/roles/deploy/tasks/main.yaml
new file mode 100644
index 000000000..235a4711c
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/roles/deploy/tasks/main.yaml
@@ -0,0 +1,7 @@
+- import_tasks: download.yaml
+ when: ironic.image.disk_format != 'raw'
+
+- import_tasks: write.yaml
+
+- import_tasks: configdrive.yaml
+ when: ironic.configdrive is defined
diff --git a/ironic/drivers/modules/ansible/playbooks/roles/deploy/tasks/write.yaml b/ironic/drivers/modules/ansible/playbooks/roles/deploy/tasks/write.yaml
new file mode 100644
index 000000000..ed0cc85b6
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/roles/deploy/tasks/write.yaml
@@ -0,0 +1,20 @@
+- name: convert and write
+ become: yes
+ command: qemu-img convert -t directsync -O host_device /tmp/{{ inventory_hostname }}.img {{ ironic_image_target }}
+ async: 1200
+ poll: 10
+ when: ironic.image.disk_format != 'raw'
+
+- name: stream to target
+ become: yes
+ stream_url:
+ url: "{{ ironic.image.url }}"
+ dest: "{{ ironic_image_target }}"
+ checksum: "{{ ironic.image.checksum|default(omit) }}"
+ validate_certs: "{{ ironic.image.validate_certs|default(omit) }}"
+ async: 600
+ poll: 15
+ when: ironic.image.disk_format == 'raw'
+
+- name: flush
+ command: sync
diff --git a/ironic/drivers/modules/ansible/playbooks/roles/discover/tasks/main.yaml b/ironic/drivers/modules/ansible/playbooks/roles/discover/tasks/main.yaml
new file mode 100644
index 000000000..f80d5b545
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/roles/discover/tasks/main.yaml
@@ -0,0 +1,13 @@
+- import_tasks: roothints.yaml
+ when: ironic.root_device_hints is defined
+
+- set_fact:
+ ironic_root_device: /dev/{{ item.key }}
+ with_dict: "{{ ansible_devices }}"
+ when:
+ - ironic_root_device is undefined
+ - item.value.host
+
+- set_fact:
+ ironic_image_target: "{{ ironic_root_device }}"
+ when: ironic_image_target is undefined
diff --git a/ironic/drivers/modules/ansible/playbooks/roles/discover/tasks/roothints.yaml b/ironic/drivers/modules/ansible/playbooks/roles/discover/tasks/roothints.yaml
new file mode 100644
index 000000000..488a21813
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/roles/discover/tasks/roothints.yaml
@@ -0,0 +1,9 @@
+- name: get devices wwn facts
+ facts_wwn:
+ devices: "{{ ansible_devices.keys() }}"
+
+- name: calculate root hint
+ root_hints:
+ root_device_hints: "{{ ironic.root_device_hints }}"
+ ansible_devices: "{{ ansible_devices }}"
+ ansible_devices_wwn: "{{ devices_wwn | default({}) }}"
diff --git a/ironic/drivers/modules/ansible/playbooks/roles/prepare/tasks/main.yaml b/ironic/drivers/modules/ansible/playbooks/roles/prepare/tasks/main.yaml
new file mode 100644
index 000000000..e92aba69d
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/roles/prepare/tasks/main.yaml
@@ -0,0 +1,2 @@
+- import_tasks: parted.yaml
+ when: ironic.image.type | default('whole-disk-image') == 'partition'
diff --git a/ironic/drivers/modules/ansible/playbooks/roles/prepare/tasks/parted.yaml b/ironic/drivers/modules/ansible/playbooks/roles/prepare/tasks/parted.yaml
new file mode 100644
index 000000000..9dab1218b
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/roles/prepare/tasks/parted.yaml
@@ -0,0 +1,45 @@
+# this is to handle no autocleaning in ironic
+- name: erase partition table
+ become: yes
+ command: dd if=/dev/zero of={{ ironic_root_device }} bs=512 count=36
+ when: not ironic.partition_info.preserve_ephemeral|default('no')|bool
+
+- name: run parted
+ become: yes
+ parted:
+ device: "{{ ironic_root_device }}"
+ label: "{{ ironic.partition_info.label }}"
+ state: "{{ item.1.state | default('present') }}"
+ name: "{{ item.1.name | default(omit) }}"
+ number: "{{ item.1.number }}"
+ part_type: "{{ item.1.part_type | default(omit) }}"
+ part_start: "{{ item.1.part_start }}"
+ part_end: "{{ item.1.part_end }}"
+ flags: "{{ item.1.flags | default(omit) }}"
+ align: "{{ item.1.align | default(omit) }}"
+ unit: "{{ item.1.unit | default(omit) }}"
+ with_items:
+ - "{{ ironic.partition_info.partitions.items() | sort(attribute='1.number') }}"
+
+- name: reset image target to root partition
+ set_fact:
+ ironic_image_target: "{{ ironic_root_device }}{{ ironic.partition_info.partitions.root.number }}"
+
+- name: make swap
+ become: yes
+ command: mkswap -L swap1 "{{ ironic_root_device }}{{ ironic.partition_info.partitions.swap.number }}"
+ when: ironic.partition_info.partitions.swap is defined
+
+- name: format ephemeral partition
+ become: yes
+ filesystem:
+ dev: "{{ ironic_root_device }}{{ ironic.partition_info.partitions.ephemeral.number }}"
+ fstype: "{{ ironic.partition_info.ephemeral_format }}"
+ force: yes
+ opts: "-L ephemeral0"
+ when: ironic.partition_info.partitions.ephemeral is defined and not ironic.partition_info.preserve_ephemeral|default('no')|bool
+
+- name: save block device for configdrive if partition was created
+ set_fact:
+ ironic_configdrive_target: "{{ ironic_root_device }}{{ ironic.partition_info.partitions.configdrive.number }}"
+ when: ironic.partition_info.partitions.configdrive is defined
diff --git a/ironic/drivers/modules/ansible/playbooks/roles/shutdown/tasks/main.yaml b/ironic/drivers/modules/ansible/playbooks/roles/shutdown/tasks/main.yaml
new file mode 100644
index 000000000..3172f5d3a
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/roles/shutdown/tasks/main.yaml
@@ -0,0 +1,6 @@
+- name: soft power off
+ become: yes
+ shell: sleep 5 && poweroff
+ async: 1
+ poll: 0
+ ignore_errors: true
diff --git a/ironic/drivers/modules/ansible/playbooks/shutdown.yaml b/ironic/drivers/modules/ansible/playbooks/shutdown.yaml
new file mode 100644
index 000000000..f8b84f759
--- /dev/null
+++ b/ironic/drivers/modules/ansible/playbooks/shutdown.yaml
@@ -0,0 +1,6 @@
+---
+- import_playbook: add-ironic-nodes.yaml
+
+- hosts: ironic
+ roles:
+ - shutdown