"Fossies" - the Fresh Open Source Software Archive

Member "ironic-16.0.3/ironic/drivers/modules/drac/raid.py" (18 Jan 2021, 63805 Bytes) of package /linux/misc/openstack/ironic-16.0.3.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) Python source code syntax highlighting (style: standard) with prefixed line numbers. Alternatively you can here view or download the uninterpreted source code file. For more information about "raid.py" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 16.0.2_vs_16.0.3.

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
DRAC RAID specific methods
"""

from collections import defaultdict
import math

from futurist import periodics
from ironic_lib import metrics_utils
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import units

from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import raid as raid_common
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.conf import CONF
from ironic.drivers import base
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules.drac import common as drac_common
from ironic.drivers.modules.drac import job as drac_job

drac_exceptions = importutils.try_import('dracclient.exceptions')
drac_constants = importutils.try_import('dracclient.constants')

LOG = logging.getLogger(__name__)

METRICS = metrics_utils.get_metrics_logger(__name__)

_CURRENT_RAID_CONTROLLER_MODE = "RAIDCurrentControllerMode"
_REQUESTED_RAID_CONTROLLER_MODE = "RAIDRequestedControllerMode"
_EHBA_MODE = "Enhanced HBA"
_RAID_MODE = "RAID"

RAID_LEVELS = {
    '0': {
        'min_disks': 1,
        'max_disks': 1000,
        'type': 'simple',
        'overhead': 0
    },
    '1': {
        'min_disks': 2,
        'max_disks': 2,
        'type': 'simple',
        'overhead': 1
    },
    '5': {
        'min_disks': 3,
        'max_disks': 1000,
        'type': 'simple',
        'overhead': 1
    },
    '6': {
        'min_disks': 4,
        'max_disks': 1000,
        'type': 'simple',
        'overhead': 2
    },
    '1+0': {
        'type': 'spanned',
        'span_type': '1'
    },
    '5+0': {
        'type': 'spanned',
        'span_type': '5'
    },
    '6+0': {
        'type': 'spanned',
        'span_type': '6'
    }
}
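
# Illustrative note (editor's sketch): simple levels carry their disk limits
# and parity overhead directly, while spanned levels ('1+0', '5+0', '6+0')
# inherit them from their 'span_type' and multiply by the span count, e.g.:
#
#     RAID_LEVELS['5']['min_disks']                    # 3
#     RAID_LEVELS[RAID_LEVELS['5+0']['span_type']]     # the RAID 5 entry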


def list_raid_controllers(node):
    """List the RAID controllers of the node.

    :param node: an ironic node object.
    :returns: a list of RAIDController objects from dracclient.
    :raises: DracOperationError on an error from python-dracclient.
    """
    client = drac_common.get_drac_client(node)

    try:
        return client.list_raid_controllers()
    except drac_exceptions.BaseClientException as exc:
        LOG.error('DRAC driver failed to get the list of RAID controllers '
                  'for node %(node_uuid)s. Reason: %(error)s.',
                  {'node_uuid': node.uuid, 'error': exc})
        raise exception.DracOperationError(error=exc)


def list_virtual_disks(node):
    """List the virtual disks of the node.

    :param node: an ironic node object.
    :returns: a list of VirtualDisk objects from dracclient.
    :raises: DracOperationError on an error from python-dracclient.
    """
    client = drac_common.get_drac_client(node)

    try:
        return client.list_virtual_disks()
    except drac_exceptions.BaseClientException as exc:
        LOG.error('DRAC driver failed to get the list of virtual disks '
                  'for node %(node_uuid)s. Reason: %(error)s.',
                  {'node_uuid': node.uuid, 'error': exc})
        raise exception.DracOperationError(error=exc)


def list_physical_disks(node):
    """List the physical disks of the node.

    :param node: an ironic node object.
    :returns: a list of PhysicalDisk objects from dracclient.
    :raises: DracOperationError on an error from python-dracclient.
    """
    client = drac_common.get_drac_client(node)

    try:
        return client.list_physical_disks()
    except drac_exceptions.BaseClientException as exc:
        LOG.error('DRAC driver failed to get the list of physical disks '
                  'for node %(node_uuid)s. Reason: %(error)s.',
                  {'node_uuid': node.uuid, 'error': exc})
        raise exception.DracOperationError(error=exc)
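
# Usage sketch (editor's illustration; the node object and attribute values
# are hypothetical): these helpers all follow the same pattern of delegating
# to python-dracclient and normalizing failures to DracOperationError, e.g.
#
#     disks = list_physical_disks(node)
#     hdds = [d.id for d in disks if d.media_type == 'hdd']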


def _is_raid_controller(node, raid_controller_fqdd, raid_controllers=None):
    """Find out whether a device's FQDD belongs to a RAID controller.

    :param node: an ironic node object
    :param raid_controller_fqdd: the FQDD of the device being tested to
                                 determine whether it is a RAID controller.
    :param raid_controllers: A list of RAIDControllers used to check for
                             the presence of BOSS cards.  If None, the
                             iDRAC will be queried for the list of
                             controllers.
    :returns: boolean, True if the device is a RAID controller,
              False if not.
    """
    client = drac_common.get_drac_client(node)

    try:
        return client.is_raid_controller(raid_controller_fqdd,
                                         raid_controllers)
    except drac_exceptions.BaseClientException as exc:
        LOG.error('Unable to determine if controller %(raid_controller_fqdd)s '
                  'on node %(node_uuid)s is a RAID controller. '
                  'Reason: %(error)s. ',
                  {'raid_controller_fqdd': raid_controller_fqdd,
                   'node_uuid': node.uuid, 'error': exc})

        raise exception.DracOperationError(error=exc)


def _validate_job_queue(node, raid_controller=None):
    """Validate that there are no pending jobs for this controller.

    :param node: an ironic node object.
    :param raid_controller: id of the RAID controller.
    """
    kwargs = {}
    if raid_controller:
        kwargs["name_prefix"] = "Config:RAID:%s" % raid_controller
    drac_job.validate_job_queue(node, **kwargs)


def create_virtual_disk(node, raid_controller, physical_disks, raid_level,
                        size_mb, disk_name=None, span_length=None,
                        span_depth=None):
    """Create a single virtual disk on a RAID controller.

    The created virtual disk will be in pending state. The DRAC card will do
    the actual configuration once the changes are applied by calling the
    ``commit_config`` method.

    :param node: an ironic node object.
    :param raid_controller: id of the RAID controller.
    :param physical_disks: ids of the physical disks.
    :param raid_level: RAID level of the virtual disk.
    :param size_mb: size of the virtual disk in megabytes.
    :param disk_name: name of the virtual disk. (optional)
    :param span_length: number of disks per span. (optional)
    :param span_depth: number of spans in the virtual disk. (optional)
    :returns: a dictionary containing the commit_needed key with a boolean
              value indicating whether a config job must be created for the
              values to be applied.
    :raises: DracOperationError on an error from python-dracclient.
    """
    # This causes config to fail, because the boot mode is set via a config
    # job.
    _validate_job_queue(node, raid_controller)

    client = drac_common.get_drac_client(node)

    try:
        return client.create_virtual_disk(raid_controller, physical_disks,
                                          raid_level, size_mb, disk_name,
                                          span_length, span_depth)
    except drac_exceptions.BaseClientException as exc:
        LOG.error('DRAC driver failed to create virtual disk for node '
                  '%(node_uuid)s. Reason: %(error)s.',
                  {'node_uuid': node.uuid,
                   'error': exc})
        raise exception.DracOperationError(error=exc)
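
# Illustrative call (editor's sketch; the controller and disk FQDDs are
# hypothetical placeholders):
#
#     create_virtual_disk(
#         node, raid_controller='RAID.Integrated.1-1',
#         physical_disks=[
#             'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
#             'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1'],
#         raid_level='1', size_mb=51200)
#
# The change stays pending until commit_config() creates the config job.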


def delete_virtual_disk(node, virtual_disk):
    """Delete a single virtual disk on a RAID controller.

    The deleted virtual disk will be in pending state. The DRAC card will do
    the actual configuration once the changes are applied by calling the
    ``commit_config`` method.

    :param node: an ironic node object.
    :param virtual_disk: id of the virtual disk.
    :returns: a dictionary containing the commit_needed key with a boolean
              value indicating whether a config job must be created for the
              values to be applied.
    :raises: DracOperationError on an error from python-dracclient.
    """
    # NOTE(mgoddard): Cannot specify raid_controller as we don't know it.
    _validate_job_queue(node)

    client = drac_common.get_drac_client(node)

    try:
        return client.delete_virtual_disk(virtual_disk)
    except drac_exceptions.BaseClientException as exc:
        LOG.error('DRAC driver failed to delete virtual disk '
                  '%(virtual_disk_fqdd)s for node %(node_uuid)s. '
                  'Reason: %(error)s.',
                  {'virtual_disk_fqdd': virtual_disk,
                   'node_uuid': node.uuid,
                   'error': exc})
        raise exception.DracOperationError(error=exc)


def _reset_raid_config(node, raid_controller):
    """Delete all virtual disks and unassign all hot spare physical disks.

    :param node: an ironic node object.
    :param raid_controller: id of the RAID controller.
    :returns: a dictionary containing
              - The is_commit_required key with a
              boolean value indicating whether a config job must be created
              for the values to be applied.
              - The is_reboot_required key with a RebootRequired enumerated
              value indicating whether the server must be rebooted to
              reset configuration.
    :raises: DracOperationError on an error from python-dracclient.
    """
    try:

        _validate_job_queue(node, raid_controller)

        client = drac_common.get_drac_client(node)
        return client.reset_raid_config(raid_controller)
    except drac_exceptions.BaseClientException as exc:
        LOG.error('DRAC driver failed to delete all virtual disks '
                  'and unassign all hot spares '
                  'on %(raid_controller_fqdd)s '
                  'for node %(node_uuid)s. '
                  'Reason: %(error)s.',
                  {'raid_controller_fqdd': raid_controller,
                   'node_uuid': node.uuid,
                   'error': exc})
        raise exception.DracOperationError(error=exc)


def clear_foreign_config(node, raid_controller):
    """Free up the foreign drives.

    :param node: an ironic node object.
    :param raid_controller: id of the RAID controller.
    :returns: a dictionary containing
              - The is_commit_required key with a
              boolean value indicating whether a config job must be created
              for the values to be applied.
              - The is_reboot_required key with a RebootRequired enumerated
              value indicating whether the server must be rebooted to
              clear foreign configuration.
    :raises: DracOperationError on an error from python-dracclient.
    """
    try:

        _validate_job_queue(node, raid_controller)

        client = drac_common.get_drac_client(node)
        return client.clear_foreign_config(raid_controller)
    except drac_exceptions.BaseClientException as exc:
        LOG.error('DRAC driver failed to free foreign drives '
                  'on %(raid_controller_fqdd)s '
                  'for node %(node_uuid)s. '
                  'Reason: %(error)s.',
                  {'raid_controller_fqdd': raid_controller,
                   'node_uuid': node.uuid,
                   'error': exc})
        raise exception.DracOperationError(error=exc)


def set_raid_settings(node, controller_fqdd, settings):
    """Set the RAID configuration settings.

    This method sets the pending_value parameter for each of the attributes
    passed in. For the values to be applied, a config job must
    be created.

    :param node: an ironic node object.
    :param controller_fqdd: the ID of the RAID controller.
    :param settings: a dictionary containing the proposed values, with
                     each key being the name of an attribute and the value
                     being the proposed value.
    :returns: a dictionary containing:
              - The is_commit_required key with a boolean value indicating
              whether a config job must be created for the values to be
              applied.
              - The is_reboot_required key with a RebootRequired enumerated
              value indicating whether the server must be rebooted for the
              values to be applied. Possible values are true and false.
    :raises: DracOperationError on an error from python-dracclient.
    """
    try:

        drac_job.validate_job_queue(node)

        client = drac_common.get_drac_client(node)
        return client.set_raid_settings(controller_fqdd, settings)
    except drac_exceptions.BaseClientException as exc:
        LOG.error('DRAC driver failed to set raid settings '
                  'on %(raid_controller_fqdd)s '
                  'for node %(node_uuid)s. '
                  'Reason: %(error)s.',
                  {'raid_controller_fqdd': controller_fqdd,
                   'node_uuid': node.uuid,
                   'error': exc})
        raise exception.DracOperationError(error=exc)


def list_raid_settings(node):
    """List the RAID configuration settings.

    :param node: an ironic node object.
    :returns: a dictionary with the RAID settings using InstanceID as the
              key. The attributes are RAIDEnumerableAttribute,
              RAIDStringAttribute and RAIDIntegerAttribute objects.
    :raises: DracOperationError on an error from python-dracclient.
    """
    try:

        drac_job.validate_job_queue(node)

        client = drac_common.get_drac_client(node)
        return client.list_raid_settings()
    except drac_exceptions.BaseClientException as exc:
        LOG.error('DRAC driver failed to list raid settings '
                  'for node %(node_uuid)s. '
                  'Reason: %(error)s.',
                  {'node_uuid': node.uuid,
                   'error': exc})
        raise exception.DracOperationError(error=exc)


def change_physical_disk_state(node, mode=None,
                               controllers_to_physical_disk_ids=None):
    """Convert the RAID status of physical disks.

    This method converts the requested physical disks from
    RAID to JBOD or vice versa.  It does this by converting only the
    disks that are not already in the correct state.

    :param node: an ironic node object.
    :param mode: the mode to change the disks to, either RAID or JBOD.
    :param controllers_to_physical_disk_ids: Dictionary of controllers and
           corresponding disk ids to convert to the requested mode.
    :return: a dictionary containing:
             - conversion_results, a dictionary that maps controller ids
             to the conversion results for that controller.
             The conversion results are a dict that contains:
             - The is_commit_required key with the value always set to
             True indicating that a config job must be created to
             complete disk conversion.
             - The is_reboot_required key with a RebootRequired
             enumerated value indicating whether the server must be
             rebooted to complete disk conversion.
    :raises: DracOperationError on an error from python-dracclient.
    """
    try:
        drac_job.validate_job_queue(node)
        client = drac_common.get_drac_client(node)
        return client.change_physical_disk_state(
            mode, controllers_to_physical_disk_ids)
    except drac_exceptions.BaseClientException as exc:
        LOG.error('DRAC driver failed to change physical drives '
                  'to %(mode)s mode for node %(node_uuid)s. '
                  'Reason: %(error)s.',
                  {'mode': mode, 'node_uuid': node.uuid, 'error': exc})
        raise exception.DracOperationError(error=exc)
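
# Shape of the conversion-map argument (editor's illustration; the FQDDs are
# hypothetical placeholders):
#
#     controllers_to_physical_disk_ids = {
#         'RAID.Integrated.1-1': [
#             'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
#             'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1',
#         ],
#     }
#     change_physical_disk_state(node, drac_constants.RaidStatus.raid,
#                                controllers_to_physical_disk_ids)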


def commit_config(node, raid_controller, reboot=False, realtime=False):
    """Apply all pending changes on a RAID controller.

    :param node: an ironic node object.
    :param raid_controller: id of the RAID controller.
    :param reboot: indicates whether a reboot job should be automatically
                   created with the config job. (optional, defaults to False)
    :param realtime: indicates RAID controller supports realtime.
                     (optional, defaults to False)
    :returns: id of the created job
    :raises: DracOperationError on an error from python-dracclient.
    """
    client = drac_common.get_drac_client(node)

    try:
        return client.commit_pending_raid_changes(
            raid_controller=raid_controller,
            reboot=reboot,
            realtime=realtime)
    except drac_exceptions.BaseClientException as exc:
        LOG.error('DRAC driver failed to commit pending RAID config for'
                  ' controller %(raid_controller_fqdd)s on node '
                  '%(node_uuid)s. Reason: %(error)s.',
                  {'raid_controller_fqdd': raid_controller,
                   'node_uuid': node.uuid,
                   'error': exc})
        raise exception.DracOperationError(error=exc)


def _change_physical_disk_mode(node, mode=None,
                               controllers_to_physical_disk_ids=None,
                               substep="completed"):
    """Physical drives conversion from RAID to JBOD or vice-versa.

    :param node: an ironic node object.
    :param mode: the mode to change the disks to, either RAID or JBOD.
    :param controllers_to_physical_disk_ids: Dictionary of controllers and
           corresponding disk ids to convert to the requested mode.
    :returns: states.CLEANWAIT if the conversion is in progress
              asynchronously or None if it is completed.
    """
    change_disk_state = change_physical_disk_state(
        node, mode, controllers_to_physical_disk_ids)

    controllers = list()
    conversion_results = change_disk_state['conversion_results']
    for controller_id, result in conversion_results.items():
        controller = {'raid_controller': controller_id,
                      'is_reboot_required': result['is_reboot_required'],
                      'is_commit_required': result['is_commit_required']}
        controllers.append(controller)

    return _commit_to_controllers(
        node,
        controllers, substep=substep)


def abandon_config(node, raid_controller):
    """Deletes all pending changes on a RAID controller.

    :param node: an ironic node object.
    :param raid_controller: id of the RAID controller.
    :raises: DracOperationError on an error from python-dracclient.
    """
    client = drac_common.get_drac_client(node)

    try:
        client.abandon_pending_raid_changes(raid_controller)
    except drac_exceptions.BaseClientException as exc:
        LOG.error('DRAC driver failed to delete pending RAID config '
                  'for controller %(raid_controller_fqdd)s on node '
                  '%(node_uuid)s. Reason: %(error)s.',
                  {'raid_controller_fqdd': raid_controller,
                   'node_uuid': node.uuid,
                   'error': exc})
        raise exception.DracOperationError(error=exc)


def _calculate_spans(raid_level, disks_count):
    """Calculate the number of spans for a RAID level and physical disk count.

    :param raid_level: RAID level of the virtual disk.
    :param disks_count: number of physical disks used for the virtual disk.
    :returns: number of spans.
    """
    if raid_level in ['0', '1', '5', '6']:
        return 1
    elif raid_level in ['5+0', '6+0']:
        return 2
    elif raid_level in ['1+0']:
        return disks_count >> 1
    else:
        reason = (_('Cannot calculate spans for RAID level "%s"') %
                  raid_level)
        raise exception.DracOperationError(error=reason)
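
# Illustration (editor's sketch): simple levels always form one span, the
# nested levels '5+0' and '6+0' form two, and '1+0' pairs disks into mirrors:
#
#     >>> _calculate_spans('5', 4)
#     1
#     >>> _calculate_spans('1+0', 6)     # 6 >> 1 == 3 mirrored pairs
#     3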


def _usable_disks_count(raid_level, disks_count):
    """Calculate the number of disks usable for a RAID level.

    The result is based on the number of physical disks available.

    :param raid_level: RAID level of the virtual disk.
    :param disks_count: number of physical disks used for the virtual disk.
    :returns: number of disks.
    """
    if raid_level in ['0', '1', '5', '6']:
        return disks_count
    elif raid_level in ['5+0', '6+0', '1+0']:
        # largest even number not greater than disks_count
        return (disks_count >> 1) << 1
    else:
        reason = (_('RAID level %(raid_level)s is not supported by the '
                    'driver. Supported RAID levels: %(supported_raid_levels)s')
                  % {'raid_level': raid_level,
                     'supported_raid_levels': list(RAID_LEVELS)})
        raise exception.DracOperationError(error=reason)
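
# Illustration (editor's sketch): spanned levels need an even disk count, so
# an odd leftover disk is dropped:
#
#     >>> _usable_disks_count('1+0', 7)
#     6
#     >>> _usable_disks_count('5', 7)
#     7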


def _raid_level_min_disks(raid_level, spans_count=1):
    try:
        raid_level_info = RAID_LEVELS[raid_level]
    except KeyError:
        reason = (_('RAID level %(raid_level)s is not supported by the '
                    'driver. Supported RAID levels: %(supported_raid_levels)s')
                  % {'raid_level': raid_level,
                     'supported_raid_levels': list(RAID_LEVELS)})
        raise exception.DracOperationError(error=reason)

    if raid_level_info['type'] == 'spanned':
        if spans_count <= 1:
            reason = _('Spanned RAID volumes cannot contain a single span')
            raise exception.DracOperationError(error=reason)

        span_type = raid_level_info['span_type']
        raid_level_info = RAID_LEVELS[span_type]

    return raid_level_info['min_disks'] * spans_count


def _raid_level_max_disks(raid_level, spans_count=1):
    try:
        raid_level_info = RAID_LEVELS[raid_level]
    except KeyError:
        reason = (_('RAID level %(raid_level)s is not supported by the '
                    'driver. Supported RAID levels: %(supported_raid_levels)s')
                  % {'raid_level': raid_level,
                     'supported_raid_levels': list(RAID_LEVELS)})
        raise exception.DracOperationError(error=reason)

    if raid_level_info['type'] == 'spanned':
        if spans_count <= 1:
            reason = _('Spanned RAID volumes cannot contain a single span')
            raise exception.DracOperationError(error=reason)

        span_type = raid_level_info['span_type']
        raid_level_info = RAID_LEVELS[span_type]

    return raid_level_info['max_disks'] * spans_count


def _raid_level_overhead(raid_level, spans_count=1):
    try:
        raid_level_info = RAID_LEVELS[raid_level]
    except KeyError:
        reason = (_('RAID level %(raid_level)s is not supported by the '
                    'driver. Supported RAID levels: %(supported_raid_levels)s')
                  % {'raid_level': raid_level,
                     'supported_raid_levels': list(RAID_LEVELS)})
        raise exception.DracOperationError(error=reason)

    if raid_level_info['type'] == 'spanned':
        if spans_count <= 1:
            reason = _('Spanned RAID volumes cannot contain a single span')
            raise exception.DracOperationError(error=reason)

        span_type = raid_level_info['span_type']
        raid_level_info = RAID_LEVELS[span_type]

    return raid_level_info['overhead'] * spans_count
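
# Illustration (editor's sketch): a RAID 6+0 volume with two spans resolves
# to the RAID 6 entry per span, so:
#
#     >>> _raid_level_min_disks('6+0', spans_count=2)   # 4 * 2
#     8
#     >>> _raid_level_overhead('6+0', spans_count=2)    # 2 * 2
#     4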


def _max_volume_size_mb(raid_level, physical_disks, free_space_mb,
                        spans_count=1, stripe_size_kb=64 * units.Ki):
    # restrict the size to the smallest available space
    free_spaces = [free_space_mb[disk] for disk in physical_disks]
    size_kb = min(free_spaces) * units.Ki

    # NOTE(ifarkas): using math.floor so we get a volume size that does not
    #                exceed the available space
    stripes_per_disk = int(math.floor(float(size_kb) / stripe_size_kb))

    disks_count = len(physical_disks)
    overhead_disks_count = _raid_level_overhead(raid_level, spans_count)

    return int(stripes_per_disk * stripe_size_kb
               * (disks_count - overhead_disks_count) / units.Ki)
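
# Worked example (editor's sketch): four RAID 5 disks whose smallest free
# space is 1024 MB, with the default 64 MiB stripe unit (64 * units.Ki KB):
#
#     size_kb           = 1024 * 1024 = 1048576
#     stripes_per_disk  = floor(1048576 / 65536) = 16
#     usable data disks = 4 - 1 (parity overhead) = 3
#     max volume size   = 16 * 65536 * 3 / 1024 = 3072 MB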


def _volume_usage_per_disk_mb(logical_disk, physical_disks, spans_count=1,
                              stripe_size_kb=64 * units.Ki):
    disks_count = len(physical_disks)
    overhead_disks_count = _raid_level_overhead(logical_disk['raid_level'],
                                                spans_count)
    volume_size_kb = logical_disk['size_mb'] * units.Ki
    # NOTE(ifarkas): using math.ceil so we get the largest disk usage
    #                possible, so we can avoid over-committing
    stripes_per_volume = math.ceil(float(volume_size_kb) / stripe_size_kb)

    stripes_per_disk = math.ceil(
        float(stripes_per_volume) / (disks_count - overhead_disks_count))
    return int(stripes_per_disk * stripe_size_kb / units.Ki)


def _find_configuration(logical_disks, physical_disks, pending_delete):
    """Find RAID configuration.

    This method transforms the RAID configuration defined in Ironic to a
    format that is required by dracclient. This includes matching the
    physical disks to RAID volumes when it's not pre-defined, or in general
    calculating missing properties.

    :param logical_disks: list of logical disk definitions.
    :param physical_disks: list of physical disk definitions.
    :param pending_delete: Whether there is a pending deletion of virtual
        disks that should be accounted for.
    """

    # shared physical disks of RAID volumes with size_mb='MAX' should be
    # deprioritized during the matching process to reserve as much space as
    # possible. Reserved means it won't be used during matching.
    volumes_with_reserved_physical_disks = [
        volume for volume in logical_disks
        if ('physical_disks' in volume and volume['size_mb'] == 'MAX'
            and volume.get('share_physical_disks', False))]
    reserved_physical_disks = [
        disk for disk in physical_disks
        for volume in volumes_with_reserved_physical_disks
        if disk.id in volume['physical_disks']]

    # we require each logical disk to contain only homogeneous physical
    # disks, so sort them by type
    physical_disks_by_type = {}
    reserved_physical_disks_by_type = {}
    free_space_mb = {}
    for disk in physical_disks:
        # calculate free disk space
        free_space_mb[disk] = _get_disk_free_size_mb(disk, pending_delete)

        disk_type = (disk.controller, disk.media_type, disk.interface_type,
                     disk.size_mb)
        if disk_type not in physical_disks_by_type:
            physical_disks_by_type[disk_type] = []
            reserved_physical_disks_by_type[disk_type] = []

        if disk in reserved_physical_disks:
            reserved_physical_disks_by_type[disk_type].append(disk)
        else:
            physical_disks_by_type[disk_type].append(disk)

    # exclude non-shared physical disks (predefined by the user) from
    # physical_disks_by_type because they are not going to be used during
    # matching
    for volume in logical_disks:
        if ('physical_disks' in volume
                and not volume.get('share_physical_disks', False)):
            for disk in physical_disks:
                if disk.id in volume['physical_disks']:
                    disk_type = (disk.controller, disk.media_type,
                                 disk.interface_type, disk.size_mb)
                    if disk in physical_disks_by_type[disk_type]:
                        physical_disks_by_type[disk_type].remove(disk)

    processed_volumes = []

    # step 1 - process volumes with predefined disks and exact size
    for volume in [volume for volume in logical_disks
                   if ('physical_disks' in volume
                       and volume['size_mb'] != 'MAX')]:
        _calculate_volume_props(volume, physical_disks, free_space_mb)
        processed_volumes.append(volume)

    # step 2 - process volumes without predefined disks
    volumes_without_disks = [disk for disk in logical_disks
                             if 'physical_disks' not in disk]

    if volumes_without_disks:
        result, free_space_mb = (
            _assign_disks_to_volume(volumes_without_disks,
                                    physical_disks_by_type, free_space_mb,
                                    pending_delete))
        if not result:
            # try again using the reserved physical disks in addition
            for disk_type, disks in physical_disks_by_type.items():
                physical_disks_by_type[disk_type] += (
                    reserved_physical_disks_by_type[disk_type])

            result, free_space_mb = (
                _assign_disks_to_volume(volumes_without_disks,
                                        physical_disks_by_type,
                                        free_space_mb,
                                        pending_delete))
            if not result:
                error_msg = _('failed to find matching physical disks for all '
                              'logical disks')
                LOG.error('DRAC driver failed to create RAID '
                          'configuration. Reason: %(error)s.',
                          {'error': error_msg})
                raise exception.DracOperationError(error=error_msg)

    processed_volumes += volumes_without_disks

    # step 3 - process volumes with predefined disks and size_mb == 'MAX'
    for volume in [volume for volume in logical_disks
                   if ('physical_disks' in volume
                       and volume['size_mb'] == 'MAX')]:
        _calculate_volume_props(volume, physical_disks, free_space_mb)
        processed_volumes.append(volume)

    return processed_volumes
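
# Input/output sketch (editor's illustration; values hypothetical): a logical
# disk that arrives without 'physical_disks' gets matched in step 2, e.g.
#
#     logical_disks = [{'raid_level': '1', 'size_mb': 51200,
#                       'is_root_volume': True}]
#
# comes back with 'physical_disks', 'controller', 'span_depth' and
# 'span_length' filled in, ready for create_virtual_disk().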


def _calculate_volume_props(logical_disk, physical_disks, free_space_mb):
    selected_disks = [disk for disk in physical_disks
                      if disk.id in logical_disk['physical_disks']]

    spans_count = _calculate_spans(
        logical_disk['raid_level'], len(selected_disks))

    if len(selected_disks) % spans_count != 0:
        error_msg = _('invalid number of physical disks was provided')
        raise exception.DracOperationError(error=error_msg)

    disks_per_span = int(len(selected_disks) / spans_count)

    # Best practice is to not pass span_length and span_depth when creating a
    # RAID10.  The iDRAC will dynamically calculate these values using maximum
    # values obtained from the RAID controller.
    logical_disk['span_depth'] = None
    logical_disk['span_length'] = None
    if logical_disk['raid_level'] != '1+0':
        logical_disk['span_depth'] = spans_count
        logical_disk['span_length'] = disks_per_span

    max_volume_size_mb = _max_volume_size_mb(
        logical_disk['raid_level'], selected_disks, free_space_mb,
        spans_count=spans_count)

    if logical_disk['size_mb'] == 'MAX':
        if max_volume_size_mb == 0:
            error_msg = _("size set to 'MAX' but could not allocate physical "
                          "disk space")
            raise exception.DracOperationError(error=error_msg)

        logical_disk['size_mb'] = max_volume_size_mb
    elif max_volume_size_mb < logical_disk['size_mb']:
        if max_volume_size_mb == 0:
            error_msg = _('not enough physical disk space for the logical '
                          'disk')
            raise exception.DracOperationError(error=error_msg)

    disk_usage = _volume_usage_per_disk_mb(logical_disk, selected_disks,
                                           spans_count=spans_count)

    for disk in selected_disks:
        if free_space_mb[disk] < disk_usage:
            error_msg = _('not enough free space on physical disks for the '
                          'logical disk')
            raise exception.DracOperationError(error=error_msg)
        else:
            free_space_mb[disk] -= disk_usage

    if 'controller' not in logical_disk:
        logical_disk['controller'] = selected_disks[0].controller


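# Editor's note on the matching strategy below: _assign_disks_to_volume pops
# one volume off the queue, tries each homogeneous disk group and candidate
# disk count, and recurses for the remaining volumes; on failure it pushes
# the volume back and reports False so the caller can retry with the
# reserved disks included.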
def _assign_disks_to_volume(logical_disks, physical_disks_by_type,
                            free_space_mb, pending_delete):
    logical_disk = logical_disks.pop(0)
    raid_level = logical_disk['raid_level']

    # iterate over all possible configurations
    for (controller, disk_type,
         interface_type, size_mb), disks in physical_disks_by_type.items():

        if ('disk_type' in logical_disk
            and logical_disk['disk_type'] != disk_type):
            continue

        if ('interface_type' in logical_disk
            and logical_disk['interface_type'] != interface_type):
            continue

        # filter out disks without free disk space
        disks = [disk for disk in disks if free_space_mb[disk] > 0]

        # sort disks by free size which is important if we have max disks limit
        # on a volume
        disks = sorted(
            disks,
            key=lambda disk: free_space_mb[disk])

        # filter out disks already in use if sharing is disabled
        if ('share_physical_disks' not in logical_disk
                or not logical_disk['share_physical_disks']):
            initial_free_size_mb = {
                disk: _get_disk_free_size_mb(disk, pending_delete)
                for disk in disks
            }
            disks = [disk for disk in disks
                     if initial_free_size_mb[disk] == free_space_mb[disk]]

        max_spans = _calculate_spans(raid_level, len(disks))
        min_spans = min([2, max_spans])
        min_disks = _raid_level_min_disks(raid_level,
                                          spans_count=min_spans)
        max_disks = _raid_level_max_disks(raid_level,
                                          spans_count=max_spans)
        candidate_max_disks = min([max_disks, len(disks)])

        for disks_count in range(min_disks, candidate_max_disks + 1):
            if ('number_of_physical_disks' in logical_disk
                    and (logical_disk['number_of_physical_disks']
                         != disks_count)):
                continue

            # skip invalid disks_count
            if disks_count != _usable_disks_count(logical_disk['raid_level'],
                                                  disks_count):
                continue

            selected_disks = disks[0:disks_count]

            candidate_volume = logical_disk.copy()
            candidate_free_space_mb = free_space_mb.copy()
            candidate_volume['physical_disks'] = [disk.id for disk
                                                  in selected_disks]
            try:
                _calculate_volume_props(candidate_volume, selected_disks,
                                        candidate_free_space_mb)
            except exception.DracOperationError:
                continue

            if len(logical_disks) > 0:
                result, candidate_free_space_mb = (
                    _assign_disks_to_volume(logical_disks,
                                            physical_disks_by_type,
                                            candidate_free_space_mb,
                                            pending_delete))
                if result:
                    logical_disks.append(candidate_volume)
                    return (True, candidate_free_space_mb)
            else:
                logical_disks.append(candidate_volume)
                return (True, candidate_free_space_mb)
    else:
        # put back the logical_disk to queue
        logical_disks.insert(0, logical_disk)
        return (False, free_space_mb)


def _filter_logical_disks(logical_disks, include_root_volume,
                          include_nonroot_volumes):
    filtered_disks = []
    for disk in logical_disks:
        if include_root_volume and disk.get('is_root_volume'):
            filtered_disks.append(disk)

        if include_nonroot_volumes and not disk.get('is_root_volume'):
            filtered_disks.append(disk)

    return filtered_disks


def _create_config_job(node, controller, reboot=False, realtime=False,
                       raid_config_job_ids=None,
                       raid_config_parameters=None):
    # NOTE: the list defaults are created per call to avoid the shared
    # mutable-default-argument pitfall.
    if raid_config_job_ids is None:
        raid_config_job_ids = []
    if raid_config_parameters is None:
        raid_config_parameters = []
    job_id = commit_config(node, raid_controller=controller,
                           reboot=reboot, realtime=realtime)

    raid_config_job_ids.append(job_id)
    if controller not in raid_config_parameters:
        raid_config_parameters.append(controller)

    LOG.info('Change has been committed to RAID controller '
             '%(controller)s on node %(node)s. '
             'DRAC job id: %(job_id)s',
             {'controller': controller, 'node': node.uuid,
              'job_id': job_id})
    return {'raid_config_job_ids': raid_config_job_ids,
            'raid_config_parameters': raid_config_parameters}


def _validate_volume_size(node, logical_disks):
    new_physical_disks = list_physical_disks(node)
    free_space_mb = {}
    new_processed_volumes = []
    for disk in new_physical_disks:
        free_space_mb[disk] = disk.free_size_mb

    for logical_disk in logical_disks:
        selected_disks = [disk for disk in new_physical_disks
                          if disk.id in logical_disk['physical_disks']]

        spans_count = _calculate_spans(
            logical_disk['raid_level'], len(selected_disks))

        new_max_vol_size_mb = _max_volume_size_mb(
            logical_disk['raid_level'],
            selected_disks,
            free_space_mb,
            spans_count=spans_count)

        if logical_disk['size_mb'] > new_max_vol_size_mb:
            logical_disk['size_mb'] = new_max_vol_size_mb
            LOG.info("Logical disk size exceeds the new maximum, so "
                     "recalculating volume properties for the current "
                     "logical_disk")
            _calculate_volume_props(
                logical_disk, new_physical_disks, free_space_mb)
            new_processed_volumes.append(logical_disk)

    if new_processed_volumes:
        return new_processed_volumes

    return logical_disks


def _switch_to_raid_mode(node, controller_fqdd):
    """Convert the controller mode from Enhanced HBA to RAID mode.

    :param node: an ironic node object
    :param controller_fqdd: the ID of the RAID controller.
    :returns: a dictionary containing
              - The raid_controller key with the ID of the
              RAID controller as its value.
              - The is_commit_required key with a
              boolean value indicating whether a config job must be created
              for the values to be applied.
              - The is_reboot_required key with a RebootRequired enumerated
              value indicating whether the server must be rebooted to
              switch the controller mode to RAID.
    """
    # wait for pending jobs to complete
    drac_job.wait_for_job_completion(node)

    raid_attr = "{}:{}".format(controller_fqdd,
                               _REQUESTED_RAID_CONTROLLER_MODE)
    settings = {raid_attr: _RAID_MODE}
    settings_results = set_raid_settings(
        node, controller_fqdd, settings)
    controller = {
        'raid_controller': controller_fqdd,
        'is_reboot_required': settings_results['is_reboot_required'],
        'is_commit_required': settings_results['is_commit_required']}
    return controller

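# Illustration (editor's sketch; the FQDD is a hypothetical placeholder):
# the attribute written above takes the form
#
#     'RAID.Integrated.1-1:RAIDRequestedControllerMode' -> 'RAID'
#
# which the iDRAC applies on the next config job.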

def _commit_to_controllers(node, controllers, substep="completed"):
    """Commit changes to RAID controllers on the node.

    :param node: an ironic node object
    :param controllers: a list of dictionaries, each containing
                        - The raid_controller key with the RAID controller
                        FQDD as its value, indicating the controller on
                        which the RAID configuration job needs to be
                        performed.
                        - The is_commit_required key with a
                        boolean value indicating whether a config job must
                        be created.
                        - The is_reboot_required key with a RebootRequired
                        enumerated value indicating whether the server must
                        be rebooted; a reboot is needed only if the RAID
                        controller does not support realtime configuration.
    :param substep: the clean or deploy substep that executes any remaining
                    RAID configuration work after this step.
                    (defaults to "completed")
    :returns: states.CLEANWAIT (cleaning) or states.DEPLOYWAIT (deployment) if
              configuration is in progress asynchronously or None if it is
              completed.
    """
    # remove controllers which do not require a configuration job
    controllers = [controller for controller in controllers
                   if controller['is_commit_required']]

    if not controllers:
        LOG.debug('No changes on any of the controllers on node %s',
                  node.uuid)
        driver_internal_info = node.driver_internal_info
        driver_internal_info['raid_config_substep'] = substep
        driver_internal_info['raid_config_parameters'] = []
        node.driver_internal_info = driver_internal_info
        node.save()
        return

    driver_internal_info = node.driver_internal_info
    driver_internal_info['raid_config_substep'] = substep
    driver_internal_info['raid_config_parameters'] = []

    if 'raid_config_job_ids' not in driver_internal_info:
        driver_internal_info['raid_config_job_ids'] = []

    optional = drac_constants.RebootRequired.optional

    # all realtime controllers
    all_realtime = all(
        (cntlr['is_reboot_required'] == optional)
        and not cntlr.get('is_ehba_mode')
        for cntlr in controllers)

    # check any controller with ehba mode
    any_ehba_controllers = any(
        cntrl.get('is_ehba_mode') is True for cntrl in controllers)

    raid_config_job_ids = []
    raid_config_parameters = []
    if all_realtime:
        for controller in controllers:
            realtime_controller = controller['raid_controller']
            job_details = _create_config_job(
                node, controller=realtime_controller,
                reboot=False, realtime=True,
                raid_config_job_ids=raid_config_job_ids,
                raid_config_parameters=raid_config_parameters)

    elif any_ehba_controllers:
        commit_to_ehba_controllers = []
        for controller in controllers:
            if controller.get('is_ehba_mode'):
                job_details = _create_config_job(
                    node, controller=controller['raid_controller'],
                    reboot=False, realtime=True,
                    raid_config_job_ids=raid_config_job_ids,
                    raid_config_parameters=raid_config_parameters)

                ehba_controller = _switch_to_raid_mode(
                    node, controller['raid_controller'])
                commit_to_ehba_controllers.append(
                    ehba_controller['raid_controller'])
            else:
                job_details = _create_config_job(
                    node, controller=controller['raid_controller'],
                    reboot=False, realtime=False,
                    raid_config_job_ids=raid_config_job_ids,
                    raid_config_parameters=raid_config_parameters)

        for controller in commit_to_ehba_controllers:
            LOG.debug("Create job with Reboot to apply configuration "
                      "changes for ehba controllers")
            job_details = _create_config_job(
                node, controller=controller,
                reboot=(controller == commit_to_ehba_controllers[-1]),
                realtime=False, raid_config_job_ids=raid_config_job_ids,
                raid_config_parameters=raid_config_parameters)
    else:
        for controller in controllers:
            mix_controller = controller['raid_controller']
            reboot = (controller == controllers[-1])
            job_details = _create_config_job(
                node, controller=mix_controller,
                reboot=reboot, realtime=False,
                raid_config_job_ids=raid_config_job_ids,
                raid_config_parameters=raid_config_parameters)

    driver_internal_info['raid_config_job_ids'].extend(job_details[
        'raid_config_job_ids'])

    driver_internal_info['raid_config_parameters'].extend(job_details[
        'raid_config_parameters'])

    node.driver_internal_info = driver_internal_info

    # Signal whether the node has been rebooted, that we do not need to
    # execute the step again, and that the completion of this step is
    # triggered through async polling.
    # NOTE(mgoddard): set_async_step_flags calls node.save().
    deploy_utils.set_async_step_flags(
        node,
        reboot=not all_realtime,
        skip_current_step=True,
        polling=True)

    return deploy_utils.get_async_step_return_state(node)

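# Shape of the controllers argument (editor's illustration; the FQDD is a
# hypothetical placeholder):
#
#     controllers = [
#         {'raid_controller': 'RAID.Integrated.1-1',
#          'is_reboot_required': drac_constants.RebootRequired.optional,
#          'is_commit_required': True}]
#     _commit_to_controllers(node, controllers)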

def _create_virtual_disks(task, node):
    logical_disks_to_create = node.driver_internal_info[
        'logical_disks_to_create']

    # Check that valid properties are attached to the volumes after the
    # drives' conversion
    isVolValidationNeeded = node.driver_internal_info[
        'volume_validation']
    if isVolValidationNeeded:
        logical_disks_to_create = _validate_volume_size(
            node, logical_disks_to_create)

    controllers = list()
    for logical_disk in logical_disks_to_create:
        controller = dict()
        controller_cap = create_virtual_disk(
            node,
            raid_controller=logical_disk['controller'],
            physical_disks=logical_disk['physical_disks'],
            raid_level=logical_disk['raid_level'],
            size_mb=logical_disk['size_mb'],
            disk_name=logical_disk.get('name'),
            span_length=logical_disk.get('span_length'),
            span_depth=logical_disk.get('span_depth'))
        controller['raid_controller'] = logical_disk['controller']
        controller['is_reboot_required'] = controller_cap[
            'is_reboot_required']
        controller['is_commit_required'] = controller_cap[
            'is_commit_required']
        if controller not in controllers:
            controllers.append(controller)

    return _commit_to_controllers(node, controllers)


def _controller_in_hba_mode(raid_settings, controller_fqdd):
    controller_mode = raid_settings.get(
        '{}:{}'.format(controller_fqdd, _CURRENT_RAID_CONTROLLER_MODE))

    return _EHBA_MODE in controller_mode.current_value


def _controller_supports_ehba_mode(settings, controller_fqdd):
    raid_cntrl_attr = "{}:{}".format(controller_fqdd,
                                     _CURRENT_RAID_CONTROLLER_MODE)
    current_cntrl_mode = settings.get(raid_cntrl_attr)
    if not current_cntrl_mode:
        return False
    else:
        return _EHBA_MODE in current_cntrl_mode.possible_values


def _get_disk_free_size_mb(disk, pending_delete):
    """Return the size of free space on the disk in MB.

    :param disk: a PhysicalDisk object.
    :param pending_delete: Whether there is a pending deletion of all virtual
        disks.
    """
    return disk.size_mb if pending_delete else disk.free_size_mb

 1162 
class DracWSManRAID(base.RAIDInterface):

    def get_properties(self):
        """Return the properties of the interface."""
        return drac_common.COMMON_PROPERTIES

    @base.deploy_step(priority=0,
                      argsinfo=base.RAID_APPLY_CONFIGURATION_ARGSINFO)
    def apply_configuration(self, task, raid_config, create_root_volume=True,
                            create_nonroot_volumes=False,
                            delete_existing=True):
        return super(DracWSManRAID, self).apply_configuration(
            task, raid_config, create_root_volume=create_root_volume,
            create_nonroot_volumes=create_nonroot_volumes,
            delete_existing=delete_existing)

    @METRICS.timer('DracRAID.create_configuration')
    @base.clean_step(priority=0, abortable=False, argsinfo={
        'create_root_volume': {
            'description': (
                'This specifies whether to create the root volume. '
                'Defaults to `True`.'
            ),
            'required': False
        },
        'create_nonroot_volumes': {
            'description': (
                'This specifies whether to create the non-root volumes. '
                'Defaults to `True`.'
            ),
            'required': False
        },
        "delete_existing": {
            "description": (
                "Setting this to 'True' deletes the existing RAID "
                "configuration before creating the new one. "
                "Default value is 'False'."
            ),
            "required": False,
        }
    })
    def create_configuration(self, task,
                             create_root_volume=True,
                             create_nonroot_volumes=True,
                             delete_existing=False):
        """Create the RAID configuration.

        This method creates the RAID configuration on the given node.

        :param task: a TaskManager instance containing the node to act on.
        :param create_root_volume: If True, a root volume is created
            during RAID configuration. Otherwise, no root volume is
            created. Default is True.
        :param create_nonroot_volumes: If True, non-root volumes are
            created. If False, no non-root volumes are created. Default
            is True.
        :param delete_existing: Setting this to True deletes the existing
            RAID configuration before creating the new one. Default is
            False.
        :returns: states.CLEANWAIT (cleaning) or states.DEPLOYWAIT (deployment)
            if creation is in progress asynchronously or None if it is
            completed.
        :raises: MissingParameterValue, if node.target_raid_config is missing
            or empty.
        :raises: DracOperationError on an error from python-dracclient.
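
        Example of a minimal ``target_raid_config`` consumed by this step,
        with illustrative sizes and a hypothetical physical disk FQDD::

            {
                'logical_disks': [
                    {'size_gb': 100,
                     'raid_level': '5',
                     'is_root_volume': True},
                    {'size_gb': 'MAX',
                     'raid_level': '0',
                     'physical_disks': [
                         'Disk.Bay.1:Enclosure.Internal.0-1:'
                         'RAID.Integrated.1-1']}
                ]
            }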
        """
        node = task.node

        logical_disks = node.target_raid_config['logical_disks']

        for disk in logical_disks:
            if disk['size_gb'] == 'MAX' and 'physical_disks' not in disk:
                raise exception.InvalidParameterValue(
                    _("create_configuration called with invalid "
                      "target_raid_configuration for node %(node_id)s. "
                      "'physical_disks' is missing from logical_disk while "
                      "'size_gb'='MAX' was requested: "
                      "%(logical_disk)s") % {'node_id': node.uuid,
                                             'logical_disk': disk})

            if disk['size_gb'] == 'MAX':
                disk['size_mb'] = 'MAX'
            else:
                disk['size_mb'] = disk['size_gb'] * units.Ki

            del disk['size_gb']

        if delete_existing:
            self._delete_configuration_no_commit(task)

        physical_disks = list_physical_disks(node)
        logical_disks = _find_configuration(logical_disks, physical_disks,
                                            pending_delete=delete_existing)

        logical_disks_to_create = _filter_logical_disks(
            logical_disks, create_root_volume, create_nonroot_volumes)

        controllers_to_physical_disk_ids = defaultdict(list)
        for logical_disk in logical_disks_to_create:
            # Not applicable to JBOD logical disks.
            if logical_disk['raid_level'] == 'JBOD':
                continue

            for physical_disk_name in logical_disk['physical_disks']:
                controllers_to_physical_disk_ids[
                    logical_disk['controller']].append(
                    physical_disk_name)
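        # At this point controllers_to_physical_disk_ids maps each controller
        # FQDD to the physical disks backing its logical disks, e.g. a
        # hypothetical {'RAID.Integrated.1-1': ['Disk.Bay.0:...',
        # 'Disk.Bay.1:...']} for a two-disk volume on one controller.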

        # Add logical_disks to driver_internal_info so the virtual disks can
        # be created later.
        driver_internal_info = node.driver_internal_info
        driver_internal_info[
            "logical_disks_to_create"] = logical_disks_to_create

        commit_results = None
        if logical_disks_to_create:
            LOG.debug(
                "Converting the physical disks backing RAID logical "
                "disks to RAID mode for node %(node_uuid)s",
                {"node_uuid": node.uuid})
            raid_mode = drac_constants.RaidStatus.raid
            commit_results = _change_physical_disk_mode(
                node, raid_mode,
                controllers_to_physical_disk_ids,
                substep="create_virtual_disks")

        volume_validation = bool(commit_results)
        driver_internal_info['volume_validation'] = volume_validation
        node.driver_internal_info = driver_internal_info
        node.save()

        if commit_results:
            return commit_results
        else:
            LOG.debug("Controller does not support drive conversion, "
                      "so creating virtual disks directly")
            return _create_virtual_disks(task, node)

    @METRICS.timer('DracRAID.delete_configuration')
    @base.clean_step(priority=0)
    @base.deploy_step(priority=0)
    def delete_configuration(self, task):
        """Delete the RAID configuration.

        :param task: a TaskManager instance containing the node to act on.
        :returns: states.CLEANWAIT (cleaning) or states.DEPLOYWAIT (deployment)
            if deletion is in progress asynchronously or None if it is
            completed.
        :raises: DracOperationError on an error from python-dracclient.
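
        For example, this step can be requested through manual cleaning with
        an (illustrative) clean step list of::

            [{'interface': 'raid', 'step': 'delete_configuration'}]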
        """

        controllers = self._delete_configuration_no_commit(task)
        return _commit_to_controllers(task.node, controllers,
                                      substep="delete_foreign_config")

    @METRICS.timer('DracRAID.get_logical_disks')
    def get_logical_disks(self, task):
        """Get the RAID configuration of the node.

        :param task: a TaskManager instance containing the node to act on.
        :returns: A dictionary with a single key, 'logical_disks', mapping to
            a list of the virtual disks known to the node's RAID controllers.
        :raises: DracOperationError on an error from python-dracclient.
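
        Example of the returned dictionary, with illustrative values and a
        hypothetical virtual disk FQDD::

            {
                'logical_disks': [
                    {'id': 'Disk.Virtual.0:RAID.Integrated.1-1',
                     'controller': 'RAID.Integrated.1-1',
                     'size_gb': 100,
                     'raid_level': '5',
                     'name': 'root_volume'}
                ]
            }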
        """
        node = task.node

        logical_disks = []
        for disk in list_virtual_disks(node):
            logical_disk = {
                'id': disk.id,
                'controller': disk.controller,
                'size_gb': int(disk.size_mb / units.Ki),
                'raid_level': disk.raid_level
            }

            if disk.name is not None:
                logical_disk['name'] = disk.name

            logical_disks.append(logical_disk)

        return {'logical_disks': logical_disks}

    @METRICS.timer('DracRAID._query_raid_config_job_status')
    @periodics.periodic(
        spacing=CONF.drac.query_raid_config_job_status_interval)
    def _query_raid_config_job_status(self, manager, context):
        """Periodic task to check the progress of running RAID config jobs."""

        filters = {'reserved': False, 'maintenance': False}
        fields = ['driver_internal_info']

        node_list = manager.iter_nodes(fields=fields, filters=filters)
        for (node_uuid, driver, conductor_group,
             driver_internal_info) in node_list:
            try:
                lock_purpose = 'checking async raid configuration jobs'
                with task_manager.acquire(context, node_uuid,
                                          purpose=lock_purpose,
                                          shared=True) as task:
                    if not isinstance(task.driver.raid, DracWSManRAID):
                        continue

                    job_ids = driver_internal_info.get('raid_config_job_ids')
                    if not job_ids:
                        continue

                    self._check_node_raid_jobs(task)

            except exception.NodeNotFound:
                LOG.info("During query_raid_config_job_status, node "
                         "%(node)s was not found and presumed deleted by "
                         "another process.", {'node': node_uuid})
            except exception.NodeLocked:
                LOG.info("During query_raid_config_job_status, node "
                         "%(node)s was already locked by another process. "
                         "Skip.", {'node': node_uuid})

    @METRICS.timer('DracRAID._check_node_raid_jobs')
    def _check_node_raid_jobs(self, task):
        """Check the progress of running RAID config jobs of a node."""

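        # The job IDs tracked in driver_internal_info are iDRAC job queue
        # entries, e.g. a hypothetical
        # {'raid_config_job_ids': ['JID_442507917525']}.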
        node = task.node
        raid_config_job_ids = node.driver_internal_info['raid_config_job_ids']
        finished_job_ids = []

        for config_job_id in raid_config_job_ids:
            config_job = drac_job.get_job(node, job_id=config_job_id)

            if config_job is None or config_job.status == 'Completed':
                finished_job_ids.append(config_job_id)
            elif config_job.status == 'Failed':
                finished_job_ids.append(config_job_id)
                self._set_raid_config_job_failure(node)

        if not finished_job_ids:
            return

        task.upgrade_lock()
        self._delete_cached_config_job_id(node, finished_job_ids)

        if not node.driver_internal_info.get('raid_config_job_failure',
                                             False):
            if 'raid_config_substep' in node.driver_internal_info:
                substep = node.driver_internal_info['raid_config_substep']

                if substep == 'delete_foreign_config':
                    foreign_drives = self._execute_foreign_drives(task, node)
                    if foreign_drives is None:
                        return self._convert_drives(task, node)
                elif substep == 'physical_disk_conversion':
                    self._convert_drives(task, node)
                elif substep == "create_virtual_disks":
                    return _create_virtual_disks(task, node)
                elif substep == 'completed':
                    self._complete_raid_substep(task, node)
            else:
                self._complete_raid_substep(task, node)
        else:
            self._clear_raid_substep(node)
            self._clear_raid_config_job_failure(node)
            self._set_failed(task, config_job)

    def _execute_foreign_drives(self, task, node):
        """Clear the foreign configuration on the node's RAID controllers.

        :returns: the commit result if any controller requires a
            configuration job to be committed, or None if no foreign
            configuration was found.
        """
        controllers = []
        jobs_required = False
        for controller_id in node.driver_internal_info[
                'raid_config_parameters']:
            controller_cap = clear_foreign_config(
                node, controller_id)
            controller = {
                'raid_controller': controller_id,
                'is_reboot_required': controller_cap['is_reboot_required'],
                'is_commit_required': controller_cap['is_commit_required']}
            controllers.append(controller)
            jobs_required = jobs_required or controller_cap[
                'is_commit_required']

        if not jobs_required:
            LOG.info(
                "No foreign drives detected, resuming %s",
                "cleaning" if node.clean_step else "deployment")
            return None
        else:
            return _commit_to_controllers(
                node,
                controllers,
                substep='physical_disk_conversion')

    def _complete_raid_substep(self, task, node):
        self._clear_raid_substep(node)
        self._resume(task)

    def _convert_drives(self, task, node):
        jbod = drac_constants.RaidStatus.jbod
        drives_results = _change_physical_disk_mode(
            node, mode=jbod)
        if drives_results is None:
            LOG.debug("Controller does not support drive "
                      "conversion on %(node_uuid)s",
                      {'node_uuid': node.uuid})
            self._complete_raid_substep(task, node)

    def _clear_raid_substep(self, node):
        driver_internal_info = node.driver_internal_info
        driver_internal_info.pop('raid_config_substep', None)
        driver_internal_info.pop('raid_config_parameters', None)
        node.driver_internal_info = driver_internal_info
        node.save()

    def _set_raid_config_job_failure(self, node):
        driver_internal_info = node.driver_internal_info
        driver_internal_info['raid_config_job_failure'] = True
        node.driver_internal_info = driver_internal_info
        node.save()

    def _clear_raid_config_job_failure(self, node):
        driver_internal_info = node.driver_internal_info
        del driver_internal_info['raid_config_job_failure']
        node.driver_internal_info = driver_internal_info
        node.save()

    def _delete_cached_config_job_id(self, node,
                                     finished_config_job_ids=None):
        if finished_config_job_ids is None:
            finished_config_job_ids = []
        driver_internal_info = node.driver_internal_info
        unfinished_job_ids = [job_id for job_id
                              in driver_internal_info['raid_config_job_ids']
                              if job_id not in finished_config_job_ids]
        driver_internal_info['raid_config_job_ids'] = unfinished_job_ids
        node.driver_internal_info = driver_internal_info
        node.save()

    def _set_failed(self, task, config_job):
        error_msg = (_("Failed config job: %(config_job_id)s. "
                       "Message: '%(message)s'.") %
                     {'config_job_id': config_job.id,
                      'message': config_job.message})
        log_msg = ("RAID configuration job failed for node %(node)s. "
                   "%(error)s" %
                   {'node': task.node.uuid, 'error': error_msg})
        if task.node.clean_step:
            manager_utils.cleaning_error_handler(task, error_msg)
        else:
            manager_utils.deploying_error_handler(task, log_msg, error_msg)

    def _resume(self, task):
        raid_common.update_raid_info(
            task.node, self.get_logical_disks(task))
        if task.node.clean_step:
            manager_utils.notify_conductor_resume_clean(task)
        else:
            manager_utils.notify_conductor_resume_deploy(task)

    def _delete_configuration_no_commit(self, task):
        """Delete existing RAID configuration without committing the change.

        :param task: A TaskManager instance.
        :returns: A list of dictionaries, one per RAID controller that needs
            its changes committed; each entry carries the controller FQDD
            and whether a reboot and a commit are required.
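
        Example of the returned list, with a hypothetical controller FQDD
        and illustrative flag values::

            [{'raid_controller': 'RAID.Integrated.1-1',
              'is_reboot_required': True,
              'is_commit_required': True}]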
        """
        node = task.node
        controllers = []
        drac_raid_controllers = list_raid_controllers(node)
        drac_raid_settings = list_raid_settings(node)
        for cntrl in drac_raid_controllers:
            if _is_raid_controller(node, cntrl.id, drac_raid_controllers):
                controller = {}
                if (_controller_supports_ehba_mode(drac_raid_settings,
                                                   cntrl.id)
                        and _controller_in_hba_mode(drac_raid_settings,
                                                    cntrl.id)):
                    controller['is_ehba_mode'] = True
                controller_cap = _reset_raid_config(node, cntrl.id)
                controller["raid_controller"] = cntrl.id
                controller["is_reboot_required"] = controller_cap[
                    "is_reboot_required"]
                controller["is_commit_required"] = controller_cap[
                    "is_commit_required"]
                controllers.append(controller)
        return controllers


class DracRAID(DracWSManRAID):
    """Class alias of class DracWSManRAID.

    This class provides ongoing support of the deprecated 'idrac' RAID
    interface implementation entrypoint.

    All bug fixes and new features should be implemented in its base
    class, DracWSManRAID. That makes them available to both the
    deprecated 'idrac' and new 'idrac-wsman' entrypoints. Such changes
    should not be made to this class.
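
    For example, a node still using the deprecated entrypoint can be moved
    to the supported one with an (illustrative) command such as::

        openstack baremetal node set --raid-interface idrac-wsman <node>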
    """

    def __init__(self):
        super(DracRAID, self).__init__()
        LOG.warning("RAID interface 'idrac' is deprecated and may be removed "
                    "in a future release. Use 'idrac-wsman' instead.")