"Fossies" - the Fresh Open Source Software Archive

Member "cinder-14.0.2/cinder/volume/drivers/netapp/dataontap/block_cmode.py" (4 Oct 2019, 25315 Bytes) of package /linux/misc/openstack/cinder-14.0.2.tar.gz:


As a special service, "Fossies" has tried to format the requested source page into HTML format using (guessed) Python source code syntax highlighting (style: standard) with prefixed line numbers. Alternatively, you can view or download the uninterpreted source code file here. For more information about "block_cmode.py" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 14.0.1_vs_14.0.2.

    1 # Copyright (c) 2012 NetApp, Inc.  All rights reserved.
    2 # Copyright (c) 2014 Ben Swartzlander.  All rights reserved.
    3 # Copyright (c) 2014 Navneet Singh.  All rights reserved.
    4 # Copyright (c) 2014 Clinton Knight.  All rights reserved.
    5 # Copyright (c) 2014 Alex Meade.  All rights reserved.
    6 # Copyright (c) 2014 Andrew Kerr.  All rights reserved.
    7 # Copyright (c) 2014 Jeff Applewhite.  All rights reserved.
    8 # Copyright (c) 2015 Tom Barron.  All rights reserved.
    9 # Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved.
   10 # Copyright (c) 2016 Mike Rooney. All rights reserved.
   11 #
   12 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
   13 #    not use this file except in compliance with the License. You may obtain
   14 #    a copy of the License at
   15 #
   16 #         http://www.apache.org/licenses/LICENSE-2.0
   17 #
   18 #    Unless required by applicable law or agreed to in writing, software
   19 #    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   20 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   21 #    License for the specific language governing permissions and limitations
   22 #    under the License.
   23 """
   24 Volume driver library for NetApp C-mode block storage systems.
   25 """
   26 
   27 from oslo_log import log as logging
   28 from oslo_utils import units
   29 import six
   30 
   31 from cinder import exception
   32 from cinder.i18n import _
   33 from cinder.objects import fields
   34 from cinder import utils
   35 from cinder.volume.drivers.netapp.dataontap import block_base
   36 from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
   37 from cinder.volume.drivers.netapp.dataontap.utils import capabilities
   38 from cinder.volume.drivers.netapp.dataontap.utils import data_motion
   39 from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls
   40 from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils
   41 from cinder.volume.drivers.netapp import options as na_opts
   42 from cinder.volume.drivers.netapp import utils as na_utils
   43 from cinder.volume import utils as volume_utils
   44 
   45 
   46 LOG = logging.getLogger(__name__)
   47 
   48 
@six.add_metaclass(utils.TraceWrapperMetaclass)
class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary,
                                     data_motion.DataMotionMixin):
    """NetApp block storage library for Data ONTAP (Cluster-mode)."""

    # Config options that must be present for a cluster-mode backend.
    REQUIRED_CMODE_FLAGS = ['netapp_vserver']

    def __init__(self, driver_name, driver_protocol, **kwargs):
        """Initialize cluster-mode state on top of the base library.

        :param driver_name: name reported by the owning driver
        :param driver_protocol: block protocol in use (e.g. iSCSI or FC)
        :param kwargs: passed through to the base library; may carry
            ``active_backend_id`` when the backend has been failed over
        """
        super(NetAppBlockStorageCmodeLibrary, self).__init__(driver_name,
                                                             driver_protocol,
                                                             **kwargs)
        self.configuration.append_config_values(na_opts.netapp_cluster_opts)
        self.driver_mode = 'cluster'
        # Non-None only when a failover has occurred; names the replication
        # target backend stanza that is currently active.
        self.failed_over_backend_name = kwargs.get('active_backend_id')
        self.failed_over = self.failed_over_backend_name is not None
        # Replication is enabled when at least one replication target
        # backend is configured.
        self.replication_enabled = (
            True if self.get_replication_backend_names(
                self.configuration) else False)
    def do_setup(self, context):
        """Set up the cDOT API client, SSC library, and perf library.

        Runs once before the backend is usable.  ``check_flags`` raises if
        any required cluster-mode configuration option is missing.
        """
        super(NetAppBlockStorageCmodeLibrary, self).do_setup(context)
        na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration)

        # cDOT API client; targets the failed-over backend stanza when a
        # failover is active, otherwise this backend's own stanza.
        self.zapi_client = dot_utils.get_client_for_backend(
            self.failed_over_backend_name or self.backend_name)
        self.vserver = self.zapi_client.vserver

        # Storage service catalog
        self.ssc_library = capabilities.CapabilitiesLibrary(
            self.driver_protocol, self.vserver, self.zapi_client,
            self.configuration)

        self.ssc_library.check_api_permissions()

        # Cluster-scoped credentials unlock extra capabilities (QoS,
        # dedupe and aggregate stats) used in _get_pool_stats.
        self.using_cluster_credentials = (
            self.ssc_library.cluster_user_supported())

        # Performance monitoring library
        self.perf_library = perf_cmode.PerformanceCmodeLibrary(
            self.zapi_client)
   91     def _update_zapi_client(self, backend_name):
   92         """Set cDOT API client for the specified config backend stanza name."""
   93 
   94         self.zapi_client = dot_utils.get_client_for_backend(backend_name)
   95         self.vserver = self.zapi_client.vserver
   96         self.ssc_library._update_for_failover(self.zapi_client,
   97                                               self._get_flexvol_to_pool_map())
   98         ssc = self.ssc_library.get_ssc()
   99         self.perf_library._update_for_failover(self.zapi_client, ssc)
  100         # Clear LUN table cache
  101         self.lun_table = {}
  102 
  103     def check_for_setup_error(self):
  104         """Check that the driver is working and can communicate."""
  105         if not self._get_flexvol_to_pool_map():
  106             msg = _('No pools are available for provisioning volumes. '
  107                     'Ensure that the configuration option '
  108                     'netapp_pool_name_search_pattern is set correctly.')
  109             raise exception.NetAppDriverException(msg)
  110         self._add_looping_tasks()
  111         super(NetAppBlockStorageCmodeLibrary, self).check_for_setup_error()
  112 
  113     def _add_looping_tasks(self):
  114         """Add tasks that need to be executed at a fixed interval."""
  115 
  116         # Note(cknight): Run the update once in the current thread to prevent a
  117         # race with the first invocation of _update_volume_stats.
  118         self._update_ssc()
  119 
  120         # Add the task that updates the slow-changing storage service catalog
  121         self.loopingcalls.add_task(self._update_ssc,
  122                                    loopingcalls.ONE_HOUR,
  123                                    loopingcalls.ONE_HOUR)
  124 
  125         self.loopingcalls.add_task(
  126             self._handle_housekeeping_tasks,
  127             loopingcalls.TEN_MINUTES,
  128             0)
  129 
  130         super(NetAppBlockStorageCmodeLibrary, self)._add_looping_tasks()
  131 
  132     def _handle_housekeeping_tasks(self):
  133         """Handle various cleanup activities."""
  134         active_backend = self.failed_over_backend_name or self.backend_name
  135 
  136         # Add the task that harvests soft-deleted QoS policy groups.
  137         if self.using_cluster_credentials:
  138             self.zapi_client.remove_unused_qos_policy_groups()
  139 
  140         LOG.debug("Current service state: Replication enabled: %("
  141                   "replication)s. Failed-Over: %(failed)s. Active Backend "
  142                   "ID: %(active)s",
  143                   {
  144                       'replication': self.replication_enabled,
  145                       'failed': self.failed_over,
  146                       'active': active_backend,
  147                   })
  148 
  149         # Create pool mirrors if whole-backend replication configured
  150         if self.replication_enabled and not self.failed_over:
  151             self.ensure_snapmirrors(
  152                 self.configuration, self.backend_name,
  153                 self.ssc_library.get_ssc_flexvol_names())
  154 
  155     def _handle_ems_logging(self):
  156         """Log autosupport messages."""
  157 
  158         base_ems_message = dot_utils.build_ems_log_message_0(
  159             self.driver_name, self.app_version)
  160         self.zapi_client.send_ems_log_message(base_ems_message)
  161 
  162         pool_ems_message = dot_utils.build_ems_log_message_1(
  163             self.driver_name, self.app_version, self.vserver,
  164             self.ssc_library.get_ssc_flexvol_names(), [])
  165         self.zapi_client.send_ems_log_message(pool_ems_message)
  166 
  167     def _create_lun(self, volume_name, lun_name, size,
  168                     metadata, qos_policy_group_name=None):
  169         """Creates a LUN, handling Data ONTAP differences as needed."""
  170 
  171         self.zapi_client.create_lun(
  172             volume_name, lun_name, size, metadata, qos_policy_group_name)
  173 
  174     def _create_lun_handle(self, metadata, vserver=None):
  175         """Returns LUN handle based on filer type."""
  176         vserver = vserver or self.vserver
  177         return '%s:%s' % (self.vserver, metadata['Path'])
  178 
  179     def _find_mapped_lun_igroup(self, path, initiator_list):
  180         """Find an igroup for a LUN mapped to the given initiator(s)."""
  181         initiator_igroups = self.zapi_client.get_igroup_by_initiators(
  182             initiator_list)
  183         lun_maps = self.zapi_client.get_lun_map(path)
  184         if initiator_igroups and lun_maps:
  185             for igroup in initiator_igroups:
  186                 igroup_name = igroup['initiator-group-name']
  187                 if igroup_name.startswith(na_utils.OPENSTACK_PREFIX):
  188                     for lun_map in lun_maps:
  189                         if lun_map['initiator-group'] == igroup_name:
  190                             return igroup_name, lun_map['lun-id']
  191         return None, None
  192 
  193     def _clone_lun(self, name, new_name, space_reserved=None,
  194                    qos_policy_group_name=None, src_block=0, dest_block=0,
  195                    block_count=0, source_snapshot=None, is_snapshot=False):
  196         """Clone LUN with the given handle to the new name."""
  197         if not space_reserved:
  198             space_reserved = self.lun_space_reservation
  199         metadata = self._get_lun_attr(name, 'metadata')
  200         volume = metadata['Volume']
  201 
  202         self.zapi_client.clone_lun(volume, name, new_name, space_reserved,
  203                                    qos_policy_group_name=qos_policy_group_name,
  204                                    src_block=src_block, dest_block=dest_block,
  205                                    block_count=block_count,
  206                                    source_snapshot=source_snapshot,
  207                                    is_snapshot=is_snapshot)
  208 
  209         LOG.debug("Cloned LUN with new name %s", new_name)
  210         lun = self.zapi_client.get_lun_by_args(vserver=self.vserver,
  211                                                path='/vol/%s/%s'
  212                                                % (volume, new_name))
  213         if len(lun) == 0:
  214             msg = _("No cloned LUN named %s found on the filer")
  215             raise exception.VolumeBackendAPIException(data=msg % new_name)
  216         clone_meta = self._create_lun_meta(lun[0])
  217         self._add_lun_to_table(
  218             block_base.NetAppLun('%s:%s' % (clone_meta['Vserver'],
  219                                             clone_meta['Path']),
  220                                  new_name,
  221                                  lun[0].get_child_content('size'),
  222                                  clone_meta))
  223 
  224     def _create_lun_meta(self, lun):
  225         """Creates LUN metadata dictionary."""
  226         self.zapi_client.check_is_naelement(lun)
  227         meta_dict = {}
  228         meta_dict['Vserver'] = lun.get_child_content('vserver')
  229         meta_dict['Volume'] = lun.get_child_content('volume')
  230         meta_dict['Qtree'] = lun.get_child_content('qtree')
  231         meta_dict['Path'] = lun.get_child_content('path')
  232         meta_dict['OsType'] = lun.get_child_content('multiprotocol-type')
  233         meta_dict['SpaceReserved'] = \
  234             lun.get_child_content('is-space-reservation-enabled')
  235         meta_dict['UUID'] = lun.get_child_content('uuid')
  236         return meta_dict
  237 
    def _get_fc_target_wwpns(self, include_partner=True):
        # ``include_partner`` is part of the base-library interface but is
        # unused here; the client call takes no arguments.
        return self.zapi_client.get_fc_target_wwpns()
  240 
  241     def _update_volume_stats(self, filter_function=None,
  242                              goodness_function=None):
  243         """Retrieve backend stats."""
  244 
  245         LOG.debug('Updating volume stats')
  246         data = {}
  247         backend_name = self.configuration.safe_get('volume_backend_name')
  248         data['volume_backend_name'] = backend_name or self.driver_name
  249         data['vendor_name'] = 'NetApp'
  250         data['driver_version'] = self.VERSION
  251         data['storage_protocol'] = self.driver_protocol
  252         data['pools'] = self._get_pool_stats(
  253             filter_function=filter_function,
  254             goodness_function=goodness_function)
  255         data['sparse_copy_volume'] = True
  256 
  257         # Used for service state report
  258         data['replication_enabled'] = self.replication_enabled
  259 
  260         self._stats = data
  261 
    def _get_pool_stats(self, filter_function=None, goodness_function=None):
        """Retrieve pool (Data ONTAP flexvol) stats.

        Pool statistics are assembled from static driver capabilities, the
        Storage Service Catalog of flexvol attributes, and real-time capacity
        and controller utilization metrics.  The pool name is the flexvol name.

        :param filter_function: scheduler filter expression, attached to each
            pool dict as-is
        :param goodness_function: scheduler goodness expression, attached to
            each pool dict as-is
        :returns: list of pool stat dicts, one per SSC flexvol; empty when
            the SSC has not been populated yet
        """

        pools = []

        ssc = self.ssc_library.get_ssc()
        if not ssc:
            return pools

        # Utilization and performance metrics require cluster-scoped
        # credentials
        if self.using_cluster_credentials:
            # Get up-to-date node utilization metrics just once
            self.perf_library.update_performance_cache(ssc)

            # Get up-to-date aggregate capacities just once
            aggregates = self.ssc_library.get_ssc_aggregates()
            aggr_capacities = self.zapi_client.get_aggregate_capacities(
                aggregates)
        else:
            aggr_capacities = {}

        for ssc_vol_name, ssc_vol_info in ssc.items():

            pool = dict()

            # Add storage service catalog data
            pool.update(ssc_vol_info)

            # Add driver capabilities and config info
            pool['QoS_support'] = self.using_cluster_credentials
            pool['multiattach'] = True
            pool['online_extend_support'] = False
            pool['consistencygroup_support'] = True
            pool['consistent_group_snapshot_enabled'] = True
            pool['reserved_percentage'] = self.reserved_percentage
            pool['max_over_subscription_ratio'] = (
                self.max_over_subscription_ratio)

            # Add up-to-date capacity info
            capacity = self.zapi_client.get_flexvol_capacity(
                flexvol_name=ssc_vol_name)

            size_total_gb = capacity['size-total'] / units.Gi
            pool['total_capacity_gb'] = na_utils.round_down(size_total_gb)

            size_available_gb = capacity['size-available'] / units.Gi
            pool['free_capacity_gb'] = na_utils.round_down(size_available_gb)

            # Dedupe savings are only visible with cluster credentials.
            if self.using_cluster_credentials:
                dedupe_used = self.zapi_client.get_flexvol_dedupe_used_percent(
                    ssc_vol_name)
            else:
                dedupe_used = 0.0
            pool['netapp_dedupe_used_percent'] = na_utils.round_down(
                dedupe_used)

            # Fall back to 0 when the flexvol's aggregate is unknown or its
            # capacity was not collected above.
            aggregate_name = ssc_vol_info.get('netapp_aggregate')
            aggr_capacity = aggr_capacities.get(aggregate_name, {})
            pool['netapp_aggregate_used_percent'] = aggr_capacity.get(
                'percent-used', 0)

            # Add utilization data
            utilization = self.perf_library.get_node_utilization_for_pool(
                ssc_vol_name)
            pool['utilization'] = na_utils.round_down(utilization)
            pool['filter_function'] = filter_function
            pool['goodness_function'] = goodness_function

            # Add replication capabilities/stats
            pool.update(
                self.get_replication_backend_stats(self.configuration))

            pools.append(pool)

        return pools
  343 
    def _update_ssc(self):
        """Refresh the storage service catalog with the latest set of pools."""

        # Re-seed the SSC from the flexvols currently matching the pool
        # name search pattern.
        self.ssc_library.update_ssc(self._get_flexvol_to_pool_map())
  348 
  349     def _get_flexvol_to_pool_map(self):
  350         """Get the flexvols that match the pool name search pattern.
  351 
  352         The map is of the format suitable for seeding the storage service
  353         catalog: {<flexvol_name> : {'pool_name': <flexvol_name>}}
  354         """
  355 
  356         pool_regex = na_utils.get_pool_name_filter_regex(self.configuration)
  357 
  358         pools = {}
  359         flexvol_names = self.zapi_client.list_flexvols()
  360 
  361         for flexvol_name in flexvol_names:
  362 
  363             msg_args = {
  364                 'flexvol': flexvol_name,
  365                 'vol_pattern': pool_regex.pattern,
  366             }
  367 
  368             if pool_regex.match(flexvol_name):
  369                 msg = "Volume '%(flexvol)s' matches %(vol_pattern)s"
  370                 LOG.debug(msg, msg_args)
  371                 pools[flexvol_name] = {'pool_name': flexvol_name}
  372             else:
  373                 msg = "Volume '%(flexvol)s' does not match %(vol_pattern)s"
  374                 LOG.debug(msg, msg_args)
  375 
  376         return pools
  377 
  378     def delete_volume(self, volume):
  379         """Driver entry point for destroying existing volumes."""
  380         super(NetAppBlockStorageCmodeLibrary, self).delete_volume(volume)
  381         try:
  382             qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
  383                 volume)
  384         except exception.Invalid:
  385             # Delete even if there was invalid qos policy specified for the
  386             # volume.
  387             qos_policy_group_info = None
  388         self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
  389 
  390         msg = 'Deleted LUN with name %(name)s and QoS info %(qos)s'
  391         LOG.debug(msg, {'name': volume['name'], 'qos': qos_policy_group_info})
  392 
  393     def _setup_qos_for_volume(self, volume, extra_specs):
  394         try:
  395             qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
  396                 volume, extra_specs)
  397         except exception.Invalid:
  398             msg = _('Invalid QoS specification detected while getting QoS '
  399                     'policy for volume %s') % volume['id']
  400             raise exception.VolumeBackendAPIException(data=msg)
  401         self.zapi_client.provision_qos_policy_group(qos_policy_group_info)
  402         return qos_policy_group_info
  403 
  404     def _get_volume_model_update(self, volume):
  405         """Provide any updates necessary for a volume being created/managed."""
  406         if self.replication_enabled:
  407             return {'replication_status': fields.ReplicationStatus.ENABLED}
  408 
    def _mark_qos_policy_group_for_deletion(self, qos_policy_group_info):
        """Ask the backend to soft-delete the given QoS policy group.

        Soft-deleted groups are later harvested by the housekeeping task.
        Callers may pass None (e.g. when the volume carried an invalid QoS
        spec); the client call is made regardless.
        """
        self.zapi_client.mark_qos_policy_group_for_deletion(
            qos_policy_group_info)
  412 
  413     def unmanage(self, volume):
  414         """Removes the specified volume from Cinder management.
  415 
  416            Does not delete the underlying backend storage object.
  417         """
  418         try:
  419             qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
  420                 volume)
  421         except exception.Invalid:
  422             # Unmanage even if there was invalid qos policy specified for the
  423             # volume.
  424             qos_policy_group_info = None
  425         self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
  426         super(NetAppBlockStorageCmodeLibrary, self).unmanage(volume)
  427 
    def failover_host(self, context, volumes, secondary_id=None, groups=None):
        """Failover a backend to a secondary replication target."""
        # context and groups are accepted to satisfy the driver interface;
        # the work is delegated to _failover_host, which is defined outside
        # this file (presumably on the DataMotionMixin -- confirm there).
        return self._failover_host(volumes, secondary_id=secondary_id)
  432 
  433     def _get_backing_flexvol_names(self):
  434         """Returns a list of backing flexvol names."""
  435 
  436         ssc = self.ssc_library.get_ssc()
  437         return list(ssc.keys())
  438 
  439     def create_group(self, group):
  440         """Driver entry point for creating a generic volume group.
  441 
  442         ONTAP does not maintain an actual Group construct. As a result, no
  443         communication to the backend is necessary for generic volume group
  444         creation.
  445 
  446         :returns: Hard-coded model update for generic volume group model.
  447         """
  448         model_update = {'status': fields.GroupStatus.AVAILABLE}
  449         return model_update
  450 
  451     def delete_group(self, group, volumes):
  452         """Driver entry point for deleting a group.
  453 
  454         :returns: Updated group model and list of volume models
  455                  for the volumes that were deleted.
  456         """
  457         model_update = {'status': fields.GroupStatus.DELETED}
  458         volumes_model_update = []
  459         for volume in volumes:
  460             try:
  461                 self._delete_lun(volume['name'])
  462                 volumes_model_update.append(
  463                     {'id': volume['id'], 'status': 'deleted'})
  464             except Exception:
  465                 volumes_model_update.append(
  466                     {'id': volume['id'],
  467                      'status': 'error_deleting'})
  468                 LOG.exception("Volume %(vol)s in the group could not be "
  469                               "deleted.", {'vol': volume})
  470         return model_update, volumes_model_update
  471 
  472     def update_group(self, group, add_volumes=None, remove_volumes=None):
  473         """Driver entry point for updating a generic volume group.
  474 
  475         Since no actual group construct is ever created in ONTAP, it is not
  476         necessary to update any metadata on the backend. Since this is a NO-OP,
  477         there is guaranteed to be no change in any of the volumes' statuses.
  478         """
  479         return None, None, None
  480 
  481     def create_group_snapshot(self, group_snapshot, snapshots):
  482         """Creates a Cinder group snapshot object.
  483 
  484         The Cinder group snapshot object is created by making use of an
  485         ephemeral ONTAP consistency group snapshot in order to provide
  486         write-order consistency for a set of flexvol snapshots. First, a list
  487         of the flexvols backing the given Cinder group must be gathered. An
  488         ONTAP group-snapshot of these flexvols will create a snapshot copy of
  489         all the Cinder volumes in the generic volume group. For each Cinder
  490         volume in the group, it is then necessary to clone its backing LUN from
  491         the ONTAP cg-snapshot. The naming convention used for the clones is
  492         what indicates the clone's role as a Cinder snapshot and its inclusion
  493         in a Cinder group. The ONTAP cg-snapshot of the flexvols is no longer
  494         required after having cloned the LUNs backing the Cinder volumes in
  495         the Cinder group.
  496 
  497         :returns: An implicit update for group snapshot and snapshots models
  498                  that is interpreted by the manager to set their models to
  499                  available.
  500         """
  501         try:
  502             if volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
  503                 self._create_consistent_group_snapshot(group_snapshot,
  504                                                        snapshots)
  505             else:
  506                 for snapshot in snapshots:
  507                     self._create_snapshot(snapshot)
  508         except Exception as ex:
  509             err_msg = (_("Create group snapshot failed (%s).") % ex)
  510             LOG.exception(err_msg, resource=group_snapshot)
  511             raise exception.NetAppDriverException(err_msg)
  512 
  513         return None, None
  514 
  515     def _create_consistent_group_snapshot(self, group_snapshot, snapshots):
  516             flexvols = set()
  517             for snapshot in snapshots:
  518                 flexvols.add(volume_utils.extract_host(
  519                     snapshot['volume']['host'], level='pool'))
  520 
  521             self.zapi_client.create_cg_snapshot(flexvols, group_snapshot['id'])
  522 
  523             for snapshot in snapshots:
  524                 self._clone_lun(snapshot['volume']['name'], snapshot['name'],
  525                                 source_snapshot=group_snapshot['id'])
  526 
  527             for flexvol in flexvols:
  528                 try:
  529                     self.zapi_client.wait_for_busy_snapshot(
  530                         flexvol, group_snapshot['id'])
  531                     self.zapi_client.delete_snapshot(
  532                         flexvol, group_snapshot['id'])
  533                 except exception.SnapshotIsBusy:
  534                     self.zapi_client.mark_snapshot_for_deletion(
  535                         flexvol, group_snapshot['id'])
  536 
  537     def delete_group_snapshot(self, group_snapshot, snapshots):
  538         """Delete LUNs backing each snapshot in the group snapshot.
  539 
  540         :returns: An implicit update for snapshots models that is interpreted
  541                  by the manager to set their models to deleted.
  542         """
  543         for snapshot in snapshots:
  544             self._delete_lun(snapshot['name'])
  545             LOG.debug("Snapshot %s deletion successful", snapshot['name'])
  546 
  547         return None, None
  548 
  549     def create_group_from_src(self, group, volumes, group_snapshot=None,
  550                               snapshots=None, source_group=None,
  551                               source_vols=None):
  552         """Creates a group from a group snapshot or a group of cinder vols.
  553 
  554         :returns: An implicit update for the volumes model that is
  555                  interpreted by the manager as a successful operation.
  556         """
  557         LOG.debug("VOLUMES %s ", ', '.join([vol['id'] for vol in volumes]))
  558         volume_model_updates = []
  559 
  560         if group_snapshot:
  561             vols = zip(volumes, snapshots)
  562 
  563             for volume, snapshot in vols:
  564                 source = {
  565                     'name': snapshot['name'],
  566                     'size': snapshot['volume_size'],
  567                 }
  568                 volume_model_update = self._clone_source_to_destination(
  569                     source, volume)
  570                 if volume_model_update is not None:
  571                     volume_model_update['id'] = volume['id']
  572                     volume_model_updates.append(volume_model_update)
  573 
  574         else:
  575             vols = zip(volumes, source_vols)
  576 
  577             for volume, old_src_vref in vols:
  578                 src_lun = self._get_lun_from_table(old_src_vref['name'])
  579                 source = {'name': src_lun.name, 'size': old_src_vref['size']}
  580                 volume_model_update = self._clone_source_to_destination(
  581                     source, volume)
  582                 if volume_model_update is not None:
  583                     volume_model_update['id'] = volume['id']
  584                     volume_model_updates.append(volume_model_update)
  585 
  586         return None, volume_model_updates