"Fossies" - the Fresh Open Source Software Archive

Member "manila-11.0.1/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py" (1 Feb 2021, 132894 Bytes) of package /linux/misc/openstack/manila-11.0.1.tar.gz:


    1 # Copyright (c) 2015 Clinton Knight.  All rights reserved.
    2 # Copyright (c) 2015 Tom Barron.  All rights reserved.
    3 #
    4 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
    5 #    not use this file except in compliance with the License. You may obtain
    6 #    a copy of the License at
    7 #
    8 #         http://www.apache.org/licenses/LICENSE-2.0
    9 #
   10 #    Unless required by applicable law or agreed to in writing, software
   11 #    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   12 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   13 #    License for the specific language governing permissions and limitations
   14 #    under the License.
   15 """
   16 NetApp Data ONTAP cDOT base storage driver library.
   17 
   18 This library is the abstract base for subclasses that complete the
   19 single-SVM or multi-SVM functionality needed by the cDOT Manila drivers.
   20 """
   21 
   22 import copy
   23 import datetime
   24 import json
   25 import math
   26 import socket
   27 
   28 from oslo_config import cfg
   29 from oslo_log import log
   30 from oslo_service import loopingcall
   31 from oslo_utils import timeutils
   32 from oslo_utils import units
   33 from oslo_utils import uuidutils
   34 import six
   35 
   36 from manila.common import constants
   37 from manila import exception
   38 from manila.i18n import _
   39 from manila.share.drivers.netapp.dataontap.client import api as netapp_api
   40 from manila.share.drivers.netapp.dataontap.client import client_cmode
   41 from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion
   42 from manila.share.drivers.netapp.dataontap.cluster_mode import performance
   43 from manila.share.drivers.netapp.dataontap.protocols import cifs_cmode
   44 from manila.share.drivers.netapp.dataontap.protocols import nfs_cmode
   45 from manila.share.drivers.netapp import options as na_opts
   46 from manila.share.drivers.netapp import utils as na_utils
   47 from manila.share import share_types
   48 from manila.share import utils as share_utils
   49 from manila import utils as manila_utils
   50 
   51 LOG = log.getLogger(__name__)
   52 CONF = cfg.CONF
   53 
   54 
   55 class NetAppCmodeFileStorageLibrary(object):
   56 
   57     AUTOSUPPORT_INTERVAL_SECONDS = 3600  # hourly
   58     SSC_UPDATE_INTERVAL_SECONDS = 3600  # hourly
   59     HOUSEKEEPING_INTERVAL_SECONDS = 600  # ten minutes
   60 
   61     SUPPORTED_PROTOCOLS = ('nfs', 'cifs')
   62 
   63     DEFAULT_FILTER_FUNCTION = 'capabilities.utilization < 70'
   64     DEFAULT_GOODNESS_FUNCTION = '100 - capabilities.utilization'
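           # Illustrative note (not from the upstream code): with these
           # defaults the scheduler keeps only pools reporting utilization
           # below 70% and ranks the remainder by '100 - utilization',
           # preferring the least utilized pools.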
   65 
   66     # Internal states when dealing with data motion
   67     STATE_SPLITTING_VOLUME_CLONE = 'splitting_volume_clone'
   68     STATE_MOVING_VOLUME = 'moving_volume'
   69     STATE_SNAPMIRROR_DATA_COPYING = 'snapmirror_data_copying'
   70 
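           # Rough lifecycle of these states, as implemented below in
           # create_share_from_snapshot() and _create_from_snapshot_continue():
           # same pool -> synchronous clone (no state); same cluster but a
           # different pool or vserver -> STATE_SPLITTING_VOLUME_CLONE, then
           # possibly STATE_MOVING_VOLUME; different cluster (or no cluster
           # credentials) -> STATE_SNAPMIRROR_DATA_COPYING until the copy
           # completes.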
   71     # Maps NetApp qualified extra specs keys to corresponding backend API
   72     # client library argument keywords.  When we expose more backend
   73     # capabilities here, we will add them to this map.
   74     BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP = {
   75         'netapp:thin_provisioned': 'thin_provisioned',
   76         'netapp:dedup': 'dedup_enabled',
   77         'netapp:compression': 'compression_enabled',
   78         'netapp:split_clone_on_create': 'split',
   79         'netapp:hide_snapdir': 'hide_snapdir',
   80     }
   81 
   82     STRING_QUALIFIED_EXTRA_SPECS_MAP = {
   83 
   84         'netapp:snapshot_policy': 'snapshot_policy',
   85         'netapp:language': 'language',
   86         'netapp:max_files': 'max_files',
   87         'netapp:adaptive_qos_policy_group': 'adaptive_qos_policy_group',
   88     }
   89 
   90     # Maps standard extra spec keys to legacy NetApp keys
   91     STANDARD_BOOLEAN_EXTRA_SPECS_MAP = {
   92         'thin_provisioning': 'netapp:thin_provisioned',
   93         'dedupe': 'netapp:dedup',
   94         'compression': 'netapp:compression',
   95     }
   96 
   97     QOS_SPECS = {
   98         'netapp:maxiops': 'maxiops',
   99         'netapp:maxiopspergib': 'maxiopspergib',
  100         'netapp:maxbps': 'maxbps',
  101         'netapp:maxbpspergib': 'maxbpspergib',
  102     }
  103 
  104     HIDE_SNAPDIR_CFG_MAP = {
  105         'visible': False,
  106         'hidden': True,
  107         'default': None,
  108     }
  109 
  110     SIZE_DEPENDENT_QOS_SPECS = {'maxiopspergib', 'maxbpspergib'}
  111 
   112     # Maps NetApp qualified extra specs to share-server NFS config options
  113     NFS_CONFIG_EXTRA_SPECS_MAP = {
  114 
  115         'netapp:tcp_max_xfer_size': 'tcp-max-xfer-size',
  116         'netapp:udp_max_xfer_size': 'udp-max-xfer-size',
  117     }
  118 
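           # Illustrative sketch (not part of the upstream code): a share type
           # carrying extra specs such as
           #     {'netapp:dedup': 'true', 'netapp:snapshot_policy': 'default'}
           # is translated through the maps above into client library kwargs
           # roughly like
           #     {'dedup_enabled': True, 'snapshot_policy': 'default'}
           # before the volume is provisioned.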
  119     def __init__(self, driver_name, **kwargs):
  120         na_utils.validate_driver_instantiation(**kwargs)
  121 
  122         self.driver_name = driver_name
  123 
  124         self.private_storage = kwargs['private_storage']
  125         self.configuration = kwargs['configuration']
  126         self.configuration.append_config_values(na_opts.netapp_connection_opts)
  127         self.configuration.append_config_values(na_opts.netapp_basicauth_opts)
  128         self.configuration.append_config_values(na_opts.netapp_transport_opts)
  129         self.configuration.append_config_values(na_opts.netapp_support_opts)
  130         self.configuration.append_config_values(na_opts.netapp_cluster_opts)
  131         self.configuration.append_config_values(
  132             na_opts.netapp_provisioning_opts)
  133         self.configuration.append_config_values(
  134             na_opts.netapp_data_motion_opts)
  135 
  136         self._licenses = []
  137         self._client = None
  138         self._clients = {}
  139         self._ssc_stats = {}
  140         self._have_cluster_creds = None
  141         self._revert_to_snapshot_support = False
  142         self._cluster_info = {}
  143         self._default_nfs_config = None
  144         self.is_nfs_config_supported = False
  145 
  146         self._app_version = kwargs.get('app_version', 'unknown')
  147 
  148         na_utils.setup_tracing(self.configuration.netapp_trace_flags,
  149                                self.configuration.netapp_api_trace_pattern)
  150         self._backend_name = self.configuration.safe_get(
  151             'share_backend_name') or driver_name
  152 
  153     @na_utils.trace
  154     def do_setup(self, context):
  155         self._client = self._get_api_client()
  156         self._have_cluster_creds = self._client.check_for_cluster_credentials()
  157         if self._have_cluster_creds is True:
  158             self._set_cluster_info()
  159 
  160         self._licenses = self._get_licenses()
  161         self._revert_to_snapshot_support = self._check_snaprestore_license()
  162 
  163         # Performance monitoring library
  164         self._perf_library = performance.PerformanceLibrary(self._client)
  165 
   166         # NOTE(felipe_rodrigues): if a parameter that can also be configured
   167         # on older versions is added, the "is_nfs_config_supported" flag
   168         # should be removed (always supported), and logic added to skip the
   169         # transfer limit parameters when building the server nfs_config.
  170         if self._client.features.TRANSFER_LIMIT_NFS_CONFIG:
  171             self.is_nfs_config_supported = True
  172             self._default_nfs_config = self._client.get_nfs_config_default(
  173                 list(self.NFS_CONFIG_EXTRA_SPECS_MAP.values()))
  174             LOG.debug('The default NFS configuration: %s',
  175                       self._default_nfs_config)
  176 
  177     @na_utils.trace
  178     def _set_cluster_info(self):
  179         self._cluster_info['nve_support'] = (
  180             self._client.is_nve_supported()
  181             and self._client.features.FLEXVOL_ENCRYPTION)
  182 
  183     @na_utils.trace
  184     def check_for_setup_error(self):
  185         self._start_periodic_tasks()
  186 
  187     def _get_vserver(self, share_server=None):
  188         raise NotImplementedError()
  189 
  190     @na_utils.trace
  191     def _get_api_client(self, vserver=None):
  192 
  193         # Use cached value to prevent calls to system-get-ontapi-version.
  194         client = self._clients.get(vserver)
  195 
  196         if not client:
  197             client = client_cmode.NetAppCmodeClient(
  198                 transport_type=self.configuration.netapp_transport_type,
  199                 username=self.configuration.netapp_login,
  200                 password=self.configuration.netapp_password,
  201                 hostname=self.configuration.netapp_server_hostname,
  202                 port=self.configuration.netapp_server_port,
  203                 vserver=vserver,
  204                 trace=na_utils.TRACE_API,
  205                 api_trace_pattern=na_utils.API_TRACE_PATTERN)
  206             self._clients[vserver] = client
  207 
  208         return client
  209 
  210     @na_utils.trace
  211     def _get_licenses(self):
  212 
  213         if not self._have_cluster_creds:
  214             LOG.debug('License info not available without cluster credentials')
  215             return []
  216 
  217         self._licenses = self._client.get_licenses()
  218 
  219         log_data = {
  220             'backend': self._backend_name,
  221             'licenses': ', '.join(self._licenses),
  222         }
  223         LOG.info('Available licenses on %(backend)s '
  224                  'are %(licenses)s.', log_data)
  225 
  226         if 'nfs' not in self._licenses and 'cifs' not in self._licenses:
  227             msg = 'Neither NFS nor CIFS is licensed on %(backend)s'
  228             msg_args = {'backend': self._backend_name}
  229             LOG.error(msg, msg_args)
  230 
  231         return self._licenses
  232 
  233     @na_utils.trace
  234     def _start_periodic_tasks(self):
  235 
   236         # Run the task once in the current thread to prevent a race with
  237         # the first invocation of get_share_stats.
  238         self._update_ssc_info()
  239 
  240         # Start the task that updates the slow-changing storage service catalog
  241         ssc_periodic_task = loopingcall.FixedIntervalLoopingCall(
  242             self._update_ssc_info)
  243         ssc_periodic_task.start(interval=self.SSC_UPDATE_INTERVAL_SECONDS,
  244                                 initial_delay=self.SSC_UPDATE_INTERVAL_SECONDS)
  245 
  246         # Start the task that logs autosupport (EMS) data to the controller
  247         ems_periodic_task = loopingcall.FixedIntervalLoopingCall(
  248             self._handle_ems_logging)
  249         ems_periodic_task.start(interval=self.AUTOSUPPORT_INTERVAL_SECONDS,
  250                                 initial_delay=0)
  251 
  252         # Start the task that runs other housekeeping tasks, such as deletion
  253         # of previously soft-deleted storage artifacts.
  254         housekeeping_periodic_task = loopingcall.FixedIntervalLoopingCall(
  255             self._handle_housekeeping_tasks)
  256         housekeeping_periodic_task.start(
  257             interval=self.HOUSEKEEPING_INTERVAL_SECONDS, initial_delay=0)
  258 
  259     def _get_backend_share_name(self, share_id):
  260         """Get share name according to share name template."""
  261         return self.configuration.netapp_volume_name_template % {
  262             'share_id': share_id.replace('-', '_')}
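               # Illustrative example (not from the upstream code), assuming
               # the default netapp_volume_name_template of
               # 'share_%(share_id)s': a share ID of '1234-abcd' yields the
               # backend volume name 'share_1234_abcd'.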
  263 
  264     def _get_backend_snapshot_name(self, snapshot_id):
   265         """Get the backend snapshot name for the given snapshot ID."""
  266         return 'share_snapshot_' + snapshot_id.replace('-', '_')
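               # For example, a snapshot ID of 'abcd-1234' yields the backend
               # snapshot name 'share_snapshot_abcd_1234'.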
  267 
  268     def _get_backend_cg_snapshot_name(self, snapshot_id):
   269         """Get the backend CG snapshot name for the given snapshot ID."""
  270         return 'share_cg_snapshot_' + snapshot_id.replace('-', '_')
  271 
  272     def _get_backend_qos_policy_group_name(self, share_id):
  273         """Get QoS policy name according to QoS policy group name template."""
  274         return self.configuration.netapp_qos_policy_group_name_template % {
  275             'share_id': share_id.replace('-', '_')}
  276 
  277     def _get_backend_snapmirror_policy_name_svm(self, share_server_id):
  278         return (self.configuration.netapp_snapmirror_policy_name_svm_template
  279                 % {'share_server_id': share_server_id.replace('-', '_')})
  280 
  281     @na_utils.trace
  282     def _get_aggregate_space(self):
  283         aggregates = self._find_matching_aggregates()
  284         if self._have_cluster_creds:
  285             return self._client.get_cluster_aggregate_capacities(aggregates)
  286         else:
  287             return self._client.get_vserver_aggregate_capacities(aggregates)
  288 
  289     @na_utils.trace
  290     def _check_snaprestore_license(self):
  291         """Check if snaprestore license is enabled."""
  292         if self._have_cluster_creds:
  293             return 'snaprestore' in self._licenses
  294         else:
   295             # NOTE(felipe_rodrigues): workaround to find out whether the
   296             # backend has the license: since the ONTAP licenses cannot be
   297             # retrieved without cluster credentials, a fake ONTAP
   298             # "snapshot-restore-volume" request, which is only available
   299             # when the license exists, is sent. The returned error indicates
   300             # whether or not the license is installed.
  301             try:
  302                 self._client.restore_snapshot(
  303                     "fake_%s" % uuidutils.generate_uuid(dashed=False), "")
  304             except netapp_api.NaApiError as e:
  305                 no_license = 'is not licensed'
  306                 LOG.debug('Fake restore_snapshot request failed: %s', e)
  307                 return not (e.code == netapp_api.EAPIERROR and
  308                             no_license in e.message)
  309 
  310             # since it passed an empty snapshot, it should never get here
   311             msg = _("Caught unexpected behavior: the fake restore to "
   312                     "snapshot request using a 'fake' volume and an empty "
   313                     "string as the snapshot name did not fail.")
  314             LOG.exception(msg)
  315             raise exception.NetAppException(msg)
  316 
  317     @na_utils.trace
  318     def _get_aggregate_node(self, aggregate_name):
  319         """Get home node for the specified aggregate, or None."""
  320         if self._have_cluster_creds:
  321             return self._client.get_node_for_aggregate(aggregate_name)
  322         else:
  323             return None
  324 
  325     def get_default_filter_function(self):
  326         """Get the default filter_function string."""
  327         return self.DEFAULT_FILTER_FUNCTION
  328 
  329     def get_default_goodness_function(self):
  330         """Get the default goodness_function string."""
  331         return self.DEFAULT_GOODNESS_FUNCTION
  332 
  333     @na_utils.trace
  334     def get_share_stats(self, filter_function=None, goodness_function=None):
  335         """Retrieve stats info from Data ONTAP backend."""
  336 
  337         data = {
  338             'share_backend_name': self._backend_name,
  339             'driver_name': self.driver_name,
  340             'vendor_name': 'NetApp',
  341             'driver_version': '1.0',
  342             'netapp_storage_family': 'ontap_cluster',
  343             'storage_protocol': 'NFS_CIFS',
  344             'pools': self._get_pools(filter_function=filter_function,
  345                                      goodness_function=goodness_function),
  346             'share_group_stats': {
  347                 'consistent_snapshot_support': 'host',
  348             },
  349         }
  350 
  351         if self.configuration.replication_domain:
  352             data['replication_type'] = 'dr'
  353             data['replication_domain'] = self.configuration.replication_domain
  354 
  355         return data
  356 
  357     @na_utils.trace
  358     def get_share_server_pools(self, share_server):
  359         """Return list of pools related to a particular share server.
  360 
  361         Note that the multi-SVM cDOT driver assigns all available pools to
  362         each Vserver, so there is no need to filter the pools any further
  363         by share_server.
  364 
  365         :param share_server: ShareServer class instance.
  366         """
  367         return self._get_pools()
  368 
  369     @na_utils.trace
  370     def _get_pools(self, filter_function=None, goodness_function=None):
  371         """Retrieve list of pools available to this backend."""
  372 
  373         pools = []
  374         aggr_space = self._get_aggregate_space()
  375         aggregates = aggr_space.keys()
  376 
  377         if self._have_cluster_creds:
  378             # Get up-to-date node utilization metrics just once.
  379             self._perf_library.update_performance_cache({}, self._ssc_stats)
  380             qos_support = True
  381         else:
  382             qos_support = False
  383 
  384         netapp_flexvol_encryption = self._cluster_info.get(
  385             'nve_support', False)
  386 
  387         for aggr_name in sorted(aggregates):
  388 
  389             reserved_percentage = self.configuration.reserved_share_percentage
  390             max_over_ratio = self.configuration.max_over_subscription_ratio
  391 
  392             total_capacity_gb = na_utils.round_down(float(
  393                 aggr_space[aggr_name].get('total', 0)) / units.Gi)
  394             free_capacity_gb = na_utils.round_down(float(
  395                 aggr_space[aggr_name].get('available', 0)) / units.Gi)
  396             allocated_capacity_gb = na_utils.round_down(float(
  397                 aggr_space[aggr_name].get('used', 0)) / units.Gi)
  398 
  399             if total_capacity_gb == 0.0:
  400                 total_capacity_gb = 'unknown'
  401 
  402             pool = {
  403                 'pool_name': aggr_name,
  404                 'filter_function': filter_function,
  405                 'goodness_function': goodness_function,
  406                 'total_capacity_gb': total_capacity_gb,
  407                 'free_capacity_gb': free_capacity_gb,
  408                 'allocated_capacity_gb': allocated_capacity_gb,
  409                 'qos': qos_support,
  410                 'reserved_percentage': reserved_percentage,
  411                 'max_over_subscription_ratio': max_over_ratio,
  412                 'dedupe': [True, False],
  413                 'compression': [True, False],
  414                 'netapp_flexvol_encryption': netapp_flexvol_encryption,
  415                 'thin_provisioning': [True, False],
  416                 'snapshot_support': True,
  417                 'create_share_from_snapshot_support': True,
  418                 'revert_to_snapshot_support': self._revert_to_snapshot_support,
  419             }
  420 
  421             # Add storage service catalog data.
  422             pool_ssc_stats = self._ssc_stats.get(aggr_name)
  423             if pool_ssc_stats:
  424                 pool.update(pool_ssc_stats)
  425 
  426             # Add utilization info, or nominal value if not available.
  427             utilization = self._perf_library.get_node_utilization_for_pool(
  428                 aggr_name)
  429             pool['utilization'] = na_utils.round_down(utilization)
  430 
  431             pools.append(pool)
  432 
  433         return pools
  434 
  435     @na_utils.trace
  436     def _handle_ems_logging(self):
  437         """Build and send an EMS log message."""
  438         self._client.send_ems_log_message(self._build_ems_log_message_0())
  439         self._client.send_ems_log_message(self._build_ems_log_message_1())
  440 
  441     def _build_base_ems_log_message(self):
  442         """Construct EMS Autosupport log message common to all events."""
  443 
  444         ems_log = {
  445             'computer-name': socket.gethostname() or 'Manila_node',
  446             'event-source': 'Manila driver %s' % self.driver_name,
  447             'app-version': self._app_version,
  448             'category': 'provisioning',
  449             'log-level': '5',
  450             'auto-support': 'false',
  451         }
  452         return ems_log
  453 
  454     @na_utils.trace
  455     def _build_ems_log_message_0(self):
  456         """Construct EMS Autosupport log message with deployment info."""
  457 
  458         ems_log = self._build_base_ems_log_message()
  459         ems_log.update({
  460             'event-id': '0',
  461             'event-description': 'OpenStack Manila connected to cluster node',
  462         })
  463         return ems_log
  464 
  465     @na_utils.trace
  466     def _build_ems_log_message_1(self):
  467         """Construct EMS Autosupport log message with storage pool info."""
  468 
  469         message = self._get_ems_pool_info()
  470 
  471         ems_log = self._build_base_ems_log_message()
  472         ems_log.update({
  473             'event-id': '1',
  474             'event-description': json.dumps(message),
  475         })
  476         return ems_log
  477 
  478     def _get_ems_pool_info(self):
  479         raise NotImplementedError()
  480 
  481     @na_utils.trace
  482     def _handle_housekeeping_tasks(self):
  483         """Handle various cleanup activities."""
  484 
  485     def _find_matching_aggregates(self):
   486         """Find all aggregates that match the pattern."""
  487         raise NotImplementedError()
  488 
  489     @na_utils.trace
  490     def _get_helper(self, share):
   491         """Returns the helper which implements the share protocol."""
  492         share_protocol = share['share_proto'].lower()
  493 
  494         if share_protocol not in self.SUPPORTED_PROTOCOLS:
  495             err_msg = _("Invalid NAS protocol supplied: %s.") % share_protocol
  496             raise exception.NetAppException(err_msg)
  497 
  498         self._check_license_for_protocol(share_protocol)
  499 
  500         if share_protocol == 'nfs':
  501             return nfs_cmode.NetAppCmodeNFSHelper()
  502         elif share_protocol == 'cifs':
  503             return cifs_cmode.NetAppCmodeCIFSHelper()
  504 
  505     @na_utils.trace
  506     def _check_license_for_protocol(self, share_protocol):
  507         """Validates protocol license if cluster APIs are accessible."""
  508         if not self._have_cluster_creds:
  509             return
  510 
  511         if share_protocol.lower() not in self._licenses:
  512             current_licenses = self._get_licenses()
  513             if share_protocol.lower() not in current_licenses:
  514                 msg_args = {
  515                     'protocol': share_protocol,
  516                     'host': self.configuration.netapp_server_hostname
  517                 }
  518                 msg = _('The protocol %(protocol)s is not licensed on '
  519                         'controller %(host)s') % msg_args
  520                 LOG.error(msg)
  521                 raise exception.NetAppException(msg)
  522 
  523     @na_utils.trace
  524     def get_pool(self, share):
  525         pool = share_utils.extract_host(share['host'], level='pool')
  526         if pool:
  527             return pool
  528 
  529         share_name = self._get_backend_share_name(share['id'])
  530         return self._client.get_aggregate_for_volume(share_name)
  531 
  532     @na_utils.trace
  533     def create_share(self, context, share, share_server):
  534         """Creates new share."""
  535         vserver, vserver_client = self._get_vserver(share_server=share_server)
  536         self._allocate_container(share, vserver, vserver_client)
  537         return self._create_export(share, share_server, vserver,
  538                                    vserver_client)
  539 
  540     @na_utils.trace
  541     def create_share_from_snapshot(self, context, share, snapshot,
  542                                    share_server=None, parent_share=None):
  543         """Creates new share from snapshot."""
  544         # TODO(dviroel) return progress info in asynchronous answers
  545         if parent_share['host'] == share['host']:
  546             src_vserver, src_vserver_client = self._get_vserver(
  547                 share_server=share_server)
  548             # Creating a new share from snapshot in the source share's pool
  549             self._allocate_container_from_snapshot(
  550                 share, snapshot, src_vserver, src_vserver_client)
  551             return self._create_export(share, share_server, src_vserver,
  552                                        src_vserver_client)
  553 
  554         parent_share_server = {}
  555         if parent_share['share_server'] is not None:
  556             # Get only the information needed by Data Motion
  557             ss_keys = ['id', 'identifier', 'backend_details', 'host']
  558             for key in ss_keys:
  559                 parent_share_server[key] = (
  560                     parent_share['share_server'].get(key))
  561 
  562         # Information to be saved in the private_storage that will need to be
  563         # retrieved later, in order to continue with the share creation flow
  564         src_share_instance = {
  565             'id': share['id'],
  566             'host': parent_share.get('host'),
  567             'share_server': parent_share_server or None
  568         }
  569         # NOTE(dviroel): Data Motion functions access share's 'share_server'
   570         # attribute to get vserver information.
  571         dest_share = copy.deepcopy(share.to_dict())
  572         dest_share['share_server'] = (share_server.to_dict()
  573                                       if share_server else None)
  574 
  575         dm_session = data_motion.DataMotionSession()
  576         # Source host info
  577         __, src_vserver, src_backend = (
  578             dm_session.get_backend_info_for_share(parent_share))
  579         src_vserver_client = data_motion.get_client_for_backend(
  580             src_backend, vserver_name=src_vserver)
  581         src_cluster_name = src_vserver_client.get_cluster_name()
  582 
  583         # Destination host info
  584         dest_vserver, dest_vserver_client = self._get_vserver(share_server)
  585         dest_cluster_name = dest_vserver_client.get_cluster_name()
  586 
  587         try:
  588             if (src_cluster_name != dest_cluster_name or
  589                     not self._have_cluster_creds):
   590                 # 1. Create a clone on the source. We don't need to split
   591                 # the clone in order to replicate data
  592                 self._allocate_container_from_snapshot(
  593                     dest_share, snapshot, src_vserver, src_vserver_client,
  594                     split=False)
  595                 # 2. Create a replica in destination host
  596                 self._allocate_container(
  597                     dest_share, dest_vserver, dest_vserver_client,
  598                     replica=True)
  599                 # 3. Initialize snapmirror relationship with cloned share.
  600                 src_share_instance['replica_state'] = (
  601                     constants.REPLICA_STATE_ACTIVE)
  602                 dm_session.create_snapmirror(src_share_instance, dest_share)
   603                 # The snapmirror data copy can take some time to complete,
   604                 # so we'll answer this call asynchronously
  605                 state = self.STATE_SNAPMIRROR_DATA_COPYING
  606             else:
  607                 # NOTE(dviroel): there's a need to split the cloned share from
  608                 # its parent in order to move it to a different aggregate or
  609                 # vserver
  610                 self._allocate_container_from_snapshot(
  611                     dest_share, snapshot, src_vserver,
  612                     src_vserver_client, split=True)
   613                 # The split volume clone operation can take some time to
   614                 # complete, so we'll answer the call asynchronously
  615                 state = self.STATE_SPLITTING_VOLUME_CLONE
  616         except Exception:
   617             # If the share exists on the source vserver, we need to
   618             # delete it, since it is a temporary share not managed by the system
  619             dm_session.delete_snapmirror(src_share_instance, dest_share)
  620             self._delete_share(src_share_instance, src_vserver_client,
  621                                remove_export=False)
  622             msg = _('Could not create share %(share_id)s from snapshot '
  623                     '%(snapshot_id)s in the destination host %(dest_host)s.')
  624             msg_args = {'share_id': dest_share['id'],
  625                         'snapshot_id': snapshot['id'],
  626                         'dest_host': dest_share['host']}
  627             raise exception.NetAppException(msg % msg_args)
  628 
  629         # Store source share info on private storage using destination share id
  630         src_share_instance['internal_state'] = state
  631         src_share_instance['status'] = constants.STATUS_ACTIVE
  632         self.private_storage.update(dest_share['id'], {
  633             'source_share': json.dumps(src_share_instance)
  634         })
  635         return {
  636             'status': constants.STATUS_CREATING_FROM_SNAPSHOT,
  637         }
  638 
  639     def _update_create_from_snapshot_status(self, share, share_server=None):
  640         # TODO(dviroel) return progress info in asynchronous answers
   641         # If the share is being created from a snapshot and data is being
   642         # copied in the background, verify whether the operation has finished
   643         # and trigger new operations if necessary.
  644         source_share_str = self.private_storage.get(share['id'],
  645                                                     'source_share')
  646         if source_share_str is None:
  647             msg = _('Could not update share %(share_id)s status due to invalid'
  648                     ' internal state. Aborting share creation.')
  649             msg_args = {'share_id': share['id']}
  650             LOG.error(msg, msg_args)
  651             return {'status': constants.STATUS_ERROR}
  652         try:
   653             # Check if the current operation has finished and continue moving
   654             # the source share towards its destination
  655             return self._create_from_snapshot_continue(share, share_server)
  656         except Exception:
   657             # Delete everything associated with the temporary clone created
   658             # on the source host.
  659             source_share = json.loads(source_share_str)
  660             dm_session = data_motion.DataMotionSession()
  661 
  662             dm_session.delete_snapmirror(source_share, share)
  663             __, src_vserver, src_backend = (
  664                 dm_session.get_backend_info_for_share(source_share))
  665             src_vserver_client = data_motion.get_client_for_backend(
  666                 src_backend, vserver_name=src_vserver)
  667 
  668             self._delete_share(source_share, src_vserver_client,
  669                                remove_export=False)
  670             # Delete private storage info
  671             self.private_storage.delete(share['id'])
  672             msg = _('Could not complete share %(share_id)s creation due to an '
  673                     'internal error.')
  674             msg_args = {'share_id': share['id']}
  675             LOG.error(msg, msg_args)
  676             return {'status': constants.STATUS_ERROR}
  677 
  678     def _create_from_snapshot_continue(self, share, share_server=None):
  679         return_values = {
  680             'status': constants.STATUS_CREATING_FROM_SNAPSHOT
  681         }
  682         apply_qos_on_dest = False
  683         # Data motion session used to extract host info and manage snapmirrors
  684         dm_session = data_motion.DataMotionSession()
  685         # Get info from private storage
  686         src_share_str = self.private_storage.get(share['id'], 'source_share')
  687         src_share = json.loads(src_share_str)
  688         current_state = src_share['internal_state']
  689         share['share_server'] = share_server
  690 
  691         # Source host info
  692         __, src_vserver, src_backend = (
  693             dm_session.get_backend_info_for_share(src_share))
  694         src_aggr = share_utils.extract_host(src_share['host'], level='pool')
  695         src_vserver_client = data_motion.get_client_for_backend(
  696             src_backend, vserver_name=src_vserver)
  697         # Destination host info
  698         dest_vserver, dest_vserver_client = self._get_vserver(share_server)
  699         dest_aggr = share_utils.extract_host(share['host'], level='pool')
  700 
  701         if current_state == self.STATE_SPLITTING_VOLUME_CLONE:
  702             if self._check_volume_clone_split_completed(
  703                     src_share, src_vserver_client):
  704                 # Rehost volume if source and destination are hosted in
  705                 # different vservers
  706                 if src_vserver != dest_vserver:
  707                     # NOTE(dviroel): some volume policies, policy rules and
  708                     # configurations are lost from the source volume after
  709                     # rehost operation.
  710                     qos_policy_for_share = (
  711                         self._get_backend_qos_policy_group_name(share['id']))
  712                     src_vserver_client.mark_qos_policy_group_for_deletion(
  713                         qos_policy_for_share)
  714                     # Apply QoS on destination share
  715                     apply_qos_on_dest = True
  716 
  717                     self._rehost_and_mount_volume(
  718                         share, src_vserver, src_vserver_client,
  719                         dest_vserver, dest_vserver_client)
  720                 # Move the share to the expected aggregate
  721                 if src_aggr != dest_aggr:
  722                     # Move volume and 'defer' the cutover. If it fails, the
  723                     # share will be deleted afterwards
  724                     self._move_volume_after_splitting(
  725                         src_share, share, share_server, cutover_action='defer')
   726                     # Moving a volume can take longer, so we'll answer
   727                     # asynchronously
  728                     current_state = self.STATE_MOVING_VOLUME
  729                 else:
  730                     return_values['status'] = constants.STATUS_AVAILABLE
  731 
  732         elif current_state == self.STATE_MOVING_VOLUME:
  733             if self._check_volume_move_completed(share, share_server):
  734                 if src_vserver != dest_vserver:
   735                     # NOTE(dviroel): at this point the share has already been
   736                     # rehosted, but QoS was not applied because the share was
   737                     # being moved between aggregates
  738                     apply_qos_on_dest = True
  739                 return_values['status'] = constants.STATUS_AVAILABLE
  740 
  741         elif current_state == self.STATE_SNAPMIRROR_DATA_COPYING:
  742             replica_state = self.update_replica_state(
  743                 None,  # no context is needed
  744                 [src_share],
  745                 share,
  746                 [],  # access_rules
  747                 [],  # snapshot list
  748                 share_server)
  749             if replica_state in [None, constants.STATUS_ERROR]:
   750                 msg = _("Destination share failed to replicate data "
   751                         "from the source share.")
  752                 LOG.exception(msg)
  753                 raise exception.NetAppException(msg)
  754             elif replica_state == constants.REPLICA_STATE_IN_SYNC:
  755                 try:
  756                     # 1. Start an update to try to get a last minute
  757                     # transfer before we quiesce and break
  758                     dm_session.update_snapmirror(src_share, share)
  759                 except exception.StorageCommunicationException:
  760                     # Ignore any errors since the current source replica
  761                     # may be unreachable
  762                     pass
  763                 # 2. Break SnapMirror
   764                 # NOTE(dviroel): if breaking/deleting the snapmirror
   765                 # relationship fails, we won't be able to delete the share.
  766                 dm_session.break_snapmirror(src_share, share)
  767                 dm_session.delete_snapmirror(src_share, share)
  768                 # 3. Delete the source volume
  769                 self._delete_share(src_share, src_vserver_client,
  770                                    remove_export=False)
  771                 share_name = self._get_backend_share_name(src_share['id'])
  772                 # 4. Set File system size fixed to false
  773                 dest_vserver_client.set_volume_filesys_size_fixed(
  774                     share_name, filesys_size_fixed=False)
  775                 apply_qos_on_dest = True
  776                 return_values['status'] = constants.STATUS_AVAILABLE
  777         else:
  778             # Delete this share from private storage since we'll abort this
  779             # operation.
  780             self.private_storage.delete(share['id'])
  781             msg_args = {
  782                 'state': current_state,
  783                 'id': share['id'],
  784             }
  785             msg = _("Caught an unexpected internal state '%(state)s' for "
  786                     "share %(id)s. Aborting operation.") % msg_args
  787             LOG.exception(msg)
  788             raise exception.NetAppException(msg)
  789 
  790         if return_values['status'] == constants.STATUS_AVAILABLE:
  791             if apply_qos_on_dest:
  792                 extra_specs = share_types.get_extra_specs_from_share(share)
  793                 provisioning_options = self._get_provisioning_options(
  794                     extra_specs)
  795                 qos_policy_group_name = (
  796                     self._modify_or_create_qos_for_existing_share(
  797                         share, extra_specs, dest_vserver, dest_vserver_client))
  798                 if qos_policy_group_name:
  799                     provisioning_options['qos_policy_group'] = (
  800                         qos_policy_group_name)
  801                 share_name = self._get_backend_share_name(share['id'])
  802                 # Modify volume to match extra specs
  803                 dest_vserver_client.modify_volume(
  804                     dest_aggr, share_name, **provisioning_options)
  805 
  806             self.private_storage.delete(share['id'])
  807             return_values['export_locations'] = self._create_export(
  808                 share, share_server, dest_vserver, dest_vserver_client,
  809                 clear_current_export_policy=False)
  810         else:
  811             new_src_share = copy.deepcopy(src_share)
  812             new_src_share['internal_state'] = current_state
  813             self.private_storage.update(share['id'], {
  814                 'source_share': json.dumps(new_src_share)
  815             })
  816         return return_values
  817 
  818     @na_utils.trace
  819     def _allocate_container(self, share, vserver, vserver_client,
  820                             replica=False):
  821         """Create new share on aggregate."""
  822         share_name = self._get_backend_share_name(share['id'])
  823 
  824         # Get Data ONTAP aggregate name as pool name.
  825         pool_name = share_utils.extract_host(share['host'], level='pool')
  826         if pool_name is None:
  827             msg = _("Pool is not available in the share host field.")
  828             raise exception.InvalidHost(reason=msg)
  829 
  830         provisioning_options = self._get_provisioning_options_for_share(
  831             share, vserver, vserver_client=vserver_client, replica=replica)
  832 
  833         if replica:
  834             # If this volume is intended to be a replication destination,
  835             # create it as the 'data-protection' type
  836             provisioning_options['volume_type'] = 'dp'
  837 
  838         hide_snapdir = provisioning_options.pop('hide_snapdir')
  839 
  840         LOG.debug('Creating share %(share)s on pool %(pool)s with '
  841                   'provisioning options %(options)s',
  842                   {'share': share_name, 'pool': pool_name,
  843                    'options': provisioning_options})
  844         vserver_client.create_volume(
  845             pool_name, share_name, share['size'],
  846             snapshot_reserve=self.configuration.
  847             netapp_volume_snapshot_reserve_percent, **provisioning_options)
  848 
  849         if hide_snapdir:
  850             self._apply_snapdir_visibility(
  851                 hide_snapdir, share_name, vserver_client)
  852 
  853     def _apply_snapdir_visibility(
  854             self, hide_snapdir, share_name, vserver_client):
  855 
  856         LOG.debug('Applying snapshot visibility according to hide_snapdir '
  857                   'value of %(hide_snapdir)s on share %(share)s.',
  858                   {'hide_snapdir': hide_snapdir, 'share': share_name})
  859 
  860         vserver_client.set_volume_snapdir_access(share_name, hide_snapdir)
  861 
  862     @na_utils.trace
  863     def _remap_standard_boolean_extra_specs(self, extra_specs):
  864         """Replace standard boolean extra specs with NetApp-specific ones."""
  865         specs = copy.deepcopy(extra_specs)
  866         for (key, netapp_key) in self.STANDARD_BOOLEAN_EXTRA_SPECS_MAP.items():
  867             if key in specs:
  868                 bool_value = share_types.parse_boolean_extra_spec(key,
  869                                                                   specs[key])
  870                 specs[netapp_key] = 'true' if bool_value else 'false'
  871                 del specs[key]
  872         return specs
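               # Illustrative example (not from the upstream code): an extra
               # spec of {'dedupe': 'true'} is remapped to
               # {'netapp:dedup': 'true'} per STANDARD_BOOLEAN_EXTRA_SPECS_MAP.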
  873 
  874     @na_utils.trace
  875     def _check_extra_specs_validity(self, share, extra_specs):
  876         """Check if the extra_specs have valid values."""
  877         self._check_boolean_extra_specs_validity(
  878             share, extra_specs, list(self.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP))
  879         self._check_string_extra_specs_validity(share, extra_specs)
  880 
  881     @na_utils.trace
  882     def _check_string_extra_specs_validity(self, share, extra_specs):
  883         """Check if the string_extra_specs have valid values."""
  884         if 'netapp:max_files' in extra_specs:
  885             self._check_if_max_files_is_valid(share,
  886                                               extra_specs['netapp:max_files'])
  887 
  888     @na_utils.trace
  889     def _check_if_max_files_is_valid(self, share, value):
  890         """Check if max_files has a valid value."""
  891         if int(value) < 0:
  892             args = {'value': value, 'key': 'netapp:max_files',
  893                     'type_id': share['share_type_id'], 'share_id': share['id']}
  894             msg = _('Invalid value "%(value)s" for extra_spec "%(key)s" '
  895                     'in share_type %(type_id)s for share %(share_id)s.')
  896             raise exception.NetAppException(msg % args)
  897 
  898     @na_utils.trace
  899     def _check_boolean_extra_specs_validity(self, share, specs,
  900                                             keys_of_interest):
   901         """Check if the boolean_extra_specs have valid values."""
   902         # cDOT compression requires deduplication.
   903         dedup = specs.get('netapp:dedup', None)
   904         compression = specs.get('netapp:compression', None)
   905         if dedup is not None and compression is not None:
   906             if dedup.lower() == 'false' and compression.lower() == 'true':
   907                 spec = {'netapp:dedup': dedup,
   908                         'netapp:compression': compression}
   909                 type_id = share['share_type_id']
   910                 share_id = share['id']
   911                 args = {'type_id': type_id, 'share_id': share_id, 'spec': spec}
   912                 msg = _('Invalid combination of extra_specs in share_type '
   913                         '%(type_id)s for share %(share_id)s: %(spec)s: '
   914                         'deduplication must be enabled in order for '
   915                         'compression to be enabled.')
   916                 raise exception.Invalid(msg % args)
  917         # Extra spec values must be (ignoring case) 'true' or 'false'.
  918         for key in keys_of_interest:
  919             value = specs.get(key)
  920             if value is not None and value.lower() not in ['true', 'false']:
  921                 type_id = share['share_type_id']
  922                 share_id = share['id']
  923                 arg_map = {'value': value, 'key': key, 'type_id': type_id,
  924                            'share_id': share_id}
  925                 msg = _('Invalid value "%(value)s" for extra_spec "%(key)s" '
  926                         'in share_type %(type_id)s for share %(share_id)s.')
  927                 raise exception.Invalid(msg % arg_map)
  928 
  929     @na_utils.trace
  930     def _get_boolean_provisioning_options(self, specs, boolean_specs_map):
  931         """Given extra specs, return corresponding client library kwargs.
  932 
  933         Build a full set of client library provisioning kwargs, filling in a
  934         default value if an explicit value has not been supplied via a
  935         corresponding extra spec.  Boolean extra spec values are "true" or
  936         "false", with missing specs treated as "false".  Provisioning kwarg
  937         values are True or False.
  938         """
  939         # Extract the extra spec keys of concern and their corresponding
  940         # kwarg keys as lists.
  941         keys_of_interest = list(boolean_specs_map)
  942         provisioning_args = [boolean_specs_map[key]
  943                              for key in keys_of_interest]
  944         # Set missing spec values to 'false'
  945         for key in keys_of_interest:
  946             if key not in specs:
  947                 specs[key] = 'false'
  948         # Build a list of Boolean provisioning arguments from the string
  949         # equivalents in the spec values.
  950         provisioning_values = [specs[key].lower() == 'true' for key in
  951                                keys_of_interest]
  952         # Combine the list of provisioning args and the list of provisioning
  953         # values into a dictionary suitable for use as kwargs when invoking
  954         # provisioning methods from the client API library.
  955         return dict(zip(provisioning_args, provisioning_values))
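               # Illustrative example (not from the upstream code): with
               # BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP and specs of
               # {'netapp:dedup': 'true'}, this returns kwargs such as
               # {'dedup_enabled': True, 'compression_enabled': False,
               #  'thin_provisioned': False, 'split': False,
               #  'hide_snapdir': False}.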
  956 
  957     @na_utils.trace
  958     def get_string_provisioning_options(self, specs, string_specs_map):
  959         """Given extra specs, return corresponding client library kwargs.
  960 
  961         Build a full set of client library provisioning kwargs, filling in a
  962         default value if an explicit value has not been supplied via a
  963         corresponding extra spec.
  964         """
  965         # Extract the extra spec keys of concern and their corresponding
  966         # kwarg keys as lists.
  967         keys_of_interest = list(string_specs_map)
  968         provisioning_args = [string_specs_map[key]
  969                              for key in keys_of_interest]
   970         # Set missing spec values to None
  971         for key in keys_of_interest:
  972             if key not in specs:
  973                 specs[key] = None
  974         provisioning_values = [specs[key] for key in keys_of_interest]
  975 
  976         # Combine the list of provisioning args and the list of provisioning
  977         # values into a dictionary suitable for use as kwargs when invoking
  978         # provisioning methods from the client API library.
  979         return dict(zip(provisioning_args, provisioning_values))
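               # Illustrative example (not from the upstream code): with
               # STRING_QUALIFIED_EXTRA_SPECS_MAP and specs of
               # {'netapp:language': 'en_US'}, this returns
               # {'language': 'en_US', 'snapshot_policy': None,
               #  'max_files': None, 'adaptive_qos_policy_group': None}.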
  980 
  981     def _get_normalized_qos_specs(self, extra_specs):
  982         if not extra_specs.get('qos'):
  983             return {}
  984 
  985         normalized_qos_specs = {
  986             self.QOS_SPECS[key.lower()]: value
  987             for key, value in extra_specs.items()
  988             if self.QOS_SPECS.get(key.lower())
  989         }
  990         if not normalized_qos_specs:
  991             msg = _("The extra-spec 'qos' is set to True, but no netapp "
  992                     "supported qos-specs have been specified in the share "
  993                     "type. Cannot provision a QoS policy. Specify any of the "
  994                     "following extra-specs and try again: %s")
  995             raise exception.NetAppException(msg % list(self.QOS_SPECS))
  996 
  997         # TODO(gouthamr): Modify check when throughput floors are allowed
  998         if len(normalized_qos_specs) > 1:
  999             msg = _('Only one NetApp QoS spec can be set at a time. '
 1000                     'Specified QoS limits: %s')
 1001             raise exception.NetAppException(msg % normalized_qos_specs)
 1002 
 1003         return normalized_qos_specs
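               # Illustrative example (not from the upstream code): extra specs
               # of {'qos': True, 'netapp:maxiops': '500'} normalize to
               # {'maxiops': '500'}; specifying more than one QoS limit raises
               # a NetAppException.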
 1004 
 1005     def _get_max_throughput(self, share_size, qos_specs):
 1006         # QoS limits are exclusive of one another.
 1007         if 'maxiops' in qos_specs:
 1008             return '%siops' % qos_specs['maxiops']
 1009         elif 'maxiopspergib' in qos_specs:
 1010             return '%siops' % six.text_type(
 1011                 int(qos_specs['maxiopspergib']) * int(share_size))
 1012         elif 'maxbps' in qos_specs:
 1013             return '%sB/s' % qos_specs['maxbps']
 1014         elif 'maxbpspergib' in qos_specs:
 1015             return '%sB/s' % six.text_type(
 1016                 int(qos_specs['maxbpspergib']) * int(share_size))
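               # Illustrative example (not from the upstream code): for a
               # 100 GiB share with qos_specs {'maxiopspergib': '10'}, this
               # returns '1000iops'; with {'maxbps': '104857600'} it returns
               # '104857600B/s'.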
 1017 
 1018     @na_utils.trace
 1019     def _create_qos_policy_group(self, share, vserver, qos_specs,
 1020                                  vserver_client=None):
 1021         max_throughput = self._get_max_throughput(share['size'], qos_specs)
 1022         qos_policy_group_name = self._get_backend_qos_policy_group_name(
 1023             share['id'])
 1024         client = vserver_client or self._client
 1025         client.qos_policy_group_create(qos_policy_group_name, vserver,
 1026                                        max_throughput=max_throughput)
 1027         return qos_policy_group_name
 1028 
 1029     @na_utils.trace
 1030     def _get_provisioning_options_for_share(
 1031             self, share, vserver, vserver_client=None, replica=False):
 1032         """Return provisioning options from a share.
 1033 
 1034         Starting with a share, this method gets the extra specs, rationalizes
 1035         NetApp vs. standard extra spec values, ensures their validity, and
 1036         returns them in a form suitable for passing to various API client
 1037         methods.
 1038         """
 1039         extra_specs = share_types.get_extra_specs_from_share(share)
 1040         extra_specs = self._remap_standard_boolean_extra_specs(extra_specs)
 1041         self._check_extra_specs_validity(share, extra_specs)
 1042         provisioning_options = self._get_provisioning_options(extra_specs)
 1043         qos_specs = self._get_normalized_qos_specs(extra_specs)
 1044         self.validate_provisioning_options_for_share(provisioning_options,
 1045                                                      extra_specs=extra_specs,
 1046                                                      qos_specs=qos_specs)
 1047         if qos_specs and not replica:
 1048             qos_policy_group = self._create_qos_policy_group(
 1049                 share, vserver, qos_specs, vserver_client)
 1050             provisioning_options['qos_policy_group'] = qos_policy_group
 1051         return provisioning_options
 1052 
 1053     @na_utils.trace
 1054     def _get_provisioning_options(self, specs):
  1055         """Return a merged result of string and boolean provisioning options."""
 1056         boolean_args = self._get_boolean_provisioning_options(
 1057             specs, self.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP)
 1058 
 1059         string_args = self.get_string_provisioning_options(
 1060             specs, self.STRING_QUALIFIED_EXTRA_SPECS_MAP)
 1061         result = boolean_args.copy()
 1062         result.update(string_args)
 1063 
 1064         result['encrypt'] = self._get_nve_option(specs)
 1065 
 1066         return result
 1067 
 1068     @na_utils.trace
 1069     def validate_provisioning_options_for_share(self, provisioning_options,
 1070                                                 extra_specs=None,
 1071                                                 qos_specs=None):
 1072         """Checks if provided provisioning options are valid."""
 1073         adaptive_qos = provisioning_options.get('adaptive_qos_policy_group')
 1074         replication_type = (extra_specs.get('replication_type')
 1075                             if extra_specs else None)
 1076         if adaptive_qos and qos_specs:
 1077             msg = _('Share cannot be provisioned with both qos_specs '
 1078                     '%(qos_specs_string)s and adaptive_qos_policy_group '
 1079                     '%(adaptive_qos_policy_group)s.')
 1080             qos_specs_string = ""
 1081             for key in qos_specs:
 1082                 qos_specs_string += key + "=" + str(qos_specs[key]) + " "
 1083             msg_args = {
 1084                 'adaptive_qos_policy_group':
 1085                     provisioning_options['adaptive_qos_policy_group'],
 1086                 'qos_specs_string': qos_specs_string
 1087             }
 1088             raise exception.NetAppException(msg % msg_args)
 1089 
 1090         if adaptive_qos and replication_type:
 1091             msg = _("The extra spec 'adaptive_qos_policy_group' is not "
 1092                     "supported by share replication feature.")
 1093             raise exception.NetAppException(msg)
 1094 
 1095         # NOTE(dviroel): This validation will need to be updated if newer
 1096         # versions of ONTAP stop requiring cluster credentials to associate
 1097         # QoS to volumes.
 1098         if (adaptive_qos or qos_specs) and not self._have_cluster_creds:
 1099             msg = _('Share cannot be provisioned with QoS without having '
 1100                     'cluster credentials.')
 1101             raise exception.NetAppException(msg)
 1102 
 1103     def _get_nve_option(self, specs):
 1104         if 'netapp_flexvol_encryption' in specs:
 1105             nve = specs['netapp_flexvol_encryption'].lower() == 'true'
 1106         else:
 1107             nve = False
 1108 
 1109         return nve
 1110 
 1111     @na_utils.trace
 1112     def _check_aggregate_extra_specs_validity(self, aggregate_name, specs):
 1113 
 1114         for specs_key in ('netapp_disk_type', 'netapp_raid_type'):
 1115             aggr_value = self._ssc_stats.get(aggregate_name, {}).get(specs_key)
 1116             specs_value = specs.get(specs_key)
 1117 
 1118             if aggr_value and specs_value and aggr_value != specs_value:
 1119                 msg = _('Invalid value "%(value)s" for extra_spec "%(key)s" '
 1120                         'in aggregate %(aggr)s.')
 1121                 msg_args = {
 1122                     'value': specs_value,
 1123                     'key': specs_key,
 1124                     'aggr': aggregate_name
 1125                 }
 1126                 raise exception.NetAppException(msg % msg_args)
 1127 
 1128     @na_utils.trace
 1129     def _allocate_container_from_snapshot(
 1130             self, share, snapshot, vserver, vserver_client,
 1131             snapshot_name_func=_get_backend_snapshot_name, split=None):
 1132         """Clones existing share."""
 1133         share_name = self._get_backend_share_name(share['id'])
 1134         parent_share_name = self._get_backend_share_name(snapshot['share_id'])
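              # Prefer the snapshot name recorded in provider_location; fall
              # back to deriving it from the snapshot ID.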
 1135         if snapshot.get('provider_location') is None:
 1136             parent_snapshot_name = snapshot_name_func(self, snapshot['id'])
 1137         else:
 1138             parent_snapshot_name = snapshot['provider_location']
 1139 
 1140         provisioning_options = self._get_provisioning_options_for_share(
 1141             share, vserver, vserver_client=vserver_client)
 1142 
 1143         hide_snapdir = provisioning_options.pop('hide_snapdir')
 1144         if split is not None:
 1145             provisioning_options['split'] = split
 1146 
 1147         LOG.debug('Creating share from snapshot %s', snapshot['id'])
 1148         vserver_client.create_volume_clone(
 1149             share_name, parent_share_name, parent_snapshot_name,
 1150             **provisioning_options)
 1151 
 1152         if share['size'] > snapshot['size']:
 1153             vserver_client.set_volume_size(share_name, share['size'])
 1154 
 1155         if hide_snapdir:
 1156             self._apply_snapdir_visibility(
 1157                 hide_snapdir, share_name, vserver_client)
 1158 
 1159     @na_utils.trace
 1160     def _share_exists(self, share_name, vserver_client):
 1161         return vserver_client.volume_exists(share_name)
 1162 
 1163     @na_utils.trace
 1164     def _delete_share(self, share, vserver_client, remove_export=True):
 1165         share_name = self._get_backend_share_name(share['id'])
 1166         if self._share_exists(share_name, vserver_client):
 1167             if remove_export:
 1168                 self._remove_export(share, vserver_client)
 1169             self._deallocate_container(share_name, vserver_client)
 1170             qos_policy_for_share = self._get_backend_qos_policy_group_name(
 1171                 share['id'])
 1172             vserver_client.mark_qos_policy_group_for_deletion(
 1173                 qos_policy_for_share)
 1174         else:
 1175             LOG.info("Share %s does not exist.", share['id'])
 1176 
 1177     @na_utils.trace
 1178     def delete_share(self, context, share, share_server=None):
 1179         """Deletes share."""
 1180         try:
 1181             vserver, vserver_client = self._get_vserver(
 1182                 share_server=share_server)
 1183         except (exception.InvalidInput,
 1184                 exception.VserverNotSpecified,
 1185                 exception.VserverNotFound) as error:
 1186             LOG.warning("Could not determine share server for share being "
 1187                         "deleted: %(share)s. Deletion of share record "
 1188                         "will proceed anyway. Error: %(error)s",
 1189                         {'share': share['id'], 'error': error})
 1190             return
 1191         self._delete_share(share, vserver_client)
 1192 
 1193     @na_utils.trace
 1194     def _deallocate_container(self, share_name, vserver_client):
 1195         """Free share space."""
 1196         vserver_client.unmount_volume(share_name, force=True)
 1197         vserver_client.offline_volume(share_name)
 1198         vserver_client.delete_volume(share_name)
 1199 
 1200     @na_utils.trace
 1201     def _create_export(self, share, share_server, vserver, vserver_client,
 1202                        clear_current_export_policy=True,
 1203                        ensure_share_already_exists=False):
 1204         """Creates NAS storage."""
 1205         helper = self._get_helper(share)
 1206         helper.set_client(vserver_client)
 1207         share_name = self._get_backend_share_name(share['id'])
 1208 
 1209         interfaces = vserver_client.get_network_interfaces(
 1210             protocols=[share['share_proto']])
 1211 
 1212         if not interfaces:
 1213             msg = _('Cannot find network interfaces for Vserver %(vserver)s '
 1214                     'and protocol %(proto)s.')
 1215             msg_args = {'vserver': vserver, 'proto': share['share_proto']}
 1216             raise exception.NetAppException(msg % msg_args)
 1217 
 1218         # Get LIF addresses with metadata
 1219         export_addresses = self._get_export_addresses_with_metadata(
 1220             share, share_server, interfaces)
 1221 
 1222         # Create the share and get a callback for generating export locations
 1223         callback = helper.create_share(
 1224             share, share_name,
 1225             clear_current_export_policy=clear_current_export_policy,
 1226             ensure_share_already_exists=ensure_share_already_exists)
 1227 
 1228         # Generate export locations using addresses, metadata and callback
 1229         export_locations = [
 1230             {
 1231                 'path': callback(export_address),
 1232                 'is_admin_only': metadata.pop('is_admin_only', False),
 1233                 'metadata': metadata,
 1234             }
 1235             for export_address, metadata
 1236             in copy.deepcopy(export_addresses).items()
 1237         ]
 1238 
 1239         # Sort the export locations to report preferred paths first
 1240         export_locations = self._sort_export_locations_by_preferred_paths(
 1241             export_locations)
 1242 
 1243         return export_locations
 1244 
 1245     @na_utils.trace
 1246     def _get_export_addresses_with_metadata(self, share, share_server,
 1247                                             interfaces):
 1248         """Return interface addresses with locality and other metadata."""
 1249 
 1250         # Get home node so we can identify preferred paths
 1251         aggregate_name = share_utils.extract_host(share['host'], level='pool')
 1252         home_node = self._get_aggregate_node(aggregate_name)
 1253 
 1254         # Get admin LIF addresses so we can identify admin export locations
 1255         admin_addresses = self._get_admin_addresses_for_share_server(
 1256             share_server)
 1257 
 1258         addresses = {}
 1259         for interface in interfaces:
 1260 
 1261             address = interface['address']
 1262             is_admin_only = address in admin_addresses
 1263 
 1264             if home_node:
 1265                 preferred = interface.get('home-node') == home_node
 1266             else:
 1267                 preferred = False
 1268 
 1269             addresses[address] = {
 1270                 'is_admin_only': is_admin_only,
 1271                 'preferred': preferred,
 1272             }
 1273 
 1274         return addresses
 1275 
 1276     @na_utils.trace
 1277     def _get_admin_addresses_for_share_server(self, share_server):
 1278 
 1279         if not share_server:
 1280             return []
 1281 
 1282         admin_addresses = []
 1283         for network_allocation in share_server.get('network_allocations'):
 1284             if network_allocation['label'] == 'admin':
 1285                 admin_addresses.append(network_allocation['ip_address'])
 1286 
 1287         return admin_addresses
 1288 
 1289     @na_utils.trace
 1290     def _sort_export_locations_by_preferred_paths(self, export_locations):
 1291         """Sort the export locations to report preferred paths first."""
 1292 
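              # Preferred locations yield a sort key of False, so they sort
              # ahead of non-preferred ones.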
 1293         sort_key = lambda location: location.get(  # noqa: E731
 1294             'metadata', {}).get('preferred') is not True
 1295 
 1296         return sorted(export_locations, key=sort_key)
 1297 
 1298     @na_utils.trace
 1299     def _remove_export(self, share, vserver_client):
 1300         """Deletes NAS storage."""
 1301         helper = self._get_helper(share)
 1302         helper.set_client(vserver_client)
 1303         share_name = self._get_backend_share_name(share['id'])
 1304         target = helper.get_target(share)
 1305         # Share may be in error state, so there's no share and target.
 1306         if target:
 1307             helper.delete_share(share, share_name)
 1308 
 1309     @na_utils.trace
 1310     def create_snapshot(self, context, snapshot, share_server=None):
 1311         """Creates a snapshot of a share."""
 1312         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1313         share_name = self._get_backend_share_name(snapshot['share_id'])
 1314         snapshot_name = self._get_backend_snapshot_name(snapshot['id'])
 1315         LOG.debug('Creating snapshot %s', snapshot_name)
 1316         vserver_client.create_snapshot(share_name, snapshot_name)
 1317         return {'provider_location': snapshot_name}
 1318 
 1319     def revert_to_snapshot(self, context, snapshot, share_server=None):
 1320         """Reverts a share (in place) to the specified snapshot."""
 1321         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1322         share_name = self._get_backend_share_name(snapshot['share_id'])
 1323         snapshot_name = (snapshot.get('provider_location') or
 1324                          self._get_backend_snapshot_name(snapshot['id']))
 1325         LOG.debug('Restoring snapshot %s', snapshot_name)
 1326         vserver_client.restore_snapshot(share_name, snapshot_name)
 1327 
 1328     @na_utils.trace
 1329     def delete_snapshot(self, context, snapshot, share_server=None,
 1330                         snapshot_name=None):
 1331         """Deletes a snapshot of a share."""
 1332         try:
 1333             vserver, vserver_client = self._get_vserver(
 1334                 share_server=share_server)
 1335         except (exception.InvalidInput,
 1336                 exception.VserverNotSpecified,
 1337                 exception.VserverNotFound) as error:
 1338             LOG.warning("Could not determine share server for snapshot "
 1339                         "being deleted: %(snap)s. Deletion of snapshot "
 1340                         "record will proceed anyway. Error: %(error)s",
 1341                         {'snap': snapshot['id'], 'error': error})
 1342             return
 1343 
 1344         share_name = self._get_backend_share_name(snapshot['share_id'])
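              # Resolve the backend snapshot name: provider_location first,
              # then an explicitly supplied name, then the derived default.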
 1345         snapshot_name = (snapshot.get('provider_location') or snapshot_name or
 1346                          self._get_backend_snapshot_name(snapshot['id']))
 1347 
 1348         try:
 1349             self._delete_snapshot(vserver_client, share_name, snapshot_name)
 1350         except exception.SnapshotResourceNotFound:
 1351             msg = ("Snapshot %(snap)s does not exist on share %(share)s.")
 1352             msg_args = {'snap': snapshot_name, 'share': share_name}
 1353             LOG.info(msg, msg_args)
 1354 
 1355     def _delete_snapshot(self, vserver_client, share_name, snapshot_name):
 1356         """Deletes a backend snapshot, handling busy snapshots as needed."""
 1357 
 1358         backend_snapshot = vserver_client.get_snapshot(share_name,
 1359                                                        snapshot_name)
 1360 
 1361         LOG.debug('Deleting snapshot %(snap)s for share %(share)s.',
 1362                   {'snap': snapshot_name, 'share': share_name})
 1363 
 1364         if not backend_snapshot['busy']:
 1365             vserver_client.delete_snapshot(share_name, snapshot_name)
 1366 
 1367         elif backend_snapshot['owners'] == {'volume clone'}:
 1368             # Snapshots are locked by clone(s), so split clone and soft delete
 1369             snapshot_children = vserver_client.get_clone_children_for_snapshot(
 1370                 share_name, snapshot_name)
 1371             for snapshot_child in snapshot_children:
 1372                 vserver_client.split_volume_clone(snapshot_child['name'])
 1373 
 1374             vserver_client.soft_delete_snapshot(share_name, snapshot_name)
 1375 
 1376         else:
 1377             raise exception.ShareSnapshotIsBusy(snapshot_name=snapshot_name)
 1378 
 1379     @na_utils.trace
 1380     def manage_existing(self, share, driver_options, share_server=None):
 1381         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1382         share_size = self._manage_container(share, vserver, vserver_client)
 1383         export_locations = self._create_export(share, share_server, vserver,
 1384                                                vserver_client)
 1385         return {'size': share_size, 'export_locations': export_locations}
 1386 
 1387     @na_utils.trace
 1388     def unmanage(self, share, share_server=None):
 1389         pass
 1390 
 1391     @na_utils.trace
 1392     def _manage_container(self, share, vserver, vserver_client):
 1393         """Bring existing volume under management as a share."""
 1394 
 1395         protocol_helper = self._get_helper(share)
 1396         protocol_helper.set_client(vserver_client)
 1397 
 1398         volume_name = protocol_helper.get_share_name_for_share(share)
 1399         if not volume_name:
 1400             msg = _('Volume could not be determined from export location '
 1401                     '%(export)s.')
 1402             msg_args = {'export': share['export_location']}
 1403             raise exception.ManageInvalidShare(reason=msg % msg_args)
 1404 
 1405         share_name = self._get_backend_share_name(share['id'])
 1406         aggregate_name = share_utils.extract_host(share['host'], level='pool')
 1407 
 1408         # Get existing volume info
 1409         volume = vserver_client.get_volume_to_manage(aggregate_name,
 1410                                                      volume_name)
 1411 
 1412         if not volume:
 1413             msg = _('Volume %(volume)s not found on aggregate %(aggr)s.')
 1414             msg_args = {'volume': volume_name, 'aggr': aggregate_name}
 1415             raise exception.ManageInvalidShare(reason=msg % msg_args)
 1416 
 1417         # When calculating the size, round up to the next GB.
 1418         volume_size = int(math.ceil(float(volume['size']) / units.Gi))
 1419 
 1420         # Validate extra specs
 1421         extra_specs = share_types.get_extra_specs_from_share(share)
 1422         extra_specs = self._remap_standard_boolean_extra_specs(extra_specs)
 1423         try:
 1424             self._check_extra_specs_validity(share, extra_specs)
 1425             self._check_aggregate_extra_specs_validity(aggregate_name,
 1426                                                        extra_specs)
 1427         except exception.ManilaException as ex:
 1428             raise exception.ManageExistingShareTypeMismatch(
 1429                 reason=six.text_type(ex))
 1430 
 1431         # Ensure volume is manageable
 1432         self._validate_volume_for_manage(volume, vserver_client)
 1433 
 1434         provisioning_options = self._get_provisioning_options(extra_specs)
 1435         qos_specs = self._get_normalized_qos_specs(extra_specs)
 1436         self.validate_provisioning_options_for_share(provisioning_options,
 1437                                                      extra_specs=extra_specs,
 1438                                                      qos_specs=qos_specs)
 1439 
 1440         debug_args = {
 1441             'share': share_name,
 1442             'aggr': aggregate_name,
 1443             'options': provisioning_options
 1444         }
 1445         LOG.debug('Managing share %(share)s on aggregate %(aggr)s with '
 1446                   'provisioning options %(options)s', debug_args)
 1447 
 1448         # Rename & remount volume on new path
 1449         vserver_client.unmount_volume(volume_name)
 1450         vserver_client.set_volume_name(volume_name, share_name)
 1451         vserver_client.mount_volume(share_name)
 1452 
 1453         qos_policy_group_name = self._modify_or_create_qos_for_existing_share(
 1454             share, extra_specs, vserver, vserver_client)
 1455         if qos_policy_group_name:
 1456             provisioning_options['qos_policy_group'] = qos_policy_group_name
 1457 
 1458         # Modify volume to match extra specs
 1459         vserver_client.modify_volume(aggregate_name, share_name,
 1460                                      **provisioning_options)
 1461 
 1462         # Save original volume info to private storage
 1463         original_data = {
 1464             'original_name': volume['name'],
 1465             'original_junction_path': volume['junction-path']
 1466         }
 1467         self.private_storage.update(share['id'], original_data)
 1468 
 1469         return volume_size
 1470 
 1471     @na_utils.trace
 1472     def _validate_volume_for_manage(self, volume, vserver_client):
 1473         """Ensure volume is a candidate for becoming a share."""
 1474 
 1475         # Check volume info, extra specs validity
 1476         if volume['type'] != 'rw' or volume['style'] != 'flex':
 1477             msg = _('Volume %(volume)s must be a read-write flexible volume.')
 1478             msg_args = {'volume': volume['name']}
 1479             raise exception.ManageInvalidShare(reason=msg % msg_args)
 1480 
 1481         if vserver_client.volume_has_luns(volume['name']):
 1482             msg = _('Volume %(volume)s must not contain LUNs.')
 1483             msg_args = {'volume': volume['name']}
 1484             raise exception.ManageInvalidShare(reason=msg % msg_args)
 1485 
 1486         if vserver_client.volume_has_junctioned_volumes(volume['name']):
 1487             msg = _('Volume %(volume)s must not have junctioned volumes.')
 1488             msg_args = {'volume': volume['name']}
 1489             raise exception.ManageInvalidShare(reason=msg % msg_args)
 1490 
 1491         if vserver_client.volume_has_snapmirror_relationships(volume):
 1492             msg = _('Volume %(volume)s must not be in any snapmirror '
 1493                     'relationships.')
 1494             msg_args = {'volume': volume['name']}
 1495             raise exception.ManageInvalidShare(reason=msg % msg_args)
 1496 
 1497     @na_utils.trace
 1498     def manage_existing_snapshot(
 1499             self, snapshot, driver_options, share_server=None):
 1500         """Brings an existing snapshot under Manila management."""
 1501         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1502         share_name = self._get_backend_share_name(snapshot['share_id'])
 1503         existing_snapshot_name = snapshot.get('provider_location')
 1504         new_snapshot_name = self._get_backend_snapshot_name(snapshot['id'])
 1505 
 1506         if not existing_snapshot_name:
 1507             msg = _('provider_location not specified.')
 1508             raise exception.ManageInvalidShareSnapshot(reason=msg)
 1509 
 1510         # Get the volume containing the snapshot so we can report its size
 1511         try:
 1512             volume = vserver_client.get_volume(share_name)
 1513         except (netapp_api.NaApiError,
 1514                 exception.StorageResourceNotFound,
 1515                 exception.NetAppException):
 1516             msg = _('Could not determine snapshot %(snap)s size from '
 1517                     'volume %(vol)s.')
 1518             msg_args = {'snap': existing_snapshot_name, 'vol': share_name}
 1519             LOG.exception(msg, msg_args)
 1520             raise exception.ShareNotFound(share_id=snapshot['share_id'])
 1521 
 1522         # Ensure there aren't any mirrors on this volume
 1523         if vserver_client.volume_has_snapmirror_relationships(volume):
 1524             msg = _('Share %(vol)s has SnapMirror relationships.')
 1525             msg_args = {'vol': share_name}
 1526             raise exception.ManageInvalidShareSnapshot(reason=msg % msg_args)
 1527 
 1528         # Rename snapshot
 1529         try:
 1530             vserver_client.rename_snapshot(share_name,
 1531                                            existing_snapshot_name,
 1532                                            new_snapshot_name)
 1533         except netapp_api.NaApiError:
 1534             msg = _('Could not rename snapshot %(snap)s in share %(vol)s.')
 1535             msg_args = {'snap': existing_snapshot_name, 'vol': share_name}
 1536             raise exception.ManageInvalidShareSnapshot(reason=msg % msg_args)
 1537 
 1538         # Save original snapshot info to private storage
 1539         original_data = {'original_name': existing_snapshot_name}
 1540         self.private_storage.update(snapshot['id'], original_data)
 1541 
 1542         # When calculating the size, round up to the next GB.
 1543         size = int(math.ceil(float(volume['size']) / units.Gi))
 1544 
 1545         return {'size': size, 'provider_location': new_snapshot_name}
 1546 
 1547     @na_utils.trace
 1548     def unmanage_snapshot(self, snapshot, share_server=None):
 1549         """Removes the specified snapshot from Manila management."""
 1550 
 1551     @na_utils.trace
 1552     def create_consistency_group_from_cgsnapshot(
 1553             self, context, cg_dict, cgsnapshot_dict, share_server=None):
 1554         """Creates a consistency group from an existing CG snapshot."""
 1555         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1556 
 1557         # Ensure there is something to do
 1558         if not cgsnapshot_dict['share_group_snapshot_members']:
 1559             return None, None
 1560 
 1561         clone_list = self._collate_cg_snapshot_info(cg_dict, cgsnapshot_dict)
 1562         share_update_list = []
 1563 
 1564         LOG.debug('Creating consistency group from CG snapshot %s.',
 1565                   cgsnapshot_dict['id'])
 1566 
 1567         for clone in clone_list:
 1568 
 1569             self._allocate_container_from_snapshot(
 1570                 clone['share'], clone['snapshot'], vserver, vserver_client,
 1571                 NetAppCmodeFileStorageLibrary._get_backend_cg_snapshot_name)
 1572 
 1573             export_locations = self._create_export(clone['share'],
 1574                                                    share_server,
 1575                                                    vserver,
 1576                                                    vserver_client)
 1577             share_update_list.append({
 1578                 'id': clone['share']['id'],
 1579                 'export_locations': export_locations,
 1580             })
 1581 
 1582         return None, share_update_list
 1583 
 1584     def _collate_cg_snapshot_info(self, cg_dict, cgsnapshot_dict):
 1585         """Collate the data for a clone of a CG snapshot.
 1586 
 1587         Given two data structures, a CG snapshot (cgsnapshot_dict) and a new
 1588         CG to be cloned from the snapshot (cg_dict), match up both structures
 1589         into a list of dicts (share & snapshot) suitable for use by existing
 1590         driver methods that clone individual share snapshots.
 1591         """
 1592 
 1593         clone_list = list()
 1594 
 1595         for share in cg_dict['shares']:
 1596 
 1597             clone_info = {'share': share}
 1598 
 1599             for cgsnapshot_member in (
 1600                     cgsnapshot_dict['share_group_snapshot_members']):
 1601                 if (share['source_share_group_snapshot_member_id'] ==
 1602                         cgsnapshot_member['id']):
 1603                     clone_info['snapshot'] = {
 1604                         'share_id': cgsnapshot_member['share_id'],
 1605                         'id': cgsnapshot_dict['id'],
 1606                         'size': cgsnapshot_member['size'],
 1607                     }
 1608                     break
 1609 
 1610             else:
 1611                 msg = _("Invalid data supplied for creating consistency group "
 1612                         "from CG snapshot %s.") % cgsnapshot_dict['id']
 1613                 raise exception.InvalidShareGroup(reason=msg)
 1614 
 1615             clone_list.append(clone_info)
 1616 
 1617         return clone_list
 1618 
 1619     @na_utils.trace
 1620     def create_cgsnapshot(self, context, snap_dict, share_server=None):
 1621         """Creates a consistency group snapshot."""
 1622         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1623 
 1624         share_names = [self._get_backend_share_name(member['share_id'])
 1625                        for member in
 1626                        snap_dict.get('share_group_snapshot_members', [])]
 1627         snapshot_name = self._get_backend_cg_snapshot_name(snap_dict['id'])
 1628 
 1629         if share_names:
 1630             LOG.debug('Creating CG snapshot %s.', snapshot_name)
 1631             vserver_client.create_cg_snapshot(share_names, snapshot_name)
 1632 
 1633         return None, None
 1634 
 1635     @na_utils.trace
 1636     def delete_cgsnapshot(self, context, snap_dict, share_server=None):
 1637         """Deletes a consistency group snapshot."""
 1638         try:
 1639             vserver, vserver_client = self._get_vserver(
 1640                 share_server=share_server)
 1641         except (exception.InvalidInput,
 1642                 exception.VserverNotSpecified,
 1643                 exception.VserverNotFound) as error:
 1644             LOG.warning("Could not determine share server for CG snapshot "
 1645                         "being deleted: %(snap)s. Deletion of CG snapshot "
 1646                         "record will proceed anyway. Error: %(error)s",
 1647                         {'snap': snap_dict['id'], 'error': error})
 1648             return None, None
 1649 
 1650         share_names = [self._get_backend_share_name(member['share_id'])
 1651                        for member in (
 1652                            snap_dict.get('share_group_snapshot_members', []))]
 1653         snapshot_name = self._get_backend_cg_snapshot_name(snap_dict['id'])
 1654 
 1655         for share_name in share_names:
 1656             try:
 1657                 self._delete_snapshot(
 1658                     vserver_client, share_name, snapshot_name)
 1659             except exception.SnapshotResourceNotFound:
 1660                 msg = ("Snapshot %(snap)s does not exist on share "
 1661                        "%(share)s.")
 1662                 msg_args = {'snap': snapshot_name, 'share': share_name}
 1663                 LOG.info(msg, msg_args)
 1664                 continue
 1665 
 1666         return None, None
 1667 
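          # A share group can use backend CG snapshots only when its
          # consistent_snapshot_support capability is 'host'.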
 1668     @staticmethod
 1669     def _is_group_cg(context, share_group):
 1670         return share_group.consistent_snapshot_support == 'host'
 1671 
 1672     @na_utils.trace
 1673     def create_group_snapshot(self, context, snap_dict, fallback_create,
 1674                               share_server=None):
 1675         share_group = snap_dict['share_group']
 1676         if self._is_group_cg(context, share_group):
 1677             return self.create_cgsnapshot(context, snap_dict,
 1678                                           share_server=share_server)
 1679         else:
 1680             return fallback_create(context, snap_dict,
 1681                                    share_server=share_server)
 1682 
 1683     @na_utils.trace
 1684     def delete_group_snapshot(self, context, snap_dict, fallback_delete,
 1685                               share_server=None):
 1686         share_group = snap_dict['share_group']
 1687         if self._is_group_cg(context, share_group):
 1688             return self.delete_cgsnapshot(context, snap_dict,
 1689                                           share_server=share_server)
 1690         else:
 1691             return fallback_delete(context, snap_dict,
 1692                                    share_server=share_server)
 1693 
 1694     @na_utils.trace
 1695     def create_group_from_snapshot(self, context, share_group,
 1696                                    snapshot_dict, fallback_create,
 1697                                    share_server=None):
 1698         share_group2 = snapshot_dict['share_group']
 1699         if self._is_group_cg(context, share_group2):
 1700             return self.create_consistency_group_from_cgsnapshot(
 1701                 context, share_group, snapshot_dict,
 1702                 share_server=share_server)
 1703         else:
 1704             return fallback_create(context, share_group, snapshot_dict,
 1705                                    share_server=share_server)
 1706 
 1707     @na_utils.trace
 1708     def _adjust_qos_policy_with_volume_resize(self, share, new_size,
 1709                                               vserver_client):
 1710         # Adjust QoS policy on a share if any
 1711         if self._have_cluster_creds:
 1712             share_name = self._get_backend_share_name(share['id'])
 1713             share_on_the_backend = vserver_client.get_volume(share_name)
 1714             qos_policy_on_share = share_on_the_backend['qos-policy-group-name']
 1715             if qos_policy_on_share is None:
 1716                 return
 1717 
 1718             extra_specs = share_types.get_extra_specs_from_share(share)
 1719             qos_specs = self._get_normalized_qos_specs(extra_specs)
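                  # Only the size-dependent QoS specs (per-GiB limits) need to
                  # be recalculated when the share is resized.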
 1720             size_dependent_specs = {k: v for k, v in qos_specs.items() if k in
 1721                                     self.SIZE_DEPENDENT_QOS_SPECS}
 1722             if size_dependent_specs:
 1723                 max_throughput = self._get_max_throughput(
 1724                     new_size, size_dependent_specs)
 1725                 self._client.qos_policy_group_modify(
 1726                     qos_policy_on_share, max_throughput)
 1727 
 1728     @na_utils.trace
 1729     def extend_share(self, share, new_size, share_server=None):
 1730         """Extends size of existing share."""
 1731         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1732         share_name = self._get_backend_share_name(share['id'])
 1733         vserver_client.set_volume_filesys_size_fixed(share_name,
 1734                                                      filesys_size_fixed=False)
 1735         LOG.debug('Extending share %(name)s to %(size)s GB.',
 1736                   {'name': share_name, 'size': new_size})
 1737         vserver_client.set_volume_size(share_name, new_size)
 1738         self._adjust_qos_policy_with_volume_resize(share, new_size,
 1739                                                    vserver_client)
 1740 
 1741     @na_utils.trace
 1742     def shrink_share(self, share, new_size, share_server=None):
 1743         """Shrinks size of existing share."""
 1744         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1745         share_name = self._get_backend_share_name(share['id'])
 1746         vserver_client.set_volume_filesys_size_fixed(share_name,
 1747                                                      filesys_size_fixed=False)
 1748         LOG.debug('Shrinking share %(name)s to %(size)s GB.',
 1749                   {'name': share_name, 'size': new_size})
 1750 
 1751         try:
 1752             vserver_client.set_volume_size(share_name, new_size)
 1753         except netapp_api.NaApiError as e:
 1754             if e.code == netapp_api.EVOLOPNOTSUPP:
 1755                 msg = _('Failed to shrink share %(share_id)s. '
 1756                         'The current used space is larger than the '
 1757                         'requested size.')
 1758                 msg_args = {'share_id': share['id']}
 1759                 LOG.error(msg, msg_args)
 1760                 raise exception.ShareShrinkingPossibleDataLoss(
 1761                     share_id=share['id'])
 1762 
 1763         self._adjust_qos_policy_with_volume_resize(
 1764             share, new_size, vserver_client)
 1765 
 1766     @na_utils.trace
 1767     def update_access(self, context, share, access_rules, add_rules,
 1768                       delete_rules, share_server=None):
 1769         """Updates access rules for a share."""
 1770         # NOTE(ameade): We do not need to add export rules to a non-active
 1771         # replica as it will fail.
 1772         replica_state = share.get('replica_state')
 1773         if (replica_state is not None and
 1774                 replica_state != constants.REPLICA_STATE_ACTIVE):
 1775             return
 1776         try:
 1777             vserver, vserver_client = self._get_vserver(
 1778                 share_server=share_server)
 1779         except (exception.InvalidInput,
 1780                 exception.VserverNotSpecified,
 1781                 exception.VserverNotFound) as error:
 1782             LOG.warning("Could not determine share server for share "
 1783                         "%(share)s during access rules update. "
 1784                         "Error: %(error)s",
 1785                         {'share': share['id'], 'error': error})
 1786             return
 1787 
 1788         share_name = self._get_backend_share_name(share['id'])
 1789         if self._share_exists(share_name, vserver_client):
 1790             helper = self._get_helper(share)
 1791             helper.set_client(vserver_client)
 1792             helper.update_access(share, share_name, access_rules)
 1793         else:
 1794             raise exception.ShareResourceNotFound(share_id=share['id'])
 1795 
 1796     def setup_server(self, network_info, metadata=None):
 1797         raise NotImplementedError()
 1798 
 1799     def teardown_server(self, server_details, security_services=None):
 1800         raise NotImplementedError()
 1801 
 1802     def get_network_allocations_number(self):
 1803         """Get number of network interfaces to be created."""
 1804         raise NotImplementedError()
 1805 
 1806     @na_utils.trace
 1807     def _update_ssc_info(self):
 1808         """Periodically runs to update Storage Service Catalog data.
 1809 
 1810         The self._ssc_stats attribute is updated with the following format.
 1811         {<aggregate_name> : {<ssc_key>: <ssc_value>}}
 1812         """
 1813         LOG.info("Updating storage service catalog information for "
 1814                  "backend '%s'", self._backend_name)
 1815 
 1816         # Work on a copy and update the ssc data atomically before returning.
 1817         ssc_stats = copy.deepcopy(self._ssc_stats)
 1818 
 1819         aggregate_names = self._find_matching_aggregates()
 1820 
 1821         # Initialize entries for each aggregate.
 1822         for aggregate_name in aggregate_names:
 1823             if aggregate_name not in ssc_stats:
 1824                 ssc_stats[aggregate_name] = {
 1825                     'netapp_aggregate': aggregate_name,
 1826                 }
 1827 
 1828         if aggregate_names:
 1829             self._update_ssc_aggr_info(aggregate_names, ssc_stats)
 1830 
 1831         self._ssc_stats = ssc_stats
 1832 
 1833     @na_utils.trace
 1834     def _update_ssc_aggr_info(self, aggregate_names, ssc_stats):
 1835         """Updates the given SSC dictionary with new disk type information.
 1836 
 1837         :param aggregate_names: The aggregates this driver cares about
 1838         :param ssc_stats: The dictionary to update
 1839         """
 1840 
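              # Aggregate details are only available with cluster-scoped
              # credentials, so skip the update otherwise.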
 1841         if not self._have_cluster_creds:
 1842             return
 1843 
 1844         for aggregate_name in aggregate_names:
 1845 
 1846             aggregate = self._client.get_aggregate(aggregate_name)
 1847             hybrid = (six.text_type(aggregate.get('is-hybrid')).lower()
 1848                       if 'is-hybrid' in aggregate else None)
 1849             disk_types = self._client.get_aggregate_disk_types(aggregate_name)
 1850 
 1851             ssc_stats[aggregate_name].update({
 1852                 'netapp_raid_type': aggregate.get('raid-type'),
 1853                 'netapp_hybrid_aggregate': hybrid,
 1854                 'netapp_disk_type': disk_types,
 1855             })
 1856 
 1857     def find_active_replica(self, replica_list):
 1858         # NOTE(ameade): Find current active replica. There can only be one
 1859         # active replica (SnapMirror source volume) at a time in cDOT.
 1860         for r in replica_list:
 1861             if r['replica_state'] == constants.REPLICA_STATE_ACTIVE:
 1862                 return r
 1863 
 1864     def _find_nonactive_replicas(self, replica_list):
 1865         """Returns a list of all except the active replica."""
 1866         return [replica for replica in replica_list
 1867                 if replica['replica_state'] != constants.REPLICA_STATE_ACTIVE]
 1868 
 1869     def create_replica(self, context, replica_list, new_replica,
 1870                        access_rules, share_snapshots, share_server=None):
 1871         """Creates the new replica on this backend and sets up SnapMirror."""
 1872         active_replica = self.find_active_replica(replica_list)
 1873         dm_session = data_motion.DataMotionSession()
 1874 
 1875         # 1. Create the destination share
 1876         dest_backend = share_utils.extract_host(new_replica['host'],
 1877                                                 level='backend_name')
 1878 
 1879         vserver = (dm_session.get_vserver_from_share(new_replica) or
 1880                    self.configuration.netapp_vserver)
 1881 
 1882         vserver_client = data_motion.get_client_for_backend(
 1883             dest_backend, vserver_name=vserver)
 1884 
 1885         self._allocate_container(new_replica, vserver, vserver_client,
 1886                                  replica=True)
 1887 
 1888         # 2. Setup SnapMirror
 1889         dm_session.create_snapmirror(active_replica, new_replica)
 1890 
 1891         model_update = {
 1892             'export_locations': [],
 1893             'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC,
 1894             'access_rules_status': constants.STATUS_ACTIVE,
 1895         }
 1896 
 1897         return model_update
 1898 
 1899     def delete_replica(self, context, replica_list, replica, share_snapshots,
 1900                        share_server=None):
 1901         """Removes the replica on this backend and destroys SnapMirror."""
 1902         dm_session = data_motion.DataMotionSession()
 1903         # 1. Remove SnapMirror
 1904         dest_backend = share_utils.extract_host(replica['host'],
 1905                                                 level='backend_name')
 1906         vserver = (dm_session.get_vserver_from_share(replica) or
 1907                    self.configuration.netapp_vserver)
 1908 
 1909         # Ensure that all potential snapmirror relationships and their metadata
 1910         # involving the replica are destroyed.
 1911         for other_replica in replica_list:
 1912             if other_replica['id'] != replica['id']:
 1913                 dm_session.delete_snapmirror(other_replica, replica)
 1914                 dm_session.delete_snapmirror(replica, other_replica)
 1915 
 1916         # 2. Delete share
 1917         vserver_client = data_motion.get_client_for_backend(
 1918             dest_backend, vserver_name=vserver)
 1919         share_name = self._get_backend_share_name(replica['id'])
 1920         if self._share_exists(share_name, vserver_client):
 1921             self._deallocate_container(share_name, vserver_client)
 1922 
 1923     def update_replica_state(self, context, replica_list, replica,
 1924                              access_rules, share_snapshots, share_server=None):
 1925         """Returns the status of the given replica on this backend."""
 1926         active_replica = self.find_active_replica(replica_list)
 1927 
 1928         share_name = self._get_backend_share_name(replica['id'])
 1929         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1930 
 1931         if not vserver_client.volume_exists(share_name):
 1932             msg = _("Volume %(share_name)s does not exist on vserver "
 1933                     "%(vserver)s.")
 1934             msg_args = {'share_name': share_name, 'vserver': vserver}
 1935             raise exception.ShareResourceNotFound(msg % msg_args)
 1936 
 1937         # NOTE(cknight): The SnapMirror may have been intentionally broken by
 1938         # a revert-to-snapshot operation, in which case this method should not
 1939         # attempt to change anything.
 1940         if active_replica['status'] == constants.STATUS_REVERTING:
 1941             return None
 1942 
 1943         dm_session = data_motion.DataMotionSession()
 1944         try:
 1945             snapmirrors = dm_session.get_snapmirrors(active_replica, replica)
 1946         except netapp_api.NaApiError:
 1947             LOG.exception("Could not get snapmirrors for replica %s.",
 1948                           replica['id'])
 1949             return constants.STATUS_ERROR
 1950 
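              # No SnapMirror relationship exists; try to (re)create it unless
              # the replica is still being created, and report out-of-sync.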
 1951         if not snapmirrors:
 1952             if replica['status'] != constants.STATUS_CREATING:
 1953                 try:
 1954                     dm_session.create_snapmirror(active_replica, replica)
 1955                 except netapp_api.NaApiError:
 1956                     LOG.exception("Could not create snapmirror for "
 1957                                   "replica %s.", replica['id'])
 1958                     return constants.STATUS_ERROR
 1959             return constants.REPLICA_STATE_OUT_OF_SYNC
 1960 
 1961         snapmirror = snapmirrors[0]
 1962         # NOTE(dviroel): Don't try to resume or resync a SnapMirror that has
 1963         # one of the in progress transfer states, because the storage will
 1964         # answer with an error.
 1965         in_progress_status = ['preparing', 'transferring', 'finalizing']
 1966         if (snapmirror.get('mirror-state') != 'snapmirrored' and
 1967                 snapmirror.get('relationship-status') in in_progress_status):
 1968             return constants.REPLICA_STATE_OUT_OF_SYNC
 1969 
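              # The mirror is not in the 'snapmirrored' state and no transfer
              # is in progress, so resume and resync the relationship.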
 1970         if snapmirror.get('mirror-state') != 'snapmirrored':
 1971             try:
 1972                 vserver_client.resume_snapmirror_vol(
 1973                     snapmirror['source-vserver'],
 1974                     snapmirror['source-volume'],
 1975                     vserver,
 1976                     share_name)
 1977                 vserver_client.resync_snapmirror_vol(
 1978                     snapmirror['source-vserver'],
 1979                     snapmirror['source-volume'],
 1980                     vserver,
 1981                     share_name)
 1982                 return constants.REPLICA_STATE_OUT_OF_SYNC
 1983             except netapp_api.NaApiError:
 1984                 LOG.exception("Could not resync snapmirror.")
 1985                 return constants.STATUS_ERROR
 1986 
 1987         last_update_timestamp = float(
 1988             snapmirror.get('last-transfer-end-timestamp', 0))
 1989         # TODO(ameade): Have a configurable RPO for replicas, for now it is
 1990         # one hour.
 1991         if (last_update_timestamp and
 1992             (timeutils.is_older_than(
 1993                 datetime.datetime.utcfromtimestamp(last_update_timestamp)
 1994                 .isoformat(), 3600))):
 1995             return constants.REPLICA_STATE_OUT_OF_SYNC
 1996 
 1997         # Check all snapshots exist
 1998         snapshots = [snap['share_replica_snapshot']
 1999                      for snap in share_snapshots]
 2000         for snap in snapshots:
 2001             snapshot_name = snap.get('provider_location')
 2002             if not vserver_client.snapshot_exists(snapshot_name, share_name):
 2003                 return constants.REPLICA_STATE_OUT_OF_SYNC
 2004 
 2005         return constants.REPLICA_STATE_IN_SYNC
 2006 
 2007     def promote_replica(self, context, replica_list, replica, access_rules,
 2008                         share_server=None):
 2009         """Switch SnapMirror relationships and allow r/w ops on replica.
 2010 
 2011         Creates a DataMotion session and switches the direction of the
 2012         SnapMirror relationship between the currently 'active' instance (
 2013         SnapMirror source volume) and the replica. Also attempts setting up
 2014         SnapMirror relationships between the other replicas and the new
 2015         SnapMirror source volume ('active' instance).
 2016         :param context: Request Context
 2017         :param replica_list: List of replicas, including the 'active' instance
 2018         :param replica: Replica to promote to SnapMirror source
 2019         :param access_rules: Access rules to apply to the replica
 2020         :param share_server: ShareServer class instance of replica
 2021         :return: Updated replica_list
 2022         """
 2023         orig_active_replica = self.find_active_replica(replica_list)
 2024 
 2025         dm_session = data_motion.DataMotionSession()
 2026 
 2027         new_replica_list = []
 2028 
 2029         # Setup the new active replica
 2030         try:
 2031             new_active_replica = (
 2032                 self._convert_destination_replica_to_independent(
 2033                     context, dm_session, orig_active_replica, replica,
 2034                     access_rules, share_server=share_server))
 2035         except exception.StorageCommunicationException:
 2036             LOG.exception("Could not communicate with the backend "
 2037                           "for replica %s during promotion.",
 2038                           replica['id'])
 2039             new_active_replica = replica.copy()
 2040             new_active_replica['replica_state'] = (
 2041                 constants.STATUS_ERROR)
 2042             new_active_replica['status'] = constants.STATUS_ERROR
 2043             return [new_active_replica]
 2044 
 2045         new_replica_list.append(new_active_replica)
 2046 
 2047         # Change the source replica for all destinations to the new
 2048         # active replica.
 2049         for r in replica_list:
 2050             if r['id'] != replica['id']:
 2051                 r = self._safe_change_replica_source(dm_session, r,
 2052                                                      orig_active_replica,
 2053                                                      replica,
 2054                                                      replica_list)
 2055                 new_replica_list.append(r)
 2056 
 2057         orig_active_vserver = dm_session.get_vserver_from_share(
 2058             orig_active_replica)
 2059 
 2060         # Cleanup the original active share if necessary
 2061         orig_active_replica_backend = (
 2062             share_utils.extract_host(orig_active_replica['host'],
 2063                                      level='backend_name'))
 2064         orig_active_replica_name = self._get_backend_share_name(
 2065             orig_active_replica['id'])
 2066         orig_active_vserver_client = data_motion.get_client_for_backend(
 2067             orig_active_replica_backend, vserver_name=orig_active_vserver)
 2068 
 2069         orig_active_replica_helper = self._get_helper(orig_active_replica)
 2070         orig_active_replica_helper.set_client(orig_active_vserver_client)
 2071 
 2072         try:
 2073             orig_active_replica_helper.cleanup_demoted_replica(
 2074                 orig_active_replica, orig_active_replica_name)
 2075         except exception.StorageCommunicationException:
 2076             LOG.exception("Could not cleanup the original active replica %s.",
 2077                           orig_active_replica['id'])
 2078 
 2079         # Unmount the original active replica.
 2080         self._unmount_orig_active_replica(orig_active_replica,
 2081                                           orig_active_vserver)
 2082 
 2083         self._handle_qos_on_replication_change(dm_session,
 2084                                                new_active_replica,
 2085                                                orig_active_replica,
 2086                                                share_server=share_server)
 2087 
 2088         return new_replica_list
 2089 
 2090     def _unmount_orig_active_replica(self, orig_active_replica,
 2091                                      orig_active_vserver=None):
 2092         orig_active_replica_backend = (
 2093             share_utils.extract_host(orig_active_replica['host'],
 2094                                      level='backend_name'))
 2095         orig_active_vserver_client = data_motion.get_client_for_backend(
 2096             orig_active_replica_backend,
 2097             vserver_name=orig_active_vserver)
 2098         share_name = self._get_backend_share_name(
 2099             orig_active_replica['id'])
 2100         try:
 2101             orig_active_vserver_client.unmount_volume(share_name,
 2102                                                       force=True)
 2103             LOG.info("Unmount of the original active replica %s successful.",
 2104                      orig_active_replica['id'])
 2105         except exception.StorageCommunicationException:
 2106             LOG.exception("Could not unmount the original active replica %s.",
 2107                           orig_active_replica['id'])
 2108 
 2109     def _handle_qos_on_replication_change(self, dm_session, new_active_replica,
 2110                                           orig_active_replica,
 2111                                           share_server=None):
 2112         # QoS operations: Remove and purge QoS policy on old active replica
 2113         # if any and create a new policy on the destination if necessary.
 2114         extra_specs = share_types.get_extra_specs_from_share(
 2115             orig_active_replica)
 2116         qos_specs = self._get_normalized_qos_specs(extra_specs)
 2117 
 2118         if qos_specs and self._have_cluster_creds:
 2119             dm_session.remove_qos_on_old_active_replica(orig_active_replica)
 2120             # Check if a QoS policy already exists for the promoted replica,
 2121             # if it does, modify it as necessary, else create it:
 2122             try:
 2123                 new_active_replica_qos_policy = (
 2124                     self._get_backend_qos_policy_group_name(
 2125                         new_active_replica['id']))
 2126                 vserver, vserver_client = self._get_vserver(
 2127                     share_server=share_server)
 2128 
 2129                 volume_name_on_backend = self._get_backend_share_name(
 2130                     new_active_replica['id'])
 2131                 if not self._client.qos_policy_group_exists(
 2132                         new_active_replica_qos_policy):
 2133                     self._create_qos_policy_group(
 2134                         new_active_replica, vserver, qos_specs)
 2135                 else:
 2136                     max_throughput = self._get_max_throughput(
 2137                         new_active_replica['size'], qos_specs)
 2138                     self._client.qos_policy_group_modify(
 2139                         new_active_replica_qos_policy, max_throughput)
 2140                 vserver_client.set_qos_policy_group_for_volume(
 2141                     volume_name_on_backend, new_active_replica_qos_policy)
 2142 
 2143                 LOG.info("QoS policy applied successfully for promoted "
 2144                          "replica: %s", new_active_replica['id'])
 2145             except Exception:
 2146                 LOG.exception("Could not apply QoS to the promoted replica.")
 2147 
 2148     def _convert_destination_replica_to_independent(
 2149             self, context, dm_session, orig_active_replica, replica,
 2150             access_rules, share_server=None):
 2151         """Breaks SnapMirror and allows r/w ops on the destination replica.
 2152 
 2153         For promotion, the existing SnapMirror relationship must be broken
 2154         and access rules have to be granted to the broken off replica to
 2155         use it as an independent share.
 2156         :param context: Request Context
 2157         :param dm_session: Data motion object for SnapMirror operations
 2158         :param orig_active_replica: Original SnapMirror source
 2159         :param replica: Replica to promote to SnapMirror source
 2160         :param access_rules: Access rules to apply to the replica
 2161         :param share_server: ShareServer class instance of replica
 2162         :return: Updated replica
 2163         """
 2164         vserver, vserver_client = self._get_vserver(share_server=share_server)
 2165         share_name = self._get_backend_share_name(replica['id'])
 2166 
 2167         try:
 2168             # 1. Start an update to try to get a last minute transfer before we
 2169             # quiesce and break
 2170             dm_session.update_snapmirror(orig_active_replica, replica)
 2171         except exception.StorageCommunicationException:
 2172             # Ignore any errors since the current source replica may be
 2173             # unreachable
 2174             pass
 2175         # 2. Break SnapMirror
 2176         dm_session.break_snapmirror(orig_active_replica, replica)
 2177 
 2178         # 3. Setup access rules
 2179         new_active_replica = replica.copy()
 2180         new_active_replica['export_locations'] = self._create_export(
 2181             new_active_replica, share_server, vserver, vserver_client)
 2182 
 2183         helper = self._get_helper(replica)
 2184         helper.set_client(vserver_client)
 2185         try:
 2186             helper.update_access(replica, share_name, access_rules)
 2187         except Exception:
 2188             new_active_replica['access_rules_status'] = (
 2189                 constants.SHARE_INSTANCE_RULES_SYNCING)
 2190         else:
 2191             new_active_replica['access_rules_status'] = constants.STATUS_ACTIVE
 2192 
 2193         new_active_replica['replica_state'] = constants.REPLICA_STATE_ACTIVE
 2194 
 2195         # 4. Set File system size fixed to false
 2196         vserver_client.set_volume_filesys_size_fixed(share_name,
 2197                                                      filesys_size_fixed=False)
 2198 
 2199         return new_active_replica
 2200 
 2201     def _safe_change_replica_source(self, dm_session, replica,
 2202                                     orig_source_replica,
 2203                                     new_source_replica, replica_list):
 2204         """Attempts to change the SnapMirror source to new source.
 2205 
 2206         If the attempt fails, 'replica_state' is set to 'error'.
 2207         :param dm_session: Data motion object for SnapMirror operations
 2208         :param replica: Replica that requires a change of source
 2209         :param orig_source_replica: Original SnapMirror source volume
 2210         :param new_source_replica: New SnapMirror source volume
 2211         :return: Updated replica
 2212         """
 2213         try:
 2214             dm_session.change_snapmirror_source(replica,
 2215                                                 orig_source_replica,
 2216                                                 new_source_replica,
 2217                                                 replica_list)
 2218         except exception.StorageCommunicationException:
 2219             replica['status'] = constants.STATUS_ERROR
 2220             replica['replica_state'] = constants.STATUS_ERROR
 2221             replica['export_locations'] = []
 2222             msg = ("Failed to change replica (%s) to a SnapMirror "
 2223                    "destination. Replica backend is unreachable.")
 2224 
 2225             LOG.exception(msg, replica['id'])
 2226             return replica
 2227         except netapp_api.NaApiError:
 2228             replica['replica_state'] = constants.STATUS_ERROR
 2229             replica['export_locations'] = []
 2230             msg = ("Failed to change replica (%s) to a SnapMirror "
 2231                    "destination.")
 2232             LOG.exception(msg, replica['id'])
 2233             return replica
 2234 
 2235         replica['replica_state'] = constants.REPLICA_STATE_OUT_OF_SYNC
 2236         replica['export_locations'] = []
 2237 
 2238         return replica
 2239 
 2240     def create_replicated_snapshot(self, context, replica_list,
 2241                                    snapshot_instances, share_server=None):
 2242         active_replica = self.find_active_replica(replica_list)
 2243         active_snapshot = [x for x in snapshot_instances
 2244                            if x['share_id'] == active_replica['id']][0]
 2245         snapshot_name = self._get_backend_snapshot_name(active_snapshot['id'])
 2246 
 2247         self.create_snapshot(context, active_snapshot,
 2248                              share_server=share_server)
 2249 
 2250         active_snapshot['status'] = constants.STATUS_AVAILABLE
 2251         active_snapshot['provider_location'] = snapshot_name
 2252         snapshots = [active_snapshot]
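              # Pair each replica with its snapshot instance: sorting replicas
              # by 'id' and snapshot instances by 'share_id' lines them up,
              # since a snapshot instance's 'share_id' matches its replica's
              # 'id' (as in the active-snapshot lookup above).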
 2253         instances = zip(sorted(replica_list,
 2254                                key=lambda x: x['id']),
 2255                         sorted(snapshot_instances,
 2256                                key=lambda x: x['share_id']))
 2257 
 2258         for replica, snapshot in instances:
 2259             if snapshot['id'] != active_snapshot['id']:
 2260                 snapshot['provider_location'] = snapshot_name
 2261                 snapshots.append(snapshot)
 2262                 dm_session = data_motion.DataMotionSession()
 2263                 if replica.get('host'):
 2264                     try:
 2265                         dm_session.update_snapmirror(active_replica,
 2266                                                      replica)
 2267                     except netapp_api.NaApiError as e:
 2268                         if e.code != netapp_api.EOBJECTNOTFOUND:
 2269                             raise
 2270         return snapshots
 2271 
 2272     def delete_replicated_snapshot(self, context, replica_list,
 2273                                    snapshot_instances, share_server=None):
 2274         active_replica = self.find_active_replica(replica_list)
 2275         active_snapshot = [x for x in snapshot_instances
 2276                            if x['share_id'] == active_replica['id']][0]
 2277 
 2278         self.delete_snapshot(context, active_snapshot,
 2279                              share_server=share_server,
 2280                              snapshot_name=active_snapshot['provider_location']
 2281                              )
 2282         active_snapshot['status'] = constants.STATUS_DELETED
 2283         instances = zip(sorted(replica_list,
 2284                                key=lambda x: x['id']),
 2285                         sorted(snapshot_instances,
 2286                                key=lambda x: x['share_id']))
 2287 
 2288         for replica, snapshot in instances:
 2289             if snapshot['id'] != active_snapshot['id']:
 2290                 dm_session = data_motion.DataMotionSession()
 2291                 if replica.get('host'):
 2292                     try:
 2293                         dm_session.update_snapmirror(active_replica, replica)
 2294                     except netapp_api.NaApiError as e:
 2295                         if e.code != netapp_api.EOBJECTNOTFOUND:
 2296                             raise
 2297 
 2298         return [active_snapshot]
 2299 
 2300     def update_replicated_snapshot(self, replica_list, share_replica,
 2301                                    snapshot_instances, snapshot_instance,
 2302                                    share_server=None):
 2303         active_replica = self.find_active_replica(replica_list)
 2304         vserver, vserver_client = self._get_vserver(share_server=share_server)
 2305         share_name = self._get_backend_share_name(
 2306             snapshot_instance['share_id'])
 2307         snapshot_name = snapshot_instance.get('provider_location')
 2308         # NOTE(ameade): If there is no provider location,
 2309         # then grab from active snapshot instance
 2310         if snapshot_name is None:
 2311             active_snapshot = [x for x in snapshot_instances
 2312                                if x['share_id'] == active_replica['id']][0]
 2313             snapshot_name = active_snapshot.get('provider_location')
 2314             if not snapshot_name:
 2315                 return
 2316 
 2317         try:
 2318             snapshot_exists = vserver_client.snapshot_exists(snapshot_name,
 2319                                                              share_name)
 2320         except exception.SnapshotUnavailable:
 2321             # The volume must still be offline
 2322             return
 2323 
 2324         if (snapshot_exists and
 2325                 snapshot_instance['status'] == constants.STATUS_CREATING):
 2326             return {
 2327                 'status': constants.STATUS_AVAILABLE,
 2328                 'provider_location': snapshot_name,
 2329             }
 2330         elif (not snapshot_exists and
 2331               snapshot_instance['status'] == constants.STATUS_DELETING):
 2332             raise exception.SnapshotResourceNotFound(
 2333                 name=snapshot_instance.get('provider_location'))
 2334 
 2335         dm_session = data_motion.DataMotionSession()
 2336         try:
 2337             dm_session.update_snapmirror(active_replica, share_replica)
 2338         except netapp_api.NaApiError as e:
 2339             if e.code != netapp_api.EOBJECTNOTFOUND:
 2340                 raise
 2341 
 2342     def revert_to_replicated_snapshot(self, context, active_replica,
 2343                                       replica_list, active_replica_snapshot,
 2344                                       replica_snapshots, share_server=None):
 2345         """Reverts a replicated share (in place) to the specified snapshot."""
 2346         vserver, vserver_client = self._get_vserver(share_server=share_server)
 2347         share_name = self._get_backend_share_name(
 2348             active_replica_snapshot['share_id'])
 2349         snapshot_name = (
 2350             active_replica_snapshot.get('provider_location') or
 2351             self._get_backend_snapshot_name(active_replica_snapshot['id']))
 2352 
 2353         LOG.debug('Restoring snapshot %s', snapshot_name)
 2354 
 2355         dm_session = data_motion.DataMotionSession()
 2356         non_active_replica_list = self._find_nonactive_replicas(replica_list)
 2357 
 2358         # Ensure source snapshot exists
 2359         vserver_client.get_snapshot(share_name, snapshot_name)
 2360 
 2361         # Break all mirrors
 2362         for replica in non_active_replica_list:
 2363             try:
 2364                 dm_session.break_snapmirror(
 2365                     active_replica, replica, mount=False)
 2366             except netapp_api.NaApiError as e:
 2367                 if e.code != netapp_api.EOBJECTNOTFOUND:
 2368                     raise
 2369 
 2370         # Delete source SnapMirror snapshots that will prevent a snap restore
 2371         snapmirror_snapshot_names = vserver_client.list_snapmirror_snapshots(
 2372             share_name)
 2373         for snapmirror_snapshot_name in snapmirror_snapshot_names:
 2374             vserver_client.delete_snapshot(
 2375                 share_name, snapmirror_snapshot_name, ignore_owners=True)
 2376 
 2377         # Restore source snapshot of interest
 2378         vserver_client.restore_snapshot(share_name, snapshot_name)
 2379 
 2380         # Reestablish mirrors
 2381         for replica in non_active_replica_list:
 2382             try:
 2383                 dm_session.resync_snapmirror(active_replica, replica)
 2384             except netapp_api.NaApiError as e:
 2385                 if e.code != netapp_api.EOBJECTNOTFOUND:
 2386                     raise
 2387 
 2388     def _check_destination_vserver_for_vol_move(self, source_share,
 2389                                                 source_vserver,
 2390                                                 dest_share_server):
 2391         try:
 2392             destination_vserver, __ = self._get_vserver(
 2393                 share_server=dest_share_server)
 2394         except exception.InvalidParameterValue:
 2395             destination_vserver = None
 2396 
 2397         if source_vserver != destination_vserver:
 2398             msg = _("Cannot migrate %(shr)s efficiently from source "
 2399                     "VServer %(src)s to destination VServer %(dest)s.")
 2400             msg_args = {
 2401                 'shr': source_share['id'],
 2402                 'src': source_vserver,
 2403                 'dest': destination_vserver,
 2404             }
 2405             raise exception.NetAppException(msg % msg_args)
 2406 
 2407     def migration_check_compatibility(self, context, source_share,
 2408                                       destination_share, share_server=None,
 2409                                       destination_share_server=None):
 2410         """Checks compatibility between self.host and destination host."""
 2411         # We need cluster creds to perform an intra-cluster data motion
 2412         compatible = False
 2413         destination_host = destination_share['host']
 2414 
 2415         if self._have_cluster_creds:
 2416             try:
 2417                 backend = share_utils.extract_host(
 2418                     destination_host, level='backend_name')
 2419                 destination_aggregate = share_utils.extract_host(
 2420                     destination_host, level='pool')
 2421                 # Validate new extra-specs are valid on the destination
 2422                 extra_specs = share_types.get_extra_specs_from_share(
 2423                     destination_share)
 2424                 self._check_extra_specs_validity(
 2425                     destination_share, extra_specs)
 2426                 # NOTE(dviroel): Check if the destination share-type has valid
 2427                 # provisioning options.
 2428                 provisioning_options = self._get_provisioning_options(
 2429                     extra_specs)
 2430                 qos_specs = self._get_normalized_qos_specs(extra_specs)
 2431                 self.validate_provisioning_options_for_share(
 2432                     provisioning_options, extra_specs=extra_specs,
 2433                     qos_specs=qos_specs)
 2434 
 2435                 # NOTE (felipe_rodrigues): NetApp can only migrate within the
 2436                 # same share server, so there is no need to check that the
 2437                 # destination share has the same NFS config as the destination
 2438                 # server.
 2439 
 2440                 # TODO(gouthamr): Check whether QoS min-throughputs can be
 2441                 # honored on the destination aggregate when supported.
 2442                 self._check_aggregate_extra_specs_validity(
 2443                     destination_aggregate, extra_specs)
 2444 
 2445                 data_motion.get_backend_configuration(backend)
 2446 
 2447                 source_vserver, __ = self._get_vserver(
 2448                     share_server=share_server)
 2449                 share_volume = self._get_backend_share_name(
 2450                     source_share['id'])
 2451 
 2452                 # NOTE(dviroel): If source and destination vservers are
 2453                 # compatible for volume move, the provisioning option
 2454                 # 'adaptive_qos_policy_group' will also be supported since the
 2455                 # share will remain in the same vserver.
 2456                 self._check_destination_vserver_for_vol_move(
 2457                     source_share, source_vserver, destination_share_server)
 2458 
 2459                 encrypt_dest = self._get_dest_flexvol_encryption_value(
 2460                     destination_share)
 2461                 self._client.check_volume_move(
 2462                     share_volume, source_vserver, destination_aggregate,
 2463                     encrypt_destination=encrypt_dest)
 2464 
 2465             except Exception:
 2466                 msg = ("Cannot migrate share %(shr)s efficiently between "
 2467                        "%(src)s and %(dest)s.")
 2468                 msg_args = {
 2469                     'shr': source_share['id'],
 2470                     'src': source_share['host'],
 2471                     'dest': destination_host,
 2472                 }
 2473                 LOG.exception(msg, msg_args)
 2474             else:
 2475                 compatible = True
 2476         else:
 2477             msg = ("Cluster credentials have not been configured "
 2478                    "with this share driver. Cannot perform volume move "
 2479                    "operations.")
 2480             LOG.warning(msg)
 2481 
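              # All of the reported migration capabilities follow the single
              # compatibility check: an intra-cluster volume move keeps the
              # share writable, is nondisruptive, and preserves metadata and
              # snapshots, so each flag simply mirrors 'compatible'.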
 2482         compatibility = {
 2483             'compatible': compatible,
 2484             'writable': compatible,
 2485             'nondisruptive': compatible,
 2486             'preserve_metadata': compatible,
 2487             'preserve_snapshots': compatible,
 2488         }
 2489 
 2490         return compatibility
 2491 
 2492     def _move_volume_after_splitting(self, source_share, destination_share,
 2493                                      share_server=None, cutover_action='wait'):
 2494         retries = (self.configuration.netapp_start_volume_move_timeout / 5
 2495                    or 1)
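              # With a fixed 5-second interval (backoff_rate=1), retrying
              # timeout / 5 times waits roughly
              # netapp_start_volume_move_timeout seconds in total.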
 2496 
 2497         @manila_utils.retry(exception.ShareBusyException, interval=5,
 2498                             retries=retries, backoff_rate=1)
 2499         def try_move_volume():
 2500             try:
 2501                 self._move_volume(source_share, destination_share,
 2502                                   share_server, cutover_action)
 2503             except netapp_api.NaApiError as e:
 2504                 undergoing_split = 'undergoing a clone split'
 2505                 msg_args = {'id': source_share['id']}
 2506                 if (e.code == netapp_api.EAPIERROR and
 2507                         undergoing_split in e.message):
 2508                     msg = _('The volume %(id)s is undergoing a clone split '
 2509                             'operation. Will retry the operation.') % msg_args
 2510                     LOG.warning(msg)
 2511                     raise exception.ShareBusyException(reason=msg)
 2512                 else:
 2513                     msg = _("Unable to perform move operation for the volume "
 2514                             "%(id)s. Caught an unexpected error. Not "
 2515                             "retrying.") % msg_args
 2516                     raise exception.NetAppException(message=msg)
 2517         try:
 2518             try_move_volume()
 2519         except exception.ShareBusyException:
 2520             msg_args = {'id': source_share['id']}
 2521             msg = _("Unable to perform move operation for the volume %(id)s "
 2522                     "because a clone split operation is still in progress. "
 2523                     "Retries exhausted. Not retrying.") % msg_args
 2524             raise exception.NetAppException(message=msg)
 2525 
 2526     def _move_volume(self, source_share, destination_share, share_server=None,
 2527                      cutover_action='wait'):
 2528         # Intra-cluster migration
 2529         vserver, vserver_client = self._get_vserver(share_server=share_server)
 2530         share_volume = self._get_backend_share_name(source_share['id'])
 2531         destination_aggregate = share_utils.extract_host(
 2532             destination_share['host'], level='pool')
 2533 
 2534         # If the destination's share type extra-spec for Flexvol encryption
 2535         # is different than the source's, then specify the volume-move
 2536         # operation to set the correct 'encrypt' attribute on the destination
 2537         # volume.
 2538         encrypt_dest = self._get_dest_flexvol_encryption_value(
 2539             destination_share)
 2540 
 2541         self._client.start_volume_move(
 2542             share_volume,
 2543             vserver,
 2544             destination_aggregate,
 2545             cutover_action=cutover_action,
 2546             encrypt_destination=encrypt_dest)
 2547 
 2548         msg = ("Began volume move operation of share %(shr)s from %(src)s "
 2549                "to %(dest)s.")
 2550         msg_args = {
 2551             'shr': source_share['id'],
 2552             'src': source_share['host'],
 2553             'dest': destination_share['host'],
 2554         }
 2555         LOG.info(msg, msg_args)
 2556 
 2557     def migration_start(self, context, source_share, destination_share,
 2558                         source_snapshots, snapshot_mappings,
 2559                         share_server=None, destination_share_server=None):
 2560         """Begins data motion from source_share to destination_share."""
 2561         self._move_volume(source_share, destination_share, share_server)
 2562 
 2563     def _get_volume_move_status(self, source_share, share_server):
 2564         vserver, vserver_client = self._get_vserver(share_server=share_server)
 2565         share_volume = self._get_backend_share_name(source_share['id'])
 2566         status = self._client.get_volume_move_status(share_volume, vserver)
 2567         return status
 2568 
 2569     def _check_volume_clone_split_completed(self, share, vserver_client):
 2570         share_volume = self._get_backend_share_name(share['id'])
 2571         return vserver_client.check_volume_clone_split_completed(share_volume)
 2572 
 2573     def _get_dest_flexvol_encryption_value(self, destination_share):
 2574         dest_share_type_encrypted_val = share_types.get_share_type_extra_specs(
 2575             destination_share['share_type_id'],
 2576             'netapp_flexvol_encryption')
 2577         encrypt_destination = share_types.parse_boolean_extra_spec(
 2578             'netapp_flexvol_encryption', dest_share_type_encrypted_val)
 2579 
 2580         return encrypt_destination
 2581 
 2582     def _check_volume_move_completed(self, source_share, share_server):
 2583         """Check progress of volume move operation."""
 2584         status = self._get_volume_move_status(source_share, share_server)
 2585         completed_phases = (
 2586             'cutover_hard_deferred', 'cutover_soft_deferred', 'completed')
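              # The deferred cut-over phases count as completed here: the data
              # copy has finished and only the cut-over itself remains, which
              # migration_complete() triggers later.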
 2587 
 2588         move_phase = status['phase'].lower()
 2589         if move_phase == 'failed':
 2590             msg_args = {
 2591                 'shr': source_share['id'],
 2592                 'reason': status['details'],
 2593             }
 2594             msg = _("Volume move operation for share %(shr)s failed. Reason: "
 2595                     "%(reason)s") % msg_args
 2596             LOG.exception(msg)
 2597             raise exception.NetAppException(msg)
 2598         elif move_phase in completed_phases:
 2599             return True
 2600 
 2601         return False
 2602 
 2603     def migration_continue(self, context, source_share, destination_share,
 2604                            source_snapshots, snapshot_mappings,
 2605                            share_server=None, destination_share_server=None):
 2606         """Check progress of migration, try to repair data motion errors."""
 2607         return self._check_volume_move_completed(source_share, share_server)
 2608 
 2609     def _get_volume_move_progress(self, source_share, share_server):
 2610         status = self._get_volume_move_status(source_share, share_server)
 2611 
 2612         # NOTE (gouthamr): If the volume move is waiting for a manual
 2613         # intervention to cut over, the copy is complete from the user's
 2614         # perspective; volume move copies the remaining data before cut-over.
 2615         if status['phase'] in ('cutover_hard_deferred',
 2616                                'cutover_soft_deferred'):
 2617             status['percent-complete'] = 100
 2618 
 2619         msg = ("Volume move status for share %(share)s: (State) %(state)s. "
 2620                "(Phase) %(phase)s. Details: %(details)s")
 2621         msg_args = {
 2622             'state': status['state'],
 2623             'details': status['details'],
 2624             'share': source_share['id'],
 2625             'phase': status['phase'],
 2626         }
 2627         LOG.info(msg, msg_args)
 2628 
 2629         return {
 2630             'total_progress': status['percent-complete'] or 0,
 2631             'state': status['state'],
 2632             'estimated_completion_time': status['estimated-completion-time'],
 2633             'phase': status['phase'],
 2634             'details': status['details'],
 2635         }
 2636 
 2637     def migration_get_progress(self, context, source_share,
 2638                                destination_share, source_snapshots,
 2639                                snapshot_mappings, share_server=None,
 2640                                destination_share_server=None):
 2641         """Return detailed progress of the migration in progress."""
 2642         return self._get_volume_move_progress(source_share, share_server)
 2643 
 2644     def migration_cancel(self, context, source_share, destination_share,
 2645                          source_snapshots, snapshot_mappings,
 2646                          share_server=None, destination_share_server=None):
 2647         """Abort an ongoing migration."""
 2648         vserver, vserver_client = self._get_vserver(share_server=share_server)
 2649         share_volume = self._get_backend_share_name(source_share['id'])
 2650         retries = (self.configuration.netapp_migration_cancel_timeout / 5 or
 2651                    1)
 2652 
 2653         try:
 2654             self._get_volume_move_status(source_share, share_server)
 2655         except exception.NetAppException:
 2656             LOG.exception("Could not get volume move status.")
 2657             return
 2658 
 2659         self._client.abort_volume_move(share_volume, vserver)
 2660 
 2661         @manila_utils.retry(exception.InUse, interval=5,
 2662                             retries=retries, backoff_rate=1)
 2663         def wait_for_migration_cancel_complete():
 2664             move_status = self._get_volume_move_status(source_share,
 2665                                                        share_server)
 2666             if move_status['state'] == 'failed':
 2667                 return
 2668             else:
 2669                 msg = "Migration cancellation isn't finished yet."
 2670                 raise exception.InUse(message=msg)
 2671 
 2672         try:
 2673             wait_for_migration_cancel_complete()
 2674         except exception.InUse:
 2675             move_status = self._get_volume_move_status(source_share,
 2676                                                        share_server)
 2677             msg_args = {
 2678                 'share_move_state': move_status['state']
 2679             }
 2680             msg = _("Migration cancellation was not successful. The volume "
 2681                     "move state failed to transition from "
 2682                     "'%(share_move_state)s' to 'failed'. Retries "
 2683                     "exhausted.") % msg_args
 2684             raise exception.NetAppException(message=msg)
 2685         except exception.NetAppException:
 2686             LOG.exception("Could not get volume move status.")
 2687 
 2688         msg = ("Share volume move operation for share %(shr)s from host "
 2689                "%(src)s to %(dest)s was successfully aborted.")
 2690         msg_args = {
 2691             'shr': source_share['id'],
 2692             'src': source_share['host'],
 2693             'dest': destination_share['host'],
 2694         }
 2695         LOG.info(msg, msg_args)
 2696 
 2697     def migration_complete(self, context, source_share, destination_share,
 2698                            source_snapshots, snapshot_mappings,
 2699                            share_server=None, destination_share_server=None):
 2700         """Initiate the cutover to destination share after move is complete."""
 2701         vserver, vserver_client = self._get_vserver(share_server=share_server)
 2702         share_volume = self._get_backend_share_name(source_share['id'])
 2703 
 2704         status = self._get_volume_move_status(source_share, share_server)
 2705 
 2706         move_phase = status['phase'].lower()
 2707         if move_phase == 'completed':
 2708             LOG.debug("Volume move operation was already successfully "
 2709                       "completed for share %(shr)s.",
 2710                       {'shr': source_share['id']})
 2711         elif move_phase in ('cutover_hard_deferred', 'cutover_soft_deferred'):
 2712             self._client.trigger_volume_move_cutover(share_volume, vserver)
 2713             self._wait_for_cutover_completion(
 2714                 source_share, share_server)
 2715         else:
 2716             msg_args = {
 2717                 'shr': source_share['id'],
 2718                 'status': status['state'],
 2719                 'phase': status['phase'],
 2720                 'details': status['details'],
 2721             }
 2722             msg = _("Cannot complete volume move operation for share %(shr)s. "
 2723                     "Current volume move status: %(status)s, phase: "
 2724                     "%(phase)s. Details: %(details)s") % msg_args
 2725             LOG.exception(msg)
 2726             raise exception.NetAppException(msg)
 2727 
 2728         new_share_volume_name = self._get_backend_share_name(
 2729             destination_share['id'])
 2730         vserver_client.set_volume_name(share_volume, new_share_volume_name)
 2731 
 2732         # Modify volume properties per share type extra-specs
 2733         extra_specs = share_types.get_extra_specs_from_share(
 2734             destination_share)
 2735         extra_specs = self._remap_standard_boolean_extra_specs(extra_specs)
 2736         self._check_extra_specs_validity(destination_share, extra_specs)
 2737         provisioning_options = self._get_provisioning_options(extra_specs)
 2738         qos_policy_group_name = self._modify_or_create_qos_for_existing_share(
 2739             destination_share, extra_specs, vserver, vserver_client)
 2740         if qos_policy_group_name:
 2741             provisioning_options['qos_policy_group'] = qos_policy_group_name
 2742         else:
 2743             # Remove the QoS policy from the migrated share, since the
 2744             # share type this share is being migrated to does not specify
 2745             # any QoS settings.
 2746             provisioning_options['qos_policy_group'] = "none"
 2747 
 2748             qos_policy_of_src_share = self._get_backend_qos_policy_group_name(
 2749                 source_share['id'])
 2750             self._client.mark_qos_policy_group_for_deletion(
 2751                 qos_policy_of_src_share)
 2752 
 2753         destination_aggregate = share_utils.extract_host(
 2754             destination_share['host'], level='pool')
 2755 
 2756         # Modify volume to match extra specs
 2757         vserver_client.modify_volume(destination_aggregate,
 2758                                      new_share_volume_name,
 2759                                      **provisioning_options)
 2760 
 2761         msg = ("Volume move operation for share %(shr)s has completed "
 2762                "successfully. Share has been moved from %(src)s to "
 2763                "%(dest)s.")
 2764         msg_args = {
 2765             'shr': source_share['id'],
 2766             'src': source_share['host'],
 2767             'dest': destination_share['host'],
 2768         }
 2769         LOG.info(msg, msg_args)
 2770 
 2771         # NOTE(gouthamr): For nondisruptive migration, current export
 2772         # policy will not be cleared, the export policy will be renamed to
 2773         # match the name of the share.
 2774         export_locations = self._create_export(
 2775             destination_share, share_server, vserver, vserver_client,
 2776             clear_current_export_policy=False)
 2777         src_snaps_dict = {s['id']: s for s in source_snapshots}
 2778         snapshot_updates = {}
 2779 
 2780         for source_snap_id, destination_snap in snapshot_mappings.items():
 2781             p_location = src_snaps_dict[source_snap_id]['provider_location']
 2782 
 2783             snapshot_updates.update(
 2784                 {destination_snap['id']: {'provider_location': p_location}})
 2785 
 2786         return {
 2787             'export_locations': export_locations,
 2788             'snapshot_updates': snapshot_updates,
 2789         }
 2790 
 2791     @na_utils.trace
 2792     def _modify_or_create_qos_for_existing_share(self, share, extra_specs,
 2793                                                  vserver, vserver_client):
 2794         """Gets/Creates QoS policy for an existing FlexVol.
 2795 
 2796         The share's assigned QoS policy is renamed and adjusted if the policy
 2797         is exclusive to the FlexVol. If the policy includes other workloads
 2798         besides the FlexVol, a new policy is created with the specs necessary.
 2799         """
 2800         qos_specs = self._get_normalized_qos_specs(extra_specs)
 2801         if not qos_specs:
 2802             return
 2803 
 2804         backend_share_name = self._get_backend_share_name(share['id'])
 2805         qos_policy_group_name = self._get_backend_qos_policy_group_name(
 2806             share['id'])
 2807 
 2808         create_new_qos_policy_group = True
 2809 
 2810         backend_volume = vserver_client.get_volume(
 2811             backend_share_name)
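              # The backend reports the volume size in bytes; round it up to
              # whole GiB for the QoS calculations below (e.g. 1.5 GiB -> 2).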
 2812         backend_volume_size = int(
 2813             math.ceil(float(backend_volume['size']) / units.Gi))
 2814 
 2815         LOG.debug("Checking for a pre-existing QoS policy group that "
 2816                   "is exclusive to the volume %s.", backend_share_name)
 2817 
 2818         # Does the volume have an exclusive QoS policy that we can rename?
 2819         if backend_volume['qos-policy-group-name'] is not None:
 2820             existing_qos_policy_group = self._client.qos_policy_group_get(
 2821                 backend_volume['qos-policy-group-name'])
 2822             if existing_qos_policy_group['num-workloads'] == 1:
 2823                 # The policy is exclusive, so set max-throughput and rename
 2824 
 2825                 msg = ("Found pre-existing QoS policy %(policy)s and it is "
 2826                        "exclusive to the volume %(volume)s. Modifying and "
 2827                        "renaming this policy to %(new_policy)s.")
 2828                 msg_args = {
 2829                     'policy': backend_volume['qos-policy-group-name'],
 2830                     'volume': backend_share_name,
 2831                     'new_policy': qos_policy_group_name,
 2832                 }
 2833                 LOG.debug(msg, msg_args)
 2834 
 2835                 max_throughput = self._get_max_throughput(
 2836                     backend_volume_size, qos_specs)
 2837                 self._client.qos_policy_group_modify(
 2838                     backend_volume['qos-policy-group-name'], max_throughput)
 2839                 self._client.qos_policy_group_rename(
 2840                     backend_volume['qos-policy-group-name'],
 2841                     qos_policy_group_name)
 2842                 create_new_qos_policy_group = False
 2843 
 2844         if create_new_qos_policy_group:
 2845             share_obj = {
 2846                 'size': backend_volume_size,
 2847                 'id': share['id'],
 2848             }
 2849             LOG.debug("No existing QoS policy group found for "
 2850                       "volume. Creating a new one with name %s.",
 2851                       qos_policy_group_name)
 2852             self._create_qos_policy_group(share_obj, vserver, qos_specs,
 2853                                           vserver_client=vserver_client)
 2854         return qos_policy_group_name
 2855 
 2856     def _wait_for_cutover_completion(self, source_share, share_server):
 2857 
 2858         retries = (self.configuration.netapp_volume_move_cutover_timeout / 5
 2859                    or 1)
 2860 
 2861         @manila_utils.retry(exception.ShareBusyException, interval=5,
 2862                             retries=retries, backoff_rate=1)
 2863         def check_move_completion():
 2864             status = self._get_volume_move_status(source_share, share_server)
 2865             if status['phase'].lower() != 'completed':
 2866                 msg_args = {
 2867                     'shr': source_share['id'],
 2868                     'phs': status['phase'],
 2869                 }
 2870                 msg = _('Volume move operation for share %(shr)s is not '
 2871                         'complete. Current Phase: %(phs)s. '
 2872                         'Retrying.') % msg_args
 2873                 LOG.warning(msg)
 2874                 raise exception.ShareBusyException(reason=msg)
 2875 
 2876         try:
 2877             check_move_completion()
 2878         except exception.ShareBusyException:
 2879             msg = _("Volume move operation did not complete after cut-over "
 2880                     "was triggered. Retries exhausted. Not retrying.")
 2881             raise exception.NetAppException(message=msg)
 2882 
 2883     def get_backend_info(self, context):
 2884         snapdir_visibility = self.configuration.netapp_reset_snapdir_visibility
 2885         return {
 2886             'snapdir_visibility': snapdir_visibility,
 2887         }
 2888 
 2889     def ensure_shares(self, context, shares):
 2890         cfg_snapdir = self.configuration.netapp_reset_snapdir_visibility
 2891         hide_snapdir = self.HIDE_SNAPDIR_CFG_MAP[cfg_snapdir.lower()]
 2892         if hide_snapdir is not None:
 2893             for share in shares:
 2894                 share_server = share.get('share_server')
 2895                 vserver, vserver_client = self._get_vserver(
 2896                     share_server=share_server)
 2897                 share_name = self._get_backend_share_name(share['id'])
 2898                 self._apply_snapdir_visibility(
 2899                     hide_snapdir, share_name, vserver_client)
 2900 
 2901     def get_share_status(self, share, share_server=None):
 2902         if share['status'] == constants.STATUS_CREATING_FROM_SNAPSHOT:
 2903             return self._update_create_from_snapshot_status(share,
 2904                                                             share_server)
 2905         else:
 2906             LOG.warning("Caught an unexpected share status '%s' during share "
 2907                         "status update routine. Skipping.", share['status'])
 2908 
 2909     def volume_rehost(self, share, src_vserver, dest_vserver):
 2910         volume_name = self._get_backend_share_name(share['id'])
 2911         msg = ("Rehosting volume of share %(shr)s from vserver %(src)s "
 2912                "to vserver %(dest)s.")
 2913         msg_args = {
 2914             'shr': share['id'],
 2915             'src': src_vserver,
 2916             'dest': dest_vserver,
 2917         }
 2918         LOG.info(msg, msg_args)
 2919         self._client.rehost_volume(volume_name, src_vserver, dest_vserver)
 2920 
 2921     def _rehost_and_mount_volume(self, share, src_vserver, src_vserver_client,
 2922                                  dest_vserver, dest_vserver_client):
 2923         volume_name = self._get_backend_share_name(share['id'])
 2924         # Unmount volume in the source vserver:
 2925         src_vserver_client.unmount_volume(volume_name)
 2926         # Rehost the volume
 2927         self.volume_rehost(share, src_vserver, dest_vserver)
 2928         # Mount the volume on the destination vserver
 2929         dest_vserver_client.mount_volume(volume_name)
 2930 
 2931     def _check_capacity_compatibility(self, pools, thin_provision, size):
 2932         """Check if the size requested is suitable for the available pools."""
 2933 
 2934         backend_free_capacity = 0.0
 2935 
 2936         for pool in pools:
 2937             if "unknown" in (pool['free_capacity_gb'],
 2938                              pool['total_capacity_gb']):
 2939                 return False
 2940             reserved = float(pool['reserved_percentage']) / 100
 2941 
 2942             total_pool_free = math.floor(
 2943                 pool['free_capacity_gb'] -
 2944                 pool['total_capacity_gb'] * reserved)
 2945 
 2946             if thin_provision:
 2947                 # If thin provisioning is enabled, total_pool_free must be
 2948                 # recalculated using each pool's max over-subscription
 2949                 # ratio. After summing the free space for each pool we
 2950                 # have the total backend free capacity to compare with the
 2951                 # requested size.
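                      # For example (illustrative figures): a pool reporting
                      # free_capacity_gb=100, total_capacity_gb=200 and
                      # reserved_percentage=10 contributes
                      # floor(100 - 200 * 0.10) = 80; with
                      # max_over_subscription_ratio=2.0 that becomes
                      # floor(80 * 2.0) = 160 GB of usable capacity.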
 2952                 if pool['max_over_subscription_ratio'] >= 1:
 2953                     total_pool_free = math.floor(
 2954                         total_pool_free * pool['max_over_subscription_ratio'])
 2955 
 2956             backend_free_capacity += total_pool_free
 2957 
 2958         return size <= backend_free_capacity