"Fossies" - the Fresh Open Source Software Archive

Member "manila-8.1.3/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py" (20 Jul 2020, 106321 Bytes) of package /linux/misc/openstack/manila-8.1.3.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) Python source code syntax highlighting (style: standard) with prefixed line numbers. Alternatively you can here view or download the uninterpreted source code file. For more information about "lib_base.py" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 8.1.2_vs_8.1.3.

# Copyright (c) 2015 Clinton Knight.  All rights reserved.
# Copyright (c) 2015 Tom Barron.  All rights reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""
NetApp Data ONTAP cDOT base storage driver library.

This library is the abstract base for subclasses that complete the
single-SVM or multi-SVM functionality needed by the cDOT Manila drivers.
"""

import copy
import datetime
import json
import math
import socket

from oslo_config import cfg
from oslo_log import log
from oslo_service import loopingcall
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
import six

from manila.common import constants
from manila import exception
from manila.i18n import _
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp.dataontap.client import client_cmode
from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion
from manila.share.drivers.netapp.dataontap.cluster_mode import performance
from manila.share.drivers.netapp.dataontap.protocols import cifs_cmode
from manila.share.drivers.netapp.dataontap.protocols import nfs_cmode
from manila.share.drivers.netapp import options as na_opts
from manila.share.drivers.netapp import utils as na_utils
from manila.share import share_types
from manila.share import utils as share_utils
from manila import utils as manila_utils

LOG = log.getLogger(__name__)
CONF = cfg.CONF


class NetAppCmodeFileStorageLibrary(object):

    AUTOSUPPORT_INTERVAL_SECONDS = 3600  # hourly
    SSC_UPDATE_INTERVAL_SECONDS = 3600  # hourly
    HOUSEKEEPING_INTERVAL_SECONDS = 600  # ten minutes

    SUPPORTED_PROTOCOLS = ('nfs', 'cifs')

    DEFAULT_FILTER_FUNCTION = 'capabilities.utilization < 70'
    DEFAULT_GOODNESS_FUNCTION = '100 - capabilities.utilization'
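    # For example, a pool reporting capabilities.utilization of 45 passes the
    # default filter function (45 < 70) and receives a goodness score of 55,
    # so less-utilized pools are weighted ahead of busier ones.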

    # Maps NetApp qualified extra specs keys to corresponding backend API
    # client library argument keywords.  When we expose more backend
    # capabilities here, we will add them to this map.
    BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP = {
        'netapp:thin_provisioned': 'thin_provisioned',
        'netapp:dedup': 'dedup_enabled',
        'netapp:compression': 'compression_enabled',
        'netapp:split_clone_on_create': 'split',
        'netapp:hide_snapdir': 'hide_snapdir',
    }

    STRING_QUALIFIED_EXTRA_SPECS_MAP = {
        'netapp:snapshot_policy': 'snapshot_policy',
        'netapp:language': 'language',
        'netapp:max_files': 'max_files',
    }

    # Maps standard extra spec keys to legacy NetApp keys
    STANDARD_BOOLEAN_EXTRA_SPECS_MAP = {
        'thin_provisioning': 'netapp:thin_provisioned',
        'dedupe': 'netapp:dedup',
        'compression': 'netapp:compression',
    }

    QOS_SPECS = {
        'netapp:maxiops': 'maxiops',
        'netapp:maxiopspergib': 'maxiopspergib',
        'netapp:maxbps': 'maxbps',
        'netapp:maxbpspergib': 'maxbpspergib',
    }

    HIDE_SNAPDIR_CFG_MAP = {
        'visible': False,
        'hidden': True,
        'default': None,
    }

    SIZE_DEPENDENT_QOS_SPECS = {'maxiopspergib', 'maxbpspergib'}
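    # For example, a share type carrying 'netapp:dedup': 'true' is translated
    # via BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP into the client kwarg
    # dedup_enabled=True, while the size-dependent QoS specs above are scaled
    # by the share size before a policy group is created (see
    # _get_max_throughput below).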

    def __init__(self, driver_name, **kwargs):
        na_utils.validate_driver_instantiation(**kwargs)

        self.driver_name = driver_name

        self.private_storage = kwargs['private_storage']
        self.configuration = kwargs['configuration']
        self.configuration.append_config_values(na_opts.netapp_connection_opts)
        self.configuration.append_config_values(na_opts.netapp_basicauth_opts)
        self.configuration.append_config_values(na_opts.netapp_transport_opts)
        self.configuration.append_config_values(na_opts.netapp_support_opts)
        self.configuration.append_config_values(na_opts.netapp_cluster_opts)
        self.configuration.append_config_values(
            na_opts.netapp_provisioning_opts)
        self.configuration.append_config_values(
            na_opts.netapp_data_motion_opts)

        self._licenses = []
        self._client = None
        self._clients = {}
        self._ssc_stats = {}
        self._have_cluster_creds = None
        self._revert_to_snapshot_support = False
        self._cluster_info = {}

        self._app_version = kwargs.get('app_version', 'unknown')

        na_utils.setup_tracing(self.configuration.netapp_trace_flags,
                               self.configuration.netapp_api_trace_pattern)
        self._backend_name = self.configuration.safe_get(
            'share_backend_name') or driver_name

    @na_utils.trace
    def do_setup(self, context):
        self._client = self._get_api_client()
        self._have_cluster_creds = self._client.check_for_cluster_credentials()
        if self._have_cluster_creds is True:
            self._set_cluster_info()

        self._licenses = self._get_licenses()
        self._revert_to_snapshot_support = self._check_snaprestore_license()

        # Performance monitoring library
        self._perf_library = performance.PerformanceLibrary(self._client)

    @na_utils.trace
    def _set_cluster_info(self):
        self._cluster_info['nve_support'] = (
            self._client.is_nve_supported()
            and self._client.features.FLEXVOL_ENCRYPTION)

    @na_utils.trace
    def check_for_setup_error(self):
        self._start_periodic_tasks()

    def _get_vserver(self, share_server=None):
        raise NotImplementedError()

    @na_utils.trace
    def _get_api_client(self, vserver=None):

        # Use cached value to prevent calls to system-get-ontapi-version.
        client = self._clients.get(vserver)

        if not client:
            client = client_cmode.NetAppCmodeClient(
                transport_type=self.configuration.netapp_transport_type,
                username=self.configuration.netapp_login,
                password=self.configuration.netapp_password,
                hostname=self.configuration.netapp_server_hostname,
                port=self.configuration.netapp_server_port,
                vserver=vserver,
                trace=na_utils.TRACE_API,
                api_trace_pattern=na_utils.API_TRACE_PATTERN)
            self._clients[vserver] = client

        return client

    @na_utils.trace
    def _get_licenses(self):

        if not self._have_cluster_creds:
            LOG.debug('License info not available without cluster credentials')
            return []

        self._licenses = self._client.get_licenses()

        log_data = {
            'backend': self._backend_name,
            'licenses': ', '.join(self._licenses),
        }
        LOG.info('Available licenses on %(backend)s '
                 'are %(licenses)s.', log_data)

        if 'nfs' not in self._licenses and 'cifs' not in self._licenses:
            msg = 'Neither NFS nor CIFS is licensed on %(backend)s'
            msg_args = {'backend': self._backend_name}
            LOG.error(msg, msg_args)

        return self._licenses

    @na_utils.trace
    def _start_periodic_tasks(self):

        # Run the task once in the current thread to prevent a race with
        # the first invocation of get_share_stats.
        self._update_ssc_info()

        # Start the task that updates the slow-changing storage service catalog
        ssc_periodic_task = loopingcall.FixedIntervalLoopingCall(
            self._update_ssc_info)
        ssc_periodic_task.start(interval=self.SSC_UPDATE_INTERVAL_SECONDS,
                                initial_delay=self.SSC_UPDATE_INTERVAL_SECONDS)

        # Start the task that logs autosupport (EMS) data to the controller
        ems_periodic_task = loopingcall.FixedIntervalLoopingCall(
            self._handle_ems_logging)
        ems_periodic_task.start(interval=self.AUTOSUPPORT_INTERVAL_SECONDS,
                                initial_delay=0)

        # Start the task that runs other housekeeping tasks, such as deletion
        # of previously soft-deleted storage artifacts.
        housekeeping_periodic_task = loopingcall.FixedIntervalLoopingCall(
            self._handle_housekeeping_tasks)
        housekeeping_periodic_task.start(
            interval=self.HOUSEKEEPING_INTERVAL_SECONDS, initial_delay=0)

    def _get_backend_share_name(self, share_id):
        """Get share name according to share name template."""
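        # Illustrative example, assuming the default value of
        # netapp_volume_name_template ('share_%(share_id)s'): share ID
        # 'a24e2049-2fbd-4c61-9f17-0f5d5e346a77' yields the backend volume
        # name 'share_a24e2049_2fbd_4c61_9f17_0f5d5e346a77'.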
        return self.configuration.netapp_volume_name_template % {
            'share_id': share_id.replace('-', '_')}

    def _get_backend_snapshot_name(self, snapshot_id):
        """Get snapshot name according to snapshot name template."""
        return 'share_snapshot_' + snapshot_id.replace('-', '_')

    def _get_backend_cg_snapshot_name(self, snapshot_id):
        """Get CG snapshot name according to the snapshot name template."""
        return 'share_cg_snapshot_' + snapshot_id.replace('-', '_')

    def _get_backend_qos_policy_group_name(self, share_id):
        """Get QoS policy name according to QoS policy group name template."""
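        # Illustrative example, assuming the default value of
        # netapp_qos_policy_group_name_template ('qos_share_%(share_id)s'):
        # share ID 'a24e2049-2fbd-4c61-9f17-0f5d5e346a77' yields the policy
        # group name 'qos_share_a24e2049_2fbd_4c61_9f17_0f5d5e346a77'.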
        return self.configuration.netapp_qos_policy_group_name_template % {
            'share_id': share_id.replace('-', '_')}

    @na_utils.trace
    def _get_aggregate_space(self):
        aggregates = self._find_matching_aggregates()
        if self._have_cluster_creds:
            return self._client.get_cluster_aggregate_capacities(aggregates)
        else:
            return self._client.get_vserver_aggregate_capacities(aggregates)

    @na_utils.trace
    def _check_snaprestore_license(self):
        """Check if snaprestore license is enabled."""
        if self._have_cluster_creds:
            return 'snaprestore' in self._licenses
        else:
            # NOTE(felipe_rodrigues): workaround to find out whether the
            # backend has the license. Without cluster credentials the ONTAP
            # licenses cannot be retrieved directly, so a fake
            # "snapshot-restore-volume" request is sent; that API is only
            # available when the license exists, and the resulting error
            # indicates whether the license is installed.
            try:
                self._client.restore_snapshot(
                    "fake_%s" % uuidutils.generate_uuid(dashed=False), "")
            except netapp_api.NaApiError as e:
                no_license = 'is not licensed'
                LOG.debug('Fake restore_snapshot request failed: %s', e)
                return not (e.code == netapp_api.EAPIERROR and
                            no_license in e.message)

            # since it passed an empty snapshot, it should never get here
            msg = _("Caught an unexpected behavior: the fake restore to "
                    "snapshot request using 'fake' volume and empty string "
                    "snapshot as argument has not failed.")
            LOG.exception(msg)
            raise exception.NetAppException(msg)

    @na_utils.trace
    def _get_aggregate_node(self, aggregate_name):
        """Get home node for the specified aggregate, or None."""
        if self._have_cluster_creds:
            return self._client.get_node_for_aggregate(aggregate_name)
        else:
            return None

    def get_default_filter_function(self):
        """Get the default filter_function string."""
        return self.DEFAULT_FILTER_FUNCTION

    def get_default_goodness_function(self):
        """Get the default goodness_function string."""
        return self.DEFAULT_GOODNESS_FUNCTION

    @na_utils.trace
    def get_share_stats(self, filter_function=None, goodness_function=None):
        """Retrieve stats info from Data ONTAP backend."""

        data = {
            'share_backend_name': self._backend_name,
            'driver_name': self.driver_name,
            'vendor_name': 'NetApp',
            'driver_version': '1.0',
            'netapp_storage_family': 'ontap_cluster',
            'storage_protocol': 'NFS_CIFS',
            'pools': self._get_pools(filter_function=filter_function,
                                     goodness_function=goodness_function),
            'share_group_stats': {
                'consistent_snapshot_support': 'host',
            },
        }

        if (self.configuration.replication_domain and
                not self.configuration.driver_handles_share_servers):
            data['replication_type'] = 'dr'
            data['replication_domain'] = self.configuration.replication_domain

        return data

    @na_utils.trace
    def get_share_server_pools(self, share_server):
        """Return list of pools related to a particular share server.

        Note that the multi-SVM cDOT driver assigns all available pools to
        each Vserver, so there is no need to filter the pools any further
        by share_server.

        :param share_server: ShareServer class instance.
        """
        return self._get_pools()

    @na_utils.trace
    def _get_pools(self, filter_function=None, goodness_function=None):
        """Retrieve list of pools available to this backend."""

        pools = []
        aggr_space = self._get_aggregate_space()
        aggregates = aggr_space.keys()

        if self._have_cluster_creds:
            # Get up-to-date node utilization metrics just once.
            self._perf_library.update_performance_cache({}, self._ssc_stats)
            qos_support = True
        else:
            qos_support = False

        netapp_flexvol_encryption = self._cluster_info.get(
            'nve_support', False)

        for aggr_name in sorted(aggregates):

            reserved_percentage = self.configuration.reserved_share_percentage

            total_capacity_gb = na_utils.round_down(float(
                aggr_space[aggr_name].get('total', 0)) / units.Gi)
            free_capacity_gb = na_utils.round_down(float(
                aggr_space[aggr_name].get('available', 0)) / units.Gi)
            allocated_capacity_gb = na_utils.round_down(float(
                aggr_space[aggr_name].get('used', 0)) / units.Gi)

            if total_capacity_gb == 0.0:
                total_capacity_gb = 'unknown'

            pool = {
                'pool_name': aggr_name,
                'filter_function': filter_function,
                'goodness_function': goodness_function,
                'total_capacity_gb': total_capacity_gb,
                'free_capacity_gb': free_capacity_gb,
                'allocated_capacity_gb': allocated_capacity_gb,
                'qos': qos_support,
                'reserved_percentage': reserved_percentage,
                'dedupe': [True, False],
                'compression': [True, False],
                'netapp_flexvol_encryption': netapp_flexvol_encryption,
                'thin_provisioning': [True, False],
                'snapshot_support': True,
                'create_share_from_snapshot_support': True,
                'revert_to_snapshot_support': self._revert_to_snapshot_support,
            }

            # Add storage service catalog data.
            pool_ssc_stats = self._ssc_stats.get(aggr_name)
            if pool_ssc_stats:
                pool.update(pool_ssc_stats)

            # Add utilization info, or nominal value if not available.
            utilization = self._perf_library.get_node_utilization_for_pool(
                aggr_name)
            pool['utilization'] = na_utils.round_down(utilization)

            pools.append(pool)

        return pools

    @na_utils.trace
    def _handle_ems_logging(self):
        """Build and send an EMS log message."""
        self._client.send_ems_log_message(self._build_ems_log_message_0())
        self._client.send_ems_log_message(self._build_ems_log_message_1())

    def _build_base_ems_log_message(self):
        """Construct EMS Autosupport log message common to all events."""

        ems_log = {
            'computer-name': socket.gethostname() or 'Manila_node',
            'event-source': 'Manila driver %s' % self.driver_name,
            'app-version': self._app_version,
            'category': 'provisioning',
            'log-level': '5',
            'auto-support': 'false',
        }
        return ems_log

    @na_utils.trace
    def _build_ems_log_message_0(self):
        """Construct EMS Autosupport log message with deployment info."""

        ems_log = self._build_base_ems_log_message()
        ems_log.update({
            'event-id': '0',
            'event-description': 'OpenStack Manila connected to cluster node',
        })
        return ems_log

    @na_utils.trace
    def _build_ems_log_message_1(self):
        """Construct EMS Autosupport log message with storage pool info."""

        message = self._get_ems_pool_info()

        ems_log = self._build_base_ems_log_message()
        ems_log.update({
            'event-id': '1',
            'event-description': json.dumps(message),
        })
        return ems_log

    def _get_ems_pool_info(self):
        raise NotImplementedError()

    @na_utils.trace
    def _handle_housekeeping_tasks(self):
        """Handle various cleanup activities."""

    def _find_matching_aggregates(self):
        """Find all aggregates that match the configured pattern."""
        raise NotImplementedError()

    @na_utils.trace
    def _get_helper(self, share):
        """Return the protocol helper that implements the share protocol."""
        share_protocol = share['share_proto'].lower()

        if share_protocol not in self.SUPPORTED_PROTOCOLS:
            err_msg = _("Invalid NAS protocol supplied: %s.") % share_protocol
            raise exception.NetAppException(err_msg)

        self._check_license_for_protocol(share_protocol)

        if share_protocol == 'nfs':
            return nfs_cmode.NetAppCmodeNFSHelper()
        elif share_protocol == 'cifs':
            return cifs_cmode.NetAppCmodeCIFSHelper()

    @na_utils.trace
    def _check_license_for_protocol(self, share_protocol):
        """Validates protocol license if cluster APIs are accessible."""
        if not self._have_cluster_creds:
            return

        if share_protocol.lower() not in self._licenses:
            current_licenses = self._get_licenses()
            if share_protocol.lower() not in current_licenses:
                msg_args = {
                    'protocol': share_protocol,
                    'host': self.configuration.netapp_server_hostname
                }
                msg = _('The protocol %(protocol)s is not licensed on '
                        'controller %(host)s') % msg_args
                LOG.error(msg)
                raise exception.NetAppException(msg)

    @na_utils.trace
    def get_pool(self, share):
        pool = share_utils.extract_host(share['host'], level='pool')
        if pool:
            return pool

        share_name = self._get_backend_share_name(share['id'])
        return self._client.get_aggregate_for_volume(share_name)

    @na_utils.trace
    def create_share(self, context, share, share_server):
        """Creates new share."""
        vserver, vserver_client = self._get_vserver(share_server=share_server)
        self._allocate_container(share, vserver, vserver_client)
        return self._create_export(share, share_server, vserver,
                                   vserver_client)

    @na_utils.trace
    def create_share_from_snapshot(self, context, share, snapshot,
                                   share_server=None):
        """Creates new share from snapshot."""
        vserver, vserver_client = self._get_vserver(share_server=share_server)
        self._allocate_container_from_snapshot(
            share, snapshot, vserver, vserver_client)
        return self._create_export(share, share_server, vserver,
                                   vserver_client)

    @na_utils.trace
    def _allocate_container(self, share, vserver, vserver_client,
                            replica=False):
        """Create new share on aggregate."""
        share_name = self._get_backend_share_name(share['id'])

        # Get Data ONTAP aggregate name as pool name.
        pool_name = share_utils.extract_host(share['host'], level='pool')
        if pool_name is None:
            msg = _("Pool is not available in the share host field.")
            raise exception.InvalidHost(reason=msg)

        provisioning_options = self._get_provisioning_options_for_share(
            share, vserver, replica=replica)

        if replica:
            # If this volume is intended to be a replication destination,
            # create it as the 'data-protection' type
            provisioning_options['volume_type'] = 'dp'

        hide_snapdir = provisioning_options.pop('hide_snapdir')

        LOG.debug('Creating share %(share)s on pool %(pool)s with '
                  'provisioning options %(options)s',
                  {'share': share_name, 'pool': pool_name,
                   'options': provisioning_options})
        vserver_client.create_volume(
            pool_name, share_name, share['size'],
            snapshot_reserve=self.configuration.
            netapp_volume_snapshot_reserve_percent, **provisioning_options)

        if hide_snapdir:
            self._apply_snapdir_visibility(
                hide_snapdir, share_name, vserver_client)

    def _apply_snapdir_visibility(
            self, hide_snapdir, share_name, vserver_client):

        LOG.debug('Applying snapshot visibility according to hide_snapdir '
                  'value of %(hide_snapdir)s on share %(share)s.',
                  {'hide_snapdir': hide_snapdir, 'share': share_name})

        vserver_client.set_volume_snapdir_access(share_name, hide_snapdir)

    @na_utils.trace
    def _remap_standard_boolean_extra_specs(self, extra_specs):
        """Replace standard boolean extra specs with NetApp-specific ones."""
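        # For example, a share type spec of dedupe='true' is rewritten to
        # netapp:dedup='true' and the standard key is removed, so the rest of
        # the driver only has to handle the NetApp-qualified names.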
        specs = copy.deepcopy(extra_specs)
        for (key, netapp_key) in self.STANDARD_BOOLEAN_EXTRA_SPECS_MAP.items():
            if key in specs:
                bool_value = share_types.parse_boolean_extra_spec(key,
                                                                  specs[key])
                specs[netapp_key] = 'true' if bool_value else 'false'
                del specs[key]
        return specs

    @na_utils.trace
    def _check_extra_specs_validity(self, share, extra_specs):
        """Check if the extra_specs have valid values."""
        self._check_boolean_extra_specs_validity(
            share, extra_specs, list(self.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP))
        self._check_string_extra_specs_validity(share, extra_specs)

    @na_utils.trace
    def _check_string_extra_specs_validity(self, share, extra_specs):
        """Check if the string_extra_specs have valid values."""
        if 'netapp:max_files' in extra_specs:
            self._check_if_max_files_is_valid(share,
                                              extra_specs['netapp:max_files'])

    @na_utils.trace
    def _check_if_max_files_is_valid(self, share, value):
        """Check if max_files has a valid value."""
        if int(value) < 0:
            args = {'value': value, 'key': 'netapp:max_files',
                    'type_id': share['share_type_id'], 'share_id': share['id']}
            msg = _('Invalid value "%(value)s" for extra_spec "%(key)s" '
                    'in share_type %(type_id)s for share %(share_id)s.')
            raise exception.NetAppException(msg % args)

    @na_utils.trace
    def _check_boolean_extra_specs_validity(self, share, specs,
                                            keys_of_interest):
        """Check if the boolean_extra_specs have valid values."""
        # cDOT compression requires deduplication.
        dedup = specs.get('netapp:dedup', None)
        compression = specs.get('netapp:compression', None)
        if dedup is not None and compression is not None:
            if dedup.lower() == 'false' and compression.lower() == 'true':
                spec = {'netapp:dedup': dedup,
                        'netapp:compression': compression}
                type_id = share['share_type_id']
                share_id = share['id']
                args = {'type_id': type_id, 'share_id': share_id, 'spec': spec}
                msg = _('Invalid combination of extra_specs in share_type '
                        '%(type_id)s for share %(share_id)s: %(spec)s: '
                        'deduplication must be enabled in order for '
                        'compression to be enabled.')
                raise exception.Invalid(msg % args)

        # Extra spec values must be (ignoring case) 'true' or 'false'.
        for key in keys_of_interest:
            value = specs.get(key)
            if value is not None and value.lower() not in ['true', 'false']:
                type_id = share['share_type_id']
                share_id = share['id']
                arg_map = {'value': value, 'key': key, 'type_id': type_id,
                           'share_id': share_id}
                msg = _('Invalid value "%(value)s" for extra_spec "%(key)s" '
                        'in share_type %(type_id)s for share %(share_id)s.')
                raise exception.Invalid(msg % arg_map)

    @na_utils.trace
    def _get_boolean_provisioning_options(self, specs, boolean_specs_map):
        """Given extra specs, return corresponding client library kwargs.

        Build a full set of client library provisioning kwargs, filling in a
        default value if an explicit value has not been supplied via a
        corresponding extra spec.  Boolean extra spec values are "true" or
        "false", with missing specs treated as "false".  Provisioning kwarg
        values are True or False.
        """
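        # For example, given specs {'netapp:dedup': 'true'} and the class's
        # BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP, this returns kwargs such as
        # {'dedup_enabled': True, 'thin_provisioned': False,
        #  'compression_enabled': False, 'split': False,
        #  'hide_snapdir': False}, since unspecified specs default to 'false'.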
        # Extract the extra spec keys of concern and their corresponding
        # kwarg keys as lists.
        keys_of_interest = list(boolean_specs_map)
        provisioning_args = [boolean_specs_map[key]
                             for key in keys_of_interest]
        # Set missing spec values to 'false'
        for key in keys_of_interest:
            if key not in specs:
                specs[key] = 'false'
        # Build a list of Boolean provisioning arguments from the string
        # equivalents in the spec values.
        provisioning_values = [specs[key].lower() == 'true' for key in
                               keys_of_interest]
        # Combine the list of provisioning args and the list of provisioning
        # values into a dictionary suitable for use as kwargs when invoking
        # provisioning methods from the client API library.
        return dict(zip(provisioning_args, provisioning_values))

    @na_utils.trace
    def _get_string_provisioning_options(self, specs, string_specs_map):
        """Given extra specs, return corresponding client library kwargs.

        Build a full set of client library provisioning kwargs, filling in a
        default value if an explicit value has not been supplied via a
        corresponding extra spec.
        """
        # Extract the extra spec keys of concern and their corresponding
        # kwarg keys as lists.
        keys_of_interest = list(string_specs_map)
        provisioning_args = [string_specs_map[key]
                             for key in keys_of_interest]
        # Set missing spec values to None
        for key in keys_of_interest:
            if key not in specs:
                specs[key] = None
        provisioning_values = [specs[key] for key in keys_of_interest]

        # Combine the list of provisioning args and the list of provisioning
        # values into a dictionary suitable for use as kwargs when invoking
        # provisioning methods from the client API library.
        return dict(zip(provisioning_args, provisioning_values))

    def _get_normalized_qos_specs(self, extra_specs):
        if not extra_specs.get('qos'):
            return {}

        normalized_qos_specs = {
            self.QOS_SPECS[key.lower()]: value
            for key, value in extra_specs.items()
            if self.QOS_SPECS.get(key.lower())
        }
        if not normalized_qos_specs:
            msg = _("The extra-spec 'qos' is set to True, but no netapp "
                    "supported qos-specs have been specified in the share "
                    "type. Cannot provision a QoS policy. Specify any of the "
                    "following extra-specs and try again: %s")
            raise exception.NetAppException(msg % list(self.QOS_SPECS))

        # TODO(gouthamr): Modify check when throughput floors are allowed
        if len(normalized_qos_specs) > 1:
            msg = _('Only one NetApp QoS spec can be set at a time. '
                    'Specified QoS limits: %s')
            raise exception.NetAppException(msg % normalized_qos_specs)

        return normalized_qos_specs

    def _get_max_throughput(self, share_size, qos_specs):
        # QoS limits are exclusive of one another.
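        # For example, qos_specs {'maxiops': '1000'} yields '1000iops', while
        # {'maxiopspergib': '100'} on a 10 GiB share yields '1000iops' and
        # {'maxbps': '2000000'} yields '2000000B/s'.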
        if 'maxiops' in qos_specs:
            return '%siops' % qos_specs['maxiops']
        elif 'maxiopspergib' in qos_specs:
            return '%siops' % six.text_type(
                int(qos_specs['maxiopspergib']) * int(share_size))
        elif 'maxbps' in qos_specs:
            return '%sB/s' % qos_specs['maxbps']
        elif 'maxbpspergib' in qos_specs:
            return '%sB/s' % six.text_type(
                int(qos_specs['maxbpspergib']) * int(share_size))

    @na_utils.trace
    def _create_qos_policy_group(self, share, vserver, qos_specs):
        max_throughput = self._get_max_throughput(share['size'], qos_specs)
        qos_policy_group_name = self._get_backend_qos_policy_group_name(
            share['id'])
        self._client.qos_policy_group_create(qos_policy_group_name, vserver,
                                             max_throughput=max_throughput)
        return qos_policy_group_name

    @na_utils.trace
    def _get_provisioning_options_for_share(self, share, vserver,
                                            replica=False):
        """Return provisioning options from a share.

        Starting with a share, this method gets the extra specs, rationalizes
        NetApp vs. standard extra spec values, ensures their validity, and
        returns them in a form suitable for passing to various API client
        methods.
        """
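        # For example, a share type with netapp:snapshot_policy='default',
        # the qos extra spec enabled, and netapp:maxiops='1000' yields
        # provisioning options including snapshot_policy='default' plus a
        # qos_policy_group entry naming the policy group created for this
        # share (QoS policy creation is skipped for replicas).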
        extra_specs = share_types.get_extra_specs_from_share(share)
        extra_specs = self._remap_standard_boolean_extra_specs(extra_specs)
        self._check_extra_specs_validity(share, extra_specs)
        provisioning_options = self._get_provisioning_options(extra_specs)
        qos_specs = self._get_normalized_qos_specs(extra_specs)
        if qos_specs and not replica:
            qos_policy_group = self._create_qos_policy_group(
                share, vserver, qos_specs)
            provisioning_options['qos_policy_group'] = qos_policy_group
        return provisioning_options

    @na_utils.trace
    def _get_provisioning_options(self, specs):
        """Return a merged result of string and boolean provisioning options."""
        boolean_args = self._get_boolean_provisioning_options(
            specs, self.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP)

        string_args = self._get_string_provisioning_options(
            specs, self.STRING_QUALIFIED_EXTRA_SPECS_MAP)
        result = boolean_args.copy()
        result.update(string_args)

        result['encrypt'] = self._get_nve_option(specs)

        return result

    def _get_nve_option(self, specs):
        if 'netapp_flexvol_encryption' in specs:
            nve = specs['netapp_flexvol_encryption'].lower() == 'true'
        else:
            nve = False

        return nve

    @na_utils.trace
    def _check_aggregate_extra_specs_validity(self, aggregate_name, specs):

        for specs_key in ('netapp_disk_type', 'netapp_raid_type'):
            aggr_value = self._ssc_stats.get(aggregate_name, {}).get(specs_key)
            specs_value = specs.get(specs_key)

            if aggr_value and specs_value and aggr_value != specs_value:
                msg = _('Invalid value "%(value)s" for extra_spec "%(key)s" '
                        'in aggregate %(aggr)s.')
                msg_args = {
                    'value': specs_value,
                    'key': specs_key,
                    'aggr': aggregate_name
                }
                raise exception.NetAppException(msg % msg_args)

    @na_utils.trace
    def _allocate_container_from_snapshot(
            self, share, snapshot, vserver, vserver_client,
            snapshot_name_func=_get_backend_snapshot_name):
        """Clones existing share."""
        share_name = self._get_backend_share_name(share['id'])
        parent_share_name = self._get_backend_share_name(snapshot['share_id'])
        if snapshot.get('provider_location') is None:
            parent_snapshot_name = snapshot_name_func(self, snapshot['id'])
        else:
            parent_snapshot_name = snapshot['provider_location']

        provisioning_options = self._get_provisioning_options_for_share(
            share, vserver)

        hide_snapdir = provisioning_options.pop('hide_snapdir')

        LOG.debug('Creating share from snapshot %s', snapshot['id'])
        vserver_client.create_volume_clone(share_name, parent_share_name,
                                           parent_snapshot_name,
                                           **provisioning_options)
        if share['size'] > snapshot['size']:
            vserver_client.set_volume_size(share_name, share['size'])

        if hide_snapdir:
            self._apply_snapdir_visibility(
                hide_snapdir, share_name, vserver_client)

    @na_utils.trace
    def _share_exists(self, share_name, vserver_client):
        return vserver_client.volume_exists(share_name)

    @na_utils.trace
    def delete_share(self, context, share, share_server=None):
        """Deletes share."""
        try:
            vserver, vserver_client = self._get_vserver(
                share_server=share_server)
        except (exception.InvalidInput,
                exception.VserverNotSpecified,
                exception.VserverNotFound) as error:
            LOG.warning("Could not determine share server for share being "
                        "deleted: %(share)s. Deletion of share record "
                        "will proceed anyway. Error: %(error)s",
                        {'share': share['id'], 'error': error})
            return

        share_name = self._get_backend_share_name(share['id'])
        if self._share_exists(share_name, vserver_client):
            self._remove_export(share, vserver_client)
            self._deallocate_container(share_name, vserver_client)
            qos_policy_for_share = self._get_backend_qos_policy_group_name(
                share['id'])
            self._client.mark_qos_policy_group_for_deletion(
                qos_policy_for_share)
        else:
            LOG.info("Share %s does not exist.", share['id'])

    @na_utils.trace
    def _deallocate_container(self, share_name, vserver_client):
        """Free share space."""
        vserver_client.unmount_volume(share_name, force=True)
        vserver_client.offline_volume(share_name)
        vserver_client.delete_volume(share_name)

    @na_utils.trace
    def _create_export(self, share, share_server, vserver, vserver_client,
                       clear_current_export_policy=True):
        """Creates NAS storage."""
        helper = self._get_helper(share)
        helper.set_client(vserver_client)
        share_name = self._get_backend_share_name(share['id'])

        interfaces = vserver_client.get_network_interfaces(
            protocols=[share['share_proto']])

        if not interfaces:
            msg = _('Cannot find network interfaces for Vserver %(vserver)s '
                    'and protocol %(proto)s.')
            msg_args = {'vserver': vserver, 'proto': share['share_proto']}
            raise exception.NetAppException(msg % msg_args)

        # Get LIF addresses with metadata
        export_addresses = self._get_export_addresses_with_metadata(
            share, share_server, interfaces)

        # Create the share and get a callback for generating export locations
        callback = helper.create_share(
            share, share_name,
            clear_current_export_policy=clear_current_export_policy)

        # Generate export locations using addresses, metadata and callback
        export_locations = [
            {
                'path': callback(export_address),
                'is_admin_only': metadata.pop('is_admin_only', False),
                'metadata': metadata,
            }
            for export_address, metadata
            in copy.deepcopy(export_addresses).items()
        ]

        # Sort the export locations to report preferred paths first
        export_locations = self._sort_export_locations_by_preferred_paths(
            export_locations)

        return export_locations

    @na_utils.trace
    def _get_export_addresses_with_metadata(self, share, share_server,
                                            interfaces):
        """Return interface addresses with locality and other metadata."""
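        # For example, the returned mapping might look like
        # {'10.0.0.5': {'is_admin_only': False, 'preferred': True},
        #  '10.0.1.5': {'is_admin_only': True, 'preferred': False}},
        # where 'preferred' marks LIFs on the aggregate's home node.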

        # Get home node so we can identify preferred paths
        aggregate_name = share_utils.extract_host(share['host'], level='pool')
        home_node = self._get_aggregate_node(aggregate_name)

        # Get admin LIF addresses so we can identify admin export locations
        admin_addresses = self._get_admin_addresses_for_share_server(
            share_server)

        addresses = {}
        for interface in interfaces:

            address = interface['address']
            is_admin_only = address in admin_addresses

            if home_node:
                preferred = interface.get('home-node') == home_node
            else:
                preferred = False

            addresses[address] = {
                'is_admin_only': is_admin_only,
                'preferred': preferred,
            }

        return addresses

    @na_utils.trace
    def _get_admin_addresses_for_share_server(self, share_server):

        if not share_server:
            return []

        admin_addresses = []
        for network_allocation in share_server.get('network_allocations'):
            if network_allocation['label'] == 'admin':
                admin_addresses.append(network_allocation['ip_address'])

        return admin_addresses

    @na_utils.trace
    def _sort_export_locations_by_preferred_paths(self, export_locations):
        """Sort the export locations to report preferred paths first."""
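        # For example, given locations whose metadata contains
        # {'preferred': False} and {'preferred': True}, the preferred entry
        # is returned first; locations without the flag are treated as not
        # preferred.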

        sort_key = lambda location: location.get(
            'metadata', {}).get('preferred') is not True

        return sorted(export_locations, key=sort_key)

    @na_utils.trace
    def _remove_export(self, share, vserver_client):
        """Deletes NAS storage."""
        helper = self._get_helper(share)
        helper.set_client(vserver_client)
        share_name = self._get_backend_share_name(share['id'])
        target = helper.get_target(share)
        # Share may be in error state, so there's no share and target.
        if target:
            helper.delete_share(share, share_name)

    @na_utils.trace
    def create_snapshot(self, context, snapshot, share_server=None):
        """Creates a snapshot of a share."""
        vserver, vserver_client = self._get_vserver(share_server=share_server)
        share_name = self._get_backend_share_name(snapshot['share_id'])
        snapshot_name = self._get_backend_snapshot_name(snapshot['id'])
        LOG.debug('Creating snapshot %s', snapshot_name)
        vserver_client.create_snapshot(share_name, snapshot_name)
        return {'provider_location': snapshot_name}

    def revert_to_snapshot(self, context, snapshot, share_server=None):
        """Reverts a share (in place) to the specified snapshot."""
        vserver, vserver_client = self._get_vserver(share_server=share_server)
        share_name = self._get_backend_share_name(snapshot['share_id'])
        snapshot_name = (snapshot.get('provider_location') or
                         self._get_backend_snapshot_name(snapshot['id']))
        LOG.debug('Restoring snapshot %s', snapshot_name)
        vserver_client.restore_snapshot(share_name, snapshot_name)

    @na_utils.trace
    def delete_snapshot(self, context, snapshot, share_server=None,
                        snapshot_name=None):
        """Deletes a snapshot of a share."""
        try:
            vserver, vserver_client = self._get_vserver(
                share_server=share_server)
        except (exception.InvalidInput,
                exception.VserverNotSpecified,
                exception.VserverNotFound) as error:
            LOG.warning("Could not determine share server for snapshot "
                        "being deleted: %(snap)s. Deletion of snapshot "
                        "record will proceed anyway. Error: %(error)s",
                        {'snap': snapshot['id'], 'error': error})
            return

        share_name = self._get_backend_share_name(snapshot['share_id'])
        snapshot_name = (snapshot.get('provider_location') or snapshot_name or
                         self._get_backend_snapshot_name(snapshot['id']))

        try:
            self._delete_snapshot(vserver_client, share_name, snapshot_name)
        except exception.SnapshotResourceNotFound:
            msg = ("Snapshot %(snap)s does not exist on share %(share)s.")
            msg_args = {'snap': snapshot_name, 'share': share_name}
            LOG.info(msg, msg_args)

    def _delete_snapshot(self, vserver_client, share_name, snapshot_name):
        """Deletes a backend snapshot, handling busy snapshots as needed."""

        backend_snapshot = vserver_client.get_snapshot(share_name,
                                                       snapshot_name)

        LOG.debug('Deleting snapshot %(snap)s for share %(share)s.',
                  {'snap': snapshot_name, 'share': share_name})

        if not backend_snapshot['busy']:
            vserver_client.delete_snapshot(share_name, snapshot_name)

        elif backend_snapshot['owners'] == {'volume clone'}:
            # Snapshots are locked by clone(s), so split clone and soft delete
            snapshot_children = vserver_client.get_clone_children_for_snapshot(
                share_name, snapshot_name)
            for snapshot_child in snapshot_children:
                vserver_client.split_volume_clone(snapshot_child['name'])

            vserver_client.soft_delete_snapshot(share_name, snapshot_name)

        else:
            raise exception.ShareSnapshotIsBusy(snapshot_name=snapshot_name)

    @na_utils.trace
    def manage_existing(self, share, driver_options, share_server=None):
        vserver, vserver_client = self._get_vserver(share_server=share_server)
        share_size = self._manage_container(share, vserver, vserver_client)
        export_locations = self._create_export(share, share_server, vserver,
                                               vserver_client)
        return {'size': share_size, 'export_locations': export_locations}

    @na_utils.trace
    def unmanage(self, share, share_server=None):
        pass

    @na_utils.trace
    def _manage_container(self, share, vserver, vserver_client):
        """Bring existing volume under management as a share."""

        protocol_helper = self._get_helper(share)
        protocol_helper.set_client(vserver_client)

        volume_name = protocol_helper.get_share_name_for_share(share)
        if not volume_name:
            msg = _('Volume could not be determined from export location '
                    '%(export)s.')
            msg_args = {'export': share['export_location']}
            raise exception.ManageInvalidShare(reason=msg % msg_args)

        share_name = self._get_backend_share_name(share['id'])
        aggregate_name = share_utils.extract_host(share['host'], level='pool')

        # Get existing volume info
        volume = vserver_client.get_volume_to_manage(aggregate_name,
                                                     volume_name)

        if not volume:
            msg = _('Volume %(volume)s not found on aggregate %(aggr)s.')
            msg_args = {'volume': volume_name, 'aggr': aggregate_name}
            raise exception.ManageInvalidShare(reason=msg % msg_args)

        # When calculating the size, round up to the next GB.
        volume_size = int(math.ceil(float(volume['size']) / units.Gi))

        # Validate extra specs
        extra_specs = share_types.get_extra_specs_from_share(share)
        extra_specs = self._remap_standard_boolean_extra_specs(extra_specs)
        try:
            self._check_extra_specs_validity(share, extra_specs)
            self._check_aggregate_extra_specs_validity(aggregate_name,
                                                       extra_specs)
        except exception.ManilaException as ex:
            raise exception.ManageExistingShareTypeMismatch(
                reason=six.text_type(ex))

        # Ensure volume is manageable
        self._validate_volume_for_manage(volume, vserver_client)

        provisioning_options = self._get_provisioning_options(extra_specs)

        debug_args = {
            'share': share_name,
            'aggr': aggregate_name,
            'options': provisioning_options
        }
        LOG.debug('Managing share %(share)s on aggregate %(aggr)s with '
                  'provisioning options %(options)s', debug_args)

        # Rename & remount volume on new path
        vserver_client.unmount_volume(volume_name)
        vserver_client.set_volume_name(volume_name, share_name)
        vserver_client.mount_volume(share_name)

        qos_policy_group_name = self._modify_or_create_qos_for_existing_share(
            share, extra_specs, vserver, vserver_client)
        if qos_policy_group_name:
            provisioning_options['qos_policy_group'] = qos_policy_group_name

        # Modify volume to match extra specs
        vserver_client.modify_volume(aggregate_name, share_name,
                                     **provisioning_options)

        # Save original volume info to private storage
        original_data = {
            'original_name': volume['name'],
            'original_junction_path': volume['junction-path']
        }
        self.private_storage.update(share['id'], original_data)

        return volume_size

    @na_utils.trace
    def _validate_volume_for_manage(self, volume, vserver_client):
        """Ensure volume is a candidate for becoming a share."""

        # Check volume info, extra specs validity
        if volume['type'] != 'rw' or volume['style'] != 'flex':
            msg = _('Volume %(volume)s must be a read-write flexible volume.')
            msg_args = {'volume': volume['name']}
            raise exception.ManageInvalidShare(reason=msg % msg_args)

        if vserver_client.volume_has_luns(volume['name']):
            msg = _('Volume %(volume)s must not contain LUNs.')
            msg_args = {'volume': volume['name']}
            raise exception.ManageInvalidShare(reason=msg % msg_args)

        if vserver_client.volume_has_junctioned_volumes(volume['name']):
            msg = _('Volume %(volume)s must not have junctioned volumes.')
            msg_args = {'volume': volume['name']}
            raise exception.ManageInvalidShare(reason=msg % msg_args)

        if vserver_client.volume_has_snapmirror_relationships(volume):
            msg = _('Volume %(volume)s must not be in any snapmirror '
                    'relationships.')
            msg_args = {'volume': volume['name']}
            raise exception.ManageInvalidShare(reason=msg % msg_args)

    @na_utils.trace
    def manage_existing_snapshot(
            self, snapshot, driver_options, share_server=None):
        """Brings an existing snapshot under Manila management."""
        vserver, vserver_client = self._get_vserver(share_server=share_server)
        share_name = self._get_backend_share_name(snapshot['share_id'])
        existing_snapshot_name = snapshot.get('provider_location')
        new_snapshot_name = self._get_backend_snapshot_name(snapshot['id'])

        if not existing_snapshot_name:
            msg = _('provider_location not specified.')
            raise exception.ManageInvalidShareSnapshot(reason=msg)

        # Get the volume containing the snapshot so we can report its size
        try:
            volume = vserver_client.get_volume(share_name)
        except (netapp_api.NaApiError,
                exception.StorageResourceNotFound,
                exception.NetAppException):
            msg = _('Could not determine snapshot %(snap)s size from '
                    'volume %(vol)s.')
            msg_args = {'snap': existing_snapshot_name, 'vol': share_name}
            LOG.exception(msg, msg_args)
            raise exception.ShareNotFound(share_id=snapshot['share_id'])

        # Ensure there aren't any mirrors on this volume
        if vserver_client.volume_has_snapmirror_relationships(volume):
 1172             msg = _('Share %(vol)s has SnapMirror relationships.')
 1173             msg_args = {'vol': share_name}
 1174             raise exception.ManageInvalidShareSnapshot(reason=msg % msg_args)
 1175 
 1176         # Rename snapshot
 1177         try:
 1178             vserver_client.rename_snapshot(share_name,
 1179                                            existing_snapshot_name,
 1180                                            new_snapshot_name)
 1181         except netapp_api.NaApiError:
 1182             msg = _('Could not rename snapshot %(snap)s in share %(vol)s.')
 1183             msg_args = {'snap': existing_snapshot_name, 'vol': share_name}
 1184             raise exception.ManageInvalidShareSnapshot(reason=msg % msg_args)
 1185 
 1186         # Save original snapshot info to private storage
 1187         original_data = {'original_name': existing_snapshot_name}
 1188         self.private_storage.update(snapshot['id'], original_data)
 1189 
 1190         # When calculating the size, round up to the next GB.
 1191         size = int(math.ceil(float(volume['size']) / units.Gi))
 1192 
 1193         return {'size': size, 'provider_location': new_snapshot_name}
 1194 
 1195     @na_utils.trace
 1196     def unmanage_snapshot(self, snapshot, share_server=None):
 1197         """Removes the specified snapshot from Manila management."""
 1198 
 1199     @na_utils.trace
 1200     def create_consistency_group_from_cgsnapshot(
 1201             self, context, cg_dict, cgsnapshot_dict, share_server=None):
 1202         """Creates a consistency group from an existing CG snapshot."""
 1203         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1204 
 1205         # Ensure there is something to do
 1206         if not cgsnapshot_dict['share_group_snapshot_members']:
 1207             return None, None
 1208 
 1209         clone_list = self._collate_cg_snapshot_info(cg_dict, cgsnapshot_dict)
 1210         share_update_list = []
 1211 
 1212         LOG.debug('Creating consistency group from CG snapshot %s.',
 1213                   cgsnapshot_dict['id'])
 1214 
 1215         for clone in clone_list:
 1216 
 1217             self._allocate_container_from_snapshot(
 1218                 clone['share'], clone['snapshot'], vserver, vserver_client,
 1219                 NetAppCmodeFileStorageLibrary._get_backend_cg_snapshot_name)
 1220 
 1221             export_locations = self._create_export(clone['share'],
 1222                                                    share_server,
 1223                                                    vserver,
 1224                                                    vserver_client)
 1225             share_update_list.append({
 1226                 'id': clone['share']['id'],
 1227                 'export_locations': export_locations,
 1228             })
 1229 
 1230         return None, share_update_list
 1231 
 1232     def _collate_cg_snapshot_info(self, cg_dict, cgsnapshot_dict):
 1233         """Collate the data for a clone of a CG snapshot.
 1234 
 1235         Given two data structures, a CG snapshot (cgsnapshot_dict) and a new
 1236         CG to be cloned from the snapshot (cg_dict), match up both structures
 1237         into a list of dicts (share & snapshot) suitable for use by existing
 1238         driver methods that clone individual share snapshots.
 1239         """
 1240 
 1241         clone_list = list()
 1242 
 1243         for share in cg_dict['shares']:
 1244 
 1245             clone_info = {'share': share}
 1246 
 1247             for cgsnapshot_member in (
 1248                     cgsnapshot_dict['share_group_snapshot_members']):
 1249                 if (share['source_share_group_snapshot_member_id'] ==
 1250                         cgsnapshot_member['id']):
 1251                     clone_info['snapshot'] = {
 1252                         'share_id': cgsnapshot_member['share_id'],
 1253                         'id': cgsnapshot_dict['id'],
 1254                         'size': cgsnapshot_member['size'],
 1255                     }
 1256                     break
 1257 
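                  # This else clause belongs to the for loop above; it runs
                  # only when no matching snapshot member was found for the
                  # share being cloned.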
 1258             else:
 1259                 msg = _("Invalid data supplied for creating consistency group "
 1260                         "from CG snapshot %s.") % cgsnapshot_dict['id']
 1261                 raise exception.InvalidShareGroup(reason=msg)
 1262 
 1263             clone_list.append(clone_info)
 1264 
 1265         return clone_list
 1266 
 1267     @na_utils.trace
 1268     def create_cgsnapshot(self, context, snap_dict, share_server=None):
 1269         """Creates a consistency group snapshot."""
 1270         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1271 
 1272         share_names = [self._get_backend_share_name(member['share_id'])
 1273                        for member in
 1274                        snap_dict.get('share_group_snapshot_members', [])]
 1275         snapshot_name = self._get_backend_cg_snapshot_name(snap_dict['id'])
 1276 
 1277         if share_names:
 1278             LOG.debug('Creating CG snapshot %s.', snapshot_name)
 1279             vserver_client.create_cg_snapshot(share_names, snapshot_name)
 1280 
 1281         return None, None
 1282 
 1283     @na_utils.trace
 1284     def delete_cgsnapshot(self, context, snap_dict, share_server=None):
 1285         """Deletes a consistency group snapshot."""
 1286         try:
 1287             vserver, vserver_client = self._get_vserver(
 1288                 share_server=share_server)
 1289         except (exception.InvalidInput,
 1290                 exception.VserverNotSpecified,
 1291                 exception.VserverNotFound) as error:
 1292             LOG.warning("Could not determine share server for CG snapshot "
 1293                         "being deleted: %(snap)s. Deletion of CG snapshot "
 1294                         "record will proceed anyway. Error: %(error)s",
 1295                         {'snap': snap_dict['id'], 'error': error})
 1296             return None, None
 1297 
 1298         share_names = [self._get_backend_share_name(member['share_id'])
 1299                        for member in (
 1300                            snap_dict.get('share_group_snapshot_members', []))]
 1301         snapshot_name = self._get_backend_cg_snapshot_name(snap_dict['id'])
 1302 
 1303         for share_name in share_names:
 1304             try:
 1305                 self._delete_snapshot(
 1306                     vserver_client, share_name, snapshot_name)
 1307             except exception.SnapshotResourceNotFound:
 1308                 msg = ("Snapshot %(snap)s does not exist on share "
 1309                        "%(share)s.")
 1310                 msg_args = {'snap': snapshot_name, 'share': share_name}
 1311                 LOG.info(msg, msg_args)
 1312                 continue
 1313 
 1314         return None, None
 1315 
 1316     @staticmethod
 1317     def _is_group_cg(context, share_group):
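              # Treat the group as a consistency group only when its
              # consistent_snapshot_support capability is set to 'host'.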
 1318         return 'host' == share_group.consistent_snapshot_support
 1319 
 1320     @na_utils.trace
 1321     def create_group_snapshot(self, context, snap_dict, fallback_create,
 1322                               share_server=None):
 1323         share_group = snap_dict['share_group']
 1324         if self._is_group_cg(context, share_group):
 1325             return self.create_cgsnapshot(context, snap_dict,
 1326                                           share_server=share_server)
 1327         else:
 1328             return fallback_create(context, snap_dict,
 1329                                    share_server=share_server)
 1330 
 1331     @na_utils.trace
 1332     def delete_group_snapshot(self, context, snap_dict, fallback_delete,
 1333                               share_server=None):
 1334         share_group = snap_dict['share_group']
 1335         if self._is_group_cg(context, share_group):
 1336             return self.delete_cgsnapshot(context, snap_dict,
 1337                                           share_server=share_server)
 1338         else:
 1339             return fallback_delete(context, snap_dict,
 1340                                    share_server=share_server)
 1341 
 1342     @na_utils.trace
 1343     def create_group_from_snapshot(self, context, share_group,
 1344                                    snapshot_dict, fallback_create,
 1345                                    share_server=None):
 1346         share_group2 = snapshot_dict['share_group']
 1347         if self._is_group_cg(context, share_group2):
 1348             return self.create_consistency_group_from_cgsnapshot(
 1349                 context, share_group, snapshot_dict,
 1350                 share_server=share_server)
 1351         else:
 1352             return fallback_create(context, share_group, snapshot_dict,
 1353                                    share_server=share_server)
 1354 
 1355     @na_utils.trace
 1356     def _adjust_qos_policy_with_volume_resize(self, share, new_size,
 1357                                               vserver_client):
 1358         # Adjust QoS policy on a share if any
 1359         if self._have_cluster_creds:
 1360             share_name = self._get_backend_share_name(share['id'])
 1361             share_on_the_backend = vserver_client.get_volume(share_name)
 1362             qos_policy_on_share = share_on_the_backend['qos-policy-group-name']
 1363             if qos_policy_on_share is None:
 1364                 return
 1365 
 1366             extra_specs = share_types.get_extra_specs_from_share(share)
 1367             qos_specs = self._get_normalized_qos_specs(extra_specs)
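                  # Only the size-dependent QoS specs require the policy's
                  # maximum throughput to be recalculated after a resize.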
 1368             size_dependent_specs = {k: v for k, v in qos_specs.items() if k in
 1369                                     self.SIZE_DEPENDENT_QOS_SPECS}
 1370             if size_dependent_specs:
 1371                 max_throughput = self._get_max_throughput(
 1372                     new_size, size_dependent_specs)
 1373                 self._client.qos_policy_group_modify(
 1374                     qos_policy_on_share, max_throughput)
 1375 
 1376     @na_utils.trace
 1377     def extend_share(self, share, new_size, share_server=None):
 1378         """Extends size of existing share."""
 1379         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1380         share_name = self._get_backend_share_name(share['id'])
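              # Clear the filesys-size-fixed setting so the file system is
              # allowed to be resized along with the FlexVol.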
 1381         vserver_client.set_volume_filesys_size_fixed(share_name,
 1382                                                      filesys_size_fixed=False)
 1383         LOG.debug('Extending share %(name)s to %(size)s GB.',
 1384                   {'name': share_name, 'size': new_size})
 1385         vserver_client.set_volume_size(share_name, new_size)
 1386         self._adjust_qos_policy_with_volume_resize(share, new_size,
 1387                                                    vserver_client)
 1388 
 1389     @na_utils.trace
 1390     def shrink_share(self, share, new_size, share_server=None):
 1391         """Shrinks size of existing share."""
 1392         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1393         share_name = self._get_backend_share_name(share['id'])
 1394         vserver_client.set_volume_filesys_size_fixed(share_name,
 1395                                                      filesys_size_fixed=False)
 1396         LOG.debug('Shrinking share %(name)s to %(size)s GB.',
 1397                   {'name': share_name, 'size': new_size})
 1398 
 1399         try:
 1400             vserver_client.set_volume_size(share_name, new_size)
 1401         except netapp_api.NaApiError as e:
 1402             if e.code == netapp_api.EVOLOPNOTSUPP:
 1403                 msg = _('Failed to shrink share %(share_id)s. '
 1404                         'The current used space is larger than the size'
 1405                         ' requested.')
 1406                 msg_args = {'share_id': share['id']}
 1407                 LOG.error(msg, msg_args)
 1408                 raise exception.ShareShrinkingPossibleDataLoss(
 1409                     share_id=share['id'])
 1410 
 1411         self._adjust_qos_policy_with_volume_resize(
 1412             share, new_size, vserver_client)
 1413 
 1414     @na_utils.trace
 1415     def update_access(self, context, share, access_rules, add_rules,
 1416                       delete_rules, share_server=None):
 1417         """Updates access rules for a share."""
 1418         # NOTE(ameade): We do not need to add export rules to a non-active
 1419         # replica as it will fail.
 1420         replica_state = share.get('replica_state')
 1421         if (replica_state is not None and
 1422                 replica_state != constants.REPLICA_STATE_ACTIVE):
 1423             return
 1424         try:
 1425             vserver, vserver_client = self._get_vserver(
 1426                 share_server=share_server)
 1427         except (exception.InvalidInput,
 1428                 exception.VserverNotSpecified,
 1429                 exception.VserverNotFound) as error:
 1430             LOG.warning("Could not determine share server for share "
 1431                         "%(share)s during access rules update. "
 1432                         "Error: %(error)s",
 1433                         {'share': share['id'], 'error': error})
 1434             return
 1435 
 1436         share_name = self._get_backend_share_name(share['id'])
 1437         if self._share_exists(share_name, vserver_client):
 1438             helper = self._get_helper(share)
 1439             helper.set_client(vserver_client)
 1440             helper.update_access(share, share_name, access_rules)
 1441         else:
 1442             raise exception.ShareResourceNotFound(share_id=share['id'])
 1443 
 1444     def setup_server(self, network_info, metadata=None):
 1445         raise NotImplementedError()
 1446 
 1447     def teardown_server(self, server_details, security_services=None):
 1448         raise NotImplementedError()
 1449 
 1450     def get_network_allocations_number(self):
 1451         """Get number of network interfaces to be created."""
 1452         raise NotImplementedError()
 1453 
 1454     @na_utils.trace
 1455     def _update_ssc_info(self):
 1456         """Periodically runs to update Storage Service Catalog data.
 1457 
 1458         The self._ssc_stats attribute is updated with the following format.
 1459         {<aggregate_name> : {<ssc_key>: <ssc_value>}}
 1460         """
 1461         LOG.info("Updating storage service catalog information for "
 1462                  "backend '%s'", self._backend_name)
 1463 
 1464         # Work on a copy and update the ssc data atomically before returning.
 1465         ssc_stats = copy.deepcopy(self._ssc_stats)
 1466 
 1467         aggregate_names = self._find_matching_aggregates()
 1468 
 1469         # Initialize entries for each aggregate.
 1470         for aggregate_name in aggregate_names:
 1471             if aggregate_name not in ssc_stats:
 1472                 ssc_stats[aggregate_name] = {
 1473                     'netapp_aggregate': aggregate_name,
 1474                 }
 1475 
 1476         if aggregate_names:
 1477             self._update_ssc_aggr_info(aggregate_names, ssc_stats)
 1478 
 1479         self._ssc_stats = ssc_stats
 1480 
 1481     @na_utils.trace
 1482     def _update_ssc_aggr_info(self, aggregate_names, ssc_stats):
 1483         """Updates the given SSC dictionary with new disk type information.
 1484 
 1485         :param aggregate_names: The aggregates this driver cares about
 1486         :param ssc_stats: The dictionary to update
 1487         """
 1488 
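              # Aggregate queries require cluster-scoped credentials; without
              # them no additional SSC data can be gathered.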
 1489         if not self._have_cluster_creds:
 1490             return
 1491 
 1492         for aggregate_name in aggregate_names:
 1493 
 1494             aggregate = self._client.get_aggregate(aggregate_name)
 1495             hybrid = (six.text_type(aggregate.get('is-hybrid')).lower()
 1496                       if 'is-hybrid' in aggregate else None)
 1497             disk_types = self._client.get_aggregate_disk_types(aggregate_name)
 1498 
 1499             ssc_stats[aggregate_name].update({
 1500                 'netapp_raid_type': aggregate.get('raid-type'),
 1501                 'netapp_hybrid_aggregate': hybrid,
 1502                 'netapp_disk_type': disk_types,
 1503             })
 1504 
 1505     def _find_active_replica(self, replica_list):
 1506         # NOTE(ameade): Find current active replica. There can only be one
 1507         # active replica (SnapMirror source volume) at a time in cDOT.
 1508         for r in replica_list:
 1509             if r['replica_state'] == constants.REPLICA_STATE_ACTIVE:
 1510                 return r
 1511 
 1512     def _find_nonactive_replicas(self, replica_list):
 1513         """Returns a list of all except the active replica."""
 1514         return [replica for replica in replica_list
 1515                 if replica['replica_state'] != constants.REPLICA_STATE_ACTIVE]
 1516 
 1517     def create_replica(self, context, replica_list, new_replica,
 1518                        access_rules, share_snapshots, share_server=None):
 1519         """Creates the new replica on this backend and sets up SnapMirror."""
 1520         active_replica = self._find_active_replica(replica_list)
 1521         dm_session = data_motion.DataMotionSession()
 1522 
 1523         # 1. Create the destination share
 1524         dest_backend = share_utils.extract_host(new_replica['host'],
 1525                                                 level='backend_name')
 1526 
 1527         vserver = (dm_session.get_vserver_from_share(new_replica) or
 1528                    self.configuration.netapp_vserver)
 1529 
 1530         vserver_client = data_motion.get_client_for_backend(
 1531             dest_backend, vserver_name=vserver)
 1532 
 1533         self._allocate_container(new_replica, vserver, vserver_client,
 1534                                  replica=True)
 1535 
 1536         # 2. Setup SnapMirror
 1537         dm_session.create_snapmirror(active_replica, new_replica)
 1538 
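              # The new replica is reported out of sync until SnapMirror
              # completes its initial transfer; update_replica_state()
              # re-evaluates the state later.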
 1539         model_update = {
 1540             'export_locations': [],
 1541             'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC,
 1542             'access_rules_status': constants.STATUS_ACTIVE,
 1543         }
 1544 
 1545         return model_update
 1546 
 1547     def delete_replica(self, context, replica_list, replica, share_snapshots,
 1548                        share_server=None):
 1549         """Removes the replica on this backend and destroys SnapMirror."""
 1550         dm_session = data_motion.DataMotionSession()
 1551         # 1. Remove SnapMirror
 1552         dest_backend = share_utils.extract_host(replica['host'],
 1553                                                 level='backend_name')
 1554         vserver = (dm_session.get_vserver_from_share(replica) or
 1555                    self.configuration.netapp_vserver)
 1556 
 1557         # Ensure that all potential snapmirror relationships and their metadata
 1558         # involving the replica are destroyed.
 1559         for other_replica in replica_list:
 1560             if other_replica['id'] != replica['id']:
 1561                 dm_session.delete_snapmirror(other_replica, replica)
 1562                 dm_session.delete_snapmirror(replica, other_replica)
 1563 
 1564         # 2. Delete share
 1565         vserver_client = data_motion.get_client_for_backend(
 1566             dest_backend, vserver_name=vserver)
 1567         share_name = self._get_backend_share_name(replica['id'])
 1568         if self._share_exists(share_name, vserver_client):
 1569             self._deallocate_container(share_name, vserver_client)
 1570 
 1571     def update_replica_state(self, context, replica_list, replica,
 1572                              access_rules, share_snapshots, share_server=None):
 1573         """Returns the status of the given replica on this backend."""
 1574         active_replica = self._find_active_replica(replica_list)
 1575 
 1576         share_name = self._get_backend_share_name(replica['id'])
 1577         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1578 
 1579         if not vserver_client.volume_exists(share_name):
 1580             msg = _("Volume %(share_name)s does not exist on vserver "
 1581                     "%(vserver)s.")
 1582             msg_args = {'share_name': share_name, 'vserver': vserver}
 1583             raise exception.ShareResourceNotFound(msg % msg_args)
 1584 
 1585         # NOTE(cknight): The SnapMirror may have been intentionally broken by
 1586         # a revert-to-snapshot operation, in which case this method should not
 1587         # attempt to change anything.
 1588         if active_replica['status'] == constants.STATUS_REVERTING:
 1589             return None
 1590 
 1591         dm_session = data_motion.DataMotionSession()
 1592         try:
 1593             snapmirrors = dm_session.get_snapmirrors(active_replica, replica)
 1594         except netapp_api.NaApiError:
 1595             LOG.exception("Could not get snapmirrors for replica %s.",
 1596                           replica['id'])
 1597             return constants.STATUS_ERROR
 1598 
 1599         if not snapmirrors:
 1600             if replica['status'] != constants.STATUS_CREATING:
 1601                 try:
 1602                     dm_session.create_snapmirror(active_replica, replica)
 1603                 except netapp_api.NaApiError:
 1604                     LOG.exception("Could not create snapmirror for "
 1605                                   "replica %s.", replica['id'])
 1606                     return constants.STATUS_ERROR
 1607             return constants.REPLICA_STATE_OUT_OF_SYNC
 1608 
 1609         snapmirror = snapmirrors[0]
 1610         # NOTE(dviroel): Don't try to resume or resync a SnapMirror that has
 1611         # one of the in progress transfer states, because the storage will
 1612         # answer with an error.
 1613         in_progress_status = ['preparing', 'transferring', 'finalizing']
 1614         if (snapmirror.get('mirror-state') != 'snapmirrored' and
 1615                 snapmirror.get('relationship-status') in in_progress_status):
 1616             return constants.REPLICA_STATE_OUT_OF_SYNC
 1617 
 1618         if snapmirror.get('mirror-state') != 'snapmirrored':
 1619             try:
 1620                 vserver_client.resume_snapmirror(snapmirror['source-vserver'],
 1621                                                  snapmirror['source-volume'],
 1622                                                  vserver,
 1623                                                  share_name)
 1624                 vserver_client.resync_snapmirror(snapmirror['source-vserver'],
 1625                                                  snapmirror['source-volume'],
 1626                                                  vserver,
 1627                                                  share_name)
 1628                 return constants.REPLICA_STATE_OUT_OF_SYNC
 1629             except netapp_api.NaApiError:
 1630                 LOG.exception("Could not resync snapmirror.")
 1631                 return constants.STATUS_ERROR
 1632 
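              # Report the replica out of sync if the last completed
              # SnapMirror transfer is older than the one-hour RPO noted
              # below.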
 1633         last_update_timestamp = float(
 1634             snapmirror.get('last-transfer-end-timestamp', 0))
 1635         # TODO(ameade): Have a configurable RPO for replicas, for now it is
 1636         # one hour.
 1637         if (last_update_timestamp and
 1638             (timeutils.is_older_than(
 1639                 datetime.datetime.utcfromtimestamp(last_update_timestamp)
 1640                 .isoformat(), 3600))):
 1641             return constants.REPLICA_STATE_OUT_OF_SYNC
 1642 
 1643         # Check all snapshots exist
 1644         snapshots = [snap['share_replica_snapshot']
 1645                      for snap in share_snapshots]
 1646         for snap in snapshots:
 1647             snapshot_name = snap.get('provider_location')
 1648             if not vserver_client.snapshot_exists(snapshot_name, share_name):
 1649                 return constants.REPLICA_STATE_OUT_OF_SYNC
 1650 
 1651         return constants.REPLICA_STATE_IN_SYNC
 1652 
 1653     def promote_replica(self, context, replica_list, replica, access_rules,
 1654                         share_server=None):
 1655         """Switch SnapMirror relationships and allow r/w ops on replica.
 1656 
 1657         Creates a DataMotion session and switches the direction of the
 1658         SnapMirror relationship between the currently 'active' instance (
 1659         SnapMirror source volume) and the replica. Also attempts setting up
 1660         SnapMirror relationships between the other replicas and the new
 1661         SnapMirror source volume ('active' instance).
 1662         :param context: Request Context
 1663         :param replica_list: List of replicas, including the 'active' instance
 1664         :param replica: Replica to promote to SnapMirror source
 1665         :param access_rules: Access rules to apply to the replica
 1666         :param share_server: ShareServer class instance of replica
 1667         :return: Updated replica_list
 1668         """
 1669         orig_active_replica = self._find_active_replica(replica_list)
 1670 
 1671         dm_session = data_motion.DataMotionSession()
 1672 
 1673         new_replica_list = []
 1674 
 1675         # Setup the new active replica
 1676         try:
 1677             new_active_replica = (
 1678                 self._convert_destination_replica_to_independent(
 1679                     context, dm_session, orig_active_replica, replica,
 1680                     access_rules, share_server=share_server))
 1681         except exception.StorageCommunicationException:
 1682             LOG.exception("Could not communicate with the backend "
 1683                           "for replica %s during promotion.",
 1684                           replica['id'])
 1685             new_active_replica = copy.deepcopy(replica)
 1686             new_active_replica['replica_state'] = (
 1687                 constants.STATUS_ERROR)
 1688             new_active_replica['status'] = constants.STATUS_ERROR
 1689             return [new_active_replica]
 1690 
 1691         new_replica_list.append(new_active_replica)
 1692 
 1693         # Change the source replica for all destinations to the new
 1694         # active replica.
 1695         for r in replica_list:
 1696             if r['id'] != replica['id']:
 1697                 r = self._safe_change_replica_source(dm_session, r,
 1698                                                      orig_active_replica,
 1699                                                      replica,
 1700                                                      replica_list)
 1701                 new_replica_list.append(r)
 1702 
 1703         # Unmount the original active replica.
 1704         orig_active_vserver = dm_session.get_vserver_from_share(
 1705             orig_active_replica)
 1706         self._unmount_orig_active_replica(orig_active_replica,
 1707                                           orig_active_vserver)
 1708 
 1709         self._handle_qos_on_replication_change(dm_session,
 1710                                                new_active_replica,
 1711                                                orig_active_replica,
 1712                                                share_server=share_server)
 1713 
 1714         return new_replica_list
 1715 
 1716     def _unmount_orig_active_replica(self, orig_active_replica,
 1717                                      orig_active_vserver=None):
 1718         orig_active_replica_backend = (
 1719             share_utils.extract_host(orig_active_replica['host'],
 1720                                      level='backend_name'))
 1721         orig_active_vserver_client = data_motion.get_client_for_backend(
 1722             orig_active_replica_backend,
 1723             vserver_name=orig_active_vserver)
 1724         share_name = self._get_backend_share_name(
 1725             orig_active_replica['id'])
 1726         try:
 1727             orig_active_vserver_client.unmount_volume(share_name,
 1728                                                       force=True)
 1729             LOG.info("Unmount of the original active replica %s successful.",
 1730                      orig_active_replica['id'])
 1731         except exception.StorageCommunicationException:
 1732             LOG.exception("Could not unmount the original active replica %s.",
 1733                           orig_active_replica['id'])
 1734 
 1735     def _handle_qos_on_replication_change(self, dm_session, new_active_replica,
 1736                                           orig_active_replica,
 1737                                           share_server=None):
 1738         # QoS operations: Remove and purge QoS policy on old active replica
 1739         # if any and create a new policy on the destination if necessary.
 1740         extra_specs = share_types.get_extra_specs_from_share(
 1741             orig_active_replica)
 1742         qos_specs = self._get_normalized_qos_specs(extra_specs)
 1743 
 1744         if qos_specs and self._have_cluster_creds:
 1745             dm_session.remove_qos_on_old_active_replica(orig_active_replica)
 1746             # Check if a QoS policy already exists for the promoted replica,
 1747             # if it does, modify it as necessary, else create it:
 1748             try:
 1749                 new_active_replica_qos_policy = (
 1750                     self._get_backend_qos_policy_group_name(
 1751                         new_active_replica['id']))
 1752                 vserver, vserver_client = self._get_vserver(
 1753                     share_server=share_server)
 1754 
 1755                 volume_name_on_backend = self._get_backend_share_name(
 1756                     new_active_replica['id'])
 1757                 if not self._client.qos_policy_group_exists(
 1758                         new_active_replica_qos_policy):
 1759                     self._create_qos_policy_group(
 1760                         new_active_replica, vserver, qos_specs)
 1761                 else:
 1762                     max_throughput = self._get_max_throughput(
 1763                         new_active_replica['size'], qos_specs)
 1764                     self._client.qos_policy_group_modify(
 1765                         new_active_replica_qos_policy, max_throughput)
 1766                 vserver_client.set_qos_policy_group_for_volume(
 1767                     volume_name_on_backend, new_active_replica_qos_policy)
 1768 
 1769                 LOG.info("QoS policy applied successfully for promoted "
 1770                          "replica: %s", new_active_replica['id'])
 1771             except Exception:
 1772                 LOG.exception("Could not apply QoS to the promoted replica.")
 1773 
 1774     def _convert_destination_replica_to_independent(
 1775             self, context, dm_session, orig_active_replica, replica,
 1776             access_rules, share_server=None):
 1777         """Breaks SnapMirror and allows r/w ops on the destination replica.
 1778 
 1779         For promotion, the existing SnapMirror relationship must be broken
 1780         and access rules have to be granted to the broken off replica to
 1781         use it as an independent share.
 1782         :param context: Request Context
 1783         :param dm_session: Data motion object for SnapMirror operations
 1784         :param orig_active_replica: Original SnapMirror source
 1785         :param replica: Replica to promote to SnapMirror source
 1786         :param access_rules: Access rules to apply to the replica
 1787         :param share_server: ShareServer class instance of replica
 1788         :return: Updated replica
 1789         """
 1790         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1791         share_name = self._get_backend_share_name(replica['id'])
 1792 
 1793         try:
 1794             # 1. Start an update to try to get a last-minute transfer
 1795             # before we quiesce and break.
 1796             dm_session.update_snapmirror(orig_active_replica, replica)
 1797         except exception.StorageCommunicationException:
 1798             # Ignore any errors since the current source replica may be
 1799             # unreachable
 1800             pass
 1801         # 2. Break SnapMirror
 1802         dm_session.break_snapmirror(orig_active_replica, replica)
 1803 
 1804         # 3. Setup access rules
 1805         new_active_replica = copy.deepcopy(replica)
 1806         helper = self._get_helper(replica)
 1807         helper.set_client(vserver_client)
 1808         try:
 1809             helper.update_access(replica, share_name, access_rules)
 1810         except Exception:
 1811             new_active_replica['access_rules_status'] = (
 1812                 constants.SHARE_INSTANCE_RULES_SYNCING)
 1813         else:
 1814             new_active_replica['access_rules_status'] = constants.STATUS_ACTIVE
 1815 
 1816         new_active_replica['export_locations'] = self._create_export(
 1817             new_active_replica, share_server, vserver, vserver_client)
 1818         new_active_replica['replica_state'] = constants.REPLICA_STATE_ACTIVE
 1819 
 1820         # 4. Set File system size fixed to false
 1821         vserver_client.set_volume_filesys_size_fixed(share_name,
 1822                                                      filesys_size_fixed=False)
 1823 
 1824         return new_active_replica
 1825 
 1826     def _safe_change_replica_source(self, dm_session, replica,
 1827                                     orig_source_replica,
 1828                                     new_source_replica, replica_list):
 1829         """Attempts to change the SnapMirror source to new source.
 1830 
 1831         If the attempt fails, 'replica_state' is set to 'error'.
 1832         :param dm_session: Data motion object for SnapMirror operations
 1833         :param replica: Replica that requires a change of source
 1834         :param orig_source_replica: Original SnapMirror source volume
 1835         :param new_source_replica: New SnapMirror source volume
 1836         :return: Updated replica
 1837         """
 1838         try:
 1839             dm_session.change_snapmirror_source(replica,
 1840                                                 orig_source_replica,
 1841                                                 new_source_replica,
 1842                                                 replica_list)
 1843         except exception.StorageCommunicationException:
 1844             replica['status'] = constants.STATUS_ERROR
 1845             replica['replica_state'] = constants.STATUS_ERROR
 1846             replica['export_locations'] = []
 1847             msg = ("Failed to change replica (%s) to a SnapMirror "
 1848                    "destination. Replica backend is unreachable.")
 1849 
 1850             LOG.exception(msg, replica['id'])
 1851             return replica
 1852         except netapp_api.NaApiError:
 1853             replica['replica_state'] = constants.STATUS_ERROR
 1854             replica['export_locations'] = []
 1855             msg = ("Failed to change replica (%s) to a SnapMirror "
 1856                    "destination.")
 1857             LOG.exception(msg, replica['id'])
 1858             return replica
 1859 
 1860         replica['replica_state'] = constants.REPLICA_STATE_OUT_OF_SYNC
 1861         replica['export_locations'] = []
 1862 
 1863         return replica
 1864 
 1865     def create_replicated_snapshot(self, context, replica_list,
 1866                                    snapshot_instances, share_server=None):
 1867         active_replica = self._find_active_replica(replica_list)
 1868         active_snapshot = [x for x in snapshot_instances
 1869                            if x['share_id'] == active_replica['id']][0]
 1870         snapshot_name = self._get_backend_snapshot_name(active_snapshot['id'])
 1871 
 1872         self.create_snapshot(context, active_snapshot,
 1873                              share_server=share_server)
 1874 
 1875         active_snapshot['status'] = constants.STATUS_AVAILABLE
 1876         active_snapshot['provider_location'] = snapshot_name
 1877         snapshots = [active_snapshot]
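              # Pair each replica with its snapshot instance: sorting
              # replicas by 'id' and snapshot instances by 'share_id' yields
              # matching pairs.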
 1878         instances = zip(sorted(replica_list,
 1879                                key=lambda x: x['id']),
 1880                         sorted(snapshot_instances,
 1881                                key=lambda x: x['share_id']))
 1882 
 1883         for replica, snapshot in instances:
 1884             if snapshot['id'] != active_snapshot['id']:
 1885                 snapshot['provider_location'] = snapshot_name
 1886                 snapshots.append(snapshot)
 1887                 dm_session = data_motion.DataMotionSession()
 1888                 if replica.get('host'):
 1889                     try:
 1890                         dm_session.update_snapmirror(active_replica,
 1891                                                      replica)
 1892                     except netapp_api.NaApiError as e:
 1893                         if e.code != netapp_api.EOBJECTNOTFOUND:
 1894                             raise
 1895         return snapshots
 1896 
 1897     def delete_replicated_snapshot(self, context, replica_list,
 1898                                    snapshot_instances, share_server=None):
 1899         active_replica = self._find_active_replica(replica_list)
 1900         active_snapshot = [x for x in snapshot_instances
 1901                            if x['share_id'] == active_replica['id']][0]
 1902 
 1903         self.delete_snapshot(context, active_snapshot,
 1904                              share_server=share_server,
 1905                              snapshot_name=active_snapshot['provider_location']
 1906                              )
 1907         active_snapshot['status'] = constants.STATUS_DELETED
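              # Trigger SnapMirror updates so the snapshot deletion on the
              # active replica is propagated to the non-active replicas.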
 1908         instances = zip(sorted(replica_list,
 1909                                key=lambda x: x['id']),
 1910                         sorted(snapshot_instances,
 1911                                key=lambda x: x['share_id']))
 1912 
 1913         for replica, snapshot in instances:
 1914             if snapshot['id'] != active_snapshot['id']:
 1915                 dm_session = data_motion.DataMotionSession()
 1916                 if replica.get('host'):
 1917                     try:
 1918                         dm_session.update_snapmirror(active_replica, replica)
 1919                     except netapp_api.NaApiError as e:
 1920                         if e.code != netapp_api.EOBJECTNOTFOUND:
 1921                             raise
 1922 
 1923         return [active_snapshot]
 1924 
 1925     def update_replicated_snapshot(self, replica_list, share_replica,
 1926                                    snapshot_instances, snapshot_instance,
 1927                                    share_server=None):
 1928         active_replica = self._find_active_replica(replica_list)
 1929         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1930         share_name = self._get_backend_share_name(
 1931             snapshot_instance['share_id'])
 1932         snapshot_name = snapshot_instance.get('provider_location')
 1933         # NOTE(ameade): If there is no provider location,
 1934         # then grab from active snapshot instance
 1935         if snapshot_name is None:
 1936             active_snapshot = [x for x in snapshot_instances
 1937                                if x['share_id'] == active_replica['id']][0]
 1938             snapshot_name = active_snapshot.get('provider_location')
 1939             if not snapshot_name:
 1940                 return
 1941 
 1942         try:
 1943             snapshot_exists = vserver_client.snapshot_exists(snapshot_name,
 1944                                                              share_name)
 1945         except exception.SnapshotUnavailable:
 1946             # The volume must still be offline
 1947             return
 1948 
 1949         if (snapshot_exists and
 1950                 snapshot_instance['status'] == constants.STATUS_CREATING):
 1951             return {
 1952                 'status': constants.STATUS_AVAILABLE,
 1953                 'provider_location': snapshot_name,
 1954             }
 1955         elif (not snapshot_exists and
 1956               snapshot_instance['status'] == constants.STATUS_DELETING):
 1957             raise exception.SnapshotResourceNotFound(
 1958                 name=snapshot_instance.get('provider_location'))
 1959 
 1960         dm_session = data_motion.DataMotionSession()
 1961         try:
 1962             dm_session.update_snapmirror(active_replica, share_replica)
 1963         except netapp_api.NaApiError as e:
 1964             if e.code != netapp_api.EOBJECTNOTFOUND:
 1965                 raise
 1966 
 1967     def revert_to_replicated_snapshot(self, context, active_replica,
 1968                                       replica_list, active_replica_snapshot,
 1969                                       replica_snapshots, share_server=None):
 1970         """Reverts a replicated share (in place) to the specified snapshot."""
 1971         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1972         share_name = self._get_backend_share_name(
 1973             active_replica_snapshot['share_id'])
 1974         snapshot_name = (
 1975             active_replica_snapshot.get('provider_location') or
 1976             self._get_backend_snapshot_name(active_replica_snapshot['id']))
 1977 
 1978         LOG.debug('Restoring snapshot %s', snapshot_name)
 1979 
 1980         dm_session = data_motion.DataMotionSession()
 1981         non_active_replica_list = self._find_nonactive_replicas(replica_list)
 1982 
 1983         # Ensure source snapshot exists
 1984         vserver_client.get_snapshot(share_name, snapshot_name)
 1985 
 1986         # Break all mirrors
 1987         for replica in non_active_replica_list:
 1988             try:
 1989                 dm_session.break_snapmirror(
 1990                     active_replica, replica, mount=False)
 1991             except netapp_api.NaApiError as e:
 1992                 if e.code != netapp_api.EOBJECTNOTFOUND:
 1993                     raise
 1994 
 1995         # Delete source SnapMirror snapshots that will prevent a snap restore
 1996         snapmirror_snapshot_names = vserver_client.list_snapmirror_snapshots(
 1997             share_name)
 1998         for snapmirror_snapshot_name in snapmirror_snapshot_names:
 1999             vserver_client.delete_snapshot(
 2000                 share_name, snapmirror_snapshot_name, ignore_owners=True)
 2001 
 2002         # Restore source snapshot of interest
 2003         vserver_client.restore_snapshot(share_name, snapshot_name)
 2004 
 2005         # Reestablish mirrors
 2006         for replica in non_active_replica_list:
 2007             try:
 2008                 dm_session.resync_snapmirror(active_replica, replica)
 2009             except netapp_api.NaApiError as e:
 2010                 if e.code != netapp_api.EOBJECTNOTFOUND:
 2011                     raise
 2012 
 2013     def _check_destination_vserver_for_vol_move(self, source_share,
 2014                                                 source_vserver,
 2015                                                 dest_share_server):
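              # An intra-cluster volume move cannot cross Vserver boundaries,
              # so the source and destination share servers must map to the
              # same Vserver.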
 2016         try:
 2017             destination_vserver, __ = self._get_vserver(
 2018                 share_server=dest_share_server)
 2019         except exception.InvalidParameterValue:
 2020             destination_vserver = None
 2021 
 2022         if source_vserver != destination_vserver:
 2023             msg = _("Cannot migrate %(shr)s efficiently from source "
 2024                     "VServer %(src)s to destination VServer %(dest)s.")
 2025             msg_args = {
 2026                 'shr': source_share['id'],
 2027                 'src': source_vserver,
 2028                 'dest': destination_vserver,
 2029             }
 2030             raise exception.NetAppException(msg % msg_args)
 2031 
 2032     def migration_check_compatibility(self, context, source_share,
 2033                                       destination_share, share_server=None,
 2034                                       destination_share_server=None):
 2035         """Checks compatibility between self.host and destination host."""
 2036         # We need cluster creds to perform an intra-cluster data motion
 2037         compatible = False
 2038         destination_host = destination_share['host']
 2039 
 2040         if self._have_cluster_creds:
 2041             try:
 2042                 backend = share_utils.extract_host(
 2043                     destination_host, level='backend_name')
 2044                 destination_aggregate = share_utils.extract_host(
 2045                     destination_host, level='pool')
 2046                 # Check that the new extra-specs are valid on the destination
 2047                 extra_specs = share_types.get_extra_specs_from_share(
 2048                     destination_share)
 2049                 self._check_extra_specs_validity(
 2050                     destination_share, extra_specs)
 2051                 # TODO(gouthamr): Check whether QoS min-throughputs can be
 2052                 # honored on the destination aggregate when supported.
 2053                 self._check_aggregate_extra_specs_validity(
 2054                     destination_aggregate, extra_specs)
 2055 
 2056                 data_motion.get_backend_configuration(backend)
 2057 
 2058                 source_vserver, __ = self._get_vserver(
 2059                     share_server=share_server)
 2060                 share_volume = self._get_backend_share_name(
 2061                     source_share['id'])
 2062 
 2063                 self._check_destination_vserver_for_vol_move(
 2064                     source_share, source_vserver, destination_share_server)
 2065 
 2066                 encrypt_dest = self._get_dest_flexvol_encryption_value(
 2067                     destination_share)
 2068                 self._client.check_volume_move(
 2069                     share_volume, source_vserver, destination_aggregate,
 2070                     encrypt_destination=encrypt_dest)
 2071 
 2072             except Exception:
 2073                 msg = ("Cannot migrate share %(shr)s efficiently between "
 2074                        "%(src)s and %(dest)s.")
 2075                 msg_args = {
 2076                     'shr': source_share['id'],
 2077                     'src': source_share['host'],
 2078                     'dest': destination_host,
 2079                 }
 2080                 LOG.exception(msg, msg_args)
 2081             else:
 2082                 compatible = True
 2083         else:
 2084             msg = ("Cluster credentials have not been configured "
 2085                    "with this share driver. Cannot perform volume move "
 2086                    "operations.")
 2087             LOG.warning(msg)
 2088 
 2089         compatibility = {
 2090             'compatible': compatible,
 2091             'writable': compatible,
 2092             'nondisruptive': compatible,
 2093             'preserve_metadata': compatible,
 2094             'preserve_snapshots': compatible,
 2095         }
 2096 
 2097         return compatibility
 2098 
 2099     def migration_start(self, context, source_share, destination_share,
 2100                         source_snapshots, snapshot_mappings,
 2101                         share_server=None, destination_share_server=None):
 2102         """Begins data motion from source_share to destination_share."""
 2103         # Intra-cluster migration
 2104         vserver, vserver_client = self._get_vserver(share_server=share_server)
 2105         share_volume = self._get_backend_share_name(source_share['id'])
 2106         destination_aggregate = share_utils.extract_host(
 2107             destination_share['host'], level='pool')
 2108 
 2109         # If the destination's share type extra-spec for Flexvol encryption
 2110         # is different than the source's, then specify the volume-move
 2111         # operation to set the correct 'encrypt' attribute on the destination
 2112         # volume.
 2113         encrypt_dest = self._get_dest_flexvol_encryption_value(
 2114             destination_share)
 2115 
 2116         self._client.start_volume_move(
 2117             share_volume,
 2118             vserver,
 2119             destination_aggregate,
 2120             encrypt_destination=encrypt_dest)
 2121 
 2122         msg = ("Began volume move operation of share %(shr)s from %(src)s "
 2123                "to %(dest)s.")
 2124         msg_args = {
 2125             'shr': source_share['id'],
 2126             'src': source_share['host'],
 2127             'dest': destination_share['host'],
 2128         }
 2129         LOG.info(msg, msg_args)
 2130 
 2131     def _get_volume_move_status(self, source_share, share_server):
 2132         vserver, vserver_client = self._get_vserver(share_server=share_server)
 2133         share_volume = self._get_backend_share_name(source_share['id'])
 2134         status = self._client.get_volume_move_status(share_volume, vserver)
 2135         return status
 2136 
 2137     def _get_dest_flexvol_encryption_value(self, destination_share):
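              # Read the boolean 'netapp_flexvol_encryption' extra-spec from
              # the destination share type to decide whether the moved
              # FlexVol should be encrypted.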
 2138         dest_share_type_encrypted_val = share_types.get_share_type_extra_specs(
 2139             destination_share['share_type_id'],
 2140             'netapp_flexvol_encryption')
 2141         encrypt_destination = share_types.parse_boolean_extra_spec(
 2142             'netapp_flexvol_encryption', dest_share_type_encrypted_val)
 2143 
 2144         return encrypt_destination
 2145 
 2146     def migration_continue(self, context, source_share, destination_share,
 2147                            source_snapshots, snapshot_mappings,
 2148                            share_server=None, destination_share_server=None):
 2149         """Check progress of migration, try to repair data motion errors."""
 2150         status = self._get_volume_move_status(source_share, share_server)
 2151         completed_phases = (
 2152             'cutover_hard_deferred', 'cutover_soft_deferred', 'completed')
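              # The deferred cutover phases count as complete here; the
              # actual cutover is triggered later by migration_complete().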
 2153 
 2154         move_phase = status['phase'].lower()
 2155         if move_phase == 'failed':
 2156             msg_args = {
 2157                 'shr': source_share['id'],
 2158                 'reason': status['details'],
 2159             }
 2160             msg = _("Volume move operation for share %(shr)s failed. Reason: "
 2161                     "%(reason)s") % msg_args
 2162             LOG.exception(msg)
 2163             raise exception.NetAppException(msg)
 2164         elif move_phase in completed_phases:
 2165             return True
 2166 
 2167         return False
 2168 
 2169     def migration_get_progress(self, context, source_share,
 2170                                destination_share, source_snapshots,
 2171                                snapshot_mappings, share_server=None,
 2172                                destination_share_server=None):
 2173         """Return detailed progress of the migration in progress."""
 2174         status = self._get_volume_move_status(source_share, share_server)
 2175 
 2176         # NOTE(gouthamr): If the volume move is waiting for a manual
 2177         # intervention to cut over, the copy is complete from the user's
 2178         # perspective; volume move copies the remaining data before cut-over.
 2179         if status['phase'] in ('cutover_hard_deferred',
 2180                                'cutover_soft_deferred'):
 2181             status['percent-complete'] = 100
 2182 
 2183         msg = ("Volume move status for share %(share)s: (State) %(state)s. "
 2184                "(Phase) %(phase)s. Details: %(details)s")
 2185         msg_args = {
 2186             'state': status['state'],
 2187             'details': status['details'],
 2188             'share': source_share['id'],
 2189             'phase': status['phase'],
 2190         }
 2191         LOG.info(msg, msg_args)
 2192 
 2193         return {
 2194             'total_progress': status['percent-complete'] or 0,
 2195             'state': status['state'],
 2196             'estimated_completion_time': status['estimated-completion-time'],
 2197             'phase': status['phase'],
 2198             'details': status['details'],
 2199         }
 2200 
 2201     def migration_cancel(self, context, source_share, destination_share,
 2202                          source_snapshots, snapshot_mappings,
 2203                          share_server=None, destination_share_server=None):
 2204         """Abort an ongoing migration."""
 2205         vserver, vserver_client = self._get_vserver(share_server=share_server)
 2206         share_volume = self._get_backend_share_name(source_share['id'])
 2207 
 2208         try:
 2209             self._get_volume_move_status(source_share, share_server)
 2210         except exception.NetAppException:
 2211             LOG.exception("Could not get volume move status.")
 2212             return
 2213 
 2214         self._client.abort_volume_move(share_volume, vserver)
 2215 
 2216         msg = ("Share volume move operation for share %(shr)s from host "
 2217                "%(src)s to %(dest)s was successfully aborted.")
 2218         msg_args = {
 2219             'shr': source_share['id'],
 2220             'src': source_share['host'],
 2221             'dest': destination_share['host'],
 2222         }
 2223         LOG.info(msg, msg_args)
 2224 
 2225     def migration_complete(self, context, source_share, destination_share,
 2226                            source_snapshots, snapshot_mappings,
 2227                            share_server=None, destination_share_server=None):
 2228         """Initiate the cutover to destination share after move is complete."""
 2229         vserver, vserver_client = self._get_vserver(share_server=share_server)
 2230         share_volume = self._get_backend_share_name(source_share['id'])
 2231 
 2232         status = self._get_volume_move_status(source_share, share_server)
 2233 
 2234         move_phase = status['phase'].lower()
 2235         if move_phase == 'completed':
 2236             LOG.debug("Volume move operation was already successfully "
 2237                       "completed for share %(shr)s.",
 2238                       {'shr': source_share['id']})
 2239         elif move_phase in ('cutover_hard_deferred', 'cutover_soft_deferred'):
 2240             self._client.trigger_volume_move_cutover(share_volume, vserver)
 2241             self._wait_for_cutover_completion(
 2242                 source_share, share_server)
 2243         else:
 2244             msg_args = {
 2245                 'shr': source_share['id'],
 2246                 'status': status['state'],
 2247                 'phase': status['phase'],
 2248                 'details': status['details'],
 2249             }
 2250             msg = _("Cannot complete volume move operation for share %(shr)s. "
 2251                     "Current volume move status: %(status)s, phase: "
 2252                     "%(phase)s. Details: %(details)s") % msg_args
 2253             LOG.exception(msg)
 2254             raise exception.NetAppException(msg)
 2255 
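              # Rename the moved FlexVol so it matches the destination
              # share's backend share name.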
 2256         new_share_volume_name = self._get_backend_share_name(
 2257             destination_share['id'])
 2258         vserver_client.set_volume_name(share_volume, new_share_volume_name)
 2259 
 2260         # Modify volume properties per share type extra-specs
 2261         extra_specs = share_types.get_extra_specs_from_share(
 2262             destination_share)
 2263         extra_specs = self._remap_standard_boolean_extra_specs(extra_specs)
 2264         self._check_extra_specs_validity(destination_share, extra_specs)
 2265         provisioning_options = self._get_provisioning_options(extra_specs)
 2266         qos_policy_group_name = self._modify_or_create_qos_for_existing_share(
 2267             destination_share, extra_specs, vserver, vserver_client)
 2268         if qos_policy_group_name:
 2269             provisioning_options['qos_policy_group'] = qos_policy_group_name
 2270         else:
 2271             # Remove the QoS policy on the migrated share, since the
 2272             # share type this share is being migrated to does not specify
 2273             # any QoS settings.
 2274             provisioning_options['qos_policy_group'] = "none"
 2275 
 2276             qos_policy_of_src_share = self._get_backend_qos_policy_group_name(
 2277                 source_share['id'])
 2278             self._client.mark_qos_policy_group_for_deletion(
 2279                 qos_policy_of_src_share)
 2280 
 2281         destination_aggregate = share_utils.extract_host(
 2282             destination_share['host'], level='pool')
 2283 
 2284         # Modify volume to match extra specs
 2285         vserver_client.modify_volume(destination_aggregate,
 2286                                      new_share_volume_name,
 2287                                      **provisioning_options)
 2288 
 2289         msg = ("Volume move operation for share %(shr)s has completed "
 2290                "successfully. Share has been moved from %(src)s to "
 2291                "%(dest)s.")
 2292         msg_args = {
 2293             'shr': source_share['id'],
 2294             'src': source_share['host'],
 2295             'dest': destination_share['host'],
 2296         }
 2297         LOG.info(msg, msg_args)
 2298 
 2299         # NOTE(gouthamr): For nondisruptive migration, the current export
 2300         # policy is not cleared; it is renamed to match the name of the
 2301         # share.
 2302         export_locations = self._create_export(
 2303             destination_share, share_server, vserver, vserver_client,
 2304             clear_current_export_policy=False)
 2305         src_snaps_dict = {s['id']: s for s in source_snapshots}
 2306         snapshot_updates = {}
 2307 
 2308         for source_snap_id, destination_snap in snapshot_mappings.items():
 2309             p_location = src_snaps_dict[source_snap_id]['provider_location']
 2310 
 2311             snapshot_updates.update(
 2312                 {destination_snap['id']: {'provider_location': p_location}})
 2313 
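              # Illustrative shape of the return value built below (ids and
              # locations are placeholders, not real values):
              #     {'export_locations': [...],
              #      'snapshot_updates': {<dest snapshot id>:
              #          {'provider_location': <source provider location>}}}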
 2314         return {
 2315             'export_locations': export_locations,
 2316             'snapshot_updates': snapshot_updates,
 2317         }
 2318 
 2319     @na_utils.trace
 2320     def _modify_or_create_qos_for_existing_share(self, share, extra_specs,
 2321                                                  vserver, vserver_client):
 2322         """Gets/Creates QoS policy for an existing FlexVol.
 2323 
 2324         The share's assigned QoS policy is renamed and adjusted if the policy
 2325         is exclusive to the FlexVol. If the policy includes other workloads
 2326         besides the FlexVol, a new policy is created with the specs necessary.
 2327         """
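              # The QoS specs come from share-type extra specs (for example,
              # an illustrative spec such as netapp:maxiops=1000), normalized
              # by _get_normalized_qos_specs() below.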
 2328         qos_specs = self._get_normalized_qos_specs(extra_specs)
 2329         if not qos_specs:
 2330             return
 2331 
 2332         backend_share_name = self._get_backend_share_name(share['id'])
 2333         qos_policy_group_name = self._get_backend_qos_policy_group_name(
 2334             share['id'])
 2335 
 2336         create_new_qos_policy_group = True
 2337 
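              # get_volume() is assumed to report the volume size in bytes,
              # so it is rounded up to whole GiB below for the QoS throughput
              # calculation.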
 2338         backend_volume = vserver_client.get_volume(
 2339             backend_share_name)
 2340         backend_volume_size = int(
 2341             math.ceil(float(backend_volume['size']) / units.Gi))
 2342 
 2343         LOG.debug("Checking for a pre-existing QoS policy group that "
 2344                   "is exclusive to the volume %s.", backend_share_name)
 2345 
 2346         # Does the volume have an exclusive QoS policy that we can rename?
 2347         if backend_volume['qos-policy-group-name'] is not None:
 2348             existing_qos_policy_group = self._client.qos_policy_group_get(
 2349                 backend_volume['qos-policy-group-name'])
 2350             if existing_qos_policy_group['num-workloads'] == 1:
 2351                 # Exclusive policy: adjust max-throughput and rename it.
 2352 
 2353                 msg = ("Found pre-existing QoS policy %(policy)s and it is "
 2354                        "exclusive to the volume %(volume)s. Modifying and "
 2355                        "renaming this policy to %(new_policy)s.")
 2356                 msg_args = {
 2357                     'policy': backend_volume['qos-policy-group-name'],
 2358                     'volume': backend_share_name,
 2359                     'new_policy': qos_policy_group_name,
 2360                 }
 2361                 LOG.debug(msg, msg_args)
 2362 
 2363                 max_throughput = self._get_max_throughput(
 2364                     backend_volume_size, qos_specs)
 2365                 self._client.qos_policy_group_modify(
 2366                     backend_volume['qos-policy-group-name'], max_throughput)
 2367                 self._client.qos_policy_group_rename(
 2368                     backend_volume['qos-policy-group-name'],
 2369                     qos_policy_group_name)
 2370                 create_new_qos_policy_group = False
 2371 
 2372         if create_new_qos_policy_group:
 2373             share_obj = {
 2374                 'size': backend_volume_size,
 2375                 'id': share['id'],
 2376             }
 2377             LOG.debug("No existing QoS policy group found for "
 2378                       "volume. Creating a new one with name %s.",
 2379                       qos_policy_group_name)
 2380             self._create_qos_policy_group(share_obj, vserver, qos_specs)
 2381         return qos_policy_group_name
 2382 
 2383     def _wait_for_cutover_completion(self, source_share, share_server):
 2384 
 2385         retries = (self.configuration.netapp_volume_move_cutover_timeout / 5
 2386                    or 1)
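              # Poll every 5 seconds with no backoff (backoff_rate=1), so the
              # total wait is roughly netapp_volume_move_cutover_timeout
              # seconds before giving up.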
 2387 
 2388         @manila_utils.retry(exception.ShareBusyException, interval=5,
 2389                             retries=retries, backoff_rate=1)
 2390         def check_move_completion():
 2391             status = self._get_volume_move_status(source_share, share_server)
 2392             if status['phase'].lower() != 'completed':
 2393                 msg_args = {
 2394                     'shr': source_share['id'],
 2395                     'phs': status['phase'],
 2396                 }
 2397                 msg = _('Volume move operation for share %(shr)s is not '
 2398                         'complete. Current phase: %(phs)s. '
 2399                         'Retrying.') % msg_args
 2400                 LOG.warning(msg)
 2401                 raise exception.ShareBusyException(reason=msg)
 2402 
 2403         try:
 2404             check_move_completion()
 2405         except exception.ShareBusyException:
 2406             msg = _("Volume move operation did not complete after cut-over "
 2407                     "was triggered. Retries exhausted. Not retrying.")
 2408             raise exception.NetAppException(message=msg)
 2409 
 2410     def get_backend_info(self, context):
 2411         snapdir_visibility = self.configuration.netapp_reset_snapdir_visibility
 2412         return {
 2413             'snapdir_visibility': snapdir_visibility,
 2414         }
 2415 
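          # NOTE: the share manager is expected to compare the value returned
          # by get_backend_info() with what it recorded previously and invoke
          # ensure_shares() only when it changes, so the snapdir visibility
          # below is re-applied after a configuration change.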
 2416     def ensure_shares(self, context, shares):
 2417         cfg_snapdir = self.configuration.netapp_reset_snapdir_visibility
 2418         hide_snapdir = self.HIDE_SNAPDIR_CFG_MAP[cfg_snapdir.lower()]
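              # When the map yields None, snapdir visibility is not reset and
              # existing shares are left untouched.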
 2419         if hide_snapdir is not None:
 2420             for share in shares:
 2421                 share_server = share.get('share_server')
 2422                 vserver, vserver_client = self._get_vserver(
 2423                     share_server=share_server)
 2424                 share_name = self._get_backend_share_name(share['id'])
 2425                 self._apply_snapdir_visibility(
 2426                     hide_snapdir, share_name, vserver_client)