"Fossies" - the Fresh Open Source Software Archive

Member "manila-8.1.4/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py" (19 Nov 2020, 108643 Bytes) of package /linux/misc/openstack/manila-8.1.4.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) Python source code syntax highlighting (style: standard) with prefixed line numbers. Alternatively you can here view or download the uninterpreted source code file. For more information about "lib_base.py" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 8.1.3_vs_8.1.4.

    1 # Copyright (c) 2015 Clinton Knight.  All rights reserved.
    2 # Copyright (c) 2015 Tom Barron.  All rights reserved.
    3 #
    4 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
    5 #    not use this file except in compliance with the License. You may obtain
    6 #    a copy of the License at
    7 #
    8 #         http://www.apache.org/licenses/LICENSE-2.0
    9 #
   10 #    Unless required by applicable law or agreed to in writing, software
   11 #    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   12 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   13 #    License for the specific language governing permissions and limitations
   14 #    under the License.
   15 """
   16 NetApp Data ONTAP cDOT base storage driver library.
   17 
   18 This library is the abstract base for subclasses that complete the
   19 single-SVM or multi-SVM functionality needed by the cDOT Manila drivers.
   20 """
   21 
   22 import copy
   23 import datetime
   24 import json
   25 import math
   26 import socket
   27 
   28 from oslo_config import cfg
   29 from oslo_log import log
   30 from oslo_service import loopingcall
   31 from oslo_utils import timeutils
   32 from oslo_utils import units
   33 from oslo_utils import uuidutils
   34 import six
   35 
   36 from manila.common import constants
   37 from manila import exception
   38 from manila.i18n import _
   39 from manila.share.drivers.netapp.dataontap.client import api as netapp_api
   40 from manila.share.drivers.netapp.dataontap.client import client_cmode
   41 from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion
   42 from manila.share.drivers.netapp.dataontap.cluster_mode import performance
   43 from manila.share.drivers.netapp.dataontap.protocols import cifs_cmode
   44 from manila.share.drivers.netapp.dataontap.protocols import nfs_cmode
   45 from manila.share.drivers.netapp import options as na_opts
   46 from manila.share.drivers.netapp import utils as na_utils
   47 from manila.share import share_types
   48 from manila.share import utils as share_utils
   49 from manila import utils as manila_utils
   50 
   51 LOG = log.getLogger(__name__)
   52 CONF = cfg.CONF
   53 
   54 
   55 class NetAppCmodeFileStorageLibrary(object):
   56 
   57     AUTOSUPPORT_INTERVAL_SECONDS = 3600  # hourly
   58     SSC_UPDATE_INTERVAL_SECONDS = 3600  # hourly
   59     HOUSEKEEPING_INTERVAL_SECONDS = 600  # ten minutes
   60 
   61     SUPPORTED_PROTOCOLS = ('nfs', 'cifs')
   62 
   63     DEFAULT_FILTER_FUNCTION = 'capabilities.utilization < 70'
   64     DEFAULT_GOODNESS_FUNCTION = '100 - capabilities.utilization'
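          # Illustrative effect of these defaults (utilization values assumed):
          # a pool at 55% node utilization passes the filter (55 < 70) with a
          # goodness score of 45, while a pool at 80% is filtered out entirely.
          # Deployers may override both strings with the standard per-backend
          # filter_function and goodness_function options.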
   65 
   66     # Maps NetApp qualified extra specs keys to corresponding backend API
   67     # client library argument keywords.  When we expose more backend
   68     # capabilities here, we will add them to this map.
   69     BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP = {
   70         'netapp:thin_provisioned': 'thin_provisioned',
   71         'netapp:dedup': 'dedup_enabled',
   72         'netapp:compression': 'compression_enabled',
   73         'netapp:split_clone_on_create': 'split',
   74         'netapp:hide_snapdir': 'hide_snapdir',
   75     }
   76 
   77     STRING_QUALIFIED_EXTRA_SPECS_MAP = {
   78 
   79         'netapp:snapshot_policy': 'snapshot_policy',
   80         'netapp:language': 'language',
   81         'netapp:max_files': 'max_files',
   82     }
   83 
   84     # Maps standard extra spec keys to legacy NetApp keys
   85     STANDARD_BOOLEAN_EXTRA_SPECS_MAP = {
   86         'thin_provisioning': 'netapp:thin_provisioned',
   87         'dedupe': 'netapp:dedup',
   88         'compression': 'netapp:compression',
   89     }
   90 
   91     QOS_SPECS = {
   92         'netapp:maxiops': 'maxiops',
   93         'netapp:maxiopspergib': 'maxiopspergib',
   94         'netapp:maxbps': 'maxbps',
   95         'netapp:maxbpspergib': 'maxbpspergib',
   96     }
   97 
   98     HIDE_SNAPDIR_CFG_MAP = {
   99         'visible': False,
  100         'hidden': True,
  101         'default': None,
  102     }
  103 
  104     SIZE_DEPENDENT_QOS_SPECS = {'maxiopspergib', 'maxbpspergib'}
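          # Illustrative mapping (share type assumed, not defined here): extra
          # specs {'netapp:dedup': 'true', 'netapp:max_files': '40000'} become
          # the client keyword arguments {'dedup_enabled': True,
          # 'max_files': '40000'} via the maps above when a volume is created.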
  105 
  106     def __init__(self, driver_name, **kwargs):
  107         na_utils.validate_driver_instantiation(**kwargs)
  108 
  109         self.driver_name = driver_name
  110 
  111         self.private_storage = kwargs['private_storage']
  112         self.configuration = kwargs['configuration']
  113         self.configuration.append_config_values(na_opts.netapp_connection_opts)
  114         self.configuration.append_config_values(na_opts.netapp_basicauth_opts)
  115         self.configuration.append_config_values(na_opts.netapp_transport_opts)
  116         self.configuration.append_config_values(na_opts.netapp_support_opts)
  117         self.configuration.append_config_values(na_opts.netapp_cluster_opts)
  118         self.configuration.append_config_values(
  119             na_opts.netapp_provisioning_opts)
  120         self.configuration.append_config_values(
  121             na_opts.netapp_data_motion_opts)
  122 
  123         self._licenses = []
  124         self._client = None
  125         self._clients = {}
  126         self._ssc_stats = {}
  127         self._have_cluster_creds = None
  128         self._revert_to_snapshot_support = False
  129         self._cluster_info = {}
  130 
  131         self._app_version = kwargs.get('app_version', 'unknown')
  132 
  133         na_utils.setup_tracing(self.configuration.netapp_trace_flags,
  134                                self.configuration.netapp_api_trace_pattern)
  135         self._backend_name = self.configuration.safe_get(
  136             'share_backend_name') or driver_name
  137 
  138     @na_utils.trace
  139     def do_setup(self, context):
  140         self._client = self._get_api_client()
  141         self._have_cluster_creds = self._client.check_for_cluster_credentials()
  142         if self._have_cluster_creds is True:
  143             self._set_cluster_info()
  144 
  145         self._licenses = self._get_licenses()
  146         self._revert_to_snapshot_support = self._check_snaprestore_license()
  147 
  148         # Performance monitoring library
  149         self._perf_library = performance.PerformanceLibrary(self._client)
  150 
  151     @na_utils.trace
  152     def _set_cluster_info(self):
  153         self._cluster_info['nve_support'] = (
  154             self._client.is_nve_supported()
  155             and self._client.features.FLEXVOL_ENCRYPTION)
  156 
  157     @na_utils.trace
  158     def check_for_setup_error(self):
  159         self._start_periodic_tasks()
  160 
  161     def _get_vserver(self, share_server=None):
  162         raise NotImplementedError()
  163 
  164     @na_utils.trace
  165     def _get_api_client(self, vserver=None):
  166 
  167         # Use cached value to prevent calls to system-get-ontapi-version.
  168         client = self._clients.get(vserver)
  169 
  170         if not client:
  171             client = client_cmode.NetAppCmodeClient(
  172                 transport_type=self.configuration.netapp_transport_type,
  173                 username=self.configuration.netapp_login,
  174                 password=self.configuration.netapp_password,
  175                 hostname=self.configuration.netapp_server_hostname,
  176                 port=self.configuration.netapp_server_port,
  177                 vserver=vserver,
  178                 trace=na_utils.TRACE_API,
  179                 api_trace_pattern=na_utils.API_TRACE_PATTERN)
  180             self._clients[vserver] = client
  181 
  182         return client
  183 
  184     @na_utils.trace
  185     def _get_licenses(self):
  186 
  187         if not self._have_cluster_creds:
  188             LOG.debug('License info not available without cluster credentials')
  189             return []
  190 
  191         self._licenses = self._client.get_licenses()
  192 
  193         log_data = {
  194             'backend': self._backend_name,
  195             'licenses': ', '.join(self._licenses),
  196         }
  197         LOG.info('Available licenses on %(backend)s '
  198                  'are %(licenses)s.', log_data)
  199 
  200         if 'nfs' not in self._licenses and 'cifs' not in self._licenses:
  201             msg = 'Neither NFS nor CIFS is licensed on %(backend)s'
  202             msg_args = {'backend': self._backend_name}
  203             LOG.error(msg, msg_args)
  204 
  205         return self._licenses
  206 
  207     @na_utils.trace
  208     def _start_periodic_tasks(self):
  209 
  210         # Run the task once in the current thread to prevent a race with
  211         # the first invocation of get_share_stats.
  212         self._update_ssc_info()
  213 
  214         # Start the task that updates the slow-changing storage service catalog
  215         ssc_periodic_task = loopingcall.FixedIntervalLoopingCall(
  216             self._update_ssc_info)
  217         ssc_periodic_task.start(interval=self.SSC_UPDATE_INTERVAL_SECONDS,
  218                                 initial_delay=self.SSC_UPDATE_INTERVAL_SECONDS)
  219 
  220         # Start the task that logs autosupport (EMS) data to the controller
  221         ems_periodic_task = loopingcall.FixedIntervalLoopingCall(
  222             self._handle_ems_logging)
  223         ems_periodic_task.start(interval=self.AUTOSUPPORT_INTERVAL_SECONDS,
  224                                 initial_delay=0)
  225 
  226         # Start the task that runs other housekeeping tasks, such as deletion
  227         # of previously soft-deleted storage artifacts.
  228         housekeeping_periodic_task = loopingcall.FixedIntervalLoopingCall(
  229             self._handle_housekeeping_tasks)
  230         housekeeping_periodic_task.start(
  231             interval=self.HOUSEKEEPING_INTERVAL_SECONDS, initial_delay=0)
  232 
  233     def _get_backend_share_name(self, share_id):
  234         """Get share name according to share name template."""
  235         return self.configuration.netapp_volume_name_template % {
  236             'share_id': share_id.replace('-', '_')}
  237 
  238     def _get_backend_snapshot_name(self, snapshot_id):
  239         """Get snapshot name according to snapshot name template."""
  240         return 'share_snapshot_' + snapshot_id.replace('-', '_')
  241 
  242     def _get_backend_cg_snapshot_name(self, snapshot_id):
  243         """Get CG snapshot name according to the CG snapshot name template."""
  244         return 'share_cg_snapshot_' + snapshot_id.replace('-', '_')
  245 
  246     def _get_backend_qos_policy_group_name(self, share_id):
  247         """Get QoS policy name according to QoS policy group name template."""
  248         return self.configuration.netapp_qos_policy_group_name_template % {
  249             'share_id': share_id.replace('-', '_')}
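      # Naming sketch, assuming the stock templates (e.g.
      # 'share_%(share_id)s') are unchanged: a share with ID 'aaaa-bbbb'
      # is backed by FlexVol 'share_aaaa_bbbb', its snapshot 'cccc-dddd'
      # becomes Snapshot 'share_snapshot_cccc_dddd', and the share's QoS
      # policy group name is derived from the QoS template in the same way.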
  250 
  251     @na_utils.trace
  252     def _get_aggregate_space(self):
  253         aggregates = self._find_matching_aggregates()
  254         if self._have_cluster_creds:
  255             return self._client.get_cluster_aggregate_capacities(aggregates)
  256         else:
  257             return self._client.get_vserver_aggregate_capacities(aggregates)
  258 
  259     @na_utils.trace
  260     def _check_snaprestore_license(self):
  261         """Check if snaprestore license is enabled."""
  262         if self._have_cluster_creds:
  263             return 'snaprestore' in self._licenses
  264         else:
  265             # NOTE(felipe_rodrigues): workaround to find out whether the
  266             # backend has the SnapRestore license: without cluster
  267             # credentials the ONTAP licenses cannot be retrieved, so the
  268             # driver issues a fake ONTAP "snapshot-restore-volume" request,
  269             # which is only available when the license exists. The resulting
  270             # error indicates whether or not the license is installed.
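                  # Sketch of the inference (error text assumed): a failure such
                  # as NaApiError(code=EAPIERROR, message='... is not licensed')
                  # means SnapRestore is absent, so False is returned; any other
                  # failure implies the license is present, so True is returned.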
  271             try:
  272                 self._client.restore_snapshot(
  273                     "fake_%s" % uuidutils.generate_uuid(dashed=False), "")
  274             except netapp_api.NaApiError as e:
  275                 no_license = 'is not licensed'
  276                 LOG.debug('Fake restore_snapshot request failed: %s', e)
  277                 return not (e.code == netapp_api.EAPIERROR and
  278                             no_license in e.message)
  279 
  280             # Since a nonexistent volume and an empty snapshot name were
  281             # passed, the request should never succeed and get here.
  282             msg = _("Unexpected behavior: the fake snapshot-restore request "
  283                     "with a nonexistent volume and empty snapshot did not fail.")
  284             LOG.exception(msg)
  285             raise exception.NetAppException(msg)
  286 
  287     @na_utils.trace
  288     def _get_aggregate_node(self, aggregate_name):
  289         """Get home node for the specified aggregate, or None."""
  290         if self._have_cluster_creds:
  291             return self._client.get_node_for_aggregate(aggregate_name)
  292         else:
  293             return None
  294 
  295     def get_default_filter_function(self):
  296         """Get the default filter_function string."""
  297         return self.DEFAULT_FILTER_FUNCTION
  298 
  299     def get_default_goodness_function(self):
  300         """Get the default goodness_function string."""
  301         return self.DEFAULT_GOODNESS_FUNCTION
  302 
  303     @na_utils.trace
  304     def get_share_stats(self, filter_function=None, goodness_function=None):
  305         """Retrieve stats info from Data ONTAP backend."""
  306 
  307         data = {
  308             'share_backend_name': self._backend_name,
  309             'driver_name': self.driver_name,
  310             'vendor_name': 'NetApp',
  311             'driver_version': '1.0',
  312             'netapp_storage_family': 'ontap_cluster',
  313             'storage_protocol': 'NFS_CIFS',
  314             'pools': self._get_pools(filter_function=filter_function,
  315                                      goodness_function=goodness_function),
  316             'share_group_stats': {
  317                 'consistent_snapshot_support': 'host',
  318             },
  319         }
  320 
  321         if (self.configuration.replication_domain and
  322                 not self.configuration.driver_handles_share_servers):
  323             data['replication_type'] = 'dr'
  324             data['replication_domain'] = self.configuration.replication_domain
  325 
  326         return data
  327 
  328     @na_utils.trace
  329     def get_share_server_pools(self, share_server):
  330         """Return list of pools related to a particular share server.
  331 
  332         Note that the multi-SVM cDOT driver assigns all available pools to
  333         each Vserver, so there is no need to filter the pools any further
  334         by share_server.
  335 
  336         :param share_server: ShareServer class instance.
  337         """
  338         return self._get_pools()
  339 
  340     @na_utils.trace
  341     def _get_pools(self, filter_function=None, goodness_function=None):
  342         """Retrieve list of pools available to this backend."""
  343 
  344         pools = []
  345         aggr_space = self._get_aggregate_space()
  346         aggregates = aggr_space.keys()
  347 
  348         if self._have_cluster_creds:
  349             # Get up-to-date node utilization metrics just once.
  350             self._perf_library.update_performance_cache({}, self._ssc_stats)
  351             qos_support = True
  352         else:
  353             qos_support = False
  354 
  355         netapp_flexvol_encryption = self._cluster_info.get(
  356             'nve_support', False)
  357 
  358         for aggr_name in sorted(aggregates):
  359 
  360             reserved_percentage = self.configuration.reserved_share_percentage
  361 
  362             total_capacity_gb = na_utils.round_down(float(
  363                 aggr_space[aggr_name].get('total', 0)) / units.Gi)
  364             free_capacity_gb = na_utils.round_down(float(
  365                 aggr_space[aggr_name].get('available', 0)) / units.Gi)
  366             allocated_capacity_gb = na_utils.round_down(float(
  367                 aggr_space[aggr_name].get('used', 0)) / units.Gi)
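                  # Worked example (numbers assumed): an aggregate reporting
                  # 'available': 10737418240 bytes (10 GiB) yields
                  # free_capacity_gb == 10.0, since units.Gi is 2**30 and
                  # na_utils.round_down rounds toward zero rather than up.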
  368 
  369             if total_capacity_gb == 0.0:
  370                 total_capacity_gb = 'unknown'
  371 
  372             pool = {
  373                 'pool_name': aggr_name,
  374                 'filter_function': filter_function,
  375                 'goodness_function': goodness_function,
  376                 'total_capacity_gb': total_capacity_gb,
  377                 'free_capacity_gb': free_capacity_gb,
  378                 'allocated_capacity_gb': allocated_capacity_gb,
  379                 'qos': qos_support,
  380                 'reserved_percentage': reserved_percentage,
  381                 'dedupe': [True, False],
  382                 'compression': [True, False],
  383                 'netapp_flexvol_encryption': netapp_flexvol_encryption,
  384                 'thin_provisioning': [True, False],
  385                 'snapshot_support': True,
  386                 'create_share_from_snapshot_support': True,
  387                 'revert_to_snapshot_support': self._revert_to_snapshot_support,
  388             }
  389 
  390             # Add storage service catalog data.
  391             pool_ssc_stats = self._ssc_stats.get(aggr_name)
  392             if pool_ssc_stats:
  393                 pool.update(pool_ssc_stats)
  394 
  395             # Add utilization info, or nominal value if not available.
  396             utilization = self._perf_library.get_node_utilization_for_pool(
  397                 aggr_name)
  398             pool['utilization'] = na_utils.round_down(utilization)
  399 
  400             pools.append(pool)
  401 
  402         return pools
  403 
  404     @na_utils.trace
  405     def _handle_ems_logging(self):
  406         """Build and send an EMS log message."""
  407         self._client.send_ems_log_message(self._build_ems_log_message_0())
  408         self._client.send_ems_log_message(self._build_ems_log_message_1())
  409 
  410     def _build_base_ems_log_message(self):
  411         """Construct EMS Autosupport log message common to all events."""
  412 
  413         ems_log = {
  414             'computer-name': socket.gethostname() or 'Manila_node',
  415             'event-source': 'Manila driver %s' % self.driver_name,
  416             'app-version': self._app_version,
  417             'category': 'provisioning',
  418             'log-level': '5',
  419             'auto-support': 'false',
  420         }
  421         return ems_log
  422 
  423     @na_utils.trace
  424     def _build_ems_log_message_0(self):
  425         """Construct EMS Autosupport log message with deployment info."""
  426 
  427         ems_log = self._build_base_ems_log_message()
  428         ems_log.update({
  429             'event-id': '0',
  430             'event-description': 'OpenStack Manila connected to cluster node',
  431         })
  432         return ems_log
  433 
  434     @na_utils.trace
  435     def _build_ems_log_message_1(self):
  436         """Construct EMS Autosupport log message with storage pool info."""
  437 
  438         message = self._get_ems_pool_info()
  439 
  440         ems_log = self._build_base_ems_log_message()
  441         ems_log.update({
  442             'event-id': '1',
  443             'event-description': json.dumps(message),
  444         })
  445         return ems_log
  446 
  447     def _get_ems_pool_info(self):
  448         raise NotImplementedError()
  449 
  450     @na_utils.trace
  451     def _handle_housekeeping_tasks(self):
  452         """Handle various cleanup activities."""
  453 
  454     def _find_matching_aggregates(self):
  455         """Find all aggregates matching the configured pattern."""
  456         raise NotImplementedError()
  457 
  458     @na_utils.trace
  459     def _get_helper(self, share):
  460         """Return the protocol helper that implements the share's protocol."""
  461         share_protocol = share['share_proto'].lower()
  462 
  463         if share_protocol not in self.SUPPORTED_PROTOCOLS:
  464             err_msg = _("Invalid NAS protocol supplied: %s.") % share_protocol
  465             raise exception.NetAppException(err_msg)
  466 
  467         self._check_license_for_protocol(share_protocol)
  468 
  469         if share_protocol == 'nfs':
  470             return nfs_cmode.NetAppCmodeNFSHelper()
  471         elif share_protocol == 'cifs':
  472             return cifs_cmode.NetAppCmodeCIFSHelper()
  473 
  474     @na_utils.trace
  475     def _check_license_for_protocol(self, share_protocol):
  476         """Validates protocol license if cluster APIs are accessible."""
  477         if not self._have_cluster_creds:
  478             return
  479 
  480         if share_protocol.lower() not in self._licenses:
  481             current_licenses = self._get_licenses()
  482             if share_protocol.lower() not in current_licenses:
  483                 msg_args = {
  484                     'protocol': share_protocol,
  485                     'host': self.configuration.netapp_server_hostname
  486                 }
  487                 msg = _('The protocol %(protocol)s is not licensed on '
  488                         'controller %(host)s') % msg_args
  489                 LOG.error(msg)
  490                 raise exception.NetAppException(msg)
  491 
  492     @na_utils.trace
  493     def get_pool(self, share):
  494         pool = share_utils.extract_host(share['host'], level='pool')
  495         if pool:
  496             return pool
  497 
  498         share_name = self._get_backend_share_name(share['id'])
  499         return self._client.get_aggregate_for_volume(share_name)
  500 
  501     @na_utils.trace
  502     def create_share(self, context, share, share_server):
  503         """Creates new share."""
  504         vserver, vserver_client = self._get_vserver(share_server=share_server)
  505         self._allocate_container(share, vserver, vserver_client)
  506         return self._create_export(share, share_server, vserver,
  507                                    vserver_client)
  508 
  509     @na_utils.trace
  510     def create_share_from_snapshot(self, context, share, snapshot,
  511                                    share_server=None):
  512         """Creates new share from snapshot."""
  513         vserver, vserver_client = self._get_vserver(share_server=share_server)
  514         self._allocate_container_from_snapshot(
  515             share, snapshot, vserver, vserver_client)
  516         return self._create_export(share, share_server, vserver,
  517                                    vserver_client)
  518 
  519     @na_utils.trace
  520     def _allocate_container(self, share, vserver, vserver_client,
  521                             replica=False):
  522         """Create new share on aggregate."""
  523         share_name = self._get_backend_share_name(share['id'])
  524 
  525         # Get Data ONTAP aggregate name as pool name.
  526         pool_name = share_utils.extract_host(share['host'], level='pool')
  527         if pool_name is None:
  528             msg = _("Pool is not available in the share host field.")
  529             raise exception.InvalidHost(reason=msg)
  530 
  531         provisioning_options = self._get_provisioning_options_for_share(
  532             share, vserver, replica=replica)
  533 
  534         if replica:
  535             # If this volume is intended to be a replication destination,
  536             # create it as the 'data-protection' type
  537             provisioning_options['volume_type'] = 'dp'
  538 
  539         hide_snapdir = provisioning_options.pop('hide_snapdir')
  540 
  541         LOG.debug('Creating share %(share)s on pool %(pool)s with '
  542                   'provisioning options %(options)s',
  543                   {'share': share_name, 'pool': pool_name,
  544                    'options': provisioning_options})
  545         vserver_client.create_volume(
  546             pool_name, share_name, share['size'],
  547             snapshot_reserve=self.configuration.
  548             netapp_volume_snapshot_reserve_percent, **provisioning_options)
  549 
  550         if hide_snapdir:
  551             self._apply_snapdir_visibility(
  552                 hide_snapdir, share_name, vserver_client)
  553 
  554     def _apply_snapdir_visibility(
  555             self, hide_snapdir, share_name, vserver_client):
  556 
  557         LOG.debug('Applying snapshot visibility according to hide_snapdir '
  558                   'value of %(hide_snapdir)s on share %(share)s.',
  559                   {'hide_snapdir': hide_snapdir, 'share': share_name})
  560 
  561         vserver_client.set_volume_snapdir_access(share_name, hide_snapdir)
  562 
  563     @na_utils.trace
  564     def _remap_standard_boolean_extra_specs(self, extra_specs):
  565         """Replace standard boolean extra specs with NetApp-specific ones."""
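              # For example (input assumed): {'dedupe': 'True', 'size': 10}
              # becomes {'netapp:dedup': 'true', 'size': 10}; the caller's dict
              # is untouched because only a deep copy is modified.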
  566         specs = copy.deepcopy(extra_specs)
  567         for (key, netapp_key) in self.STANDARD_BOOLEAN_EXTRA_SPECS_MAP.items():
  568             if key in specs:
  569                 bool_value = share_types.parse_boolean_extra_spec(key,
  570                                                                   specs[key])
  571                 specs[netapp_key] = 'true' if bool_value else 'false'
  572                 del specs[key]
  573         return specs
  574 
  575     @na_utils.trace
  576     def _check_extra_specs_validity(self, share, extra_specs):
  577         """Check if the extra_specs have valid values."""
  578         self._check_boolean_extra_specs_validity(
  579             share, extra_specs, list(self.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP))
  580         self._check_string_extra_specs_validity(share, extra_specs)
  581 
  582     @na_utils.trace
  583     def _check_string_extra_specs_validity(self, share, extra_specs):
  584         """Check if the string_extra_specs have valid values."""
  585         if 'netapp:max_files' in extra_specs:
  586             self._check_if_max_files_is_valid(share,
  587                                               extra_specs['netapp:max_files'])
  588 
  589     @na_utils.trace
  590     def _check_if_max_files_is_valid(self, share, value):
  591         """Check if max_files has a valid value."""
  592         if int(value) < 0:
  593             args = {'value': value, 'key': 'netapp:max_files',
  594                     'type_id': share['share_type_id'], 'share_id': share['id']}
  595             msg = _('Invalid value "%(value)s" for extra_spec "%(key)s" '
  596                     'in share_type %(type_id)s for share %(share_id)s.')
  597             raise exception.NetAppException(msg % args)
  598 
  599     @na_utils.trace
  600     def _check_boolean_extra_specs_validity(self, share, specs,
  601                                             keys_of_interest):
  602         # cDOT compression requires deduplication.
  603         dedup = specs.get('netapp:dedup', None)
  604         compression = specs.get('netapp:compression', None)
  605         if dedup is not None and compression is not None:
  606             if dedup.lower() == 'false' and compression.lower() == 'true':
  607                 spec = {'netapp:dedup': dedup,
  608                         'netapp:compression': compression}
  609                 type_id = share['share_type_id']
  610                 share_id = share['id']
  611                 args = {'type_id': type_id, 'share_id': share_id, 'spec': spec}
  612                 msg = _('Invalid combination of extra_specs in share_type '
  613                         '%(type_id)s for share %(share_id)s: %(spec)s: '
  614                         'deduplication must be enabled in order for '
  615                         'compression to be enabled.')
  616                 raise exception.Invalid(msg % args)
  617         # Check that the remaining boolean extra specs have valid values.
  618         # Extra spec values must be (ignoring case) 'true' or 'false'.
  619         for key in keys_of_interest:
  620             value = specs.get(key)
  621             if value is not None and value.lower() not in ['true', 'false']:
  622                 type_id = share['share_type_id']
  623                 share_id = share['id']
  624                 arg_map = {'value': value, 'key': key, 'type_id': type_id,
  625                            'share_id': share_id}
  626                 msg = _('Invalid value "%(value)s" for extra_spec "%(key)s" '
  627                         'in share_type %(type_id)s for share %(share_id)s.')
  628                 raise exception.Invalid(msg % arg_map)
  629 
  630     @na_utils.trace
  631     def _get_boolean_provisioning_options(self, specs, boolean_specs_map):
  632         """Given extra specs, return corresponding client library kwargs.
  633 
  634         Build a full set of client library provisioning kwargs, filling in a
  635         default value if an explicit value has not been supplied via a
  636         corresponding extra spec.  Boolean extra spec values are "true" or
  637         "false", with missing specs treated as "false".  Provisioning kwarg
  638         values are True or False.
  639         """
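              # Example under these rules (specs assumed): {'netapp:dedup': 'true'}
              # yields {'thin_provisioned': False, 'dedup_enabled': True,
              # 'compression_enabled': False, 'split': False, 'hide_snapdir': False}.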
  640         # Extract the extra spec keys of concern and their corresponding
  641         # kwarg keys as lists.
  642         keys_of_interest = list(boolean_specs_map)
  643         provisioning_args = [boolean_specs_map[key]
  644                              for key in keys_of_interest]
  645         # Set missing spec values to 'false'
  646         for key in keys_of_interest:
  647             if key not in specs:
  648                 specs[key] = 'false'
  649         # Build a list of Boolean provisioning arguments from the string
  650         # equivalents in the spec values.
  651         provisioning_values = [specs[key].lower() == 'true' for key in
  652                                keys_of_interest]
  653         # Combine the list of provisioning args and the list of provisioning
  654         # values into a dictionary suitable for use as kwargs when invoking
  655         # provisioning methods from the client API library.
  656         return dict(zip(provisioning_args, provisioning_values))
  657 
  658     @na_utils.trace
  659     def _get_string_provisioning_options(self, specs, string_specs_map):
  660         """Given extra specs, return corresponding client library kwargs.
  661 
  662         Build a full set of client library provisioning kwargs, filling in a
  663         default value if an explicit value has not been supplied via a
  664         corresponding extra spec.
  665         """
  666         # Extract the extra spec keys of concern and their corresponding
  667         # kwarg keys as lists.
  668         keys_of_interest = list(string_specs_map)
  669         provisioning_args = [string_specs_map[key]
  670                              for key in keys_of_interest]
  671         # Set missing spec values to None
  672         for key in keys_of_interest:
  673             if key not in specs:
  674                 specs[key] = None
  675         provisioning_values = [specs[key] for key in keys_of_interest]
  676 
  677         # Combine the list of provisioning args and the list of provisioning
  678         # values into a dictionary suitable for use as kwargs when invoking
  679         # provisioning methods from the client API library.
  680         return dict(zip(provisioning_args, provisioning_values))
  681 
  682     def _get_normalized_qos_specs(self, extra_specs):
  683         if not extra_specs.get('qos'):
  684             return {}
  685 
  686         normalized_qos_specs = {
  687             self.QOS_SPECS[key.lower()]: value
  688             for key, value in extra_specs.items()
  689             if self.QOS_SPECS.get(key.lower())
  690         }
  691         if not normalized_qos_specs:
  692             msg = _("The extra-spec 'qos' is set to True, but no netapp "
  693                     "supported qos-specs have been specified in the share "
  694                     "type. Cannot provision a QoS policy. Specify any of the "
  695                     "following extra-specs and try again: %s")
  696             raise exception.NetAppException(msg % list(self.QOS_SPECS))
  697 
  698         # TODO(gouthamr): Modify check when throughput floors are allowed
  699         if len(normalized_qos_specs) > 1:
  700             msg = _('Only one NetApp QoS spec can be set at a time. '
  701                     'Specified QoS limits: %s')
  702             raise exception.NetAppException(msg % normalized_qos_specs)
  703 
  704         return normalized_qos_specs
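              # Example (extra specs assumed): {'qos': True, 'netapp:maxiops': '1000'}
              # normalizes to {'maxiops': '1000'}; two NetApp QoS specs at once, or
              # 'qos' set without any of them, raises NetAppException as above.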
  705 
  706     def _get_max_throughput(self, share_size, qos_specs):
  707         # QoS limits are exclusive of one another.
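              # For instance (values assumed), a 100 GiB share with
              # {'maxiopspergib': '50'} yields '5000iops', while {'maxbps': '104857600'}
              # yields '104857600B/s' regardless of the share size.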
  708         if 'maxiops' in qos_specs:
  709             return '%siops' % qos_specs['maxiops']
  710         elif 'maxiopspergib' in qos_specs:
  711             return '%siops' % six.text_type(
  712                 int(qos_specs['maxiopspergib']) * int(share_size))
  713         elif 'maxbps' in qos_specs:
  714             return '%sB/s' % qos_specs['maxbps']
  715         elif 'maxbpspergib' in qos_specs:
  716             return '%sB/s' % six.text_type(
  717                 int(qos_specs['maxbpspergib']) * int(share_size))
  718 
  719     @na_utils.trace
  720     def _create_qos_policy_group(self, share, vserver, qos_specs):
  721         max_throughput = self._get_max_throughput(share['size'], qos_specs)
  722         qos_policy_group_name = self._get_backend_qos_policy_group_name(
  723             share['id'])
  724         self._client.qos_policy_group_create(qos_policy_group_name, vserver,
  725                                              max_throughput=max_throughput)
  726         return qos_policy_group_name
  727 
  728     @na_utils.trace
  729     def _get_provisioning_options_for_share(self, share, vserver,
  730                                             replica=False):
  731         """Return provisioning options from a share.
  732 
  733         Starting with a share, this method gets the extra specs, rationalizes
  734         NetApp vs. standard extra spec values, ensures their validity, and
  735         returns them in a form suitable for passing to various API client
  736         methods.
  737         """
  738         extra_specs = share_types.get_extra_specs_from_share(share)
  739         extra_specs = self._remap_standard_boolean_extra_specs(extra_specs)
  740         self._check_extra_specs_validity(share, extra_specs)
  741         provisioning_options = self._get_provisioning_options(extra_specs)
  742         qos_specs = self._get_normalized_qos_specs(extra_specs)
  743         if qos_specs and not replica:
  744             qos_policy_group = self._create_qos_policy_group(
  745                 share, vserver, qos_specs)
  746             provisioning_options['qos_policy_group'] = qos_policy_group
  747         return provisioning_options
  748 
  749     @na_utils.trace
  750     def _get_provisioning_options(self, specs):
  751         """Return a merged result of string and boolean provisioning options."""
  752         boolean_args = self._get_boolean_provisioning_options(
  753             specs, self.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP)
  754 
  755         string_args = self._get_string_provisioning_options(
  756             specs, self.STRING_QUALIFIED_EXTRA_SPECS_MAP)
  757         result = boolean_args.copy()
  758         result.update(string_args)
  759 
  760         result['encrypt'] = self._get_nve_option(specs)
  761 
  762         return result
  763 
  764     def _get_nve_option(self, specs):
  765         if 'netapp_flexvol_encryption' in specs:
  766             nve = specs['netapp_flexvol_encryption'].lower() == 'true'
  767         else:
  768             nve = False
  769 
  770         return nve
  771 
  772     @na_utils.trace
  773     def _check_aggregate_extra_specs_validity(self, aggregate_name, specs):
  774 
  775         for specs_key in ('netapp_disk_type', 'netapp_raid_type'):
  776             aggr_value = self._ssc_stats.get(aggregate_name, {}).get(specs_key)
  777             specs_value = specs.get(specs_key)
  778 
  779             if aggr_value and specs_value and aggr_value != specs_value:
  780                 msg = _('Invalid value "%(value)s" for extra_spec "%(key)s" '
  781                         'in aggregate %(aggr)s.')
  782                 msg_args = {
  783                     'value': specs_value,
  784                     'key': specs_key,
  785                     'aggr': aggregate_name
  786                 }
  787                 raise exception.NetAppException(msg % msg_args)
  788 
  789     @na_utils.trace
  790     def _allocate_container_from_snapshot(
  791             self, share, snapshot, vserver, vserver_client,
  792             snapshot_name_func=_get_backend_snapshot_name):
  793         """Clones existing share."""
  794         share_name = self._get_backend_share_name(share['id'])
  795         parent_share_name = self._get_backend_share_name(snapshot['share_id'])
  796         if snapshot.get('provider_location') is None:
  797             parent_snapshot_name = snapshot_name_func(self, snapshot['id'])
  798         else:
  799             parent_snapshot_name = snapshot['provider_location']
  800 
  801         provisioning_options = self._get_provisioning_options_for_share(
  802             share, vserver)
  803 
  804         hide_snapdir = provisioning_options.pop('hide_snapdir')
  805 
  806         LOG.debug('Creating share from snapshot %s', snapshot['id'])
  807         vserver_client.create_volume_clone(share_name, parent_share_name,
  808                                            parent_snapshot_name,
  809                                            **provisioning_options)
  810         if share['size'] > snapshot['size']:
  811             vserver_client.set_volume_size(share_name, share['size'])
  812 
  813         if hide_snapdir:
  814             self._apply_snapdir_visibility(
  815                 hide_snapdir, share_name, vserver_client)
  816 
  817     @na_utils.trace
  818     def _share_exists(self, share_name, vserver_client):
  819         return vserver_client.volume_exists(share_name)
  820 
  821     @na_utils.trace
  822     def delete_share(self, context, share, share_server=None):
  823         """Deletes share."""
  824         try:
  825             vserver, vserver_client = self._get_vserver(
  826                 share_server=share_server)
  827         except (exception.InvalidInput,
  828                 exception.VserverNotSpecified,
  829                 exception.VserverNotFound) as error:
  830             LOG.warning("Could not determine share server for share being "
  831                         "deleted: %(share)s. Deletion of share record "
  832                         "will proceed anyway. Error: %(error)s",
  833                         {'share': share['id'], 'error': error})
  834             return
  835 
  836         share_name = self._get_backend_share_name(share['id'])
  837         if self._share_exists(share_name, vserver_client):
  838             self._remove_export(share, vserver_client)
  839             self._deallocate_container(share_name, vserver_client)
  840             qos_policy_for_share = self._get_backend_qos_policy_group_name(
  841                 share['id'])
  842             self._client.mark_qos_policy_group_for_deletion(
  843                 qos_policy_for_share)
  844         else:
  845             LOG.info("Share %s does not exist.", share['id'])
  846 
  847     @na_utils.trace
  848     def _deallocate_container(self, share_name, vserver_client):
  849         """Free share space."""
  850         vserver_client.unmount_volume(share_name, force=True)
  851         vserver_client.offline_volume(share_name)
  852         vserver_client.delete_volume(share_name)
  853 
  854     @na_utils.trace
  855     def _create_export(self, share, share_server, vserver, vserver_client,
  856                        clear_current_export_policy=True):
  857         """Creates NAS storage."""
  858         helper = self._get_helper(share)
  859         helper.set_client(vserver_client)
  860         share_name = self._get_backend_share_name(share['id'])
  861 
  862         interfaces = vserver_client.get_network_interfaces(
  863             protocols=[share['share_proto']])
  864 
  865         if not interfaces:
  866             msg = _('Cannot find network interfaces for Vserver %(vserver)s '
  867                     'and protocol %(proto)s.')
  868             msg_args = {'vserver': vserver, 'proto': share['share_proto']}
  869             raise exception.NetAppException(msg % msg_args)
  870 
  871         # Get LIF addresses with metadata
  872         export_addresses = self._get_export_addresses_with_metadata(
  873             share, share_server, interfaces)
  874 
  875         # Create the share and get a callback for generating export locations
  876         callback = helper.create_share(
  877             share, share_name,
  878             clear_current_export_policy=clear_current_export_policy)
  879 
  880         # Generate export locations using addresses, metadata and callback
  881         export_locations = [
  882             {
  883                 'path': callback(export_address),
  884                 'is_admin_only': metadata.pop('is_admin_only', False),
  885                 'metadata': metadata,
  886             }
  887             for export_address, metadata
  888             in copy.deepcopy(export_addresses).items()
  889         ]
  890 
  891         # Sort the export locations to report preferred paths first
  892         export_locations = self._sort_export_locations_by_preferred_paths(
  893             export_locations)
  894 
  895         return export_locations
  896 
  897     @na_utils.trace
  898     def _get_export_addresses_with_metadata(self, share, share_server,
  899                                             interfaces):
  900         """Return interface addresses with locality and other metadata."""
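              # Shape of the returned mapping (addresses assumed):
              #   {'10.0.0.5': {'is_admin_only': False, 'preferred': True},
              #    '192.168.1.7': {'is_admin_only': True, 'preferred': False}}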
  901 
  902         # Get home node so we can identify preferred paths
  903         aggregate_name = share_utils.extract_host(share['host'], level='pool')
  904         home_node = self._get_aggregate_node(aggregate_name)
  905 
  906         # Get admin LIF addresses so we can identify admin export locations
  907         admin_addresses = self._get_admin_addresses_for_share_server(
  908             share_server)
  909 
  910         addresses = {}
  911         for interface in interfaces:
  912 
  913             address = interface['address']
  914             is_admin_only = address in admin_addresses
  915 
  916             if home_node:
  917                 preferred = interface.get('home-node') == home_node
  918             else:
  919                 preferred = False
  920 
  921             addresses[address] = {
  922                 'is_admin_only': is_admin_only,
  923                 'preferred': preferred,
  924             }
  925 
  926         return addresses
  927 
  928     @na_utils.trace
  929     def _get_admin_addresses_for_share_server(self, share_server):
  930 
  931         if not share_server:
  932             return []
  933 
  934         admin_addresses = []
  935         for network_allocation in share_server.get('network_allocations'):
  936             if network_allocation['label'] == 'admin':
  937                 admin_addresses.append(network_allocation['ip_address'])
  938 
  939         return admin_addresses
  940 
  941     @na_utils.trace
  942     def _sort_export_locations_by_preferred_paths(self, export_locations):
  943         """Sort the export locations to report preferred paths first."""
  944 
  945         sort_key = lambda location: location.get(
  946             'metadata', {}).get('preferred') is not True
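              # The key maps preferred locations to False and all others to True,
              # so an ascending sort (False before True) lists preferred paths
              # first; e.g. a location whose metadata has 'preferred': True comes
              # before one with no 'preferred' flag at all.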
  947 
  948         return sorted(export_locations, key=sort_key)
  949 
  950     @na_utils.trace
  951     def _remove_export(self, share, vserver_client):
  952         """Deletes NAS storage."""
  953         helper = self._get_helper(share)
  954         helper.set_client(vserver_client)
  955         share_name = self._get_backend_share_name(share['id'])
  956         target = helper.get_target(share)
  957         # The share may be in an error state, in which case no target exists.
  958         if target:
  959             helper.delete_share(share, share_name)
  960 
  961     @na_utils.trace
  962     def create_snapshot(self, context, snapshot, share_server=None):
  963         """Creates a snapshot of a share."""
  964         vserver, vserver_client = self._get_vserver(share_server=share_server)
  965         share_name = self._get_backend_share_name(snapshot['share_id'])
  966         snapshot_name = self._get_backend_snapshot_name(snapshot['id'])
  967         LOG.debug('Creating snapshot %s', snapshot_name)
  968         vserver_client.create_snapshot(share_name, snapshot_name)
  969         return {'provider_location': snapshot_name}
  970 
  971     def revert_to_snapshot(self, context, snapshot, share_server=None):
  972         """Reverts a share (in place) to the specified snapshot."""
  973         vserver, vserver_client = self._get_vserver(share_server=share_server)
  974         share_name = self._get_backend_share_name(snapshot['share_id'])
  975         snapshot_name = (snapshot.get('provider_location') or
  976                          self._get_backend_snapshot_name(snapshot['id']))
  977         LOG.debug('Restoring snapshot %s', snapshot_name)
  978         vserver_client.restore_snapshot(share_name, snapshot_name)
  979 
  980     @na_utils.trace
  981     def delete_snapshot(self, context, snapshot, share_server=None,
  982                         snapshot_name=None):
  983         """Deletes a snapshot of a share."""
  984         try:
  985             vserver, vserver_client = self._get_vserver(
  986                 share_server=share_server)
  987         except (exception.InvalidInput,
  988                 exception.VserverNotSpecified,
  989                 exception.VserverNotFound) as error:
  990             LOG.warning("Could not determine share server for snapshot "
  991                         "being deleted: %(snap)s. Deletion of snapshot "
  992                         "record will proceed anyway. Error: %(error)s",
  993                         {'snap': snapshot['id'], 'error': error})
  994             return
  995 
  996         share_name = self._get_backend_share_name(snapshot['share_id'])
  997         snapshot_name = (snapshot.get('provider_location') or snapshot_name or
  998                          self._get_backend_snapshot_name(snapshot['id']))
  999 
 1000         try:
 1001             self._delete_snapshot(vserver_client, share_name, snapshot_name)
 1002         except exception.SnapshotResourceNotFound:
 1003             msg = ("Snapshot %(snap)s does not exist on share %(share)s.")
 1004             msg_args = {'snap': snapshot_name, 'share': share_name}
 1005             LOG.info(msg, msg_args)
 1006 
 1007     def _delete_snapshot(self, vserver_client, share_name, snapshot_name):
 1008         """Deletes a backend snapshot, handling busy snapshots as needed."""
 1009 
 1010         backend_snapshot = vserver_client.get_snapshot(share_name,
 1011                                                        snapshot_name)
 1012 
 1013         LOG.debug('Deleting snapshot %(snap)s for share %(share)s.',
 1014                   {'snap': snapshot_name, 'share': share_name})
 1015 
 1016         if not backend_snapshot['busy']:
 1017             vserver_client.delete_snapshot(share_name, snapshot_name)
 1018 
 1019         elif backend_snapshot['owners'] == {'volume clone'}:
 1020             # The snapshot is locked by clone(s): split the clones, then soft delete
 1021             snapshot_children = vserver_client.get_clone_children_for_snapshot(
 1022                 share_name, snapshot_name)
 1023             for snapshot_child in snapshot_children:
 1024                 vserver_client.split_volume_clone(snapshot_child['name'])
 1025 
 1026             vserver_client.soft_delete_snapshot(share_name, snapshot_name)
 1027 
 1028         else:
 1029             raise exception.ShareSnapshotIsBusy(snapshot_name=snapshot_name)
 1030 
 1031     @na_utils.trace
 1032     def manage_existing(self, share, driver_options, share_server=None):
 1033         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1034         share_size = self._manage_container(share, vserver, vserver_client)
 1035         export_locations = self._create_export(share, share_server, vserver,
 1036                                                vserver_client)
 1037         return {'size': share_size, 'export_locations': export_locations}
 1038 
 1039     @na_utils.trace
 1040     def unmanage(self, share, share_server=None):
 1041         pass
 1042 
 1043     @na_utils.trace
 1044     def _manage_container(self, share, vserver, vserver_client):
 1045         """Bring existing volume under management as a share."""
 1046 
 1047         protocol_helper = self._get_helper(share)
 1048         protocol_helper.set_client(vserver_client)
 1049 
 1050         volume_name = protocol_helper.get_share_name_for_share(share)
 1051         if not volume_name:
 1052             msg = _('Volume could not be determined from export location '
 1053                     '%(export)s.')
 1054             msg_args = {'export': share['export_location']}
 1055             raise exception.ManageInvalidShare(reason=msg % msg_args)
 1056 
 1057         share_name = self._get_backend_share_name(share['id'])
 1058         aggregate_name = share_utils.extract_host(share['host'], level='pool')
 1059 
 1060         # Get existing volume info
 1061         volume = vserver_client.get_volume_to_manage(aggregate_name,
 1062                                                      volume_name)
 1063 
 1064         if not volume:
 1065             msg = _('Volume %(volume)s not found on aggregate %(aggr)s.')
 1066             msg_args = {'volume': volume_name, 'aggr': aggregate_name}
 1067             raise exception.ManageInvalidShare(reason=msg % msg_args)
 1068 
 1069         # When calculating the size, round up to the next GB.
 1070         volume_size = int(math.ceil(float(volume['size']) / units.Gi))
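              # e.g. (size assumed) a volume of 1288490189 bytes (~1.2 GiB) is
              # reported as a 2 GB share, since math.ceil rounds the quotient up.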
 1071 
 1072         # Validate extra specs
 1073         extra_specs = share_types.get_extra_specs_from_share(share)
 1074         extra_specs = self._remap_standard_boolean_extra_specs(extra_specs)
 1075         try:
 1076             self._check_extra_specs_validity(share, extra_specs)
 1077             self._check_aggregate_extra_specs_validity(aggregate_name,
 1078                                                        extra_specs)
 1079         except exception.ManilaException as ex:
 1080             raise exception.ManageExistingShareTypeMismatch(
 1081                 reason=six.text_type(ex))
 1082 
 1083         # Ensure volume is manageable
 1084         self._validate_volume_for_manage(volume, vserver_client)
 1085 
 1086         provisioning_options = self._get_provisioning_options(extra_specs)
 1087 
 1088         debug_args = {
 1089             'share': share_name,
 1090             'aggr': aggregate_name,
 1091             'options': provisioning_options
 1092         }
 1093         LOG.debug('Managing share %(share)s on aggregate %(aggr)s with '
 1094                   'provisioning options %(options)s', debug_args)
 1095 
 1096         # Rename & remount volume on new path
 1097         vserver_client.unmount_volume(volume_name)
 1098         vserver_client.set_volume_name(volume_name, share_name)
 1099         vserver_client.mount_volume(share_name)
 1100 
 1101         qos_policy_group_name = self._modify_or_create_qos_for_existing_share(
 1102             share, extra_specs, vserver, vserver_client)
 1103         if qos_policy_group_name:
 1104             provisioning_options['qos_policy_group'] = qos_policy_group_name
 1105 
 1106         # Modify volume to match extra specs
 1107         vserver_client.modify_volume(aggregate_name, share_name,
 1108                                      **provisioning_options)
 1109 
 1110         # Save original volume info to private storage
 1111         original_data = {
 1112             'original_name': volume['name'],
 1113             'original_junction_path': volume['junction-path']
 1114         }
 1115         self.private_storage.update(share['id'], original_data)
 1116 
 1117         return volume_size
 1118 
 1119     @na_utils.trace
 1120     def _validate_volume_for_manage(self, volume, vserver_client):
 1121         """Ensure volume is a candidate for becoming a share."""
 1122 
 1123         # Check volume info, extra specs validity
 1124         if volume['type'] != 'rw' or volume['style'] != 'flex':
 1125             msg = _('Volume %(volume)s must be a read-write flexible volume.')
 1126             msg_args = {'volume': volume['name']}
 1127             raise exception.ManageInvalidShare(reason=msg % msg_args)
 1128 
 1129         if vserver_client.volume_has_luns(volume['name']):
 1130             msg = _('Volume %(volume)s must not contain LUNs.')
 1131             msg_args = {'volume': volume['name']}
 1132             raise exception.ManageInvalidShare(reason=msg % msg_args)
 1133 
 1134         if vserver_client.volume_has_junctioned_volumes(volume['name']):
 1135             msg = _('Volume %(volume)s must not have junctioned volumes.')
 1136             msg_args = {'volume': volume['name']}
 1137             raise exception.ManageInvalidShare(reason=msg % msg_args)
 1138 
 1139         if vserver_client.volume_has_snapmirror_relationships(volume):
 1140             msg = _('Volume %(volume)s must not be in any snapmirror '
 1141                     'relationships.')
 1142             msg_args = {'volume': volume['name']}
 1143             raise exception.ManageInvalidShare(reason=msg % msg_args)
 1144 
 1145     @na_utils.trace
 1146     def manage_existing_snapshot(
 1147             self, snapshot, driver_options, share_server=None):
 1148         """Brings an existing snapshot under Manila management."""
 1149         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1150         share_name = self._get_backend_share_name(snapshot['share_id'])
 1151         existing_snapshot_name = snapshot.get('provider_location')
 1152         new_snapshot_name = self._get_backend_snapshot_name(snapshot['id'])
 1153 
 1154         if not existing_snapshot_name:
 1155             msg = _('provider_location not specified.')
 1156             raise exception.ManageInvalidShareSnapshot(reason=msg)
 1157 
 1158         # Get the volume containing the snapshot so we can report its size
 1159         try:
 1160             volume = vserver_client.get_volume(share_name)
 1161         except (netapp_api.NaApiError,
 1162                 exception.StorageResourceNotFound,
 1163                 exception.NetAppException):
 1164             msg = _('Could not determine snapshot %(snap)s size from '
 1165                     'volume %(vol)s.')
 1166             msg_args = {'snap': existing_snapshot_name, 'vol': share_name}
 1167             LOG.exception(msg, msg_args)
 1168             raise exception.ShareNotFound(share_id=snapshot['share_id'])
 1169 
 1170         # Ensure there aren't any mirrors on this volume
 1171         if vserver_client.volume_has_snapmirror_relationships(volume):
  1172             msg = _('Share %(vol)s has SnapMirror relationships.')
 1173             msg_args = {'vol': share_name}
 1174             raise exception.ManageInvalidShareSnapshot(reason=msg % msg_args)
 1175 
 1176         # Rename snapshot
 1177         try:
 1178             vserver_client.rename_snapshot(share_name,
 1179                                            existing_snapshot_name,
 1180                                            new_snapshot_name)
 1181         except netapp_api.NaApiError:
 1182             msg = _('Could not rename snapshot %(snap)s in share %(vol)s.')
 1183             msg_args = {'snap': existing_snapshot_name, 'vol': share_name}
 1184             raise exception.ManageInvalidShareSnapshot(reason=msg % msg_args)
 1185 
 1186         # Save original snapshot info to private storage
 1187         original_data = {'original_name': existing_snapshot_name}
 1188         self.private_storage.update(snapshot['id'], original_data)
 1189 
 1190         # When calculating the size, round up to the next GB.
 1191         size = int(math.ceil(float(volume['size']) / units.Gi))
 1192 
 1193         return {'size': size, 'provider_location': new_snapshot_name}
 1194 
 1195     @na_utils.trace
 1196     def unmanage_snapshot(self, snapshot, share_server=None):
 1197         """Removes the specified snapshot from Manila management."""
 1198 
 1199     @na_utils.trace
 1200     def create_consistency_group_from_cgsnapshot(
 1201             self, context, cg_dict, cgsnapshot_dict, share_server=None):
 1202         """Creates a consistency group from an existing CG snapshot."""
 1203         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1204 
 1205         # Ensure there is something to do
 1206         if not cgsnapshot_dict['share_group_snapshot_members']:
 1207             return None, None
 1208 
 1209         clone_list = self._collate_cg_snapshot_info(cg_dict, cgsnapshot_dict)
 1210         share_update_list = []
 1211 
 1212         LOG.debug('Creating consistency group from CG snapshot %s.',
 1213                   cgsnapshot_dict['id'])
 1214 
 1215         for clone in clone_list:
 1216 
 1217             self._allocate_container_from_snapshot(
 1218                 clone['share'], clone['snapshot'], vserver, vserver_client,
 1219                 NetAppCmodeFileStorageLibrary._get_backend_cg_snapshot_name)
 1220 
 1221             export_locations = self._create_export(clone['share'],
 1222                                                    share_server,
 1223                                                    vserver,
 1224                                                    vserver_client)
 1225             share_update_list.append({
 1226                 'id': clone['share']['id'],
 1227                 'export_locations': export_locations,
 1228             })
 1229 
 1230         return None, share_update_list
 1231 
 1232     def _collate_cg_snapshot_info(self, cg_dict, cgsnapshot_dict):
 1233         """Collate the data for a clone of a CG snapshot.
 1234 
 1235         Given two data structures, a CG snapshot (cgsnapshot_dict) and a new
 1236         CG to be cloned from the snapshot (cg_dict), match up both structures
 1237         into a list of dicts (share & snapshot) suitable for use by existing
 1238         driver methods that clone individual share snapshots.
 1239         """
 1240 
 1241         clone_list = list()
 1242 
 1243         for share in cg_dict['shares']:
 1244 
 1245             clone_info = {'share': share}
 1246 
 1247             for cgsnapshot_member in (
 1248                     cgsnapshot_dict['share_group_snapshot_members']):
 1249                 if (share['source_share_group_snapshot_member_id'] ==
 1250                         cgsnapshot_member['id']):
 1251                     clone_info['snapshot'] = {
 1252                         'share_id': cgsnapshot_member['share_id'],
 1253                         'id': cgsnapshot_dict['id'],
 1254                         'size': cgsnapshot_member['size'],
 1255                     }
 1256                     break
 1257 
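                   # This 'else' belongs to the inner for loop above; it runs
                   # only when the loop finished without a 'break', i.e. when
                   # no cgsnapshot member matched the share being cloned.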
 1258             else:
 1259                 msg = _("Invalid data supplied for creating consistency group "
 1260                         "from CG snapshot %s.") % cgsnapshot_dict['id']
 1261                 raise exception.InvalidShareGroup(reason=msg)
 1262 
 1263             clone_list.append(clone_info)
 1264 
 1265         return clone_list
 1266 
 1267     @na_utils.trace
 1268     def create_cgsnapshot(self, context, snap_dict, share_server=None):
 1269         """Creates a consistency group snapshot."""
 1270         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1271 
 1272         share_names = [self._get_backend_share_name(member['share_id'])
 1273                        for member in
 1274                        snap_dict.get('share_group_snapshot_members', [])]
 1275         snapshot_name = self._get_backend_cg_snapshot_name(snap_dict['id'])
 1276 
 1277         if share_names:
 1278             LOG.debug('Creating CG snapshot %s.', snapshot_name)
 1279             vserver_client.create_cg_snapshot(share_names, snapshot_name)
 1280 
 1281         return None, None
 1282 
 1283     @na_utils.trace
 1284     def delete_cgsnapshot(self, context, snap_dict, share_server=None):
 1285         """Deletes a consistency group snapshot."""
 1286         try:
 1287             vserver, vserver_client = self._get_vserver(
 1288                 share_server=share_server)
 1289         except (exception.InvalidInput,
 1290                 exception.VserverNotSpecified,
 1291                 exception.VserverNotFound) as error:
 1292             LOG.warning("Could not determine share server for CG snapshot "
 1293                         "being deleted: %(snap)s. Deletion of CG snapshot "
 1294                         "record will proceed anyway. Error: %(error)s",
 1295                         {'snap': snap_dict['id'], 'error': error})
 1296             return None, None
 1297 
 1298         share_names = [self._get_backend_share_name(member['share_id'])
 1299                        for member in (
 1300                            snap_dict.get('share_group_snapshot_members', []))]
 1301         snapshot_name = self._get_backend_cg_snapshot_name(snap_dict['id'])
 1302 
 1303         for share_name in share_names:
 1304             try:
 1305                 self._delete_snapshot(
 1306                     vserver_client, share_name, snapshot_name)
 1307             except exception.SnapshotResourceNotFound:
 1308                 msg = ("Snapshot %(snap)s does not exist on share "
 1309                        "%(share)s.")
 1310                 msg_args = {'snap': snapshot_name, 'share': share_name}
 1311                 LOG.info(msg, msg_args)
 1312                 continue
 1313 
 1314         return None, None
 1315 
 1316     @staticmethod
 1317     def _is_group_cg(context, share_group):
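               # A share group whose consistent_snapshot_support capability is
               # 'host' can use this driver's consistency group (CG) snapshot
               # primitives; any other value falls back to generic handling.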
 1318         return 'host' == share_group.consistent_snapshot_support
 1319 
 1320     @na_utils.trace
 1321     def create_group_snapshot(self, context, snap_dict, fallback_create,
 1322                               share_server=None):
 1323         share_group = snap_dict['share_group']
 1324         if self._is_group_cg(context, share_group):
 1325             return self.create_cgsnapshot(context, snap_dict,
 1326                                           share_server=share_server)
 1327         else:
 1328             return fallback_create(context, snap_dict,
 1329                                    share_server=share_server)
 1330 
 1331     @na_utils.trace
 1332     def delete_group_snapshot(self, context, snap_dict, fallback_delete,
 1333                               share_server=None):
 1334         share_group = snap_dict['share_group']
 1335         if self._is_group_cg(context, share_group):
 1336             return self.delete_cgsnapshot(context, snap_dict,
 1337                                           share_server=share_server)
 1338         else:
 1339             return fallback_delete(context, snap_dict,
 1340                                    share_server=share_server)
 1341 
 1342     @na_utils.trace
 1343     def create_group_from_snapshot(self, context, share_group,
 1344                                    snapshot_dict, fallback_create,
 1345                                    share_server=None):
 1346         share_group2 = snapshot_dict['share_group']
 1347         if self._is_group_cg(context, share_group2):
 1348             return self.create_consistency_group_from_cgsnapshot(
 1349                 context, share_group, snapshot_dict,
 1350                 share_server=share_server)
 1351         else:
 1352             return fallback_create(context, share_group, snapshot_dict,
 1353                                    share_server=share_server)
 1354 
 1355     @na_utils.trace
 1356     def _adjust_qos_policy_with_volume_resize(self, share, new_size,
 1357                                               vserver_client):
 1358         # Adjust QoS policy on a share if any
 1359         if self._have_cluster_creds:
 1360             share_name = self._get_backend_share_name(share['id'])
 1361             share_on_the_backend = vserver_client.get_volume(share_name)
 1362             qos_policy_on_share = share_on_the_backend['qos-policy-group-name']
 1363             if qos_policy_on_share is None:
 1364                 return
 1365 
 1366             extra_specs = share_types.get_extra_specs_from_share(share)
 1367             qos_specs = self._get_normalized_qos_specs(extra_specs)
 1368             size_dependent_specs = {k: v for k, v in qos_specs.items() if k in
 1369                                     self.SIZE_DEPENDENT_QOS_SPECS}
 1370             if size_dependent_specs:
 1371                 max_throughput = self._get_max_throughput(
 1372                     new_size, size_dependent_specs)
 1373                 self._client.qos_policy_group_modify(
 1374                     qos_policy_on_share, max_throughput)
 1375 
 1376     @na_utils.trace
 1377     def extend_share(self, share, new_size, share_server=None):
 1378         """Extends size of existing share."""
 1379         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1380         share_name = self._get_backend_share_name(share['id'])
 1381         vserver_client.set_volume_filesys_size_fixed(share_name,
 1382                                                      filesys_size_fixed=False)
 1383         LOG.debug('Extending share %(name)s to %(size)s GB.',
 1384                   {'name': share_name, 'size': new_size})
 1385         vserver_client.set_volume_size(share_name, new_size)
 1386         self._adjust_qos_policy_with_volume_resize(share, new_size,
 1387                                                    vserver_client)
 1388 
 1389     @na_utils.trace
 1390     def shrink_share(self, share, new_size, share_server=None):
 1391         """Shrinks size of existing share."""
 1392         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1393         share_name = self._get_backend_share_name(share['id'])
 1394         vserver_client.set_volume_filesys_size_fixed(share_name,
 1395                                                      filesys_size_fixed=False)
 1396         LOG.debug('Shrinking share %(name)s to %(size)s GB.',
 1397                   {'name': share_name, 'size': new_size})
 1398 
 1399         try:
 1400             vserver_client.set_volume_size(share_name, new_size)
 1401         except netapp_api.NaApiError as e:
 1402             if e.code == netapp_api.EVOLOPNOTSUPP:
  1403                 msg = _('Failed to shrink share %(share_id)s. '
  1404                         'The current used space is larger than the size '
  1405                         'requested.')
 1406                 msg_args = {'share_id': share['id']}
 1407                 LOG.error(msg, msg_args)
 1408                 raise exception.ShareShrinkingPossibleDataLoss(
 1409                     share_id=share['id'])
 1410 
 1411         self._adjust_qos_policy_with_volume_resize(
 1412             share, new_size, vserver_client)
 1413 
 1414     @na_utils.trace
 1415     def update_access(self, context, share, access_rules, add_rules,
 1416                       delete_rules, share_server=None):
 1417         """Updates access rules for a share."""
 1418         # NOTE(ameade): We do not need to add export rules to a non-active
 1419         # replica as it will fail.
 1420         replica_state = share.get('replica_state')
 1421         if (replica_state is not None and
 1422                 replica_state != constants.REPLICA_STATE_ACTIVE):
 1423             return
 1424         try:
 1425             vserver, vserver_client = self._get_vserver(
 1426                 share_server=share_server)
 1427         except (exception.InvalidInput,
 1428                 exception.VserverNotSpecified,
 1429                 exception.VserverNotFound) as error:
 1430             LOG.warning("Could not determine share server for share "
 1431                         "%(share)s during access rules update. "
 1432                         "Error: %(error)s",
 1433                         {'share': share['id'], 'error': error})
 1434             return
 1435 
 1436         share_name = self._get_backend_share_name(share['id'])
 1437         if self._share_exists(share_name, vserver_client):
 1438             helper = self._get_helper(share)
 1439             helper.set_client(vserver_client)
 1440             helper.update_access(share, share_name, access_rules)
 1441         else:
 1442             raise exception.ShareResourceNotFound(share_id=share['id'])
 1443 
 1444     def setup_server(self, network_info, metadata=None):
 1445         raise NotImplementedError()
 1446 
 1447     def teardown_server(self, server_details, security_services=None):
 1448         raise NotImplementedError()
 1449 
 1450     def get_network_allocations_number(self):
 1451         """Get number of network interfaces to be created."""
 1452         raise NotImplementedError()
 1453 
 1454     @na_utils.trace
 1455     def _update_ssc_info(self):
 1456         """Periodically runs to update Storage Service Catalog data.
 1457 
 1458         The self._ssc_stats attribute is updated with the following format.
 1459         {<aggregate_name> : {<ssc_key>: <ssc_value>}}
 1460         """
 1461         LOG.info("Updating storage service catalog information for "
 1462                  "backend '%s'", self._backend_name)
 1463 
 1464         # Work on a copy and update the ssc data atomically before returning.
 1465         ssc_stats = copy.deepcopy(self._ssc_stats)
 1466 
 1467         aggregate_names = self._find_matching_aggregates()
 1468 
 1469         # Initialize entries for each aggregate.
 1470         for aggregate_name in aggregate_names:
 1471             if aggregate_name not in ssc_stats:
 1472                 ssc_stats[aggregate_name] = {
 1473                     'netapp_aggregate': aggregate_name,
 1474                 }
 1475 
 1476         if aggregate_names:
 1477             self._update_ssc_aggr_info(aggregate_names, ssc_stats)
 1478 
 1479         self._ssc_stats = ssc_stats
 1480 
 1481     @na_utils.trace
 1482     def _update_ssc_aggr_info(self, aggregate_names, ssc_stats):
 1483         """Updates the given SSC dictionary with new disk type information.
 1484 
 1485         :param aggregate_names: The aggregates this driver cares about
 1486         :param ssc_stats: The dictionary to update
 1487         """
 1488 
 1489         if not self._have_cluster_creds:
 1490             return
 1491 
 1492         for aggregate_name in aggregate_names:
 1493 
 1494             aggregate = self._client.get_aggregate(aggregate_name)
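                   # Normalize the 'is-hybrid' flag to lowercase text
                   # ('true'/'false'), or None when the aggregate does not
                   # report it.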
 1495             hybrid = (six.text_type(aggregate.get('is-hybrid')).lower()
 1496                       if 'is-hybrid' in aggregate else None)
 1497             disk_types = self._client.get_aggregate_disk_types(aggregate_name)
 1498 
 1499             ssc_stats[aggregate_name].update({
 1500                 'netapp_raid_type': aggregate.get('raid-type'),
 1501                 'netapp_hybrid_aggregate': hybrid,
 1502                 'netapp_disk_type': disk_types,
 1503             })
 1504 
 1505     def _find_active_replica(self, replica_list):
 1506         # NOTE(ameade): Find current active replica. There can only be one
 1507         # active replica (SnapMirror source volume) at a time in cDOT.
 1508         for r in replica_list:
 1509             if r['replica_state'] == constants.REPLICA_STATE_ACTIVE:
 1510                 return r
 1511 
 1512     def _find_nonactive_replicas(self, replica_list):
 1513         """Returns a list of all except the active replica."""
 1514         return [replica for replica in replica_list
 1515                 if replica['replica_state'] != constants.REPLICA_STATE_ACTIVE]
 1516 
 1517     def create_replica(self, context, replica_list, new_replica,
 1518                        access_rules, share_snapshots, share_server=None):
 1519         """Creates the new replica on this backend and sets up SnapMirror."""
 1520         active_replica = self._find_active_replica(replica_list)
 1521         dm_session = data_motion.DataMotionSession()
 1522 
 1523         # 1. Create the destination share
 1524         dest_backend = share_utils.extract_host(new_replica['host'],
 1525                                                 level='backend_name')
 1526 
 1527         vserver = (dm_session.get_vserver_from_share(new_replica) or
 1528                    self.configuration.netapp_vserver)
 1529 
 1530         vserver_client = data_motion.get_client_for_backend(
 1531             dest_backend, vserver_name=vserver)
 1532 
 1533         self._allocate_container(new_replica, vserver, vserver_client,
 1534                                  replica=True)
 1535 
 1536         # 2. Setup SnapMirror
 1537         dm_session.create_snapmirror(active_replica, new_replica)
 1538 
 1539         model_update = {
 1540             'export_locations': [],
 1541             'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC,
 1542             'access_rules_status': constants.STATUS_ACTIVE,
 1543         }
 1544 
 1545         return model_update
 1546 
 1547     def delete_replica(self, context, replica_list, replica, share_snapshots,
 1548                        share_server=None):
 1549         """Removes the replica on this backend and destroys SnapMirror."""
 1550         dm_session = data_motion.DataMotionSession()
 1551         # 1. Remove SnapMirror
 1552         dest_backend = share_utils.extract_host(replica['host'],
 1553                                                 level='backend_name')
 1554         vserver = (dm_session.get_vserver_from_share(replica) or
 1555                    self.configuration.netapp_vserver)
 1556 
  1557         # Ensure that all potential snapmirror relationships and their
  1558         # metadata involving the replica are destroyed.
 1559         for other_replica in replica_list:
 1560             if other_replica['id'] != replica['id']:
 1561                 dm_session.delete_snapmirror(other_replica, replica)
 1562                 dm_session.delete_snapmirror(replica, other_replica)
 1563 
 1564         # 2. Delete share
 1565         vserver_client = data_motion.get_client_for_backend(
 1566             dest_backend, vserver_name=vserver)
 1567         share_name = self._get_backend_share_name(replica['id'])
 1568         if self._share_exists(share_name, vserver_client):
 1569             self._deallocate_container(share_name, vserver_client)
 1570 
 1571     def update_replica_state(self, context, replica_list, replica,
 1572                              access_rules, share_snapshots, share_server=None):
 1573         """Returns the status of the given replica on this backend."""
 1574         active_replica = self._find_active_replica(replica_list)
 1575 
 1576         share_name = self._get_backend_share_name(replica['id'])
 1577         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1578 
 1579         if not vserver_client.volume_exists(share_name):
 1580             msg = _("Volume %(share_name)s does not exist on vserver "
 1581                     "%(vserver)s.")
 1582             msg_args = {'share_name': share_name, 'vserver': vserver}
 1583             raise exception.ShareResourceNotFound(msg % msg_args)
 1584 
 1585         # NOTE(cknight): The SnapMirror may have been intentionally broken by
 1586         # a revert-to-snapshot operation, in which case this method should not
 1587         # attempt to change anything.
 1588         if active_replica['status'] == constants.STATUS_REVERTING:
 1589             return None
 1590 
 1591         dm_session = data_motion.DataMotionSession()
 1592         try:
 1593             snapmirrors = dm_session.get_snapmirrors(active_replica, replica)
 1594         except netapp_api.NaApiError:
 1595             LOG.exception("Could not get snapmirrors for replica %s.",
 1596                           replica['id'])
 1597             return constants.STATUS_ERROR
 1598 
 1599         if not snapmirrors:
 1600             if replica['status'] != constants.STATUS_CREATING:
 1601                 try:
 1602                     dm_session.create_snapmirror(active_replica, replica)
 1603                 except netapp_api.NaApiError:
 1604                     LOG.exception("Could not create snapmirror for "
 1605                                   "replica %s.", replica['id'])
 1606                     return constants.STATUS_ERROR
 1607             return constants.REPLICA_STATE_OUT_OF_SYNC
 1608 
 1609         snapmirror = snapmirrors[0]
 1610         # NOTE(dviroel): Don't try to resume or resync a SnapMirror that has
 1611         # one of the in progress transfer states, because the storage will
 1612         # answer with an error.
 1613         in_progress_status = ['preparing', 'transferring', 'finalizing']
 1614         if (snapmirror.get('mirror-state') != 'snapmirrored' and
 1615                 snapmirror.get('relationship-status') in in_progress_status):
 1616             return constants.REPLICA_STATE_OUT_OF_SYNC
 1617 
 1618         if snapmirror.get('mirror-state') != 'snapmirrored':
 1619             try:
 1620                 vserver_client.resume_snapmirror(snapmirror['source-vserver'],
 1621                                                  snapmirror['source-volume'],
 1622                                                  vserver,
 1623                                                  share_name)
 1624                 vserver_client.resync_snapmirror(snapmirror['source-vserver'],
 1625                                                  snapmirror['source-volume'],
 1626                                                  vserver,
 1627                                                  share_name)
 1628                 return constants.REPLICA_STATE_OUT_OF_SYNC
 1629             except netapp_api.NaApiError:
 1630                 LOG.exception("Could not resync snapmirror.")
 1631                 return constants.STATUS_ERROR
 1632 
 1633         last_update_timestamp = float(
 1634             snapmirror.get('last-transfer-end-timestamp', 0))
 1635         # TODO(ameade): Have a configurable RPO for replicas, for now it is
 1636         # one hour.
 1637         if (last_update_timestamp and
 1638             (timeutils.is_older_than(
 1639                 datetime.datetime.utcfromtimestamp(last_update_timestamp)
 1640                 .isoformat(), 3600))):
 1641             return constants.REPLICA_STATE_OUT_OF_SYNC
 1642 
 1643         # Check all snapshots exist
 1644         snapshots = [snap['share_replica_snapshot']
 1645                      for snap in share_snapshots]
 1646         for snap in snapshots:
 1647             snapshot_name = snap.get('provider_location')
 1648             if not vserver_client.snapshot_exists(snapshot_name, share_name):
 1649                 return constants.REPLICA_STATE_OUT_OF_SYNC
 1650 
 1651         return constants.REPLICA_STATE_IN_SYNC
 1652 
 1653     def promote_replica(self, context, replica_list, replica, access_rules,
 1654                         share_server=None):
 1655         """Switch SnapMirror relationships and allow r/w ops on replica.
 1656 
 1657         Creates a DataMotion session and switches the direction of the
 1658         SnapMirror relationship between the currently 'active' instance (
 1659         SnapMirror source volume) and the replica. Also attempts setting up
 1660         SnapMirror relationships between the other replicas and the new
 1661         SnapMirror source volume ('active' instance).
 1662         :param context: Request Context
 1663         :param replica_list: List of replicas, including the 'active' instance
 1664         :param replica: Replica to promote to SnapMirror source
 1665         :param access_rules: Access rules to apply to the replica
 1666         :param share_server: ShareServer class instance of replica
 1667         :return: Updated replica_list
 1668         """
 1669         orig_active_replica = self._find_active_replica(replica_list)
 1670 
 1671         dm_session = data_motion.DataMotionSession()
 1672 
 1673         new_replica_list = []
 1674 
 1675         # Setup the new active replica
 1676         try:
 1677             new_active_replica = (
 1678                 self._convert_destination_replica_to_independent(
 1679                     context, dm_session, orig_active_replica, replica,
 1680                     access_rules, share_server=share_server))
 1681         except exception.StorageCommunicationException:
 1682             LOG.exception("Could not communicate with the backend "
 1683                           "for replica %s during promotion.",
 1684                           replica['id'])
 1685             new_active_replica = copy.deepcopy(replica)
 1686             new_active_replica['replica_state'] = (
 1687                 constants.STATUS_ERROR)
 1688             new_active_replica['status'] = constants.STATUS_ERROR
 1689             return [new_active_replica]
 1690 
 1691         new_replica_list.append(new_active_replica)
 1692 
 1693         # Change the source replica for all destinations to the new
 1694         # active replica.
 1695         for r in replica_list:
 1696             if r['id'] != replica['id']:
 1697                 r = self._safe_change_replica_source(dm_session, r,
 1698                                                      orig_active_replica,
 1699                                                      replica,
 1700                                                      replica_list)
 1701                 new_replica_list.append(r)
 1702 
 1703         orig_active_vserver = dm_session.get_vserver_from_share(
 1704             orig_active_replica)
 1705 
 1706         # Cleanup the original active share if necessary
 1707         orig_active_replica_backend = (
 1708             share_utils.extract_host(orig_active_replica['host'],
 1709                                      level='backend_name'))
 1710         orig_active_replica_name = self._get_backend_share_name(
 1711             orig_active_replica['id'])
 1712         orig_active_vserver_client = data_motion.get_client_for_backend(
 1713             orig_active_replica_backend, vserver_name=orig_active_vserver)
 1714 
 1715         orig_active_replica_helper = self._get_helper(orig_active_replica)
 1716         orig_active_replica_helper.set_client(orig_active_vserver_client)
 1717 
 1718         try:
 1719             orig_active_replica_helper.cleanup_demoted_replica(
 1720                 orig_active_replica, orig_active_replica_name)
 1721         except exception.StorageCommunicationException:
 1722             LOG.exception("Could not cleanup the original active replica %s.",
 1723                           orig_active_replica['id'])
 1724 
 1725         # Unmount the original active replica.
 1726         self._unmount_orig_active_replica(orig_active_replica,
 1727                                           orig_active_vserver)
 1728 
 1729         self._handle_qos_on_replication_change(dm_session,
 1730                                                new_active_replica,
 1731                                                orig_active_replica,
 1732                                                share_server=share_server)
 1733 
 1734         return new_replica_list
 1735 
 1736     def _unmount_orig_active_replica(self, orig_active_replica,
 1737                                      orig_active_vserver=None):
 1738         orig_active_replica_backend = (
 1739             share_utils.extract_host(orig_active_replica['host'],
 1740                                      level='backend_name'))
 1741         orig_active_vserver_client = data_motion.get_client_for_backend(
 1742             orig_active_replica_backend,
 1743             vserver_name=orig_active_vserver)
 1744         share_name = self._get_backend_share_name(
 1745             orig_active_replica['id'])
 1746         try:
 1747             orig_active_vserver_client.unmount_volume(share_name,
 1748                                                       force=True)
 1749             LOG.info("Unmount of the original active replica %s successful.",
 1750                      orig_active_replica['id'])
 1751         except exception.StorageCommunicationException:
 1752             LOG.exception("Could not unmount the original active replica %s.",
 1753                           orig_active_replica['id'])
 1754 
 1755     def _handle_qos_on_replication_change(self, dm_session, new_active_replica,
 1756                                           orig_active_replica,
 1757                                           share_server=None):
 1758         # QoS operations: Remove and purge QoS policy on old active replica
 1759         # if any and create a new policy on the destination if necessary.
 1760         extra_specs = share_types.get_extra_specs_from_share(
 1761             orig_active_replica)
 1762         qos_specs = self._get_normalized_qos_specs(extra_specs)
 1763 
 1764         if qos_specs and self._have_cluster_creds:
 1765             dm_session.remove_qos_on_old_active_replica(orig_active_replica)
 1766             # Check if a QoS policy already exists for the promoted replica,
 1767             # if it does, modify it as necessary, else create it:
 1768             try:
 1769                 new_active_replica_qos_policy = (
 1770                     self._get_backend_qos_policy_group_name(
 1771                         new_active_replica['id']))
 1772                 vserver, vserver_client = self._get_vserver(
 1773                     share_server=share_server)
 1774 
 1775                 volume_name_on_backend = self._get_backend_share_name(
 1776                     new_active_replica['id'])
 1777                 if not self._client.qos_policy_group_exists(
 1778                         new_active_replica_qos_policy):
 1779                     self._create_qos_policy_group(
 1780                         new_active_replica, vserver, qos_specs)
 1781                 else:
 1782                     max_throughput = self._get_max_throughput(
 1783                         new_active_replica['size'], qos_specs)
 1784                     self._client.qos_policy_group_modify(
 1785                         new_active_replica_qos_policy, max_throughput)
 1786                 vserver_client.set_qos_policy_group_for_volume(
 1787                     volume_name_on_backend, new_active_replica_qos_policy)
 1788 
 1789                 LOG.info("QoS policy applied successfully for promoted "
 1790                          "replica: %s", new_active_replica['id'])
 1791             except Exception:
 1792                 LOG.exception("Could not apply QoS to the promoted replica.")
 1793 
 1794     def _convert_destination_replica_to_independent(
 1795             self, context, dm_session, orig_active_replica, replica,
 1796             access_rules, share_server=None):
 1797         """Breaks SnapMirror and allows r/w ops on the destination replica.
 1798 
 1799         For promotion, the existing SnapMirror relationship must be broken
 1800         and access rules have to be granted to the broken off replica to
 1801         use it as an independent share.
 1802         :param context: Request Context
 1803         :param dm_session: Data motion object for SnapMirror operations
 1804         :param orig_active_replica: Original SnapMirror source
 1805         :param replica: Replica to promote to SnapMirror source
 1806         :param access_rules: Access rules to apply to the replica
 1807         :param share_server: ShareServer class instance of replica
 1808         :return: Updated replica
 1809         """
 1810         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1811         share_name = self._get_backend_share_name(replica['id'])
 1812 
 1813         try:
 1814             # 1. Start an update to try to get a last minute transfer before we
 1815             # quiesce and break
 1816             dm_session.update_snapmirror(orig_active_replica, replica)
 1817         except exception.StorageCommunicationException:
 1818             # Ignore any errors since the current source replica may be
 1819             # unreachable
 1820             pass
 1821         # 2. Break SnapMirror
 1822         dm_session.break_snapmirror(orig_active_replica, replica)
 1823 
 1824         # 3. Setup access rules
 1825         new_active_replica = copy.deepcopy(replica)
 1826         helper = self._get_helper(replica)
 1827         helper.set_client(vserver_client)
 1828         try:
 1829             helper.update_access(replica, share_name, access_rules)
 1830         except Exception:
 1831             new_active_replica['access_rules_status'] = (
 1832                 constants.SHARE_INSTANCE_RULES_SYNCING)
 1833         else:
 1834             new_active_replica['access_rules_status'] = constants.STATUS_ACTIVE
 1835 
 1836         new_active_replica['export_locations'] = self._create_export(
 1837             new_active_replica, share_server, vserver, vserver_client)
 1838         new_active_replica['replica_state'] = constants.REPLICA_STATE_ACTIVE
 1839 
 1840         # 4. Set File system size fixed to false
 1841         vserver_client.set_volume_filesys_size_fixed(share_name,
 1842                                                      filesys_size_fixed=False)
 1843 
 1844         return new_active_replica
 1845 
 1846     def _safe_change_replica_source(self, dm_session, replica,
 1847                                     orig_source_replica,
 1848                                     new_source_replica, replica_list):
 1849         """Attempts to change the SnapMirror source to new source.
 1850 
 1851         If the attempt fails, 'replica_state' is set to 'error'.
 1852         :param dm_session: Data motion object for SnapMirror operations
 1853         :param replica: Replica that requires a change of source
 1854         :param orig_source_replica: Original SnapMirror source volume
 1855         :param new_source_replica: New SnapMirror source volume
 1856         :return: Updated replica
 1857         """
 1858         try:
 1859             dm_session.change_snapmirror_source(replica,
 1860                                                 orig_source_replica,
 1861                                                 new_source_replica,
 1862                                                 replica_list)
 1863         except exception.StorageCommunicationException:
 1864             replica['status'] = constants.STATUS_ERROR
 1865             replica['replica_state'] = constants.STATUS_ERROR
 1866             replica['export_locations'] = []
 1867             msg = ("Failed to change replica (%s) to a SnapMirror "
 1868                    "destination. Replica backend is unreachable.")
 1869 
 1870             LOG.exception(msg, replica['id'])
 1871             return replica
 1872         except netapp_api.NaApiError:
 1873             replica['replica_state'] = constants.STATUS_ERROR
 1874             replica['export_locations'] = []
 1875             msg = ("Failed to change replica (%s) to a SnapMirror "
 1876                    "destination.")
 1877             LOG.exception(msg, replica['id'])
 1878             return replica
 1879 
 1880         replica['replica_state'] = constants.REPLICA_STATE_OUT_OF_SYNC
 1881         replica['export_locations'] = []
 1882 
 1883         return replica
 1884 
 1885     def create_replicated_snapshot(self, context, replica_list,
 1886                                    snapshot_instances, share_server=None):
 1887         active_replica = self._find_active_replica(replica_list)
 1888         active_snapshot = [x for x in snapshot_instances
 1889                            if x['share_id'] == active_replica['id']][0]
 1890         snapshot_name = self._get_backend_snapshot_name(active_snapshot['id'])
 1891 
 1892         self.create_snapshot(context, active_snapshot,
 1893                              share_server=share_server)
 1894 
 1895         active_snapshot['status'] = constants.STATUS_AVAILABLE
 1896         active_snapshot['provider_location'] = snapshot_name
 1897         snapshots = [active_snapshot]
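               # Sorting replicas by 'id' and snapshot instances by 'share_id'
               # pairs each replica with its own snapshot instance, because a
               # snapshot instance's share_id is the id of its replica.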
 1898         instances = zip(sorted(replica_list,
 1899                                key=lambda x: x['id']),
 1900                         sorted(snapshot_instances,
 1901                                key=lambda x: x['share_id']))
 1902 
 1903         for replica, snapshot in instances:
 1904             if snapshot['id'] != active_snapshot['id']:
 1905                 snapshot['provider_location'] = snapshot_name
 1906                 snapshots.append(snapshot)
 1907                 dm_session = data_motion.DataMotionSession()
 1908                 if replica.get('host'):
 1909                     try:
 1910                         dm_session.update_snapmirror(active_replica,
 1911                                                      replica)
 1912                     except netapp_api.NaApiError as e:
 1913                         if e.code != netapp_api.EOBJECTNOTFOUND:
 1914                             raise
 1915         return snapshots
 1916 
 1917     def delete_replicated_snapshot(self, context, replica_list,
 1918                                    snapshot_instances, share_server=None):
 1919         active_replica = self._find_active_replica(replica_list)
 1920         active_snapshot = [x for x in snapshot_instances
 1921                            if x['share_id'] == active_replica['id']][0]
 1922 
 1923         self.delete_snapshot(context, active_snapshot,
 1924                              share_server=share_server,
 1925                              snapshot_name=active_snapshot['provider_location']
 1926                              )
 1927         active_snapshot['status'] = constants.STATUS_DELETED
 1928         instances = zip(sorted(replica_list,
 1929                                key=lambda x: x['id']),
 1930                         sorted(snapshot_instances,
 1931                                key=lambda x: x['share_id']))
 1932 
 1933         for replica, snapshot in instances:
 1934             if snapshot['id'] != active_snapshot['id']:
 1935                 dm_session = data_motion.DataMotionSession()
 1936                 if replica.get('host'):
 1937                     try:
 1938                         dm_session.update_snapmirror(active_replica, replica)
 1939                     except netapp_api.NaApiError as e:
 1940                         if e.code != netapp_api.EOBJECTNOTFOUND:
 1941                             raise
 1942 
 1943         return [active_snapshot]
 1944 
 1945     def update_replicated_snapshot(self, replica_list, share_replica,
 1946                                    snapshot_instances, snapshot_instance,
 1947                                    share_server=None):
 1948         active_replica = self._find_active_replica(replica_list)
 1949         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1950         share_name = self._get_backend_share_name(
 1951             snapshot_instance['share_id'])
 1952         snapshot_name = snapshot_instance.get('provider_location')
 1953         # NOTE(ameade): If there is no provider location,
 1954         # then grab from active snapshot instance
 1955         if snapshot_name is None:
 1956             active_snapshot = [x for x in snapshot_instances
 1957                                if x['share_id'] == active_replica['id']][0]
 1958             snapshot_name = active_snapshot.get('provider_location')
 1959             if not snapshot_name:
 1960                 return
 1961 
 1962         try:
 1963             snapshot_exists = vserver_client.snapshot_exists(snapshot_name,
 1964                                                              share_name)
 1965         except exception.SnapshotUnavailable:
 1966             # The volume must still be offline
 1967             return
 1968 
 1969         if (snapshot_exists and
 1970                 snapshot_instance['status'] == constants.STATUS_CREATING):
 1971             return {
 1972                 'status': constants.STATUS_AVAILABLE,
 1973                 'provider_location': snapshot_name,
 1974             }
 1975         elif (not snapshot_exists and
 1976               snapshot_instance['status'] == constants.STATUS_DELETING):
 1977             raise exception.SnapshotResourceNotFound(
 1978                 name=snapshot_instance.get('provider_location'))
 1979 
 1980         dm_session = data_motion.DataMotionSession()
 1981         try:
 1982             dm_session.update_snapmirror(active_replica, share_replica)
 1983         except netapp_api.NaApiError as e:
 1984             if e.code != netapp_api.EOBJECTNOTFOUND:
 1985                 raise
 1986 
 1987     def revert_to_replicated_snapshot(self, context, active_replica,
 1988                                       replica_list, active_replica_snapshot,
 1989                                       replica_snapshots, share_server=None):
 1990         """Reverts a replicated share (in place) to the specified snapshot."""
 1991         vserver, vserver_client = self._get_vserver(share_server=share_server)
 1992         share_name = self._get_backend_share_name(
 1993             active_replica_snapshot['share_id'])
 1994         snapshot_name = (
 1995             active_replica_snapshot.get('provider_location') or
 1996             self._get_backend_snapshot_name(active_replica_snapshot['id']))
 1997 
 1998         LOG.debug('Restoring snapshot %s', snapshot_name)
 1999 
 2000         dm_session = data_motion.DataMotionSession()
 2001         non_active_replica_list = self._find_nonactive_replicas(replica_list)
 2002 
 2003         # Ensure source snapshot exists
 2004         vserver_client.get_snapshot(share_name, snapshot_name)
 2005 
 2006         # Break all mirrors
 2007         for replica in non_active_replica_list:
 2008             try:
 2009                 dm_session.break_snapmirror(
 2010                     active_replica, replica, mount=False)
 2011             except netapp_api.NaApiError as e:
 2012                 if e.code != netapp_api.EOBJECTNOTFOUND:
 2013                     raise
 2014 
 2015         # Delete source SnapMirror snapshots that will prevent a snap restore
 2016         snapmirror_snapshot_names = vserver_client.list_snapmirror_snapshots(
 2017             share_name)
 2018         for snapmirror_snapshot_name in snapmirror_snapshot_names:
 2019             vserver_client.delete_snapshot(
 2020                 share_name, snapmirror_snapshot_name, ignore_owners=True)
 2021 
 2022         # Restore source snapshot of interest
 2023         vserver_client.restore_snapshot(share_name, snapshot_name)
 2024 
 2025         # Reestablish mirrors
 2026         for replica in non_active_replica_list:
 2027             try:
 2028                 dm_session.resync_snapmirror(active_replica, replica)
 2029             except netapp_api.NaApiError as e:
 2030                 if e.code != netapp_api.EOBJECTNOTFOUND:
 2031                     raise
 2032 
 2033     def _check_destination_vserver_for_vol_move(self, source_share,
 2034                                                 source_vserver,
 2035                                                 dest_share_server):
 2036         try:
 2037             destination_vserver, __ = self._get_vserver(
 2038                 share_server=dest_share_server)
 2039         except exception.InvalidParameterValue:
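                   # If no vserver can be resolved for the destination, leave
                   # it as None so the comparison below fails and the
                   # efficient volume-move path is ruled out.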
 2040             destination_vserver = None
 2041 
 2042         if source_vserver != destination_vserver:
 2043             msg = _("Cannot migrate %(shr)s efficiently from source "
 2044                     "VServer %(src)s to destination VServer %(dest)s.")
 2045             msg_args = {
 2046                 'shr': source_share['id'],
 2047                 'src': source_vserver,
 2048                 'dest': destination_vserver,
 2049             }
 2050             raise exception.NetAppException(msg % msg_args)
 2051 
 2052     def migration_check_compatibility(self, context, source_share,
 2053                                       destination_share, share_server=None,
 2054                                       destination_share_server=None):
 2055         """Checks compatibility between self.host and destination host."""
 2056         # We need cluster creds to perform an intra-cluster data motion
 2057         compatible = False
 2058         destination_host = destination_share['host']
 2059 
 2060         if self._have_cluster_creds:
 2061             try:
 2062                 backend = share_utils.extract_host(
 2063                     destination_host, level='backend_name')
 2064                 destination_aggregate = share_utils.extract_host(
 2065                     destination_host, level='pool')
 2066                 # Validate new extra-specs are valid on the destination
 2067                 extra_specs = share_types.get_extra_specs_from_share(
 2068                     destination_share)
 2069                 self._check_extra_specs_validity(
 2070                     destination_share, extra_specs)
 2071                 # TODO(gouthamr): Check whether QoS min-throughputs can be
 2072                 # honored on the destination aggregate when supported.
 2073                 self._check_aggregate_extra_specs_validity(
 2074                     destination_aggregate, extra_specs)
 2075 
 2076                 data_motion.get_backend_configuration(backend)
 2077 
 2078                 source_vserver, __ = self._get_vserver(
 2079                     share_server=share_server)
 2080                 share_volume = self._get_backend_share_name(
 2081                     source_share['id'])
 2082 
 2083                 self._check_destination_vserver_for_vol_move(
 2084                     source_share, source_vserver, destination_share_server)
 2085 
 2086                 encrypt_dest = self._get_dest_flexvol_encryption_value(
 2087                     destination_share)
 2088                 self._client.check_volume_move(
 2089                     share_volume, source_vserver, destination_aggregate,
 2090                     encrypt_destination=encrypt_dest)
 2091 
 2092             except Exception:
 2093                 msg = ("Cannot migrate share %(shr)s efficiently between "
 2094                        "%(src)s and %(dest)s.")
 2095                 msg_args = {
 2096                     'shr': source_share['id'],
 2097                     'src': source_share['host'],
 2098                     'dest': destination_host,
 2099                 }
 2100                 LOG.exception(msg, msg_args)
 2101             else:
 2102                 compatible = True
 2103         else:
 2104             msg = ("Cluster credentials have not been configured "
 2105                    "with this share driver. Cannot perform volume move "
 2106                    "operations.")
 2107             LOG.warning(msg)
 2108 
 2109         compatibility = {
 2110             'compatible': compatible,
 2111             'writable': compatible,
 2112             'nondisruptive': compatible,
 2113             'preserve_metadata': compatible,
 2114             'preserve_snapshots': compatible,
 2115         }
 2116 
 2117         return compatibility
 2118 
 2119     def migration_start(self, context, source_share, destination_share,
 2120                         source_snapshots, snapshot_mappings,
 2121                         share_server=None, destination_share_server=None):
 2122         """Begins data motion from source_share to destination_share."""
 2123         # Intra-cluster migration
 2124         vserver, vserver_client = self._get_vserver(share_server=share_server)
 2125         share_volume = self._get_backend_share_name(source_share['id'])
 2126         destination_aggregate = share_utils.extract_host(
 2127             destination_share['host'], level='pool')
 2128 
 2129         # If the destination's share type extra-spec for Flexvol encryption
 2130         # is different than the source's, then specify the volume-move
 2131         # operation to set the correct 'encrypt' attribute on the destination
 2132         # volume.
 2133         encrypt_dest = self._get_dest_flexvol_encryption_value(
 2134             destination_share)
 2135 
 2136         self._client.start_volume_move(
 2137             share_volume,
 2138             vserver,
 2139             destination_aggregate,
 2140             encrypt_destination=encrypt_dest)
 2141 
 2142         msg = ("Began volume move operation of share %(shr)s from %(src)s "
 2143                "to %(dest)s.")
 2144         msg_args = {
 2145             'shr': source_share['id'],
 2146             'src': source_share['host'],
 2147             'dest': destination_share['host'],
 2148         }
 2149         LOG.info(msg, msg_args)
 2150 
 2151     def _get_volume_move_status(self, source_share, share_server):
 2152         vserver, vserver_client = self._get_vserver(share_server=share_server)
 2153         share_volume = self._get_backend_share_name(source_share['id'])
 2154         status = self._client.get_volume_move_status(share_volume, vserver)
 2155         return status
 2156 
 2157     def _get_dest_flexvol_encryption_value(self, destination_share):
 2158         dest_share_type_encrypted_val = share_types.get_share_type_extra_specs(
 2159             destination_share['share_type_id'],
 2160             'netapp_flexvol_encryption')
 2161         encrypt_destination = share_types.parse_boolean_extra_spec(
 2162             'netapp_flexvol_encryption', dest_share_type_encrypted_val)
 2163 
 2164         return encrypt_destination
 2165 
 2166     def migration_continue(self, context, source_share, destination_share,
 2167                            source_snapshots, snapshot_mappings,
 2168                            share_server=None, destination_share_server=None):
 2169         """Check progress of migration, try to repair data motion errors."""
 2170         status = self._get_volume_move_status(source_share, share_server)
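               # The deferred-cutover phases are treated as complete here: the
               # bulk data copy has finished and only the final cutover,
               # triggered later in migration_complete, remains.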
 2171         completed_phases = (
 2172             'cutover_hard_deferred', 'cutover_soft_deferred', 'completed')
 2173 
 2174         move_phase = status['phase'].lower()
 2175         if move_phase == 'failed':
 2176             msg_args = {
 2177                 'shr': source_share['id'],
 2178                 'reason': status['details'],
 2179             }
 2180             msg = _("Volume move operation for share %(shr)s failed. Reason: "
 2181                     "%(reason)s") % msg_args
 2182             LOG.exception(msg)
 2183             raise exception.NetAppException(msg)
 2184         elif move_phase in completed_phases:
 2185             return True
 2186 
 2187         return False
 2188 
 2189     def migration_get_progress(self, context, source_share,
 2190                                destination_share, source_snapshots,
 2191                                snapshot_mappings, share_server=None,
 2192                                destination_share_server=None):
 2193         """Return detailed progress of the migration in progress."""
 2194         status = self._get_volume_move_status(source_share, share_server)
 2195 
 2196         # NOTE (gouthamr): If the volume move is waiting for a manual
 2197         # intervention to cut-over, the copy is done with respect to the
 2198         # user. Volume move copies the rest of the data before cut-over anyway.
 2199         if status['phase'] in ('cutover_hard_deferred',
 2200                                'cutover_soft_deferred'):
 2201             status['percent-complete'] = 100
 2202 
 2203         msg = ("Volume move status for share %(share)s: (State) %(state)s. "
 2204                "(Phase) %(phase)s. Details: %(details)s")
 2205         msg_args = {
 2206             'state': status['state'],
 2207             'details': status['details'],
 2208             'share': source_share['id'],
 2209             'phase': status['phase'],
 2210         }
 2211         LOG.info(msg, msg_args)
 2212 
 2213         return {
 2214             'total_progress': status['percent-complete'] or 0,
 2215             'state': status['state'],
 2216             'estimated_completion_time': status['estimated-completion-time'],
 2217             'phase': status['phase'],
 2218             'details': status['details'],
 2219         }
 2220 
 2221     def migration_cancel(self, context, source_share, destination_share,
 2222                          source_snapshots, snapshot_mappings,
 2223                          share_server=None, destination_share_server=None):
 2224         """Abort an ongoing migration."""
 2225         vserver, vserver_client = self._get_vserver(share_server=share_server)
 2226         share_volume = self._get_backend_share_name(source_share['id'])
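               # The cancel status is polled every 5 seconds (see the retry
               # decorator below), so the attempt count is the configured
               # cancel timeout divided by that interval, minimum one attempt.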
  2227         retries = (int(
  2228             self.configuration.netapp_migration_cancel_timeout / 5) or 1)
 2229 
 2230         try:
 2231             self._get_volume_move_status(source_share, share_server)
 2232         except exception.NetAppException:
 2233             LOG.exception("Could not get volume move status.")
 2234             return
 2235 
 2236         self._client.abort_volume_move(share_volume, vserver)
 2237 
 2238         @manila_utils.retry(exception.InUse, interval=5,
 2239                             retries=retries, backoff_rate=1)
 2240         def wait_for_migration_cancel_complete():
 2241             move_status = self._get_volume_move_status(source_share,
 2242                                                        share_server)
 2243             if move_status['state'] == 'failed':
 2244                 return
 2245             else:
 2246                 msg = "Migration cancelation isn't finished yet."
 2247                 raise exception.InUse(message=msg)
 2248 
 2249         try:
 2250             wait_for_migration_cancel_complete()
 2251         except exception.InUse:
 2252             move_status = self._get_volume_move_status(source_share,
 2253                                                        share_server)
 2254             msg_args = {
 2255                 'share_move_state': move_status['state']
 2256             }
 2257             msg = _("Migration cancelation was not successful. The share "
 2258                     "migration state failed while transitioning from "
 2259                     "%(share_move_state)s state to 'failed'. Retries "
 2260                     "exhausted.") % msg_args
 2261             raise exception.NetAppException(message=msg)
 2262         except exception.NetAppException:
 2263             LOG.exception("Could not get volume move status.")
 2264 
 2265         msg = ("Share volume move operation for share %(shr)s from host "
 2266                "%(src)s to %(dest)s was successfully aborted.")
 2267         msg_args = {
 2268             'shr': source_share['id'],
 2269             'src': source_share['host'],
 2270             'dest': destination_share['host'],
 2271         }
 2272         LOG.info(msg, msg_args)
 2273 
 2274     def migration_complete(self, context, source_share, destination_share,
 2275                            source_snapshots, snapshot_mappings,
 2276                            share_server=None, destination_share_server=None):
 2277         """Initiate the cutover to destination share after move is complete."""
 2278         vserver, vserver_client = self._get_vserver(share_server=share_server)
 2279         share_volume = self._get_backend_share_name(source_share['id'])
 2280 
 2281         status = self._get_volume_move_status(source_share, share_server)
 2282 
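              # A phase of 'completed' means the move already cut over on its
              # own; the deferred cutover phases mean the data copy is done
              # and ONTAP is waiting for an explicit cutover trigger.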
 2283         move_phase = status['phase'].lower()
 2284         if move_phase == 'completed':
 2285             LOG.debug("Volume move operation was already successfully "
 2286                       "completed for share %(shr)s.",
 2287                       {'shr': source_share['id']})
 2288         elif move_phase in ('cutover_hard_deferred', 'cutover_soft_deferred'):
 2289             self._client.trigger_volume_move_cutover(share_volume, vserver)
 2290             self._wait_for_cutover_completion(
 2291                 source_share, share_server)
 2292         else:
 2293             msg_args = {
 2294                 'shr': source_share['id'],
 2295                 'status': status['state'],
 2296                 'phase': status['phase'],
 2297                 'details': status['details'],
 2298             }
 2299             msg = _("Cannot complete volume move operation for share %(shr)s. "
 2300                     "Current volume move status: %(status)s, phase: "
 2301                     "%(phase)s. Details: %(details)s") % msg_args
 2302             LOG.error(msg)
 2303             raise exception.NetAppException(msg)
 2304 
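              # Rename the backend volume to the name derived from the
              # destination share's ID.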
 2305         new_share_volume_name = self._get_backend_share_name(
 2306             destination_share['id'])
 2307         vserver_client.set_volume_name(share_volume, new_share_volume_name)
 2308 
 2309         # Modify volume properties per share type extra-specs
 2310         extra_specs = share_types.get_extra_specs_from_share(
 2311             destination_share)
 2312         extra_specs = self._remap_standard_boolean_extra_specs(extra_specs)
 2313         self._check_extra_specs_validity(destination_share, extra_specs)
 2314         provisioning_options = self._get_provisioning_options(extra_specs)
 2315         qos_policy_group_name = self._modify_or_create_qos_for_existing_share(
 2316             destination_share, extra_specs, vserver, vserver_client)
 2317         if qos_policy_group_name:
 2318             provisioning_options['qos_policy_group'] = qos_policy_group_name
 2319         else:
 2320             # Remove the QoS policy from the migrated share, since the
 2321             # share type it is being migrated to does not specify any
 2322             # QoS settings.
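                  # The literal name "none" tells ONTAP to detach the volume
                  # from any QoS policy group.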
 2323             provisioning_options['qos_policy_group'] = "none"
 2324 
 2325             qos_policy_of_src_share = self._get_backend_qos_policy_group_name(
 2326                 source_share['id'])
 2327             self._client.mark_qos_policy_group_for_deletion(
 2328                 qos_policy_of_src_share)
 2329 
 2330         destination_aggregate = share_utils.extract_host(
 2331             destination_share['host'], level='pool')
 2332 
 2333         # Modify volume to match extra specs
 2334         vserver_client.modify_volume(destination_aggregate,
 2335                                      new_share_volume_name,
 2336                                      **provisioning_options)
 2337 
 2338         msg = ("Volume move operation for share %(shr)s has completed "
 2339                "successfully. Share has been moved from %(src)s to "
 2340                "%(dest)s.")
 2341         msg_args = {
 2342             'shr': source_share['id'],
 2343             'src': source_share['host'],
 2344             'dest': destination_share['host'],
 2345         }
 2346         LOG.info(msg, msg_args)
 2347 
 2348         # NOTE(gouthamr): For nondisruptive migration, current export
 2349         # policy will not be cleared, the export policy will be renamed to
 2350         # match the name of the share.
 2351         export_locations = self._create_export(
 2352             destination_share, share_server, vserver, vserver_client,
 2353             clear_current_export_policy=False)
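              # A volume move preserves the volume's snapshots, so each
              # destination snapshot keeps the provider_location of the
              # source snapshot it was mapped from.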
 2354         src_snaps_dict = {s['id']: s for s in source_snapshots}
 2355         snapshot_updates = {}
 2356 
 2357         for source_snap_id, destination_snap in snapshot_mappings.items():
 2358             p_location = src_snaps_dict[source_snap_id]['provider_location']
 2359 
 2360             snapshot_updates.update(
 2361                 {destination_snap['id']: {'provider_location': p_location}})
 2362 
 2363         return {
 2364             'export_locations': export_locations,
 2365             'snapshot_updates': snapshot_updates,
 2366         }
 2367 
 2368     @na_utils.trace
 2369     def _modify_or_create_qos_for_existing_share(self, share, extra_specs,
 2370                                                  vserver, vserver_client):
 2371         """Gets/Creates QoS policy for an existing FlexVol.
 2372 
 2373         The share's assigned QoS policy is renamed and adjusted if the policy
 2374         is exclusive to the FlexVol. If the policy includes other workloads
 2375         besides the FlexVol, a new policy is created with the specs necessary.
 2376         """
 2377         qos_specs = self._get_normalized_qos_specs(extra_specs)
 2378         if not qos_specs:
 2379             return
 2380 
 2381         backend_share_name = self._get_backend_share_name(share['id'])
 2382         qos_policy_group_name = self._get_backend_qos_policy_group_name(
 2383             share['id'])
 2384 
 2385         create_new_qos_policy_group = True
 2386 
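              # Round the volume size up to whole GiBs so that any per-GiB
              # QoS specs can be scaled to an absolute throughput limit.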
 2387         backend_volume = vserver_client.get_volume(
 2388             backend_share_name)
 2389         backend_volume_size = int(
 2390             math.ceil(float(backend_volume['size']) / units.Gi))
 2391 
 2392         LOG.debug("Checking for a pre-existing QoS policy group that "
 2393                   "is exclusive to the volume %s.", backend_share_name)
 2394 
 2395         # Does the volume have an exclusive QoS policy that we can rename?
 2396         if backend_volume['qos-policy-group-name'] is not None:
 2397             existing_qos_policy_group = self._client.qos_policy_group_get(
 2398                 backend_volume['qos-policy-group-name'])
 2399             if existing_qos_policy_group['num-workloads'] == 1:
 2400                 # Exclusive policy: adjust max throughput and rename in place.
 2401 
 2402                 msg = ("Found pre-existing QoS policy %(policy)s and it is "
 2403                        "exclusive to the volume %(volume)s. Modifying and "
 2404                        "renaming this policy to %(new_policy)s.")
 2405                 msg_args = {
 2406                     'policy': backend_volume['qos-policy-group-name'],
 2407                     'volume': backend_share_name,
 2408                     'new_policy': qos_policy_group_name,
 2409                 }
 2410                 LOG.debug(msg, msg_args)
 2411 
 2412                 max_throughput = self._get_max_throughput(
 2413                     backend_volume_size, qos_specs)
 2414                 self._client.qos_policy_group_modify(
 2415                     backend_volume['qos-policy-group-name'], max_throughput)
 2416                 self._client.qos_policy_group_rename(
 2417                     backend_volume['qos-policy-group-name'],
 2418                     qos_policy_group_name)
 2419                 create_new_qos_policy_group = False
 2420 
 2421         if create_new_qos_policy_group:
 2422             share_obj = {
 2423                 'size': backend_volume_size,
 2424                 'id': share['id'],
 2425             }
 2426             LOG.debug("No existing QoS policy group found for "
 2427                       "volume. Creating  a new one with name %s.",
 2428                       qos_policy_group_name)
 2429             self._create_qos_policy_group(share_obj, vserver, qos_specs)
 2430         return qos_policy_group_name
 2431 
 2432     def _wait_for_cutover_completion(self, source_share, share_server):
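               """Poll the volume move until its phase reports 'completed'."""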
 2433 
 2434         retries = (self.configuration.netapp_volume_move_cutover_timeout / 5
 2435                    or 1)
 2436 
 2437         @manila_utils.retry(exception.ShareBusyException, interval=5,
 2438                             retries=retries, backoff_rate=1)
 2439         def check_move_completion():
 2440             status = self._get_volume_move_status(source_share, share_server)
 2441             if status['phase'].lower() != 'completed':
 2442                 msg_args = {
 2443                     'shr': source_share['id'],
 2444                     'phs': status['phase'],
 2445                 }
 2446                 msg = _('Volume move operation for share %(shr)s is not '
 2447                         'complete. Current Phase: %(phs)s. '
 2448                         'Retrying.') % msg_args
 2449                 LOG.warning(msg)
 2450                 raise exception.ShareBusyException(reason=msg)
 2451 
 2452         try:
 2453             check_move_completion()
 2454         except exception.ShareBusyException:
 2455             msg = _("Volume move operation did not complete after cut-over "
 2456                     "was triggered. Retries exhausted. Not retrying.")
 2457             raise exception.NetAppException(message=msg)
 2458 
 2459     def get_backend_info(self, context):
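               """Return backend info used to decide whether ensure_shares runs."""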
 2460         snapdir_visibility = self.configuration.netapp_reset_snapdir_visibility
 2461         return {
 2462             'snapdir_visibility': snapdir_visibility,
 2463         }
 2464 
 2465     def ensure_shares(self, context, shares):
 2466         cfg_snapdir = self.configuration.netapp_reset_snapdir_visibility
 2467         hide_snapdir = self.HIDE_SNAPDIR_CFG_MAP[cfg_snapdir.lower()]
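              # None corresponds to the 'default' setting, in which case the
              # existing snapdir visibility of each share is left untouched.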
 2468         if hide_snapdir is not None:
 2469             for share in shares:
 2470                 share_server = share.get('share_server')
 2471                 vserver, vserver_client = self._get_vserver(
 2472                     share_server=share_server)
 2473                 share_name = self._get_backend_share_name(share['id'])
 2474                 self._apply_snapdir_visibility(
 2475                     hide_snapdir, share_name, vserver_client)