"Fossies" - the Fresh Open Source Software Archive

Member "manila-8.1.3/manila/share/manager.py" (20 Jul 2020, 191704 Bytes) of package /linux/misc/openstack/manila-8.1.3.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) Python source code syntax highlighting (style: standard) with prefixed line numbers. Alternatively, you can view or download the uninterpreted source code file here. For more information about "manager.py" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 8.1.2_vs_8.1.3.

    1 # Copyright (c) 2014 NetApp Inc.
    2 # All Rights Reserved.
    3 #
    4 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
    5 #    not use this file except in compliance with the License. You may obtain
    6 #    a copy of the License at
    7 #
    8 #         http://www.apache.org/licenses/LICENSE-2.0
    9 #
   10 #    Unless required by applicable law or agreed to in writing, software
   11 #    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   12 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   13 #    License for the specific language governing permissions and limitations
   14 #    under the License.
"""NAS share manager manages creating shares and access rights.
   16 
   17 **Related Flags**
   18 
   19 :share_driver: Used by :class:`ShareManager`.
   20 """
   21 
   22 import copy
   23 import datetime
   24 import functools
   25 import hashlib
   26 
   27 from oslo_config import cfg
   28 from oslo_log import log
   29 from oslo_serialization import jsonutils
   30 from oslo_service import periodic_task
   31 from oslo_utils import excutils
   32 from oslo_utils import importutils
   33 from oslo_utils import timeutils
   34 import six
   35 
   36 from manila.common import constants
   37 from manila import context
   38 from manila import coordination
   39 from manila.data import rpcapi as data_rpcapi
   40 from manila import exception
   41 from manila.i18n import _
   42 from manila import manager
   43 from manila.message import api as message_api
   44 from manila.message import message_field
   45 from manila import quota
   46 from manila.share import access
   47 from manila.share import api
   48 import manila.share.configuration
   49 from manila.share import drivers_private_data
   50 from manila.share import migration
   51 from manila.share import rpcapi as share_rpcapi
   52 from manila.share import share_types
   53 from manila.share import snapshot_access
   54 from manila.share import utils as share_utils
   55 from manila import utils
   56 
   57 LOG = log.getLogger(__name__)
   58 
   59 share_manager_opts = [
   60     cfg.StrOpt('share_driver',
   61                default='manila.share.drivers.generic.GenericShareDriver',
   62                help='Driver to use for share creation.'),
   63     cfg.ListOpt('hook_drivers',
   64                 default=[],
   65                 help='Driver(s) to perform some additional actions before and '
   66                      'after share driver actions and on a periodic basis. '
   67                      'Default is [].',
   68                 deprecated_group='DEFAULT'),
   69     cfg.BoolOpt('delete_share_server_with_last_share',
   70                 default=False,
   71                 help='Whether share servers will '
   72                      'be deleted on deletion of the last share.'),
   73     cfg.BoolOpt('unmanage_remove_access_rules',
   74                 default=False,
   75                 help='If set to True, then manila will deny access and remove '
   76                      'all access rules on share unmanage.'
   77                      'If set to False - nothing will be changed.'),
   78     cfg.BoolOpt('automatic_share_server_cleanup',
   79                 default=True,
   80                 help='If set to True, then Manila will delete all share '
   81                      'servers which were unused more than specified time .'
   82                      'If set to False - automatic deletion of share servers '
   83                      'will be disabled.',
   84                 deprecated_group='DEFAULT'),
   85     cfg.IntOpt('unused_share_server_cleanup_interval',
   86                default=10,
   87                help='Unallocated share servers reclamation time interval '
   88                     '(minutes). Minimum value is 10 minutes, maximum is 60 '
   89                     'minutes. The reclamation function is run every '
   90                     '10 minutes and delete share servers which were unused '
   91                     'more than unused_share_server_cleanup_interval option '
   92                     'defines. This value reflects the shortest time Manila '
   93                     'will wait for a share server to go unutilized before '
   94                     'deleting it.',
   95                deprecated_group='DEFAULT',
   96                min=10,
   97                max=60),
   98     cfg.IntOpt('replica_state_update_interval',
   99                default=300,
  100                help='This value, specified in seconds, determines how often '
  101                     'the share manager will poll for the health '
  102                     '(replica_state) of each replica instance.'),
  103     cfg.IntOpt('migration_driver_continue_update_interval',
  104                default=60,
  105                help='This value, specified in seconds, determines how often '
  106                     'the share manager will poll the driver to perform the '
  107                     'next step of migration in the storage backend, for a '
  108                     'migrating share.'),
  109     cfg.IntOpt('share_usage_size_update_interval',
  110                default=300,
  111                help='This value, specified in seconds, determines how often '
  112                     'the share manager will poll the driver to update the '
  113                     'share usage size in the storage backend, for shares in '
  114                     'that backend.'),
  115     cfg.BoolOpt('enable_gathering_share_usage_size',
  116                 default=False,
  117                 help='If set to True, share usage size will be polled for in '
  118                      'the interval specified with '
  119                      '"share_usage_size_update_interval". Usage data can be '
  120                      'consumed by telemetry integration. If telemetry is not '
  121                      'configured, this option must be set to False. '
  122                      'If set to False - gathering share usage size will be'
  123                      ' disabled.'),
  124 ]
  125 
CONF = cfg.CONF
CONF.register_opts(share_manager_opts)
# Options declared in other modules that this module reads at runtime.
CONF.import_opt('periodic_hooks_interval', 'manila.share.hook')
CONF.import_opt('periodic_interval', 'manila.service')

# Drivers that need to change module paths or class names can add their
# old/new path here to maintain backward compatibility.
# Keys are deprecated import paths; values are the current replacements
# (looked up in ShareManager.__init__ with a deprecation warning).
MAPPING = {
    'manila.share.drivers.netapp.cluster_mode.NetAppClusteredShareDriver':
    'manila.share.drivers.netapp.common.NetAppDriver',
    'manila.share.drivers.hp.hp_3par_driver.HP3ParShareDriver':
    'manila.share.drivers.hpe.hpe_3par_driver.HPE3ParShareDriver',
    'manila.share.drivers.hitachi.hds_hnas.HDSHNASDriver':
    'manila.share.drivers.hitachi.hnas.driver.HitachiHNASDriver',
    'manila.share.drivers.glusterfs_native.GlusterfsNativeShareDriver':
    'manila.share.drivers.glusterfs.glusterfs_native.'
    'GlusterfsNativeShareDriver',
    'manila.share.drivers.emc.driver.EMCShareDriver':
    'manila.share.drivers.dell_emc.driver.EMCShareDriver',
    'manila.share.drivers.cephfs.cephfs_native.CephFSNativeDriver':
    'manila.share.drivers.cephfs.driver.CephFSDriver',
}

# Module-level alias for the global quota engine.
QUOTAS = quota.QUOTAS
  150 
  151 
  152 def locked_share_replica_operation(operation):
  153     """Lock decorator for share replica operations.
  154 
  155     Takes a named lock prior to executing the operation. The lock is named with
  156     the id of the share to which the replica belongs.
  157 
  158     Intended use:
  159     If a replica operation uses this decorator, it will block actions on
  160     all share replicas of the share until the named lock is free. This is
  161     used to protect concurrent operations on replicas of the same share e.g.
  162     promote ReplicaA while deleting ReplicaB, both belonging to the same share.
  163     """
  164 
  165     def wrapped(*args, **kwargs):
  166         share_id = kwargs.get('share_id')
  167 
  168         @coordination.synchronized(
  169             'locked-share-replica-operation-for-share-%s' % share_id)
  170         def locked_replica_operation(*_args, **_kwargs):
  171             return operation(*_args, **_kwargs)
  172         return locked_replica_operation(*args, **kwargs)
  173 
  174     return wrapped
  175 
  176 
  177 def add_hooks(f):
  178     """Hook decorator to perform action before and after a share method call
  179 
  180     The hook decorator can perform actions before some share driver methods
  181     calls and after a call with results of driver call and preceding hook call.
  182     """
  183     @functools.wraps(f)
  184     def wrapped(self, *args, **kwargs):
  185         if not self.hooks:
  186             return f(self, *args, **kwargs)
  187 
  188         pre_hook_results = []
  189         for hook in self.hooks:
  190             pre_hook_results.append(
  191                 hook.execute_pre_hook(
  192                     func_name=f.__name__,
  193                     *args, **kwargs))
  194 
  195         wrapped_func_results = f(self, *args, **kwargs)
  196 
  197         for i, hook in enumerate(self.hooks):
  198             hook.execute_post_hook(
  199                 func_name=f.__name__,
  200                 driver_action_results=wrapped_func_results,
  201                 pre_hook_data=pre_hook_results[i],
  202                 *args, **kwargs)
  203 
  204         return wrapped_func_results
  205 
  206     return wrapped
  207 
  208 
class ShareManager(manager.SchedulerDependentManager):
    """Manages NAS storages."""

    # RPC API version this manager exposes. NOTE(review): presumably bumped
    # whenever the RPC interface changes -- confirm against the project's
    # RPC versioning policy.
    RPC_API_VERSION = '1.19'
    def __init__(self, share_driver=None, service_name=None, *args, **kwargs):
        """Load the driver from args, or from flags.

        :param share_driver: optional import path of the driver class;
            when falsy, the ``share_driver`` config option is used.
        :param service_name: name of the config group holding this
            backend's options (supports multi-backend deployments).
        """
        # Per-backend configuration must exist before the driver is built.
        self.configuration = manila.share.configuration.Configuration(
            share_manager_opts,
            config_group=service_name)
        super(ShareManager, self).__init__(service_name='share',
                                           *args, **kwargs)

        if not share_driver:
            share_driver = self.configuration.share_driver
        # Translate deprecated driver import paths to their current ones.
        if share_driver in MAPPING:
            msg_args = {'old': share_driver, 'new': MAPPING[share_driver]}
            LOG.warning("Driver path %(old)s is deprecated, update your "
                        "configuration to the new path %(new)s",
                        msg_args)
            share_driver = MAPPING[share_driver]

        ctxt = context.get_admin_context()
        # Private key/value store the driver can use for backend-specific
        # state, scoped to this host and config group.
        private_storage = drivers_private_data.DriverPrivateData(
            context=ctxt, backend_host=self.host,
            config_group=self.configuration.config_group
        )
        self.driver = importutils.import_object(
            share_driver, private_storage=private_storage,
            configuration=self.configuration,
        )

        # A backend-specific availability zone overrides the service-wide
        # default.
        backend_availability_zone = self.driver.configuration.safe_get(
            'backend_availability_zone')
        self.availability_zone = (
            backend_availability_zone or CONF.storage_availability_zone
        )

        # Helpers that apply share/snapshot access rules via the driver.
        self.access_helper = access.ShareInstanceAccess(self.db, self.driver)
        self.snapshot_access_helper = (
            snapshot_access.ShareSnapshotInstanceAccess(self.db, self.driver))
        self.migration_wait_access_rules_timeout = (
            CONF.migration_wait_access_rules_timeout)

        self.message_api = message_api.API()
        self.hooks = []
        self._init_hook_drivers()
  256 
  257     def _init_hook_drivers(self):
  258         # Try to initialize hook driver(s).
  259         hook_drivers = self.configuration.safe_get("hook_drivers")
  260         for hook_driver in hook_drivers:
  261             self.hooks.append(
  262                 importutils.import_object(
  263                     hook_driver,
  264                     configuration=self.configuration,
  265                     host=self.host,
  266                 )
  267             )
  268 
  269     def _ensure_share_instance_has_pool(self, ctxt, share_instance):
  270         pool = share_utils.extract_host(share_instance['host'], 'pool')
  271         if pool is None:
  272             # No pool name encoded in host, so this is a legacy
  273             # share created before pool is introduced, ask
  274             # driver to provide pool info if it has such
  275             # knowledge and update the DB.
  276             try:
  277                 pool = self.driver.get_pool(share_instance)
  278             except Exception:
  279                 LOG.exception("Failed to fetch pool name for share: "
  280                               "%(share)s.",
  281                               {'share': share_instance['id']})
  282                 return
  283 
  284             if pool:
  285                 new_host = share_utils.append_host(
  286                     share_instance['host'], pool)
  287                 self.db.share_instance_update(
  288                     ctxt, share_instance['id'], {'host': new_host})
  289 
  290         return pool
  291 
    @add_hooks
    def init_host(self):
        """Initialization for a standalone service.

        Sets up the driver, establishes service-instance connectivity for
        drivers that handle share servers, re-ensures existing resources
        and publishes this backend's capabilities to the scheduler.
        """

        ctxt = context.get_admin_context()
        driver_host_pair = "{}@{}".format(
            self.driver.__class__.__name__,
            self.host)

        # we want to retry to setup the driver. In case of a multi-backend
        # scenario, working backends are usable and the non-working ones (where
        # do_setup() or check_for_setup_error() fail) retry.
        # NOTE(review): retries=0 appears to mean "retry indefinitely" with
        # the sleep between attempts capped at backoff_sleep_max seconds --
        # confirm against manila.utils.retry.
        @utils.retry(Exception, interval=2, backoff_rate=2,
                     backoff_sleep_max=600, retries=0)
        def _driver_setup():
            self.driver.initialized = False
            LOG.debug("Start initialization of driver: '%s'", driver_host_pair)
            try:
                self.driver.do_setup(ctxt)
                self.driver.check_for_setup_error()
            except Exception:
                LOG.exception("Error encountered during initialization of "
                              "driver %s", driver_host_pair)
                raise
            else:
                # Mark the driver usable only after both setup steps passed.
                self.driver.initialized = True

        _driver_setup()
        # Drivers that manage share servers may need network plumbing to
        # their service instances before any share operation runs.
        if (self.driver.driver_handles_share_servers and
                hasattr(self.driver, 'service_instance_manager')):
            (self.driver.service_instance_manager.network_helper.
             setup_connectivity_with_service_instances())

        self.ensure_driver_resources(ctxt)

        self.publish_service_capabilities(ctxt)
        LOG.info("Finished initialization of driver: '%(driver)s"
                 "@%(host)s'",
                 {"driver": self.driver.__class__.__name__,
                  "host": self.host})
  332 
    def ensure_driver_resources(self, ctxt):
        """Re-export shares and refresh access rules on this backend.

        Skips the whole pass when the driver's backend info hash matches
        the one stored in the DB (and get_backend_info is implemented).
        Otherwise asks the driver to ensure all available, non-busy share
        instances, then applies any status/export-location updates the
        driver returned and re-syncs pending access rules for shares and
        their snapshots.
        """
        old_backend_info = self.db.backend_info_get(ctxt, self.host)
        old_backend_info_hash = (old_backend_info.get('info_hash')
                                 if old_backend_info is not None else None)
        new_backend_info = None
        new_backend_info_hash = None
        backend_info_implemented = True
        update_share_instances = []
        try:
            new_backend_info = self.driver.get_backend_info(ctxt)
        except Exception as e:
            # NotImplementedError just means the driver does not support
            # backend-info hashing; any other failure is fatal.
            if not isinstance(e, NotImplementedError):
                LOG.exception(
                    "The backend %(host)s could not get backend info.",
                    {'host': self.host})
                raise
            else:
                backend_info_implemented = False
                LOG.debug(
                    ("The backend %(host)s does not support get backend"
                     " info method."),
                    {'host': self.host})

        if new_backend_info:
            # Hash the sorted item list so dict ordering cannot change the
            # digest between runs.
            new_backend_info_hash = hashlib.sha1(six.text_type(
                sorted(new_backend_info.items())).encode('utf-8')).hexdigest()
        if (old_backend_info_hash == new_backend_info_hash and
                backend_info_implemented):
            LOG.debug(
                ("Ensure shares is being skipped because the %(host)s's old "
                 "backend info is the same as its new backend info."),
                {'host': self.host})
            return

        share_instances = self.db.share_instances_get_all_by_host(
            ctxt, self.host)
        LOG.debug("Re-exporting %s shares", len(share_instances))

        # Collect the instances eligible for re-export: skip ones busy
        # with an active task or not in AVAILABLE status.
        for share_instance in share_instances:
            share_ref = self.db.share_get(ctxt, share_instance['share_id'])

            if share_ref.is_busy:
                LOG.info(
                    "Share instance %(id)s: skipping export, "
                    "because it is busy with an active task: %(task)s.",
                    {'id': share_instance['id'],
                     'task': share_ref['task_state']},
                )
                continue

            if share_instance['status'] != constants.STATUS_AVAILABLE:
                LOG.info(
                    "Share instance %(id)s: skipping export, "
                    "because it has '%(status)s' status.",
                    {'id': share_instance['id'],
                     'status': share_instance['status']},
                )
                continue

            self._ensure_share_instance_has_pool(ctxt, share_instance)
            share_instance = self.db.share_instance_get(
                ctxt, share_instance['id'], with_share_data=True)
            # NOTE(review): _get_share_replica_dict is defined elsewhere in
            # this file; presumably it builds the driver-facing dict for the
            # instance -- confirm there.
            share_instance_dict = self._get_share_replica_dict(
                ctxt, share_instance)
            update_share_instances.append(share_instance_dict)

        if update_share_instances:
            try:
                # On success this rebinds update_share_instances from a list
                # of dicts to a dict keyed by instance id (or {}).
                update_share_instances = self.driver.ensure_shares(
                    ctxt, update_share_instances) or {}
            except Exception as e:
                if not isinstance(e, NotImplementedError):
                    LOG.exception("Caught exception trying ensure "
                                  "share instances.")
                else:
                    # Driver has no bulk ensure_shares; fall back to
                    # per-instance ensure_share.
                    self._ensure_share(ctxt, update_share_instances)

        if new_backend_info:
            self.db.backend_info_update(
                ctxt, self.host, new_backend_info_hash)

        # Apply per-instance updates returned by the driver and re-sync
        # any non-active access rules.
        for share_instance in share_instances:
            if share_instance['id'] not in update_share_instances:
                continue
            if update_share_instances[share_instance['id']].get('status'):
                self.db.share_instance_update(
                    ctxt, share_instance['id'],
                    {'status': (
                        update_share_instances[share_instance['id']].
                        get('status')),
                     'host': share_instance['host']}
                )

            update_export_location = (
                update_share_instances[share_instance['id']]
                .get('export_locations'))
            if update_export_location:
                self.db.share_export_locations_update(
                    ctxt, share_instance['id'], update_export_location)

            share_server = self._get_share_server(ctxt, share_instance)

            if share_instance['access_rules_status'] != (
                    constants.STATUS_ACTIVE):
                try:
                    # Cast any existing 'applying' rules to 'new'
                    self.access_helper.reset_applying_rules(
                        ctxt, share_instance['id'])
                    self.access_helper.update_access_rules(
                        ctxt, share_instance['id'], share_server=share_server)
                except Exception:
                    LOG.exception(
                        ("Unexpected error occurred while updating access "
                         "rules for share instance %(s_id)s."),
                        {'s_id': share_instance['id']},
                    )

            snapshot_instances = (
                self.db.share_snapshot_instance_get_all_with_filters(
                    ctxt, {'share_instance_ids': share_instance['id']},
                    with_share_data=True))

            for snap_instance in snapshot_instances:

                rules = (
                    self.db.
                    share_snapshot_access_get_all_for_snapshot_instance(
                        ctxt, snap_instance['id']))

                # NOTE(ganso): We don't invoke update_access for snapshots if
                # we don't have invalid rules or pending updates
                if any(r['state'] in (constants.ACCESS_STATE_DENYING,
                                      constants.ACCESS_STATE_QUEUED_TO_DENY,
                                      constants.ACCESS_STATE_APPLYING,
                                      constants.ACCESS_STATE_QUEUED_TO_APPLY)
                       for r in rules):
                    try:
                        self.snapshot_access_helper.update_access_rules(
                            ctxt, snap_instance['id'], share_server)
                    except Exception:
                        LOG.exception(
                            "Unexpected error occurred while updating "
                            "access rules for snapshot instance %s.",
                            snap_instance['id'])
  478     def _ensure_share(self, ctxt, share_instances):
  479         for share_instance in share_instances:
  480             try:
  481                 export_locations = self.driver.ensure_share(
  482                     ctxt, share_instance,
  483                     share_server=share_instance['share_server'])
  484             except Exception:
  485                 LOG.exception("Caught exception trying ensure "
  486                               "share '%(s_id)s'.",
  487                               {'s_id': share_instance['id']})
  488                 continue
  489             if export_locations:
  490                 self.db.share_export_locations_update(
  491                     ctxt, share_instance['id'], export_locations)
  492 
    def _provide_share_server_for_share(self, context, share_network_id,
                                        share_instance, snapshot=None,
                                        share_group=None,
                                        create_on_backend=True):
        """Gets or creates share_server and updates share with its id.

        An active share_server can be deleted if there are no dependent
        shares on it. So we must avoid the possibility of deleting the
        share_server in the time gap between the share_server reaching
        active state and the share_server_id being set on the share. That
        is possible, for example, with the first share creation, which
        starts share_server creation. For this purpose a lock shared with
        the share_server deletion path is used.

        :param context: Current context
        :param share_network_id: Share network where existing share server
                                 should be found or created. If
                                 share_network_id is None method use
                                 share_network_id from provided snapshot.
        :param share_instance: Share Instance model
        :param snapshot: Optional -- Snapshot model
        :param share_group: Optional -- Share group model; forwarded to the
                            driver when choosing a compatible share server.
        :param create_on_backend: Boolean. If True, driver will be asked to
                                  create the share server if no share server
                                  is available.

        :returns: dict, dict -- first value is share_server, that
                  has been chosen for share schedule. Second value is
                  share updated with share_server_id.
        :raises ValueError: when neither share_network_id nor snapshot is
                            supplied.
        """
        if not (share_network_id or snapshot):
            msg = _("'share_network_id' parameter or 'snapshot'"
                    " should be provided. ")
            raise ValueError(msg)

        parent_share_server = None

        # Helper: log the error and flag the share instance as errored.
        def error(msg, *args):
            LOG.error(msg, *args)
            self.db.share_instance_update(context, share_instance['id'],
                                          {'status': constants.STATUS_ERROR})

        if snapshot:
            # Shares created from a snapshot must reuse the parent share's
            # share server, so validate it first.
            parent_share_server_id = (
                snapshot['share']['instance']['share_server_id'])
            try:
                parent_share_server = self.db.share_server_get(
                    context, parent_share_server_id)
            except exception.ShareServerNotFound:
                with excutils.save_and_reraise_exception():
                    error("Parent share server %s does not exist.",
                          parent_share_server_id)

            if parent_share_server['status'] != constants.STATUS_ACTIVE:
                error_params = {
                    'id': parent_share_server_id,
                    'status': parent_share_server['status'],
                }
                error("Parent share server %(id)s has invalid status "
                      "'%(status)s'.", error_params)
                raise exception.InvalidShareServer(
                    share_server_id=parent_share_server
                )

        if parent_share_server and not share_network_id:
            share_network_id = parent_share_server['share_network_id']

        def get_available_share_servers():
            if parent_share_server:
                return [parent_share_server]
            else:
                return (
                    self.db.share_server_get_all_by_host_and_share_net_valid(
                        context, self.host, share_network_id)
                )

        # Named lock shared with the share-server deletion path so a chosen
        # server cannot be deleted before the instance is bound to it.
        @utils.synchronized("share_manager_%s" % share_network_id,
                            external=True)
        def _wrapped_provide_share_server_for_share():
            try:
                available_share_servers = get_available_share_servers()
            except exception.ShareServerNotFound:
                available_share_servers = None

            compatible_share_server = None

            if available_share_servers:
                try:
                    compatible_share_server = (
                        self.driver.choose_share_server_compatible_with_share(
                            context, available_share_servers, share_instance,
                            snapshot=snapshot.instance if snapshot else None,
                            share_group=share_group
                        )
                    )
                except Exception as e:
                    with excutils.save_and_reraise_exception():
                        error("Cannot choose compatible share server: %s",
                              e)

            if not compatible_share_server:
                # No usable server found: create a fresh DB record in
                # CREATING state; the backend resource may be created below.
                compatible_share_server = self.db.share_server_create(
                    context,
                    {
                        'host': self.host,
                        'share_network_id': share_network_id,
                        'status': constants.STATUS_CREATING,
                    }
                )

            msg = ("Using share_server %(share_server)s for share instance"
                   " %(share_instance_id)s")
            LOG.debug(msg, {
                'share_server': compatible_share_server['id'],
                'share_instance_id': share_instance['id']
            })

            # Bind the instance to the server while still holding the lock.
            share_instance_ref = self.db.share_instance_update(
                context,
                share_instance['id'],
                {'share_server_id': compatible_share_server['id']},
                with_share_data=True
            )
            if create_on_backend:
                metadata = {'request_host': share_instance['host']}
                compatible_share_server = (
                    self._create_share_server_in_backend(
                        context, compatible_share_server,
                        metadata=metadata))

            return compatible_share_server, share_instance_ref

        return _wrapped_provide_share_server_for_share()
  626 
  627     def _create_share_server_in_backend(self, context, share_server,
  628                                         metadata=None):
  629         """Perform setup_server on backend
  630 
  631         :param metadata: A dictionary, to be passed to driver's setup_server()
  632         """
  633 
  634         if share_server['status'] == constants.STATUS_CREATING:
  635             # Create share server on backend with data from db.
  636             share_server = self._setup_server(context, share_server,
  637                                               metadata=metadata)
  638             LOG.info("Share server created successfully.")
  639         else:
  640             LOG.info("Using preexisting share server: "
  641                      "'%(share_server_id)s'",
  642                      {'share_server_id': share_server['id']})
  643         return share_server
  644 
  645     def create_share_server(self, context, share_server_id):
  646         """Invoked to create a share server in this backend.
  647 
  648         This method is invoked to create the share server defined in the model
  649         obtained by the supplied id.
  650 
  651         :param context: The 'context.RequestContext' object for the request.
  652         :param share_server_id: The id of the server to be created.
  653         """
  654         share_server = self.db.share_server_get(context, share_server_id)
  655 
  656         self._create_share_server_in_backend(context, share_server)
  657 
  658     def provide_share_server(self, context, share_instance_id,
  659                              share_network_id, snapshot_id=None):
  660         """Invoked to provide a compatible share server.
  661 
  662         This method is invoked to find a compatible share server among the
  663         existing ones or create a share server database instance with the share
  664         server properties that will be used to create the share server later.
  665 
  666         :param context: The 'context.RequestContext' object for the request.
  667         :param share_instance_id: The id of the share instance whose model
  668             attributes will be used to provide the share server.
  669         :param share_network_id: The id of the share network the share server
  670             to be provided has to be related to.
  671         :param snapshot_id: The id of the snapshot to be used to obtain the
  672             share server if applicable.
  673         :return: The id of the share server that is being provided.
  674         """
  675         share_instance = self.db.share_instance_get(context, share_instance_id,
  676                                                     with_share_data=True)
  677         snapshot_ref = None
  678         if snapshot_id:
  679             snapshot_ref = self.db.share_snapshot_get(context, snapshot_id)
  680 
  681         share_group_ref = None
  682         if share_instance.get('share_group_id'):
  683             share_group_ref = self.db.share_group_get(
  684                 context, share_instance['share_group_id'])
  685 
  686         share_server, share_instance = self._provide_share_server_for_share(
  687             context, share_network_id, share_instance, snapshot_ref,
  688             share_group_ref, create_on_backend=False)
  689 
  690         return share_server['id']
  691 
    def _provide_share_server_for_share_group(self, context, share_network_id,
                                              share_group_ref,
                                              share_group_snapshot=None):
        """Gets or creates share_server and updates share group with its id.

        Active share_server can be deleted if there are no shares or share
        groups dependent on it.

        So we need avoid possibility to delete share_server in time gap
        between reaching active state for share_server and setting up
        share_server_id for share group. It is possible, for example, with
        first share group creation, which starts share_server creation.
        For this purpose used shared lock between this method and the one
        with deletion of share_server.

        :param context: Current context
        :param share_network_id: Share network where existing share server
                                 should be found or created.
        :param share_group_ref: Share Group model
        :param share_group_snapshot: Optional -- ShareGroupSnapshot model.  If
                                     supplied, driver will use it to choose
                                     the appropriate share server.

        :returns: dict, dict -- first value is share_server, that
                  has been chosen for share group schedule.
                  Second value is share group updated with
                  share_server_id.
        :raises: InvalidInput if share_network_id is not supplied.
        """
        if not share_network_id:
            msg = _("'share_network_id' parameter should be provided. ")
            raise exception.InvalidInput(reason=msg)

        def error(msg, *args):
            # Log the failure and flag the whole share group as errored.
            LOG.error(msg, *args)
            self.db.share_group_update(
                context, share_group_ref['id'],
                {'status': constants.STATUS_ERROR})

        # Shared lock keyed on the share network id; per the docstring, the
        # same lock guards share server deletion, so a server chosen here
        # cannot be torn down before the group is bound to it.
        @utils.synchronized("share_manager_%s" % share_network_id,
                            external=True)
        def _wrapped_provide_share_server_for_share_group():
            try:
                available_share_servers = (
                    self.db.share_server_get_all_by_host_and_share_net_valid(
                        context, self.host, share_network_id))
            except exception.ShareServerNotFound:
                # No valid servers exist yet for this host/network; a new
                # one will be created below.
                available_share_servers = None

            compatible_share_server = None
            choose_share_server = (
                self.driver.choose_share_server_compatible_with_share_group)

            if available_share_servers:
                # Let the driver pick a compatible server among the
                # candidates; any driver error marks the group as errored.
                try:
                    compatible_share_server = choose_share_server(
                        context, available_share_servers, share_group_ref,
                        share_group_snapshot=share_group_snapshot,
                    )
                except Exception as e:
                    with excutils.save_and_reraise_exception():
                        error("Cannot choose compatible share-server: %s",
                              e)

            if not compatible_share_server:
                # Nothing reusable: create a new DB record; the CREATING
                # status triggers the backend setup further below.
                compatible_share_server = self.db.share_server_create(
                    context,
                    {
                        'host': self.host,
                        'share_network_id': share_network_id,
                        'status': constants.STATUS_CREATING
                    }
                )

            msg = ("Using share_server %(share_server)s for share "
                   "group %(group_id)s")
            LOG.debug(msg, {
                'share_server': compatible_share_server['id'],
                'group_id': share_group_ref['id']
            })

            # Bind the group to the server while still holding the lock so
            # the server cannot be considered unused and deleted meanwhile.
            updated_share_group = self.db.share_group_update(
                context,
                share_group_ref['id'],
                {'share_server_id': compatible_share_server['id']},
            )

            if compatible_share_server['status'] == constants.STATUS_CREATING:
                # Create share server on backend with data from db.
                compatible_share_server = self._setup_server(
                    context, compatible_share_server)
                LOG.info("Share server created successfully.")
            else:
                LOG.info("Used preexisting share server "
                         "'%(share_server_id)s'",
                         {'share_server_id': compatible_share_server['id']})
            return compatible_share_server, updated_share_group

        return _wrapped_provide_share_server_for_share_group()
  790 
  791     def _get_share_server(self, context, share_instance):
  792         if share_instance['share_server_id']:
  793             return self.db.share_server_get(
  794                 context, share_instance['share_server_id'])
  795         else:
  796             return None
  797 
  798     @utils.require_driver_initialized
  799     def connection_get_info(self, context, share_instance_id):
  800         share_instance = self.db.share_instance_get(
  801             context, share_instance_id, with_share_data=True)
  802 
  803         share_server = None
  804         if share_instance.get('share_server_id'):
  805             share_server = self.db.share_server_get(
  806                 context, share_instance['share_server_id'])
  807 
  808         return self.driver.connection_get_info(context, share_instance,
  809                                                share_server)
  810 
    def _migration_start_driver(
            self, context, share_ref, src_share_instance, dest_host, writable,
            preserve_metadata, nondisruptive, preserve_snapshots,
            new_share_network_id, new_az_id, new_share_type_id):
        """Start a driver-assisted migration of a share.

        Creates the destination share instance (and, when a share network is
        involved, a destination share server), validates the driver-reported
        compatibility against the requested guarantees, prepares snapshot
        instance mappings when snapshots are to be preserved, and invokes
        the driver's migration_start.

        :param share_ref: Share model being migrated.
        :param src_share_instance: Source share instance model.
        :param dest_host: Destination host for the new instance.
        :param writable: Whether the share must stay writable throughout.
        :param preserve_metadata: Whether file metadata must be preserved.
        :param nondisruptive: Whether the migration must be non-disruptive.
        :param preserve_snapshots: Whether snapshots must be migrated too.
        :param new_share_network_id: Share network for the destination.
        :param new_az_id: Availability zone id for the destination.
        :param new_share_type_id: Share type id for the destination.
        :return: True when the driver migration was started successfully.
        :raises: ShareMigrationFailed if the destination is incompatible
            with the requested options or the driver fails to start.
        """

        share_server = self._get_share_server(context, src_share_instance)

        share_api = api.API()

        request_spec, dest_share_instance = (
            share_api.create_share_instance_and_get_request_spec(
                context, share_ref, new_az_id, None, dest_host,
                new_share_network_id, new_share_type_id))

        self.db.share_instance_update(
            context, dest_share_instance['id'],
            {'status': constants.STATUS_MIGRATING_TO})

        # refresh and obtain proxified properties
        dest_share_instance = self.db.share_instance_get(
            context, dest_share_instance['id'], with_share_data=True)

        helper = migration.ShareMigrationHelper(
            context, self.db, share_ref, self.access_helper)

        try:
            if dest_share_instance['share_network_id']:
                rpcapi = share_rpcapi.ShareAPI()

                # NOTE(ganso): Obtaining the share_server_id asynchronously so
                # we can wait for it to be ready.
                dest_share_server_id = rpcapi.provide_share_server(
                    context, dest_share_instance,
                    dest_share_instance['share_network_id'])

                rpcapi.create_share_server(
                    context, dest_share_instance, dest_share_server_id)

                dest_share_server = helper.wait_for_share_server(
                    dest_share_server_id)
            else:
                dest_share_server = None

            # Validate the driver-reported capabilities against the
            # guarantees the user requested; any mismatch aborts here.
            compatibility = self.driver.migration_check_compatibility(
                context, src_share_instance, dest_share_instance,
                share_server, dest_share_server)

            if not compatibility.get('compatible'):
                msg = _("Destination host %(host)s is not compatible with "
                        "share %(share)s's source backend for driver-assisted "
                        "migration.") % {
                    'host': dest_host,
                    'share': share_ref['id'],
                }
                raise exception.ShareMigrationFailed(reason=msg)

            if (not compatibility.get('nondisruptive') and
                    nondisruptive):
                msg = _("Driver cannot perform a non-disruptive migration of "
                        "share %s.") % share_ref['id']
                raise exception.ShareMigrationFailed(reason=msg)

            if (not compatibility.get('preserve_metadata') and
                    preserve_metadata):
                msg = _("Driver cannot perform migration of share %s while "
                        "preserving all metadata.") % share_ref['id']
                raise exception.ShareMigrationFailed(reason=msg)

            if not compatibility.get('writable') and writable:
                msg = _("Driver cannot perform migration of share %s while "
                        "remaining writable.") % share_ref['id']
                raise exception.ShareMigrationFailed(reason=msg)

            if (not compatibility.get('preserve_snapshots') and
                    preserve_snapshots):
                msg = _("Driver cannot perform migration of share %s while "
                        "preserving snapshots.") % share_ref['id']
                raise exception.ShareMigrationFailed(reason=msg)

            snapshot_mapping = {}
            src_snap_instances = []
            src_snapshots = self.db.share_snapshot_get_all_for_share(
                context, share_ref['id'])

            if compatibility.get('preserve_snapshots'):

                # Make sure all snapshots are 'available'
                if any(x['status'] != constants.STATUS_AVAILABLE
                       for x in src_snapshots):
                    msg = _(
                        "All snapshots must have '%(status)s' status to be "
                        "migrated by the driver along with share "
                        "%(share)s.") % {
                            'share': share_ref['id'],
                            'status': constants.STATUS_AVAILABLE
                        }
                    raise exception.ShareMigrationFailed(reason=msg)

                src_snap_instances = [x.instance for x in src_snapshots]

                dest_snap_instance_data = {
                    'status': constants.STATUS_MIGRATING_TO,
                    'progress': '0%',
                    'share_instance_id': dest_share_instance['id'],
                }

                # Create a destination snapshot instance for each source one
                # and mark the source instance as migrating.
                for snap_instance in src_snap_instances:
                    snapshot_mapping[snap_instance['id']] = (
                        self.db.share_snapshot_instance_create(
                            context, snap_instance['snapshot_id'],
                            dest_snap_instance_data))
                    self.db.share_snapshot_instance_update(
                        context, snap_instance['id'],
                        {'status': constants.STATUS_MIGRATING})

            else:
                if src_snapshots:
                    msg = _("Driver does not support preserving snapshots, "
                            "driver-assisted migration cannot proceed while "
                            "share %s has snapshots.") % share_ref['id']
                    raise exception.ShareMigrationFailed(reason=msg)

            # Driver cannot keep the share writable during the copy, so
            # cast existing access rules to read-only for the duration.
            if not compatibility.get('writable'):
                self._cast_access_rules_to_readonly(
                    context, src_share_instance, share_server)

            LOG.debug("Initiating driver migration for share %s.",
                      share_ref['id'])

            self.db.share_update(
                context, share_ref['id'],
                {'task_state': (
                    constants.TASK_STATE_MIGRATION_DRIVER_STARTING)})

            self.driver.migration_start(
                context, src_share_instance, dest_share_instance,
                src_snap_instances, snapshot_mapping, share_server,
                dest_share_server)

            self.db.share_update(
                context, share_ref['id'],
                {'task_state': (
                    constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)})

        except Exception:
            # NOTE(ganso): Cleaning up error'ed destination share instance from
            # database. It is assumed that driver cleans up leftovers in
            # backend when migration fails.
            self._migration_delete_instance(context, dest_share_instance['id'])
            self._restore_migrating_snapshots_status(
                context, src_share_instance['id'])

            # NOTE(ganso): Read only access rules and share instance status
            # will be restored in migration_start's except block.

            # NOTE(ganso): For now source share instance should remain in
            # migrating status for host-assisted migration.
            msg = _("Driver-assisted migration of share %s "
                    "failed.") % share_ref['id']
            LOG.exception(msg)
            raise exception.ShareMigrationFailed(reason=msg)

        return True
  974 
  975     def _cast_access_rules_to_readonly(self, context, src_share_instance,
  976                                        share_server):
  977         self.db.share_instance_update(
  978             context, src_share_instance['id'],
  979             {'cast_rules_to_readonly': True})
  980 
  981         # Set all 'applying' or 'active' rules to 'queued_to_apply'. Since the
  982         # share instance has its cast_rules_to_readonly attribute set to True,
  983         # existing rules will be cast to read/only.
  984         acceptable_past_states = (constants.ACCESS_STATE_APPLYING,
  985                                   constants.ACCESS_STATE_ACTIVE)
  986         new_state = constants.ACCESS_STATE_QUEUED_TO_APPLY
  987         conditionally_change = {k: new_state for k in acceptable_past_states}
  988         self.access_helper.get_and_update_share_instance_access_rules(
  989             context, share_instance_id=src_share_instance['id'],
  990             conditionally_change=conditionally_change)
  991 
  992         self.access_helper.update_access_rules(
  993             context, src_share_instance['id'],
  994             share_server=share_server)
  995 
  996         utils.wait_for_access_update(
  997             context, self.db, src_share_instance,
  998             self.migration_wait_access_rules_timeout)
  999 
 1000     def _reset_read_only_access_rules(
 1001             self, context, share, share_instance_id, supress_errors=True,
 1002             helper=None):
 1003 
 1004         instance = self.db.share_instance_get(context, share_instance_id,
 1005                                               with_share_data=True)
 1006         if instance['cast_rules_to_readonly']:
 1007             update = {'cast_rules_to_readonly': False}
 1008 
 1009             self.db.share_instance_update(
 1010                 context, share_instance_id, update)
 1011 
 1012             share_server = self._get_share_server(context, instance)
 1013 
 1014             if helper is None:
 1015                 helper = migration.ShareMigrationHelper(
 1016                     context, self.db, share, self.access_helper)
 1017 
 1018             if supress_errors:
 1019                 helper.cleanup_access_rules(instance, share_server)
 1020             else:
 1021                 helper.revert_access_rules(instance, share_server)
 1022 
    @periodic_task.periodic_task(
        spacing=CONF.migration_driver_continue_update_interval)
    @utils.require_driver_initialized
    def migration_driver_continue(self, context):
        """Invokes driver to continue migration of shares.

        Periodic task. For every share instance on this host in 'migrating'
        status whose share has a driver migration in progress, polls the
        driver via migration_continue. When the driver reports completion,
        the share's task state is advanced to the phase-1-done state. On
        driver error, the destination instance and migrating snapshot
        instances are cleaned up, read-only rules are reset, and the share
        is returned to 'available' with an errored task state.
        """

        instances = self.db.share_instances_get_all_by_host(context, self.host)

        for instance in instances:

            # Only instances actively being migrated are of interest.
            if instance['status'] != constants.STATUS_MIGRATING:
                continue

            share = self.db.share_get(context, instance['share_id'])

            if share['task_state'] == (
                    constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS):

                share_api = api.API()

                src_share_instance_id, dest_share_instance_id = (
                    share_api.get_migrating_instances(share))

                src_share_instance = instance

                dest_share_instance = self.db.share_instance_get(
                    context, dest_share_instance_id, with_share_data=True)

                src_share_server = self._get_share_server(
                    context, src_share_instance)

                dest_share_server = self._get_share_server(
                    context, dest_share_instance)

                src_snap_instances, snapshot_mappings = (
                    self._get_migrating_snapshots(context, src_share_instance,
                                                  dest_share_instance))

                try:

                    finished = self.driver.migration_continue(
                        context, src_share_instance, dest_share_instance,
                        src_snap_instances, snapshot_mappings,
                        src_share_server, dest_share_server)

                    if finished:
                        self.db.share_update(
                            context, instance['share_id'],
                            {'task_state':
                                (constants.
                                 TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE)})

                        LOG.info("Share Migration for share %s completed "
                                 "first phase successfully.",
                                 share['id'])
                    else:
                        # Not finished yet; re-read the share to detect a
                        # cancellation requested in the meantime.
                        share = self.db.share_get(
                            context, instance['share_id'])

                        if (share['task_state'] ==
                                constants.TASK_STATE_MIGRATION_CANCELLED):
                            LOG.warning(
                                "Share Migration for share %s was cancelled.",
                                share['id'])

                except Exception:

                    # NOTE(ganso): Cleaning up error'ed destination share
                    # instance from database. It is assumed that driver cleans
                    # up leftovers in backend when migration fails.
                    self._migration_delete_instance(
                        context, dest_share_instance['id'])
                    self._restore_migrating_snapshots_status(
                        context, src_share_instance['id'])
                    self._reset_read_only_access_rules(
                        context, share, src_share_instance_id)
                    self.db.share_instance_update(
                        context, src_share_instance_id,
                        {'status': constants.STATUS_AVAILABLE})

                    self.db.share_update(
                        context, instance['share_id'],
                        {'task_state': constants.TASK_STATE_MIGRATION_ERROR})
                    msg = _("Driver-assisted migration of share %s "
                            "failed.") % share['id']
                    LOG.exception(msg)
 1110     def _get_migrating_snapshots(
 1111             self, context, src_share_instance, dest_share_instance):
 1112 
 1113         dest_snap_instances = (
 1114             self.db.share_snapshot_instance_get_all_with_filters(
 1115                 context,
 1116                 {'share_instance_ids': [dest_share_instance['id']]}))
 1117 
 1118         snapshot_mappings = {}
 1119         src_snap_instances = []
 1120         if len(dest_snap_instances) > 0:
 1121             src_snap_instances = (
 1122                 self.db.share_snapshot_instance_get_all_with_filters(
 1123                     context,
 1124                     {'share_instance_ids': [src_share_instance['id']]}))
 1125             for snap in src_snap_instances:
 1126                 dest_snap_instance = next(
 1127                     x for x in dest_snap_instances
 1128                     if snap['snapshot_id'] == x['snapshot_id'])
 1129                 snapshot_mappings[snap['id']] = dest_snap_instance
 1130 
 1131         return src_snap_instances, snapshot_mappings
 1132 
 1133     def _restore_migrating_snapshots_status(
 1134             self, context, src_share_instance_id,
 1135             errored_dest_instance_id=None):
 1136         filters = {'share_instance_ids': [src_share_instance_id]}
 1137         status = constants.STATUS_AVAILABLE
 1138         if errored_dest_instance_id:
 1139             filters['share_instance_ids'].append(errored_dest_instance_id)
 1140             status = constants.STATUS_ERROR
 1141         snap_instances = (
 1142             self.db.share_snapshot_instance_get_all_with_filters(
 1143                 context, filters)
 1144         )
 1145         for instance in snap_instances:
 1146             if instance['status'] == constants.STATUS_MIGRATING:
 1147                 self.db.share_snapshot_instance_update(
 1148                     context, instance['id'], {'status': status})
 1149             elif (errored_dest_instance_id and
 1150                   instance['status'] == constants.STATUS_MIGRATING_TO):
 1151                 self.db.share_snapshot_instance_update(
 1152                     context, instance['id'], {'status': status})
 1153 
    @utils.require_driver_initialized
    def migration_start(
            self, context, share_id, dest_host, force_host_assisted_migration,
            preserve_metadata, writable, nondisruptive, preserve_snapshots,
            new_share_network_id=None, new_share_type_id=None):
        """Migrates a share from current host to another host.

        Tries a driver-assisted migration first (unless
        ``force_host_assisted_migration`` is set). If that did not succeed
        and none of the driver-only guarantees (writable, preserve_metadata,
        nondisruptive, preserve_snapshots) were requested and the share has
        no snapshots, falls back to host-assisted (data copy) migration.

        :raises: ShareMigrationFailed when the host-assisted fallback is
            not allowed or fails.
        """
        LOG.debug("Entered migration_start method for share %s.", share_id)

        self.db.share_update(
            context, share_id,
            {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS})

        share_ref = self.db.share_get(context, share_id)
        share_instance = self._get_share_instance(context, share_ref)
        success = False

        # Resolve the destination service to learn its availability zone.
        host_value = share_utils.extract_host(dest_host)
        service = self.db.service_get_by_args(
            context, host_value, 'manila-share')
        new_az_id = service['availability_zone_id']

        if not force_host_assisted_migration:

            try:
                success = self._migration_start_driver(
                    context, share_ref, share_instance, dest_host, writable,
                    preserve_metadata, nondisruptive, preserve_snapshots,
                    new_share_network_id, new_az_id, new_share_type_id)

            except Exception as e:
                # NotImplementedError is not logged loudly; any other driver
                # failure is. Either way, success stays False and we fall
                # through to the host-assisted path below.
                if not isinstance(e, NotImplementedError):
                    LOG.exception(
                        ("The driver could not migrate the share %(shr)s"),
                        {'shr': share_id})

        try:

            if not success:
                if (writable or preserve_metadata or nondisruptive or
                        preserve_snapshots):
                    msg = _("Migration for share %s could not be "
                            "performed because host-assisted migration is not "
                            "allowed when share must remain writable, "
                            "preserve snapshots and/or file metadata or be "
                            "performed nondisruptively.") % share_id

                    raise exception.ShareMigrationFailed(reason=msg)

                # We only handle shares without snapshots for now
                snaps = self.db.share_snapshot_get_all_for_share(
                    context, share_id)
                if snaps:
                    msg = _("Share %s must not have snapshots in order to "
                            "perform a host-assisted migration.") % share_id
                    raise exception.ShareMigrationFailed(reason=msg)

                LOG.debug("Starting host-assisted migration "
                          "for share %s.", share_id)

                self.db.share_update(
                    context, share_id,
                    {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS})

                self._migration_start_host_assisted(
                    context, share_ref, share_instance, dest_host,
                    new_share_network_id, new_az_id, new_share_type_id)

        except Exception:
            # Fallback failed or was not permitted: record the error state
            # and restore the source instance before re-raising.
            msg = _("Host-assisted migration failed for share %s.") % share_id
            LOG.exception(msg)
            self.db.share_update(
                context, share_id,
                {'task_state': constants.TASK_STATE_MIGRATION_ERROR})
            self._reset_read_only_access_rules(
                context, share_ref, share_instance['id'])
            self.db.share_instance_update(
                context, share_instance['id'],
                {'status': constants.STATUS_AVAILABLE})

            raise exception.ShareMigrationFailed(reason=msg)
 1234 
    def _migration_start_host_assisted(
            self, context, share, src_share_instance, dest_host,
            new_share_network_id, new_az_id, new_share_type_id):
        """Start a host-assisted (data copy) migration of a share.

        Casts the source instance's access rules to read-only, creates the
        destination instance, obtains connection info for both sides and
        asks the Data Service to start copying between them.

        :param share: Share model being migrated.
        :param src_share_instance: Source share instance model.
        :param dest_host: Destination host for the new instance.
        :param new_share_network_id: Share network for the new instance.
        :param new_az_id: Availability zone id for the new instance.
        :param new_share_type_id: Share type id for the new instance.
        :raises: ShareMigrationFailed when instance creation or the data
            copy setup fails.
        """

        rpcapi = share_rpcapi.ShareAPI()

        helper = migration.ShareMigrationHelper(
            context, self.db, share, self.access_helper)

        share_server = self._get_share_server(context.elevated(),
                                              src_share_instance)

        # Source must not change while the data service copies it.
        self._cast_access_rules_to_readonly(
            context, src_share_instance, share_server)

        try:
            dest_share_instance = helper.create_instance_and_wait(
                share, dest_host, new_share_network_id, new_az_id,
                new_share_type_id)

            self.db.share_instance_update(
                context, dest_share_instance['id'],
                {'status': constants.STATUS_MIGRATING_TO})

        except Exception:
            msg = _("Failed to create instance on destination "
                    "backend during migration of share %s.") % share['id']
            LOG.exception(msg)
            raise exception.ShareMigrationFailed(reason=msg)

        ignore_list = self.driver.configuration.safe_get(
            'migration_ignore_files')

        data_rpc = data_rpcapi.DataAPI()

        try:
            # Source connection info comes straight from the local driver;
            # the destination's is fetched over RPC from its own manager.
            src_connection_info = self.driver.connection_get_info(
                context, src_share_instance, share_server)

            dest_connection_info = rpcapi.connection_get_info(
                context, dest_share_instance)

            LOG.debug("Time to start copying in migration"
                      " for share %s.", share['id'])

            data_rpc.migration_start(
                context, share['id'], ignore_list, src_share_instance['id'],
                dest_share_instance['id'], src_connection_info,
                dest_connection_info)

        except Exception:
            msg = _("Failed to obtain migration info from backends or"
                    " invoking Data Service for migration of "
                    "share %s.") % share['id']
            LOG.exception(msg)
            # The destination instance is useless without a data copy.
            helper.cleanup_new_instance(dest_share_instance)
            raise exception.ShareMigrationFailed(reason=msg)
 1292 
    def _migration_complete_driver(
            self, context, share_ref, src_share_instance, dest_share_instance):
        """Complete phase 2 of a driver-assisted share migration.

        Invokes the driver's ``migration_complete``, applies any model
        updates it returns (export locations and snapshot updates), makes
        the destination instance the active one, and deletes the source
        instance.

        :param share_ref: share model being migrated.
        :param src_share_instance: source share instance model.
        :param dest_share_instance: destination share instance model.
        """
        share_server = self._get_share_server(context, src_share_instance)
        dest_share_server = self._get_share_server(
            context, dest_share_instance)

        self.db.share_update(
            context, share_ref['id'],
            {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING})

        # Snapshot instances that are being migrated along with the share,
        # and the mapping of source -> destination snapshot instances.
        src_snap_instances, snapshot_mappings = (
            self._get_migrating_snapshots(context, src_share_instance,
                                          dest_share_instance))

        # The driver may return a dict of model updates; normalize None
        # to an empty dict so the lookups below are safe.
        data_updates = self.driver.migration_complete(
            context, src_share_instance, dest_share_instance,
            src_snap_instances, snapshot_mappings, share_server,
            dest_share_server) or {}

        if data_updates.get('export_locations'):
            self.db.share_export_locations_update(
                context, dest_share_instance['id'],
                data_updates['export_locations'])

        snapshot_updates = data_updates.get('snapshot_updates') or {}

        dest_extra_specs = self._get_extra_specs_from_share_type(
            context, dest_share_instance['share_type_id'])

        # Mark every migrated snapshot instance available and, when the
        # destination share type supports mountable snapshots, record the
        # snapshot export locations reported by the driver.
        for src_snap_ins, dest_snap_ins in snapshot_mappings.items():
            model_update = snapshot_updates.get(dest_snap_ins['id']) or {}
            # Export locations are stored separately, not as part of the
            # snapshot instance model update.
            snapshot_export_locations = model_update.pop(
                'export_locations', [])

            model_update['status'] = constants.STATUS_AVAILABLE
            model_update['progress'] = '100%'
            self.db.share_snapshot_instance_update(
                context, dest_snap_ins['id'], model_update)

            if dest_extra_specs['mount_snapshot_support']:

                for el in snapshot_export_locations:
                    values = {
                        'share_snapshot_instance_id': dest_snap_ins['id'],
                        'path': el['path'],
                        'is_admin_only': el['is_admin_only'],
                    }
                    self.db.share_snapshot_instance_export_location_create(
                        context, values)

        helper = migration.ShareMigrationHelper(
            context, self.db, share_ref, self.access_helper)

        # Switch the share's access rules over to the destination instance.
        helper.apply_new_access_rules(dest_share_instance)

        self.db.share_instance_update(
            context, dest_share_instance['id'],
            {'status': constants.STATUS_AVAILABLE})

        self.db.share_instance_update(context, src_share_instance['id'],
                                      {'status': constants.STATUS_INACTIVE})

        # Finally remove the (now inactive) source instance and its
        # associated records.
        self._migration_delete_instance(context, src_share_instance['id'])
 1358     def _migration_delete_instance(self, context, instance_id):
 1359 
 1360         # refresh the share instance model
 1361         share_instance = self.db.share_instance_get(
 1362             context, instance_id, with_share_data=True)
 1363 
 1364         rules = self.access_helper.get_and_update_share_instance_access_rules(
 1365             context, share_instance_id=instance_id)
 1366 
 1367         self.access_helper.delete_share_instance_access_rules(
 1368             context, rules, instance_id)
 1369 
 1370         snap_instances = self.db.share_snapshot_instance_get_all_with_filters(
 1371             context, {'share_instance_ids': [instance_id]})
 1372 
 1373         for instance in snap_instances:
 1374             self.db.share_snapshot_instance_delete(context, instance['id'])
 1375 
 1376         self.db.share_instance_delete(context, instance_id)
 1377         LOG.info("Share instance %s: deleted successfully.",
 1378                  instance_id)
 1379 
 1380         self._check_delete_share_server(context, share_instance)
 1381 
 1382     @utils.require_driver_initialized
 1383     def migration_complete(self, context, src_instance_id, dest_instance_id):
 1384 
 1385         src_share_instance = self.db.share_instance_get(
 1386             context, src_instance_id, with_share_data=True)
 1387         dest_share_instance = self.db.share_instance_get(
 1388             context, dest_instance_id, with_share_data=True)
 1389 
 1390         share_ref = self.db.share_get(context, src_share_instance['share_id'])
 1391 
 1392         LOG.info("Received request to finish Share Migration for "
 1393                  "share %s.", share_ref['id'])
 1394 
 1395         if share_ref['task_state'] == (
 1396                 constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE):
 1397 
 1398             try:
 1399                 self._migration_complete_driver(
 1400                     context, share_ref, src_share_instance,
 1401                     dest_share_instance)
 1402 
 1403             except Exception:
 1404                     msg = _("Driver migration completion failed for"
 1405                             " share %s.") % share_ref['id']
 1406                     LOG.exception(msg)
 1407 
 1408                     # NOTE(ganso): If driver fails during migration-complete,
 1409                     # all instances are set to error and it is up to the admin
 1410                     # to fix the problem to either complete migration
 1411                     # manually or clean it up. At this moment, data
 1412                     # preservation at the source backend cannot be
 1413                     # guaranteed.
 1414 
 1415                     self._restore_migrating_snapshots_status(
 1416                         context, src_share_instance['id'],
 1417                         errored_dest_instance_id=dest_share_instance['id'])
 1418                     self.db.share_instance_update(
 1419                         context, src_instance_id,
 1420                         {'status': constants.STATUS_ERROR})
 1421                     self.db.share_instance_update(
 1422                         context, dest_instance_id,
 1423                         {'status': constants.STATUS_ERROR})
 1424                     self.db.share_update(
 1425                         context, share_ref['id'],
 1426                         {'task_state': constants.TASK_STATE_MIGRATION_ERROR})
 1427                     raise exception.ShareMigrationFailed(reason=msg)
 1428         else:
 1429             try:
 1430                 self._migration_complete_host_assisted(
 1431                     context, share_ref, src_instance_id,
 1432                     dest_instance_id)
 1433             except Exception:
 1434                     msg = _("Host-assisted migration completion failed for"
 1435                             " share %s.") % share_ref['id']
 1436                     LOG.exception(msg)
 1437                     self.db.share_update(
 1438                         context, share_ref['id'],
 1439                         {'task_state': constants.TASK_STATE_MIGRATION_ERROR})
 1440                     self.db.share_instance_update(
 1441                         context, src_instance_id,
 1442                         {'status': constants.STATUS_AVAILABLE})
 1443                     raise exception.ShareMigrationFailed(reason=msg)
 1444 
 1445         model_update = self._get_extra_specs_from_share_type(
 1446             context, dest_share_instance['share_type_id'])
 1447 
 1448         model_update['task_state'] = constants.TASK_STATE_MIGRATION_SUCCESS
 1449 
 1450         self.db.share_update(
 1451             context, dest_share_instance['share_id'], model_update)
 1452 
 1453         LOG.info("Share Migration for share %s"
 1454                  " completed successfully.", share_ref['id'])
 1455 
 1456     def _get_extra_specs_from_share_type(self, context, share_type_id):
 1457 
 1458         share_type = share_types.get_share_type(context, share_type_id)
 1459 
 1460         share_api = api.API()
 1461 
 1462         return share_api.get_share_attributes_from_share_type(share_type)
 1463 
 1464     def _migration_complete_host_assisted(self, context, share_ref,
 1465                                           src_instance_id, dest_instance_id):
 1466 
 1467         src_share_instance = self.db.share_instance_get(
 1468             context, src_instance_id, with_share_data=True)
 1469         dest_share_instance = self.db.share_instance_get(
 1470             context, dest_instance_id, with_share_data=True)
 1471 
 1472         helper = migration.ShareMigrationHelper(
 1473             context, self.db, share_ref, self.access_helper)
 1474 
 1475         task_state = share_ref['task_state']
 1476         if task_state in (constants.TASK_STATE_DATA_COPYING_ERROR,
 1477                           constants.TASK_STATE_DATA_COPYING_CANCELLED):
 1478             msg = _("Data copy of host assisted migration for share %s has not"
 1479                     " completed successfully.") % share_ref['id']
 1480             LOG.warning(msg)
 1481             helper.cleanup_new_instance(dest_share_instance)
 1482             cancelled = (
 1483                 task_state == constants.TASK_STATE_DATA_COPYING_CANCELLED)
 1484             suppress_errors = True
 1485             if cancelled:
 1486                 suppress_errors = False
 1487             self._reset_read_only_access_rules(
 1488                 context, share_ref, src_instance_id,
 1489                 supress_errors=suppress_errors, helper=helper)
 1490             self.db.share_instance_update(
 1491                 context, src_instance_id,
 1492                 {'status': constants.STATUS_AVAILABLE})
 1493             if cancelled:
 1494                 self.db.share_update(
 1495                     context, share_ref['id'],
 1496                     {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED})
 1497 
 1498                 LOG.info("Share Migration for share %s"
 1499                          " was cancelled.", share_ref['id'])
 1500                 return
 1501             else:
 1502                 raise exception.ShareMigrationFailed(reason=msg)
 1503 
 1504         elif task_state != constants.TASK_STATE_DATA_COPYING_COMPLETED:
 1505             msg = _("Data copy for migration of share %s has not completed"
 1506                     " yet.") % share_ref['id']
 1507             LOG.error(msg)
 1508             raise exception.ShareMigrationFailed(reason=msg)
 1509 
 1510         self.db.share_update(
 1511             context, share_ref['id'],
 1512             {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING})
 1513 
 1514         try:
 1515             helper.apply_new_access_rules(dest_share_instance)
 1516         except Exception:
 1517             msg = _("Failed to apply new access rules during migration "
 1518                     "of share %s.") % share_ref['id']
 1519             LOG.exception(msg)
 1520             helper.cleanup_new_instance(dest_share_instance)
 1521             self._reset_read_only_access_rules(
 1522                 context, share_ref, src_instance_id, helper=helper,
 1523                 supress_errors=True)
 1524             self.db.share_instance_update(
 1525                 context, src_instance_id,
 1526                 {'status': constants.STATUS_AVAILABLE})
 1527 
 1528             raise exception.ShareMigrationFailed(reason=msg)
 1529 
 1530         self.db.share_instance_update(
 1531             context, dest_share_instance['id'],
 1532             {'status': constants.STATUS_AVAILABLE})
 1533 
 1534         self.db.share_instance_update(context, src_instance_id,
 1535                                       {'status': constants.STATUS_INACTIVE})
 1536 
 1537         helper.delete_instance_and_wait(src_share_instance)
 1538 
 1539     @utils.require_driver_initialized
 1540     def migration_cancel(self, context, src_instance_id, dest_instance_id):
 1541 
 1542         src_share_instance = self.db.share_instance_get(
 1543             context, src_instance_id, with_share_data=True)
 1544         dest_share_instance = self.db.share_instance_get(
 1545             context, dest_instance_id, with_share_data=True)
 1546 
 1547         share_ref = self.db.share_get(context, src_share_instance['share_id'])
 1548 
 1549         if share_ref['task_state'] not in (
 1550                 constants.TASK_STATE_DATA_COPYING_COMPLETED,
 1551                 constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
 1552                 constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS):
 1553             msg = _("Migration of share %s cannot be cancelled at this "
 1554                     "moment.") % share_ref['id']
 1555             raise exception.InvalidShare(reason=msg)
 1556 
 1557         share_server = self._get_share_server(context, src_share_instance)
 1558 
 1559         dest_share_server = self._get_share_server(
 1560             context, dest_share_instance)
 1561 
 1562         helper = migration.ShareMigrationHelper(
 1563             context, self.db, share_ref, self.access_helper)
 1564 
 1565         if share_ref['task_state'] == (
 1566                 constants.TASK_STATE_DATA_COPYING_COMPLETED):
 1567 
 1568             self.db.share_instance_update(
 1569                 context, dest_share_instance['id'],
 1570                 {'status': constants.STATUS_INACTIVE})
 1571 
 1572             helper.cleanup_new_instance(dest_share_instance)
 1573 
 1574         else:
 1575 
 1576             src_snap_instances, snapshot_mappings = (
 1577                 self._get_migrating_snapshots(context, src_share_instance,
 1578                                               dest_share_instance))
 1579 
 1580             self.driver.migration_cancel(
 1581                 context, src_share_instance, dest_share_instance,
 1582                 src_snap_instances, snapshot_mappings, share_server,
 1583                 dest_share_server)
 1584 
 1585             self._migration_delete_instance(context, dest_share_instance['id'])
 1586             self._restore_migrating_snapshots_status(
 1587                 context, src_share_instance['id'])
 1588 
 1589         self._reset_read_only_access_rules(
 1590             context, share_ref, src_instance_id, supress_errors=False,
 1591             helper=helper)
 1592 
 1593         self.db.share_instance_update(
 1594             context, src_instance_id,
 1595             {'status': constants.STATUS_AVAILABLE})
 1596 
 1597         self.db.share_update(
 1598             context, share_ref['id'],
 1599             {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED})
 1600 
 1601         LOG.info("Share Migration for share %s"
 1602                  " was cancelled.", share_ref['id'])
 1603 
 1604     @utils.require_driver_initialized
 1605     def migration_get_progress(self, context, src_instance_id,
 1606                                dest_instance_id):
 1607 
 1608         src_share_instance = self.db.share_instance_get(
 1609             context, src_instance_id, with_share_data=True)
 1610         dest_share_instance = self.db.share_instance_get(
 1611             context, dest_instance_id, with_share_data=True)
 1612 
 1613         share_ref = self.db.share_get(context, src_share_instance['share_id'])
 1614 
 1615         # Confirm that it is driver migration scenario
 1616         if share_ref['task_state'] != (
 1617                 constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS):
 1618             msg = _("Driver is not performing migration for"
 1619                     " share %s at this moment.") % share_ref['id']
 1620             raise exception.InvalidShare(reason=msg)
 1621 
 1622         share_server = None
 1623         if share_ref.instance.get('share_server_id'):
 1624             share_server = self.db.share_server_get(
 1625                 context, src_share_instance['share_server_id'])
 1626 
 1627         dest_share_server = None
 1628         if dest_share_instance.get('share_server_id'):
 1629             dest_share_server = self.db.share_server_get(
 1630                 context, dest_share_instance['share_server_id'])
 1631 
 1632         src_snap_instances, snapshot_mappings = (
 1633             self._get_migrating_snapshots(context, src_share_instance,
 1634                                           dest_share_instance))
 1635 
 1636         return self.driver.migration_get_progress(
 1637             context, src_share_instance, dest_share_instance,
 1638             src_snap_instances, snapshot_mappings, share_server,
 1639             dest_share_server)
 1640 
 1641     def _get_share_instance(self, context, share):
 1642         if isinstance(share, six.string_types):
 1643             id = share
 1644         else:
 1645             id = share.instance['id']
 1646         return self.db.share_instance_get(context, id, with_share_data=True)
 1647 
    @add_hooks
    @utils.require_driver_initialized
    def create_share_instance(self, context, share_instance_id,
                              request_spec=None, filter_properties=None,
                              snapshot_id=None):
        """Create a share instance on the backend.

        Provisions a share server if needed, asks the driver to create the
        share (optionally from a snapshot), stores the resulting export
        locations, and updates instance status. On failure the instance is
        set to error, a user message is recorded, and the exception is
        re-raised.

        :param share_instance_id: id of the share instance to create.
        :param request_spec: scheduler request spec (unused here).
        :param filter_properties: scheduler filter properties (unused here).
        :param snapshot_id: optional id of the snapshot to create from.
        """
        context = context.elevated()

        share_instance = self._get_share_instance(context, share_instance_id)
        share_id = share_instance.get('share_id')
        share_network_id = share_instance.get('share_network_id')
        share = self.db.share_get(context, share_id)

        self._notify_about_share_usage(context, share,
                                       share_instance, "create.start")

        # Default the AZ to this manager's configured availability zone.
        if not share_instance['availability_zone']:
            share_instance = self.db.share_instance_update(
                context, share_instance_id,
                {'availability_zone': self.availability_zone},
                with_share_data=True
            )

        # A share network only makes sense when the driver manages share
        # servers; otherwise fail early and record a user message.
        if share_network_id and not self.driver.driver_handles_share_servers:
            self.db.share_instance_update(
                context, share_instance_id, {'status': constants.STATUS_ERROR})
            self.message_api.create(
                context,
                message_field.Action.CREATE,
                share['project_id'],
                resource_type=message_field.Resource.SHARE,
                resource_id=share_id,
                detail=message_field.Detail.UNEXPECTED_NETWORK)
            raise exception.ManilaException(_(
                "Creation of share instance %s failed: driver does not expect "
                "share-network to be provided with current "
                "configuration.") % share_instance_id)

        if snapshot_id is not None:
            snapshot_ref = self.db.share_snapshot_get(context, snapshot_id)
            # When creating from a snapshot, the parent share's server (if
            # any) influences share server selection below.
            parent_share_server_id = (
                snapshot_ref['share']['instance']['share_server_id'])
        else:
            snapshot_ref = None
            parent_share_server_id = None

        share_group_ref = None
        if share_instance.get('share_group_id'):
            share_group_ref = self.db.share_group_get(
                context, share_instance['share_group_id'])

        if share_network_id or parent_share_server_id:
            try:
                share_server, share_instance = (
                    self._provide_share_server_for_share(
                        context, share_network_id, share_instance,
                        snapshot=snapshot_ref,
                        share_group=share_group_ref,
                    )
                )
            except Exception:
                # Re-raise after marking the instance errored and
                # recording a user-visible message.
                with excutils.save_and_reraise_exception():
                    error = ("Creation of share instance %s failed: "
                             "failed to get share server.")
                    LOG.error(error, share_instance_id)
                    self.db.share_instance_update(
                        context, share_instance_id,
                        {'status': constants.STATUS_ERROR}
                    )
                    self.message_api.create(
                        context,
                        message_field.Action.CREATE,
                        share['project_id'],
                        resource_type=message_field.Resource.SHARE,
                        resource_id=share_id,
                        detail=message_field.Detail.NO_SHARE_SERVER)

        else:
            share_server = None

        try:
            if snapshot_ref:
                export_locations = self.driver.create_share_from_snapshot(
                    context, share_instance, snapshot_ref.instance,
                    share_server=share_server)
            else:
                export_locations = self.driver.create_share(
                    context, share_instance, share_server=share_server)

            self.db.share_export_locations_update(
                context, share_instance['id'], export_locations)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error("Share instance %s failed on creation.",
                          share_instance_id)
                detail_data = getattr(e, 'detail_data', {})

                def get_export_location(details):
                    # Drivers may attach export locations to the exception
                    # under either key; tolerate non-dict detail payloads.
                    if not isinstance(details, dict):
                        return None
                    return details.get('export_locations',
                                       details.get('export_location'))

                export_locations = get_export_location(detail_data)

                # Persist any export locations the driver managed to
                # create before failing, so cleanup is possible.
                if export_locations:
                    self.db.share_export_locations_update(
                        context, share_instance['id'], export_locations)
                else:
                    LOG.warning('Share instance information in exception '
                                'can not be written to db because it '
                                'contains %s and it is not a dictionary.',
                                detail_data)
                self.db.share_instance_update(
                    context, share_instance_id,
                    {'status': constants.STATUS_ERROR}
                )
                self.message_api.create(
                    context,
                    message_field.Action.CREATE,
                    share['project_id'],
                    resource_type=message_field.Resource.SHARE,
                    resource_id=share_id,
                    exception=e)
        else:
            LOG.info("Share instance %s created successfully.",
                     share_instance_id)
            updates = {
                'status': constants.STATUS_AVAILABLE,
                'launched_at': timeutils.utcnow(),
            }
            # For replicated shares the first instance becomes the
            # 'active' replica.
            if share.get('replication_type'):
                updates['replica_state'] = constants.REPLICA_STATE_ACTIVE

            self.db.share_instance_update(context, share_instance_id, updates)

            self._notify_about_share_usage(context, share,
                                           share_instance, "create.end")
 1787 
 1788     def _update_share_replica_access_rules_state(self, context,
 1789                                                  share_replica_id, state):
 1790         """Update the access_rules_status for the share replica."""
 1791         self.access_helper.get_and_update_share_instance_access_rules_status(
 1792             context, status=state, share_instance_id=share_replica_id)
 1793 
 1794     def _get_replica_snapshots_for_snapshot(self, context, snapshot_id,
 1795                                             active_replica_id,
 1796                                             share_replica_id,
 1797                                             with_share_data=True):
 1798         """Return dict of snapshot instances of active and replica instances.
 1799 
 1800         This method returns a dict of snapshot instances for snapshot
 1801         referred to by snapshot_id. The dict contains the snapshot instance
 1802         pertaining to the 'active' replica and the snapshot instance
 1803         pertaining to the replica referred to by share_replica_id.
 1804         """
 1805         filters = {
 1806             'snapshot_ids': snapshot_id,
 1807             'share_instance_ids': (share_replica_id, active_replica_id),
 1808         }
 1809         instance_list = self.db.share_snapshot_instance_get_all_with_filters(
 1810             context, filters, with_share_data=with_share_data)
 1811 
 1812         snapshots = {
 1813             'active_replica_snapshot': self._get_snapshot_instance_dict(
 1814                 context,
 1815                 list(filter(lambda x:
 1816                             x['share_instance_id'] == active_replica_id,
 1817                             instance_list))[0]),
 1818             'share_replica_snapshot': self._get_snapshot_instance_dict(
 1819                 context,
 1820                 list(filter(lambda x:
 1821                             x['share_instance_id'] == share_replica_id,
 1822                             instance_list))[0]),
 1823         }
 1824 
 1825         return snapshots
 1826 
 1827     @add_hooks
 1828     @utils.require_driver_initialized
 1829     @locked_share_replica_operation
 1830     def create_share_replica(self, context, share_replica_id, share_id=None,
 1831                              request_spec=None, filter_properties=None):
 1832         """Create a share replica."""
 1833         context = context.elevated()
 1834 
 1835         share_replica = self.db.share_replica_get(
 1836             context, share_replica_id, with_share_data=True,
 1837             with_share_server=True)
 1838 
 1839         if not share_replica['availability_zone']:
 1840             share_replica = self.db.share_replica_update(
 1841                 context, share_replica['id'],
 1842                 {'availability_zone': self.availability_zone},
 1843                 with_share_data=True
 1844             )
 1845 
 1846         _active_replica = (
 1847             self.db.share_replicas_get_available_active_replica(
 1848                 context, share_replica['share_id'], with_share_data=True,
 1849                 with_share_server=True))
 1850 
 1851         if not _active_replica:
 1852             self.db.share_replica_update(
 1853                 context, share_replica['id'],
 1854                 {'status': constants.STATUS_ERROR,
 1855                  'replica_state': constants.STATUS_ERROR})
 1856             self.message_api.create(
 1857                 context,
 1858                 message_field.Action.CREATE,
 1859                 share_replica['project_id'],
 1860                 resource_type=message_field.Resource.SHARE_REPLICA,
 1861                 resource_id=share_replica['id'],
 1862                 detail=message_field.Detail.NO_ACTIVE_REPLICA)
 1863             msg = _("An 'active' replica must exist in 'available' "
 1864                     "state to create a new replica for share %s.")
 1865             raise exception.ReplicationException(
 1866                 reason=msg % share_replica['share_id'])
 1867 
 1868         # We need the share_network_id in case of
 1869         # driver_handles_share_server=True
 1870         share_network_id = share_replica.get('share_network_id', None)
 1871 
 1872         if (share_network_id and
 1873                 not self.driver.driver_handles_share_servers):
 1874             self.db.share_replica_update(
 1875                 context, share_replica['id'],
 1876                 {'status': constants.STATUS_ERROR,
 1877                  'replica_state': constants.STATUS_ERROR})
 1878             self.message_api.create(
 1879                 context,
 1880                 message_field.Action.CREATE,
 1881                 share_replica['project_id'],
 1882                 resource_type=message_field.Resource.SHARE_REPLICA,
 1883                 resource_id=share_replica['id'],
 1884                 detail=message_field.Detail.UNEXPECTED_NETWORK)
 1885             raise exception.InvalidDriverMode(
 1886                 "Driver does not expect share-network to be provided "
 1887                 "with current configuration.")
 1888 
 1889         if share_network_id:
 1890             try:
 1891                 share_server, share_replica = (
 1892                     self._provide_share_server_for_share(
 1893                         context, share_network_id, share_replica)
 1894                 )
 1895             except Exception:
 1896                 with excutils.save_and_reraise_exception():
 1897                     LOG.error("Failed to get share server "
 1898                               "for share replica creation.")
 1899                     self.db.share_replica_update(
 1900                         context, share_replica['id'],
 1901                         {'status': constants.STATUS_ERROR,
 1902                          'replica_state': constants.STATUS_ERROR})
 1903                     self.message_api.create(
 1904                         context,
 1905                         message_field.Action.CREATE,
 1906                         share_replica['project_id'],
 1907                         resource_type=message_field.Resource.SHARE_REPLICA,
 1908                         resource_id=share_replica['id'],
 1909                         detail=message_field.Detail.NO_SHARE_SERVER)
 1910         else:
 1911             share_server = None
 1912 
 1913         # Map the existing access rules for the share to
 1914         # the replica in the DB.
 1915         share_access_rules = self.db.share_instance_access_copy(
 1916             context, share_replica['share_id'], share_replica['id'])
 1917 
 1918         # Get snapshots for the share.
 1919         share_snapshots = self.db.share_snapshot_get_all_for_share(
 1920             context, share_id)
 1921         # Get the required data for snapshots that have 'aggregate_status'
 1922         # set to 'available'.
 1923         available_share_snapshots = [
 1924             self._get_replica_snapshots_for_snapshot(
 1925                 context, x['id'], _active_replica['id'], share_replica_id)
 1926             for x in share_snapshots
 1927             if x['aggregate_status'] == constants.STATUS_AVAILABLE]
 1928 
 1929         replica_list = (
 1930             self.db.share_replicas_get_all_by_share(
 1931                 context, share_replica['share_id'],
 1932                 with_share_data=True, with_share_server=True)
 1933         )
 1934 
 1935         replica_list = [self._get_share_replica_dict(context, r)
 1936                         for r in replica_list]
 1937         share_replica = self._get_share_replica_dict(context, share_replica)
 1938 
 1939         try:
 1940             replica_ref = self.driver.create_replica(
 1941                 context, replica_list, share_replica,
 1942                 share_access_rules, available_share_snapshots,
 1943                 share_server=share_server) or {}
 1944 
 1945         except Exception as excep:
 1946             with excutils.save_and_reraise_exception():
 1947                 LOG.error("Share replica %s failed on creation.",
 1948                           share_replica['id'])
 1949                 self.db.share_replica_update(
 1950                     context, share_replica['id'],
 1951                     {'status': constants.STATUS_ERROR,
 1952                      'replica_state': constants.STATUS_ERROR})
 1953                 self._update_share_replica_access_rules_state(
 1954                     context, share_replica['id'], constants.STATUS_ERROR)
 1955                 self.message_api.create(
 1956                     context,
 1957                     message_field.Action.CREATE,
 1958                     share_replica['project_id'],
 1959                     resource_type=message_field.Resource.SHARE_REPLICA,
 1960                     resource_id=share_replica['id'],
 1961                     exception=excep)
 1962 
 1963         if replica_ref.get('export_locations'):
 1964                 if isinstance(replica_ref.get('export_locations'), list):
 1965                     self.db.share_export_locations_update(
 1966                         context, share_replica['id'],
 1967                         replica_ref.get('export_locations'))
 1968                 else:
 1969                     msg = ('Invalid export locations passed to the share '
 1970                            'manager.')
 1971                     LOG.warning(msg)
 1972 
 1973         if replica_ref.get('replica_state'):
 1974             self.db.share_replica_update(
 1975                 context, share_replica['id'],
 1976                 {'status': constants.STATUS_AVAILABLE,
 1977                  'replica_state': replica_ref.get('replica_state')})
 1978 
 1979         if replica_ref.get('access_rules_status'):
 1980             self._update_share_replica_access_rules_state(
 1981                 context, share_replica['id'],
 1982                 replica_ref.get('access_rules_status'))
 1983         else:
 1984             self._update_share_replica_access_rules_state(
 1985                 context, share_replica['id'],
 1986                 constants.STATUS_ACTIVE)
 1987 
 1988         LOG.info("Share replica %s created successfully.",
 1989                  share_replica['id'])
 1990 
    @add_hooks
    @utils.require_driver_initialized
    @locked_share_replica_operation
    def delete_share_replica(self, context, share_replica_id, share_id=None,
                             force=False):
        """Delete a share replica.

        Removes the replica's access rules, asks the driver to delete the
        replica on the backend, and then deletes the replica (and its
        snapshot instances) from the database.

        :param context: request context; elevated to admin here.
        :param share_replica_id: ID of the replica instance to delete.
        :param share_id: ID of the parent share; used by the
            locked_share_replica_operation decorator for locking.
        :param force: when True, failures to remove access rules or to
            delete the replica on the backend are logged and ignored so
            the database records are removed regardless.
        """
        context = context.elevated()
        share_replica = self.db.share_replica_get(
            context, share_replica_id, with_share_data=True,
            with_share_server=True)

        # Grab all the snapshot instances that belong to this replica.
        replica_snapshots = (
            self.db.share_snapshot_instance_get_all_with_filters(
                context, {'share_instance_ids': share_replica_id},
                with_share_data=True)
        )

        replica_list = (
            self.db.share_replicas_get_all_by_share(
                context, share_replica['share_id'],
                with_share_data=True, with_share_server=True)
        )

        # Convert DB models to plain dicts before handing them to the driver.
        replica_list = [self._get_share_replica_dict(context, r)
                        for r in replica_list]
        replica_snapshots = [self._get_snapshot_instance_dict(context, s)
                             for s in replica_snapshots]
        share_server = self._get_share_server(context, share_replica)
        share_replica = self._get_share_replica_dict(context, share_replica)

        try:
            self.access_helper.update_access_rules(
                context,
                share_replica_id,
                delete_all_rules=True,
                share_server=share_server
            )
        except Exception as excep:
            with excutils.save_and_reraise_exception() as exc_context:
                # Set status to 'error' from 'deleting' since
                # access_rules_status has been set to 'error'.
                self.db.share_replica_update(
                    context, share_replica['id'],
                    {'status': constants.STATUS_ERROR})
                self.message_api.create(
                    context,
                    message_field.Action.DELETE_ACCESS_RULES,
                    share_replica['project_id'],
                    resource_type=message_field.Resource.SHARE_REPLICA,
                    resource_id=share_replica['id'],
                    exception=excep)
                if force:
                    # Forced deletion proceeds even if rules could not be
                    # removed on the backend.
                    msg = _("The driver was unable to delete access rules "
                            "for the replica: %s. Will attempt to delete "
                            "the replica anyway.")
                    LOG.exception(msg, share_replica['id'])
                    exc_context.reraise = False

        try:
            self.driver.delete_replica(
                context, replica_list, replica_snapshots, share_replica,
                share_server=share_server)
        except Exception as excep:
            with excutils.save_and_reraise_exception() as exc_context:
                if force:
                    # Forced: swallow the driver error and fall through to
                    # remove the database records anyway.
                    msg = _("The driver was unable to delete the share "
                            "replica: %s on the backend. Since "
                            "this operation is forced, the replica will be "
                            "deleted from Manila's database. A cleanup on "
                            "the backend may be necessary.")
                    LOG.exception(msg, share_replica['id'])
                    exc_context.reraise = False
                else:
                    self.db.share_replica_update(
                        context, share_replica['id'],
                        {'status': constants.STATUS_ERROR_DELETING,
                         'replica_state': constants.STATUS_ERROR})
                self.message_api.create(
                    context,
                    message_field.Action.DELETE,
                    share_replica['project_id'],
                    resource_type=message_field.Resource.SHARE_REPLICA,
                    resource_id=share_replica['id'],
                    exception=excep)

        # Backend deletion succeeded (or was forced): purge the replica's
        # snapshot instances and the replica record itself.
        for replica_snapshot in replica_snapshots:
            self.db.share_snapshot_instance_delete(
                context, replica_snapshot['id'])

        self.db.share_replica_delete(context, share_replica['id'])
        LOG.info("Share replica %s deleted successfully.",
                 share_replica['id'])
 2084 
    @add_hooks
    @utils.require_driver_initialized
    @locked_share_replica_operation
    def promote_share_replica(self, context, share_replica_id, share_id=None):
        """Promote a share replica to active state.

        Asks the driver to promote the given replica to be the new
        'active' replica of its share. The driver may return updates for
        any/all replicas of the share; when it returns nothing, sensible
        defaults are applied (the promoted replica becomes 'active', the
        old active replica becomes 'out_of_sync').

        :param context: request context; elevated to admin here.
        :param share_replica_id: ID of the replica to promote.
        :param share_id: ID of the parent share; used by the
            locked_share_replica_operation decorator for locking.
        :raises exception.ReplicationException: if the share currently has
            no replica in the 'active' replica_state.
        """
        context = context.elevated()
        share_replica = self.db.share_replica_get(
            context, share_replica_id, with_share_data=True,
            with_share_server=True)
        replication_type = share_replica['replication_type']
        # For 'readable' replication, the demoted replica must have its
        # access rules cast to read-only.
        if replication_type == constants.REPLICATION_TYPE_READABLE:
            ensure_old_active_replica_to_readonly = True
        else:
            ensure_old_active_replica_to_readonly = False
        share_server = self._get_share_server(context, share_replica)

        # Get list of all replicas for share
        replica_list = (
            self.db.share_replicas_get_all_by_share(
                context, share_replica['share_id'],
                with_share_data=True, with_share_server=True)
        )

        try:
            old_active_replica = list(filter(
                lambda r: (
                    r['replica_state'] == constants.REPLICA_STATE_ACTIVE),
                replica_list))[0]
        except IndexError:
            # Nothing to fail over from: reset the replica's status (the
            # API had set it to a transitional state) and bail out.
            self.db.share_replica_update(
                context, share_replica['id'],
                {'status': constants.STATUS_AVAILABLE})
            msg = _("Share %(share)s has no replica with 'replica_state' "
                    "set to %(state)s. Promoting %(replica)s is not "
                    "possible.")
            self.message_api.create(
                context,
                message_field.Action.PROMOTE,
                share_replica['project_id'],
                resource_type=message_field.Resource.SHARE_REPLICA,
                resource_id=share_replica['id'],
                detail=message_field.Detail.NO_ACTIVE_REPLICA)
            raise exception.ReplicationException(
                reason=msg % {'share': share_replica['share_id'],
                              'state': constants.REPLICA_STATE_ACTIVE,
                              'replica': share_replica['id']})

        access_rules = self.db.share_access_get_all_for_share(
            context, share_replica['share_id'])

        # Convert DB models to plain dicts before handing them to the driver.
        replica_list = [self._get_share_replica_dict(context, r)
                        for r in replica_list]
        share_replica = self._get_share_replica_dict(context, share_replica)

        try:
            updated_replica_list = (
                self.driver.promote_replica(
                    context, replica_list, share_replica, access_rules,
                    share_server=share_server)
            )
        except Exception as excep:
            with excutils.save_and_reraise_exception():
                # (NOTE) gouthamr: If the driver throws an exception at
                # this stage, there is a good chance that the replicas are
                # somehow altered on the backend. We loop through the
                # replicas and set their 'status's to 'error' and
                # leave the 'replica_state' unchanged. This also changes the
                # 'status' of the replica that failed to promote to 'error' as
                # before this operation. The backend may choose to update
                # the actual replica_state during the replica_monitoring
                # stage.
                updates = {'status': constants.STATUS_ERROR}
                for replica_ref in replica_list:
                    self.db.share_replica_update(
                        context, replica_ref['id'], updates)
                    self.message_api.create(
                        context,
                        message_field.Action.PROMOTE,
                        replica_ref['project_id'],
                        resource_type=message_field.Resource.SHARE_REPLICA,
                        resource_id=replica_ref['id'],
                        exception=excep)

        # Set any 'creating' snapshots on the currently active replica to
        # 'error' since we cannot guarantee they will finish 'creating'.
        active_replica_snapshot_instances = (
            self.db.share_snapshot_instance_get_all_with_filters(
                context, {'share_instance_ids': share_replica['id']})
        )
        for instance in active_replica_snapshot_instances:
            if instance['status'] in (constants.STATUS_CREATING,
                                      constants.STATUS_DELETING):
                msg = ("The replica snapshot instance %(instance)s was "
                       "in %(state)s. Since it was not in %(available)s "
                       "state when the replica was promoted, it will be "
                       "set to %(error)s.")
                payload = {
                    'instance': instance['id'],
                    'state': instance['status'],
                    'available': constants.STATUS_AVAILABLE,
                    'error': constants.STATUS_ERROR,
                }
                LOG.info(msg, payload)
                self.db.share_snapshot_instance_update(
                    context, instance['id'],
                    {'status': constants.STATUS_ERROR})

        if not updated_replica_list:
            # Driver returned no updates: apply the default post-promotion
            # states for the old and new active replicas.
            self.db.share_replica_update(
                context, old_active_replica['id'],
                {'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC,
                 'cast_rules_to_readonly':
                     ensure_old_active_replica_to_readonly})
            self.db.share_replica_update(
                context, share_replica['id'],
                {'status': constants.STATUS_AVAILABLE,
                 'replica_state': constants.REPLICA_STATE_ACTIVE,
                 'cast_rules_to_readonly': False})
        else:
            while updated_replica_list:
                # NOTE(vponomaryov): update 'active' replica last.
                for updated_replica in updated_replica_list:
                    if (updated_replica['id'] == share_replica['id'] and
                            len(updated_replica_list) > 1):
                        continue
                    updated_replica_list.remove(updated_replica)
                    break

                updated_export_locs = updated_replica.get(
                    'export_locations')
                if(updated_export_locs is not None
                   and isinstance(updated_export_locs, list)):
                    self.db.share_export_locations_update(
                        context, updated_replica['id'],
                        updated_export_locs)

                updated_replica_state = updated_replica.get(
                    'replica_state')
                updates = {}
                # Change the promoted replica's status from 'available' to
                # 'replication_change' and unset cast_rules_to_readonly
                if updated_replica['id'] == share_replica['id']:
                    updates['cast_rules_to_readonly'] = False
                    updates['status'] = constants.STATUS_AVAILABLE
                elif updated_replica['id'] == old_active_replica['id']:
                    updates['cast_rules_to_readonly'] = (
                        ensure_old_active_replica_to_readonly)
                if updated_replica_state == constants.STATUS_ERROR:
                    updates['status'] = constants.STATUS_ERROR
                if updated_replica_state is not None:
                    updates['replica_state'] = updated_replica_state
                if updates:
                    self.db.share_replica_update(
                        context, updated_replica['id'], updates)

                if updated_replica.get('access_rules_status'):
                    self._update_share_replica_access_rules_state(
                        context, share_replica['id'],
                        updated_replica.get('access_rules_status'))

        LOG.info("Share replica %s: promoted to active state "
                 "successfully.", share_replica['id'])
 2247 
 2248     @periodic_task.periodic_task(spacing=CONF.replica_state_update_interval)
 2249     @utils.require_driver_initialized
 2250     def periodic_share_replica_update(self, context):
 2251         LOG.debug("Updating status of share replica instances.")
 2252         replicas = self.db.share_replicas_get_all(context,
 2253                                                   with_share_data=True)
 2254 
 2255         # Filter only non-active replicas belonging to this backend
 2256         def qualified_replica(r):
 2257             return (share_utils.extract_host(r['host']) ==
 2258                     share_utils.extract_host(self.host))
 2259 
 2260         replicas = list(filter(lambda x: qualified_replica(x), replicas))
 2261         for replica in replicas:
 2262             self._share_replica_update(
 2263                 context, replica, share_id=replica['share_id'])
 2264 
 2265     @add_hooks
 2266     @utils.require_driver_initialized
 2267     def update_share_replica(self, context, share_replica_id, share_id=None):
 2268         """Initiated by the force_update API."""
 2269         share_replica = self.db.share_replica_get(
 2270             context, share_replica_id, with_share_data=True,
 2271             with_share_server=True)
 2272         self._share_replica_update(context, share_replica, share_id=share_id)
 2273 
    @locked_share_replica_operation
    def _share_replica_update(self, context, share_replica, share_id=None):
        """Poll the driver for a replica's replica_state and record it.

        Skips replicas that are 'active' or in a transitional status.
        Accepts 'in_sync', 'out_of_sync' and 'error' from the driver; any
        other non-None value is logged and ignored. Driver exceptions set
        both status and replica_state to 'error'.

        :param context: request context.
        :param share_replica: replica model or dict; re-fetched from the
            DB below, so a stale reference is acceptable.
        :param share_id: ID of the parent share; used by the
            locked_share_replica_operation decorator for locking and to
            look up the share's snapshots.
        """
        share_server = self._get_share_server(context, share_replica)

        # Re-grab the replica:
        try:
            share_replica = self.db.share_replica_get(
                context, share_replica['id'], with_share_data=True,
                with_share_server=True)
        except exception.ShareReplicaNotFound:
            # Replica may have been deleted, nothing to do here
            return

        # We don't poll for replicas that are busy in some operation,
        # or if they are the 'active' instance.
        if (share_replica['status'] in constants.TRANSITIONAL_STATUSES
            or share_replica['replica_state'] ==
                constants.REPLICA_STATE_ACTIVE):
            return

        access_rules = self.db.share_access_get_all_for_share(
            context, share_replica['share_id'])

        LOG.debug("Updating status of share share_replica %s: ",
                  share_replica['id'])

        replica_list = (
            self.db.share_replicas_get_all_by_share(
                context, share_replica['share_id'],
                with_share_data=True, with_share_server=True)
        )

        _active_replica = [x for x in replica_list
                           if x['replica_state'] ==
                           constants.REPLICA_STATE_ACTIVE][0]

        # Get snapshots for the share.
        share_snapshots = self.db.share_snapshot_get_all_for_share(
            context, share_id)

        # Get the required data for snapshots that have 'aggregate_status'
        # set to 'available'.
        available_share_snapshots = [
            self._get_replica_snapshots_for_snapshot(
                context, x['id'], _active_replica['id'], share_replica['id'])
            for x in share_snapshots
            if x['aggregate_status'] == constants.STATUS_AVAILABLE]

        # Convert DB models to plain dicts before handing them to the driver.
        replica_list = [self._get_share_replica_dict(context, r)
                        for r in replica_list]

        share_replica = self._get_share_replica_dict(context, share_replica)

        try:
            replica_state = self.driver.update_replica_state(
                context, replica_list, share_replica, access_rules,
                available_share_snapshots, share_server=share_server)
        except Exception as excep:
            msg = ("Driver error when updating replica "
                   "state for replica %s.")
            LOG.exception(msg, share_replica['id'])
            self.db.share_replica_update(
                context, share_replica['id'],
                {'replica_state': constants.STATUS_ERROR,
                 'status': constants.STATUS_ERROR})
            self.message_api.create(
                context,
                message_field.Action.UPDATE,
                share_replica['project_id'],
                resource_type=message_field.Resource.SHARE_REPLICA,
                resource_id=share_replica['id'],
                exception=excep)
            return

        # Only a known replica_state may be stored via this path.
        if replica_state in (constants.REPLICA_STATE_IN_SYNC,
                             constants.REPLICA_STATE_OUT_OF_SYNC,
                             constants.STATUS_ERROR):
            self.db.share_replica_update(context, share_replica['id'],
                                         {'replica_state': replica_state})
        elif replica_state:
            msg = (("Replica %(id)s cannot be set to %(state)s "
                    "through update call.") %
                   {'id': share_replica['id'], 'state': replica_state})
            LOG.warning(msg)
 2358 
 2359     def _validate_share_and_driver_mode(self, share_instance):
 2360         driver_dhss = self.driver.driver_handles_share_servers
 2361 
 2362         share_dhss = share_types.parse_boolean_extra_spec(
 2363             'driver_handles_share_servers',
 2364             share_types.get_share_type_extra_specs(
 2365                 share_instance['share_type_id'],
 2366                 constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS))
 2367 
 2368         if driver_dhss != share_dhss:
 2369             msg = _("Driver mode of share %(share)s being managed is "
 2370                     "incompatible with mode DHSS=%(dhss)s configured for"
 2371                     " this backend.") % {'share': share_instance['share_id'],
 2372                                          'dhss': driver_dhss}
 2373             raise exception.InvalidShare(reason=msg)
 2374 
 2375         return driver_dhss
 2376 
    @add_hooks
    @utils.require_driver_initialized
    def manage_share(self, context, share_id, driver_options):
        """Bring a pre-existing backend share under Manila management.

        Invokes the driver's manage_existing(_with_server) call, commits
        quota for the discovered size, and marks the share 'available'.
        Any failure leaves the share in 'manage_error' with size 1.

        :param context: request context; elevated to admin here.
        :param share_id: ID of the DB share record created by the API.
        :param driver_options: driver-specific options supplied by the
            administrator to locate the backend share.
        """
        context = context.elevated()
        share_ref = self.db.share_get(context, share_id)
        share_instance = self._get_share_instance(context, share_ref)
        project_id = share_ref['project_id']

        try:

            driver_dhss = self._validate_share_and_driver_mode(share_instance)

            if driver_dhss is True:
                share_server = self._get_share_server(context, share_instance)

                share_update = (
                    self.driver.manage_existing_with_server(
                        share_instance, driver_options, share_server)
                    or {}
                )
            else:
                share_update = (
                    self.driver.manage_existing(
                        share_instance, driver_options)
                    or {}
                )

            # The driver must report a size; quota is committed against it.
            if not share_update.get('size'):
                msg = _("Driver cannot calculate share size.")
                raise exception.InvalidShare(reason=msg)

            reservations = QUOTAS.reserve(
                context,
                project_id=project_id,
                user_id=context.user_id,
                shares=1,
                gigabytes=share_update['size'],
                share_type_id=share_instance['share_type_id'],
            )
            QUOTAS.commit(
                context, reservations, project_id=project_id,
                share_type_id=share_instance['share_type_id'],
            )

            share_update.update({
                'status': constants.STATUS_AVAILABLE,
                'launched_at': timeutils.utcnow(),
                'availability_zone': self.availability_zone,
            })

            # If the share was managed with `replication_type` extra-spec, the
            # instance becomes an `active` replica.
            if share_ref.get('replication_type'):
                share_update['replica_state'] = constants.REPLICA_STATE_ACTIVE

            # NOTE(vponomaryov): we should keep only those export locations
            # that driver has calculated to avoid incompatibilities with one
            # provided by user.
            if 'export_locations' in share_update:
                self.db.share_export_locations_update(
                    context, share_instance['id'],
                    share_update.pop('export_locations'),
                    delete=True)

            self.db.share_update(context, share_id, share_update)
        except Exception:
            # NOTE(vponomaryov): set size as 1 because design expects size
            # to be set, it also will allow us to handle delete/unmanage
            # operations properly with this errored share according to quotas.
            self.db.share_update(
                context, share_id,
                {'status': constants.STATUS_MANAGE_ERROR, 'size': 1})
            raise
 2450 
    @add_hooks
    @utils.require_driver_initialized
    def manage_snapshot(self, context, snapshot_id, driver_options):
        """Bring a pre-existing backend snapshot under Manila management.

        Invokes the driver's manage_existing_snapshot(_with_server) call,
        updates quota usages for the discovered size, records export
        locations (for mountable snapshots) and marks the snapshot
        'available'. Any failure leaves the snapshot in 'manage_error'
        with size 1.

        :param context: request context; elevated to admin here.
        :param snapshot_id: ID of the DB snapshot record created by the API.
        :param driver_options: driver-specific options supplied by the
            administrator to locate the backend snapshot.
        """
        context = context.elevated()
        snapshot_ref = self.db.share_snapshot_get(context, snapshot_id)

        snapshot_instance = self.db.share_snapshot_instance_get(
            context, snapshot_ref.instance['id'], with_share_data=True
        )
        project_id = snapshot_ref['project_id']

        driver_dhss = self.driver.driver_handles_share_servers

        try:
            if driver_dhss is True:

                share_server = self._get_share_server(context,
                                                      snapshot_ref['share'])

                snapshot_update = (
                    self.driver.manage_existing_snapshot_with_server(
                        snapshot_instance, driver_options, share_server)
                    or {}
                )
            else:
                snapshot_update = (
                    self.driver.manage_existing_snapshot(
                        snapshot_instance, driver_options)
                    or {}
                )

            # Fall back to the parent share's size when the driver cannot
            # determine the snapshot size.
            if not snapshot_update.get('size'):
                snapshot_update['size'] = snapshot_ref['share']['size']
                LOG.warning("Cannot get the size of the snapshot "
                            "%(snapshot_id)s. Using the size of "
                            "the share instead.",
                            {'snapshot_id': snapshot_id})

            self._update_quota_usages(context, project_id, {
                "snapshots": 1,
                "snapshot_gigabytes": snapshot_update['size'],
            })

            snapshot_export_locations = snapshot_update.pop(
                'export_locations', [])

            # Export locations are only persisted for mountable snapshots.
            if snapshot_instance['share']['mount_snapshot_support']:

                for el in snapshot_export_locations:
                    values = {
                        'share_snapshot_instance_id': snapshot_instance['id'],
                        'path': el['path'],
                        'is_admin_only': el['is_admin_only'],
                    }

                    self.db.share_snapshot_instance_export_location_create(
                        context, values)

            snapshot_update.update({
                'status': constants.STATUS_AVAILABLE,
                'progress': '100%',
            })
            # Never let a driver-supplied 'id' overwrite the DB record's ID.
            snapshot_update.pop('id', None)
            self.db.share_snapshot_update(context, snapshot_id,
                                          snapshot_update)
        except Exception:
            # NOTE(vponomaryov): set size as 1 because design expects size
            # to be set, it also will allow us to handle delete/unmanage
            # operations properly with this errored snapshot according to
            # quotas.
            self.db.share_snapshot_update(
                context, snapshot_id,
                {'status': constants.STATUS_MANAGE_ERROR, 'size': 1})
            raise
 2526 
 2527     def _update_quota_usages(self, context, project_id, usages):
 2528         user_id = context.user_id
 2529         for resource, usage in usages.items():
 2530             try:
 2531                 current_usage = self.db.quota_usage_get(
 2532                     context, project_id, resource, user_id)
 2533                 self.db.quota_usage_update(
 2534                     context, project_id, user_id, resource,
 2535                     in_use=current_usage['in_use'] + usage)
 2536             except exception.QuotaUsageNotFound:
 2537                 self.db.quota_usage_create(context, project_id,
 2538                                            user_id, resource, usage)
 2539 
    @add_hooks
    @utils.require_driver_initialized
    def unmanage_share(self, context, share_id):
        """Remove a share from Manila management, leaving it on the backend.

        Calls the driver's unmanage(_with_server), releases quota,
        optionally removes access rules, and deletes the share instance
        record. When the share lived on a share server, that server is
        flagged non-auto-deletable so it is not cleaned up underneath the
        now-unmanaged share.

        :param context: request context; elevated to admin here.
        :param share_id: ID of the share to unmanage.
        """
        context = context.elevated()
        share_ref = self.db.share_get(context, share_id)
        share_instance = self._get_share_instance(context, share_ref)
        share_server = None
        project_id = share_ref['project_id']

        # Helper: flag the share 'unmanage_error' and log the failure.
        def share_manage_set_error_status(msg, exception):
            status = {'status': constants.STATUS_UNMANAGE_ERROR}
            self.db.share_update(context, share_id, status)
            LOG.error(msg, exception)

        dhss = self.driver.driver_handles_share_servers

        try:
            if dhss is True:
                share_server = self._get_share_server(context, share_instance)
                self.driver.unmanage_with_server(share_instance, share_server)
            else:
                self.driver.unmanage(share_instance)

        except exception.InvalidShare as e:
            share_manage_set_error_status(
                ("Share can not be unmanaged: %s."), e)
            return

        try:
            # Give back the quota held by the share being unmanaged.
            reservations = QUOTAS.reserve(
                context,
                project_id=project_id,
                shares=-1,
                gigabytes=-share_ref['size'],
                share_type_id=share_instance['share_type_id'],
            )
            QUOTAS.commit(
                context, reservations, project_id=project_id,
                share_type_id=share_instance['share_type_id'],
            )
        except Exception as e:
            # Note(imalinovskiy):
            # Quota reservation errors here are not fatal, because
            # unmanage is administrator API and he/she could update user
            # quota usages later if it's required.
            LOG.warning("Failed to update quota usages: %s.", e)

        if self.configuration.safe_get('unmanage_remove_access_rules'):
            try:
                self.access_helper.update_access_rules(
                    context,
                    share_instance['id'],
                    delete_all_rules=True,
                    share_server=share_server
                )
            except Exception as e:
                share_manage_set_error_status(
                    ("Can not remove access rules of share: %s."), e)
                return

        self.db.share_instance_delete(context, share_instance['id'])

        # NOTE(ganso): Since we are unmanaging a share that is still within a
        # share server, we need to prevent the share server from being
        # auto-deleted.
        if share_server and share_server['is_auto_deletable']:
            self.db.share_server_update(context, share_server['id'],
                                        {'is_auto_deletable': False})
            msg = ("Since share %(share)s has been un-managed from share "
                   "server %(server)s. This share server must be removed "
                   "manually, either by un-managing or by deleting it. The "
                   "share network %(network)s cannot be deleted unless this "
                   "share server has been removed.")
            msg_args = {
                'share': share_id,
                'server': share_server['id'],
                'network': share_server['share_network_id']
            }
            LOG.warning(msg, msg_args)

        LOG.info("Share %s: unmanaged successfully.", share_id)
 2621 
    @add_hooks
    @utils.require_driver_initialized
    def unmanage_snapshot(self, context, snapshot_id):
        """Remove a snapshot from Manila management without deleting it.

        Optionally purges backend access rules first, then asks the
        driver to unmanage the snapshot, releases quota and finally
        drops the snapshot instance record.
        """
        # Pre-built error status applied whenever a step below fails.
        status = {'status': constants.STATUS_UNMANAGE_ERROR}

        context = context.elevated()
        snapshot_ref = self.db.share_snapshot_get(context, snapshot_id)
        share_server = self._get_share_server(context,
                                              snapshot_ref['share'])

        snapshot_instance = self.db.share_snapshot_instance_get(
            context, snapshot_ref.instance['id'], with_share_data=True
        )

        project_id = snapshot_ref['project_id']

        # Optionally remove access rules so the backend resource is not
        # left with Manila-managed exports.
        if self.configuration.safe_get('unmanage_remove_access_rules'):
            try:
                self.snapshot_access_helper.update_access_rules(
                    context,
                    snapshot_instance['id'],
                    delete_all_rules=True,
                    share_server=share_server)
            except Exception:
                LOG.exception(
                    ("Cannot remove access rules of snapshot %s."),
                    snapshot_id)
                self.db.share_snapshot_update(context, snapshot_id, status)
                return

        dhss = self.driver.driver_handles_share_servers

        try:
            # The driver entry point differs depending on whether the
            # backend manages share servers.
            if dhss:
                self.driver.unmanage_snapshot_with_server(
                    snapshot_instance, share_server)
            else:
                self.driver.unmanage_snapshot(snapshot_instance)
        except exception.UnmanageInvalidShareSnapshot as e:
            self.db.share_snapshot_update(context, snapshot_id, status)
            LOG.error("Share snapshot cannot be unmanaged: %s.", e)
            return

        try:
            # Release the snapshot's quota; negative deltas return capacity.
            share_type_id = snapshot_ref['share']['instance']['share_type_id']
            reservations = QUOTAS.reserve(
                context,
                project_id=project_id,
                snapshots=-1,
                snapshot_gigabytes=-snapshot_ref['size'],
                share_type_id=share_type_id,
            )
            QUOTAS.commit(
                context, reservations, project_id=project_id,
                share_type_id=share_type_id,
            )
        except Exception as e:
            # Note(imalinovskiy):
            # Quota reservation errors here are not fatal, because
            # unmanage is administrator API and he/she could update user
            # quota usages later if it's required.
            LOG.warning("Failed to update quota usages: %s.", e)

        self.db.share_snapshot_instance_delete(
            context, snapshot_instance['id'])
 2687 
    @add_hooks
    @utils.require_driver_initialized
    def manage_share_server(self, context, share_server_id, identifier,
                            driver_opts):
        """Bring an existing backend share server under Manila management.

        :param share_server_id: id of the placeholder share server record.
        :param identifier: backend-specific identifier of the server.
        :param driver_opts: driver-specific options supplied by the admin.
        :raises ManageShareServerError: if DHSS is disabled or network
            allocations cannot be fully managed.
        """

        if self.driver.driver_handles_share_servers is False:
            msg = _("Cannot manage share server %s in a "
                    "backend configured with driver_handles_share_servers"
                    " set to False.") % share_server_id
            raise exception.ManageShareServerError(reason=msg)

        server = self.db.share_server_get(context, share_server_id)

        share_network = self.db.share_network_get(
            context, server['share_network_id'])

        try:

            # Total allocations the driver expects per share server
            # (tenant network plus, when configured, admin network).
            number_allocations = (
                self.driver.get_network_allocations_number())

            if self.driver.admin_network_api:
                number_allocations += (
                    self.driver.get_admin_network_allocations_number())

            if number_allocations > 0:

                # allocations obtained from the driver that still need to
                # be validated
                remaining_allocations = (
                    self.driver.get_share_server_network_info(
                        context, server, identifier, driver_opts))

                if len(remaining_allocations) > 0:

                    # Admin-network allocations are managed first; the
                    # API returns whatever it did not claim.
                    if self.driver.admin_network_api:
                        remaining_allocations = (
                            self.driver.admin_network_api.
                            manage_network_allocations(
                                context, remaining_allocations, server))

                    # allocations that are managed are removed from
                    # remaining_allocations

                    remaining_allocations = (
                        self.driver.network_api.
                        manage_network_allocations(
                            context, remaining_allocations, server,
                            share_network))

                    # We require that all allocations are managed, else we
                    # may have problems deleting this share server
                    if len(remaining_allocations) > 0:
                        msg = ("Failed to manage all allocations. "
                               "Allocations %s were not "
                               "managed." % six.text_type(
                                   remaining_allocations))
                        raise exception.ManageShareServerError(reason=msg)

                else:
                    # if there should be allocations, but the driver
                    # doesn't return any something is wrong

                    msg = ("Driver did not return required network "
                           "allocations to be managed. Required number "
                           "of allocations is %s." % number_allocations)
                    raise exception.ManageShareServerError(reason=msg)

            new_identifier, backend_details = self.driver.manage_server(
                context, server, identifier, driver_opts)

            # Fall back to the DB id if the driver did not rename the server.
            if not new_identifier:
                new_identifier = server['id']

            if backend_details is None or not isinstance(
                    backend_details, dict):
                backend_details = {}

            # Persist the network's security service definitions as JSON
            # in the backend details so they can be recovered later
            # (e.g. when this server is unmanaged).
            for security_service in share_network['security_services']:
                ss_type = security_service['type']
                data = {
                    'name': security_service['name'],
                    'ou': security_service['ou'],
                    'domain': security_service['domain'],
                    'server': security_service['server'],
                    'dns_ip': security_service['dns_ip'],
                    'user': security_service['user'],
                    'type': ss_type,
                    'password': security_service['password'],
                }
                backend_details.update({
                    'security_service_' + ss_type: jsonutils.dumps(data)
                })

            if backend_details:
                self.db.share_server_backend_details_set(
                    context, server['id'], backend_details)

            self.db.share_server_update(
                context, share_server_id,
                {'status': constants.STATUS_ACTIVE,
                 'identifier': new_identifier})

        except Exception:
            msg = "Error managing share server %s"
            LOG.exception(msg, share_server_id)
            self.db.share_server_update(
                context, share_server_id,
                {'status': constants.STATUS_MANAGE_ERROR})
            raise

        LOG.info("Share server %s managed successfully.", share_server_id)
 2800 
    @add_hooks
    @utils.require_driver_initialized
    def unmanage_share_server(self, context, share_server_id, force=False):
        """Remove a share server from Manila management.

        :param force: when True, proceed with network-allocation and DB
            cleanup even if the driver does not implement
            ``unmanage_server``.
        """

        server = self.db.share_server_get(
            context, share_server_id)
        server_details = server['backend_details']

        # Rebuild the security service definitions previously stored as
        # JSON under 'security_service_<type>' keys in backend details.
        security_services = []
        for ss_name in constants.SECURITY_SERVICES_ALLOWED_TYPES:
            ss = server_details.get('security_service_' + ss_name)
            if ss:
                security_services.append(jsonutils.loads(ss))

        try:
            self.driver.unmanage_server(server_details, security_services)
        except NotImplementedError:
            # Without ``force`` the operation fails here; with it we
            # fall through and still clean up allocations and DB state.
            if not force:
                LOG.error("Did not unmanage share server %s since the driver "
                          "does not support managing share servers and no "
                          "``force`` option was supplied.",
                          share_server_id)
                self.db.share_server_update(
                    context, share_server_id,
                    {'status': constants.STATUS_UNMANAGE_ERROR})
                return

        try:

            if self.driver.get_network_allocations_number() > 0:
                # NOTE(ganso): This will already remove admin allocations.
                self.driver.network_api.unmanage_network_allocations(
                    context, share_server_id)
            elif (self.driver.get_admin_network_allocations_number() > 0
                    and self.driver.admin_network_api):
                # NOTE(ganso): This is here in case there are only admin
                # allocations.
                self.driver.admin_network_api.unmanage_network_allocations(
                    context, share_server_id)
            self.db.share_server_delete(context, share_server_id)
        except Exception:
            msg = "Error unmanaging share server %s"
            LOG.exception(msg, share_server_id)
            self.db.share_server_update(
                context, share_server_id,
                {'status': constants.STATUS_UNMANAGE_ERROR})
            raise

        LOG.info("Share server %s unmanaged successfully.", share_server_id)
 2850 
 2851     @add_hooks
 2852     @utils.require_driver_initialized
 2853     def revert_to_snapshot(self, context, snapshot_id,
 2854                            reservations):
 2855         context = context.elevated()
 2856         snapshot = self.db.share_snapshot_get(context, snapshot_id)
 2857         share = snapshot['share']
 2858         share_id = share['id']
 2859         share_instance_id = snapshot.instance.share_instance_id
 2860         share_access_rules = (
 2861             self.access_helper.get_share_instance_access_rules(
 2862                 context, filters={'state': constants.STATUS_ACTIVE},
 2863                 share_instance_id=share_instance_id))
 2864         snapshot_access_rules = (
 2865             self.snapshot_access_helper.get_snapshot_instance_access_rules(
 2866                 context, snapshot.instance['id']))
 2867 
 2868         if share.get('has_replicas'):
 2869             self._revert_to_replicated_snapshot(
 2870                 context, share, snapshot, reservations, share_access_rules,
 2871                 snapshot_access_rules, share_id=share_id)
 2872         else:
 2873             self._revert_to_snapshot(context, share, snapshot, reservations,
 2874                                      share_access_rules, snapshot_access_rules)
 2875 
    def _revert_to_snapshot(self, context, share, snapshot, reservations,
                            share_access_rules, snapshot_access_rules):
        """Revert a non-replicated share to a snapshot via the driver.

        On success the quota ``reservations`` (if any) are committed and
        the share size is reset to the snapshot's size; on failure they
        are rolled back and the share is put into reverting_error status
        while the snapshot is returned to available.
        """

        share_server = self._get_share_server(context, share)
        share_id = share['id']
        snapshot_id = snapshot['id']
        project_id = share['project_id']
        user_id = share['user_id']

        snapshot_instance = self.db.share_snapshot_instance_get(
            context, snapshot.instance['id'], with_share_data=True)
        share_type_id = snapshot_instance["share_instance"]["share_type_id"]

        # Make primitive to pass the information to the driver
        snapshot_instance_dict = self._get_snapshot_instance_dict(
            context, snapshot_instance, snapshot=snapshot)

        try:
            self.driver.revert_to_snapshot(context,
                                           snapshot_instance_dict,
                                           share_access_rules,
                                           snapshot_access_rules,
                                           share_server=share_server)
        except Exception as excep:
            # Undo the quota reservation, flag the share, release the
            # snapshot and surface a user message, then re-raise.
            with excutils.save_and_reraise_exception():

                msg = ('Share %(share)s could not be reverted '
                       'to snapshot %(snap)s.')
                msg_args = {'share': share_id, 'snap': snapshot_id}
                LOG.exception(msg, msg_args)

                if reservations:
                    QUOTAS.rollback(
                        context, reservations, project_id=project_id,
                        user_id=user_id, share_type_id=share_type_id,
                    )

                self.db.share_update(
                    context, share_id,
                    {'status': constants.STATUS_REVERTING_ERROR})
                self.db.share_snapshot_update(
                    context, snapshot_id,
                    {'status': constants.STATUS_AVAILABLE})
                self.message_api.create(
                    context,
                    message_field.Action.REVERT_TO_SNAPSHOT,
                    share['project_id'],
                    resource_type=message_field.Resource.SHARE,
                    resource_id=share_id,
                    exception=excep)

        if reservations:
            QUOTAS.commit(
                context, reservations, project_id=project_id, user_id=user_id,
                share_type_id=share_type_id,
            )

        # Reverting shrinks/resets the share to the snapshot's size.
        self.db.share_update(
            context, share_id,
            {'status': constants.STATUS_AVAILABLE, 'size': snapshot['size']})
        self.db.share_snapshot_update(
            context, snapshot_id, {'status': constants.STATUS_AVAILABLE})

        msg = ('Share %(share)s reverted to snapshot %(snap)s '
               'successfully.')
        msg_args = {'share': share_id, 'snap': snapshot_id}
        LOG.info(msg, msg_args)
 2943 
    @add_hooks
    @utils.require_driver_initialized
    def delete_share_instance(self, context, share_instance_id, force=False):
        """Delete a share instance.

        Removes access rules and the backend share, then deletes the DB
        record. With ``force`` the DB record is removed even when the
        backend operations fail.
        """
        context = context.elevated()
        share_instance = self._get_share_instance(context, share_instance_id)
        share_id = share_instance.get('share_id')
        share_server = self._get_share_server(context, share_instance)
        share = self.db.share_get(context, share_id)

        self._notify_about_share_usage(context, share,
                                       share_instance, "delete.start")

        try:
            # Drop all access rules before deleting the backend share.
            self.access_helper.update_access_rules(
                context,
                share_instance_id,
                delete_all_rules=True,
                share_server=share_server
            )
        except exception.ShareResourceNotFound:
            # Already gone on the backend; nothing to clean up there.
            LOG.warning("Share instance %s does not exist in the "
                        "backend.", share_instance_id)
        except Exception as excep:
            with excutils.save_and_reraise_exception() as exc_context:
                if force:
                    msg = ("The driver was unable to delete access rules "
                           "for the instance: %s. Will attempt to delete "
                           "the instance anyway.")
                    LOG.error(msg, share_instance_id)
                    exc_context.reraise = False
                else:
                    self.db.share_instance_update(
                        context,
                        share_instance_id,
                        {'status': constants.STATUS_ERROR_DELETING})
                self.message_api.create(
                    context,
                    message_field.Action.DELETE_ACCESS_RULES,
                    share_instance['project_id'],
                    resource_type=message_field.Resource.SHARE,
                    resource_id=share_instance_id,
                    exception=excep)

        try:
            self.driver.delete_share(context, share_instance,
                                     share_server=share_server)
        except exception.ShareResourceNotFound:
            LOG.warning("Share instance %s does not exist in the "
                        "backend.", share_instance_id)
        except Exception as excep:
            with excutils.save_and_reraise_exception() as exc_context:
                if force:
                    msg = ("The driver was unable to delete the share "
                           "instance: %s on the backend. Since this "
                           "operation is forced, the instance will be "
                           "deleted from Manila's database. A cleanup on "
                           "the backend may be necessary.")
                    LOG.error(msg, share_instance_id)
                    exc_context.reraise = False
                else:
                    self.db.share_instance_update(
                        context,
                        share_instance_id,
                        {'status': constants.STATUS_ERROR_DELETING})
                self.message_api.create(
                    context,
                    message_field.Action.DELETE,
                    share_instance['project_id'],
                    resource_type=message_field.Resource.SHARE,
                    resource_id=share_instance_id,
                    exception=excep)

        self.db.share_instance_delete(
            context, share_instance_id, need_to_update_usages=True)

        LOG.info("Share instance %s: deleted successfully.",
                 share_instance_id)

        # May auto-delete the share server if this was its last share.
        self._check_delete_share_server(context, share_instance)

        self._notify_about_share_usage(context, share,
                                       share_instance, "delete.end")
 3027 
 3028     def _check_delete_share_server(self, context, share_instance):
 3029 
 3030         if CONF.delete_share_server_with_last_share:
 3031             share_server = self._get_share_server(context, share_instance)
 3032             if (share_server and len(share_server.share_instances) == 0
 3033                     and share_server.is_auto_deletable is True):
 3034                 LOG.debug("Scheduled deletion of share-server "
 3035                           "with id '%s' automatically by "
 3036                           "deletion of last share.", share_server['id'])
 3037                 self.delete_share_server(context, share_server)
 3038 
 3039     @periodic_task.periodic_task(spacing=600)
 3040     @utils.require_driver_initialized
 3041     def delete_free_share_servers(self, ctxt):
 3042         if not (self.driver.driver_handles_share_servers and
 3043                 self.configuration.automatic_share_server_cleanup):
 3044             return
 3045         LOG.info("Check for unused share servers to delete.")
 3046         updated_before = timeutils.utcnow() - datetime.timedelta(
 3047             minutes=self.configuration.unused_share_server_cleanup_interval)
 3048         servers = self.db.share_server_get_all_unused_deletable(ctxt,
 3049                                                                 self.host,
 3050                                                                 updated_before)
 3051         for server in servers:
 3052             self.delete_share_server(ctxt, server)
 3053 
    @add_hooks
    @utils.require_driver_initialized
    def create_snapshot(self, context, share_id, snapshot_id):
        """Create snapshot for share."""
        snapshot_ref = self.db.share_snapshot_get(context, snapshot_id)
        share_server = self._get_share_server(
            context, snapshot_ref['share']['instance'])
        snapshot_instance = self.db.share_snapshot_instance_get(
            context, snapshot_ref.instance['id'], with_share_data=True
        )
        snapshot_instance_id = snapshot_instance['id']

        # Convert to a plain dict before handing it to the driver.
        snapshot_instance = self._get_snapshot_instance_dict(
            context, snapshot_instance)

        try:

            # Drivers may return a model update dict (status,
            # export_locations, ...) or None.
            model_update = self.driver.create_snapshot(
                context, snapshot_instance, share_server=share_server) or {}

        except Exception as excep:
            # Mark the snapshot instance in error and record a
            # user-visible message before re-raising.
            with excutils.save_and_reraise_exception():
                self.db.share_snapshot_instance_update(
                    context,
                    snapshot_instance_id,
                    {'status': constants.STATUS_ERROR})
                self.message_api.create(
                    context,
                    message_field.Action.CREATE,
                    snapshot_ref['project_id'],
                    resource_type=message_field.Resource.SHARE_SNAPSHOT,
                    resource_id=snapshot_instance_id,
                    exception=excep)

        snapshot_export_locations = model_update.pop('export_locations', [])

        # Export locations are only persisted for mountable snapshots.
        if snapshot_instance['share']['mount_snapshot_support']:

            for el in snapshot_export_locations:
                values = {
                    'share_snapshot_instance_id': snapshot_instance_id,
                    'path': el['path'],
                    'is_admin_only': el['is_admin_only'],
                }

                self.db.share_snapshot_instance_export_location_create(context,
                                                                       values)

        # Default to available/100% unless the driver reported some
        # other (in-progress or error) status.
        if model_update.get('status') in (None, constants.STATUS_AVAILABLE):
            model_update['status'] = constants.STATUS_AVAILABLE
            model_update['progress'] = '100%'

        self.db.share_snapshot_instance_update(
            context, snapshot_instance_id, model_update)
 3108 
 3109     @add_hooks
 3110     @utils.require_driver_initialized
 3111     def delete_snapshot(self, context, snapshot_id, force=False):
 3112         """Delete share snapshot."""
 3113         context = context.elevated()
 3114         snapshot_ref = self.db.share_snapshot_get(context, snapshot_id)
 3115 
 3116         share_server = self._get_share_server(
 3117             context, snapshot_ref['share']['instance'])
 3118         snapshot_instance = self.db.share_snapshot_instance_get(
 3119             context, snapshot_ref.instance['id'], with_share_data=True)
 3120         snapshot_instance_id = snapshot_instance['id']
 3121 
 3122         if context.project_id != snapshot_ref['project_id']:
 3123             project_id = snapshot_ref['project_id']
 3124         else:
 3125             project_id = context.project_id
 3126 
 3127         snapshot_instance = self._get_snapshot_instance_dict(
 3128             context, snapshot_instance)
 3129 
 3130         share_ref = self.db.share_get(context, snapshot_ref['share_id'])
 3131 
 3132         if share_ref['mount_snapshot_support']:
 3133             try:
 3134                 self.snapshot_access_helper.update_access_rules(
 3135                     context, snapshot_instance['id'], delete_all_rules=True,
 3136                     share_server=share_server)
 3137             except Exception:
 3138                 LOG.exception(
 3139                     ("Failed to remove access rules for snapshot %s."),
 3140                     snapshot_instance['id'])
 3141                 LOG.warning("The driver was unable to remove access rules "
 3142                             "for snapshot %s. Moving on.",
 3143                             snapshot_instance['snapshot_id'])
 3144 
 3145         try:
 3146             self.driver.delete_snapshot(context, snapshot_instance,
 3147                                         share_server=share_server)
 3148         except Exception as excep:
 3149             with excutils.save_and_reraise_exception() as exc:
 3150                 if force:
 3151                     msg = _("The driver was unable to delete the "
 3152                             "snapshot %s on the backend. Since this "
 3153                             "operation is forced, the snapshot will "
 3154                             "be deleted from Manila's database. A cleanup on "
 3155                             "the backend may be necessary.")
 3156                     LOG.exception(msg, snapshot_id)
 3157                     exc.reraise = False
 3158                 else:
 3159                     self.db.share_snapshot_instance_update(
 3160                         context,
 3161                         snapshot_instance_id,
 3162                         {'status': constants.STATUS_ERROR_DELETING})
 3163                 self.message_api.create(
 3164                     context,
 3165                     message_field.Action.DELETE,
 3166                     snapshot_ref['project_id'],
 3167                     resource_type=message_field.Resource.SHARE_SNAPSHOT,
 3168                     resource_id=snapshot_instance_id,
 3169                     exception=excep)
 3170 
 3171         self.db.share_snapshot_instance_delete(context, snapshot_instance_id)
 3172 
 3173         share_type_id = snapshot_ref['share']['instance']['share_type_id']
 3174         try:
 3175             reservations = QUOTAS.reserve(
 3176                 context, project_id=project_id, snapshots=-1,
 3177                 snapshot_gigabytes=-snapshot_ref['size'],
 3178                 user_id=snapshot_ref['user_id'],
 3179                 share_type_id=share_type_id,
 3180             )
 3181         except Exception:
 3182             reservations = None
 3183             LOG.exception("Failed to update quota usages while deleting "
 3184                           "snapshot %s.", snapshot_id)
 3185 
 3186         if reservations:
 3187             QUOTAS.commit(
 3188                 context, reservations, project_id=project_id,
 3189                 user_id=snapshot_ref['user_id'],
 3190                 share_type_id=share_type_id,
 3191             )
 3192 
    @add_hooks
    @utils.require_driver_initialized
    @locked_share_replica_operation
    def create_replicated_snapshot(self, context, snapshot_id, share_id=None):
        """Create a snapshot for a replicated share."""
        # Grab the snapshot and replica information from the DB.
        snapshot = self.db.share_snapshot_get(context, snapshot_id)
        share_server = self._get_share_server(context, snapshot['share'])
        replica_snapshots = (
            self.db.share_snapshot_instance_get_all_with_filters(
                context, {'snapshot_ids': snapshot['id']},
                with_share_data=True)
        )
        replica_list = (
            self.db.share_replicas_get_all_by_share(
                context, share_id, with_share_data=True,
                with_share_server=True)
        )

        # Make primitives to pass the information to the driver.

        replica_list = [self._get_share_replica_dict(context, r)
                        for r in replica_list]
        replica_snapshots = [self._get_snapshot_instance_dict(context, s)
                             for s in replica_snapshots]
        updated_instances = []

        try:
            updated_instances = self.driver.create_replicated_snapshot(
                context, replica_list, replica_snapshots,
                share_server=share_server) or []
        except Exception:
            # Mark every snapshot instance in error before re-raising;
            # we cannot tell which replicas, if any, succeeded.
            with excutils.save_and_reraise_exception():
                for instance in replica_snapshots:
                    self.db.share_snapshot_instance_update(
                        context, instance['id'],
                        {'status': constants.STATUS_ERROR})

        # Persist driver-reported updates; instances reported available
        # are considered fully created.
        for instance in updated_instances:
            if instance['status'] == constants.STATUS_AVAILABLE:
                instance.update({'progress': '100%'})
            self.db.share_snapshot_instance_update(
                context, instance['id'], instance)
 3236 
 3237     def _find_active_replica_on_host(self, replica_list):
 3238         """Find the active replica matching this manager's host."""
 3239         for replica in replica_list:
 3240             if (replica['replica_state'] == constants.REPLICA_STATE_ACTIVE and
 3241                     share_utils.extract_host(replica['host']) == self.host):
 3242                 return replica
 3243 
    @locked_share_replica_operation
    def _revert_to_replicated_snapshot(self, context, share, snapshot,
                                       reservations, share_access_rules,
                                       snapshot_access_rules, share_id=None):
        """Revert a replicated share to the state captured by a snapshot.

        Asks the driver to revert the active replica on this host to the
        given snapshot, then settles quota reservations and resets the
        replica/snapshot-instance statuses in the database.

        :param context: request context
        :param share: share model being reverted
        :param snapshot: snapshot model to revert to
        :param reservations: quota reservations to commit on success or
            roll back on failure; may be None
        :param share_access_rules: share access rules, passed through to
            the driver
        :param snapshot_access_rules: snapshot access rules, passed
            through to the driver
        :param share_id: ID of the share; used for DB lookups and by the
            locked_share_replica_operation decorator
        """

        share_server = self._get_share_server(context, share)
        snapshot_id = snapshot['id']
        project_id = share['project_id']
        user_id = share['user_id']

        # Get replicas, including an active replica
        replica_list = self.db.share_replicas_get_all_by_share(
            context, share_id, with_share_data=True, with_share_server=True)
        active_replica = self._find_active_replica_on_host(replica_list)

        # Get snapshot instances, including one on an active replica
        replica_snapshots = (
            self.db.share_snapshot_instance_get_all_with_filters(
                context, {'snapshot_ids': snapshot_id},
                with_share_data=True))
        snapshot_instance_filters = {
            'share_instance_ids': active_replica['id'],
            'snapshot_ids': snapshot_id,
        }
        active_replica_snapshot = (
            self.db.share_snapshot_instance_get_all_with_filters(
                context, snapshot_instance_filters))[0]

        # Make primitives to pass the information to the driver
        replica_list = [self._get_share_replica_dict(context, replica)
                        for replica in replica_list]
        active_replica = self._get_share_replica_dict(context, active_replica)
        replica_snapshots = [self._get_snapshot_instance_dict(context, s)
                             for s in replica_snapshots]
        active_replica_snapshot = self._get_snapshot_instance_dict(
            context, active_replica_snapshot, snapshot=snapshot)

        try:
            self.driver.revert_to_replicated_snapshot(
                context, active_replica, replica_list, active_replica_snapshot,
                replica_snapshots, share_access_rules,
                snapshot_access_rules, share_server=share_server)
        except Exception:
            with excutils.save_and_reraise_exception():

                msg = ('Share %(share)s could not be reverted '
                       'to snapshot %(snap)s.')
                msg_args = {'share': share_id, 'snap': snapshot_id}
                LOG.exception(msg, msg_args)

                # Give back any quota that was reserved for this revert.
                if reservations:
                    QUOTAS.rollback(
                        context, reservations, project_id=project_id,
                        user_id=user_id,
                        share_type_id=active_replica['share_type_id'],
                    )

                # Mark the replica as errored, but leave the snapshot
                # instance available again.
                self.db.share_replica_update(
                    context, active_replica['id'],
                    {'status': constants.STATUS_REVERTING_ERROR})
                self.db.share_snapshot_instance_update(
                    context, active_replica_snapshot['id'],
                    {'status': constants.STATUS_AVAILABLE})

        if reservations:
            QUOTAS.commit(
                context, reservations, project_id=project_id, user_id=user_id,
                share_type_id=active_replica['share_type_id'],
            )

        # The share's recorded size becomes the snapshot's size.
        self.db.share_update(context, share_id, {'size': snapshot['size']})
        self.db.share_replica_update(
            context, active_replica['id'],
            {'status': constants.STATUS_AVAILABLE})
        self.db.share_snapshot_instance_update(
            context, active_replica_snapshot['id'],
            {'status': constants.STATUS_AVAILABLE})

        msg = ('Share %(share)s reverted to snapshot %(snap)s '
               'successfully.')
        msg_args = {'share': share_id, 'snap': snapshot_id}
        LOG.info(msg, msg_args)
 3326 
 3327     @add_hooks
 3328     @utils.require_driver_initialized
 3329     @locked_share_replica_operation
 3330     def delete_replicated_snapshot(self, context, snapshot_id,
 3331                                    share_id=None, force=False):
 3332         """Delete a snapshot from a replicated share."""
 3333         # Grab the replica and snapshot information from the DB.
 3334         snapshot = self.db.share_snapshot_get(context, snapshot_id)
 3335         share_server = self._get_share_server(context, snapshot['share'])
 3336         replica_snapshots = (
 3337             self.db.share_snapshot_instance_get_all_with_filters(
 3338                 context, {'snapshot_ids': snapshot['id']},
 3339                 with_share_data=True)
 3340         )
 3341         replica_list = (
 3342             self.db.share_replicas_get_all_by_share(
 3343                 context, share_id, with_share_data=True,
 3344                 with_share_server=True)
 3345         )
 3346 
 3347         replica_list = [self._get_share_replica_dict(context, r)
 3348                         for r in replica_list]
 3349         replica_snapshots = [self._get_snapshot_instance_dict(context, s)
 3350                              for s in replica_snapshots]
 3351         deleted_instances = []
 3352         updated_instances = []
 3353         db_force_delete_msg = _('The driver was unable to delete some or all '
 3354                                 'of the share replica snapshots on the '
 3355                                 'backend/s. Since this operation is forced, '
 3356                                 'the replica snapshots will be deleted from '
 3357                                 'Manila.')
 3358 
 3359         try:
 3360 
 3361             updated_instances = self.driver.delete_replicated_snapshot(
 3362                 context, replica_list, replica_snapshots,
 3363                 share_server=share_server) or []
 3364 
 3365         except Exception:
 3366             with excutils.save_and_reraise_exception() as e:
 3367                 if force:
 3368                     # Can delete all instances if forced.
 3369                     deleted_instances = replica_snapshots
 3370                     LOG.exception(db_force_delete_msg)
 3371                     e.reraise = False
 3372                 else:
 3373                     for instance in replica_snapshots:
 3374                         self.db.share_snapshot_instance_update(
 3375                             context, instance['id'],
 3376                             {'status': constants.STATUS_ERROR_DELETING})
 3377 
 3378         if not deleted_instances:
 3379             if force:
 3380                 # Ignore model updates on 'force' delete.
 3381                 LOG.warning(db_force_delete_msg)
 3382                 deleted_instances = replica_snapshots
 3383             else:
 3384                 deleted_instances = list(filter(
 3385                     lambda x: x['status'] == constants.STATUS_DELETED,
 3386                     updated_instances))
 3387                 updated_instances = list(filter(
 3388                     lambda x: x['status'] != constants.STATUS_DELETED,
 3389                     updated_instances))
 3390 
 3391         for instance in deleted_instances:
 3392             self.db.share_snapshot_instance_delete(context, instance['id'])
 3393 
 3394         for instance in updated_instances:
 3395             self.db.share_snapshot_instance_update(
 3396                 context, instance['id'], instance)
 3397 
 3398     @periodic_task.periodic_task(spacing=CONF.replica_state_update_interval)
 3399     @utils.require_driver_initialized
 3400     def periodic_share_replica_snapshot_update(self, context):
 3401         LOG.debug("Updating status of share replica snapshots.")
 3402         transitional_statuses = (constants.STATUS_CREATING,
 3403                                  constants.STATUS_DELETING)
 3404         replicas = self.db.share_replicas_get_all(context,
 3405                                                   with_share_data=True)
 3406 
 3407         def qualified_replica(r):
 3408             # Filter non-active replicas belonging to this backend
 3409             return (share_utils.extract_host(r['host']) ==
 3410                     share_utils.extract_host(self.host) and
 3411                     r['replica_state'] != constants.REPLICA_STATE_ACTIVE)
 3412 
 3413         host_replicas = list(filter(
 3414             lambda x: qualified_replica(x), replicas))
 3415         transitional_replica_snapshots = []
 3416 
 3417         # Get snapshot instances for each replica that are in 'creating' or
 3418         # 'deleting' states.
 3419         for replica in host_replicas:
 3420             filters = {
 3421                 'share_instance_ids': replica['id'],
 3422                 'statuses': transitional_statuses,
 3423             }
 3424             replica_snapshots = (
 3425                 self.db.share_snapshot_instance_get_all_with_filters(
 3426                     context, filters, with_share_data=True)
 3427             )
 3428             transitional_replica_snapshots.extend(replica_snapshots)
 3429 
 3430         for replica_snapshot in transitional_replica_snapshots:
 3431             replica_snapshots = (
 3432                 self.db.share_snapshot_instance_get_all_with_filters(
 3433                     context,
 3434                     {'snapshot_ids': replica_snapshot['snapshot_id']})
 3435             )
 3436             share_id = replica_snapshot['share']['share_id']
 3437             self._update_replica_snapshot(
 3438                 context, replica_snapshot,
 3439                 replica_snapshots=replica_snapshots, share_id=share_id)
 3440 
    @locked_share_replica_operation
    def _update_replica_snapshot(self, context, replica_snapshot,
                                 replica_snapshots=None, share_id=None):
        """Refresh the status of one transitional replica snapshot instance.

        Re-reads the snapshot instance and its replica from the DB, asks
        the driver for an updated status via update_replicated_snapshot()
        and persists the result. Copes with the replica or the backend
        resource having disappeared in the meantime.

        :param replica_snapshot: snapshot instance model to update
        :param replica_snapshots: sibling snapshot instances of the same
            snapshot, passed through to the driver; may be None
        :param share_id: used by the locked_share_replica_operation
            decorator to serialize operations per share
        """
        # Re-grab the replica:
        try:
            share_replica = self.db.share_replica_get(
                context, replica_snapshot['share_instance_id'],
                with_share_data=True, with_share_server=True)
            replica_snapshot = self.db.share_snapshot_instance_get(
                context, replica_snapshot['id'], with_share_data=True)
        except exception.NotFound:
            # Replica may have been deleted, try to cleanup the snapshot
            # instance
            try:
                self.db.share_snapshot_instance_delete(
                    context, replica_snapshot['id'])
            except exception.ShareSnapshotInstanceNotFound:
                # snapshot instance has been deleted, nothing to do here
                pass
            return

        msg_payload = {
            'snapshot_instance': replica_snapshot['id'],
            'replica': share_replica['id'],
        }

        LOG.debug("Updating status of replica snapshot %(snapshot_instance)s: "
                  "on replica: %(replica)s", msg_payload)

        # Grab all the replica and snapshot information.
        replica_list = (
            self.db.share_replicas_get_all_by_share(
                context, share_replica['share_id'],
                with_share_data=True, with_share_server=True)
        )

        replica_list = [self._get_share_replica_dict(context, r)
                        for r in replica_list]
        replica_snapshots = replica_snapshots or []

        # Convert data to primitives to send to the driver.

        replica_snapshots = [self._get_snapshot_instance_dict(context, s)
                             for s in replica_snapshots]
        replica_snapshot = self._get_snapshot_instance_dict(
            context, replica_snapshot)
        share_replica = self._get_share_replica_dict(context, share_replica)
        share_server = share_replica['share_server']
        snapshot_update = None

        try:

            snapshot_update = self.driver.update_replicated_snapshot(
                context, replica_list, share_replica, replica_snapshots,
                replica_snapshot, share_server=share_server) or {}

        except exception.SnapshotResourceNotFound:
            # The backend resource is gone: if we were deleting it, the
            # DB record can simply be removed; otherwise it's an error.
            if replica_snapshot['status'] == constants.STATUS_DELETING:
                LOG.info('Snapshot %(snapshot_instance)s on replica '
                         '%(replica)s has been deleted.', msg_payload)
                self.db.share_snapshot_instance_delete(
                    context, replica_snapshot['id'])
            else:
                LOG.exception("Replica snapshot %s was not found on "
                              "the backend.", replica_snapshot['id'])
                self.db.share_snapshot_instance_update(
                    context, replica_snapshot['id'],
                    {'status': constants.STATUS_ERROR})
        except Exception:
            LOG.exception("Driver error while updating replica snapshot: "
                          "%s", replica_snapshot['id'])
            self.db.share_snapshot_instance_update(
                context, replica_snapshot['id'],
                {'status': constants.STATUS_ERROR})

        # Persist driver-reported updates; an 'available' snapshot is
        # considered fully created.
        if snapshot_update:
            snapshot_status = snapshot_update.get('status')
            if snapshot_status == constants.STATUS_AVAILABLE:
                snapshot_update['progress'] = '100%'
            self.db.share_snapshot_instance_update(
                context, replica_snapshot['id'], snapshot_update)
 3522 
 3523     @add_hooks
 3524     @utils.require_driver_initialized
 3525     def update_access(self, context, share_instance_id):
 3526         """Allow/Deny access to some share."""
 3527         share_instance = self._get_share_instance(context, share_instance_id)
 3528         share_server = self._get_share_server(context, share_instance)
 3529 
 3530         LOG.debug("Received request to update access for share instance"
 3531                   " %s.", share_instance_id)
 3532 
 3533         self.access_helper.update_access_rules(
 3534             context,
 3535             share_instance_id,
 3536             share_server=share_server)
 3537 
 3538     @periodic_task.periodic_task(spacing=CONF.periodic_interval)
 3539     @utils.require_driver_initialized
 3540     def _report_driver_status(self, context):
 3541         LOG.info('Updating share status')
 3542         share_stats = self.driver.get_share_stats(refresh=True)
 3543         if not share_stats:
 3544             return
 3545 
 3546         if self.driver.driver_handles_share_servers:
 3547             share_stats['server_pools_mapping'] = (
 3548                 self._get_servers_pool_mapping(context)
 3549             )
 3550 
 3551         self.update_service_capabilities(share_stats)
 3552 
 3553     @periodic_task.periodic_task(spacing=CONF.periodic_hooks_interval)
 3554     @utils.require_driver_initialized
 3555     def _execute_periodic_hook(self, context):
 3556         """Executes periodic-based hooks."""
 3557         # TODO(vponomaryov): add also access rules and share servers
 3558         share_instances = (
 3559             self.db.share_instances_get_all_by_host(
 3560                 context=context, host=self.host))
 3561         periodic_hook_data = self.driver.get_periodic_hook_data(
 3562             context=context, share_instances=share_instances)
 3563         for hook in self.hooks:
 3564             hook.execute_periodic_hook(
 3565                 context=context, periodic_hook_data=periodic_hook_data)
 3566 
 3567     def _get_servers_pool_mapping(self, context):
 3568         """Get info about relationships between pools and share_servers."""
 3569         share_servers = self.db.share_server_get_all_by_host(context,
 3570                                                              self.host)
 3571         return {server['id']: self.driver.get_share_server_pools(server)
 3572                 for server in share_servers}
 3573 
    @add_hooks
    @utils.require_driver_initialized
    def publish_service_capabilities(self, context):
        """Collect driver status and then publish it."""
        # Refresh the stats from the driver first so the subsequent
        # publish sends up-to-date capabilities.
        self._report_driver_status(context)
        self._publish_service_capabilities(context)
 3580 
 3581     def _form_server_setup_info(self, context, share_server, share_network):
 3582         # Network info is used by driver for setting up share server
 3583         # and getting server info on share creation.
 3584         network_allocations = self.db.network_allocations_get_for_share_server(
 3585             context, share_server['id'], label='user')
 3586         admin_network_allocations = (
 3587             self.db.network_allocations_get_for_share_server(
 3588                 context, share_server['id'], label='admin'))
 3589         # NOTE(vponomaryov): following network_info fields are deprecated:
 3590         # 'segmentation_id', 'cidr' and 'network_type'.
 3591         # And they should be used from network allocations directly.
 3592         # They should be removed right after no one uses them.
 3593         network_info = {
 3594             'server_id': share_server['id'],
 3595             'segmentation_id': share_network['segmentation_id'],
 3596             'cidr': share_network['cidr'],
 3597             'neutron_net_id': share_network['neutron_net_id'],
 3598             'neutron_subnet_id': share_network['neutron_subnet_id'],
 3599             'security_services': share_network['security_services'],
 3600             'network_allocations': network_allocations,
 3601             'admin_network_allocations': admin_network_allocations,
 3602             'backend_details': share_server.get('backend_details'),
 3603             'network_type': share_network['network_type'],
 3604         }
 3605         return network_info
 3606 
 3607     def _setup_server(self, context, share_server, metadata=None):
 3608         try:
 3609             share_network = self.db.share_network_get(
 3610                 context, share_server['share_network_id'])
 3611             self.driver.allocate_network(context, share_server, share_network)
 3612             self.driver.allocate_admin_network(context, share_server)
 3613 
 3614             # Get share_network again in case it was updated.
 3615             share_network = self.db.share_network_get(
 3616                 context, share_server['share_network_id'])
 3617             network_info = self._form_server_setup_info(
 3618                 context, share_server, share_network)
 3619             self._validate_segmentation_id(network_info)
 3620 
 3621             # NOTE(vponomaryov): Save security services data to share server
 3622             # details table to remove dependency from share network after
 3623             # creation operation. It will allow us to delete share server and
 3624             # share network separately without dependency on each other.
 3625             for security_service in network_info['security_services']:
 3626                 ss_type = security_service['type']
 3627                 data = {
 3628                     'name': security_service['name'],
 3629                     'ou': security_service['ou'],
 3630                     'domain': security_service['domain'],
 3631                     'server': security_service['server'],
 3632                     'dns_ip': security_service['dns_ip'],
 3633                     'user': security_service['user'],
 3634                     'type': ss_type,
 3635                     'password': security_service['password'],
 3636                 }
 3637                 self.db.share_server_backend_details_set(
 3638                     context, share_server['id'],
 3639                     {'security_service_' + ss_type: jsonutils.dumps(data)})
 3640 
 3641             server_info = self.driver.setup_server(
 3642                 network_info, metadata=metadata)
 3643 
 3644             self.driver.update_network_allocation(context, share_server)
 3645             self.driver.update_admin_network_allocation(context, share_server)
 3646 
 3647             if server_info and isinstance(server_info, dict):
 3648                 self.db.share_server_backend_details_set(
 3649                     context, share_server['id'], server_info)
 3650             return self.db.share_server_update(
 3651                 context, share_server['id'],
 3652                 {'status': constants.STATUS_ACTIVE,
 3653                  'identifier': server_info.get(
 3654                      'identifier', share_server['id'])})
 3655         except Exception as e:
 3656             with excutils.save_and_reraise_exception():
 3657                 details = getattr(e, "detail_data", {})
 3658 
 3659                 if isinstance(details, dict):
 3660                     server_details = details.get("server_details", {})
 3661                     if not isinstance(server_details, dict):
 3662                         LOG.debug(
 3663                             ("Cannot save non-dict data (%(data)s) "
 3664                              "provided as 'server details' of "
 3665                              "failed share server '%(server)s'."),
 3666                             {"server": share_server["id"],
 3667                              "data": server_details})
 3668                     else:
 3669                         invalid_details = []
 3670                         for key, value in server_details.items():
 3671                             try:
 3672                                 self.db.share_server_backend_details_set(
 3673                                     context, share_server['id'], {key: value})
 3674                             except Exception:
 3675                                 invalid_details.append("%(key)s: %(value)s" % {
 3676                                     'key': six.text_type(key),
 3677                                     'value': six.text_type(value)
 3678                                 })
 3679                         if invalid_details:
 3680                             LOG.debug(
 3681                                 ("Following server details "
 3682                                  "cannot be written to db : %s"),
 3683                                 six.text_type("\n".join(invalid_details)))
 3684                 else:
 3685                     LOG.debug(
 3686                         ("Cannot save non-dict data (%(data)s) provided as "
 3687                          "'detail data' of failed share server '%(server)s'."),
 3688                         {"server": share_server["id"], "data": details})
 3689 
 3690                 self.db.share_server_update(
 3691                     context, share_server['id'],
 3692                     {'status': constants.STATUS_ERROR})
 3693                 self.driver.deallocate_network(context, share_server['id'])
 3694 
 3695     def _validate_segmentation_id(self, network_info):
 3696         """Raises exception if the segmentation type is incorrect."""
 3697         if (network_info['network_type'] in (None, 'flat') and
 3698                 network_info['segmentation_id']):
 3699             msg = _('A segmentation ID %(vlan_id)s was specified but can not '
 3700                     'be used with a network of type %(seg_type)s; the '
 3701                     'segmentation ID option must be omitted or set to 0')
 3702             raise exception.NetworkBadConfigurationException(
 3703                 reason=msg % {'vlan_id': network_info['segmentation_id'],
 3704                               'seg_type': network_info['network_type']})
 3705         elif (network_info['network_type'] == 'vlan'
 3706               and (network_info['segmentation_id'] is None
 3707                    or int(network_info['segmentation_id']) > 4094
 3708                    or int(network_info['segmentation_id']) < 1)):
 3709             msg = _('A segmentation ID %s was specified but is not valid for '
 3710                     'a VLAN network type; the segmentation ID must be an '
 3711                     'integer value in the range of [1,4094]')
 3712             raise exception.NetworkBadConfigurationException(
 3713                 reason=msg % network_info['segmentation_id'])
 3714         elif (network_info['network_type'] == 'vxlan'
 3715               and (network_info['segmentation_id'] is None
 3716                    or int(network_info['segmentation_id']) > 16777215
 3717                    or int(network_info['segmentation_id']) < 1)):
 3718             msg = _('A segmentation ID %s was specified but is not valid for '
 3719                     'a VXLAN network type; the segmentation ID must be an '
 3720                     'integer value in the range of [1,16777215]')
 3721             raise exception.NetworkBadConfigurationException(
 3722                 reason=msg % network_info['segmentation_id'])
 3723         elif (network_info['network_type'] == 'gre'
 3724               and (network_info['segmentation_id'] is None
 3725                    or int(network_info['segmentation_id']) > 4294967295
 3726                    or int(network_info['segmentation_id']) < 1)):
 3727             msg = _('A segmentation ID %s was specified but is not valid for '
 3728                     'a GRE network type; the segmentation ID must be an '
 3729                     'integer value in the range of [1, 4294967295]')
 3730             raise exception.NetworkBadConfigurationException(
 3731                 reason=msg % network_info['segmentation_id'])
 3732 
    @add_hooks
    @utils.require_driver_initialized
    def delete_share_server(self, context, share_server):
        """Tear down a share server and release its network allocations.

        The teardown runs under a per-share-network lock to avoid racing
        with concurrent share creation on the same share network.

        :param share_server: share server model to delete
        :raises: exception.ShareServerInUse if shares still depend on it
        """

        @utils.synchronized(
            "share_manager_%s" % share_server['share_network_id'])
        def _wrapped_delete_share_server():
            # NOTE(vponomaryov): Verify that there are no dependent shares.
            # Without this verification we can get here exception in next case:
            # share-server-delete API was called after share creation scheduled
            # and share_server reached ACTIVE status, but before update
            # of share_server_id field for share. If so, after lock release
            # this method starts executing when amount of dependent shares
            # has been changed.
            server_id = share_server['id']
            shares = self.db.share_instances_get_all_by_share_server(
                context, server_id)

            if shares:
                raise exception.ShareServerInUse(share_server_id=server_id)

            server_details = share_server['backend_details']

            self.db.share_server_update(context, server_id,
                                        {'status': constants.STATUS_DELETING})
            try:
                LOG.debug("Deleting share server '%s'", server_id)
                # Reconstruct the security services persisted at setup
                # time so the driver can unconfigure them on teardown.
                security_services = []
                for ss_name in constants.SECURITY_SERVICES_ALLOWED_TYPES:
                    ss = server_details.get('security_service_' + ss_name)
                    if ss:
                        security_services.append(jsonutils.loads(ss))

                self.driver.teardown_server(
                    server_details=server_details,
                    security_services=security_services)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        "Share server '%s' failed on deletion.",
                        server_id)
                    self.db.share_server_update(
                        context, server_id, {'status': constants.STATUS_ERROR})
            else:
                # Only remove the DB record once teardown succeeded.
                self.db.share_server_delete(context, share_server['id'])

        _wrapped_delete_share_server()
        LOG.info(
            "Share server '%s' has been deleted successfully.",
            share_server['id'])
        self.driver.deallocate_network(context, share_server['id'])
 3784 
    @add_hooks
    @utils.require_driver_initialized
    def extend_share(self, context, share_id, new_size, reservations):
        """Extend a share to new_size and commit the quota reservations.

        On driver failure the share is moved to 'extending_error' status,
        the reservations are rolled back, a user message is created and
        ShareExtendingError is raised.

        :param share_id: ID of the share to extend
        :param new_size: new size for the share
        :param reservations: quota reservations made by the caller
        :raises: exception.ShareExtendingError on driver failure
        """
        context = context.elevated()
        share = self.db.share_get(context, share_id)
        share_instance = self._get_share_instance(context, share)
        share_server = self._get_share_server(context, share_instance)
        project_id = share['project_id']
        user_id = share['user_id']

        self._notify_about_share_usage(context, share,
                                       share_instance, "extend.start")

        try:
            self.driver.extend_share(
                share_instance, new_size, share_server=share_server)
        except Exception as e:
            LOG.exception("Extend share failed.", resource=share)
            self.message_api.create(
                context,
                message_field.Action.EXTEND,
                project_id,
                resource_type=message_field.Resource.SHARE,
                resource_id=share_id,
                detail=message_field.Detail.DRIVER_FAILED_EXTEND)
            try:
                self.db.share_update(
                    context, share['id'],
                    {'status': constants.STATUS_EXTENDING_ERROR}
                )
                raise exception.ShareExtendingError(
                    reason=six.text_type(e), share_id=share_id)
            finally:
                # Roll back the reservations whether or not the status
                # update above succeeded.
                QUOTAS.rollback(
                    context, reservations, project_id=project_id,
                    user_id=user_id,
                    share_type_id=share_instance['share_type_id'],
                )

        # we give the user_id of the share, to update the quota usage
        # for the user, who created the share, because on share delete
        # only this quota will be decreased
        QUOTAS.commit(
            context, reservations, project_id=project_id,
            user_id=user_id, share_type_id=share_instance['share_type_id'],
        )

        share_update = {
            'size': int(new_size),
            # NOTE(u_glide): translation to lower case should be removed in
            # a row with usage of upper case of share statuses in all places
            'status': constants.STATUS_AVAILABLE.lower()
        }
        share = self.db.share_update(context, share['id'], share_update)

        LOG.info("Extend share completed successfully.", resource=share)

        self._notify_about_share_usage(context, share,
                                       share_instance, "extend.end")
 3844 
    @add_hooks
    @utils.require_driver_initialized
    def shrink_share(self, context, share_id, new_size):
        """Shrink an existing share to ``new_size`` gigabytes.

        A negative quota reservation (for the space being freed) is taken
        before the driver is asked to shrink. The reservation is committed
        on success and rolled back if the driver call fails.

        :param context: request context; elevated before use.
        :param share_id: ID of the share to shrink.
        :param new_size: target size in GiB (coerced to int).
        :raises exception.ShareShrinkingError: if the quota update or the
            driver call fails. When the driver raises
            ShareShrinkingPossibleDataLoss, the share is flagged with a
            distinct data-loss status before this error is raised.
        """
        context = context.elevated()
        share = self.db.share_get(context, share_id)
        share_instance = self._get_share_instance(context, share)
        share_server = self._get_share_server(context, share_instance)
        project_id = share['project_id']
        user_id = share['user_id']
        new_size = int(new_size)

        self._notify_about_share_usage(context, share,
                                       share_instance, "shrink.start")

        def error_occurred(exc, msg, status=constants.STATUS_SHRINKING_ERROR):
            # Flag the share with an error status and convert any failure
            # into ShareShrinkingError for the caller.
            LOG.exception(msg, resource=share)
            self.db.share_update(context, share['id'], {'status': status})

            raise exception.ShareShrinkingError(
                reason=six.text_type(exc), share_id=share_id)

        reservations = None

        try:
            size_decrease = int(share['size']) - new_size
            # we give the user_id of the share, to update the quota usage
            # for the user, who created the share, because on share delete
            # only this quota will be decreased
            reservations = QUOTAS.reserve(
                context,
                project_id=project_id,
                user_id=user_id,
                share_type_id=share_instance['share_type_id'],
                gigabytes=-size_decrease,
            )
        except Exception as e:
            error_occurred(
                e, ("Failed to update quota on share shrinking."))

        try:
            self.driver.shrink_share(
                share_instance, new_size, share_server=share_server)
        # NOTE(u_glide): Replace following except block by error notification
        # when Manila has such mechanism. It's possible because drivers
        # shouldn't shrink share when this validation error occurs.
        except Exception as e:
            if isinstance(e, exception.ShareShrinkingPossibleDataLoss):
                msg = ("Shrink share failed due to possible data loss.")
                status = constants.STATUS_SHRINKING_POSSIBLE_DATA_LOSS_ERROR
                error_params = {'msg': msg, 'status': status}
            else:
                error_params = {'msg': ("Shrink share failed.")}

            try:
                error_occurred(e, **error_params)
            finally:
                # error_occurred always raises, so the reservation must be
                # returned here; the raised error then propagates.
                QUOTAS.rollback(
                    context, reservations, project_id=project_id,
                    user_id=user_id,
                    share_type_id=share_instance['share_type_id'],
                )

        # Success: release the freed quota permanently.
        QUOTAS.commit(
            context, reservations, project_id=project_id,
            user_id=user_id, share_type_id=share_instance['share_type_id'],
        )

        share_update = {
            'size': new_size,
            'status': constants.STATUS_AVAILABLE
        }
        share = self.db.share_update(context, share['id'], share_update)

        LOG.info("Shrink share completed successfully.", resource=share)

        self._notify_about_share_usage(context, share,
                                       share_instance, "shrink.end")
 3922 
    @utils.require_driver_initialized
    def create_share_group(self, context, share_group_id):
        """Create a share group, optionally from a share group snapshot.

        When the group references a source share group snapshot, the
        driver creates the group (and its shares) from that snapshot,
        reusing the parent share server where the driver handles share
        servers. On any failure the group and its share instances are
        flagged with an error status and the original exception re-raised.

        :param context: request context; elevated before use.
        :param share_group_id: ID of the share group to create.
        :returns: the share group ID.
        :raises exception.InvalidInput: if a share network is supplied but
            the driver does not handle share servers.
        """
        context = context.elevated()
        share_group_ref = self.db.share_group_get(context, share_group_id)
        share_group_ref['host'] = self.host
        shares = self.db.share_instances_get_all_by_share_group_id(
            context, share_group_id)

        source_share_group_snapshot_id = share_group_ref.get(
            "source_share_group_snapshot_id")
        snap_ref = None
        parent_share_server_id = None
        if source_share_group_snapshot_id:
            # Load the source snapshot and attach full share data to each
            # member so the driver can recreate the shares from it.
            snap_ref = self.db.share_group_snapshot_get(
                context, source_share_group_snapshot_id)
            for member in snap_ref['share_group_snapshot_members']:
                member['share'] = self.db.share_instance_get(
                    context, member['share_instance_id'], with_share_data=True)
            if 'share_group' in snap_ref:
                parent_share_server_id = snap_ref['share_group'][
                    'share_server_id']

        status = constants.STATUS_AVAILABLE

        share_network_id = share_group_ref.get('share_network_id')
        share_server = None

        # Prefer the parent group's share server (and its network) when
        # creating from a snapshot with a DHSS=True driver.
        if parent_share_server_id and self.driver.driver_handles_share_servers:
            share_server = self.db.share_server_get(context,
                                                    parent_share_server_id)
            share_network_id = share_server['share_network_id']

        if share_network_id and not self.driver.driver_handles_share_servers:
            self.db.share_group_update(
                context, share_group_id, {'status': constants.STATUS_ERROR})
            msg = _("Driver does not expect share-network to be provided "
                    "with current configuration.")
            raise exception.InvalidInput(reason=msg)

        if not share_server and share_network_id:
            try:
                share_server, share_group_ref = (
                    self._provide_share_server_for_share_group(
                        context, share_network_id, share_group_ref,
                        share_group_snapshot=snap_ref,
                    )
                )
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error("Failed to get share server"
                              " for share group creation.")
                    self.db.share_group_update(
                        context, share_group_id,
                        {'status': constants.STATUS_ERROR})
                    self.message_api.create(
                        context,
                        message_field.Action.CREATE,
                        share_group_ref['project_id'],
                        resource_type=message_field.Resource.SHARE_GROUP,
                        resource_id=share_group_id,
                        detail=message_field.Detail.NO_SHARE_SERVER)

        try:
            # TODO(ameade): Add notification for create.start
            LOG.info("Share group %s: creating", share_group_id)

            model_update, share_update_list = None, None

            share_group_ref['shares'] = shares
            if snap_ref:
                model_update, share_update_list = (
                    self.driver.create_share_group_from_share_group_snapshot(
                        context, share_group_ref, snap_ref,
                        share_server=share_server))
            else:
                model_update = self.driver.create_share_group(
                    context, share_group_ref, share_server=share_server)

            if model_update:
                share_group_ref = self.db.share_group_update(
                    context, share_group_ref['id'], model_update)

            if share_update_list:
                # Persist per-share updates returned by the driver; export
                # locations live in a separate table, so they are split out.
                for share in share_update_list:
                    values = copy.deepcopy(share)
                    values.pop('id')
                    export_locations = values.pop('export_locations')
                    self.db.share_instance_update(context, share['id'], values)
                    self.db.share_export_locations_update(context,
                                                          share['id'],
                                                          export_locations)

        except Exception:
            with excutils.save_and_reraise_exception():
                # Mark the group and all of its share instances as errored
                # before re-raising the original exception.
                self.db.share_group_update(
                    context,
                    share_group_ref['id'],
                    {'status': constants.STATUS_ERROR,
                     'availability_zone_id': self._get_az_for_share_group(
                         context, share_group_ref),
                     'consistent_snapshot_support': self.driver._stats[
                         'share_group_stats'].get(
                             'consistent_snapshot_support')})
                for share in shares:
                    self.db.share_instance_update(
                        context, share['id'],
                        {'status': constants.STATUS_ERROR})
                LOG.error("Share group %s: create failed", share_group_id)

        now = timeutils.utcnow()
        for share in shares:
            self.db.share_instance_update(
                context, share['id'], {'status': constants.STATUS_AVAILABLE})
        self.db.share_group_update(
            context,
            share_group_ref['id'],
            {'status': status,
             'created_at': now,
             'availability_zone_id': self._get_az_for_share_group(
                 context, share_group_ref),
             'consistent_snapshot_support': self.driver._stats[
                 'share_group_stats'].get('consistent_snapshot_support')})
        LOG.info("Share group %s: created successfully", share_group_id)

        # TODO(ameade): Add notification for create.end

        return share_group_ref['id']
 4050 
 4051     def _get_az_for_share_group(self, context, share_group_ref):
 4052         if not share_group_ref['availability_zone_id']:
 4053             return self.db.availability_zone_get(
 4054                 context, self.availability_zone)['id']
 4055         return share_group_ref['availability_zone_id']
 4056 
 4057     @utils.require_driver_initialized
 4058     def delete_share_group(self, context, share_group_id):
 4059         context = context.elevated()
 4060         share_group_ref = self.db.share_group_get(context, share_group_id)
 4061         share_group_ref['host'] = self.host
 4062         share_group_ref['shares'] = (
 4063             self.db.share_instances_get_all_by_share_group_id(
 4064                 context, share_group_id))
 4065 
 4066         # TODO(ameade): Add notification for delete.start
 4067 
 4068         try:
 4069             LOG.info("Share group %s: deleting", share_group_id)
 4070             share_server = None
 4071             if share_group_ref.get('share_server_id'):
 4072                 share_server = self.db.share_server_get(
 4073                     context, share_group_ref['share_server_id'])
 4074             model_update = self.driver.delete_share_group(
 4075                 context, share_group_ref, share_server=share_server)
 4076 
 4077             if model_update:
 4078                 share_group_ref = self.db.share_group_update(
 4079                     context, share_group_ref['id'], model_update)
 4080 
 4081         except Exception:
 4082             with excutils.save_and_reraise_exception():
 4083                 self.db.share_group_update(
 4084                     context,
 4085                     share_group_ref['id'],
 4086                     {'status': constants.STATUS_ERROR})
 4087                 LOG.error("Share group %s: delete failed",
 4088                           share_group_ref['id'])
 4089 
 4090         self.db.share_group_destroy(context, share_group_id)
 4091         LOG.info("Share group %s: deleted successfully", share_group_id)
 4092 
 4093         # TODO(ameade): Add notification for delete.end
 4094 
    @utils.require_driver_initialized
    def create_share_group_snapshot(self, context, share_group_snapshot_id):
        """Create a share group snapshot through the driver.

        The driver may return per-member updates; only a whitelisted set of
        keys ('status', 'provider_location', 'size') is persisted for each
        member, and members the driver did not report are marked with the
        default (available) status afterwards. On failure the snapshot is
        flagged with an error status and the exception re-raised.

        :param context: request context; elevated before use.
        :param share_group_snapshot_id: ID of the group snapshot to create.
        :returns: the share group snapshot ID.
        """
        context = context.elevated()
        snap_ref = self.db.share_group_snapshot_get(
            context, share_group_snapshot_id)
        # Attach full share data to each member for the driver's benefit.
        for member in snap_ref['share_group_snapshot_members']:
            member['share'] = self.db.share_instance_get(
                context, member['share_instance_id'], with_share_data=True)

        status = constants.STATUS_AVAILABLE
        now = timeutils.utcnow()
        # IDs of members already updated from driver output; the remainder
        # get a default status update at the end.
        updated_members_ids = []

        try:
            LOG.info("Share group snapshot %s: creating",
                     share_group_snapshot_id)
            share_server = None
            if snap_ref['share_group'].get('share_server_id'):
                share_server = self.db.share_server_get(
                    context, snap_ref['share_group']['share_server_id'])
            snapshot_update, member_update_list = (
                self.driver.create_share_group_snapshot(
                    context, snap_ref, share_server=share_server))

            for update in (member_update_list or []):
                # NOTE(vponomaryov): we expect that each member is a dict
                # and has required 'id' key and some optional keys
                # to be updated such as 'provider_location'. It is planned
                # to have here also 'export_locations' when it is supported.
                member_id = update.pop('id', None)
                if not member_id:
                    LOG.warning(
                        "One of share group snapshot '%s' members does not "
                        "have reference ID. Its update was skipped.",
                        share_group_snapshot_id)
                    continue
                # TODO(vponomaryov): remove following condition when
                # sgs members start supporting export locations.
                if 'export_locations' in update:
                    LOG.debug(
                        "Removing 'export_locations' data from "
                        "share group snapshot member '%s' update because "
                        "export locations are not supported.",
                        member_id)
                    update.pop('export_locations')

                # Build the member DB update from the allowed keys only;
                # each accepted key is popped so leftovers can be logged.
                db_update = {
                    'updated_at': now,
                    'status': update.pop('status', status)
                }
                if 'provider_location' in update:
                    db_update['provider_location'] = (
                        update.pop('provider_location'))
                if 'size' in update:
                    db_update['size'] = int(update.pop('size'))

                updated_members_ids.append(member_id)
                self.db.share_group_snapshot_member_update(
                    context, member_id, db_update)

                if update:
                    # Anything still left in the driver's update dict was
                    # not an allowed key and is ignored (logged only).
                    LOG.debug(
                        "Share group snapshot ID='%(sgs_id)s', "
                        "share group snapshot member ID='%(sgsm_id)s'. "
                        "Following keys of sgs member were not updated "
                        "as not allowed: %(keys)s.",
                        {'sgs_id': share_group_snapshot_id,
                         'sgsm_id': member_id,
                         'keys': ', '.join(update)})

            if snapshot_update:
                snap_ref = self.db.share_group_snapshot_update(
                    context, snap_ref['id'], snapshot_update)

        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.share_group_snapshot_update(
                    context,
                    snap_ref['id'],
                    {'status': constants.STATUS_ERROR})
                LOG.error("Share group snapshot %s: create failed",
                          share_group_snapshot_id)

        # Members the driver did not explicitly update get the default
        # status/updated_at stamp.
        for member in (snap_ref.get('share_group_snapshot_members') or []):
            if member['id'] in updated_members_ids:
                continue
            update = {'status': status, 'updated_at': now}
            self.db.share_group_snapshot_member_update(
                context, member['id'], update)

        self.db.share_group_snapshot_update(
            context, snap_ref['id'],
            {'status': status, 'updated_at': now})
        LOG.info("Share group snapshot %s: created successfully",
                 share_group_snapshot_id)

        return snap_ref['id']
 4192 
    @utils.require_driver_initialized
    def delete_share_group_snapshot(self, context, share_group_snapshot_id):
        """Delete a share group snapshot through the driver.

        Member updates returned by the driver are folded into the snapshot
        update before it is persisted; the snapshot DB record is destroyed
        afterwards. On failure the snapshot is flagged with an error status
        and the exception re-raised.

        :param context: request context; elevated before use.
        :param share_group_snapshot_id: ID of the group snapshot to delete.
        """
        context = context.elevated()
        snap_ref = self.db.share_group_snapshot_get(
            context, share_group_snapshot_id)
        # Attach full share data to each member for the driver's benefit.
        for member in snap_ref['share_group_snapshot_members']:
            member['share'] = self.db.share_instance_get(
                context, member['share_instance_id'], with_share_data=True)

        snapshot_update = False

        try:
            LOG.info("Share group snapshot %s: deleting",
                     share_group_snapshot_id)

            share_server = None
            if snap_ref['share_group'].get('share_server_id'):
                share_server = self.db.share_server_get(
                    context, snap_ref['share_group']['share_server_id'])

            snapshot_update, member_update_list = (
                self.driver.delete_share_group_snapshot(
                    context, snap_ref, share_server=share_server))

            # Fold member updates into the snapshot update dict; a falsy
            # snapshot_update from the driver is replaced with a fresh dict.
            if member_update_list:
                snapshot_update = snapshot_update or {}
                snapshot_update['share_group_snapshot_members'] = []
            for update in (member_update_list or []):
                snapshot_update['share_group_snapshot_members'].append(update)

            if snapshot_update:
                snap_ref = self.db.share_group_snapshot_update(
                    context, snap_ref['id'], snapshot_update)

        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.share_group_snapshot_update(
                    context,
                    snap_ref['id'],
                    {'status': constants.STATUS_ERROR})
                LOG.error("Share group snapshot %s: delete failed",
                          snap_ref['name'])

        self.db.share_group_snapshot_destroy(context, share_group_snapshot_id)

        LOG.info("Share group snapshot %s: deleted successfully",
                 share_group_snapshot_id)
 4240 
 4241     def _get_share_replica_dict(self, context, share_replica):
 4242         # TODO(gouthamr): remove method when the db layer returns primitives
 4243         share_replica_ref = {
 4244             'id': share_replica.get('id'),
 4245             'name': share_replica.get('name'),
 4246             'share_id': share_replica.get('share_id'),
 4247             'host': share_replica.get('host'),
 4248             'status': share_replica.get('status'),
 4249             'replica_state': share_replica.get('replica_state'),
 4250             'availability_zone_id': share_replica.get('availability_zone_id'),
 4251             'export_locations': share_replica.get('export_locations') or [],
 4252             'share_network_id': share_replica.get('share_network_id'),
 4253             'share_server_id': share_replica.get('share_server_id'),
 4254             'deleted': share_replica.get('deleted'),
 4255             'terminated_at': share_replica.get('terminated_at'),
 4256             'launched_at': share_replica.get('launched_at'),
 4257             'scheduled_at': share_replica.get('scheduled_at'),
 4258             'updated_at': share_replica.get('updated_at'),
 4259             'deleted_at': share_replica.get('deleted_at'),
 4260             'created_at': share_replica.get('created_at'),
 4261             'share_server': self._get_share_server(context, share_replica),
 4262             'access_rules_status': share_replica.get('access_rules_status'),
 4263             # Share details
 4264             'user_id': share_replica.get('user_id'),
 4265             'project_id': share_replica.get('project_id'),
 4266             'size': share_replica.get('size'),
 4267             'display_name': share_replica.get('display_name'),
 4268             'display_description': share_replica.get('display_description'),
 4269             'snapshot_id': share_replica.get('snapshot_id'),
 4270             'share_proto': share_replica.get('share_proto'),
 4271             'share_type_id': share_replica.get('share_type_id'),
 4272             'is_public': share_replica.get('is_public'),
 4273             'share_group_id': share_replica.get('share_group_id'),
 4274             'source_share_group_snapshot_member_id': share_replica.get(
 4275                 'source_share_group_snapshot_member_id'),
 4276             'availability_zone': share_replica.get('availability_zone'),
 4277         }
 4278 
 4279         return share_replica_ref
 4280 
 4281     def _get_snapshot_instance_dict(self, context, snapshot_instance,
 4282                                     snapshot=None):
 4283         # TODO(gouthamr): remove method when the db layer returns primitives
 4284         snapshot_instance_ref = {
 4285             'name': snapshot_instance.get('name'),
 4286             'share_id': snapshot_instance.get('share_id'),
 4287             'share_name': snapshot_instance.get('share_name'),
 4288             'status': snapshot_instance.get('status'),
 4289             'id': snapshot_instance.get('id'),
 4290             'deleted': snapshot_instance.get('deleted') or False,
 4291             'created_at': snapshot_instance.get('created_at'),
 4292             'share': snapshot_instance.get('share'),
 4293             'updated_at': snapshot_instance.get('updated_at'),
 4294             'share_instance_id': snapshot_instance.get('share_instance_id'),
 4295             'snapshot_id': snapshot_instance.get('snapshot_id'),
 4296             'progress': snapshot_instance.get('progress'),
 4297             'deleted_at': snapshot_instance.get('deleted_at'),
 4298             'provider_location': snapshot_instance.get('provider_location'),
 4299         }
 4300 
 4301         if snapshot:
 4302             snapshot_instance_ref.update({
 4303                 'size': snapshot.get('size'),
 4304             })
 4305 
 4306         return snapshot_instance_ref
 4307 
 4308     def snapshot_update_access(self, context, snapshot_instance_id):
 4309         snapshot_instance = self.db.share_snapshot_instance_get(
 4310             context, snapshot_instance_id, with_share_data=True)
 4311 
 4312         share_server = self._get_share_server(
 4313             context, snapshot_instance['share_instance'])
 4314 
 4315         self.snapshot_access_helper.update_access_rules(
 4316             context, snapshot_instance['id'], share_server=share_server)
 4317 
 4318     def _notify_about_share_usage(self, context, share, share_instance,
 4319                                   event_suffix, extra_usage_info=None):
 4320         share_utils.notify_about_share_usage(
 4321             context, share, share_instance, event_suffix,
 4322             extra_usage_info=extra_usage_info, host=self.host)
 4323 
 4324     @periodic_task.periodic_task(
 4325         spacing=CONF.share_usage_size_update_interval,
 4326         enabled=CONF.enable_gathering_share_usage_size)
 4327     @utils.require_driver_initialized
 4328     def update_share_usage_size(self, context):
 4329         """Invokes driver to gather usage size of shares."""
 4330         updated_share_instances = []
 4331         share_instances = self.db.share_instances_get_all_by_host(
 4332             context, host=self.host, with_share_data=True)
 4333 
 4334         if share_instances:
 4335             try:
 4336                 updated_share_instances = self.driver.update_share_usage_size(
 4337                     context, share_instances)
 4338             except Exception:
 4339                 LOG.exception("Gather share usage size failure.")
 4340 
 4341         for si in updated_share_instances:
 4342             share_instance = self._get_share_instance(context, si['id'])
 4343             share = self.db.share_get(context, share_instance['share_id'])
 4344             self._notify_about_share_usage(
 4345                 context, share, share_instance, "consumed.size",
 4346                 extra_usage_info={'used_size': si['used_size'],
 4347                                   'gathered_at': si['gathered_at']})