"Fossies" - the Fresh Open Source Software Archive

Member "cinder-17.1.0/cinder/volume/drivers/solidfire.py" (8 Mar 2021, 130382 Bytes) of package /linux/misc/openstack/cinder-17.1.0.tar.gz:


As a service, Fossies has formatted the requested source file as a Python syntax-highlighted page with prefixed line numbers. The uninterpreted source code file and a side-by-side code changes report against the previous release (17.0.1_vs_17.1.0) are also available from Fossies.

    1 # All Rights Reserved.
    2 # Copyright 2013 SolidFire Inc
    3 
    4 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
    5 #    not use this file except in compliance with the License. You may obtain
    6 #    a copy of the License at
    7 #
    8 #         http://www.apache.org/licenses/LICENSE-2.0
    9 #
   10 #    Unless required by applicable law or agreed to in writing, software
   11 #    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   12 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   13 #    License for the specific language governing permissions and limitations
   14 #    under the License.
   15 
   16 import inspect
   17 import json
   18 import math
   19 import re
   20 import socket
   21 import string
   22 import time
   23 import warnings
   24 
   25 from oslo_config import cfg
   26 from oslo_log import log as logging
   27 from oslo_service import loopingcall
   28 from oslo_utils import excutils
   29 from oslo_utils import timeutils
   30 from oslo_utils import units
   31 import requests
   32 import six
   33 
   34 from cinder import context
   35 from cinder import exception
   36 from cinder.i18n import _
   37 from cinder import interface
   38 from cinder.objects import fields
   39 from cinder import utils
   40 from cinder.volume import configuration
   41 from cinder.volume.drivers.san import san
   42 from cinder.volume import qos_specs
   43 from cinder.volume.targets import iscsi as iscsi_driver
   44 from cinder.volume import volume_types
   45 from cinder.volume import volume_utils
   46 
   47 LOG = logging.getLogger(__name__)
   48 
   49 sf_opts = [
   50     cfg.BoolOpt('sf_emulate_512',
   51                 default=True,
   52                 help='Set 512 byte emulation on volume creation.'),
   53 
   54     cfg.BoolOpt('sf_allow_tenant_qos',
   55                 default=False,
   56                 help='Allow tenants to specify QOS on create'),
   57 
   58     cfg.StrOpt('sf_account_prefix',
   59                help='Create SolidFire accounts with this prefix. Any string '
   60                     'can be used here, but the string \"hostname\" is special '
   61                     'and will create a prefix using the cinder node hostname '
   62                     '(previous default behavior).  The default is NO prefix.'),
   63 
   64     cfg.StrOpt('sf_volume_prefix',
   65                default='UUID-',
   66                help='Create SolidFire volumes with this prefix. Volume names '
   67                     'are of the form <sf_volume_prefix><cinder-volume-id>.  '
   68                     'The default is to use a prefix of \'UUID-\'.'),
   69 
   70     cfg.StrOpt('sf_svip',
   71                help='Overrides default cluster SVIP with the one specified. '
   72                     'This is required for deployments that have implemented '
   73                     'the use of VLANs for iSCSI networks in their cloud.'),
   74 
   75     cfg.PortOpt('sf_api_port',
   76                 default=443,
   77                 help='SolidFire API port. Useful if the device api is behind '
   78                      'a proxy on a different port.'),
   79 
   80     cfg.BoolOpt('sf_enable_vag',
   81                 default=False,
   82                 help='Utilize volume access groups on a per-tenant basis.'),
   83 
   84     cfg.StrOpt('sf_provisioning_calc',
   85                default='maxProvisionedSpace',
   86                choices=['maxProvisionedSpace', 'usedSpace'],
   87                help='Change how SolidFire reports used space and '
   88                     'provisioning calculations. If this parameter is set to '
   89                     '\'usedSpace\', the driver will report correct '
   90                     'values as expected by Cinder '
   91                     'thin provisioning.'),
   92 
   93     cfg.IntOpt('sf_cluster_pairing_timeout',
   94                default=60,
   95                min=3,
   96                help='Sets time in seconds to wait for clusters to complete '
   97                     'pairing.'),
   98 
   99     cfg.IntOpt('sf_volume_pairing_timeout',
  100                default=3600,
  101                min=30,
  102                help='Sets time in seconds to wait for a migrating volume to '
  103                     'complete pairing and sync.'),
  104 
  105     cfg.IntOpt('sf_api_request_timeout',
  106                default=30,
  107                min=30,
  108                help='Sets time in seconds to wait for an api request to '
  109                     'complete.'),
  110 
  111     cfg.IntOpt('sf_volume_clone_timeout',
  112                default=600,
  113                min=60,
  114                help='Sets time in seconds to wait for a clone of a volume or '
  115                     'snapshot to complete.'
  116                ),
  117 
  118     cfg.IntOpt('sf_volume_create_timeout',
  119                default=60,
  120                min=30,
  121                help='Sets time in seconds to wait for a create volume '
  122                     'operation to complete.')]
  123 
  124 
  125 CONF = cfg.CONF
  126 CONF.register_opts(sf_opts, group=configuration.SHARED_CONF_GROUP)
  127 
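# Illustrative example (not part of the upstream driver): a minimal cinder.conf
# backend section wiring these options to the driver might look like the
# following; addresses and credentials are placeholders.
#
#   [solidfire]
#   volume_driver = cinder.volume.drivers.solidfire.SolidFireDriver
#   volume_backend_name = solidfire
#   san_ip = 10.10.10.10
#   san_login = admin
#   san_password = secret
#   sf_account_prefix = hostname
#   sf_allow_tenant_qos = False
#   sf_api_port = 443
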
  128 # SolidFire API Error Constants
  129 xExceededLimit = 'xExceededLimit'
  130 xAlreadyInVolumeAccessGroup = 'xAlreadyInVolumeAccessGroup'
  131 xVolumeAccessGroupIDDoesNotExist = 'xVolumeAccessGroupIDDoesNotExist'
  132 xNotInVolumeAccessGroup = 'xNotInVolumeAccessGroup'
  133 
  134 
  135 class DuplicateSfVolumeNames(exception.Duplicate):
  136     message = _("Detected more than one volume with name %(vol_name)s")
  137 
  138 
  139 class SolidFireAPIException(exception.VolumeBackendAPIException):
  140     message = _("Bad response from SolidFire API")
  141 
  142 
  143 class SolidFireDriverException(exception.VolumeDriverException):
  144     message = _("SolidFire Cinder Driver exception")
  145 
  146 
  147 class SolidFireAPIDataException(SolidFireAPIException):
  148     message = _("Error in SolidFire API response: data=%(data)s")
  149 
  150 
  151 class SolidFireAccountNotFound(SolidFireDriverException):
  152     message = _("Unable to locate account %(account_name)s in "
  153                 "SolidFire cluster")
  154 
  155 
  156 class SolidFireVolumeNotFound(SolidFireDriverException):
  157     message = _("Unable to locate volume id %(volume_id)s in "
  158                 "SolidFire cluster")
  159 
  160 
  161 class SolidFireRetryableException(exception.VolumeBackendAPIException):
  162     message = _("Retryable SolidFire Exception encountered")
  163 
  164 
  165 class SolidFireReplicationPairingError(exception.VolumeBackendAPIException):
  166     message = _("Error on SF Keys")
  167 
  168 
  169 class SolidFireDataSyncTimeoutError(exception.VolumeBackendAPIException):
  170     message = _("Data sync volumes timed out")
  171 
  172 
  173 def retry(exc_tuple, tries=5, delay=1, backoff=2):
  174     def retry_dec(f):
  175         @six.wraps(f)
  176         def func_retry(*args, **kwargs):
  177             _tries, _delay = tries, delay
  178             while _tries > 1:
  179                 try:
  180                     return f(*args, **kwargs)
  181                 except exc_tuple:
  182                     time.sleep(_delay)
  183                     _tries -= 1
  184                     _delay *= backoff
  185                     LOG.debug('Retrying %(args)s, %(tries)s attempts '
  186                               'remaining...',
  187                               {'args': args, 'tries': _tries})
  188             # NOTE(jdg): Don't log the params passed here;
  189             # some cmds like createAccount will have sensitive
  190             # info in the params, so grab only the second element
  191             # of args, which should be the method
  192             msg = (_('Retry count exceeded for command: %s') %
  193                     (args[1],))
  194             LOG.error(msg)
  195             raise SolidFireAPIException(message=msg)
  196         return func_retry
  197     return retry_dec
  198 
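# Illustrative usage of the retry decorator above (hypothetical method, shown
# only to clarify the semantics): each exception from exc_tuple sleeps for
# `delay` seconds, multiplies the delay by `backoff`, and tries again until the
# attempts are exhausted, at which point SolidFireAPIException is raised.
#
#   @retry((SolidFireRetryableException,), tries=3, delay=1, backoff=2)
#   def _ping_cluster(self, params):
#       return self._issue_api_request('GetClusterInfo', params)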
  199 
  200 def locked_image_id_operation(f, external=False):
  201     def lvo_inner1(inst, *args, **kwargs):
  202         lock_tag = inst.driver_prefix
  203         call_args = inspect.getcallargs(f, inst, *args, **kwargs)
  204 
  205         if call_args.get('image_meta'):
  206             image_id = call_args['image_meta']['id']
  207         else:
  208             err_msg = _('The decorated method must accept image_meta.')
  209             raise exception.VolumeBackendAPIException(data=err_msg)
  210 
  211         @utils.synchronized('%s-%s' % (lock_tag, image_id),
  212                             external=external)
  213         def lvo_inner2():
  214             return f(inst, *args, **kwargs)
  215         return lvo_inner2()
  216     return lvo_inner1
  217 
  218 
  219 def locked_source_id_operation(f, external=False):
  220     def lvo_inner1(inst, *args, **kwargs):
  221         lock_tag = inst.driver_prefix
  222         call_args = inspect.getcallargs(f, inst, *args, **kwargs)
  223         src_arg = call_args.get('source', None)
  224         if src_arg and src_arg.get('id', None):
  225             source_id = call_args['source']['id']
  226         else:
  227             err_msg = _('The decorated method must accept a source argument.')
  228             raise exception.VolumeBackendAPIException(message=err_msg)
  229 
  230         @utils.synchronized('%s-%s' % (lock_tag, source_id),
  231                             external=external)
  232         def lvo_inner2():
  233             return f(inst, *args, **kwargs)
  234         return lvo_inner2()
  235     return lvo_inner1
  236 
  237 
  238 @interface.volumedriver
  239 class SolidFireDriver(san.SanISCSIDriver):
  240     """OpenStack driver to enable SolidFire cluster.
  241 
  242     .. code-block:: default
  243 
  244       Version history:
  245           1.0 - Initial driver
  246           1.1 - Refactor, clone support, qos by type and minor bug fixes
  247           1.2 - Add xfr and retype support
  248           1.2.1 - Add export/import support
  249           1.2.2 - Catch VolumeNotFound on accept xfr
  250           2.0.0 - Move from httplib to requests
  251           2.0.1 - Implement SolidFire Snapshots
  252           2.0.2 - Implement secondary account
  253           2.0.3 - Implement cluster pairing
  254           2.0.4 - Implement volume replication
  255           2.0.5 - Try and deal with the stupid retry/clear issues from objects
  256                   and tflow
  257           2.0.6 - Add a lock decorator around the clone_image method
  258           2.0.7 - Add scaled IOPS
  259           2.0.8 - Add active status filter to get volume ops
  260           2.0.9 - Always purge on delete volume
  261           2.0.10 - Add response to debug on retryable errors
  262           2.0.11 - Add ability to failback replicating volumes
  263           2.0.12 - Fix bug #1744005
  264           2.0.14 - Fix bug #1782588 qos settings on extend
  265           2.0.15 - Fix bug #1834013 NetApp SolidFire replication errors
  266           2.0.16 - Add options for replication mode (Async, Sync and
  267                    SnapshotsOnly)
  268           2.0.17 - Fix bug #1859653 SolidFire fails to failback when volume
  269                    service is restarted
  270           2.1.0  - Add Cinder Active/Active support
  271                     - Enable Active/Active support flag
  272                     - Implement Active/Active replication support
  273           2.2.0  - Add storage assisted volume migration support
  274           2.2.1  - Fix bug #1891914 fix error on cluster workload rebalancing
  275                    by adding xNotPrimary to the retryable exception list
  276           2.2.2  - Fix bug #1896112 SolidFire Driver creates duplicate volume
  277                    when API response is lost
  278     """
  279 
  280     VERSION = '2.2.2'
  281 
  282     SUPPORTS_ACTIVE_ACTIVE = True
  283 
  284     # ThirdPartySystems wiki page
  285     CI_WIKI_NAME = "NetApp_SolidFire_CI"
  286 
  287     driver_prefix = 'solidfire'
  288 
  289     sf_qos_dict = {'slow': {'minIOPS': 100,
  290                             'maxIOPS': 200,
  291                             'burstIOPS': 200},
  292                    'medium': {'minIOPS': 200,
  293                               'maxIOPS': 400,
  294                               'burstIOPS': 400},
  295                    'fast': {'minIOPS': 500,
  296                             'maxIOPS': 1000,
  297                             'burstIOPS': 1000},
  298                    'performant': {'minIOPS': 2000,
  299                                   'maxIOPS': 4000,
  300                                   'burstIOPS': 4000},
  301                    'off': None}
  302 
  303     sf_qos_keys = ['minIOPS', 'maxIOPS', 'burstIOPS']
  304     sf_scale_qos_keys = ['scaledIOPS', 'scaleMin', 'scaleMax', 'scaleBurst']
  305     sf_iops_lim_min = {'minIOPS': 100, 'maxIOPS': 100, 'burstIOPS': 100}
  306     sf_iops_lim_max = {'minIOPS': 15000,
  307                        'maxIOPS': 200000,
  308                        'burstIOPS': 200000}
  309     cluster_stats = {}
  310     retry_exc_tuple = (SolidFireRetryableException,
  311                        requests.exceptions.ConnectionError)
  312     retryable_errors = ['xDBVersionMismatch',
  313                         'xMaxSnapshotsPerVolumeExceeded',
  314                         'xMaxClonesPerVolumeExceeded',
  315                         'xMaxSnapshotsPerNodeExceeded',
  316                         'xMaxClonesPerNodeExceeded',
  317                         'xSliceNotRegistered',
  318                         'xNotReadyForIO',
  319                         'xNotPrimary']
  320 
  321     def __init__(self, *args, **kwargs):
  322         super(SolidFireDriver, self).__init__(*args, **kwargs)
  323         self.failed_over_id = kwargs.get('active_backend_id', None)
  324         self.replication_status = kwargs.get('replication_status', "na")
  325         self.configuration.append_config_values(sf_opts)
  326         self.template_account_id = None
  327         self.max_volumes_per_account = 1990
  328         self.volume_map = {}
  329         self.cluster_pairs = []
  330         self.replication_enabled = False
  331         self.failed_over = False
  332         self.verify_ssl = self.configuration.driver_ssl_cert_verify
  333         self.target_driver = SolidFireISCSI(solidfire_driver=self,
  334                                             configuration=self.configuration)
  335 
  336         self._check_replication_configs()
  337 
  338         # If we're failed over, we need to parse things out and set the active
  339         # cluster appropriately
  340         if self.failed_over_id:
  341             LOG.info("Running in failed-over mode. "
  342                      "Active backend-id: %s", self.failed_over_id)
  343 
  344             repl_target = self.configuration.get('replication_device', [])
  345 
  346             if not repl_target:
  347                 LOG.error('Failed to initialize SolidFire driver to '
  348                           'a remote cluster specified at id: %s',
  349                           self.failed_over_id)
  350                 raise SolidFireDriverException
  351 
  352             remote_endpoint = self._build_repl_endpoint_info(
  353                 **repl_target[0])
  354 
  355             self.active_cluster = self._create_cluster_reference(
  356                 remote_endpoint)
  357 
  358             self.failed_over = True
  359             self.replication_enabled = True
  360 
  361         else:
  362             self.active_cluster = self._create_cluster_reference()
  363             if self.configuration.replication_device:
  364                 self._set_cluster_pairs()
  365                 self.replication_enabled = True
  366 
  367         LOG.debug("Active cluster: %s", self.active_cluster)
  368 
  369         # NOTE(jdg): This works even in a failed-over state, because
  370         # issue_api_request uses self.active_cluster by default, so we
  371         # always talk to the currently active cluster; override that by
  372         # providing an endpoint to issue_api_request if needed.
  373         try:
  374             self._update_cluster_status()
  375         except SolidFireAPIException:
  376             pass
  377 
  378     @classmethod
  379     def get_driver_options(cls):
  380         additional_opts = cls._get_oslo_driver_opts(
  381             'san_ip', 'san_login', 'san_password', 'driver_ssl_cert_verify',
  382             'replication_device', 'reserved_percentage',
  383             'max_over_subscription_ratio')
  384         return sf_opts + additional_opts
  385 
  386     def _init_vendor_properties(self):
  387         properties = {}
  388         self._set_property(
  389             properties,
  390             "solidfire:replication_mode",
  391             "Replication mode",
  392             _("Specifies replication mode."),
  393             "string",
  394             enum=["Async", "Sync", "SnapshotsOnly"])
  395 
  396         return properties, 'solidfire'
  397 
  398     def __getattr__(self, attr):
  399         if hasattr(self.target_driver, attr):
  400             return getattr(self.target_driver, attr)
  401         else:
  402             msg = _('Attribute: %s not found.') % attr
  403             raise NotImplementedError(msg)
  404 
  405     def _get_remote_info_by_id(self, backend_id):
  406         remote_info = None
  407         for rd in self.configuration.get('replication_device', []):
  408             if rd.get('backend_id', None) == backend_id:
  409                 remote_endpoint = self._build_endpoint_info(**rd)
  410                 remote_info = self._get_cluster_info(remote_endpoint)
  411                 remote_info['endpoint'] = remote_endpoint
  412                 if not remote_info['endpoint']['svip']:
  413                     remote_info['endpoint']['svip'] = (
  414                         remote_info['svip'] + ':3260')
  415         return remote_info
  416 
  417     def _create_remote_pairing(self, remote_device):
              # Pre-set pair_id so the debug log below is safe when the
              # pairing already exists and CompleteClusterPairing never runs.
              pair_id = None
  418         try:
  419             pairing_info = self._issue_api_request('StartClusterPairing',
  420                                                    {}, version='8.0')['result']
  421             pair_id = self._issue_api_request(
  422                 'CompleteClusterPairing',
  423                 {'clusterPairingKey': pairing_info['clusterPairingKey']},
  424                 version='8.0',
  425                 endpoint=remote_device['endpoint'])['result']['clusterPairID']
  426         except SolidFireAPIException as ex:
  427             if 'xPairingAlreadyExists' in ex.msg:
  428                 LOG.debug('Pairing already exists during init.')
  429             else:
  430                 with excutils.save_and_reraise_exception():
  431                     LOG.error('Cluster pairing failed: %s', ex.msg)
  432         LOG.debug('Initialized Cluster pair with ID: %s', pair_id)
  433 
  434         return pair_id
  435 
  436     def _get_cluster_info(self, remote_endpoint):
  437         try:
  438             return self._issue_api_request(
  439                 'GetClusterInfo', {},
  440                 endpoint=remote_endpoint)['result']['clusterInfo']
  441         except SolidFireAPIException:
  442             msg = _("Replication device is unreachable!")
  443             LOG.exception(msg)
  444             raise
  445 
  446     def _check_replication_configs(self):
  447         repl_configs = self.configuration.replication_device
  448         if not repl_configs:
  449             return
  450 
  451         # We only support one replication target. Check whether the user
  452         # is trying to add more than one.
  453         if len(repl_configs) > 1:
  454             msg = _("SolidFire driver only supports one replication target "
  455                     "device.")
  456             LOG.error(msg)
  457             raise SolidFireDriverException(msg)
  458 
  459         repl_configs = repl_configs[0]
  460 
  461         # Check that the user is not using the same MVIP for both the
  462         # source and the replication target.
  463         if repl_configs['mvip'] == self.configuration.san_ip:
  464             msg = _("Source mvip cannot be the same "
  465                     "as the replication target.")
  466             LOG.error(msg)
  467             raise SolidFireDriverException(msg)
  468 
  469     def _set_cluster_pairs(self):
  470 
  471         repl_configs = self.configuration.replication_device[0]
  472         remote_endpoint = self._build_repl_endpoint_info(**repl_configs)
  473         remote_cluster = self._create_cluster_reference(remote_endpoint)
  474         remote_cluster['backend_id'] = repl_configs['backend_id']
  475 
  476         cluster_pair = self._get_or_create_cluster_pairing(
  477             remote_cluster, check_connected=True)
  478         remote_cluster['clusterPairID'] = cluster_pair['clusterPairID']
  479 
  480         if self.cluster_pairs:
  481             self.cluster_pairs.clear()
  482         self.cluster_pairs.append(remote_cluster)
  483 
  484     def _get_cluster_pair(self, remote_cluster):
  485 
  486         existing_pairs = self._issue_api_request(
  487             'ListClusterPairs', {}, version='8.0')['result']['clusterPairs']
  488 
  489         LOG.debug("Existing cluster pairs: %s", existing_pairs)
  490 
  491         remote_pair = None
  492         for ep in existing_pairs:
  493             if remote_cluster['mvip'] == ep['mvip']:
  494                 remote_pair = ep
  495                 LOG.debug("Found remote pair: %s", remote_pair)
  496                 break
  497 
  498         return remote_pair
  499 
  500     def _get_or_create_cluster_pairing(self, remote_cluster,
  501                                        check_connected=False):
  502 
  503         # FIXME(sfernand): We check for pairs only in the remote cluster.
  504         #  This is an issue if a pair exists only in destination cluster.
  505         remote_pair = self._get_cluster_pair(remote_cluster)
  506 
  507         if not remote_pair:
  508             LOG.debug("Setting up new cluster pairs.")
  509             self._create_remote_pairing(remote_cluster)
  510             remote_pair = self._get_cluster_pair(remote_cluster)
  511 
  512         if check_connected:
  513             if not remote_pair:
  514                 msg = (_("Cluster pair not found for cluster [%s]") %
  515                        remote_cluster['mvip'])
  516                 raise SolidFireReplicationPairingError(message=msg)
  517 
  518             if remote_pair['status'] == 'Connected':
  519                 return remote_pair
  520 
  521             def _wait_cluster_pairing_connected():
  522                 pair = self._get_cluster_pair(remote_cluster)
  523                 if pair and pair['status'] == 'Connected':
  524                     raise loopingcall.LoopingCallDone(pair)
  525 
  526             try:
  527                 timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(
  528                     _wait_cluster_pairing_connected)
  529                 remote_pair = timer.start(
  530                     interval=3,
  531                     timeout=self.configuration.sf_cluster_pairing_timeout) \
  532                     .wait()
  533 
  534             except loopingcall.LoopingCallTimeOut:
  535                 msg = _("Cluster pair not found or in an invalid state.")
  536                 raise SolidFireReplicationPairingError(message=msg)
  537 
  538         return remote_pair
  539 
  540     def _create_cluster_reference(self, endpoint=None):
  541         cluster_ref = {}
  542         cluster_ref['endpoint'] = endpoint
  543         if not endpoint:
  544             cluster_ref['endpoint'] = self._build_endpoint_info()
  545 
  546         cluster_info = (self._issue_api_request(
  547             'GetClusterInfo', {}, endpoint=cluster_ref['endpoint'])
  548             ['result']['clusterInfo'])
  549 
  550         for k, v in cluster_info.items():
  551             cluster_ref[k] = v
  552 
  553         # Add a couple extra things that are handy for us
  554         cluster_ref['clusterAPIVersion'] = (
  555             self._issue_api_request('GetClusterVersionInfo',
  556                                     {}, endpoint=cluster_ref['endpoint'])
  557             ['result']['clusterAPIVersion'])
  558 
  559         # NOTE(sfernand): If a custom svip is configured, we update the
  560         # default storage ip to the configuration value.
  561         # Otherwise, we update endpoint info with the default storage ip
  562         # retrieved from GetClusterInfo API call.
  563         svip = cluster_ref['endpoint'].get('svip')
  564 
  565         if not svip:
  566             svip = cluster_ref['svip']
  567 
  568         if ':' not in svip:
  569             svip += ':3260'
  570 
  571         cluster_ref['svip'] = svip
  572         cluster_ref['endpoint']['svip'] = svip
  573 
  574         return cluster_ref
  575 
  576     def _set_active_cluster(self, endpoint=None):
  577         if not endpoint:
  578             self.active_cluster['endpoint'] = self._build_endpoint_info()
  579         else:
  580             self.active_cluster['endpoint'] = endpoint
  581 
  582         for k, v in self._issue_api_request(
  583                 'GetClusterInfo',
  584                 {})['result']['clusterInfo'].items():
  585             self.active_cluster[k] = v
  586 
  587         # Add a couple extra things that are handy for us
  588         self.active_cluster['clusterAPIVersion'] = (
  589             self._issue_api_request('GetClusterVersionInfo',
  590                                     {})['result']['clusterAPIVersion'])
  591         if self.configuration.get('sf_svip', None):
  592             self.active_cluster['svip'] = (
  593                 self.configuration.get('sf_svip'))
  594 
  595     def _create_provider_id_string(self,
  596                                    resource_id,
  597                                    account_or_vol_id):
  598         # NOTE(jdg): We use the same format, but in the case
  599         # of snapshots, we don't have an account id, we instead
  600         # swap that with the parent volume id
  601         return "%s %s %s" % (resource_id,
  602                              account_or_vol_id,
  603                              self.active_cluster['uuid'])
  604 
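    # Example (illustrative values): a volume with SolidFire volumeID 352 owned
    # by accountID 18 on a cluster whose uuid is 'c9a7...' gets
    # provider_id = '352 18 c9a7...'; for snapshots the middle field is the
    # parent volumeID rather than an accountID.
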
  605     def _init_snapshot_mappings(self, srefs):
  606         updates = []
  607         sf_snaps = self._issue_api_request(
  608             'ListSnapshots', {}, version='6.0')['result']['snapshots']
  609         for s in srefs:
  610             seek_name = '%s%s' % (self.configuration.sf_volume_prefix, s['id'])
  611             sfsnap = next(
  612                 (ss for ss in sf_snaps if ss['name'] == seek_name), None)
  613             if sfsnap:
  614                 id_string = self._create_provider_id_string(
  615                     sfsnap['snapshotID'],
  616                     sfsnap['volumeID'])
  617                 if s.get('provider_id') != id_string:
  618                     updates.append(
  619                         {'id': s['id'],
  620                          'provider_id': id_string})
  621         return updates
  622 
  623     def _init_volume_mappings(self, vrefs):
  624         updates = []
  625         sf_vols = self._issue_api_request('ListActiveVolumes',
  626                                           {})['result']['volumes']
  627         self.volume_map = {}
  628         for v in vrefs:
  629             seek_name = '%s%s' % (self.configuration.sf_volume_prefix, v['id'])
  630             sfvol = next(
  631                 (sv for sv in sf_vols if sv['name'] == seek_name), None)
  632             if sfvol:
  633                 if v.get('provider_id', 'nil') != sfvol['volumeID']:
  634                     updates.append(
  635                         {'id': v['id'],
  636                          'provider_id': self._create_provider_id_string(
  637                              sfvol['volumeID'], sfvol['accountID'])})
  638 
  639         return updates
  640 
  641     def update_provider_info(self, vrefs, snaprefs):
  642         volume_updates = self._init_volume_mappings(vrefs)
  643         snapshot_updates = self._init_snapshot_mappings(snaprefs)
  644         return (volume_updates, snapshot_updates)
  645 
  646     def _build_repl_endpoint_info(self, **repl_device):
  647         endpoint = {
  648             'mvip': repl_device.get('mvip'),
  649             'login': repl_device.get('login'),
  650             'passwd': repl_device.get('password'),
  651             'port': repl_device.get('port', 443),
  652             'url': 'https://%s:%s' % (repl_device.get('mvip'),
  653                                       repl_device.get('port', 443)),
  654             'svip': repl_device.get('svip')
  655         }
  656         return endpoint
  657 
  658     def _build_endpoint_info(self, backend_conf=None, **kwargs):
  659         endpoint = {}
  660 
  661         if not backend_conf:
  662             backend_conf = self.configuration
  663 
  664         # NOTE(jdg): We default to the primary cluster config settings
  665         # but always check to see if desired settings were passed in
  666         # to handle things like replication targets with unique settings
  667         endpoint['mvip'] = (
  668             kwargs.get('mvip', backend_conf.san_ip))
  669         endpoint['login'] = (
  670             kwargs.get('login', backend_conf.san_login))
  671         endpoint['passwd'] = (
  672             kwargs.get('password', backend_conf.san_password))
  673         endpoint['port'] = (
  674             kwargs.get(('port'), backend_conf.sf_api_port))
  675         sanitized_mvip = volume_utils.sanitize_host(endpoint['mvip'])
  676         endpoint['url'] = 'https://%s:%s' % (sanitized_mvip,
  677                                              endpoint['port'])
  678         endpoint['svip'] = kwargs.get('svip', backend_conf.sf_svip)
  679         if not endpoint.get('mvip', None) and kwargs.get('backend_id', None):
  680             endpoint['mvip'] = kwargs.get('backend_id')
  681         return endpoint
  682 
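    # Example (illustrative values): with san_ip = '10.10.10.10' and the
    # default sf_api_port, the dict built above looks roughly like
    #   {'mvip': '10.10.10.10', 'login': 'admin', 'passwd': '***',
    #    'port': 443, 'url': 'https://10.10.10.10:443', 'svip': None}
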
  683     @retry(retry_exc_tuple, tries=6)
  684     def _issue_api_request(self, method, params, version='1.0',
  685                            endpoint=None, timeout=None):
  686         if params is None:
  687             params = {}
  688         if endpoint is None:
  689             endpoint = self.active_cluster['endpoint']
  690         if not timeout:
  691             timeout = self.configuration.sf_api_request_timeout
  692 
  693         payload = {'method': method, 'params': params}
  694         url = '%s/json-rpc/%s/' % (endpoint['url'], version)
  695         with warnings.catch_warnings():
  696             warnings.simplefilter(
  697                 "ignore",
  698                 requests.packages.urllib3.exceptions.InsecureRequestWarning)
  699             req = requests.post(url,
  700                                 data=json.dumps(payload),
  701                                 auth=(endpoint['login'], endpoint['passwd']),
  702                                 verify=self.verify_ssl,
  703                                 timeout=timeout)
  704         response = req.json()
  705         req.close()
  706         if (('error' in response) and
  707                 (response['error']['name'] in self.retryable_errors)):
  708             msg = ('Retryable error (%s) encountered during '
  709                    'SolidFire API call.' % response['error']['name'])
  710             LOG.debug(msg)
  711             LOG.debug("API response: %s", response)
  712 
  713             raise SolidFireRetryableException(message=msg)
  714 
  715         if (('error' in response) and
  716                 response['error']['name'] == 'xInvalidPairingKey'):
  717             LOG.debug("Error on volume pairing")
  718             raise SolidFireReplicationPairingError
  719 
  720         if 'error' in response:
  721             msg = _('API response: %s') % response
  722             raise SolidFireAPIException(msg)
  723 
  724         return response
  725 
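    # Example (illustrative): a GetClusterInfo call issued through the method
    # above POSTs a JSON-RPC payload such as
    #   {"method": "GetClusterInfo", "params": {}}
    # to https://<mvip>:<port>/json-rpc/<version>/ and expects a body holding
    # either a 'result' member or an 'error' member; retryable error names are
    # raised as SolidFireRetryableException (and retried by the decorator),
    # anything else raises SolidFireAPIException.
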
  726     def _get_volumes_by_sfaccount(self, account_id, endpoint=None):
  727         """Get all volumes on cluster for specified account."""
  728         params = {'accountID': account_id}
  729         return self._issue_api_request(
  730             'ListVolumesForAccount',
  731             params,
  732             endpoint=endpoint)['result']['volumes']
  733 
  734     def _get_volumes_for_account(self, sf_account_id, cinder_uuid=None,
  735                                  endpoint=None):
  736         # ListVolumesForAccount gives both Active and Deleted volumes;
  737         # we require the solidfire accountID, the uuid of the volume
  738         # is optional
  739         vols = self._get_volumes_by_sfaccount(sf_account_id, endpoint=endpoint)
  740         if cinder_uuid:
  741             vlist = [v for v in vols if
  742                      cinder_uuid in v['name']]
  743         else:
  744             vlist = [v for v in vols]
  745         vlist = sorted(vlist, key=lambda k: k['volumeID'])
  746         return vlist
  747 
  748     def _get_sfvol_by_cinder_vref(self, vref):
  749         # sfvols is one or more element objects returned from a list call
  750         # sfvol is the single volume object that will be returned or it will
  751         # be None
  752         sfvols = None
  753         sfvol = None
  754 
  755         provider_id = vref.get('provider_id', None)
  756         if provider_id:
  757             try:
  758                 sf_vid, sf_aid, sf_cluster_id = provider_id.split(' ')
  759             except ValueError:
  760                 LOG.warning("Invalid provider_id entry for volume: %s",
  761                             vref.id)
  762             else:
  763                 # So there shouldn't be any clusters out in the field that are
  764                 # running Element < 8.0, but just in case; we'll use a try
  765                 # block here and fall back to the old methods just to be safe
  766                 try:
  767                     sfvol = self._issue_api_request(
  768                         'ListVolumes',
  769                         {'startVolumeID': sf_vid,
  770                          'limit': 1},
  771                         version='8.0')['result']['volumes'][0]
  772                     # Bug 1782373 validate the list returned has what we asked
  773                     # for, check if there was no match
  774                     if sfvol['volumeID'] != int(sf_vid):
  775                         sfvol = None
  776                 except Exception:
  777                     pass
  778         if not sfvol:
  779             LOG.info("Failed to find volume by provider_id, "
  780                      "attempting ListVolumesForAccount")
  781             for account in self._get_sfaccounts_for_tenant(vref.project_id):
  782                 sfvols = self._issue_api_request(
  783                     'ListVolumesForAccount',
  784                     {'accountID': account['accountID']})['result']['volumes']
  785                 # Bug 1782373: match the single vref.id in case there is no
  786                 # provider_id; the call above returns a list for the account
  787                 for sfv in sfvols:
  788                     if sfv['attributes'].get('uuid', None) == vref.id:
  789                         sfvol = sfv
  790                         break
  791 
  792         return sfvol
  793 
  794     def _get_sfaccount_by_name(self, sf_account_name, endpoint=None):
  795         """Get SolidFire account object by name."""
  796         sfaccount = None
  797         params = {'username': sf_account_name}
  798         try:
  799             data = self._issue_api_request('GetAccountByName',
  800                                            params,
  801                                            endpoint=endpoint)
  802             if 'result' in data and 'account' in data['result']:
  803                 LOG.debug('Found solidfire account: %s', sf_account_name)
  804                 sfaccount = data['result']['account']
  805         except SolidFireAPIException as ex:
  806             if 'xUnknownAccount' in ex.msg:
  807                 return sfaccount
  808             else:
  809                 raise
  810         return sfaccount
  811 
  812     def _get_sf_account_name(self, project_id):
  813         """Build the SolidFire account name to use."""
  814         prefix = self.configuration.sf_account_prefix or ''
  815         if prefix == 'hostname':
  816             prefix = socket.gethostname()
  817         return '%s%s%s' % (prefix, '-' if prefix else '', project_id)
  818 
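    # Example (illustrative): with sf_account_prefix = 'hostname' on a node
    # named 'cinder-01', project id 'f2c1...' maps to the SolidFire account
    # name 'cinder-01-f2c1...'; with no prefix configured the account name is
    # simply the project id.
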
  819     def _get_sfaccount(self, project_id):
  820         sf_account_name = self._get_sf_account_name(project_id)
  821         sfaccount = self._get_sfaccount_by_name(sf_account_name)
  822         if sfaccount is None:
  823             raise SolidFireAccountNotFound(
  824                 account_name=sf_account_name)
  825 
  826         return sfaccount
  827 
  828     def _create_sfaccount(self, sf_account_name, endpoint=None):
  829         """Create account on SolidFire device if it doesn't already exist.
  830 
  831         We're first going to check if the account already exists, if it does
  832         just return it.  If not, then create it.
  833 
  834         """
  835 
  836         sfaccount = self._get_sfaccount_by_name(sf_account_name,
  837                                                 endpoint=endpoint)
  838         if sfaccount is None:
  839             LOG.debug('solidfire account: %s does not exist, creating it...',
  840                       sf_account_name)
  841             chap_secret = self._generate_random_string(12)
  842             params = {'username': sf_account_name,
  843                       'initiatorSecret': chap_secret,
  844                       'targetSecret': chap_secret,
  845                       'attributes': {}}
  846             self._issue_api_request('AddAccount', params,
  847                                     endpoint=endpoint)
  848             sfaccount = self._get_sfaccount_by_name(sf_account_name,
  849                                                     endpoint=endpoint)
  850 
  851         return sfaccount
  852 
  853     def _generate_random_string(self, length):
  854         """Generate a random string to use as the CHAP password."""
  855 
  856         return volume_utils.generate_password(
  857             length=length,
  858             symbolgroups=(string.ascii_uppercase + string.digits))
  859 
  860     def _build_connection_info(self, sfaccount, vol, endpoint=None):
  861         """Gets the connection info for specified account and volume."""
  862         if endpoint:
  863             iscsi_portal = endpoint['svip']
  864         else:
  865             iscsi_portal = self.active_cluster['svip']
  866 
  867         if ':' not in iscsi_portal:
  868             iscsi_portal += ':3260'
  869 
  870         chap_secret = sfaccount['targetSecret']
  871         vol_id = vol['volumeID']
  872         iqn = vol['iqn']
  873 
  874         conn_info = {
  875             # NOTE(john-griffith): SF volumes are always at lun 0
  876             'provider_location': ('%s %s %s' % (iscsi_portal, iqn, 0)),
  877             'provider_auth': ('CHAP %s %s' % (sfaccount['username'],
  878                                               chap_secret))
  879         }
  880 
  881         if not self.configuration.sf_emulate_512:
  882             conn_info['provider_geometry'] = ('%s %s' % (4096, 4096))
  883 
  884         conn_info['provider_id'] = (
  885             self._create_provider_id_string(vol_id, sfaccount['accountID']))
  886         return conn_info
  887 
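    # Example (illustrative values): for a volume exported behind SVIP
    # 10.10.5.2 with iqn 'iqn.2010-01.com.solidfire:abcd', the dict above
    # carries a provider_location of
    # '10.10.5.2:3260 iqn.2010-01.com.solidfire:abcd 0' (SolidFire volumes are
    # always presented at LUN 0) and a provider_auth of the form
    # 'CHAP <account-username> <chap-secret>'.
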
  888     def _get_model_info(self, sfaccount, sf_volume_id, endpoint=None):
  889         volume = None
  890         volume_list = self._get_volumes_by_sfaccount(
  891             sfaccount['accountID'], endpoint=endpoint)
  892 
  893         for v in volume_list:
  894             if v['volumeID'] == sf_volume_id:
  895                 volume = v
  896                 break
  897 
  898         if not volume:
  899             LOG.error('Failed to retrieve volume SolidFire-'
  900                       'ID: %s in get_by_account!', sf_volume_id)
  901             raise exception.VolumeNotFound(volume_id=sf_volume_id)
  902 
  903         model_update = self._build_connection_info(sfaccount, volume,
  904                                                    endpoint=endpoint)
  905         return model_update
  906 
  907     def _snapshot_discovery(self, src_uuid, params, vref):
  908         # NOTE(jdg): First check the SF snapshots
  909         # if we don't find a snap by the given name, just move on to check
  910         # volumes.  This may be a running system that was updated from
  911         # before we did snapshots, so need to check both
  912         is_clone = False
  913         sf_vol = None
  914         snap_name = '%s%s' % (self.configuration.sf_volume_prefix, src_uuid)
  915         snaps = self._get_sf_snapshots()
  916         snap = next((s for s in snaps if s["name"] == snap_name), None)
  917         if snap:
  918             params['snapshotID'] = int(snap['snapshotID'])
  919             params['volumeID'] = int(snap['volumeID'])
  920             params['newSize'] = int(vref['size'] * units.Gi)
  921         else:
  922             sf_vol = self._get_sf_volume(src_uuid)
  923             if sf_vol is None:
  924                 raise exception.VolumeNotFound(volume_id=src_uuid)
  925             params['volumeID'] = int(sf_vol['volumeID'])
  926             params['newSize'] = int(vref['size'] * units.Gi)
  927             is_clone = True
  928         return params, is_clone, sf_vol
  929 
  930     def _do_clone_volume(self, src_uuid,
  931                          vref, sf_src_snap=None):
  932         """Create a clone of an existing volume or snapshot."""
  933 
  934         LOG.debug("Creating cloned volume from vol %(src)s to %(dst)s.",
  935                   {'src': src_uuid, 'dst': vref.id})
  936 
  937         sf_account = self._get_create_account(vref['project_id'])
  938         params = {'name': '%(prefix)s%(id)s' %
  939                           {'prefix': self.configuration.sf_volume_prefix,
  940                            'id': vref['id']},
  941                   'newAccountID': sf_account['accountID']}
  942 
  943         is_clone = False
  944         if sf_src_snap:
  945             # In some scenarios we are passed the snapshot information that we
  946             # are supposed to clone.
  947             params['snapshotID'] = sf_src_snap['snapshotID']
  948             params['volumeID'] = sf_src_snap['volumeID']
  949             params['newSize'] = int(vref['size'] * units.Gi)
  950         else:
  951             params, is_clone, sf_src_vol = self._snapshot_discovery(
  952                 src_uuid, params, vref)
  953         data = self._issue_api_request('CloneVolume', params, version='6.0')
  954         if (('result' not in data) or ('volumeID' not in data['result'])):
  955             msg = _("API response: %s") % data
  956             raise SolidFireAPIException(msg)
  957 
  958         sf_cloned_id = data['result']['volumeID']
  959 
  960         # NOTE(jdg): all attributes are copied via clone, need to do an update
  961         # to set any that were provided
  962         params = self._get_default_volume_params(vref, is_clone=is_clone)
  963         params['volumeID'] = sf_cloned_id
  964         data = self._issue_api_request('ModifyVolume', params)
  965 
  966         def _wait_volume_is_active():
  967             try:
  968                 model_info = self._get_model_info(sf_account, sf_cloned_id)
  969                 if model_info:
  970                     raise loopingcall.LoopingCallDone(model_info)
  971             except exception.VolumeNotFound:
  972                 LOG.debug('Waiting for cloned volume [%s] - [%s] to become '
  973                           'active', sf_cloned_id, vref.id)
  974                 pass
  975 
  976         try:
  977             timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(
  978                 _wait_volume_is_active)
  979             model_update = timer.start(
  980                 interval=1,
  981                 timeout=self.configuration.sf_volume_clone_timeout).wait()
  982         except loopingcall.LoopingCallTimeOut:
  983             msg = (_('Failed to get model update from clone [%s] - [%s]') %
  984                    (sf_cloned_id, vref.id))
  985             LOG.error(msg)
  986             raise SolidFireAPIException(msg)
  987 
  988         rep_settings = self._retrieve_replication_settings(vref)
  989         if self.replication_enabled and rep_settings:
  990             try:
  991                 vref['volumeID'] = sf_cloned_id
  992                 rep_updates = self._replicate_volume(
  993                     vref, params, sf_account, rep_settings)
  994                 model_update.update(rep_updates)
  995             except SolidFireDriverException:
  996                 with excutils.save_and_reraise_exception():
  997                     self._issue_api_request('DeleteVolume',
  998                                             {'volumeID': sf_cloned_id})
  999                     self._issue_api_request('PurgeDeletedVolume',
 1000                                             {'volumeID': sf_cloned_id})
 1001         # Increment the usage count, just for data collection
 1002         # We're only doing this for clones, not create_from snaps
 1003         if is_clone:
 1004             data = self._update_attributes(sf_src_vol)
 1005         return (data, sf_account, model_update)
 1006 
 1007     def _update_attributes(self, sf_vol):
 1008         cloned_count = sf_vol['attributes'].get('cloned_count', 0)
 1009         cloned_count += 1
 1010         attributes = sf_vol['attributes']
 1011         attributes['cloned_count'] = cloned_count
 1012 
 1013         params = {'volumeID': int(sf_vol['volumeID'])}
 1014         params['attributes'] = attributes
 1015         return self._issue_api_request('ModifyVolume', params)
 1016 
 1017     def _list_volumes_by_name(self, sf_volume_name):
 1018         params = {'volumeName': sf_volume_name}
 1019         return self._issue_api_request(
 1020             'ListVolumes', params, version='8.0')['result']['volumes']
 1021 
 1022     def _wait_volume_is_active(self, sf_volume_name):
 1023 
 1024         def _wait():
 1025             volumes = self._list_volumes_by_name(sf_volume_name)
 1026             if volumes:
 1027                 LOG.debug("Found Volume [%s] in SolidFire backend. "
 1028                           "Current status is [%s].",
 1029                           sf_volume_name, volumes[0]['status'])
 1030                 if volumes[0]['status'] == 'active':
 1031                     raise loopingcall.LoopingCallDone(volumes[0])
 1032 
 1033         try:
 1034             timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(
 1035                 _wait)
 1036             sf_volume = (timer.start(
 1037                 interval=1,
 1038                 timeout=self.configuration.sf_volume_create_timeout).wait())
 1039 
 1040             return sf_volume
 1041         except loopingcall.LoopingCallTimeOut:
 1042             msg = ("Timeout while waiting for volume [%s] "
 1043                    "to be in active state." % sf_volume_name)
 1044             LOG.error(msg)
 1045             raise SolidFireAPIException(msg)
 1046 
 1047     def _do_volume_create(self, sf_account, params, endpoint=None):
 1048 
 1049         sf_volume_name = params['name']
 1050         volumes_found = self._list_volumes_by_name(sf_volume_name)
 1051         if volumes_found:
 1052             msg = ('Volume name [%s] already exists '
 1053                    'in SolidFire backend.') % sf_volume_name
 1054             LOG.error(msg)
 1055             raise DuplicateSfVolumeNames(message=msg)
 1056 
 1057         sf_volid = None
 1058         try:
 1059             params['accountID'] = sf_account['accountID']
 1060             response = self._issue_api_request(
 1061                 'CreateVolume', params, endpoint=endpoint)
 1062             sf_volid = response['result']['volumeID']
 1063 
 1064         except requests.exceptions.ReadTimeout:
 1065             LOG.debug("Read Timeout exception caught while creating "
 1066                       "volume [%s].", sf_volume_name)
 1067             # Check if volume was created for the given name,
 1068             # in case the backend has processed the request but failed
 1069             # to deliver the response before api request timeout.
 1070             volume_created = self._wait_volume_is_active(sf_volume_name)
 1071             sf_volid = volume_created['volumeID']
 1072 
 1073         return self._get_model_info(sf_account, sf_volid, endpoint=endpoint)
 1074 
 1075     def _do_snapshot_create(self, params):
 1076         model_update = {}
 1077         snapshot_id = self._issue_api_request(
 1078             'CreateSnapshot', params, version='6.0')['result']['snapshotID']
 1079         snaps = self._get_sf_snapshots()
 1080         snap = (
 1081             next((s for s in snaps if int(s["snapshotID"]) ==
 1082                   int(snapshot_id)), None))
 1083         model_update['provider_id'] = (
 1084             self._create_provider_id_string(snap['snapshotID'],
 1085                                             snap['volumeID']))
 1086         return model_update
 1087 
 1088     def _set_qos_presets(self, volume):
 1089         qos = {}
 1090         valid_presets = self.sf_qos_dict.keys()
 1091 
 1092         # First look to see if they included a preset
 1093         presets = [i.value for i in volume.get('volume_metadata')
 1094                    if i.key == 'sf-qos' and i.value in valid_presets]
 1095         if len(presets) > 0:
 1096             if len(presets) > 1:
 1097                 LOG.warning('More than one valid preset was '
 1098                             'detected, using %s', presets[0])
 1099             qos = self.sf_qos_dict[presets[0]]
 1100         else:
 1101             # look for explicit settings
 1102             for i in volume.get('volume_metadata'):
 1103                 if i.key in self.sf_qos_keys:
 1104                     qos[i.key] = int(i.value)
 1105         return qos
 1106 
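    # Example (illustrative): a volume created with metadata 'sf-qos': 'fast'
    # picks up the preset above, i.e. {'minIOPS': 500, 'maxIOPS': 1000,
    # 'burstIOPS': 1000}; explicit per-key metadata such as 'minIOPS': '600'
    # is only consulted when no valid preset is present.
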
 1107     def _extract_sf_attributes_from_extra_specs(self, type_id):
 1108         # This will do a 1:1 copy of the extra spec keys that
 1109         # include the SolidFire delimiter into a Volume attribute
 1110         # K/V pair
 1111         ctxt = context.get_admin_context()
 1112         volume_type = volume_types.get_volume_type(ctxt, type_id)
 1113         specs = volume_type.get('extra_specs')
 1114         sf_keys = []
 1115         for key, value in specs.items():
 1116             if "SFAttribute:" in key:
 1117                 fields = key.split(':')
 1118                 sf_keys.append({fields[1]: value})
 1119         return sf_keys
 1120 
 1121     def _set_qos_by_volume_type(self, ctxt, type_id, vol_size):
 1122         qos = {}
 1123         scale_qos = {}
 1124         volume_type = volume_types.get_volume_type(ctxt, type_id)
 1125         qos_specs_id = volume_type.get('qos_specs_id')
 1126         specs = volume_type.get('extra_specs')
 1127 
 1128         # NOTE(jdg): We prefer the qos_specs association
 1129         # and override any existing
 1130         # extra-specs settings if present
 1131         if qos_specs_id is not None:
 1132             # Policy changes require admin context to get QoS specs
 1133             # at the object layer (base:get_by_id), we can either
 1134             # explicitly promote here, or pass in a context of None
 1135             # and let the qos_specs api get an admin context for us;
 1136             # personally I prefer explicit, so here ya go.
 1137             admin_ctxt = context.get_admin_context()
 1138             kvs = qos_specs.get_qos_specs(admin_ctxt, qos_specs_id)['specs']
 1139         else:
 1140             kvs = specs
 1141 
 1142         for key, value in kvs.items():
 1143             if ':' in key:
 1144                 fields = key.split(':')
 1145                 key = fields[1]
 1146             if key in self.sf_qos_keys:
 1147                 qos[key] = int(value)
 1148             if key in self.sf_scale_qos_keys:
 1149                 scale_qos[key] = value
 1150 
 1151         # look for the 'scaledIOPS' key and scale QoS if set
 1152         if 'scaledIOPS' in scale_qos:
 1153             scale_qos.pop('scaledIOPS')
 1154             for key, value in scale_qos.items():
 1155                 if key == 'scaleMin':
 1156                     qos['minIOPS'] = (qos['minIOPS'] +
 1157                                       (int(value) * (vol_size - 1)))
 1158                 elif key == 'scaleMax':
 1159                     qos['maxIOPS'] = (qos['maxIOPS'] +
 1160                                       (int(value) * (vol_size - 1)))
 1161                 elif key == 'scaleBurst':
 1162                     qos['burstIOPS'] = (qos['burstIOPS'] +
 1163                                         (int(value) * (vol_size - 1)))
 1164         # Cap the IOPS values at their limits
 1165         capped = False
 1166         for key, value in qos.items():
 1167             if value > self.sf_iops_lim_max[key]:
 1168                 qos[key] = self.sf_iops_lim_max[key]
 1169                 capped = True
 1170             if value < self.sf_iops_lim_min[key]:
 1171                 qos[key] = self.sf_iops_lim_min[key]
 1172                 capped = True
 1173         if capped:
 1174             LOG.debug("A SolidFire QoS value was capped at the defined limits")
 1175         # Check that minIOPS <= maxIOPS <= burstIOPS
 1176         if (qos.get('minIOPS', 0) > qos.get('maxIOPS', 0) or
 1177                 qos.get('maxIOPS', 0) > qos.get('burstIOPS', 0)):
 1178             msg = (_("Scaled QoS error. Must be minIOPS <= maxIOPS <= "
 1179                      "burstIOPS. Currently: Min: %(min)s, Max: "
 1180                      "%(max)s, Burst: %(burst)s.") %
 1181                    {"min": qos['minIOPS'],
 1182                     "max": qos['maxIOPS'],
 1183                     "burst": qos['burstIOPS']})
 1184             raise exception.InvalidQoSSpecs(reason=msg)
 1185         return qos
 1186 
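    # Worked example (illustrative numbers): qos specs of minIOPS=100,
    # maxIOPS=200, burstIOPS=300 with scaledIOPS set and scaleMin=10,
    # scaleMax=20, scaleBurst=30, applied to a 5 GB volume, yield
    #   minIOPS   = 100 + 10 * (5 - 1) = 140
    #   maxIOPS   = 200 + 20 * (5 - 1) = 280
    #   burstIOPS = 300 + 30 * (5 - 1) = 420
    # which are then capped to the sf_iops_lim_min/max bounds and validated
    # against minIOPS <= maxIOPS <= burstIOPS.
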
 1187     def _get_sf_volume(self, uuid, params=None, endpoint=None):
 1188         if params:
 1189             vols = [v for v in self._issue_api_request(
 1190                 'ListVolumesForAccount',
 1191                 params)['result']['volumes'] if v['status'] == "active"]
 1192         else:
 1193             vols = self._issue_api_request(
 1194                 'ListActiveVolumes', params,
 1195                 endpoint=endpoint)['result']['volumes']
 1196 
 1197         found_count = 0
 1198         sf_volref = None
 1199         for v in vols:
 1200             # NOTE(jdg): In the case of "name" we can't
 1201             # update that on manage/import, so we use
 1202             # the uuid attribute
 1203             meta = v.get('attributes')
 1204             alt_id = ''
 1205             if meta:
 1206                 alt_id = meta.get('uuid', '')
 1207 
 1208             if uuid in v['name'] or uuid in alt_id:
 1209                 found_count += 1
 1210                 sf_volref = v
 1211                 LOG.debug("Mapped SolidFire volumeID %(volume_id)s "
 1212                           "to cinder ID %(uuid)s.",
 1213                           {'volume_id': v['volumeID'], 'uuid': uuid})
 1214 
 1215         if found_count == 0:
 1216             # NOTE(jdg): Previously we would raise here, but there are cases
 1217             # where this might be a cleanup for a failed delete.
 1218             # Until we get better states we'll just log an error.
 1219             LOG.error("Volume %s not found on SF Cluster.", uuid)
 1220 
 1221         if found_count > 1:
 1222             LOG.error("Found %(count)s volumes mapped to id: %(uuid)s.",
 1223                       {'count': found_count,
 1224                        'uuid': uuid})
 1225             raise DuplicateSfVolumeNames(vol_name=uuid)
 1226 
 1227         return sf_volref
 1228 
 1229     def _get_sf_snapshots(self, sf_volid=None):
 1230         params = {}
 1231         if sf_volid:
 1232             params = {'volumeID': sf_volid}
 1233         return self._issue_api_request(
 1234             'ListSnapshots', params, version='6.0')['result']['snapshots']
 1235 
 1236     def _get_sfaccounts_for_tenant(self, cinder_project_id, endpoint=None):
 1237         accounts = self._issue_api_request(
 1238             'ListAccounts', {}, endpoint=endpoint)['result']['accounts']
 1239 
 1240         # Note(jdg): On SF we map account-name to OpenStack's tenant ID
 1241         # Note(jdg): On SF we map account-name to OpenStack's tenant ID;
 1242         # we use tenantID in here to get secondaries that might exist.
 1243         # in the list
 1244         return sorted([acc for acc in accounts
 1245                        if self._get_sf_account_name(cinder_project_id) in
 1246                        acc['username']],
 1247                       key=lambda k: k['accountID'])
 1248 
 1249     def _get_all_active_volumes(self, cinder_uuid=None):
 1250         params = {}
 1251         volumes = self._issue_api_request('ListActiveVolumes',
 1252                                           params)['result']['volumes']
 1253         if cinder_uuid:
 1254             vols = ([v for v in volumes if
 1255                      cinder_uuid in v['name']])
 1256         else:
 1257             vols = [v for v in volumes]
 1258 
 1259         return vols
 1260 
 1261     def _get_all_deleted_volumes(self, cinder_uuid=None):
 1262         params = {}
 1263         vols = self._issue_api_request('ListDeletedVolumes',
 1264                                        params)['result']['volumes']
 1265         if cinder_uuid:
 1266             deleted_vols = ([v for v in vols if
 1267                              cinder_uuid in v['name']])
 1268         else:
 1269             deleted_vols = [v for v in vols]
 1270         return deleted_vols
 1271 
 1272     def _get_account_create_availability(self, accounts, endpoint=None):
 1273         # we'll check both the primary and the secondary
 1274         # if it exists and return whichever one has count
 1275         # available.
 1276         for acc in accounts:
 1277             if len(self._get_volumes_for_account(
 1278                     acc['accountID'],
 1279                     endpoint=endpoint)) < self.max_volumes_per_account:
 1280                 return acc
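              # If the only (primary) account is already full, create a
              # secondary account whose username is the primary's username
              # with a trailing '_' appended.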
 1281         if len(accounts) == 1:
 1282             sfaccount = self._create_sfaccount(accounts[0]['username'] + '_',
 1283                                                endpoint=endpoint)
 1284             return sfaccount
 1285         return None
 1286 
 1287     def _get_create_account(self, proj_id, endpoint=None):
 1288         # Retrieve SolidFire accountID to be used for creating volumes.
 1289         sf_accounts = self._get_sfaccounts_for_tenant(
 1290             proj_id, endpoint=endpoint)
 1291 
 1292         if not sf_accounts:
 1293             sf_account_name = self._get_sf_account_name(proj_id)
 1294             sf_account = self._create_sfaccount(
 1295                 sf_account_name, endpoint=endpoint)
 1296         else:
 1297             # Check availability for creates
 1298             sf_account = self._get_account_create_availability(
 1299                 sf_accounts, endpoint=endpoint)
 1300             if not sf_account:
 1301                 msg = _('Volumes/account exceeded on both primary and '
 1302                         'secondary SolidFire accounts.')
 1303                 raise SolidFireDriverException(msg)
 1304         return sf_account
 1305 
 1306     def _create_vag(self, iqn, vol_id=None):
 1307         """Create a volume access group (vag).
 1308 
 1309            Returns the vag_id.
 1310         """
 1311         vag_name = re.sub('[^0-9a-zA-Z]+', '-', iqn)
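              # For example, an IQN such as 'iqn.1993-08.org.debian:01:abcdef'
              # becomes 'iqn-1993-08-org-debian-01-abcdef' after sanitization.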
 1312         params = {'name': vag_name,
 1313                   'initiators': [iqn],
 1314                   'volumes': [vol_id],
 1315                   'attributes': {'openstack': True}}
 1316         try:
 1317             result = self._issue_api_request('CreateVolumeAccessGroup',
 1318                                              params,
 1319                                              version='7.0')
 1320             return result['result']['volumeAccessGroupID']
 1321         except SolidFireAPIException as error:
 1322             if xExceededLimit in error.msg:
 1323                 if iqn in error.msg:
 1324                     # Initiator double registered.
 1325                     return self._safe_create_vag(iqn, vol_id)
 1326                 else:
 1327                     # VAG limit reached. Purge and start over.
 1328                     self._purge_vags()
 1329                     return self._safe_create_vag(iqn, vol_id)
 1330             else:
 1331                 raise
 1332 
 1333     def _safe_create_vag(self, iqn, vol_id=None):
 1334         # Potential race condition with simultaneous volume attaches to the
 1335         # same host. To help avoid this, VAG creation makes a best attempt at
 1336         # finding and using an existing VAG.
 1337 
 1338         vags = self._get_vags_by_name(iqn)
 1339         if vags:
 1340             # Filter through the vags and find the one with matching initiator
 1341             vag = next((v for v in vags if iqn in v['initiators']), None)
 1342             if vag:
 1343                 return vag['volumeAccessGroupID']
 1344             else:
 1345                 # No matches, use the first result, add initiator IQN.
 1346                 vag_id = vags[0]['volumeAccessGroupID']
 1347                 return self._add_initiator_to_vag(iqn, vag_id)
 1348         return self._create_vag(iqn, vol_id)
 1349 
 1350     def _base_get_vags(self):
 1351         params = {}
 1352         vags = self._issue_api_request(
 1353             'ListVolumeAccessGroups',
 1354             params,
 1355             version='7.0')['result']['volumeAccessGroups']
 1356         return vags
 1357 
 1358     def _get_vags_by_name(self, iqn):
 1359         """Retrieve SolidFire volume access group objects by name.
 1360 
 1361            Returns an array of vags with a matching name value.
 1362            Returns an empty array if there are no matches.
 1363         """
 1364         vags = self._base_get_vags()
 1365         vag_name = re.sub('[^0-9a-zA-Z]+', '-', iqn)
 1366         matching_vags = [vag for vag in vags if vag['name'] == vag_name]
 1367         return matching_vags
 1368 
 1369     def _get_vags_by_volume(self, vol_id):
 1370         params = {"volumeID": vol_id}
 1371         vags = self._issue_api_request(
 1372             'GetVolumeStats',
 1373             params)['result']['volumeStats']['volumeAccessGroups']
 1374         return vags
 1375 
 1376     def _add_initiator_to_vag(self, iqn, vag_id):
 1377         # Added a vag_id return as there is a chance that we might have to
 1378         # create a new VAG if our target VAG is deleted underneath us.
 1379         params = {"initiators": [iqn],
 1380                   "volumeAccessGroupID": vag_id}
 1381         try:
 1382             self._issue_api_request('AddInitiatorsToVolumeAccessGroup',
 1383                                     params,
 1384                                     version='7.0')
 1385             return vag_id
 1386         except SolidFireAPIException as error:
 1387             if xAlreadyInVolumeAccessGroup in error.msg:
 1388                 return vag_id
 1389             elif xVolumeAccessGroupIDDoesNotExist in error.msg:
 1390                 # No locking means sometimes a VAG can be removed by a parallel
 1391                 # volume detach against the same host.
 1392                 return self._safe_create_vag(iqn)
 1393             else:
 1394                 raise
 1395 
 1396     def _add_volume_to_vag(self, vol_id, iqn, vag_id):
 1397         # Added a vag_id return to be consistent with add_initiator_to_vag. It
 1398         # isn't necessary but may be helpful in the future.
 1399         params = {"volumeAccessGroupID": vag_id,
 1400                   "volumes": [vol_id]}
 1401         try:
 1402             self._issue_api_request('AddVolumesToVolumeAccessGroup',
 1403                                     params,
 1404                                     version='7.0')
 1405             return vag_id
 1406 
 1407         except SolidFireAPIException as error:
 1408             if xAlreadyInVolumeAccessGroup in error.msg:
 1409                 return vag_id
 1410             elif xVolumeAccessGroupIDDoesNotExist in error.msg:
 1411                 return self._safe_create_vag(iqn, vol_id)
 1412             else:
 1413                 raise
 1414 
 1415     def _remove_volume_from_vag(self, vol_id, vag_id):
 1416         params = {"volumeAccessGroupID": vag_id,
 1417                   "volumes": [vol_id]}
 1418         try:
 1419             self._issue_api_request('RemoveVolumesFromVolumeAccessGroup',
 1420                                     params,
 1421                                     version='7.0')
 1422         except SolidFireAPIException as error:
 1423             if xNotInVolumeAccessGroup in error.msg:
 1424                 pass
 1425             elif xVolumeAccessGroupIDDoesNotExist in error.msg:
 1426                 pass
 1427             else:
 1428                 raise
 1429 
 1430     def _remove_volume_from_vags(self, vol_id):
 1431         # Due to all sorts of uncertainty around multiattach, on volume
 1432         # deletion we make a best attempt at removing the vol_id from VAGs.
 1433         vags = self._get_vags_by_volume(vol_id)
 1434         for vag in vags:
 1435             self._remove_volume_from_vag(vol_id, vag['volumeAccessGroupID'])
 1436 
 1437     def _remove_vag(self, vag_id):
 1438         params = {"volumeAccessGroupID": vag_id}
 1439         try:
 1440             self._issue_api_request('DeleteVolumeAccessGroup',
 1441                                     params,
 1442                                     version='7.0')
 1443         except SolidFireAPIException as error:
 1444             if xVolumeAccessGroupIDDoesNotExist not in error.msg:
 1445                 raise
 1446 
 1447     def _purge_vags(self, limit=10):
 1448         # Purge up to limit number of VAGs that have no active volumes,
 1449         # initiators, and an OpenStack attribute. Purge oldest VAGs first.
 1450         vags = self._base_get_vags()
 1451         targets = [v for v in vags if v['volumes'] == [] and
 1452                    v['initiators'] == [] and
 1453                    v['deletedVolumes'] == [] and
 1454                    v['attributes'].get('openstack')]
 1455         sorted_targets = sorted(targets,
 1456                                 key=lambda k: k['volumeAccessGroupID'])
 1457         for vag in sorted_targets[:limit]:
 1458             self._remove_vag(vag['volumeAccessGroupID'])
 1459 
 1460     @locked_image_id_operation
 1461     def clone_image(self, context,
 1462                     volume, image_location,
 1463                     image_meta, image_service):
 1464         """Clone an existing image volume."""
 1465         public = False
 1466         # NOTE(jdg): Glance V2 moved from is_public to visibility
 1467         # so we check both, as we don't necessarily know or care
 1468         # which we're using.  We will need to look at future handling
 1469         # of things like shared and community visibility, but for now
 1470         # it's owner or public and that's it.
 1471         visibility = image_meta.get('visibility', None)
 1472         if visibility and visibility == 'public':
 1473             public = True
 1474         elif image_meta.get('is_public', False):
 1475             public = True
 1476         else:
 1477             if image_meta['owner'] == volume['project_id']:
 1478                 public = True
 1479         if not public:
 1480             LOG.warning("Requested image is not "
 1481                         "accessible by current Tenant.")
 1482             return None, False
 1483         # If we don't have the image-volume to clone from, return failure;
 1484         # the cinder driver will then create the source for the clone first.
 1485         try:
 1486             (data, sfaccount, model) = self._do_clone_volume(image_meta['id'],
 1487                                                              volume)
 1488         except exception.VolumeNotFound:
 1489             return None, False
 1490 
 1491         return model, True
 1492 
 1493     # extended_size > 0 when we are extending a volume
 1494     def _retrieve_qos_setting(self, volume, extended_size=0):
 1495         qos = {}
 1496         if (self.configuration.sf_allow_tenant_qos and
 1497                 volume.get('volume_metadata') is not None):
 1498             qos = self._set_qos_presets(volume)
 1499 
 1500         ctxt = context.get_admin_context()
 1501         type_id = volume.get('volume_type_id', None)
 1502         if type_id is not None:
 1503             qos = self._set_qos_by_volume_type(ctxt, type_id,
 1504                                                extended_size if extended_size
 1505                                                > 0 else volume.get('size'))
 1506         return qos
 1507 
 1508     def _get_default_volume_params(self, volume, sf_account=None,
 1509                                    is_clone=False):
 1510 
 1511         if not sf_account:
 1512             sf_account = self._get_create_account(volume.project_id)
 1513 
 1514         qos = self._retrieve_qos_setting(volume)
 1515 
 1516         create_time = volume.created_at.isoformat()
 1517         attributes = {
 1518             'uuid': volume.id,
 1519             'is_clone': is_clone,
 1520             'created_at': create_time,
 1521             'cinder-name': volume.get('display_name', "")
 1522         }
 1523 
 1524         if volume.volume_type_id:
 1525             for attr in self._extract_sf_attributes_from_extra_specs(
 1526                     volume.volume_type_id):
 1527                 for k, v in attr.items():
 1528                     attributes[k] = v
 1529 
 1530         vol_name = '%s%s' % (self.configuration.sf_volume_prefix, volume.id)
 1531         params = {'name': vol_name,
 1532                   'accountID': sf_account['accountID'],
 1533                   'sliceCount': 1,
 1534                   'totalSize': int(volume.size * units.Gi),
 1535                   'enable512e': self.configuration.sf_emulate_512,
 1536                   'attributes': attributes,
 1537                   'qos': qos}
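              # Illustrative result (assumed values): for a 1 GiB volume this
              # yields something like {'name': '<sf_volume_prefix><volume-id>',
              # 'accountID': 123, 'sliceCount': 1, 'totalSize': 1073741824,
              # 'enable512e': True, 'attributes': {...}, 'qos': {...}}.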
 1538 
 1539         return params
 1540 
 1541     def create_volume(self, volume):
 1542         """Create volume on SolidFire device.
 1543 
 1544         The account is where CHAP settings are derived from, and the
 1545         volume is created and exported.  Note that the new volume is
 1546         immediately ready for use.
 1547 
 1548         One caveat here is that an existing user account must be specified
 1549         in the API call to create a new volume.  We use a set algorithm to
 1550         determine account info based on the passed-in cinder volume
 1551         object.  First we check to see if the account already exists (and
 1552         use it); if it does not already exist, we'll go ahead and create it.
 1553 
 1554         """
 1555 
 1556         sf_account = self._get_create_account(volume['project_id'])
 1557         params = self._get_default_volume_params(volume, sf_account)
 1558 
 1559         # NOTE(jdg): Check if we're a migration tgt, if so
 1560         # use the old volume-id here for the SF Name
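              # migration_status on a migration target looks like
              # 'target:<source-volume-uuid>'; the split below reuses that
              # source UUID for the SolidFire volume name and 'uuid' attribute.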
 1561         migration_status = volume.get('migration_status', None)
 1562         if migration_status and 'target' in migration_status:
 1563             k, v = migration_status.split(':')
 1564             vname = '%s%s' % (self.configuration.sf_volume_prefix, v)
 1565             params['name'] = vname
 1566             params['attributes']['migration_uuid'] = volume['id']
 1567             params['attributes']['uuid'] = v
 1568 
 1569         model_update = self._do_volume_create(sf_account, params)
 1570         try:
 1571             rep_settings = self._retrieve_replication_settings(volume)
 1572             if self.replication_enabled and rep_settings:
 1573                 volume['volumeID'] = (
 1574                     int(model_update['provider_id'].split()[0]))
 1575                 rep_updates = self._replicate_volume(volume, params,
 1576                                                      sf_account, rep_settings)
 1577                 if rep_updates:
 1578                     model_update.update(rep_updates)
 1579 
 1580         except SolidFireAPIException:
 1581             # NOTE(jdg): Something went wrong after the source create, due to
 1582             # the way TFLOW works and its insistence on retrying the same
 1583             # command over and over coupled with the fact that the introduction
 1584             # of objects now sets host to None on failures we'll end up with an
 1585             # orphaned volume on the backend for every one of these segments
 1586             # that fail, for n-retries.  Sad Sad Panda!!  We'll just do it
 1587             # ourselves until we can get a general fix in Cinder further up the
 1588             # line
 1589             with excutils.save_and_reraise_exception():
 1590                 sf_volid = int(model_update['provider_id'].split()[0])
 1591                 self._issue_api_request('DeleteVolume', {'volumeID': sf_volid})
 1592                 self._issue_api_request('PurgeDeletedVolume',
 1593                                         {'volumeID': sf_volid})
 1594         return model_update
 1595 
 1596     def _retrieve_replication_settings(self, volume):
 1597         rep_data = "Async"
 1598         ctxt = context.get_admin_context()
 1599         type_id = volume.get('volume_type_id', None)
 1600         if type_id is not None:
 1601             rep_data = self._set_rep_by_volume_type(ctxt, type_id)
 1602         return rep_data
 1603 
 1604     def _set_rep_by_volume_type(self, ctxt, type_id):
 1605         rep_modes = ['Async', 'Sync', 'SnapshotsOnly']
 1606         rep_opts = {}
 1607         type_ref = volume_types.get_volume_type(ctxt, type_id)
 1608         specs = type_ref.get('extra_specs')
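              # Illustrative extra_specs that enable replication (values are
              # examples only):
              #   {'replication_enabled': '<is> True',
              #    'solidfire:replication_mode': 'Sync'}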
 1609         if specs.get('replication_enabled', "") == "<is> True":
 1610             if specs.get('solidfire:replication_mode') in rep_modes:
 1611                 rep_opts['rep_type'] = specs.get('solidfire:replication_mode')
 1612             else:
 1613                 rep_opts['rep_type'] = 'Async'
 1614 
 1615         return rep_opts
 1616 
 1617     def _create_volume_pairing(self, volume, dst_volume, tgt_cluster):
 1618 
 1619         src_sf_volid = int(volume['provider_id'].split()[0])
 1620         dst_sf_volid = int(dst_volume['provider_id'].split()[0])
 1621 
 1622         @retry(SolidFireReplicationPairingError, tries=6)
 1623         def _pair_volumes():
 1624             rep_type = "Sync"
 1625             # Enable volume pairing
 1626             LOG.debug("Starting pairing source volume ID: %s",
 1627                       src_sf_volid)
 1628 
 1629             # Make sure we split any pair the volume has
 1630             params = {
 1631                 'volumeID': src_sf_volid,
 1632                 'mode': rep_type
 1633             }
 1634 
 1635             self._issue_api_request('RemoveVolumePair', params, '8.0')
 1636 
 1637             rep_key = self._issue_api_request(
 1638                 'StartVolumePairing', params,
 1639                 '8.0')['result']['volumePairingKey']
 1640 
 1641             LOG.debug("Volume pairing started on source: "
 1642                       "%(endpoint)s",
 1643                       {'endpoint': tgt_cluster['endpoint']['url']})
 1644 
 1645             params = {
 1646                 'volumeID': dst_sf_volid,
 1647                 'volumePairingKey': rep_key
 1648             }
 1649 
 1650             self._issue_api_request('CompleteVolumePairing',
 1651                                     params,
 1652                                     '8.0',
 1653                                     endpoint=tgt_cluster['endpoint'])
 1654 
 1655             LOG.debug("Volume pairing completed on destination: "
 1656                       "%(endpoint)s",
 1657                       {'endpoint': tgt_cluster['endpoint']['url']})
 1658 
 1659         _pair_volumes()
 1660 
 1661     def _replicate_volume(self, volume, params,
 1662                           parent_sfaccount, rep_info):
 1663 
 1664         updates = {}
 1665         rep_success_status = fields.ReplicationStatus.ENABLED
 1666 
 1667         # NOTE(erlon): Right now we only support 1 remote target so, we always
 1668         # get cluster_pairs[0]
 1669         tgt_endpoint = self.cluster_pairs[0]['endpoint']
 1670         LOG.debug("Replicating volume on remote cluster: %(tgt)s\n params: "
 1671                   "%(params)s", {'tgt': tgt_endpoint, 'params': params})
 1672 
 1673         params['username'] = self._get_sf_account_name(volume['project_id'])
 1674         try:
 1675             params['initiatorSecret'] = parent_sfaccount['initiatorSecret']
 1676             params['targetSecret'] = parent_sfaccount['targetSecret']
 1677             self._issue_api_request(
 1678                 'AddAccount',
 1679                 params,
 1680                 endpoint=tgt_endpoint)['result']['accountID']
 1681         except SolidFireAPIException as ex:
 1682             if 'xDuplicateUsername' not in ex.msg:
 1683                 raise
 1684 
 1685         remote_account = (
 1686             self._get_sfaccount_by_name(params['username'],
 1687                                         endpoint=tgt_endpoint))
 1688 
 1689         # Create the volume on the remote cluster w/same params as original
 1690         params['accountID'] = remote_account['accountID']
 1691         LOG.debug("Create remote volume on: %(endpoint)s with account: "
 1692                   "%(account)s",
 1693                   {'endpoint': tgt_endpoint['url'], 'account': remote_account})
 1694         model_update = self._do_volume_create(
 1695             remote_account, params, endpoint=tgt_endpoint)
 1696 
 1697         tgt_sfid = int(model_update['provider_id'].split()[0])
 1698         params = {'volumeID': tgt_sfid, 'access': 'replicationTarget'}
 1699         self._issue_api_request('ModifyVolume',
 1700                                 params,
 1701                                 '8.0',
 1702                                 endpoint=tgt_endpoint)
 1703 
 1704         # NOTE(erlon): For some reason the SF cluster randomly fails the
 1705         # replication of volumes. The generated keys are deemed invalid by the
 1706         # target backend. When that happens, we re-start the volume pairing
 1707         # process.
 1708         @retry(SolidFireReplicationPairingError, tries=6)
 1709         def _pair_volumes():
 1710             # Enable volume pairing
 1711             LOG.debug("Start volume pairing on volume ID: %s",
 1712                       volume['volumeID'])
 1713 
 1714             # Make sure we split any pair the volume has
 1715             params = {'volumeID': volume['volumeID'],
 1716                       'mode': rep_info['rep_type']}
 1717             self._issue_api_request('RemoveVolumePair', params, '8.0')
 1718 
 1719             rep_key = self._issue_api_request(
 1720                 'StartVolumePairing', params,
 1721                 '8.0')['result']['volumePairingKey']
 1722             params = {'volumeID': tgt_sfid,
 1723                       'volumePairingKey': rep_key}
 1724             LOG.debug("Issuing CompleteVolumePairing request on remote: "
 1725                       "%(endpoint)s, %(parameters)s",
 1726                       {'endpoint': tgt_endpoint['url'], 'parameters': params})
 1727             self._issue_api_request('CompleteVolumePairing',
 1728                                     params,
 1729                                     '8.0',
 1730                                     endpoint=tgt_endpoint)
 1731 
 1732         try:
 1733             _pair_volumes()
 1734         except SolidFireAPIException:
 1735             with excutils.save_and_reraise_exception():
 1736                 params = {'volumeID': tgt_sfid}
 1737                 LOG.debug("Error pairing volume on remote cluster. Rolling "
 1738                           "back and deleting volume %(vol)s at cluster "
 1739                           "%(cluster)s.",
 1740                           {'vol': tgt_sfid, 'cluster': tgt_endpoint})
 1741                 self._issue_api_request('DeleteVolume', params,
 1742                                         endpoint=tgt_endpoint)
 1743                 self._issue_api_request('PurgeDeletedVolume', params,
 1744                                         endpoint=tgt_endpoint)
 1745 
 1746         updates['replication_status'] = rep_success_status
 1747 
 1748         LOG.debug("Completed volume pairing.")
 1749         return updates
 1750 
 1751     def _disable_replication(self, volume):
 1752 
 1753         updates = {}
 1754         tgt_endpoint = self.cluster_pairs[0]['endpoint']
 1755 
 1756         sfvol = self._get_sfvol_by_cinder_vref(volume)
 1757         if len(sfvol['volumePairs']) != 1:
 1758             LOG.warning("Trying to disable replication on volume %s but "
 1759                         "volume does not have pairs.", volume.id)
 1760 
 1761             updates['replication_status'] = fields.ReplicationStatus.DISABLED
 1762             return updates
 1763 
 1764         params = {'volumeID': sfvol['volumeID']}
 1765         self._issue_api_request('RemoveVolumePair', params, '8.0')
 1766 
 1767         remote_sfid = sfvol['volumePairs'][0]['remoteVolumeID']
 1768         params = {'volumeID': remote_sfid}
 1769         self._issue_api_request('RemoveVolumePair',
 1770                                 params, '8.0', endpoint=tgt_endpoint)
 1771         self._issue_api_request('DeleteVolume', params,
 1772                                 endpoint=tgt_endpoint)
 1773         self._issue_api_request('PurgeDeletedVolume', params,
 1774                                 endpoint=tgt_endpoint)
 1775 
 1776         updates['replication_status'] = fields.ReplicationStatus.DISABLED
 1777         return updates
 1778 
 1779     @locked_source_id_operation
 1780     def create_cloned_volume(self, volume, source):
 1781         """Create a clone of an existing volume."""
 1782         (_data, _sfaccount, model) = self._do_clone_volume(
 1783             source['id'],
 1784             volume)
 1785 
 1786         return model
 1787 
 1788     def delete_volume(self, volume):
 1789         """Delete SolidFire Volume from device.
 1790 
 1791         SolidFire allows multiple volumes with the same name;
 1792         volumeID is what's guaranteed unique.
 1793 
 1794         """
 1795         sf_vol = self._get_sfvol_by_cinder_vref(volume)
 1796         if sf_vol is not None:
 1797             for vp in sf_vol.get('volumePairs', []):
 1798                 LOG.debug("Deleting paired volume on remote cluster...")
 1799                 pair_id = vp['clusterPairID']
 1800                 for cluster in self.cluster_pairs:
 1801                     if cluster['clusterPairID'] == pair_id:
 1802                         params = {'volumeID': vp['remoteVolumeID']}
 1803                         LOG.debug("Issue Delete request on cluster: "
 1804                                   "%(remote)s with params: %(parameters)s",
 1805                                   {'remote': cluster['endpoint']['url'],
 1806                                    'parameters': params})
 1807                         self._issue_api_request('DeleteVolume', params,
 1808                                                 endpoint=cluster['endpoint'])
 1809                         self._issue_api_request('PurgeDeletedVolume', params,
 1810                                                 endpoint=cluster['endpoint'])
 1811 
 1812             # Multiattach volumes are only removed from the VAG on
 1813             # deletion.
 1814             if volume.get('multiattach'):
 1815                 self._remove_volume_from_vags(sf_vol['volumeID'])
 1816 
 1817             if sf_vol['status'] == 'active':
 1818                 params = {'volumeID': sf_vol['volumeID']}
 1819                 self._issue_api_request('DeleteVolume', params)
 1820                 self._issue_api_request('PurgeDeletedVolume', params)
 1821         else:
 1822             LOG.error("Volume ID %s was not found on "
 1823                       "the SolidFire Cluster while attempting "
 1824                       "delete_volume operation!", volume['id'])
 1825 
 1826     def delete_snapshot(self, snapshot):
 1827         """Delete the specified snapshot from the SolidFire cluster."""
 1828         sf_snap_name = '%s%s' % (self.configuration.sf_volume_prefix,
 1829                                  snapshot['id'])
 1830         accounts = self._get_sfaccounts_for_tenant(snapshot['project_id'])
 1831         snap = None
 1832         for acct in accounts:
 1833             params = {'accountID': acct['accountID']}
 1834             sf_vol = self._get_sf_volume(snapshot['volume_id'], params)
 1835             if sf_vol:
 1836                 sf_snaps = self._get_sf_snapshots(sf_vol['volumeID'])
 1837                 snap = next((s for s in sf_snaps if s["name"] == sf_snap_name),
 1838                             None)
 1839                 if snap:
 1840                     params = {'snapshotID': snap['snapshotID']}
 1841                     self._issue_api_request('DeleteSnapshot',
 1842                                             params,
 1843                                             version='6.0')
 1844                     return
 1845         LOG.warning(
 1846             "Snapshot %s not found; old style clones may not be deleted.",
 1847             snapshot.id)
 1848 
 1849     def create_snapshot(self, snapshot):
 1850         sfaccount = self._get_sfaccount(snapshot['project_id'])
 1851         if sfaccount is None:
 1852             LOG.error("Account for Volume ID %s was not found on "
 1853                       "the SolidFire Cluster while attempting "
 1854                       "create_snapshot operation!", snapshot['volume_id'])
 1855 
 1856         params = {'accountID': sfaccount['accountID']}
 1857         sf_vol = self._get_sf_volume(snapshot['volume_id'], params)
 1858 
 1859         if sf_vol is None:
 1860             raise exception.VolumeNotFound(volume_id=snapshot['volume_id'])
 1861         params = {'volumeID': sf_vol['volumeID'],
 1862                   'name': '%s%s' % (self.configuration.sf_volume_prefix,
 1863                                     snapshot['id'])}
 1864 
 1865         rep_settings = self._retrieve_replication_settings(snapshot.volume)
 1866         if self.replication_enabled and rep_settings:
 1867             params['enableRemoteReplication'] = True
 1868 
 1869         return self._do_snapshot_create(params)
 1870 
 1871     @locked_source_id_operation
 1872     def create_volume_from_snapshot(self, volume, source):
 1873         """Create a volume from the specified snapshot."""
 1874         if source.get('group_snapshot_id'):
 1875             # We're creating a volume from a snapshot that resulted from a
 1876             # consistency group snapshot. Because of the way that SolidFire
 1877             # creates cgsnaps, we have to search for the correct snapshot.
 1878             group_snapshot_id = source.get('group_snapshot_id')
 1879             snapshot_id = source.get('volume_id')
 1880             sf_name = self.configuration.sf_volume_prefix + group_snapshot_id
 1881             sf_group_snap = self._get_group_snapshot_by_name(sf_name)
 1882             return self._create_clone_from_sf_snapshot(snapshot_id,
 1883                                                        group_snapshot_id,
 1884                                                        sf_group_snap,
 1885                                                        volume)
 1886 
 1887         (_data, _sfaccount, model) = self._do_clone_volume(
 1888             source['id'],
 1889             volume)
 1890 
 1891         return model
 1892 
 1893     # Consistency group helpers
 1894     def _sf_create_group_snapshot(self, name, sf_volumes):
 1895         # Group snapshot is our version of a consistency group snapshot.
 1896         vol_ids = [vol['volumeID'] for vol in sf_volumes]
 1897         params = {'name': name,
 1898                   'volumes': vol_ids}
 1899         snapshot_id = self._issue_api_request('CreateGroupSnapshot',
 1900                                               params,
 1901                                               version='7.0')
 1902         return snapshot_id['result']
 1903 
 1904     def _group_snapshot_creator(self, gsnap_name, src_vol_ids):
 1905         # Common helper that takes in an array of OpenStack Volume UUIDs and
 1906         # creates a SolidFire group snapshot with them.
 1907         vol_names = [self.configuration.sf_volume_prefix + vol_id
 1908                      for vol_id in src_vol_ids]
 1909         active_sf_vols = self._get_all_active_volumes()
 1910         target_vols = [vol for vol in active_sf_vols
 1911                        if vol['name'] in vol_names]
 1912         if len(src_vol_ids) != len(target_vols):
 1913             msg = (_("Retrieved a different number of SolidFire volumes for "
 1914                      "the provided Cinder volumes. Retrieved: %(ret)s "
 1915                      "Desired: %(des)s") % {"ret": len(target_vols),
 1916                                             "des": len(src_vol_ids)})
 1917             raise SolidFireDriverException(msg)
 1918 
 1919         result = self._sf_create_group_snapshot(gsnap_name, target_vols)
 1920         return result
 1921 
 1922     def _create_temp_group_snapshot(self, source_cg, source_vols):
 1923         # Take a temporary snapshot to create the volumes for a new
 1924         # consistency group.
 1925         gsnap_name = ("%(prefix)s%(id)s-tmp" %
 1926                       {"prefix": self.configuration.sf_volume_prefix,
 1927                        "id": source_cg['id']})
 1928         vol_ids = [vol['id'] for vol in source_vols]
 1929         self._group_snapshot_creator(gsnap_name, vol_ids)
 1930         return gsnap_name
 1931 
 1932     def _list_group_snapshots(self):
 1933         result = self._issue_api_request('ListGroupSnapshots',
 1934                                          {},
 1935                                          version='7.0')
 1936         return result['result']['groupSnapshots']
 1937 
 1938     def _get_group_snapshot_by_name(self, name):
 1939         target_snaps = self._list_group_snapshots()
 1940         target = next((snap for snap in target_snaps
 1941                        if snap['name'] == name), None)
 1942         return target
 1943 
 1944     def _delete_group_snapshot(self, gsnapid):
 1945         params = {'groupSnapshotID': gsnapid}
 1946         self._issue_api_request('DeleteGroupSnapshot',
 1947                                 params,
 1948                                 version='7.0')
 1949 
 1950     def _delete_cgsnapshot_by_name(self, snap_name):
 1951         # Common function used to find and delete a snapshot.
 1952         target = self._get_group_snapshot_by_name(snap_name)
 1953         if not target:
 1954             msg = _("Failed to find group snapshot named: %s") % snap_name
 1955             raise SolidFireDriverException(msg)
 1956         self._delete_group_snapshot(target['groupSnapshotID'])
 1957 
 1958     def _find_linked_snapshot(self, target_uuid, group_snap):
 1959         # Because a group snapshot gives each individual snapshot the
 1960         # group snapshot's name, we have to trawl through the SolidFire
 1961         # snapshots to find the member snapshot whose SolidFire volumeID
 1962         # corresponds to the Cinder snapshot's source volume, i.e. the
 1963         # volume the snapshot was taken from.
 1964         source_vol = self._get_sf_volume(target_uuid)
 1965         target_snap = next((sn for sn in group_snap['members']
 1966                             if sn['volumeID'] == source_vol['volumeID']), None)
 1967         return target_snap
 1968 
 1969     def _create_clone_from_sf_snapshot(self, target_uuid, src_uuid,
 1970                                        sf_group_snap, vol):
 1971         # Find the correct SolidFire backing snapshot.
 1972         sf_src_snap = self._find_linked_snapshot(target_uuid,
 1973                                                  sf_group_snap)
 1974         _data, _sfaccount, model = self._do_clone_volume(src_uuid,
 1975                                                          vol,
 1976                                                          sf_src_snap)
 1977         model['id'] = vol['id']
 1978         model['status'] = 'available'
 1979         return model
 1980 
 1981     def _map_sf_volumes(self, cinder_volumes, endpoint=None):
 1982         """Get a list of SolidFire volumes.
 1983 
 1984         Creates a list of SolidFire volumes based
 1985         on matching a list of cinder volume IDs, and
 1986         also adds a 'cinder_id' key to match cinder.
 1987         """
 1988         vols = self._issue_api_request(
 1989             'ListActiveVolumes', {},
 1990             endpoint=endpoint)['result']['volumes']
 1991         # FIXME(erlon): When we fetch only for the volume name, we miss
 1992         #  volumes that were brought to Cinder via cinder-manage.
 1993         vlist = (
 1994             [sfvol for sfvol in vols for cv in cinder_volumes if cv['id'] in
 1995              sfvol['name']])
 1996         for v in vlist:
 1997             v['cinder_id'] = v['name'].split(
 1998                 self.configuration.sf_volume_prefix)[1]
 1999         return vlist
 2000 
 2001     # Generic Volume Groups.
 2002     def create_group(self, ctxt, group):
 2003         # SolidFire does not have the concept of volume groups. We're going to
 2004         # play along with the group song and dance. There will be a lot of
 2005         # no-ops because of this.
 2006         if volume_utils.is_group_a_cg_snapshot_type(group):
 2007             return {'status': fields.GroupStatus.AVAILABLE}
 2008 
 2009         # Blatantly ripping off this pattern from other drivers.
 2010         raise NotImplementedError()
 2011 
 2012     def create_group_from_src(self, ctxt, group, volumes, group_snapshots=None,
 2013                               snapshots=None, source_group=None,
 2014                               source_vols=None):
 2015         # At this point this is just a pass-through.
 2016         if volume_utils.is_group_a_cg_snapshot_type(group):
 2017             return self._create_consistencygroup_from_src(
 2018                 ctxt,
 2019                 group,
 2020                 volumes,
 2021                 group_snapshots,
 2022                 snapshots,
 2023                 source_group,
 2024                 source_vols)
 2025 
 2026         # Default implementation handles other scenarios.
 2027         raise NotImplementedError()
 2028 
 2029     def create_group_snapshot(self, ctxt, group_snapshot, snapshots):
 2030         # This is a pass-through to the old consistency group stuff.
 2031         if volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
 2032             return self._create_cgsnapshot(ctxt, group_snapshot, snapshots)
 2033 
 2034         # Default implementation handles other scenarios.
 2035         raise NotImplementedError()
 2036 
 2037     def delete_group(self, ctxt, group, volumes):
 2038         # Delete a volume group. SolidFire does not track volume groups;
 2039         # however, we do need to actually remove the member volumes of the
 2040         # group. Right now only consistent volume groups are supported.
 2041         if volume_utils.is_group_a_cg_snapshot_type(group):
 2042             return self._delete_consistencygroup(ctxt, group, volumes)
 2043 
 2044         # Default implementation handles other scenarios.
 2045         raise NotImplementedError()
 2046 
 2047     def update_group(self, ctxt, group, add_volumes=None, remove_volumes=None):
 2048         # Regarding consistency groups, SolidFire does not track volumes, so
 2049         # this is a no-op. In the future with replicated volume groups this
 2050         # might actually do something.
 2051         if volume_utils.is_group_a_cg_snapshot_type(group):
 2052             return self._update_consistencygroup(ctxt,
 2053                                                  group,
 2054                                                  add_volumes,
 2055                                                  remove_volumes)
 2056 
 2057         # Default implementation handles other scenarios.
 2058         raise NotImplementedError()
 2059 
 2060     def _create_consistencygroup_from_src(self, ctxt, group, volumes,
 2061                                           cgsnapshot, snapshots,
 2062                                           source_cg, source_vols):
 2063         if cgsnapshot and snapshots:
 2064             sf_name = self.configuration.sf_volume_prefix + cgsnapshot['id']
 2065             sf_group_snap = self._get_group_snapshot_by_name(sf_name)
 2066 
 2067             # Go about creating volumes from provided snaps.
 2068             vol_models = []
 2069             for vol, snap in zip(volumes, snapshots):
 2070                 vol_models.append(self._create_clone_from_sf_snapshot(
 2071                     snap['volume_id'],
 2072                     snap['id'],
 2073                     sf_group_snap,
 2074                     vol))
 2075             return ({'status': fields.GroupStatus.AVAILABLE},
 2076                     vol_models)
 2077 
 2078         elif source_cg and source_vols:
 2079             # Create temporary group snapshot.
 2080             gsnap_name = self._create_temp_group_snapshot(source_cg,
 2081                                                           source_vols)
 2082             try:
 2083                 sf_group_snap = self._get_group_snapshot_by_name(gsnap_name)
 2084                 # For each temporary snapshot clone the volume.
 2085                 vol_models = []
 2086                 for vol in volumes:
 2087                     vol_models.append(self._create_clone_from_sf_snapshot(
 2088                         vol['source_volid'],
 2089                         vol['source_volid'],
 2090                         sf_group_snap,
 2091                         vol))
 2092             finally:
 2093                 self._delete_cgsnapshot_by_name(gsnap_name)
 2094             return {'status': fields.GroupStatus.AVAILABLE}, vol_models
 2095 
 2096     def _create_cgsnapshot(self, ctxt, cgsnapshot, snapshots):
 2097         vol_ids = [snapshot['volume_id'] for snapshot in snapshots]
 2098         vol_names = [self.configuration.sf_volume_prefix + vol_id
 2099                      for vol_id in vol_ids]
 2100         active_sf_vols = self._get_all_active_volumes()
 2101         target_vols = [vol for vol in active_sf_vols
 2102                        if vol['name'] in vol_names]
 2103         if len(snapshots) != len(target_vols):
 2104             msg = (_("Retrieved a different number of SolidFire volumes for "
 2105                      "the provided Cinder snapshots. Retrieved: %(ret)s "
 2106                      "Desired: %(des)s") % {"ret": len(target_vols),
 2107                                             "des": len(snapshots)})
 2108             raise SolidFireDriverException(msg)
 2109         snap_name = self.configuration.sf_volume_prefix + cgsnapshot['id']
 2110         self._sf_create_group_snapshot(snap_name, target_vols)
 2111         return None, None
 2112 
 2113     def _update_consistencygroup(self, context, group,
 2114                                  add_volumes=None, remove_volumes=None):
 2115         # Similar to create_consistencygroup, SolidFire's lack of a consistency
 2116         # group object means there is nothing to update on the cluster.
 2117         return None, None, None
 2118 
 2119     def _delete_cgsnapshot(self, ctxt, cgsnapshot, snapshots):
 2120         snap_name = self.configuration.sf_volume_prefix + cgsnapshot['id']
 2121         self._delete_cgsnapshot_by_name(snap_name)
 2122         return None, None
 2123 
 2124     def delete_group_snapshot(self, context, group_snapshot, snapshots):
 2125         if volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
 2126             return self._delete_cgsnapshot(context, group_snapshot, snapshots)
 2127 
 2128         # Default implementation handles other scenarios.
 2129         raise NotImplementedError()
 2130 
 2131     def _delete_consistencygroup(self, ctxt, group, volumes):
 2132         # TODO(chris_morrell): exception handling and return correctly updated
 2133         # volume_models.
 2134         for vol in volumes:
 2135             self.delete_volume(vol)
 2136 
 2137         return None, None
 2138 
 2139     def get_volume_stats(self, refresh=False):
 2140         """Get volume status.
 2141 
 2142         If 'refresh' is True, run update first.
 2143         The name is a bit misleading as
 2144         the majority of the data here is cluster
 2145         data.
 2146         """
 2147         if refresh:
 2148             try:
 2149                 self._update_cluster_status()
 2150             except SolidFireAPIException:
 2151                 pass
 2152 
 2153         LOG.debug("SolidFire cluster_stats: %s", self.cluster_stats)
 2154         return self.cluster_stats
 2155 
 2156     def extend_volume(self, volume, new_size):
 2157         """Extend an existing volume."""
 2158         sfaccount = self._get_sfaccount(volume['project_id'])
 2159         params = {'accountID': sfaccount['accountID']}
 2160 
 2161         sf_vol = self._get_sf_volume(volume['id'], params)
 2162 
 2163         if sf_vol is None:
 2164             LOG.error("Volume ID %s was not found on "
 2165                       "the SolidFire Cluster while attempting "
 2166                       "extend_volume operation!", volume['id'])
 2167             raise exception.VolumeNotFound(volume_id=volume['id'])
 2168         qos = self._retrieve_qos_setting(volume, new_size)
 2169         params = {
 2170             'volumeID': sf_vol['volumeID'],
 2171             'totalSize': int(new_size * units.Gi),
 2172             'qos': qos
 2173         }
 2174         self._issue_api_request('ModifyVolume',
 2175                                 params, version='5.0')
 2176 
 2177         rep_settings = self._retrieve_replication_settings(volume)
 2178         if self.replication_enabled and rep_settings:
 2179             if len(sf_vol['volumePairs']) != 1:
 2180                 LOG.error("Can't find the remote pair while extending the "
 2181                           "volume, or multiple replication pairs were found!")
 2182                 raise exception.VolumeNotFound(volume_id=volume['id'])
 2183 
 2184             tgt_endpoint = self.cluster_pairs[0]['endpoint']
 2185             target_vol_id = sf_vol['volumePairs'][0]['remoteVolumeID']
 2186             params2 = params.copy()
 2187             params2['volumeID'] = target_vol_id
 2188             self._issue_api_request('ModifyVolume',
 2189                                     params2, version='5.0',
 2190                                     endpoint=tgt_endpoint)
 2191 
 2192     def _get_provisioned_capacity_iops(self):
 2193         response = self._issue_api_request('ListVolumes', {}, version='8.0')
 2194         volumes = response['result']['volumes']
 2195 
 2196         LOG.debug("%s volumes present in cluster", len(volumes))
 2197 
 2198         provisioned_cap = 0
 2199         provisioned_iops = 0
 2200 
 2201         for vol in volumes:
 2202             provisioned_cap += vol['totalSize']
 2203             provisioned_iops += vol['qos']['minIOPS']
 2204 
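              # provisioned_cap is in bytes (sum of each volume's totalSize);
              # provisioned_iops is the sum of each volume's minIOPS setting.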
 2205         return provisioned_cap, provisioned_iops
 2206 
 2207     def _update_cluster_status(self):
 2208         """Retrieve status info for the Cluster."""
 2209         params = {}
 2210         data = {}
 2211         backend_name = self.configuration.safe_get('volume_backend_name')
 2212         data["volume_backend_name"] = backend_name or self.__class__.__name__
 2213         data["vendor_name"] = 'SolidFire Inc'
 2214         data["driver_version"] = self.VERSION
 2215         data["storage_protocol"] = 'iSCSI'
 2216         data['consistencygroup_support'] = True
 2217         data['consistent_group_snapshot_enabled'] = True
 2218         data['replication_enabled'] = self.replication_enabled
 2219         if self.replication_enabled:
 2220             data['replication'] = 'enabled'
 2221         data['active_cluster_mvip'] = self.active_cluster['mvip']
 2222         data['reserved_percentage'] = self.configuration.reserved_percentage
 2223         data['QoS_support'] = True
 2224         data['multiattach'] = True
 2225 
 2226         try:
 2227             results = self._issue_api_request('GetClusterCapacity', params,
 2228                                               version='8.0')
 2229         except SolidFireAPIException:
 2230             data['total_capacity_gb'] = 0
 2231             data['free_capacity_gb'] = 0
 2232             self.cluster_stats = data
 2233             return
 2234 
 2235         results = results['result']['clusterCapacity']
 2236         prov_cap, prov_iops = self._get_provisioned_capacity_iops()
 2237 
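              # sf_provisioning_calc selects how capacity is reported:
              # 'usedSpace' bases it on physical used space and enables thin
              # provisioning/over-subscription reporting; otherwise capacity
              # is based on maxProvisionedSpace.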
 2238         if self.configuration.sf_provisioning_calc == 'usedSpace':
 2239             free_capacity = (
 2240                 results['maxUsedSpace'] - results['usedSpace'])
 2241             data['total_capacity_gb'] = results['maxUsedSpace'] / units.Gi
 2242             data['thin_provisioning_support'] = True
 2243             data['provisioned_capacity_gb'] = prov_cap / units.Gi
 2244             data['max_over_subscription_ratio'] = (
 2245                 self.configuration.max_over_subscription_ratio
 2246             )
 2247         else:
 2248             free_capacity = (
 2249                 results['maxProvisionedSpace'] - results['usedSpace'])
 2250             data['total_capacity_gb'] = (
 2251                 results['maxProvisionedSpace'] / units.Gi)
 2252 
 2253         data['free_capacity_gb'] = float(free_capacity / units.Gi)
 2254 
 2255         if (results['uniqueBlocksUsedSpace'] == 0 or
 2256                 results['uniqueBlocks'] == 0 or
 2257                 results['zeroBlocks'] == 0 or
 2258                 results['nonZeroBlocks'] == 0):
 2259             data['compression_percent'] = 100
 2260             data['deduplication_percent'] = 100
 2261             data['thin_provision_percent'] = 100
 2262         else:
 2263             data['compression_percent'] = (
 2264                 (float(results['uniqueBlocks'] * 4096) /
 2265                  results['uniqueBlocksUsedSpace']) * 100)
 2266             data['deduplication_percent'] = (
 2267                 float(results['nonZeroBlocks'] /
 2268                       results['uniqueBlocks']) * 100)
 2269             data['thin_provision_percent'] = (
 2270                 (float(results['nonZeroBlocks'] + results['zeroBlocks']) /
 2271                  results['nonZeroBlocks']) * 100)
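              # Illustrative arithmetic (assumed counters): uniqueBlocks=1000,
              # uniqueBlocksUsedSpace=2048000, nonZeroBlocks=3000 and
              # zeroBlocks=1000 give compression (1000*4096/2048000)*100 = 200,
              # deduplication (3000/1000)*100 = 300 and thin_provision
              # ((3000+1000)/3000)*100 ~= 133.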
 2272 
 2273         data['provisioned_iops'] = prov_iops
 2274         data['current_iops'] = results['currentIOPS']
 2275         data['average_iops'] = results['averageIOPS']
 2276         data['max_iops'] = results['maxIOPS']
 2277         data['peak_iops'] = results['peakIOPS']
 2278 
 2279         data['shared_targets'] = False
 2280         self.cluster_stats = data
 2281 
 2282     def initialize_connection(self, volume, connector):
 2283         """Initialize the connection and return connection info.
 2284 
 2285            Optionally checks and utilizes volume access groups.
 2286         """
 2287         properties = self._sf_initialize_connection(volume, connector)
 2288         properties['data']['discard'] = True
 2289         return properties
 2290 
 2291     def attach_volume(self, context, volume,
 2292                       instance_uuid, host_name,
 2293                       mountpoint):
 2294 
 2295         sfaccount = self._get_sfaccount(volume['project_id'])
 2296         params = {'accountID': sfaccount['accountID']}
 2297 
 2298         # In a retype of an attached volume scenario, the volume id will be
 2299         # set as the target in 'migration_status'; otherwise it'd be None.
 2300         migration_status = volume.get('migration_status')
 2301         if migration_status and 'target' in migration_status:
 2302             __, vol_id = migration_status.split(':')
 2303         else:
 2304             vol_id = volume['id']
 2305         sf_vol = self._get_sf_volume(vol_id, params)
 2306         if sf_vol is None:
 2307             LOG.error("Volume ID %s was not found on "
 2308                       "the SolidFire Cluster while attempting "
 2309                       "attach_volume operation!", volume['id'])
 2310             raise exception.VolumeNotFound(volume_id=volume['id'])
 2311 
 2312         attributes = sf_vol['attributes']
 2313         attributes['attach_time'] = volume.get('attach_time', None)
 2314         attributes['attached_to'] = instance_uuid
 2315         params = {
 2316             'volumeID': sf_vol['volumeID'],
 2317             'attributes': attributes
 2318         }
 2319 
 2320         self._issue_api_request('ModifyVolume', params)
 2321 
 2322     def terminate_connection(self, volume, properties, force):
 2323         return self._sf_terminate_connection(volume,
 2324                                              properties,
 2325                                              force)
 2326 
 2327     def detach_volume(self, context, volume, attachment=None):
 2328         sfaccount = self._get_sfaccount(volume['project_id'])
 2329         params = {'accountID': sfaccount['accountID']}
 2330 
 2331         sf_vol = self._get_sf_volume(volume['id'], params)
 2332         if sf_vol is None:
 2333             LOG.error("Volume ID %s was not found on "
 2334                       "the SolidFire Cluster while attempting "
 2335                       "detach_volume operation!", volume['id'])
 2336             raise exception.VolumeNotFound(volume_id=volume['id'])
 2337 
 2338         attributes = sf_vol['attributes']
 2339         attributes['attach_time'] = None
 2340         attributes['attached_to'] = None
 2341         params = {
 2342             'volumeID': sf_vol['volumeID'],
 2343             'attributes': attributes
 2344         }
 2345 
 2346         self._issue_api_request('ModifyVolume', params)
 2347 
 2348     def accept_transfer(self, context, volume,
 2349                         new_user, new_project):
 2350 
 2351         sfaccount = self._get_sfaccount(volume['project_id'])
 2352         params = {'accountID': sfaccount['accountID']}
 2353         sf_vol = self._get_sf_volume(volume['id'], params)
 2354         if sf_vol is None:
 2355             LOG.error("Volume ID %s was not found on "
 2356                       "the SolidFire Cluster while attempting "
 2357                       "accept_transfer operation!", volume['id'])
 2358             raise exception.VolumeNotFound(volume_id=volume['id'])
 2359         if new_project != volume['project_id']:
 2360             # do a create_sfaccount here as this tenant
 2361             # may not exist on the cluster yet
 2362             sfaccount = self._get_create_account(new_project)
 2363 
 2364         params = {
 2365             'volumeID': sf_vol['volumeID'],
 2366             'accountID': sfaccount['accountID']
 2367         }
 2368         self._issue_api_request('ModifyVolume',
 2369                                 params, version='5.0')
 2370 
 2371         volume['project_id'] = new_project
 2372         volume['user_id'] = new_user
 2373         return self.target_driver.ensure_export(context, volume, None)
 2374 
 2375     def _setup_intercluster_volume_migration(self, src_volume,
 2376                                              dst_cluster_ref):
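              """Prepare replication-based migration to another SolidFire cluster.

              Ensures the clusters are paired, creates the destination account
              and volume, and pairs the source and destination volumes.  If the
              volume pairing fails, the setup is rolled back and the destination
              volume is deleted.
              """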
 2377 
 2378         LOG.info("Setting up cluster migration for volume [%s]",
 2379                  src_volume.name)
 2380 
 2381         # We should be able to roll back in case something goes wrong
 2382         def _do_migrate_setup_rollback(src_sf_volume_id, dst_sf_volume_id):
 2383             # Removing volume pair in source cluster
 2384             params = {'volumeID': src_sf_volume_id}
 2385             self._issue_api_request('RemoveVolumePair', params, '8.0')
 2386 
 2387             # Removing volume pair in destination cluster
 2388             params = {'volumeID': dst_sf_volume_id}
 2389             self._issue_api_request('RemoveVolumePair', params, '8.0',
 2390                                     endpoint=dst_cluster_ref["endpoint"])
 2391 
 2392             # Destination volume should also be removed.
 2393             self._issue_api_request('DeleteVolume', params,
 2394                                     endpoint=dst_cluster_ref["endpoint"])
 2395             self._issue_api_request('PurgeDeletedVolume', params,
 2396                                     endpoint=dst_cluster_ref["endpoint"])
 2397 
 2398         self._get_or_create_cluster_pairing(
 2399             dst_cluster_ref, check_connected=True)
 2400 
 2401         dst_sf_account = self._get_create_account(
 2402             src_volume['project_id'], endpoint=dst_cluster_ref['endpoint'])
 2403 
 2404         LOG.debug("Destination account is [%s]", dst_sf_account["username"])
 2405 
 2406         params = self._get_default_volume_params(src_volume, dst_sf_account)
 2407 
 2408         dst_volume = self._do_volume_create(
 2409             dst_sf_account, params, endpoint=dst_cluster_ref['endpoint'])
 2410 
 2411         try:
 2412             self._create_volume_pairing(
 2413                 src_volume, dst_volume, dst_cluster_ref)
 2414         except SolidFireReplicationPairingError:
 2415             with excutils.save_and_reraise_exception():
 2416                 dst_sf_volid = int(dst_volume['provider_id'].split()[0])
 2417                 src_sf_volid = int(src_volume['provider_id'].split()[0])
 2418                 LOG.debug("Error pairing volume on remote cluster. Rolling "
 2419                           "back and deleting volume %(vol)s at cluster "
 2420                           "%(cluster)s.",
 2421                           {'vol': dst_sf_volid,
 2422                            'cluster': dst_cluster_ref['mvip']})
 2423                 _do_migrate_setup_rollback(src_sf_volid, dst_sf_volid)
 2424 
 2425         return dst_volume
 2426 
 2427     def _do_intercluster_volume_migration_data_sync(self, src_volume,
 2428                                                     src_sf_account,
 2429                                                     dst_sf_volume_id,
 2430                                                     dst_cluster_ref):
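              """Wait for the replicated data to reach the destination cluster.

              Sets the destination volume to 'replicationTarget' access and
              polls the source volume's replication state every 30 seconds (up
              to sf_volume_pairing_timeout) until it reports 'Active', then
              switches the destination volume back to 'readWrite'.
              """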
 2431 
 2432         params = {'volumeID': dst_sf_volume_id, 'access': 'replicationTarget'}
 2433         self._issue_api_request('ModifyVolume',
 2434                                 params,
 2435                                 '8.0',
 2436                                 endpoint=dst_cluster_ref['endpoint'])
 2437 
 2438         def _wait_sync_completed():
 2439             vol_params = None
 2440             if src_sf_account:
 2441                 vol_params = {'accountID': src_sf_account['accountID']}
 2442 
 2443             sf_vol = self._get_sf_volume(src_volume.id, vol_params)
 2444             state = sf_vol['volumePairs'][0]['remoteReplication']['state']
 2445 
 2446             if state == 'Active':
 2447                 raise loopingcall.LoopingCallDone(sf_vol)
 2448 
 2449             LOG.debug("Waiting for volume data to sync. "
 2450                       "Replication state is [%s]", state)
 2451 
 2452         try:
 2453             timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(
 2454                 _wait_sync_completed)
 2455             timer.start(
 2456                 interval=30,
 2457                 timeout=self.configuration.sf_volume_pairing_timeout).wait()
 2458         except loopingcall.LoopingCallTimeOut:
 2459             msg = _("Timeout waiting for volumes to sync.")
 2460             raise SolidFireDataSyncTimeoutError(reason=msg)
 2461 
 2462         self._do_intercluster_volume_migration_complete_data_sync(
 2463             dst_sf_volume_id, dst_cluster_ref)
 2464 
 2465     def _do_intercluster_volume_migration_complete_data_sync(self,
 2466                                                              sf_volume_id,
 2467                                                              cluster_ref):
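              """Switch the destination volume back to 'readWrite' access."""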
 2468         params = {'volumeID': sf_volume_id, 'access': 'readWrite'}
 2469         self._issue_api_request('ModifyVolume',
 2470                                 params,
 2471                                 '8.0',
 2472                                 endpoint=cluster_ref['endpoint'])
 2473 
 2474     def _cleanup_intercluster_volume_migration(self, src_volume,
 2475                                                dst_sf_volume_id,
 2476                                                dst_cluster_ref):
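              """Tear down the migration pairing and remove the source volume."""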
 2477 
 2478         src_sf_volume_id = int(src_volume['provider_id'].split()[0])
 2479 
 2480         # Removing volume pair in destination cluster
 2481         params = {'volumeID': dst_sf_volume_id}
 2482         self._issue_api_request('RemoveVolumePair', params, '8.0',
 2483                                 endpoint=dst_cluster_ref["endpoint"])
 2484 
 2485         # Removing volume pair in source cluster
 2486         params = {'volumeID': src_sf_volume_id}
 2487         self._issue_api_request('RemoveVolumePair', params, '8.0')
 2488 
 2489         # The source volume can now be removed from the source cluster.
 2490         self._issue_api_request('DeleteVolume', params)
 2491         self._issue_api_request('PurgeDeletedVolume', params)
 2492 
 2493     def _do_intercluster_volume_migration(self, volume, host, dst_config):
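              """Migrate a volume to another SolidFire cluster using replication.

              Builds a cluster reference for the destination backend, sets up
              volume pairing, waits for the data sync to complete, removes the
              pairing along with the source volume, and returns the destination
              volume (used as model updates by migrate_volume).
              """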
 2494 
 2495         LOG.debug("Start migrating volume [%(name)s] to cluster [%(cluster)s]",
 2496                   {"name": volume.name, "cluster": host["host"]})
 2497 
 2498         dst_endpoint = self._build_endpoint_info(backend_conf=dst_config)
 2499 
 2500         LOG.debug("Destination cluster mvip is [%s]", dst_endpoint["mvip"])
 2501 
 2502         dst_cluster_ref = self._create_cluster_reference(dst_endpoint)
 2503 
 2504         LOG.debug("Destination cluster reference created. API version is [%s]",
 2505                   dst_cluster_ref["clusterAPIVersion"])
 2506 
 2507         dst_volume = self._setup_intercluster_volume_migration(
 2508             volume, dst_cluster_ref)
 2509 
 2510         dst_sf_volume_id = int(dst_volume["provider_id"].split()[0])
 2511 
 2512         # FIXME(sfernand): should pass src account to improve performance
 2513         self._do_intercluster_volume_migration_data_sync(
 2514             volume, None, dst_sf_volume_id, dst_cluster_ref)
 2515 
 2516         self._cleanup_intercluster_volume_migration(
 2517             volume, dst_sf_volume_id, dst_cluster_ref)
 2518 
 2519         return dst_volume
 2520 
 2521     def migrate_volume(self, ctxt, volume, host):
 2522         """Migrate a SolidFire volume to the specified host/backend"""
 2523 
 2524         LOG.info("Migrate volume %(vol_id)s to %(host)s.",
 2525                  {"vol_id": volume.id, "host": host["host"]})
 2526 
 2527         if volume.status != fields.VolumeStatus.AVAILABLE:
 2528             msg = _("Volume status must be 'available' to execute "
 2529                     "storage assisted migration.")
 2530             LOG.error(msg)
 2531             raise exception.InvalidVolume(reason=msg)
 2532 
 2533         if volume.is_replicated():
 2534             msg = _("Migration of replicated volumes is not allowed.")
 2535             LOG.error(msg)
 2536             raise exception.InvalidVolume(reason=msg)
 2537 
 2538         src_backend = volume_utils.extract_host(
 2539             volume.host, "backend").split("@")[1]
 2540         dst_backend = volume_utils.extract_host(
 2541             host["host"], "backend").split("@")[1]
 2542 
 2543         if src_backend == dst_backend:
 2544             LOG.info("Same backend, nothing to do.")
 2545             return True, {}
 2546 
 2547         try:
 2548             dst_config = volume_utils.get_backend_configuration(
 2549                 dst_backend, self.get_driver_options())
 2550         except exception.ConfigNotFound:
 2551             msg = _("Destination backend config not found. Check that the "
 2552                     "destination backend stanza is properly configured in "
 2553                     "cinder.conf, or add the parameter --force-host-copy "
 2554                     "True to perform a host-assisted migration.")
 2555             raise exception.VolumeMigrationFailed(reason=msg)
 2556 
 2557         if self.active_cluster['mvip'] == dst_config.san_ip:
 2558             LOG.info("Same cluster, nothing to do.")
 2559             return True, {}
 2560         else:
 2561             LOG.info("Source and destination clusters are different. "
 2562                      "A cluster migration will be performed.")
 2563             LOG.debug("Active cluster: [%(active)s], "
 2564                       "Destination: [%(dst)s]",
 2565                       {"active": self.active_cluster['mvip'],
 2566                        "dst": dst_config.san_ip})
 2567 
 2568             updates = self._do_intercluster_volume_migration(volume, host,
 2569                                                              dst_config)
 2570             LOG.info("Successfully migrated volume %(vol_id)s to %(host)s.",
 2571                      {"vol_id": volume.id, "host": host["host"]})
 2572             return True, updates
 2573 
 2574     def retype(self, ctxt, volume, new_type, diff, host):
 2575         """Convert the volume to be of the new type.
 2576 
 2577         Returns a boolean indicating whether the retype occurred and a dict
 2578         with the updates on the volume.
 2579 
 2580         :param ctxt: Context
 2581         :param volume: A dictionary describing the volume to migrate
 2582         :param new_type: A dictionary describing the volume type to convert to
 2583         :param diff: A dictionary with the difference between the two types
 2584         :param host: A dictionary describing the host to migrate to, where
 2585                      host['host'] is its name, and host['capabilities'] is a
 2586                      dictionary of its reported capabilities (Not Used).
 2587 
 2588         """
 2589         model_update = {}
 2590 
 2591         LOG.debug("Retyping volume %(vol)s to new type %(type)s",
 2592                   {'vol': volume.id, 'type': new_type})
 2593 
 2594         sfaccount = self._get_sfaccount(volume['project_id'])
 2595         params = {'accountID': sfaccount['accountID']}
 2596         sf_vol = self._get_sf_volume(volume['id'], params)
 2597 
 2598         if sf_vol is None:
 2599             raise exception.VolumeNotFound(volume_id=volume['id'])
 2600 
 2601         if self.replication_enabled:
 2602             ctxt = context.get_admin_context()
 2603             src_rep_type = self._set_rep_by_volume_type(
 2604                 ctxt, volume.volume_type_id)
 2605             dst_rep_type = self._set_rep_by_volume_type(ctxt, new_type['id'])
 2606 
 2607             if src_rep_type != dst_rep_type:
 2608                 if dst_rep_type:
 2609                     rep_settings = self._retrieve_replication_settings(volume)
 2610                     rep_params = self._get_default_volume_params(volume)
 2611                     volume['volumeID'] = (
 2612                         int(volume.provider_id.split()[0]))
 2613                     rep_updates = self._replicate_volume(volume, rep_params,
 2614                                                          sfaccount,
 2615                                                          rep_settings)
 2616                 else:
 2617                     rep_updates = self._disable_replication(volume)
 2618 
 2619                 if rep_updates:
 2620                     model_update.update(rep_updates)
 2621 
 2622         attributes = sf_vol['attributes']
 2623         attributes['retyped_at'] = timeutils.utcnow().isoformat()
 2624         params = {'volumeID': sf_vol['volumeID'], 'attributes': attributes}
 2625         qos = self._set_qos_by_volume_type(ctxt, new_type['id'],
 2626                                            volume.get('size'))
 2627 
 2628         if qos:
 2629             params['qos'] = qos
 2630 
 2631         self._issue_api_request('ModifyVolume', params)
 2632         return True, model_update
 2633 
 2634     def manage_existing(self, volume, external_ref):
 2635         """Manages an existing SolidFire Volume (import to Cinder).
 2636 
 2637         Renames the volume to match the name Cinder expects for it.
 2638         QoS, emulation, account/tenant and replication settings also
 2639         need to be taken into account.
 2640         """
 2641         sfid = external_ref.get('source-id', None)
 2642         sfname = external_ref.get('name', None)
 2643 
 2644         LOG.debug("Managing volume %(id)s with ref %(ref)s",
 2645                   {'id': volume.id, 'ref': external_ref})
 2646         if sfid is None:
 2647             raise SolidFireAPIException(_("Manage existing volume "
 2648                                           "requires 'source-id'."))
 2649 
 2650         # First get the volume on the SF cluster (MUST be active)
 2651         params = {'startVolumeID': sfid,
 2652                   'limit': 1}
 2653         vols = self._issue_api_request(
 2654             'ListActiveVolumes', params)['result']['volumes']
 2655 
 2656         sf_ref = vols[0]
 2657         sfaccount = self._get_create_account(volume['project_id'])
 2658 
 2659         import_time = volume['created_at'].isoformat()
 2660         attributes = {'uuid': volume['id'],
 2661                       'is_clone': 'False',
 2662                       'os_imported_at': import_time,
 2663                       'old_name': sfname}
 2664 
 2665         params = self._get_default_volume_params(volume)
 2666         params['volumeID'] = sf_ref['volumeID']
 2667         params['attributes'] = attributes
 2668         params.pop('totalSize')
 2669         self._issue_api_request('ModifyVolume',
 2670                                 params, version='5.0')
 2671 
 2672         try:
 2673             rep_updates = {}
 2674             rep_settings = self._retrieve_replication_settings(volume)
 2675             if self.replication_enabled and rep_settings:
 2676                 if len(sf_ref['volumePairs']) != 0:
 2677                     msg = _("Not possible to manage a volume with a "
 2678                             "replicated pair! Please split the volume pairs.")
 2679                     LOG.error(msg)
 2680                     raise SolidFireDriverException(msg)
 2681                 else:
 2682                     params = self._get_default_volume_params(volume)
 2683                     params['volumeID'] = sf_ref['volumeID']
 2684                     volume['volumeID'] = sf_ref['volumeID']
 2685                     params['totalSize'] = sf_ref['totalSize']
 2686                     rep_updates = self._replicate_volume(
 2687                         volume, params, sfaccount, rep_settings)
 2688         except Exception:
 2689             with excutils.save_and_reraise_exception():
 2690                 # When replication fails mid-process, we need to set the
 2691                 # volume properties back to the way they were before.
 2692                 LOG.error("Error trying to replicate volume %s",
 2693                           volume.id)
 2694                 params = {'volumeID': sf_ref['volumeID']}
 2695                 params['attributes'] = sf_ref['attributes']
 2696                 self._issue_api_request('ModifyVolume',
 2697                                         params, version='5.0')
 2698 
 2699         model_update = self._get_model_info(sfaccount, sf_ref['volumeID'])
 2700 
 2701         model_update.update(rep_updates)
 2702 
 2703         return model_update
 2704 
 2705     def manage_existing_get_size(self, volume, external_ref):
 2706         """Return size of an existing volume for manage_existing.
 2707 
 2708         external_ref is a dictionary of the form:
 2709         {'source-id': <ID of the volume on the SolidFire cluster>}
 2710         """
 2711         sfid = external_ref.get('source-id', None)
 2712         if sfid is None:
 2713             raise SolidFireAPIException(_("Manage existing get size "
 2714                                           "requires 'source-id'."))
 2715 
 2716         params = {'startVolumeID': int(sfid),
 2717                   'limit': 1}
 2718         vols = self._issue_api_request(
 2719             'ListActiveVolumes', params)['result']['volumes']
 2720         if len(vols) != 1:
 2721             msg = _("Provided volume id does not exist on SolidFire backend.")
 2722             raise SolidFireDriverException(msg)
 2723 
 2724         return int(math.ceil(float(vols[0]['totalSize']) / units.Gi))
 2725 
 2726     def unmanage(self, volume):
 2727         """Mark SolidFire Volume as unmanaged (export from Cinder)."""
 2728         sfaccount = self._get_sfaccount(volume['project_id'])
 2729         if sfaccount is None:
 2730             LOG.error("Account for Volume ID %s was not found on "
 2731                       "the SolidFire Cluster while attempting "
 2732                       "unmanage operation!", volume['id'])
 2733             raise SolidFireAPIException(_("Failed to find account "
 2734                                           "for volume."))
 2735 
 2736         params = {'accountID': sfaccount['accountID']}
 2737         sf_vol = self._get_sf_volume(volume['id'], params)
 2738         if sf_vol is None:
 2739             raise exception.VolumeNotFound(volume_id=volume['id'])
 2740 
 2741         export_time = timeutils.utcnow().isoformat()
 2742         attributes = sf_vol['attributes']
 2743         attributes['os_exported_at'] = export_time
 2744         params = {'volumeID': int(sf_vol['volumeID']),
 2745                   'attributes': attributes}
 2746 
 2747         self._issue_api_request('ModifyVolume',
 2748                                 params, version='5.0')
 2749 
 2750     def _failover_volume(self, tgt_vol, tgt_cluster, src_vol=None):
 2751         """Modify remote volume to R/W mode."""
 2752 
 2753         if src_vol:
 2754             # Put the src in tgt mode, assuming it's still available;
 2755             # catch the exception if the cluster isn't available and
 2756             # continue on.
 2757             params = {'volumeID': src_vol['volumeID'],
 2758                       'access': 'replicationTarget'}
 2759             try:
 2760                 self._issue_api_request('ModifyVolume', params)
 2761             except SolidFireAPIException:
 2762                 # FIXME
 2763                 pass
 2764 
 2765         # Now call out to the remote and make the tgt our new src
 2766         params = {'volumeID': tgt_vol['volumeID'],
 2767                   'access': 'readWrite'}
 2768         self._issue_api_request('ModifyVolume', params,
 2769                                 endpoint=tgt_cluster['endpoint'])
 2770 
 2771     def failover(self, context, volumes, secondary_id=None, groups=None):
 2772         """Failover to replication target.
 2773 
 2774         In order to do a failback, you MUST specify the original/default
 2775         cluster using the secondary_id option, simply by specifying:
 2776         `secondary_id=default`
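      
              For example, a failback request from the cinder CLI would
              typically look something like this (exact flags may vary by
              client version):
              `cinder failover-host <host>@<backend> --backend_id default`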
 2777         """
 2778         remote = None
 2779         failback = False
 2780         volume_updates = []
 2781 
 2782         if not self.replication_enabled:
 2783             LOG.error("SolidFire driver received failover_host "
 2784                       "request, however replication is NOT "
 2785                       "enabled.")
 2786             raise exception.UnableToFailOver(reason=_("Failover requested "
 2787                                                       "on non replicated "
 2788                                                       "backend."))
 2789 
 2790         # NOTE(erlon): For now we only support one replication target device.
 2791         # So, there are two cases we have to deal with here:
 2792         #   1. Caller specified a backend target to fail over to (this must
 2793         #      be the backend_id as defined in replication_device); any other
 2794         #      value will raise an error. If the caller does not specify
 2795         #      anything, we also fall into this case.
 2796         #   2. Caller wants to fail back and therefore sets backend_id=default.
 2797         secondary_id = secondary_id.lower() if secondary_id else None
 2798 
 2799         if secondary_id == "default" and not self.failed_over:
 2800             msg = _("SolidFire driver received failover_host "
 2801                     "specifying failback to default, but "
 2802                     "the host is not in `failed_over` "
 2803                     "state.")
 2804             raise exception.InvalidReplicationTarget(msg)
 2805         elif secondary_id == "default" and self.failed_over:
 2806             LOG.info("Failing back to primary cluster.")
 2807             remote = self._create_cluster_reference()
 2808             failback = True
 2809 
 2810         else:
 2811             repl_configs = self.configuration.replication_device[0]
 2812             if secondary_id and repl_configs['backend_id'] != secondary_id:
 2813                 msg = _("Replication id (%s) does not match the configured "
 2814                         "one in cinder.conf.") % secondary_id
 2815                 raise exception.InvalidReplicationTarget(msg)
 2816 
 2817             LOG.info("Failing over to secondary cluster %s.", secondary_id)
 2818             remote = self.cluster_pairs[0]
 2819 
 2820         LOG.debug("Target cluster to fail over to: %s.",
 2821                   {'name': remote['name'],
 2822                    'mvip': remote['mvip'],
 2823                    'clusterAPIVersion': remote['clusterAPIVersion']})
 2824 
 2825         target_vols = self._map_sf_volumes(volumes,
 2826                                            endpoint=remote['endpoint'])
 2827         LOG.debug("Total Cinder volumes found in target: %d",
 2828                   len(target_vols))
 2829 
 2830         primary_vols = None
 2831         try:
 2832             primary_vols = self._map_sf_volumes(volumes)
 2833             LOG.debug("Total Cinder volumes found in primary cluster: %d",
 2834                       len(primary_vols))
 2835         except SolidFireAPIException:
 2836             # API request failed on source. Failover/failback will skip
 2837             # further calls to it.
 2838             pass
 2839 
 2840         for v in volumes:
 2841             if v['status'] == "error":
 2842                 LOG.debug("Skipping operation for Volume %s as it is "
 2843                           "in error state.", v['id'])
 2844                 continue
 2845 
 2846             target_vlist = [sfv for sfv in target_vols
 2847                             if sfv['cinder_id'] == v['id']]
 2848 
 2849             if len(target_vlist) > 0:
 2850                 target_vol = target_vlist[0]
 2851 
 2852                 if primary_vols:
 2853                     vols = [sfv for sfv in primary_vols
 2854                             if sfv['cinder_id'] == v['id']]
 2855 
 2856                     if not vols:
 2857                         LOG.error("SolidFire driver cannot proceed. "
 2858                                   "Could not find volume %s in "
 2859                                   "back-end storage.", v['id'])
 2860                         raise exception.UnableToFailOver(
 2861                             reason=_("Cannot find cinder volume in "
 2862                                      "back-end storage."))
 2863 
 2864                     # Have at least one cinder volume in storage
 2865                     primary_vol = vols[0]
 2866                 else:
 2867                     primary_vol = None
 2868 
 2869                 LOG.info('Failing-over volume %s.', v.id)
 2870                 LOG.debug('Target vol: %s',
 2871                           {'access': target_vol['access'],
 2872                            'accountID': target_vol['accountID'],
 2873                            'name': target_vol['name'],
 2874                            'status': target_vol['status'],
 2875                            'volumeID': target_vol['volumeID']})
 2876                 # primary_vol is None if the primary cluster was unreachable.
 2877                 if primary_vol:
 2878                     LOG.debug('Primary vol: %s',
 2879                               {'access': primary_vol['access'],
 2880                                'accountID': primary_vol['accountID'],
 2881                                'name': primary_vol['name'],
                                    'status': primary_vol['status'],
                                    'volumeID': primary_vol['volumeID']})
 2882 
 2883                 try:
 2884                     self._failover_volume(target_vol, remote, primary_vol)
 2885 
 2886                     sf_account = self._get_create_account(
 2887                         v.project_id, endpoint=remote['endpoint'])
 2888                     LOG.debug("Target account: %s", sf_account['accountID'])
 2889 
 2890                     conn_info = self._build_connection_info(
 2891                         sf_account, target_vol, endpoint=remote['endpoint'])
 2892 
 2893                     # volume status defaults to failed-over
 2894                     replication_status = 'failed-over'
 2895 
 2896                     # in case of a failback, volume status must be reset to its
 2897                     # original state
 2898                     if failback:
 2899                         replication_status = 'enabled'
 2900 
 2901                     vol_updates = {
 2902                         'volume_id': v['id'],
 2903                         'updates': {
 2904                             'replication_status': replication_status
 2905                         }
 2906                     }
 2907                     vol_updates['updates'].update(conn_info)
 2908                     volume_updates.append(vol_updates)
 2909 
 2910                 except Exception:
 2911                     volume_updates.append({'volume_id': v['id'],
 2912                                            'updates': {'status': 'error', }})
 2913                     LOG.exception("Error trying to failover volume %s.",
 2914                                   v['id'])
 2915             else:
 2916                 volume_updates.append({'volume_id': v['id'],
 2917                                        'updates': {'status': 'error', }})
 2918 
 2919         return ('' if failback else remote['backend_id']), volume_updates, []
 2920 
 2921     def failover_completed(self, context, active_backend_id=None):
 2922         """Update volume node when `failover` is completed.
 2923 
 2924         Expects the following scenarios:
 2925             1) active_backend_id='' when failing back
 2926             2) active_backend_id=<secondary_backend_id> when failing over
 2927             3) When `failover` raises an Exception, this will be called
 2928                 with the previous active_backend_id (Will be empty string
 2929                 in case backend wasn't in failed-over state).
 2930         """
 2931         if not active_backend_id:
 2932             LOG.info("Failback completed. "
 2933                      "Switching active cluster back to default.")
 2934             self.active_cluster = self._create_cluster_reference()
 2935 
 2936             self.failed_over = False
 2937 
 2938             # Recreating cluster pairs after a successful failback
 2939             if self.configuration.replication_device:
 2940                 self._set_cluster_pairs()
 2941                 self.replication_enabled = True
 2942         else:
 2943             LOG.info("Failover completed. "
 2944                      "Switching active cluster to %s.", active_backend_id)
 2945             self.active_cluster = self.cluster_pairs[0]
 2946             self.failed_over = True
 2947 
 2948     def failover_host(self, context, volumes, secondary_id=None, groups=None):
 2949         """Failover to replication target in non-clustered deployment."""
 2950         active_cluster_id, volume_updates, group_updates = (
 2951             self.failover(context, volumes, secondary_id, groups))
 2952         self.failover_completed(context, active_cluster_id)
 2953         return active_cluster_id, volume_updates, group_updates
 2954 
 2955     def freeze_backend(self, context):
 2956         """Freeze backend notification."""
 2957         pass
 2958 
 2959     def thaw_backend(self, context):
 2960         """Thaw backend notification."""
 2961         pass
 2962 
 2963     def revert_to_snapshot(self, context, volume, snapshot):
 2964         """Revert a volume to a given snapshot."""
 2965 
 2966         sfaccount = self._get_sfaccount(volume.project_id)
 2967         params = {'accountID': sfaccount['accountID']}
 2968 
 2969         sf_vol = self._get_sf_volume(volume.id, params)
 2970         if sf_vol is None:
 2971             LOG.error("Volume ID %s was not found on "
 2972                       "the SolidFire Cluster while attempting "
 2973                       "revert_to_snapshot operation!", volume.id)
 2974             raise exception.VolumeNotFound(volume_id=volume['id'])
 2975 
 2976         params['volumeID'] = sf_vol['volumeID']
 2977 
 2978         sf_snap_name = '%s%s' % (self.configuration.sf_volume_prefix,
 2979                                  snapshot.id)
 2980         sf_snaps = self._get_sf_snapshots(sf_vol['volumeID'])
 2981         snap = next((s for s in sf_snaps if s["name"] == sf_snap_name),
 2982                     None)
 2983         if not snap:
 2984             LOG.error("Snapshot ID %s was not found on "
 2985                       "the SolidFire Cluster while attempting "
 2986                       "revert_to_snapshot operation!", snapshot.id)
 2987             raise exception.VolumeSnapshotNotFound(volume_id=volume.id)
 2988 
 2989         params['snapshotID'] = snap['snapshotID']
 2990         params['saveCurrentState'] = 'false'
 2991 
 2992         self._issue_api_request('RollbackToSnapshot',
 2993                                 params,
 2994                                 version='6.0')
 2995 
 2996 
 2997 class SolidFireISCSI(iscsi_driver.SanISCSITarget):
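          """iSCSI target helper for the SolidFire driver.
      
          Exports are authenticated with the SolidFire account's CHAP
          credentials; attributes not defined here are delegated to the
          owning SolidFire driver instance.
          """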
 2998     def __init__(self, *args, **kwargs):
 2999         super(SolidFireISCSI, self).__init__(*args, **kwargs)
 3000         self.sf_driver = kwargs.get('solidfire_driver')
 3001 
 3002     def __getattr__(self, attr):
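              """Delegate unknown attribute lookups to the SolidFire driver."""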
 3003         if hasattr(self.sf_driver, attr):
 3004             return getattr(self.sf_driver, attr)
 3005         else:
 3006             msg = _('Attribute: %s not found.') % attr
 3007             raise NotImplementedError(msg)
 3008 
 3009     def _do_iscsi_export(self, volume):
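              """Build CHAP provider_auth info from the SolidFire account."""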
 3010         sfaccount = self._get_sfaccount(volume['project_id'])
 3011         model_update = {}
 3012         model_update['provider_auth'] = ('CHAP %s %s'
 3013                                          % (sfaccount['username'],
 3014                                             sfaccount['targetSecret']))
 3015 
 3016         return model_update
 3017 
 3018     def create_export(self, context, volume, volume_path):
 3019         return self._do_iscsi_export(volume)
 3020 
 3021     def ensure_export(self, context, volume, volume_path):
 3022         try:
 3023             return self._do_iscsi_export(volume)
 3024         except SolidFireAPIException:
 3025             return None
 3026 
 3027     # The following are abstract methods that we make sure are caught
 3028     # and paid attention to.  In our case we don't use them,
 3029     # so they are just stubbed out here.
 3030     def remove_export(self, context, volume):
 3031         pass
 3032 
 3033     def terminate_connection(self, volume, connector, **kwargs):
 3034         pass
 3035 
 3036     def _sf_initialize_connection(self, volume, connector):
 3037         """Initialize the connection and return connection info.
 3038 
 3039            Optionally checks and utilizes volume access groups.
 3040         """
 3041         if self.configuration.sf_enable_vag:
 3042             iqn = connector['initiator']
 3043             provider_id = volume['provider_id']
 3044             vol_id = int(provider_id.split()[0])
 3045 
 3046             # safe_create_vag may opt to reuse vs create a vag, so we need to
 3047             # add our vol_id.
 3048             vag_id = self._safe_create_vag(iqn, vol_id)
 3049             self._add_volume_to_vag(vol_id, iqn, vag_id)
 3050 
 3051         # Continue along with default behavior
 3052         return super(SolidFireISCSI, self).initialize_connection(volume,
 3053                                                                  connector)
 3054 
 3055     def _sf_terminate_connection(self, volume, properties, force):
 3056         """Terminate the volume connection.
 3057 
 3058            Optionally remove volume from volume access group.
 3059            If the VAG is empty then the VAG is also removed.
 3060         """
 3061         if self.configuration.sf_enable_vag:
 3062             provider_id = volume['provider_id']
 3063             vol_id = int(provider_id.split()[0])
 3064 
 3065             if properties:
 3066                 iqn = properties['initiator']
 3067                 vag = self._get_vags_by_name(iqn)
 3068 
 3069                 if vag and not volume['multiattach']:
 3070                     # Multiattach causes problems with removing volumes from
 3071                     # VAGs.
 3072                     # Compromise solution for now is to remove multiattach
 3073                     # volumes from VAGs during volume deletion.
 3074                     vag = vag[0]
 3075                     vag_id = vag['volumeAccessGroupID']
 3076                     if [vol_id] == vag['volumes']:
 3077                         self._remove_vag(vag_id)
 3078                     elif vol_id in vag['volumes']:
 3079                         self._remove_volume_from_vag(vol_id, vag_id)
 3080             else:
 3081                 self._remove_volume_from_vags(vol_id)
 3082 
 3083         return super(SolidFireISCSI, self).terminate_connection(volume,
 3084                                                                 properties,
 3085                                                                 force=force)