"Fossies" - the Fresh Open Source Software Archive

Member "cinder-17.1.0/cinder/volume/drivers/pure.py" (8 Mar 2021, 121651 Bytes) of package /linux/misc/openstack/cinder-17.1.0.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) Python source code syntax highlighting (style: standard) with prefixed line numbers. Alternatively you can here view or download the uninterpreted source code file. For more information about "pure.py" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 17.0.1_vs_17.1.0.

    1 # Copyright (c) 2014 Pure Storage, Inc.
    2 # All Rights Reserved.
    3 #
    4 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
    5 #    not use this file except in compliance with the License. You may obtain
    6 #    a copy of the License at
    7 #
    8 #         http://www.apache.org/licenses/LICENSE-2.0
    9 #
   10 #    Unless required by applicable law or agreed to in writing, software
   11 #    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   12 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   13 #    License for the specific language governing permissions and limitations
   14 #    under the License.
   15 """Volume driver for Pure Storage FlashArray storage system.
   16 
   17 This driver requires Purity version 4.0.0 or later.
   18 """
   19 
   20 from distutils import version
   21 import functools
   22 import ipaddress
   23 import math
   24 import platform
   25 import re
   26 import uuid
   27 
   28 from oslo_config import cfg
   29 from oslo_log import log as logging
   30 from oslo_utils import excutils
   31 from oslo_utils import strutils
   32 from oslo_utils import units
   33 try:
   34     from purestorage import purestorage
   35 except ImportError:
   36     purestorage = None
   37 
   38 from cinder import exception
   39 from cinder.i18n import _
   40 from cinder import interface
   41 from cinder.objects import fields
   42 from cinder.objects import volume_type
   43 from cinder import utils
   44 from cinder.volume import configuration
   45 from cinder.volume import driver
   46 from cinder.volume.drivers.san import san
   47 from cinder.volume import volume_utils
   48 from cinder.zonemanager import utils as fczm_utils
   49 
   50 LOG = logging.getLogger(__name__)
   51 
   52 PURE_OPTS = [
   53     cfg.StrOpt("pure_api_token",
   54                help="REST API authorization token."),
   55     cfg.BoolOpt("pure_automatic_max_oversubscription_ratio",
   56                 default=True,
   57                 help="Automatically determine an oversubscription ratio based "
    58                      "on the current total data reduction values. If used, "
   59                      "this calculated value will override the "
   60                      "max_over_subscription_ratio config option."),
   61     cfg.StrOpt("pure_host_personality",
   62                default=None,
   63                choices=['aix', 'esxi', 'hitachi-vsp', 'hpux',
   64                         'oracle-vm-server', 'solaris', 'vms', None],
   65                help="Determines how the Purity system tunes the protocol used "
   66                     "between the array and the initiator."),
    67     # These are used as default settings. In the future these can be
    68     # overridden by settings in the volume type.
   69     cfg.IntOpt("pure_replica_interval_default", default=3600,
   70                help="Snapshot replication interval in seconds."),
   71     cfg.IntOpt("pure_replica_retention_short_term_default", default=14400,
   72                help="Retain all snapshots on target for this "
    73                     "time (in seconds)."),
   74     cfg.IntOpt("pure_replica_retention_long_term_per_day_default", default=3,
    75                help="Number of snapshots to retain for each day."),
   76     cfg.IntOpt("pure_replica_retention_long_term_default", default=7,
    77                help="Retain the per-day snapshots on target for this "
    78                     "time (in days)."),
   79     cfg.StrOpt("pure_replication_pg_name", default="cinder-group",
   80                help="Pure Protection Group name to use for async replication "
   81                     "(will be created if it does not exist)."),
   82     cfg.StrOpt("pure_replication_pod_name", default="cinder-pod",
   83                help="Pure Pod name to use for sync replication "
   84                     "(will be created if it does not exist)."),
   85     cfg.StrOpt("pure_iscsi_cidr", default="0.0.0.0/0",
   86                help="CIDR of FlashArray iSCSI targets hosts are allowed to "
   87                     "connect to. Default will allow connection to any "
   88                     "IP address."),
   89     cfg.BoolOpt("pure_eradicate_on_delete",
   90                 default=False,
   91                 help="When enabled, all Pure volumes, snapshots, and "
   92                      "protection groups will be eradicated at the time of "
   93                      "deletion in Cinder. Data will NOT be recoverable after "
   94                      "a delete with this set to True! When disabled, volumes "
   95                      "and snapshots will go into pending eradication state "
   96                      "and can be recovered."
   97                 )
   98 ]
   99 
  100 CONF = cfg.CONF
  101 CONF.register_opts(PURE_OPTS, group=configuration.SHARED_CONF_GROUP)
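
       # Illustrative example (editor's note, not part of the driver): a minimal
       # cinder.conf backend section exercising the options above might look like
       # the following. The section name, address and token are placeholders, and
       # the volume_driver path is an assumption pointing at one of the
       # protocol-specific driver classes defined later in this module.
       #
       #     [pure-1]
       #     volume_backend_name = pure-1
       #     volume_driver = cinder.volume.drivers.pure.PureISCSIDriver
       #     san_ip = 192.0.2.10
       #     pure_api_token = <REST API token>
       #     pure_eradicate_on_delete = False
       #     pure_automatic_max_oversubscription_ratio = True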
  102 
  103 INVALID_CHARACTERS = re.compile(r"[^-a-zA-Z0-9]")
  104 GENERATED_NAME = re.compile(r".*-[a-f0-9]{32}-cinder$")
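
       # Editor's note: GENERATED_NAME recognizes names created by this driver
       # itself, i.e. any prefix followed by a 32 character hex string and a
       # "-cinder" suffix. A hypothetical match would be
       # "myhost-1f2e3d4c5b6a79881726354433221100-cinder"; _disconnect_host
       # uses this to decide whether a Purity host is safe to auto-delete.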
  105 
  106 REPLICATION_TYPE_SYNC = "sync"
  107 REPLICATION_TYPE_ASYNC = "async"
  108 REPLICATION_TYPES = [REPLICATION_TYPE_SYNC, REPLICATION_TYPE_ASYNC]
  109 
  110 CHAP_SECRET_KEY = "PURE_TARGET_CHAP_SECRET"
  111 
  112 ERR_MSG_NOT_EXIST = "does not exist"
  113 ERR_MSG_HOST_NOT_EXIST = "Host " + ERR_MSG_NOT_EXIST
  114 ERR_MSG_NO_SUCH_SNAPSHOT = "No such volume or snapshot"
  115 ERR_MSG_PENDING_ERADICATION = "has been destroyed"
  116 ERR_MSG_ALREADY_EXISTS = "already exists"
  117 ERR_MSG_COULD_NOT_BE_FOUND = "could not be found"
  118 ERR_MSG_ALREADY_INCLUDES = "already includes"
  119 ERR_MSG_ALREADY_ALLOWED = "already allowed on"
  120 ERR_MSG_NOT_CONNECTED = "is not connected"
  121 ERR_MSG_ALREADY_BELONGS = "already belongs to"
  122 ERR_MSG_EXISTING_CONNECTIONS = "cannot be deleted due to existing connections"
  123 ERR_MSG_ALREADY_IN_USE = "already in use"
  124 
  125 EXTRA_SPECS_REPL_ENABLED = "replication_enabled"
  126 EXTRA_SPECS_REPL_TYPE = "replication_type"
  127 
  128 UNMANAGED_SUFFIX = '-unmanaged'
  129 SYNC_REPLICATION_REQUIRED_API_VERSIONS = ['1.13', '1.14']
  130 ASYNC_REPLICATION_REQUIRED_API_VERSIONS = [
  131     '1.3', '1.4', '1.5'] + SYNC_REPLICATION_REQUIRED_API_VERSIONS
  132 MANAGE_SNAP_REQUIRED_API_VERSIONS = [
  133     '1.4', '1.5'] + SYNC_REPLICATION_REQUIRED_API_VERSIONS
  134 PERSONALITY_REQUIRED_API_VERSIONS = ['1.14']
  135 
  136 REPL_SETTINGS_PROPAGATE_RETRY_INTERVAL = 5  # 5 seconds
  137 REPL_SETTINGS_PROPAGATE_MAX_RETRIES = 36  # 36 * 5 = 180 seconds
  138 
  139 HOST_CREATE_MAX_RETRIES = 5
  140 
  141 USER_AGENT_BASE = 'OpenStack Cinder'
  142 
  143 
  144 class PureDriverException(exception.VolumeDriverException):
  145     message = _("Pure Storage Cinder driver failure: %(reason)s")
  146 
  147 
  148 class PureRetryableException(exception.VolumeBackendAPIException):
  149     message = _("Retryable Pure Storage Exception encountered")
  150 
  151 
  152 def pure_driver_debug_trace(f):
  153     """Log the method entrance and exit including active backend name.
  154 
  155     This should only be used on VolumeDriver class methods. It depends on
  156     having a 'self' argument that is a PureBaseVolumeDriver.
  157     """
  158     @functools.wraps(f)
  159     def wrapper(*args, **kwargs):
  160         driver = args[0]  # self
  161         cls_name = driver.__class__.__name__
  162         method_name = "%(cls_name)s.%(method)s" % {"cls_name": cls_name,
  163                                                    "method": f.__name__}
  164         backend_name = driver._get_current_array().backend_id
  165         LOG.debug("[%(backend_name)s] Enter %(method_name)s, args=%(args)s,"
  166                   " kwargs=%(kwargs)s",
  167                   {
  168                       "method_name": method_name,
  169                       "backend_name": backend_name,
  170                       "args": args,
  171                       "kwargs": kwargs,
  172                   })
  173         result = f(*args, **kwargs)
  174         LOG.debug("[%(backend_name)s] Leave %(method_name)s, ret=%(result)s",
  175                   {
  176                       "method_name": method_name,
  177                       "backend_name": backend_name,
  178                       "result": result,
  179                   })
  180         return result
  181 
  182     return wrapper
  183 
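       # Illustrative sketch (editor's note, not part of the driver): the
       # decorator above is applied to driver methods defined below, e.g.
       #
       #     @pure_driver_debug_trace
       #     def extend_volume(self, volume, new_size):
       #         ...
       #
       # and emits paired debug lines roughly like the following, where the
       # class name, backend id and arguments are placeholders:
       #
       #     [pure-1] Enter PureISCSIDriver.extend_volume, args=(...), kwargs={}
       #     [pure-1] Leave PureISCSIDriver.extend_volume, ret=None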
  184 
  185 class PureBaseVolumeDriver(san.SanDriver):
  186     """Performs volume management on Pure Storage FlashArray."""
  187 
  188     SUPPORTED_REST_API_VERSIONS = ['1.2', '1.3', '1.4', '1.5', '1.13', '1.14']
  189 
  190     # ThirdPartySystems wiki page
  191     CI_WIKI_NAME = "Pure_Storage_CI"
  192 
  193     def __init__(self, *args, **kwargs):
  194         execute = kwargs.pop("execute", utils.execute)
  195         super(PureBaseVolumeDriver, self).__init__(execute=execute, *args,
  196                                                    **kwargs)
  197         self.configuration.append_config_values(PURE_OPTS)
  198         self._array = None
  199         self._storage_protocol = None
  200         self._backend_name = (self.configuration.volume_backend_name or
  201                               self.__class__.__name__)
  202         self._replication_target_arrays = []
  203         self._active_cluster_target_arrays = []
  204         self._uniform_active_cluster_target_arrays = []
  205         self._replication_pg_name = None
  206         self._replication_pod_name = None
  207         self._replication_interval = None
  208         self._replication_retention_short_term = None
  209         self._replication_retention_long_term = None
  210         self._replication_retention_long_term_per_day = None
  211         self._async_replication_retention_policy = None
  212         self._is_replication_enabled = False
  213         self._is_active_cluster_enabled = False
  214         self._active_backend_id = kwargs.get('active_backend_id', None)
  215         self._failed_over_primary_array = None
  216         self._user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % {
  217             'base': USER_AGENT_BASE,
  218             'class': self.__class__.__name__,
  219             'version': self.VERSION,
  220             'platform': platform.platform()
  221         }
  222 
  223     @classmethod
  224     def get_driver_options(cls):
  225         additional_opts = cls._get_oslo_driver_opts(
  226             'san_ip', 'driver_ssl_cert_verify', 'driver_ssl_cert_path',
  227             'use_chap_auth', 'replication_device', 'reserved_percentage',
  228             'max_over_subscription_ratio')
  229         return PURE_OPTS + additional_opts
  230 
  231     def parse_replication_configs(self):
  232         self._replication_pg_name = (
  233             self.configuration.pure_replication_pg_name)
  234         self._replication_pod_name = (
  235             self.configuration.pure_replication_pod_name)
  236         self._replication_interval = (
  237             self.configuration.pure_replica_interval_default)
  238         self._replication_retention_short_term = (
  239             self.configuration.pure_replica_retention_short_term_default)
  240         self._replication_retention_long_term = (
  241             self.configuration.pure_replica_retention_long_term_default)
  242         self._replication_retention_long_term_per_day = (
  243             self.configuration.
  244             pure_replica_retention_long_term_per_day_default)
  245 
  246         self._async_replication_retention_policy = (
  247             self._generate_replication_retention())
  248 
  249         replication_devices = self.configuration.safe_get(
  250             'replication_device')
  251 
  252         if replication_devices:
  253             for replication_device in replication_devices:
  254                 backend_id = replication_device["backend_id"]
  255                 san_ip = replication_device["san_ip"]
  256                 api_token = replication_device["api_token"]
  257                 verify_https = strutils.bool_from_string(
  258                     replication_device.get("ssl_cert_verify", False))
  259                 ssl_cert_path = replication_device.get("ssl_cert_path", None)
  260                 repl_type = replication_device.get("type",
  261                                                    REPLICATION_TYPE_ASYNC)
  262                 uniform = strutils.bool_from_string(
  263                     replication_device.get("uniform", False))
  264 
  265                 target_array = self._get_flasharray(
  266                     san_ip,
  267                     api_token,
  268                     verify_https=verify_https,
  269                     ssl_cert_path=ssl_cert_path
  270                 )
  271 
  272                 api_version = target_array.get_rest_version()
  273 
  274                 if repl_type == REPLICATION_TYPE_ASYNC:
  275                     req_api_versions = ASYNC_REPLICATION_REQUIRED_API_VERSIONS
  276                 elif repl_type == REPLICATION_TYPE_SYNC:
  277                     req_api_versions = SYNC_REPLICATION_REQUIRED_API_VERSIONS
  278                 else:
   279                     msg = _('Invalid replication type specified: %s') % repl_type
  280                     raise PureDriverException(reason=msg)
  281 
  282                 if api_version not in req_api_versions:
  283                     msg = _('Unable to do replication with Purity REST '
  284                             'API version %(api_version)s, requires one of '
  285                             '%(required_versions)s.') % {
  286                         'api_version': api_version,
  287                         'required_versions':
   288                             req_api_versions
  289                     }
  290                     raise PureDriverException(reason=msg)
  291 
  292                 target_array_info = target_array.get()
  293                 target_array.array_name = target_array_info["array_name"]
  294                 target_array.array_id = target_array_info["id"]
  295                 target_array.replication_type = repl_type
  296                 target_array.backend_id = backend_id
  297                 target_array.uniform = uniform
  298 
  299                 LOG.info("Added secondary array: backend_id='%s', name='%s',"
  300                          " id='%s', type='%s', uniform='%s'",
  301                          target_array.backend_id,
  302                          target_array.array_name,
  303                          target_array.array_id,
  304                          target_array.replication_type,
  305                          target_array.uniform)
  306 
  307                 self._replication_target_arrays.append(target_array)
  308                 if repl_type == REPLICATION_TYPE_SYNC:
  309                     self._active_cluster_target_arrays.append(target_array)
  310                     if target_array.uniform:
  311                         self._uniform_active_cluster_target_arrays.append(
  312                             target_array)
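
           # Illustrative example (editor's note, not part of the driver): the
           # replication_device entries parsed above come from cinder.conf. A
           # hypothetical entry, with placeholder values, could look like:
           #
           #     replication_device = backend_id:pure-target-1,
           #         san_ip:192.0.2.20,api_token:<token>,type:sync,uniform:true
           #
           # (shown wrapped here; in cinder.conf it is a single line.)
           # backend_id, san_ip and api_token are required here; type, uniform,
           # ssl_cert_verify and ssl_cert_path are optional.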
  313 
  314     def do_setup(self, context):
  315         """Performs driver initialization steps that could raise exceptions."""
  316         if purestorage is None:
  317             msg = _("Missing 'purestorage' python module, ensure the library"
  318                     " is installed and available.")
   319             raise PureDriverException(reason=msg)
  320 
  321         # Raises PureDriverException if unable to connect and PureHTTPError
  322         # if unable to authenticate.
  323         purestorage.FlashArray.supported_rest_versions = \
  324             self.SUPPORTED_REST_API_VERSIONS
  325         self._array = self._get_flasharray(
  326             self.configuration.san_ip,
  327             api_token=self.configuration.pure_api_token,
  328             verify_https=self.configuration.driver_ssl_cert_verify,
  329             ssl_cert_path=self.configuration.driver_ssl_cert_path
  330         )
  331 
  332         array_info = self._array.get()
  333         self._array.array_name = array_info["array_name"]
  334         self._array.array_id = array_info["id"]
  335         self._array.replication_type = None
  336         self._array.backend_id = self._backend_name
  337         self._array.preferred = True
  338         self._array.uniform = True
  339 
  340         LOG.info("Primary array: backend_id='%s', name='%s', id='%s'",
  341                  self.configuration.config_group,
  342                  self._array.array_name,
  343                  self._array.array_id)
  344 
  345         self.do_setup_replication()
  346 
  347         # If we have failed over at some point we need to adjust our current
  348         # array based on the one that we have failed over to
  349         if (self._active_backend_id is not None and
  350                 self._active_backend_id != self._array.backend_id):
  351             for secondary_array in self._replication_target_arrays:
  352                 if secondary_array.backend_id == self._active_backend_id:
  353                     self._swap_replication_state(self._array, secondary_array)
  354                     break
  355 
  356     def do_setup_replication(self):
  357         replication_devices = self.configuration.safe_get(
  358             'replication_device')
  359         if replication_devices:
  360             self.parse_replication_configs()
  361             self._is_replication_enabled = True
  362 
  363             if len(self._active_cluster_target_arrays) > 0:
  364                 self._is_active_cluster_enabled = True
  365 
  366                 # Only set this up on sync rep arrays
  367                 self._setup_replicated_pods(
  368                     self._get_current_array(),
  369                     self._active_cluster_target_arrays,
  370                     self._replication_pod_name
  371                 )
  372 
  373             # Even if the array is configured for sync rep set it
  374             # up to handle async too
  375             self._setup_replicated_pgroups(
  376                 self._get_current_array(),
  377                 self._replication_target_arrays,
  378                 self._replication_pg_name,
  379                 self._replication_interval,
  380                 self._async_replication_retention_policy
  381             )
  382 
  383     def check_for_setup_error(self):
  384         # Avoid inheriting check_for_setup_error from SanDriver, which checks
  385         # for san_password or san_private_key, not relevant to our driver.
  386         pass
  387 
  388     def update_provider_info(self, volumes, snapshots):
  389         """Ensure we have a provider_id set on volumes.
  390 
   391         If a provider_id is already set then skip it; if it is missing then
   392         we will update it based on the volume object. We can always compute
   393         the id if we have the full volume object, but not all driver APIs
   394         give us that info.
  395 
  396         We don't care about snapshots, they just use the volume's provider_id.
  397         """
  398         vol_updates = []
  399         for vol in volumes:
  400             if not vol.provider_id:
  401                 vol_updates.append({
  402                     'id': vol.id,
  403                     'provider_id': self._generate_purity_vol_name(vol),
  404                 })
  405         return vol_updates, None
  406 
  407     @pure_driver_debug_trace
  408     def revert_to_snapshot(self, context, volume, snapshot):
  409         """Is called to perform revert volume from snapshot.
  410 
  411         :param context: Our working context.
  412         :param volume: the volume to be reverted.
   413         :param snapshot: the snapshot to revert the volume to.
   414         :returns: None
  415         """
  416         vol_name = self._generate_purity_vol_name(volume)
  417         if snapshot['group_snapshot'] or snapshot['cgsnapshot']:
  418             snap_name = self._get_pgroup_snap_name_from_snapshot(snapshot)
  419         else:
  420             snap_name = self._get_snap_name(snapshot)
  421 
  422         LOG.debug("Reverting from snapshot %(snap)s to volume "
  423                   "%(vol)s", {'vol': vol_name, 'snap': snap_name})
  424 
  425         current_array = self._get_current_array()
  426 
  427         current_array.copy_volume(snap_name, vol_name, overwrite=True)
  428 
  429     @pure_driver_debug_trace
  430     def create_volume(self, volume):
  431         """Creates a volume."""
  432         vol_name = self._generate_purity_vol_name(volume)
  433         vol_size = volume["size"] * units.Gi
  434         current_array = self._get_current_array()
  435         current_array.create_volume(vol_name, vol_size)
  436 
  437         return self._setup_volume(current_array, volume, vol_name)
  438 
  439     @pure_driver_debug_trace
  440     def create_volume_from_snapshot(self, volume, snapshot):
  441         """Creates a volume from a snapshot."""
  442         vol_name = self._generate_purity_vol_name(volume)
  443         if snapshot['group_snapshot'] or snapshot['cgsnapshot']:
  444             snap_name = self._get_pgroup_snap_name_from_snapshot(snapshot)
  445         else:
  446             snap_name = self._get_snap_name(snapshot)
  447 
  448         current_array = self._get_current_array()
  449 
  450         current_array.copy_volume(snap_name, vol_name)
  451         self._extend_if_needed(current_array,
  452                                vol_name,
  453                                snapshot["volume_size"],
  454                                volume["size"])
  455         return self._setup_volume(current_array, volume, vol_name)
  456 
  457     def _setup_volume(self, array, volume, purity_vol_name):
   458         # Set provider_id early so other methods can use it even though
   459         # it won't be set in the Cinder DB until we return from create_volume.
  460         volume.provider_id = purity_vol_name
  461         async_enabled = False
  462         try:
  463             self._add_to_group_if_needed(volume, purity_vol_name)
  464             async_enabled = self._enable_async_replication_if_needed(
  465                 array, volume)
  466         except purestorage.PureError as err:
  467             with excutils.save_and_reraise_exception():
   468                 LOG.error("Failed to add volume to pgroup, removing "
   469                           "volume. Error: %s", err)
  470                 array.destroy_volume(purity_vol_name)
  471                 array.eradicate_volume(purity_vol_name)
  472 
  473         repl_status = fields.ReplicationStatus.DISABLED
  474         if self._is_vol_in_pod(purity_vol_name) or async_enabled:
  475             repl_status = fields.ReplicationStatus.ENABLED
  476 
  477         model_update = {
  478             'provider_id': purity_vol_name,
  479             'replication_status': repl_status,
  480         }
  481         return model_update
  482 
  483     def _enable_async_replication_if_needed(self, array, volume):
  484         repl_type = self._get_replication_type_from_vol_type(
  485             volume.volume_type)
  486         if repl_type == REPLICATION_TYPE_ASYNC:
  487             self._enable_async_replication(array, volume)
  488             return True
  489         return False
  490 
  491     def _enable_async_replication(self, array, volume):
  492         """Add volume to replicated protection group."""
  493         try:
  494             array.set_pgroup(self._replication_pg_name,
  495                              addvollist=[self._get_vol_name(volume)])
  496         except purestorage.PureHTTPError as err:
  497             with excutils.save_and_reraise_exception() as ctxt:
  498                 if (err.code == 400 and
  499                         ERR_MSG_ALREADY_BELONGS in err.text):
   500                     # Happens if the volume was already added to the PG.
  501                     ctxt.reraise = False
  502                     LOG.warning("Adding Volume to Protection Group "
  503                                 "failed with message: %s", err.text)
  504 
  505     @pure_driver_debug_trace
  506     def create_cloned_volume(self, volume, src_vref):
  507         """Creates a clone of the specified volume."""
  508         vol_name = self._generate_purity_vol_name(volume)
  509         src_name = self._get_vol_name(src_vref)
  510 
  511         # Check which backend the source volume is on. In case of failover
  512         # the source volume may be on the secondary array.
  513         current_array = self._get_current_array()
  514         current_array.copy_volume(src_name, vol_name)
  515         self._extend_if_needed(current_array,
  516                                vol_name,
  517                                src_vref["size"],
  518                                volume["size"])
  519 
  520         return self._setup_volume(current_array, volume, vol_name)
  521 
  522     def _extend_if_needed(self, array, vol_name, src_size, vol_size):
  523         """Extend the volume from size src_size to size vol_size."""
  524         if vol_size > src_size:
  525             vol_size = vol_size * units.Gi
  526             array.extend_volume(vol_name, vol_size)
  527 
  528     @pure_driver_debug_trace
  529     def delete_volume(self, volume):
  530         """Disconnect all hosts and delete the volume"""
  531         vol_name = self._get_vol_name(volume)
  532         current_array = self._get_current_array()
  533         try:
   534             # Do a pass over remaining connections on the current array
   535             # and, if we can, try to remove any remote connections too.
  536             if (current_array.get_rest_version() in
  537                     SYNC_REPLICATION_REQUIRED_API_VERSIONS):
  538                 hosts = current_array.list_volume_private_connections(
  539                     vol_name, remote=True)
  540             else:
  541                 hosts = current_array.list_volume_private_connections(
  542                     vol_name)
  543             for host_info in hosts:
  544                 host_name = host_info["host"]
  545                 self._disconnect_host(current_array, host_name, vol_name)
  546 
  547             # Finally, it should be safe to delete the volume
  548             current_array.destroy_volume(vol_name)
  549             if self.configuration.pure_eradicate_on_delete:
  550                 current_array.eradicate_volume(vol_name)
  551         except purestorage.PureHTTPError as err:
  552             with excutils.save_and_reraise_exception() as ctxt:
  553                 if (err.code == 400 and
  554                         ERR_MSG_NOT_EXIST in err.text):
  555                     # Happens if the volume does not exist.
  556                     ctxt.reraise = False
  557                     LOG.warning("Volume deletion failed with message: %s",
  558                                 err.text)
  559 
  560     @pure_driver_debug_trace
  561     def create_snapshot(self, snapshot):
  562         """Creates a snapshot."""
  563 
  564         # Get current array in case we have failed over via replication.
  565         current_array = self._get_current_array()
  566         vol_name, snap_suff = self._get_snap_name(snapshot).split(".")
  567         current_array.create_snapshot(vol_name, suffix=snap_suff)
  568 
  569     @pure_driver_debug_trace
  570     def delete_snapshot(self, snapshot):
  571         """Deletes a snapshot."""
  572 
  573         # Get current array in case we have failed over via replication.
  574         current_array = self._get_current_array()
  575 
  576         snap_name = self._get_snap_name(snapshot)
  577         try:
  578             current_array.destroy_volume(snap_name)
  579             if self.configuration.pure_eradicate_on_delete:
  580                 current_array.eradicate_volume(snap_name)
  581         except purestorage.PureHTTPError as err:
  582             with excutils.save_and_reraise_exception() as ctxt:
  583                 if err.code == 400 and (
  584                         ERR_MSG_NOT_EXIST in err.text or
  585                         ERR_MSG_NO_SUCH_SNAPSHOT in err.text or
  586                         ERR_MSG_PENDING_ERADICATION in err.text):
  587                     # Happens if the snapshot does not exist.
  588                     ctxt.reraise = False
  589                     LOG.warning("Unable to delete snapshot, assuming "
  590                                 "already deleted. Error: %s", err.text)
  591 
  592     def ensure_export(self, context, volume):
  593         pass
  594 
  595     def create_export(self, context, volume, connector):
  596         pass
  597 
  598     def initialize_connection(self, volume, connector):
  599         """Connect the volume to the specified initiator in Purity.
  600 
  601         This implementation is specific to the host type (iSCSI, FC, etc).
  602         """
  603         raise NotImplementedError
  604 
  605     def _get_host(self, array, connector, remote=False):
  606         """Get a Purity Host that corresponds to the host in the connector.
  607 
  608         This implementation is specific to the host type (iSCSI, FC, etc).
  609         """
  610         raise NotImplementedError
  611 
  612     @pure_driver_debug_trace
  613     def _disconnect(self, array, volume, connector, remove_remote_hosts=False):
  614         """Disconnect the volume from the host described by the connector.
  615 
  616         If no connector is specified it will remove *all* attachments for
  617         the volume.
  618 
   619         Returns True if it was the host's last connection.
  620         """
  621         vol_name = self._get_vol_name(volume)
  622         if connector is None:
   623             # If no connector was provided it is a force-detach; remove all
  624             # host connections for the volume
  625             LOG.warning("Removing ALL host connections for volume %s",
  626                         vol_name)
  627             if (array.get_rest_version() in
  628                     SYNC_REPLICATION_REQUIRED_API_VERSIONS):
  629                 # Remote connections are only allowed in newer API versions
  630                 connections = array.list_volume_private_connections(
  631                     vol_name, remote=True)
  632             else:
  633                 connections = array.list_volume_private_connections(vol_name)
  634 
  635             for connection in connections:
  636                 self._disconnect_host(array, connection['host'], vol_name)
  637             return False
  638         else:
  639             # Normal case with a specific initiator to detach it from
  640             hosts = self._get_host(array, connector,
  641                                    remote=remove_remote_hosts)
  642             if hosts:
  643                 any_in_use = False
  644                 for host in hosts:
  645                     host_name = host["name"]
  646                     host_in_use = self._disconnect_host(array,
  647                                                         host_name,
  648                                                         vol_name)
  649                     any_in_use = any_in_use or host_in_use
  650                 return any_in_use
  651             else:
  652                 LOG.error("Unable to disconnect host from volume, could not "
  653                           "determine Purity host on array %s",
  654                           array.backend_id)
  655                 return False
  656 
  657     @pure_driver_debug_trace
  658     def terminate_connection(self, volume, connector, **kwargs):
  659         """Terminate connection."""
  660         vol_name = self._get_vol_name(volume)
  661         if self._is_vol_in_pod(vol_name):
   662             # Try to disconnect from each host; they may not be online,
   663             # though, so don't treat failures as a problem.
  664             for array in self._uniform_active_cluster_target_arrays:
  665                 try:
  666                     self._disconnect(array, volume, connector,
  667                                      remove_remote_hosts=False)
  668                 except purestorage.PureError as err:
  669                     # Swallow any exception, just warn and continue
  670                     LOG.warning("Disconnect on secondary array failed with"
  671                                 " message: %(msg)s", {"msg": err.text})
  672         # Now disconnect from the current array
  673         self._disconnect(self._get_current_array(), volume,
  674                          connector, remove_remote_hosts=False)
  675 
  676     @pure_driver_debug_trace
  677     def _disconnect_host(self, array, host_name, vol_name):
  678         """Return value indicates if host should be cleaned up."""
  679         try:
  680             array.disconnect_host(host_name, vol_name)
  681         except purestorage.PureHTTPError as err:
  682             with excutils.save_and_reraise_exception() as ctxt:
  683                 if err.code == 400 and (ERR_MSG_NOT_CONNECTED in err.text or
  684                                         ERR_MSG_HOST_NOT_EXIST in err.text):
  685                     # Happens if the host and volume are not connected or
  686                     # the host has already been deleted
  687                     ctxt.reraise = False
  688                     LOG.warning("Disconnection failed with message: "
  689                                 "%(msg)s.", {"msg": err.text})
  690 
  691         # If it is a remote host, call it quits here. We cannot delete a remote
  692         # host even if it should be cleaned up now.
  693         if ':' in host_name:
  694             return
  695 
  696         connections = None
  697         try:
  698             connections = array.list_host_connections(host_name, private=True)
  699         except purestorage.PureHTTPError as err:
  700             with excutils.save_and_reraise_exception() as ctxt:
  701                 if err.code == 400 and ERR_MSG_NOT_EXIST in err.text:
  702                     ctxt.reraise = False
  703 
  704         # Assume still used if volumes are attached
  705         host_still_used = bool(connections)
  706 
  707         if GENERATED_NAME.match(host_name) and not host_still_used:
  708             LOG.info("Attempting to delete unneeded host %(host_name)r.",
  709                      {"host_name": host_name})
  710             try:
  711                 array.delete_host(host_name)
  712                 host_still_used = False
  713             except purestorage.PureHTTPError as err:
  714                 with excutils.save_and_reraise_exception() as ctxt:
  715                     if err.code == 400:
  716                         if ERR_MSG_NOT_EXIST in err.text:
  717                             # Happens if the host is already deleted.
  718                             # This is fine though, just log so we know what
  719                             # happened.
  720                             ctxt.reraise = False
  721                             host_still_used = False
  722                             LOG.debug("Purity host deletion failed: "
  723                                       "%(msg)s.", {"msg": err.text})
  724                         if ERR_MSG_EXISTING_CONNECTIONS in err.text:
  725                             # If someone added a connection underneath us
  726                             # that's ok, just keep going.
  727                             ctxt.reraise = False
  728                             host_still_used = True
  729                             LOG.debug("Purity host deletion ignored: %(msg)s",
  730                                       {"msg": err.text})
  731         return not host_still_used
  732 
  733     @pure_driver_debug_trace
  734     def _update_volume_stats(self):
  735         """Set self._stats with relevant information."""
  736         current_array = self._get_current_array()
  737 
  738         # Collect info from the array
  739         space_info = current_array.get(space=True)
  740         if not isinstance(space_info, dict):
  741             # Some versions of the API give back a list of dicts, always use 0
  742             space_info = space_info[0]
  743         perf_info = current_array.get(action='monitor')[0]  # Always index 0
  744         hosts = current_array.list_hosts()
  745         snaps = current_array.list_volumes(snap=True, pending=True)
  746         pgroups = current_array.list_pgroups(pending=True)
  747 
  748         # Perform some translations and calculations
  749         total_capacity = float(space_info["capacity"]) / units.Gi
  750         used_space = float(space_info["total"]) / units.Gi
  751         free_space = float(total_capacity - used_space)
  752         prov_space, total_vols = self._get_provisioned_space()
  753         total_hosts = len(hosts)
  754         total_snaps = len(snaps)
  755         total_pgroups = len(pgroups)
  756         provisioned_space = float(prov_space) / units.Gi
  757         thin_provisioning = self._get_thin_provisioning(provisioned_space,
  758                                                         used_space)
  759 
  760         # Start with some required info
  761         data = dict(
  762             volume_backend_name=self._backend_name,
  763             vendor_name='Pure Storage',
  764             driver_version=self.VERSION,
  765             storage_protocol=self._storage_protocol,
  766         )
  767 
  768         # Add flags for supported features
  769         data['consistencygroup_support'] = True
  770         data['thin_provisioning_support'] = True
  771         data['multiattach'] = True
  772         data['QoS_support'] = False
  773 
  774         # Add capacity info for scheduler
  775         data['total_capacity_gb'] = total_capacity
  776         data['free_capacity_gb'] = free_space
  777         data['reserved_percentage'] = self.configuration.reserved_percentage
  778         data['provisioned_capacity'] = provisioned_space
  779         data['max_over_subscription_ratio'] = thin_provisioning
  780 
  781         # Add the filtering/goodness functions
  782         data['filter_function'] = self.get_filter_function()
  783         data['goodness_function'] = self.get_goodness_function()
  784 
  785         # Add array metadata counts for filtering and weighing functions
  786         data['total_volumes'] = total_vols
  787         data['total_snapshots'] = total_snaps
  788         data['total_hosts'] = total_hosts
  789         data['total_pgroups'] = total_pgroups
  790 
  791         # Add performance stats for filtering and weighing functions
  792         #  IOPS
  793         data['writes_per_sec'] = perf_info['writes_per_sec']
  794         data['reads_per_sec'] = perf_info['reads_per_sec']
  795 
  796         #  Bandwidth
  797         data['input_per_sec'] = perf_info['input_per_sec']
  798         data['output_per_sec'] = perf_info['output_per_sec']
  799 
  800         #  Latency
  801         data['usec_per_read_op'] = perf_info['usec_per_read_op']
  802         data['usec_per_write_op'] = perf_info['usec_per_write_op']
  803         data['queue_depth'] = perf_info['queue_depth']
  804 
  805         #  Replication
  806         data["replication_enabled"] = self._is_replication_enabled
  807         repl_types = []
  808         if self._is_replication_enabled:
  809             repl_types = [REPLICATION_TYPE_ASYNC]
  810         if self._is_active_cluster_enabled:
  811             repl_types.append(REPLICATION_TYPE_SYNC)
  812         data["replication_type"] = repl_types
  813         data["replication_count"] = len(self._replication_target_arrays)
  814         data["replication_targets"] = [array.backend_id for array
  815                                        in self._replication_target_arrays]
  816         self._stats = data
  817 
  818     def _get_provisioned_space(self):
  819         """Sum up provisioned size of all volumes on array"""
  820         volumes = self._get_current_array().list_volumes(pending=True)
  821         return sum(item["size"] for item in volumes), len(volumes)
  822 
  823     def _get_thin_provisioning(self, provisioned_space, used_space):
  824         """Get the current value for the thin provisioning ratio.
  825 
   826         If pure_automatic_max_oversubscription_ratio is True we will calculate
   827         a value; if not, we will respect the configuration option for
   828         max_over_subscription_ratio.
  829         """
  830 
  831         if (self.configuration.pure_automatic_max_oversubscription_ratio and
  832                 used_space != 0 and provisioned_space != 0):
   833             # The array has data, so derive the ratio from actual usage.
   834             # (For an empty array we cannot calculate a meaningful ratio
   835             # and instead fall back to the config option below as a
   836             # starting point; once volumes are created and data is stored
   837             # a much more accurate number is presented based on usage.)
  838             thin_provisioning = provisioned_space / used_space
  839         else:
  840             thin_provisioning = volume_utils.get_max_over_subscription_ratio(
  841                 self.configuration.max_over_subscription_ratio,
  842                 supports_auto=True)
  843 
  844         return thin_provisioning
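
           # Worked example (editor's note, made-up numbers): if the volumes on
           # the array add up to 10240 GiB provisioned but only 1024 GiB of
           # space is actually used, the automatic ratio above evaluates to
           # 10240 / 1024 = 10.0, i.e. the scheduler may oversubscribe this
           # backend tenfold; with the automatic option disabled the configured
           # max_over_subscription_ratio is returned instead.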
  845 
  846     @pure_driver_debug_trace
  847     def extend_volume(self, volume, new_size):
  848         """Extend volume to new_size."""
  849 
  850         # Get current array in case we have failed over via replication.
  851         current_array = self._get_current_array()
  852 
  853         vol_name = self._get_vol_name(volume)
  854         new_size = new_size * units.Gi
  855         current_array.extend_volume(vol_name, new_size)
  856 
  857     def _add_volume_to_consistency_group(self, group, vol_name):
  858         pgroup_name = self._get_pgroup_name(group)
  859         current_array = self._get_current_array()
  860         current_array.set_pgroup(pgroup_name, addvollist=[vol_name])
  861 
  862     @pure_driver_debug_trace
  863     def create_consistencygroup(self, context, group):
  864         """Creates a consistencygroup."""
  865 
  866         current_array = self._get_current_array()
  867         current_array.create_pgroup(self._get_pgroup_name(group))
  868 
  869         model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
  870         return model_update
  871 
  872     def _create_cg_from_cgsnap(self, volumes, snapshots):
  873         """Creates a new consistency group from a cgsnapshot.
  874 
  875         The new volumes will be consistent with the snapshot.
  876         """
  877         for volume, snapshot in zip(volumes, snapshots):
  878             self.create_volume_from_snapshot(volume, snapshot)
  879 
  880     def _create_cg_from_cg(self, group, source_group, volumes, source_vols):
  881         """Creates a new consistency group from an existing cg.
  882 
  883         The new volumes will be in a consistent state, but this requires
  884         taking a new temporary group snapshot and cloning from that.
  885         """
  886         pgroup_name = self._get_pgroup_name(source_group)
  887         tmp_suffix = '%s-tmp' % uuid.uuid4()
  888         tmp_pgsnap_name = '%(pgroup_name)s.%(pgsnap_suffix)s' % {
  889             'pgroup_name': pgroup_name,
  890             'pgsnap_suffix': tmp_suffix,
  891         }
  892         LOG.debug('Creating temporary Protection Group snapshot %(snap_name)s '
  893                   'while cloning Consistency Group %(source_group)s.',
  894                   {'snap_name': tmp_pgsnap_name,
  895                    'source_group': source_group.id})
  896         current_array = self._get_current_array()
  897         current_array.create_pgroup_snapshot(pgroup_name, suffix=tmp_suffix)
  898         try:
  899             for source_vol, cloned_vol in zip(source_vols, volumes):
  900                 source_snap_name = self._get_pgroup_vol_snap_name(
  901                     pgroup_name,
  902                     tmp_suffix,
  903                     self._get_vol_name(source_vol)
  904                 )
  905                 cloned_vol_name = self._get_vol_name(cloned_vol)
  906                 current_array.copy_volume(source_snap_name, cloned_vol_name)
  907                 self._add_volume_to_consistency_group(
  908                     group,
  909                     cloned_vol_name
  910                 )
  911         finally:
  912             self._delete_pgsnapshot(tmp_pgsnap_name)
  913 
  914     @pure_driver_debug_trace
  915     def create_consistencygroup_from_src(self, context, group, volumes,
  916                                          cgsnapshot=None, snapshots=None,
  917                                          source_cg=None, source_vols=None):
  918         self.create_consistencygroup(context, group)
  919         if cgsnapshot and snapshots:
  920             self._create_cg_from_cgsnap(volumes,
  921                                         snapshots)
  922         elif source_cg:
  923             self._create_cg_from_cg(group, source_cg, volumes, source_vols)
  924 
  925         return None, None
  926 
  927     @pure_driver_debug_trace
  928     def delete_consistencygroup(self, context, group, volumes):
  929         """Deletes a consistency group."""
  930 
  931         try:
  932             pgroup_name = self._get_pgroup_name(group)
  933             current_array = self._get_current_array()
  934             current_array.destroy_pgroup(pgroup_name)
  935             if self.configuration.pure_eradicate_on_delete:
  936                 current_array.eradicate_pgroup(pgroup_name)
  937         except purestorage.PureHTTPError as err:
  938             with excutils.save_and_reraise_exception() as ctxt:
  939                 if (err.code == 400 and
  940                         (ERR_MSG_PENDING_ERADICATION in err.text or
  941                          ERR_MSG_NOT_EXIST in err.text)):
  942                     # Treat these as a "success" case since we are trying
  943                     # to delete them anyway.
  944                     ctxt.reraise = False
  945                     LOG.warning("Unable to delete Protection Group: %s",
  946                                 err.text)
  947 
  948         for volume in volumes:
  949             self.delete_volume(volume)
  950 
  951         return None, None
  952 
  953     @pure_driver_debug_trace
  954     def update_consistencygroup(self, context, group,
  955                                 add_volumes=None, remove_volumes=None):
  956 
  957         pgroup_name = self._get_pgroup_name(group)
  958         if add_volumes:
  959             addvollist = [self._get_vol_name(vol) for vol in add_volumes]
  960         else:
  961             addvollist = []
  962 
  963         if remove_volumes:
  964             remvollist = [self._get_vol_name(vol) for vol in remove_volumes]
  965         else:
  966             remvollist = []
  967 
  968         current_array = self._get_current_array()
  969         current_array.set_pgroup(pgroup_name, addvollist=addvollist,
  970                                  remvollist=remvollist)
  971 
  972         return None, None, None
  973 
  974     @pure_driver_debug_trace
  975     def create_cgsnapshot(self, context, cgsnapshot, snapshots):
  976         """Creates a cgsnapshot."""
  977 
  978         pgroup_name = self._get_pgroup_name(cgsnapshot.group)
  979         pgsnap_suffix = self._get_pgroup_snap_suffix(cgsnapshot)
  980         current_array = self._get_current_array()
  981         current_array.create_pgroup_snapshot(pgroup_name, suffix=pgsnap_suffix)
  982 
  983         return None, None
  984 
  985     def _delete_pgsnapshot(self, pgsnap_name):
  986         current_array = self._get_current_array()
  987         try:
  988             # FlashArray.destroy_pgroup is also used for deleting
  989             # pgroup snapshots. The underlying REST API is identical.
  990             current_array.destroy_pgroup(pgsnap_name)
  991             if self.configuration.pure_eradicate_on_delete:
  992                 current_array.eradicate_pgroup(pgsnap_name)
  993         except purestorage.PureHTTPError as err:
  994             with excutils.save_and_reraise_exception() as ctxt:
  995                 if (err.code == 400 and
  996                         (ERR_MSG_PENDING_ERADICATION in err.text or
  997                          ERR_MSG_NOT_EXIST in err.text)):
  998                     # Treat these as a "success" case since we are trying
  999                     # to delete them anyway.
 1000                     ctxt.reraise = False
 1001                     LOG.warning("Unable to delete Protection Group "
 1002                                 "Snapshot: %s", err.text)
 1003 
 1004     @pure_driver_debug_trace
 1005     def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
 1006         """Deletes a cgsnapshot."""
 1007 
 1008         pgsnap_name = self._get_pgroup_snap_name(cgsnapshot)
 1009         self._delete_pgsnapshot(pgsnap_name)
 1010 
 1011         return None, None
 1012 
 1013     def _validate_manage_existing_vol_type(self, volume):
 1014         """Ensure the volume type makes sense for being managed.
 1015 
 1016         We will not allow volumes that need to be sync-rep'd to be managed.
  1017         There isn't a safe way to automate adding them to the Pod from here;
  1018         an admin doing the import into Cinder would need to handle that part
  1019         first.
 1020         """
 1021         replication_type = self._get_replication_type_from_vol_type(
 1022             volume.volume_type)
 1023         if replication_type == REPLICATION_TYPE_SYNC:
 1024             raise exception.ManageExistingVolumeTypeMismatch(
  1025                 _("Unable to manage volume with a type requiring sync"
  1026                   " replication enabled."))
 1027 
 1028     def _validate_manage_existing_ref(self, existing_ref, is_snap=False):
 1029         """Ensure that an existing_ref is valid and return volume info
 1030 
  1031         If the ref is not valid, raise a ManageExistingInvalidReference
  1032         exception with an appropriate error.
 1033 
 1034         Will return volume or snapshot information from the array for
 1035         the object specified by existing_ref.
 1036         """
 1037         if "name" not in existing_ref or not existing_ref["name"]:
 1038             raise exception.ManageExistingInvalidReference(
 1039                 existing_ref=existing_ref,
 1040                 reason=_("manage_existing requires a 'name'"
 1041                          " key to identify an existing volume."))
 1042 
 1043         if is_snap:
 1044             # Purity snapshot names are prefixed with the source volume name.
 1045             ref_vol_name, ref_snap_suffix = existing_ref['name'].split('.')
 1046         else:
 1047             ref_vol_name = existing_ref['name']
 1048 
 1049         if not is_snap and '::' in ref_vol_name:
 1050             # Don't allow for managing volumes in a pod
 1051             raise exception.ManageExistingInvalidReference(
 1052                 _("Unable to manage volume in a Pod"))
 1053 
 1054         current_array = self._get_current_array()
 1055         try:
 1056             volume_info = current_array.get_volume(ref_vol_name, snap=is_snap)
 1057             if volume_info:
 1058                 if is_snap:
 1059                     for snap in volume_info:
 1060                         if snap['name'] == existing_ref['name']:
 1061                             return snap
 1062                 else:
 1063                     return volume_info
 1064         except purestorage.PureHTTPError as err:
 1065             with excutils.save_and_reraise_exception() as ctxt:
 1066                 if (err.code == 400 and
 1067                         ERR_MSG_NOT_EXIST in err.text):
 1068                     ctxt.reraise = False
 1069 
  1070         # If we were unable to retrieve the volume information we need
  1071         # to raise a ManageExistingInvalidReference exception.
 1072         raise exception.ManageExistingInvalidReference(
 1073             existing_ref=existing_ref,
 1074             reason=_("Unable to find Purity ref with name=%s") % ref_vol_name)
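
           # Illustrative example (editor's note): the existing_ref dicts
           # validated above are keyed on the Purity object name, e.g. a
           # hypothetical {'name': 'my-imported-vol'} for a volume or
           # {'name': 'my-imported-vol.snap1'} for a snapshot, where a snapshot
           # name is the source volume name plus a '.'-separated suffix.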
 1075 
 1076     def _add_to_group_if_needed(self, volume, vol_name):
 1077         if volume['group_id']:
 1078             if volume_utils.is_group_a_cg_snapshot_type(volume.group):
 1079                 self._add_volume_to_consistency_group(
 1080                     volume.group,
 1081                     vol_name
 1082                 )
 1083         elif volume['consistencygroup_id']:
 1084             self._add_volume_to_consistency_group(
 1085                 volume.consistencygroup,
 1086                 vol_name
 1087             )
 1088 
 1089     def create_group(self, ctxt, group):
 1090         """Creates a group.
 1091 
 1092         :param ctxt: the context of the caller.
 1093         :param group: the Group object of the group to be created.
 1094         :returns: model_update
 1095         """
 1096         if volume_utils.is_group_a_cg_snapshot_type(group):
 1097             return self.create_consistencygroup(ctxt, group)
 1098 
 1099         # If it wasn't a consistency group request ignore it and we'll rely on
 1100         # the generic group implementation.
 1101         raise NotImplementedError()
 1102 
 1103     def delete_group(self, ctxt, group, volumes):
 1104         """Deletes a group.
 1105 
 1106         :param ctxt: the context of the caller.
 1107         :param group: the Group object of the group to be deleted.
 1108         :param volumes: a list of Volume objects in the group.
 1109         :returns: model_update, volumes_model_update
 1110         """
 1111         if volume_utils.is_group_a_cg_snapshot_type(group):
 1112             return self.delete_consistencygroup(ctxt, group, volumes)
 1113 
 1114         # If it wasn't a consistency group request ignore it and we'll rely on
 1115         # the generic group implementation.
 1116         raise NotImplementedError()
 1117 
 1118     def update_group(self, ctxt, group,
 1119                      add_volumes=None, remove_volumes=None):
 1120         """Updates a group.
 1121 
 1122         :param ctxt: the context of the caller.
 1123         :param group: the Group object of the group to be updated.
 1124         :param add_volumes: a list of Volume objects to be added.
 1125         :param remove_volumes: a list of Volume objects to be removed.
 1126         :returns: model_update, add_volumes_update, remove_volumes_update
 1127         """
 1128 
 1129         if volume_utils.is_group_a_cg_snapshot_type(group):
 1130             return self.update_consistencygroup(ctxt,
 1131                                                 group,
 1132                                                 add_volumes,
 1133                                                 remove_volumes)
 1134 
 1135         # If it wasn't a consistency group request ignore it and we'll rely on
 1136         # the generic group implementation.
 1137         raise NotImplementedError()
 1138 
 1139     def create_group_from_src(self, ctxt, group, volumes,
 1140                               group_snapshot=None, snapshots=None,
 1141                               source_group=None, source_vols=None):
 1142         """Creates a group from source.
 1143 
 1144         :param ctxt: the context of the caller.
 1145         :param group: the Group object to be created.
 1146         :param volumes: a list of Volume objects in the group.
 1147         :param group_snapshot: the GroupSnapshot object as source.
 1148         :param snapshots: a list of snapshot objects in group_snapshot.
 1149         :param source_group: the Group object as source.
 1150         :param source_vols: a list of volume objects in the source_group.
 1151         :returns: model_update, volumes_model_update
 1152         """
 1153         if volume_utils.is_group_a_cg_snapshot_type(group):
 1154             return self.create_consistencygroup_from_src(ctxt,
 1155                                                          group,
 1156                                                          volumes,
 1157                                                          group_snapshot,
 1158                                                          snapshots,
 1159                                                          source_group,
 1160                                                          source_vols)
 1161 
 1162         # If it wasn't a consistency group request, raise NotImplementedError
 1163         # so Cinder falls back to the generic group implementation.
 1164         raise NotImplementedError()
 1165 
 1166     def create_group_snapshot(self, ctxt, group_snapshot, snapshots):
 1167         """Creates a group_snapshot.
 1168 
 1169         :param ctxt: the context of the caller.
 1170         :param group_snapshot: the GroupSnapshot object to be created.
 1171         :param snapshots: a list of Snapshot objects in the group_snapshot.
 1172         :returns: model_update, snapshots_model_update
 1173         """
 1174         if volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
 1175             return self.create_cgsnapshot(ctxt, group_snapshot, snapshots)
 1176 
 1177         # If it wasn't a consistency group request, raise NotImplementedError
 1178         # so Cinder falls back to the generic group implementation.
 1179         raise NotImplementedError()
 1180 
 1181     def delete_group_snapshot(self, ctxt, group_snapshot, snapshots):
 1182         """Deletes a group_snapshot.
 1183 
 1184         :param ctxt: the context of the caller.
 1185         :param group_snapshot: the GroupSnapshot object to be deleted.
 1186         :param snapshots: a list of snapshot objects in the group_snapshot.
 1187         :returns: model_update, snapshots_model_update
 1188         """
 1189         if volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
 1190             return self.delete_cgsnapshot(ctxt, group_snapshot, snapshots)
 1191 
 1192         # If it wasn't a consistency group request, raise NotImplementedError
 1193         # so Cinder falls back to the generic group implementation.
 1194         raise NotImplementedError()
 1195 
 1196     @pure_driver_debug_trace
 1197     def manage_existing(self, volume, existing_ref):
 1198         """Brings an existing backend storage object under Cinder management.
 1199 
 1200         We expect a volume name in the existing_ref that matches one in Purity.
 1201         """
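              # Illustrative existing_ref (the volume name is hypothetical):
              #     {'name': 'some-preexisting-purity-volume'}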
 1202         self._validate_manage_existing_vol_type(volume)
 1203         self._validate_manage_existing_ref(existing_ref)
 1204 
 1205         ref_vol_name = existing_ref['name']
 1206         current_array = self._get_current_array()
 1207         connected_hosts = current_array.list_volume_private_connections(
 1208             ref_vol_name)
 1209         if len(connected_hosts) > 0:
 1210             raise exception.ManageExistingInvalidReference(
 1211                 existing_ref=existing_ref,
 1212                 reason=_("%(driver)s manage_existing cannot manage a volume "
 1213                          "connected to hosts. Please disconnect this volume "
 1214                          "from existing hosts before importing"
 1215                          ) % {'driver': self.__class__.__name__})
 1216         new_vol_name = self._generate_purity_vol_name(volume)
 1217         LOG.info("Renaming existing volume %(ref_name)s to %(new_name)s",
 1218                  {"ref_name": ref_vol_name, "new_name": new_vol_name})
 1219         self._rename_volume_object(ref_vol_name,
 1220                                    new_vol_name,
 1221                                    raise_not_exist=True)
 1222         volume.provider_id = new_vol_name
 1223         async_enabled = self._enable_async_replication_if_needed(current_array,
 1224                                                                  volume)
 1225         repl_status = fields.ReplicationStatus.DISABLED
 1226         if async_enabled:
 1227             repl_status = fields.ReplicationStatus.ENABLED
 1228         return {
 1229             'provider_id': new_vol_name,
 1230             'replication_status': repl_status,
 1231         }
 1232 
 1233     @pure_driver_debug_trace
 1234     def manage_existing_get_size(self, volume, existing_ref):
 1235         """Return size of volume to be managed by manage_existing.
 1236 
 1237         We expect a volume name in the existing_ref that matches one in Purity.
 1238         """
 1239         volume_info = self._validate_manage_existing_ref(existing_ref)
 1240         size = self._round_bytes_to_gib(volume_info['size'])
 1241 
 1242         return size
 1243 
 1244     def _rename_volume_object(self, old_name, new_name, raise_not_exist=False):
 1245         """Rename a volume object (could be snapshot) in Purity.
 1246 
 1247         This will not raise an exception if the object does not exist,
              unless raise_not_exist is set to True.
 1248         """
 1249         current_array = self._get_current_array()
 1250         try:
 1251             current_array.rename_volume(old_name, new_name)
 1252         except purestorage.PureHTTPError as err:
 1253             with excutils.save_and_reraise_exception() as ctxt:
 1254                 if (err.code == 400 and
 1255                         ERR_MSG_NOT_EXIST in err.text):
 1256                     ctxt.reraise = raise_not_exist
 1257                     LOG.warning("Unable to rename %(old_name)s, error "
 1258                                 "message: %(error)s",
 1259                                 {"old_name": old_name, "error": err.text})
 1260         return new_name
 1261 
 1262     @pure_driver_debug_trace
 1263     def unmanage(self, volume):
 1264         """Removes the specified volume from Cinder management.
 1265 
 1266         Does not delete the underlying backend storage object.
 1267 
 1268         The volume will be renamed with "-unmanaged" as a suffix.
 1269         """
 1270 
 1271         vol_name = self._get_vol_name(volume)
 1272         unmanaged_vol_name = vol_name + UNMANAGED_SUFFIX
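              # e.g. with the "-unmanaged" suffix noted in the docstring,
              # "volume-<uuid>-cinder" becomes "volume-<uuid>-cinder-unmanaged".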
 1273         LOG.info("Renaming existing volume %(ref_name)s to %(new_name)s",
 1274                  {"ref_name": vol_name, "new_name": unmanaged_vol_name})
 1275         self._rename_volume_object(vol_name, unmanaged_vol_name)
 1276 
 1277     def _verify_manage_snap_api_requirements(self):
 1278         current_array = self._get_current_array()
 1279         api_version = current_array.get_rest_version()
 1280         if api_version not in MANAGE_SNAP_REQUIRED_API_VERSIONS:
 1281             msg = _('Unable to do manage snapshot operations with Purity REST '
 1282                     'API version %(api_version)s, requires '
 1283                     '%(required_versions)s.') % {
 1284                 'api_version': api_version,
 1285                 'required_versions': MANAGE_SNAP_REQUIRED_API_VERSIONS
 1286             }
 1287             raise PureDriverException(reason=msg)
 1288 
 1289     def manage_existing_snapshot(self, snapshot, existing_ref):
 1290         """Brings an existing backend storage object under Cinder management.
 1291 
 1292         We expect a snapshot name in the existing_ref that matches one in
 1293         Purity.
 1294         """
 1295         self._verify_manage_snap_api_requirements()
 1296         self._validate_manage_existing_ref(existing_ref, is_snap=True)
 1297         ref_snap_name = existing_ref['name']
 1298         new_snap_name = self._get_snap_name(snapshot)
 1299         LOG.info("Renaming existing snapshot %(ref_name)s to "
 1300                  "%(new_name)s", {"ref_name": ref_snap_name,
 1301                                   "new_name": new_snap_name})
 1302         self._rename_volume_object(ref_snap_name,
 1303                                    new_snap_name,
 1304                                    raise_not_exist=True)
 1305         return None
 1306 
 1307     def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
 1308         """Return size of snapshot to be managed by manage_existing.
 1309 
 1310         We expect a snapshot name in the existing_ref that matches one in
 1311         Purity.
 1312         """
 1313         self._verify_manage_snap_api_requirements()
 1314         snap_info = self._validate_manage_existing_ref(existing_ref,
 1315                                                        is_snap=True)
 1316         size = self._round_bytes_to_gib(snap_info['size'])
 1317         return size
 1318 
 1319     def unmanage_snapshot(self, snapshot):
 1320         """Removes the specified snapshot from Cinder management.
 1321 
 1322         Does not delete the underlying backend storage object.
 1323 
 1324         The snapshot will be renamed with "-unmanaged" as a suffix.
 1326         """
 1327         self._verify_manage_snap_api_requirements()
 1328         snap_name = self._get_snap_name(snapshot)
 1329         unmanaged_snap_name = snap_name + UNMANAGED_SUFFIX
 1330         LOG.info("Renaming existing snapshot %(ref_name)s to "
 1331                  "%(new_name)s", {"ref_name": snap_name,
 1332                                   "new_name": unmanaged_snap_name})
 1333         self._rename_volume_object(snap_name, unmanaged_snap_name)
 1334 
 1335     def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
 1336                                sort_keys, sort_dirs):
 1337         """List volumes on the backend available for management by Cinder.
 1338 
 1339         Rule out volumes that are attached to a Purity host or that
 1340         are already in the list of cinder_volumes.
 1341 
 1342         Also exclude any volumes that are in a pod; it is difficult to safely
 1343         move volumes in or out of pods from here without more context, so we
 1344         rely on the admin to move them before managing the volume.
 1345 
 1346         We return references of the volume names for any others.
 1347         """
 1348         array = self._get_current_array()
 1349         pure_vols = array.list_volumes()
 1350         hosts_with_connections = array.list_hosts(all=True)
 1351 
 1352         # Put together a map of volumes that are connected to hosts
 1353         connected_vols = {}
 1354         for host in hosts_with_connections:
 1355             vol = host.get('vol')
 1356             if vol:
 1357                 connected_vols[vol] = host['name']
 1358 
 1359         # Put together a map of existing cinder volumes on the array
 1360         # so we can lookup cinder id's by purity volume names
 1361         existing_vols = {}
 1362         for cinder_vol in cinder_volumes:
 1363             existing_vols[self._get_vol_name(cinder_vol)] = cinder_vol.name_id
 1364 
 1365         manageable_vols = []
 1366         for pure_vol in pure_vols:
 1367             vol_name = pure_vol['name']
 1368             cinder_id = existing_vols.get(vol_name)
 1369             not_safe_msgs = []
 1370             host = connected_vols.get(vol_name)
 1371             in_pod = ("::" in vol_name)
 1372 
 1373             if host:
 1374                 not_safe_msgs.append(_('Volume connected to host %s') % host)
 1375 
 1376             if cinder_id:
 1377                 not_safe_msgs.append(_('Volume already managed'))
 1378 
 1379             if in_pod:
 1380                 not_safe_msgs.append(_('Volume is in a Pod'))
 1381 
 1382             is_safe = (len(not_safe_msgs) == 0)
 1383             reason_not_safe = ' && '.join(not_safe_msgs)
 1389 
 1390             manageable_vols.append({
 1391                 'reference': {'name': vol_name},
 1392                 'size': self._round_bytes_to_gib(pure_vol['size']),
 1393                 'safe_to_manage': is_safe,
 1394                 'reason_not_safe': reason_not_safe,
 1395                 'cinder_id': cinder_id,
 1396                 'extra_info': None,
 1397             })
 1398 
 1399         return volume_utils.paginate_entries_list(
 1400             manageable_vols, marker, limit, offset, sort_keys, sort_dirs)
 1401 
 1402     def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset,
 1403                                  sort_keys, sort_dirs):
 1404         """List snapshots on the backend available for management by Cinder."""
 1405         array = self._get_current_array()
 1406         pure_snapshots = array.list_volumes(snap=True)
 1407 
 1408         # Put together a map of existing cinder snapshots on the array
 1409         # so we can lookup cinder id's by purity snapshot names
 1410         existing_snapshots = {}
 1411         for cinder_snap in cinder_snapshots:
 1412             name = self._get_snap_name(cinder_snap)
 1413             existing_snapshots[name] = cinder_snap.id
 1414 
 1415         manageable_snaps = []
 1416         for pure_snap in pure_snapshots:
 1417             snap_name = pure_snap['name']
 1418             cinder_id = existing_snapshots.get(snap_name)
 1419             is_safe = True
 1420             reason_not_safe = None
 1421 
 1422             if cinder_id:
 1423                 is_safe = False
 1424                 reason_not_safe = _("Snapshot already managed.")
 1425 
 1426             manageable_snaps.append({
 1427                 'reference': {'name': snap_name},
 1428                 'size': self._round_bytes_to_gib(pure_snap['size']),
 1429                 'safe_to_manage': is_safe,
 1430                 'reason_not_safe': reason_not_safe,
 1431                 'cinder_id': cinder_id,
 1432                 'extra_info': None,
 1433                 'source_reference': {'name': pure_snap['source']},
 1434             })
 1435 
 1436         return volume_utils.paginate_entries_list(
 1437             manageable_snaps, marker, limit, offset, sort_keys, sort_dirs)
 1438 
 1439     @staticmethod
 1440     def _round_bytes_to_gib(size):
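              # Rounds up to whole GiB, e.g. 1 byte -> 1, units.Gi -> 1 and
              # units.Gi + 1 -> 2.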
 1441         return int(math.ceil(float(size) / units.Gi))
 1442 
 1443     def _get_flasharray(self, san_ip, api_token, rest_version=None,
 1444                         verify_https=None, ssl_cert_path=None,
 1445                         request_kwargs=None):
 1446 
 1447         if (version.LooseVersion(purestorage.VERSION) <
 1448                 version.LooseVersion('1.14.0')):
 1449             if request_kwargs is not None:
 1450                 LOG.warning("Unable to specify request_kwargs='%s' on "
 1451                             "purestorage.FlashArray using 'purestorage' "
 1452                             "python module <1.14.0. Current version: %s",
 1453                             request_kwargs,
 1454                             purestorage.VERSION)
 1455             array = purestorage.FlashArray(san_ip,
 1456                                            api_token=api_token,
 1457                                            rest_version=rest_version,
 1458                                            verify_https=verify_https,
 1459                                            ssl_cert=ssl_cert_path,
 1460                                            user_agent=self._user_agent)
 1461         else:
 1462             array = purestorage.FlashArray(san_ip,
 1463                                            api_token=api_token,
 1464                                            rest_version=rest_version,
 1465                                            verify_https=verify_https,
 1466                                            ssl_cert=ssl_cert_path,
 1467                                            user_agent=self._user_agent,
 1468                                            request_kwargs=request_kwargs)
 1469         array_info = array.get()
 1470         array.array_name = array_info["array_name"]
 1471         array.array_id = array_info["id"]
 1472 
 1473         # Configure some extra tracing on requests made to the array
 1474         if hasattr(array, '_request'):
 1475             def trace_request(fn):
 1476                 def wrapper(*args, **kwargs):
 1477                     request_id = uuid.uuid4().hex
 1478                     LOG.debug("Making HTTP Request [%(id)s]:"
 1479                               " 'args=%(args)s kwargs=%(kwargs)s'",
 1480                               {
 1481                                   "id": request_id,
 1482                                   "args": args,
 1483                                   "kwargs": kwargs,
 1484                               })
 1485                     ret = fn(*args, **kwargs)
 1486                     LOG.debug(
 1487                         "Response for HTTP request [%(id)s]: '%(response)s'",
 1488                         {
 1489                             "id": request_id,
 1490                             "response": ret,
 1491                         }
 1492                     )
 1493                     return ret
 1494                 return wrapper
 1495             array._request = trace_request(array._request)
 1496 
 1497         LOG.debug("connected to %(array_name)s with REST API %(api_version)s",
 1498                   {"array_name": array.array_name,
 1499                    "api_version": array._rest_version})
 1500         return array
 1501 
 1502     @staticmethod
 1503     def _client_version_greater_than(version):
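              # Example (illustrative): with purestorage 1.15.2 installed this
              # returns True for [1, 14, 0]; with exactly 1.14.0 installed it
              # returns False.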
 1504         module_version = [int(v) for v in purestorage.VERSION.split('.')]
 1505         for limit_version, actual_version in zip(version, module_version):
 1506             if actual_version != limit_version:
 1507                 return actual_version > limit_version
 1508         return False
 1509 
 1510     @staticmethod
 1511     def _get_pod_for_volume(volume_name):
 1512         """Return the Purity pod name for the given volume.
 1513 
 1514         This works on the assumption that volume names are always prefixed
 1515         with the pod name followed by '::'
 1516         """
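              # Example (hypothetical names): "cinder-pod::volume-xyz-cinder"
              # yields "cinder-pod", while "volume-xyz-cinder" yields None.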
 1517         if '::' not in volume_name:
 1518             # Not in a pod
 1519             return None
 1520         parts = volume_name.split('::')
 1521         if len(parts) != 2 or not parts[0]:
 1522             # Can't parse this. It should never happen, as it would mean a
 1523             # break in the API contract with Purity.
 1524             raise PureDriverException(
 1525                 _("Unable to determine pod for volume %s") % volume_name)
 1526         return parts[0]
 1527 
 1528     @classmethod
 1529     def _is_vol_in_pod(cls, pure_vol_name):
 1530         return cls._get_pod_for_volume(pure_vol_name) is not None
 1531 
 1532     @staticmethod
 1533     def _get_replication_type_from_vol_type(volume_type):
 1534         if volume_type and volume_type.is_replicated():
 1535             specs = volume_type.get("extra_specs")
 1536             if specs and EXTRA_SPECS_REPL_TYPE in specs:
 1537                 replication_type_spec = specs[EXTRA_SPECS_REPL_TYPE]
 1538                 # Do not validate settings, ignore invalid.
 1539                 if replication_type_spec == "<in> async":
 1540                     return REPLICATION_TYPE_ASYNC
 1541                 elif replication_type_spec == "<in> sync":
 1542                     return REPLICATION_TYPE_SYNC
 1543             else:
 1544                 # if no type was specified but replication is enabled assume
 1545                 # that async replication is enabled
 1546                 return REPLICATION_TYPE_ASYNC
 1547         return None
 1548 
 1549     def _generate_purity_vol_name(self, volume):
 1550         """Return the name of the volume Purity will use.
 1551 
 1552         This expects to be given a Volume OVO and not a volume
 1553         dictionary.
 1554         """
 1555         base_name = volume.name
 1556 
 1557         # Some OpenStack deployments, e.g. PowerVC, create a volume.name
 1558         # that, when appended with our '-cinder' string, will exceed the
 1559         # maximum volume name length for Pure, so here we left-truncate the
 1560         # true volume name before the OpenStack volume_name_template affected
 1561         # it and then put back the template format.
 1562         if len(base_name) > 56:
 1563             actual_name = base_name[7:]
 1564             base_name = "volume-" + actual_name[-52:]
 1565 
 1566         repl_type = self._get_replication_type_from_vol_type(
 1567             volume.volume_type)
 1568         if repl_type == REPLICATION_TYPE_SYNC:
 1569             base_name = self._replication_pod_name + "::" + base_name
 1570 
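              # Illustrative results (UUIDs are hypothetical): a plain volume
              # becomes "volume-<uuid>-cinder", while a sync-replicated one
              # becomes "<replication pod>::volume-<uuid>-cinder".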
 1571         return base_name + "-cinder"
 1572 
 1573     def _get_vol_name(self, volume):
 1574         """Return the name of the volume Purity will use."""
 1575         # Use the dictionary access style for compatibility, this works for
 1576         # db or OVO volume objects too.
 1577         return volume['provider_id']
 1578 
 1579     def _get_snap_name(self, snapshot):
 1580         """Return the name of the snapshot that Purity will use."""
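              # With the default Cinder name templates this typically looks
              # like "volume-<vol-uuid>-cinder.snapshot-<snap-uuid>"
              # (illustrative).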
 1581         return "%s.%s" % (self._get_vol_name(snapshot.volume),
 1582                           snapshot["name"])
 1583 
 1584     def _group_potential_repl_types(self, pgroup):
 1585         repl_types = set()
 1586         for vol_type in pgroup.volume_types:
 1587             repl_type = self._get_replication_type_from_vol_type(vol_type)
 1588             repl_types.add(repl_type)
 1589         return repl_types
 1590 
 1591     def _get_pgroup_name(self, pgroup):
 1592         # check if the pgroup has any volume types that are sync rep enabled,
 1593         # if so, we need to use a group name accounting for the ActiveCluster
 1594         # pod.
 1595         base_name = ""
 1596         if REPLICATION_TYPE_SYNC in self._group_potential_repl_types(pgroup):
 1597             base_name = self._replication_pod_name + "::"
 1598 
 1599         return "%(base)sconsisgroup-%(id)s-cinder" % {
 1600             'base': base_name, 'id': pgroup.id}
 1601 
 1602     @staticmethod
 1603     def _get_pgroup_snap_suffix(group_snapshot):
 1604         return "cgsnapshot-%s-cinder" % group_snapshot['id']
 1605 
 1606     @staticmethod
 1607     def _get_group_id_from_snap(group_snap):
 1608         # We don't really care what kind of group it is; look for a group_id
 1609         # first and fall back to using a consistencygroup_id.
 1610         group_id = None
 1611         try:
 1612             group_id = group_snap['group_id']
 1613         except AttributeError:
 1614             pass
 1615         if group_id is None:
 1616             try:
 1617                 group_id = group_snap['consistencygroup_id']
 1618             except AttributeError:
 1619                 pass
 1620         return group_id
 1621 
 1622     def _get_pgroup_snap_name(self, group_snapshot):
 1623         """Return the name of the pgroup snapshot that Purity will use."""
 1624         return "%s.%s" % (self._get_pgroup_name(group_snapshot.group),
 1625                           self._get_pgroup_snap_suffix(group_snapshot))
 1626 
 1627     @staticmethod
 1628     def _get_pgroup_vol_snap_name(pg_name, pgsnap_suffix, volume_name):
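              # Example (hypothetical IDs):
              #   consisgroup-<g>-cinder.cgsnapshot-<s>-cinder.volume-<v>-cinder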
 1629         return "%(pgroup_name)s.%(pgsnap_suffix)s.%(volume_name)s" % {
 1630             'pgroup_name': pg_name,
 1631             'pgsnap_suffix': pgsnap_suffix,
 1632             'volume_name': volume_name,
 1633         }
 1634 
 1635     def _get_pgroup_snap_name_from_snapshot(self, snapshot):
 1636         """Return the name of the snapshot that Purity will use."""
 1637 
 1638         group_snap = None
 1639         if snapshot.group_snapshot:
 1640             group_snap = snapshot.group_snapshot
 1641         elif snapshot.cgsnapshot:
 1642             group_snap = snapshot.cgsnapshot
 1643 
 1644         pg_vol_snap_name = "%(group_snap)s.%(volume_name)s-cinder" % {
 1645             'group_snap': self._get_pgroup_snap_name(group_snap),
 1646             'volume_name': snapshot.volume_name
 1647         }
 1648         return pg_vol_snap_name
 1649 
 1650     @staticmethod
 1651     def _generate_purity_host_name(name):
 1652         """Return a valid Purity host name based on the name passed in."""
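              # Example (illustrative, assuming INVALID_CHARACTERS matches
              # anything outside [a-zA-Z0-9-]): "host_1.example.com" becomes
              # "host-1-example-com-<32 hex chars>-cinder".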
 1653         if len(name) > 23:
 1654             name = name[0:23]
 1655         name = INVALID_CHARACTERS.sub("-", name)
 1656         name = name.lstrip("-")
 1657         return "{name}-{uuid}-cinder".format(name=name, uuid=uuid.uuid4().hex)
 1658 
 1659     @staticmethod
 1660     def _connect_host_to_vol(array, host_name, vol_name):
 1661         connection = None
 1662         try:
 1663             connection = array.connect_host(host_name, vol_name)
 1664         except purestorage.PureHTTPError as err:
 1665             if err.code == 400 and ERR_MSG_HOST_NOT_EXIST in err.text:
 1666                 LOG.debug('Unable to attach volume to host: %s', err.text)
 1667                 raise PureRetryableException()
 1668             with excutils.save_and_reraise_exception() as ctxt:
 1669                 if (err.code == 400 and
 1670                         ERR_MSG_ALREADY_EXISTS in err.text):
 1671                     # Happens if the volume is already connected to the host.
 1672                     # Treat this as a success.
 1673                     ctxt.reraise = False
 1674                     LOG.debug("Volume connection already exists for Purity "
 1675                               "host with message: %s", err.text)
 1676 
 1677                     # Get the info for the existing connection.
 1678                     connected_hosts = (
 1679                         array.list_volume_private_connections(vol_name))
 1680                     for host_info in connected_hosts:
 1681                         if host_info["host"] == host_name:
 1682                             connection = host_info
 1683                             break
 1684         if not connection:
 1685             raise PureDriverException(
 1686                 reason=_("Unable to connect or find connection to host"))
 1687 
 1688         return connection
 1689 
 1690     @pure_driver_debug_trace
 1691     def retype(self, context, volume, new_type, diff, host):
 1692         """Retype from one volume type to another on the same backend.
 1693 
 1694         For a Pure array there is currently no differentiation between volume
 1695         types other than some volumes being part of a protection group (async
 1696         replication) or part of a pod (sync replication).
 1697         """
 1698 
 1699         # TODO(patrickeast): Can remove this once new_type is a VolumeType OVO
 1700         new_type = volume_type.VolumeType.get_by_name_or_id(context,
 1701                                                             new_type['id'])
 1702         previous_vol_replicated = volume.is_replicated()
 1703         new_vol_replicated = (new_type and new_type.is_replicated())
 1704 
 1705         prev_repl_type = None
 1706         new_repl_type = None
 1707 
 1708         # See if the type specifies the replication type. If we know it is
 1709         # replicated but it doesn't specify a type, assume async replication
 1710         # for backwards compatibility. This applies to both old and new types.
 1711 
 1712         if previous_vol_replicated:
 1713             prev_repl_type = self._get_replication_type_from_vol_type(
 1714                 volume.volume_type)
 1715 
 1716         if new_vol_replicated:
 1717             new_repl_type = self._get_replication_type_from_vol_type(new_type)
 1718             if new_repl_type is None:
 1719                 new_repl_type = REPLICATION_TYPE_ASYNC
 1720 
 1721         # There are a few cases we care about, going from non-replicated to
 1722         # replicated, from replicated to non-replicated, and switching
 1723         # replication types.
 1724         model_update = None
 1725         if previous_vol_replicated and not new_vol_replicated:
 1726             if prev_repl_type == REPLICATION_TYPE_ASYNC:
 1727                 # Remove from protection group.
 1728                 self._disable_async_replication(volume)
 1729                 model_update = {
 1730                     "replication_status": fields.ReplicationStatus.DISABLED
 1731                 }
 1732             elif prev_repl_type == REPLICATION_TYPE_SYNC:
 1733                 # We can't pull a volume out of a stretched pod; indicate to
 1734                 # the volume manager that a migration is needed instead.
 1735                 return False, None
 1736         elif not previous_vol_replicated and new_vol_replicated:
 1737             if new_repl_type == REPLICATION_TYPE_ASYNC:
 1738                 # Add to protection group.
 1739                 self._enable_async_replication(self._get_current_array(),
 1740                                                volume)
 1741                 model_update = {
 1742                     "replication_status": fields.ReplicationStatus.ENABLED
 1743                 }
 1744             elif new_repl_type == REPLICATION_TYPE_SYNC:
 1745                 # We can't add a volume to a stretched pod (volumes must be
 1746                 # created in one); indicate to the volume manager that it
 1747                 # should do a migration.
 1748                 return False, None
 1749         elif (previous_vol_replicated and new_vol_replicated
 1750                 and (prev_repl_type != new_repl_type)):
 1751             # We can't move a volume in or out of a pod; indicate to the
 1752             # manager that it should do a migration for this retype.
 1753             return False, None
 1754         return True, model_update
 1755 
 1756     @pure_driver_debug_trace
 1757     def _disable_async_replication(self, volume):
 1758         """Disable replication on the given volume."""
 1759 
 1760         current_array = self._get_current_array()
 1761         LOG.debug("Disabling replication for volume %(id)s residing on "
 1762                   "array %(backend_id)s.",
 1763                   {"id": volume["id"],
 1764                    "backend_id": current_array.backend_id})
 1765         try:
 1766             current_array.set_pgroup(self._replication_pg_name,
 1767                                      remvollist=([self._get_vol_name(volume)]))
 1768         except purestorage.PureHTTPError as err:
 1769             with excutils.save_and_reraise_exception() as ctxt:
 1770                 if (err.code == 400 and
 1771                         ERR_MSG_COULD_NOT_BE_FOUND in err.text):
 1772                     ctxt.reraise = False
 1773                     LOG.warning("Disable replication on volume failed: "
 1774                                 "already disabled: %s", err.text)
 1775                 else:
 1776                     LOG.error("Disable replication on volume failed with "
 1777                               "message: %s", err.text)
 1778 
 1779     @pure_driver_debug_trace
 1780     def failover_host(self, context, volumes, secondary_id=None, groups=None):
 1781         """Failover backend to a secondary array.
 1782 
 1783         This action will not affect the original volumes in any
 1784         way; they will stay as they are. If a subsequent failover is performed
 1785         we will simply overwrite the original (now unmanaged) volumes.
 1786         """
 1787         if secondary_id == 'default':
 1788             # We are going back to the 'original' driver config, just put
 1789             # our current array back to the primary.
 1790             if self._failed_over_primary_array:
 1791 
 1792                 # If the "default" and current host are in an ActiveCluster
 1793                 # with volumes stretched between the two then we can put
 1794                 # the sync rep enabled volumes into available states; anything
 1795                 # else will go into an error state pending an admin to check
 1796                 # them and adjust states as appropriate.
 1797 
 1798                 current_array = self._get_current_array()
 1799                 repl_type = current_array.replication_type
 1800                 is_in_ac = bool(repl_type == REPLICATION_TYPE_SYNC)
 1801                 model_updates = []
 1802 
 1803                 # We are only given replicated volumes, but any non sync rep
 1804                 # volumes should go into error upon doing a failback as the
 1805                 # async replication is not bi-directional.
 1806                 for vol in volumes:
 1807                     repl_type = self._get_replication_type_from_vol_type(
 1808                         vol.volume_type)
 1809                     if not (is_in_ac and repl_type == REPLICATION_TYPE_SYNC):
 1810                         model_updates.append({
 1811                             'volume_id': vol['id'],
 1812                             'updates': {
 1813                                 'status': 'error',
 1814                             }
 1815                         })
 1816                 self._swap_replication_state(current_array,
 1817                                              self._failed_over_primary_array,
 1818                                              failback=True)
 1819                 return secondary_id, model_updates, []
 1820             else:
 1821                 msg = _('Unable to failback to "default", this can only be '
 1822                         'done after a failover has completed.')
 1823                 raise exception.InvalidReplicationTarget(message=msg)
 1824 
 1825         current_array = self._get_current_array()
 1826         LOG.debug("Failover replication for array %(primary)s to "
 1827                   "%(secondary)s.",
 1828                   {"primary": current_array.backend_id,
 1829                    "secondary": secondary_id})
 1830 
 1831         if secondary_id == current_array.backend_id:
 1832             raise exception.InvalidReplicationTarget(
 1833                 reason=_("Secondary id can not be the same as primary array, "
 1834                          "backend_id = %(secondary)s.") %
 1835                 {"secondary": secondary_id}
 1836             )
 1837 
 1838         secondary_array = None
 1839         pg_snap = None  # used for async only
 1840         if secondary_id:
 1841             for array in self._replication_target_arrays:
 1842                 if array.backend_id == secondary_id:
 1843                     secondary_array = array
 1844                     break
 1845 
 1846             if not secondary_array:
 1847                 raise exception.InvalidReplicationTarget(
 1848                     reason=_("Unable to determine secondary_array from"
 1849                              " supplied secondary: %(secondary)s.") %
 1850                     {"secondary": secondary_id}
 1851                 )
 1852 
 1853             if secondary_array.replication_type == REPLICATION_TYPE_ASYNC:
 1854                 pg_snap = self._get_latest_replicated_pg_snap(
 1855                     secondary_array,
 1856                     self._get_current_array().array_name,
 1857                     self._replication_pg_name
 1858                 )
 1859         else:
 1860             LOG.debug('No secondary array id specified, checking all targets.')
 1861             # Favor sync-rep targets options
 1862             secondary_array = self._find_sync_failover_target()
 1863 
 1864             if not secondary_array:
 1865                 # Now look for an async one
 1866                 secondary_array, pg_snap = self._find_async_failover_target()
 1867 
 1868         # If we *still* don't have a secondary array it means we couldn't
 1869         # determine one to use. Stop now.
 1870         if not secondary_array:
 1871             raise PureDriverException(
 1872                 reason=_("Unable to find viable secondary array from "
 1873                          "configured targets: %(targets)s.") %
 1874                 {"targets": str(self._replication_target_arrays)}
 1875             )
 1876 
 1877         LOG.debug("Starting failover from %(primary)s to %(secondary)s",
 1878                   {"primary": current_array.array_name,
 1879                    "secondary": secondary_array.array_name})
 1880 
 1881         model_updates = []
 1882         if secondary_array.replication_type == REPLICATION_TYPE_ASYNC:
 1883             model_updates = self._async_failover_host(
 1884                 volumes, secondary_array, pg_snap)
 1885         elif secondary_array.replication_type == REPLICATION_TYPE_SYNC:
 1886             model_updates = self._sync_failover_host(volumes, secondary_array)
 1887 
 1888         current_array = self._get_current_array()
 1889         self._swap_replication_state(current_array, secondary_array)
 1890 
 1891         return secondary_array.backend_id, model_updates, []
 1892 
 1893     def _swap_replication_state(self, current_array, secondary_array,
 1894                                 failback=False):
 1895         # After failover we want our current array to be swapped for the
 1896         # secondary array we just failed over to.
 1897         self._failed_over_primary_array = current_array
 1898 
 1899         # Remove the new primary from our secondary targets
 1900         if secondary_array in self._replication_target_arrays:
 1901             self._replication_target_arrays.remove(secondary_array)
 1902 
 1903         # For async, if we're doing a failback then add the old primary back
 1904         # into the replication list
 1905         if failback:
 1906             self._replication_target_arrays.append(current_array)
 1907             self._is_replication_enabled = True
 1908 
 1909         # If it's sync rep then swap the two in their lists since it is a
 1910         # bi-directional setup. If the primary is still OK or comes back,
 1911         # it can continue being used as a secondary target until a 'failback'
 1912         # occurs. This is primarily important for "uniform" environments with
 1913         # attachments to both arrays. We may need to adjust flags on the
 1914         # primary array object to lock it into one type of replication.
 1915         if secondary_array.replication_type == REPLICATION_TYPE_SYNC:
 1916             self._is_active_cluster_enabled = True
 1917             self._is_replication_enabled = True
 1918             if secondary_array in self._active_cluster_target_arrays:
 1919                 self._active_cluster_target_arrays.remove(secondary_array)
 1920 
 1921             current_array.replication_type = REPLICATION_TYPE_SYNC
 1922             self._replication_target_arrays.append(current_array)
 1923             self._active_cluster_target_arrays.append(current_array)
 1924         else:
 1925             # If the target is not configured for sync rep it means it isn't
 1926             # part of the ActiveCluster and we need to reflect this in our
 1927             # capabilities.
 1928             self._is_active_cluster_enabled = False
 1929             self._is_replication_enabled = False
 1930 
 1931         if secondary_array.uniform:
 1932             if secondary_array in self._uniform_active_cluster_target_arrays:
 1933                 self._uniform_active_cluster_target_arrays.remove(
 1934                     secondary_array)
 1935             current_array.uniform = True
 1936             self._uniform_active_cluster_target_arrays.append(current_array)
 1937 
 1938         self._set_current_array(secondary_array)
 1939 
 1940     def _does_pgroup_exist(self, array, pgroup_name):
 1941         """Return True if the pgroup exists on the array, else False."""
 1942         try:
 1943             array.get_pgroup(pgroup_name)
 1944             return True
 1945         except purestorage.PureHTTPError as err:
 1946             with excutils.save_and_reraise_exception() as ctxt:
 1947                 if err.code == 400 and ERR_MSG_NOT_EXIST in err.text:
 1948                     ctxt.reraise = False
 1949                     return False
 1950             # Any unexpected exception to be handled by caller.
 1951 
 1952     @pure_driver_debug_trace
 1953     @utils.retry(PureDriverException,
 1954                  REPL_SETTINGS_PROPAGATE_RETRY_INTERVAL,
 1955                  REPL_SETTINGS_PROPAGATE_MAX_RETRIES)
 1956     def _wait_until_target_group_setting_propagates(
 1957             self, target_array, pgroup_name_on_target):
 1958         # Wait for pgroup to show up on target array.
 1959         if self._does_pgroup_exist(target_array, pgroup_name_on_target):
 1960             return
 1961         else:
 1962             raise PureDriverException(
 1963                 message=_('Protection Group not ready.'))
 1964 
 1965     @pure_driver_debug_trace
 1966     @utils.retry(PureDriverException,
 1967                  REPL_SETTINGS_PROPAGATE_RETRY_INTERVAL,
 1968                  REPL_SETTINGS_PROPAGATE_MAX_RETRIES)
 1969     def _wait_until_source_array_allowed(self, source_array, pgroup_name):
 1970         result = source_array.get_pgroup(pgroup_name)
 1971         if result["targets"][0]["allowed"]:
 1972             return
 1973         else:
 1974             raise PureDriverException(message=_('Replication not '
 1975                                                 'allowed yet.'))
 1976 
 1977     def _get_pgroup_name_on_target(self, source_array_name, pgroup_name):
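              # e.g. ("array-a", "cinder-group") -> "array-a:cinder-group";
              # this is how the replicated pgroup appears on the target array
              # (names are illustrative).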
 1978         return "%s:%s" % (source_array_name, pgroup_name)
 1979 
 1980     @pure_driver_debug_trace
 1981     def _setup_replicated_pods(self, primary, ac_secondaries, pod_name):
 1982         # Make sure the pod exists
 1983         self._create_pod_if_not_exist(primary, pod_name)
 1984 
 1985         # Stretch it across arrays we have configured; assume all secondary
 1986         # arrays given to this method are configured for sync rep with active
 1987         # cluster enabled.
 1988         for target_array in ac_secondaries:
 1989             try:
 1990                 primary.add_pod(pod_name, target_array.array_name)
 1991             except purestorage.PureHTTPError as err:
 1992                 with excutils.save_and_reraise_exception() as ctxt:
 1993                     if err.code == 400 and (
 1994                             ERR_MSG_ALREADY_EXISTS
 1995                             in err.text):
 1996                         ctxt.reraise = False
 1997                         LOG.info("Skipping add array %(target_array)s to pod"
 1998                                  " %(pod_name)s since it's already added.",
 1999                                  {"target_array": target_array.array_name,
 2000                                   "pod_name": pod_name})
 2001 
 2002     @pure_driver_debug_trace
 2003     def _setup_replicated_pgroups(self, primary, secondaries, pg_name,
 2004                                   replication_interval, retention_policy):
 2005         self._create_protection_group_if_not_exist(
 2006             primary, pg_name)
 2007 
 2008         # Apply retention policies to a protection group.
 2009         # These retention policies will be applied on the replicated
 2010         # snapshots on the target array.
 2011         primary.set_pgroup(pg_name, **retention_policy)
 2012 
 2013         # Configure replication propagation frequency on a
 2014         # protection group.
 2015         primary.set_pgroup(pg_name,
 2016                            replicate_frequency=replication_interval)
 2017         for target_array in secondaries:
 2018             try:
 2019                 # Configure PG to replicate to target_array.
 2020                 primary.set_pgroup(pg_name,
 2021                                    addtargetlist=[target_array.array_name])
 2022             except purestorage.PureHTTPError as err:
 2023                 with excutils.save_and_reraise_exception() as ctxt:
 2024                     if err.code == 400 and (
 2025                             ERR_MSG_ALREADY_INCLUDES
 2026                             in err.text):
 2027                         ctxt.reraise = False
 2028                         LOG.info("Skipping add target %(target_array)s"
 2029                                  " to protection group %(pgname)s"
 2030                                  " since it's already added.",
 2031                                  {"target_array": target_array.array_name,
 2032                                   "pgname": pg_name})
 2033 
 2034         # Wait until "Target Group" setting propagates to target_array.
 2035         pgroup_name_on_target = self._get_pgroup_name_on_target(
 2036             primary.array_name, pg_name)
 2037 
 2038         for target_array in secondaries:
 2039             self._wait_until_target_group_setting_propagates(
 2040                 target_array,
 2041                 pgroup_name_on_target)
 2042             try:
 2043                 # Configure the target_array to allow replication from the
 2044                 # PG on source_array.
 2045                 target_array.set_pgroup(pgroup_name_on_target,
 2046                                         allowed=True)
 2047             except purestorage.PureHTTPError as err:
 2048                 with excutils.save_and_reraise_exception() as ctxt:
 2049                     if (err.code == 400 and
 2050                             ERR_MSG_ALREADY_ALLOWED in err.text):
 2051                         ctxt.reraise = False
 2052                         LOG.info("Skipping allow pgroup %(pgname)s on "
 2053                                  "target array %(target_array)s since "
 2054                                  "it is already allowed.",
 2055                                  {"pgname": pg_name,
 2056                                   "target_array": target_array.array_name})
 2057 
 2058         # Wait until source array acknowledges previous operation.
 2059         self._wait_until_source_array_allowed(primary, pg_name)
 2060         # Start replication on the PG.
 2061         primary.set_pgroup(pg_name, replicate_enabled=True)
 2062 
 2063     @pure_driver_debug_trace
 2064     def _generate_replication_retention(self):
 2065         """Generate replication retention settings in Purity format.
 2066 
 2067         An example of the settings:
 2068         target_all_for = 14400 (i.e. 4 hours)
 2069         target_per_day = 6
 2070         target_days = 4
 2071         The settings above configure the target array to retain all
 2072         replicated snapshots from the most recent 4 hours.
 2073         After the most recent 4 hours, the target will retain 6 snapshots
 2074         per day for the previous 4 days.
 2075 
 2076         :return: a dictionary representing replication retention settings
 2077         """
 2078         replication_retention = dict(
 2079             target_all_for=self._replication_retention_short_term,
 2080             target_per_day=self._replication_retention_long_term_per_day,
 2081             target_days=self._replication_retention_long_term
 2082         )
 2083         return replication_retention
 2084 
 2085     @pure_driver_debug_trace
 2086     def _get_latest_replicated_pg_snap(self,
 2087                                        target_array,
 2088                                        source_array_name,
 2089                                        pgroup_name):
 2090         # Get all protection group snapshots.
 2091         snap_name = "%s:%s" % (source_array_name, pgroup_name)
 2092         LOG.debug("Looking for snap %(snap)s on array id %(array_id)s",
 2093                   {"snap": snap_name, "array_id": target_array.array_id})
 2094         pg_snaps = target_array.get_pgroup(snap_name, snap=True, transfer=True)
 2095         LOG.debug("Retrieved snapshots on target %(pg_snaps)s",
 2096                   {"pg_snaps": pg_snaps})
 2097 
 2098         # Only use snapshots that are replicated completely.
 2099         pg_snaps_filtered = [s for s in pg_snaps if s["progress"] == 1]
 2100         LOG.debug("Filtered list of snapshots %(pg_snaps_filtered)s",
 2101                   {"pg_snaps_filtered": pg_snaps_filtered})
 2102 
 2103         # Sort the protection group snapshots, latest first, and pick the
 2104         # most recent fully-replicated one.
 2105         pg_snaps_filtered.sort(key=lambda x: x["created"], reverse=True)
 2106         LOG.debug("Sorted list of snapshots %(pg_snaps_filtered)s",
 2107                   {"pg_snaps_filtered": pg_snaps_filtered})
 2108 
 2109         pg_snap = pg_snaps_filtered[0] if pg_snaps_filtered else None
 2110         LOG.debug("Selecting snapshot %(pg_snap)s for failover.",
 2111                   {"pg_snap": pg_snap})
 2112 
 2113         return pg_snap
 2114 
 2115     @pure_driver_debug_trace
 2116     def _create_pod_if_not_exist(self, source_array, name):
 2117         if not name:
 2118             raise PureDriverException(
 2119                 reason=_("Empty string passed for Pod name."))
 2120         try:
 2121             source_array.create_pod(name)
 2122         except purestorage.PureHTTPError as err:
 2123             with excutils.save_and_reraise_exception() as ctxt:
 2124                 if err.code == 400 and ERR_MSG_ALREADY_EXISTS in err.text:
 2125                     # Happens if the pod already exists
 2126                     ctxt.reraise = False
 2127                     LOG.warning("Skipping creation of pod %s since it "
 2128                                 "already exists.", name)
 2129                     return
 2130                 if err.code == 400 and (
 2131                         ERR_MSG_PENDING_ERADICATION in err.text):
 2132                     ctxt.reraise = False
 2133                     LOG.warning("Pod %s is deleted but not"
 2134                                 " eradicated - will recreate.", name)
 2135                     source_array.eradicate_pod(name)
 2136                     self._create_pod_if_not_exist(source_array, name)
 2137 
 2138     @pure_driver_debug_trace
 2139     def _create_protection_group_if_not_exist(self, source_array, pgname):
 2140         if not pgname:
 2141             raise PureDriverException(
 2142                 reason=_("Empty string passed for PG name."))
 2143         try:
 2144             source_array.create_pgroup(pgname)
 2145         except purestorage.PureHTTPError as err:
 2146             with excutils.save_and_reraise_exception() as ctxt:
 2147                 if err.code == 400 and ERR_MSG_ALREADY_EXISTS in err.text:
 2148                     # Happens if the PG already exists
 2149                     ctxt.reraise = False
 2150                     LOG.warning("Skipping creation of PG %s since it "
 2151                                 "already exists.", pgname)
 2152                     # We assume PG has already been set up with correct
 2153                     # replication settings.
 2154                     return
 2155                 if err.code == 400 and (
 2156                         ERR_MSG_PENDING_ERADICATION in err.text):
 2157                     ctxt.reraise = False
 2158                     LOG.warning("Protection group %s is deleted but not"
 2159                                 " eradicated - will recreate.", pgname)
 2160                     source_array.eradicate_pgroup(pgname)
 2161                     self._create_protection_group_if_not_exist(source_array,
 2162                                                                pgname)
 2163 
 2164     def _find_async_failover_target(self):
 2165         if not self._replication_target_arrays:
 2166             raise PureDriverException(
 2167                 reason=_("Unable to find failover target, no "
 2168                          "secondary targets configured."))
 2169         secondary_array = None
 2170         pg_snap = None
 2171         for array in self._replication_target_arrays:
 2172             if array.replication_type != REPLICATION_TYPE_ASYNC:
 2173                 continue
 2174             try:
 2175                 secondary_array = array
 2176                 pg_snap = self._get_latest_replicated_pg_snap(
 2177                     secondary_array,
 2178                     self._get_current_array().array_name,
 2179                     self._replication_pg_name
 2180                 )
 2181                 if pg_snap:
 2182                     break
 2183             except Exception:
 2184                 LOG.exception('Error finding replicated pg snapshot '
 2185                               'on %(secondary)s.',
 2186                               {'secondary': array.backend_id})
 2187                 secondary_array = None
 2188 
 2189         if not pg_snap:
 2190             raise PureDriverException(
 2191                 reason=_("Unable to find viable pg snapshot to use for "
 2192                          "failover on selected secondary array: %(id)s.") %
 2193                 {"id": secondary_array.backend_id if secondary_array else None}
 2194             )
 2195 
 2196         return secondary_array, pg_snap
 2197 
 2198     def _find_sync_failover_target(self):
 2199         secondary_array = None
 2200         if not self._active_cluster_target_arrays:
 2201             LOG.warning("Unable to find failover target, no "
 2202                         "sync rep secondary targets configured.")
 2203             return secondary_array
 2204 
 2205         for array in self._active_cluster_target_arrays:
 2206             try:
 2207                 secondary_array = array
 2208                 # Ensure the pod is in a good state on the array
 2209                 pod_info = secondary_array.get_pod(self._replication_pod_name)
 2210                 for pod_array in pod_info["arrays"]:
 2211                     # Compare against Purity ID's
 2212                     if pod_array["array_id"] == secondary_array.array_id:
 2213                         if pod_array["status"] == "online":
 2214                             # Success! Use this array.
 2215                             return secondary_array
 2216                         # The pod is not healthy here; try the next target.
 2217                         secondary_array = None
 2218
 2219             except purestorage.PureHTTPError as err:
 2220                 LOG.warning("Failed to get pod status for secondary array "
 2221                             "%(id)s: %(err)s",
 2222                             {
 2223                                 "id": secondary_array.backend_id,
 2224                                 "err": err,
 2225                             })
 2226                 secondary_array = None
 2227         return secondary_array
 2228 
 2229     def _async_failover_host(self, volumes, secondary_array, pg_snap):
 2230         # NOTE(patrickeast): This currently requires a call with REST API 1.3.
 2231         # If we need to, create a temporary FlashArray for this operation.
 2232         api_version = secondary_array.get_rest_version()
 2233         LOG.debug("Current REST API for array id %(id)s is %(api_version)s",
 2234                   {"id": secondary_array.array_id, "api_version": api_version})
 2235         if api_version != '1.3':
 2236             # Try to copy the FlashArray settings as closely as we can.
 2237             if hasattr(secondary_array, '_request_kwargs'):
 2238                 target_array = self._get_flasharray(
 2239                     secondary_array._target,
 2240                     api_token=secondary_array._api_token,
 2241                     rest_version='1.3',
 2242                     request_kwargs=secondary_array._request_kwargs,
 2243                 )
 2244             else:
 2245                 target_array = self._get_flasharray(
 2246                     secondary_array._target,
 2247                     api_token=secondary_array._api_token,
 2248                     rest_version='1.3',
 2249                 )
 2250         else:
 2251             target_array = secondary_array
 2252 
 2253         volume_snaps = target_array.get_volume(pg_snap['name'],
 2254                                                snap=True,
 2255                                                pgroup=True)
 2256 
 2257         # We only care about volumes that are in the list we are given.
 2258         vol_names = set()
 2259         for vol in volumes:
 2260             vol_names.add(self._get_vol_name(vol))
 2261 
 2262         for snap in volume_snaps:
 2263             vol_name = snap['name'].split('.')[-1]
 2264             if vol_name in vol_names:
 2265                 vol_names.remove(vol_name)
 2266                 LOG.debug('Creating volume %(vol)s from replicated snapshot '
 2267                           '%(snap)s', {'vol': vol_name, 'snap': snap['name']})
 2268                 secondary_array.copy_volume(snap['name'],
 2269                                             vol_name,
 2270                                             overwrite=True)
 2271             else:
 2272                 LOG.debug('Ignoring unmanaged volume %(vol)s from replicated '
 2273                           'snapshot %(snap)s.', {'vol': vol_name,
 2274                                                  'snap': snap['name']})
 2275         # Any volumes still left in the vol_names set had no matching
 2276         # replicated snapshot to copy from, so mark them as being in error.
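        # Build the list of model updates that the failover flow expects:
        # one {'volume_id': ..., 'updates': {...}} dict per volume handled.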
 2277         model_updates = []
 2278         for vol in volumes:
 2279             if self._get_vol_name(vol) in vol_names:
 2280                 model_updates.append({
 2281                     'volume_id': vol['id'],
 2282                     'updates': {
 2283                         'status': 'error',
 2284                     }
 2285                 })
 2286             else:
 2287                 repl_status = fields.ReplicationStatus.FAILED_OVER
 2288                 model_updates.append({
 2289                     'volume_id': vol['id'],
 2290                     'updates': {
 2291                         'replication_status': repl_status,
 2292                     }
 2293                 })
 2294         return model_updates
 2295 
 2296     def _sync_failover_host(self, volumes, secondary_array):
 2297         """Perform a failover for hosts in an ActiveCluster setup.
 2298 
 2299         Nothing actually needs to be changed on the array; we only update
 2300         the volume status so the surviving volumes can be distinguished.
 2301         """
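        # Volumes stretched into the replication pod are expected to carry
        # the pod name as a prefix (illustrative shape only:
        # '<pod-name>::volume-<id>-cinder'), which is what the startswith()
        # check below relies on.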
 2302         array_volumes = secondary_array.list_volumes()
 2303         replicated_vol_names = set()
 2304         for vol in array_volumes:
 2305             name = vol['name']
 2306             if name.startswith(self._replication_pod_name):
 2307                 replicated_vol_names.add(name)
 2308 
 2309         model_updates = []
 2310         for vol in volumes:
 2311             if self._get_vol_name(vol) not in replicated_vol_names:
 2312                 model_updates.append({
 2313                     'volume_id': vol['id'],
 2314                     'updates': {
 2315                         'status': fields.VolumeStatus.ERROR,
 2316                     }
 2317                 })
 2318             else:
 2319                 repl_status = fields.ReplicationStatus.FAILED_OVER
 2320                 model_updates.append({
 2321                     'volume_id': vol['id'],
 2322                     'updates': {
 2323                         'replication_status': repl_status,
 2324                     }
 2325                 })
 2326         return model_updates
 2327 
 2328     def _get_wwn(self, pure_vol_name):
 2329         """Return the WWN based on the volume's serial number.
 2330 
 2331         The WWN is composed of the constant '36', Pure's OUI '24a937',
 2332         a '0', and finally the volume's serial number.
 2333         """
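        # Illustrative example (serial value is made up, not from a real
        # array): a serial of 'F4D2AA4B12A04EEF000114A2' would yield
        #   ('3624a9370' + 'F4D2AA4B12A04EEF000114A2').lower()
        #   == '3624a9370f4d2aa4b12a04eef000114a2'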
 2334         array = self._get_current_array()
 2335         volume_info = array.get_volume(pure_vol_name)
 2336         wwn = '3624a9370' + volume_info['serial']
 2337         return wwn.lower()
 2338 
 2339     def _get_current_array(self):
 2340         return self._array
 2341 
 2342     def _set_current_array(self, array):
 2343         self._array = array
 2344 
 2345 
 2346 @interface.volumedriver
 2347 class PureISCSIDriver(PureBaseVolumeDriver, san.SanISCSIDriver):
 2348     """OpenStack Volume Driver to support Pure Storage FlashArray.
 2349 
 2350     This version of the driver enables the use of iSCSI for
 2351     the underlying storage connectivity with the FlashArray.
 2352     """
 2353 
 2354     VERSION = "11.0.iscsi"
 2355 
 2356     def __init__(self, *args, **kwargs):
 2357         execute = kwargs.pop("execute", utils.execute)
 2358         super(PureISCSIDriver, self).__init__(execute=execute, *args, **kwargs)
 2359         self._storage_protocol = "iSCSI"
 2360 
 2361     def _get_host(self, array, connector, remote=False):
 2362         """Return a list of Purity hosts that include the connector's IQN."""
 2363         if (remote and array.get_rest_version() in
 2364                 SYNC_REPLICATION_REQUIRED_API_VERSIONS):
 2365             hosts = array.list_hosts(remote=True)
 2366         else:
 2367             hosts = array.list_hosts()
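        # Each host entry is expected to expose at least 'name' and 'iqn'
        # (a list of IQNs); that is all this driver relies on here.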
 2368         matching_hosts = []
 2369         for host in hosts:
 2370             if connector["initiator"] in host["iqn"]:
 2371                 matching_hosts.append(host)
 2372         return matching_hosts
 2373 
 2374     @pure_driver_debug_trace
 2375     def initialize_connection(self, volume, connector):
 2376         """Allow connection to connector and return connection info."""
 2377         pure_vol_name = self._get_vol_name(volume)
 2378         target_arrays = [self._get_current_array()]
 2379         if (self._is_vol_in_pod(pure_vol_name) and
 2380                 self._is_active_cluster_enabled):
 2381             target_arrays += self._uniform_active_cluster_target_arrays
 2382 
 2383         chap_username = None
 2384         chap_password = None
 2385         if self.configuration.use_chap_auth:
 2386             (chap_username, chap_password) = self._get_chap_credentials(
 2387                 connector['host'], connector["initiator"])
 2388 
 2389         targets = []
 2390         for array in target_arrays:
 2391             connection = self._connect(array, pure_vol_name, connector,
 2392                                        chap_username, chap_password)
 2393 
 2394             target_ports = self._get_target_iscsi_ports(array)
 2395             targets.append({
 2396                 "connection": connection,
 2397                 "ports": target_ports,
 2398             })
 2399 
 2400         properties = self._build_connection_properties(targets)
 2401         properties["data"]["wwn"] = self._get_wwn(pure_vol_name)
 2402 
 2403         if self.configuration.use_chap_auth:
 2404             properties["data"]["auth_method"] = "CHAP"
 2405             properties["data"]["auth_username"] = chap_username
 2406             properties["data"]["auth_password"] = chap_password
 2407 
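        # At this point "properties" is expected to look roughly like this
        # (illustrative shape only):
        #   {'driver_volume_type': 'iscsi',
        #    'data': {'target_discovered': False, 'discard': True,
        #             'target_luns': [...], 'target_iqns': [...],
        #             'target_portals': [...], 'wwn': '3624a9370...',
        #             'auth_method': 'CHAP', ...}}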
 2408         return properties
 2409 
 2410     def _build_connection_properties(self, targets):
 2411         props = {
 2412             "driver_volume_type": "iscsi",
 2413             "data": {
 2414                 "target_discovered": False,
 2415                 "discard": True,
 2416             },
 2417         }
 2418 
 2419         # Convert CIDR to the expected type
 2420         if not isinstance(self.configuration.pure_iscsi_cidr, str):
 2421             cidr = self.configuration.pure_iscsi_cidr.decode('utf8')
 2422         else:
 2423             cidr = self.configuration.pure_iscsi_cidr
 2424         check_cidr = ipaddress.IPv4Network(cidr)
 2425 
 2426         target_luns = []
 2427         target_iqns = []
 2428         target_portals = []
 2429 
 2430         # Aggregate all targets together if they're in the allowed CIDR. We may
 2431         # end up with different LUNs for different target iqn/portal sets (i.e.
 2432         # there could be a unique LUN for each FlashArray).
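        # For example (values are illustrative only): with
        # pure_iscsi_cidr = '10.1.1.0/24', a portal such as '10.1.1.12:3260'
        # is kept while '192.168.0.9:3260' is skipped.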
 2433         for target in targets:
 2434             port_iter = iter(target["ports"])
 2435             for port in port_iter:
 2436                 # Check to ensure that the portal IP is in the iSCSI target
 2437                 # CIDR before adding it
 2438                 target_portal = port["portal"]
 2439                 if not isinstance(target_portal.split(":")[0], str):
 2440                     portal = (target_portal.split(":")[0]).decode('utf8')
 2441                 else:
 2442                     portal = target_portal.split(":")[0]
 2443                 check_ip = ipaddress.IPv4Address(portal)
 2444                 if check_ip in check_cidr:
 2445                     target_luns.append(target["connection"]["lun"])
 2446                     target_iqns.append(port["iqn"])
 2447                     target_portals.append(target_portal)
 2448 
 2449         LOG.info("iSCSI target portals that match CIDR range: '%s'",
 2450                  target_portals)
 2451 
 2452         # If any portals matched the CIDR, report the full target lists.
 2453         if target_luns and target_iqns and target_portals:
 2454             props["data"]["target_luns"] = target_luns
 2455             props["data"]["target_iqns"] = target_iqns
 2456             props["data"]["target_portals"] = target_portals
 2457 
 2458         return props
 2459 
 2460     def _get_target_iscsi_ports(self, array):
 2461         """Return list of iSCSI-enabled port descriptions."""
 2462         ports = array.list_ports()
 2463         iscsi_ports = [port for port in ports if port["iqn"]]
 2464         if not iscsi_ports:
 2465             raise PureDriverException(
 2466                 reason=_("No iSCSI-enabled ports on target array."))
 2467         return iscsi_ports
 2468 
 2469     @staticmethod
 2470     def _generate_chap_secret():
 2471         return volume_utils.generate_password()
 2472 
 2473     def _get_chap_secret_from_init_data(self, initiator):
 2474         data = self.driver_utils.get_driver_initiator_data(initiator)
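        # The stored initiator data is expected to be a list of key/value
        # dicts, e.g. (illustrative):
        #   [{'key': CHAP_SECRET_KEY, 'value': '<secret>'}]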
 2475         if data:
 2476             for d in data:
 2477                 if d["key"] == CHAP_SECRET_KEY:
 2478                     return d["value"]
 2479         return None
 2480 
 2481     def _get_chap_credentials(self, host, initiator):
 2482         username = host
 2483         password = self._get_chap_secret_from_init_data(initiator)
 2484         if not password:
 2485             password = self._generate_chap_secret()
 2486             success = self.driver_utils.insert_driver_initiator_data(
 2487                 initiator, CHAP_SECRET_KEY, password)
 2488             if not success:
 2489                 # The only reason the save would have failed is if someone
 2490                 # else (read: another thread/instance of the driver) set
 2491                 # one before we did. In that case just do another query.
 2492                 password = self._get_chap_secret_from_init_data(initiator)
 2493 
 2494         return username, password
 2495 
 2496     @utils.retry(PureRetryableException,
 2497                  retries=HOST_CREATE_MAX_RETRIES)
 2498     def _connect(self, array, vol_name, connector,
 2499                  chap_username, chap_password):
 2500         """Connect the host and volume; return dict describing connection."""
 2501         iqn = connector["initiator"]
 2502         hosts = self._get_host(array, connector, remote=False)
 2503         host = hosts[0] if len(hosts) > 0 else None
 2504         if host:
 2505             host_name = host["name"]
 2506             LOG.info("Re-using existing purity host %(host_name)r",
 2507                      {"host_name": host_name})
 2508             if self.configuration.use_chap_auth:
 2509                 if not GENERATED_NAME.match(host_name):
 2510                     LOG.error("Purity host %(host_name)s is not managed "
 2511                               "by Cinder and can't have CHAP credentials "
 2512                               "modified. Remove IQN %(iqn)s from the host "
 2513                               "to resolve this issue.",
 2514                               {"host_name": host_name,
 2515                                "iqn": connector["initiator"]})
 2516                     raise PureDriverException(
 2517                         reason=_("Unable to re-use a host that is not "
 2518                                  "managed by Cinder with use_chap_auth=True."))
 2519                 elif chap_username is None or chap_password is None:
 2520                     LOG.error("Purity host %(host_name)s is managed by "
 2521                               "Cinder but CHAP credentials could not be "
 2522                               "retrieved from the Cinder database.",
 2523                               {"host_name": host_name})
 2524                     raise PureDriverException(
 2525                         reason=_("Unable to re-use host with unknown CHAP "
 2526                                  "credentials configured."))
 2527         else:
 2528             personality = self.configuration.safe_get('pure_host_personality')
 2529             if personality:
 2530                 api_version = array.get_rest_version()
 2531                 if api_version not in PERSONALITY_REQUIRED_API_VERSIONS:
 2532                     # Continuing here would mean creating a host not according
 2533                     # to specifications, possibly leading to unexpected
 2534                     # behavior later on.
 2535                     msg = _('Unable to set host personality with Purity REST '
 2536                             'API version %(api_version)s, requires '
 2537                             '%(required_versions)s.') % {
 2538                         'api_version': api_version,
 2539                         'required_versions': PERSONALITY_REQUIRED_API_VERSIONS
 2540                     }
 2541                     raise PureDriverException(reason=msg)
 2542 
 2543             host_name = self._generate_purity_host_name(connector["host"])
 2544             LOG.info("Creating host object %(host_name)r with IQN:"
 2545                      " %(iqn)s.", {"host_name": host_name, "iqn": iqn})
 2546             try:
 2547                 array.create_host(host_name, iqnlist=[iqn])
 2548             except purestorage.PureHTTPError as err:
 2549                 if (err.code == 400 and
 2550                         (ERR_MSG_ALREADY_EXISTS in err.text or
 2551                             ERR_MSG_ALREADY_IN_USE in err.text)):
 2552                     # If someone else created it before we could, just
 2553                     # retry; we will pick up the new host.
 2554                     LOG.debug('Unable to create host: %s', err.text)
 2555                     raise PureRetryableException()
 2556 
 2557             if personality:
 2558                 try:
 2559                     array.set_host(host_name, personality=personality)
 2560                 except purestorage.PureHTTPError as err:
 2561                     if (err.code == 400 and
 2562                             ERR_MSG_HOST_NOT_EXIST in err.text):
 2563                         # If the host disappeared out from under us that's
 2564                         # ok, we will just retry and snag a new host.
 2565                         LOG.debug('Unable to set host personality: %s',
 2566                                   err.text)
 2567                         raise PureRetryableException()
 2568 
 2569             if self.configuration.use_chap_auth:
 2570                 try:
 2571                     array.set_host(host_name,
 2572                                    host_user=chap_username,
 2573                                    host_password=chap_password)
 2574                 except purestorage.PureHTTPError as err:
 2575                     if (err.code == 400 and
 2576                             ERR_MSG_HOST_NOT_EXIST in err.text):
 2577                         # If the host disappeared out from under us that's ok,
 2578                         # we will just retry and snag a new host.
 2579                         LOG.debug('Unable to set CHAP info: %s', err.text)
 2580                         raise PureRetryableException()
 2581 
 2582         # TODO(patrickeast): Ensure that the host has the correct preferred
 2583         # arrays configured for it.
 2584 
 2585         connection = self._connect_host_to_vol(array,
 2586                                                host_name,
 2587                                                vol_name)
 2588 
 2589         return connection
 2590 
 2591 
 2592 @interface.volumedriver
 2593 class PureFCDriver(PureBaseVolumeDriver, driver.FibreChannelDriver):
 2594     """OpenStack Volume Driver to support Pure Storage FlashArray.
 2595 
 2596     This version of the driver enables the use of Fibre Channel for
 2597     the underlying storage connectivity with the FlashArray. It fully
 2598     supports the Cinder Fibre Channel Zone Manager.
 2599     """
 2600 
 2601     VERSION = "11.0.fc"
 2602 
 2603     def __init__(self, *args, **kwargs):
 2604         execute = kwargs.pop("execute", utils.execute)
 2605         super(PureFCDriver, self).__init__(execute=execute, *args, **kwargs)
 2606         self._storage_protocol = "FC"
 2607         self._lookup_service = fczm_utils.create_lookup_service()
 2608 
 2609     def _get_host(self, array, connector, remote=False):
 2610         """Return a list of Purity hosts matching the connector's WWPNs."""
 2611         if (remote and array.get_rest_version() in
 2612                 SYNC_REPLICATION_REQUIRED_API_VERSIONS):
 2613             hosts = array.list_hosts(remote=True)
 2614         else:
 2615             hosts = array.list_hosts()
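        # Each host entry is expected to expose at least 'name' and 'wwn'
        # (a list of WWNs); the comparison below is case-insensitive.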
 2616         matching_hosts = []
 2617         for host in hosts:
 2618             for wwn in connector["wwpns"]:
 2619                 if wwn.lower() in str(host["wwn"]).lower():
 2620                     matching_hosts.append(host)
 2621                     break  # go to next host
 2622         return matching_hosts
 2623 
 2624     @staticmethod
 2625     def _get_array_wwns(array):
 2626         """Return a list of WWNs from the array's ports."""
 2627         ports = array.list_ports()
 2628         return [port["wwn"] for port in ports if port["wwn"]]
 2629 
 2630     @pure_driver_debug_trace
 2631     def initialize_connection(self, volume, connector):
 2632         """Allow connection to connector and return connection info."""
 2633         pure_vol_name = self._get_vol_name(volume)
 2634         target_arrays = [self._get_current_array()]
 2635         if (self._is_vol_in_pod(pure_vol_name) and
 2636                 self._is_active_cluster_enabled):
 2637             target_arrays += self._uniform_active_cluster_target_arrays
 2638 
 2639         target_luns = []
 2640         target_wwns = []
 2641         for array in target_arrays:
 2642             connection = self._connect(array, pure_vol_name, connector)
 2643             array_wwns = self._get_array_wwns(array)
 2644             for wwn in array_wwns:
 2645                 target_wwns.append(wwn)
 2646                 target_luns.append(connection["lun"])
 2647 
 2648         # Build the zoning map based on *all* WWNs; this could be multiple
 2649         # arrays connecting to the same host with a stretched volume.
 2650         init_targ_map = self._build_initiator_target_map(target_wwns,
 2651                                                          connector)
 2652 
 2653         properties = {
 2654             "driver_volume_type": "fibre_channel",
 2655             "data": {
 2656                 "target_discovered": True,
 2657                 "target_lun": target_luns[0],  # For backwards compatibility
 2658                 "target_luns": target_luns,
 2659                 "target_wwn": target_wwns,
 2660                 "target_wwns": target_wwns,
 2661                 "initiator_target_map": init_targ_map,
 2662                 "discard": True,
 2663             }
 2664         }
 2665         properties["data"]["wwn"] = self._get_wwn(pure_vol_name)
 2666 
 2667         fczm_utils.add_fc_zone(properties)
 2668         return properties
 2669 
 2670     @utils.retry(PureRetryableException,
 2671                  retries=HOST_CREATE_MAX_RETRIES)
 2672     def _connect(self, array, vol_name, connector):
 2673         """Connect the host and volume; return dict describing connection."""
 2674         wwns = connector["wwpns"]
 2675         hosts = self._get_host(array, connector, remote=False)
 2676         host = hosts[0] if len(hosts) > 0 else None
 2677 
 2678         if host:
 2679             host_name = host["name"]
 2680             LOG.info("Re-using existing purity host %(host_name)r",
 2681                      {"host_name": host_name})
 2682         else:
 2683             host_name = self._generate_purity_host_name(connector["host"])
 2684             LOG.info("Creating host object %(host_name)r with WWN:"
 2685                      " %(wwn)s.", {"host_name": host_name, "wwn": wwns})
 2686             try:
 2687                 array.create_host(host_name, wwnlist=wwns)
 2688             except purestorage.PureHTTPError as err:
 2689                 if (err.code == 400 and
 2690                         (ERR_MSG_ALREADY_EXISTS in err.text or
 2691                             ERR_MSG_ALREADY_IN_USE in err.text)):
 2692                     # If someone else created it before we could, just
 2693                     # retry; we will pick up the new host.
 2694                     LOG.debug('Unable to create host: %s', err.text)
 2695                     raise PureRetryableException()
 2696 
 2697         # TODO(patrickeast): Ensure that the host has the correct preferred
 2698         # arrays configured for it.
 2699 
 2700         return self._connect_host_to_vol(array, host_name, vol_name)
 2701 
 2702     def _build_initiator_target_map(self, target_wwns, connector):
 2703         """Build the target_wwns and the initiator target map."""
 2704         init_targ_map = {}
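        # The resulting map is keyed by initiator WWPN, each mapping to the
        # list of target WWNs it may be zoned to, e.g. (illustrative only):
        #   {'<initiator-wwpn>': ['<target-wwn-1>', '<target-wwn-2>']}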
 2705 
 2706         if self._lookup_service:
 2707             # Use the FC SAN lookup service to determine which
 2708             # initiator/target pairs are visible on the fabric.
 2709             dev_map = self._lookup_service.get_device_mapping_from_network(
 2710                 connector['wwpns'],
 2711                 target_wwns)
 2712 
 2713             for fabric_name in dev_map:
 2714                 fabric = dev_map[fabric_name]
 2715                 for initiator in fabric['initiator_port_wwn_list']:
 2716                     if initiator not in init_targ_map:
 2717                         init_targ_map[initiator] = []
 2718                     init_targ_map[initiator] += fabric['target_port_wwn_list']
 2719                     init_targ_map[initiator] = list(set(
 2720                         init_targ_map[initiator]))
 2721         else:
 2722             init_targ_map = dict.fromkeys(connector["wwpns"], target_wwns)
 2723 
 2724         return init_targ_map
 2725 
 2726     @pure_driver_debug_trace
 2727     def terminate_connection(self, volume, connector, **kwargs):
 2728         """Terminate connection."""
 2729         vol_name = self._get_vol_name(volume)
 2730         unused_wwns = []
 2731 
 2732         if self._is_vol_in_pod(vol_name):
 2733             # Try to disconnect from each target array; they may not be
 2734             # online, so a failure there should not abort the operation.
 2735             for array in self._uniform_active_cluster_target_arrays:
 2736                 try:
 2737                     no_more_connections = self._disconnect(
 2738                         array, volume, connector, remove_remote_hosts=False)
 2739                     if no_more_connections:
 2740                         unused_wwns += self._get_array_wwns(array)
 2741                 except purestorage.PureError as err:
 2742                     # Swallow any exception, just warn and continue
 2743                     LOG.warning("Disconnect on secondary array failed with"
 2744                                 " message: %(msg)s", {"msg": err.text})
 2745 
 2746         # Now disconnect from the current array, removing any leftover
 2747         # remote hosts that we may not have been able to reach.
 2748         current_array = self._get_current_array()
 2749         no_more_connections = self._disconnect(current_array,
 2750                                                volume, connector,
 2751                                                remove_remote_hosts=False)
 2752         if no_more_connections:
 2753             unused_wwns += self._get_array_wwns(current_array)
 2754 
 2755         properties = {"driver_volume_type": "fibre_channel", "data": {}}
 2756         if len(unused_wwns) > 0:
 2757             init_targ_map = self._build_initiator_target_map(unused_wwns,
 2758                                                              connector)
 2759             properties["data"] = {"target_wwn": unused_wwns,
 2760                                   "initiator_target_map": init_targ_map}
 2761 
 2762         fczm_utils.remove_fc_zone(properties)
 2763         return properties