"Fossies" - the Fresh Open Source Software Archive

Member "cinder-17.1.0/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py" (8 Mar 2021, 280421 Bytes) of package /linux/misc/openstack/cinder-17.1.0.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) Python source code syntax highlighting (style: standard) with prefixed line numbers. Alternatively you can here view or download the uninterpreted source code file. For more information about "storwize_svc_common.py" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 17.0.1_vs_17.1.0.

# Copyright 2015 IBM Corp.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
#

import math
import random
import re
import time
import unicodedata

from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
from oslo_service import loopingcall
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import units
import paramiko
import six

from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder import ssh_utils
from cinder import utils as cinder_utils
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume.drivers.ibm.storwize_svc import (
    replication as storwize_rep)
from cinder.volume.drivers.ibm.storwize_svc import storwize_const
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume import volume_types
from cinder.volume import volume_utils


INTERVAL_1_SEC = 1
DEFAULT_TIMEOUT = 15
CMMVC5753E = "CMMVC5753E"
LOG = logging.getLogger(__name__)

storwize_svc_opts = [
    cfg.ListOpt('storwize_svc_volpool_name',
                default=['volpool'],
                help='Comma-separated list of storage system storage '
                     'pools for volumes.'),
    cfg.IntOpt('storwize_svc_vol_rsize',
               default=2,
               min=-1, max=100,
               help='Storage system space-efficiency parameter for volumes '
                    '(percentage)'),
    cfg.IntOpt('storwize_svc_vol_warning',
               default=0,
               min=-1, max=100,
               help='Storage system threshold for volume capacity warnings '
                    '(percentage)'),
    cfg.BoolOpt('storwize_svc_vol_autoexpand',
                default=True,
                help='Storage system autoexpand parameter for volumes '
                     '(True/False)'),
    cfg.IntOpt('storwize_svc_vol_grainsize',
               default=256,
               help='Storage system grain size parameter for volumes '
                    '(8/32/64/128/256)'),
    cfg.BoolOpt('storwize_svc_vol_compression',
                default=False,
                help='Storage system compression option for volumes'),
    cfg.BoolOpt('storwize_svc_vol_easytier',
                default=True,
                help='Enable Easy Tier for volumes'),
    cfg.StrOpt('storwize_svc_vol_iogrp',
               default='0',
               help='The I/O group in which to allocate volumes. It can be a '
               'comma-separated list, in which case the driver selects the '
               'io_group with the fewest volumes associated with it.'),
    cfg.IntOpt('storwize_svc_flashcopy_timeout',
               default=120,
               min=1, max=600,
               help='Maximum number of seconds to wait for FlashCopy to be '
                    'prepared.'),
    cfg.BoolOpt('storwize_svc_multihostmap_enabled',
                default=True,
                help='This option no longer has any effect. It is deprecated '
                     'and will be removed in the next release.',
                deprecated_for_removal=True),
    cfg.BoolOpt('storwize_svc_allow_tenant_qos',
                default=False,
                help='Allow tenants to specify QOS on create'),
    cfg.StrOpt('storwize_svc_stretched_cluster_partner',
               default=None,
               help='If operating in stretched cluster mode, specify the '
                    'name of the pool in which mirrored copies are stored. '
                    'Example: "pool2"'),
    cfg.StrOpt('storwize_san_secondary_ip',
               default=None,
               help='Specifies the secondary management IP or hostname to be '
                    'used if san_ip is invalid or becomes inaccessible.'),
    cfg.BoolOpt('storwize_svc_vol_nofmtdisk',
                default=False,
                help='Specifies that the volume not be formatted during '
                     'creation.'),
    cfg.IntOpt('storwize_svc_flashcopy_rate',
               default=50,
               min=1, max=150,
               help='Specifies the Storwize FlashCopy copy rate to be used '
               'when creating a full volume copy. The default rate '
               'is 50, and the valid rates are 1-150.'),
    cfg.StrOpt('storwize_svc_mirror_pool',
               default=None,
               help='Specifies the name of the pool in which the mirrored '
                    'copy is stored. Example: "pool2"'),
    cfg.StrOpt('storwize_peer_pool',
               default=None,
               help='Specifies the name of the peer pool for a hyperswap '
                    'volume; the peer pool must exist on the other site.'),
    cfg.DictOpt('storwize_preferred_host_site',
                default={},
                help='Specifies the site information for the host. '
                     'One WWPN or multiple WWPNs used by the host can be '
                     'specified. For example: '
                     'storwize_preferred_host_site=site1:wwpn1,'
                     'site2:wwpn2&wwpn3 or '
                     'storwize_preferred_host_site=site1:iqn1,site2:iqn2'),
    cfg.IntOpt('cycle_period_seconds',
               default=300,
               min=60, max=86400,
               help='This defines an optional cycle period that applies to '
               'Global Mirror relationships with a cycling mode of multi. '
               'A Global Mirror relationship using the multi cycling_mode '
               'performs a complete cycle at most once each period. '
               'The default is 300 seconds, and the valid range '
               'is 60-86400 seconds.'),
    cfg.BoolOpt('storwize_svc_retain_aux_volume',
                default=False,
                help='Enable or disable retaining of the aux volume on '
                     'secondary storage when the volume is deleted on '
                     'primary storage, or when the primary volume is moved '
                     'from mirror to non-mirror with replication enabled. '
                     'This option is valid for SVC.'),
]

CONF = cfg.CONF
CONF.register_opts(storwize_svc_opts, group=configuration.SHARED_CONF_GROUP)
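# Illustrative cinder.conf backend stanza using a few of the options above.
# The backend name, credentials, and pool are example values only:
#
#   [storwize-backend-1]
#   volume_driver = cinder.volume.drivers.ibm.storwize_svc.storwize_svc_fc.StorwizeSVCFCDriver
#   san_ip = 192.168.0.10
#   san_login = superuser
#   san_password = secret
#   storwize_svc_volpool_name = volpool
#   storwize_svc_vol_rsize = 2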


class StorwizeSSH(object):
    """SSH interface to IBM Storwize family and SVC storage systems."""
    def __init__(self, run_ssh):
        self._ssh = run_ssh

    def _run_ssh(self, ssh_cmd):
        try:
            return self._ssh(ssh_cmd)
        except processutils.ProcessExecutionError as e:
            msg = (_('CLI Exception output:\n command: %(cmd)s\n '
                     'stdout: %(out)s\n stderr: %(err)s.') %
                   {'cmd': ssh_cmd,
                    'out': e.stdout,
                    'err': e.stderr})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

    def run_ssh_info(self, ssh_cmd, delim='!', with_header=False):
        """Run an SSH command and return parsed output."""
        raw = self._run_ssh(ssh_cmd)
        return CLIResponse(raw, ssh_cmd=ssh_cmd, delim=delim,
                           with_header=with_header)

    def run_ssh_assert_no_output(self, ssh_cmd, log_cmd=None):
        """Run an SSH command and assert no output returned."""
        out, err = self._run_ssh(ssh_cmd)
        if len(out.strip()) != 0:
            if not log_cmd:
                log_cmd = ' '.join(ssh_cmd)
            msg = (_('Expected no output from CLI command %(cmd)s, '
                     'got %(out)s.') % {'cmd': log_cmd, 'out': out})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

    def run_ssh_check_created(self, ssh_cmd):
        """Run an SSH command and return the ID of the created object."""
        out, err = self._run_ssh(ssh_cmd)
        try:
            match_obj = re.search(r'\[([0-9]+)\],? successfully created', out)
            return match_obj.group(1)
        except (AttributeError, IndexError):
            msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n '
                     'stdout: %(out)s\n stderr: %(err)s.') %
                   {'cmd': ssh_cmd,
                    'out': out,
                    'err': err})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
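    # Illustrative: a successful create command prints a line such as
    # "Virtual Disk, id [5], successfully created" (the prefix text is an
    # assumption; it varies by command), from which "5" is returned.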

    def lsnode(self, node_id=None):
        with_header = True
        ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!']
        if node_id:
            with_header = False
            ssh_cmd.append(node_id)
        return self.run_ssh_info(ssh_cmd, with_header=with_header)

    def lslicense(self):
        ssh_cmd = ['svcinfo', 'lslicense', '-delim', '!']
        return self.run_ssh_info(ssh_cmd)[0]

    def lsguicapabilities(self):
        ssh_cmd = ['svcinfo', 'lsguicapabilities', '-delim', '!']
        return self.run_ssh_info(ssh_cmd)[0]

    def lssystem(self):
        ssh_cmd = ['svcinfo', 'lssystem', '-delim', '!']
        return self.run_ssh_info(ssh_cmd)[0]

    def lsmdiskgrp(self, pool):
        ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!',
                   '"%s"' % pool]
        try:
            return self.run_ssh_info(ssh_cmd)[0]
        except exception.VolumeBackendAPIException as ex:
            LOG.warning("Failed to get pool %(pool)s info. "
                        "Exception: %(ex)s.", {'pool': pool,
                                               'ex': ex})
            return None

    def lsiogrp(self):
        ssh_cmd = ['svcinfo', 'lsiogrp', '-delim', '!']
        return self.run_ssh_info(ssh_cmd, with_header=True)

    def lsportip(self):
        ssh_cmd = ['svcinfo', 'lsportip', '-delim', '!']
        return self.run_ssh_info(ssh_cmd, with_header=True)

    @staticmethod
    def _create_port_arg(port_type, port_name):
        if port_type == 'initiator':
            port = ['-iscsiname']
        else:
            port = ['-hbawwpn']
        port.append(port_name)
        return port

    def mkhost(self, host_name, port_type, port_name, site=None):
        port = self._create_port_arg(port_type, port_name)
        ssh_cmd = ['svctask', 'mkhost', '-force'] + port
        if site:
            ssh_cmd += ['-site', '"%s"' % site]
        ssh_cmd += ['-name', '"%s"' % host_name]
        return self.run_ssh_check_created(ssh_cmd)

    def addhostport(self, host, port_type, port_name):
        port = self._create_port_arg(port_type, port_name)
        ssh_cmd = ['svctask', 'addhostport', '-force'] + port + ['"%s"' % host]
        self.run_ssh_assert_no_output(ssh_cmd)

    def lshost(self, host=None):
        with_header = True
        ssh_cmd = ['svcinfo', 'lshost', '-delim', '!']
        if host:
            with_header = False
            ssh_cmd.append('"%s"' % host)
        return self.run_ssh_info(ssh_cmd, with_header=with_header)

    def add_chap_secret(self, secret, host):
        ssh_cmd = ['svctask', 'chhost', '-chapsecret', secret, '"%s"' % host]
        log_cmd = 'svctask chhost -chapsecret *** %s' % host
        self.run_ssh_assert_no_output(ssh_cmd, log_cmd)

    def chhost(self, host, site):
        ssh_cmd = ['svctask', 'chhost', '-site', '"%s"' % site, '"%s"' % host]
        self.run_ssh_assert_no_output(ssh_cmd)

    def lsiscsiauth(self):
        ssh_cmd = ['svcinfo', 'lsiscsiauth', '-delim', '!']
        return self.run_ssh_info(ssh_cmd, with_header=True)

    def lsfabric(self, wwpn=None, host=None):
        ssh_cmd = ['svcinfo', 'lsfabric', '-delim', '!']
        if wwpn:
            ssh_cmd.extend(['-wwpn', wwpn])
        elif host:
            ssh_cmd.extend(['-host', '"%s"' % host])
        else:
            msg = (_('Must pass wwpn or host to lsfabric.'))
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)
        return self.run_ssh_info(ssh_cmd, with_header=True)

    def mkvdiskhostmap(self, host, vdisk, lun, multihostmap):
        """Map vdisk to host.

        If the vdisk is already mapped and multihostmap is True, use the
        force flag.
        """
        ssh_cmd = ['svctask', 'mkvdiskhostmap', '-host', '"%s"' % host,
                   '-scsi', lun, '"%s"' % vdisk]

        if multihostmap:
            ssh_cmd.insert(ssh_cmd.index('mkvdiskhostmap') + 1, '-force')
        self.run_ssh_check_created(ssh_cmd)

    def mkrcrelationship(self, master, aux, system, asyncmirror,
                         cyclingmode=False):
        ssh_cmd = ['svctask', 'mkrcrelationship', '-master', master,
                   '-aux', aux, '-cluster', system]
        if asyncmirror:
            ssh_cmd.append('-global')
        if cyclingmode:
            ssh_cmd.extend(['-cyclingmode', 'multi'])
        return self.run_ssh_check_created(ssh_cmd)
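    # Grounded in the code above: asyncmirror plus cyclingmode produces
    # a CLI call like "svctask mkrcrelationship -master <m> -aux <a>
    # -cluster <system> -global -cyclingmode multi".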

    def rmrcrelationship(self, relationship, force=False):
        ssh_cmd = ['svctask', 'rmrcrelationship']
        if force:
            ssh_cmd += ['-force']
        ssh_cmd += [relationship]
        self.run_ssh_assert_no_output(ssh_cmd)

    def switchrelationship(self, relationship, aux=True):
        primary = 'aux' if aux else 'master'
        ssh_cmd = ['svctask', 'switchrcrelationship', '-primary',
                   primary, relationship]
        self.run_ssh_assert_no_output(ssh_cmd)

    def startrcrelationship(self, rc_rel, primary=None):
        ssh_cmd = ['svctask', 'startrcrelationship', '-force']
        if primary:
            ssh_cmd.extend(['-primary', primary])
        ssh_cmd.append(rc_rel)
        self.run_ssh_assert_no_output(ssh_cmd)

    def ch_rcrelationship_cycleperiod(self, relationship,
                                      cycle_period_seconds):
        # Note: only one attribute can be changed at a time, so two
        # separate ch_rcrelationship_xxx methods are defined here.
        if cycle_period_seconds:
            ssh_cmd = ['svctask', 'chrcrelationship']
            ssh_cmd.extend(['-cycleperiodseconds',
                            six.text_type(cycle_period_seconds)])
            ssh_cmd.append(relationship)
            self.run_ssh_assert_no_output(ssh_cmd)

    def ch_rcrelationship_changevolume(self, relationship,
                                       changevolume, master):
        # Note: only one attribute can be changed at a time, so two
        # separate ch_rcrelationship_xxx methods are defined here.
        if changevolume:
            ssh_cmd = ['svctask', 'chrcrelationship']
            if master:
                ssh_cmd.extend(['-masterchange', changevolume])
            else:
                ssh_cmd.extend(['-auxchange', changevolume])
            ssh_cmd.append(relationship)
            self.run_ssh_assert_no_output(ssh_cmd)

    def stoprcrelationship(self, relationship, access=False):
        ssh_cmd = ['svctask', 'stoprcrelationship']
        if access:
            ssh_cmd.append('-access')
        ssh_cmd.append(relationship)
        self.run_ssh_assert_no_output(ssh_cmd)

    def lsrcrelationship(self, rc_rel):
        ssh_cmd = ['svcinfo', 'lsrcrelationship', '-delim', '!', rc_rel]
        return self.run_ssh_info(ssh_cmd)

    # replication consistency group helpers
    def chrcrelationship(self, relationship, rccg=None):
        ssh_cmd = ['svctask', 'chrcrelationship']
        if rccg:
            ssh_cmd.extend(['-consistgrp', rccg])
        else:
            ssh_cmd.extend(['-noconsistgrp'])
        ssh_cmd.append(relationship)
        self.run_ssh_assert_no_output(ssh_cmd)

    def lsrcconsistgrp(self, rccg):
        ssh_cmd = ['svcinfo', 'lsrcconsistgrp', '-delim', '!', rccg]
        try:
            return self.run_ssh_info(ssh_cmd)[0]
        except exception.VolumeBackendAPIException as ex:
            LOG.warning("Failed to get rcconsistgrp %(rccg)s info. "
                        "Exception: %(ex)s.", {'rccg': rccg,
                                               'ex': ex})
            return None

    def mkrcconsistgrp(self, rccg, system):
        ssh_cmd = ['svctask', 'mkrcconsistgrp', '-name', rccg,
                   '-cluster', system]
        return self.run_ssh_check_created(ssh_cmd)

    def rmrcconsistgrp(self, rccg, force=True):
        ssh_cmd = ['svctask', 'rmrcconsistgrp']
        if force:
            ssh_cmd += ['-force']
        ssh_cmd += ['"%s"' % rccg]
        return self.run_ssh_assert_no_output(ssh_cmd)

    def startrcconsistgrp(self, rccg, primary=None):
        ssh_cmd = ['svctask', 'startrcconsistgrp', '-force']
        if primary:
            ssh_cmd.extend(['-primary', primary])
        ssh_cmd.append(rccg)
        self.run_ssh_assert_no_output(ssh_cmd)

    def stoprcconsistgrp(self, rccg, access=False):
        ssh_cmd = ['svctask', 'stoprcconsistgrp']
        if access:
            ssh_cmd.append('-access')
        ssh_cmd.append(rccg)
        self.run_ssh_assert_no_output(ssh_cmd)

    def switchrcconsistgrp(self, rccg, aux=True):
        primary = 'aux' if aux else 'master'
        ssh_cmd = ['svctask', 'switchrcconsistgrp', '-primary',
                   primary, rccg]
        self.run_ssh_assert_no_output(ssh_cmd)

    def lspartnership(self, system_name):
        key_value = 'name=%s' % system_name
        ssh_cmd = ['svcinfo', 'lspartnership', '-filtervalue',
                   key_value, '-delim', '!']
        return self.run_ssh_info(ssh_cmd, with_header=True)

    def lspartnershipcandidate(self):
        ssh_cmd = ['svcinfo', 'lspartnershipcandidate', '-delim', '!']
        return self.run_ssh_info(ssh_cmd, with_header=True)

    def mkippartnership(self, ip_v4, bandwidth=1000, backgroundcopyrate=50):
        ssh_cmd = ['svctask', 'mkippartnership', '-type', 'ipv4',
                   '-clusterip', ip_v4, '-linkbandwidthmbits',
                   six.text_type(bandwidth),
                   '-backgroundcopyrate', six.text_type(backgroundcopyrate)]
        return self.run_ssh_assert_no_output(ssh_cmd)

    def mkfcpartnership(self, system_name, bandwidth=1000,
                        backgroundcopyrate=50):
        ssh_cmd = ['svctask', 'mkfcpartnership', '-linkbandwidthmbits',
                   six.text_type(bandwidth),
                   '-backgroundcopyrate', six.text_type(backgroundcopyrate),
                   system_name]
        return self.run_ssh_assert_no_output(ssh_cmd)

    def chpartnership(self, partnership_id, start=True):
        action = '-start' if start else '-stop'
        ssh_cmd = ['svctask', 'chpartnership', action, partnership_id]
        return self.run_ssh_assert_no_output(ssh_cmd)

    def rmvdiskhostmap(self, host, vdisk):
        ssh_cmd = ['svctask', 'rmvdiskhostmap', '-host', '"%s"' % host,
                   '"%s"' % vdisk]
        self.run_ssh_assert_no_output(ssh_cmd)

    def lsvdiskhostmap(self, vdisk):
        ssh_cmd = ['svcinfo', 'lsvdiskhostmap', '-delim', '!', '"%s"' % vdisk]
        return self.run_ssh_info(ssh_cmd, with_header=True)

    def lshostvdiskmap(self, host):
        ssh_cmd = ['svcinfo', 'lshostvdiskmap', '-delim', '!', '"%s"' % host]
        return self.run_ssh_info(ssh_cmd, with_header=True)

    def get_vdiskhostmapid(self, vdisk, host):
        """Return the SCSI LUN ID for the vdisk-host mapping, or None."""
        resp = self.lsvdiskhostmap(vdisk)
        for mapping_info in resp:
            if mapping_info['host_name'] == host:
                lun_id = mapping_info['SCSI_id']
                return lun_id
        return None

    def rmhost(self, host):
        ssh_cmd = ['svctask', 'rmhost', '"%s"' % host]
        self.run_ssh_assert_no_output(ssh_cmd)

    def mkvdisk(self, name, size, units, pool, opts, params):
        ssh_cmd = ['svctask', 'mkvdisk', '-name', '"%s"' % name, '-mdiskgrp',
                   '"%s"' % pool, '-iogrp', six.text_type(opts['iogrp']),
                   '-size', size, '-unit', units] + params
        try:
            return self.run_ssh_check_created(ssh_cmd)
        except Exception as ex:
            # pylint: disable=E1101
            if hasattr(ex, 'msg') and 'CMMVC6372W' in ex.msg:
                vdisk = self.lsvdisk(name)
                if vdisk:
                    LOG.warning('CMMVC6372W The virtualized storage '
                                'capacity that the cluster is using is '
                                'approaching the virtualized storage '
                                'capacity that is licensed.')
                    return vdisk['id']
            with excutils.save_and_reraise_exception():
                LOG.exception('Failed to create vdisk %(vol)s.',
                              {'vol': name})

    def rmvdisk(self, vdisk, force_unmap=True, force_delete=True):
        ssh_cmd = ['svctask', 'rmvdisk']
        if force_unmap and not force_delete:
            ssh_cmd += ['-removehostmappings']
        if force_delete:
            ssh_cmd += ['-force']
        ssh_cmd += ['"%s"' % vdisk]
        self.run_ssh_assert_no_output(ssh_cmd)

    def lsvdisk(self, vdisk):
        """Return vdisk attributes or None if it doesn't exist."""
        ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!',
                   '"%s"' % vdisk]
        out, err = self._ssh(ssh_cmd, check_exit_code=False)
        if not err:
            return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!',
                               with_header=False)[0]
        if 'CMMVC5754E' in err:
            # CMMVC5754E: the specified object does not exist.
            return None
        msg = (_('CLI Exception output:\n command: %(cmd)s\n '
                 'stdout: %(out)s\n stderr: %(err)s.') %
               {'cmd': ssh_cmd,
                'out': out,
                'err': err})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    def lsvdisks_from_filter(self, filter_name, value):
        """Perform an lsvdisk command, filtering the results as specified.

        Returns an iterable for all matching vdisks.
        """
        ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!',
                   '-filtervalue', '%s=%s' % (filter_name, value)]
        return self.run_ssh_info(ssh_cmd, with_header=True)

    def chvdisk(self, vdisk, params):
        ssh_cmd = ['svctask', 'chvdisk'] + params + ['"%s"' % vdisk]
        self.run_ssh_assert_no_output(ssh_cmd)

    def movevdisk(self, vdisk, iogrp):
        ssh_cmd = ['svctask', 'movevdisk', '-iogrp', iogrp, '"%s"' % vdisk]
        self.run_ssh_assert_no_output(ssh_cmd)

    def expandvdisksize(self, vdisk, amount):
        ssh_cmd = (
            ['svctask', 'expandvdisksize', '-size', six.text_type(amount),
             '-unit', 'gb', '"%s"' % vdisk])
        self.run_ssh_assert_no_output(ssh_cmd)

    def mkfcmap(self, source, target, full_copy, copy_rate, consistgrp=None):
        ssh_cmd = ['svctask', 'mkfcmap', '-source', '"%s"' % source, '-target',
                   '"%s"' % target]
        if not full_copy:
            ssh_cmd.extend(['-copyrate', '0'])
        else:
            ssh_cmd.extend(['-copyrate', six.text_type(copy_rate)])
            ssh_cmd.append('-autodelete')
        if consistgrp:
            ssh_cmd.extend(['-consistgrp', consistgrp])
        out, err = self._ssh(ssh_cmd, check_exit_code=False)
        if 'successfully created' not in out:
            msg = (_('CLI Exception output:\n command: %(cmd)s\n '
                     'stdout: %(out)s\n stderr: %(err)s.') %
                   {'cmd': ssh_cmd,
                    'out': out,
                    'err': err})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        try:
            match_obj = re.search(r'FlashCopy Mapping, id \[([0-9]+)\], '
                                  'successfully created', out)
            fc_map_id = match_obj.group(1)
        except (AttributeError, IndexError):
            msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n '
                     'stdout: %(out)s\n stderr: %(err)s.') %
                   {'cmd': ssh_cmd,
                    'out': out,
                    'err': err})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        return fc_map_id
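    # Grounded in the regex above: a successful mkfcmap prints, e.g.,
    # "FlashCopy Mapping, id [3], successfully created", and "3" is
    # returned as the FlashCopy mapping ID.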

    def prestartfcmap(self, fc_map_id, restore=False):
        ssh_cmd = ['svctask', 'prestartfcmap']
        if restore:
            ssh_cmd.append('-restore')
        ssh_cmd.append(fc_map_id)
        self.run_ssh_assert_no_output(ssh_cmd)

    def startfcmap(self, fc_map_id, restore=False):
        ssh_cmd = ['svctask', 'startfcmap']
        if restore:
            ssh_cmd.append('-restore')
        ssh_cmd.append(fc_map_id)
        self.run_ssh_assert_no_output(ssh_cmd)

    def prestartfcconsistgrp(self, fc_consist_group):
        ssh_cmd = ['svctask', 'prestartfcconsistgrp', fc_consist_group]
        self.run_ssh_assert_no_output(ssh_cmd)

    def startfcconsistgrp(self, fc_consist_group):
        ssh_cmd = ['svctask', 'startfcconsistgrp', fc_consist_group]
        self.run_ssh_assert_no_output(ssh_cmd)

    def stopfcconsistgrp(self, fc_consist_group):
        ssh_cmd = ['svctask', 'stopfcconsistgrp', fc_consist_group]
        self.run_ssh_assert_no_output(ssh_cmd)

    def chfcmap(self, fc_map_id, copyrate='50', autodel='on'):
        ssh_cmd = ['svctask', 'chfcmap', '-copyrate', copyrate,
                   '-autodelete', autodel, fc_map_id]
        self.run_ssh_assert_no_output(ssh_cmd)

    def stopfcmap(self, fc_map_id, force=False, split=False):
        ssh_cmd = ['svctask', 'stopfcmap']
        if force:
            ssh_cmd += ['-force']
        if split:
            ssh_cmd += ['-split']
        ssh_cmd += [fc_map_id]
        self.run_ssh_assert_no_output(ssh_cmd)

    def rmfcmap(self, fc_map_id):
        ssh_cmd = ['svctask', 'rmfcmap', '-force', fc_map_id]
        self.run_ssh_assert_no_output(ssh_cmd)

    def lsvdiskfcmappings(self, vdisk):
        ssh_cmd = ['svcinfo', 'lsvdiskfcmappings', '-delim', '!',
                   '"%s"' % vdisk]
        return self.run_ssh_info(ssh_cmd, with_header=True)

    def lsfcmap(self, fc_map_id):
        ssh_cmd = ['svcinfo', 'lsfcmap', '-filtervalue',
                   'id=%s' % fc_map_id, '-delim', '!']
        return self.run_ssh_info(ssh_cmd, with_header=True)

    def lsfcconsistgrp(self, fc_consistgrp):
        ssh_cmd = ['svcinfo', 'lsfcconsistgrp', '-delim', '!', fc_consistgrp]
        out, err = self._ssh(ssh_cmd)
        return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!',
                           with_header=False)

    def mkfcconsistgrp(self, fc_consist_group):
        ssh_cmd = ['svctask', 'mkfcconsistgrp', '-name', fc_consist_group]
        return self.run_ssh_check_created(ssh_cmd)

    def rmfcconsistgrp(self, fc_consist_group):
        ssh_cmd = ['svctask', 'rmfcconsistgrp', '-force', fc_consist_group]
        return self.run_ssh_assert_no_output(ssh_cmd)

    def addvdiskcopy(self, vdisk, dest_pool, params, auto_delete):
        ssh_cmd = (['svctask', 'addvdiskcopy'] + params + ['-mdiskgrp',
                   '"%s"' % dest_pool])
        if auto_delete:
            ssh_cmd += ['-autodelete']
        ssh_cmd += ['"%s"' % vdisk]
        return self.run_ssh_check_created(ssh_cmd)

    def lsvdiskcopy(self, vdisk, copy_id=None):
        ssh_cmd = ['svcinfo', 'lsvdiskcopy', '-delim', '!']
        with_header = True
        if copy_id:
            ssh_cmd += ['-copy', copy_id]
            with_header = False
        ssh_cmd += ['"%s"' % vdisk]
        return self.run_ssh_info(ssh_cmd, with_header=with_header)

    def lsvdisksyncprogress(self, vdisk, copy_id):
        ssh_cmd = ['svcinfo', 'lsvdisksyncprogress', '-delim', '!',
                   '-copy', copy_id, '"%s"' % vdisk]
        return self.run_ssh_info(ssh_cmd, with_header=True)[0]

    def rmvdiskcopy(self, vdisk, copy_id):
        ssh_cmd = ['svctask', 'rmvdiskcopy', '-copy', copy_id, '"%s"' % vdisk]
        self.run_ssh_assert_no_output(ssh_cmd)

    def addvdiskaccess(self, vdisk, iogrp):
        ssh_cmd = ['svctask', 'addvdiskaccess', '-iogrp', iogrp,
                   '"%s"' % vdisk]
        self.run_ssh_assert_no_output(ssh_cmd)

    def rmvdiskaccess(self, vdisk, iogrp):
        ssh_cmd = ['svctask', 'rmvdiskaccess', '-iogrp', iogrp, '"%s"' % vdisk]
        self.run_ssh_assert_no_output(ssh_cmd)

    def lsportfc(self, node_id):
        ssh_cmd = ['svcinfo', 'lsportfc', '-delim', '!',
                   '-filtervalue', 'node_id=%s' % node_id]
        return self.run_ssh_info(ssh_cmd, with_header=True)

    def lstargetportfc(self, current_node_id=None, host_io_permitted=None):
        ssh_cmd = ['svcinfo', 'lstargetportfc', '-delim', '!']
        if current_node_id and host_io_permitted:
            ssh_cmd += ['-filtervalue', '%s:%s' % (
                'current_node_id=%s' % current_node_id,
                'host_io_permitted=%s' % host_io_permitted)]
        elif current_node_id:
            ssh_cmd += ['-filtervalue', 'current_node_id=%s' % current_node_id]
        return self.run_ssh_info(ssh_cmd, with_header=True)

    def migratevdisk(self, vdisk, dest_pool, copy_id='0'):
        ssh_cmd = ['svctask', 'migratevdisk', '-mdiskgrp', dest_pool, '-copy',
                   copy_id, '-vdisk', vdisk]
        self.run_ssh_assert_no_output(ssh_cmd)

    def mkvolume(self, name, size, units, pool, params):
        ssh_cmd = ['svctask', 'mkvolume', '-name', name, '-pool',
                   '"%s"' % pool, '-size', size, '-unit', units] + params
        return self.run_ssh_check_created(ssh_cmd)

    def rmvolume(self, volume, force_unmap=True, force_delete=True):
        ssh_cmd = ['svctask', 'rmvolume']
        if force_delete:
            ssh_cmd += ['-removehostmappings', '-removefcmaps',
                        '-removercrelationships']
        elif force_unmap:
            ssh_cmd += ['-removehostmappings']
        ssh_cmd += ['"%s"' % volume]
        self.run_ssh_assert_no_output(ssh_cmd)

    def addvolumecopy(self, name, pool, params):
        ssh_cmd = ['svctask', 'addvolumecopy', '-pool',
                   '"%s"' % pool] + params + ['"%s"' % name]
        self.run_ssh_assert_no_output(ssh_cmd)

    def rmvolumecopy(self, name, pool):
        ssh_cmd = ['svctask', 'rmvolumecopy', '-pool',
                   '"%s"' % pool, '"%s"' % name]
        self.run_ssh_assert_no_output(ssh_cmd)


class StorwizeHelpers(object):

    # All the supported QoS keys are saved in this dict. When a new
    # key is added, three values MUST be set:
    # 'default': the value used when the parameter is disabled.
    # 'param': the corresponding parameter in the CLI command.
    # 'type': the type of this value.
    WAIT_TIME = 5
    svc_qos_keys = {'IOThrottling': {'default': '0',
                                     'param': 'rate',
                                     'type': int}}
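    # Illustrative (assumed) mapping: a volume-type QoS spec such as
    # {'IOThrottling': '500'} would translate to the CLI parameter
    # "-rate 500" on the corresponding vdisk command.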

    def __init__(self, run_ssh):
        self.ssh = StorwizeSSH(run_ssh)
        self.check_fcmapping_interval = 3
        self.code_level = None
        self.stats = {}

    @staticmethod
    def handle_keyerror(cmd, out):
        msg = (_('Could not find key in output of command %(cmd)s: %(out)s.')
               % {'out': out, 'cmd': cmd})
        raise exception.VolumeBackendAPIException(data=msg)

    def compression_enabled(self):
        """Return whether or not compression is enabled for this system."""
        resp = self.ssh.lslicense()
        keys = ['license_compression_enclosures',
                'license_compression_capacity']
        for key in keys:
            if resp.get(key, '0') != '0':
                return True

        # lslicense is not used for the V9000 compression check because
        # license_compression_enclosures and license_compression_capacity
        # are always 0. V9000 uses license_scheme 9846 as an indicator
        # and can always do compression.
        try:
            resp = self.ssh.lsguicapabilities()
            if resp.get('license_scheme', '0') == '9846':
                return True
            if resp.get('license_scheme', '0') == 'flex':
                return True
        except exception.VolumeBackendAPIException:
            LOG.exception("Failed to fetch licensing scheme.")
        return False

    def replication_licensed(self):
        """Return whether or not replication is enabled for this system."""
        # Uses product_key as an indicator to check
        # whether replication is supported in storage.
        try:
            resp = self.ssh.lsguicapabilities()
            product_key = resp.get('product_key', '0')
            if product_key in storwize_const.REP_CAP_DEVS:
                return True
        except exception.VolumeBackendAPIException as war:
            LOG.warning("Failed to run lsguicapabilities. Exception: %s.",
                        war)
        return False

    def get_system_info(self):
        """Return the system's name, ID, topology, and code level."""
        resp = self.ssh.lssystem()
        level = resp['code_level']
        match_obj = re.search('([0-9].){3}[0-9]', level)
        if match_obj is None:
            msg = _('Failed to get code level (%s).') % level
            raise exception.VolumeBackendAPIException(data=msg)
        code_level = match_obj.group().split('.')
        LOG.info("code_level is: %s.", level)
        return {'code_level': tuple([int(x) for x in code_level]),
                'topology': resp['topology'],
                'system_name': resp['name'],
                'system_id': resp['id']}
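    # Worked example: a code_level of "7.8.1.0 (build 135.5.1801122203000)"
    # (build string illustrative) matches "7.8.1.0" and yields the
    # tuple (7, 8, 1, 0).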

    def get_pool_attrs(self, pool):
        """Return attributes for the specified pool."""
        return self.ssh.lsmdiskgrp(pool)

    def is_pool_defined(self, pool_name):
        """Check if the pool is defined."""
        attrs = self.get_pool_attrs(pool_name)
        return attrs is not None

    def is_data_reduction_pool(self, pool_name):
        """Check whether the pool is a data reduction pool."""
        # Check whether the pool is a data reduction pool from the pool
        # information saved in stats.
        for pool in self.stats.get('pools', []):
            if pool['pool_name'] == pool_name:
                return pool['data_reduction']

        pool_data = self.get_pool_attrs(pool_name)
        if (pool_data and 'data_reduction' in pool_data and
                pool_data['data_reduction'] == 'yes'):
            return True
        return False

    def get_pool_volumes(self, pool):
        """Return volumes for the specified pool."""
        vdisks = self.ssh.lsvdisks_from_filter('mdisk_grp_name', pool)
        return vdisks.result

    def get_available_io_groups(self):
        """Return list of available IO groups."""
        iogrps = []
        resp = self.ssh.lsiogrp()
        for iogrp in resp:
            try:
                if int(iogrp['node_count']) > 0:
                    iogrps.append(int(iogrp['id']))
            except KeyError:
                self.handle_keyerror('lsiogrp', iogrp)
            except ValueError:
                msg = (_('Expected integer for node_count, '
                         'svcinfo lsiogrp returned: %(node)s.') %
                       {'node': iogrp['node_count']})
                raise exception.VolumeBackendAPIException(data=msg)
        return iogrps

    def get_vdisk_count_by_io_group(self):
        res = {}
        resp = self.ssh.lsiogrp()
        for iogrp in resp:
            try:
                if int(iogrp['node_count']) > 0:
                    res[int(iogrp['id'])] = int(iogrp['vdisk_count'])
            except KeyError:
                self.handle_keyerror('lsiogrp', iogrp)
            except ValueError:
                msg = (_('Expected integer for node_count, '
                         'svcinfo lsiogrp returned: %(node)s') %
                       {'node': iogrp['node_count']})
                raise exception.VolumeBackendAPIException(data=msg)
        return res

    def select_io_group(self, state, opts, pool):
        """Select the I/O group with the fewest vdisks from the candidates."""
        selected_iog = 0
        iog_list = StorwizeHelpers._get_valid_requested_io_groups(state, opts)
        if len(iog_list) == 0:
            raise exception.InvalidInput(
                reason=_('Given I/O group(s) %(iogrp)s not valid; available '
                         'I/O groups are %(avail)s.')
                % {'iogrp': opts['iogrp'],
                   'avail': state['available_iogrps']})

        site_iogrp = []
        hyperswap = opts['volume_topology'] == 'hyperswap'
        if hyperswap:
            pool_data = self.get_pool_attrs(pool)
            if pool_data is None:
                msg = (_('Failed getting details for pool %s.') % pool)
                LOG.error(msg)
                raise exception.InvalidConfigurationValue(message=msg)
        if hyperswap and pool_data.get('site_id'):
            for node in state['storage_nodes'].values():
                if pool_data['site_id'] == node['site_id']:
                    site_iogrp.append(node['IO_group'])
            site_iogrp = list(map(int, site_iogrp))
            iogroup_list = list(set(site_iogrp).intersection(iog_list))
            if len(iogroup_list) == 0:
                LOG.warning('The storage system topology is hyperswap or '
                            'stretched. The site_id of pool %(pool)s is '
                            '%(site_id)s, the available I/O groups on this '
                            'site are %(site_iogrp)s, but the given I/O '
                            'group(s) are %(iogrp)s.',
                            {'pool': pool, 'site_id': pool_data['site_id'],
                             'site_iogrp': site_iogrp, 'iogrp': opts['iogrp']})
                iogroup_list = iog_list
        else:
            iogroup_list = iog_list
        iog_vdc = self.get_vdisk_count_by_io_group()
        LOG.debug("IO group current balance %s", iog_vdc)
        min_vdisk_count = iog_vdc[iogroup_list[0]]
        selected_iog = iogroup_list[0]
        for iog in iogroup_list:
            if iog_vdc[iog] < min_vdisk_count:
                min_vdisk_count = iog_vdc[iog]
                selected_iog = iog
        LOG.debug("Selected io_group is %d", selected_iog)
        return selected_iog
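    # Worked example: with candidate I/O groups [0, 1] and vdisk counts
    # {0: 10, 1: 4}, io_group 1 is selected (counts are illustrative).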

    def get_volume_io_group(self, vol_name):
        vdisk = self.ssh.lsvdisk(vol_name)
        if vdisk:
            resp = self.ssh.lsiogrp()
            for iogrp in resp:
                if iogrp['name'] == vdisk['IO_group_name']:
                    return int(iogrp['id'])
        return None

    def get_node_info(self):
        """Return dictionary containing information on system's nodes."""
        nodes = {}
        resp = self.ssh.lsnode()
        for node_data in resp:
            try:
                if node_data['status'] != 'online':
                    continue
                node = {}
                node['id'] = node_data['id']
                node['name'] = node_data['name']
                node['IO_group'] = node_data['IO_group_id']
                node['iscsi_name'] = node_data['iscsi_name']
                node['WWNN'] = node_data['WWNN']
                node['status'] = node_data['status']
                node['WWPN'] = []
                node['ipv4'] = []
                node['ipv6'] = []
                node['enabled_protocols'] = []
                nodes[node['id']] = node
                node['site_id'] = (node_data['site_id']
                                   if 'site_id' in node_data else None)
            except KeyError:
                self.handle_keyerror('lsnode', node_data)
        return nodes

    def add_iscsi_ip_addrs(self, storage_nodes):
        """Add iSCSI IP addresses to system node information."""
        resp = self.ssh.lsportip()
        for ip_data in resp:
            try:
                state = ip_data['state']
                if ip_data['node_id'] in storage_nodes and (
                        state == 'configured' or state == 'online'):
                    node = storage_nodes[ip_data['node_id']]
                    if len(ip_data['IP_address']):
                        node['ipv4'].append(ip_data['IP_address'])
                    if len(ip_data['IP_address_6']):
                        node['ipv6'].append(ip_data['IP_address_6'])
            except KeyError:
                self.handle_keyerror('lsportip', ip_data)

    def add_fc_wwpns(self, storage_nodes, code_level):
        """Add FC WWPNs to system node information."""
        for key in storage_nodes:
            node = storage_nodes[key]
            wwpns = set(node['WWPN'])
            # The Storwize/SVC release 7.7.0.0 introduced the NPIV feature;
            # from that release on, the virtual wwpns are included in the
            # output of the lstargetportfc CLI command.
            if code_level < (7, 7, 0, 0):
                resp = self.ssh.lsportfc(node_id=node['id'])
                for port_info in resp:
                    if (port_info['type'] == 'fc' and
                            port_info['status'] == 'active'):
                        wwpns.add(port_info['WWPN'])
            else:
                npiv_wwpns = self.get_npiv_wwpns(node_id=node['id'])
                wwpns.update(npiv_wwpns)
            node['WWPN'] = list(wwpns)
            LOG.info('WWPN on node %(node)s: %(wwpn)s.',
                     {'node': node['id'], 'wwpn': node['WWPN']})

    def get_npiv_wwpns(self, node_id=None, host_io=None):
        wwpns = set()
        # In the response of lstargetportfc, host_io_permitted
        # indicates whether the port can be used for host I/O.
        resp = self.ssh.lstargetportfc(current_node_id=node_id,
                                       host_io_permitted=host_io)
        for port_info in resp:
            wwpns.add(port_info['WWPN'])
        return list(wwpns)

    def add_chap_secret_to_host(self, host_name):
        """Generate and store a randomly-generated CHAP secret for the host."""
        chap_secret = volume_utils.generate_password()
        self.ssh.add_chap_secret(chap_secret, host_name)
        return chap_secret

    def get_chap_secret_for_host(self, host_name):
        """Return the host's CHAP secret, or None if CHAP is not in use."""
        resp = self.ssh.lsiscsiauth()
        host_found = False
        for host_data in resp:
            try:
                if host_data['name'] == host_name:
                    host_found = True
                    if host_data['iscsi_auth_method'] == 'chap':
                        return host_data['iscsi_chap_secret']
            except KeyError:
                self.handle_keyerror('lsiscsiauth', host_data)
        if not host_found:
            msg = _('Failed to find host %s.') % host_name
            raise exception.VolumeBackendAPIException(data=msg)
        return None

    def get_conn_fc_wwpns(self, host):
        wwpns = set()
        resp = self.ssh.lsfabric(host=host)
        for wwpn in resp.select('local_wwpn'):
            if wwpn is not None:
                wwpns.add(wwpn)
        return list(wwpns)

    def get_host_from_connector(self, connector, volume_name=None,
                                iscsi=False):
        """Return the Storwize host described by the connector."""
        LOG.debug('Enter: get_host_from_connector: %s.', connector)

        # If we have FC information, we have a faster lookup option
        host_name = None
        if 'wwpns' in connector and not iscsi:
            for wwpn in connector['wwpns']:
                resp = self.ssh.lsfabric(wwpn=wwpn)
                for wwpn_info in resp:
                    try:
                        if (wwpn_info['remote_wwpn'] and
                                wwpn_info['name'] and
                                wwpn_info['remote_wwpn'].lower() ==
                                wwpn.lower()):
                            host_name = wwpn_info['name']
                            break
                    except KeyError:
                        self.handle_keyerror('lsfabric', wwpn_info)
                if host_name:
                    break
        if host_name:
            LOG.debug('Leave: get_host_from_connector: host %s.', host_name)
            return host_name

        def update_host_list(host, host_list):
            idx = host_list.index(host)
            del host_list[idx]
            host_list.insert(0, host)

        # That didn't work, so try an exhaustive search
        hosts_info = self.ssh.lshost()
        host_list = list(hosts_info.select('name'))
        # If we have a "real" connector, we might be able to find the
        # host entry with fewer queries if we move the host entries
        # that contain the connector's host property value to the front
        # of the list
        if 'host' in connector:
            # order host_list such that the host entries that
            # contain the connector's host name are at the
            # beginning of the list
            for host in host_list:
                if re.search(connector['host'], host):
                    update_host_list(host, host_list)
        # If we have a volume name, we have a potential fast path
        # for finding the matching host for that volume.
        # Add the host names that have mappings for our volume to the
        # head of the list of host names so they are searched first.
        if volume_name:
            hosts_map_info = self.ssh.lsvdiskhostmap(volume_name)
            hosts_map_info_list = list(hosts_map_info.select('host_name'))
            # remove the fast path host names from the end of the list
            # and move them to the front so they are only searched once.
            for host in hosts_map_info_list:
                update_host_list(host, host_list)
        found = False
        for name in host_list:
            try:
                resp = self.ssh.lshost(host=name)
            except exception.VolumeBackendAPIException as ex:
                LOG.debug("Exception message: %s", ex.msg)
                if 'CMMVC5754E' in ex.msg:
                    LOG.debug("CMMVC5754E found in CLI exception.")
                    # CMMVC5754E: The specified object does not exist.
                    # The host has been deleted while walking the list.
                    # This is a result of a host change on the SVC that
                    # is out of band to this request.
                    continue
                # unexpected error, so reraise it
                with excutils.save_and_reraise_exception():
                    pass
            if iscsi:
                if 'initiator' in connector:
                    for iscsi_name in resp.select('iscsi_name'):
                        if iscsi_name == connector['initiator']:
                            host_name = name
                            found = True
                            break
            elif 'wwpns' in connector and len(connector['wwpns']):
                connector_wwpns = [str(x).lower() for x in connector['wwpns']]
                for wwpn in resp.select('WWPN'):
                    if wwpn and wwpn.lower() in connector_wwpns:
                        host_name = name
                        found = True
                        break
            if found:
                break

        LOG.debug('Leave: get_host_from_connector: host %s.', host_name)
        return host_name

 1129     def create_host(self, connector, iscsi=False, site=None):
 1130         """Create a new host on the storage system.
 1131 
 1132         We create a host name and associate it with the given connection
 1133         information.  The host name will be a cleaned up version of the given
 1134         host name (at most 55 characters), plus a random 8-character suffix to
 1135         avoid collisions. The total length should be at most 63 characters.
 1136         """
 1137         LOG.debug('Enter: create_host: host %s.', connector['host'])
 1138 
 1139         # Before we start, make sure host name is a string and that we have at
 1140         # least one port.
 1141         host_name = connector['host']
 1142         if not isinstance(host_name, six.string_types):
 1143             msg = _('create_host: Host name is not unicode or string.')
 1144             LOG.error(msg)
 1145             raise exception.VolumeDriverException(message=msg)
 1146 
 1147         ports = []
 1148         if iscsi:
 1149             if 'initiator' in connector:
 1150                 ports.append(['initiator', '%s' % connector['initiator']])
 1151             else:
 1152                 msg = _('create_host: No initiators supplied.')
 1153         else:
 1154             if 'wwpns' in connector:
 1155                 for wwpn in connector['wwpns']:
 1156                     ports.append(['wwpn', '%s' % wwpn])
 1157             else:
 1158                 msg = _('create_host: No wwpns supplied.')
 1159         if not len(ports):
 1160             LOG.error(msg)
 1161             raise exception.VolumeDriverException(message=msg)
 1162 
 1163         # Build a host name for the Storwize host - first clean up the name
 1164         if isinstance(host_name, six.text_type):
 1165             host_name = unicodedata.normalize('NFKD', host_name).encode(
 1166                 'ascii', 'replace').decode('ascii')
 1167 
 1168         for num in range(0, 128):
 1169             ch = str(chr(num))
 1170             if not ch.isalnum() and ch not in [' ', '.', '-', '_']:
 1171                 host_name = host_name.replace(ch, '-')
 1172 
  1173         # Storwize rejects host names that do not start with a letter or _.
 1174         if not re.match('^[A-Za-z]', host_name):
 1175             host_name = '_' + host_name
 1176 
 1177         # Add a random 8-character suffix to avoid collisions
 1178         rand_id = str(random.randint(0, 99999999)).zfill(8)
 1179         host_name = '%s-%s' % (host_name[:55], rand_id)
 1180 
 1181         # Create a host with one port
 1182         port = ports.pop(0)
 1183         # Host site_id is necessary for hyperswap volume.
 1184         self.ssh.mkhost(host_name, port[0], port[1], site)
 1185 
 1186         # Add any additional ports to the host
 1187         for port in ports:
 1188             self.ssh.addhostport(host_name, port[0], port[1])
 1189 
 1190         LOG.debug('Leave: create_host: host %(host)s - %(host_name)s.',
 1191                   {'host': connector['host'], 'host_name': host_name})
 1192         return host_name
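        # Name-mangling sketch (hypothetical input): a connector host of
        # 'node#1' has '#' replaced by '-', giving 'node-1'; after
        # truncation to 55 characters and suffixing, the result looks
        # like 'node-1-00042137'.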
 1193 
 1194     def update_host(self, host_name, site_name):
 1195         self.ssh.chhost(host_name, site=site_name)
 1196 
 1197     def delete_host(self, host_name):
 1198         self.ssh.rmhost(host_name)
 1199 
 1200     def _get_unused_lun_id(self, host_name):
 1201         luns_used = []
 1203         resp = self.ssh.lshostvdiskmap(host_name)
 1204         for mapping_info in resp:
 1205             luns_used.append(int(mapping_info['SCSI_id']))
 1206 
 1207         luns_used.sort()
 1208         result_lun = str(len(luns_used))
 1209         for index, n in enumerate(luns_used):
 1210             if n > index:
 1211                 result_lun = str(index)
 1212                 break
 1213 
 1214         return result_lun
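        # Example (hypothetical mappings): with SCSI ids [0, 1, 3] in use
        # the loop breaks at index 2 and returns '2'; with [0, 1, 2] it
        # falls through and returns str(len(luns_used)) == '3', i.e. the
        # next id after the highest one in use.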
 1215 
 1216     @cinder_utils.trace
 1217     def map_vol_to_host(self, volume_name, host_name, multihostmap):
  1218         """Create a mapping between a volume and a host."""
 1219 
 1220         # Check if this volume is already mapped to this host
 1221         result_lun = self.ssh.get_vdiskhostmapid(volume_name, host_name)
 1222         if result_lun:
 1223             LOG.debug('volume %(volume_name)s is already mapped to the host '
 1224                       '%(host_name)s.',
 1225                       {'volume_name': volume_name, 'host_name': host_name})
 1226             return int(result_lun)
 1227 
 1228         class _RetryableVolumeDriverException(
 1229                 exception.VolumeBackendAPIException):
 1230             """Exception to identify which types of errors to retry."""
 1231             pass
 1232 
 1233         @cinder_utils.retry(_RetryableVolumeDriverException,
 1234                             interval=2,
 1235                             retries=3,
 1236                             wait_random=True)
 1237         def make_vdisk_host_map():
 1238             try:
 1239                 result_lun = self._get_unused_lun_id(host_name)
 1240                 self.ssh.mkvdiskhostmap(host_name, volume_name, result_lun,
 1241                                         multihostmap)
 1242                 return int(result_lun)
 1243             except Exception as ex:
 1244                 # pylint: disable=E1101
 1245                 if (not multihostmap and hasattr(ex, 'msg') and
 1246                         'CMMVC6071E' in ex.msg):
 1247                     LOG.warning('storwize_svc_multihostmap_enabled is set '
 1248                                 'to False, not allowing multi host mapping.')
 1249                     raise exception.VolumeDriverException(
 1250                         message=_('CMMVC6071E The VDisk-to-host mapping was '
 1251                                   'not created because the VDisk is already '
 1252                                   'mapped to a host.'))
 1253                 if hasattr(ex, 'msg') and 'CMMVC5879E' in ex.msg:
 1254                     raise _RetryableVolumeDriverException(ex)
 1255                 with excutils.save_and_reraise_exception():
 1256                     LOG.error('Error mapping VDisk-to-host.')
 1257 
 1258         return make_vdisk_host_map()
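        # Retry sketch: CMMVC5879E means the chosen SCSI id was taken by a
        # concurrent mapping, so make_vdisk_host_map re-reads the free ids
        # and retries up to 3 times at a randomized ~2 second interval
        # before giving up.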
 1259 
 1260     def unmap_vol_from_host(self, volume_name, host_name):
 1261         """Unmap the volume and delete the host if it has no more mappings."""
 1262 
 1263         LOG.debug('Enter: unmap_vol_from_host: volume %(volume_name)s from '
 1264                   'host %(host_name)s.',
 1265                   {'volume_name': volume_name, 'host_name': host_name})
 1266 
 1267         # Check if the mapping exists
 1268         resp = self.ssh.lsvdiskhostmap(volume_name)
 1269         if not len(resp):
 1270             LOG.warning('unmap_vol_from_host: No mapping of volume '
 1271                         '%(vol_name)s to any host found.',
 1272                         {'vol_name': volume_name})
 1273             return host_name
 1274         if host_name is None:
 1275             if len(resp) > 1:
 1276                 LOG.warning('unmap_vol_from_host: Multiple mappings of '
 1277                             'volume %(vol_name)s found, no host '
 1278                             'specified.', {'vol_name': volume_name})
 1279                 return
 1280             else:
 1281                 host_name = resp[0]['host_name']
 1282         else:
 1283             found = False
 1284             for h in resp.select('host_name'):
 1285                 if h == host_name:
 1286                     found = True
 1287             if not found:
 1288                 LOG.warning('unmap_vol_from_host: No mapping of volume '
 1289                             '%(vol_name)s to host %(host)s found.',
 1290                             {'vol_name': volume_name, 'host': host_name})
 1291                 return host_name
 1292         # We now know that the mapping exists
 1293         self.ssh.rmvdiskhostmap(host_name, volume_name)
 1294 
 1295         LOG.debug('Leave: unmap_vol_from_host: volume %(volume_name)s from '
 1296                   'host %(host_name)s.',
 1297                   {'volume_name': volume_name, 'host_name': host_name})
 1298         return host_name
 1299 
 1300     def check_host_mapped_vols(self, host_name):
 1301         return self.ssh.lshostvdiskmap(host_name)
 1302 
 1303     def check_vol_mapped_to_host(self, vol_name, host_name):
 1304         resp = self.ssh.lsvdiskhostmap(vol_name)
 1305         for mapping_info in resp:
 1306             if mapping_info['host_name'] == host_name:
 1307                 return True
 1308         return False
 1309 
 1310     @staticmethod
 1311     def build_default_opts(config):
 1312         # Ignore capitalization
 1313 
 1314         cluster_partner = config.storwize_svc_stretched_cluster_partner
 1315         opt = {'rsize': config.storwize_svc_vol_rsize,
 1316                'warning': config.storwize_svc_vol_warning,
 1317                'autoexpand': config.storwize_svc_vol_autoexpand,
 1318                'grainsize': config.storwize_svc_vol_grainsize,
 1319                'compression': config.storwize_svc_vol_compression,
 1320                'easytier': config.storwize_svc_vol_easytier,
 1321                'iogrp': config.storwize_svc_vol_iogrp,
 1322                'qos': None,
 1323                'stretched_cluster': cluster_partner,
 1324                'replication': False,
 1325                'nofmtdisk': config.storwize_svc_vol_nofmtdisk,
 1326                'flashcopy_rate': config.storwize_svc_flashcopy_rate,
 1327                'mirror_pool': config.storwize_svc_mirror_pool,
 1328                'volume_topology': None,
 1329                'peer_pool': config.storwize_peer_pool,
 1330                'cycle_period_seconds': config.cycle_period_seconds}
 1331         return opt
 1332 
 1333     @staticmethod
 1334     def check_vdisk_opts(state, opts):
  1335         # Check that grainsize is 8/32/64/128/256
  1336         if opts['grainsize'] not in [8, 32, 64, 128, 256]:
  1337             raise exception.InvalidInput(
  1338                 reason=_('Illegal value specified for '
  1339                          'storwize_svc_vol_grainsize: set to either '
  1340                          '8, 32, 64, 128, or 256.'))
 1341 
 1342         # Check that compression is supported
 1343         if opts['compression'] and not state['compression_enabled']:
 1344             raise exception.InvalidInput(
 1345                 reason=_('System does not support compression.'))
 1346 
 1347         # Check that rsize is set if compression is set
 1348         if opts['compression'] and opts['rsize'] == -1:
 1349             raise exception.InvalidInput(
 1350                 reason=_('If compression is set to True, rsize must '
 1351                          'also be set (not equal to -1).'))
 1352 
  1353         # Check that cycle_period_seconds is in the range 60-86400
  1354         if opts['cycle_period_seconds'] not in range(60, 86401):
  1355             raise exception.InvalidInput(
  1356                 reason=_('cycle_period_seconds should be an integer '
  1357                          'between 60 and 86400.'))
 1358 
 1359         iogs = StorwizeHelpers._get_valid_requested_io_groups(state, opts)
 1360 
 1361         if len(iogs) == 0:
 1362             raise exception.InvalidInput(
 1363                 reason=_('Given I/O group(s) %(iogrp)s not valid; available '
 1364                          'I/O groups are %(avail)s.')
 1365                 % {'iogrp': opts['iogrp'],
 1366                    'avail': state['available_iogrps']})
 1367 
 1368         if opts['nofmtdisk'] and opts['rsize'] != -1:
 1369             raise exception.InvalidInput(
 1370                 reason=_('If nofmtdisk is set to True, rsize must '
 1371                          'also be set to -1.'))
 1372 
 1373     @staticmethod
 1374     def _get_valid_requested_io_groups(state, opts):
 1375         given_iogs = str(opts['iogrp'])
 1376         iog_list = given_iogs.split(',')
 1377         # convert to int
 1378         iog_list = list(map(int, iog_list))
 1379         LOG.debug("Requested iogroups %s", iog_list)
 1380         LOG.debug("Available iogroups %s", state['available_iogrps'])
 1381         filtiog = set(iog_list).intersection(state['available_iogrps'])
 1382         iog_list = list(filtiog)
 1383         LOG.debug("Filtered (valid) requested iogroups %s", iog_list)
 1384         return iog_list
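        # Example (hypothetical config): opts['iogrp'] of '0,2' with
        # available_iogrps [0, 1] filters to [0]; an empty intersection
        # makes check_vdisk_opts raise InvalidInput.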
 1385 
 1386     def _get_opts_from_specs(self, opts, specs):
 1387         qos = {}
 1388         for k, value in specs.items():
 1389             # Get the scope, if using scope format
 1390             key_split = k.split(':')
 1391             if len(key_split) == 1:
 1392                 scope = None
 1393                 key = key_split[0]
 1394             else:
 1395                 scope = key_split[0]
 1396                 key = key_split[1]
 1397 
 1398             # We generally do not look at capabilities in the driver, but
 1399             # replication is a special case where the user asks for
 1400             # a volume to be replicated, and we want both the scheduler and
 1401             # the driver to act on the value.
 1402             if ((not scope or scope == 'capabilities') and
 1403                key == 'replication'):
 1404                 scope = None
 1405                 key = 'replication'
  1406                 words = value.split()
  1407                 if not (words and len(words) == 2 and words[0] == '<is>'):
  1408                     LOG.error('Replication must be specified as '
  1409                               '\'<is> True\' or \'<is> False\'.')
                           # Skip the malformed spec instead of indexing a
                           # word that may not exist.
                           continue
  1411                 value = words[1]
 1412 
 1413             # Add the QoS.
 1414             if scope and scope == 'qos':
 1415                 if key in self.svc_qos_keys.keys():
 1416                     try:
 1417                         type_fn = self.svc_qos_keys[key]['type']
 1418                         value = type_fn(value)
 1419                         qos[key] = value
 1420                     except ValueError:
 1421                         continue
 1422 
 1423             # Any keys that the driver should look at should have the
 1424             # 'drivers' scope.
 1425             if scope and scope != 'drivers':
 1426                 continue
 1427 
 1428             if key in opts:
 1429                 this_type = type(opts[key]).__name__
 1430                 if this_type == 'int':
 1431                     value = int(value)
 1432                 elif this_type == 'bool':
 1433                     value = strutils.bool_from_string(value)
 1434                 opts[key] = value
 1435         if len(qos) != 0:
 1436             opts['qos'] = qos
 1437         return opts
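        # Spec-parsing sketch (hypothetical extra specs): given
        #   {'drivers:rsize': '5', 'drivers:autoexpand': 'true',
        #    'qos:IOThrottling': '1000'}
        # rsize is coerced to int 5 and autoexpand to bool True, while,
        # assuming 'IOThrottling' is registered in self.svc_qos_keys,
        # opts['qos'] gains an 'IOThrottling' entry coerced by its
        # registered type function.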
 1438 
 1439     def _get_qos_from_volume_metadata(self, volume_metadata):
 1440         """Return the QoS information from the volume metadata."""
 1441         qos = {}
 1442         for i in volume_metadata:
 1443             k = i.get('key', None)
 1444             value = i.get('value', None)
 1445             key_split = k.split(':')
 1446             if len(key_split) == 1:
 1447                 scope = None
 1448                 key = key_split[0]
 1449             else:
 1450                 scope = key_split[0]
 1451                 key = key_split[1]
 1452             # Add the QoS.
 1453             if scope and scope == 'qos':
 1454                 if key in self.svc_qos_keys.keys():
 1455                     try:
 1456                         type_fn = self.svc_qos_keys[key]['type']
 1457                         value = type_fn(value)
 1458                         qos[key] = value
 1459                     except ValueError:
 1460                         continue
 1461         return qos
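        # Metadata sketch (hypothetical items):
        #   [{'key': 'qos:IOThrottling', 'value': '500'}]
        # yields a one-entry qos dict; keys outside the 'qos' scope are
        # ignored here.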
 1462 
 1463     def _wait_for_a_condition(self, testmethod, timeout=None,
 1464                               interval=INTERVAL_1_SEC,
 1465                               raise_exception=False):
 1466         start_time = time.time()
 1467         if timeout is None:
 1468             timeout = DEFAULT_TIMEOUT
 1469 
 1470         def _inner():
 1471             try:
 1472                 testValue = testmethod()
 1473             except Exception as ex:
 1474                 if raise_exception:
 1475                     LOG.exception("_wait_for_a_condition: %s"
 1476                                   " execution failed.",
 1477                                   testmethod.__name__)
 1478                     raise exception.VolumeBackendAPIException(data=ex)
 1479                 else:
 1480                     testValue = False
  1482                     LOG.debug('Helper.'
  1483                               '_wait_for_a_condition: %(method_name)s '
  1484                               'execution failed for %(exception)s.',
  1485                               {'method_name': testmethod.__name__,
  1486                                'exception': six.text_type(ex)})
 1487             if testValue:
 1488                 raise loopingcall.LoopingCallDone()
 1489 
 1490             if int(time.time()) - start_time > timeout:
  1491                 msg = (_('StorwizeHelpers._wait_for_a_condition: %s '
  1492                          'timed out.') % testmethod.__name__)
 1493                 LOG.error(msg)
 1494                 raise exception.VolumeBackendAPIException(data=msg)
 1495 
 1496         timer = loopingcall.FixedIntervalLoopingCall(_inner)
 1497         timer.start(interval=interval).wait()
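        # Usage sketch: callers pass a zero-argument predicate, e.g.
        #   self._wait_for_a_condition(prepare_fc_consistgrp_success,
        #                              timeout)
        # which polls every `interval` seconds and raises
        # VolumeBackendAPIException once `timeout` seconds have elapsed.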
 1498 
 1499     def get_vdisk_params(self, config, state, type_id,
 1500                          volume_type=None, volume_metadata=None):
 1501         """Return the parameters for creating the vdisk.
 1502 
 1503         Takes volume type and defaults from config options into account.
 1504         """
 1505         opts = self.build_default_opts(config)
 1506         ctxt = context.get_admin_context()
 1507         if volume_type is None and type_id is not None:
 1508             volume_type = volume_types.get_volume_type(ctxt, type_id)
 1509         if volume_type:
 1510             qos_specs_id = volume_type.get('qos_specs_id')
 1511             specs = dict(volume_type).get('extra_specs')
 1512 
  1513             # NOTE(vhou): We prefer the qos_specs association and
  1514             # override any existing extra-specs settings
  1515             # if present.
 1516             if qos_specs_id is not None:
 1517                 kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
  1518                 # Merge qos_specs into extra_specs; qos_specs take
  1519                 # priority over extra_specs when both define values
  1520                 # for the same key.
 1521                 specs.update(kvs)
 1522             opts = self._get_opts_from_specs(opts, specs)
 1523         if (opts['qos'] is None and config.storwize_svc_allow_tenant_qos
 1524                 and volume_metadata):
 1525             qos = self._get_qos_from_volume_metadata(volume_metadata)
 1526             if len(qos) != 0:
 1527                 opts['qos'] = qos
 1528 
 1529         self.check_vdisk_opts(state, opts)
 1530         return opts
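        # Precedence sketch: qos_specs associated with the volume type
        # override extra_specs for duplicate keys; tenant metadata QoS is
        # consulted only when the type supplied no QoS and
        # storwize_svc_allow_tenant_qos is True.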
 1531 
 1532     def check_data_reduction_pool_params(self, opts):
  1533         """Check configured parameters for volumes in data reduction pools."""
 1534         if opts['warning'] != 0:
 1535             msg = (_('You cannot specify -warning for thin-provisioned or '
 1536                      'compressed volumes that are in data reduction '
 1537                      'pools. The configured warning is '
 1538                      '%s.') % opts['warning'])
 1539             raise exception.VolumeDriverException(message=msg)
 1540         if not opts['easytier']:
 1541             msg = (_('You cannot specify -easytier for thin-provisioned '
 1542                      'or compressed volumes that are in data reduction '
 1543                      'pools. The configured easytier is '
  1544                      '%s.') % opts['easytier'])
 1545             raise exception.VolumeDriverException(message=msg)
 1546         if opts['grainsize'] != 256 and opts['grainsize'] != 8:
 1547             msg = (_('You cannot specify -grainsize for thin-provisioned '
 1548                      'or compressed volumes that are in data reduction '
 1549                      'pools. This type of volume will be created with a '
 1550                      'grainsize of 8 KB. The configured grainsize is '
 1551                      '%s.') % opts['grainsize'])
 1552             raise exception.VolumeDriverException(message=msg)
 1553         if opts['rsize'] != 2:
 1554             if opts['volume_topology'] == 'hyperswap':
  1555                 msg = (_('You cannot specify -buffersize for Hyperswap '
  1556                          'volumes that are in data reduction pools. The '
  1557                          'configured buffersize is %s.') % opts['rsize'])
 1558                 raise exception.VolumeDriverException(message=msg)
 1559             else:
 1560                 msg = (_('You cannot specify -rsize for thin-provisioned '
 1561                          'or compressed volumes that are in data reduction '
 1562                          'pools. The -rsize parameter will be ignored in '
 1563                          'mkvdisk. Only its presence or absence is used to '
 1564                          'determine if the disk is a data reduction volume '
 1565                          'copy or a thick volume copy. The '
 1566                          'configured rsize is %s.') % opts['rsize'])
 1567                 raise exception.VolumeDriverException(message=msg)
  1568         if not opts['autoexpand']:
  1569             msg = (_('You cannot disable autoexpand for '
  1570                      'thin-provisioned or compressed volumes that are in '
  1571                      'data reduction pools. The configured '
  1572                      'autoexpand is %s.') % opts['autoexpand'])
  1573             raise exception.VolumeDriverException(message=msg)
  1574         else:
  1575             LOG.info('Note: warning, grainsize and easytier cannot be '
  1576                      'specified for thin-provisioned or compressed '
  1577                      'volumes that are in data reduction pools; the '
  1578                      'rsize parameter is ignored and autoexpand must '
  1579                      'be enabled.')
 1580 
 1581     def is_volume_type_dr_pools(self, pool, opts, rep_type=None,
 1582                                 rep_target_pool=None):
  1583         """Check whether any configured pool is a data reduction pool."""
 1584         if self.is_data_reduction_pool(pool):
 1585             LOG.debug('The configured pool %s is a data reduction pool.', pool)
 1586             return True
 1587 
 1588         if opts['mirror_pool'] and self.is_data_reduction_pool(
 1589                 opts['mirror_pool']):
 1590             LOG.debug('The mirror_pool %s is a data reduction pool.',
 1591                       opts['mirror_pool'])
 1592             return True
 1593 
 1594         if (opts['volume_topology'] == 'hyperswap' and
 1595                 self.is_data_reduction_pool(opts['peer_pool'])):
 1596             LOG.debug('The peer_pool %s is a data reduction pool.',
 1597                       opts['peer_pool'])
 1598             return True
 1599 
 1600         if rep_type and self.is_data_reduction_pool(rep_target_pool):
 1601             LOG.debug('The replica target pool %s is a data reduction pool.',
 1602                       rep_target_pool)
 1603             return True
 1604 
 1605         return False
 1606 
 1607     @staticmethod
 1608     def _get_vdisk_create_params(opts, is_dr_pool, add_copies=False):
 1609         easytier = 'on' if opts['easytier'] else 'off'
 1610         if opts['rsize'] == -1:
 1611             params = []
 1612             if opts['nofmtdisk']:
 1613                 params.append('-nofmtdisk')
 1614         else:
 1615             if is_dr_pool:
 1616                 params = ['-rsize', '%s%%' % str(opts['rsize']), '-autoexpand']
 1617                 if opts['compression']:
 1618                     params.append('-compressed')
 1619             else:
 1620                 params = ['-rsize', '%s%%' % str(opts['rsize']),
 1621                           '-autoexpand', '-warning',
 1622                           '%s%%' % str(opts['warning'])]
 1623                 if not opts['autoexpand']:
 1624                     params.remove('-autoexpand')
 1625 
 1626                 if opts['compression']:
 1627                     params.append('-compressed')
 1628                 else:
 1629                     params.extend(['-grainsize', str(opts['grainsize'])])
 1630 
 1631         if add_copies and opts['mirror_pool']:
 1632             params.extend(['-copies', '2'])
 1633 
 1634         if not is_dr_pool:
 1635             params.extend(['-easytier', easytier])
 1636         return params
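        # Parameter sketch (hypothetical opts): rsize=2, warning=0,
        # autoexpand=True, compression=False, grainsize=256, easytier=True
        # in a non-DR pool yields
        #   ['-rsize', '2%', '-autoexpand', '-warning', '0%',
        #    '-grainsize', '256', '-easytier', 'on']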
 1637 
 1638     def create_vdisk(self, name, size, units, pool, opts):
 1639         LOG.debug('Enter: create_vdisk: vdisk %s.', name)
 1640         mdiskgrp = pool
 1641         if opts['mirror_pool']:
 1642             if not self.is_pool_defined(opts['mirror_pool']):
 1643                 raise exception.InvalidInput(
 1644                     reason=_('The pool %s in which mirrored copy is stored '
 1645                              'is invalid') % opts['mirror_pool'])
  1646             # SVC expects the pool for a mirrored volume to be given as
  1647             # pool:mirror_pool in the mdiskgrp parameter.
 1648             mdiskgrp = '%s:%s' % (pool, opts['mirror_pool'])
 1649 
 1650         is_dr_pool = False
 1651         if opts['rsize'] != -1:
 1652             is_dr_pool = self.is_volume_type_dr_pools(pool, opts)
 1653             if is_dr_pool:
 1654                 self.check_data_reduction_pool_params(opts)
 1655         params = self._get_vdisk_create_params(
 1656             opts, is_dr_pool,
  1657             add_copies=bool(opts['mirror_pool']))
 1658         self.ssh.mkvdisk(name, size, units, mdiskgrp, opts, params)
  1659         LOG.debug('Leave: create_vdisk: vdisk %s.', name)
 1660 
 1661     def _get_hyperswap_volume_create_params(self, opts, is_dr_pool):
  1662         # Storwize/SVC uses the CLI command mkvolume to create a hyperswap
  1663         # volume. -thin must be specified with grainsize, and either
  1664         # -thin or -compressed with warning.
 1665         params = []
 1666         LOG.debug('The I/O groups of a hyperswap volume will be selected by '
 1667                   'storage.')
 1668         if is_dr_pool:
 1669             if opts['compression']:
 1670                 params.append('-compressed')
 1671             else:
 1672                 params.append('-thin')
 1673         else:
 1674             params.extend(['-buffersize', '%s%%' % str(opts['rsize']),
 1675                            '-warning',
 1676                            '%s%%' % six.text_type(opts['warning'])])
 1677             if not opts['autoexpand']:
 1678                 params.append('-noautoexpand')
 1679             if opts['compression']:
 1680                 params.append('-compressed')
 1681             else:
 1682                 params.append('-thin')
 1683                 params.extend(['-grainsize', six.text_type(opts['grainsize'])])
 1684         return params
 1685 
 1686     def create_hyperswap_volume(self, vol_name, size, units, pool, opts):
 1687         vol_name = '"%s"' % vol_name
 1688         params = []
 1689         if opts['rsize'] != -1:
 1690             is_dr_pool = self.is_volume_type_dr_pools(pool, opts)
 1691             if is_dr_pool:
 1692                 self.check_data_reduction_pool_params(opts)
 1693             params = self._get_hyperswap_volume_create_params(opts, is_dr_pool)
 1694         hyperpool = '%s:%s' % (pool, opts['peer_pool'])
 1695         self.ssh.mkvolume(vol_name, six.text_type(size), units,
 1696                           hyperpool, params)
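        # Pool-pair sketch (hypothetical pools): for pool 'site1pool' and
        # peer_pool 'site2pool' the mkvolume target is
        # 'site1pool:site2pool', placing one copy of the volume at each
        # site.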
 1697 
 1698     def convert_volume_to_hyperswap(self, vol_name, opts, state):
  1700         if not self.is_system_topology_hyperswap(state):
  1701             msg = _('Convert volume to hyperswap failed: the system is '
  1702                     'below release 7.6.0.0 or is not in hyperswap '
  1703                     'topology.')
  1704             raise exception.VolumeDriverException(message=msg)
 1705         else:
 1706             attr = self.get_vdisk_attributes(vol_name)
 1707             if attr is None:
 1708                 msg = (_('convert_volume_to_hyperswap: Failed to get '
 1709                          'attributes for volume %s.') % vol_name)
 1710                 LOG.error(msg)
 1711                 raise exception.VolumeDriverException(message=msg)
 1712             pool = attr['mdisk_grp_name']
 1713             self.check_hyperswap_pool(pool, opts['peer_pool'])
 1714             hyper_pool = '%s' % opts['peer_pool']
 1715             is_dr_pool = self.is_volume_type_dr_pools(pool, opts)
 1716             if is_dr_pool and opts['rsize'] != -1:
 1717                 self.check_data_reduction_pool_params(opts)
 1718             params = self._get_hyperswap_volume_create_params(opts, is_dr_pool)
 1719             self.ssh.addvolumecopy(vol_name, hyper_pool, params)
 1720 
 1721     def convert_hyperswap_volume_to_normal(self, vol_name, peer_pool):
  1724         self.ssh.rmvolumecopy(vol_name, peer_pool)
 1725 
 1726     def delete_hyperswap_volume(self, volume, force_unmap, force_delete):
 1727         """Ensures that vdisk is not part of FC mapping and deletes it."""
 1728         if not self.is_vdisk_defined(volume):
 1729             LOG.warning('Tried to delete non-existent volume %s.', volume)
 1730             return
 1731         self.ensure_vdisk_no_fc_mappings(volume, allow_snaps=True,
 1732                                          allow_fctgt=True)
 1733         self.ssh.rmvolume(volume,
 1734                           force_unmap=force_unmap,
 1735                           force_delete=force_delete)
 1736 
 1737     def get_vdisk_attributes(self, vdisk):
 1738         attrs = self.ssh.lsvdisk(vdisk)
 1739         return attrs
 1740 
 1741     def is_vdisk_defined(self, vdisk_name):
 1742         """Check if vdisk is defined."""
 1743         attrs = self.get_vdisk_attributes(vdisk_name)
 1744         return attrs is not None
 1745 
 1746     def find_vdisk_copy_id(self, vdisk, pool):
 1747         resp = self.ssh.lsvdiskcopy(vdisk)
 1748         for copy_id, mdisk_grp in resp.select('copy_id', 'mdisk_grp_name'):
 1749             if mdisk_grp == pool:
 1750                 return copy_id
 1751         msg = _('Failed to find a vdisk copy in the expected pool.')
 1752         LOG.error(msg)
 1753         raise exception.VolumeDriverException(message=msg)
 1754 
 1755     def get_vdisk_copy_attrs(self, vdisk, copy_id):
 1756         return self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0]
 1757 
 1758     def get_vdisk_copies(self, vdisk):
 1759         copies = {'primary': None,
 1760                   'secondary': None}
 1761 
 1762         resp = self.ssh.lsvdiskcopy(vdisk)
 1763         for copy_id, status, sync, primary, mdisk_grp in (
 1764             resp.select('copy_id', 'status', 'sync',
 1765                         'primary', 'mdisk_grp_name')):
 1766             copy = {'copy_id': copy_id,
 1767                     'status': status,
 1768                     'sync': sync,
 1769                     'primary': primary,
 1770                     'mdisk_grp_name': mdisk_grp,
 1771                     'sync_progress': None}
 1772             if copy['sync'] != 'yes':
 1773                 progress_info = self.ssh.lsvdisksyncprogress(vdisk, copy_id)
 1774                 copy['sync_progress'] = progress_info['progress']
 1775             if copy['primary'] == 'yes':
 1776                 copies['primary'] = copy
 1777             else:
 1778                 copies['secondary'] = copy
 1779         return copies
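        # Shape of the result (hypothetical values):
        #   {'primary': {'copy_id': '0', 'status': 'online', 'sync': 'yes',
        #                'primary': 'yes', 'mdisk_grp_name': 'pool1',
        #                'sync_progress': None},
        #    'secondary': None}
        # sync_progress is only populated while a copy reports
        # sync != 'yes'.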
 1780 
 1781     def _prepare_fc_map(self, fc_map_id, timeout, restore):
 1782         self.ssh.prestartfcmap(fc_map_id, restore)
 1783         mapping_ready = False
 1784         max_retries = (timeout // self.WAIT_TIME) + 1
 1785         for try_number in range(1, max_retries):
 1786             mapping_attrs = self._get_flashcopy_mapping_attributes(fc_map_id)
 1787             if (mapping_attrs is None or
 1788                     'status' not in mapping_attrs):
 1789                 break
 1790             if mapping_attrs['status'] == 'prepared':
 1791                 mapping_ready = True
 1792                 break
 1793             elif mapping_attrs['status'] == 'stopped':
 1794                 self.ssh.prestartfcmap(fc_map_id, restore)
 1795             elif mapping_attrs['status'] != 'preparing':
  1796                 msg = (_('Unexpected mapping status %(status)s for mapping '
 1797                          '%(id)s. Attributes: %(attr)s.')
 1798                        % {'status': mapping_attrs['status'],
 1799                           'id': fc_map_id,
 1800                           'attr': mapping_attrs})
 1801                 LOG.error(msg)
 1802                 raise exception.VolumeBackendAPIException(data=msg)
 1803             greenthread.sleep(self.WAIT_TIME)
 1804 
 1805         if not mapping_ready:
 1806             msg = (_('Mapping %(id)s prepare failed to complete within the '
 1807                      'allotted %(to)d seconds timeout. Terminating.')
 1808                    % {'id': fc_map_id,
 1809                       'to': timeout})
 1810             LOG.error(msg)
 1811             raise exception.VolumeDriverException(message=msg)
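        # State sketch: prestartfcmap drives the mapping through
        # 'preparing' to 'prepared'; a 'stopped' mapping is re-prepared,
        # any other unexpected status raises, and polling gives up after
        # roughly timeout / WAIT_TIME attempts.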
 1812 
 1813     def start_fc_consistgrp(self, fc_consistgrp):
 1814         self.ssh.startfcconsistgrp(fc_consistgrp)
 1815 
 1816     def create_fc_consistgrp(self, fc_consistgrp):
 1817         self.ssh.mkfcconsistgrp(fc_consistgrp)
 1818 
 1819     def delete_fc_consistgrp(self, fc_consistgrp):
 1820         self.ssh.rmfcconsistgrp(fc_consistgrp)
 1821 
 1822     def stop_fc_consistgrp(self, fc_consistgrp):
 1823         self.ssh.stopfcconsistgrp(fc_consistgrp)
 1824 
 1825     def run_consistgrp_snapshots(self, fc_consistgrp, snapshots, state,
 1826                                  config, timeout):
 1827         model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE}
 1828         snapshots_model_update = []
 1829         try:
 1830             for snapshot in snapshots:
 1831                 opts = self.get_vdisk_params(config, state,
 1832                                              snapshot['volume_type_id'])
 1833                 volume = snapshot.volume
 1834                 if not volume:
 1835                     msg = (_("Can't get volume from snapshot: %(id)s")
 1836                            % {"id": snapshot.id})
 1837                     LOG.error(msg)
 1838                     raise exception.VolumeBackendAPIException(data=msg)
 1839                 pool = volume_utils.extract_host(volume.host, 'pool')
 1840                 self.create_flashcopy_to_consistgrp(snapshot['volume_name'],
 1841                                                     snapshot['name'],
 1842                                                     fc_consistgrp,
 1843                                                     config, opts, False,
 1844                                                     pool=pool)
 1845 
 1846             self.prepare_fc_consistgrp(fc_consistgrp, timeout)
 1847             self.start_fc_consistgrp(fc_consistgrp)
  1848             # The SVC supports at most 128 FlashCopy consistency groups,
  1849             # so after starting the CG we delete it to stay under that
  1850             # limit; Cinder itself maintains the CG/snapshot relationship.
 1851             self.delete_fc_consistgrp(fc_consistgrp)
 1852         except exception.VolumeBackendAPIException as err:
 1853             model_update['status'] = fields.GroupSnapshotStatus.ERROR
 1854             # Release cg
 1855             self.delete_fc_consistgrp(fc_consistgrp)
 1856             LOG.error("Failed to create CGSnapshot. "
 1857                       "Exception: %s.", err)
 1858 
 1859         for snapshot in snapshots:
 1860             snapshots_model_update.append(
 1861                 {'id': snapshot['id'],
 1862                  'status': model_update['status'],
 1863                  'replication_status': fields.ReplicationStatus.NOT_CAPABLE})
 1864         return model_update, snapshots_model_update
 1865 
 1866     def delete_consistgrp_snapshots(self, fc_consistgrp, snapshots):
  1867         """Delete flashcopy maps and consistency group."""
 1868         model_update = {'status': fields.GroupSnapshotStatus.DELETED}
 1869         snapshots_model_update = []
 1870 
 1871         try:
 1872             self.delete_fc_consistgrp(fc_consistgrp)
 1873         except exception.VolumeBackendAPIException as err:
 1874             if CMMVC5753E in err.msg:
  1875                 LOG.warning('Failed to delete flash copy consistency '
  1876                             'group %s as it does not exist; ignoring '
  1877                             'err: %s', fc_consistgrp, err)
 1878 
 1879         for snapshot in snapshots:
 1880             try:
 1881                 self.delete_vdisk(snapshot['name'],
 1882                                   force_unmap=False,
 1883                                   force_delete=True)
 1884                 snapshots_model_update.append(
 1885                     {'id': snapshot['id'],
 1886                      'status': fields.GroupSnapshotStatus.DELETED})
 1887             except exception.VolumeBackendAPIException as err:
 1888                 model_update['status'] = (
 1889                     fields.GroupSnapshotStatus.ERROR_DELETING)
 1890                 snapshots_model_update.append(
 1891                     {'id': snapshot['id'],
 1892                      'status': fields.GroupSnapshotStatus.ERROR_DELETING})
 1893                 LOG.error("Failed to delete the snapshot %(snap)s of "
 1894                           "CGSnapshot. Exception: %(exception)s.",
 1895                           {'snap': snapshot['name'], 'exception': err})
 1896 
 1897         return model_update, snapshots_model_update
 1898 
 1899     def prepare_fc_consistgrp(self, fc_consistgrp, timeout):
 1900         """Prepare FC Consistency Group."""
 1901         self.ssh.prestartfcconsistgrp(fc_consistgrp)
 1902 
 1903         def prepare_fc_consistgrp_success():
 1904             mapping_ready = False
 1905             mapping_attrs = self._get_flashcopy_consistgrp_attr(fc_consistgrp)
  1906             if (mapping_attrs is None or
  1907                     'status' not in mapping_attrs):
                       # Not ready yet; returning False avoids indexing a
                       # missing attribute below.
  1908                 return mapping_ready
  1909             if mapping_attrs['status'] == 'prepared':
 1910                 mapping_ready = True
 1911             elif mapping_attrs['status'] == 'stopped':
 1912                 self.ssh.prestartfcconsistgrp(fc_consistgrp)
 1913             elif mapping_attrs['status'] != 'preparing':
 1914                 msg = (_('Unexpected mapping status %(status)s for mapping '
 1915                          '%(id)s. Attributes: %(attr)s.') %
 1916                        {'status': mapping_attrs['status'],
 1917                         'id': fc_consistgrp,
 1918                         'attr': mapping_attrs})
 1919                 LOG.error(msg)
 1920                 raise exception.VolumeBackendAPIException(data=msg)
 1921             return mapping_ready
 1922         self._wait_for_a_condition(prepare_fc_consistgrp_success, timeout)
 1923 
 1924     def create_cg_from_source(self, group, fc_consistgrp,
 1925                               sources, targets, state,
 1926                               config, timeout):
  1927         """Create a consistency group from a source."""
 1928         LOG.debug('Enter: create_cg_from_source: cg %(cg)s'
 1929                   ' source %(source)s, target %(target)s',
 1930                   {'cg': fc_consistgrp, 'source': sources, 'target': targets})
 1931         model_update = {'status': fields.GroupStatus.AVAILABLE}
 1932         ctxt = context.get_admin_context()
 1933         try:
 1934             for source, target in zip(sources, targets):
 1935                 opts = self.get_vdisk_params(config, state,
 1936                                              source['volume_type_id'])
 1937                 pool = volume_utils.extract_host(target['host'], 'pool')
 1938                 self.create_flashcopy_to_consistgrp(source['name'],
 1939                                                     target['name'],
 1940                                                     fc_consistgrp,
 1941                                                     config, opts,
 1942                                                     True, pool=pool)
 1943             self.prepare_fc_consistgrp(fc_consistgrp, timeout)
 1944             self.start_fc_consistgrp(fc_consistgrp)
 1945             self.delete_fc_consistgrp(fc_consistgrp)
 1946             volumes_model_update = self._get_volume_model_updates(
 1947                 ctxt, targets, group['id'], model_update['status'])
 1948         except exception.VolumeBackendAPIException as err:
 1949             model_update['status'] = fields.GroupStatus.ERROR
 1950             volumes_model_update = self._get_volume_model_updates(
 1951                 ctxt, targets, group['id'], model_update['status'])
 1952             with excutils.save_and_reraise_exception():
 1953                 # Release cg
 1954                 self.delete_fc_consistgrp(fc_consistgrp)
 1955                 LOG.error("Failed to create CG from CGsnapshot. "
 1956                           "Exception: %s", err)
 1958 
 1959         LOG.debug('Leave: create_cg_from_source.')
 1960         return model_update, volumes_model_update
 1961 
 1962     def _get_volume_model_updates(self, ctxt, volumes, cgId,
 1963                                   status='available'):
 1964         """Update the volume model's status and return it."""
 1965         volume_model_updates = []
 1966         LOG.info("Updating status for CG: %(id)s.",
 1967                  {'id': cgId})
 1968         if volumes:
 1969             for volume in volumes:
 1970                 volume_model_updates.append({
 1971                     'id': volume['id'],
 1972                     'status': status,
 1973                     'replication_status':
 1974                         fields.ReplicationStatus.NOT_CAPABLE})
 1975         else:
 1976             LOG.info("No volume found for CG: %(cg)s.",
 1977                      {'cg': cgId})
 1978         return volume_model_updates
 1979 
 1980     def check_flashcopy_rate(self, flashcopy_rate):
 1981         if not self.code_level:
 1982             sys_info = self.get_system_info()
 1983             self.code_level = sys_info['code_level']
 1984 
 1985         if flashcopy_rate not in range(1, 151):
 1986             raise exception.InvalidInput(
 1987                 reason=_('The configured flashcopy rate should be '
 1988                          'between 1 and 150.'))
  1989         elif self.code_level < (7, 8, 1, 0) and flashcopy_rate > 100:
  1990             msg = (_('The configured flashcopy rate is %(fc_rate)s and '
  1991                      'the storage code level is %(code_level)s; the valid '
  1992                      'flashcopy rate range is 1-100 when the Storwize '
  1993                      'code level is below 7.8.1.')
  1994                    % {'fc_rate': flashcopy_rate,
  1995                       'code_level': self.code_level})
 1995             LOG.error(msg)
 1996             raise exception.VolumeDriverException(message=msg)
 1997 
 1998     def update_flashcopy_rate(self, volume_name, new_flashcopy_rate):
 1999         mapping_ids = self._get_vdisk_fc_mappings(volume_name)
 2000         for map_id in mapping_ids:
 2001             attrs = self._get_flashcopy_mapping_attributes(map_id)
 2002             copy_rate = attrs['copy_rate']
 2003             # update flashcopy rate for clone volume
 2004             if copy_rate != '0':
 2005                 self.ssh.chfcmap(map_id,
 2006                                  copyrate=six.text_type(new_flashcopy_rate))
 2007 
 2008     def run_flashcopy(self, source, target, timeout, copy_rate,
 2009                       full_copy=True, restore=False):
 2010         """Create a FlashCopy mapping from the source to the target."""
 2011         LOG.debug('Enter: run_flashcopy: execute FlashCopy from source '
 2012                   '%(source)s to target %(target)s.',
 2013                   {'source': source, 'target': target})
 2014         self.check_flashcopy_rate(copy_rate)
 2015         fc_map_id = self.ssh.mkfcmap(source, target, full_copy, copy_rate)
 2016         self._prepare_fc_map(fc_map_id, timeout, restore)
 2017         self.ssh.startfcmap(fc_map_id, restore)
 2018 
 2019         LOG.debug('Leave: run_flashcopy: FlashCopy started from '
 2020                   '%(source)s to %(target)s.',
 2021                   {'source': source, 'target': target})
 2022 
 2023     def create_flashcopy_to_consistgrp(self, source, target, consistgrp,
 2024                                        config, opts, full_copy=False,
 2025                                        pool=None):
 2026         """Create a FlashCopy mapping and add to consistent group."""
 2027         LOG.debug('Enter: create_flashcopy_to_consistgrp: create FlashCopy'
 2028                   ' from source %(source)s to target %(target)s. '
 2029                   'Then add the flashcopy to %(cg)s.',
 2030                   {'source': source, 'target': target, 'cg': consistgrp})
 2031 
 2032         src_attrs = self.get_vdisk_attributes(source)
 2033         if src_attrs is None:
 2034             msg = (_('create_copy: Source vdisk %(src)s '
 2035                      'does not exist.') % {'src': source})
 2036             LOG.error(msg)
 2037             raise exception.VolumeDriverException(message=msg)
 2038 
 2039         src_size = src_attrs['capacity']
  2040         # Use the source vdisk's pool unless a specific pool was requested.
 2041         if not pool:
 2042             pool = src_attrs['mdisk_grp_name']
 2043         opts['iogrp'] = src_attrs['IO_group_id']
 2044         self.create_vdisk(target, src_size, 'b', pool, opts)
 2045 
 2046         self.check_flashcopy_rate(opts['flashcopy_rate'])
 2047         self.ssh.mkfcmap(source, target, full_copy,
 2048                          opts['flashcopy_rate'],
 2049                          consistgrp=consistgrp)
 2050 
 2051         LOG.debug('Leave: create_flashcopy_to_consistgrp: '
  2052                   'FlashCopy started from %(source)s to %(target)s.',
 2053                   {'source': source, 'target': target})
 2054 
 2055     def _get_vdisk_fc_mappings(self, vdisk):
 2056         """Return FlashCopy mappings that this vdisk is associated with."""
 2057         mapping_ids = []
 2058         resp = self.ssh.lsvdiskfcmappings(vdisk)
  2059         for fc_map_id in resp.select('id'):
  2060             mapping_ids.append(fc_map_id)
 2061         return mapping_ids
 2062 
 2063     def _get_flashcopy_mapping_attributes(self, fc_map_id):
 2064         try:
 2065             resp = self.ssh.lsfcmap(fc_map_id)
 2066             return resp[0] if len(resp) else None
 2067         except exception.VolumeBackendAPIException as ex:
 2068             LOG.warning("Failed to get fcmap %(fcmap)s info. "
 2069                         "Exception: %(ex)s.", {'fcmap': fc_map_id,
 2070                                                'ex': ex})
 2071             return None
 2072 
 2073     def _get_flashcopy_consistgrp_attr(self, fc_map_id):
 2074         resp = self.ssh.lsfcconsistgrp(fc_map_id)
 2075         if not len(resp):
 2076             return None
 2077         return resp[0]
 2078 
 2079     @cinder_utils.trace
 2080     def _check_delete_vdisk_fc_mappings(self, name, allow_snaps=True,
 2081                                         allow_fctgt=False):
 2082         """FlashCopy mapping check helper."""
 2083         mapping_ids = self._get_vdisk_fc_mappings(name)
 2084         wait_for_copy = False
 2085         for map_id in mapping_ids:
 2086             attrs = self._get_flashcopy_mapping_attributes(map_id)
 2087             # We should ignore GMCV flash copies
 2088             # Hyperswap flash copies are also ignored.
 2089             if not attrs or 'yes' == attrs['rc_controlled']:
 2090                 continue
 2091             source = attrs['source_vdisk_name']
 2092             target = attrs['target_vdisk_name']
 2093             copy_rate = attrs['copy_rate']
 2094             status = attrs['status']
 2095             progress = attrs['progress']
 2096 
 2097             LOG.debug('Loopcall: source: %s, target: %s, copy_rate: %s, '
 2098                       'status: %s, progress: %s, mapid: %s', source, target,
 2099                       copy_rate, status, progress, map_id)
 2100             if allow_fctgt and target == name and status == 'copying':
 2101                 try:
 2102                     self.ssh.stopfcmap(map_id)
 2103                 except exception.VolumeBackendAPIException as ex:
 2104                     LOG.warning(ex)
 2105                     wait_for_copy = True
 2106                 try:
 2107                     attrs = self._get_flashcopy_mapping_attributes(map_id)
 2108                 except exception.VolumeBackendAPIException as ex:
 2109                     LOG.warning(ex)
 2110                     wait_for_copy = True
 2111                     continue
 2112                 if attrs:
 2113                     status = attrs['status']
 2114                 else:
 2115                     continue
 2116 
 2117             if copy_rate == '0':
 2118                 if source == name:
 2119                     # Vdisk with snapshots. Return False if snapshot
 2120                     # not allowed.
 2121                     if not allow_snaps:
 2122                         raise loopingcall.LoopingCallDone(retvalue=False)
 2123                     self.ssh.chfcmap(map_id, copyrate='50', autodel='on')
 2124                     wait_for_copy = True
 2125                 else:
 2126                     # A snapshot
 2127                     if target != name:
 2128                         msg = (_('Vdisk %(name)s not involved in '
 2129                                  'mapping %(src)s -> %(tgt)s.') %
 2130                                {'name': name, 'src': source, 'tgt': target})
 2131                         LOG.error(msg)
 2132                         raise exception.VolumeDriverException(message=msg)
 2133                     try:
 2134                         if status in ['copying', 'prepared']:
 2135                             self.ssh.stopfcmap(map_id)
 2136                             # Need to wait for the fcmap to change to
 2137                             # stopped state before remove fcmap
 2138                             wait_for_copy = True
 2139                         elif status in ['stopping', 'preparing']:
 2140                             wait_for_copy = True
 2141                         else:
 2142                             self.ssh.rmfcmap(map_id)
 2143                     except exception.VolumeBackendAPIException as ex:
 2144                         LOG.warning(ex)
 2145                         wait_for_copy = True
  2146             # Copy in progress: wait, the mapping will auto-delete when done
 2147             else:
 2148                 try:
 2149                     if status == 'prepared':
 2150                         self.ssh.stopfcmap(map_id)
 2151                         self.ssh.rmfcmap(map_id)
 2152                     elif status in ['idle_or_copied', 'stopped']:
 2153                         # Prepare failed or stopped
 2154                         self.ssh.rmfcmap(map_id)
 2155                     elif (status in ['copying', 'prepared'] and
 2156                           progress == '100'):
 2157                         self.ssh.stopfcmap(map_id)
 2158                     else:
 2159                         wait_for_copy = True
 2160                 except exception.VolumeBackendAPIException as ex:
 2161                     LOG.warning(ex)
 2162                     wait_for_copy = True
 2163 
 2164         if not wait_for_copy or not len(mapping_ids):
 2165             raise loopingcall.LoopingCallDone(retvalue=True)
 2166 
 2167     @cinder_utils.trace
 2168     def _check_vdisk_fc_mappings(self, name, allow_snaps=True,
 2169                                  allow_fctgt=False):
 2170         """FlashCopy mapping check helper."""
  2171         # If this is a disk removal we need to be down to one fc clone.
 2172         mapping_ids = self._get_vdisk_fc_mappings(name)
 2173         if len(mapping_ids) > 1 and allow_fctgt:
 2174             LOG.debug('Loopcall: vdisk %s has '
 2175                       'more than one fc map. Waiting.', name)
 2176             for map_id in mapping_ids:
 2177                 attrs = self._get_flashcopy_mapping_attributes(map_id)
 2178                 if not attrs:
 2179                     continue
 2180                 source = attrs['source_vdisk_name']
 2181                 target = attrs['target_vdisk_name']
 2182                 copy_rate = attrs['copy_rate']
 2183                 status = attrs['status']
 2184                 progress = attrs['progress']
 2185                 LOG.debug('Loopcall: source: %s, target: %s, copy_rate: %s, '
 2186                           'status: %s, progress: %s, mapid: %s',
 2187                           source, target, copy_rate, status, progress, map_id)
 2188 
 2189                 if copy_rate != '0' and source == name:
 2190                     try:
 2191                         if status in ['copying'] and progress == '100':
 2192                             self.ssh.stopfcmap(map_id)
 2193                         elif status == 'idle_or_copied' and progress == '100':
 2194                             # wait for auto-delete of fcmap.
 2195                             continue
 2196                         elif status in ['idle_or_copied', 'stopped']:
 2197                             # Prepare failed or stopped
 2198                             self.ssh.rmfcmap(map_id)
 2199                     # handle VolumeBackendAPIException to let it go through
 2200                     # next attempts in case of any cli exception.
 2201                     except exception.VolumeBackendAPIException as ex:
 2202                         LOG.warning(ex)
 2203             return
 2204         return self._check_delete_vdisk_fc_mappings(
 2205             name, allow_snaps=allow_snaps, allow_fctgt=allow_fctgt)
 2206 
 2207     def ensure_vdisk_no_fc_mappings(self, name, allow_snaps=True,
 2208                                     allow_fctgt=False):
 2209         """Ensure vdisk has no flashcopy mappings."""
 2210         timer = loopingcall.FixedIntervalLoopingCall(
 2211             self._check_vdisk_fc_mappings, name,
 2212             allow_snaps, allow_fctgt)
  2213         # Create a timer greenthread. The default volume service
  2214         # heartbeat is every 10 seconds. The flashcopy usually takes
  2215         # hours to finish. Don't set the sleep interval shorter than
  2216         # the heartbeat; otherwise the volume service heartbeat will
  2217         # not be serviced.
  2218         LOG.debug('Calling ensure_vdisk_no_fc_mappings: vdisk %s.',
  2219                   name)
 2220         ret = timer.start(interval=self.check_fcmapping_interval).wait()
 2221         timer.stop()
 2222         return ret
 2223 
 2224     def start_relationship(self, volume_name, primary=None):
 2225         vol_attrs = self.get_vdisk_attributes(volume_name)
 2226         if vol_attrs['RC_name']:
 2227             self.ssh.startrcrelationship(vol_attrs['RC_name'], primary)
 2228 
 2229     def stop_relationship(self, volume_name, access=False):
 2230         vol_attrs = self.get_vdisk_attributes(volume_name)
 2231         if vol_attrs['RC_name']:
 2232             self.ssh.stoprcrelationship(vol_attrs['RC_name'], access=access)
 2233 
 2234     def create_relationship(self, master, aux, system, asyncmirror,
 2235                             cyclingmode=False, masterchange=None,
 2236                             cycle_period_seconds=None):
 2237         try:
 2238             rc_id = self.ssh.mkrcrelationship(master, aux, system,
 2239                                               asyncmirror, cyclingmode)
 2240         except exception.VolumeBackendAPIException as ex:
 2241             rc_id = None
  2242             # CMMVC5959E is the Storwize error code meaning that a
  2243             # relationship with this name already exists on the
  2244             # master cluster.
 2245             # pylint: disable=E1101
 2246             if hasattr(ex, 'msg') and 'CMMVC5959E' not in ex.msg:
 2247                 # If there is no relation between the primary and the
 2248                 # secondary back-end storage, the exception is raised.
 2249                 raise
 2250         if rc_id:
  2251             # We need to set up master and aux change volumes for GMCV
  2252             # before we can start the remote relationship; the aux
  2253             # change volume must be set on the target site.
 2254             if cycle_period_seconds:
 2255                 self.change_relationship_cycleperiod(master,
 2256                                                      cycle_period_seconds)
 2257             if masterchange:
 2258                 self.change_relationship_changevolume(master,
 2259                                                       masterchange, True)
 2260             else:
 2261                 self.start_relationship(master)
 2262 
 2263     def change_relationship_changevolume(self, volume_name,
 2264                                          change_volume, master):
 2265         vol_attrs = self.get_vdisk_attributes(volume_name)
 2266         if vol_attrs['RC_name'] and change_volume:
 2267             self.ssh.ch_rcrelationship_changevolume(vol_attrs['RC_name'],
 2268                                                     change_volume, master)
 2269 
 2270     def change_relationship_cycleperiod(self, volume_name,
 2271                                         cycle_period_seconds):
 2272         vol_attrs = self.get_vdisk_attributes(volume_name)
 2273         if vol_attrs['RC_name'] and cycle_period_seconds:
 2274             self.ssh.ch_rcrelationship_cycleperiod(vol_attrs['RC_name'],
 2275                                                    cycle_period_seconds)
 2276 
 2277     def delete_relationship(self, volume_name):
 2278         vol_attrs = self.get_vdisk_attributes(volume_name)
 2279         if vol_attrs['RC_name']:
 2280             self.ssh.rmrcrelationship(vol_attrs['RC_name'], True)
 2281 
 2282     def get_relationship_info(self, volume_name):
 2283         vol_attrs = self.get_vdisk_attributes(volume_name)
 2284         if not vol_attrs or not vol_attrs['RC_name']:
 2285             LOG.info("Unable to get remote copy information for "
 2286                      "volume %s", volume_name)
 2287             return None
 2288 
 2289         relationship = self.ssh.lsrcrelationship(vol_attrs['RC_name'])
 2290         return relationship[0] if len(relationship) > 0 else None
 2291 
 2292     def delete_rc_volume(self, volume_name, target_vol=False,
 2293                          force_unmap=True, retain_aux_volume=False):
 2294         vol_name = volume_name
 2295         if target_vol:
 2296             vol_name = storwize_const.REPLICA_AUX_VOL_PREFIX + volume_name
 2297 
 2298         try:
 2299             rel_info = self.get_relationship_info(vol_name)
 2300             if rel_info:
 2301                 self.delete_relationship(vol_name)
 2302             # Delete change volume
 2303             self.delete_vdisk(
 2304                 storwize_const.REPLICA_CHG_VOL_PREFIX + vol_name,
 2305                 force_unmap=force_unmap,
 2306                 force_delete=False)
 2307             # Retain the aux volume after retyping from a mirror to a
 2308             # non-mirror storage template, or on deletion of the primary
 2309             # volume, depending on the user's choice of the
 2310             # storwize_svc_retain_aux_volume config option.
 2311             # The default value is False.
 2312             if (retain_aux_volume is False and target_vol) or not target_vol:
 2313                 self.delete_vdisk(vol_name,
 2314                                   force_unmap=force_unmap,
 2315                                   force_delete=False)
 2316         except Exception as e:
 2317             msg = (_('Unable to delete the volume '
 2318                      '%(vol)s. Exception: %(err)s.')
 2319                    % {'vol': vol_name, 'err': e})
 2320             LOG.exception(msg)
 2321             raise exception.VolumeDriverException(message=msg)
 2322 
 2323     def switch_relationship(self, relationship, aux=True):
 2324         self.ssh.switchrelationship(relationship, aux)
 2325 
 2326     # Replication consistency group (rccg) helpers.
 2327     def chrcrelationship(self, relationship, rccg=None):
 2328         rels = self.ssh.lsrcrelationship(relationship)[0]
 2329         if rccg and rels['consistency_group_name'] == rccg:
 2330         LOG.info('relationship %(rel)s is already added to group %(grp)s.',
 2331                      {'rel': relationship, 'grp': rccg})
 2332             return
 2333         if not rccg and rels['consistency_group_name'] == '':
 2334             LOG.info('relationship %(rel)s is already removed from the group',
 2335                      {'rel': relationship})
 2336             return
 2337         self.ssh.chrcrelationship(relationship, rccg)
 2338 
 2339     def get_rccg(self, rccg):
 2340         return self.ssh.lsrcconsistgrp(rccg)
 2341 
 2342     def create_rccg(self, rccg, system):
 2343         self.ssh.mkrcconsistgrp(rccg, system)
 2344 
 2345     def delete_rccg(self, rccg):
 2346         if self.ssh.lsrcconsistgrp(rccg):
 2347             self.ssh.rmrcconsistgrp(rccg)
 2348 
 2349     def start_rccg(self, rccg, primary=None):
 2350         self.ssh.startrcconsistgrp(rccg, primary)
 2351 
 2352     def stop_rccg(self, rccg, access=False):
 2353         self.ssh.stoprcconsistgrp(rccg, access)
 2354 
 2355     def switch_rccg(self, rccg, aux=True):
 2356         self.ssh.switchrcconsistgrp(rccg, aux)
 2357 
 2358     def get_rccg_info(self, volume_name):
 2359         vol_attrs = self.get_vdisk_attributes(volume_name)
 2360         if not vol_attrs or not vol_attrs['RC_name']:
 2361             LOG.warning("Unable to get remote copy information for "
 2362                         "volume %s", volume_name)
 2363             return None
 2364 
 2365         rcrel = self.ssh.lsrcrelationship(vol_attrs['RC_name'])
 2366         if len(rcrel) > 0 and rcrel[0]['consistency_group_name']:
 2367             return self.ssh.lsrcconsistgrp(rcrel[0]['consistency_group_name'])
 2368         else:
 2369             return None
 2370 
 2371     def get_partnership_info(self, system_name):
 2372         partnership = self.ssh.lspartnership(system_name)
 2373         return partnership[0] if len(partnership) > 0 else None
 2374 
 2375     def get_partnershipcandidate_info(self, system_name):
 2376         candidates = self.ssh.lspartnershipcandidate()
 2377         for candidate in candidates:
 2378             if system_name == candidate['name']:
 2379                 return candidate
 2380         return None
 2381 
 2382     def mkippartnership(self, ip_v4, bandwidth=1000, copyrate=50):
 2383         self.ssh.mkippartnership(ip_v4, bandwidth, copyrate)
 2384 
 2385     def mkfcpartnership(self, system_name, bandwidth=1000, copyrate=50):
 2386         self.ssh.mkfcpartnership(system_name, bandwidth, copyrate)
 2387 
 2388     def chpartnership(self, partnership_id):
 2389         self.ssh.chpartnership(partnership_id)
 2390 
 2391     def delete_vdisk(self, vdisk, force_unmap, force_delete):
 2392         """Ensures that vdisk is not part of FC mapping and deletes it."""
 2393         LOG.debug('Enter: delete_vdisk: vdisk %s.', vdisk)
 2394         if not self.is_vdisk_defined(vdisk):
 2395             LOG.info('Tried to delete non-existent vdisk %s.', vdisk)
 2396             return
 2397         self.ensure_vdisk_no_fc_mappings(vdisk, allow_snaps=True,
 2398                                          allow_fctgt=True)
 2399         self.ssh.rmvdisk(vdisk,
 2400                          force_unmap=force_unmap,
 2401                          force_delete=force_delete)
 2402         LOG.debug('Leave: delete_vdisk: vdisk %s.', vdisk)
 2403 
 2404     def create_copy(self, src, tgt, src_id, config, opts,
 2405                     full_copy, state, pool=None):
 2406         """Create a new snapshot or clone using FlashCopy."""
 2407         LOG.debug('Enter: create_copy: snapshot %(src)s to %(tgt)s.',
 2408                   {'tgt': tgt, 'src': src})
 2409 
 2410         src_attrs = self.get_vdisk_attributes(src)
 2411         if src_attrs is None:
 2412             msg = (_('create_copy: Source vdisk %(src)s (%(src_id)s) '
 2413                      'does not exist.') % {'src': src, 'src_id': src_id})
 2414             LOG.error(msg)
 2415             raise exception.VolumeDriverException(message=msg)
 2416 
 2417         src_size = src_attrs['capacity']
 2418         # Use the pool of the source vdisk unless a pool is specified.
 2419         if not pool:
 2420             pool = src_attrs['mdisk_grp_name']
 2421 
 2422         opts['iogrp'] = self.select_io_group(state, opts, pool)
 2423         self.create_vdisk(tgt, src_size, 'b', pool, opts)
 2424         timeout = config.storwize_svc_flashcopy_timeout
 2425         try:
 2426             self.run_flashcopy(src, tgt, timeout,
 2427                                opts['flashcopy_rate'],
 2428                                full_copy=full_copy)
 2429         except Exception:
 2430             with excutils.save_and_reraise_exception():
 2431                 self.delete_vdisk(tgt, force_unmap=False, force_delete=True)
 2432 
 2433         LOG.debug('Leave: create_copy: snapshot %(tgt)s from '
 2434                   'vdisk %(src)s.',
 2435                   {'tgt': tgt, 'src': src})
 2436 
 2437     def extend_vdisk(self, vdisk, amount):
 2438         self.ssh.expandvdisksize(vdisk, amount)
 2439 
 2440     def add_vdisk_copy(self, vdisk, dest_pool, volume_type, state, config,
 2441                        auto_delete=False):
 2442         """Add a vdisk copy in the given pool."""
 2443         resp = self.ssh.lsvdiskcopy(vdisk)
 2444         if len(resp) > 1:
 2445             msg = (_('add_vdisk_copy failed: A copy of volume %s exists. '
 2446                      'Adding another copy would exceed the limit of '
 2447                      '2 copies.') % vdisk)
 2448             raise exception.VolumeDriverException(message=msg)
 2449         orig_copy_id = resp[0].get("copy_id", None)
 2450 
 2451         if orig_copy_id is None:
 2452             msg = (_('add_vdisk_copy started without a vdisk copy in the '
 2453                      'expected pool.'))
 2454             LOG.error(msg)
 2455             raise exception.VolumeDriverException(message=msg)
 2456 
 2457         if volume_type is None:
 2458             opts = self.get_vdisk_params(config, state, None)
 2459         else:
 2460             opts = self.get_vdisk_params(config, state, volume_type['id'],
 2461                                          volume_type=volume_type)
 2462         is_dr_pool = self.is_data_reduction_pool(dest_pool)
 2463         if is_dr_pool and opts['rsize'] != -1:
 2464             self.check_data_reduction_pool_params(opts)
 2465         params = self._get_vdisk_create_params(opts, is_dr_pool)
 2466         try:
 2467             new_copy_id = self.ssh.addvdiskcopy(vdisk, dest_pool, params,
 2468                                                 auto_delete)
 2469         except exception.VolumeBackendAPIException as e:
 2470             msg = (_('Unable to add vdiskcopy for volume %(vol)s. '
 2471                      'Exception: %(err)s.')
 2472                    % {'vol': vdisk, 'err': e})
 2473             LOG.exception(msg)
 2474             raise exception.VolumeDriverException(message=msg)
 2475         return (orig_copy_id, new_copy_id)
 2476 
 2477     def is_vdisk_copy_synced(self, vdisk, copy_id):
 2478         sync = self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0]['sync']
 2479         if sync == 'yes':
 2480             return True
 2481         return False
 2482 
 2483     def rm_vdisk_copy(self, vdisk, copy_id):
 2484         self.ssh.rmvdiskcopy(vdisk, copy_id)
 2485 
 2486     def lsvdiskcopy(self, vdisk, copy_id=None):
 2487         return self.ssh.lsvdiskcopy(vdisk, copy_id)
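
      # Illustrative sketch (not driver code): a typical add-copy workflow
      # built from the helpers above; names are hypothetical:
      #
      #     orig_id, new_id = helpers.add_vdisk_copy('volume-1', 'pool2',
      #                                              None, state, config)
      #     while not helpers.is_vdisk_copy_synced('volume-1', new_id):
      #         greenthread.sleep(10)
      #     helpers.rm_vdisk_copy('volume-1', orig_id)  # keep the new copy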
 2488 
 2489     @staticmethod
 2490     def can_migrate_to_host(host, state):
 2491         if 'location_info' not in host['capabilities']:
 2492             return None
 2493         info = host['capabilities']['location_info']
 2494         try:
 2495             (dest_type, dest_id, dest_pool) = info.split(':')
 2496         except ValueError:
 2497             return None
 2498         if (dest_type != 'StorwizeSVCDriver' or dest_id != state['system_id']):
 2499             return None
 2500         return dest_pool
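
      # For example (illustrative values): a host reporting
      # capabilities['location_info'] of
      # 'StorwizeSVCDriver:<system_id>:pool2' matches this backend when
      # <system_id> equals state['system_id'], and 'pool2' is returned as
      # the migration destination pool.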
 2501 
 2502     def add_vdisk_qos(self, vdisk, qos):
 2503         """Add the QoS configuration to the volume."""
 2504         for key, value in qos.items():
 2505             if key in self.svc_qos_keys.keys():
 2506                 param = self.svc_qos_keys[key]['param']
 2507                 self.ssh.chvdisk(vdisk, ['-' + param, str(value)])
 2508 
 2509     def update_vdisk_qos(self, vdisk, qos):
 2510         """Update all the supported QoS parameters on a vdisk.
 2511 
 2512         svc_qos_keys holds all the supported QoS parameters. Going
 2513         through this dict, we set new values for all of them. If a
 2514         parameter is present in the given QoS configuration, its value
 2515         is taken from there; if not, it is reset to its default.
 2516         """
 2517         for key, value in self.svc_qos_keys.items():
 2518             param = value['param']
 2519             if key in qos.keys():
 2520                 # If the value is set in QoS, take the value from
 2521                 # the QoS configuration.
 2522                 v = qos[key]
 2523             else:
 2524                 # If not, set the value to default.
 2525                 v = value['default']
 2526             self.ssh.chvdisk(vdisk, ['-' + param, str(v)])
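
      # Illustrative shape of svc_qos_keys assumed by these QoS helpers
      # (the actual mapping is defined elsewhere in this class):
      #
      #     svc_qos_keys = {'IOThrottling': {'param': 'rate',
      #                                      'default': '0',
      #                                      'type': int}}
      #
      # With that shape, qos = {'IOThrottling': 500} results in the CLI
      # parameters ['-rate', '500'] being passed to chvdisk.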
 2527 
 2528     def disable_vdisk_qos(self, vdisk, qos):
 2529         """Disable the QoS."""
 2530         for key, value in qos.items():
 2531             if key in self.svc_qos_keys.keys():
 2532                 param = self.svc_qos_keys[key]['param']
 2533                 # Take the default value.
 2534                 value = self.svc_qos_keys[key]['default']
 2535                 self.ssh.chvdisk(vdisk, ['-' + param, value])
 2536 
 2537     def change_vdisk_options(self, vdisk, changes, opts, state):
 2538         change_value = {'warning': '', 'easytier': '', 'autoexpand': ''}
 2539         if 'warning' in opts:
 2540             change_value['warning'] = '%s%%' % str(opts['warning'])
 2541         if 'easytier' in opts:
 2542             change_value['easytier'] = 'on' if opts['easytier'] else 'off'
 2543         if 'autoexpand' in opts:
 2544             change_value['autoexpand'] = 'on' if opts['autoexpand'] else 'off'
 2545 
 2546         for key in changes:
 2547             self.ssh.chvdisk(vdisk, ['-' + key, change_value[key]])
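
      # For example (illustrative values): changes = ['warning'] with
      # opts = {'warning': 80} passes ['-warning', '80%'] to chvdisk.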
 2548 
 2549     def change_vdisk_iogrp(self, vdisk, state, iogrp):
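        # iogrp is a (new, old) I/O group pair: the vdisk is moved to
        # iogrp[0], access is added for iogrp[0], and access is removed
        # for iogrp[1].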
 2550         if state['code_level'] < (6, 4, 0, 0):
 2551             LOG.debug('Ignore change IO group as storage code level is '
 2552                       '%(code_level)s, below the required 6.4.0.0.',
 2553                       {'code_level': state['code_level']})
 2554         else:
 2555             self.ssh.movevdisk(vdisk, str(iogrp[0]))
 2556             self.ssh.addvdiskaccess(vdisk, str(iogrp[0]))
 2557             self.ssh.rmvdiskaccess(vdisk, str(iogrp[1]))
 2558 
 2559     def vdisk_by_uid(self, vdisk_uid):
 2560         """Returns the properties of the vdisk with the specified UID.
 2561 
 2562         Returns None if no such disk exists.
 2563         """
 2564 
 2565         vdisks = self.ssh.lsvdisks_from_filter('vdisk_UID', vdisk_uid)
 2566 
 2567         if len(vdisks) == 0:
 2568             return None
 2569 
 2570         if len(vdisks) != 1:
 2571             msg = (_('Expected a single vdisk to be returned from '
 2572                      'lsvdisk when filtering on vdisk_UID; %(count)s '
 2573                      'were returned.') % {'count': len(vdisks)})
 2574             LOG.error(msg)
 2575             raise exception.VolumeBackendAPIException(data=msg)
 2576 
 2577         vdisk = vdisks.result[0]
 2578 
 2579         return self.ssh.lsvdisk(vdisk['name'])
 2580 
 2581     def is_vdisk_in_use(self, vdisk):
 2582         """Returns True if the specified vdisk is mapped to at least 1 host."""
 2583         resp = self.ssh.lsvdiskhostmap(vdisk)
 2584         return len(resp) != 0
 2585 
 2586     def rename_vdisk(self, vdisk, new_name):
 2587         self.ssh.chvdisk(vdisk, ['-name', new_name])
 2588 
 2589     def change_vdisk_primary_copy(self, vdisk, copy_id):
 2590         self.ssh.chvdisk(vdisk, ['-primary', copy_id])
 2591 
 2592     def migratevdisk(self, vdisk, dest_pool, copy_id='0'):
 2593         self.ssh.migratevdisk(vdisk, dest_pool, copy_id)
 2594 
 2595     def is_system_topology_hyperswap(self, state):
 2596         """Check whether the system topology is hyperswap.
 2597 
 2598         Returns True only on code level 7.6.0.0 or later with a
 2599         hyperswap topology."""
 2600         if state['code_level'] < (7, 6, 0, 0):
 2601             LOG.debug('Hyperswap is not supported as the storage '
 2602                       'code_level is %(code_level)s, below '
 2603                       'the required 7.6.0.0.',
 2604                       {'code_level': state['code_level']})
 2605         else:
 2606             if state['topology'] == 'hyperswap':
 2607                 return True
 2608             else:
 2609                 LOG.debug('Hyperswap is not supported as the storage '
 2610                           'system topology is not hyperswap.')
 2611         return False
 2612 
 2613     def check_hyperswap_pool(self, pool, peer_pool):
 2614         # Check the hyperswap pools.
 2615         if not peer_pool:
 2616             raise exception.InvalidInput(
 2617                 reason=_('The peer pool is necessary for a hyperswap '
 2618                          'volume; please configure the peer pool.'))
 2619         pool_attr = self.get_pool_attrs(pool)
 2620         peer_pool_attr = self.get_pool_attrs(peer_pool)
 2621         if not peer_pool_attr:
 2622             raise exception.InvalidInput(
 2623                 reason=_('The hyperswap peer pool %s '
 2624                          'is invalid.') % peer_pool)
 2625 
 2626         if not pool_attr['site_id'] or not peer_pool_attr['site_id']:
 2627             raise exception.InvalidInput(
 2628                 reason=_('A site_id on the pools is necessary for a '
 2629                          'hyperswap volume, but the pool or the peer '
 2630                          'pool has no site_id.'))
 2631 
 2632         if pool_attr['site_id'] == peer_pool_attr['site_id']:
 2633             raise exception.InvalidInput(
 2634                 reason=_('A hyperswap volume must be configured across '
 2635                          'two independent sites, but pool %(pool)s is on '
 2636                          'the same site as peer_pool %(peer_pool)s.') %
 2637                 {'pool': pool, 'peer_pool': peer_pool})
 2638 
 2639     def pretreatment_before_revert(self, name):
 2640         mapping_ids = self._get_vdisk_fc_mappings(name)
 2641         for map_id in mapping_ids:
 2642             attrs = self._get_flashcopy_mapping_attributes(map_id)
 2643             if not attrs:
 2644                 continue
 2645             target = attrs['target_vdisk_name']
 2646             copy_rate = attrs['copy_rate']
 2647             progress = attrs['progress']
 2648             status = attrs['status']
 2649             if status in ['copying', 'prepared'] and target == name:
 2650                 if copy_rate != '0' and progress != '100':
 2651                     msg = (_('Cannot start revert since fcmap %(map_id)s '
 2652                              'is in progress; progress: %(progress)s.')
 2653                            % {'map_id': map_id, 'progress': progress})
 2654                     LOG.error(msg)
 2655                     raise exception.VolumeDriverException(message=msg)
 2656                 elif copy_rate != '0' and progress == '100':
 2657                     LOG.debug('Stopping completed clone fcmap %(map_id)s.',
 2658                               {'map_id': map_id})
 2659                     self.ssh.stopfcmap(map_id)
 2660 
 2661 
 2662 class CLIResponse(object):
 2663     """Parse SVC CLI output and generate iterable."""
 2664 
 2665     def __init__(self, raw, ssh_cmd=None, delim='!', with_header=True):
 2666         super(CLIResponse, self).__init__()
 2667         if ssh_cmd:
 2668             self.ssh_cmd = ' '.join(ssh_cmd)
 2669         else:
 2670             self.ssh_cmd = 'None'
 2671         self.raw = raw
 2672         self.delim = delim
 2673         self.with_header = with_header
 2674         self.result = self._parse()
 2675 
 2676     def select(self, *keys):
 2677         for a in self.result:
 2678             vs = []
 2679             for k in keys:
 2680                 v = a.get(k, None)
 2681                 if isinstance(v, six.string_types) or v is None:
 2682                     v = [v]
 2683                 if isinstance(v, list):
 2684                     vs.append(v)
 2685             for item in zip(*vs):
 2686                 if len(item) == 1:
 2687                     yield item[0]
 2688                 else:
 2689                     yield item
 2690 
 2691     def __getitem__(self, key):
 2692         try:
 2693             return self.result[key]
 2694         except KeyError:
 2695             msg = (_('Did not find the expected key %(key)s in %(fun)s: '
 2696                      '%(raw)s.') % {'key': key, 'fun': self.ssh_cmd,
 2697                                     'raw': self.raw})
 2698             raise exception.VolumeBackendAPIException(data=msg)
 2699 
 2700     def __iter__(self):
 2701         for a in self.result:
 2702             yield a
 2703 
 2704     def __len__(self):
 2705         return len(self.result)
 2706 
 2707     def _parse(self):
 2708         def get_reader(content, delim):
 2709             for line in content.lstrip().splitlines():
 2710                 line = line.strip()
 2711                 if line:
 2712                     yield line.split(delim)
 2713                 else:
 2714                     yield []
 2715 
 2716         if isinstance(self.raw, six.string_types):
 2717             stdout, stderr = self.raw, ''
 2718         else:
 2719             stdout, stderr = self.raw
 2720         reader = get_reader(stdout, self.delim)
 2721         result = []
 2722 
 2723         if self.with_header:
 2724             hds = tuple()
 2725             for row in reader:
 2726                 hds = row
 2727                 break
 2728             for row in reader:
 2729                 cur = dict()
 2730                 if len(hds) != len(row):
 2731                     msg = (_('Unexpected CLI response: header/row mismatch. '
 2732                              'header: %(header)s, row: %(row)s.')
 2733                            % {'header': hds,
 2734                               'row': row})
 2735                     raise exception.VolumeBackendAPIException(data=msg)
 2736                 for k, v in zip(hds, row):
 2737                     CLIResponse.append_dict(cur, k, v)
 2738                 result.append(cur)
 2739         else:
 2740             cur = dict()
 2741             for row in reader:
 2742                 if row:
 2743                     CLIResponse.append_dict(cur, row[0], ' '.join(row[1:]))
 2744                 elif cur:  # start new section
 2745                     result.append(cur)
 2746                     cur = dict()
 2747             if cur:
 2748                 result.append(cur)
 2749         return result
 2750 
 2751     @staticmethod
 2752     def append_dict(dict_, key, value):
 2753         key, value = key.strip(), value.strip()
 2754         obj = dict_.get(key, None)
 2755         if obj is None:
 2756             dict_[key] = value
 2757         elif isinstance(obj, list):
 2758             obj.append(value)
 2759             dict_[key] = obj
 2760         else:
 2761             dict_[key] = [obj, value]
 2762         return dict_
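
# Illustrative usage sketch (not driver code): parsing '!'-delimited CLI
# output with a header row; the command and values are hypothetical:
#
#     raw = 'id!name!capacity\n1!vol1!10\n2!vol2!20\n'
#     resp = CLIResponse(raw, ssh_cmd=['lsvdisk'], delim='!',
#                        with_header=True)
#     len(resp)                              # 2
#     resp[0]['name']                        # 'vol1'
#     list(resp.select('name', 'capacity'))  # [('vol1', '10'),
#                                            #  ('vol2', '20')]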
 2763 
 2764 
 2765 class StorwizeSVCCommonDriver(san.SanDriver,
 2766                               driver.ManageableVD,
 2767                               driver.MigrateVD,
 2768                               driver.CloneableImageVD):
 2769     """IBM Storwize V7000 SVC abstract base class for iSCSI/FC volume drivers.
 2770 
 2771     Version history:
 2772 
 2773     .. code-block:: none
 2774 
 2775         1.0 - Initial driver
 2776         1.1 - FC support, create_cloned_volume, volume type support,
 2777               get_volume_stats, minor bug fixes
 2778         1.2.0 - Added retype
 2779         1.2.1 - Code refactor, improved exception handling
 2780         1.2.2 - Fix bug #1274123 (races in host-related functions)
 2781         1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim
 2782                 to lsfabric, clear unused data from connections, ensure
 2783                 matching WWPNs by comparing lower case)
 2784         1.2.4 - Fix bug #1278035 (async migration/retype)
 2785         1.2.5 - Added support for manage_existing (unmanage is inherited)
 2786         1.2.6 - Added QoS support in terms of I/O throttling rate
 2787         1.3.1 - Added support for volume replication
 2788         1.3.2 - Added support for consistency group
 2789         1.3.3 - Update driver to use ABC metaclasses
 2790         2.0 - Code refactor, split init file and placed shared methods
 2791               for FC and iSCSI within the StorwizeSVCCommonDriver class
 2792         2.1 - Added replication V2 support to the global/metro mirror
 2793               mode
 2794         2.1.1 - Update replication to version 2.1
 2795     """
 2796 
 2797     VERSION = "2.1.1"
 2798     VDISKCOPYOPS_INTERVAL = 600
 2799     DEFAULT_GR_SLEEP = random.randint(20, 500) / 100.0
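      # DEFAULT_GR_SLEEP is computed once at class definition: a random
      # pause between 0.20 and 5.00 seconds used between SSH retries in
      # _ssh_execute.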
 2800 
 2801     def __init__(self, *args, **kwargs):
 2802         super(StorwizeSVCCommonDriver, self).__init__(*args, **kwargs)
 2803         self.configuration.append_config_values(storwize_svc_opts)
 2804         self._backend_name = self.configuration.safe_get('volume_backend_name')
 2805         self.active_ip = self.configuration.san_ip
 2806         self.inactive_ip = self.configuration.storwize_san_secondary_ip
 2807         self._master_backend_helpers = StorwizeHelpers(self._run_ssh)
 2808         self._aux_backend_helpers = None
 2809         self._helpers = self._master_backend_helpers
 2810         self._vdiskcopyops = {}
 2811         self._vdiskcopyops_loop = None
 2812         self.protocol = None
 2813         self._master_state = {'storage_nodes': {},
 2814                               'enabled_protocols': set(),
 2815                               'compression_enabled': False,
 2816                               'available_iogrps': [],
 2817                               'system_name': None,
 2818                               'system_id': None,
 2819                               'code_level': None,
 2820                               }
 2821         self._state = self._master_state
 2822         self._aux_state = {'storage_nodes': {},
 2823                            'enabled_protocols': set(),
 2824                            'compression_enabled': False,
 2825                            'available_iogrps': [],
 2826                            'system_name': None,
 2827                            'system_id': None,
 2828                            'code_level': None,
 2829                            }
 2830         self._active_backend_id = kwargs.get('active_backend_id')
 2831 
 2832         # This list is used by ensure_export to verify volume existence.
 2833         self._volumes_list = []
 2834 
 2835         # This dictionary is used to map each replication target to its
 2836         # replication manager object.
 2837         self.replica_manager = {}
 2838 
 2839         # One driver can be configured with only one replication
 2840         # target to fail over to.
 2841         self._replica_target = {}
 2842 
 2843         # This boolean is used to indicate whether replication is supported
 2844         # by this storage.
 2845         self._replica_enabled = False
 2846 
 2847         # This list is used to save the supported replication modes.
 2848         self._supported_replica_types = []
 2849 
 2850         # This is used to save the available pools in failed-over status
 2851         self._secondary_pools = None
 2852 
 2853         # This dictionary is used to save pools information.
 2854         self._stats = {}
 2855 
 2856         # Storwize has a limitation that it cannot accept more than 3
 2857         # new SSH connections within 1 second, so slow down initialization.
 2858         time.sleep(1)
 2859 
 2860     def do_setup(self, ctxt):
 2861         """Check that we have all configuration details from the storage."""
 2862         LOG.debug('enter: do_setup')
 2863 
 2864         # v2.1 replication setup
 2865         self._get_storwize_config()
 2866 
 2867         # Validate that the pool exists
 2868         self._validate_pools_exist()
 2869 
 2870         # Get list of all volumes
 2871         self._get_all_volumes()
 2872 
 2873         # Update the pool stats
 2874         self._update_volume_stats()
 2875 
 2876         # Save the pool stats information in helpers class.
 2877         self._master_backend_helpers.stats = self._stats
 2878 
 2879         # Build the list of in-progress vdisk copy operations
 2880         if ctxt is None:
 2881             admin_context = context.get_admin_context()
 2882         else:
 2883             admin_context = ctxt.elevated()
 2884         volumes = objects.VolumeList.get_all_by_host(admin_context, self.host)
 2885 
 2886         for volume in volumes:
 2887             metadata = volume.admin_metadata
 2888             curr_ops = metadata.get('vdiskcopyops', None)
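            # curr_ops is stored as 'src:dst[;src:dst...]'; e.g. the
            # (illustrative) string '1:2;1:3' parses below to
            # [('1', '2'), ('1', '3')].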
 2889             if curr_ops:
 2890                 ops = [tuple(x.split(':')) for x in curr_ops.split(';')]
 2891                 self._vdiskcopyops[volume['id']] = ops
 2892 
 2893         # If vdisk copy ops exist in the database, start the looping call.
 2894         if len(self._vdiskcopyops) >= 1:
 2895             self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
 2896                 self._check_volume_copy_ops)
 2897             self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
 2898         LOG.debug('leave: do_setup')
 2899 
 2900     def _update_storwize_state(self, state, helper):
 2901         # Get storage system name, id, and code level
 2902         state.update(helper.get_system_info())
 2903 
 2904         # Check if compression is supported
 2905         state['compression_enabled'] = helper.compression_enabled()
 2906 
 2907         # Get the available I/O groups
 2908         state['available_iogrps'] = helper.get_available_io_groups()
 2909 
 2910         # Get the iSCSI and FC names of the Storwize/SVC nodes
 2911         state['storage_nodes'] = helper.get_node_info()
 2912 
 2913         # Add the iSCSI IP addresses and WWPNs to the storage node info
 2914         helper.add_iscsi_ip_addrs(state['storage_nodes'])
 2915         helper.add_fc_wwpns(state['storage_nodes'], state['code_level'])
 2916 
 2917         # For each node, check what connection modes it supports.  Delete any
 2918         # nodes that do not support any types (may be partially configured).
 2919         to_delete = []
 2920         for k, node in state['storage_nodes'].items():
 2921             if ((len(node['ipv4']) or len(node['ipv6']))
 2922                     and len(node['iscsi_name'])):
 2923                 node['enabled_protocols'].append('iSCSI')
 2924                 state['enabled_protocols'].add('iSCSI')
 2925             if len(node['WWPN']):
 2926                 node['enabled_protocols'].append('FC')
 2927                 state['enabled_protocols'].add('FC')
 2928             if not len(node['enabled_protocols']):
 2929                 to_delete.append(k)
 2930         for delkey in to_delete:
 2931             del state['storage_nodes'][delkey]
 2932 
 2933     def _get_backend_pools(self):
 2934         if not self._active_backend_id:
 2935             return self.configuration.storwize_svc_volpool_name
 2936         elif not self._secondary_pools:
 2937             self._secondary_pools = [self._replica_target.get('pool_name')]
 2938         return self._secondary_pools
 2939 
 2940     def _validate_pools_exist(self):
 2941         # Validate that the pool exists
 2942         pools = self._get_backend_pools()
 2943         for pool in pools:
 2944             if not self._helpers.is_pool_defined(pool):
 2945                 reason = (_('Failed getting details for pool %s.') % pool)
 2946                 raise exception.InvalidInput(reason=reason)
 2947 
 2948     def _get_all_volumes(self):
 2949         # Get list of all volumes
 2950         pools = self._get_backend_pools()
 2951         for pool in pools:
 2952             pool_vols = self._helpers.get_pool_volumes(pool)
 2953             for volume in pool_vols:
 2954                 self._volumes_list.append(volume['name'])
 2955 
 2956     def check_for_setup_error(self):
 2957         """Ensure that the flags are set properly."""
 2958         LOG.debug('enter: check_for_setup_error')
 2959 
 2960         # Check that we have the system ID information
 2961         if self._state['system_name'] is None:
 2962             exception_msg = (_('Unable to determine system name.'))
 2963             raise exception.VolumeBackendAPIException(data=exception_msg)
 2964         if self._state['system_id'] is None:
 2965             exception_msg = (_('Unable to determine system id.'))
 2966             raise exception.VolumeBackendAPIException(data=exception_msg)
 2967 
 2968         # Make sure we have at least one node configured
 2969         if not len(self._state['storage_nodes']):
 2970             msg = _('do_setup: No configured nodes.')
 2971             LOG.error(msg)
 2972             raise exception.VolumeDriverException(message=msg)
 2973 
 2974         if self.protocol not in self._state['enabled_protocols']:
 2975             # TODO(mc_nair): improve this error message by looking at
 2976             # self._state['enabled_protocols'] to tell user what driver to use
 2977             raise exception.InvalidInput(
 2978                 reason=_('The storage device does not support %(prot)s. '
 2979                          'Please configure the device to support %(prot)s or '
 2980                          'switch to a driver using a different protocol.')
 2981                 % {'prot': self.protocol})
 2982 
 2983         required_flags = ['san_ip', 'san_ssh_port', 'san_login',
 2984                           'storwize_svc_volpool_name']
 2985         for flag in required_flags:
 2986             if not self.configuration.safe_get(flag):
 2987                 raise exception.InvalidInput(reason=_('%s is not set.') % flag)
 2988 
 2989         # Ensure that either password or keyfile were set
 2990         if not (self.configuration.san_password or
 2991                 self.configuration.san_private_key):
 2992             raise exception.InvalidInput(
 2993                 reason=_('Password or SSH private key is required for '
 2994                          'authentication: set either san_password or '
 2995                          'san_private_key option.'))
 2996 
 2997         opts = self._helpers.build_default_opts(self.configuration)
 2998         self._helpers.check_vdisk_opts(self._state, opts)
 2999 
 3000         LOG.debug('leave: check_for_setup_error')
 3001 
 3002     def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1):
 3003         cinder_utils.check_ssh_injection(cmd_list)
 3004         command = ' '.join(cmd_list)
 3005         if not self.sshpool:
 3006             try:
 3007                 self.sshpool = self._set_up_sshpool(self.active_ip)
 3008             except paramiko.SSHException:
 3009                 LOG.warning('Unable to use san_ip to create SSHPool. Now '
 3010                             'attempting to use storwize_san_secondary_ip '
 3011                             'to create SSHPool.')
 3012                 if self._toggle_ip():
 3013                     self.sshpool = self._set_up_sshpool(self.active_ip)
 3014                 else:
 3015                     LOG.warning('Unable to create SSHPool using san_ip, '
 3016                                 'and storwize_san_secondary_ip cannot be '
 3017                                 'used since it is not '
 3018                                 'configured.')
 3019                     raise
 3020         try:
 3021             return self._ssh_execute(self.sshpool, command,
 3022                                      check_exit_code, attempts)
 3023 
 3024         except Exception:
 3025             # Try creating an SSHPool using storwize_san_secondary_ip
 3026             # before raising an error.
 3027             try:
 3028                 if self._toggle_ip():
 3029                     LOG.warning("Unable to execute SSH command with "
 3030                                 "%(inactive)s. Attempting to execute SSH "
 3031                                 "command with %(active)s.",
 3032                                 {'inactive': self.inactive_ip,
 3033                                  'active': self.active_ip})
 3034                     self.sshpool = self._set_up_sshpool(self.active_ip)
 3035                     return self._ssh_execute(self.sshpool, command,
 3036                                              check_exit_code, attempts)
 3037                 else:
 3038                     LOG.warning('Not able to use '
 3039                                 'storwize_san_secondary_ip since it is '
 3040                                 'not configured.')
 3041                     raise
 3042             except Exception:
 3043                 with excutils.save_and_reraise_exception():
 3044                     LOG.error("Error running SSH command: %s",
 3045                               command)
 3046 
 3047     def _set_up_sshpool(self, ip):
 3048         password = self.configuration.san_password
 3049         privatekey = self.configuration.san_private_key
 3050         min_size = self.configuration.ssh_min_pool_conn
 3051         max_size = self.configuration.ssh_max_pool_conn
 3052         sshpool = ssh_utils.SSHPool(
 3053             ip,
 3054             self.configuration.san_ssh_port,
 3055             self.configuration.ssh_conn_timeout,
 3056             self.configuration.san_login,
 3057             password=password,
 3058             privatekey=privatekey,
 3059             min_size=min_size,
 3060             max_size=max_size)
 3061 
 3062         return sshpool
 3063 
 3064     def _ssh_execute(self, sshpool, command,
 3065                      check_exit_code=True, attempts=1):
 3066         try:
 3067             with sshpool.item() as ssh:
 3068                 while attempts > 0:
 3069                     attempts -= 1
 3070                     try:
 3071                         return processutils.ssh_execute(
 3072                             ssh,
 3073                             command,
 3074                             check_exit_code=check_exit_code,
 3075                             sanitize_stdout=False)
 3076                     except Exception as e:
 3077                         LOG.error('An error occurred: %s', e)
 3078                         last_exception = e
 3079                         greenthread.sleep(self.DEFAULT_GR_SLEEP)
 3080                 try:
 3081                     std_err = last_exception.stderr
 3082                     if std_err is not None and not self._is_ascii(std_err):
 3083                         std_err = encodeutils.safe_decode(std_err,
 3084                                                           errors='ignore')
 3085                         LOG.error("The stderr has non-ascii characters. "
 3086                                   "Please check the error code.\n"
 3087                                   "Stderr: %s", std_err)
 3088                         std_err = std_err.split()[0]
 3089                     raise processutils.ProcessExecutionError(
 3090                         exit_code=last_exception.exit_code,
 3091                         stdout=last_exception.stdout,
 3092                         stderr=std_err,
 3093                         cmd=last_exception.cmd)
 3094                 except AttributeError:
 3095                     raise processutils.ProcessExecutionError(
 3096                         exit_code=-1,
 3097                         stdout="",
 3098                         stderr="Error running SSH command",
 3099                         cmd=command)
 3100 
 3101         except Exception:
 3102             with excutils.save_and_reraise_exception():
 3103                 LOG.error("Error running SSH command: %s", command)
 3104 
 3105     def _is_ascii(self, value):
 3106         try:
 3107             return all(ord(c) < 128 for c in value)
 3108         except TypeError:
 3109             return False
 3110 
 3111     def _toggle_ip(self):
 3112         # Change active_ip if storwize_san_secondary_ip is set.
 3113         if self.configuration.storwize_san_secondary_ip is None:
 3114             return False
 3115 
 3116         self.inactive_ip, self.active_ip = self.active_ip, self.inactive_ip
 3117         LOG.info('Toggle active_ip from %(old)s to %(new)s.',
 3118                  {'old': self.inactive_ip,
 3119                   'new': self.active_ip})
 3120         return True
 3121 
 3122     def ensure_export(self, ctxt, volume):
 3123         """Check that the volume exists on the storage.
 3124 
 3125         The system does not "export" volumes as a Linux iSCSI target does,
 3126         and therefore we just check that the volume exists on the storage.
 3127         """
 3128         volume_defined = volume['name'] in self._volumes_list
 3129 
 3130         if not volume_defined:
 3131             LOG.error('ensure_export: Volume %s not found on storage.',
 3132                       volume['name'])
 3133 
 3134     def create_export(self, ctxt, volume, connector):
 3135         model_update = None
 3136         return model_update
 3137 
 3138     def remove_export(self, ctxt, volume):
 3139         pass
 3140 
 3141     def create_export_snapshot(self, ctxt, snapshot, connector):
 3142         model_update = None
 3143         return model_update
 3144 
 3145     def remove_export_snapshot(self, ctxt, snapshot):
 3146         pass
 3147 
 3148     def _get_vdisk_params(self, type_id, volume_type=None,
 3149                           volume_metadata=None):
 3150         return self._helpers.get_vdisk_params(self.configuration,
 3151                                               self._state, type_id,
 3152                                               volume_type=volume_type,
 3153                                               volume_metadata=volume_metadata)
 3154 
 3155     def _check_if_group_type_cg_snapshot(self, volume):
 3156         if (volume.group_id and
 3157                 not volume_utils.is_group_a_cg_snapshot_type(volume.group)):
 3158             msg = _('Creating a volume with a replication or hyperswap '
 3159                     'group_id is not supported. Please add the volume '
 3160                     'to the group after volume creation.')
 3161             LOG.error(msg)
 3162             raise exception.VolumeDriverException(message=msg)
 3163 
 3164     def create_volume(self, volume):
 3165         LOG.debug('enter: create_volume: volume %s', volume['name'])
 3166         # Creating a replication or hyperswap volume with a group_id
 3167         # is not allowed.
 3168         self._check_if_group_type_cg_snapshot(volume)
 3169         opts = self._get_vdisk_params(volume['volume_type_id'],
 3170                                       volume_metadata=
 3171                                       volume.get('volume_metadata'))
 3172         ctxt = context.get_admin_context()
 3173         rep_type = self._get_volume_replicated_type(ctxt, volume)
 3174 
 3175         pool = volume_utils.extract_host(volume['host'], 'pool')
 3176         model_update = None
 3177 
 3178         if opts['volume_topology'] == 'hyperswap':
 3179             LOG.debug('Volume %s to be created is a hyperswap volume.',
 3180                       volume.name)
 3181             if not self._helpers.is_system_topology_hyperswap(self._state):
 3182                 reason = _('Create hyperswap volume failed: the system '
 3183                            'is below release 7.6.0.0 or its topology is '
 3184                            'not hyperswap.')
 3185                 raise exception.InvalidInput(reason=reason)
 3186             if opts['mirror_pool'] or rep_type:
 3187                 reason = _('Creating a hyperswap volume with a stretched '
 3188                            'cluster or replication enabled is not supported.')
 3189                 raise exception.InvalidInput(reason=reason)
 3190             if not opts['easytier']:
 3191                 msg = _('Easytier is on by default for a hyperswap '
 3192                         'volume; easytier off is not supported.')
 3193                 raise exception.VolumeDriverException(message=msg)
 3194             self._helpers.check_hyperswap_pool(pool, opts['peer_pool'])
 3195             self._helpers.create_hyperswap_volume(volume.name, volume.size,
 3196                                                   'gb', pool, opts)
 3197         else:
 3198             if opts['mirror_pool'] and rep_type:
 3199                 reason = _('Create mirror volume with replication enabled is '
 3200                            'not supported.')
 3201                 raise exception.InvalidInput(reason=reason)
 3202             opts['iogrp'] = self._helpers.select_io_group(self._state,
 3203                                                           opts, pool)
 3204             self._helpers.create_vdisk(volume['name'], str(volume['size']),
 3205                                        'gb', pool, opts)
 3206         if opts['qos']:
 3207             self._helpers.add_vdisk_qos(volume['name'], opts['qos'])
 3208 
 3209         model_update = {'replication_status':
 3210                         fields.ReplicationStatus.NOT_CAPABLE}
 3211 
 3212         if rep_type:
 3213             replica_obj = self._get_replica_obj(rep_type)
 3214             replica_obj.volume_replication_setup(ctxt, volume)
 3215             model_update = {'replication_status':
 3216                             fields.ReplicationStatus.ENABLED}
 3217 
 3218         LOG.debug('leave: create_volume:\n volume: %(vol)s\n '
 3219                   'model_update %(model_update)s',
 3220                   {'vol': volume['name'],
 3221                    'model_update': model_update})
 3222         return model_update
 3223 
 3224     def delete_volume(self, volume):
 3225         LOG.debug('enter: delete_volume: volume %s', volume['name'])
 3226         ctxt = context.get_admin_context()
 3227         if self._state['code_level'] < (7, 7, 0, 0):
 3228             force_unmap = False
 3229         else:
 3230             force_unmap = True
 3231         hyper_volume = self.is_volume_hyperswap(volume)
 3232         if hyper_volume:
 3233             LOG.debug('Volume %s to be deleted is a hyperswap '
 3234                       'volume.', volume.name)
 3235             self._helpers.delete_hyperswap_volume(volume.name,
 3236                                                   force_unmap=force_unmap,
 3237                                                   force_delete=False)
 3238             return
 3239 
 3240         rep_type = self._get_volume_replicated_type(ctxt, volume)
 3241         if rep_type:
 3242             if self._aux_backend_helpers:
 3243                 self._aux_backend_helpers.delete_rc_volume(
 3244                     volume['name'],
 3245                     target_vol=True,
 3246                     force_unmap=force_unmap,
 3247                     retain_aux_volume=self.configuration.safe_get(
 3248                         'storwize_svc_retain_aux_volume'))
 3249             if not self._active_backend_id:
 3250                 self._master_backend_helpers.delete_rc_volume(
 3251                     volume['name'], force_unmap=force_unmap)
 3252             else:
 3253                 # If it is in a failed-over state, also try to delete
 3254                 # the volume in the master backend.
 3255                 try:
 3256                     self._master_backend_helpers.delete_rc_volume(
 3257                         volume['name'], force_unmap=force_unmap)
 3258                 except Exception as ex:
 3259                     LOG.error('Failed to delete volume %(volume)s in the '
 3260                               'master backend. Exception: %(err)s.',
 3261                               {'volume': volume['name'],
 3262                                'err': ex})
 3263         else:
 3264             if self._active_backend_id:
 3265                 msg = (_('Error: deleting a non-replicated volume in '
 3266                          'failover mode is not allowed.'))
 3267                 LOG.error(msg)
 3268                 raise exception.VolumeDriverException(message=msg)
 3269             else:
 3270                 self._helpers.delete_vdisk(
 3271                     volume['name'],
 3272                     force_unmap=force_unmap,
 3273                     force_delete=False)
 3274 
 3275         if volume['id'] in self._vdiskcopyops:
 3276             del self._vdiskcopyops[volume['id']]
 3277 
 3278             if not len(self._vdiskcopyops):
 3279                 self._vdiskcopyops_loop.stop()
 3280                 self._vdiskcopyops_loop = None
 3281         LOG.debug('leave: delete_volume: volume %s', volume['name'])
 3282 
 3283     def create_snapshot(self, snapshot):
 3284         ctxt = context.get_admin_context()
 3285         try:
 3286             # TODO(zhaochy): change to use snapshot.volume
 3287             source_vol = self.db.volume_get(ctxt, snapshot['volume_id'])
 3288         except Exception:
 3289             msg = (_('create_snapshot: get source volume failed.'))
 3290             LOG.error(msg)
 3291             raise exception.VolumeDriverException(message=msg)
 3292 
 3293         rep_type = self._get_volume_replicated_type(
 3294             ctxt, None, source_vol['volume_type_id'])
 3295         if rep_type == storwize_const.GMCV:
 3296             # A GMCV volume will have problems failing back when it has
 3297             # a flash copy relationship besides its change volumes.
 3298             msg = _('create_snapshot: Create snapshot to '
 3299                     'gmcv replication volume is not allowed.')
 3300             LOG.error(msg)
 3301             raise exception.VolumeDriverException(message=msg)
 3302 
 3303         pool = volume_utils.extract_host(source_vol['host'], 'pool')
 3304         opts = self._get_vdisk_params(source_vol['volume_type_id'])
 3305 
 3306         if opts['volume_topology'] == 'hyperswap':
 3307             msg = _('create_snapshot: Create snapshot to a '
 3308                     'hyperswap volume is not allowed.')
 3309             LOG.error(msg)
 3310             raise exception.VolumeDriverException(message=msg)
 3311 
 3312         self._helpers.create_copy(snapshot['volume_name'], snapshot['name'],
 3313                                   snapshot['volume_id'], self.configuration,
 3314                                   opts, False, self._state, pool=pool)
 3315 
 3316     def delete_snapshot(self, snapshot):
 3317         if self._state['code_level'] < (7, 7, 0, 0):
 3318             force_unmap = False
 3319         else:
 3320             force_unmap = True
 3321         self._helpers.delete_vdisk(
 3322             snapshot['name'], force_unmap=force_unmap, force_delete=False)
 3323 
 3324     def create_volume_from_snapshot(self, volume, snapshot):
 3325         # Creating a volume from a snapshot with a replication or
 3326         # hyperswap group_id is not allowed.
 3327         self._check_if_group_type_cg_snapshot(volume)
 3328         opts = self._get_vdisk_params(volume['volume_type_id'],
 3329                                       volume_metadata=
 3330                                       volume.get('volume_metadata'))
 3331         pool = volume_utils.extract_host(volume['host'], 'pool')
 3332         self._helpers.create_copy(snapshot['name'], volume['name'],
 3333                                   snapshot['id'], self.configuration,
 3334                                   opts, True, self._state, pool=pool)
 3335         # The volume size is equal to the snapshot size in most
 3336         # cases. But in some scenarios, the requested volume size
 3337         # may be bigger than the snapshot size.
 3338         # SVC does not support flashcopy between two volumes
 3339         # of different sizes. So use the snapshot size to
 3340         # create the volume first and then extend the volume to
 3341         # the target size.
 3342         if volume['size'] > snapshot['volume_size']:
 3343             # Extend the newly created target volume to the expected size.
 3344             self._extend_volume_op(volume, volume['size'],
 3345                                    snapshot['volume_size'])
 3346         if opts['qos']:
 3347             self._helpers.add_vdisk_qos(volume['name'], opts['qos'])
 3348 
 3349         ctxt = context.get_admin_context()
 3350         model_update = {'replication_status':
 3351                         fields.ReplicationStatus.NOT_CAPABLE}
 3352         rep_type = self._get_volume_replicated_type(ctxt, volume)
 3353 
 3354         if rep_type:
 3355             self._validate_replication_enabled()
 3356             replica_obj = self._get_replica_obj(rep_type)
 3357             replica_obj.volume_replication_setup(ctxt, volume)
 3358             model_update = {'replication_status':
 3359                             fields.ReplicationStatus.ENABLED}
 3360         return model_update
 3361 
 3362     def create_cloned_volume(self, tgt_volume, src_volume):
 3363         """Creates a clone of the specified volume."""
 3364         # Creating a cloned volume with a replication or hyperswap
 3365         # group_id is not allowed.
 3366         self._check_if_group_type_cg_snapshot(tgt_volume)
 3367         opts = self._get_vdisk_params(tgt_volume['volume_type_id'],
 3368                                       volume_metadata=
 3369                                       tgt_volume.get('volume_metadata'))
 3370         pool = volume_utils.extract_host(tgt_volume['host'], 'pool')
 3371         self._helpers.create_copy(src_volume['name'], tgt_volume['name'],
 3372                                   src_volume['id'], self.configuration,
 3373                                   opts, True, self._state, pool=pool)
 3374 
 3375         # The source volume size is equal to the target volume size
 3376         # in most cases. But in some scenarios, the target
 3377         # volume size may be bigger than the source volume size.
 3378         # SVC does not support flashcopy between two volumes
 3379         # of different sizes. So use the source volume size to
 3380         # create the target volume first and then extend the target
 3381         # volume to the expected size.
 3382         if tgt_volume['size'] > src_volume['size']:
 3383             # Extend the newly created target volume to the expected size.
 3384             self._extend_volume_op(tgt_volume, tgt_volume['size'],
 3385                                    src_volume['size'])
 3386 
 3387         if opts['qos']:
 3388             self._helpers.add_vdisk_qos(tgt_volume['name'], opts['qos'])
 3389 
 3390         if opts['volume_topology'] == 'hyperswap':
 3391             LOG.debug('The source volume %s to be cloned is a hyperswap '
 3392                       'volume.', src_volume.name)
 3393             # Ensure the vdisk is not part of an FC mapping; otherwise
 3394             # converting it to a hyperswap volume will fail.
 3395             self._helpers.ensure_vdisk_no_fc_mappings(tgt_volume['name'],
 3396                                                       allow_snaps=True,
 3397                                                       allow_fctgt=False)
 3398 
 3399             self._helpers.convert_volume_to_hyperswap(tgt_volume['name'],
 3400                                                       opts,
 3401                                                       self._state)
 3402 
 3403         ctxt = context.get_admin_context()
 3404         model_update = {'replication_status':
 3405                         fields.ReplicationStatus.NOT_CAPABLE}
 3406         rep_type = self._get_volume_replicated_type(ctxt, tgt_volume)
 3407 
 3408         if rep_type:
 3409             self._validate_replication_enabled()
 3410             replica_obj = self._get_replica_obj(rep_type)
 3411             replica_obj.volume_replication_setup(ctxt, tgt_volume)
 3412             model_update = {'replication_status':
 3413                             fields.ReplicationStatus.ENABLED}
 3414         return model_update
 3415 
 3416     def extend_volume(self, volume, new_size):
 3417         self._extend_volume_op(volume, new_size)
 3418 
 3419     def _extend_volume_op(self, volume, new_size, old_size=None):
 3420         LOG.debug('enter: _extend_volume_op: volume %s', volume['id'])
 3421         volume_name = self._get_target_vol(volume)
 3422         if self.is_volume_hyperswap(volume):
 3423             msg = _('_extend_volume_op: Extending a hyperswap volume is '
 3424                     'not supported.')
 3425             LOG.error(msg)
 3426             raise exception.InvalidInput(message=msg)
 3427 
 3428         ret = self._helpers.ensure_vdisk_no_fc_mappings(volume_name,
 3429                                                         allow_snaps=False)
 3430         if not ret:
 3431             msg = (_('_extend_volume_op: Extending a volume with snapshots is '
 3432                      'not supported.'))
 3433             LOG.error(msg)
 3434             raise exception.VolumeDriverException(message=msg)
 3435 
 3436         if old_size is None:
 3437             old_size = volume.size
 3438         extend_amt = int(new_size) - old_size
 3439 
 3440         rel_info = self._helpers.get_relationship_info(volume_name)
 3441         if rel_info:
 3442             LOG.warning('_extend_volume_op: Extending a volume with '
 3443                         'remote copy is not recommended.')
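                  # SVC cannot resize a vdisk that is in a remote-copy
                  # relationship, so the relationship is deleted first, the
                  # vdisks on both backends (plus the change volumes for
                  # GMCV) are extended, and the relationship is re-created
                  # and restarted below.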
 3444             try:
 3445                 rep_type = rel_info['copy_type']
 3446                 cyclingmode = rel_info['cycling_mode']
 3447                 self._master_backend_helpers.delete_relationship(
 3448                     volume.name)
 3449                 tgt_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX +
 3450                            volume.name)
 3451                 self._master_backend_helpers.extend_vdisk(volume.name,
 3452                                                           extend_amt)
 3453                 self._aux_backend_helpers.extend_vdisk(tgt_vol, extend_amt)
 3454                 tgt_sys = self._aux_backend_helpers.get_system_info()
 3455                 if storwize_const.GMCV_MULTI == cyclingmode:
 3456                     tgt_change_vol = (
 3457                         storwize_const.REPLICA_CHG_VOL_PREFIX +
 3458                         tgt_vol)
 3459                     source_change_vol = (
 3460                         storwize_const.REPLICA_CHG_VOL_PREFIX +
 3461                         volume.name)
 3462                     self._master_backend_helpers.extend_vdisk(
 3463                         source_change_vol, extend_amt)
 3464                     self._aux_backend_helpers.extend_vdisk(
 3465                         tgt_change_vol, extend_amt)
 3466                     src_change_opts = self._get_vdisk_params(
 3467                         volume.volume_type_id)
 3468                     cycle_period_seconds = src_change_opts.get(
 3469                         'cycle_period_seconds')
 3470                     self._master_backend_helpers.create_relationship(
 3471                         volume.name, tgt_vol, tgt_sys.get('system_name'),
 3472                         True, True, source_change_vol, cycle_period_seconds)
 3473                     self._aux_backend_helpers.change_relationship_changevolume(
 3474                         tgt_vol, tgt_change_vol, False)
 3475                     self._master_backend_helpers.start_relationship(
 3476                         volume.name)
 3477                 else:
 3478                     self._master_backend_helpers.create_relationship(
 3479                         volume.name, tgt_vol, tgt_sys.get('system_name'),
 3480                         storwize_const.GLOBAL == rep_type)
 3481             except Exception as e:
 3482                 msg = (_('Failed to extend a volume with remote copy '
 3483                          '%(volume)s. Exception: '
 3484                          '%(err)s.') % {'volume': volume.id,
 3485                                         'err': e})
 3486                 LOG.error(msg)
 3487                 raise exception.VolumeDriverException(message=msg)
 3488         else:
 3489             self._helpers.extend_vdisk(volume_name, extend_amt)
 3490         LOG.debug('leave: _extend_volume_op: volume %s', volume.id)
 3491 
 3492     def add_vdisk_copy(self, volume, dest_pool, vol_type, auto_delete=False):
 3493         return self._helpers.add_vdisk_copy(volume, dest_pool,
 3494                                             vol_type, self._state,
 3495                                             self.configuration,
 3496                                             auto_delete=auto_delete)
 3497 
 3498     def _add_vdisk_copy_op(self, ctxt, volume, new_op):
 3499         metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
 3500                                                      volume['id'])
 3501         curr_ops = metadata.get('vdiskcopyops', None)
 3502         if curr_ops:
 3503             curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
 3504             new_ops_list = curr_ops_list + [new_op]
 3505         else:
 3506             new_ops_list = [new_op]
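              # Pending copy operations are serialized into the volume's
              # admin metadata as 'orig:new' copy-id pairs joined by ';',
              # e.g. two pending operations (copy 0 -> 1 and copy 2 -> 3)
              # are stored as vdiskcopyops='0:1;2:3'.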
 3507         new_ops_str = ';'.join([':'.join(x) for x in new_ops_list])
 3508         self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
 3509                                              {'vdiskcopyops': new_ops_str},
 3510                                              False)
 3511         if volume['id'] in self._vdiskcopyops:
 3512             self._vdiskcopyops[volume['id']].append(new_op)
 3513         else:
 3514             self._vdiskcopyops[volume['id']] = [new_op]
 3515 
 3516         # Start the looping call if it is not already running.
 3517         if self._vdiskcopyops_loop is None:
 3518             self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
 3519                 self._check_volume_copy_ops)
 3520             self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
 3521 
 3522     def _rm_vdisk_copy_op(self, ctxt, volume, orig_copy_id, new_copy_id):
 3523         try:
 3524             self._vdiskcopyops[volume['id']].remove((orig_copy_id,
 3525                                                      new_copy_id))
 3526             if not len(self._vdiskcopyops[volume['id']]):
 3527                 del self._vdiskcopyops[volume['id']]
 3528             if not len(self._vdiskcopyops):
 3529                 self._vdiskcopyops_loop.stop()
 3530                 self._vdiskcopyops_loop = None
 3531         except KeyError:
 3532             LOG.error('_rm_vdisk_copy_op: Volume %s does not have any '
 3533                       'registered vdisk copy operations.', volume['id'])
 3534             return
 3535         except ValueError:
 3536             LOG.error('_rm_vdisk_copy_op: Volume %(vol)s does not have '
 3537                       'the specified vdisk copy operation: orig=%(orig)s '
 3538                       'new=%(new)s.',
 3539                       {'vol': volume['id'], 'orig': orig_copy_id,
 3540                        'new': new_copy_id})
 3541             return
 3542 
 3543         metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
 3544                                                      volume['id'])
 3545         curr_ops = metadata.get('vdiskcopyops', None)
 3546         if not curr_ops:
 3547             LOG.error('_rm_vdisk_copy_op: Volume metadata %s does not '
 3548                       'have any registered vdisk copy operations.',
 3549                       volume['id'])
 3550             return
 3551         curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
 3552         try:
 3553             curr_ops_list.remove((orig_copy_id, new_copy_id))
 3554         except ValueError:
 3555             LOG.error('_rm_vdisk_copy_op: Volume %(vol)s metadata does '
 3556                       'not have the specified vdisk copy operation: '
 3557                       'orig=%(orig)s new=%(new)s.',
 3558                       {'vol': volume['id'], 'orig': orig_copy_id,
 3559                        'new': new_copy_id})
 3560             return
 3561 
 3562         if len(curr_ops_list):
 3563             new_ops_str = ';'.join([':'.join(x) for x in curr_ops_list])
 3564             self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
 3565                                                  {'vdiskcopyops': new_ops_str},
 3566                                                  False)
 3567         else:
 3568             self.db.volume_admin_metadata_delete(ctxt.elevated(), volume['id'],
 3569                                                  'vdiskcopyops')
 3570 
 3571     def _check_volume_copy_ops(self):
 3572         LOG.debug("Enter: update volume copy status.")
 3573         ctxt = context.get_admin_context()
 3574         copy_items = list(self._vdiskcopyops.items())
 3575         for vol_id, copy_ops in copy_items:
 3576             try:
 3577                 volume = self.db.volume_get(ctxt, vol_id)
 3578             except Exception:
 3579                 LOG.warning('Volume %s does not exist.', vol_id)
 3580                 del self._vdiskcopyops[vol_id]
 3581                 if not len(self._vdiskcopyops):
 3582                     self._vdiskcopyops_loop.stop()
 3583                     self._vdiskcopyops_loop = None
 3584                 continue
 3585 
 3586             for copy_op in copy_ops:
 3587                 try:
 3588                     synced = self._helpers.is_vdisk_copy_synced(volume['name'],
 3589                                                                 copy_op[1])
 3590                 except Exception:
 3591                     LOG.info('_check_volume_copy_ops: Volume %(vol)s does '
 3592                              'not have the specified vdisk copy '
 3593                              'operation: orig=%(orig)s new=%(new)s.',
 3594                              {'vol': volume['id'], 'orig': copy_op[0],
 3595                               'new': copy_op[1]})
 3596                 else:
 3597                     if synced:
 3598                         self._helpers.rm_vdisk_copy(volume['name'], copy_op[0])
 3599                         self._rm_vdisk_copy_op(ctxt, volume, copy_op[0],
 3600                                                copy_op[1])
 3601         LOG.debug("Exit: update volume copy status.")
 3602 
 3603     # #### V2.1 replication methods #### #
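          # Failover is triggered manually by the administrator, e.g. via
          # the python-cinderclient command (illustrative invocation):
          #   cinder failover-host <host>@<backend> --backend_id <target_id>
          # Failing back is requested by passing the reserved backend id
          # storwize_const.FAILBACK_VALUE instead.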
 3604     @cinder_utils.trace
 3605     def failover_host(self, context, volumes, secondary_id=None, groups=None):
 3606         if not self._replica_enabled:
 3607             msg = _("Replication is not properly enabled on backend.")
 3608             LOG.error(msg)
 3609             raise exception.UnableToFailOver(reason=msg)
 3610 
 3611         if storwize_const.FAILBACK_VALUE == secondary_id:
 3612             # In this case the administrator would like to fail back.
 3613             secondary_id, volumes_update, groups_update = self._host_failback(
 3614                 context, volumes, groups)
 3615         elif (secondary_id == self._replica_target['backend_id']
 3616                 or secondary_id is None):
 3617             # In this case the administrator would like to fail over.
 3618             secondary_id, volumes_update, groups_update = self._host_failover(
 3619                 context, volumes, groups)
 3620         else:
 3621             msg = (_("Invalid secondary id %s.") % secondary_id)
 3622             LOG.error(msg)
 3623             raise exception.InvalidReplicationTarget(reason=msg)
 3624 
 3625         return secondary_id, volumes_update, groups_update
 3626 
 3627     def _host_failback(self, ctxt, volumes, groups):
 3628         """Fail back all the volumes on the secondary backend."""
 3629         volumes_update = []
 3630         groups_update = []
 3631         if not self._active_backend_id:
 3632             LOG.info("Host has already been failed back; no need "
 3633                      "to fail back again.")
 3634             return None, volumes_update, groups_update
 3635 
 3636         try:
 3637             self._master_backend_helpers.get_system_info()
 3638         except Exception:
 3639             msg = _("Unable to fail back; the primary is not reachable.")
 3640             LOG.error(msg)
 3641             raise exception.UnableToFailOver(reason=msg)
 3642 
 3643         bypass_volumes, rep_volumes = self._classify_volume(ctxt, volumes)
 3644 
 3645         # Start synchronizing from the aux volumes back to the master.
 3646         self._sync_with_aux(ctxt, rep_volumes)
 3647         self._sync_replica_groups(ctxt, groups)
 3648         self._wait_replica_ready(ctxt, rep_volumes)
 3649         self._wait_replica_groups_ready(ctxt, groups)
 3650 
 3651         rep_volumes_update = self._failback_replica_volumes(ctxt,
 3652                                                             rep_volumes)
 3653         volumes_update.extend(rep_volumes_update)
 3654 
 3655         rep_vols_in_grp_update, groups_update = self._failback_replica_groups(
 3656             ctxt, groups)
 3657         volumes_update.extend(rep_vols_in_grp_update)
 3658 
 3659         bypass_volumes_update = self._bypass_volume_process(bypass_volumes)
 3660         volumes_update.extend(bypass_volumes_update)
 3661 
 3662         self._helpers = self._master_backend_helpers
 3663         self._active_backend_id = None
 3664         self._state = self._master_state
 3665 
 3666         self._update_volume_stats()
 3667         self._master_backend_helpers.stats = self._stats
 3668 
 3669         return storwize_const.FAILBACK_VALUE, volumes_update, groups_update
 3670 
 3671     def _failback_replica_volumes(self, ctxt, rep_volumes):
 3672         LOG.debug('enter: _failback_replica_volumes')
 3673         volumes_update = []
 3674 
 3675         for volume in rep_volumes:
 3676             rep_type = self._get_volume_replicated_type(ctxt, volume)
 3677             replica_obj = self._get_replica_obj(rep_type)
 3678             tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']
 3679             rep_info = self._helpers.get_relationship_info(tgt_volume)
 3680             if not rep_info:
 3681                 volumes_update.append(
 3682                     {'volume_id': volume['id'],
 3683                      'updates':
 3684                          {'replication_status':
 3685                           fields.ReplicationStatus.ERROR,
 3686                           'status': 'error'}})
 3687                 LOG.error('_failback_replica_volumes: no rc-relationship '
 3688                           'is established between master: %(master)s and '
 3689                           'aux %(aux)s. Please re-establish the '
 3690                           'relationship and synchronize the volumes on '
 3691                           'backend storage.',
 3692                           {'master': volume['name'], 'aux': tgt_volume})
 3693                 continue
 3694             LOG.debug('_failback_replica_volumes: vol=%(vol)s, master_vol='
 3695                       '%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s, '
 3696                       'primary=%(primary)s',
 3697                       {'vol': volume['name'],
 3698                        'master_vol': rep_info['master_vdisk_name'],
 3699                        'aux_vol': rep_info['aux_vdisk_name'],
 3700                        'state': rep_info['state'],
 3701                        'primary': rep_info['primary']})
 3702             if volume.status == 'in-use':
 3703                 LOG.warning('_failback_replica_volumes: failback in-use '
 3704                             'volume: %(volume)s is not recommended.',
 3705                             {'volume': volume.name})
 3706             try:
 3707                 replica_obj.replication_failback(volume)
 3708                 model_updates = {
 3709                     'replication_status': fields.ReplicationStatus.ENABLED}
 3710                 volumes_update.append(
 3711                     {'volume_id': volume['id'],
 3712                      'updates': model_updates})
 3713             except exception.VolumeDriverException:
 3714                 LOG.error('Unable to fail back volume %(volume_id)s',
 3715                           {'volume_id': volume.id})
 3716                 volumes_update.append(
 3717                     {'volume_id': volume['id'],
 3718                      'updates': {'replication_status':
 3719                                  fields.ReplicationStatus.ERROR,
 3720                                  'status': 'error'}})
 3721         LOG.debug('leave: _failback_replica_volumes '
 3722                   'volumes_update=%(volumes_update)s',
 3723                   {'volumes_update': volumes_update})
 3724         return volumes_update
 3725 
 3726     def _bypass_volume_process(self, bypass_vols):
 3727         volumes_update = []
 3728         for vol in bypass_vols:
 3729             if vol.replication_driver_data:
 3730                 rep_data = json.loads(vol.replication_driver_data)
 3731                 update_status = rep_data['previous_status']
 3732                 rep_data = ''
 3733             else:
 3734                 update_status = 'error'
 3735                 rep_data = json.dumps({'previous_status': vol.status})
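                  # Round trip: the first failover stores e.g.
                  # '{"previous_status": "available"}' and sets the volume
                  # to 'error'; the later failback reads it back, restores
                  # the saved status and clears replication_driver_data.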
 3736 
 3737             volumes_update.append(
 3738                 {'volume_id': vol.id,
 3739                  'updates': {'status': update_status,
 3740                              'replication_driver_data': rep_data}})
 3741 
 3742         return volumes_update
 3743 
 3744     def _failback_replica_groups(self, ctxt, groups):
 3745         volumes_update = []
 3746         groups_update = []
 3747         for grp in groups:
 3748             try:
 3749                 grp_rep_status = self._rep_grp_failback(
 3750                     ctxt, grp, sync_grp=False)['replication_status']
 3751             except Exception as ex:
 3752                 LOG.error('Failed to fail back group %(grp)s during host '
 3753                           'failback due to error: %(error)s',
 3754                           {'grp': grp.id, 'error': ex})
 3755                 grp_rep_status = fields.ReplicationStatus.ERROR
 3756 
 3757             # Update all the volumes' status in that group
 3758             for vol in grp.volumes:
 3759                 vol_update = {'volume_id': vol.id,
 3760                               'updates':
 3761                                   {'replication_status': grp_rep_status,
 3762                                    'status': (
 3763                                        vol.status if grp_rep_status ==
 3764                                        fields.ReplicationStatus.ENABLED
 3765                                        else 'error')}}
 3766                 volumes_update.append(vol_update)
 3767             grp_status = (fields.GroupStatus.AVAILABLE
 3768                           if grp_rep_status ==
 3769                           fields.ReplicationStatus.ENABLED
 3770                           else fields.GroupStatus.ERROR)
 3771             grp_update = {'group_id': grp.id,
 3772                           'updates': {'replication_status': grp_rep_status,
 3773                                       'status': grp_status}}
 3774             groups_update.append(grp_update)
 3775         return volumes_update, groups_update
 3776 
 3777     def _sync_with_aux(self, ctxt, volumes):
 3778         LOG.debug('enter: _sync_with_aux')
 3779         try:
 3780             rep_mgr = self._get_replica_mgr()
 3781             rep_mgr.establish_target_partnership()
 3782         except Exception as ex:
 3783             LOG.warning('Failed to establish partnership with backend. '
 3784                         'error=%(error)s', {'error': ex})
 3785         for volume in volumes:
 3786             tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']
 3787             rep_info = self._helpers.get_relationship_info(tgt_volume)
 3788             if not rep_info:
 3789                 LOG.error('_sync_with_aux: no rc-relationship is '
 3790                           'established between master: %(master)s and aux '
 3791                           '%(aux)s. Please re-establish the relationship '
 3792                           'and synchronize the volumes on backend '
 3793                           'storage.', {'master': volume['name'],
 3794                                        'aux': tgt_volume})
 3795                 continue
 3796             LOG.debug('_sync_with_aux: volume: %(volume)s rep_info:master_vol='
 3797                       '%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s, '
 3798                       'primary=%(primary)s',
 3799                       {'volume': volume['name'],
 3800                        'master_vol': rep_info['master_vdisk_name'],
 3801                        'aux_vol': rep_info['aux_vdisk_name'],
 3802                        'state': rep_info['state'],
 3803                        'primary': rep_info['primary']})
 3804             try:
 3805                 if (rep_info['state'] not in
 3806                         [storwize_const.REP_CONSIS_SYNC,
 3807                          storwize_const.REP_CONSIS_COPYING]):
 3808                     if rep_info['primary'] == 'master':
 3809                         self._helpers.start_relationship(tgt_volume)
 3810                     else:
 3811                         self._helpers.start_relationship(tgt_volume,
 3812                                                          primary='aux')
 3813             except Exception as ex:
 3814                 LOG.warning('Failed to copy data from aux to master. '
 3815                             'master: %(master)s and aux %(aux)s. Please '
 3816                             're-establish the relationship and synchronize'
 3817                             ' the volumes on backend storage. error='
 3818                             '%(error)s', {'master': volume['name'],
 3819                                        'aux': tgt_volume,
 3820                                        'error': ex})
 3821         LOG.debug('leave: _sync_with_aux.')
 3822 
 3823     def _wait_replica_ready(self, ctxt, volumes):
 3824         for volume in volumes:
 3825             tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']
 3826             try:
 3827                 self._wait_replica_vol_ready(ctxt, tgt_volume)
 3828             except Exception as ex:
 3829                 LOG.error('_wait_replica_ready: wait for volume:%(volume)s'
 3830                           ' remote copy synchronization failed due to '
 3831                           'error:%(err)s.', {'volume': tgt_volume,
 3832                                              'err': ex})
 3833 
 3834     def _wait_replica_vol_ready(self, ctxt, volume):
 3835         LOG.debug('enter: _wait_replica_vol_ready: volume=%(volume)s',
 3836                   {'volume': volume})
 3837 
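              # _replica_vol_ready below is polled by _wait_for_a_condition
              # every DEFAULT_RC_INTERVAL seconds until it returns True,
              # raises, or DEFAULT_RC_TIMEOUT expires.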
 3838         def _replica_vol_ready():
 3839             rep_info = self._helpers.get_relationship_info(volume)
 3840             if not rep_info:
 3841                 msg = (_('_wait_replica_vol_ready: no rc-relationship '
 3842                          'is established for volume: %(volume)s. Please '
 3843                          're-establish the rc-relationship and '
 3844                          'synchronize the volumes on backend storage.') %
 3845                        {'volume': volume})
 3846                 LOG.error(msg)
 3847                 raise exception.VolumeBackendAPIException(data=msg)
 3848             LOG.debug('_replica_vol_ready:volume: %(volume)s rep_info: '
 3849                       'master_vol=%(master_vol)s, aux_vol=%(aux_vol)s, '
 3850                       'state=%(state)s, primary=%(primary)s',
 3851                       {'volume': volume,
 3852                        'master_vol': rep_info['master_vdisk_name'],
 3853                        'aux_vol': rep_info['aux_vdisk_name'],
 3854                        'state': rep_info['state'],
 3855                        'primary': rep_info['primary']})
 3856             if (rep_info['state'] in
 3857                     [storwize_const.REP_CONSIS_SYNC,
 3858                      storwize_const.REP_CONSIS_COPYING]):
 3859                 return True
 3860             elif rep_info['state'] == storwize_const.REP_IDL_DISC:
 3861                 msg = (_('Waiting for synchronization failed. '
 3862                          'volume: %(volume)s') % {'volume': volume})
 3863                 LOG.error(msg)
 3864                 raise exception.VolumeBackendAPIException(data=msg)
 3865             return False
 3866 
 3867         self._helpers._wait_for_a_condition(
 3868             _replica_vol_ready, timeout=storwize_const.DEFAULT_RC_TIMEOUT,
 3869             interval=storwize_const.DEFAULT_RC_INTERVAL,
 3870             raise_exception=True)
 3871         LOG.debug('leave: _wait_replica_vol_ready: volume=%(volume)s',
 3872                   {'volume': volume})
 3873 
 3874     def _sync_replica_groups(self, ctxt, groups):
 3875         for grp in groups:
 3876             rccg_name = self._get_rccg_name(grp)
 3877             self._sync_with_aux_grp(ctxt, rccg_name)
 3878 
 3879     def _wait_replica_groups_ready(self, ctxt, groups):
 3880         for grp in groups:
 3881             rccg_name = self._get_rccg_name(grp)
 3882             self._wait_replica_grp_ready(ctxt, rccg_name)
 3883 
 3884     def _host_failover(self, ctxt, volumes, groups):
 3885         volumes_update = []
 3886         groups_update = []
 3887         if self._active_backend_id:
 3888             LOG.info("Host has been failed over to %s",
 3889                      self._active_backend_id)
 3890             return self._active_backend_id, volumes_update, groups_update
 3891 
 3892         try:
 3893             self._aux_backend_helpers.get_system_info()
 3894         except Exception as ex:
 3895             msg = (_("Unable to failover because the replication target "
 3896                      "is not reachable. error=%(error)s") % {'error': ex})
 3897             LOG.error(msg)
 3898             raise exception.UnableToFailOver(reason=msg)
 3899 
 3900         bypass_volumes, rep_volumes = self._classify_volume(ctxt, volumes)
 3901 
 3902         rep_volumes_update = self._failover_replica_volumes(ctxt, rep_volumes)
 3903         volumes_update.extend(rep_volumes_update)
 3904 
 3905         rep_vols_in_grp_update, groups_update = self._failover_replica_groups(
 3906             ctxt, groups)
 3907         volumes_update.extend(rep_vols_in_grp_update)
 3908 
 3909         bypass_volumes_update = self._bypass_volume_process(bypass_volumes)
 3910         volumes_update.extend(bypass_volumes_update)
 3911 
 3912         self._helpers = self._aux_backend_helpers
 3913         self._active_backend_id = self._replica_target['backend_id']
 3914         self._secondary_pools = [self._replica_target['pool_name']]
 3915         self._state = self._aux_state
 3916 
 3917         self._update_volume_stats()
 3918         self._aux_backend_helpers.stats = self._stats
 3919 
 3920         return self._active_backend_id, volumes_update, groups_update
 3921 
 3922     def _failover_replica_volumes(self, ctxt, rep_volumes):
 3923         LOG.debug('enter: _failover_replica_volumes')
 3924         volumes_update = []
 3925 
 3926         for volume in rep_volumes:
 3927             rep_type = self._get_volume_replicated_type(ctxt, volume)
 3928             replica_obj = self._get_replica_obj(rep_type)
 3929             # Try to do the failover.
 3930             try:
 3931                 rep_info = self._aux_backend_helpers.get_relationship_info(
 3932                     storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'])
 3933                 if not rep_info:
 3934                     volumes_update.append(
 3935                         {'volume_id': volume['id'],
 3936                          'updates':
 3937                              {'replication_status':
 3938                               fields.ReplicationStatus.FAILOVER_ERROR,
 3939                               'status': 'error'}})
 3940                     LOG.error('_failover_replica_volumes: no rc-'
 3941                               'relationship is established for volume:'
 3942                               '%(volume)s. Please re-establish the rc-'
 3943                               'relationship and synchronize the volumes on'
 3944                               ' backend storage.',
 3945                               {'volume': volume.name})
 3946                     continue
 3947                 LOG.debug('_failover_replica_volumes: vol=%(vol)s, '
 3948                           'master_vol=%(master_vol)s, aux_vol=%(aux_vol)s, '
 3949                           'state=%(state)s, primary=%(primary)s',
 3950                           {'vol': volume['name'],
 3951                            'master_vol': rep_info['master_vdisk_name'],
 3952                            'aux_vol': rep_info['aux_vdisk_name'],
 3953                            'state': rep_info['state'],
 3954                            'primary': rep_info['primary']})
 3955                 if volume.status == 'in-use':
 3956                     LOG.warning('_failover_replica_volumes: failover in-use '
 3957                                 'volume: %(volume)s is not recommended.',
 3958                                 {'volume': volume.name})
 3959                 replica_obj.failover_volume_host(ctxt, volume)
 3960                 model_updates = {
 3961                     'replication_status': fields.ReplicationStatus.FAILED_OVER}
 3962                 volumes_update.append(
 3963                     {'volume_id': volume['id'],
 3964                      'updates': model_updates})
 3965             except exception.VolumeDriverException:
 3966                 LOG.error('Unable to failover to aux volume. Please make '
 3967                           'sure that the aux volume is ready.')
 3968                 volumes_update.append(
 3969                     {'volume_id': volume['id'],
 3970                      'updates': {'status': 'error',
 3971                                  'replication_status':
 3972                                  fields.ReplicationStatus.FAILOVER_ERROR}})
 3973         LOG.debug('leave: _failover_replica_volumes '
 3974                   'volumes_update=%(volumes_update)s',
 3975                   {'volumes_update': volumes_update})
 3976         return volumes_update
 3977 
 3978     def _failover_replica_groups(self, ctxt, groups):
 3979         volumes_update = []
 3980         groups_update = []
 3981         for grp in groups:
 3982             try:
 3983                 grp_rep_status = self._rep_grp_failover(
 3984                     ctxt, grp)['replication_status']
 3985             except Exception as ex:
 3986                 LOG.error('Failed to fail over group %(grp)s during host '
 3987                           'failover due to error: %(error)s',
 3988                           {'grp': grp.id, 'error': ex})
 3989                 grp_rep_status = fields.ReplicationStatus.ERROR
 3990 
 3991             # Update all the volumes' status in that group
 3992             for vol in grp.volumes:
 3993                 vol_update = {'volume_id': vol.id,
 3994                               'updates':
 3995                                   {'replication_status': grp_rep_status,
 3996                                    'status': (
 3997                                        vol.status if grp_rep_status ==
 3998                                        fields.ReplicationStatus.FAILED_OVER
 3999                                        else 'error')}}
 4000                 volumes_update.append(vol_update)
 4001             grp_status = (fields.GroupStatus.AVAILABLE
 4002                           if grp_rep_status ==
 4003                           fields.ReplicationStatus.FAILED_OVER
 4004                           else fields.GroupStatus.ERROR)
 4005             grp_update = {'group_id': grp.id,
 4006                           'updates': {'replication_status': grp_rep_status,
 4007                                       'status': grp_status}}
 4008             groups_update.append(grp_update)
 4009         return volumes_update, groups_update
 4010 
 4011     def _classify_volume(self, ctxt, volumes):
 4012         bypass_volumes = []
 4013         replica_volumes = []
 4014 
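              # Volumes in a replication-enabled group are skipped here and
              # handled by the group failover path; replicated volumes that
              # are 'available' or 'in-use' fail over individually; all
              # other volumes only have their status toggled by
              # _bypass_volume_process().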
 4015         for v in volumes:
 4016             volume_type = self._get_volume_replicated_type(ctxt, v)
 4017             grp = v.group
 4018             if grp and volume_utils.is_group_a_type(
 4019                     grp, "consistent_group_replication_enabled"):
 4020                 continue
 4021             elif volume_type and v.status in ['available', 'in-use']:
 4022                 replica_volumes.append(v)
 4023             else:
 4024                 bypass_volumes.append(v)
 4025         return bypass_volumes, replica_volumes
 4026 
 4027     def _get_replica_obj(self, rep_type):
 4028         replica_manager = self.replica_manager[
 4029             self._replica_target['backend_id']]
 4030         return replica_manager.get_replica_obj(rep_type)
 4031 
 4032     def _get_replica_mgr(self):
 4033         replica_manager = self.replica_manager[
 4034             self._replica_target['backend_id']]
 4035         return replica_manager
 4036 
 4037     def _get_target_vol(self, volume):
 4038         tgt_vol = volume['name']
 4039         if self._active_backend_id:
 4040             ctxt = context.get_admin_context()
 4041             rep_type = self._get_volume_replicated_type(ctxt, volume)
 4042             if rep_type:
 4043                 tgt_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX +
 4044                            volume['name'])
 4045         return tgt_vol
 4046 
 4047     def _validate_replication_enabled(self):
 4048         if not self._replica_enabled:
 4049             msg = _("Replication is not properly configured on backend.")
 4050             LOG.error(msg)
 4051             raise exception.VolumeBackendAPIException(data=msg)
 4052 
 4053     def _get_specs_replicated_type(self, volume_type):
 4054         replication_type = None
 4055         extra_specs = volume_type.get("extra_specs", {})
 4056         rep_val = extra_specs.get('replication_enabled')
 4057         if rep_val == "<is> True":
 4058             replication_type = extra_specs.get('replication_type',
 4059                                                storwize_const.GLOBAL)
 4060             # The replication_type extra spec has the form
 4061             # "<in> global"; this code is only reached when
 4062             # replication_enabled is "<is> True".
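                  # Illustrative (hypothetical) volume type extra specs:
                  #   {'replication_enabled': '<is> True',
                  #    'replication_type': '<in> metro'}
                  # yield replication_type == 'metro' here.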
 4063             if replication_type != storwize_const.GLOBAL:
 4064                 # Pick up the replication type specified in the
 4065                 # extra spec from the format like "<in> global".
 4066                 replication_type = replication_type.split()[1]
 4067             if replication_type not in storwize_const.VALID_REP_TYPES:
 4068                 msg = (_("Invalid replication type %s.") % replication_type)
 4069                 LOG.error(msg)
 4070                 raise exception.InvalidInput(reason=msg)
 4071         return replication_type
 4072 
 4073     def _get_volume_replicated_type(self, ctxt, volume, vol_type_id=None):
 4074         replication_type = None
 4075         volume_type = None
 4076         volume_type_id = volume.volume_type_id if volume else vol_type_id
 4077         if volume_type_id:
 4078             volume_type = objects.VolumeType.get_by_name_or_id(
 4079                 ctxt, volume_type_id)
 4080         if volume_type:
 4081             replication_type = self._get_specs_replicated_type(volume_type)
 4082         return replication_type
 4083 
 4084     def is_volume_hyperswap(self, volume):
 4085         """Returns True if the volume type is hyperswap."""
 4086         is_hyper_volume = False
 4087         if 'volume_type_id' in volume:
 4088             opts = self._get_vdisk_params(volume.volume_type_id)
 4089             if opts['volume_topology'] == 'hyperswap':
 4090                 is_hyper_volume = True
 4091         return is_hyper_volume
 4092 
 4093     def _get_storwize_config(self):
 4094         # Update the storwize state
 4095         try:
 4096             self._update_storwize_state(self._master_state, self._helpers)
 4097         except Exception as err:
 4098             LOG.warning('Failed to get system %(san_ip)s info. error=%(error)s',
 4099                         {'san_ip': self.active_ip, 'error': err})
 4100             if not self._active_backend_id:
 4101                 with excutils.save_and_reraise_exception():
 4102                     pass
 4103         self._do_replication_setup()
 4104 
 4105         if self._active_backend_id and self._replica_target:
 4106             self._helpers = self._aux_backend_helpers
 4107             self._state = self._aux_state
 4108 
 4109         self._replica_enabled = bool(self._helpers.replication_licensed()
 4110                                      and self._replica_target)
 4111         if self._replica_enabled:
 4112             self._supported_replica_types = storwize_const.VALID_REP_TYPES
 4113 
 4114     def _do_replication_setup(self):
 4115         rep_devs = self.configuration.safe_get('replication_device')
 4116         if not rep_devs:
 4117             return
 4118 
 4119         if len(rep_devs) > 1:
 4120             raise exception.InvalidInput(
 4121                 reason='Multiple replication devices are configured. '
 4122                        'Only one replication_device is currently supported.')
 4123 
 4124         required_flags = ['san_ip', 'backend_id', 'san_login',
 4125                           'san_password', 'pool_name']
 4126         for flag in required_flags:
 4127             if flag not in rep_devs[0]:
 4128                 raise exception.InvalidInput(
 4129                     reason=_('%s is not set.') % flag)
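              # Illustrative cinder.conf entry (placeholder values; a single
              # logical line in practice):
              #   replication_device = backend_id:aux_svc,san_ip:192.0.2.10,
              #     san_login:admin,san_password:secret,pool_name:aux_pool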
 4130 
 4131         rep_target = {}
 4132         rep_target['san_ip'] = rep_devs[0].get('san_ip')
 4133         rep_target['backend_id'] = rep_devs[0].get('backend_id')
 4134         rep_target['san_login'] = rep_devs[0].get('san_login')
 4135         rep_target['san_password'] = rep_devs[0].get('san_password')
 4136         rep_target['pool_name'] = rep_devs[0].get('pool_name')
 4137 
 4138         # Each replication target has a corresponding replication manager.
 4139         self._replication_initialize(rep_target)
 4140 
 4141     def _replication_initialize(self, target):
 4142         rep_manager = storwize_rep.StorwizeSVCReplicationManager(
 4143             self, target, StorwizeHelpers)
 4144 
 4145         if self._active_backend_id:
 4146             if self._active_backend_id != target['backend_id']:
 4147                 msg = (_("Invalid secondary id %s.") % self._active_backend_id)
 4148                 LOG.error(msg)
 4149                 raise exception.InvalidInput(reason=msg)
 4150         # Setup partnership only in non-failover state
 4151         else:
 4152             try:
 4153                 rep_manager.establish_target_partnership()
 4154             except exception.VolumeDriverException:
 4155                 LOG.error('The replication src %(src)s has not '
 4156                           'successfully established partnership with the '
 4157                           'replica target %(tgt)s.',
 4158                           {'src': self.configuration.san_ip,
 4159                            'tgt': target['backend_id']})
 4160 
 4161         self._aux_backend_helpers = rep_manager.get_target_helpers()
 4162         self.replica_manager[target['backend_id']] = rep_manager
 4163         self._replica_target = target
 4164         self._update_storwize_state(self._aux_state, self._aux_backend_helpers)
 4165 
 4166     # Replication Group (Tiramisu)
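          # Group replication is exercised through the Tiramisu APIs below;
          # the matching CLI calls (names assumed from python-cinderclient)
          # look like:
          #   cinder group-enable-replication <group>
          #   cinder group-failover-replication <group> \
          #       --secondary-backend-id <target_id>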
 4167     @cinder_utils.trace
 4168     def enable_replication(self, context, group, volumes):
 4169         """Enables replication for a group and volumes in the group."""
 4170         model_update = {'replication_status': fields.ReplicationStatus.ENABLED}
 4171         volumes_update = []
 4172         rccg_name = self._get_rccg_name(group)
 4173         rccg = self._helpers.get_rccg(rccg_name)
 4174         if rccg and rccg['relationship_count'] != '0':
 4175             try:
 4176                 if rccg['primary'] == 'aux':
 4177                     self._helpers.start_rccg(rccg_name, primary='aux')
 4178                 else:
 4179                     self._helpers.start_rccg(rccg_name, primary='master')
 4180             except exception.VolumeBackendAPIException as err:
 4181                 LOG.error("Failed to enable group replication on %(rccg)s. "
 4182                           "Exception: %(exception)s.",
 4183                           {'rccg': rccg_name, 'exception': err})
 4184                 model_update[
 4185                     'replication_status'] = fields.ReplicationStatus.ERROR
 4186         else:
 4187             if rccg:
 4188                 LOG.error("Enabling replication on an empty group "
 4189                           "%(rccg)s is forbidden.", {'rccg': rccg['name']})
 4190             else:
 4191                 LOG.error("Failed to enable group replication: %(grp)s does "
 4192                           "not exist in backend.", {'grp': group.id})
 4193             model_update['replication_status'] = fields.ReplicationStatus.ERROR
 4194 
 4195         for vol in volumes:
 4196             volumes_update.append(
 4197                 {'id': vol.id,
 4198                  'replication_status': model_update['replication_status']})
 4199         return model_update, volumes_update
 4200 
 4201     @cinder_utils.trace
 4202     def disable_replication(self, context, group, volumes):
 4203         """Disables replication for a group and volumes in the group."""
 4204         model_update = {
 4205             'replication_status': fields.ReplicationStatus.DISABLED}
 4206         volumes_update = []
 4207         rccg_name = self._get_rccg_name(group)
 4208         rccg = self._helpers.get_rccg(rccg_name)
 4209         if rccg and rccg['relationship_count'] != '0':
 4210             try:
 4211                 self._helpers.stop_rccg(rccg_name)
 4212             except exception.VolumeBackendAPIException as err:
 4213                 LOG.error("Failed to disable group replication on %(rccg)s. "
 4214                           "Exception: %(exception)s.",
 4215                           {'rccg': rccg_name, 'exception': err})
 4216                 model_update[
 4217                     'replication_status'] = fields.ReplicationStatus.ERROR
 4218         else:
 4219             if rccg:
 4220                 LOG.error("Disabling replication on an empty group "
 4221                           "%(rccg)s is forbidden.", {'rccg': rccg['name']})
 4222             else:
 4223                 LOG.error("Failed to disable group replication: %(grp)s does "
 4224                           "not exist in backend.", {'grp': group.id})
 4225             model_update['replication_status'] = fields.ReplicationStatus.ERROR
 4226 
 4227         for vol in volumes:
 4228             volumes_update.append(
 4229                 {'id': vol.id,
 4230                  'replication_status': model_update['replication_status']})
 4231         return model_update, volumes_update
 4232 
 4233     @cinder_utils.trace
 4234     def failover_replication(self, context, group, volumes,
 4235                              secondary_backend_id=None):
 4236         """Fails over replication for a group and volumes in the group."""
 4237         volumes_model_update = []
 4238         model_update = {}
 4239         if not self._replica_enabled:
 4240             msg = _("Replication is not properly enabled on backend.")
 4241             LOG.error(msg)
 4242             raise exception.UnableToFailOver(reason=msg)
 4243 
 4244         if storwize_const.FAILBACK_VALUE == secondary_backend_id:
 4245             # In this case the administrator would like to group fail back.
 4246             model_update = self._rep_grp_failback(context, group)
 4247         elif (secondary_backend_id == self._replica_target['backend_id']
 4248                 or secondary_backend_id is None):
 4249             # In this case the administrator would like to group fail over.
 4250             model_update = self._rep_grp_failover(context, group)
 4251         else:
 4252             msg = (_("Invalid secondary id %s.") % secondary_backend_id)
 4253             LOG.error(msg)
 4254             raise exception.InvalidReplicationTarget(reason=msg)
 4255 
 4256         for vol in volumes:
 4257             volume_model_update = {'id': vol.id,
 4258                                    'replication_status':
 4259                                        model_update['replication_status']}
 4260             volumes_model_update.append(volume_model_update)
 4261         return model_update, volumes_model_update
 4262 
 4263     def _rep_grp_failback(self, ctxt, group, sync_grp=True):
 4264         """Fail back all the volumes in the replication group."""
 4265         model_update = {
 4266             'replication_status': fields.ReplicationStatus.ENABLED}
 4267         rccg_name = self._get_rccg_name(group)
 4268 
 4269         try:
 4270             self._master_backend_helpers.get_system_info()
 4271         except Exception as ex:
 4272             msg = (_("Unable to failback group %(rccg)s because the "
 4273                      "primary backend is not reachable. error=%(error)s") %
 4274                    {'rccg': rccg_name, 'error': ex})
 4275             LOG.error(msg)
 4276             raise exception.UnableToFailOver(reason=msg)
 4277 
 4278         rccg = self._helpers.get_rccg(rccg_name)
 4279         if not rccg:
 4280             msg = (_("Unable to failback group %(rccg)s because the "
 4281                      "replication group does not exist on the backend.") %
 4282                    {'rccg': rccg_name})
 4283             LOG.error(msg)
 4284             raise exception.UnableToFailOver(reason=msg)
 4285 
 4286         if rccg['relationship_count'] == '0':
 4287             msg = (_("Unable to failback empty group %(rccg)s") %
 4288                    {'rccg': rccg['name']})
 4289             LOG.error(msg)
 4290             raise exception.UnableToFailOver(reason=msg)
 4291 
 4292         if rccg['primary'] == 'master':
 4293             LOG.info("No need to fail back group %(rccg)s again; the "
 4294                      "primary is already the master.", {'rccg': rccg['name']})
 4295             return model_update
 4296 
 4297         if sync_grp:
 4298             self._sync_with_aux_grp(ctxt, rccg['name'])
 4299             self._wait_replica_grp_ready(ctxt, rccg['name'])
 4300 
 4301         if rccg['cycling_mode'] == 'multi':
 4302             # This is a gmcv replication group
 4303             try:
 4304                 self._aux_backend_helpers.stop_rccg(rccg['name'], access=True)
 4305                 self._aux_backend_helpers.start_rccg(rccg['name'],
 4306                                                      primary='master')
 4307                 return model_update
 4308             except exception.VolumeBackendAPIException as e:
 4309                 msg = (_('Unable to fail over the group %(rccg)s to the aux '
 4310                          'back-end, error: %(error)s') %
 4311                        {"rccg": rccg['name'], "error": e})
 4312                 LOG.exception(msg)
 4313                 raise exception.UnableToFailOver(reason=msg)
 4314         else:
 4315             try:
 4316                 self._helpers.switch_rccg(rccg['name'], aux=False)
 4317             except exception.VolumeBackendAPIException as e:
 4318                 msg = (_('Unable to fail back the group %(rccg)s, error: '
 4319                          '%(error)s') % {"rccg": rccg['name'], "error": e})
 4320                 LOG.exception(msg)
 4321                 raise exception.UnableToFailOver(reason=msg)
 4322         return model_update
 4323 
 4324     def _rep_grp_failover(self, ctxt, group):
 4325         """Fail over all the volumes in the replication group."""
 4326         model_update = {
 4327             'replication_status': fields.ReplicationStatus.FAILED_OVER}
 4328         rccg_name = self._get_rccg_name(group)
 4329         try:
 4330             self._aux_backend_helpers.get_system_info()
 4331         except Exception as ex:
 4332             msg = (_("Unable to failover group %(rccg)s because the "
 4333                      "replication target is not reachable. "
 4334                      "error=%(error)s") % {'rccg': rccg_name, 'error': ex})
 4335             LOG.error(msg)
 4336             raise exception.UnableToFailOver(reason=msg)
 4337 
 4338         rccg = self._aux_backend_helpers.get_rccg(rccg_name)
 4339         if not rccg:
 4340             msg = (_("Unable to failover group %(rccg)s because the "
 4341                      "replication group does not exist on the backend.") %
 4342                    {'rccg': rccg_name})
 4343             LOG.error(msg)
 4344             raise exception.UnableToFailOver(reason=msg)
 4345 
 4346         if rccg['relationship_count'] == '0':
 4347             msg = (_("Unable to failover group %(rccg)s because it is "
 4348                      "an empty group.") % {'rccg': rccg['name']})
 4349             LOG.error(msg)
 4350             raise exception.UnableToFailOver(reason=msg)
 4351 
 4352         if rccg['primary'] == 'aux':
 4353             LOG.info("No need to fail over group %(rccg)s again; the "
 4354                      "primary is already the aux.", {'rccg': rccg['name']})
 4355             return model_update
 4356 
 4357         if rccg['cycling_mode'] == 'multi':
 4358             # This is a gmcv replication group
 4359             try:
 4360                 self._aux_backend_helpers.stop_rccg(rccg['name'], access=True)
 4361                 self._sync_with_aux_grp(ctxt, rccg['name'])
 4362                 return model_update
 4363             except exception.VolumeBackendAPIException as e:
 4364                 msg = (_('Unable to fail over the group %(rccg)s to the aux '
 4365                          'back-end, error: %(error)s') %
 4366                        {"rccg": rccg['name'], "error": e})
 4367                 LOG.exception(msg)
 4368                 raise exception.UnableToFailOver(reason=msg)
 4369         else:
 4370             try:
 4371                 # Reverse the role of the primary and secondary volumes
 4372                 self._helpers.switch_rccg(rccg['name'], aux=True)
 4373                 return model_update
 4374             except exception.VolumeBackendAPIException as e:
 4375                 LOG.exception('Unable to fail over the group %(rccg)s to the '
 4376                               'aux back-end by switchrcconsistgrp command, '
 4377                               'error: %(error)s',
 4378                               {"rccg": rccg['name'], "error": e})
 4379                 # If the switch command fails, try to make the aux group
 4380                 # writeable again.
 4381                 try:
 4382                     self._aux_backend_helpers.stop_rccg(rccg['name'],
 4383                                                         access=True)
 4384                     self._sync_with_aux_grp(ctxt, rccg['name'])
 4385                     return model_update
 4386                 except exception.VolumeBackendAPIException as e:
 4387                     msg = (_('Unable to fail over the group %(rccg)s to the '
 4388                              'aux back-end, error: %(error)s') %
 4389                            {"rccg": rccg['name'], "error": e})
 4390                     LOG.exception(msg)
 4391                     raise exception.UnableToFailOver(reason=msg)
 4392 
 4393     @cinder_utils.trace
 4394     def _sync_with_aux_grp(self, ctxt, rccg_name):
 4395         try:
 4396             rccg = self._helpers.get_rccg(rccg_name)
 4397             if rccg and rccg['relationship_count'] != '0':
 4398                 if (rccg['state'] not in
 4399                         [storwize_const.REP_CONSIS_SYNC,
 4400                          storwize_const.REP_CONSIS_COPYING]):
 4401                     if rccg['primary'] == 'master':
 4402                         self._helpers.start_rccg(rccg_name, primary='master')
 4403                     else:
 4404                         self._helpers.start_rccg(rccg_name, primary='aux')
 4405             else:
 4406                 LOG.warning('Group %(grp)s does not exist or is empty; '
 4407                             'nothing to synchronize.', {'grp': rccg_name})
 4408         except exception.VolumeBackendAPIException as ex:
 4409             LOG.warning('Failed to copy data from aux group %(rccg)s to '
 4410                         'master group. Please recheck the relationship and '
 4411                         'synchronize the group on backend storage. error='
 4412                         '%(error)s', {'rccg': rccg_name, 'error': ex})
 4413 
 4414     def _wait_replica_grp_ready(self, ctxt, rccg_name):
 4415         LOG.debug('_wait_replica_grp_ready: group=%(rccg)s',
 4416                   {'rccg': rccg_name})
 4417 
 4418         def _replica_grp_ready():
 4419             rccg = self._helpers.get_rccg(rccg_name)
 4420             if not rccg:
 4421                 msg = (_('_replica_grp_ready: no group %(rccg)s exists on the '
 4422                          'backend. Please re-create the rccg and synchronize '
 4423                          'the volumes on backend storage.') %
 4424                        {'rccg': rccg_name})
 4425                 LOG.error(msg)
 4426                 raise exception.VolumeBackendAPIException(data=msg)
 4427             if rccg['relationship_count'] == '0':
 4428                 return True
 4429             LOG.debug('_replica_grp_ready: group: %(rccg)s: state=%(state)s, '
 4430                       'primary=%(primary)s',
 4431                       {'rccg': rccg['name'], 'state': rccg['state'],
 4432                        'primary': rccg['primary']})
 4433             if rccg['state'] in [storwize_const.REP_CONSIS_SYNC,
 4434                                  storwize_const.REP_CONSIS_COPYING]:
 4435                 return True
 4436             if rccg['state'] == storwize_const.REP_IDL_DISC:
 4437                 msg = (_('Waiting for synchronization failed. '
 4438                          'group: %(rccg)s') % {'rccg': rccg_name})
 4439                 LOG.error(msg)
 4440                 raise exception.VolumeBackendAPIException(data=msg)
 4441             return False
 4442         try:
 4443             self._helpers._wait_for_a_condition(
 4444                 _replica_grp_ready,
 4445                 timeout=storwize_const.DEFAULT_RCCG_TIMEOUT,
 4446                 interval=storwize_const.DEFAULT_RCCG_INTERVAL,
 4447                 raise_exception=True)
 4448         except Exception as ex:
 4449             LOG.error('_wait_replica_grp_ready: wait for group %(rccg)s '
 4450                       'synchronization failed due to '
 4451                       'error: %(err)s.', {'rccg': rccg_name,
 4452                                           'err': ex})
 4453 
 4454     def get_replication_error_status(self, context, groups):
 4455         """Returns error info for replicated groups and its volumes.
 4456 
 4457         The failover/failback only happens manually, no need to update the
 4458         status.
 4459         """
 4460         return [], []
 4461 
 4462     def _get_vol_sys_info(self, volume):
 4463         tgt_vol = volume.name
 4464         backend_helper = self._helpers
 4465         node_state = self._state
 4466         grp = volume.group
 4467         if grp and volume_utils.is_group_a_type(
 4468                 grp, "consistent_group_replication_enabled"):
 4469             if (grp.replication_status ==
 4470                     fields.ReplicationStatus.FAILED_OVER):
 4471                 tgt_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX +
 4472                            volume.name)
 4473                 backend_helper = self._aux_backend_helpers
 4474                 node_state = self._aux_state
 4475             else:
 4476                 backend_helper = self._master_backend_helpers
 4477                 node_state = self._master_state
 4478         elif self._active_backend_id:
 4479             ctxt = context.get_admin_context()
 4480             rep_type = self._get_volume_replicated_type(ctxt, volume)
 4481             if rep_type:
 4482                 tgt_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX +
 4483                            volume.name)
 4484 
 4485         return tgt_vol, backend_helper, node_state
 4486 
 4487     def _toggle_rep_vol_info(self, volume, helper):
 4488         if helper == self._master_backend_helpers:
 4489             vol_name = storwize_const.REPLICA_AUX_VOL_PREFIX + volume.name
 4490             backend_helper = self._aux_backend_helpers
 4491             node_state = self._aux_state
 4492         else:
 4493             vol_name = volume.name
 4494             backend_helper = self._master_backend_helpers
 4495             node_state = self._master_state
 4496         return vol_name, backend_helper, node_state
 4497 
 4498     def _get_map_info_from_connector(self, volume, connector, iscsi=False):
 4499         if volume.display_name == 'backup-snapshot':
 4500             LOG.debug('It is a virtual volume %(vol)s for detach snapshot.',
 4501                       {'vol': volume.id})
 4502             vol_name = volume.name
 4503             backend_helper = self._helpers
 4504             node_state = self._state
 4505         else:
 4506             vol_name, backend_helper, node_state = self._get_vol_sys_info(
 4507                 volume)
 4508 
 4509         info = {}
 4510         if 'host' in connector:
 4511             # Get the host matching the connector for the protocol in use.
 4512             connector = connector.copy()
 4513             if not iscsi:
 4514                 connector.pop('initiator', None)
 4515                 info = {'driver_volume_type': 'fibre_channel',
 4516                         'data': {}}
 4517             else:
 4518                 info = {'driver_volume_type': 'iscsi',
 4519                         'data': {}}
 4520             host_name = backend_helper.get_host_from_connector(
 4521                 connector, volume_name=vol_name, iscsi