"Fossies" - the Fresh Open Source Software Archive

Member "cinder-17.1.0/cinder/volume/drivers/remotefs.py" (8 Mar 2021, 90114 Bytes) of package /linux/misc/openstack/cinder-17.1.0.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) Python source code syntax highlighting (style: standard) with prefixed line numbers. Alternatively you can here view or download the uninterpreted source code file. For more information about "remotefs.py" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 17.0.1_vs_17.1.0.

    1 # Copyright (c) 2012 NetApp, Inc.
    2 # Copyright (c) 2014 Red Hat, Inc.
    3 # All Rights Reserved.
    4 #
    5 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
    6 #    not use this file except in compliance with the License. You may obtain
    7 #    a copy of the License at
    8 #
    9 #         http://www.apache.org/licenses/LICENSE-2.0
   10 #
   11 #    Unless required by applicable law or agreed to in writing, software
   12 #    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   13 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   14 #    License for the specific language governing permissions and limitations
   15 #    under the License.
   16 
   17 import binascii
   18 import collections
   19 import errno
   20 import hashlib
   21 import inspect
   22 import json
   23 import math
   24 import os
   25 import re
   26 import shutil
   27 import string
   28 import tempfile
   29 import time
   30 
   31 from castellan import key_manager
   32 from oslo_config import cfg
   33 from oslo_log import log as logging
   34 from oslo_serialization import jsonutils
   35 from oslo_utils import units
   36 import six
   37 
   38 from cinder import compute
   39 from cinder import coordination
   40 from cinder import db
   41 from cinder import exception
   42 from cinder.i18n import _
   43 from cinder.image import image_utils
   44 from cinder import objects
   45 from cinder.objects import fields
   46 from cinder import utils
   47 from cinder.volume import configuration
   48 from cinder.volume import driver
   49 from cinder.volume import volume_utils
   50 
   51 LOG = logging.getLogger(__name__)
   52 
   53 
   54 nas_opts = [
   55     cfg.StrOpt('nas_host',
   56                default='',
   57                help='IP address or Hostname of NAS system.'),
   58     cfg.StrOpt('nas_login',
   59                default='admin',
   60                help='User name to connect to NAS system.'),
   61     cfg.StrOpt('nas_password',
   62                default='',
   63                help='Password to connect to NAS system.',
   64                secret=True),
   65     cfg.PortOpt('nas_ssh_port',
   66                 default=22,
   67                 help='SSH port to use to connect to NAS system.'),
   68     cfg.StrOpt('nas_private_key',
   69                default='',
   70                help='Filename of private key to use for SSH authentication.'),
   71     cfg.StrOpt('nas_secure_file_operations',
   72                default='auto',
   73                help=('Allow network-attached storage systems to operate in a '
   74                      'secure environment where root level access is not '
   75                      'permitted. If set to False, access is as the root user '
   76                      'and insecure. If set to True, access is not as root. '
   77                      'If set to auto, a check is done to determine if this is '
   78                      'a new installation: True is used if so, otherwise '
   79                      'False. Default is auto.')),
   80     cfg.StrOpt('nas_secure_file_permissions',
   81                default='auto',
   82                help=('Set more secure file permissions on network-attached '
   83                      'storage volume files to restrict broad other/world '
   84                      'access. If set to False, volumes are created with open '
   85                      'permissions. If set to True, volumes are created with '
   86                      'permissions for the cinder user and group (660). If '
   87                      'set to auto, a check is done to determine if '
   88                      'this is a new installation: True is used if so, '
   89                      'otherwise False. Default is auto.')),
   90     cfg.StrOpt('nas_share_path',
   91                default='',
   92                help=('Path to the share to use for storing Cinder volumes. '
   93                      'For example:  "/srv/export1" for an NFS server export '
   94                      'available at 10.0.5.10:/srv/export1 .')),
   95     cfg.StrOpt('nas_mount_options',
   96                help=('Options used to mount the storage backend file system '
   97                      'where Cinder volumes are stored.')),
   98 ]
   99 
  100 volume_opts = [
  101     cfg.StrOpt('nas_volume_prov_type',
  102                default='thin',
  103                choices=['thin', 'thick'],
  104                help=('Provisioning type that will be used when '
  105                      'creating volumes.')),
  106 ]
  107 
  108 CONF = cfg.CONF
  109 CONF.register_opts(nas_opts, group=configuration.SHARED_CONF_GROUP)
  110 CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP)
  111 
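      # NOTE: Illustrative only (not part of the upstream module). A backend
      # section in cinder.conf using the shared NAS options registered above
      # might look like the following sketch; the driver class shown is the
      # NFS subclass of this module's base driver, and the host/export values
      # are placeholders taken from the option help text:
      #
      #     [nfs-backend-1]
      #     volume_driver = cinder.volume.drivers.nfs.NfsDriver
      #     nas_host = 10.0.5.10
      #     nas_share_path = /srv/export1
      #     nas_secure_file_operations = auto
      #     nas_secure_file_permissions = auto
      #     nas_volume_prov_type = thin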
  112 
  113 def locked_volume_id_operation(f):
  114     """Lock decorator for volume operations.
  115 
  116        Takes a named lock prior to executing the operation. The lock is named
  117        with the id of the volume. This lock can be used by driver methods
  118        to prevent conflicts with other operations modifying the same volume.
  119 
  120        May be applied to methods that take a 'volume' or 'snapshot' argument.
  121     """
  122 
  123     def lvo_inner1(inst, *args, **kwargs):
  124         lock_tag = inst.driver_prefix
  125         call_args = inspect.getcallargs(f, inst, *args, **kwargs)
  126 
  127         if call_args.get('volume'):
  128             volume_id = call_args['volume'].id
  129         elif call_args.get('snapshot'):
  130             volume_id = call_args['snapshot'].volume.id
  131         else:
  132             err_msg = _('The decorated method must accept either a volume or '
  133                         'a snapshot object')
  134             raise exception.VolumeBackendAPIException(data=err_msg)
  135 
  136         @utils.synchronized('%s-%s' % (lock_tag, volume_id),
  137                             external=False)
  138         def lvo_inner2():
  139             return f(inst, *args, **kwargs)
  140         return lvo_inner2()
  141     return lvo_inner1
  142 
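      # NOTE: Illustrative sketch (not part of the upstream module) of how a
      # concrete driver method would typically use the decorator above; the
      # class and method shown are hypothetical:
      #
      #     class MyRemoteFSDriver(RemoteFSSnapDriver):
      #         @locked_volume_id_operation
      #         def extend_volume(self, volume, size_gb):
      #             # runs under the '<driver_prefix>-<volume.id>' lock
      #             ...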
  143 
  144 class BackingFileTemplate(string.Template):
  145     """Custom Template for substitutions in backing files regex strings
  146 
  147         Changes the default delimiter from '$' to '#' in order to prevent
  148         clashing with the regex end of line marker '$'.
  149     """
  150     delimiter = '#'
  151     idpattern = r'[a-z][_a-z0-9]*'
  152 
  153 
  154 class RemoteFSDriver(driver.BaseVD):
  155     """Common base for drivers that work like NFS."""
  156 
  157     driver_volume_type = None
  158     driver_prefix = 'remotefs'
  159     volume_backend_name = None
  160     vendor_name = 'Open Source'
  161     SHARE_FORMAT_REGEX = r'.+:/.+'
  162 
  163     # We let the drivers inheriting this specify
  164     # whether thin provisioning is supported or not.
  165     _thin_provisioning_support = False
  166     _thick_provisioning_support = False
  167 
  168     def __init__(self, *args, **kwargs):
  169         super(RemoteFSDriver, self).__init__(*args, **kwargs)
  170         self.shares = {}
  171         self._mounted_shares = []
  172         self._execute_as_root = True
  173         self._is_voldb_empty_at_startup = kwargs.pop('is_vol_db_empty', None)
  174         self._supports_encryption = False
  175 
  176         if self.configuration:
  177             self.configuration.append_config_values(nas_opts)
  178             self.configuration.append_config_values(volume_opts)
  179 
  180     def check_for_setup_error(self):
  181         """Just to override parent behavior."""
  182         pass
  183 
  184     @utils.trace
  185     def initialize_connection(self, volume, connector):
  186         """Allow connection to connector and return connection info.
  187 
  188         :param volume: volume reference
  189         :param connector: connector reference
  190         """
  191         data = {'export': volume.provider_location,
  192                 'name': volume.name}
  193         if volume.provider_location in self.shares:
  194             data['options'] = self.shares[volume.provider_location]
  195         return {
  196             'driver_volume_type': self.driver_volume_type,
  197             'data': data,
  198             'mount_point_base': self._get_mount_point_base()
  199         }
  200 
  201     def do_setup(self, context):
  202         """Any initialization the volume driver does while starting."""
  203         super(RemoteFSDriver, self).do_setup(context)
  204 
  205         # Validate the settings for our secure file options.
  206         self.configuration.nas_secure_file_permissions = \
  207             self.configuration.nas_secure_file_permissions.lower()
  208         self.configuration.nas_secure_file_operations = \
  209             self.configuration.nas_secure_file_operations.lower()
  210         valid_secure_opts = ['auto', 'true', 'false']
  211         secure_options = {'nas_secure_file_permissions':
  212                           self.configuration.nas_secure_file_permissions,
  213                           'nas_secure_file_operations':
  214                           self.configuration.nas_secure_file_operations}
  215 
  216         LOG.debug('NAS config: %s', secure_options)
  217         for opt_name, opt_value in secure_options.items():
  218             if opt_value not in valid_secure_opts:
  219                 err_parms = {'name': opt_name, 'value': opt_value}
  220                 msg = _("NAS config '%(name)s=%(value)s' invalid. Must be "
  221                         "'auto', 'true', or 'false'") % err_parms
  222                 LOG.error(msg)
  223                 raise exception.InvalidConfigurationValue(msg)
  224 
  225     def _get_provisioned_capacity(self):
  226         """Returns the provisioned capacity.
  227 
  228         Get the sum of sizes of volumes, snapshots and any other
  229         files on the mountpoint.
  230         """
  231         provisioned_size = 0.0
  232         for share in self.shares.keys():
  233             mount_path = self._get_mount_point_for_share(share)
  234             out, _ = self._execute('du', '--bytes', '-s', mount_path,
  235                                    run_as_root=self._execute_as_root)
  236             provisioned_size += int(out.split()[0])
  237         return round(provisioned_size / units.Gi, 2)
  238 
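          # NOTE: Illustrative 'du --bytes -s <mount_path>' output consumed by
          # _get_provisioned_capacity() above; the first whitespace-separated
          # field is the byte count that gets summed (values and mount path
          # are placeholders):
          #
          #     32212254720    /mnt/0f1a2b3c9d8e7f6a5b4c3d2e1f0a9b8c
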
  239     def _get_mount_point_base(self):
  240         """Returns the mount point base for the remote fs.
  241 
  242            This method returns the mount point base for the
  243            specific remote fs. Override this method in the
  244            respective driver to return the entry to be used
  245            during attach/detach via brick in cinder. If not
  246            overridden, it returns None without raising an
  247            exception, so the driver keeps working in cases
  248            where brick is not used.
  249         """
  250         LOG.debug("Driver specific implementation needs to return"
  251                   " mount_point_base.")
  252         return None
  253 
  254     @staticmethod
  255     def _validate_state(current_state,
  256                         acceptable_states,
  257                         obj_description='volume',
  258                         invalid_exc=exception.InvalidVolume):
  259         if current_state not in acceptable_states:
  260             message = _('Invalid %(obj_description)s state. '
  261                         'Acceptable states for this operation: '
  262                         '%(acceptable_states)s. '
  263                         'Current %(obj_description)s state: '
  264                         '%(current_state)s.')
  265             raise invalid_exc(
  266                 message=message %
  267                 dict(obj_description=obj_description,
  268                      acceptable_states=acceptable_states,
  269                      current_state=current_state))
  270 
  271     @utils.trace
  272     def create_volume(self, volume):
  273         """Creates a volume.
  274 
  275         :param volume: volume reference
  276         :returns: provider_location update dict for database
  277         """
  278 
  279         if volume.encryption_key_id and not self._supports_encryption:
  280             message = _("Encryption is not yet supported.")
  281             raise exception.VolumeDriverException(message=message)
  282 
  283         LOG.debug('Creating volume %(vol)s', {'vol': volume.id})
  284         self._ensure_shares_mounted()
  285 
  286         volume.provider_location = self._find_share(volume)
  287 
  288         LOG.info('casted to %s', volume.provider_location)
  289 
  290         self._do_create_volume(volume)
  291 
  292         return {'provider_location': volume.provider_location}
  293 
  294     def _do_create_volume(self, volume):
  295         """Create a volume on given remote share.
  296 
  297         :param volume: volume reference
  298         """
  299         volume_path = self.local_path(volume)
  300         volume_size = volume.size
  301 
  302         encrypted = volume.encryption_key_id is not None
  303 
  304         if encrypted:
  305             encryption = volume_utils.check_encryption_provider(
  306                 self.db,
  307                 volume,
  308                 volume.obj_context)
  309 
  310             self._create_encrypted_volume_file(volume_path,
  311                                                volume_size,
  312                                                encryption,
  313                                                volume.obj_context)
  314         elif getattr(self.configuration,
  315                      self.driver_prefix + '_qcow2_volumes', False):
  316             # QCOW2 volumes are inherently sparse, so this setting
  317             # will override the _sparsed_volumes setting.
  318             self._create_qcow2_file(volume_path, volume_size)
  319         elif getattr(self.configuration,
  320                      self.driver_prefix + '_sparsed_volumes', False):
  321             self._create_sparsed_file(volume_path, volume_size)
  322         else:
  323             self._create_regular_file(volume_path, volume_size)
  324 
  325         self._set_rw_permissions(volume_path)
  326 
  327     def _ensure_shares_mounted(self):
  328         """Look for remote shares in the flags and mount them locally."""
  329         mounted_shares = []
  330 
  331         self._load_shares_config(getattr(self.configuration,
  332                                          self.driver_prefix +
  333                                          '_shares_config'))
  334 
  335         for share in self.shares:
  336             try:
  337                 self._ensure_share_mounted(share)
  338                 mounted_shares.append(share)
  339             except Exception as exc:
  340                 LOG.error('Exception during mounting %s', exc)
  341 
  342         self._mounted_shares = mounted_shares
  343 
  344         LOG.debug('Available shares %s', self._mounted_shares)
  345 
  346     @utils.trace
  347     def delete_volume(self, volume):
  348         """Deletes a logical volume.
  349 
  350         :param volume: volume reference
  351         """
  352 
  353         LOG.debug('Deleting volume %(vol)s, provider_location: %(loc)s',
  354                   {'vol': volume.id, 'loc': volume.provider_location})
  355         if not volume.provider_location:
  356             LOG.warning('Volume %s does not have '
  357                         'provider_location specified, '
  358                         'skipping', volume.name)
  359             return
  360 
  361         self._ensure_share_mounted(volume.provider_location)
  362 
  363         mounted_path = self.local_path(volume)
  364 
  365         self._delete(mounted_path)
  366 
  367     def ensure_export(self, ctx, volume):
  368         """Synchronously recreates an export for a logical volume."""
  369         self._ensure_share_mounted(volume.provider_location)
  370 
  371     def create_export(self, ctx, volume, connector):
  372         """Exports the volume.
  373 
  374         Can optionally return a dictionary of changes
  375         to the volume object to be persisted.
  376         """
  377         pass
  378 
  379     def remove_export(self, ctx, volume):
  380         """Removes an export for a logical volume."""
  381         pass
  382 
  383     def delete_snapshot(self, snapshot):
  384         """Delete snapshot.
  385 
  386         Do nothing for this driver, but allow manager to handle deletion
  387         of snapshot in error state.
  388         """
  389         pass
  390 
  391     def _delete(self, path):
  392         # Note(lpetrut): this method exists so that it can be overridden
  393         # for interoperability with Windows.
  394         self._execute('rm', '-f', path, run_as_root=self._execute_as_root)
  395 
  396     def _create_sparsed_file(self, path, size):
  397         """Creates a sparse file of a given size in GiB."""
  398         self._execute('truncate', '-s', '%sG' % size,
  399                       path, run_as_root=self._execute_as_root)
  400 
  401     def _create_regular_file(self, path, size):
  402         """Creates a regular file of given size in GiB."""
  403 
  404         block_size_mb = 1
  405         block_count = size * units.Gi // (block_size_mb * units.Mi)
  406 
  407         self._execute('dd', 'if=/dev/zero', 'of=%s' % path,
  408                       'bs=%dM' % block_size_mb,
  409                       'count=%d' % block_count,
  410                       run_as_root=self._execute_as_root)
  411 
  412     def _create_qcow2_file(self, path, size_gb):
  413         """Creates a QCOW2 file of a given size in GiB."""
  414 
  415         self._execute('qemu-img', 'create', '-f', 'qcow2',
  416                       '-o', 'preallocation=metadata',
  417                       path, str(size_gb * units.Gi),
  418                       run_as_root=self._execute_as_root)
  419 
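          # NOTE: For illustration only: for a 10 GiB volume the three file
          # creation helpers above roughly translate to the following commands
          # (the volume path is a placeholder):
          #
          #     truncate -s 10G /mnt/<share-hash>/volume-<id>
          #     dd if=/dev/zero of=/mnt/<share-hash>/volume-<id> bs=1M count=10240
          #     qemu-img create -f qcow2 -o preallocation=metadata \
          #         /mnt/<share-hash>/volume-<id> 10737418240
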
  420     def _create_encrypted_volume_file(self,
  421                                       path,
  422                                       size_gb,
  423                                       encryption,
  424                                       context):
  425         """Create an encrypted volume.
  426 
  427         This creates a qcow2 file with LUKS encryption at the given
  428         path, using the passphrase associated with the volume's key.
  429         """
  430 
  431         cipher_spec = image_utils.decode_cipher(encryption['cipher'],
  432                                                 encryption['key_size'])
  433 
  434         # TODO(enriquetaso): share this code w/ the RBD driver
  435         # Fetch the key associated with the volume and decode the passphrase
  436         keymgr = key_manager.API(CONF)
  437         key = keymgr.get(context, encryption['encryption_key_id'])
  438         passphrase = binascii.hexlify(key.get_encoded()).decode('utf-8')
  439 
  440         # create a file
  441         tmp_dir = volume_utils.image_conversion_dir()
  442 
  443         with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp_key:
  444             # TODO(enriquetaso): encrypt w/ aes256 cipher text
  445             # (qemu-img feature) ?
  446             with open(tmp_key.name, 'w') as f:
  447                 f.write(passphrase)
  448 
  449             self._execute(
  450                 'qemu-img', 'create', '-f', 'qcow2',
  451                 '-o',
  452                 'encrypt.format=luks,'
  453                 'encrypt.key-secret=sec1,'
  454                 'encrypt.cipher-alg=%(cipher_alg)s,'
  455                 'encrypt.cipher-mode=%(cipher_mode)s,'
  456                 'encrypt.ivgen-alg=%(ivgen_alg)s' % cipher_spec,
  457                 '--object', 'secret,id=sec1,format=raw,file=' + tmp_key.name,
  458                 path, str(size_gb * units.Gi),
  459                 run_as_root=self._execute_as_root)
  460 
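          # NOTE: Illustrative only: assuming decode_cipher() yields a cipher
          # spec of aes-256/xts/plain64, the command built above resembles the
          # following (the key file and volume path are placeholders):
          #
          #     qemu-img create -f qcow2 \
          #         -o encrypt.format=luks,encrypt.key-secret=sec1,encrypt.cipher-alg=aes-256,encrypt.cipher-mode=xts,encrypt.ivgen-alg=plain64 \
          #         --object secret,id=sec1,format=raw,file=/tmp/tmpkeyfile \
          #         /mnt/<share-hash>/volume-<id> 10737418240
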
  461     def _set_rw_permissions(self, path):
  462         """Sets access permissions for given NFS path.
  463 
  464         Volume file permissions are set based upon the value of
  465         secure_file_permissions: 'true' sets secure access permissions and
  466         'false' sets more open (insecure) access permissions.
  467 
  468         :param path: the volume file path.
  469         """
  470         if self.configuration.nas_secure_file_permissions == 'true':
  471             permissions = '660'
  472             LOG.debug('File path %(path)s is being set with permissions: '
  473                       '%(permissions)s',
  474                       {'path': path, 'permissions': permissions})
  475         else:
  476             permissions = 'ugo+rw'
  477             LOG.warning('%(path)s is being set with open permissions: '
  478                         '%(perm)s', {'path': path, 'perm': permissions})
  479 
  480         self._execute('chmod', permissions, path,
  481                       run_as_root=self._execute_as_root)
  482 
  483     def _set_rw_permissions_for_all(self, path):
  484         """Sets 666 permissions for the path."""
  485         self._execute('chmod', 'ugo+rw', path,
  486                       run_as_root=self._execute_as_root)
  487 
  488     def _set_rw_permissions_for_owner(self, path):
  489         """Sets read-write permissions to the owner for the path."""
  490         self._execute('chmod', 'u+rw', path,
  491                       run_as_root=self._execute_as_root)
  492 
  493     def local_path(self, volume):
  494         """Get volume path (locally mounted fs path) for given volume.
  495 
  496         :param volume: volume reference
  497         """
  498         remotefs_share = volume.provider_location
  499         return os.path.join(self._get_mount_point_for_share(remotefs_share),
  500                             volume.name)
  501 
  502     def copy_image_to_volume(self, context, volume, image_service, image_id):
  503         """Fetch the image from image_service and write it to the volume."""
  504 
  505         image_utils.fetch_to_raw(context,
  506                                  image_service,
  507                                  image_id,
  508                                  self.local_path(volume),
  509                                  self.configuration.volume_dd_blocksize,
  510                                  size=volume.size,
  511                                  run_as_root=self._execute_as_root)
  512 
  513         # NOTE (leseb): Set the virtual size of the image.
  514         # The raw conversion overwrote the destination file
  515         # (which had the correct size) with the fetched glance
  516         # image size, so the initial 'size' parameter was not
  517         # honored. Resize the image back to the size originally
  518         # requested by the user, then verify the final virtual
  519         # size below.
  520         image_utils.resize_image(self.local_path(volume), volume.size,
  521                                  run_as_root=self._execute_as_root)
  522 
  523         data = image_utils.qemu_img_info(self.local_path(volume),
  524                                          run_as_root=self._execute_as_root)
  525         virt_size = data.virtual_size // units.Gi
  526         if virt_size != volume.size:
  527             raise exception.ImageUnacceptable(
  528                 image_id=image_id,
  529                 reason=(_("Expected volume size was %d") % volume.size)
  530                 + (_(" but size is now %d") % virt_size))
  531 
  532     def copy_volume_to_image(self, context, volume, image_service, image_meta):
  533         """Copy the volume to the specified image."""
  534         volume_utils.upload_volume(context,
  535                                    image_service,
  536                                    image_meta,
  537                                    self.local_path(volume),
  538                                    volume,
  539                                    run_as_root=self._execute_as_root)
  540 
  541     def _read_config_file(self, config_file):
  542         # Returns list of lines in file
  543         with open(config_file) as f:
  544             return f.readlines()
  545 
  546     def _load_shares_config(self, share_file=None):
  547         self.shares = {}
  548 
  549         if all((self.configuration.nas_host,
  550                 self.configuration.nas_share_path)):
  551             LOG.debug('Using nas_host and nas_share_path configuration.')
  552 
  553             nas_host = self.configuration.nas_host
  554             nas_share_path = self.configuration.nas_share_path
  555 
  556             share_address = '%s:%s' % (nas_host, nas_share_path)
  557 
  558             if not re.match(self.SHARE_FORMAT_REGEX, share_address):
  559                 msg = (_("Share %s ignored due to invalid format. Must "
  560                          "be of form address:/export. Please check the "
  561                          "nas_host and nas_share_path settings.")
  562                        % share_address)
  563                 raise exception.InvalidConfigurationValue(msg)
  564 
  565             self.shares[share_address] = self.configuration.nas_mount_options
  566 
  567         elif share_file is not None:
  568             LOG.debug('Loading shares from %s.', share_file)
  569 
  570             for share in self._read_config_file(share_file):
  571                 # A configuration line may be either:
  572                 #  host:/vol_name
  573                 # or
  574                 #  host:/vol_name -o options=123,rw --other
  575                 if not share.strip():
  576                     # Skip blank or whitespace-only lines
  577                     continue
  578                 if share.startswith('#'):
  579                     continue
  580 
  581                 share_info = share.split(' ', 1)
  582                 # results in share_info =
  583                 #  [ 'address:/vol', '-o options=123,rw --other' ]
  584 
  585                 share_address = share_info[0].strip()
  586                 # Replace \040 with a space, to support paths with spaces
  587                 share_address = share_address.replace("\\040", " ")
  588                 share_opts = None
  589                 if len(share_info) > 1:
  590                     share_opts = share_info[1].strip()
  591 
  592                 if not re.match(self.SHARE_FORMAT_REGEX, share_address):
  593                     LOG.error("Share %s ignored due to invalid format. "
  594                               "Must be of form address:/export.",
  595                               share_address)
  596                     continue
  597 
  598                 self.shares[share_address] = share_opts
  599 
  600         LOG.debug("shares loaded: %s", self.shares)
  601 
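          # NOTE: Illustrative contents of a shares config file as parsed by
          # _load_shares_config() above (addresses and options are
          # placeholders):
          #
          #     # comment lines and blank lines are skipped
          #     192.168.1.10:/exports/vol1
          #     192.168.1.11:/exports/vol2 -o nfsvers=4.1,ro
          #
          # which would leave self.shares as:
          #
          #     {'192.168.1.10:/exports/vol1': None,
          #      '192.168.1.11:/exports/vol2': '-o nfsvers=4.1,ro'}
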
  602     def _get_mount_point_for_share(self, path):
  603         raise NotImplementedError()
  604 
  605     def terminate_connection(self, volume, connector, **kwargs):
  606         """Disallow connection from connector."""
  607         pass
  608 
  609     def _update_volume_stats(self):
  610         """Retrieve stats info from volume group."""
  611 
  612         data = {}
  613         backend_name = self.configuration.safe_get('volume_backend_name')
  614         data['volume_backend_name'] = backend_name or self.volume_backend_name
  615         data['vendor_name'] = 'Open Source'
  616         data['driver_version'] = self.get_version()
  617         data['storage_protocol'] = self.driver_volume_type
  618 
  619         self._ensure_shares_mounted()
  620 
  621         global_capacity = 0
  622         global_free = 0
  623         for share in self._mounted_shares:
  624             capacity, free, used = self._get_capacity_info(share)
  625             global_capacity += capacity
  626             global_free += free
  627 
  628         data['total_capacity_gb'] = global_capacity / float(units.Gi)
  629         data['free_capacity_gb'] = global_free / float(units.Gi)
  630         data['reserved_percentage'] = self.configuration.reserved_percentage
  631         data['QoS_support'] = False
  632         self._stats = data
  633 
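          # NOTE: Illustrative _stats dict produced by _update_volume_stats()
          # above for a subclass reporting a single mounted 100 GiB share with
          # 70 GiB free; backend name, protocol and version are placeholders:
          #
          #     {'volume_backend_name': 'nfs-backend-1',
          #      'vendor_name': 'Open Source',
          #      'driver_version': '1.4.0',
          #      'storage_protocol': 'nfs',
          #      'total_capacity_gb': 100.0,
          #      'free_capacity_gb': 70.0,
          #      'reserved_percentage': 0,
          #      'QoS_support': False}
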
  634     def _get_capacity_info(self, share):
  635         raise NotImplementedError()
  636 
  637     def _find_share(self, volume):
  638         raise NotImplementedError()
  639 
  640     def _ensure_share_mounted(self, share):
  641         raise NotImplementedError()
  642 
  643     def secure_file_operations_enabled(self):
  644         """Determine if driver is operating in Secure File Operations mode.
  645 
  646         The Cinder Volume driver needs to query if this driver is operating
  647         in a secure file mode; check our nas_secure_file_operations flag.
  648         """
  649         if self.configuration.nas_secure_file_operations == 'true':
  650             return True
  651         return False
  652 
  653     def set_nas_security_options(self, is_new_cinder_install):
  654         """Determine the setting to use for Secure NAS options.
  655 
  656         This method must be overridden by child classes wishing to use
  657         secure NAS file operations. This base method will set the NAS
  658         security options to false.
  659         """
  660         doc_html = ("https://docs.openstack.org/cinder/latest/admin"
  661                     "/blockstorage-nfs-backend.html")
  662         self.configuration.nas_secure_file_operations = 'false'
  663         LOG.warning("The NAS file operations will be run as root: "
  664                     "allowing root level access at the storage backend. "
  665                     "This is considered an insecure NAS environment. "
  666                     "Please see %s for information on a secure NAS "
  667                     "configuration.",
  668                     doc_html)
  669         self.configuration.nas_secure_file_permissions = 'false'
  670         LOG.warning("The NAS file permissions mode will be 666 (allowing "
  671                     "other/world read & write access). This is considered "
  672                     "an insecure NAS environment. Please see %s for "
  673                     "information on a secure NFS configuration.",
  674                     doc_html)
  675 
  676     def _determine_nas_security_option_setting(self, nas_option, mount_point,
  677                                                is_new_cinder_install):
  678         """Determine NAS security option setting when 'auto' is assigned.
  679 
  680         This method determines the final 'true'/'false' setting of a NAS
  681         security option when the default value of 'auto' has been detected.
  682         If the nas option isn't 'auto' then its current value is used.
  683 
  684         :param nas_option: The NAS security option value loaded from config.
  685         :param mount_point: Mount where indicator file is written.
  686         :param is_new_cinder_install: boolean for new Cinder installation.
  687         :return string: 'true' or 'false' for new option setting.
  688         """
  689         if nas_option == 'auto':
  690             # For auto detection, we first check to see if we have been
  691             # through this process before by checking for the existence of
  692             # the Cinder secure environment indicator file.
  693             file_name = '.cinderSecureEnvIndicator'
  694             file_path = os.path.join(mount_point, file_name)
  695             if os.path.isfile(file_path):
  696                 nas_option = 'true'
  697                 LOG.info('Cinder secure environment '
  698                          'indicator file exists.')
  699             else:
  700                 # The indicator file does not exist. If it is a new
  701                 # installation, set to 'true' and create the indicator file.
  702                 if is_new_cinder_install:
  703                     nas_option = 'true'
  704                     try:
  705                         with open(file_path, 'w') as fh:
  706                             fh.write('Detector file for Cinder secure '
  707                                      'environment usage.\n')
  708                             fh.write('Do not delete this file.\n')
  709 
  710                         # Set the permissions on our special marker file to
  711                         # protect from accidental removal (owner write only).
  712                         self._execute('chmod', '640', file_path,
  713                                       run_as_root=self._execute_as_root)
  714                         LOG.info('New Cinder secure environment indicator'
  715                                  ' file created at path %s.', file_path)
  716                     except IOError as err:
  717                         LOG.error('Failed to create Cinder secure '
  718                                   'environment indicator file: %s',
  719                                   err)
  720                         if err.errno == errno.EACCES:
  721                             LOG.warning('Reverting to non-secure mode. Adjust '
  722                                         'permissions at %s to allow the '
  723                                         'cinder volume service write access '
  724                                         'to use secure mode.',
  725                                         mount_point)
  726                             nas_option = 'false'
  727                 else:
  728                     # For existing installs, we default to 'false'. The
  729                     # admin can always set the option at the driver config.
  730                     nas_option = 'false'
  731 
  732         return nas_option
  733 
  734 
  735 class RemoteFSSnapDriverBase(RemoteFSDriver):
  736     """Base class for remotefs drivers implementing qcow2 snapshots.
  737 
  738        Driver must implement:
  739          _local_volume_dir(self, volume)
  740     """
  741 
  742     _VALID_IMAGE_EXTENSIONS = []
  743     # The following flag may be overridden by the concrete drivers in order
  744     # to avoid using temporary volume snapshots when creating volume clones,
  745     # when possible.
  746 
  747     _always_use_temp_snap_when_cloning = True
  748 
  749     def __init__(self, *args, **kwargs):
  750         self._remotefsclient = None
  751         self.base = None
  752         self._nova = None
  753         super(RemoteFSSnapDriverBase, self).__init__(*args, **kwargs)
  754 
  755     def do_setup(self, context):
  756         super(RemoteFSSnapDriverBase, self).do_setup(context)
  757 
  758         self._nova = compute.API()
  759 
  760     def snapshot_revert_use_temp_snapshot(self):
  761         # Considering that RemoteFS based drivers use COW images
  762         # for storing snapshots, resulting in chains of such
  763         # images, creating a backup snapshot when reverting one
  764         # is not actually helpful.
  765         return False
  766 
  767     def _local_volume_dir(self, volume):
  768         share = volume.provider_location
  769         local_dir = self._get_mount_point_for_share(share)
  770         return local_dir
  771 
  772     def _local_path_volume(self, volume):
  773         path_to_disk = os.path.join(
  774             self._local_volume_dir(volume),
  775             volume.name)
  776 
  777         return path_to_disk
  778 
  779     def _get_new_snap_path(self, snapshot):
  780         vol_path = self.local_path(snapshot.volume)
  781         snap_path = '%s.%s' % (vol_path, snapshot.id)
  782         return snap_path
  783 
  784     def _local_path_volume_info(self, volume):
  785         return '%s%s' % (self.local_path(volume), '.info')
  786 
  787     def _read_file(self, filename):
  788         """This method is to make it easier to stub out code for testing.
  789 
  790         Returns a string representing the contents of the file.
  791         """
  792 
  793         with open(filename, 'r') as f:
  794             return f.read()
  795 
  796     def _write_info_file(self, info_path, snap_info):
  797         if 'active' not in snap_info.keys():
  798             msg = _("'active' must be present when writing snap_info.")
  799             raise exception.RemoteFSException(msg)
  800 
  801         if not (os.path.exists(info_path) or os.name == 'nt'):
  802             # We're not managing file permissions on Windows.
  803             # Plus, 'truncate' is not available.
  804             self._execute('truncate', "-s0", info_path,
  805                           run_as_root=self._execute_as_root)
  806             self._set_rw_permissions(info_path)
  807 
  808         with open(info_path, 'w') as f:
  809             json.dump(snap_info, f, indent=1, sort_keys=True)
  810 
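          # NOTE: Illustrative layout of the '<volume-name>.info' file written
          # above; besides 'active', each key is a snapshot id mapping to the
          # qcow2 file holding that snapshot's data (ids and names are
          # placeholders):
          #
          #     {
          #      "active": "volume-0001.<snap-id-2>",
          #      "<snap-id-1>": "volume-0001.<snap-id-1>",
          #      "<snap-id-2>": "volume-0001.<snap-id-2>"
          #     }
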
  811     def _qemu_img_info_base(self, path, volume_name, basedir,
  812                             ext_bf_template=None,
  813                             force_share=False,
  814                             run_as_root=False):
  815         """Sanitize image_utils' qemu_img_info.
  816 
  817         This code expects to deal only with relative filenames.
  818 
  819         :param path: Path to the image file whose info is fetched
  820         :param volume_name: Name of the volume
  821         :param basedir: Path to backing files directory
  822         :param ext_bf_template: Alt. string.Template for allowed backing files
  823         :type ext_bf_template: BackingFileTemplate
  824         :param force_share: Whether to force fetching img info for images in use
  825         :param run_as_root: Whether to run with privileged permissions or not
  826         """
  827 
  828         run_as_root = run_as_root or self._execute_as_root
  829 
  830         info = image_utils.qemu_img_info(path,
  831                                          force_share=force_share,
  832                                          run_as_root=run_as_root)
  833         if info.image:
  834             info.image = os.path.basename(info.image)
  835         if info.backing_file:
  836             if self._VALID_IMAGE_EXTENSIONS:
  837                 valid_ext = r'(\.(%s))?' % '|'.join(
  838                     self._VALID_IMAGE_EXTENSIONS)
  839             else:
  840                 valid_ext = ''
  841 
  842             if ext_bf_template:
  843                 backing_file_template = ext_bf_template.substitute(
  844                     basedir=basedir, volname=volume_name, valid_ext=valid_ext
  845                 )
  846                 LOG.debug("Fetching qemu-img info with special "
  847                           "backing_file_template: %(bft)s", {
  848                               "bft": backing_file_template
  849                           })
  850             else:
  851                 backing_file_template = \
  852                     "(%(basedir)s/[0-9a-f]+/)?%" \
  853                     "(volname)s(.(tmp-snap-)?[0-9a-f-]+)?%(valid_ext)s$" % {
  854                         'basedir': basedir,
  855                         'volname': volume_name,
  856                         'valid_ext': valid_ext,
  857                     }
  858             if not re.match(backing_file_template, info.backing_file,
  859                             re.IGNORECASE):
  860                 raise exception.RemoteFSInvalidBackingFile(
  861                     path=path, backing_file=info.backing_file)
  862 
  863             info.backing_file = os.path.basename(info.backing_file)
  864 
  865         return info
  866 
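          # NOTE: For illustration, with basedir '/mnt', volume_name
          # 'volume-0001' and _VALID_IMAGE_EXTENSIONS = ['qcow2'], the default
          # backing_file_template above accepts backing file names such as
          # (snapshot ids abbreviated):
          #
          #     volume-0001
          #     volume-0001.qcow2
          #     volume-0001.tmp-snap-1f2e3d4c
          #     /mnt/0f1a2b3c/volume-0001.1f2e3d4c.qcow2
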
  867     def _qemu_img_info(self, path, volume_name):
  868         raise NotImplementedError()
  869 
  870     def _img_commit(self, path, passphrase_file=None, backing_file=None):
  871         # TODO(eharney): this is not using the correct permissions for
  872         # NFS snapshots
  873         #  It needs to run as root for volumes attached to instances, but
  874         #  does not when in secure mode.
  875         cmd = ['qemu-img', 'commit']
  876         if passphrase_file:
  877             obj = ['--object',
  878                    'secret,id=s0,format=raw,file=%s' % passphrase_file]
  879             image_opts = ['--image-opts']
  880 
  881             src_opts = \
  882                 "file.filename=%(filename)s,encrypt.format=luks," \
  883                 "encrypt.key-secret=s0,backing.file.filename=%(backing)s," \
  884                 "backing.encrypt.key-secret=s0" % {
  885                     'filename': path,
  886                     'backing': backing_file,
  887                 }
  888 
  889             path_no_to_delete = ['-d', src_opts]
  890             cmd += obj + image_opts + path_no_to_delete
  891         else:
  892             cmd += ['-d', path]
  893 
  894         self._execute(*cmd, run_as_root=self._execute_as_root)
  895         self._delete(path)
  896 
  897     def _rebase_img(self, image, backing_file, volume_format,
  898                     passphrase_file=None):
  899         # qemu-img rebase must run as root, because it reads from the
  900         # backing file, which will be owned by qemu:qemu if attached to an
  901         # instance.
  902         # TODO(erlon): Sanity check this.
  903         command = ['qemu-img', 'rebase', '-u']
  904         # if encrypted
  905         if passphrase_file:
  906             objectdef = "secret,id=s0,file=%s" % passphrase_file
  907             filename = "encrypt.key-secret=s0,"\
  908                 "file.filename=%(filename)s" % {'filename': image}
  909 
  910             command += ['--object', objectdef, '-b', backing_file,
  911                         '-F', volume_format, '--image-opts', filename]
  912         # not encrypted
  913         else:
  914             command += ['-b', backing_file, image, '-F', volume_format]
  915 
  916         self._execute(*command, run_as_root=self._execute_as_root)
  917 
  918     def _read_info_file(self, info_path, empty_if_missing=False):
  919         """Return dict of snapshot information.
  920 
  921            :param info_path: path to file
  922            :param empty_if_missing: True=return empty dict if no file
  923         """
  924 
  925         if not os.path.exists(info_path):
  926             if empty_if_missing is True:
  927                 return {}
  928 
  929         return json.loads(self._read_file(info_path))
  930 
  931     def _get_higher_image_path(self, snapshot):
  932         volume = snapshot.volume
  933         info_path = self._local_path_volume_info(volume)
  934         snap_info = self._read_info_file(info_path)
  935 
  936         snapshot_file = snap_info[snapshot.id]
  937         active_file = self.get_active_image_from_info(volume)
  938         active_file_path = os.path.join(self._local_volume_dir(volume),
  939                                         active_file)
  940         backing_chain = self._get_backing_chain_for_path(
  941             volume, active_file_path)
  942         higher_file = next((os.path.basename(f['filename'])
  943                             for f in backing_chain
  944                             if utils.paths_normcase_equal(
  945                                 f.get('backing-filename', ''),
  946                                 snapshot_file)),
  947                            None)
  948         return higher_file
  949 
  950     def _get_backing_chain_for_path(self, volume, path):
  951         """Returns list of dicts containing backing-chain information.
  952 
  953         Includes 'filename', and 'backing-filename' for each
  954         applicable entry.
  955 
  956         Consider converting this to use --backing-chain and --output=json
  957         when environment supports qemu-img 1.5.0.
  958 
  959         :param volume: volume reference
  960         :param path: path to image file at top of chain
  961 
  962         """
  963 
  964         output = []
  965 
  966         info = self._qemu_img_info(path, volume.name)
  967         new_info = {}
  968         new_info['filename'] = os.path.basename(path)
  969         new_info['backing-filename'] = info.backing_file
  970 
  971         output.append(new_info)
  972 
  973         while new_info['backing-filename']:
  974             filename = new_info['backing-filename']
  975             path = os.path.join(self._local_volume_dir(volume), filename)
  976             info = self._qemu_img_info(path, volume.name)
  977             backing_filename = info.backing_file
  978             new_info = {}
  979             new_info['filename'] = filename
  980             new_info['backing-filename'] = backing_filename
  981 
  982             output.append(new_info)
  983 
  984         return output
  985 
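          # NOTE: Illustrative return value of _get_backing_chain_for_path()
          # for a volume with two snapshots (ids abbreviated); the final entry
          # is the base volume file, which has no backing file:
          #
          #     [{'filename': 'volume-0001.9c3b', 'backing-filename': 'volume-0001.6f2a'},
          #      {'filename': 'volume-0001.6f2a', 'backing-filename': 'volume-0001'},
          #      {'filename': 'volume-0001', 'backing-filename': None}]
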
  986     def _get_hash_str(self, base_str):
  987         """Return a string that represents hash of base_str.
  988 
  989         Returns string in a hex format.
  990         """
  991         if isinstance(base_str, six.text_type):
  992             base_str = base_str.encode('utf-8')
  993         return hashlib.md5(base_str).hexdigest()
  994 
  995     def _get_mount_point_for_share(self, share):
  996         """Return mount point for share.
  997 
  998         :param share: example 172.18.194.100:/var/fs
  999         """
 1000         return self._remotefsclient.get_mount_point(share)
 1001 
 1002     def _get_available_capacity(self, share):
 1003         """Calculate available space on the share.
 1004 
 1005         :param share: example 172.18.194.100:/var/fs
 1006         """
 1007         mount_point = self._get_mount_point_for_share(share)
 1008 
 1009         out, _ = self._execute('df', '--portability', '--block-size', '1',
 1010                                mount_point,
 1011                                run_as_root=self._execute_as_root)
 1012         out = out.splitlines()[1]
 1013 
 1014         size = int(out.split()[1])
 1015         available = int(out.split()[3])
 1016 
 1017         return available, size
 1018 
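          # NOTE: Illustrative 'df --portability --block-size 1' output parsed
          # by _get_available_capacity() above; field 2 of the data row is the
          # total size and field 4 the available bytes (values are
          # placeholders):
          #
          #     Filesystem            1-blocks         Used    Available Capacity Mounted on
          #     192.168.1.10:/exports 107374182400 32212254720 75161927680   30% /mnt/0f1a2b3c
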
 1019     def _get_capacity_info(self, remotefs_share):
 1020         available, size = self._get_available_capacity(remotefs_share)
 1021         return size, available, size - available
 1022 
 1023     def _get_mount_point_base(self):
 1024         return self.base
 1025 
 1026     def _copy_volume_to_image(self, context, volume, image_service,
 1027                               image_meta, store_id=None):
 1028         """Copy the volume to the specified image."""
 1029 
 1030         # If snapshots exist, flatten to a temporary image, and upload it
 1031 
 1032         active_file = self.get_active_image_from_info(volume)
 1033         active_file_path = os.path.join(self._local_volume_dir(volume),
 1034                                         active_file)
 1035         info = self._qemu_img_info(active_file_path, volume.name)
 1036         backing_file = info.backing_file
 1037 
 1038         root_file_fmt = info.file_format
 1039 
 1040         tmp_params = {
 1041             'prefix': '%s.temp_image.%s' % (volume.id, image_meta['id']),
 1042             'suffix': '.img'
 1043         }
 1044         with image_utils.temporary_file(**tmp_params) as temp_path:
 1045             if backing_file or (root_file_fmt != 'raw'):
 1046                 # Convert due to snapshots
 1047                 # or volume data not being stored in raw format
 1048                 #  (upload_volume assumes raw format input)
 1049                 image_utils.convert_image(active_file_path, temp_path, 'raw',
 1050                                           run_as_root=self._execute_as_root)
 1051                 upload_path = temp_path
 1052             else:
 1053                 upload_path = active_file_path
 1054 
 1055             volume_utils.upload_volume(context,
 1056                                        image_service,
 1057                                        image_meta,
 1058                                        upload_path,
 1059                                        volume,
 1060                                        run_as_root=self._execute_as_root)
 1061 
 1062     def get_active_image_from_info(self, volume):
 1063         """Returns filename of the active image from the info file."""
 1064 
 1065         info_file = self._local_path_volume_info(volume)
 1066 
 1067         snap_info = self._read_info_file(info_file, empty_if_missing=True)
 1068 
 1069         if not snap_info:
 1070             # No info file = no snapshots exist
 1071             vol_path = os.path.basename(self.local_path(volume))
 1072             return vol_path
 1073 
 1074         return snap_info['active']
 1075 
 1076     def _local_path_active_image(self, volume):
 1077         active_fname = self.get_active_image_from_info(volume)
 1078         vol_dir = self._local_volume_dir(volume)
 1079 
 1080         active_fpath = os.path.join(vol_dir, active_fname)
 1081         return active_fpath
 1082 
 1083     def _get_snapshot_backing_file(self, snapshot):
 1084         info_path = self._local_path_volume_info(snapshot.volume)
 1085         snap_info = self._read_info_file(info_path)
 1086         vol_dir = self._local_volume_dir(snapshot.volume)
 1087 
 1088         forward_file = snap_info[snapshot.id]
 1089         forward_path = os.path.join(vol_dir, forward_file)
 1090 
 1091         # Find the file which backs this file; it represents the point
 1092         # at which this snapshot was created.
 1093         img_info = self._qemu_img_info(forward_path)
 1094         return img_info.backing_file
 1095 
 1096     def _snapshots_exist(self, volume):
 1097         if not volume.provider_location:
 1098             return False
 1099 
 1100         active_fpath = self._local_path_active_image(volume)
 1101         base_vol_path = self.local_path(volume)
 1102 
 1103         return not utils.paths_normcase_equal(active_fpath, base_vol_path)
 1104 
 1105     def _is_volume_attached(self, volume):
 1106         return volume.attach_status == fields.VolumeAttachStatus.ATTACHED
 1107 
 1108     def _create_cloned_volume(self, volume, src_vref, context):
 1109         LOG.info('Cloning volume %(src)s to volume %(dst)s',
 1110                  {'src': src_vref.id,
 1111                   'dst': volume.id})
 1112 
 1113         acceptable_states = ['available', 'backing-up', 'downloading']
 1114         self._validate_state(src_vref.status,
 1115                              acceptable_states,
 1116                              obj_description='source volume')
 1117 
 1118         volume_name = CONF.volume_name_template % volume.id
 1119 
 1120         # Create fake volume and snapshot objects
 1121         vol_attrs = ['provider_location', 'size', 'id', 'name', 'status',
 1122                      'volume_type', 'metadata', 'obj_context']
 1123         Volume = collections.namedtuple('Volume', vol_attrs)
 1124         volume_info = Volume(provider_location=src_vref.provider_location,
 1125                              size=src_vref.size,
 1126                              id=volume.id,
 1127                              name=volume_name,
 1128                              status=src_vref.status,
 1129                              volume_type=src_vref.volume_type,
 1130                              metadata=src_vref.metadata,
 1131                              obj_context=volume.obj_context)
 1132 
 1133         if (self._always_use_temp_snap_when_cloning or
 1134                 self._snapshots_exist(src_vref)):
 1135             kwargs = {
 1136                 'volume_id': src_vref.id,
 1137                 'user_id': context.user_id,
 1138                 'project_id': context.project_id,
 1139                 'status': fields.SnapshotStatus.CREATING,
 1140                 'progress': '0%',
 1141                 'volume_size': src_vref.size,
 1142                 'display_name': 'tmp-snap-%s' % volume.id,
 1143                 'display_description': None,
 1144                 'volume_type_id': src_vref.volume_type_id,
 1145                 'encryption_key_id': src_vref.encryption_key_id,
 1146             }
 1147             temp_snapshot = objects.Snapshot(context=context,
 1148                                              **kwargs)
 1149             temp_snapshot.create()
 1150 
 1151             self._create_snapshot(temp_snapshot)
 1152             try:
 1153                 self._copy_volume_from_snapshot(
 1154                     temp_snapshot,
 1155                     volume_info,
 1156                     volume.size,
 1157                     src_encryption_key_id=src_vref.encryption_key_id,
 1158                     new_encryption_key_id=volume.encryption_key_id)
 1159 
 1160                 # remove temp snapshot after the cloning is done
 1161                 temp_snapshot.status = fields.SnapshotStatus.DELETING
 1162                 temp_snapshot.context = context.elevated()
 1163                 temp_snapshot.save()
 1164             finally:
 1165                 self._delete_snapshot(temp_snapshot)
 1166                 temp_snapshot.destroy()
 1167         else:
 1168             self._copy_volume_image(self.local_path(src_vref),
 1169                                     self.local_path(volume_info))
 1170             self._extend_volume(volume_info, volume.size)
 1171 
 1172         return {'provider_location': src_vref.provider_location}
 1173 
 1174     def _copy_volume_image(self, src_path, dest_path):
 1175         shutil.copyfile(src_path, dest_path)
 1176         self._set_rw_permissions(dest_path)
 1177 
 1178     def _delete_stale_snapshot(self, snapshot):
 1179         info_path = self._local_path_volume_info(snapshot.volume)
 1180         snap_info = self._read_info_file(info_path)
 1181 
 1182         snapshot_file = snap_info[snapshot.id]
 1183         active_file = self.get_active_image_from_info(snapshot.volume)
 1184         snapshot_path = os.path.join(
 1185             self._local_volume_dir(snapshot.volume), snapshot_file)
 1186         if utils.paths_normcase_equal(snapshot_file, active_file):
 1187             return
 1188 
 1189         LOG.info('Deleting stale snapshot: %s', snapshot.id)
 1190         self._delete(snapshot_path)
 1191         del(snap_info[snapshot.id])
 1192         self._write_info_file(info_path, snap_info)
 1193 
 1194     def _delete_snapshot(self, snapshot):
 1195         """Delete a snapshot.
 1196 
 1197         If volume status is 'available', delete snapshot here in Cinder
 1198         using qemu-img.
 1199 
 1200         If volume status is 'in-use', calculate what qcow2 files need to
 1201         merge, and call to Nova to perform this operation.
 1202 
 1203         :raises: InvalidVolume if status not acceptable
 1204         :raises: RemoteFSException(msg) if operation fails
 1205         :returns: None
 1206 
 1207         """
 1208         LOG.debug('Deleting %(type)s snapshot %(snap)s of volume %(vol)s',
 1209                   {'snap': snapshot.id, 'vol': snapshot.volume.id,
 1210                    'type': ('online'
 1211                             if self._is_volume_attached(snapshot.volume)
 1212                             else 'offline')})
 1213 
 1214         volume_status = snapshot.volume.status
 1215         acceptable_states = ['available', 'in-use', 'backing-up', 'deleting',
 1216                              'downloading']
 1217         self._validate_state(volume_status, acceptable_states)
 1218 
 1219         vol_path = self._local_volume_dir(snapshot.volume)
 1220         volume_path = os.path.join(vol_path, snapshot.volume.name)
 1221 
 1222         # Determine the true snapshot file for this snapshot
 1223         # based on the .info file
 1224         info_path = self._local_path_volume_info(snapshot.volume)
 1225         snap_info = self._read_info_file(info_path, empty_if_missing=True)
 1226 
 1227         if snapshot.id not in snap_info:
 1228             # If the snapshot has no entry in the info file, there is no
 1229             # snapshot file on the share, so do not attempt to delete.
 1230             # (This happens, for example, if snapshot_create failed due to lack
 1231             # of permission to write to the share.)
 1232             LOG.info('Snapshot record for %s is not present, allowing '
 1233                      'snapshot_delete to proceed.', snapshot.id)
 1234             return
 1235 
 1236         snapshot_file = snap_info[snapshot.id]
 1237         LOG.debug('snapshot_file for this snap is: %s', snapshot_file)
 1238         snapshot_path = os.path.join(
 1239             self._local_volume_dir(snapshot.volume),
 1240             snapshot_file)
 1241 
 1242         snapshot_path_img_info = self._qemu_img_info(
 1243             snapshot_path,
 1244             snapshot.volume.name)
 1245 
 1246         base_file = snapshot_path_img_info.backing_file
 1247         if base_file is None:
 1248             # There should always be at least the original volume
 1249             # file as base.
 1250             LOG.warning('No backing file found for %s, allowing '
 1251                         'snapshot to be deleted.', snapshot_path)
 1252 
 1253             # Snapshot may be stale, so just delete it and update the
 1254             # info file instead of blocking
 1255             return self._delete_stale_snapshot(snapshot)
 1256 
 1257         base_path = os.path.join(vol_path, base_file)
 1258         base_file_img_info = self._qemu_img_info(base_path,
 1259                                                  snapshot.volume.name)
 1260 
 1261         # Find what file has this as its backing file
 1262         active_file = self.get_active_image_from_info(snapshot.volume)
 1263 
 1264         if self._is_volume_attached(snapshot.volume):
 1265             # Online delete
 1266             context = snapshot._context
 1267 
 1268             new_base_file = base_file_img_info.backing_file
 1269 
 1270             base_id = None
 1271             for key, value in snap_info.items():
 1272                 if utils.paths_normcase_equal(value,
 1273                                               base_file) and key != 'active':
 1274                     base_id = key
 1275                     break
 1276             if base_id is None:
 1277                 # This means we are deleting the oldest snapshot
 1278                 LOG.debug('No base_id found for %(file)s',
 1279                           {'file': snapshot_file})
 1280 
 1281             online_delete_info = {
 1282                 'active_file': active_file,
 1283                 'snapshot_file': snapshot_file,
 1284                 'base_file': base_file,
 1285                 'base_id': base_id,
 1286                 'new_base_file': new_base_file
 1287             }
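                  # For illustration only (hypothetical filenames): deleting
                  # snapshot 'aaaa' from the attached chain
                  # volume-1234 <- volume-1234.aaaa <- volume-1234.bbbb
                  # would yield roughly:
                  #   {'active_file': 'volume-1234.bbbb',
                  #    'snapshot_file': 'volume-1234.aaaa',
                  #    'base_file': 'volume-1234',
                  #    'base_id': None,
                  #    'new_base_file': None}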
 1288 
 1289             return self._delete_snapshot_online(context,
 1290                                                 snapshot,
 1291                                                 online_delete_info)
 1292 
 1293         encrypted = snapshot.encryption_key_id is not None
 1294 
 1295         if encrypted:
 1296             keymgr = key_manager.API(CONF)
 1297             encryption_key = snapshot.encryption_key_id
 1298             new_key = keymgr.get(snapshot.obj_context, encryption_key)
 1299             src_passphrase = \
 1300                 binascii.hexlify(new_key.get_encoded()).decode('utf-8')
 1301 
 1302             tmp_dir = volume_utils.image_conversion_dir()
 1303 
 1304         if utils.paths_normcase_equal(snapshot_file, active_file):
 1305             # There is no top file
 1306             #      T0       |        T1         |
 1307             #     base      |   snapshot_file   | None
 1308             # (guaranteed to|  (being deleted,  |
 1309             #    exist)     |  committed down)  |
 1310             if encrypted:
 1311                 with tempfile.NamedTemporaryFile(prefix='luks_',
 1312                                                  dir=tmp_dir) as src_file:
 1313                     with open(src_file.name, 'w') as f:
 1314                         f.write(src_passphrase)
 1315                     self._img_commit(snapshot_path,
 1316                                      passphrase_file=src_file.name,
 1317                                      backing_file=volume_path)
 1318             else:
 1319                 self._img_commit(snapshot_path)
 1320             # Active file has changed
 1321             snap_info['active'] = base_file
 1322         else:
 1323             #      T0        |      T1         |     T2         |      T3
 1324             #     base       |  snapshot_file  |  higher_file   | highest_file
 1325             # (guaranteed to | (being deleted, | (guaranteed to |  (may exist)
 1326             #   exist, not   | committed down) |  exist, needs  |
 1327             #   used here)   |                 |   ptr update)  |
 1328 
 1329             # This file is guaranteed to exist since we aren't operating on
 1330             # the active file.
 1331             higher_file = self._get_higher_image_path(snapshot)
 1332             if higher_file is None:
 1333                 msg = _('No file found with %s as backing file.') %\
 1334                     snapshot_file
 1335                 raise exception.RemoteFSException(msg)
 1336 
 1337             higher_id = next((i for i in snap_info
 1338                               if utils.paths_normcase_equal(snap_info[i],
 1339                                                             higher_file)
 1340                               and i != 'active'),
 1341                              None)
 1342             if higher_id is None:
 1343                 msg = _('No snap found with %s as backing file.') %\
 1344                     higher_file
 1345                 raise exception.RemoteFSException(msg)
 1346 
 1347             if encrypted:
 1348                 with tempfile.NamedTemporaryFile(prefix='luks_',
 1349                                                  dir=tmp_dir) as src_file:
 1350                     with open(src_file.name, 'w') as f:
 1351                         f.write(src_passphrase)
 1352                     self._img_commit(snapshot_path,
 1353                                      passphrase_file=src_file.name,
 1354                                      backing_file=volume_path)
 1355 
 1356                     higher_file_path = os.path.join(vol_path, higher_file)
 1357                     base_file_fmt = base_file_img_info.file_format
 1358                     self._rebase_img(higher_file_path, volume_path,
 1359                                      base_file_fmt, src_file.name)
 1360             else:
 1361                 self._img_commit(snapshot_path)
 1362 
 1363                 higher_file_path = os.path.join(vol_path, higher_file)
 1364                 base_file_fmt = base_file_img_info.file_format
 1365                 self._rebase_img(higher_file_path, base_file, base_file_fmt)
 1366 
 1367         # Remove snapshot_file from info
 1368         del snap_info[snapshot.id]
 1369         self._write_info_file(info_path, snap_info)
 1370 
 1371     def _create_volume_from_snapshot(self, volume, snapshot):
 1372         """Creates a volume from a snapshot.
 1373 
 1374         Snapshot must not be the active snapshot. (offline)
 1375         """
 1376 
 1377         LOG.debug('Creating volume %(vol)s from snapshot %(snap)s',
 1378                   {'vol': volume.id, 'snap': snapshot.id})
 1379 
 1380         status = snapshot.status
 1381         acceptable_states = ['available', 'backing-up']
 1382         self._validate_state(status, acceptable_states,
 1383                              obj_description='snapshot',
 1384                              invalid_exc=exception.InvalidSnapshot)
 1385 
 1386         self._ensure_shares_mounted()
 1387 
 1388         volume.provider_location = self._find_share(volume)
 1389 
 1390         self._do_create_volume(volume)
 1391 
 1392         self._copy_volume_from_snapshot(snapshot,
 1393                                         volume,
 1394                                         volume.size,
 1395                                         snapshot.volume.encryption_key_id,
 1396                                         volume.encryption_key_id)
 1397 
 1398         return {'provider_location': volume.provider_location}
 1399 
 1400     def _copy_volume_from_snapshot(self, snapshot, volume, volume_size,
 1401                                    src_encryption_key_id=None,
 1402                                    new_encryption_key_id=None):
 1403         raise NotImplementedError()
 1404 
 1405     def _do_create_snapshot(self, snapshot, backing_filename,
 1406                             new_snap_path):
 1407         """Create a QCOW2 file backed by another file.
 1408 
 1409         :param snapshot: snapshot reference
 1410         :param backing_filename: filename of file that will back the
 1411             new qcow2 file
 1412         :param new_snap_path: filename of new qcow2 file
 1413         """
 1414         backing_path_full_path = os.path.join(
 1415             self._local_volume_dir(snapshot.volume),
 1416             backing_filename)
 1417 
 1418         volume_path = os.path.join(
 1419             self._local_volume_dir(snapshot.volume),
 1420             snapshot.volume.name)
 1421 
 1422         info = self._qemu_img_info(backing_path_full_path,
 1423                                    snapshot.volume.name)
 1424         backing_fmt = info.file_format
 1425         obj_context = snapshot.volume.obj_context
 1426 
 1427         # create new qcow2 file
 1428         if snapshot.volume.encryption_key_id is None:
 1429             command = ['qemu-img', 'create', '-f', 'qcow2', '-o',
 1430                        'backing_file=%s,backing_fmt=%s' %
 1431                        (backing_path_full_path, backing_fmt),
 1432                        new_snap_path,
 1433                        "%dG" % snapshot.volume.size]
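                  # For illustration only (hypothetical paths and size), the
                  # assembled command is along the lines of:
                  #   qemu-img create -f qcow2 \
                  #     -o backing_file=/mnt/<share-hash>/volume-1234,backing_fmt=qcow2 \
                  #     /mnt/<share-hash>/volume-1234.aaaa 1G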
 1434 
 1435             self._execute(*command, run_as_root=self._execute_as_root)
 1436 
 1437             command = ['qemu-img', 'rebase', '-u',
 1438                        '-b', backing_filename,
 1439                        '-F', backing_fmt,
 1440                        new_snap_path]
 1441 
 1442             # qemu-img rebase must run as root for the same reasons as above
 1443             self._execute(*command, run_as_root=self._execute_as_root)
 1444 
 1445         else:
 1446             # encrypted
 1447             keymgr = key_manager.API(CONF)
 1448             # Get key for the source volume using the context of this request.
 1449             key = keymgr.get(obj_context,
 1450                              snapshot.volume.encryption_key_id)
 1451             passphrase = binascii.hexlify(key.get_encoded()).decode('utf-8')
 1452 
 1453             tmp_dir = volume_utils.image_conversion_dir()
 1454             with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp_key:
 1455                 with open(tmp_key.name, 'w') as f:
 1456                     f.write(passphrase)
 1457 
 1458                 file_json_dict = {"driver": "qcow2",
 1459                                   "encrypt.key-secret": "s0",
 1460                                   "backing.encrypt.key-secret": "s0",
 1461                                   "backing.file.filename": volume_path,
 1462                                   "file": {"driver": "file",
 1463                                            "filename": backing_path_full_path,
 1464                                            }}
 1465                 file_json = jsonutils.dumps(file_json_dict)
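                      # For illustration only (hypothetical paths), file_json
                      # describes the new image and its encrypted backing chain
                      # for qemu, roughly:
                      #   {"driver": "qcow2", "encrypt.key-secret": "s0",
                      #    "backing.encrypt.key-secret": "s0",
                      #    "backing.file.filename": "/mnt/<share-hash>/volume-1234",
                      #    "file": {"driver": "file",
                      #             "filename": "/mnt/<share-hash>/volume-1234.aaaa"}}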
 1466 
 1467                 encryption = volume_utils.check_encryption_provider(
 1468                     db=db,
 1469                     volume=snapshot.volume,
 1470                     context=obj_context)
 1471 
 1472                 cipher_spec = image_utils.decode_cipher(encryption['cipher'],
 1473                                                         encryption['key_size'])
 1474 
 1475                 command = ('qemu-img', 'create', '-f', 'qcow2',
 1476                            '-o', 'encrypt.format=luks,encrypt.key-secret=s1,'
 1477                            'encrypt.cipher-alg=%(cipher_alg)s,'
 1478                            'encrypt.cipher-mode=%(cipher_mode)s,'
 1479                            'encrypt.ivgen-alg=%(ivgen_alg)s' % cipher_spec,
 1480                            '-b', 'json:' + file_json,
 1481                            '--object', 'secret,id=s0,file=' + tmp_key.name,
 1482                            '--object', 'secret,id=s1,file=' + tmp_key.name,
 1483                            new_snap_path)
 1484                 self._execute(*command, run_as_root=self._execute_as_root)
 1485 
 1486                 command_path = 'encrypt.key-secret=s0,file.filename='
 1487                 command = ['qemu-img', 'rebase',
 1488                            '--object', 'secret,id=s0,file=' + tmp_key.name,
 1489                            '--image-opts',
 1490                            command_path + new_snap_path,
 1491                            '-u',
 1492                            '-b', backing_filename,
 1493                            '-F', backing_fmt]
 1494 
 1495                 # qemu-img rebase must run as root for the same reasons as
 1496                 # above
 1497                 self._execute(*command, run_as_root=self._execute_as_root)
 1498 
 1499         self._set_rw_permissions(new_snap_path)
 1500 
 1501         # if in secure mode, chown new file
 1502         if self.secure_file_operations_enabled():
 1503             ref_file = backing_path_full_path
 1504             LOG.debug('Setting permissions: '
 1505                       '%(file)s -> %(user)s:%(group)s',
 1506                       {'file': ref_file, 'user': os.stat(ref_file).st_uid,
 1507                        'group': os.stat(ref_file).st_gid})
 1508             command = ['chown',
 1509                        '--reference=%s' % ref_file,
 1510                        new_snap_path]
 1511             self._execute(*command, run_as_root=self._execute_as_root)
 1512 
 1513     def _create_snapshot(self, snapshot):
 1514         """Create a snapshot.
 1515 
 1516         If volume is attached, call Nova to create the snapshot, providing a
 1517         qcow2 file. Cinder creates and deletes qcow2 files, but Nova is
 1518         responsible for transitioning the VM between them and handling live
 1519         transfers of data between files as required.
 1520 
 1521         If volume is detached, create locally with qemu-img. Cinder handles
 1522         manipulation of qcow2 files.
 1523 
 1524         A file named volume-<uuid>.info is stored with the volume
 1525         data and is a JSON table which contains a mapping between
 1526         Cinder snapshot UUIDs and filenames, as these associations
 1527         will change as snapshots are deleted.
 1528 
 1529 
 1530         Basic snapshot operation:
 1531 
 1532         1. Initial volume file:
 1533             volume-1234
 1534 
 1535         2. Snapshot created:
 1536             volume-1234  <- volume-1234.aaaa
 1537 
 1538             volume-1234.aaaa becomes the new "active" disk image.
 1539             If the volume is not attached, this filename will be used to
 1540             attach the volume to a VM at volume-attach time.
 1541             If the volume is attached, the VM will switch to this file as
 1542             part of the snapshot process.
 1543 
 1544             Note that volume-1234.aaaa represents changes after snapshot
 1545             'aaaa' was created.  So the data for snapshot 'aaaa' is actually
 1546             in the backing file(s) of volume-1234.aaaa.
 1547 
 1548             This file has a qcow2 header recording the fact that volume-1234 is
 1549             its backing file.  Delta changes since the snapshot was created are
 1550             stored in this file, and the backing file (volume-1234) does not
 1551             change.
 1552 
 1553             info file: { 'active': 'volume-1234.aaaa',
 1554                          'aaaa':   'volume-1234.aaaa' }
 1555 
 1556         3. Second snapshot created:
 1557             volume-1234 <- volume-1234.aaaa <- volume-1234.bbbb
 1558 
 1559             volume-1234.bbbb now becomes the "active" disk image, recording
 1560             changes made to the volume.
 1561 
 1562             info file: { 'active': 'volume-1234.bbbb',  (* changed!)
 1563                          'aaaa':   'volume-1234.aaaa',
 1564                          'bbbb':   'volume-1234.bbbb' } (* added!)
 1565 
 1566         4. Snapshot deletion when volume is attached ('in-use' state):
 1567 
 1568             * When first snapshot is deleted, Cinder calls Nova for online
 1569               snapshot deletion. Nova deletes snapshot with id "aaaa" and
 1570               makes snapshot with id "bbbb" point to the base image.
 1571               Snapshot with id "bbbb" is the active image.
 1572 
 1573               volume-1234 <- volume-1234.bbbb
 1574 
 1575               info file: { 'active': 'volume-1234.bbbb',
 1576                            'bbbb':   'volume-1234.bbbb'
 1577                          }
 1578 
 1579              * When second snapshot is deleted, Cinder calls Nova for online
 1580                snapshot deletion. Nova deletes snapshot with id "bbbb" by
 1581                pulling volume-1234's data into volume-1234.bbbb. This
 1582                (logically) removes snapshot with id "bbbb" and the active
 1583                file remains the same.
 1584 
 1585                volume-1234.bbbb
 1586 
 1587                info file: { 'active': 'volume-1234.bbbb' }
 1588 
 1589            TODO (deepakcs): Change this once Nova supports blockCommit for
 1590                             in-use volumes.
 1591 
 1592         5. Snapshot deletion when volume is detached ('available' state):
 1593 
 1594             * When first snapshot is deleted, Cinder does the snapshot
 1595               deletion. volume-1234.aaaa is removed from the snapshot chain.
 1596               The data from it is merged into its parent.
 1597 
 1598               volume-1234.bbbb is rebased, having volume-1234 as its new
 1599               parent.
 1600 
 1601               volume-1234 <- volume-1234.bbbb
 1602 
 1603               info file: { 'active': 'volume-1234.bbbb',
 1604                            'bbbb':   'volume-1234.bbbb'
 1605                          }
 1606 
 1607             * When second snapshot is deleted, Cinder does the snapshot
 1608               deletion. volume-1234.bbbb is removed from the snapshot chain.
 1609               The base image, volume-1234 becomes the active image for this
 1610               volume again.
 1611 
 1612               volume-1234
 1613 
 1614               info file: { 'active': 'volume-1234' }  (* changed!)
 1615         """
 1616 
 1617         LOG.debug('Creating %(type)s snapshot %(snap)s of volume %(vol)s',
 1618                   {'snap': snapshot.id, 'vol': snapshot.volume.id,
 1619                    'type': ('online'
 1620                             if self._is_volume_attached(snapshot.volume)
 1621                             else 'offline')})
 1622 
 1623         status = snapshot.volume.status
 1624 
 1625         acceptable_states = ['available', 'in-use', 'backing-up']
 1626         if (snapshot.display_name and
 1627                 snapshot.display_name.startswith('tmp-snap-')):
 1628             # This is an internal volume snapshot. In order to support
 1629             # image caching, we'll allow creating/deleting such snapshots
 1630             # while having volumes in 'downloading' state.
 1631             acceptable_states.append('downloading')
 1632 
 1633         self._validate_state(status, acceptable_states)
 1634 
 1635         info_path = self._local_path_volume_info(snapshot.volume)
 1636         snap_info = self._read_info_file(info_path, empty_if_missing=True)
 1637         backing_filename = self.get_active_image_from_info(
 1638             snapshot.volume)
 1639         new_snap_path = self._get_new_snap_path(snapshot)
 1640 
 1641         if self._is_volume_attached(snapshot.volume):
 1642             self._create_snapshot_online(snapshot,
 1643                                          backing_filename,
 1644                                          new_snap_path)
 1645         else:
 1646             self._do_create_snapshot(snapshot,
 1647                                      backing_filename,
 1648                                      new_snap_path)
 1649 
 1650         snap_info['active'] = os.path.basename(new_snap_path)
 1651         snap_info[snapshot.id] = os.path.basename(new_snap_path)
 1652         self._write_info_file(info_path, snap_info)
 1653 
 1654     def _create_snapshot_online(self, snapshot, backing_filename,
 1655                                 new_snap_path):
 1656         # Perform online snapshot via Nova
 1657         self._do_create_snapshot(snapshot,
 1658                                  backing_filename,
 1659                                  new_snap_path)
 1660 
 1661         connection_info = {
 1662             'type': 'qcow2',
 1663             'new_file': os.path.basename(new_snap_path),
 1664             'snapshot_id': snapshot.id
 1665         }
 1666 
 1667         try:
 1668             result = self._nova.create_volume_snapshot(
 1669                 snapshot.obj_context,
 1670                 snapshot.volume_id,
 1671                 connection_info)
 1672             LOG.debug('nova call result: %s', result)
 1673         except Exception:
 1674             LOG.exception('Call to Nova to create snapshot failed')
 1675             raise
 1676 
 1677         # Loop and wait for result
 1678         # Nova will call Cinderclient to update the status in the database
 1679         # An update of progress = '90%' means that Nova is done
 1680         seconds_elapsed = 0
 1681         increment = 1
 1682         timeout = 600
 1683         while True:
 1684             s = db.snapshot_get(snapshot.obj_context, snapshot.id)
 1685 
 1686             LOG.debug('Status of snapshot %(id)s is now %(status)s',
 1687                       {'id': snapshot['id'],
 1688                        'status': s['status']})
 1689 
 1690             if s['status'] == fields.SnapshotStatus.CREATING:
 1691                 if s['progress'] == '90%':
 1692                     # Nova tasks completed successfully
 1693                     break
 1694 
 1695                 time.sleep(increment)
 1696                 seconds_elapsed += increment
 1697             elif s['status'] == fields.SnapshotStatus.ERROR:
 1698 
 1699                 msg = _('Nova returned "error" status '
 1700                         'while creating snapshot.')
 1701                 raise exception.RemoteFSException(msg)
 1702 
 1703             elif (s['status'] == fields.SnapshotStatus.DELETING or
 1704                   s['status'] == fields.SnapshotStatus.ERROR_DELETING):
 1705                 msg = _('Snapshot %(id)s has been asked to be deleted while '
 1706                         'waiting for it to become available. Perhaps a '
 1707                         'concurrent request was made.') % {'id':
 1708                                                            snapshot.id}
 1709                 raise exception.RemoteFSConcurrentRequest(msg)
 1710 
 1711             if 10 < seconds_elapsed <= 20:
 1712                 increment = 2
 1713             elif 20 < seconds_elapsed <= 60:
 1714                 increment = 5
 1715             elif 60 < seconds_elapsed:
 1716                 increment = 10
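                  # i.e. poll every second for the first 10 seconds, every 2
                  # seconds until 20 seconds have elapsed, every 5 seconds up
                  # to a minute, then every 10 seconds until the 600s timeout.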
 1717 
 1718             if seconds_elapsed > timeout:
 1719                 msg = _('Timed out while waiting for Nova update '
 1720                         'for creation of snapshot %s.') % snapshot.id
 1721                 raise exception.RemoteFSException(msg)
 1722 
 1723     def _delete_snapshot_online(self, context, snapshot, info):
 1724         # Update info over the course of this method
 1725         # active file never changes
 1726         info_path = self._local_path_volume_info(snapshot.volume)
 1727         snap_info = self._read_info_file(info_path)
 1728 
 1729         if utils.paths_normcase_equal(info['active_file'],
 1730                                       info['snapshot_file']):
 1731             # blockRebase/Pull base into active
 1732             # info['base'] => snapshot_file
 1733 
 1734             file_to_delete = info['base_file']
 1735             if info['base_id'] is None:
 1736                 # Passing base=none to blockRebase ensures that
 1737                 # libvirt blanks out the qcow2 backing file pointer
 1738                 new_base = None
 1739             else:
 1740                 new_base = info['new_base_file']
 1741                 snap_info[info['base_id']] = info['snapshot_file']
 1742 
 1743             delete_info = {'file_to_merge': new_base,
 1744                            'merge_target_file': None,  # current
 1745                            'type': 'qcow2',
 1746                            'volume_id': snapshot.volume.id}
 1747 
 1748             del snap_info[snapshot.id]
 1749         else:
 1750             # blockCommit snapshot into base
 1751             # info['base'] <= snapshot_file
 1752             # delete record of snapshot
 1753             file_to_delete = info['snapshot_file']
 1754 
 1755             delete_info = {'file_to_merge': info['snapshot_file'],
 1756                            'merge_target_file': info['base_file'],
 1757                            'type': 'qcow2',
 1758                            'volume_id': snapshot.volume.id}
 1759 
 1760             del snap_info[snapshot.id]
 1761 
 1762         self._nova_assisted_vol_snap_delete(context, snapshot, delete_info)
 1763 
 1764         # Write info file updated above
 1765         self._write_info_file(info_path, snap_info)
 1766 
 1767         # Delete stale file
 1768         path_to_delete = os.path.join(
 1769             self._local_volume_dir(snapshot.volume), file_to_delete)
 1770         self._delete(path_to_delete)
 1771 
 1772     def _nova_assisted_vol_snap_delete(self, context, snapshot, delete_info):
 1773         try:
 1774             self._nova.delete_volume_snapshot(
 1775                 context,
 1776                 snapshot.id,
 1777                 delete_info)
 1778         except Exception:
 1779             LOG.exception('Call to Nova delete snapshot failed')
 1780             raise
 1781 
 1782         # Loop and wait for result
 1783         # Nova will call Cinderclient to update the status in the database
 1784         # An update of progress = '90%' means that Nova is done
 1785         seconds_elapsed = 0
 1786         increment = 1
 1787         timeout = 7200
 1788         while True:
 1789             s = db.snapshot_get(context, snapshot.id)
 1790 
 1791             if s['status'] == fields.SnapshotStatus.DELETING:
 1792                 if s['progress'] == '90%':
 1793                     # Nova tasks completed successfully
 1794                     break
 1795                 else:
 1796                     LOG.debug('status of snapshot %s is still "deleting"... '
 1797                               'waiting', snapshot.id)
 1798                     time.sleep(increment)
 1799                     seconds_elapsed += increment
 1800             else:
 1801                 msg = _('Unable to delete snapshot %(id)s, '
 1802                         'status: %(status)s.') % {'id': snapshot.id,
 1803                                                   'status': s['status']}
 1804                 raise exception.RemoteFSException(msg)
 1805 
 1806             if 10 < seconds_elapsed <= 20:
 1807                 increment = 2
 1808             elif 20 < seconds_elapsed <= 60:
 1809                 increment = 5
 1810             elif 60 < seconds_elapsed:
 1811                 increment = 10
 1812 
 1813             if seconds_elapsed > timeout:
 1814                 msg = _('Timed out while waiting for Nova update '
 1815                         'for deletion of snapshot %(id)s.') %\
 1816                     {'id': snapshot.id}
 1817                 raise exception.RemoteFSException(msg)
 1818 
 1819     def _extend_volume(self, volume, size_gb):
 1820         raise NotImplementedError()
 1821 
 1822     def _revert_to_snapshot(self, context, volume, snapshot):
 1823         raise NotImplementedError()
 1824 
 1825 
 1826 class RemoteFSSnapDriver(RemoteFSSnapDriverBase):
 1827     @locked_volume_id_operation
 1828     def create_snapshot(self, snapshot):
 1829         """Apply locking to the create snapshot operation."""
 1830 
 1831         return self._create_snapshot(snapshot)
 1832 
 1833     @locked_volume_id_operation
 1834     def delete_snapshot(self, snapshot):
 1835         """Apply locking to the delete snapshot operation."""
 1836 
 1837         return self._delete_snapshot(snapshot)
 1838 
 1839     @locked_volume_id_operation
 1840     def create_volume_from_snapshot(self, volume, snapshot):
 1841         return self._create_volume_from_snapshot(volume, snapshot)
 1842 
 1843     # TODO: should be locking on src_vref id -- bug #1852449
 1844     @locked_volume_id_operation
 1845     def create_cloned_volume(self, volume, src_vref):
 1846         """Creates a clone of the specified volume."""
 1847 
 1848         return self._create_cloned_volume(volume, src_vref,
 1849                                           src_vref.obj_context)
 1850 
 1851     @locked_volume_id_operation
 1852     def copy_volume_to_image(self, context, volume, image_service, image_meta):
 1853         """Copy the volume to the specified image."""
 1854 
 1855         return self._copy_volume_to_image(context, volume, image_service,
 1856                                           image_meta)
 1857 
 1858     @locked_volume_id_operation
 1859     def extend_volume(self, volume, size_gb):
 1860         return self._extend_volume(volume, size_gb)
 1861 
 1862     @locked_volume_id_operation
 1863     def revert_to_snapshot(self, context, volume, snapshot):
 1864         """Revert to specified snapshot."""
 1865 
 1866         return self._revert_to_snapshot(context, volume, snapshot)
 1867 
 1868 
 1869 class RemoteFSSnapDriverDistributed(RemoteFSSnapDriverBase):
 1870     @coordination.synchronized('{self.driver_prefix}-{snapshot.volume.id}')
 1871     def create_snapshot(self, snapshot):
 1872         """Apply locking to the create snapshot operation."""
 1873 
 1874         return self._create_snapshot(snapshot)
 1875 
 1876     @coordination.synchronized('{self.driver_prefix}-{snapshot.volume.id}')
 1877     def delete_snapshot(self, snapshot):
 1878         """Apply locking to the delete snapshot operation."""
 1879 
 1880         return self._delete_snapshot(snapshot)
 1881 
 1882     @coordination.synchronized('{self.driver_prefix}-{volume.id}')
 1883     def create_volume_from_snapshot(self, volume, snapshot):
 1884         return self._create_volume_from_snapshot(volume, snapshot)
 1885 
 1886     # lock the source volume id first
 1887     @coordination.synchronized('{self.driver_prefix}-{src_vref.id}')
 1888     @coordination.synchronized('{self.driver_prefix}-{volume.id}')
 1889     def create_cloned_volume(self, volume, src_vref):
 1890         """Creates a clone of the specified volume."""
 1891 
 1892         return self._create_cloned_volume(volume, src_vref,
 1893                                           src_vref.obj_context)
 1894 
 1895     @coordination.synchronized('{self.driver_prefix}-{volume.id}')
 1896     def copy_volume_to_image(self, context, volume, image_service, image_meta):
 1897         """Copy the volume to the specified image."""
 1898 
 1899         return self._copy_volume_to_image(context, volume, image_service,
 1900                                           image_meta)
 1901 
 1902     @coordination.synchronized('{self.driver_prefix}-{volume.id}')
 1903     def extend_volume(self, volume, size_gb):
 1904         return self._extend_volume(volume, size_gb)
 1905 
 1906     @coordination.synchronized('{self.driver_prefix}-{volume.id}')
 1907     def revert_to_snapshot(self, context, volume, snapshot):
 1908         """Revert to specified snapshot."""
 1909 
 1910         return self._revert_to_snapshot(context, volume, snapshot)
 1911 
 1912 
 1913 class RemoteFSPoolMixin(object):
 1914     """Drivers inheriting this will report each share as a pool."""
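          # A minimal sketch (hypothetical attribute names) of what inheriting
          # drivers are expected to provide: a two-way mapping between share
          # exports and pool names, e.g.:
          #
          #     def _get_pool_name_from_share(self, share):
          #         return self._pool_name_by_share[share]
          #
          #     def _get_share_from_pool_name(self, pool_name):
          #         return self._share_by_pool_name[pool_name]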
 1915 
 1916     def _find_share(self, volume):
 1917         # We let the scheduler choose a pool for us.
 1918         pool_name = self._get_pool_name_from_volume(volume)
 1919         share = self._get_share_from_pool_name(pool_name)
 1920         return share
 1921 
 1922     def _get_pool_name_from_volume(self, volume):
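              # volume['host'] follows the 'host@backend#pool' convention, so
              # extract_host(..., level='pool') returns the pool portion, e.g.
              # (hypothetical) 'nfs1@remotefs#share_pool_0' -> 'share_pool_0'.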
 1923         pool_name = volume_utils.extract_host(volume['host'],
 1924                                               level='pool')
 1925         return pool_name
 1926 
 1927     def _get_pool_name_from_share(self, share):
 1928         raise NotImplementedError()
 1929 
 1930     def _get_share_from_pool_name(self, pool_name):
 1931         # To be implemented by drivers using pools.
 1932         raise NotImplementedError()
 1933 
 1934     def _update_volume_stats(self):
 1935         data = {}
 1936         pools = []
 1937         backend_name = self.configuration.safe_get('volume_backend_name')
 1938         data['volume_backend_name'] = backend_name or self.volume_backend_name
 1939         data['vendor_name'] = self.vendor_name
 1940         data['driver_version'] = self.get_version()
 1941         data['storage_protocol'] = self.driver_volume_type
 1942 
 1943         self._ensure_shares_mounted()
 1944 
 1945         for share in self._mounted_shares:
 1946             (share_capacity,
 1947              share_free,
 1948              total_allocated) = self._get_capacity_info(share)
 1949 
 1950             pool = {'pool_name': self._get_pool_name_from_share(share),
 1951                     'total_capacity_gb': share_capacity / float(units.Gi),
 1952                     'free_capacity_gb': share_free / float(units.Gi),
 1953                     'provisioned_capacity_gb': (
 1954                         total_allocated / float(units.Gi)),
 1955                     'reserved_percentage': (
 1956                         self.configuration.reserved_percentage),
 1957                     'max_over_subscription_ratio': (
 1958                         self.configuration.max_over_subscription_ratio),
 1959                     'thin_provisioning_support': (
 1960                         self._thin_provisioning_support),
 1961                     'thick_provisioning_support': (
 1962                         self._thick_provisioning_support),
 1963                     'QoS_support': False,
 1964                     }
 1965 
 1966             pools.append(pool)
 1967 
 1968         data['total_capacity_gb'] = 0
 1969         data['free_capacity_gb'] = 0
 1970         data['pools'] = pools
 1971 
 1972         self._stats = data
 1973 
 1974 
 1975 class RevertToSnapshotMixin(object):
 1976 
 1977     def _revert_to_snapshot(self, context, volume, snapshot):
 1978         """Revert a volume to the specified snapshot.
 1979 
 1980         The volume must not be attached. Only the latest snapshot
 1981         can be used.
 1982         """
 1983         status = snapshot.volume.status
 1984         acceptable_states = ['available', 'reverting']
 1985 
 1986         self._validate_state(status, acceptable_states)
 1987 
 1988         LOG.debug('Reverting volume %(vol)s to snapshot %(snap)s',
 1989                   {'vol': snapshot.volume.id, 'snap': snapshot.id})
 1990 
 1991         info_path = self._local_path_volume_info(snapshot.volume)
 1992         snap_info = self._read_info_file(info_path)
 1993 
 1994         snapshot_file = snap_info[snapshot.id]
 1995         active_file = snap_info['active']
 1996 
 1997         if not utils.paths_normcase_equal(snapshot_file, active_file):
 1998             msg = _("Could not revert volume '%(volume_id)s' to snapshot "
 1999                     "'%(snapshot_id)s' as it does not "
 2000                     "appear to be the latest snapshot. Current active "
 2001                     "image: %(active_file)s.")
 2002             raise exception.InvalidSnapshot(
 2003                 msg % dict(snapshot_id=snapshot.id,
 2004                            active_file=active_file,
 2005                            volume_id=volume.id))
 2006 
 2007         snapshot_path = os.path.join(
 2008             self._local_volume_dir(snapshot.volume), snapshot_file)
 2009         backing_filename = self._qemu_img_info(
 2010             snapshot_path, volume.name).backing_file
 2011 
 2012         # We revert the volume to the latest snapshot by recreating the top
 2013         # image from the chain.
 2014         # This workflow should work with most (if not all) drivers inheriting
 2015         # this class.
 2016         self._delete(snapshot_path)
 2017         self._do_create_snapshot(snapshot, backing_filename, snapshot_path)
 2018 
 2019 
 2020 class RemoteFSManageableVolumesMixin(object):
 2021     _SUPPORTED_IMAGE_FORMATS = ['raw', 'qcow2']
 2022     _MANAGEABLE_IMAGE_RE = None
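          # Drivers may restrict which files are considered for management by
          # setting a regex, e.g. (hypothetical):
          #   _MANAGEABLE_IMAGE_RE = re.compile(r'^volume-[0-9a-f-]{36}$')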
 2023 
 2024     def _get_manageable_vol_location(self, existing_ref):
 2025         if 'source-name' not in existing_ref:
 2026             reason = _('The existing volume reference '
 2027                        'must contain "source-name".')
 2028             raise exception.ManageExistingInvalidReference(
 2029                 existing_ref=existing_ref, reason=reason)
 2030 
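              # 'source-name' is expected to be the volume's path on the
              # share, e.g. (hypothetical):
              #   {'source-name': '192.168.1.10:/exports/share1/volume-0001'}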
 2031         vol_remote_path = os.path.normcase(
 2032             os.path.normpath(existing_ref['source-name']))
 2033 
 2034         for mounted_share in self._mounted_shares:
 2035             # We don't currently attempt to resolve hostnames. This could
 2036             # be troublesome for some distributed shares, which may have
 2037             # hostnames resolving to multiple addresses.
 2038             norm_share = os.path.normcase(os.path.normpath(mounted_share))
 2039             head, match, share_rel_path = vol_remote_path.partition(norm_share)
 2040             if not (match and share_rel_path.startswith(os.path.sep)):
 2041                 continue
 2042 
 2043             mountpoint = self._get_mount_point_for_share(mounted_share)
 2044             vol_local_path = os.path.join(mountpoint,
 2045                                           share_rel_path.lstrip(os.path.sep))
 2046 
 2047             LOG.debug("Found mounted share referenced by %s.",
 2048                       vol_remote_path)
 2049 
 2050             if os.path.isfile(vol_local_path):
 2051                 LOG.debug("Found volume %(path)s on share %(share)s.",
 2052                           dict(path=vol_local_path, share=mounted_share))
 2053                 return dict(share=mounted_share,
 2054                             mountpoint=mountpoint,
 2055                             vol_local_path=vol_local_path,
 2056                             vol_remote_path=vol_remote_path)
 2057             else:
 2058                 LOG.error("Could not find volume %s on the "
 2059                           "specified share.", vol_remote_path)
 2060                 break
 2061 
 2062         raise exception.ManageExistingInvalidReference(
 2063             existing_ref=existing_ref, reason=_('Volume not found.'))
 2064 
 2065     def _get_managed_vol_expected_path(self, volume, volume_location):
 2066         # This may be overridden by the drivers.
 2067         return os.path.join(volume_location['mountpoint'],
 2068                             volume.name)
 2069 
 2070     def _is_volume_manageable(self, volume_path, already_managed=False):
 2071         unmanageable_reason = None
 2072 
 2073         if already_managed:
 2074             return False, _('Volume already managed.')
 2075 
 2076         try:
 2077             img_info = self._qemu_img_info(volume_path, volume_name=None)
 2078         except exception.RemoteFSInvalidBackingFile:
 2079             return False, _("Backing file present.")
 2080         except Exception:
 2081             return False, _("Failed to open image.")
 2082 
 2083         # We're double checking as some drivers do not validate backing
 2084         # files through '_qemu_img_info'.
 2085         if img_info.backing_file:
 2086             return False, _("Backing file present.")
 2087 
 2088         if img_info.file_format not in self._SUPPORTED_IMAGE_FORMATS:
 2089             unmanageable_reason = _(
 2090                 "Unsupported image format: '%s'.") % img_info.file_format
 2091             return False, unmanageable_reason
 2092 
 2093         return True, None
 2094 
 2095     def manage_existing(self, volume, existing_ref):
 2096         LOG.info('Managing volume %(volume_id)s with ref %(ref)s',
 2097                  {'volume_id': volume.id, 'ref': existing_ref})
 2098 
 2099         vol_location = self._get_manageable_vol_location(existing_ref)
 2100         vol_local_path = vol_location['vol_local_path']
 2101 
 2102         manageable, unmanageable_reason = self._is_volume_manageable(
 2103             vol_local_path)
 2104 
 2105         if not manageable:
 2106             raise exception.ManageExistingInvalidReference(
 2107                 existing_ref=existing_ref, reason=unmanageable_reason)
 2108 
 2109         expected_vol_path = self._get_managed_vol_expected_path(
 2110             volume, vol_location)
 2111 
 2112         self._set_rw_permissions(vol_local_path)
 2113 
 2114         # This should be the last thing we do.
 2115         if expected_vol_path != vol_local_path:
 2116             LOG.info("Renaming imported volume image %(src)s to %(dest)s",
 2117                      dict(src=vol_location['vol_local_path'],
 2118                           dest=expected_vol_path))
 2119             os.rename(vol_location['vol_local_path'],
 2120                       expected_vol_path)
 2121 
 2122         return {'provider_location': vol_location['share']}
 2123 
 2124     def _get_rounded_manageable_image_size(self, image_path):
 2125         image_size = image_utils.qemu_img_info(
 2126             image_path, run_as_root=self._execute_as_root).virtual_size
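              # Round up to whole gigabytes, e.g. a 1.2 GiB virtual size is
              # reported as 2.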
 2127         return int(math.ceil(float(image_size) / units.Gi))
 2128 
 2129     def manage_existing_get_size(self, volume, existing_ref):
 2130         vol_location = self._get_manageable_vol_location(existing_ref)
 2131         volume_path = vol_location['vol_local_path']
 2132         return self._get_rounded_manageable_image_size(volume_path)
 2133 
 2134     def unmanage(self, volume):
 2135         pass
 2136 
 2137     def _get_manageable_volume(self, share, volume_path, managed_volume=None):
 2138         manageable, unmanageable_reason = self._is_volume_manageable(
 2139             volume_path, already_managed=managed_volume is not None)
 2140         size_gb = None
 2141         if managed_volume:
 2142             # We may not be able to query in-use images.
 2143             size_gb = managed_volume.size
 2144         else:
 2145             try:
 2146                 size_gb = self._get_rounded_manageable_image_size(volume_path)
 2147             except Exception:
 2148                 manageable = False
 2149                 unmanageable_reason = (unmanageable_reason or
 2150                                        _("Failed to get size."))
 2151 
 2152         mountpoint = self._get_mount_point_for_share(share)
 2153         norm_mountpoint = os.path.normcase(os.path.normpath(mountpoint))
 2154         norm_vol_path = os.path.normcase(os.path.normpath(volume_path))
 2155 
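              # Build the manageable reference by swapping the local mountpoint
              # for the share export, e.g. (hypothetical paths):
              #   '/mnt/<share-hash>/volume-0001' mounted from
              #   '192.168.1.10:/exports/share1' becomes
              #   '192.168.1.10:/exports/share1/volume-0001'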
 2156         ref = norm_vol_path.replace(norm_mountpoint, share).replace('\\', '/')
 2157         manageable_volume = {
 2158             'reference': {'source-name': ref},
 2159             'size': size_gb,
 2160             'safe_to_manage': manageable,
 2161             'reason_not_safe': unmanageable_reason,
 2162             'cinder_id': managed_volume.id if managed_volume else None,
 2163             'extra_info': None,
 2164         }
 2165         return manageable_volume
 2166 
 2167     def _get_share_manageable_volumes(self, share, managed_volumes):
 2168         manageable_volumes = []
 2169         mount_path = self._get_mount_point_for_share(share)
 2170 
 2171         for dir_path, dir_names, file_names in os.walk(mount_path):
 2172             for file_name in file_names:
 2173                 file_name = os.path.normcase(file_name)
 2174                 img_path = os.path.join(dir_path, file_name)
 2175                 # In the future, we may have the regex filtering images
 2176                 # as a config option.
 2177                 if (not self._MANAGEABLE_IMAGE_RE or
 2178                         self._MANAGEABLE_IMAGE_RE.match(file_name)):
 2179                     managed_volume = managed_volumes.get(
 2180                         os.path.splitext(file_name)[0])
 2181                     try:
 2182                         manageable_volume = self._get_manageable_volume(
 2183                             share, img_path, managed_volume)
 2184                         manageable_volumes.append(manageable_volume)
 2185                     except Exception as exc:
 2186                         LOG.error(
 2187                             "Failed to get manageable volume info: "
 2188                             "'%(image_path)s'. Exception: %(exc)s.",
 2189                             dict(image_path=img_path, exc=exc))
 2190         return manageable_volumes
 2191 
 2192     def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
 2193                                sort_keys, sort_dirs):
 2194         manageable_volumes = []
 2195         managed_volumes = {vol.name: vol for vol in cinder_volumes}
 2196 
 2197         for share in self._mounted_shares:
 2198             try:
 2199                 manageable_volumes += self._get_share_manageable_volumes(
 2200                     share, managed_volumes)
 2201             except Exception as exc:
 2202                 LOG.error("Failed to get manageable volumes for "
 2203                           "share %(share)s. Exception: %(exc)s.",
 2204                           dict(share=share, exc=exc))
 2205 
 2206         return volume_utils.paginate_entries_list(
 2207             manageable_volumes, marker, limit, offset, sort_keys, sort_dirs)