"Fossies" - the Fresh Open Source Software Archive

Member "manila-11.0.1/manila/share/drivers/glusterfs/layout_volume.py" (1 Feb 2021, 26333 Bytes) of package /linux/misc/openstack/manila-11.0.1.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) Python source code syntax highlighting (style: standard) with prefixed line numbers. Alternatively you can here view or download the uninterpreted source code file. For more information about "layout_volume.py" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 11.0.0_vs_11.0.1.

# Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""GlusterFS volume mapped share layout."""

import os
import random
import re
import shutil
import string
import tempfile
import xml.etree.ElementTree as etree

from oslo_config import cfg
from oslo_log import log
import six

from manila import exception
from manila.i18n import _
from manila.share.drivers.glusterfs import common
from manila.share.drivers.glusterfs import layout
from manila import utils

LOG = log.getLogger(__name__)


glusterfs_volume_mapped_opts = [
    cfg.ListOpt('glusterfs_servers',
                default=[],
                deprecated_name='glusterfs_targets',
                help='List of GlusterFS servers that can be used to create '
                     'shares. Each GlusterFS server should be of the form '
                     '[remoteuser@]<volserver>, and they are assumed to '
                     'belong to distinct Gluster clusters.'),
    cfg.StrOpt('glusterfs_volume_pattern',
               help='Regular expression template used to filter '
                    'GlusterFS volumes for share creation. '
                    'The regex template can optionally (i.e. with support '
                    'of the GlusterFS backend) contain the #{size} '
                    'parameter which matches an integer (a sequence of '
                    'digits), in which case the value shall be interpreted '
                    'as the size of the volume in GB. Examples: '
                    r'"manila-share-volume-\d+$", '
                    r'"manila-share-volume-#{size}G-\d+$"; '
                    'with matching volume names, respectively: '
                    '"manila-share-volume-12", "manila-share-volume-3G-13". '
                    'In the latter example, the number that matches '
                    '"#{size}", that is, 3, indicates that the size of the '
                    'volume is 3G.'),
]


CONF = cfg.CONF
CONF.register_opts(glusterfs_volume_mapped_opts)

# The dict specifying named parameters
# that can be used with glusterfs_volume_pattern
# in #{<param>} format.
# For each of them we give the regex pattern it matches
# and a transformer function ('trans') for the matched
# string value.
# Currently we handle only #{size}.
PATTERN_DICT = {'size': {'pattern': r'(?P<size>\d+)', 'trans': int}}
USER_MANILA_SHARE = 'user.manila-share'
USER_CLONED_FROM = 'user.manila-cloned-from'
UUID_RE = re.compile(r'\A[\da-f]{8}-([\da-f]{4}-){3}[\da-f]{12}\Z', re.I)
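# Illustratively, UUID_RE matches a full canonical UUID (case-insensitive)
# such as '01234567-89ab-cdef-0123-456789abcdef'; it is used below to
# recognize Manila-set share ids stored in Gluster volume options.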


class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):

    _snapshots_are_supported = True

    def __init__(self, driver, *args, **kwargs):
        super(GlusterfsVolumeMappedLayout, self).__init__(
            driver, *args, **kwargs)
        self.gluster_used_vols = set()
        self.configuration.append_config_values(
            common.glusterfs_common_opts)
        self.configuration.append_config_values(
            glusterfs_volume_mapped_opts)
        self.gluster_nosnap_vols_dict = {}
        self.volume_pattern = self._compile_volume_pattern()
        self.volume_pattern_keys = self.volume_pattern.groupindex.keys()
        for srvaddr in self.configuration.glusterfs_servers:
            # format check for srvaddr
            self._glustermanager(srvaddr, False)
        self.glusterfs_versions = {}
        self.private_storage = kwargs.get('private_storage')

    def _compile_volume_pattern(self):
        """Compile a regex pattern object from the config-specified template.

        (cfg.glusterfs_volume_pattern)
        """

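        # Illustratively, with glusterfs_volume_pattern set to
        # 'manila-share-volume-#{size}G-\d+$', the template substitution
        # below yields the regex r'manila-share-volume-(?P<size>\d+)G-\d+$',
        # so a volume named 'manila-share-volume-3G-13' matches with the
        # 'size' group capturing '3' (turned into int 3 by PATTERN_DICT's
        # 'trans' in _fetch_gluster_volumes).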
        subdict = {}
        for key, val in PATTERN_DICT.items():
            subdict[key] = val['pattern']

        # Using templates with placeholder syntax #{<var>}
        class CustomTemplate(string.Template):
            delimiter = '#'

        volume_pattern = CustomTemplate(
            self.configuration.glusterfs_volume_pattern).substitute(
            subdict)
        return re.compile(volume_pattern)

    def do_setup(self, context):
        """Set up the GlusterFS volumes."""
        glusterfs_versions, exceptions = {}, {}
        for srvaddr in self.configuration.glusterfs_servers:
            try:
                glusterfs_versions[srvaddr] = self._glustermanager(
                    srvaddr, False).get_gluster_version()
            except exception.GlusterfsException as exc:
                exceptions[srvaddr] = six.text_type(exc)
        if exceptions:
            for srvaddr, excmsg in exceptions.items():
                LOG.error("'gluster version' failed on server "
                          "%(server)s with: %(message)s",
                          {'server': srvaddr, 'message': excmsg})
            raise exception.GlusterfsException(_(
                "'gluster version' failed on servers %s") % (
                ','.join(exceptions.keys())))
        notsupp_servers = []
        for srvaddr, vers in glusterfs_versions.items():
            if common.numreduct(vers) < self.driver.GLUSTERFS_VERSION_MIN:
                notsupp_servers.append(srvaddr)
        if notsupp_servers:
            gluster_version_min_str = '.'.join(
                six.text_type(c) for c in self.driver.GLUSTERFS_VERSION_MIN)
            for srvaddr in notsupp_servers:
                LOG.error("GlusterFS version %(version)s on server "
                          "%(server)s is not supported, "
                          "minimum requirement: %(minvers)s",
                          {'server': srvaddr,
                           'version': '.'.join(glusterfs_versions[srvaddr]),
                           'minvers': gluster_version_min_str})
            raise exception.GlusterfsException(_(
                "Unsupported GlusterFS version on servers %(servers)s, "
                "minimum requirement: %(minvers)s") % {
                'servers': ','.join(notsupp_servers),
                'minvers': gluster_version_min_str})
        self.glusterfs_versions = glusterfs_versions

        gluster_volumes_initial = set(
            self._fetch_gluster_volumes(filter_used=False))
        if not gluster_volumes_initial:
            # No suitable volumes are found on the Gluster end.
            # Raise exception.
            msg = (_("Gluster backend does not provide any volume "
                     "matching pattern %s"
                     ) % self.configuration.glusterfs_volume_pattern)
            LOG.error(msg)
            raise exception.GlusterfsException(msg)

        LOG.info("Found %d Gluster volumes allocated for Manila.",
                 len(gluster_volumes_initial))

        self._check_mount_glusterfs()

    def _glustermanager(self, gluster_address, req_volume=True):
        """Create GlusterManager object for gluster_address."""

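        # Note (illustrative, based on usage within this module):
        # gluster_address is either a '[<user>@]<host>[:/<volume>]'
        # style string (as in the glusterfs_servers option) or a
        # components dict copied from another GlusterManager.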
        return common.GlusterManager(
            gluster_address, self.driver._execute,
            self.configuration.glusterfs_path_to_private_key,
            self.configuration.glusterfs_server_password,
            requires={'volume': req_volume})

    def _share_manager(self, share):
        """Return GlusterManager object representing share's backend."""
        gluster_address = self.private_storage.get(share['id'], 'volume')
        if gluster_address is None:
            return
        return self._glustermanager(gluster_address)

    def _fetch_gluster_volumes(self, filter_used=True):
        """Do a 'gluster volume list | grep <volume pattern>'.

        Aggregate the results from all servers.
        Extract the named groups from the matching volume names
        using the specs given in PATTERN_DICT.
        Return a dict with keys of the form <server>:/<volname>
        and values being dicts that map names of named groups
        to their extracted value.
        """

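        # Illustratively, with the pattern
        # 'manila-share-volume-#{size}G-\d+$' and a configured server
        # 'root@gluster1' exporting 'manila-share-volume-3G-13', the
        # result would be
        # {'root@gluster1:/manila-share-volume-3G-13': {'size': 3}}
        # (keys use the manager's qualified address).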
        volumes_dict = {}
        for srvaddr in self.configuration.glusterfs_servers:
            gluster_mgr = self._glustermanager(srvaddr, False)
            if gluster_mgr.user:
                logmsg = ("Retrieving volume list "
                          "on host %s") % gluster_mgr.host
            else:
                logmsg = "Retrieving volume list"
            out, err = gluster_mgr.gluster_call('volume', 'list', log=logmsg)
            for volname in out.split("\n"):
                patmatch = self.volume_pattern.match(volname)
                if not patmatch:
                    continue
                comp_vol = gluster_mgr.components.copy()
                comp_vol.update({'volume': volname})
                gluster_mgr_vol = self._glustermanager(comp_vol)
                if filter_used:
                    vshr = gluster_mgr_vol.get_vol_option(
                        USER_MANILA_SHARE) or ''
                    if UUID_RE.search(vshr):
                        continue
                pattern_dict = {}
                for key in self.volume_pattern_keys:
                    keymatch = patmatch.group(key)
                    if keymatch is None:
                        pattern_dict[key] = None
                    else:
                        trans = PATTERN_DICT[key].get('trans', lambda x: x)
                        pattern_dict[key] = trans(keymatch)
                volumes_dict[gluster_mgr_vol.qualified] = pattern_dict
        return volumes_dict

    @utils.synchronized("glusterfs_native", external=False)
    def _pop_gluster_vol(self, size=None):
        """Pick an unbound volume.

        Do a _fetch_gluster_volumes() first to get the complete
        list of usable volumes.
        Keep only the unbound ones (ones that are not yet used to
        back a share).
        If size is given, try to pick one which has a size specification
        (according to the 'size' named group of the volume pattern)
        whose size is greater than or equal to the given size.
        Return the volume chosen (in <host>:/<volname> format).
        """

        voldict = self._fetch_gluster_volumes()
        # calculate the set of unused volumes
        unused_vols = set(voldict) - self.gluster_used_vols

        if not unused_vols:
            # No volumes available for use as share. Warn user.
            LOG.warning("No unused gluster volumes available for use as "
                        "share! Creating a share won't be possible unless "
                        "existing shares are deleted or some gluster "
                        "volumes are created with names matching "
                        "'glusterfs_volume_pattern'.")
        else:
            LOG.info("Number of gluster volumes in use: "
                     "%(inuse-numvols)s. Number of gluster volumes "
                     "available for use as share: %(unused-numvols)s",
                     {'inuse-numvols': len(self.gluster_used_vols),
                      'unused-numvols': len(unused_vols)})

        # volmap is the data structure used to categorize and sort
        # the unused volumes. It's a nested dictionary of structure
        # {<size>: <hostmap>}
        # where <size> is either an integer or None,
        # <hostmap> is a dictionary of structure {<host>: <vols>}
        # where <host> is a host name (IP address), <vols> is a list
        # of volumes (gluster addresses).
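        # Illustratively: given unused volumes 'h1:/vol-3G-1', 'h1:/vol-5G-1'
        # and 'h2:/vol-3G-2' with size indications 3, 5, 3 and a requested
        # size of 2, volmap would become
        # {None: {}, 3: {'h1': ['h1:/vol-3G-1'], 'h2': ['h2:/vol-3G-2']},
        #  5: {'h1': ['h1:/vol-5G-1']}},
        # and the smallest adequate size, 3, would be chosen below.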
        volmap = {None: {}}
        # if the caller has specified a size and 'size' occurs as
        # a parameter in the volume pattern...
        if size and 'size' in self.volume_pattern_keys:
            # then this function is used to extract the
            # size value for a given volume from the voldict...
            get_volsize = lambda vol: voldict[vol]['size']  # noqa: E731
        else:
            # else just use a stub.
            get_volsize = lambda vol: None  # noqa: E731
        for vol in unused_vols:
            # For each unused volume, we extract the <size>
            # and <host> values with which it can be inserted
            # into the volmap, and conditionally perform
            # the insertion (the condition being: if the caller
            # specified a size and a size indication was found
            # in the volume name, then the indicated size must
            # meet the caller's spec).
            volsize = get_volsize(vol)
            if not volsize or volsize >= size:
                hostmap = volmap.get(volsize)
                if not hostmap:
                    hostmap = {}
                    volmap[volsize] = hostmap
                host = self._glustermanager(vol).host
                hostvols = hostmap.get(host)
                if not hostvols:
                    hostvols = []
                    hostmap[host] = hostvols
                hostvols.append(vol)
        if len(volmap) > 1:
            # volmap has keys apart from the default None,
            # i.e. volumes with a sensible and adherent size
            # indication have been found. Then pick the smallest
            # of the size values.
            chosen_size = sorted(n for n in volmap.keys() if n)[0]
        else:
            chosen_size = None
        chosen_hostmap = volmap[chosen_size]
        if not chosen_hostmap:
            msg = (_("Couldn't find a free gluster volume to use."))
            LOG.error(msg)
            raise exception.GlusterfsException(msg)

        # From the hosts we choose randomly to tend towards
        # even distribution of share backing volumes among
        # Gluster clusters.
        chosen_host = random.choice(list(chosen_hostmap.keys()))
        # Within a host's volumes, choose alphabetically first,
        # to make it predictable.
        vol = sorted(chosen_hostmap[chosen_host])[0]
        self.gluster_used_vols.add(vol)
        return vol

    @utils.synchronized("glusterfs_native", external=False)
    def _push_gluster_vol(self, exp_locn):
        try:
            self.gluster_used_vols.remove(exp_locn)
        except KeyError:
            msg = (_("Couldn't find the share in the used list."))
            LOG.error(msg)
            raise exception.GlusterfsException(msg)

    def _wipe_gluster_vol(self, gluster_mgr):

        # Create a temporary mount.
        gluster_export = gluster_mgr.export
        tmpdir = tempfile.mkdtemp()
        try:
            common._mount_gluster_vol(self.driver._execute, gluster_export,
                                      tmpdir)
        except exception.GlusterfsException:
            shutil.rmtree(tmpdir, ignore_errors=True)
            raise

        # Delete the contents of a GlusterFS volume that is temporarily
        # mounted.
        # From GlusterFS version 3.7, two directories, '.trashcan' at the root
        # of the GlusterFS volume and 'internal_op' within the '.trashcan'
        # directory, are internally created when a GlusterFS volume is started.
        # GlusterFS does not allow unlink(2) of the two directories. So do not
        # delete the paths of the two directories, but delete their contents
        # along with the rest of the contents of the volume.
        srvaddr = gluster_mgr.host_access
        if common.numreduct(self.glusterfs_versions[srvaddr]) < (3, 7):
            cmd = ['find', tmpdir, '-mindepth', '1', '-delete']
        else:
            ignored_dirs = [os.path.join(tmpdir, *d) for d in
                            (('.trashcan',), ('.trashcan', 'internal_op'))]
            cmd = ['find', tmpdir, '-mindepth', '1', '!', '-path',
                   ignored_dirs[0], '!', '-path', ignored_dirs[1], '-delete']

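        # Illustratively, on a >= 3.7 backend with the volume mounted at a
        # hypothetical /tmp/tmpXYZ, cmd amounts to:
        #   find /tmp/tmpXYZ -mindepth 1 ! -path /tmp/tmpXYZ/.trashcan \
        #     ! -path /tmp/tmpXYZ/.trashcan/internal_op -delete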
        try:
            self.driver._execute(*cmd, run_as_root=True)
        except exception.ProcessExecutionError as exc:
            msg = (_("Error trying to wipe gluster volume. "
                     "gluster_export: %(export)s, Error: %(error)s") %
                   {'export': gluster_export, 'error': exc.stderr})
            LOG.error(msg)
            raise exception.GlusterfsException(msg)
        finally:
            # Unmount.
            common._umount_gluster_vol(self.driver._execute, tmpdir)
            shutil.rmtree(tmpdir, ignore_errors=True)

    def create_share(self, context, share, share_server=None):
        """Create a share using GlusterFS volume.

        1 Manila share = 1 GlusterFS volume. Pick an unused
        GlusterFS volume for use as a share.
        """
        try:
            vol = self._pop_gluster_vol(share['size'])
        except exception.GlusterfsException:
            LOG.error("Error creating share %(share_id)s",
                      {'share_id': share['id']})
            raise

        gmgr = self._glustermanager(vol)
        export = self.driver._setup_via_manager(
            {'share': share, 'manager': gmgr})

        gmgr.set_vol_option(USER_MANILA_SHARE, share['id'])
        self.private_storage.update(share['id'], {'volume': vol})

        # TODO(deepakcs): Enable quota and set it to the share size.

        # For native protocol, the export_location should be of the form:
        # server:/volname
        LOG.info("export_location sent back from create_share: %s",
                 export)
        return export

    def delete_share(self, context, share, share_server=None):
        """Delete a share on the GlusterFS volume.

        1 Manila share = 1 GlusterFS volume. Put the gluster
        volume back in the available list.
        """
        gmgr = self._share_manager(share)
        if not gmgr:
            # Share does not have a record in private storage.
            # It means create_share{,_from_snapshot} did not
            # succeed(*). In that case we should not obstruct
            # share deletion, so we just return doing nothing.
            #
            # (*) or we have a database corruption, but then it
            # basically does not matter what we do here
            return
        clone_of = gmgr.get_vol_option(USER_CLONED_FROM) or ''
        try:
            if UUID_RE.search(clone_of):
                # We take responsibility for the lifecycle
                # management of those volumes which were
                # created by us (as snapshot clones) ...
                gmgr.gluster_call('volume', 'delete', gmgr.volume)
            else:
                # ... for volumes that come from the pool, we return
                # them to the pool (after some purification rituals)
                self._wipe_gluster_vol(gmgr)
                gmgr.set_vol_option(USER_MANILA_SHARE, 'NONE')
                gmgr.set_vol_option('nfs.disable', 'on')

            # When deleting the share instance, we may need to
            # update 'self.gluster_used_vols' again
            self.gluster_used_vols.add(gmgr.qualified)
            self._push_gluster_vol(gmgr.qualified)
        except exception.GlusterfsException:
            LOG.error("Error during delete_share request for "
                      "share %(share_id)s", {'share_id': share['id']})
            raise

        self.private_storage.delete(share['id'])
        # TODO(deepakcs): Disable quota.

    @staticmethod
    def _find_actual_backend_snapshot_name(gluster_mgr, snapshot):
        args = ('snapshot', 'list', gluster_mgr.volume, '--mode=script')
        out, err = gluster_mgr.gluster_call(
            *args,
            log=("Retrieving snapshot list"))
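        # The snapshot names emitted above are filtered by Manila snapshot
        # id below: snapshots created through create_snapshot() are named
        # 'manila-<snapshot-id>', so exactly one line is expected to match.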
        snapgrep = list(filter(lambda x: snapshot['id'] in x, out.split("\n")))
        if len(snapgrep) != 1:
            msg = (_("Failed to identify backing GlusterFS object "
                     "for snapshot %(snap_id)s of share %(share_id)s: "
                     "a single candidate was expected, %(found)d were "
                     "found.") %
                   {'snap_id': snapshot['id'],
                    'share_id': snapshot['share_id'],
                    'found': len(snapgrep)})
            raise exception.GlusterfsException(msg)
        backend_snapshot_name = snapgrep[0]
        return backend_snapshot_name

    def create_share_from_snapshot(self, context, share, snapshot,
                                   share_server=None, parent_share=None):
        old_gmgr = self._share_manager(snapshot['share_instance'])

        # The snapshot clone feature of the GlusterFS server, which is
        # essential to support this API, is available in GlusterFS server
        # versions 3.7 and higher. So do a version check.
        vers = self.glusterfs_versions[old_gmgr.host_access]
        minvers = (3, 7)
        if common.numreduct(vers) < minvers:
            minvers_str = '.'.join(six.text_type(c) for c in minvers)
            vers_str = '.'.join(vers)
            msg = (_("GlusterFS version %(version)s on server %(server)s "
                     "does not support creation of shares from snapshot; "
                     "minimum requirement: %(minversion)s") %
                   {'version': vers_str, 'server': old_gmgr.host,
                    'minversion': minvers_str})
            LOG.error(msg)
            raise exception.GlusterfsException(msg)

        # Clone the snapshot. The snapshot clone, a new GlusterFS volume,
        # will serve as the share.
        backend_snapshot_name = self._find_actual_backend_snapshot_name(
            old_gmgr, snapshot)
        volume = ''.join(['manila-', share['id']])
        args_tuple = (('snapshot', 'activate', backend_snapshot_name,
                       'force', '--mode=script'),
                      ('snapshot', 'clone', volume, backend_snapshot_name))
        for args in args_tuple:
            out, err = old_gmgr.gluster_call(
                *args,
                log=("Creating share from snapshot"))

        # Get a manager for the new volume/share.
        comp_vol = old_gmgr.components.copy()
        comp_vol.update({'volume': volume})
        gmgr = self._glustermanager(comp_vol)
        export = self.driver._setup_via_manager(
            {'share': share, 'manager': gmgr},
            {'share': snapshot['share_instance'], 'manager': old_gmgr})

        argseq = (('set',
                   [USER_CLONED_FROM, snapshot['share_id']]),
                  ('set', [USER_MANILA_SHARE, share['id']]),
                  ('start', []))
        for op, opargs in argseq:
            args = ['volume', op, gmgr.volume] + opargs
            gmgr.gluster_call(*args, log=("Creating share from snapshot"))
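
        # Illustratively, the loop above runs the equivalent of:
        #   gluster volume set manila-<share-id> user.manila-cloned-from \
        #       <parent-share-id>
        #   gluster volume set manila-<share-id> user.manila-share <share-id>
        #   gluster volume start manila-<share-id>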

        self.gluster_used_vols.add(gmgr.qualified)
        self.private_storage.update(share['id'], {'volume': gmgr.qualified})

        return export

    def create_snapshot(self, context, snapshot, share_server=None):
        """Creates a snapshot."""

        gluster_mgr = self._share_manager(snapshot['share'])
        if gluster_mgr.qualified in self.gluster_nosnap_vols_dict:
            opret, operrno = -1, 0
            operrstr = self.gluster_nosnap_vols_dict[gluster_mgr.qualified]
        else:
            args = ('--xml', 'snapshot', 'create', 'manila-' + snapshot['id'],
                    gluster_mgr.volume)
            out, err = gluster_mgr.gluster_call(
                *args,
                log=("Creating snapshot"))

            if not out:
                raise exception.GlusterfsException(
                    _('gluster snapshot create %s: no data received') %
                    gluster_mgr.volume
                )

            outxml = etree.fromstring(out)
            opret = int(common.volxml_get(outxml, 'opRet'))
            operrno = int(common.volxml_get(outxml, 'opErrno'))
            operrstr = common.volxml_get(outxml, 'opErrstr', default=None)
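            # Illustratively, the --xml output parsed here has the form:
            #   <cliOutput><opRet>0</opRet><opErrno>0</opErrno>
            #   <opErrstr/>...</cliOutput>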

        if opret == -1:
            vers = self.glusterfs_versions[gluster_mgr.host_access]
            if common.numreduct(vers) > (3, 6):
                # This logic has not yet been implemented in GlusterFS 3.6
                if operrno == 0:
                    self.gluster_nosnap_vols_dict[
                        gluster_mgr.qualified] = operrstr
                    msg = _("Share %(share_id)s does not support snapshots: "
                            "%(errstr)s.") % {'share_id': snapshot['share_id'],
                                              'errstr': operrstr}
                    LOG.error(msg)
                    raise exception.ShareSnapshotNotSupported(msg)
            raise exception.GlusterfsException(
                _("Creating snapshot for share %(share_id)s failed "
                  "with %(errno)d: %(errstr)s") % {
                      'share_id': snapshot['share_id'],
                      'errno': operrno,
                      'errstr': operrstr})

    def delete_snapshot(self, context, snapshot, share_server=None):
        """Deletes a snapshot."""

        gluster_mgr = self._share_manager(snapshot['share'])
        backend_snapshot_name = self._find_actual_backend_snapshot_name(
            gluster_mgr, snapshot)
        args = ('--xml', 'snapshot', 'delete', backend_snapshot_name,
                '--mode=script')
        out, err = gluster_mgr.gluster_call(
            *args,
            log=("Deleting snapshot"))

        if not out:
            raise exception.GlusterfsException(
                _('gluster snapshot delete %s: no data received') %
                backend_snapshot_name
            )

        outxml = etree.fromstring(out)
        gluster_mgr.xml_response_check(outxml, args[1:])

    def ensure_share(self, context, share, share_server=None):
        """Invoked to ensure that share is exported."""
        gmgr = self._share_manager(share)
        self.gluster_used_vols.add(gmgr.qualified)

        gmgr.set_vol_option(USER_MANILA_SHARE, share['id'])

    # Debt...

    def manage_existing(self, share, driver_options):
        raise NotImplementedError()

    def unmanage(self, share):
        raise NotImplementedError()

    def extend_share(self, share, new_size, share_server=None):
        raise NotImplementedError()

    def shrink_share(self, share, new_size, share_server=None):
        raise NotImplementedError()