"Fossies" - the Fresh Open Source Software Archive

Member "manila-8.1.4/manila/share/drivers/glusterfs/layout_volume.py" (19 Nov 2020, 26321 Bytes) of package /linux/misc/openstack/manila-8.1.4.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) Python source code syntax highlighting (style: standard) with prefixed line numbers. Alternatively you can here view or download the uninterpreted source code file. For more information about "layout_volume.py" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 8.1.3_vs_8.1.4.

# Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""GlusterFS volume mapped share layout."""

import os
import random
import re
import shutil
import string
import tempfile
import xml.etree.cElementTree as etree

from oslo_config import cfg
from oslo_log import log
import six

from manila import exception
from manila.i18n import _
from manila.share.drivers.glusterfs import common
from manila.share.drivers.glusterfs import layout
from manila import utils

LOG = log.getLogger(__name__)


glusterfs_volume_mapped_opts = [
    cfg.ListOpt('glusterfs_servers',
                default=[],
                deprecated_name='glusterfs_targets',
                help='List of GlusterFS servers that can be used to create '
                     'shares. Each GlusterFS server should be of the form '
                     '[remoteuser@]<volserver>, and they are assumed to '
                     'belong to distinct Gluster clusters.'),
    cfg.StrOpt('glusterfs_volume_pattern',
               help='Regular expression template used to filter '
                    'GlusterFS volumes for share creation. '
                    'The regex template can optionally (i.e. with support '
                    'of the GlusterFS backend) contain the #{size} '
                    'parameter which matches an integer (sequence of '
                    'digits) in which case the value shall be interpreted as '
                    'size of the volume in GB. Examples: '
                    '"manila-share-volume-\\d+$", '
                    '"manila-share-volume-#{size}G-\\d+$"; '
                    'with matching volume names, respectively: '
                    '"manila-share-volume-12", "manila-share-volume-3G-13". '
                    'In the latter example, the number that matches '
                    '"#{size}", that is, 3, indicates that the size of the '
                    'volume is 3G.'),
]


CONF = cfg.CONF
CONF.register_opts(glusterfs_volume_mapped_opts)

# The dict specifying named parameters
# that can be used with glusterfs_volume_pattern
# in #{<param>} format.
# For each of them we give the regex pattern it matches
# and a transformer function ('trans') for the matched
# string value.
# Currently we handle only #{size}.
PATTERN_DICT = {'size': {'pattern': r'(?P<size>\d+)', 'trans': int}}
USER_MANILA_SHARE = 'user.manila-share'
USER_CLONED_FROM = 'user.manila-cloned-from'
UUID_RE = re.compile(r'\A[\da-f]{8}-([\da-f]{4}-){3}[\da-f]{12}\Z', re.I)
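# Illustration (example value, not used by the code): UUID_RE matches
# canonically formatted UUIDs such as Manila share ids, e.g.
# '01234567-89ab-cdef-0123-456789abcdef', while rejecting sentinel values
# like the 'NONE' that delete_share() writes into 'user.manila-share'.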


class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):

    _snapshots_are_supported = True

    def __init__(self, driver, *args, **kwargs):
        super(GlusterfsVolumeMappedLayout, self).__init__(
            driver, *args, **kwargs)
        self.gluster_used_vols = set()
        self.configuration.append_config_values(
            common.glusterfs_common_opts)
        self.configuration.append_config_values(
            glusterfs_volume_mapped_opts)
        self.gluster_nosnap_vols_dict = {}
        self.volume_pattern = self._compile_volume_pattern()
        self.volume_pattern_keys = self.volume_pattern.groupindex.keys()
        for srvaddr in self.configuration.glusterfs_servers:
            # format check for srvaddr
            self._glustermanager(srvaddr, False)
        self.glusterfs_versions = {}
        self.private_storage = kwargs.get('private_storage')

    def _compile_volume_pattern(self):
        """Compile a RegexObject from the config specified regex template.

        (cfg.glusterfs_volume_pattern)
        """

        subdict = {}
        for key, val in PATTERN_DICT.items():
            subdict[key] = val['pattern']

        # Using templates with placeholder syntax #{<var>}
        class CustomTemplate(string.Template):
            delimiter = '#'

        volume_pattern = CustomTemplate(
            self.configuration.glusterfs_volume_pattern).substitute(
            subdict)
        return re.compile(volume_pattern)
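        # Illustration (example config value): with glusterfs_volume_pattern
        # set to 'manila-share-volume-#{size}G-\d+$', the substitution above
        # yields 'manila-share-volume-(?P<size>\d+)G-\d+$', whose 'size'
        # group captures the digits encoding the volume size.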

    def do_setup(self, context):
        """Set up the GlusterFS volumes."""
        glusterfs_versions, exceptions = {}, {}
        for srvaddr in self.configuration.glusterfs_servers:
            try:
                glusterfs_versions[srvaddr] = self._glustermanager(
                    srvaddr, False).get_gluster_version()
            except exception.GlusterfsException as exc:
                exceptions[srvaddr] = six.text_type(exc)
        if exceptions:
            for srvaddr, excmsg in exceptions.items():
                LOG.error("'gluster version' failed on server "
                          "%(server)s with: %(message)s",
                          {'server': srvaddr, 'message': excmsg})
            raise exception.GlusterfsException(_(
                "'gluster version' failed on servers %s") % (
                ','.join(exceptions.keys())))
        notsupp_servers = []
        for srvaddr, vers in glusterfs_versions.items():
            if common.numreduct(vers) < self.driver.GLUSTERFS_VERSION_MIN:
                notsupp_servers.append(srvaddr)
        if notsupp_servers:
            gluster_version_min_str = '.'.join(
                six.text_type(c) for c in self.driver.GLUSTERFS_VERSION_MIN)
            for srvaddr in notsupp_servers:
                LOG.error("GlusterFS version %(version)s on server "
                          "%(server)s is not supported, "
                          "minimum requirement: %(minvers)s",
                          {'server': srvaddr,
                           'version': '.'.join(glusterfs_versions[srvaddr]),
                           'minvers': gluster_version_min_str})
            raise exception.GlusterfsException(_(
                "Unsupported GlusterFS version on servers %(servers)s, "
                "minimum requirement: %(minvers)s") % {
                'servers': ','.join(notsupp_servers),
                'minvers': gluster_version_min_str})
        self.glusterfs_versions = glusterfs_versions

        gluster_volumes_initial = set(
            self._fetch_gluster_volumes(filter_used=False))
        if not gluster_volumes_initial:
            # No suitable volumes are found on the Gluster end.
            # Raise exception.
            msg = (_("Gluster backend does not provide any volume "
                     "matching pattern %s"
                     ) % self.configuration.glusterfs_volume_pattern)
            LOG.error(msg)
            raise exception.GlusterfsException(msg)

        LOG.info("Found %d Gluster volumes allocated for Manila.",
                 len(gluster_volumes_initial))

        self._check_mount_glusterfs()

    def _glustermanager(self, gluster_address, req_volume=True):
        """Create GlusterManager object for gluster_address."""

        return common.GlusterManager(
            gluster_address, self.driver._execute,
            self.configuration.glusterfs_path_to_private_key,
            self.configuration.glusterfs_server_password,
            requires={'volume': req_volume})
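        # Note: callers in this module pass either an address string of the
        # '[remoteuser@]<volserver>' form (see the glusterfs_servers option)
        # or a components dict copied from another GlusterManager, as in
        # _fetch_gluster_volumes() below.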

    def _share_manager(self, share):
        """Return GlusterManager object representing share's backend."""
        gluster_address = self.private_storage.get(share['id'], 'volume')
        if gluster_address is None:
            return
        return self._glustermanager(gluster_address)

    def _fetch_gluster_volumes(self, filter_used=True):
        """Do a 'gluster volume list | grep <volume pattern>'.

        Aggregate the results from all servers.
        Extract the named groups from the matching volume names
        using the specs given in PATTERN_DICT.
        Return a dict with keys of the form <server>:/<volname>
        and values being dicts that map names of named groups
        to their extracted value.
        """
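        # Illustration (hypothetical server): with the pattern
        # 'manila-share-volume-#{size}G-\d+$' (cf. the config help above),
        # a matching volume 'manila-share-volume-3G-13' on server '10.0.0.1'
        # yields the entry
        # {'10.0.0.1:/manila-share-volume-3G-13': {'size': 3}}.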

        volumes_dict = {}
        for srvaddr in self.configuration.glusterfs_servers:
            gluster_mgr = self._glustermanager(srvaddr, False)
            if gluster_mgr.user:
                logmsg = ("Retrieving volume list "
                          "on host %s") % gluster_mgr.host
            else:
                logmsg = ("Retrieving volume list")
            out, err = gluster_mgr.gluster_call('volume', 'list', log=logmsg)
            for volname in out.split("\n"):
                patmatch = self.volume_pattern.match(volname)
                if not patmatch:
                    continue
                comp_vol = gluster_mgr.components.copy()
                comp_vol.update({'volume': volname})
                gluster_mgr_vol = self._glustermanager(comp_vol)
                if filter_used:
                    vshr = gluster_mgr_vol.get_vol_option(
                        USER_MANILA_SHARE) or ''
                    if UUID_RE.search(vshr):
                        continue
                pattern_dict = {}
                for key in self.volume_pattern_keys:
                    keymatch = patmatch.group(key)
                    if keymatch is None:
                        pattern_dict[key] = None
                    else:
                        trans = PATTERN_DICT[key].get('trans', lambda x: x)
                        pattern_dict[key] = trans(keymatch)
                volumes_dict[gluster_mgr_vol.qualified] = pattern_dict
        return volumes_dict

    @utils.synchronized("glusterfs_native", external=False)
    def _pop_gluster_vol(self, size=None):
        """Pick an unbound volume.

        Do a _fetch_gluster_volumes() first to get the complete
        list of usable volumes.
        Keep only the unbound ones (ones that are not yet used to
        back a share).
        If size is given, try to pick one which has a size specification
        (according to the 'size' named group of the volume pattern)
        and whose size is greater than or equal to the given size.
        Return the volume chosen (in <host>:/<volname> format).
        """

        voldict = self._fetch_gluster_volumes()
        # calculate the set of unused volumes
        unused_vols = set(voldict) - self.gluster_used_vols

        if not unused_vols:
            # No volumes available for use as share. Warn user.
            LOG.warning("No unused gluster volumes available for use as "
                        "share! Create share won't be supported unless "
                        "existing shares are deleted or some gluster "
                        "volumes are created with names matching "
                        "'glusterfs_volume_pattern'.")
        else:
            LOG.info("Number of gluster volumes in use: "
                     "%(inuse-numvols)s. Number of gluster volumes "
                     "available for use as share: %(unused-numvols)s",
                     {'inuse-numvols': len(self.gluster_used_vols),
                      'unused-numvols': len(unused_vols)})

        # volmap is the data structure used to categorize and sort
        # the unused volumes. It's a nested dictionary of structure
        # {<size>: <hostmap>}
        # where <size> is either an integer or None,
        # <hostmap> is a dictionary of structure {<host>: <vols>}
        # where <host> is a host name (IP address), <vols> is a list
        # of volumes (gluster addresses).
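        # Illustration (hypothetical values): with one matching 3G volume
        # on host '10.0.0.1' and one volume without size indication on
        # host '10.0.0.2', the loop below would leave volmap as
        # {None: {'10.0.0.2': ['10.0.0.2:/manila-share-volume-12']},
        #  3: {'10.0.0.1': ['10.0.0.1:/manila-share-volume-3G-13']}}.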
        volmap = {None: {}}
        # if the caller has specified a size and 'size' occurs as
        # a parameter in the volume pattern...
        if size and 'size' in self.volume_pattern_keys:
            # then this function is used to extract the
            # size value for a given volume from the voldict...
            get_volsize = lambda vol: voldict[vol]['size']
        else:
            # else just use a stub.
            get_volsize = lambda vol: None
        for vol in unused_vols:
            # For each unused volume, we extract the <size>
            # and <host> values with which it can be inserted
            # into the volmap, and conditionally perform
            # the insertion (the condition being: if the
            # caller specified a size and a size indication was
            # found in the volume name, we require that the
            # indicated size adheres to the caller's spec).
            volsize = get_volsize(vol)
            if not volsize or volsize >= size:
                hostmap = volmap.get(volsize)
                if not hostmap:
                    hostmap = {}
                    volmap[volsize] = hostmap
                host = self._glustermanager(vol).host
                hostvols = hostmap.get(host)
                if not hostvols:
                    hostvols = []
                    hostmap[host] = hostvols
                hostvols.append(vol)
        if len(volmap) > 1:
            # volmap has keys apart from the default None,
            # i.e. volumes with a sensible and adherent size
            # indication have been found. Then pick the smallest
            # of the size values.
            chosen_size = sorted(n for n in volmap.keys() if n)[0]
        else:
            chosen_size = None
        chosen_hostmap = volmap[chosen_size]
        if not chosen_hostmap:
            msg = (_("Couldn't find a free gluster volume to use."))
            LOG.error(msg)
            raise exception.GlusterfsException(msg)

        # From the hosts we choose randomly to tend towards
        # even distribution of share backing volumes among
        # Gluster clusters.
        chosen_host = random.choice(list(chosen_hostmap.keys()))
        # Within a host's volumes, choose the alphabetically first,
        # to make it predictable.
        vol = sorted(chosen_hostmap[chosen_host])[0]
        self.gluster_used_vols.add(vol)
        return vol

    @utils.synchronized("glusterfs_native", external=False)
    def _push_gluster_vol(self, exp_locn):
        try:
            self.gluster_used_vols.remove(exp_locn)
        except KeyError:
            msg = (_("Couldn't find the share in used list."))
            LOG.error(msg)
            raise exception.GlusterfsException(msg)

    def _wipe_gluster_vol(self, gluster_mgr):

        # Create a temporary mount.
        gluster_export = gluster_mgr.export
        tmpdir = tempfile.mkdtemp()
        try:
            common._mount_gluster_vol(self.driver._execute, gluster_export,
                                      tmpdir)
        except exception.GlusterfsException:
            shutil.rmtree(tmpdir, ignore_errors=True)
            raise

        # Delete the contents of a GlusterFS volume that is temporarily
        # mounted.
        # From GlusterFS version 3.7, two directories, '.trashcan' at the root
        # of the GlusterFS volume and 'internal_op' within the '.trashcan'
        # directory, are internally created when a GlusterFS volume is started.
        # GlusterFS does not allow unlink(2) of the two directories. So do not
        # delete the paths of the two directories, but delete their contents
        # along with the rest of the contents of the volume.
        srvaddr = gluster_mgr.host_access
        if common.numreduct(self.glusterfs_versions[srvaddr]) < (3, 7):
            cmd = ['find', tmpdir, '-mindepth', '1', '-delete']
        else:
            ignored_dirs = map(lambda x: os.path.join(tmpdir, *x),
                               [('.trashcan', ), ('.trashcan', 'internal_op')])
            ignored_dirs = list(ignored_dirs)
            cmd = ['find', tmpdir, '-mindepth', '1', '!', '-path',
                   ignored_dirs[0], '!', '-path', ignored_dirs[1], '-delete']
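        # Illustration (hypothetical tmpdir): for GlusterFS >= 3.7 with
        # tmpdir '/tmp/tmpXYZ', cmd amounts to
        #   find /tmp/tmpXYZ -mindepth 1 \
        #     ! -path /tmp/tmpXYZ/.trashcan \
        #     ! -path /tmp/tmpXYZ/.trashcan/internal_op -delete
        # i.e. everything is wiped except the two undeletable directories
        # themselves.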

        try:
            self.driver._execute(*cmd, run_as_root=True)
        except exception.ProcessExecutionError as exc:
            msg = (_("Error trying to wipe gluster volume. "
                     "gluster_export: %(export)s, Error: %(error)s") %
                   {'export': gluster_export, 'error': exc.stderr})
            LOG.error(msg)
            raise exception.GlusterfsException(msg)
        finally:
            # Unmount.
            common._umount_gluster_vol(self.driver._execute, tmpdir)
            shutil.rmtree(tmpdir, ignore_errors=True)

    def create_share(self, context, share, share_server=None):
        """Create a share using GlusterFS volume.

        1 Manila share = 1 GlusterFS volume. Pick an unused
        GlusterFS volume for use as a share.
        """
        try:
            vol = self._pop_gluster_vol(share['size'])
        except exception.GlusterfsException:
            LOG.error("Error creating share %(share_id)s",
                      {'share_id': share['id']})
            raise

        gmgr = self._glustermanager(vol)
        export = self.driver._setup_via_manager(
            {'share': share, 'manager': gmgr})

        gmgr.set_vol_option(USER_MANILA_SHARE, share['id'])
        self.private_storage.update(share['id'], {'volume': vol})

        # TODO(deepakcs): Enable quota and set it to the share size.

        # For native protocol, the export_location should be of the form:
        # server:/volname
        LOG.info("export_location sent back from create_share: %s",
                 export)
        return export

    def delete_share(self, context, share, share_server=None):
        """Delete a share on the GlusterFS volume.

        1 Manila share = 1 GlusterFS volume. Put the gluster
        volume back in the available list.
        """
        gmgr = self._share_manager(share)
        if not gmgr:
            # Share does not have a record in private storage.
            # It means create_share{,_from_snapshot} did not
            # succeed(*). In that case we should not obstruct
            # share deletion, so we just return doing nothing.
            #
            # (*) or we have database corruption, but then it
            # basically does not matter what we do here.
            return
        clone_of = gmgr.get_vol_option(USER_CLONED_FROM) or ''
        try:
            if UUID_RE.search(clone_of):
                # We take responsibility for the lifecycle
                # management of those volumes which were
                # created by us (as snapshot clones) ...
                gmgr.gluster_call('volume', 'delete', gmgr.volume)
            else:
                # ... for volumes that come from the pool, we return
                # them to the pool (after some purification rituals)
                self._wipe_gluster_vol(gmgr)
                gmgr.set_vol_option(USER_MANILA_SHARE, 'NONE')
                gmgr.set_vol_option('nfs.disable', 'on')

            # When deleting the share instance, we need to
            # update 'self.gluster_used_vols' again
            self.gluster_used_vols = set()
            self.gluster_used_vols.add(gmgr.qualified)
            self._push_gluster_vol(gmgr.qualified)
        except exception.GlusterfsException:
            LOG.error("Error during delete_share request for "
                      "share %(share_id)s", {'share_id': share['id']})
            raise

        self.private_storage.delete(share['id'])
        # TODO(deepakcs): Disable quota.

    @staticmethod
    def _find_actual_backend_snapshot_name(gluster_mgr, snapshot):
        args = ('snapshot', 'list', gluster_mgr.volume, '--mode=script')
        out, err = gluster_mgr.gluster_call(
            *args,
            log=("Retrieving snapshot list"))
        snapgrep = list(filter(lambda x: snapshot['id'] in x, out.split("\n")))
        if len(snapgrep) != 1:
            msg = (_("Failed to identify backing GlusterFS object "
                     "for snapshot %(snap_id)s of share %(share_id)s: "
                     "a single candidate was expected, %(found)d were "
                     "found.") %
                   {'snap_id': snapshot['id'],
                    'share_id': snapshot['share_id'],
                    'found': len(snapgrep)})
            raise exception.GlusterfsException(msg)
        backend_snapshot_name = snapgrep[0]
        return backend_snapshot_name
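        # Note: create_snapshot() below names backend snapshots
        # 'manila-<snapshot id>', so the id-based filter above is expected
        # to find exactly one candidate.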

    def create_share_from_snapshot(self, context, share, snapshot,
                                   share_server=None):
        old_gmgr = self._share_manager(snapshot['share_instance'])

        # The snapshot clone feature of the GlusterFS server, essential to
        # support this API, is available in GlusterFS server versions 3.7
        # and higher. So do a version check.
        vers = self.glusterfs_versions[old_gmgr.host_access]
        minvers = (3, 7)
        if common.numreduct(vers) < minvers:
            minvers_str = '.'.join(six.text_type(c) for c in minvers)
            vers_str = '.'.join(vers)
            msg = (_("GlusterFS version %(version)s on server %(server)s does "
                     "not support creation of shares from snapshot. "
                     "Minimum requirement: %(minversion)s.") %
                   {'version': vers_str, 'server': old_gmgr.host,
                    'minversion': minvers_str})
            LOG.error(msg)
            raise exception.GlusterfsException(msg)

        # Clone the snapshot. The snapshot clone, a new GlusterFS volume,
        # will serve as the share.
        backend_snapshot_name = self._find_actual_backend_snapshot_name(
            old_gmgr, snapshot)
        volume = ''.join(['manila-', share['id']])
        args_tuple = (('snapshot', 'activate', backend_snapshot_name,
                       'force', '--mode=script'),
                      ('snapshot', 'clone', volume, backend_snapshot_name))
        for args in args_tuple:
            out, err = old_gmgr.gluster_call(
                *args,
                log=("Creating share from snapshot"))

        # Get a manager for the new volume/share.
        comp_vol = old_gmgr.components.copy()
        comp_vol.update({'volume': volume})
        gmgr = self._glustermanager(comp_vol)
        export = self.driver._setup_via_manager(
            {'share': share, 'manager': gmgr},
            {'share': snapshot['share_instance'], 'manager': old_gmgr})

        argseq = (('set',
                   [USER_CLONED_FROM, snapshot['share_id']]),
                  ('set', [USER_MANILA_SHARE, share['id']]),
                  ('start', []))
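        # Illustration: each iteration below runs
        # 'gluster volume <op> <vol> [<opargs>...]', with <vol> being
        # 'manila-<share id>', i.e.:
        #   gluster volume set <vol> user.manila-cloned-from <source share id>
        #   gluster volume set <vol> user.manila-share <share id>
        #   gluster volume start <vol>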
        for op, opargs in argseq:
            args = ['volume', op, gmgr.volume] + opargs
            gmgr.gluster_call(*args, log=("Creating share from snapshot"))

        self.gluster_used_vols.add(gmgr.qualified)
        self.private_storage.update(share['id'], {'volume': gmgr.qualified})

        return export

    def create_snapshot(self, context, snapshot, share_server=None):
        """Creates a snapshot."""

        gluster_mgr = self._share_manager(snapshot['share'])
        if gluster_mgr.qualified in self.gluster_nosnap_vols_dict:
            opret, operrno = -1, 0
            operrstr = self.gluster_nosnap_vols_dict[gluster_mgr.qualified]
        else:
            args = ('--xml', 'snapshot', 'create', 'manila-' + snapshot['id'],
                    gluster_mgr.volume)
            out, err = gluster_mgr.gluster_call(
                *args,
                log=("Creating snapshot"))

            if not out:
                raise exception.GlusterfsException(
                    'gluster snapshot create %s: no data received' %
                    gluster_mgr.volume
                )

            outxml = etree.fromstring(out)
            opret = int(common.volxml_get(outxml, 'opRet'))
            operrno = int(common.volxml_get(outxml, 'opErrno'))
            operrstr = common.volxml_get(outxml, 'opErrstr', default=None)

        if opret == -1:
            vers = self.glusterfs_versions[gluster_mgr.host_access]
            if common.numreduct(vers) > (3, 6):
                # This logic has not yet been implemented in GlusterFS 3.6
                if operrno == 0:
                    self.gluster_nosnap_vols_dict[
                        gluster_mgr.qualified] = operrstr
                    msg = _("Share %(share_id)s does not support snapshots: "
                            "%(errstr)s.") % {'share_id': snapshot['share_id'],
                                              'errstr': operrstr}
                    LOG.error(msg)
                    raise exception.ShareSnapshotNotSupported(msg)
            raise exception.GlusterfsException(
                _("Creating snapshot for share %(share_id)s failed "
                  "with %(errno)d: %(errstr)s") % {
                      'share_id': snapshot['share_id'],
                      'errno': operrno,
                      'errstr': operrstr})

    def delete_snapshot(self, context, snapshot, share_server=None):
        """Deletes a snapshot."""

        gluster_mgr = self._share_manager(snapshot['share'])
        backend_snapshot_name = self._find_actual_backend_snapshot_name(
            gluster_mgr, snapshot)
        args = ('--xml', 'snapshot', 'delete', backend_snapshot_name,
                '--mode=script')
        out, err = gluster_mgr.gluster_call(
            *args,
            log=("Deleting snapshot"))

        if not out:
            raise exception.GlusterfsException(
                _('gluster snapshot delete %s: no data received') %
                gluster_mgr.volume
            )

        outxml = etree.fromstring(out)
        gluster_mgr.xml_response_check(outxml, args[1:])

    def ensure_share(self, context, share, share_server=None):
        """Invoked to ensure that share is exported."""
        gmgr = self._share_manager(share)
        self.gluster_used_vols.add(gmgr.qualified)

        gmgr.set_vol_option(USER_MANILA_SHARE, share['id'])

    # Debt...

    def manage_existing(self, share, driver_options):
        raise NotImplementedError()

    def unmanage(self, share):
        raise NotImplementedError()

    def extend_share(self, share, new_size, share_server=None):
        raise NotImplementedError()

    def shrink_share(self, share, new_size, share_server=None):
        raise NotImplementedError()