"Fossies" - the Fresh Open Source Software Archive  

Source code changes of the file "cinder/volume/drivers/rbd.py" between
cinder-15.4.1.tar.gz and cinder-15.5.0.tar.gz

About: OpenStack Cinder (Core Service: Block Storage) provides persistent block storage to running instances. Its pluggable driver architecture facilitates the creation and management of block storage devices.
The "Train" series (maintained release).

rbd.py (cinder-15.4.1) vs. rbd.py (cinder-15.5.0)
skipping to change at line 84 (15.4.1) / line 84 (15.5.0)

                 default=False,
                 help='Flatten volumes created from snapshots to remove '
                      'dependency from volume to snapshot'),
     cfg.StrOpt('rbd_secret_uuid',
                help='The libvirt uuid of the secret for the rbd_user '
                     'volumes'),
     cfg.IntOpt('rbd_max_clone_depth',
                default=5,
                help='Maximum number of nested volume clones that are '
                     'taken before a flatten occurs. Set to 0 to disable '
-                    'cloning.'),
+                    'cloning. Note: lowering this value will not affect '
+                    'existing volumes whose clone depth exceeds the new '
+                    'value.'),
     cfg.IntOpt('rbd_store_chunk_size', default=4,
                help='Volumes will be chunked into objects of this size '
                     '(in megabytes).'),
     cfg.IntOpt('rados_connect_timeout', default=-1,
                help='Timeout value (in seconds) used when connecting to '
                     'ceph cluster. If value < 0, no timeout is set and '
                     'default librados value is used.'),
     cfg.IntOpt('rados_connection_retries', default=3,
                help='Number of retries if connection to ceph cluster '
                     'failed.'),
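
Note (illustration, not part of cinder): a minimal sketch of how the rbd_max_clone_depth setting is applied, based on the help text above and on the "depth >= rbd_max_clone_depth" flatten check that appears later in this diff. The function and variable names are hypothetical.

    def should_flatten_after_clone(current_depth, rbd_max_clone_depth=5):
        """Return True when a freshly created clone should be flattened.

        rbd_max_clone_depth == 0 means cloning is effectively disabled:
        every volume created from a source is flattened (made independent
        of its parent) right away. Otherwise the new clone is flattened
        once the source's clone chain has reached the configured maximum.
        """
        if rbd_max_clone_depth == 0:
            return True
        return current_depth >= rbd_max_clone_depth

    # With the default of 5, a clone whose source already sits at depth 5
    # gets flattened; per the new help note, lowering the option later does
    # not retroactively flatten existing deeper chains.
    assert should_flatten_after_clone(5) is True
    assert should_flatten_after_clone(2) is False
    assert should_flatten_after_clone(0, rbd_max_clone_depth=0) is True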
skipping to change at line 250 (15.4.1) / line 252 (15.5.0)

             if val is not None:
                 setattr(self.configuration, attr, utils.convert_str(val))
         self._backend_name = (self.configuration.volume_backend_name or
                               self.__class__.__name__)
         self._active_backend_id = active_backend_id
         self._active_config = {}
         self._is_replication_enabled = False
         self._replication_targets = []
         self._target_names = []
+        self._clone_v2_api_checked = False

         if self.rbd is not None:
             self.RBD_FEATURE_LAYERING = self.rbd.RBD_FEATURE_LAYERING
             self.RBD_FEATURE_EXCLUSIVE_LOCK = \
                 self.rbd.RBD_FEATURE_EXCLUSIVE_LOCK
             self.RBD_FEATURE_OBJECT_MAP = self.rbd.RBD_FEATURE_OBJECT_MAP
             self.RBD_FEATURE_FAST_DIFF = self.rbd.RBD_FEATURE_FAST_DIFF
             self.RBD_FEATURE_JOURNALING = self.rbd.RBD_FEATURE_JOURNALING
             self.MULTIATTACH_EXCLUSIONS = (
                 self.RBD_FEATURE_JOURNALING |
                 self.RBD_FEATURE_FAST_DIFF |
                 self.RBD_FEATURE_OBJECT_MAP |
                 self.RBD_FEATURE_EXCLUSIVE_LOCK)

     @staticmethod
     def get_driver_options():
         return RBD_OPTS

+    def _show_msg_check_clone_v2_api(self, volume_name):
+        if not self._clone_v2_api_checked:
+            self._clone_v2_api_checked = True
+            with RBDVolumeProxy(self, volume_name) as volume:
+                try:
+                    if (volume.volume.op_features() &
+                            self.rbd.RBD_OPERATION_FEATURE_CLONE_PARENT):
+                        LOG.info('Using v2 Clone API')
+                        return
+                except AttributeError:
+                    pass
+                LOG.warning('Not using v2 clone API, please upgrade to'
+                            ' mimic+ and set the OSD minimum client'
+                            ' compat version to mimic for better'
+                            ' performance, fewer deletion issues')
+
     def _get_target_config(self, target_id):
         """Get a replication target from known replication targets."""
         for target in self._replication_targets:
             if target['name'] == target_id:
                 return target
         if not target_id or target_id == 'default':
             return {
                 'name': self.configuration.rbd_cluster_name,
                 'conf': self.configuration.rbd_ceph_conf,
                 'user': self.configuration.rbd_user,
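
Note (illustration, not part of cinder): a standalone sketch of the clone v2 capability probe the new _show_msg_check_clone_v2_api() performs, assuming a reachable Ceph cluster with the python-rados/python-rbd bindings installed. The ceph.conf path, pool name and image name are placeholders.

    import rados
    import rbd

    cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')   # example path
    cluster.connect()
    try:
        ioctx = cluster.open_ioctx('volumes')                # hypothetical pool
        try:
            image = rbd.Image(ioctx, 'volume-0000')          # hypothetical image
            try:
                # Same constant the driver checks; op_features() is missing on
                # pre-Mimic bindings, hence the AttributeError fallback, just
                # as in the driver code above.
                clone_v2 = bool(image.op_features() &
                                rbd.RBD_OPERATION_FEATURE_CLONE_PARENT)
            except AttributeError:
                clone_v2 = False
            finally:
                image.close()
            print('clone v2 API available:', clone_v2)
        finally:
            ioctx.close()
    finally:
        cluster.shutdown()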
skipping to change at line 634 (15.4.1) / line 653 (15.5.0)

         parent_volume = self.rbd.Image(client.ioctx, volume_name)
         try:
             _pool, parent, _snap = self._get_clone_info(parent_volume,
                                                         volume_name)
         finally:
             parent_volume.close()

         if not parent:
             return depth

-        # If clone depth was reached, flatten should have occurred so if it has
-        # been exceeded then something has gone wrong.
-        if depth > self.configuration.rbd_max_clone_depth:
-            raise Exception(_("clone depth exceeds limit of %s") %
-                            (self.configuration.rbd_max_clone_depth))
-
         return self._get_clone_depth(client, parent, depth + 1)

     def _extend_if_required(self, volume, src_vref):
         """Extends a volume if required

         In case src_vref size is smaller than the size if the requested
         new volume call _resize().
         """
         if volume.size != src_vref.size:
             LOG.debug("resize volume '%(dst_vol)s' from %(src_size)d to "
skipping to change at line 709 (15.4.1) / line 722 (15.5.0)

                        {'src_vol': src_name,
                         'src_snap': clone_snap,
                         'dest': dest_name,
                         'error': e})
                 LOG.exception(msg)
                 raise exception.VolumeBackendAPIException(data=msg)

             depth = self._get_clone_depth(client, src_name)
             # If dest volume is a clone and rbd_max_clone_depth reached,
             # flatten the dest after cloning. Zero rbd_max_clone_depth means
-            # infinite is allowed.
+            # volumes are always flattened.
             if depth >= self.configuration.rbd_max_clone_depth:
                 LOG.info("maximum clone depth (%d) has been reached - "
                          "flattening dest volume",
                          self.configuration.rbd_max_clone_depth)

                 # Flatten destination volume
                 try:
                     with RBDVolumeProxy(self, dest_name, client=client,
                                         ioctx=client.ioctx) as dest_volume:
                         LOG.debug("flattening dest volume %s", dest_name)
skipping to change at line 1010 (15.4.1) / line 1023 (15.5.0)

                 vol.resize(size)

     def create_volume_from_snapshot(self, volume, snapshot):
         """Creates a volume from a snapshot."""
         volume_update = self._clone(volume, self.configuration.rbd_pool,
                                     snapshot.volume_name, snapshot.name)
         if self.configuration.rbd_flatten_volume_from_snapshot:
             self._flatten(self.configuration.rbd_pool, volume.name)
         if int(volume.size):
             self._resize(volume)
+        self._show_msg_check_clone_v2_api(snapshot.volume_name)
         return volume_update

     def _delete_backup_snaps(self, rbd_image):
         backup_snaps = self._get_backup_snaps(rbd_image)
         if backup_snaps:
             for snap in backup_snaps:
                 rbd_image.remove_snap(snap['name'])
         else:
             LOG.debug("volume has no backup snaps")
skipping to change at line 1573 (15.4.1) / line 1588 (15.5.0)

         with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp:
             image_utils.fetch_to_raw(context, image_service, image_id,
                                      tmp.name,
                                      self.configuration.volume_dd_blocksize,
                                      size=volume.size)

             if encrypted:
                 self._encrypt_image(context, volume, tmp_dir, tmp.name)

-            self.delete_volume(volume)
+            @utils.retry(exception.VolumeIsBusy,
+                         self.configuration.rados_connection_interval,
+                         self.configuration.rados_connection_retries)
+            def _delete_volume(volume):
+                self.delete_volume(volume)
+
+            _delete_volume(volume)

             chunk_size = self.configuration.rbd_store_chunk_size * units.Mi
             order = int(math.log(chunk_size, 2))
             # keep using the command line import instead of librbd since it
             # detects zeroes to preserve sparseness in the image
             args = ['rbd', 'import',
                     '--pool', self.configuration.rbd_pool,
                     '--order', order,
                     tmp.name, volume.name,
                     '--new-format']
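
Note (illustration, not cinder's utils.retry): the decorator added above retries the delete while the RBD image is still busy, with the positional arguments mirroring the call shown (exception type, retry interval in seconds, number of attempts). A simplified, generic sketch of that pattern, plus the object-order arithmetic from the end of the block:

    import functools
    import math
    import time

    def retry(exc_type, interval=1, retries=3):
        """Simplified stand-in for the retry decorator used above: re-call
        the wrapped function when exc_type is raised, up to `retries`
        attempts, sleeping `interval` seconds between attempts (the real
        helper also applies a backoff)."""
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                for attempt in range(1, retries + 1):
                    try:
                        return func(*args, **kwargs)
                    except exc_type:
                        if attempt == retries:
                            raise
                        time.sleep(interval)
            return wrapper
        return decorator

    # The rbd 'order' is the log2 of the object size in bytes: the default
    # rbd_store_chunk_size of 4 (MiB) is 4 * 1024 * 1024 bytes, i.e. order 22.
    assert int(math.log(4 * 1024 * 1024, 2)) == 22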
End of changes. 7 change blocks; 9 lines changed or deleted, 30 lines changed or added.