lib_base.py (manila-8.1.3) vs. lib_base.py (manila-8.1.4)

skipping to change at line 1702
        # Change the source replica for all destinations to the new
        # active replica.
        for r in replica_list:
            if r['id'] != replica['id']:
                r = self._safe_change_replica_source(dm_session, r,
                                                     orig_active_replica,
                                                     replica,
                                                     replica_list)
                new_replica_list.append(r)
-        # Unmount the original active replica.
        orig_active_vserver = dm_session.get_vserver_from_share(
            orig_active_replica)
+        # Cleanup the original active share if necessary
+        orig_active_replica_backend = (
+            share_utils.extract_host(orig_active_replica['host'],
+                                     level='backend_name'))
+        orig_active_replica_name = self._get_backend_share_name(
+            orig_active_replica['id'])
+        orig_active_vserver_client = data_motion.get_client_for_backend(
+            orig_active_replica_backend, vserver_name=orig_active_vserver)
+        orig_active_replica_helper = self._get_helper(orig_active_replica)
+        orig_active_replica_helper.set_client(orig_active_vserver_client)
+        try:
+            orig_active_replica_helper.cleanup_demoted_replica(
+                orig_active_replica, orig_active_replica_name)
+        except exception.StorageCommunicationException:
+            LOG.exception("Could not cleanup the original active replica %s.",
+                          orig_active_replica['id'])
+        # Unmount the original active replica.
        self._unmount_orig_active_replica(orig_active_replica,
                                          orig_active_vserver)
        self._handle_qos_on_replication_change(dm_session,
                                               new_active_replica,
                                               orig_active_replica,
                                               share_server=share_server)
        return new_replica_list
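Note on the change block above: in 8.1.4 the promotion path now attempts to clean up the demoted (previously active) replica on its original backend before unmounting it, and a StorageCommunicationException from that backend is only logged, so the promotion itself still completes. A minimal standalone sketch of that best-effort pattern follows; BackendUnreachable, the free-standing best_effort_cleanup function, and the helper object are illustrative placeholders, not the driver's real API surface.

    import logging

    LOG = logging.getLogger(__name__)


    class BackendUnreachable(Exception):
        """Stand-in for exception.StorageCommunicationException."""


    def best_effort_cleanup(helper, demoted_replica, backend_share_name):
        # Try to clean up state left on the demoted replica, but never let an
        # unreachable backend abort a promotion that has already happened.
        try:
            helper.cleanup_demoted_replica(demoted_replica, backend_share_name)
        except BackendUnreachable:
            LOG.exception("Could not cleanup the original active replica %s.",
                          demoted_replica['id'])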
skipping to change at line 2206 (manila-8.1.3) / line 2226 (manila-8.1.4)
            'phase': status['phase'],
            'details': status['details'],
        }
    def migration_cancel(self, context, source_share, destination_share,
                         source_snapshots, snapshot_mappings,
                         share_server=None, destination_share_server=None):
        """Abort an ongoing migration."""
        vserver, vserver_client = self._get_vserver(share_server=share_server)
        share_volume = self._get_backend_share_name(source_share['id'])
+        retries = (self.configuration.netapp_migration_cancel_timeout / 5 or
+                   1)
        try:
            self._get_volume_move_status(source_share, share_server)
        except exception.NetAppException:
            LOG.exception("Could not get volume move status.")
            return
        self._client.abort_volume_move(share_volume, vserver)
+        @manila_utils.retry(exception.InUse, interval=5,
+                            retries=retries, backoff_rate=1)
+        def wait_for_migration_cancel_complete():
+            move_status = self._get_volume_move_status(source_share,
+                                                       share_server)
+            if move_status['state'] == 'failed':
+                return
+            else:
+                msg = "Migration cancelation isn't finished yet."
+                raise exception.InUse(message=msg)
+        try:
+            wait_for_migration_cancel_complete()
+        except exception.InUse:
+            move_status = self._get_volume_move_status(source_share,
+                                                       share_server)
+            msg_args = {
+                'share_move_state': move_status['state']
+            }
+            msg = _("Migration cancelation was not successful. The share "
+                    "migration state failed while transitioning from "
+                    "%(share_move_state)s state to 'failed'. Retries "
+                    "exhausted.") % msg_args
+            raise exception.NetAppException(message=msg)
+        except exception.NetAppException:
+            LOG.exception("Could not get volume move status.")
msg = ("Share volume move operation for share %(shr)s from host " | msg = ("Share volume move operation for share %(shr)s from host " | |||
"%(src)s to %(dest)s was successfully aborted.") | "%(src)s to %(dest)s was successfully aborted.") | |||
msg_args = { | msg_args = { | |||
'shr': source_share['id'], | 'shr': source_share['id'], | |||
'src': source_share['host'], | 'src': source_share['host'], | |||
'dest': destination_share['host'], | 'dest': destination_share['host'], | |||
} | } | |||
LOG.info(msg, msg_args) | LOG.info(msg, msg_args) | |||
    def migration_complete(self, context, source_share, destination_share,
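Note on the change block above: migration_cancel in 8.1.4 no longer assumes the abort is instantaneous. After calling abort_volume_move it polls the volume-move status every 5 seconds, with the retry budget derived from the netapp_migration_cancel_timeout option (timeout / 5, minimum one attempt), and raises NetAppException if the move never reaches the 'failed' state. A rough standalone sketch of that polling idea, assuming a hypothetical get_move_state callable, is:

    import time


    class MoveStillRunning(Exception):
        """Stand-in for exception.InUse while the move has not failed yet."""


    def wait_until_move_aborted(get_move_state, timeout=3600, interval=5):
        # Poll until the backend reports the volume move as 'failed' (the
        # abort took effect), or give up once the retry budget is spent.
        retries = int(timeout / interval) or 1
        for _ in range(retries):
            if get_move_state() == 'failed':
                return
            time.sleep(interval)
        raise MoveStillRunning("volume move did not reach 'failed' in time")

In the actual change this loop is expressed with @manila_utils.retry(exception.InUse, interval=5, retries=retries, backoff_rate=1) wrapped around a local wait_for_migration_cancel_complete() function, as shown in the diff above.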
End of changes. 4 change blocks. 1 line changed or deleted, 50 lines changed or added.