drbd_receiver.c (drbd-9.1.8) | : | drbd_receiver.c (drbd-9.1.9) | ||
---|---|---|---|---|
skipping to change at line 4278 | skipping to change at line 4278 | |||
} | } | |||
rcu_read_unlock(); | rcu_read_unlock(); | |||
return rv; | return rv; | |||
} | } | |||
static int bitmap_mod_after_handshake(struct drbd_peer_device *peer_device, enum sync_strategy strategy, int peer_node_id) | static int bitmap_mod_after_handshake(struct drbd_peer_device *peer_device, enum sync_strategy strategy, int peer_node_id) | |||
{ | { | |||
struct drbd_device *device = peer_device->device; | struct drbd_device *device = peer_device->device; | |||
/* reduce contention by giving up uuid_sem before taking bitmap locks */ | ||||
if (test_and_clear_bit(HOLDING_UUID_READ_LOCK, &peer_device->flags)) { | ||||
struct drbd_transport *transport = &peer_device->connection->transport; | ||||
up_read_non_owner(&device->uuid_sem); | ||||
transport->ops->set_rcvtimeo(transport, DATA_STREAM, MAX_SCHEDULE_TIMEOUT); | ||||
} | ||||
if (strategy == SYNC_SOURCE_COPY_BITMAP) { | if (strategy == SYNC_SOURCE_COPY_BITMAP) { | |||
int from = device->ldev->md.peers[peer_node_id].bitmap_index; | int from = device->ldev->md.peers[peer_node_id].bitmap_index; | |||
if (from == -1) | if (from == -1) | |||
from = drbd_unallocated_index(device->ldev, device->bitmap->bm_max_peers); | from = drbd_unallocated_index(device->ldev, device->bitmap->bm_max_peers); | |||
if (peer_device->bitmap_index == -1) | if (peer_device->bitmap_index == -1) | |||
return 0; | return 0; | |||
if (from == -1) | if (from == -1) | |||
skipping to change at line 4348 | skipping to change at line 4341 | |||
if (err) | if (err) | |||
return err; | return err; | |||
} | } | |||
return 0; | return 0; | |||
} | } | |||
static enum drbd_repl_state strategy_to_repl_state(struct drbd_peer_device *peer_device, | static enum drbd_repl_state strategy_to_repl_state(struct drbd_peer_device *peer_device, | |||
enum drbd_role peer_role, | enum drbd_role peer_role, | |||
enum sync_strategy strategy) | enum sync_strategy strategy) | |||
{ | { | |||
struct drbd_device *device = peer_device->device; | ||||
enum drbd_role role = peer_device->device->resource->role[NOW]; | enum drbd_role role = peer_device->device->resource->role[NOW]; | |||
enum drbd_repl_state rv; | enum drbd_repl_state rv; | |||
if (strategy == SYNC_SOURCE_IF_BOTH_FAILED || strategy == SYNC_TARGET_IF_BOTH_FAILED) { | if (strategy == SYNC_SOURCE_IF_BOTH_FAILED || strategy == SYNC_TARGET_IF_BOTH_FAILED) { | |||
if (role == R_PRIMARY || peer_role == R_PRIMARY) { | if (role == R_PRIMARY || peer_role == R_PRIMARY) { | |||
/* We have at least one primary, follow that with the resync decision */ | /* We have at least one primary, follow that with the resync decision */ | |||
rv = peer_role == R_SECONDARY ? L_WF_BITMAP_S : | rv = peer_role == R_SECONDARY ? L_WF_BITMAP_S : | |||
role == R_SECONDARY ? L_WF_BITMAP_T : | role == R_SECONDARY ? L_WF_BITMAP_T : | |||
L_ESTABLISHED; | L_ESTABLISHED; | |||
return rv; | return rv; | |||
} | } | |||
/* No current primary. Handle it as a common power failure, consider the | /* No current primary. Handle it as a common power failure, consider the | |||
roles at crash time */ | roles at crash time */ | |||
} | } | |||
if (strategy_descriptor(strategy).is_sync_source) { | if (strategy_descriptor(strategy).is_sync_source) { | |||
rv = L_WF_BITMAP_S; | rv = L_WF_BITMAP_S; | |||
} else if (strategy_descriptor(strategy).is_sync_target) { | } else if (strategy_descriptor(strategy).is_sync_target) { | |||
rv = L_WF_BITMAP_T; | rv = L_WF_BITMAP_T; | |||
} else { | } else { | |||
u64 peer_current_uuid = peer_device->current_uuid & ~UUID_PRIMARY; | ||||
u64 my_current_uuid = drbd_current_uuid(device) & ~UUID_PRIMARY; | ||||
rv = L_ESTABLISHED; | rv = L_ESTABLISHED; | |||
if (peer_current_uuid == my_current_uuid && | ||||
!(peer_device->uuid_flags & UUID_FLAG_SYNC_TARGET) && | ||||
device->disk_state[NOW] >= D_OUTDATED && | ||||
peer_device->disk_state[NOW] >= D_OUTDATED) { | ||||
if (drbd_bitmap_uuid(peer_device)) { | ||||
drbd_info(peer_device, "clearing bitmap UUID and bitmap content (%lu bits)\n", | ||||
drbd_bm_total_weight(peer_device)); | ||||
down_write(&device->uuid_sem); | ||||
drbd_uuid_set_bitmap(peer_device, 0); | ||||
up_write(&device->uuid_sem); | ||||
} else if (drbd_bm_total_weight(peer_device)) { | ||||
drbd_info(peer_device, "bitmap content (%lu bits)\n", | ||||
drbd_bm_total_weight(peer_device)); | ||||
} | ||||
drbd_bm_clear_many_bits(peer_device, 0, -1UL); | ||||
} | ||||
} | } | |||
return rv; | return rv; | |||
} | } | |||
static enum sync_strategy drbd_disk_states_source_strategy( | static enum sync_strategy drbd_disk_states_source_strategy( | |||
struct drbd_peer_device *peer_device, | struct drbd_peer_device *peer_device, | |||
int *peer_node_id) | int *peer_node_id) | |||
{ | { | |||
struct drbd_device *device = peer_device->device; | struct drbd_device *device = peer_device->device; | |||
skipping to change at line 7324 | skipping to change at line 7296 | |||
drbd_warn(peer_device, "Downgrading joining peer's disk as its data is older\n"); | drbd_warn(peer_device, "Downgrading joining peer's disk as its data is older\n"); | |||
if (peer_disk_state >= D_CONSISTENT) | if (peer_disk_state >= D_CONSISTENT) | |||
peer_disk_state = D_CONSISTENT; /* See "Do not trust this guy!" in sanitize_state() */ | peer_disk_state = D_CONSISTENT; /* See "Do not trust this guy!" in sanitize_state() */ | |||
} else { | } else { | |||
drbd_warn(peer_device, "Current UUID of peer does not match my" | drbd_warn(peer_device, "Current UUID of peer does not match my" | |||
" exposed UUID."); | " exposed UUID."); | |||
set_bit(CONN_HANDSHAKE_DISCONNECT, &connection->flags); | set_bit(CONN_HANDSHAKE_DISCONNECT, &connection->flags); | |||
} | } | |||
} | } | |||
} | } | |||
if (peer_device->repl_state[NOW] == L_OFF && peer_state.disk == D_DISKLESS) { | if (peer_device->repl_state[NOW] == L_OFF && peer_state.disk == D_DISKLESS && get_ldev(device)) { | |||
u64 uuid_flags = 0; | u64 uuid_flags = 0; | |||
if (get_ldev(device)) { | drbd_collect_local_uuid_flags(peer_device, NULL); | |||
drbd_collect_local_uuid_flags(peer_device, NULL); | ||||
put_ldev(device); | ||||
} | ||||
drbd_uuid_dump_self(peer_device, peer_device->comm_bm_set, uuid_flags); | drbd_uuid_dump_self(peer_device, peer_device->comm_bm_set, uuid_flags); | |||
drbd_info(peer_device, "peer's exposed UUID: %016llX\n", peer_device->current_uuid); | drbd_info(peer_device, "peer's exposed UUID: %016llX\n", peer_device->current_uuid); | |||
if (peer_state.role == R_PRIMARY && | if (peer_state.role == R_PRIMARY && | |||
(peer_device->current_uuid & ~UUID_PRIMARY) == | (peer_device->current_uuid & ~UUID_PRIMARY) == | |||
(drbd_current_uuid(device) & ~UUID_PRIMARY)) { | (drbd_current_uuid(device) & ~UUID_PRIMARY)) { | |||
/* Connecting to diskless primary peer. When the state change is committed, | /* Connecting to diskless primary peer. When the state change is committed, | |||
* sanitize_state might set me D_UP_TO_DATE. Make sure the | * sanitize_state might set me D_UP_TO_DATE. Make sure the | |||
* effective_size is set. */ | * effective_size is set. */ | |||
peer_device->max_size = peer_device->c_size; | peer_device->max_size = peer_device->c_size; | |||
drbd_determine_dev_size(device, peer_device->max_size, 0, NULL); | drbd_determine_dev_size(device, peer_device->max_size, 0, NULL); | |||
} | } | |||
| put_ldev(device); | |||
} | } | |||
/* This is after the point where we did UUID comparison and joined with the | if (test_bit(HOLDING_UUID_READ_LOCK, &peer_device->flags)) { | |||
diskless case again. Releasing uuid_sem here */ | ||||
if (test_and_clear_bit(HOLDING_UUID_READ_LOCK, &peer_device->flags)) { | ||||
struct drbd_transport *transport = &connection->transport; | struct drbd_transport *transport = &connection->transport; | |||
up_read_non_owner(&device->uuid_sem); | ||||
/* Last packet of handshake received, disarm receive timeout */ | /* Last packet of handshake received, disarm receive timeout */ | |||
transport->ops->set_rcvtimeo(transport, DATA_STREAM, MAX_SCHEDULE_TIMEOUT); | transport->ops->set_rcvtimeo(transport, DATA_STREAM, MAX_SCHEDULE_TIMEOUT); | |||
} | } | |||
if (new_repl_state == L_ESTABLISHED && peer_disk_state == D_CONSISTENT && | if (new_repl_state == L_ESTABLISHED && peer_disk_state == D_CONSISTENT && | |||
drbd_suspended(device) && peer_device->repl_state[NOW] < L_ESTABLISHED && | drbd_suspended(device) && peer_device->repl_state[NOW] < L_ESTABLISHED && | |||
test_and_clear_bit(NEW_CUR_UUID, &device->flags)) { | test_and_clear_bit(NEW_CUR_UUID, &device->flags)) { | |||
/* Do not allow RESEND for a rebooted peer. We can only allow thi s | /* Do not allow RESEND for a rebooted peer. We can only allow thi s | |||
for temporary network outages! */ | for temporary network outages! */ | |||
drbd_err(peer_device, "Aborting Connect, can not thaw IO with an only Consistent peer\n"); | drbd_err(peer_device, "Aborting Connect, can not thaw IO with an only Consistent peer\n"); | |||
End of changes. 9 change blocks. 44 lines changed or deleted, 5 lines changed or added.