"Fossies" - the Fresh Open Source Software Archive  

Source code changes of the file "drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c" between
linux-5.3-rc3.tar.gz and linux-5.3-rc4.tar.gz

About: The full source of the development Linux kernel 5.3 (release candidate).

rxmq.c  (linux-5.3-rc3):rxmq.c  (linux-5.3-rc4)
skipping to change at line 466 skipping to change at line 466
dup_data->last_seq[tid] = hdr->seq_ctrl; dup_data->last_seq[tid] = hdr->seq_ctrl;
dup_data->last_sub_frame[tid] = sub_frame_idx; dup_data->last_sub_frame[tid] = sub_frame_idx;
rx_status->flag |= RX_FLAG_DUP_VALIDATED; rx_status->flag |= RX_FLAG_DUP_VALIDATED;
return false; return false;
} }
int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
const u8 *data, u32 count) const u8 *data, u32 count, bool async)
{ {
struct iwl_rxq_sync_cmd *cmd; u8 buf[sizeof(struct iwl_rxq_sync_cmd) +
sizeof(struct iwl_mvm_rss_sync_notif)];
struct iwl_rxq_sync_cmd *cmd = (void *)buf;
u32 data_size = sizeof(*cmd) + count; u32 data_size = sizeof(*cmd) + count;
int ret; int ret;
/* should be DWORD aligned */ /*
if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE)) * size must be a multiple of DWORD
* Ensure we don't overflow buf
*/
if (WARN_ON(count & 3 ||
count > sizeof(struct iwl_mvm_rss_sync_notif)))
return -EINVAL; return -EINVAL;
cmd = kzalloc(data_size, GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->rxq_mask = cpu_to_le32(rxq_mask); cmd->rxq_mask = cpu_to_le32(rxq_mask);
cmd->count = cpu_to_le32(count); cmd->count = cpu_to_le32(count);
cmd->flags = 0; cmd->flags = 0;
memcpy(cmd->payload, data, count); memcpy(cmd->payload, data, count);
ret = iwl_mvm_send_cmd_pdu(mvm, ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(DATA_PATH_GROUP, WIDE_ID(DATA_PATH_GROUP,
TRIGGER_RX_QUEUES_NOTIF_CMD), TRIGGER_RX_QUEUES_NOTIF_CMD),
0, data_size, cmd); async ? CMD_ASYNC : 0, data_size, cmd);
kfree(cmd);
return ret; return ret;
} }
/* /*
* Returns true if sn2 - buffer_size < sn1 < sn2. * Returns true if sn2 - buffer_size < sn1 < sn2.
* To be used only in order to compare reorder buffer head with NSSN. * To be used only in order to compare reorder buffer head with NSSN.
* We fully trust NSSN unless it is behind us due to reorder timeout. * We fully trust NSSN unless it is behind us due to reorder timeout.
* Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN. * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
*/ */
static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size) static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
{ {
return ieee80211_sn_less(sn1, sn2) && return ieee80211_sn_less(sn1, sn2) &&
!ieee80211_sn_less(sn1, sn2 - buffer_size); !ieee80211_sn_less(sn1, sn2 - buffer_size);
} }
static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
{
struct iwl_mvm_rss_sync_notif notif = {
.metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
.metadata.sync = 0,
.nssn_sync.baid = baid,
.nssn_sync.nssn = nssn,
};
iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}
#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10) #define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
enum iwl_mvm_release_flags {
IWL_MVM_RELEASE_SEND_RSS_SYNC = BIT(0),
IWL_MVM_RELEASE_FROM_RSS_SYNC = BIT(1),
};
static void iwl_mvm_release_frames(struct iwl_mvm *mvm, static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, struct ieee80211_sta *sta,
struct napi_struct *napi, struct napi_struct *napi,
struct iwl_mvm_baid_data *baid_data, struct iwl_mvm_baid_data *baid_data,
struct iwl_mvm_reorder_buffer *reorder_buf, struct iwl_mvm_reorder_buffer *reorder_buf,
u16 nssn) u16 nssn, u32 flags)
{ {
struct iwl_mvm_reorder_buf_entry *entries = struct iwl_mvm_reorder_buf_entry *entries =
&baid_data->entries[reorder_buf->queue * &baid_data->entries[reorder_buf->queue *
baid_data->entries_per_queue]; baid_data->entries_per_queue];
u16 ssn = reorder_buf->head_sn; u16 ssn = reorder_buf->head_sn;
lockdep_assert_held(&reorder_buf->lock); lockdep_assert_held(&reorder_buf->lock);
/*
* We keep the NSSN not too far behind, if we are sync'ing it and it
* is more than 2048 ahead of us, it must be behind us. Discard it.
* This can happen if the queue that hit the 0 / 2048 seqno was lagging
* behind and this queue already processed packets. The next if
* would have caught cases where this queue would have processed less
* than 64 packets, but it may have processed more than 64 packets.
*/
if ((flags & IWL_MVM_RELEASE_FROM_RSS_SYNC) &&
ieee80211_sn_less(nssn, ssn))
goto set_timer;
/* ignore nssn smaller than head sn - this can happen due to timeout */ /* ignore nssn smaller than head sn - this can happen due to timeout */
if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size)) if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
goto set_timer; goto set_timer;
while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) { while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
int index = ssn % reorder_buf->buf_size; int index = ssn % reorder_buf->buf_size;
struct sk_buff_head *skb_list = &entries[index].e.frames; struct sk_buff_head *skb_list = &entries[index].e.frames;
struct sk_buff *skb; struct sk_buff *skb;
ssn = ieee80211_sn_inc(ssn); ssn = ieee80211_sn_inc(ssn);
if ((flags & IWL_MVM_RELEASE_SEND_RSS_SYNC) &&
(ssn == 2048 || ssn == 0))
iwl_mvm_sync_nssn(mvm, baid_data->baid, ssn);
/* /*
* Empty the list. Will have more than one frame for A-MSDU. * Empty the list. Will have more than one frame for A-MSDU.
* Empty list is valid as well since nssn indicates frames were * Empty list is valid as well since nssn indicates frames were
* received. * received.
*/ */
while ((skb = __skb_dequeue(skb_list))) { while ((skb = __skb_dequeue(skb_list))) {
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
reorder_buf->queue, reorder_buf->queue,
sta, false); sta, false);
skipping to change at line 618 skipping to change at line 651
rcu_read_lock(); rcu_read_lock();
sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]); sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]);
mvmsta = iwl_mvm_sta_from_mac80211(sta); mvmsta = iwl_mvm_sta_from_mac80211(sta);
/* SN is set to the last expired frame + 1 */ /* SN is set to the last expired frame + 1 */
IWL_DEBUG_HT(buf->mvm, IWL_DEBUG_HT(buf->mvm,
"Releasing expired frames for sta %u, sn %d\n", "Releasing expired frames for sta %u, sn %d\n",
sta_id, sn); sta_id, sn);
iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif, iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif,
sta, baid_data->tid); sta, baid_data->tid);
iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data, buf, sn); iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data,
buf, sn, IWL_MVM_RELEASE_SEND_RSS_SYNC);
rcu_read_unlock(); rcu_read_unlock();
} else { } else {
/* /*
* If no frame expired and there are stored frames, index is now * If no frame expired and there are stored frames, index is now
* pointing to the first unexpired frame - modify timer * pointing to the first unexpired frame - modify timer
* accordingly to this frame. * accordingly to this frame.
*/ */
mod_timer(&buf->reorder_timer, mod_timer(&buf->reorder_timer,
entries[index].e.reorder_time + entries[index].e.reorder_time +
1 + RX_REORDER_BUF_TIMEOUT_MQ); 1 + RX_REORDER_BUF_TIMEOUT_MQ);
skipping to change at line 660 skipping to change at line 694
sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]); sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
goto out; goto out;
reorder_buf = &ba_data->reorder_buf[queue]; reorder_buf = &ba_data->reorder_buf[queue];
/* release all frames that are in the reorder buffer to the stack */ /* release all frames that are in the reorder buffer to the stack */
spin_lock_bh(&reorder_buf->lock); spin_lock_bh(&reorder_buf->lock);
iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf, iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
ieee80211_sn_add(reorder_buf->head_sn, ieee80211_sn_add(reorder_buf->head_sn,
reorder_buf->buf_size)); reorder_buf->buf_size),
0);
spin_unlock_bh(&reorder_buf->lock); spin_unlock_bh(&reorder_buf->lock);
del_timer_sync(&reorder_buf->reorder_timer); del_timer_sync(&reorder_buf->reorder_timer);
out: out:
rcu_read_unlock(); rcu_read_unlock();
} }
void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
int queue) struct napi_struct *napi,
u8 baid, u16 nssn, int queue,
u32 flags)
{
struct ieee80211_sta *sta;
struct iwl_mvm_reorder_buffer *reorder_buf;
struct iwl_mvm_baid_data *ba_data;
IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
baid, nssn);
if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
baid >= ARRAY_SIZE(mvm->baid_map)))
return;
rcu_read_lock();
ba_data = rcu_dereference(mvm->baid_map[baid]);
if (WARN_ON_ONCE(!ba_data))
goto out;
sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
goto out;
reorder_buf = &ba_data->reorder_buf[queue];
spin_lock_bh(&reorder_buf->lock);
iwl_mvm_release_frames(mvm, sta, napi, ba_data,
reorder_buf, nssn, flags);
spin_unlock_bh(&reorder_buf->lock);
out:
rcu_read_unlock();
}
static void iwl_mvm_nssn_sync(struct iwl_mvm *mvm,
struct napi_struct *napi, int queue,
const struct iwl_mvm_nssn_sync_data *data)
{
iwl_mvm_release_frames_from_notif(mvm, napi, data->baid,
data->nssn, queue,
IWL_MVM_RELEASE_FROM_RSS_SYNC);
}
void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{ {
struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rxq_sync_notification *notif; struct iwl_rxq_sync_notification *notif;
struct iwl_mvm_internal_rxq_notif *internal_notif; struct iwl_mvm_internal_rxq_notif *internal_notif;
notif = (void *)pkt->data; notif = (void *)pkt->data;
internal_notif = (void *)notif->payload; internal_notif = (void *)notif->payload;
if (internal_notif->sync && if (internal_notif->sync &&
mvm->queue_sync_cookie != internal_notif->cookie) { mvm->queue_sync_cookie != internal_notif->cookie) {
WARN_ONCE(1, "Received expired RX queue sync message\n"); WARN_ONCE(1, "Received expired RX queue sync message\n");
return; return;
} }
switch (internal_notif->type) { switch (internal_notif->type) {
case IWL_MVM_RXQ_EMPTY: case IWL_MVM_RXQ_EMPTY:
break; break;
case IWL_MVM_RXQ_NOTIF_DEL_BA: case IWL_MVM_RXQ_NOTIF_DEL_BA:
iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data); iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
break; break;
case IWL_MVM_RXQ_NSSN_SYNC:
iwl_mvm_nssn_sync(mvm, napi, queue,
(void *)internal_notif->data);
break;
default: default:
WARN_ONCE(1, "Invalid identifier %d", internal_notif->type); WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
} }
if (internal_notif->sync && if (internal_notif->sync &&
!atomic_dec_return(&mvm->queue_sync_counter)) !atomic_dec_return(&mvm->queue_sync_counter))
wake_up(&mvm->rx_sync_waitq); wake_up(&mvm->rx_sync_waitq);
} }
/* /*
skipping to change at line 788 skipping to change at line 873
if (!buffer->valid) { if (!buffer->valid) {
if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) { if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) {
spin_unlock_bh(&buffer->lock); spin_unlock_bh(&buffer->lock);
return false; return false;
} }
buffer->valid = true; buffer->valid = true;
} }
if (ieee80211_is_back_req(hdr->frame_control)) { if (ieee80211_is_back_req(hdr->frame_control)) {
iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn); iwl_mvm_release_frames(mvm, sta, napi, baid_data,
buffer, nssn, 0);
goto drop; goto drop;
} }
/* /*
* If there was a significant jump in the nssn - adjust. * If there was a significant jump in the nssn - adjust.
* If the SN is smaller than the NSSN it might need to first go into * If the SN is smaller than the NSSN it might need to first go into
* the reorder buffer, in which case we just release up to it and the * the reorder buffer, in which case we just release up to it and the
* rest of the function will take care of storing it and releasing up to * rest of the function will take care of storing it and releasing up to
* the nssn * the nssn.
* This should not happen. This queue has been lagging and it should
* have been updated by a IWL_MVM_RXQ_NSSN_SYNC notification. Be nice
* and update the other queues.
*/ */
if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size, if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
buffer->buf_size) || buffer->buf_size) ||
!ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) { !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) {
u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn; u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer,
min_sn); min_sn, IWL_MVM_RELEASE_SEND_RSS_SYNC);
} }
/* drop any oudated packets */ /* drop any oudated packets */
if (ieee80211_sn_less(sn, buffer->head_sn)) if (ieee80211_sn_less(sn, buffer->head_sn))
goto drop; goto drop;
/* release immediately if allowed by nssn and no stored frames */ /* release immediately if allowed by nssn and no stored frames */
if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) { if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
if (iwl_mvm_is_sn_less(buffer->head_sn, nssn, if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
buffer->buf_size) && buffer->buf_size) &&
(!amsdu || last_subframe)) (!amsdu || last_subframe)) {
/*
* If we crossed the 2048 or 0 SN, notify all the
* queues. This is done in order to avoid having a
* head_sn that lags behind for too long. When that
* happens, we can get to a situation where the head_sn
* is within the interval [nssn - buf_size : nssn]
* which will make us think that the nssn is a packet
* that we already freed because of the reordering
* buffer and we will ignore it. So maintain the
* head_sn somewhat updated across all the queues:
* when it crosses 0 and 2048.
*/
if (sn == 2048 || sn == 0)
iwl_mvm_sync_nssn(mvm, baid, sn);
buffer->head_sn = nssn; buffer->head_sn = nssn;
}
/* No need to update AMSDU last SN - we are moving the head */ /* No need to update AMSDU last SN - we are moving the head */
spin_unlock_bh(&buffer->lock); spin_unlock_bh(&buffer->lock);
return false; return false;
} }
/* /*
* release immediately if there are no stored frames, and the sn is * release immediately if there are no stored frames, and the sn is
* equal to the head. * equal to the head.
* This can happen due to reorder timer, where NSSN is behind head_sn. * This can happen due to reorder timer, where NSSN is behind head_sn.
* When we released everything, and we got the next frame in the * When we released everything, and we got the next frame in the
* sequence, according to the NSSN we can't release immediately, * sequence, according to the NSSN we can't release immediately,
* while technically there is no hole and we can move forward. * while technically there is no hole and we can move forward.
*/ */
if (!buffer->num_stored && sn == buffer->head_sn) { if (!buffer->num_stored && sn == buffer->head_sn) {
if (!amsdu || last_subframe) if (!amsdu || last_subframe) {
if (sn == 2048 || sn == 0)
iwl_mvm_sync_nssn(mvm, baid, sn);
buffer->head_sn = ieee80211_sn_inc(buffer->head_sn); buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
}
/* No need to update AMSDU last SN - we are moving the head */ /* No need to update AMSDU last SN - we are moving the head */
spin_unlock_bh(&buffer->lock); spin_unlock_bh(&buffer->lock);
return false; return false;
} }
index = sn % buffer->buf_size; index = sn % buffer->buf_size;
/* /*
* Check if we already stored this frame * Check if we already stored this frame
* As AMSDU is either received or not as whole, logic is simple: * As AMSDU is either received or not as whole, logic is simple:
skipping to change at line 878 skipping to change at line 985
* The reason is that NSSN advances on the first sub-frame, and may * The reason is that NSSN advances on the first sub-frame, and may
* cause the reorder buffer to advance before all the sub-frames arrive. * cause the reorder buffer to advance before all the sub-frames arrive.
* Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
* SN 1. NSSN for first sub frame will be 3 with the result of driver * SN 1. NSSN for first sub frame will be 3 with the result of driver
* releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
* already ahead and it will be dropped. * already ahead and it will be dropped.
* If the last sub-frame is not on this queue - we will get frame * If the last sub-frame is not on this queue - we will get frame
* release notification with up to date NSSN. * release notification with up to date NSSN.
*/ */
if (!amsdu || last_subframe) if (!amsdu || last_subframe)
iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn); iwl_mvm_release_frames(mvm, sta, napi, baid_data,
buffer, nssn,
IWL_MVM_RELEASE_SEND_RSS_SYNC);
spin_unlock_bh(&buffer->lock); spin_unlock_bh(&buffer->lock);
return true; return true;
drop: drop:
kfree_skb(skb); kfree_skb(skb);
spin_unlock_bh(&buffer->lock); spin_unlock_bh(&buffer->lock);
return true; return true;
} }
skipping to change at line 1843 skipping to change at line 1952
kfree_skb(skb); kfree_skb(skb);
goto out; goto out;
} }
rx_status->rate_idx = rate; rx_status->rate_idx = rate;
} }
ieee80211_rx_napi(mvm->hw, sta, skb, napi); ieee80211_rx_napi(mvm->hw, sta, skb, napi);
out: out:
rcu_read_unlock(); rcu_read_unlock();
} }
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue) struct iwl_rx_cmd_buffer *rxb, int queue)
{ {
struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_frame_release *release = (void *)pkt->data; struct iwl_frame_release *release = (void *)pkt->data;
struct ieee80211_sta *sta;
struct iwl_mvm_reorder_buffer *reorder_buf;
struct iwl_mvm_baid_data *ba_data;
int baid = release->baid;
IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
release->baid, le16_to_cpu(release->nssn));
if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID)) iwl_mvm_release_frames_from_notif(mvm, napi, release->baid,
return; le16_to_cpu(release->nssn),
queue, 0);
rcu_read_lock();
ba_data = rcu_dereference(mvm->baid_map[baid]);
if (WARN_ON_ONCE(!ba_data))
goto out;
sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
goto out;
reorder_buf = &ba_data->reorder_buf[queue];
spin_lock_bh(&reorder_buf->lock);
iwl_mvm_release_frames(mvm, sta, napi, ba_data, reorder_buf,
le16_to_cpu(release->nssn));
spin_unlock_bh(&reorder_buf->lock);
out:
rcu_read_unlock();
} }
 End of changes. 26 change blocks. 
51 lines changed or deleted; 134 lines changed or added.

Home  |  About  |  Features  |  All  |  Newest  |  Dox  |  Diffs  |  RSS Feeds  |  Screenshots  |  Comments  |  Imprint  |  Privacy  |  HTTP(S)