"Fossies" - the Fresh Open Source Software Archive  

Source code changes of the file "drivers/net/ethernet/google/gve/gve_rx.c" between
linux-5.3-rc3.tar.gz and linux-5.3-rc4.tar.gz

About: The full source of the development Linux kernel 5.3 (release candidate).

--- gve_rx.c  (linux-5.3-rc3)
+++ gve_rx.c  (linux-5.3-rc4)

(Lines present only in rc3 are prefixed with "-", lines present only in rc4 with "+"; unchanged context has no prefix.)
skipping to change at line 40 skipping to change at line 40
rx->desc.desc_ring = NULL; rx->desc.desc_ring = NULL;
dma_free_coherent(dev, sizeof(*rx->q_resources), dma_free_coherent(dev, sizeof(*rx->q_resources),
rx->q_resources, rx->q_resources_bus); rx->q_resources, rx->q_resources_bus);
rx->q_resources = NULL; rx->q_resources = NULL;
gve_unassign_qpl(priv, rx->data.qpl->id); gve_unassign_qpl(priv, rx->data.qpl->id);
rx->data.qpl = NULL; rx->data.qpl = NULL;
kvfree(rx->data.page_info); kvfree(rx->data.page_info);
slots = rx->data.mask + 1; slots = rx->mask + 1;
bytes = sizeof(*rx->data.data_ring) * slots; bytes = sizeof(*rx->data.data_ring) * slots;
dma_free_coherent(dev, bytes, rx->data.data_ring, dma_free_coherent(dev, bytes, rx->data.data_ring,
rx->data.data_bus); rx->data.data_bus);
rx->data.data_ring = NULL; rx->data.data_ring = NULL;
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx); netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
} }
static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info, static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
struct gve_rx_data_slot *slot, struct gve_rx_data_slot *slot,
dma_addr_t addr, struct page *page) dma_addr_t addr, struct page *page)
skipping to change at line 67
static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
{
    struct gve_priv *priv = rx->gve;
    u32 slots;
    int i;

    /* Allocate one page per Rx queue slot. Each page is split into two
     * packet buffers, when possible we "page flip" between the two.
     */
-   slots = rx->data.mask + 1;
+   slots = rx->mask + 1;
    rx->data.page_info = kvzalloc(slots *
                                  sizeof(*rx->data.page_info), GFP_KERNEL);
    if (!rx->data.page_info)
        return -ENOMEM;
    rx->data.qpl = gve_assign_rx_qpl(priv);
    for (i = 0; i < slots; i++) {
        struct page *page = rx->data.qpl->pages[i];
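The comment in the hunk above describes the buffering scheme the rest of this file relies on: each page in the queue page list is treated as two half-page packet buffers, and the driver alternates ("page flips") between the halves by XOR-ing the in-page offset, as gve_rx_flip_buff() further down shows. The stand-alone C sketch below only illustrates that offset arithmetic; the constant and variable names are local to the example, not part of the driver.

#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE 4096u    /* the driver only page-flips when PAGE_SIZE == 4096 */

int main(void)
{
    uint32_t page_offset = 0;      /* start in the first half of the page */
    int i;

    /* XOR-ing with half the page size toggles the offset between 0 and 2048,
     * i.e. between the two packet buffers carved out of one page.
     */
    for (i = 0; i < 4; i++) {
        printf("slot points at page offset %u\n", page_offset);
        page_offset ^= EXAMPLE_PAGE_SIZE / 2;
    }
    return 0;
}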
skipping to change at line 114
    int err;

    netif_dbg(priv, drv, priv->dev, "allocating rx ring\n");
    /* Make sure everything is zeroed to start with */
    memset(rx, 0, sizeof(*rx));
    rx->gve = priv;
    rx->q_num = idx;
    slots = priv->rx_pages_per_qpl;
-   rx->data.mask = slots - 1;
+   rx->mask = slots - 1;
    /* alloc rx data ring */
    bytes = sizeof(*rx->data.data_ring) * slots;
    rx->data.data_ring = dma_alloc_coherent(hdev, bytes,
                                            &rx->data.data_bus,
                                            GFP_KERNEL);
    if (!rx->data.data_ring)
        return -ENOMEM;
    filled_pages = gve_prefill_rx_pages(rx);
    if (filled_pages < 0) {
        err = -ENOMEM;
        goto abort_with_slots;
    }
-   rx->desc.fill_cnt = filled_pages;
+   rx->fill_cnt = filled_pages;
    /* Ensure data ring slots (packet buffers) are visible. */
    dma_wmb();
    /* Alloc gve_queue_resources */
    rx->q_resources =
        dma_alloc_coherent(hdev,
                           sizeof(*rx->q_resources),
                           &rx->q_resources_bus,
                           GFP_KERNEL);
    if (!rx->q_resources) {
skipping to change at line 159
        err = -EIO;
        goto abort_with_q_resources;
    }
    rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus,
                                            GFP_KERNEL);
    if (!rx->desc.desc_ring) {
        err = -ENOMEM;
        goto abort_with_q_resources;
    }
-   rx->desc.mask = slots - 1;
-   rx->desc.cnt = 0;
+   rx->mask = slots - 1;
+   rx->cnt = 0;
    rx->desc.seqno = 1;
    gve_rx_add_to_block(priv, idx);
    return 0;

abort_with_q_resources:
    dma_free_coherent(hdev, sizeof(*rx->q_resources),
                      rx->q_resources, rx->q_resources_bus);
    rx->q_resources = NULL;
abort_filled:
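Both rc3 masks (rx->data.mask and rx->desc.mask) and the single rc4 rx->mask are built the same way, as slots - 1. That construction only yields a valid ring index if the slot count is a power of two, because then cnt & (slots - 1) equals cnt % slots, which is what the idx = cnt & rx->mask expressions later in the file depend on. A small stand-alone sketch of that identity (the values here are illustrative, not taken from the driver):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint32_t slots = 1024;        /* assumed power-of-two ring size */
    const uint32_t mask = slots - 1;    /* same construction as rx->mask = slots - 1 */
    uint32_t cnt;

    /* A free-running counter wraps onto valid ring indices via the mask. */
    for (cnt = 1020; cnt < 1030; cnt++)
        printf("cnt=%u -> idx=%u (cnt %% slots = %u)\n",
               cnt, cnt & mask, cnt % slots);
    return 0;
}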
skipping to change at line 216
    int i;

    for (i = 0; i < priv->rx_cfg.num_queues; i++)
        gve_rx_free_ring(priv, i);
}

void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx)
{
    u32 db_idx = be32_to_cpu(rx->q_resources->db_index);

-   iowrite32be(rx->desc.fill_cnt, &priv->db_bar2[db_idx]);
+   iowrite32be(rx->fill_cnt, &priv->db_bar2[db_idx]);
}

static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
{
    if (likely(pkt_flags & (GVE_RXF_TCP | GVE_RXF_UDP)))
        return PKT_HASH_TYPE_L4;
    if (pkt_flags & (GVE_RXF_IPV4 | GVE_RXF_IPV6))
        return PKT_HASH_TYPE_L3;
    return PKT_HASH_TYPE_L2;
}
skipping to change at line 276
                            struct gve_rx_data_slot *data_ring)
{
    u64 addr = be64_to_cpu(data_ring->qpl_offset);

    page_info->page_offset ^= PAGE_SIZE / 2;
    addr ^= PAGE_SIZE / 2;
    data_ring->qpl_offset = cpu_to_be64(addr);
}

static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
-                  netdev_features_t feat)
+                  netdev_features_t feat, u32 idx)
{
    struct gve_rx_slot_page_info *page_info;
    struct gve_priv *priv = rx->gve;
    struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
    struct net_device *dev = priv->dev;
    struct sk_buff *skb;
    int pagecount;
    u16 len;
-   u32 idx;

    /* drop this packet */
    if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR))
        return true;

    len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
-   idx = rx->data.cnt & rx->data.mask;
    page_info = &rx->data.page_info[idx];

    /* gvnic can only receive into registered segments. If the buffer
     * can't be recycled, our only choice is to copy the data out of
     * it so that we can return it to the device.
     */
    if (PAGE_SIZE == 4096) {
        if (len <= priv->rx_copybreak) {
            /* Just copy small packets */
skipping to change at line 343 (line 341 in rc4)
            skb = gve_rx_copy(dev, napi, page_info, len);
    }

have_skb:
    /* We didn't manage to allocate an skb but we haven't had any
     * reset worthy failures.
     */
    if (!skb)
        return true;

-   rx->data.cnt++;
    if (likely(feat & NETIF_F_RXCSUM)) {
        /* NIC passes up the partial sum */
        if (rx_desc->csum)
            skb->ip_summed = CHECKSUM_COMPLETE;
        else
            skb->ip_summed = CHECKSUM_NONE;
        skb->csum = csum_unfold(rx_desc->csum);
    }

    /* parse flags & pass relevant info up */
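With CHECKSUM_COMPLETE the device hands the stack a ones'-complement sum over the packet, so upper layers can validate L4 checksums without walking the payload again; csum_unfold() converts the 16-bit value from the descriptor into the kernel's wide checksum type. The stand-alone sketch below only illustrates the classic 16-bit ones'-complement sum such hardware offloads are based on; it is not the kernel's checksum API.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Textbook ones'-complement checksum (RFC 1071 style), for illustration only. */
static uint16_t csum16(const uint8_t *data, size_t len)
{
    uint32_t sum = 0;
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)data[i] << 8 | data[i + 1];
    if (len & 1)
        sum += (uint32_t)data[len - 1] << 8;    /* pad the odd trailing byte */
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);     /* fold carries back in */
    return (uint16_t)~sum;
}

int main(void)
{
    const uint8_t pkt[] = { 0x45, 0x00, 0x00, 0x1c, 0x00, 0x00 };

    printf("checksum = 0x%04x\n", csum16(pkt, sizeof(pkt)));
    return 0;
}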
skipping to change at line 373 (line 369 in rc4)
    napi_gro_receive(napi, skb);
    return true;
}

static bool gve_rx_work_pending(struct gve_rx_ring *rx)
{
    struct gve_rx_desc *desc;
    __be16 flags_seq;
    u32 next_idx;

-   next_idx = rx->desc.cnt & rx->desc.mask;
+   next_idx = rx->cnt & rx->mask;
    desc = rx->desc.desc_ring + next_idx;
    flags_seq = desc->flags_seq;

    /* Make sure we have synchronized the seq no with the device */
    smp_rmb();
    return (GVE_SEQNO(flags_seq) == rx->desc.seqno);
}

bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
                       netdev_features_t feat)
{
    struct gve_priv *priv = rx->gve;
    struct gve_rx_desc *desc;
-   u32 cnt = rx->desc.cnt;
-   u32 idx = cnt & rx->desc.mask;
+   u32 cnt = rx->cnt;
+   u32 idx = cnt & rx->mask;
    u32 work_done = 0;
    u64 bytes = 0;

    desc = rx->desc.desc_ring + idx;
    while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) &&
           work_done < budget) {
        netif_info(priv, rx_status, priv->dev,
                   "[%d] idx=%d desc=%p desc->flags_seq=0x%x\n",
                   rx->q_num, idx, desc, desc->flags_seq);
        netif_info(priv, rx_status, priv->dev,
                   "[%d] seqno=%d rx->desc.seqno=%d\n",
                   rx->q_num, GVE_SEQNO(desc->flags_seq),
                   rx->desc.seqno);
        bytes += be16_to_cpu(desc->len) - GVE_RX_PAD;
-       if (!gve_rx(rx, desc, feat))
+       if (!gve_rx(rx, desc, feat, idx))
            gve_schedule_reset(priv);
        cnt++;
-       idx = cnt & rx->desc.mask;
+       idx = cnt & rx->mask;
        desc = rx->desc.desc_ring + idx;
        rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
        work_done++;
    }

    if (!work_done)
        return false;

    u64_stats_update_begin(&rx->statss);
    rx->rpackets += work_done;
    rx->rbytes += bytes;
    u64_stats_update_end(&rx->statss);
-   rx->desc.cnt = cnt;
-   rx->desc.fill_cnt += work_done;
+   rx->cnt = cnt;
+   rx->fill_cnt += work_done;

    /* restock desc ring slots */
    dma_wmb(); /* Ensure descs are visible before ringing doorbell */
    gve_rx_write_doorbell(priv, rx);
    return gve_rx_work_pending(rx);
}

bool gve_rx_poll(struct gve_notify_block *block, int budget)
{
    struct gve_rx_ring *rx = block->rx;
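gve_rx_work_pending() and the gve_clean_rx_done() loop above detect newly written descriptors by comparing the descriptor's sequence field, GVE_SEQNO(flags_seq), with the driver's expected rx->desc.seqno, which gve_next_seqno() advances for every descriptor consumed. The stand-alone sketch below illustrates that sequence-number technique in the abstract; the counter width, the wrap rule, and all names here are assumptions made for the example, not the driver's definitions (those live in gve.h/gve_desc.h, which are outside this diff).

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 8

struct demo_desc {
    uint8_t seqno;      /* written by the "device" when it fills the slot */
    uint16_t len;
};

/* Example wrap rule only: a small counter that cycles 1..7 and skips 0. */
static uint8_t demo_next_seqno(uint8_t s)
{
    return (uint8_t)(s % 7) + 1;
}

int main(void)
{
    struct demo_desc ring[RING_SIZE] = { 0 };   /* seqno 0 == never written */
    uint8_t expected = 1;
    uint32_t cnt = 0;
    int i;

    /* The "device" fills the first five slots with consecutive sequence numbers. */
    for (i = 0; i < 5; i++) {
        ring[i].seqno = (i == 0) ? 1 : demo_next_seqno(ring[i - 1].seqno);
        ring[i].len = 64 + i;
    }

    /* The "driver" consumes only while the sequence number matches its expectation,
     * so it stops at the first stale or unwritten descriptor.
     */
    while (ring[cnt % RING_SIZE].seqno == expected) {
        printf("consumed idx=%u len=%d\n", cnt % RING_SIZE, ring[cnt % RING_SIZE].len);
        expected = demo_next_seqno(expected);
        cnt++;
    }
    printf("stopped at idx=%u\n", cnt % RING_SIZE);
    return 0;
}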
End of changes. 15 change blocks; 19 lines changed or deleted, 15 lines changed or added.
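Taken together, the fifteen change blocks implement one refactor: the ring's index bookkeeping (mask, cnt, fill_cnt), previously split between the rx->data and rx->desc sub-structures, is hoisted to the gve_rx_ring itself, and gve_rx() now receives the descriptor index from its caller instead of maintaining a separate data-ring counter, so a single mask/counter pair drives both the descriptor and the buffer rings. The struct definition itself lives in gve.h, which is not part of this page; the sketch below is only a guess at the shape of the consolidated fields implied by the usage visible here, not the actual header.

#include <stdint.h>

/* Hypothetical, simplified view of the fields this patch consolidates;
 * the real struct gve_rx_ring is defined in gve.h and holds much more.
 */
struct gve_rx_ring_sketch {
    /* ... descriptor ring, data ring, queue resources elided ... */
    uint32_t mask;      /* ring size minus one; replaces rx->data.mask and rx->desc.mask */
    uint32_t cnt;       /* free-running count of descriptors processed; replaces rx->desc.cnt */
    uint32_t fill_cnt;  /* free-running count of buffers posted; replaces rx->desc.fill_cnt */
};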
