author     Shay Agroskin <shayagr@amazon.com>    2020-12-08 20:02:05 +0200
committer  Jakub Kicinski <kuba@kernel.org>      2020-12-09 15:26:40 -0800
commit     e8223eeff021bc0f348efa10781119d23a68cf04 (patch)
tree       3369b4497e68ab42777ecd88931905f0d52956e8 /drivers/net/ethernet/amazon/ena
parent     89dd735e8c1e58b9b0d39535c7c32261773cb495 (diff)
download   linux-e8223eeff021bc0f348efa10781119d23a68cf04.tar.bz2
net: ena: use xdp_frame in XDP TX flow
Rename the ena_xdp_xmit_buff() function to ena_xdp_xmit_frame() and pass
it an xdp_frame struct instead of an xdp_buff. This change lays the
groundwork for the XDP redirect implementation, which uses xdp_frames
when 'xmit'ing packets.

Signed-off-by: Shay Agroskin <shayagr@amazon.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
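The heart of the change is where the xdp_buff-to-xdp_frame conversion
happens: it moves out of the mapping helper and up to the verdict site,
so the TX helpers only ever see frames. Below is a minimal sketch of
that pattern, not ENA code: xdp_convert_buff_to_frame() is the real
helper from <net/xdp.h>, while my_xmit_frame() is a hypothetical
stand-in for a driver TX routine such as ena_xdp_xmit_frame().

    /* Sketch of the XDP_TX pattern the patch adopts; assumes a driver
     * TX helper named my_xmit_frame(), which is hypothetical.
     */
    #include <net/xdp.h>

    static int my_handle_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
    {
            struct xdp_frame *xdpf;

            /* Stores the frame metadata in the packet's own headroom;
             * returns NULL if there is not enough room.
             */
            xdpf = xdp_convert_buff_to_frame(xdp);
            if (unlikely(!xdpf))
                    return -ENOMEM;

            /* From here on, the TX path needs only xdpf->data and xdpf->len. */
            return my_xmit_frame(dev, xdpf);
    }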
Diffstat (limited to 'drivers/net/ethernet/amazon/ena')
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 53 +++++++++++++++++++++++++++++++------------------------
1 file changed, 29 insertions(+), 24 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 0c17e5b37fc4..48cbbd44d6c2 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -233,18 +233,18 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
         return ret;
 }

-static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
-                               struct ena_tx_buffer *tx_info,
-                               struct xdp_buff *xdp,
-                               void **push_hdr,
-                               u32 *push_len)
+static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
+                                struct ena_tx_buffer *tx_info,
+                                struct xdp_frame *xdpf,
+                                void **push_hdr,
+                                u32 *push_len)
 {
         struct ena_adapter *adapter = xdp_ring->adapter;
         struct ena_com_buf *ena_buf;
         dma_addr_t dma = 0;
         u32 size;

-        tx_info->xdpf = xdp_convert_buff_to_frame(xdp);
+        tx_info->xdpf = xdpf;
         size = tx_info->xdpf->len;
         ena_buf = tx_info->bufs;
@@ -281,29 +281,31 @@ error_report_dma_error:
         return -EINVAL;
 }

-static int ena_xdp_xmit_buff(struct net_device *dev,
-                             struct xdp_buff *xdp,
-                             int qid,
-                             struct ena_rx_buffer *rx_info)
+static int ena_xdp_xmit_frame(struct net_device *dev,
+                              struct xdp_frame *xdpf,
+                              int qid)
 {
         struct ena_adapter *adapter = netdev_priv(dev);
         struct ena_com_tx_ctx ena_tx_ctx = {};
         struct ena_tx_buffer *tx_info;
         struct ena_ring *xdp_ring;
+        struct page *rx_buff_page;
         u16 next_to_use, req_id;
         int rc;
         void *push_hdr;
         u32 push_len;

+        rx_buff_page = virt_to_page(xdpf->data);
+
         xdp_ring = &adapter->tx_ring[qid];
         next_to_use = xdp_ring->next_to_use;
         req_id = xdp_ring->free_ids[next_to_use];
         tx_info = &xdp_ring->tx_buffer_info[req_id];
         tx_info->num_of_bufs = 0;

-        page_ref_inc(rx_info->page);
-        tx_info->xdp_rx_page = rx_info->page;
+        page_ref_inc(rx_buff_page);
+        tx_info->xdp_rx_page = rx_buff_page;

-        rc = ena_xdp_tx_map_buff(xdp_ring, tx_info, xdp, &push_hdr, &push_len);
+        rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
         if (unlikely(rc))
                 goto error_drop_packet;
@@ -318,7 +320,7 @@ static int ena_xdp_xmit_buff(struct net_device *dev,
                              tx_info,
                              &ena_tx_ctx,
                              next_to_use,
-                             xdp->data_end - xdp->data);
+                             xdpf->len);
         if (rc)
                 goto error_unmap_dma;

         /* trigger the dma engine. ena_com_write_sq_doorbell()
@@ -337,12 +339,11 @@ error_drop_packet:
         return NETDEV_TX_OK;
 }

-static int ena_xdp_execute(struct ena_ring *rx_ring,
-                           struct xdp_buff *xdp,
-                           struct ena_rx_buffer *rx_info)
+static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
 {
         struct bpf_prog *xdp_prog;
         u32 verdict = XDP_PASS;
+        struct xdp_frame *xdpf;
         u64 *xdp_stat;

         rcu_read_lock();
@@ -354,12 +355,16 @@ static int ena_xdp_execute(struct ena_ring *rx_ring,
         verdict = bpf_prog_run_xdp(xdp_prog, xdp);

         if (verdict == XDP_TX) {
-                ena_xdp_xmit_buff(rx_ring->netdev,
-                                  xdp,
-                                  rx_ring->qid + rx_ring->adapter->num_io_queues,
-                                  rx_info);
+                xdpf = xdp_convert_buff_to_frame(xdp);
+                if (unlikely(!xdpf)) {
+                        trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+                        xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+                } else {
+                        ena_xdp_xmit_frame(rx_ring->netdev, xdpf,
+                                           rx_ring->qid + rx_ring->adapter->num_io_queues);

-                xdp_stat = &rx_ring->rx_stats.xdp_tx;
+                        xdp_stat = &rx_ring->rx_stats.xdp_tx;
+                }
         } else if (unlikely(verdict == XDP_ABORTED)) {
                 trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
                 xdp_stat = &rx_ring->rx_stats.xdp_aborted;
@@ -1521,7 +1526,7 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
         if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
                 return XDP_DROP;

-        ret = ena_xdp_execute(rx_ring, xdp, rx_info);
+        ret = ena_xdp_execute(rx_ring, xdp);

         /* The xdp program might expand the headers */
         if (ret == XDP_PASS) {
@@ -1600,7 +1605,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
                 if (unlikely(!skb)) {
                         /* The page might not actually be freed here since the
                          * page reference count is incremented in
-                         * ena_xdp_xmit_buff(), and it will be decreased only
+                         * ena_xdp_xmit_frame(), and it will be decreased only
                          * when send completion was received from the device
                          */
                         if (xdp_verdict == XDP_TX)
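As the commit message notes, this is groundwork for XDP redirect: the
kernel's .ndo_xdp_xmit callback hands a driver an array of xdp_frame
pointers, so a frame-based TX helper can be shared between the XDP_TX
and redirect paths. A hypothetical outline follows, not the ENA
implementation; per-frame error handling and doorbell batching are
elided, and my_xmit_frame() is the same assumed helper as in the
sketch above.

    /* Hypothetical .ndo_xdp_xmit outline, showing why the per-frame
     * TX helper is made to take an xdp_frame.
     */
    static int my_ndo_xdp_xmit(struct net_device *dev, int n,
                               struct xdp_frame **frames, u32 flags)
    {
            int i, sent = 0;

            if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
                    return -EINVAL;

            for (i = 0; i < n; i++) {
                    /* Same per-frame TX helper the XDP_TX path uses. */
                    if (my_xmit_frame(dev, frames[i]))
                            break;
                    sent++;
            }

            return sent;
    }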