Diffstat (limited to 'drivers/net/ethernet/freescale/dpaa2')
17 files changed, 1329 insertions, 378 deletions
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile index 3d9842af7f10..1b05ba8d1cbf 100644 --- a/drivers/net/ethernet/freescale/dpaa2/Makefile +++ b/drivers/net/ethernet/freescale/dpaa2/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o obj-$(CONFIG_FSL_DPAA2_SWITCH) += fsl-dpaa2-switch.o -fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o +fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o dpaa2-xsk.o fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c index 8356af4631fd..1af254caeb0d 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c @@ -98,14 +98,14 @@ static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset) int i; seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name); - seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n", - "CHID", "CPU", "Deq busy", "Frames", "CDANs", + seq_printf(file, "%s %5s%16s%16s%16s%16s%16s%16s\n", + "IDX", "CHID", "CPU", "Deq busy", "Frames", "CDANs", "Avg Frm/CDAN", "Buf count"); for (i = 0; i < priv->num_channels; i++) { ch = priv->channel[i]; - seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n", - ch->ch_id, + seq_printf(file, "%3s%d%6d%16d%16llu%16llu%16llu%16llu%16d\n", + "CH#", i, ch->ch_id, ch->nctx.desired_cpu, ch->stats.dequeue_portal_busy, ch->stats.frames, @@ -119,6 +119,51 @@ static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset) DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_ch); +static int dpaa2_dbg_bp_show(struct seq_file *file, void *offset) +{ + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; + int i, j, num_queues, buf_cnt; + struct dpaa2_eth_bp *bp; + char ch_name[10]; + int err; + + /* Print out the header */ + seq_printf(file, "Buffer pool info for %s:\n", priv->net_dev->name); + seq_printf(file, "%s %10s%15s", "IDX", "BPID", "Buf count"); + num_queues = dpaa2_eth_queue_count(priv); + for (i = 0; i < num_queues; i++) { + snprintf(ch_name, sizeof(ch_name), "CH#%d", i); + seq_printf(file, "%10s", ch_name); + } + seq_printf(file, "\n"); + + /* For each buffer pool, print out its BPID, the number of buffers in + * that buffer pool and the channels which are using it. 
+ */ + for (i = 0; i < priv->num_bps; i++) { + bp = priv->bp[i]; + + err = dpaa2_io_query_bp_count(NULL, bp->bpid, &buf_cnt); + if (err) { + netdev_warn(priv->net_dev, "Buffer count query error %d\n", err); + return err; + } + + seq_printf(file, "%3s%d%10d%15d", "BP#", i, bp->bpid, buf_cnt); + for (j = 0; j < num_queues; j++) { + if (priv->channel[j]->bp == bp) + seq_printf(file, "%10s", "x"); + else + seq_printf(file, "%10s", ""); + } + seq_printf(file, "\n"); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_bp); + void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) { struct fsl_mc_device *dpni_dev; @@ -139,6 +184,10 @@ void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) /* per-fq stats file */ debugfs_create_file("ch_stats", 0444, dir, priv, &dpaa2_dbg_ch_fops); + + /* per buffer pool stats file */ + debugfs_create_file("bp_stats", 0444, dir, priv, &dpaa2_dbg_bp_fops); + } void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c index 7fefe1574b6a..76f808d38066 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c @@ -37,18 +37,9 @@ static int dpaa2_eth_dl_info_get(struct devlink *devlink, struct dpaa2_eth_devlink_priv *dl_priv = devlink_priv(devlink); struct dpaa2_eth_priv *priv = dl_priv->dpaa2_priv; char buf[10]; - int err; - - err = devlink_info_driver_name_put(req, KBUILD_MODNAME); - if (err) - return err; scnprintf(buf, 10, "%d.%d", priv->dpni_ver_major, priv->dpni_ver_minor); - err = devlink_info_version_running_put(req, "dpni", buf); - if (err) - return err; - - return 0; + return devlink_info_version_running_put(req, "dpni", buf); } static struct dpaa2_eth_trap_item * @@ -226,25 +217,16 @@ int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv) { struct devlink_port *devlink_port = &priv->devlink_port; struct devlink_port_attrs attrs = {}; - int err; attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; devlink_port_attrs_set(devlink_port, &attrs); - - err = devlink_port_register(priv->devlink, devlink_port, 0); - if (err) - return err; - - devlink_port_type_eth_set(devlink_port, priv->net_dev); - - return 0; + return devlink_port_register(priv->devlink, devlink_port, 0); } void dpaa2_eth_dl_port_del(struct dpaa2_eth_priv *priv) { struct devlink_port *devlink_port = &priv->devlink_port; - devlink_port_type_clear(devlink_port); devlink_port_unregister(devlink_port); } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h index 5fb5f14e01ec..9b43fadb9b11 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h @@ -73,6 +73,14 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd, TP_ARGS(netdev, fd) ); +/* Tx (egress) XSK fd */ +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_xsk_fd, + TP_PROTO(struct net_device *netdev, + const struct dpaa2_fd *fd), + + TP_ARGS(netdev, fd) +); + /* Rx fd */ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd, TP_PROTO(struct net_device *netdev, @@ -81,6 +89,14 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd, TP_ARGS(netdev, fd) ); +/* Rx XSK fd */ +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_xsk_fd, + TP_PROTO(struct net_device *netdev, + const struct dpaa2_fd *fd), + + TP_ARGS(netdev, fd) +); + /* Tx confirmation fd */ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd, TP_PROTO(struct net_device *netdev, @@ -90,57 +106,81 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd, ); 
/* Log data about raw buffers. Useful for tracing DPBP content. */ -TRACE_EVENT(dpaa2_eth_buf_seed, - /* Trace function prototype */ - TP_PROTO(struct net_device *netdev, - /* virtual address and size */ - void *vaddr, - size_t size, - /* dma map address and size */ - dma_addr_t dma_addr, - size_t map_size, - /* buffer pool id, if relevant */ - u16 bpid), - - /* Repeat argument list here */ - TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid), - - /* A structure containing the relevant information we want - * to record. Declare name and type for each normal element, - * name, type and size for arrays. Use __string for variable - * length strings. - */ - TP_STRUCT__entry( - __field(void *, vaddr) - __field(size_t, size) - __field(dma_addr_t, dma_addr) - __field(size_t, map_size) - __field(u16, bpid) - __string(name, netdev->name) - ), - - /* The function that assigns values to the above declared - * fields - */ - TP_fast_assign( - __entry->vaddr = vaddr; - __entry->size = size; - __entry->dma_addr = dma_addr; - __entry->map_size = map_size; - __entry->bpid = bpid; - __assign_str(name, netdev->name); - ), - - /* This is what gets printed when the trace event is - * triggered. - */ - TP_printk(TR_BUF_FMT, - __get_str(name), - __entry->vaddr, - __entry->size, - &__entry->dma_addr, - __entry->map_size, - __entry->bpid) +DECLARE_EVENT_CLASS(dpaa2_eth_buf, + /* Trace function prototype */ + TP_PROTO(struct net_device *netdev, + /* virtual address and size */ + void *vaddr, + size_t size, + /* dma map address and size */ + dma_addr_t dma_addr, + size_t map_size, + /* buffer pool id, if relevant */ + u16 bpid), + + /* Repeat argument list here */ + TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid), + + /* A structure containing the relevant information we want + * to record. Declare name and type for each normal element, + * name, type and size for arrays. Use __string for variable + * length strings. + */ + TP_STRUCT__entry( + __field(void *, vaddr) + __field(size_t, size) + __field(dma_addr_t, dma_addr) + __field(size_t, map_size) + __field(u16, bpid) + __string(name, netdev->name) + ), + + /* The function that assigns values to the above declared + * fields + */ + TP_fast_assign( + __entry->vaddr = vaddr; + __entry->size = size; + __entry->dma_addr = dma_addr; + __entry->map_size = map_size; + __entry->bpid = bpid; + __assign_str(name, netdev->name); + ), + + /* This is what gets printed when the trace event is + * triggered. + */ + TP_printk(TR_BUF_FMT, + __get_str(name), + __entry->vaddr, + __entry->size, + &__entry->dma_addr, + __entry->map_size, + __entry->bpid) +); + +/* Main memory buff seeding */ +DEFINE_EVENT(dpaa2_eth_buf, dpaa2_eth_buf_seed, + TP_PROTO(struct net_device *netdev, + void *vaddr, + size_t size, + dma_addr_t dma_addr, + size_t map_size, + u16 bpid), + + TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid) +); + +/* UMEM buff seeding on AF_XDP fast path */ +DEFINE_EVENT(dpaa2_eth_buf, dpaa2_xsk_buf_seed, + TP_PROTO(struct net_device *netdev, + void *vaddr, + size_t size, + dma_addr_t dma_addr, + size_t map_size, + u16 bpid), + + TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid) ); /* If only one event of a certain type needs to be declared, use TRACE_EVENT(). 
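The event class above replaces the old dpaa2_eth_buf_seed TRACE_EVENT so that the regular and the AF_XDP zero-copy buffer-seeding paths each get their own trace point while sharing one field layout and print format. A minimal usage sketch follows; the real call sites are in the dpaa2-eth.c hunks below, and here `page`, `xdp_buffs[i]` and `addr` stand for the freshly allocated buffer and its mapped DMA address:

	/* Seeding a page-backed Rx buffer (regular path) */
	trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page),
				 DPAA2_ETH_RX_BUF_RAW_SIZE,
				 addr, priv->rx_buf_size, ch->bp->bpid);

	/* Seeding a UMEM-backed Rx buffer (AF_XDP zero-copy path) */
	trace_dpaa2_xsk_buf_seed(priv->net_dev, xdp_buffs[i]->data_hard_start,
				 DPAA2_ETH_RX_BUF_RAW_SIZE,
				 addr, priv->rx_buf_size, ch->bp->bpid);

Because both are instances of the same class they print identically in the trace buffer, but they can be enabled independently, e.g. via /sys/kernel/tracing/events/dpaa2_eth/dpaa2_xsk_buf_seed/enable (assuming the header's TRACE_SYSTEM of dpaa2_eth).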
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 8d029addddad..0c35abb7d065 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) /* Copyright 2014-2016 Freescale Semiconductor Inc. - * Copyright 2016-2020 NXP + * Copyright 2016-2022 NXP */ #include <linux/init.h> #include <linux/module.h> @@ -8,7 +8,6 @@ #include <linux/etherdevice.h> #include <linux/of_net.h> #include <linux/interrupt.h> -#include <linux/msi.h> #include <linux/kthread.h> #include <linux/iommu.h> #include <linux/fsl/mc.h> @@ -19,6 +18,7 @@ #include <net/pkt_cls.h> #include <net/sock.h> #include <net/tso.h> +#include <net/xdp_sock_drv.h> #include "dpaa2-eth.h" @@ -104,8 +104,8 @@ static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv) priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct; } -static void *dpaa2_iova_to_virt(struct iommu_domain *domain, - dma_addr_t iova_addr) +void *dpaa2_iova_to_virt(struct iommu_domain *domain, + dma_addr_t iova_addr) { phys_addr_t phys_addr; @@ -279,23 +279,33 @@ static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv, * be released in the pool */ static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, - int count) + int count, bool xsk_zc) { struct device *dev = priv->net_dev->dev.parent; + struct dpaa2_eth_swa *swa; + struct xdp_buff *xdp_buff; void *vaddr; int i; for (i = 0; i < count; i++) { vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); - dma_unmap_page(dev, buf_array[i], priv->rx_buf_size, - DMA_BIDIRECTIONAL); - free_pages((unsigned long)vaddr, 0); + + if (!xsk_zc) { + dma_unmap_page(dev, buf_array[i], priv->rx_buf_size, + DMA_BIDIRECTIONAL); + free_pages((unsigned long)vaddr, 0); + } else { + swa = (struct dpaa2_eth_swa *) + (vaddr + DPAA2_ETH_RX_HWA_SIZE); + xdp_buff = swa->xsk.xdp_buff; + xsk_buff_free(xdp_buff); + } } } -static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - dma_addr_t addr) +void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + dma_addr_t addr) { int retries = 0; int err; @@ -304,7 +314,7 @@ static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv, if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD) return; - while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid, + while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid, ch->recycled_bufs, ch->recycled_bufs_cnt)) == -EBUSY) { if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) @@ -313,7 +323,8 @@ static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv, } if (err) { - dpaa2_eth_free_bufs(priv, ch->recycled_bufs, ch->recycled_bufs_cnt); + dpaa2_eth_free_bufs(priv, ch->recycled_bufs, + ch->recycled_bufs_cnt, ch->xsk_zc); ch->buf_count -= ch->recycled_bufs_cnt; } @@ -377,10 +388,10 @@ static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv, fq->xdp_tx_fds.num = 0; } -static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - struct dpaa2_fd *fd, - void *buf_start, u16 queue_id) +void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_fd *fd, + void *buf_start, u16 queue_id) { struct dpaa2_faead *faead; struct dpaa2_fd *dest_fd; @@ -485,19 +496,15 @@ out: return xdp_act; } -static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel 
*ch, - const struct dpaa2_fd *fd, - void *fd_vaddr) +struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, u32 fd_length, + void *fd_vaddr) { u16 fd_offset = dpaa2_fd_get_offset(fd); - struct dpaa2_eth_priv *priv = ch->priv; - u32 fd_length = dpaa2_fd_get_len(fd); struct sk_buff *skb = NULL; unsigned int skb_len; - if (fd_length > priv->rx_copybreak) - return NULL; - skb_len = fd_length + dpaa2_eth_needed_headroom(NULL); skb = napi_alloc_skb(&ch->napi, skb_len); @@ -514,11 +521,66 @@ static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch, return skb; } +static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + void *fd_vaddr) +{ + struct dpaa2_eth_priv *priv = ch->priv; + u32 fd_length = dpaa2_fd_get_len(fd); + + if (fd_length > priv->rx_copybreak) + return NULL; + + return dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, fd_vaddr); +} + +void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, void *vaddr, + struct dpaa2_eth_fq *fq, + struct rtnl_link_stats64 *percpu_stats, + struct sk_buff *skb) +{ + struct dpaa2_fas *fas; + u32 status = 0; + + fas = dpaa2_get_fas(vaddr, false); + prefetch(fas); + prefetch(skb->data); + + /* Get the timestamp value */ + if (priv->rx_tstamp) { + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); + __le64 *ts = dpaa2_get_ts(vaddr, false); + u64 ns; + + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + + ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); + shhwtstamps->hwtstamp = ns_to_ktime(ns); + } + + /* Check if we need to validate the L4 csum */ + if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) { + status = le32_to_cpu(fas->status); + dpaa2_eth_validate_rx_csum(priv, status, skb); + } + + skb->protocol = eth_type_trans(skb, priv->net_dev); + skb_record_rx_queue(skb, fq->flowid); + + percpu_stats->rx_packets++; + percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); + ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd); + + list_add_tail(&skb->list, ch->rx_list); +} + /* Main Rx frame processing routine */ -static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - const struct dpaa2_fd *fd, - struct dpaa2_eth_fq *fq) +void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + struct dpaa2_eth_fq *fq) { dma_addr_t addr = dpaa2_fd_get_addr(fd); u8 fd_format = dpaa2_fd_get_format(fd); @@ -527,9 +589,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, struct rtnl_link_stats64 *percpu_stats; struct dpaa2_eth_drv_stats *percpu_extras; struct device *dev = priv->net_dev->dev.parent; - struct dpaa2_fas *fas; void *buf_data; - u32 status = 0; u32 xdp_act; /* Tracing point */ @@ -539,8 +599,6 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size, DMA_BIDIRECTIONAL); - fas = dpaa2_get_fas(vaddr, false); - prefetch(fas); buf_data = vaddr + dpaa2_fd_get_offset(fd); prefetch(buf_data); @@ -578,35 +636,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, if (unlikely(!skb)) goto err_build_skb; - prefetch(skb->data); - - /* Get the timestamp value */ - if (priv->rx_tstamp) { - struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); - __le64 *ts = dpaa2_get_ts(vaddr, false); - u64 ns; - - memset(shhwtstamps, 0, sizeof(*shhwtstamps)); - - ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); - shhwtstamps->hwtstamp = 
ns_to_ktime(ns); - } - - /* Check if we need to validate the L4 csum */ - if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) { - status = le32_to_cpu(fas->status); - dpaa2_eth_validate_rx_csum(priv, status, skb); - } - - skb->protocol = eth_type_trans(skb, priv->net_dev); - skb_record_rx_queue(skb, fq->flowid); - - percpu_stats->rx_packets++; - percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); - ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd); - - list_add_tail(&skb->list, ch->rx_list); - + dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb); return; err_build_skb: @@ -827,7 +857,7 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv, } } -static void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv) +void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv) { struct dpaa2_eth_sgt_cache *sgt_cache; void *sgt_buf = NULL; @@ -849,7 +879,7 @@ static void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv) return sgt_buf; } -static void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf) +void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf) { struct dpaa2_eth_sgt_cache *sgt_cache; @@ -1084,9 +1114,10 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv, * This can be called either from dpaa2_eth_tx_conf() or on the error path of * dpaa2_eth_tx(). */ -static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_fq *fq, - const struct dpaa2_fd *fd, bool in_napi) +void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_eth_fq *fq, + const struct dpaa2_fd *fd, bool in_napi) { struct device *dev = priv->net_dev->dev.parent; dma_addr_t fd_addr, sg_addr; @@ -1153,6 +1184,10 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, if (!swa->tso.is_last_fd) should_free_skb = 0; + } else if (swa->type == DPAA2_ETH_SWA_XSK) { + /* Unmap the SGT Buffer */ + dma_unmap_single(dev, fd_addr, swa->xsk.sgt_size, + DMA_BIDIRECTIONAL); } else { skb = swa->single.skb; @@ -1170,6 +1205,12 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, return; } + if (swa->type == DPAA2_ETH_SWA_XSK) { + ch->xsk_tx_pkts_sent++; + dpaa2_eth_sgt_recycle(priv, buffer_start); + return; + } + if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) { fq->dq_frames++; fq->dq_bytes += fd_len; @@ -1344,7 +1385,7 @@ err_alloc_tso_hdr: err_sgt_get: /* Free all the other FDs that were already fully created */ for (i = 0; i < index; i++) - dpaa2_eth_free_tx_fd(priv, NULL, &fd_start[i], false); + dpaa2_eth_free_tx_fd(priv, NULL, NULL, &fd_start[i], false); return err; } @@ -1460,7 +1501,7 @@ static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb, if (unlikely(err < 0)) { percpu_stats->tx_errors++; /* Clean up everything, including freeing the skb */ - dpaa2_eth_free_tx_fd(priv, fq, fd, false); + dpaa2_eth_free_tx_fd(priv, NULL, fq, fd, false); netdev_tx_completed_queue(nq, 1, fd_len); } else { percpu_stats->tx_packets += total_enqueued; @@ -1553,7 +1594,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, /* Check frame errors in the FD field */ fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; - dpaa2_eth_free_tx_fd(priv, fq, fd, true); + dpaa2_eth_free_tx_fd(priv, ch, fq, fd, true); if (likely(!fd_errors)) return; @@ -1631,44 +1672,76 @@ static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) * to the specified buffer pool */ static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, u16 bpid) + struct dpaa2_eth_channel *ch) { + 
struct xdp_buff *xdp_buffs[DPAA2_ETH_BUFS_PER_CMD]; struct device *dev = priv->net_dev->dev.parent; u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; + struct dpaa2_eth_swa *swa; struct page *page; dma_addr_t addr; int retries = 0; - int i, err; - - for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { - /* Allocate buffer visible to WRIOP + skb shared info + - * alignment padding - */ - /* allocate one page for each Rx buffer. WRIOP sees - * the entire page except for a tailroom reserved for - * skb shared info + int i = 0, err; + u32 batch; + + /* Allocate buffers visible to WRIOP */ + if (!ch->xsk_zc) { + for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { + /* Also allocate skb shared info and alignment padding. + * There is one page for each Rx buffer. WRIOP sees + * the entire page except for a tailroom reserved for + * skb shared info + */ + page = dev_alloc_pages(0); + if (!page) + goto err_alloc; + + addr = dma_map_page(dev, page, 0, priv->rx_buf_size, + DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, addr))) + goto err_map; + + buf_array[i] = addr; + + /* tracing point */ + trace_dpaa2_eth_buf_seed(priv->net_dev, + page_address(page), + DPAA2_ETH_RX_BUF_RAW_SIZE, + addr, priv->rx_buf_size, + ch->bp->bpid); + } + } else if (xsk_buff_can_alloc(ch->xsk_pool, DPAA2_ETH_BUFS_PER_CMD)) { + /* Allocate XSK buffers for AF_XDP fast path in batches + * of DPAA2_ETH_BUFS_PER_CMD. Bail out if the UMEM cannot + * provide enough buffers at the moment */ - page = dev_alloc_pages(0); - if (!page) + batch = xsk_buff_alloc_batch(ch->xsk_pool, xdp_buffs, + DPAA2_ETH_BUFS_PER_CMD); + if (!batch) goto err_alloc; - addr = dma_map_page(dev, page, 0, priv->rx_buf_size, - DMA_BIDIRECTIONAL); - if (unlikely(dma_mapping_error(dev, addr))) - goto err_map; + for (i = 0; i < batch; i++) { + swa = (struct dpaa2_eth_swa *)(xdp_buffs[i]->data_hard_start + + DPAA2_ETH_RX_HWA_SIZE); + swa->xsk.xdp_buff = xdp_buffs[i]; + + addr = xsk_buff_xdp_get_frame_dma(xdp_buffs[i]); + if (unlikely(dma_mapping_error(dev, addr))) + goto err_map; - buf_array[i] = addr; + buf_array[i] = addr; - /* tracing point */ - trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page), - DPAA2_ETH_RX_BUF_RAW_SIZE, - addr, priv->rx_buf_size, - bpid); + trace_dpaa2_xsk_buf_seed(priv->net_dev, + xdp_buffs[i]->data_hard_start, + DPAA2_ETH_RX_BUF_RAW_SIZE, + addr, priv->rx_buf_size, + ch->bp->bpid); + } } release_bufs: /* In case the portal is busy, retry until successful */ - while ((err = dpaa2_io_service_release(ch->dpio, bpid, + while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid, buf_array, i)) == -EBUSY) { if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) break; @@ -1679,14 +1752,19 @@ release_bufs: * not much else we can do about it */ if (err) { - dpaa2_eth_free_bufs(priv, buf_array, i); + dpaa2_eth_free_bufs(priv, buf_array, i, ch->xsk_zc); return 0; } return i; err_map: - __free_pages(page, 0); + if (!ch->xsk_zc) { + __free_pages(page, 0); + } else { + for (; i < batch; i++) + xsk_buff_free(xdp_buffs[i]); + } err_alloc: /* If we managed to allocate at least some buffers, * release them to hardware @@ -1697,39 +1775,64 @@ err_alloc: return 0; } -static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) +static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch) { - int i, j; + int i; int new_count; - for (j = 0; j < priv->num_channels; j++) { - for (i = 0; i < DPAA2_ETH_NUM_BUFS; - i += DPAA2_ETH_BUFS_PER_CMD) { - new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid); - 
priv->channel[j]->buf_count += new_count; + for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += DPAA2_ETH_BUFS_PER_CMD) { + new_count = dpaa2_eth_add_bufs(priv, ch); + ch->buf_count += new_count; - if (new_count < DPAA2_ETH_BUFS_PER_CMD) { - return -ENOMEM; - } - } + if (new_count < DPAA2_ETH_BUFS_PER_CMD) + return -ENOMEM; } return 0; } +static void dpaa2_eth_seed_pools(struct dpaa2_eth_priv *priv) +{ + struct net_device *net_dev = priv->net_dev; + struct dpaa2_eth_channel *channel; + int i, err = 0; + + for (i = 0; i < priv->num_channels; i++) { + channel = priv->channel[i]; + + err = dpaa2_eth_seed_pool(priv, channel); + + /* Not much to do; the buffer pool, though not filled up, + * may still contain some buffers which would enable us + * to limp on. + */ + if (err) + netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", + channel->bp->dev->obj_desc.id, + channel->bp->bpid); + } +} + /* - * Drain the specified number of buffers from the DPNI's private buffer pool. + * Drain the specified number of buffers from one of the DPNI's private buffer + * pools. * @count must not exceeed DPAA2_ETH_BUFS_PER_CMD */ -static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count) +static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int bpid, + int count) { u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; + bool xsk_zc = false; int retries = 0; - int ret; + int i, ret; + + for (i = 0; i < priv->num_channels; i++) + if (priv->channel[i]->bp->bpid == bpid) + xsk_zc = priv->channel[i]->xsk_zc; do { - ret = dpaa2_io_service_acquire(NULL, priv->bpid, - buf_array, count); + ret = dpaa2_io_service_acquire(NULL, bpid, buf_array, count); if (ret < 0) { if (ret == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES) @@ -1737,28 +1840,40 @@ static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count) netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); return; } - dpaa2_eth_free_bufs(priv, buf_array, ret); + dpaa2_eth_free_bufs(priv, buf_array, ret, xsk_zc); retries = 0; } while (ret); } -static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv) +static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv, int bpid) { int i; - dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); - dpaa2_eth_drain_bufs(priv, 1); + /* Drain the buffer pool */ + dpaa2_eth_drain_bufs(priv, bpid, DPAA2_ETH_BUFS_PER_CMD); + dpaa2_eth_drain_bufs(priv, bpid, 1); + /* Setup to zero the buffer count of all channels which were + * using this buffer pool. 
+ */ for (i = 0; i < priv->num_channels; i++) - priv->channel[i]->buf_count = 0; + if (priv->channel[i]->bp->bpid == bpid) + priv->channel[i]->buf_count = 0; +} + +static void dpaa2_eth_drain_pools(struct dpaa2_eth_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_bps; i++) + dpaa2_eth_drain_pool(priv, priv->bp[i]->bpid); } /* Function is called from softirq context only, so we don't need to guard * the access to percpu count */ static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - u16 bpid) + struct dpaa2_eth_channel *ch) { int new_count; @@ -1766,7 +1881,7 @@ static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv, return 0; do { - new_count = dpaa2_eth_add_bufs(priv, ch, bpid); + new_count = dpaa2_eth_add_bufs(priv, ch); if (unlikely(!new_count)) { /* Out of memory; abort for now, we'll try later on */ break; @@ -1830,6 +1945,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) struct dpaa2_eth_fq *fq, *txc_fq = NULL; struct netdev_queue *nq; int store_cleaned, work_done; + bool work_done_zc = false; struct list_head rx_list; int retries = 0; u16 flowid; @@ -1842,13 +1958,22 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) INIT_LIST_HEAD(&rx_list); ch->rx_list = &rx_list; + if (ch->xsk_zc) { + work_done_zc = dpaa2_xsk_tx(priv, ch); + /* If we reached the XSK Tx per NAPI threshold, we're done */ + if (work_done_zc) { + work_done = budget; + goto out; + } + } + do { err = dpaa2_eth_pull_channel(ch); if (unlikely(err)) break; /* Refill pool if appropriate */ - dpaa2_eth_refill_pool(priv, ch, priv->bpid); + dpaa2_eth_refill_pool(priv, ch); store_cleaned = dpaa2_eth_consume_frames(ch, &fq); if (store_cleaned <= 0) @@ -1894,6 +2019,11 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) out: netif_receive_skb_list(ch->rx_list); + if (ch->xsk_tx_pkts_sent) { + xsk_tx_completed(ch->xsk_pool, ch->xsk_tx_pkts_sent); + ch->xsk_tx_pkts_sent = 0; + } + if (txc_fq && txc_fq->dq_frames) { nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid); netdev_tx_completed_queue(nq, txc_fq->dq_frames, @@ -2017,8 +2147,11 @@ static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv) /* When we manage the MAC/PHY using phylink there is no need * to manually update the netif_carrier. + * We can avoid locking because we are called from the "link changed" + * IRQ handler, which is the same as the "endpoint changed" IRQ handler + * (the writer to priv->mac), so we cannot race with it. */ - if (dpaa2_eth_is_type_phy(priv)) + if (dpaa2_mac_is_type_phy(priv->mac)) goto out; /* Chech link state; speed / duplex changes are not treated yet */ @@ -2047,15 +2180,9 @@ static int dpaa2_eth_open(struct net_device *net_dev) struct dpaa2_eth_priv *priv = netdev_priv(net_dev); int err; - err = dpaa2_eth_seed_pool(priv, priv->bpid); - if (err) { - /* Not much to do; the buffer pool, though not filled up, - * may still contain some buffers which would enable us - * to limp on. 
- */ - netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", - priv->dpbp_dev->obj_desc.id, priv->bpid); - } + dpaa2_eth_seed_pools(priv); + + mutex_lock(&priv->mac_lock); if (!dpaa2_eth_is_type_phy(priv)) { /* We'll only start the txqs when the link is actually ready; @@ -2075,20 +2202,21 @@ static int dpaa2_eth_open(struct net_device *net_dev) err = dpni_enable(priv->mc_io, 0, priv->mc_token); if (err < 0) { + mutex_unlock(&priv->mac_lock); netdev_err(net_dev, "dpni_enable() failed\n"); goto enable_err; } - if (dpaa2_eth_is_type_phy(priv)) { + if (dpaa2_eth_is_type_phy(priv)) dpaa2_mac_start(priv->mac); - phylink_start(priv->mac->phylink); - } + + mutex_unlock(&priv->mac_lock); return 0; enable_err: dpaa2_eth_disable_ch_napi(priv); - dpaa2_eth_drain_pool(priv); + dpaa2_eth_drain_pools(priv); return err; } @@ -2155,14 +2283,17 @@ static int dpaa2_eth_stop(struct net_device *net_dev) int dpni_enabled = 0; int retries = 10; + mutex_lock(&priv->mac_lock); + if (dpaa2_eth_is_type_phy(priv)) { - phylink_stop(priv->mac->phylink); dpaa2_mac_stop(priv->mac); } else { netif_tx_stop_all_queues(net_dev); netif_carrier_off(net_dev); } + mutex_unlock(&priv->mac_lock); + /* On dpni_disable(), the MC firmware will: * - stop MAC Rx and wait for all Rx frames to be enqueued to software * - cut off WRIOP dequeues from egress FQs and wait until transmission @@ -2193,7 +2324,7 @@ static int dpaa2_eth_stop(struct net_device *net_dev) dpaa2_eth_disable_ch_napi(priv); /* Empty the buffer pool */ - dpaa2_eth_drain_pool(priv); + dpaa2_eth_drain_pools(priv); /* Empty the Scatter-Gather Buffer cache */ dpaa2_eth_sgt_cache_drain(priv); @@ -2488,12 +2619,20 @@ static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct dpaa2_eth_priv *priv = netdev_priv(dev); + int err; if (cmd == SIOCSHWTSTAMP) return dpaa2_eth_ts_ioctl(dev, rq, cmd); - if (dpaa2_eth_is_type_phy(priv)) - return phylink_mii_ioctl(priv->mac->phylink, rq, cmd); + mutex_lock(&priv->mac_lock); + + if (dpaa2_eth_is_type_phy(priv)) { + err = phylink_mii_ioctl(priv->mac->phylink, rq, cmd); + mutex_unlock(&priv->mac_lock); + return err; + } + + mutex_unlock(&priv->mac_lock); return -EOPNOTSUPP; } @@ -2602,7 +2741,7 @@ static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog) need_update = (!!priv->xdp_prog != !!prog); if (up) - dpaa2_eth_stop(dev); + dev_close(dev); /* While in xdp mode, enforce a maximum Rx frame size based on MTU. 
* Also, when switching between xdp/non-xdp modes we need to reconfigure @@ -2630,7 +2769,7 @@ static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog) } if (up) { - err = dpaa2_eth_open(dev); + err = dev_open(dev, NULL); if (err) return err; } @@ -2641,7 +2780,7 @@ out_err: if (prog) bpf_prog_sub(prog, priv->num_channels); if (up) - dpaa2_eth_open(dev); + dev_open(dev, NULL); return err; } @@ -2651,6 +2790,8 @@ static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: return dpaa2_eth_setup_xdp(dev, xdp->prog); + case XDP_SETUP_XSK_POOL: + return dpaa2_xsk_setup_pool(dev, xdp->xsk.pool, xdp->xsk.queue_id); default: return -EINVAL; } @@ -2881,6 +3022,7 @@ static const struct net_device_ops dpaa2_eth_ops = { .ndo_change_mtu = dpaa2_eth_change_mtu, .ndo_bpf = dpaa2_eth_xdp, .ndo_xdp_xmit = dpaa2_eth_xdp_xmit, + .ndo_xsk_wakeup = dpaa2_xsk_wakeup, .ndo_setup_tc = dpaa2_eth_setup_tc, .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid, .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid @@ -2895,7 +3037,11 @@ static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx) /* Update NAPI statistics */ ch->stats.cdan++; - napi_schedule(&ch->napi); + /* NAPI can also be scheduled from the AF_XDP Tx path. Mark a missed + * so that it can be rescheduled again. + */ + if (!napi_if_scheduled_mark_missed(&ch->napi)) + napi_schedule(&ch->napi); } /* Allocate and configure a DPCON object */ @@ -2908,10 +3054,12 @@ static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv) err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPCON, &dpcon); if (err) { - if (err == -ENXIO) + if (err == -ENXIO) { + dev_dbg(dev, "Waiting for DPCON\n"); err = -EPROBE_DEFER; - else + } else { dev_info(dev, "Not enough DPCONs, will go on as-is\n"); + } return ERR_PTR(err); } @@ -3021,7 +3169,9 @@ static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv) channel = dpaa2_eth_alloc_channel(priv); if (IS_ERR_OR_NULL(channel)) { err = PTR_ERR_OR_ZERO(channel); - if (err != -EPROBE_DEFER) + if (err == -EPROBE_DEFER) + dev_dbg(dev, "waiting for affine channel\n"); + else dev_info(dev, "No affine channel for cpu %d and above\n", i); goto err_alloc_ch; @@ -3204,13 +3354,14 @@ static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv) dpaa2_eth_set_fq_affinity(priv); } -/* Allocate and configure one buffer pool for each interface */ -static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv) +/* Allocate and configure a buffer pool */ +struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv) { - int err; - struct fsl_mc_device *dpbp_dev; struct device *dev = priv->net_dev->dev.parent; + struct fsl_mc_device *dpbp_dev; struct dpbp_attr dpbp_attrs; + struct dpaa2_eth_bp *bp; + int err; err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, &dpbp_dev); @@ -3219,12 +3370,16 @@ static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv) err = -EPROBE_DEFER; else dev_err(dev, "DPBP device allocation failed\n"); - return err; + return ERR_PTR(err); } - priv->dpbp_dev = dpbp_dev; + bp = kzalloc(sizeof(*bp), GFP_KERNEL); + if (!bp) { + err = -ENOMEM; + goto err_alloc; + } - err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id, + err = dpbp_open(priv->mc_io, 0, dpbp_dev->obj_desc.id, &dpbp_dev->mc_handle); if (err) { dev_err(dev, "dpbp_open() failed\n"); @@ -3249,9 +3404,11 @@ static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv) dev_err(dev, "dpbp_get_attributes() failed\n"); goto 
err_get_attr; } - priv->bpid = dpbp_attrs.bpid; - return 0; + bp->dev = dpbp_dev; + bp->bpid = dpbp_attrs.bpid; + + return bp; err_get_attr: dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); @@ -3259,17 +3416,58 @@ err_enable: err_reset: dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); err_open: + kfree(bp); +err_alloc: fsl_mc_object_free(dpbp_dev); - return err; + return ERR_PTR(err); } -static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv) +static int dpaa2_eth_setup_default_dpbp(struct dpaa2_eth_priv *priv) { - dpaa2_eth_drain_pool(priv); - dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); - dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle); - fsl_mc_object_free(priv->dpbp_dev); + struct dpaa2_eth_bp *bp; + int i; + + bp = dpaa2_eth_allocate_dpbp(priv); + if (IS_ERR(bp)) + return PTR_ERR(bp); + + priv->bp[DPAA2_ETH_DEFAULT_BP_IDX] = bp; + priv->num_bps++; + + for (i = 0; i < priv->num_channels; i++) + priv->channel[i]->bp = bp; + + return 0; +} + +void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp) +{ + int idx_bp; + + /* Find the index at which this BP is stored */ + for (idx_bp = 0; idx_bp < priv->num_bps; idx_bp++) + if (priv->bp[idx_bp] == bp) + break; + + /* Drain the pool and disable the associated MC object */ + dpaa2_eth_drain_pool(priv, bp->bpid); + dpbp_disable(priv->mc_io, 0, bp->dev->mc_handle); + dpbp_close(priv->mc_io, 0, bp->dev->mc_handle); + fsl_mc_object_free(bp->dev); + kfree(bp); + + /* Move the last in use DPBP over in this position */ + priv->bp[idx_bp] = priv->bp[priv->num_bps - 1]; + priv->num_bps--; +} + +static void dpaa2_eth_free_dpbps(struct dpaa2_eth_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_bps; i++) + dpaa2_eth_free_dpbp(priv, priv->bp[i]); } static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv) @@ -3610,7 +3808,7 @@ static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev) dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n", priv->dpni_ver_major, priv->dpni_ver_minor, DPNI_VER_MAJOR, DPNI_VER_MINOR); - err = -ENOTSUPP; + err = -EOPNOTSUPP; goto close; } @@ -4154,15 +4352,16 @@ out: */ static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv) { + struct dpaa2_eth_bp *bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX]; struct net_device *net_dev = priv->net_dev; + struct dpni_pools_cfg pools_params = { 0 }; struct device *dev = net_dev->dev.parent; - struct dpni_pools_cfg pools_params; struct dpni_error_cfg err_cfg; int err = 0; int i; pools_params.num_dpbp = 1; - pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; + pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id; pools_params.pools[0].backup_pool = 0; pools_params.pools[0].buffer_size = priv->rx_buf_size; err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); @@ -4426,8 +4625,10 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv) dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent); dpmac_dev = fsl_mc_get_endpoint(dpni_dev, 0); - if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER) + if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER) { + netdev_dbg(priv->net_dev, "waiting for mac\n"); return PTR_ERR(dpmac_dev); + } if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) return 0; @@ -4443,22 +4644,29 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv) err = dpaa2_mac_open(mac); if (err) goto err_free_mac; - priv->mac = mac; - if (dpaa2_eth_is_type_phy(priv)) { + if (dpaa2_mac_is_type_phy(mac)) { err = dpaa2_mac_connect(mac); - if (err && err != -EPROBE_DEFER) - 
netdev_err(priv->net_dev, "Error connecting to the MAC endpoint: %pe", - ERR_PTR(err)); - if (err) + if (err) { + if (err == -EPROBE_DEFER) + netdev_dbg(priv->net_dev, + "could not connect to MAC\n"); + else + netdev_err(priv->net_dev, + "Error connecting to the MAC endpoint: %pe", + ERR_PTR(err)); goto err_close_mac; + } } + mutex_lock(&priv->mac_lock); + priv->mac = mac; + mutex_unlock(&priv->mac_lock); + return 0; err_close_mac: dpaa2_mac_close(mac); - priv->mac = NULL; err_free_mac: kfree(mac); return err; @@ -4466,15 +4674,21 @@ err_free_mac: static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv) { - if (dpaa2_eth_is_type_phy(priv)) - dpaa2_mac_disconnect(priv->mac); + struct dpaa2_mac *mac; + + mutex_lock(&priv->mac_lock); + mac = priv->mac; + priv->mac = NULL; + mutex_unlock(&priv->mac_lock); - if (!dpaa2_eth_has_mac(priv)) + if (!mac) return; - dpaa2_mac_close(priv->mac); - kfree(priv->mac); - priv->mac = NULL; + if (dpaa2_mac_is_type_phy(mac)) + dpaa2_mac_disconnect(mac); + + dpaa2_mac_close(mac); + kfree(mac); } static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) @@ -4484,6 +4698,7 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev); struct net_device *net_dev = dev_get_drvdata(dev); struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + bool had_mac; int err; err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, @@ -4500,12 +4715,15 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) dpaa2_eth_set_mac_addr(netdev_priv(net_dev)); dpaa2_eth_update_tx_fqids(priv); - rtnl_lock(); - if (dpaa2_eth_has_mac(priv)) + /* We can avoid locking because the "endpoint changed" IRQ + * handler is the only one who changes priv->mac at runtime, + * so we are not racing with anyone. 
+ */ + had_mac = !!priv->mac; + if (had_mac) dpaa2_eth_disconnect_mac(priv); else dpaa2_eth_connect_mac(priv); - rtnl_unlock(); } return IRQ_HANDLED; @@ -4601,6 +4819,9 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) priv = netdev_priv(net_dev); priv->net_dev = net_dev; + SET_NETDEV_DEVLINK_PORT(net_dev, &priv->devlink_port); + + mutex_init(&priv->mac_lock); priv->iommu_domain = iommu_get_domain_for_dev(dev); @@ -4623,10 +4844,12 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &priv->mc_io); if (err) { - if (err == -ENXIO) + if (err == -ENXIO) { + dev_dbg(dev, "waiting for MC portal\n"); err = -EPROBE_DEFER; - else + } else { dev_err(dev, "MC portal allocation failed\n"); + } goto err_portal_alloc; } @@ -4641,7 +4864,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) dpaa2_eth_setup_fqs(priv); - err = dpaa2_eth_setup_dpbp(priv); + err = dpaa2_eth_setup_default_dpbp(priv); if (err) goto err_dpbp_setup; @@ -4707,6 +4930,10 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) } #endif + err = dpaa2_eth_connect_mac(priv); + if (err) + goto err_connect_mac; + err = dpaa2_eth_setup_irqs(dpni_dev); if (err) { netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n"); @@ -4719,10 +4946,6 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) priv->do_link_poll = true; } - err = dpaa2_eth_connect_mac(priv); - if (err) - goto err_connect_mac; - err = dpaa2_eth_dl_alloc(priv); if (err) goto err_dl_register; @@ -4756,13 +4979,13 @@ err_dl_port_add: err_dl_trap_register: dpaa2_eth_dl_free(priv); err_dl_register: - dpaa2_eth_disconnect_mac(priv); -err_connect_mac: if (priv->do_link_poll) kthread_stop(priv->poll_thread); else fsl_mc_free_irqs(dpni_dev); err_poll_thread: + dpaa2_eth_disconnect_mac(priv); +err_connect_mac: dpaa2_eth_free_rings(priv); err_alloc_rings: err_csum: @@ -4777,7 +5000,7 @@ err_alloc_percpu_extras: err_alloc_percpu_stats: dpaa2_eth_del_ch_napi(priv); err_bind: - dpaa2_eth_free_dpbp(priv); + dpaa2_eth_free_dpbps(priv); err_dpbp_setup: dpaa2_eth_free_dpio(priv); err_dpio_setup: @@ -4810,9 +5033,6 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) #endif unregister_netdev(net_dev); - rtnl_lock(); - dpaa2_eth_disconnect_mac(priv); - rtnl_unlock(); dpaa2_eth_dl_port_del(priv); dpaa2_eth_dl_traps_unregister(priv); @@ -4823,6 +5043,7 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) else fsl_mc_free_irqs(ls_dev); + dpaa2_eth_disconnect_mac(priv); dpaa2_eth_free_rings(priv); free_percpu(priv->fd); free_percpu(priv->sgt_cache); @@ -4830,7 +5051,7 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) free_percpu(priv->percpu_extras); dpaa2_eth_del_ch_napi(priv); - dpaa2_eth_free_dpbp(priv); + dpaa2_eth_free_dpbps(priv); dpaa2_eth_free_dpio(priv); dpaa2_eth_free_dpni(priv); if (priv->onestep_reg_base) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index 447718483ef4..d56d7a13262e 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ /* Copyright 2014-2016 Freescale Semiconductor Inc. - * Copyright 2016-2020 NXP + * Copyright 2016-2022 NXP */ #ifndef __DPAA2_ETH_H @@ -53,6 +53,12 @@ */ #define DPAA2_ETH_TXCONF_PER_NAPI 256 +/* Maximum number of Tx frames to be processed in a single NAPI + * call when AF_XDP is running. 
Bind it to DPAA2_ETH_TXCONF_PER_NAPI + * to maximize the throughput. + */ +#define DPAA2_ETH_TX_ZC_PER_NAPI DPAA2_ETH_TXCONF_PER_NAPI + /* Buffer qouta per channel. We want to keep in check number of ingress frames * in flight: for small sized frames, congestion group taildrop may kick in * first; for large sizes, Rx FQ taildrop threshold will ensure only a @@ -109,6 +115,14 @@ #define DPAA2_ETH_RX_BUF_ALIGN_REV1 256 #define DPAA2_ETH_RX_BUF_ALIGN 64 +/* The firmware allows assigning multiple buffer pools to a single DPNI - + * maximum 8 DPBP objects. By default, only the first DPBP (idx 0) is used for + * all queues. Thus, when enabling AF_XDP we must accommodate up to 9 DPBPs + * object: the default and 8 other distinct buffer pools, one for each queue. + */ +#define DPAA2_ETH_DEFAULT_BP_IDX 0 +#define DPAA2_ETH_MAX_BPS 9 + /* We are accommodating a skb backpointer and some S/G info * in the frame's software annotation. The hardware * options are either 0 or 64, so we choose the latter. @@ -122,6 +136,7 @@ enum dpaa2_eth_swa_type { DPAA2_ETH_SWA_SINGLE, DPAA2_ETH_SWA_SG, DPAA2_ETH_SWA_XDP, + DPAA2_ETH_SWA_XSK, DPAA2_ETH_SWA_SW_TSO, }; @@ -144,6 +159,10 @@ struct dpaa2_eth_swa { struct xdp_frame *xdpf; } xdp; struct { + struct xdp_buff *xdp_buff; + int sgt_size; + } xsk; + struct { struct sk_buff *skb; int num_sg; int sgt_size; @@ -421,12 +440,19 @@ enum dpaa2_eth_fq_type { }; struct dpaa2_eth_priv; +struct dpaa2_eth_channel; +struct dpaa2_eth_fq; struct dpaa2_eth_xdp_fds { struct dpaa2_fd fds[DEV_MAP_BULK_SIZE]; ssize_t num; }; +typedef void dpaa2_eth_consume_cb_t(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + struct dpaa2_eth_fq *fq); + struct dpaa2_eth_fq { u32 fqid; u32 tx_qdbin; @@ -439,10 +465,7 @@ struct dpaa2_eth_fq { struct dpaa2_eth_channel *channel; enum dpaa2_eth_fq_type type; - void (*consume)(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - const struct dpaa2_fd *fd, - struct dpaa2_eth_fq *fq); + dpaa2_eth_consume_cb_t *consume; struct dpaa2_eth_fq_stats stats; struct dpaa2_eth_xdp_fds xdp_redirect_fds; @@ -454,6 +477,11 @@ struct dpaa2_eth_ch_xdp { unsigned int res; }; +struct dpaa2_eth_bp { + struct fsl_mc_device *dev; + int bpid; +}; + struct dpaa2_eth_channel { struct dpaa2_io_notification_ctx nctx; struct fsl_mc_device *dpcon; @@ -472,6 +500,11 @@ struct dpaa2_eth_channel { /* Buffers to be recycled back in the buffer pool */ u64 recycled_bufs[DPAA2_ETH_BUFS_PER_CMD]; int recycled_bufs_cnt; + + bool xsk_zc; + int xsk_tx_pkts_sent; + struct xsk_buff_pool *xsk_pool; + struct dpaa2_eth_bp *bp; }; struct dpaa2_eth_dist_fields { @@ -506,7 +539,7 @@ struct dpaa2_eth_trap_data { #define DPAA2_ETH_DEFAULT_COPYBREAK 512 -#define DPAA2_ETH_ENQUEUE_MAX_FDS 200 +#define DPAA2_ETH_ENQUEUE_MAX_FDS 256 struct dpaa2_eth_fds { struct dpaa2_fd array[DPAA2_ETH_ENQUEUE_MAX_FDS]; }; @@ -535,14 +568,16 @@ struct dpaa2_eth_priv { u8 ptp_correction_off; void (*dpaa2_set_onestep_params_cb)(struct dpaa2_eth_priv *priv, u32 offset, u8 udp); - struct fsl_mc_device *dpbp_dev; u16 rx_buf_size; - u16 bpid; struct iommu_domain *iommu_domain; enum hwtstamp_tx_types tx_tstamp_type; /* Tx timestamping type */ bool rx_tstamp; /* Rx timestamping enabled */ + /* Buffer pool management */ + struct dpaa2_eth_bp *bp[DPAA2_ETH_MAX_BPS]; + int num_bps; + u16 tx_qdid; struct fsl_mc_io *mc_io; /* Cores which have an affine DPIO/DPCON. 
@@ -580,6 +615,8 @@ struct dpaa2_eth_priv { #endif struct dpaa2_mac *mac; + /* Serializes changes to priv->mac */ + struct mutex mac_lock; struct workqueue_struct *dpaa2_ptp_wq; struct work_struct tx_onestep_tstamp; struct sk_buff_head tx_skbs; @@ -733,16 +770,15 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv) static inline bool dpaa2_eth_is_type_phy(struct dpaa2_eth_priv *priv) { - if (priv->mac && - (priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY || - priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE)) - return true; + lockdep_assert_held(&priv->mac_lock); - return false; + return dpaa2_mac_is_type_phy(priv->mac); } static inline bool dpaa2_eth_has_mac(struct dpaa2_eth_priv *priv) { + lockdep_assert_held(&priv->mac_lock); + return priv->mac ? true : false; } @@ -771,4 +807,54 @@ void dpaa2_eth_dl_traps_unregister(struct dpaa2_eth_priv *priv); struct dpaa2_eth_trap_item *dpaa2_eth_dl_get_trap(struct dpaa2_eth_priv *priv, struct dpaa2_fapr *fapr); + +struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv); +void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp); + +struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, u32 fd_length, + void *fd_vaddr); + +void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, void *vaddr, + struct dpaa2_eth_fq *fq, + struct rtnl_link_stats64 *percpu_stats, + struct sk_buff *skb); + +void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + struct dpaa2_eth_fq *fq); + +struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv); +void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_bp *bp); + +void *dpaa2_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr); +void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + dma_addr_t addr); + +void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_fd *fd, + void *buf_start, u16 queue_id); + +int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags); +int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid); + +void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_eth_fq *fq, + const struct dpaa2_fd *fd, bool in_napi); +bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch); + +/* SGT (Scatter-Gather Table) cache management */ +void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv); + +void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf); + #endif /* __DPAA2_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index eea7d7a07c00..e80e9388c71f 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) /* Copyright 2014-2016 Freescale Semiconductor Inc. 
- * Copyright 2016 NXP - * Copyright 2020 NXP + * Copyright 2016-2022 NXP */ #include <linux/net_tstamp.h> @@ -86,11 +85,16 @@ static void dpaa2_eth_get_drvinfo(struct net_device *net_dev, static int dpaa2_eth_nway_reset(struct net_device *net_dev) { struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int err = -EOPNOTSUPP; + + mutex_lock(&priv->mac_lock); if (dpaa2_eth_is_type_phy(priv)) - return phylink_ethtool_nway_reset(priv->mac->phylink); + err = phylink_ethtool_nway_reset(priv->mac->phylink); + + mutex_unlock(&priv->mac_lock); - return -EOPNOTSUPP; + return err; } static int @@ -98,10 +102,18 @@ dpaa2_eth_get_link_ksettings(struct net_device *net_dev, struct ethtool_link_ksettings *link_settings) { struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int err; - if (dpaa2_eth_is_type_phy(priv)) - return phylink_ethtool_ksettings_get(priv->mac->phylink, - link_settings); + mutex_lock(&priv->mac_lock); + + if (dpaa2_eth_is_type_phy(priv)) { + err = phylink_ethtool_ksettings_get(priv->mac->phylink, + link_settings); + mutex_unlock(&priv->mac_lock); + return err; + } + + mutex_unlock(&priv->mac_lock); link_settings->base.autoneg = AUTONEG_DISABLE; if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX)) @@ -116,11 +128,17 @@ dpaa2_eth_set_link_ksettings(struct net_device *net_dev, const struct ethtool_link_ksettings *link_settings) { struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int err = -EOPNOTSUPP; + + mutex_lock(&priv->mac_lock); + + if (dpaa2_eth_is_type_phy(priv)) + err = phylink_ethtool_ksettings_set(priv->mac->phylink, + link_settings); - if (!dpaa2_eth_is_type_phy(priv)) - return -ENOTSUPP; + mutex_unlock(&priv->mac_lock); - return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings); + return err; } static void dpaa2_eth_get_pauseparam(struct net_device *net_dev, @@ -129,11 +147,16 @@ static void dpaa2_eth_get_pauseparam(struct net_device *net_dev, struct dpaa2_eth_priv *priv = netdev_priv(net_dev); u64 link_options = priv->link_state.options; + mutex_lock(&priv->mac_lock); + if (dpaa2_eth_is_type_phy(priv)) { phylink_ethtool_get_pauseparam(priv->mac->phylink, pause); + mutex_unlock(&priv->mac_lock); return; } + mutex_unlock(&priv->mac_lock); + pause->rx_pause = dpaa2_eth_rx_pause_enabled(link_options); pause->tx_pause = dpaa2_eth_tx_pause_enabled(link_options); pause->autoneg = AUTONEG_DISABLE; @@ -152,9 +175,17 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev, return -EOPNOTSUPP; } - if (dpaa2_eth_is_type_phy(priv)) - return phylink_ethtool_set_pauseparam(priv->mac->phylink, - pause); + mutex_lock(&priv->mac_lock); + + if (dpaa2_eth_is_type_phy(priv)) { + err = phylink_ethtool_set_pauseparam(priv->mac->phylink, + pause); + mutex_unlock(&priv->mac_lock); + return err; + } + + mutex_unlock(&priv->mac_lock); + if (pause->autoneg) return -EOPNOTSUPP; @@ -186,7 +217,6 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev, static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { - struct dpaa2_eth_priv *priv = netdev_priv(netdev); u8 *p = data; int i; @@ -200,22 +230,17 @@ static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset, strscpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - if (dpaa2_eth_has_mac(priv)) - dpaa2_mac_get_strings(p); + dpaa2_mac_get_strings(p); break; } } static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset) { - int num_ss_stats = DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS; - struct dpaa2_eth_priv *priv = 
netdev_priv(net_dev); - switch (sset) { case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */ - if (dpaa2_eth_has_mac(priv)) - num_ss_stats += dpaa2_mac_get_sset_count(); - return num_ss_stats; + return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS + + dpaa2_mac_get_sset_count(); default: return -EOPNOTSUPP; } @@ -227,17 +252,8 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, struct ethtool_stats *stats, u64 *data) { - int i = 0; - int j, k, err; - int num_cnt; - union dpni_statistics dpni_stats; - u32 fcnt, bcnt; - u32 fcnt_rx_total = 0, fcnt_tx_total = 0; - u32 bcnt_rx_total = 0, bcnt_tx_total = 0; - u32 buf_cnt; struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - struct dpaa2_eth_drv_stats *extras; - struct dpaa2_eth_ch_stats *ch_stats; + union dpni_statistics dpni_stats; int dpni_stats_page_size[DPNI_STATISTICS_CNT] = { sizeof(dpni_stats.page_0), sizeof(dpni_stats.page_1), @@ -247,6 +263,13 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, sizeof(dpni_stats.page_5), sizeof(dpni_stats.page_6), }; + u32 fcnt_rx_total = 0, fcnt_tx_total = 0; + u32 bcnt_rx_total = 0, bcnt_tx_total = 0; + struct dpaa2_eth_ch_stats *ch_stats; + struct dpaa2_eth_drv_stats *extras; + u32 buf_cnt, buf_cnt_total = 0; + int j, k, err, num_cnt, i = 0; + u32 fcnt, bcnt; memset(data, 0, sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS)); @@ -308,15 +331,22 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, *(data + i++) = fcnt_tx_total; *(data + i++) = bcnt_tx_total; - err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt); - if (err) { - netdev_warn(net_dev, "Buffer count query error %d\n", err); - return; + for (j = 0; j < priv->num_bps; j++) { + err = dpaa2_io_query_bp_count(NULL, priv->bp[j]->bpid, &buf_cnt); + if (err) { + netdev_warn(net_dev, "Buffer count query error %d\n", err); + return; + } + buf_cnt_total += buf_cnt; } - *(data + i++) = buf_cnt; + *(data + i++) = buf_cnt_total; + + mutex_lock(&priv->mac_lock); if (dpaa2_eth_has_mac(priv)) dpaa2_mac_get_ethtool_stats(priv->mac, data + i); + + mutex_unlock(&priv->mac_lock); } static int dpaa2_eth_prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask, @@ -876,6 +906,29 @@ restore_rx_usecs: return err; } +static void dpaa2_eth_get_channels(struct net_device *net_dev, + struct ethtool_channels *channels) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int queue_count = dpaa2_eth_queue_count(priv); + + channels->max_rx = queue_count; + channels->max_tx = queue_count; + channels->rx_count = queue_count; + channels->tx_count = queue_count; + + /* Tx confirmation and Rx error */ + channels->max_other = queue_count + 1; + channels->max_combined = channels->max_rx + + channels->max_tx + + channels->max_other; + /* Tx conf and Rx err */ + channels->other_count = queue_count + 1; + channels->combined_count = channels->rx_count + + channels->tx_count + + channels->other_count; +} + const struct ethtool_ops dpaa2_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX, @@ -896,4 +949,5 @@ const struct ethtool_ops dpaa2_ethtool_ops = { .set_tunable = dpaa2_eth_set_tunable, .get_coalesce = dpaa2_eth_get_coalesce, .set_coalesce = dpaa2_eth_set_coalesce, + .get_channels = dpaa2_eth_get_channels, }; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c index 49ff85633783..c886f33f8c6f 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c +++ 
b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c @@ -105,6 +105,7 @@ static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev, * thus the fwnode field is not yet set. Defer probe if we are * facing this situation. */ + dev_dbg(dev, "dprc not finished probing\n"); return ERR_PTR(-EPROBE_DEFER); } @@ -235,7 +236,6 @@ static void dpaa2_mac_link_down(struct phylink_config *config, } static const struct phylink_mac_ops dpaa2_mac_phylink_ops = { - .validate = phylink_generic_validate, .mac_select_pcs = dpaa2_mac_select_pcs, .mac_config = dpaa2_mac_config, .mac_link_up = dpaa2_mac_link_up, @@ -264,8 +264,10 @@ static int dpaa2_pcs_create(struct dpaa2_mac *mac, mdiodev = fwnode_mdio_find_device(node); fwnode_handle_put(node); - if (!mdiodev) + if (!mdiodev) { + netdev_dbg(mac->net_dev, "missing PCS device\n"); return -EPROBE_DEFER; + } mac->pcs = lynx_pcs_create(mdiodev); if (!mac->pcs) { @@ -336,12 +338,20 @@ static void dpaa2_mac_set_supported_interfaces(struct dpaa2_mac *mac) void dpaa2_mac_start(struct dpaa2_mac *mac) { + ASSERT_RTNL(); + if (mac->serdes_phy) phy_power_on(mac->serdes_phy); + + phylink_start(mac->phylink); } void dpaa2_mac_stop(struct dpaa2_mac *mac) { + ASSERT_RTNL(); + + phylink_stop(mac->phylink); + if (mac->serdes_phy) phy_power_off(mac->serdes_phy); } @@ -420,7 +430,9 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac) } mac->phylink = phylink; + rtnl_lock(); err = phylink_fwnode_phy_connect(mac->phylink, dpmac_node, 0); + rtnl_unlock(); if (err) { netdev_err(net_dev, "phylink_fwnode_phy_connect() = %d\n", err); goto err_phylink_destroy; @@ -438,10 +450,10 @@ err_pcs_destroy: void dpaa2_mac_disconnect(struct dpaa2_mac *mac) { - if (!mac->phylink) - return; - + rtnl_lock(); phylink_disconnect_phy(mac->phylink); + rtnl_unlock(); + phylink_destroy(mac->phylink); dpaa2_pcs_destroy(mac); of_phy_put(mac->serdes_phy); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h index a58cab188a99..c1ec9efd413a 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h @@ -30,8 +30,14 @@ struct dpaa2_mac { struct phy *serdes_phy; }; -bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev, - struct fsl_mc_io *mc_io); +static inline bool dpaa2_mac_is_type_phy(struct dpaa2_mac *mac) +{ + if (!mac) + return false; + + return mac->attr.link_type == DPMAC_LINK_TYPE_PHY || + mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE; +} int dpaa2_mac_open(struct dpaa2_mac *mac); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c index c8cb541572ff..90d23ab1ce9d 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c @@ -8,7 +8,6 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> -#include <linux/msi.h> #include <linux/fsl/mc.h> #include "dpaa2-ptp.h" diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c index 720c9230cab5..6bc1988be311 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c @@ -60,11 +60,18 @@ dpaa2_switch_get_link_ksettings(struct net_device *netdev, { struct ethsw_port_priv *port_priv = netdev_priv(netdev); struct dpsw_link_state state = {0}; - int err = 0; + int err; + + mutex_lock(&port_priv->mac_lock); + + if 
(dpaa2_switch_port_is_type_phy(port_priv)) { + err = phylink_ethtool_ksettings_get(port_priv->mac->phylink, + link_ksettings); + mutex_unlock(&port_priv->mac_lock); + return err; + } - if (dpaa2_switch_port_is_type_phy(port_priv)) - return phylink_ethtool_ksettings_get(port_priv->mac->phylink, - link_ksettings); + mutex_unlock(&port_priv->mac_lock); err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0, port_priv->ethsw_data->dpsw_handle, @@ -99,9 +106,16 @@ dpaa2_switch_set_link_ksettings(struct net_device *netdev, bool if_running; int err = 0, ret; - if (dpaa2_switch_port_is_type_phy(port_priv)) - return phylink_ethtool_ksettings_set(port_priv->mac->phylink, - link_ksettings); + mutex_lock(&port_priv->mac_lock); + + if (dpaa2_switch_port_is_type_phy(port_priv)) { + err = phylink_ethtool_ksettings_set(port_priv->mac->phylink, + link_ksettings); + mutex_unlock(&port_priv->mac_lock); + return err; + } + + mutex_unlock(&port_priv->mac_lock); /* Interface needs to be down to change link settings */ if_running = netif_running(netdev); @@ -145,14 +159,9 @@ dpaa2_switch_set_link_ksettings(struct net_device *netdev, static int dpaa2_switch_ethtool_get_sset_count(struct net_device *netdev, int sset) { - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - int num_ss_stats = DPAA2_SWITCH_NUM_COUNTERS; - switch (sset) { case ETH_SS_STATS: - if (port_priv->mac) - num_ss_stats += dpaa2_mac_get_sset_count(); - return num_ss_stats; + return DPAA2_SWITCH_NUM_COUNTERS + dpaa2_mac_get_sset_count(); default: return -EOPNOTSUPP; } @@ -161,7 +170,6 @@ dpaa2_switch_ethtool_get_sset_count(struct net_device *netdev, int sset) static void dpaa2_switch_ethtool_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { - struct ethsw_port_priv *port_priv = netdev_priv(netdev); u8 *p = data; int i; @@ -172,8 +180,7 @@ static void dpaa2_switch_ethtool_get_strings(struct net_device *netdev, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - if (port_priv->mac) - dpaa2_mac_get_strings(p); + dpaa2_mac_get_strings(p); break; } } @@ -196,8 +203,12 @@ static void dpaa2_switch_ethtool_get_stats(struct net_device *netdev, dpaa2_switch_ethtool_counters[i].name, err); } - if (port_priv->mac) + mutex_lock(&port_priv->mac_lock); + + if (dpaa2_switch_port_has_mac(port_priv)) dpaa2_mac_get_ethtool_stats(port_priv->mac, data + i); + + mutex_unlock(&port_priv->mac_lock); } const struct ethtool_ops dpaa2_switch_port_ethtool_ops = { diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index 2b5909fa93cf..f4ae4289c41a 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -10,7 +10,6 @@ #include <linux/module.h> #include <linux/interrupt.h> -#include <linux/msi.h> #include <linux/kthread.h> #include <linux/workqueue.h> #include <linux/iommu.h> @@ -603,8 +602,11 @@ static int dpaa2_switch_port_link_state_update(struct net_device *netdev) /* When we manage the MAC/PHY using phylink there is no need * to manually update the netif_carrier. + * We can avoid locking because we are called from the "link changed" + * IRQ handler, which is the same as the "endpoint changed" IRQ handler + * (the writer to port_priv->mac), so we cannot race with it. 
*/ - if (dpaa2_switch_port_is_type_phy(port_priv)) + if (dpaa2_mac_is_type_phy(port_priv->mac)) return 0; /* Interrupts are received even though no one issued an 'ifconfig up' @@ -684,6 +686,8 @@ static int dpaa2_switch_port_open(struct net_device *netdev) struct ethsw_core *ethsw = port_priv->ethsw_data; int err; + mutex_lock(&port_priv->mac_lock); + if (!dpaa2_switch_port_is_type_phy(port_priv)) { /* Explicitly set carrier off, otherwise * netif_carrier_ok() will return true and cause 'ip link show' @@ -697,16 +701,17 @@ static int dpaa2_switch_port_open(struct net_device *netdev) port_priv->ethsw_data->dpsw_handle, port_priv->idx); if (err) { + mutex_unlock(&port_priv->mac_lock); netdev_err(netdev, "dpsw_if_enable err %d\n", err); return err; } dpaa2_switch_enable_ctrl_if_napi(ethsw); - if (dpaa2_switch_port_is_type_phy(port_priv)) { + if (dpaa2_switch_port_is_type_phy(port_priv)) dpaa2_mac_start(port_priv->mac); - phylink_start(port_priv->mac->phylink); - } + + mutex_unlock(&port_priv->mac_lock); return 0; } @@ -717,14 +722,17 @@ static int dpaa2_switch_port_stop(struct net_device *netdev) struct ethsw_core *ethsw = port_priv->ethsw_data; int err; + mutex_lock(&port_priv->mac_lock); + if (dpaa2_switch_port_is_type_phy(port_priv)) { - phylink_stop(port_priv->mac->phylink); dpaa2_mac_stop(port_priv->mac); } else { netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); } + mutex_unlock(&port_priv->mac_lock); + err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0, port_priv->ethsw_data->dpsw_handle, port_priv->idx); @@ -1453,9 +1461,8 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv) err = dpaa2_mac_open(mac); if (err) goto err_free_mac; - port_priv->mac = mac; - if (dpaa2_switch_port_is_type_phy(port_priv)) { + if (dpaa2_mac_is_type_phy(mac)) { err = dpaa2_mac_connect(mac); if (err) { netdev_err(port_priv->netdev, @@ -1465,11 +1472,14 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv) } } + mutex_lock(&port_priv->mac_lock); + port_priv->mac = mac; + mutex_unlock(&port_priv->mac_lock); + return 0; err_close_mac: dpaa2_mac_close(mac); - port_priv->mac = NULL; err_free_mac: kfree(mac); return err; @@ -1477,15 +1487,21 @@ err_free_mac: static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv) { - if (dpaa2_switch_port_is_type_phy(port_priv)) - dpaa2_mac_disconnect(port_priv->mac); + struct dpaa2_mac *mac; + + mutex_lock(&port_priv->mac_lock); + mac = port_priv->mac; + port_priv->mac = NULL; + mutex_unlock(&port_priv->mac_lock); - if (!dpaa2_switch_port_has_mac(port_priv)) + if (!mac) return; - dpaa2_mac_close(port_priv->mac); - kfree(port_priv->mac); - port_priv->mac = NULL; + if (dpaa2_mac_is_type_phy(mac)) + dpaa2_mac_disconnect(mac); + + dpaa2_mac_close(mac); + kfree(mac); } static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg) @@ -1495,6 +1511,7 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg) struct ethsw_port_priv *port_priv; u32 status = ~0; int err, if_id; + bool had_mac; err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, DPSW_IRQ_INDEX_IF, &status); @@ -1512,12 +1529,15 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg) } if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) { - rtnl_lock(); - if (dpaa2_switch_port_has_mac(port_priv)) + /* We can avoid locking because the "endpoint changed" IRQ + * handler is the only one who changes priv->mac at runtime, + * so we are not racing with anyone. 
+ */ + had_mac = !!port_priv->mac; + if (had_mac) dpaa2_switch_port_disconnect_mac(port_priv); else dpaa2_switch_port_connect_mac(port_priv); - rtnl_unlock(); } out: @@ -2935,9 +2955,7 @@ static void dpaa2_switch_remove_port(struct ethsw_core *ethsw, { struct ethsw_port_priv *port_priv = ethsw->ports[port_idx]; - rtnl_lock(); dpaa2_switch_port_disconnect_mac(port_priv); - rtnl_unlock(); free_netdev(port_priv->netdev); ethsw->ports[port_idx] = NULL; } @@ -3256,6 +3274,8 @@ static int dpaa2_switch_probe_port(struct ethsw_core *ethsw, port_priv->netdev = port_netdev; port_priv->ethsw_data = ethsw; + mutex_init(&port_priv->mac_lock); + port_priv->idx = port_idx; port_priv->stp_state = BR_STATE_FORWARDING; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h index 0002dca4d417..42b3ca73f55d 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h @@ -161,6 +161,8 @@ struct ethsw_port_priv { struct dpaa2_switch_filter_block *filter_block; struct dpaa2_mac *mac; + /* Protects against changes to port_priv->mac */ + struct mutex mac_lock; }; /* Switch data */ @@ -230,12 +232,7 @@ static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw) static inline bool dpaa2_switch_port_is_type_phy(struct ethsw_port_priv *port_priv) { - if (port_priv->mac && - (port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY || - port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE)) - return true; - - return false; + return dpaa2_mac_is_type_phy(port_priv->mac); } static inline bool dpaa2_switch_port_has_mac(struct ethsw_port_priv *port_priv) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c new file mode 100644 index 000000000000..051748b997f3 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c @@ -0,0 +1,454 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2022 NXP + */ +#include <linux/filter.h> +#include <linux/compiler.h> +#include <linux/bpf_trace.h> +#include <net/xdp.h> +#include <net/xdp_sock_drv.h> + +#include "dpaa2-eth.h" + +static void dpaa2_eth_setup_consume_func(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + enum dpaa2_eth_fq_type type, + dpaa2_eth_consume_cb_t *consume) +{ + struct dpaa2_eth_fq *fq; + int i; + + for (i = 0; i < priv->num_fqs; i++) { + fq = &priv->fq[i]; + + if (fq->type != type) + continue; + if (fq->channel != ch) + continue; + + fq->consume = consume; + } +} + +static u32 dpaa2_xsk_run_xdp(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_eth_fq *rx_fq, + struct dpaa2_fd *fd, void *vaddr) +{ + dma_addr_t addr = dpaa2_fd_get_addr(fd); + struct bpf_prog *xdp_prog; + struct xdp_buff *xdp_buff; + struct dpaa2_eth_swa *swa; + u32 xdp_act = XDP_PASS; + int err; + + xdp_prog = READ_ONCE(ch->xdp.prog); + if (!xdp_prog) + goto out; + + swa = (struct dpaa2_eth_swa *)(vaddr + DPAA2_ETH_RX_HWA_SIZE + + ch->xsk_pool->umem->headroom); + xdp_buff = swa->xsk.xdp_buff; + + xdp_buff->data_hard_start = vaddr; + xdp_buff->data = vaddr + dpaa2_fd_get_offset(fd); + xdp_buff->data_end = xdp_buff->data + dpaa2_fd_get_len(fd); + xdp_set_data_meta_invalid(xdp_buff); + xdp_buff->rxq = &ch->xdp_rxq; + + xsk_buff_dma_sync_for_cpu(xdp_buff, ch->xsk_pool); + xdp_act = bpf_prog_run_xdp(xdp_prog, xdp_buff); + + /* xdp.data pointer may have changed */ + dpaa2_fd_set_offset(fd, xdp_buff->data - vaddr); + 
dpaa2_fd_set_len(fd, xdp_buff->data_end - xdp_buff->data); + + if (likely(xdp_act == XDP_REDIRECT)) { + err = xdp_do_redirect(priv->net_dev, xdp_buff, xdp_prog); + if (unlikely(err)) { + ch->stats.xdp_drop++; + dpaa2_eth_recycle_buf(priv, ch, addr); + } else { + ch->buf_count--; + ch->stats.xdp_redirect++; + } + + goto xdp_redir; + } + + switch (xdp_act) { + case XDP_PASS: + break; + case XDP_TX: + dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid); + break; + default: + bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); + fallthrough; + case XDP_DROP: + dpaa2_eth_recycle_buf(priv, ch, addr); + ch->stats.xdp_drop++; + break; + } + +xdp_redir: + ch->xdp.res |= xdp_act; +out: + return xdp_act; +} + +/* Rx frame processing routine for the AF_XDP fast path */ +static void dpaa2_xsk_rx(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + struct dpaa2_eth_fq *fq) +{ + dma_addr_t addr = dpaa2_fd_get_addr(fd); + u8 fd_format = dpaa2_fd_get_format(fd); + struct rtnl_link_stats64 *percpu_stats; + u32 fd_length = dpaa2_fd_get_len(fd); + struct sk_buff *skb; + void *vaddr; + u32 xdp_act; + + trace_dpaa2_rx_xsk_fd(priv->net_dev, fd); + + vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); + percpu_stats = this_cpu_ptr(priv->percpu_stats); + + if (fd_format != dpaa2_fd_single) { + WARN_ON(priv->xdp_prog); + /* AF_XDP doesn't support any other formats */ + goto err_frame_format; + } + + xdp_act = dpaa2_xsk_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr); + if (xdp_act != XDP_PASS) { + percpu_stats->rx_packets++; + percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); + return; + } + + /* Build skb */ + skb = dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, vaddr); + if (!skb) + /* Nothing else we can do, recycle the buffer and + * drop the frame. 
+ */ + goto err_alloc_skb; + + /* Send the skb to the Linux networking stack */ + dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb); + + return; + +err_alloc_skb: + dpaa2_eth_recycle_buf(priv, ch, addr); +err_frame_format: + percpu_stats->rx_dropped++; +} + +static void dpaa2_xsk_set_bp_per_qdbin(struct dpaa2_eth_priv *priv, + struct dpni_pools_cfg *pools_params) +{ + int curr_bp = 0, i, j; + + pools_params->pool_options = DPNI_POOL_ASSOC_QDBIN; + for (i = 0; i < priv->num_bps; i++) { + for (j = 0; j < priv->num_channels; j++) + if (priv->bp[i] == priv->channel[j]->bp) + pools_params->pools[curr_bp].priority_mask |= (1 << j); + if (!pools_params->pools[curr_bp].priority_mask) + continue; + + pools_params->pools[curr_bp].dpbp_id = priv->bp[i]->bpid; + pools_params->pools[curr_bp].buffer_size = priv->rx_buf_size; + pools_params->pools[curr_bp++].backup_pool = 0; + } + pools_params->num_dpbp = curr_bp; +} + +static int dpaa2_xsk_disable_pool(struct net_device *dev, u16 qid) +{ + struct xsk_buff_pool *pool = xsk_get_pool_from_qid(dev, qid); + struct dpaa2_eth_priv *priv = netdev_priv(dev); + struct dpni_pools_cfg pools_params = { 0 }; + struct dpaa2_eth_channel *ch; + int err; + bool up; + + ch = priv->channel[qid]; + if (!ch->xsk_pool) + return -EINVAL; + + up = netif_running(dev); + if (up) + dev_close(dev); + + xsk_pool_dma_unmap(pool, 0); + err = xdp_rxq_info_reg_mem_model(&ch->xdp_rxq, + MEM_TYPE_PAGE_ORDER0, NULL); + if (err) + netdev_err(dev, "xdp_rxq_info_reg_mem_model() failed (err = %d)\n", + err); + + dpaa2_eth_free_dpbp(priv, ch->bp); + + ch->xsk_zc = false; + ch->xsk_pool = NULL; + ch->xsk_tx_pkts_sent = 0; + ch->bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX]; + + dpaa2_eth_setup_consume_func(priv, ch, DPAA2_RX_FQ, dpaa2_eth_rx); + + dpaa2_xsk_set_bp_per_qdbin(priv, &pools_params); + err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); + if (err) + netdev_err(dev, "dpni_set_pools() failed\n"); + + if (up) { + err = dev_open(dev, NULL); + if (err) + return err; + } + + return 0; +} + +static int dpaa2_xsk_enable_pool(struct net_device *dev, + struct xsk_buff_pool *pool, + u16 qid) +{ + struct dpaa2_eth_priv *priv = netdev_priv(dev); + struct dpni_pools_cfg pools_params = { 0 }; + struct dpaa2_eth_channel *ch; + int err, err2; + bool up; + + if (priv->dpni_attrs.wriop_version < DPAA2_WRIOP_VERSION(3, 0, 0)) { + netdev_err(dev, "AF_XDP zero-copy not supported on devices < WRIOP(3, 0, 0)\n"); + return -EOPNOTSUPP; + } + + if (priv->dpni_attrs.num_queues > 8) { + netdev_err(dev, "AF_XDP zero-copy not supported on DPNI with more than 8 queues\n"); + return -EOPNOTSUPP; + } + + up = netif_running(dev); + if (up) + dev_close(dev); + + err = xsk_pool_dma_map(pool, priv->net_dev->dev.parent, 0); + if (err) { + netdev_err(dev, "xsk_pool_dma_map() failed (err = %d)\n", + err); + goto err_dma_unmap; + } + + ch = priv->channel[qid]; + err = xdp_rxq_info_reg_mem_model(&ch->xdp_rxq, MEM_TYPE_XSK_BUFF_POOL, NULL); + if (err) { + netdev_err(dev, "xdp_rxq_info_reg_mem_model() failed (err = %d)\n", err); + goto err_mem_model; + } + xsk_pool_set_rxq_info(pool, &ch->xdp_rxq); + + priv->bp[priv->num_bps] = dpaa2_eth_allocate_dpbp(priv); + if (IS_ERR(priv->bp[priv->num_bps])) { + err = PTR_ERR(priv->bp[priv->num_bps]); + goto err_bp_alloc; + } + ch->xsk_zc = true; + ch->xsk_pool = pool; + ch->bp = priv->bp[priv->num_bps++]; + + dpaa2_eth_setup_consume_func(priv, ch, DPAA2_RX_FQ, dpaa2_xsk_rx); + + dpaa2_xsk_set_bp_per_qdbin(priv, &pools_params); + err = 
dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); + if (err) { + netdev_err(dev, "dpni_set_pools() failed\n"); + goto err_set_pools; + } + + if (up) { + err = dev_open(dev, NULL); + if (err) + return err; + } + + return 0; + +err_set_pools: + err2 = dpaa2_xsk_disable_pool(dev, qid); + if (err2) + netdev_err(dev, "dpaa2_xsk_disable_pool() failed %d\n", err2); +err_bp_alloc: + err2 = xdp_rxq_info_reg_mem_model(&priv->channel[qid]->xdp_rxq, + MEM_TYPE_PAGE_ORDER0, NULL); + if (err2) + netdev_err(dev, "xdp_rxq_info_reg_mem_model() failed (err = %d)\n", err2); +err_mem_model: + xsk_pool_dma_unmap(pool, 0); +err_dma_unmap: + if (up) + dev_open(dev, NULL); + + return err; +} + +int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid) +{ + return pool ? dpaa2_xsk_enable_pool(dev, pool, qid) : + dpaa2_xsk_disable_pool(dev, qid); +} + +int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) +{ + struct dpaa2_eth_priv *priv = netdev_priv(dev); + struct dpaa2_eth_channel *ch = priv->channel[qid]; + + if (!priv->link_state.up) + return -ENETDOWN; + + if (!priv->xdp_prog) + return -EINVAL; + + if (!ch->xsk_zc) + return -EINVAL; + + /* We do not have access to a per channel SW interrupt, so instead we + * schedule a NAPI instance. + */ + if (!napi_if_scheduled_mark_missed(&ch->napi)) + napi_schedule(&ch->napi); + + return 0; +} + +static int dpaa2_xsk_tx_build_fd(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_fd *fd, + struct xdp_desc *xdp_desc) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpaa2_sg_entry *sgt; + struct dpaa2_eth_swa *swa; + void *sgt_buf = NULL; + dma_addr_t sgt_addr; + int sgt_buf_size; + dma_addr_t addr; + int err = 0; + + /* Prepare the HW SGT structure */ + sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry); + sgt_buf = dpaa2_eth_sgt_get(priv); + if (unlikely(!sgt_buf)) + return -ENOMEM; + sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); + + /* Get the address of the XSK Tx buffer */ + addr = xsk_buff_raw_get_dma(ch->xsk_pool, xdp_desc->addr); + xsk_buff_raw_dma_sync_for_device(ch->xsk_pool, addr, xdp_desc->len); + + /* Fill in the HW SGT structure */ + dpaa2_sg_set_addr(sgt, addr); + dpaa2_sg_set_len(sgt, xdp_desc->len); + dpaa2_sg_set_final(sgt, true); + + /* Store the necessary info in the SGT buffer */ + swa = (struct dpaa2_eth_swa *)sgt_buf; + swa->type = DPAA2_ETH_SWA_XSK; + swa->xsk.sgt_size = sgt_buf_size; + + /* Separately map the SGT buffer */ + sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, sgt_addr))) { + err = -ENOMEM; + goto sgt_map_failed; + } + + /* Initialize FD fields */ + memset(fd, 0, sizeof(struct dpaa2_fd)); + dpaa2_fd_set_offset(fd, priv->tx_data_offset); + dpaa2_fd_set_format(fd, dpaa2_fd_sg); + dpaa2_fd_set_addr(fd, sgt_addr); + dpaa2_fd_set_len(fd, xdp_desc->len); + dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); + + return 0; + +sgt_map_failed: + dpaa2_eth_sgt_recycle(priv, sgt_buf); + + return err; +} + +bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch) +{ + struct xdp_desc *xdp_descs = ch->xsk_pool->tx_descs; + struct dpaa2_eth_drv_stats *percpu_extras; + struct rtnl_link_stats64 *percpu_stats; + int budget = DPAA2_ETH_TX_ZC_PER_NAPI; + int total_enqueued, enqueued; + int retries, max_retries; + struct dpaa2_eth_fq *fq; + struct dpaa2_fd *fds; + int batch, i, err; + + percpu_stats = this_cpu_ptr(priv->percpu_stats); + percpu_extras = 
this_cpu_ptr(priv->percpu_extras); + fds = (this_cpu_ptr(priv->fd))->array; + + /* Use the FQ with the same idx as the affine CPU */ + fq = &priv->fq[ch->nctx.desired_cpu]; + + batch = xsk_tx_peek_release_desc_batch(ch->xsk_pool, budget); + if (!batch) + return false; + + /* Create a FD for each XSK frame to be sent */ + for (i = 0; i < batch; i++) { + err = dpaa2_xsk_tx_build_fd(priv, ch, &fds[i], &xdp_descs[i]); + if (err) { + batch = i; + break; + } + + trace_dpaa2_tx_xsk_fd(priv->net_dev, &fds[i]); + } + + /* Enqueue all the created FDs */ + max_retries = batch * DPAA2_ETH_ENQUEUE_RETRIES; + total_enqueued = 0; + enqueued = 0; + retries = 0; + while (total_enqueued < batch && retries < max_retries) { + err = priv->enqueue(priv, fq, &fds[total_enqueued], 0, + batch - total_enqueued, &enqueued); + if (err == -EBUSY) { + retries++; + continue; + } + + total_enqueued += enqueued; + } + percpu_extras->tx_portal_busy += retries; + + /* Update statistics */ + percpu_stats->tx_packets += total_enqueued; + for (i = 0; i < total_enqueued; i++) + percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]); + for (i = total_enqueued; i < batch; i++) { + dpaa2_eth_free_tx_fd(priv, ch, fq, &fds[i], false); + percpu_stats->tx_errors++; + } + + xsk_tx_release(ch->xsk_pool); + + return total_enqueued == budget; +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h index 828f538097af..be9492b8d5dc 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h @@ -13,10 +13,12 @@ #define DPNI_VER_MINOR 0 #define DPNI_CMD_BASE_VERSION 1 #define DPNI_CMD_2ND_VERSION 2 +#define DPNI_CMD_3RD_VERSION 3 #define DPNI_CMD_ID_OFFSET 4 #define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION) #define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION) +#define DPNI_CMD_V3(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_3RD_VERSION) #define DPNI_CMDID_OPEN DPNI_CMD(0x801) #define DPNI_CMDID_CLOSE DPNI_CMD(0x800) @@ -39,7 +41,7 @@ #define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016) #define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017) -#define DPNI_CMDID_SET_POOLS DPNI_CMD(0x200) +#define DPNI_CMDID_SET_POOLS DPNI_CMD_V3(0x200) #define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B) #define DPNI_CMDID_GET_QDID DPNI_CMD(0x210) @@ -115,14 +117,19 @@ struct dpni_cmd_open { }; #define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order)) + +struct dpni_cmd_pool { + __le16 dpbp_id; + u8 priority_mask; + u8 pad; +}; + struct dpni_cmd_set_pools { - /* cmd word 0 */ u8 num_dpbp; u8 backup_pool_mask; - __le16 pad; - /* cmd word 0..4 */ - __le32 dpbp_id[DPNI_MAX_DPBP]; - /* cmd word 4..6 */ + u8 pad; + u8 pool_options; + struct dpni_cmd_pool pool[DPNI_MAX_DPBP]; __le16 buffer_size[DPNI_MAX_DPBP]; }; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c index 6c3b36f20fb8..02601a283b59 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c @@ -173,8 +173,12 @@ int dpni_set_pools(struct fsl_mc_io *mc_io, token); cmd_params = (struct dpni_cmd_set_pools *)cmd.params; cmd_params->num_dpbp = cfg->num_dpbp; + cmd_params->pool_options = cfg->pool_options; for (i = 0; i < DPNI_MAX_DPBP; i++) { - cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id); + cmd_params->pool[i].dpbp_id = + cpu_to_le16(cfg->pools[i].dpbp_id); + cmd_params->pool[i].priority_mask = + cfg->pools[i].priority_mask; 
cmd_params->buffer_size[i] = cpu_to_le16(cfg->pools[i].buffer_size); cmd_params->backup_pool_mask |= diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h index 6fffd519aa00..5c0a1d5ac934 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h @@ -92,19 +92,28 @@ int dpni_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); +#define DPNI_POOL_ASSOC_QPRI 0 +#define DPNI_POOL_ASSOC_QDBIN 1 + /** * struct dpni_pools_cfg - Structure representing buffer pools configuration * @num_dpbp: Number of DPBPs + * @pool_options: Buffer assignment options. + * This field is one of the DPNI_POOL_ASSOC_* values * @pools: Array of buffer pools parameters; The number of valid entries * must match 'num_dpbp' value * @pools.dpbp_id: DPBP object ID + * @pools.priority_mask: Priority mask that indicates the TCs used with this buffer pool. + * If set to 0x00, the MC assumes the value 0xff. * @pools.buffer_size: Buffer size * @pools.backup_pool: Backup pool */ struct dpni_pools_cfg { u8 num_dpbp; + u8 pool_options; struct { int dpbp_id; + u8 priority_mask; u16 buffer_size; int backup_pool; } pools[DPNI_MAX_DPBP];
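A minimal usage sketch of the extended dpni_set_pools() interface introduced above. It relies only on fields this series adds (pool_options, priority_mask, priv->num_bps, priv->bp[], priv->rx_buf_size); the helper name example_assign_pools() is hypothetical and merely illustrates how a driver could describe a one-pool-per-queue (QDBIN) layout to the MC firmware, along the lines of dpaa2_xsk_set_bp_per_qdbin():

/* Hypothetical helper, not part of this patch: associate each buffer
 * pool with exactly one queue (QDBIN association) and push the layout
 * to the MC firmware. Assumes at most 8 queues, as enforced by
 * dpaa2_xsk_enable_pool() above (priority_mask is a u8).
 */
static int example_assign_pools(struct dpaa2_eth_priv *priv)
{
	struct dpni_pools_cfg cfg = { 0 };
	int i;

	cfg.pool_options = DPNI_POOL_ASSOC_QDBIN;
	for (i = 0; i < priv->num_bps; i++) {
		cfg.pools[i].dpbp_id = priv->bp[i]->bpid;
		cfg.pools[i].priority_mask = 1 << i;	/* queue i only */
		cfg.pools[i].buffer_size = priv->rx_buf_size;
		cfg.pools[i].backup_pool = 0;
	}
	cfg.num_dpbp = priv->num_bps;

	return dpni_set_pools(priv->mc_io, 0, priv->mc_token, &cfg);
}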