From c4d175c323e3751badf77766c79cb130b4a3eb5f Mon Sep 17 00:00:00 2001 From: Michał Grzelak Date: Fri, 14 Oct 2022 23:32:52 +0200 Subject: dt-bindings: net: marvell,pp2: convert to json-schema MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Convert the marvell,pp2 bindings from text to proper schema. Move 'marvell,system-controller' and 'dma-coherent' properties from port up to the controller node, to match what is actually done in DT. Rename all subnodes to match "^(ethernet-)?port@[0-2]$" and deprecate port-id in favour of 'reg'. Signed-off-by: Michał Grzelak Reviewed-by: Rob Herring Signed-off-by: David S. Miller --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index a96c60c787af..5c6ce094e55e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12307,7 +12307,7 @@ M: Marcin Wojtas M: Russell King L: netdev@vger.kernel.org S: Maintained -F: Documentation/devicetree/bindings/net/marvell-pp2.txt +F: Documentation/devicetree/bindings/net/marvell,pp2.yaml F: drivers/net/ethernet/marvell/mvpp2/ MARVELL MWIFIEX WIRELESS DRIVER -- cgit v1.2.3 From 48276c08cf5d2039aad5ef92ae7057ae0946d51e Mon Sep 17 00:00:00 2001 From: Robert-Ionut Alexa Date: Tue, 18 Oct 2022 17:18:59 +0300 Subject: net: dpaa2-eth: AF_XDP RX zero copy support This patch adds support for receiving packets via the AF_XDP zero-copy mechanism in the dpaa2-eth driver. The support is available only on the LX2160A SoC and its variants because we are relying on the HW capability to associate a buffer pool with a specific queue (QDBIN), which is only available on newer WRIOP versions. On the control path, the dpaa2_xsk_enable_pool() function is responsible for allocating a buffer pool (BP), setting up this new BP to be used only on the requested queue, and changing the consume function to point to the XSK ZC one. We are forced to call dev_close() in order to change the queue to buffer pool association (dpaa2_xsk_set_bp_per_qdbin). This also works in our favor since at dev_close() the buffer pools will be drained and at the later dev_open() call they will be seeded again, this time with buffers allocated from the XSK pool if needed. On the data path, a new software annotation type is defined to be used only for the XSK scenarios. This enables us to keep the necessary information about a packet buffer between the moment in which it was seeded and the moment it is received by the driver. In the XSK case, we are keeping the associated xdp_buff. Depending on the action returned by the BPF program, we will do the following: - XDP_PASS: copy the contents of the packet into a brand new skb and recycle the initial buffer. - XDP_TX: just enqueue the same frame descriptor back into the Tx path, the buffer will get automatically released into the initial BP. - XDP_REDIRECT: call xdp_do_redirect() and exit. Signed-off-by: Robert-Ionut Alexa Signed-off-by: Ioana Ciornei Signed-off-by: David S.
Miller --- MAINTAINERS | 1 + drivers/net/ethernet/freescale/dpaa2/Makefile | 2 +- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 131 ++++++--- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 36 ++- drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c | 327 +++++++++++++++++++++++ 5 files changed, 452 insertions(+), 45 deletions(-) create mode 100644 drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 92708912fef8..479a15d86a91 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6326,6 +6326,7 @@ F: drivers/net/ethernet/freescale/dpaa2/Kconfig F: drivers/net/ethernet/freescale/dpaa2/Makefile F: drivers/net/ethernet/freescale/dpaa2/dpaa2-eth* F: drivers/net/ethernet/freescale/dpaa2/dpaa2-mac* +F: drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk* F: drivers/net/ethernet/freescale/dpaa2/dpkg.h F: drivers/net/ethernet/freescale/dpaa2/dpmac* F: drivers/net/ethernet/freescale/dpaa2/dpni* diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile index 3d9842af7f10..1b05ba8d1cbf 100644 --- a/drivers/net/ethernet/freescale/dpaa2/Makefile +++ b/drivers/net/ethernet/freescale/dpaa2/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o obj-$(CONFIG_FSL_DPAA2_SWITCH) += fsl-dpaa2-switch.o -fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o +fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o dpaa2-xsk.o fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 0b21a3ee1bf1..4c340f70f50e 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "dpaa2-eth.h" @@ -104,8 +105,8 @@ static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv) priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct; } -static void *dpaa2_iova_to_virt(struct iommu_domain *domain, - dma_addr_t iova_addr) +void *dpaa2_iova_to_virt(struct iommu_domain *domain, + dma_addr_t iova_addr) { phys_addr_t phys_addr; @@ -279,23 +280,33 @@ static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv, * be released in the pool */ static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, - int count) + int count, bool xsk_zc) { struct device *dev = priv->net_dev->dev.parent; + struct dpaa2_eth_swa *swa; + struct xdp_buff *xdp_buff; void *vaddr; int i; for (i = 0; i < count; i++) { vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); - dma_unmap_page(dev, buf_array[i], priv->rx_buf_size, - DMA_BIDIRECTIONAL); - free_pages((unsigned long)vaddr, 0); + + if (!xsk_zc) { + dma_unmap_page(dev, buf_array[i], priv->rx_buf_size, + DMA_BIDIRECTIONAL); + free_pages((unsigned long)vaddr, 0); + } else { + swa = (struct dpaa2_eth_swa *) + (vaddr + DPAA2_ETH_RX_HWA_SIZE); + xdp_buff = swa->xsk.xdp_buff; + xsk_buff_free(xdp_buff); + } } } -static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - dma_addr_t addr) +void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + dma_addr_t addr) { int retries = 0; int 
err; @@ -313,7 +324,8 @@ static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv, } if (err) { - dpaa2_eth_free_bufs(priv, ch->recycled_bufs, ch->recycled_bufs_cnt); + dpaa2_eth_free_bufs(priv, ch->recycled_bufs, + ch->recycled_bufs_cnt, ch->xsk_zc); ch->buf_count -= ch->recycled_bufs_cnt; } @@ -377,10 +389,10 @@ static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv, fq->xdp_tx_fds.num = 0; } -static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - struct dpaa2_fd *fd, - void *buf_start, u16 queue_id) +void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_fd *fd, + void *buf_start, u16 queue_id) { struct dpaa2_faead *faead; struct dpaa2_fd *dest_fd; @@ -1652,37 +1664,63 @@ static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv, struct dpaa2_eth_channel *ch) { + struct xdp_buff *xdp_buffs[DPAA2_ETH_BUFS_PER_CMD]; struct device *dev = priv->net_dev->dev.parent; u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; + struct dpaa2_eth_swa *swa; struct page *page; dma_addr_t addr; int retries = 0; - int i, err; - - for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { - /* Allocate buffer visible to WRIOP + skb shared info + - * alignment padding - */ - /* allocate one page for each Rx buffer. WRIOP sees - * the entire page except for a tailroom reserved for - * skb shared info + int i = 0, err; + u32 batch; + + /* Allocate buffers visible to WRIOP */ + if (!ch->xsk_zc) { + for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { + /* Also allocate skb shared info and alignment padding. + * There is one page for each Rx buffer. WRIOP sees + * the entire page except for a tailroom reserved for + * skb shared info + */ + page = dev_alloc_pages(0); + if (!page) + goto err_alloc; + + addr = dma_map_page(dev, page, 0, priv->rx_buf_size, + DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, addr))) + goto err_map; + + buf_array[i] = addr; + + /* tracing point */ + trace_dpaa2_eth_buf_seed(priv->net_dev, + page_address(page), + DPAA2_ETH_RX_BUF_RAW_SIZE, + addr, priv->rx_buf_size, + ch->bp->bpid); + } + } else if (xsk_buff_can_alloc(ch->xsk_pool, DPAA2_ETH_BUFS_PER_CMD)) { + /* Allocate XSK buffers for AF_XDP fast path in batches + * of DPAA2_ETH_BUFS_PER_CMD. 
Bail out if the UMEM cannot + * provide enough buffers at the moment */ - page = dev_alloc_pages(0); - if (!page) + batch = xsk_buff_alloc_batch(ch->xsk_pool, xdp_buffs, + DPAA2_ETH_BUFS_PER_CMD); + if (!batch) goto err_alloc; - addr = dma_map_page(dev, page, 0, priv->rx_buf_size, - DMA_BIDIRECTIONAL); - if (unlikely(dma_mapping_error(dev, addr))) - goto err_map; + for (i = 0; i < batch; i++) { + swa = (struct dpaa2_eth_swa *)(xdp_buffs[i]->data_hard_start + + DPAA2_ETH_RX_HWA_SIZE); + swa->xsk.xdp_buff = xdp_buffs[i]; - buf_array[i] = addr; + addr = xsk_buff_xdp_get_frame_dma(xdp_buffs[i]); + if (unlikely(dma_mapping_error(dev, addr))) + goto err_map; - /* tracing point */ - trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page), - DPAA2_ETH_RX_BUF_RAW_SIZE, - addr, priv->rx_buf_size, - ch->bp->bpid); + buf_array[i] = addr; + } } release_bufs: @@ -1698,14 +1736,19 @@ release_bufs: * not much else we can do about it */ if (err) { - dpaa2_eth_free_bufs(priv, buf_array, i); + dpaa2_eth_free_bufs(priv, buf_array, i, ch->xsk_zc); return 0; } return i; err_map: - __free_pages(page, 0); + if (!ch->xsk_zc) { + __free_pages(page, 0); + } else { + for (; i < batch; i++) + xsk_buff_free(xdp_buffs[i]); + } err_alloc: /* If we managed to allocate at least some buffers, * release them to hardware @@ -1764,8 +1807,13 @@ static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int bpid, int count) { u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; + bool xsk_zc = false; int retries = 0; - int ret; + int i, ret; + + for (i = 0; i < priv->num_channels; i++) + if (priv->channel[i]->bp->bpid == bpid) + xsk_zc = priv->channel[i]->xsk_zc; do { ret = dpaa2_io_service_acquire(NULL, bpid, buf_array, count); @@ -1776,7 +1824,7 @@ static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int bpid, netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); return; } - dpaa2_eth_free_bufs(priv, buf_array, ret); + dpaa2_eth_free_bufs(priv, buf_array, ret, xsk_zc); retries = 0; } while (ret); } @@ -2694,6 +2742,8 @@ static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: return dpaa2_eth_setup_xdp(dev, xdp->prog); + case XDP_SETUP_XSK_POOL: + return dpaa2_xsk_setup_pool(dev, xdp->xsk.pool, xdp->xsk.queue_id); default: return -EINVAL; } @@ -2924,6 +2974,7 @@ static const struct net_device_ops dpaa2_eth_ops = { .ndo_change_mtu = dpaa2_eth_change_mtu, .ndo_bpf = dpaa2_eth_xdp, .ndo_xdp_xmit = dpaa2_eth_xdp_xmit, + .ndo_xsk_wakeup = dpaa2_xsk_wakeup, .ndo_setup_tc = dpaa2_eth_setup_tc, .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid, .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid @@ -4247,8 +4298,8 @@ static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv) { struct dpaa2_eth_bp *bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX]; struct net_device *net_dev = priv->net_dev; + struct dpni_pools_cfg pools_params = { 0 }; struct device *dev = net_dev->dev.parent; - struct dpni_pools_cfg pools_params; struct dpni_error_cfg err_cfg; int err = 0; int i; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index 3c4fc46b1324..38f67b98865f 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -130,6 +130,7 @@ enum dpaa2_eth_swa_type { DPAA2_ETH_SWA_SINGLE, DPAA2_ETH_SWA_SG, DPAA2_ETH_SWA_XDP, + DPAA2_ETH_SWA_XSK, DPAA2_ETH_SWA_SW_TSO, }; @@ -151,6 +152,9 @@ struct dpaa2_eth_swa { int dma_size; struct xdp_frame *xdpf; } xdp; + struct { + struct 
xdp_buff *xdp_buff; + } xsk; struct { struct sk_buff *skb; int num_sg; @@ -429,12 +433,19 @@ enum dpaa2_eth_fq_type { }; struct dpaa2_eth_priv; +struct dpaa2_eth_channel; +struct dpaa2_eth_fq; struct dpaa2_eth_xdp_fds { struct dpaa2_fd fds[DEV_MAP_BULK_SIZE]; ssize_t num; }; +typedef void dpaa2_eth_consume_cb_t(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + struct dpaa2_eth_fq *fq); + struct dpaa2_eth_fq { u32 fqid; u32 tx_qdbin; @@ -447,10 +458,7 @@ struct dpaa2_eth_fq { struct dpaa2_eth_channel *channel; enum dpaa2_eth_fq_type type; - void (*consume)(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - const struct dpaa2_fd *fd, - struct dpaa2_eth_fq *fq); + dpaa2_eth_consume_cb_t *consume; struct dpaa2_eth_fq_stats stats; struct dpaa2_eth_xdp_fds xdp_redirect_fds; @@ -486,6 +494,8 @@ struct dpaa2_eth_channel { u64 recycled_bufs[DPAA2_ETH_BUFS_PER_CMD]; int recycled_bufs_cnt; + bool xsk_zc; + struct xsk_buff_pool *xsk_pool; struct dpaa2_eth_bp *bp; }; @@ -808,4 +818,22 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, struct dpaa2_eth_channel *ch, const struct dpaa2_fd *fd, struct dpaa2_eth_fq *fq); + +struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv); +void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_bp *bp); + +void *dpaa2_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr); +void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + dma_addr_t addr); + +void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_fd *fd, + void *buf_start, u16 queue_id); + +int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags); +int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid); + #endif /* __DPAA2_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c new file mode 100644 index 000000000000..2df7bffec5a7 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c @@ -0,0 +1,327 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2022 NXP + */ +#include +#include +#include +#include +#include + +#include "dpaa2-eth.h" + +static void dpaa2_eth_setup_consume_func(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + enum dpaa2_eth_fq_type type, + dpaa2_eth_consume_cb_t *consume) +{ + struct dpaa2_eth_fq *fq; + int i; + + for (i = 0; i < priv->num_fqs; i++) { + fq = &priv->fq[i]; + + if (fq->type != type) + continue; + if (fq->channel != ch) + continue; + + fq->consume = consume; + } +} + +static u32 dpaa2_xsk_run_xdp(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_eth_fq *rx_fq, + struct dpaa2_fd *fd, void *vaddr) +{ + dma_addr_t addr = dpaa2_fd_get_addr(fd); + struct bpf_prog *xdp_prog; + struct xdp_buff *xdp_buff; + struct dpaa2_eth_swa *swa; + u32 xdp_act = XDP_PASS; + int err; + + xdp_prog = READ_ONCE(ch->xdp.prog); + if (!xdp_prog) + goto out; + + swa = (struct dpaa2_eth_swa *)(vaddr + DPAA2_ETH_RX_HWA_SIZE + + ch->xsk_pool->umem->headroom); + xdp_buff = swa->xsk.xdp_buff; + + xdp_buff->data_hard_start = vaddr; + xdp_buff->data = vaddr + dpaa2_fd_get_offset(fd); + xdp_buff->data_end = xdp_buff->data + dpaa2_fd_get_len(fd); + xdp_set_data_meta_invalid(xdp_buff); + xdp_buff->rxq = &ch->xdp_rxq; + + xsk_buff_dma_sync_for_cpu(xdp_buff, ch->xsk_pool); + xdp_act = bpf_prog_run_xdp(xdp_prog, xdp_buff); + + /* xdp.data pointer may have changed */ + 
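/* The BPF program may have moved xdp_buff->data (e.g. via + * bpf_xdp_adjust_head()), so the new offset and length must be + * propagated into the frame descriptor, which is enqueued as-is on + * XDP_TX. + */ +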
dpaa2_fd_set_offset(fd, xdp_buff->data - vaddr); + dpaa2_fd_set_len(fd, xdp_buff->data_end - xdp_buff->data); + + if (likely(xdp_act == XDP_REDIRECT)) { + err = xdp_do_redirect(priv->net_dev, xdp_buff, xdp_prog); + if (unlikely(err)) { + ch->stats.xdp_drop++; + dpaa2_eth_recycle_buf(priv, ch, addr); + } else { + ch->buf_count--; + ch->stats.xdp_redirect++; + } + + goto xdp_redir; + } + + switch (xdp_act) { + case XDP_PASS: + break; + case XDP_TX: + dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid); + break; + default: + bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); + fallthrough; + case XDP_DROP: + dpaa2_eth_recycle_buf(priv, ch, addr); + ch->stats.xdp_drop++; + break; + } + +xdp_redir: + ch->xdp.res |= xdp_act; +out: + return xdp_act; +} + +/* Rx frame processing routine for the AF_XDP fast path */ +static void dpaa2_xsk_rx(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + struct dpaa2_eth_fq *fq) +{ + dma_addr_t addr = dpaa2_fd_get_addr(fd); + u8 fd_format = dpaa2_fd_get_format(fd); + struct rtnl_link_stats64 *percpu_stats; + u32 fd_length = dpaa2_fd_get_len(fd); + struct sk_buff *skb; + void *vaddr; + u32 xdp_act; + + vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); + percpu_stats = this_cpu_ptr(priv->percpu_stats); + + if (fd_format != dpaa2_fd_single) { + WARN_ON(priv->xdp_prog); + /* AF_XDP doesn't support any other formats */ + goto err_frame_format; + } + + xdp_act = dpaa2_xsk_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr); + if (xdp_act != XDP_PASS) { + percpu_stats->rx_packets++; + percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); + return; + } + + /* Build skb */ + skb = dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, vaddr); + if (!skb) + /* Nothing else we can do, recycle the buffer and + * drop the frame. 
+ */ + goto err_alloc_skb; + + /* Send the skb to the Linux networking stack */ + dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb); + + return; + +err_alloc_skb: + dpaa2_eth_recycle_buf(priv, ch, addr); +err_frame_format: + percpu_stats->rx_dropped++; +} + +static void dpaa2_xsk_set_bp_per_qdbin(struct dpaa2_eth_priv *priv, + struct dpni_pools_cfg *pools_params) +{ + int curr_bp = 0, i, j; + + pools_params->pool_options = DPNI_POOL_ASSOC_QDBIN; + for (i = 0; i < priv->num_bps; i++) { + for (j = 0; j < priv->num_channels; j++) + if (priv->bp[i] == priv->channel[j]->bp) + pools_params->pools[curr_bp].priority_mask |= (1 << j); + if (!pools_params->pools[curr_bp].priority_mask) + continue; + + pools_params->pools[curr_bp].dpbp_id = priv->bp[i]->bpid; + pools_params->pools[curr_bp].buffer_size = priv->rx_buf_size; + pools_params->pools[curr_bp++].backup_pool = 0; + } + pools_params->num_dpbp = curr_bp; +} + +static int dpaa2_xsk_disable_pool(struct net_device *dev, u16 qid) +{ + struct xsk_buff_pool *pool = xsk_get_pool_from_qid(dev, qid); + struct dpaa2_eth_priv *priv = netdev_priv(dev); + struct dpni_pools_cfg pools_params = { 0 }; + struct dpaa2_eth_channel *ch; + int err; + bool up; + + ch = priv->channel[qid]; + if (!ch->xsk_pool) + return -EINVAL; + + up = netif_running(dev); + if (up) + dev_close(dev); + + xsk_pool_dma_unmap(pool, 0); + err = xdp_rxq_info_reg_mem_model(&ch->xdp_rxq, + MEM_TYPE_PAGE_ORDER0, NULL); + if (err) + netdev_err(dev, "xdp_rxq_info_reg_mem_model() failed (err = %d)\n", + err); + + dpaa2_eth_free_dpbp(priv, ch->bp); + + ch->xsk_zc = false; + ch->xsk_pool = NULL; + ch->bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX]; + + dpaa2_eth_setup_consume_func(priv, ch, DPAA2_RX_FQ, dpaa2_eth_rx); + + dpaa2_xsk_set_bp_per_qdbin(priv, &pools_params); + err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); + if (err) + netdev_err(dev, "dpni_set_pools() failed\n"); + + if (up) { + err = dev_open(dev, NULL); + if (err) + return err; + } + + return 0; +} + +static int dpaa2_xsk_enable_pool(struct net_device *dev, + struct xsk_buff_pool *pool, + u16 qid) +{ + struct dpaa2_eth_priv *priv = netdev_priv(dev); + struct dpni_pools_cfg pools_params = { 0 }; + struct dpaa2_eth_channel *ch; + int err, err2; + bool up; + + if (priv->dpni_attrs.wriop_version < DPAA2_WRIOP_VERSION(3, 0, 0)) { + netdev_err(dev, "AF_XDP zero-copy not supported on devices < WRIOP(3, 0, 0)\n"); + return -EOPNOTSUPP; + } + + if (priv->dpni_attrs.num_queues > 8) { + netdev_err(dev, "AF_XDP zero-copy not supported on DPNI with more than 8 queues\n"); + return -EOPNOTSUPP; + } + + up = netif_running(dev); + if (up) + dev_close(dev); + + err = xsk_pool_dma_map(pool, priv->net_dev->dev.parent, 0); + if (err) { + netdev_err(dev, "xsk_pool_dma_map() failed (err = %d)\n", + err); + goto err_dma_unmap; + } + + ch = priv->channel[qid]; + err = xdp_rxq_info_reg_mem_model(&ch->xdp_rxq, MEM_TYPE_XSK_BUFF_POOL, NULL); + if (err) { + netdev_err(dev, "xdp_rxq_info_reg_mem_model() failed (err = %d)\n", err); + goto err_mem_model; + } + xsk_pool_set_rxq_info(pool, &ch->xdp_rxq); + + priv->bp[priv->num_bps] = dpaa2_eth_allocate_dpbp(priv); + if (IS_ERR(priv->bp[priv->num_bps])) { + err = PTR_ERR(priv->bp[priv->num_bps]); + goto err_bp_alloc; + } + ch->xsk_zc = true; + ch->xsk_pool = pool; + ch->bp = priv->bp[priv->num_bps++]; + + dpaa2_eth_setup_consume_func(priv, ch, DPAA2_RX_FQ, dpaa2_xsk_rx); + + dpaa2_xsk_set_bp_per_qdbin(priv, &pools_params); + err = dpni_set_pools(priv->mc_io, 0, priv->mc_token,
&pools_params); + if (err) { + netdev_err(dev, "dpni_set_pools() failed\n"); + goto err_set_pools; + } + + if (up) { + err = dev_open(dev, NULL); + if (err) + return err; + } + + return 0; + +err_set_pools: + err2 = dpaa2_xsk_disable_pool(dev, qid); + if (err2) + netdev_err(dev, "dpaa2_xsk_disable_pool() failed %d\n", err2); +err_bp_alloc: + err2 = xdp_rxq_info_reg_mem_model(&priv->channel[qid]->xdp_rxq, + MEM_TYPE_PAGE_ORDER0, NULL); + if (err2) + netdev_err(dev, "xdp_rxq_info_reg_mem_model() failed (err = %d)\n", err2); +err_mem_model: + xsk_pool_dma_unmap(pool, 0); +err_dma_unmap: + if (up) + dev_open(dev, NULL); + + return err; +} + +int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid) +{ + return pool ? dpaa2_xsk_enable_pool(dev, pool, qid) : + dpaa2_xsk_disable_pool(dev, qid); +} + +int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) +{ + struct dpaa2_eth_priv *priv = netdev_priv(dev); + struct dpaa2_eth_channel *ch = priv->channel[qid]; + + if (!priv->link_state.up) + return -ENETDOWN; + + if (!priv->xdp_prog) + return -EINVAL; + + if (!ch->xsk_zc) + return -EINVAL; + + /* We do not have access to a per-channel SW interrupt, so instead we + * schedule a NAPI instance. + */ + if (!napi_if_scheduled_mark_missed(&ch->napi)) + napi_schedule(&ch->napi); + + return 0; +} -- cgit v1.2.3 From 8beef08f4618c9bfab9374fc72930fc6ed70fdc1 Mon Sep 17 00:00:00 2001 From: Steen Hegelund Date: Thu, 20 Oct 2022 15:08:56 +0200 Subject: net: microchip: sparx5: Adding initial VCAP API support This provides the initial VCAP API framework and the Sparx5-specific VCAP implementation. When the Sparx5 Switchdev driver is initialized, it will also initialize its VCAP module, and this hooks up the concrete Sparx5 VCAP model to the VCAP API, so that the VCAP API knows what VCAP instances are available. Signed-off-by: Steen Hegelund Signed-off-by: David S.
Miller --- MAINTAINERS | 1 + drivers/net/ethernet/microchip/Kconfig | 1 + drivers/net/ethernet/microchip/sparx5/Kconfig | 1 + drivers/net/ethernet/microchip/sparx5/Makefile | 8 +- .../net/ethernet/microchip/sparx5/sparx5_main.c | 9 + .../net/ethernet/microchip/sparx5/sparx5_main.h | 6 + .../ethernet/microchip/sparx5/sparx5_vcap_impl.c | 41 +++ drivers/net/ethernet/microchip/vcap/Kconfig | 39 +++ drivers/net/ethernet/microchip/vcap/vcap_ag_api.h | 326 +++++++++++++++++++++ drivers/net/ethernet/microchip/vcap/vcap_api.h | 269 +++++++++++++++++ 10 files changed, 699 insertions(+), 2 deletions(-) create mode 100644 drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c create mode 100644 drivers/net/ethernet/microchip/vcap/Kconfig create mode 100644 drivers/net/ethernet/microchip/vcap/vcap_ag_api.h create mode 100644 drivers/net/ethernet/microchip/vcap/vcap_api.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 479a15d86a91..45c79261e355 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2439,6 +2439,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported T: git git://github.com/microchip-ung/linux-upstream.git F: arch/arm64/boot/dts/microchip/ +F: drivers/net/ethernet/microchip/vcap/ F: drivers/pinctrl/pinctrl-microchip-sgpio.c N: sparx5 diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig index ed7a35c3ceac..24c994baad13 100644 --- a/drivers/net/ethernet/microchip/Kconfig +++ b/drivers/net/ethernet/microchip/Kconfig @@ -57,5 +57,6 @@ config LAN743X source "drivers/net/ethernet/microchip/lan966x/Kconfig" source "drivers/net/ethernet/microchip/sparx5/Kconfig" +source "drivers/net/ethernet/microchip/vcap/Kconfig" endif # NET_VENDOR_MICROCHIP diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig index cc5e48e1bb4c..98e27530a91f 100644 --- a/drivers/net/ethernet/microchip/sparx5/Kconfig +++ b/drivers/net/ethernet/microchip/sparx5/Kconfig @@ -9,5 +9,6 @@ config SPARX5_SWITCH select PHYLINK select PHY_SPARX5_SERDES select RESET_CONTROLLER + select VCAP help This driver supports the Sparx5 network switch device. 
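As an illustration of the intended client flow (a minimal sketch, not taken from the patch): a switchcore driver fills in a struct vcap_control with its platform-specific tables and registers it with the VCAP API via vcap_api_set_client(), both declared in vcap_api.h later in this patch. The sparx5_vcaps, sparx5_vcap_stats and sparx5_vcap_ops names are hypothetical placeholders for such client-supplied tables.

/* Hypothetical sketch: attach a client model to the VCAP API. */
static int example_vcap_attach(struct sparx5 *sparx5)
{
	struct vcap_control *ctrl;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	ctrl->vcaps = sparx5_vcaps;		/* client supplied vcap models */
	ctrl->stats = &sparx5_vcap_stats;	/* client supplied vcap stats */
	ctrl->ops = &sparx5_vcap_ops;		/* client supplied operations */
	INIT_LIST_HEAD(&ctrl->list);		/* list of vcap instances */

	sparx5->vcap_ctrl = ctrl;
	return vcap_api_set_client(ctrl);
}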
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile index d1c6ad966747..aa4dadb8a25d 100644 --- a/drivers/net/ethernet/microchip/sparx5/Makefile +++ b/drivers/net/ethernet/microchip/sparx5/Makefile @@ -5,7 +5,11 @@ obj-$(CONFIG_SPARX5_SWITCH) += sparx5-switch.o -sparx5-switch-objs := sparx5_main.o sparx5_packet.o \ +sparx5-switch-y := sparx5_main.o sparx5_packet.o \ sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \ sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \ - sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o + sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o \ + sparx5_vcap_impl.o + +# Provide include files +ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index 62a325e96345..0b70c00c6eaa 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -672,6 +672,14 @@ static int sparx5_start(struct sparx5 *sparx5) sparx5_board_init(sparx5); err = sparx5_register_notifier_blocks(sparx5); + if (err) + return err; + + err = sparx5_vcap_init(sparx5); + if (err) { + sparx5_unregister_notifier_blocks(sparx5); + return err; + } /* Start Frame DMA with fallback to register based INJ/XTR */ err = -ENXIO; @@ -906,6 +914,7 @@ static int mchp_sparx5_remove(struct platform_device *pdev) sparx5_ptp_deinit(sparx5); sparx5_fdma_stop(sparx5); sparx5_cleanup_ports(sparx5); + sparx5_vcap_destroy(sparx5); /* Unregister netdevs */ sparx5_unregister_notifier_blocks(sparx5); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h index 7a83222caa73..2ab22a7b799e 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h @@ -288,6 +288,8 @@ struct sparx5 { struct mutex ptp_lock; /* lock for ptp interface state */ u16 ptp_skbs; int ptp_irq; + /* VCAP */ + struct vcap_control *vcap_ctrl; /* PGID allocation map */ u8 pgid_map[PGID_TABLE_SIZE]; }; @@ -382,6 +384,10 @@ void sparx5_ptp_txtstamp_release(struct sparx5_port *port, struct sk_buff *skb); irqreturn_t sparx5_ptp_irq_handler(int irq, void *args); +/* sparx5_vcap_impl.c */ +int sparx5_vcap_init(struct sparx5 *sparx5); +void sparx5_vcap_destroy(struct sparx5 *sparx5); + /* sparx5_pgid.c */ enum sparx5_pgid_type { SPX5_PGID_FREE, diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c new file mode 100644 index 000000000000..8df7cba77a28 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver VCAP implementation + * + * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries. 
+ * + * The Sparx5 Chip Register Model can be browsed at this location: + * https://github.com/microchip-ung/sparx-5_reginfo + */ + +#include +#include + +#include "vcap_api.h" +#include "sparx5_main_regs.h" +#include "sparx5_main.h" + +/* Allocate a vcap control and vcap instances and configure the system */ +int sparx5_vcap_init(struct sparx5 *sparx5) +{ + struct vcap_control *ctrl; + + /* Create a VCAP control instance that owns the platform specific VCAP + * model with VCAP instances and information about keysets, keys, + * actionsets and actions + */ + ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) + return -ENOMEM; + + sparx5->vcap_ctrl = ctrl; + + return 0; +} + +void sparx5_vcap_destroy(struct sparx5 *sparx5) +{ + if (!sparx5->vcap_ctrl) + return; + + kfree(sparx5->vcap_ctrl); +} diff --git a/drivers/net/ethernet/microchip/vcap/Kconfig b/drivers/net/ethernet/microchip/vcap/Kconfig new file mode 100644 index 000000000000..a78cbc6ce6bb --- /dev/null +++ b/drivers/net/ethernet/microchip/vcap/Kconfig @@ -0,0 +1,39 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Microchip VCAP API configuration +# + +if NET_VENDOR_MICROCHIP + +config VCAP + bool "VCAP (Versatile Content-Aware Processor) library" + help + Provides the basic VCAP functionality for multiple Microchip switchcores + + A VCAP is essentially a TCAM with rules consisting of + + - Programmable key fields + - Programmable action fields + - A counter (which may be only one bit wide) + + Besides this each VCAP has: + + - A number of lookups + - A keyset configuration per port per lookup + + The VCAP implementation provides switchcore independent handling of rules + and supports: + + - Creating and deleting rules + - Updating and getting rules + + The platform specific configuration as well as the platform specific model + of the VCAP instances are attached to the VCAP API and a client can then + access rules via the API in a platform independent way, with the + limitations that each VCAP has in terms of its supported keys and actions. + + Different switchcores will have different VCAP instances with different + characteristics. Look in the datasheet for the VCAP specifications for the + specific switchcore. + +endif # NET_VENDOR_MICROCHIP diff --git a/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h new file mode 100644 index 000000000000..804d57b9b60a --- /dev/null +++ b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h @@ -0,0 +1,326 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries. + * Microchip VCAP API + */ + +/* This file is autogenerated by cml-utils 2022-10-13 10:04:41 +0200. 
* Commit ID: fd7cafd175899f0672c73afb3a30fc872500ae86 + */ + +#ifndef __VCAP_AG_API__ +#define __VCAP_AG_API__ + +enum vcap_type { + VCAP_TYPE_IS2, + VCAP_TYPE_MAX +}; + +/* Keyfieldset names with origin information */ +enum vcap_keyfield_set { + VCAP_KFS_NO_VALUE, /* initial value */ + VCAP_KFS_ARP, /* sparx5 is2 X6 */ + VCAP_KFS_IP4_OTHER, /* sparx5 is2 X6 */ + VCAP_KFS_IP4_TCP_UDP, /* sparx5 is2 X6 */ + VCAP_KFS_IP6_STD, /* sparx5 is2 X6 */ + VCAP_KFS_IP_7TUPLE, /* sparx5 is2 X12 */ + VCAP_KFS_MAC_ETYPE, /* sparx5 is2 X6 */ +}; + +/* List of keyfields with description + * + * Keys ending in _IS are booleans derived from frame data + * Keys ending in _CLS are classified frame data + * + * VCAP_KF_8021Q_DEI_CLS: W1, sparx5: is2 + * Classified DEI + * VCAP_KF_8021Q_PCP_CLS: W3, sparx5: is2 + * Classified PCP + * VCAP_KF_8021Q_VID_CLS: W13, sparx5: is2 + * Classified VID + * VCAP_KF_8021Q_VLAN_TAGGED_IS: W1, sparx5: is2 + * Sparx5: Set if frame was received with a VLAN tag, LAN966x: Set if frame has + * one or more Q-tags. Independent of port VLAN awareness + * VCAP_KF_ARP_ADDR_SPACE_OK_IS: W1, sparx5: is2 + * Set if hardware address is Ethernet + * VCAP_KF_ARP_LEN_OK_IS: W1, sparx5: is2 + * Set if hardware address length = 6 (Ethernet) and IP address length = 4 (IP). + * VCAP_KF_ARP_OPCODE: W2, sparx5: is2 + * ARP opcode + * VCAP_KF_ARP_OPCODE_UNKNOWN_IS: W1, sparx5: is2 + * Set if not one of the codes defined in VCAP_KF_ARP_OPCODE + * VCAP_KF_ARP_PROTO_SPACE_OK_IS: W1, sparx5: is2 + * Set if protocol address space is 0x0800 + * VCAP_KF_ARP_SENDER_MATCH_IS: W1, sparx5: is2 + * Sender Hardware Address = SMAC (ARP) + * VCAP_KF_ARP_TGT_MATCH_IS: W1, sparx5: is2 + * Target Hardware Address = SMAC (RARP) + * VCAP_KF_ETYPE: W16, sparx5: is2 + * Ethernet type + * VCAP_KF_ETYPE_LEN_IS: W1, sparx5: is2 + * Set if frame has EtherType >= 0x600 + * VCAP_KF_IF_IGR_PORT_MASK: sparx5 is2 W32, sparx5 is2 W65 + * Ingress port mask, one bit per port/erleg + * VCAP_KF_IF_IGR_PORT_MASK_L3: W1, sparx5: is2 + * If set, IF_IGR_PORT_MASK, IF_IGR_PORT_MASK_RNG, and IF_IGR_PORT_MASK_SEL are + * used to specify L3 interfaces + * VCAP_KF_IF_IGR_PORT_MASK_RNG: W4, sparx5: is2 + * Range selector for IF_IGR_PORT_MASK. Specifies which group of 32 ports are + * available in IF_IGR_PORT_MASK + * VCAP_KF_IF_IGR_PORT_MASK_SEL: W2, sparx5: is2 + * Mode selector for IF_IGR_PORT_MASK, applicable when IF_IGR_PORT_MASK_L3 == 0. + * Mapping: 0: DEFAULT 1: LOOPBACK 2: MASQUERADE 3: CPU_VD + * VCAP_KF_IP4_IS: W1, sparx5: is2 + * Set if frame has EtherType = 0x800 and IP version = 4 + * VCAP_KF_ISDX_CLS: W12, sparx5: is2 + * Classified ISDX + * VCAP_KF_ISDX_GT0_IS: W1, sparx5: is2 + * Set if classified ISDX > 0 + * VCAP_KF_L2_BC_IS: W1, sparx5: is2 + * Set if frame's destination MAC address is the broadcast address + * (FF-FF-FF-FF-FF-FF). + * VCAP_KF_L2_DMAC: W48, sparx5: is2 + * Destination MAC address + * VCAP_KF_L2_FWD_IS: W1, sparx5: is2 + * Set if the frame is allowed to be forwarded to front ports + * VCAP_KF_L2_MC_IS: W1, sparx5: is2 + * Set if frame's destination MAC address is a multicast address (bit 40 = 1). + * VCAP_KF_L2_PAYLOAD_ETYPE: W64, sparx5: is2 + * Byte 0-7 of L2 payload after Type/Len field and overloading for OAM + * VCAP_KF_L2_SMAC: W48, sparx5: is2 + * Source MAC address + * VCAP_KF_L3_DIP_EQ_SIP_IS: W1, sparx5: is2 + * Set if Src IP matches Dst IP address + * VCAP_KF_L3_DST_IS: W1, sparx5: is2 + * Set if lookup is done for egress router leg + * VCAP_KF_L3_FRAGMENT_TYPE: W2, sparx5: is2 + * L3 Fragmentation type (none, initial, suspicious, valid follow up) + * VCAP_KF_L3_FRAG_INVLD_L4_LEN: W1, sparx5: is2 + * Set if frame's L4 length is less than ANA_CL:COMMON:CLM_FRAGMENT_CFG.L4_MIN_LEN + * VCAP_KF_L3_IP4_DIP: W32, sparx5: is2 + * Destination IPv4 Address + * VCAP_KF_L3_IP4_SIP: W32, sparx5: is2 + * Source IPv4 Address + * VCAP_KF_L3_IP6_DIP: W128, sparx5: is2 + * Sparx5: Full IPv6 DIP, LAN966x: Either Full IPv6 DIP or a subset depending on + * frame type + * VCAP_KF_L3_IP6_SIP: W128, sparx5: is2 + * Sparx5: Full IPv6 SIP, LAN966x: Either Full IPv6 SIP or a subset depending on + * frame type + * VCAP_KF_L3_IP_PROTO: W8, sparx5: is2 + * IPv4 frames: IP protocol. IPv6 frames: Next header, same as for IPV4 + * VCAP_KF_L3_OPTIONS_IS: W1, sparx5: is2 + * Set if IPv4 frame contains options (IP len > 5) + * VCAP_KF_L3_PAYLOAD: sparx5 is2 W96, sparx5 is2 W40 + * Sparx5: Payload bytes after IP header. IPv4: IPv4 options are not parsed so + * payload is always taken 20 bytes after the start of the IPv4 header, LAN966x: + * Bytes 0-6 after IP header + * VCAP_KF_L3_RT_IS: W1, sparx5: is2 + * Set if frame has hit a router leg + * VCAP_KF_L3_TOS: W8, sparx5: is2 + * Sparx5: Frame's IPv4/IPv6 DSCP and ECN fields, LAN966x: IP TOS field + * VCAP_KF_L3_TTL_GT0: W1, sparx5: is2 + * Set if IPv4 TTL / IPv6 hop limit is greater than 0 + * VCAP_KF_L4_ACK: W1, sparx5: is2 + * Sparx5 and LAN966x: TCP flag ACK, LAN966x only: PTP over UDP: flagField bit 2 + * (unicastFlag) + * VCAP_KF_L4_DPORT: W16, sparx5: is2 + * Sparx5: TCP/UDP destination port. Overloading for IP_7TUPLE: Non-TCP/UDP IP + * frames: L4_DPORT = L3_IP_PROTO, LAN966x: TCP/UDP destination port + * VCAP_KF_L4_FIN: W1, sparx5: is2 + * TCP flag FIN, LAN966x: TCP flag FIN, and for PTP over UDP: messageType bit 1 + * VCAP_KF_L4_PAYLOAD: W64, sparx5: is2 + * Payload bytes after TCP/UDP header. Overloading for IP_7TUPLE: Non TCP/UDP + * frames: Payload bytes 0-7 after IP header. IPv4 options are not parsed so + * payload is always taken 20 bytes after the start of the IPv4 header for non + * TCP/UDP IPv4 frames + * VCAP_KF_L4_PSH: W1, sparx5: is2 + * Sparx5: TCP flag PSH, LAN966x: TCP: TCP flag PSH. PTP over UDP: flagField bit + * 1 (twoStepFlag) + * VCAP_KF_L4_RNG: W16, sparx5: is2 + * Range checker bitmask (one for each range checker). Input into range checkers + * is taken from classified results (VID, DSCP) and frame (SPORT, DPORT, ETYPE, + * outer VID, inner VID) + * VCAP_KF_L4_RST: W1, sparx5: is2 + * Sparx5: TCP flag RST, LAN966x: TCP: TCP flag RST. PTP over UDP: messageType + * bit 3 + * VCAP_KF_L4_SEQUENCE_EQ0_IS: W1, sparx5: is2 + * Set if TCP sequence number is 0, LAN966x: Overlayed with PTP over UDP: + * messageType bit 0 + * VCAP_KF_L4_SPORT: W16, sparx5: is2 + * TCP/UDP source port + * VCAP_KF_L4_SPORT_EQ_DPORT_IS: W1, sparx5: is2 + * Set if UDP or TCP source port equals UDP or TCP destination port + * VCAP_KF_L4_SYN: W1, sparx5: is2 + * Sparx5: TCP flag SYN, LAN966x: TCP: TCP flag SYN. PTP over UDP: messageType + * bit 2 + * VCAP_KF_L4_URG: W1, sparx5: is2 + * Sparx5: TCP flag URG, LAN966x: TCP: TCP flag URG. PTP over UDP: flagField bit + * 7 (reserved) + * VCAP_KF_LOOKUP_FIRST_IS: W1, sparx5: is2 + * Selects between entries relevant for first and second lookup. Set for first + * lookup, cleared for second lookup. + * VCAP_KF_LOOKUP_PAG: W8, sparx5: is2 + * Classified Policy Association Group: chains rules from IS1/CLM to IS2 + * VCAP_KF_OAM_CCM_CNTS_EQ0: W1, sparx5: is2 + * Dual-ended loss measurement counters in CCM frames are all zero + * VCAP_KF_OAM_Y1731_IS: W1, sparx5: is2 + * Set if frame's EtherType = 0x8902 + * VCAP_KF_TCP_IS: W1, sparx5: is2 + * Set if frame is IPv4 TCP frame (IP protocol = 6) or IPv6 TCP frames (Next + * header = 6) + * VCAP_KF_TCP_UDP_IS: W1, sparx5: is2 + * Set if frame is IPv4/IPv6 TCP or UDP frame (IP protocol/next header equals 6 + * or 17) + * VCAP_KF_TYPE: sparx5 is2 W4, sparx5 is2 W2 + * Keyset type id - set by the API + */ + +/* Keyfield names */ +enum vcap_key_field { + VCAP_KF_NO_VALUE, /* initial value */ + VCAP_KF_8021Q_DEI_CLS, + VCAP_KF_8021Q_PCP_CLS, + VCAP_KF_8021Q_VID_CLS, + VCAP_KF_8021Q_VLAN_TAGGED_IS, + VCAP_KF_ARP_ADDR_SPACE_OK_IS, + VCAP_KF_ARP_LEN_OK_IS, + VCAP_KF_ARP_OPCODE, + VCAP_KF_ARP_OPCODE_UNKNOWN_IS, + VCAP_KF_ARP_PROTO_SPACE_OK_IS, + VCAP_KF_ARP_SENDER_MATCH_IS, + VCAP_KF_ARP_TGT_MATCH_IS, + VCAP_KF_ETYPE, + VCAP_KF_ETYPE_LEN_IS, + VCAP_KF_IF_IGR_PORT_MASK, + VCAP_KF_IF_IGR_PORT_MASK_L3, + VCAP_KF_IF_IGR_PORT_MASK_RNG, + VCAP_KF_IF_IGR_PORT_MASK_SEL, + VCAP_KF_IP4_IS, + VCAP_KF_ISDX_CLS, + VCAP_KF_ISDX_GT0_IS, + VCAP_KF_L2_BC_IS, + VCAP_KF_L2_DMAC, + VCAP_KF_L2_FWD_IS, + VCAP_KF_L2_MC_IS, + VCAP_KF_L2_PAYLOAD_ETYPE, + VCAP_KF_L2_SMAC, + VCAP_KF_L3_DIP_EQ_SIP_IS, + VCAP_KF_L3_DST_IS, + VCAP_KF_L3_FRAGMENT_TYPE, + VCAP_KF_L3_FRAG_INVLD_L4_LEN, + VCAP_KF_L3_IP4_DIP, + VCAP_KF_L3_IP4_SIP, + VCAP_KF_L3_IP6_DIP, + VCAP_KF_L3_IP6_SIP, + VCAP_KF_L3_IP_PROTO, + VCAP_KF_L3_OPTIONS_IS, + VCAP_KF_L3_PAYLOAD, + VCAP_KF_L3_RT_IS, + VCAP_KF_L3_TOS, + VCAP_KF_L3_TTL_GT0, + VCAP_KF_L4_ACK, + VCAP_KF_L4_DPORT, + VCAP_KF_L4_FIN, + VCAP_KF_L4_PAYLOAD, + VCAP_KF_L4_PSH, + VCAP_KF_L4_RNG, + VCAP_KF_L4_RST, + VCAP_KF_L4_SEQUENCE_EQ0_IS, + VCAP_KF_L4_SPORT, + VCAP_KF_L4_SPORT_EQ_DPORT_IS, + VCAP_KF_L4_SYN, + VCAP_KF_L4_URG, + VCAP_KF_LOOKUP_FIRST_IS, + VCAP_KF_LOOKUP_PAG, + VCAP_KF_OAM_CCM_CNTS_EQ0, + VCAP_KF_OAM_Y1731_IS, + VCAP_KF_TCP_IS, + VCAP_KF_TCP_UDP_IS, + VCAP_KF_TYPE, +}; + +/* Actionset names with origin information */ +enum vcap_actionfield_set { + VCAP_AFS_NO_VALUE, /* initial value */ + VCAP_AFS_BASE_TYPE, /* sparx5 is2 X3 */ +}; + +/* List of actionfields with description + * + * VCAP_AF_CNT_ID: W12, sparx5: is2 + * Counter ID, used per lookup to index the 4K frame counters (ANA_ACL:CNT_TBL). + * Multiple VCAP IS2 entries can use the same counter. + * VCAP_AF_CPU_COPY_ENA: W1, sparx5: is2 + * Setting this bit to 1 causes all frames that hit this action to be copied to + * the CPU extraction queue specified in CPU_QUEUE_NUM. + * VCAP_AF_CPU_QUEUE_NUM: W3, sparx5: is2 + * CPU queue number. Used when CPU_COPY_ENA is set. + * VCAP_AF_HIT_ME_ONCE: W1, sparx5: is2 + * Setting this bit to 1 causes the first frame that hits this action where the + * HIT_CNT counter is zero to be copied to the CPU extraction queue specified in + * CPU_QUEUE_NUM. The HIT_CNT counter is then incremented and any frames that + * hit this action later are not copied to the CPU. To re-enable the HIT_ME_ONCE + * functionality, the HIT_CNT counter must be cleared. + * VCAP_AF_IGNORE_PIPELINE_CTRL: W1, sparx5: is2 + * Ignore ingress pipeline control.
This enforces the use of the VCAP IS2 action + * even when the pipeline control has terminated the frame before VCAP IS2. + * VCAP_AF_INTR_ENA: W1, sparx5: is2 + * If set, an interrupt is triggered when this rule is hit + * VCAP_AF_LRN_DIS: W1, sparx5: is2 + * Setting this bit to 1 disables learning of frames hitting this action. + * VCAP_AF_MASK_MODE: W3, sparx5: is2 + * Controls the PORT_MASK use. Sparx5: 0: OR_DSTMASK, 1: AND_VLANMASK, 2: + * REPLACE_PGID, 3: REPLACE_ALL, 4: REDIR_PGID, 5: OR_PGID_MASK, 6: VSTAX, 7: + * Not applicable. LAN966X: 0: No action, 1: Permit/deny (AND), 2: Policy + * forwarding (DMAC lookup), 3: Redirect. The CPU port is untouched by + * MASK_MODE. + * VCAP_AF_MATCH_ID: W16, sparx5: is2 + * Logical ID for the entry. The MATCH_ID is extracted together with the frame + * if the frame is forwarded to the CPU (CPU_COPY_ENA). The result is placed in + * IFH.CL_RSLT. + * VCAP_AF_MATCH_ID_MASK: W16, sparx5: is2 + * Mask used by MATCH_ID. + * VCAP_AF_MIRROR_PROBE: W2, sparx5: is2 + * Mirroring performed according to configuration of a mirror probe. 0: No + * mirroring. 1: Mirror probe 0. 2: Mirror probe 1. 3: Mirror probe 2 + * VCAP_AF_PIPELINE_FORCE_ENA: W1, sparx5: is2 + * If set, use PIPELINE_PT unconditionally and set PIPELINE_ACT = NONE if + * PIPELINE_PT == NONE. Overrules previous settings of pipeline point. + * VCAP_AF_PIPELINE_PT: W5, sparx5: is2 + * Pipeline point used if PIPELINE_FORCE_ENA is set + * VCAP_AF_POLICE_ENA: W1, sparx5: is2 + * Setting this bit to 1 causes frames that hit this action to be policed by the + * ACL policer specified in POLICE_IDX. Only applies to the first lookup. + * VCAP_AF_POLICE_IDX: W6, sparx5: is2 + * Selects VCAP policer used when policing frames (POLICE_ENA) + * VCAP_AF_PORT_MASK: W68, sparx5: is2 + * Port mask applied to the forwarding decision based on MASK_MODE. + * VCAP_AF_RT_DIS: W1, sparx5: is2 + * If set, routing is disallowed. Only applies when IS_INNER_ACL is 0. See also + * IGR_ACL_ENA, EGR_ACL_ENA, and RLEG_STAT_IDX. + */ + +/* Actionfield names */ +enum vcap_action_field { + VCAP_AF_NO_VALUE, /* initial value */ + VCAP_AF_CNT_ID, + VCAP_AF_CPU_COPY_ENA, + VCAP_AF_CPU_QUEUE_NUM, + VCAP_AF_HIT_ME_ONCE, + VCAP_AF_IGNORE_PIPELINE_CTRL, + VCAP_AF_INTR_ENA, + VCAP_AF_LRN_DIS, + VCAP_AF_MASK_MODE, + VCAP_AF_MATCH_ID, + VCAP_AF_MATCH_ID_MASK, + VCAP_AF_MIRROR_PROBE, + VCAP_AF_PIPELINE_FORCE_ENA, + VCAP_AF_PIPELINE_PT, + VCAP_AF_POLICE_ENA, + VCAP_AF_POLICE_IDX, + VCAP_AF_PORT_MASK, + VCAP_AF_RT_DIS, +}; + +#endif /* __VCAP_AG_API__ */ diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.h b/drivers/net/ethernet/microchip/vcap/vcap_api.h new file mode 100644 index 000000000000..4444bf67ebec --- /dev/null +++ b/drivers/net/ethernet/microchip/vcap/vcap_api.h @@ -0,0 +1,269 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries. 
+ * Microchip VCAP API + */ + +#ifndef __VCAP_API__ +#define __VCAP_API__ + +#include +#include +#include + +/* Use the generated API model */ +#include "vcap_ag_api.h" + +#define VCAP_CID_LOOKUP_SIZE 100000 /* Chains in a lookup */ +#define VCAP_CID_INGRESS_L0 1000000 /* Ingress Stage 1 Lookup 0 */ +#define VCAP_CID_INGRESS_L1 1100000 /* Ingress Stage 1 Lookup 1 */ +#define VCAP_CID_INGRESS_L2 1200000 /* Ingress Stage 1 Lookup 2 */ +#define VCAP_CID_INGRESS_L3 1300000 /* Ingress Stage 1 Lookup 3 */ +#define VCAP_CID_INGRESS_L4 1400000 /* Ingress Stage 1 Lookup 4 */ +#define VCAP_CID_INGRESS_L5 1500000 /* Ingress Stage 1 Lookup 5 */ + +#define VCAP_CID_PREROUTING_IPV6 3000000 /* Prerouting Stage */ +#define VCAP_CID_PREROUTING 6000000 /* Prerouting Stage */ + +#define VCAP_CID_INGRESS_STAGE2_L0 8000000 /* Ingress Stage 2 Lookup 0 */ +#define VCAP_CID_INGRESS_STAGE2_L1 8100000 /* Ingress Stage 2 Lookup 1 */ +#define VCAP_CID_INGRESS_STAGE2_L2 8200000 /* Ingress Stage 2 Lookup 2 */ +#define VCAP_CID_INGRESS_STAGE2_L3 8300000 /* Ingress Stage 2 Lookup 3 */ + +#define VCAP_CID_EGRESS_L0 10000000 /* Egress Lookup 0 */ +#define VCAP_CID_EGRESS_L1 10100000 /* Egress Lookup 1 */ + +#define VCAP_CID_EGRESS_STAGE2_L0 20000000 /* Egress Stage 2 Lookup 0 */ +#define VCAP_CID_EGRESS_STAGE2_L1 20100000 /* Egress Stage 2 Lookup 1 */ + +/* Known users of the VCAP API */ +enum vcap_user { + VCAP_USER_PTP, + VCAP_USER_MRP, + VCAP_USER_CFM, + VCAP_USER_VLAN, + VCAP_USER_QOS, + VCAP_USER_VCAP_UTIL, + VCAP_USER_TC, + VCAP_USER_TC_EXTRA, + + /* add new users above here */ + + /* used to define VCAP_USER_MAX below */ + __VCAP_USER_AFTER_LAST, + VCAP_USER_MAX = __VCAP_USER_AFTER_LAST - 1, +}; + +/* VCAP information used for displaying data */ +struct vcap_statistics { + char *name; + int count; + const char * const *keyfield_set_names; + const char * const *actionfield_set_names; + const char * const *keyfield_names; + const char * const *actionfield_names; +}; + +/* VCAP key/action field type, position and width */ +struct vcap_field { + u16 type; + u16 width; + u16 offset; +}; + +/* VCAP keyset or actionset type and width */ +struct vcap_set { + u8 type_id; + u8 sw_per_item; + u8 sw_cnt; +}; + +/* VCAP typegroup position and bitvalue */ +struct vcap_typegroup { + u16 offset; + u16 width; + u16 value; +}; + +/* VCAP model data */ +struct vcap_info { + char *name; /* user-friendly name */ + u16 rows; /* number of rows in instance */ + u16 sw_count; /* maximum subwords used per rule */ + u16 sw_width; /* bits per subword in a keyset */ + u16 sticky_width; /* sticky bits per rule */ + u16 act_width; /* bits per subword in an actionset */ + u16 default_cnt; /* number of default rules */ + u16 require_cnt_dis; /* not used */ + u16 version; /* vcap rtl version */ + const struct vcap_set *keyfield_set; /* keysets */ + int keyfield_set_size; /* number of keysets */ + const struct vcap_set *actionfield_set; /* actionsets */ + int actionfield_set_size; /* number of actionsets */ + /* map of keys per keyset */ + const struct vcap_field **keyfield_set_map; + /* number of entries in the above map */ + int *keyfield_set_map_size; + /* map of actions per actionset */ + const struct vcap_field **actionfield_set_map; + /* number of entries in the above map */ + int *actionfield_set_map_size; + /* map of keyset typegroups per subword size */ + const struct vcap_typegroup **keyfield_set_typegroups; + /* map of actionset typegroups per subword size */ + const struct vcap_typegroup **actionfield_set_typegroups; +}; + +enum
vcap_field_type { + VCAP_FIELD_BIT, + VCAP_FIELD_U32, + VCAP_FIELD_U48, + VCAP_FIELD_U56, + VCAP_FIELD_U64, + VCAP_FIELD_U72, + VCAP_FIELD_U112, + VCAP_FIELD_U128, +}; + +/* VCAP rule data towards the VCAP cache */ +struct vcap_cache_data { + u32 *keystream; + u32 *maskstream; + u32 *actionstream; + u32 counter; + bool sticky; +}; + +/* Selects which part of the rule must be updated */ +enum vcap_selection { + VCAP_SEL_ENTRY = 0x01, + VCAP_SEL_ACTION = 0x02, + VCAP_SEL_COUNTER = 0x04, + VCAP_SEL_ALL = 0xff, +}; + +/* Commands towards the VCAP cache */ +enum vcap_command { + VCAP_CMD_WRITE = 0, + VCAP_CMD_READ = 1, + VCAP_CMD_MOVE_DOWN = 2, + VCAP_CMD_MOVE_UP = 3, + VCAP_CMD_INITIALIZE = 4, +}; + +enum vcap_rule_error { + VCAP_ERR_NONE = 0, /* No known error */ + VCAP_ERR_NO_ADMIN, /* No admin instance */ + VCAP_ERR_NO_NETDEV, /* No netdev instance */ + VCAP_ERR_NO_KEYSET_MATCH, /* No keyset matched the rule keys */ + VCAP_ERR_NO_ACTIONSET_MATCH, /* No actionset matched the rule actions */ + VCAP_ERR_NO_PORT_KEYSET_MATCH, /* No port keyset matched the rule keys */ +}; + +/* Administration of each VCAP instance */ +struct vcap_admin { + struct list_head list; /* for insertion in vcap_control */ + struct list_head rules; /* list of rules */ + enum vcap_type vtype; /* type of vcap */ + int vinst; /* instance number within the same type */ + int first_cid; /* first chain id in this vcap */ + int last_cid; /* last chain id in this vcap */ + int tgt_inst; /* hardware instance number */ + int lookups; /* number of lookups in this vcap type */ + int lookups_per_instance; /* number of lookups in this instance */ + int last_valid_addr; /* top of address range to be used */ + int first_valid_addr; /* bottom of address range to be used */ + int last_used_addr; /* address of lowest added rule */ + bool w32be; /* vcap uses "32bit-word big-endian" encoding */ + struct vcap_cache_data cache; /* encoded rule data */ +}; + +/* Client supplied VCAP rule data */ +struct vcap_rule { + int vcap_chain_id; /* chain used for this rule */ + enum vcap_user user; /* rule owner */ + u16 priority; + u32 id; /* vcap rule id, must be unique, 0 will auto-generate a value */ + u64 cookie; /* used by the client to identify the rule */ + struct list_head keyfields; /* list of vcap_client_keyfield */ + struct list_head actionfields; /* list of vcap_client_actionfield */ + enum vcap_keyfield_set keyset; /* keyset used: may be derived from fields */ + enum vcap_actionfield_set actionset; /* actionset used: may be derived from fields */ + enum vcap_rule_error exterr; /* extended error - used by TC */ + u64 client; /* space for client defined data */ +}; + +/* List of keysets */ +struct vcap_keyset_list { + int max; /* size of the keyset list */ + int cnt; /* count of keysets actually in the list */ + enum vcap_keyfield_set *keysets; /* the list of keysets */ +}; + +/* Client supplied VCAP callback operations */ +struct vcap_operations { + /* validate port keyset operation */ + enum vcap_keyfield_set (*validate_keyset) + (struct net_device *ndev, + struct vcap_admin *admin, + struct vcap_rule *rule, + struct vcap_keyset_list *kslist, + u16 l3_proto); + /* add default rule fields for the selected keyset operations */ + void (*add_default_fields) + (struct net_device *ndev, + struct vcap_admin *admin, + struct vcap_rule *rule); + /* cache operations */ + void (*cache_erase) + (struct vcap_admin *admin); + void (*cache_write) + (struct net_device *ndev, + struct vcap_admin *admin, + enum vcap_selection sel, + u32 idx, u32 count); + 
void (*cache_read) + (struct net_device *ndev, + struct vcap_admin *admin, + enum vcap_selection sel, + u32 idx, + u32 count); + /* block operations */ + void (*init) + (struct net_device *ndev, + struct vcap_admin *admin, + u32 addr, + u32 count); + void (*update) + (struct net_device *ndev, + struct vcap_admin *admin, + enum vcap_command cmd, + enum vcap_selection sel, + u32 addr); + void (*move) + (struct net_device *ndev, + struct vcap_admin *admin, + u32 addr, + int offset, + int count); + /* informational */ + int (*port_info) + (struct net_device *ndev, + enum vcap_type vtype, + int (*pf)(void *out, int arg, const char *fmt, ...), + void *out, + int arg); +}; + +/* VCAP API Client control interface */ +struct vcap_control { + u32 rule_id; /* last used rule id (unique across VCAP instances) */ + struct vcap_operations *ops; /* client supplied operations */ + const struct vcap_info *vcaps; /* client supplied vcap models */ + const struct vcap_statistics *stats; /* client supplied vcap stats */ + struct list_head list; /* list of vcap instances */ +}; + +/* Set client control interface on the API */ +int vcap_api_set_client(struct vcap_control *vctrl); + +#endif /* __VCAP_API__ */ -- cgit v1.2.3 From 70479a40954cf353e87a486997a3477108c75aa9 Mon Sep 17 00:00:00 2001 From: Frank Date: Fri, 28 Oct 2022 17:26:21 +0800 Subject: net: phy: Add driver for Motorcomm yt8521 gigabit ethernet phy Add a driver for the Motorcomm YT8521 gigabit Ethernet PHY. We have verified the driver on the StarFive VisionFive development board, which is developed by Shanghai StarFive Technology Co., Ltd. On the board, the YT8521 PHY works in UTP mode with an RGMII interface and supports 1000M/100M/10M speeds and WOL (magic packet). Signed-off-by: Frank Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- MAINTAINERS | 1 + drivers/net/phy/Kconfig | 2 +- drivers/net/phy/motorcomm.c | 1635 ++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 1635 insertions(+), 3 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 377aedb3808b..7d44c5a6607d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13943,6 +13943,7 @@ F: include/uapi/linux/meye.h MOTORCOMM PHY DRIVER M: Peter Geis +M: Frank L: netdev@vger.kernel.org S: Maintained F: drivers/net/phy/motorcomm.c diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index c57a0262fb64..040c8bf6d05b 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -260,7 +260,7 @@ config MOTORCOMM_PHY tristate "Motorcomm PHYs" help Enables support for Motorcomm network PHYs. - Currently supports the YT8511 gigabit PHY. + Currently supports the YT8511, YT8521 Gigabit Ethernet PHYs. config NATIONAL_PHY tristate "National Semiconductor PHYs" diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c index 7e6ac2c5e27e..c7593f224177 100644 --- a/drivers/net/phy/motorcomm.c +++ b/drivers/net/phy/motorcomm.c @@ -1,15 +1,106 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * Driver for Motorcomm PHYs + * Motorcomm 8511/8521 PHY driver.
* * Author: Peter Geis + * Author: Frank */ +#include #include #include #include #define PHY_ID_YT8511 0x0000010a +#define PHY_ID_YT8521 0x0000011A + +/* YT8521 Register Overview + * UTP Register space | FIBER Register space + * ------------------------------------------------------------ + * | UTP MII | FIBER MII | + * | UTP MMD | | + * | UTP Extended | FIBER Extended | + * ------------------------------------------------------------ + * | Common Extended | + * ------------------------------------------------------------ + */ + +/* 0x10 ~ 0x15 , 0x1E and 0x1F are common MII registers of yt phy */ + +/* Specific Function Control Register */ +#define YTPHY_SPECIFIC_FUNCTION_CONTROL_REG 0x10 + +/* 2b00 Manual MDI configuration + * 2b01 Manual MDIX configuration + * 2b10 Reserved + * 2b11 Enable automatic crossover for all modes *default* + */ +#define YTPHY_SFCR_MDI_CROSSOVER_MODE_MASK (BIT(6) | BIT(5)) +#define YTPHY_SFCR_CROSSOVER_EN BIT(3) +#define YTPHY_SFCR_SQE_TEST_EN BIT(2) +#define YTPHY_SFCR_POLARITY_REVERSAL_EN BIT(1) +#define YTPHY_SFCR_JABBER_DIS BIT(0) + +/* Specific Status Register */ +#define YTPHY_SPECIFIC_STATUS_REG 0x11 +#define YTPHY_SSR_SPEED_MODE_OFFSET 14 + +#define YTPHY_SSR_SPEED_MODE_MASK (BIT(15) | BIT(14)) +#define YTPHY_SSR_SPEED_10M 0x0 +#define YTPHY_SSR_SPEED_100M 0x1 +#define YTPHY_SSR_SPEED_1000M 0x2 +#define YTPHY_SSR_DUPLEX_OFFSET 13 +#define YTPHY_SSR_DUPLEX BIT(13) +#define YTPHY_SSR_PAGE_RECEIVED BIT(12) +#define YTPHY_SSR_SPEED_DUPLEX_RESOLVED BIT(11) +#define YTPHY_SSR_LINK BIT(10) +#define YTPHY_SSR_MDIX_CROSSOVER BIT(6) +#define YTPHY_SSR_DOWNGRADE BIT(5) +#define YTPHY_SSR_TRANSMIT_PAUSE BIT(3) +#define YTPHY_SSR_RECEIVE_PAUSE BIT(2) +#define YTPHY_SSR_POLARITY BIT(1) +#define YTPHY_SSR_JABBER BIT(0) + +/* Interrupt enable Register */ +#define YTPHY_INTERRUPT_ENABLE_REG 0x12 +#define YTPHY_IER_WOL BIT(6) + +/* Interrupt Status Register */ +#define YTPHY_INTERRUPT_STATUS_REG 0x13 +#define YTPHY_ISR_AUTONEG_ERR BIT(15) +#define YTPHY_ISR_SPEED_CHANGED BIT(14) +#define YTPHY_ISR_DUPLEX_CHANGED BIT(13) +#define YTPHY_ISR_PAGE_RECEIVED BIT(12) +#define YTPHY_ISR_LINK_FAILED BIT(11) +#define YTPHY_ISR_LINK_SUCCESSED BIT(10) +#define YTPHY_ISR_WOL BIT(6) +#define YTPHY_ISR_WIRESPEED_DOWNGRADE BIT(5) +#define YTPHY_ISR_SERDES_LINK_FAILED BIT(3) +#define YTPHY_ISR_SERDES_LINK_SUCCESSED BIT(2) +#define YTPHY_ISR_POLARITY_CHANGED BIT(1) +#define YTPHY_ISR_JABBER_HAPPENED BIT(0) + +/* Speed Auto Downgrade Control Register */ +#define YTPHY_SPEED_AUTO_DOWNGRADE_CONTROL_REG 0x14 +#define YTPHY_SADCR_SPEED_DOWNGRADE_EN BIT(5) + +/* If these bits are set to 3, the PHY attempts five times ( 3(set value) + + * additional 2) before downgrading, default 0x3 + */ +#define YTPHY_SADCR_SPEED_RETRY_LIMIT (0x3 << 2) + +/* Rx Error Counter Register */ +#define YTPHY_RX_ERROR_COUNTER_REG 0x15 + +/* Extended Register's Address Offset Register */ +#define YTPHY_PAGE_SELECT 0x1E + +/* Extended Register's Data Register */ +#define YTPHY_PAGE_DATA 0x1F + +/* FIBER Auto-Negotiation link partner ability */ +#define YTPHY_FLPA_PAUSE (0x3 << 7) +#define YTPHY_FLPA_ASYM_PAUSE (0x2 << 7) #define YT8511_PAGE_SELECT 0x1e #define YT8511_PAGE 0x1f @@ -38,6 +129,352 @@ #define YT8511_DELAY_FE_TX_EN (0xf << 12) #define YT8511_DELAY_FE_TX_DIS (0x2 << 12) +/* Extended register is different from MMD Register and MII Register. + * We can use ytphy_read_ext/ytphy_write_ext/ytphy_modify_ext function to + * operate extended register. 
+ * Extended Register start + */ + +/* Phy gmii clock gating Register */ +#define YT8521_CLOCK_GATING_REG 0xC +#define YT8521_CGR_RX_CLK_EN BIT(12) + +#define YT8521_EXTREG_SLEEP_CONTROL1_REG 0x27 +#define YT8521_ESC1R_SLEEP_SW BIT(15) +#define YT8521_ESC1R_PLLON_SLP BIT(14) + +/* Phy fiber Link timer cfg2 Register */ +#define YT8521_LINK_TIMER_CFG2_REG 0xA5 +#define YT8521_LTCR_EN_AUTOSEN BIT(15) + +/* 0xA000, 0xA001, 0xA003 ,and 0xA006 ~ 0xA00A are common ext registers + * of yt8521 phy. There is no need to switch reg space when operating these + * registers. + */ + +#define YT8521_REG_SPACE_SELECT_REG 0xA000 +#define YT8521_RSSR_SPACE_MASK BIT(1) +#define YT8521_RSSR_FIBER_SPACE (0x1 << 1) +#define YT8521_RSSR_UTP_SPACE (0x0 << 1) +#define YT8521_RSSR_TO_BE_ARBITRATED (0xFF) + +#define YT8521_CHIP_CONFIG_REG 0xA001 +#define YT8521_CCR_SW_RST BIT(15) + +#define YT8521_CCR_MODE_SEL_MASK (BIT(2) | BIT(1) | BIT(0)) +#define YT8521_CCR_MODE_UTP_TO_RGMII 0 +#define YT8521_CCR_MODE_FIBER_TO_RGMII 1 +#define YT8521_CCR_MODE_UTP_FIBER_TO_RGMII 2 +#define YT8521_CCR_MODE_UTP_TO_SGMII 3 +#define YT8521_CCR_MODE_SGPHY_TO_RGMAC 4 +#define YT8521_CCR_MODE_SGMAC_TO_RGPHY 5 +#define YT8521_CCR_MODE_UTP_TO_FIBER_AUTO 6 +#define YT8521_CCR_MODE_UTP_TO_FIBER_FORCE 7 + +/* 3 phy polling modes,poll mode combines utp and fiber mode*/ +#define YT8521_MODE_FIBER 0x1 +#define YT8521_MODE_UTP 0x2 +#define YT8521_MODE_POLL 0x3 + +#define YT8521_RGMII_CONFIG1_REG 0xA003 + +/* TX Gig-E Delay is bits 3:0, default 0x1 + * TX Fast-E Delay is bits 7:4, default 0xf + * RX Delay is bits 13:10, default 0x0 + * Delay = 150ps * N + * On = 2250ps, off = 0ps + */ +#define YT8521_RC1R_RX_DELAY_MASK (0xF << 10) +#define YT8521_RC1R_RX_DELAY_EN (0xF << 10) +#define YT8521_RC1R_RX_DELAY_DIS (0x0 << 10) +#define YT8521_RC1R_FE_TX_DELAY_MASK (0xF << 4) +#define YT8521_RC1R_FE_TX_DELAY_EN (0xF << 4) +#define YT8521_RC1R_FE_TX_DELAY_DIS (0x0 << 4) +#define YT8521_RC1R_GE_TX_DELAY_MASK (0xF << 0) +#define YT8521_RC1R_GE_TX_DELAY_EN (0xF << 0) +#define YT8521_RC1R_GE_TX_DELAY_DIS (0x0 << 0) + +#define YTPHY_MISC_CONFIG_REG 0xA006 +#define YTPHY_MCR_FIBER_SPEED_MASK BIT(0) +#define YTPHY_MCR_FIBER_1000BX (0x1 << 0) +#define YTPHY_MCR_FIBER_100FX (0x0 << 0) + +/* WOL MAC ADDR: MACADDR2(highest), MACADDR1(middle), MACADDR0(lowest) */ +#define YTPHY_WOL_MACADDR2_REG 0xA007 +#define YTPHY_WOL_MACADDR1_REG 0xA008 +#define YTPHY_WOL_MACADDR0_REG 0xA009 + +#define YTPHY_WOL_CONFIG_REG 0xA00A +#define YTPHY_WCR_INTR_SEL BIT(6) +#define YTPHY_WCR_ENABLE BIT(3) + +/* 2b00 84ms + * 2b01 168ms *default* + * 2b10 336ms + * 2b11 672ms + */ +#define YTPHY_WCR_PULSE_WIDTH_MASK (BIT(2) | BIT(1)) +#define YTPHY_WCR_PULSE_WIDTH_672MS (BIT(2) | BIT(1)) + +/* 1b0 Interrupt and WOL events is level triggered and active LOW *default* + * 1b1 Interrupt and WOL events is pulse triggered and active LOW + */ +#define YTPHY_WCR_TYPE_PULSE BIT(0) + +/* Extended Register end */ + +struct yt8521_priv { + /* combo_advertising is used for case of YT8521 in combo mode, + * this means that yt8521 may work in utp or fiber mode which depends + * on which media is connected (YT8521_RSSR_TO_BE_ARBITRATED). 
+ */ + __ETHTOOL_DECLARE_LINK_MODE_MASK(combo_advertising); + + /* YT8521_MODE_FIBER / YT8521_MODE_UTP / YT8521_MODE_POLL*/ + u8 polling_mode; + u8 strap_mode; /* 8 working modes */ + /* current reg page of yt8521 phy: + * YT8521_RSSR_UTP_SPACE + * YT8521_RSSR_FIBER_SPACE + * YT8521_RSSR_TO_BE_ARBITRATED + */ + u8 reg_page; +}; + +/** + * ytphy_read_ext() - read a PHY's extended register + * @phydev: a pointer to a &struct phy_device + * @regnum: register number to read + * + * NOTE:The caller must have taken the MDIO bus lock. + * + * returns the value of regnum reg or negative error code + */ +static int ytphy_read_ext(struct phy_device *phydev, u16 regnum) +{ + int ret; + + ret = __phy_write(phydev, YTPHY_PAGE_SELECT, regnum); + if (ret < 0) + return ret; + + return __phy_read(phydev, YTPHY_PAGE_DATA); +} + +/** + * ytphy_read_ext_with_lock() - read a PHY's extended register + * @phydev: a pointer to a &struct phy_device + * @regnum: register number to read + * + * returns the value of regnum reg or negative error code + */ +static int ytphy_read_ext_with_lock(struct phy_device *phydev, u16 regnum) +{ + int ret; + + phy_lock_mdio_bus(phydev); + ret = ytphy_read_ext(phydev, regnum); + phy_unlock_mdio_bus(phydev); + + return ret; +} + +/** + * ytphy_write_ext() - write a PHY's extended register + * @phydev: a pointer to a &struct phy_device + * @regnum: register number to write + * @val: value to write to @regnum + * + * NOTE:The caller must have taken the MDIO bus lock. + * + * returns 0 or negative error code + */ +static int ytphy_write_ext(struct phy_device *phydev, u16 regnum, u16 val) +{ + int ret; + + ret = __phy_write(phydev, YTPHY_PAGE_SELECT, regnum); + if (ret < 0) + return ret; + + return __phy_write(phydev, YTPHY_PAGE_DATA, val); +} + +/** + * ytphy_write_ext_with_lock() - write a PHY's extended register + * @phydev: a pointer to a &struct phy_device + * @regnum: register number to write + * @val: value to write to @regnum + * + * returns 0 or negative error code + */ +static int ytphy_write_ext_with_lock(struct phy_device *phydev, u16 regnum, + u16 val) +{ + int ret; + + phy_lock_mdio_bus(phydev); + ret = ytphy_write_ext(phydev, regnum, val); + phy_unlock_mdio_bus(phydev); + + return ret; +} + +/** + * ytphy_modify_ext() - bits modify a PHY's extended register + * @phydev: a pointer to a &struct phy_device + * @regnum: register number to write + * @mask: bit mask of bits to clear + * @set: bit mask of bits to set + * + * NOTE: Convenience function which allows a PHY's extended register to be + * modified as new register value = (old register value & ~mask) | set. + * The caller must have taken the MDIO bus lock. + * + * returns 0 or negative error code + */ +static int ytphy_modify_ext(struct phy_device *phydev, u16 regnum, u16 mask, + u16 set) +{ + int ret; + + ret = __phy_write(phydev, YTPHY_PAGE_SELECT, regnum); + if (ret < 0) + return ret; + + return __phy_modify(phydev, YTPHY_PAGE_DATA, mask, set); +} + +/** + * ytphy_modify_ext_with_lock() - bits modify a PHY's extended register + * @phydev: a pointer to a &struct phy_device + * @regnum: register number to write + * @mask: bit mask of bits to clear + * @set: bit mask of bits to set + * + * NOTE: Convenience function which allows a PHY's extended register to be + * modified as new register value = (old register value & ~mask) | set. 
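For reference, a worked instance of the update rule these helpers document, new = (old & ~mask) | set, with illustrative values:

	/* old = 0x00f0, mask = BIT(7), set = BIT(0):
	 * new = (0x00f0 & ~0x0080) | 0x0001 = 0x0071
	 */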
+ * + * returns 0 or negative error code + */ +static int ytphy_modify_ext_with_lock(struct phy_device *phydev, u16 regnum, + u16 mask, u16 set) +{ + int ret; + + phy_lock_mdio_bus(phydev); + ret = ytphy_modify_ext(phydev, regnum, mask, set); + phy_unlock_mdio_bus(phydev); + + return ret; +} + +/** + * ytphy_get_wol() - report whether wake-on-lan is enabled + * @phydev: a pointer to a &struct phy_device + * @wol: a pointer to a &struct ethtool_wolinfo + * + * NOTE: YTPHY_WOL_CONFIG_REG is common ext reg. + */ +static void ytphy_get_wol(struct phy_device *phydev, + struct ethtool_wolinfo *wol) +{ + int wol_config; + + wol->supported = WAKE_MAGIC; + wol->wolopts = 0; + + wol_config = ytphy_read_ext_with_lock(phydev, YTPHY_WOL_CONFIG_REG); + if (wol_config < 0) + return; + + if (wol_config & YTPHY_WCR_ENABLE) + wol->wolopts |= WAKE_MAGIC; +} + +/** + * ytphy_set_wol() - turn wake-on-lan on or off + * @phydev: a pointer to a &struct phy_device + * @wol: a pointer to a &struct ethtool_wolinfo + * + * NOTE: YTPHY_WOL_CONFIG_REG, YTPHY_WOL_MACADDR2_REG, YTPHY_WOL_MACADDR1_REG + * and YTPHY_WOL_MACADDR0_REG are common ext reg. The + * YTPHY_INTERRUPT_ENABLE_REG of UTP is special, fiber also use this register. + * + * returns 0 or negative errno code + */ +static int ytphy_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) +{ + struct net_device *p_attached_dev; + const u16 mac_addr_reg[] = { + YTPHY_WOL_MACADDR2_REG, + YTPHY_WOL_MACADDR1_REG, + YTPHY_WOL_MACADDR0_REG, + }; + const u8 *mac_addr; + int old_page; + int ret = 0; + u16 mask; + u16 val; + u8 i; + + if (wol->wolopts & WAKE_MAGIC) { + p_attached_dev = phydev->attached_dev; + if (!p_attached_dev) + return -ENODEV; + + mac_addr = (const u8 *)p_attached_dev->dev_addr; + if (!is_valid_ether_addr(mac_addr)) + return -EINVAL; + + /* lock mdio bus then switch to utp reg space */ + old_page = phy_select_page(phydev, YT8521_RSSR_UTP_SPACE); + if (old_page < 0) + goto err_restore_page; + + /* Store the device address for the magic packet */ + for (i = 0; i < 3; i++) { + ret = ytphy_write_ext(phydev, mac_addr_reg[i], + ((mac_addr[i * 2] << 8)) | + (mac_addr[i * 2 + 1])); + if (ret < 0) + goto err_restore_page; + } + + /* Enable WOL feature */ + mask = YTPHY_WCR_PULSE_WIDTH_MASK | YTPHY_WCR_INTR_SEL; + val = YTPHY_WCR_ENABLE | YTPHY_WCR_INTR_SEL; + val |= YTPHY_WCR_TYPE_PULSE | YTPHY_WCR_PULSE_WIDTH_672MS; + ret = ytphy_modify_ext(phydev, YTPHY_WOL_CONFIG_REG, mask, val); + if (ret < 0) + goto err_restore_page; + + /* Enable WOL interrupt */ + ret = __phy_modify(phydev, YTPHY_INTERRUPT_ENABLE_REG, 0, + YTPHY_IER_WOL); + if (ret < 0) + goto err_restore_page; + + } else { + old_page = phy_select_page(phydev, YT8521_RSSR_UTP_SPACE); + if (old_page < 0) + goto err_restore_page; + + /* Disable WOL feature */ + mask = YTPHY_WCR_ENABLE | YTPHY_WCR_INTR_SEL; + ret = ytphy_modify_ext(phydev, YTPHY_WOL_CONFIG_REG, mask, 0); + + /* Disable WOL interrupt */ + ret = __phy_modify(phydev, YTPHY_INTERRUPT_ENABLE_REG, + YTPHY_IER_WOL, 0); + if (ret < 0) + goto err_restore_page; + } + +err_restore_page: + return phy_restore_page(phydev, old_page, ret); +} + static int yt8511_read_page(struct phy_device *phydev) { return __phy_read(phydev, YT8511_PAGE_SELECT); @@ -111,6 +548,1181 @@ err_restore_page: return phy_restore_page(phydev, oldpage, ret); } +/** + * yt8521_read_page() - read reg page + * @phydev: a pointer to a &struct phy_device + * + * returns current reg space of yt8521 (YT8521_RSSR_FIBER_SPACE/ + * YT8521_RSSR_UTP_SPACE) or negative errno code 
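The magic-packet address programming in ytphy_set_wol() above packs two octets per 16-bit register, high octet first. A hedged sketch of that mapping (the helper name is illustrative, not part of this patch):

	/* For 00:11:22:33:44:55 this yields 0xA007 = 0x0011,
	 * 0xA008 = 0x2233 and 0xA009 = 0x4455, matching the write loop
	 * in ytphy_set_wol().
	 */
	static u16 example_wol_reg_val(const u8 *mac_addr, int i)
	{
		return (mac_addr[i * 2] << 8) | mac_addr[i * 2 + 1];
	}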
+ */ +static int yt8521_read_page(struct phy_device *phydev) +{ + int old_page; + + old_page = ytphy_read_ext(phydev, YT8521_REG_SPACE_SELECT_REG); + if (old_page < 0) + return old_page; + + if ((old_page & YT8521_RSSR_SPACE_MASK) == YT8521_RSSR_FIBER_SPACE) + return YT8521_RSSR_FIBER_SPACE; + + return YT8521_RSSR_UTP_SPACE; +}; + +/** + * yt8521_write_page() - write reg page + * @phydev: a pointer to a &struct phy_device + * @page: The reg page(YT8521_RSSR_FIBER_SPACE/YT8521_RSSR_UTP_SPACE) to write. + * + * returns 0 or negative errno code + */ +static int yt8521_write_page(struct phy_device *phydev, int page) +{ + int mask = YT8521_RSSR_SPACE_MASK; + int set; + + if ((page & YT8521_RSSR_SPACE_MASK) == YT8521_RSSR_FIBER_SPACE) + set = YT8521_RSSR_FIBER_SPACE; + else + set = YT8521_RSSR_UTP_SPACE; + + return ytphy_modify_ext(phydev, YT8521_REG_SPACE_SELECT_REG, mask, set); +}; + +/** + * yt8521_probe() - read chip config then set suitable polling_mode + * @phydev: a pointer to a &struct phy_device + * + * returns 0 or negative errno code + */ +static int yt8521_probe(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + struct yt8521_priv *priv; + int chip_config; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + phydev->priv = priv; + + chip_config = ytphy_read_ext_with_lock(phydev, YT8521_CHIP_CONFIG_REG); + if (chip_config < 0) + return chip_config; + + priv->strap_mode = chip_config & YT8521_CCR_MODE_SEL_MASK; + switch (priv->strap_mode) { + case YT8521_CCR_MODE_FIBER_TO_RGMII: + case YT8521_CCR_MODE_SGPHY_TO_RGMAC: + case YT8521_CCR_MODE_SGMAC_TO_RGPHY: + priv->polling_mode = YT8521_MODE_FIBER; + priv->reg_page = YT8521_RSSR_FIBER_SPACE; + phydev->port = PORT_FIBRE; + break; + case YT8521_CCR_MODE_UTP_FIBER_TO_RGMII: + case YT8521_CCR_MODE_UTP_TO_FIBER_AUTO: + case YT8521_CCR_MODE_UTP_TO_FIBER_FORCE: + priv->polling_mode = YT8521_MODE_POLL; + priv->reg_page = YT8521_RSSR_TO_BE_ARBITRATED; + phydev->port = PORT_NONE; + break; + case YT8521_CCR_MODE_UTP_TO_SGMII: + case YT8521_CCR_MODE_UTP_TO_RGMII: + priv->polling_mode = YT8521_MODE_UTP; + priv->reg_page = YT8521_RSSR_UTP_SPACE; + phydev->port = PORT_TP; + break; + } + /* set default reg space */ + if (priv->reg_page != YT8521_RSSR_TO_BE_ARBITRATED) { + ret = ytphy_write_ext_with_lock(phydev, + YT8521_REG_SPACE_SELECT_REG, + priv->reg_page); + if (ret < 0) + return ret; + } + + return 0; +} + +/** + * ytphy_utp_read_lpa() - read LPA then setup lp_advertising for utp + * @phydev: a pointer to a &struct phy_device + * + * NOTE:The caller must have taken the MDIO bus lock. 
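Nearly every paged operation below repeats one idiom: phy_select_page() takes the MDIO bus lock and switches the reg space, and phy_restore_page() switches back, drops the lock and folds a pending error into its return value. A condensed, illustrative-only sketch of that contract:

	static int example_paged_read(struct phy_device *phydev)
	{
		int old_page;
		int ret = 0;

		old_page = phy_select_page(phydev, YT8521_RSSR_UTP_SPACE);
		if (old_page < 0)
			goto err_restore_page;

		ret = __phy_read(phydev, YTPHY_SPECIFIC_STATUS_REG);

	err_restore_page:
		return phy_restore_page(phydev, old_page, ret);
	}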
+ * + * returns 0 or negative errno code + */ +static int ytphy_utp_read_lpa(struct phy_device *phydev) +{ + int lpa, lpagb; + + if (phydev->autoneg == AUTONEG_ENABLE) { + if (!phydev->autoneg_complete) { + mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, + 0); + mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, 0); + return 0; + } + + if (phydev->is_gigabit_capable) { + lpagb = __phy_read(phydev, MII_STAT1000); + if (lpagb < 0) + return lpagb; + + if (lpagb & LPA_1000MSFAIL) { + int adv = __phy_read(phydev, MII_CTRL1000); + + if (adv < 0) + return adv; + + if (adv & CTL1000_ENABLE_MASTER) + phydev_err(phydev, "Master/Slave resolution failed, maybe conflicting manual settings?\n"); + else + phydev_err(phydev, "Master/Slave resolution failed\n"); + return -ENOLINK; + } + + mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, + lpagb); + } + + lpa = __phy_read(phydev, MII_LPA); + if (lpa < 0) + return lpa; + + mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa); + } else { + linkmode_zero(phydev->lp_advertising); + } + + return 0; +} + +/** + * yt8521_adjust_status() - update speed and duplex to phydev. when in fiber + * mode, adjust speed and duplex. + * @phydev: a pointer to a &struct phy_device + * @status: yt8521 status read from YTPHY_SPECIFIC_STATUS_REG + * @is_utp: false(yt8521 work in fiber mode) or true(yt8521 work in utp mode) + * + * NOTE:The caller must have taken the MDIO bus lock. + * + * returns 0 + */ +static int yt8521_adjust_status(struct phy_device *phydev, int status, + bool is_utp) +{ + int speed_mode, duplex; + int speed; + int err; + int lpa; + + if (is_utp) + duplex = (status & YTPHY_SSR_DUPLEX) >> YTPHY_SSR_DUPLEX_OFFSET; + else + duplex = DUPLEX_FULL; /* for fiber, it always DUPLEX_FULL */ + + speed_mode = (status & YTPHY_SSR_SPEED_MODE_MASK) >> + YTPHY_SSR_SPEED_MODE_OFFSET; + + switch (speed_mode) { + case YTPHY_SSR_SPEED_10M: + if (is_utp) + speed = SPEED_10; + else + /* for fiber, it will never run here, default to + * SPEED_UNKNOWN + */ + speed = SPEED_UNKNOWN; + break; + case YTPHY_SSR_SPEED_100M: + speed = SPEED_100; + break; + case YTPHY_SSR_SPEED_1000M: + speed = SPEED_1000; + break; + default: + speed = SPEED_UNKNOWN; + break; + } + + phydev->speed = speed; + phydev->duplex = duplex; + + if (is_utp) { + err = ytphy_utp_read_lpa(phydev); + if (err < 0) + return err; + + phy_resolve_aneg_pause(phydev); + } else { + lpa = __phy_read(phydev, MII_LPA); + if (lpa < 0) + return lpa; + + /* only support 1000baseX Full */ + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + phydev->lp_advertising, lpa & LPA_1000XFULL); + + if (!(lpa & YTPHY_FLPA_PAUSE)) { + phydev->pause = 0; + phydev->asym_pause = 0; + } else if ((lpa & YTPHY_FLPA_ASYM_PAUSE)) { + phydev->pause = 1; + phydev->asym_pause = 1; + } else { + phydev->pause = 1; + phydev->asym_pause = 0; + } + } + + return 0; +} + +/** + * yt8521_read_status_paged() - determines the speed and duplex of one page + * @phydev: a pointer to a &struct phy_device + * @page: The reg page(YT8521_RSSR_FIBER_SPACE/YT8521_RSSR_UTP_SPACE) to + * operate. 
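To make the decode in yt8521_adjust_status() above concrete, one assumed register snapshot:

	/* status = 0xac00 (illustrative value):
	 *   (0xac00 & YTPHY_SSR_SPEED_MODE_MASK) >> 14 = 0x2 -> SPEED_1000
	 *   (0xac00 & YTPHY_SSR_DUPLEX) >> 13 = 1        -> DUPLEX_FULL
	 *   0xac00 & YTPHY_SSR_LINK is set                -> link up
	 */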
+ * + * returns 1 (utp or fiber link),0 (no link) or negative errno code + */ +static int yt8521_read_status_paged(struct phy_device *phydev, int page) +{ + int fiber_latch_val; + int fiber_curr_val; + int old_page; + int ret = 0; + int status; + int link; + + linkmode_zero(phydev->lp_advertising); + phydev->duplex = DUPLEX_UNKNOWN; + phydev->speed = SPEED_UNKNOWN; + phydev->asym_pause = 0; + phydev->pause = 0; + + /* YT8521 has two reg space (utp/fiber) for linkup with utp/fiber + * respectively. but for utp/fiber combo mode, reg space should be + * arbitrated based on media priority. by default, utp takes + * priority. reg space should be properly set before read + * YTPHY_SPECIFIC_STATUS_REG. + */ + + page &= YT8521_RSSR_SPACE_MASK; + old_page = phy_select_page(phydev, page); + if (old_page < 0) + goto err_restore_page; + + /* Read YTPHY_SPECIFIC_STATUS_REG, which indicates the speed and duplex + * of the PHY is actually using. + */ + ret = __phy_read(phydev, YTPHY_SPECIFIC_STATUS_REG); + if (ret < 0) + goto err_restore_page; + + status = ret; + link = !!(status & YTPHY_SSR_LINK); + + /* When PHY is in fiber mode, speed transferred from 1000Mbps to + * 100Mbps,there is not link down from YTPHY_SPECIFIC_STATUS_REG, so + * we need check MII_BMSR to identify such case. + */ + if (page == YT8521_RSSR_FIBER_SPACE) { + ret = __phy_read(phydev, MII_BMSR); + if (ret < 0) + goto err_restore_page; + + fiber_latch_val = ret; + ret = __phy_read(phydev, MII_BMSR); + if (ret < 0) + goto err_restore_page; + + fiber_curr_val = ret; + if (link && fiber_latch_val != fiber_curr_val) { + link = 0; + phydev_info(phydev, + "%s, fiber link down detect, latch = %04x, curr = %04x\n", + __func__, fiber_latch_val, fiber_curr_val); + } + } else { + /* Read autonegotiation status */ + ret = __phy_read(phydev, MII_BMSR); + if (ret < 0) + goto err_restore_page; + + phydev->autoneg_complete = ret & BMSR_ANEGCOMPLETE ? 1 : 0; + } + + if (link) { + if (page == YT8521_RSSR_UTP_SPACE) + yt8521_adjust_status(phydev, status, true); + else + yt8521_adjust_status(phydev, status, false); + } + return phy_restore_page(phydev, old_page, link); + +err_restore_page: + return phy_restore_page(phydev, old_page, ret); +} + +/** + * yt8521_read_status() - determines the negotiated speed and duplex + * @phydev: a pointer to a &struct phy_device + * + * returns 0 or negative errno code + */ +static int yt8521_read_status(struct phy_device *phydev) +{ + struct yt8521_priv *priv = phydev->priv; + int link_fiber = 0; + int link_utp; + int link; + int ret; + + if (priv->reg_page != YT8521_RSSR_TO_BE_ARBITRATED) { + link = yt8521_read_status_paged(phydev, priv->reg_page); + if (link < 0) + return link; + } else { + /* when page is YT8521_RSSR_TO_BE_ARBITRATED, arbitration is + * needed. by default, utp is higher priority. + */ + + link_utp = yt8521_read_status_paged(phydev, + YT8521_RSSR_UTP_SPACE); + if (link_utp < 0) + return link_utp; + + if (!link_utp) { + link_fiber = yt8521_read_status_paged(phydev, + YT8521_RSSR_FIBER_SPACE); + if (link_fiber < 0) + return link_fiber; + } + + link = link_utp || link_fiber; + } + + if (link) { + if (phydev->link == 0) { + /* arbitrate reg space based on linkup media type. 
*/
+			if (priv->polling_mode == YT8521_MODE_POLL &&
+			    priv->reg_page == YT8521_RSSR_TO_BE_ARBITRATED) {
+				if (link_fiber)
+					priv->reg_page =
+						YT8521_RSSR_FIBER_SPACE;
+				else
+					priv->reg_page = YT8521_RSSR_UTP_SPACE;
+
+				ret = ytphy_write_ext_with_lock(phydev,
+					YT8521_REG_SPACE_SELECT_REG,
+					priv->reg_page);
+				if (ret < 0)
+					return ret;
+
+				phydev->port = link_fiber ? PORT_FIBRE : PORT_TP;
+
+				phydev_info(phydev, "%s, link up, media: %s\n",
+					    __func__,
+					    (phydev->port == PORT_TP) ?
+					    "UTP" : "Fiber");
+			}
+		}
+		phydev->link = 1;
+	} else {
+		if (phydev->link == 1) {
+			phydev_info(phydev, "%s, link down, media: %s\n",
+				    __func__, (phydev->port == PORT_TP) ?
+				    "UTP" : "Fiber");
+
+			/* When in YT8521_MODE_POLL mode, we need to prepare
+			 * for the next arbitration.
+			 */
+			if (priv->polling_mode == YT8521_MODE_POLL) {
+				priv->reg_page = YT8521_RSSR_TO_BE_ARBITRATED;
+				phydev->port = PORT_NONE;
+			}
+		}
+
+		phydev->link = 0;
+	}
+
+	return 0;
+}
+
+/**
+ * yt8521_modify_bmcr_paged - bits modify a PHY's BMCR register of one page
+ * @phydev: the phy_device struct
+ * @page: The reg page(YT8521_RSSR_FIBER_SPACE/YT8521_RSSR_UTP_SPACE) to operate
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * NOTE: Convenience function which allows a PHY's BMCR register to be
+ * modified as new register value = (old register value & ~mask) | set.
+ * YT8521 has two reg spaces (utp/fiber) and three modes (utp/fiber/poll);
+ * each space has its own MII_BMCR, and poll mode combines utp and fiber,
+ * so both spaces must be updated. If the modification is a reset, the
+ * function waits for the reset to complete.
+ *
+ * returns 0 or negative errno code
+ */
+static int yt8521_modify_bmcr_paged(struct phy_device *phydev, int page,
+				    u16 mask, u16 set)
+{
+	int max_cnt = 500; /* the max wait time of reset ~ 500 ms */
+	int old_page;
+	int ret = 0;
+
+	old_page = phy_select_page(phydev, page & YT8521_RSSR_SPACE_MASK);
+	if (old_page < 0)
+		goto err_restore_page;
+
+	ret = __phy_modify(phydev, MII_BMCR, mask, set);
+	if (ret < 0)
+		goto err_restore_page;
+
+	/* If it is a reset, wait for the reset to complete */
+	if (set == BMCR_RESET) {
+		while (max_cnt--) {
+			usleep_range(1000, 1100);
+			ret = __phy_read(phydev, MII_BMCR);
+			if (ret < 0)
+				goto err_restore_page;
+
+			if (!(ret & BMCR_RESET))
+				return phy_restore_page(phydev, old_page, 0);
+		}
+	}
+
+err_restore_page:
+	return phy_restore_page(phydev, old_page, ret);
+}
+
+/**
+ * yt8521_modify_utp_fiber_bmcr - bits modify a PHY's BMCR register
+ * @phydev: the phy_device struct
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * NOTE: Convenience function which allows a PHY's BMCR register to be
+ * modified as new register value = (old register value & ~mask) | set.
+ * YT8521 has two reg spaces (utp/fiber) and three modes (utp/fiber/poll);
+ * each space has its own MII_BMCR, and poll mode combines utp and fiber,
+ * so both spaces must be updated.
+ 
+ * + * returns 0 or negative errno code + */ +static int yt8521_modify_utp_fiber_bmcr(struct phy_device *phydev, u16 mask, + u16 set) +{ + struct yt8521_priv *priv = phydev->priv; + int ret; + + if (priv->reg_page != YT8521_RSSR_TO_BE_ARBITRATED) { + ret = yt8521_modify_bmcr_paged(phydev, priv->reg_page, mask, + set); + if (ret < 0) + return ret; + } else { + ret = yt8521_modify_bmcr_paged(phydev, YT8521_RSSR_UTP_SPACE, + mask, set); + if (ret < 0) + return ret; + + ret = yt8521_modify_bmcr_paged(phydev, YT8521_RSSR_FIBER_SPACE, + mask, set); + if (ret < 0) + return ret; + } + return 0; +} + +/** + * yt8521_soft_reset() - called to issue a PHY software reset + * @phydev: a pointer to a &struct phy_device + * + * returns 0 or negative errno code + */ +static int yt8521_soft_reset(struct phy_device *phydev) +{ + return yt8521_modify_utp_fiber_bmcr(phydev, 0, BMCR_RESET); +} + +/** + * yt8521_suspend() - suspend the hardware + * @phydev: a pointer to a &struct phy_device + * + * returns 0 or negative errno code + */ +static int yt8521_suspend(struct phy_device *phydev) +{ + int wol_config; + + /* YTPHY_WOL_CONFIG_REG is common ext reg */ + wol_config = ytphy_read_ext_with_lock(phydev, YTPHY_WOL_CONFIG_REG); + if (wol_config < 0) + return wol_config; + + /* if wol enable, do nothing */ + if (wol_config & YTPHY_WCR_ENABLE) + return 0; + + return yt8521_modify_utp_fiber_bmcr(phydev, 0, BMCR_PDOWN); +} + +/** + * yt8521_resume() - resume the hardware + * @phydev: a pointer to a &struct phy_device + * + * returns 0 or negative errno code + */ +static int yt8521_resume(struct phy_device *phydev) +{ + int ret; + int wol_config; + + /* disable auto sleep */ + ret = ytphy_modify_ext_with_lock(phydev, + YT8521_EXTREG_SLEEP_CONTROL1_REG, + YT8521_ESC1R_SLEEP_SW, 0); + if (ret < 0) + return ret; + + wol_config = ytphy_read_ext_with_lock(phydev, YTPHY_WOL_CONFIG_REG); + if (wol_config < 0) + return wol_config; + + /* if wol enable, do nothing */ + if (wol_config & YTPHY_WCR_ENABLE) + return 0; + + return yt8521_modify_utp_fiber_bmcr(phydev, BMCR_PDOWN, 0); +} + +/** + * yt8521_config_init() - called to initialize the PHY + * @phydev: a pointer to a &struct phy_device + * + * returns 0 or negative errno code + */ +static int yt8521_config_init(struct phy_device *phydev) +{ + int old_page; + int ret = 0; + u16 val; + + old_page = phy_select_page(phydev, YT8521_RSSR_UTP_SPACE); + if (old_page < 0) + goto err_restore_page; + + switch (phydev->interface) { + case PHY_INTERFACE_MODE_RGMII: + val = YT8521_RC1R_GE_TX_DELAY_DIS | YT8521_RC1R_GE_TX_DELAY_DIS; + val |= YT8521_RC1R_RX_DELAY_DIS; + break; + case PHY_INTERFACE_MODE_RGMII_RXID: + val = YT8521_RC1R_GE_TX_DELAY_DIS | YT8521_RC1R_GE_TX_DELAY_DIS; + val |= YT8521_RC1R_RX_DELAY_EN; + break; + case PHY_INTERFACE_MODE_RGMII_TXID: + val = YT8521_RC1R_GE_TX_DELAY_EN | YT8521_RC1R_GE_TX_DELAY_EN; + val |= YT8521_RC1R_RX_DELAY_DIS; + break; + case PHY_INTERFACE_MODE_RGMII_ID: + val = YT8521_RC1R_GE_TX_DELAY_EN | YT8521_RC1R_GE_TX_DELAY_EN; + val |= YT8521_RC1R_RX_DELAY_EN; + break; + case PHY_INTERFACE_MODE_SGMII: + break; + default: /* do not support other modes */ + ret = -EOPNOTSUPP; + goto err_restore_page; + } + + /* set rgmii delay mode */ + if (phydev->interface != PHY_INTERFACE_MODE_SGMII) { + ret = ytphy_modify_ext(phydev, YT8521_RGMII_CONFIG1_REG, + (YT8521_RC1R_RX_DELAY_MASK | + YT8521_RC1R_FE_TX_DELAY_MASK | + YT8521_RC1R_GE_TX_DELAY_MASK), + val); + if (ret < 0) + goto err_restore_page; + } + + /* disable auto sleep */ + ret = 
ytphy_modify_ext(phydev, YT8521_EXTREG_SLEEP_CONTROL1_REG, + YT8521_ESC1R_SLEEP_SW, 0); + if (ret < 0) + goto err_restore_page; + + /* enable RXC clock when no wire plug */ + ret = ytphy_modify_ext(phydev, YT8521_CLOCK_GATING_REG, + YT8521_CGR_RX_CLK_EN, 0); + if (ret < 0) + goto err_restore_page; + +err_restore_page: + return phy_restore_page(phydev, old_page, ret); +} + +/** + * yt8521_prepare_fiber_features() - A small helper function that setup + * fiber's features. + * @phydev: a pointer to a &struct phy_device + * @dst: a pointer to store fiber's features + */ +static void yt8521_prepare_fiber_features(struct phy_device *phydev, + unsigned long *dst) +{ + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Full_BIT, dst); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, dst); + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, dst); + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, dst); +} + +/** + * yt8521_fiber_setup_forced - configures/forces speed from @phydev + * @phydev: target phy_device struct + * + * NOTE:The caller must have taken the MDIO bus lock. + * + * returns 0 or negative errno code + */ +static int yt8521_fiber_setup_forced(struct phy_device *phydev) +{ + u16 val; + int ret; + + if (phydev->speed == SPEED_1000) + val = YTPHY_MCR_FIBER_1000BX; + else if (phydev->speed == SPEED_100) + val = YTPHY_MCR_FIBER_100FX; + else + return -EINVAL; + + ret = __phy_modify(phydev, MII_BMCR, BMCR_ANENABLE, 0); + if (ret < 0) + return ret; + + /* disable Fiber auto sensing */ + ret = ytphy_modify_ext(phydev, YT8521_LINK_TIMER_CFG2_REG, + YT8521_LTCR_EN_AUTOSEN, 0); + if (ret < 0) + return ret; + + ret = ytphy_modify_ext(phydev, YTPHY_MISC_CONFIG_REG, + YTPHY_MCR_FIBER_SPEED_MASK, val); + if (ret < 0) + return ret; + + return ytphy_modify_ext(phydev, YT8521_CHIP_CONFIG_REG, + YT8521_CCR_SW_RST, 0); +} + +/** + * ytphy_check_and_restart_aneg - Enable and restart auto-negotiation + * @phydev: target phy_device struct + * @restart: whether aneg restart is requested + * + * NOTE:The caller must have taken the MDIO bus lock. + * + * returns 0 or negative errno code + */ +static int ytphy_check_and_restart_aneg(struct phy_device *phydev, bool restart) +{ + int ret; + + if (!restart) { + /* Advertisement hasn't changed, but maybe aneg was never on to + * begin with? Or maybe phy was isolated? + */ + ret = __phy_read(phydev, MII_BMCR); + if (ret < 0) + return ret; + + if (!(ret & BMCR_ANENABLE) || (ret & BMCR_ISOLATE)) + restart = true; + } + /* Enable and Restart Autonegotiation + * Don't isolate the PHY if we're negotiating + */ + if (restart) + return __phy_modify(phydev, MII_BMCR, BMCR_ISOLATE, + BMCR_ANENABLE | BMCR_ANRESTART); + + return 0; +} + +/** + * yt8521_fiber_config_aneg - restart auto-negotiation or write + * YTPHY_MISC_CONFIG_REG. + * @phydev: target phy_device struct + * + * NOTE:The caller must have taken the MDIO bus lock. 
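As a side note on yt8521_config_init() above, the RGMII delay fields follow the encoding documented at YT8521_RGMII_CONFIG1_REG: each 4-bit field is a multiplier N with delay = 150 ps * N. A worked instance using the values from this patch:

	/* "enable" value 0xF: 15 * 150 ps = 2250 ps (~2 ns)
	 * "disable" value 0x0: 0 * 150 ps = 0 ps
	 * so PHY_INTERFACE_MODE_RGMII_ID enables roughly 2 ns of delay
	 * on both the RX and GE TX paths.
	 */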
+ * + * returns 0 or negative errno code + */ +static int yt8521_fiber_config_aneg(struct phy_device *phydev) +{ + int err, changed = 0; + int bmcr; + u16 adv; + + if (phydev->autoneg != AUTONEG_ENABLE) + return yt8521_fiber_setup_forced(phydev); + + /* enable Fiber auto sensing */ + err = ytphy_modify_ext(phydev, YT8521_LINK_TIMER_CFG2_REG, + 0, YT8521_LTCR_EN_AUTOSEN); + if (err < 0) + return err; + + err = ytphy_modify_ext(phydev, YT8521_CHIP_CONFIG_REG, + YT8521_CCR_SW_RST, 0); + if (err < 0) + return err; + + bmcr = __phy_read(phydev, MII_BMCR); + if (bmcr < 0) + return bmcr; + + /* When it is coming from fiber forced mode, add bmcr power down + * and power up to let aneg work fine. + */ + if (!(bmcr & BMCR_ANENABLE)) { + __phy_modify(phydev, MII_BMCR, 0, BMCR_PDOWN); + usleep_range(1000, 1100); + __phy_modify(phydev, MII_BMCR, BMCR_PDOWN, 0); + } + + adv = linkmode_adv_to_mii_adv_x(phydev->advertising, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT); + + /* Setup fiber advertisement */ + err = __phy_modify_changed(phydev, MII_ADVERTISE, + ADVERTISE_1000XHALF | ADVERTISE_1000XFULL | + ADVERTISE_1000XPAUSE | + ADVERTISE_1000XPSE_ASYM, + adv); + if (err < 0) + return err; + + if (err > 0) + changed = 1; + + return ytphy_check_and_restart_aneg(phydev, changed); +} + +/** + * ytphy_setup_master_slave + * @phydev: target phy_device struct + * + * NOTE: The caller must have taken the MDIO bus lock. + * + * returns 0 or negative errno code + */ +static int ytphy_setup_master_slave(struct phy_device *phydev) +{ + u16 ctl = 0; + + if (!phydev->is_gigabit_capable) + return 0; + + switch (phydev->master_slave_set) { + case MASTER_SLAVE_CFG_MASTER_PREFERRED: + ctl |= CTL1000_PREFER_MASTER; + break; + case MASTER_SLAVE_CFG_SLAVE_PREFERRED: + break; + case MASTER_SLAVE_CFG_MASTER_FORCE: + ctl |= CTL1000_AS_MASTER; + fallthrough; + case MASTER_SLAVE_CFG_SLAVE_FORCE: + ctl |= CTL1000_ENABLE_MASTER; + break; + case MASTER_SLAVE_CFG_UNKNOWN: + case MASTER_SLAVE_CFG_UNSUPPORTED: + return 0; + default: + phydev_warn(phydev, "Unsupported Master/Slave mode\n"); + return -EOPNOTSUPP; + } + + return __phy_modify_changed(phydev, MII_CTRL1000, + (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER | + CTL1000_PREFER_MASTER), ctl); +} + +/** + * ytphy_utp_config_advert - sanitize and advertise auto-negotiation parameters + * @phydev: target phy_device struct + * + * NOTE: Writes MII_ADVERTISE with the appropriate values, + * after sanitizing the values to make sure we only advertise + * what is supported. Returns < 0 on error, 0 if the PHY's advertisement + * hasn't changed, and > 0 if it has changed. + * The caller must have taken the MDIO bus lock. + * + * returns 0 or negative errno code + */ +static int ytphy_utp_config_advert(struct phy_device *phydev) +{ + int err, bmsr, changed = 0; + u32 adv; + + /* Only allow advertising what this PHY supports */ + linkmode_and(phydev->advertising, phydev->advertising, + phydev->supported); + + adv = linkmode_adv_to_mii_adv_t(phydev->advertising); + + /* Setup standard advertisement */ + err = __phy_modify_changed(phydev, MII_ADVERTISE, + ADVERTISE_ALL | ADVERTISE_100BASE4 | + ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM, + adv); + if (err < 0) + return err; + if (err > 0) + changed = 1; + + bmsr = __phy_read(phydev, MII_BMSR); + if (bmsr < 0) + return bmsr; + + /* Per 802.3-2008, Section 22.2.4.2.16 Extended status all + * 1000Mbits/sec capable PHYs shall have the BMSR_ESTATEN bit set to a + * logical 1. 
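An assumed example of the advertisement encoding used here:

	/* advertising = {100baseT/Full, Pause} (illustrative):
	 *   linkmode_adv_to_mii_adv_t() = ADVERTISE_100FULL |
	 *                                 ADVERTISE_PAUSE_CAP
	 * __phy_modify_changed() above returns > 0 only when
	 * MII_ADVERTISE actually changed, which is what ultimately
	 * gates the autoneg restart.
	 */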
+ */ + if (!(bmsr & BMSR_ESTATEN)) + return changed; + + adv = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising); + + err = __phy_modify_changed(phydev, MII_CTRL1000, + ADVERTISE_1000FULL | ADVERTISE_1000HALF, + adv); + if (err < 0) + return err; + if (err > 0) + changed = 1; + + return changed; +} + +/** + * ytphy_utp_config_aneg - restart auto-negotiation or write BMCR + * @phydev: target phy_device struct + * @changed: whether autoneg is requested + * + * NOTE: If auto-negotiation is enabled, we configure the + * advertising, and then restart auto-negotiation. If it is not + * enabled, then we write the BMCR. + * The caller must have taken the MDIO bus lock. + * + * returns 0 or negative errno code + */ +static int ytphy_utp_config_aneg(struct phy_device *phydev, bool changed) +{ + int err; + u16 ctl; + + err = ytphy_setup_master_slave(phydev); + if (err < 0) + return err; + else if (err) + changed = true; + + if (phydev->autoneg != AUTONEG_ENABLE) { + /* configures/forces speed/duplex from @phydev */ + + ctl = mii_bmcr_encode_fixed(phydev->speed, phydev->duplex); + + return __phy_modify(phydev, MII_BMCR, ~(BMCR_LOOPBACK | + BMCR_ISOLATE | BMCR_PDOWN), ctl); + } + + err = ytphy_utp_config_advert(phydev); + if (err < 0) /* error */ + return err; + else if (err) + changed = true; + + return ytphy_check_and_restart_aneg(phydev, changed); +} + +/** + * yt8521_config_aneg_paged() - switch reg space then call genphy_config_aneg + * of one page + * @phydev: a pointer to a &struct phy_device + * @page: The reg page(YT8521_RSSR_FIBER_SPACE/YT8521_RSSR_UTP_SPACE) to + * operate. + * + * returns 0 or negative errno code + */ +static int yt8521_config_aneg_paged(struct phy_device *phydev, int page) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(fiber_supported); + struct yt8521_priv *priv = phydev->priv; + int old_page; + int ret = 0; + + page &= YT8521_RSSR_SPACE_MASK; + + old_page = phy_select_page(phydev, page); + if (old_page < 0) + goto err_restore_page; + + /* If reg_page is YT8521_RSSR_TO_BE_ARBITRATED, + * phydev->advertising should be updated. + */ + if (priv->reg_page == YT8521_RSSR_TO_BE_ARBITRATED) { + linkmode_zero(fiber_supported); + yt8521_prepare_fiber_features(phydev, fiber_supported); + + /* prepare fiber_supported, then setup advertising. */ + if (page == YT8521_RSSR_FIBER_SPACE) { + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + fiber_supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + fiber_supported); + linkmode_and(phydev->advertising, + priv->combo_advertising, fiber_supported); + } else { + /* ETHTOOL_LINK_MODE_Autoneg_BIT is also used in utp */ + linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + fiber_supported); + linkmode_andnot(phydev->advertising, + priv->combo_advertising, + fiber_supported); + } + } + + if (page == YT8521_RSSR_FIBER_SPACE) + ret = yt8521_fiber_config_aneg(phydev); + else + ret = ytphy_utp_config_aneg(phydev, false); + +err_restore_page: + return phy_restore_page(phydev, old_page, ret); +} + +/** + * yt8521_config_aneg() - change reg space then call yt8521_config_aneg_paged + * @phydev: a pointer to a &struct phy_device + * + * returns 0 or negative errno code + */ +static int yt8521_config_aneg(struct phy_device *phydev) +{ + struct yt8521_priv *priv = phydev->priv; + int ret; + + if (priv->reg_page != YT8521_RSSR_TO_BE_ARBITRATED) { + ret = yt8521_config_aneg_paged(phydev, priv->reg_page); + if (ret < 0) + return ret; + } else { + /* If reg_page is YT8521_RSSR_TO_BE_ARBITRATED, + * phydev->advertising need to be saved at first run. 
+ * Because it contains the advertising which supported by both + * mac and yt8521(utp and fiber). + */ + if (linkmode_empty(priv->combo_advertising)) { + linkmode_copy(priv->combo_advertising, + phydev->advertising); + } + + ret = yt8521_config_aneg_paged(phydev, YT8521_RSSR_UTP_SPACE); + if (ret < 0) + return ret; + + ret = yt8521_config_aneg_paged(phydev, YT8521_RSSR_FIBER_SPACE); + if (ret < 0) + return ret; + + /* we don't known which will be link, so restore + * phydev->advertising as default value. + */ + linkmode_copy(phydev->advertising, priv->combo_advertising); + } + return 0; +} + +/** + * yt8521_aneg_done_paged() - determines the auto negotiation result of one + * page. + * @phydev: a pointer to a &struct phy_device + * @page: The reg page(YT8521_RSSR_FIBER_SPACE/YT8521_RSSR_UTP_SPACE) to + * operate. + * + * returns 0(no link)or 1(fiber or utp link) or negative errno code + */ +static int yt8521_aneg_done_paged(struct phy_device *phydev, int page) +{ + int old_page; + int ret = 0; + int link; + + old_page = phy_select_page(phydev, page & YT8521_RSSR_SPACE_MASK); + if (old_page < 0) + goto err_restore_page; + + ret = __phy_read(phydev, YTPHY_SPECIFIC_STATUS_REG); + if (ret < 0) + goto err_restore_page; + + link = !!(ret & YTPHY_SSR_LINK); + ret = link; + +err_restore_page: + return phy_restore_page(phydev, old_page, ret); +} + +/** + * yt8521_aneg_done() - determines the auto negotiation result + * @phydev: a pointer to a &struct phy_device + * + * returns 0(no link)or 1(fiber or utp link) or negative errno code + */ +static int yt8521_aneg_done(struct phy_device *phydev) +{ + struct yt8521_priv *priv = phydev->priv; + int link_fiber = 0; + int link_utp; + int link; + + if (priv->reg_page != YT8521_RSSR_TO_BE_ARBITRATED) { + link = yt8521_aneg_done_paged(phydev, priv->reg_page); + } else { + link_utp = yt8521_aneg_done_paged(phydev, + YT8521_RSSR_UTP_SPACE); + if (link_utp < 0) + return link_utp; + + if (!link_utp) { + link_fiber = yt8521_aneg_done_paged(phydev, + YT8521_RSSR_FIBER_SPACE); + if (link_fiber < 0) + return link_fiber; + } + link = link_fiber || link_utp; + phydev_info(phydev, "%s, link_fiber: %d, link_utp: %d\n", + __func__, link_fiber, link_utp); + } + + return link; +} + +/** + * ytphy_utp_read_abilities - read PHY abilities from Clause 22 registers + * @phydev: target phy_device struct + * + * NOTE: Reads the PHY's abilities and populates + * phydev->supported accordingly. + * The caller must have taken the MDIO bus lock. 
+ * + * returns 0 or negative errno code + */ +static int ytphy_utp_read_abilities(struct phy_device *phydev) +{ + int val; + + linkmode_set_bit_array(phy_basic_ports_array, + ARRAY_SIZE(phy_basic_ports_array), + phydev->supported); + + val = __phy_read(phydev, MII_BMSR); + if (val < 0) + return val; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported, + val & BMSR_ANEGCAPABLE); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, phydev->supported, + val & BMSR_100FULL); + linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, phydev->supported, + val & BMSR_100HALF); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, phydev->supported, + val & BMSR_10FULL); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, phydev->supported, + val & BMSR_10HALF); + + if (val & BMSR_ESTATEN) { + val = __phy_read(phydev, MII_ESTATUS); + if (val < 0) + return val; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->supported, val & ESTATUS_1000_TFULL); + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + phydev->supported, val & ESTATUS_1000_THALF); + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + phydev->supported, val & ESTATUS_1000_XFULL); + } + + return 0; +} + +/** + * yt8521_get_features_paged() - read supported link modes for one page + * @phydev: a pointer to a &struct phy_device + * @page: The reg page(YT8521_RSSR_FIBER_SPACE/YT8521_RSSR_UTP_SPACE) to + * operate. + * + * returns 0 or negative errno code + */ +static int yt8521_get_features_paged(struct phy_device *phydev, int page) +{ + int old_page; + int ret = 0; + + page &= YT8521_RSSR_SPACE_MASK; + old_page = phy_select_page(phydev, page); + if (old_page < 0) + goto err_restore_page; + + if (page == YT8521_RSSR_FIBER_SPACE) { + linkmode_zero(phydev->supported); + yt8521_prepare_fiber_features(phydev, phydev->supported); + } else { + ret = ytphy_utp_read_abilities(phydev); + if (ret < 0) + goto err_restore_page; + } + +err_restore_page: + return phy_restore_page(phydev, old_page, ret); +} + +/** + * yt8521_get_features - switch reg space then call yt8521_get_features_paged + * @phydev: target phy_device struct + * + * returns 0 or negative errno code + */ +static int yt8521_get_features(struct phy_device *phydev) +{ + struct yt8521_priv *priv = phydev->priv; + int ret; + + if (priv->reg_page != YT8521_RSSR_TO_BE_ARBITRATED) { + ret = yt8521_get_features_paged(phydev, priv->reg_page); + } else { + ret = yt8521_get_features_paged(phydev, + YT8521_RSSR_UTP_SPACE); + if (ret < 0) + return ret; + + /* add fiber's features to phydev->supported */ + yt8521_prepare_fiber_features(phydev, phydev->supported); + } + return ret; +} + static struct phy_driver motorcomm_phy_drvs[] = { { PHY_ID_MATCH_EXACT(PHY_ID_YT8511), @@ -121,16 +1733,35 @@ static struct phy_driver motorcomm_phy_drvs[] = { .read_page = yt8511_read_page, .write_page = yt8511_write_page, }, + { + PHY_ID_MATCH_EXACT(PHY_ID_YT8521), + .name = "YT8521 Gigabit Ethernet", + .get_features = yt8521_get_features, + .probe = yt8521_probe, + .read_page = yt8521_read_page, + .write_page = yt8521_write_page, + .get_wol = ytphy_get_wol, + .set_wol = ytphy_set_wol, + .config_aneg = yt8521_config_aneg, + .aneg_done = yt8521_aneg_done, + .config_init = yt8521_config_init, + .read_status = yt8521_read_status, + .soft_reset = yt8521_soft_reset, + .suspend = yt8521_suspend, + .resume = yt8521_resume, + }, }; module_phy_driver(motorcomm_phy_drvs); -MODULE_DESCRIPTION("Motorcomm PHY driver"); +MODULE_DESCRIPTION("Motorcomm 8511/8521 PHY driver"); 
MODULE_AUTHOR("Peter Geis"); +MODULE_AUTHOR("Frank"); MODULE_LICENSE("GPL"); static const struct mdio_device_id __maybe_unused motorcomm_tbl[] = { { PHY_ID_MATCH_EXACT(PHY_ID_YT8511) }, + { PHY_ID_MATCH_EXACT(PHY_ID_YT8521) }, { /* sentinal */ } }; -- cgit v1.2.3 From fd325cd648f15eb9a8b32a68de3bafc72bcfe753 Mon Sep 17 00:00:00 2001 From: Long Li Date: Thu, 3 Nov 2022 12:16:25 -0700 Subject: net: mana: Move header files to a common location In preparation to add MANA RDMA driver, move all the required header files to a common location for use by both Ethernet and RDMA drivers. Reviewed-by: Dexuan Cui Signed-off-by: Long Li Link: https://lore.kernel.org/r/1667502990-2559-8-git-send-email-longli@linuxonhyperv.com Acked-by: Haiyang Zhang Signed-off-by: Leon Romanovsky --- MAINTAINERS | 1 + drivers/net/ethernet/microsoft/mana/gdma.h | 689 --------------------- drivers/net/ethernet/microsoft/mana/gdma_main.c | 2 +- drivers/net/ethernet/microsoft/mana/hw_channel.c | 4 +- drivers/net/ethernet/microsoft/mana/hw_channel.h | 195 ------ drivers/net/ethernet/microsoft/mana/mana.h | 650 ------------------- .../net/ethernet/microsoft/mana/mana_auxiliary.h | 10 - drivers/net/ethernet/microsoft/mana/mana_bpf.c | 2 +- drivers/net/ethernet/microsoft/mana/mana_en.c | 4 +- drivers/net/ethernet/microsoft/mana/mana_ethtool.c | 2 +- drivers/net/ethernet/microsoft/mana/shm_channel.c | 2 +- drivers/net/ethernet/microsoft/mana/shm_channel.h | 21 - include/net/mana/gdma.h | 689 +++++++++++++++++++++ include/net/mana/hw_channel.h | 195 ++++++ include/net/mana/mana.h | 650 +++++++++++++++++++ include/net/mana/mana_auxiliary.h | 10 + include/net/mana/shm_channel.h | 21 + 17 files changed, 1574 insertions(+), 1573 deletions(-) delete mode 100644 drivers/net/ethernet/microsoft/mana/gdma.h delete mode 100644 drivers/net/ethernet/microsoft/mana/hw_channel.h delete mode 100644 drivers/net/ethernet/microsoft/mana/mana.h delete mode 100644 drivers/net/ethernet/microsoft/mana/mana_auxiliary.h delete mode 100644 drivers/net/ethernet/microsoft/mana/shm_channel.h create mode 100644 include/net/mana/gdma.h create mode 100644 include/net/mana/hw_channel.h create mode 100644 include/net/mana/mana.h create mode 100644 include/net/mana/mana_auxiliary.h create mode 100644 include/net/mana/shm_channel.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 379945f82a64..441a65d41eb4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9541,6 +9541,7 @@ F: include/asm-generic/hyperv-tlfs.h F: include/asm-generic/mshyperv.h F: include/clocksource/hyperv_timer.h F: include/linux/hyperv.h +F: include/net/mana F: include/uapi/linux/hyperv.h F: net/vmw_vsock/hyperv_transport.c F: tools/hv/ diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h deleted file mode 100644 index 72eaec2470c0..000000000000 --- a/drivers/net/ethernet/microsoft/mana/gdma.h +++ /dev/null @@ -1,689 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ -/* Copyright (c) 2021, Microsoft Corporation. */ - -#ifndef _GDMA_H -#define _GDMA_H - -#include -#include - -#include "shm_channel.h" - -/* Structures labeled with "HW DATA" are exchanged with the hardware. All of - * them are naturally aligned and hence don't need __packed. 
- */ - -enum gdma_request_type { - GDMA_VERIFY_VF_DRIVER_VERSION = 1, - GDMA_QUERY_MAX_RESOURCES = 2, - GDMA_LIST_DEVICES = 3, - GDMA_REGISTER_DEVICE = 4, - GDMA_DEREGISTER_DEVICE = 5, - GDMA_GENERATE_TEST_EQE = 10, - GDMA_CREATE_QUEUE = 12, - GDMA_DISABLE_QUEUE = 13, - GDMA_CREATE_DMA_REGION = 25, - GDMA_DMA_REGION_ADD_PAGES = 26, - GDMA_DESTROY_DMA_REGION = 27, -}; - -enum gdma_queue_type { - GDMA_INVALID_QUEUE, - GDMA_SQ, - GDMA_RQ, - GDMA_CQ, - GDMA_EQ, -}; - -enum gdma_work_request_flags { - GDMA_WR_NONE = 0, - GDMA_WR_OOB_IN_SGL = BIT(0), - GDMA_WR_PAD_BY_SGE0 = BIT(1), -}; - -enum gdma_eqe_type { - GDMA_EQE_COMPLETION = 3, - GDMA_EQE_TEST_EVENT = 64, - GDMA_EQE_HWC_INIT_EQ_ID_DB = 129, - GDMA_EQE_HWC_INIT_DATA = 130, - GDMA_EQE_HWC_INIT_DONE = 131, -}; - -enum { - GDMA_DEVICE_NONE = 0, - GDMA_DEVICE_HWC = 1, - GDMA_DEVICE_MANA = 2, -}; - -struct gdma_resource { - /* Protect the bitmap */ - spinlock_t lock; - - /* The bitmap size in bits. */ - u32 size; - - /* The bitmap tracks the resources. */ - unsigned long *map; -}; - -union gdma_doorbell_entry { - u64 as_uint64; - - struct { - u64 id : 24; - u64 reserved : 8; - u64 tail_ptr : 31; - u64 arm : 1; - } cq; - - struct { - u64 id : 24; - u64 wqe_cnt : 8; - u64 tail_ptr : 32; - } rq; - - struct { - u64 id : 24; - u64 reserved : 8; - u64 tail_ptr : 32; - } sq; - - struct { - u64 id : 16; - u64 reserved : 16; - u64 tail_ptr : 31; - u64 arm : 1; - } eq; -}; /* HW DATA */ - -struct gdma_msg_hdr { - u32 hdr_type; - u32 msg_type; - u16 msg_version; - u16 hwc_msg_id; - u32 msg_size; -}; /* HW DATA */ - -struct gdma_dev_id { - union { - struct { - u16 type; - u16 instance; - }; - - u32 as_uint32; - }; -}; /* HW DATA */ - -struct gdma_req_hdr { - struct gdma_msg_hdr req; - struct gdma_msg_hdr resp; /* The expected response */ - struct gdma_dev_id dev_id; - u32 activity_id; -}; /* HW DATA */ - -struct gdma_resp_hdr { - struct gdma_msg_hdr response; - struct gdma_dev_id dev_id; - u32 activity_id; - u32 status; - u32 reserved; -}; /* HW DATA */ - -struct gdma_general_req { - struct gdma_req_hdr hdr; -}; /* HW DATA */ - -#define GDMA_MESSAGE_V1 1 - -struct gdma_general_resp { - struct gdma_resp_hdr hdr; -}; /* HW DATA */ - -#define GDMA_STANDARD_HEADER_TYPE 0 - -static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code, - u32 req_size, u32 resp_size) -{ - hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE; - hdr->req.msg_type = code; - hdr->req.msg_version = GDMA_MESSAGE_V1; - hdr->req.msg_size = req_size; - - hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE; - hdr->resp.msg_type = code; - hdr->resp.msg_version = GDMA_MESSAGE_V1; - hdr->resp.msg_size = resp_size; -} - -/* The 16-byte struct is part of the GDMA work queue entry (WQE). 
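Since the move makes mana_gd_init_req_hdr() part of the shared <net/mana/gdma.h> API, a short usage fragment (the request/response types appear later in this header; surrounding code and error handling are omitted):

	struct gdma_create_queue_req req = {};
	struct gdma_create_queue_resp resp = {};

	/* fill both directions of the header: same message code,
	 * independent sizes for the request and the expected response
	 */
	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_QUEUE,
			     sizeof(req), sizeof(resp));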
*/ -struct gdma_sge { - u64 address; - u32 mem_key; - u32 size; -}; /* HW DATA */ - -struct gdma_wqe_request { - struct gdma_sge *sgl; - u32 num_sge; - - u32 inline_oob_size; - const void *inline_oob_data; - - u32 flags; - u32 client_data_unit; -}; - -enum gdma_page_type { - GDMA_PAGE_TYPE_4K, -}; - -#define GDMA_INVALID_DMA_REGION 0 - -struct gdma_mem_info { - struct device *dev; - - dma_addr_t dma_handle; - void *virt_addr; - u64 length; - - /* Allocated by the PF driver */ - u64 gdma_region; -}; - -#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8 - -struct gdma_dev { - struct gdma_context *gdma_context; - - struct gdma_dev_id dev_id; - - u32 pdid; - u32 doorbell; - u32 gpa_mkey; - - /* GDMA driver specific pointer */ - void *driver_data; - - struct auxiliary_device *adev; -}; - -#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE - -#define GDMA_CQE_SIZE 64 -#define GDMA_EQE_SIZE 16 -#define GDMA_MAX_SQE_SIZE 512 -#define GDMA_MAX_RQE_SIZE 256 - -#define GDMA_COMP_DATA_SIZE 0x3C - -#define GDMA_EVENT_DATA_SIZE 0xC - -/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */ -#define GDMA_WQE_BU_SIZE 32 - -#define INVALID_PDID UINT_MAX -#define INVALID_DOORBELL UINT_MAX -#define INVALID_MEM_KEY UINT_MAX -#define INVALID_QUEUE_ID UINT_MAX -#define INVALID_PCI_MSIX_INDEX UINT_MAX - -struct gdma_comp { - u32 cqe_data[GDMA_COMP_DATA_SIZE / 4]; - u32 wq_num; - bool is_sq; -}; - -struct gdma_event { - u32 details[GDMA_EVENT_DATA_SIZE / 4]; - u8 type; -}; - -struct gdma_queue; - -struct mana_eq { - struct gdma_queue *eq; -}; - -typedef void gdma_eq_callback(void *context, struct gdma_queue *q, - struct gdma_event *e); - -typedef void gdma_cq_callback(void *context, struct gdma_queue *q); - -/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE - * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the - * driver increases the 'head' in BUs rather than in bytes, and notifies - * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track - * the HW head, and increases the 'head' by 1 for every processed EQE/CQE. - * - * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is - * processed, the driver increases the 'tail' to indicate that WQEs have - * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ. - * - * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures - * that the EQ/CQ is big enough so they can't overflow, and the driver uses - * the owner bits mechanism to detect if the queue has become empty. - */ -struct gdma_queue { - struct gdma_dev *gdma_dev; - - enum gdma_queue_type type; - u32 id; - - struct gdma_mem_info mem_info; - - void *queue_mem_ptr; - u32 queue_size; - - bool monitor_avl_buf; - - u32 head; - u32 tail; - - /* Extra fields specific to EQ/CQ. */ - union { - struct { - bool disable_needed; - - gdma_eq_callback *callback; - void *context; - - unsigned int msix_index; - - u32 log2_throttle_limit; - } eq; - - struct { - gdma_cq_callback *callback; - void *context; - - struct gdma_queue *parent; /* For CQ/EQ relationship */ - } cq; - }; -}; - -struct gdma_queue_spec { - enum gdma_queue_type type; - bool monitor_avl_buf; - unsigned int queue_size; - - /* Extra fields specific to EQ/CQ. 
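A worked instance of the producer-index rule described above:

	/* With GDMA_WQE_BU_SIZE = 32, posting a 96-byte SQ WQE advances
	 * 'head' by 96 / 32 = 3 basic units, not by 96 bytes; EQ/CQ
	 * heads instead advance by 1 per processed EQE/CQE.
	 */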
*/ - union { - struct { - gdma_eq_callback *callback; - void *context; - - unsigned long log2_throttle_limit; - } eq; - - struct { - gdma_cq_callback *callback; - void *context; - - struct gdma_queue *parent_eq; - - } cq; - }; -}; - -struct gdma_irq_context { - void (*handler)(void *arg); - void *arg; -}; - -struct gdma_context { - struct device *dev; - - /* Per-vPort max number of queues */ - unsigned int max_num_queues; - unsigned int max_num_msix; - unsigned int num_msix_usable; - struct gdma_resource msix_resource; - struct gdma_irq_context *irq_contexts; - - /* This maps a CQ index to the queue structure. */ - unsigned int max_num_cqs; - struct gdma_queue **cq_table; - - /* Protect eq_test_event and test_event_eq_id */ - struct mutex eq_test_event_mutex; - struct completion eq_test_event; - u32 test_event_eq_id; - - bool is_pf; - phys_addr_t bar0_pa; - void __iomem *bar0_va; - void __iomem *shm_base; - void __iomem *db_page_base; - phys_addr_t phys_db_page_base; - u32 db_page_size; - - /* Shared memory chanenl (used to bootstrap HWC) */ - struct shm_channel shm_channel; - - /* Hardware communication channel (HWC) */ - struct gdma_dev hwc; - - /* Azure network adapter */ - struct gdma_dev mana; -}; - -#define MAX_NUM_GDMA_DEVICES 4 - -static inline bool mana_gd_is_mana(struct gdma_dev *gd) -{ - return gd->dev_id.type == GDMA_DEVICE_MANA; -} - -static inline bool mana_gd_is_hwc(struct gdma_dev *gd) -{ - return gd->dev_id.type == GDMA_DEVICE_HWC; -} - -u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset); -u32 mana_gd_wq_avail_space(struct gdma_queue *wq); - -int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq); - -int mana_gd_create_hwc_queue(struct gdma_dev *gd, - const struct gdma_queue_spec *spec, - struct gdma_queue **queue_ptr); - -int mana_gd_create_mana_eq(struct gdma_dev *gd, - const struct gdma_queue_spec *spec, - struct gdma_queue **queue_ptr); - -int mana_gd_create_mana_wq_cq(struct gdma_dev *gd, - const struct gdma_queue_spec *spec, - struct gdma_queue **queue_ptr); - -void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue); - -int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe); - -void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit); - -struct gdma_wqe { - u32 reserved :24; - u32 last_vbytes :8; - - union { - u32 flags; - - struct { - u32 num_sge :8; - u32 inline_oob_size_div4:3; - u32 client_oob_in_sgl :1; - u32 reserved1 :4; - u32 client_data_unit :14; - u32 reserved2 :2; - }; - }; -}; /* HW DATA */ - -#define INLINE_OOB_SMALL_SIZE 8 -#define INLINE_OOB_LARGE_SIZE 24 - -#define MAX_TX_WQE_SIZE 512 -#define MAX_RX_WQE_SIZE 256 - -struct gdma_cqe { - u32 cqe_data[GDMA_COMP_DATA_SIZE / 4]; - - union { - u32 as_uint32; - - struct { - u32 wq_num : 24; - u32 is_sq : 1; - u32 reserved : 4; - u32 owner_bits : 3; - }; - } cqe_info; -}; /* HW DATA */ - -#define GDMA_CQE_OWNER_BITS 3 - -#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1) - -#define SET_ARM_BIT 1 - -#define GDMA_EQE_OWNER_BITS 3 - -union gdma_eqe_info { - u32 as_uint32; - - struct { - u32 type : 8; - u32 reserved1 : 8; - u32 client_id : 2; - u32 reserved2 : 11; - u32 owner_bits : 3; - }; -}; /* HW DATA */ - -#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1) -#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries)) - -struct gdma_eqe { - u32 details[GDMA_EVENT_DATA_SIZE / 4]; - u32 eqe_info; -}; /* HW DATA */ - -#define GDMA_REG_DB_PAGE_OFFSET 8 -#define GDMA_REG_DB_PAGE_SIZE 0x10 -#define 
GDMA_REG_SHM_OFFSET 0x18 - -#define GDMA_PF_REG_DB_PAGE_SIZE 0xD0 -#define GDMA_PF_REG_DB_PAGE_OFF 0xC8 -#define GDMA_PF_REG_SHM_OFF 0x70 - -#define GDMA_SRIOV_REG_CFG_BASE_OFF 0x108 - -#define MANA_PF_DEVICE_ID 0x00B9 -#define MANA_VF_DEVICE_ID 0x00BA - -struct gdma_posted_wqe_info { - u32 wqe_size_in_bu; -}; - -/* GDMA_GENERATE_TEST_EQE */ -struct gdma_generate_test_event_req { - struct gdma_req_hdr hdr; - u32 queue_index; -}; /* HW DATA */ - -/* GDMA_VERIFY_VF_DRIVER_VERSION */ -enum { - GDMA_PROTOCOL_V1 = 1, - GDMA_PROTOCOL_FIRST = GDMA_PROTOCOL_V1, - GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1, -}; - -#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0) - -#define GDMA_DRV_CAP_FLAGS1 GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT - -#define GDMA_DRV_CAP_FLAGS2 0 - -#define GDMA_DRV_CAP_FLAGS3 0 - -#define GDMA_DRV_CAP_FLAGS4 0 - -struct gdma_verify_ver_req { - struct gdma_req_hdr hdr; - - /* Mandatory fields required for protocol establishment */ - u64 protocol_ver_min; - u64 protocol_ver_max; - - /* Gdma Driver Capability Flags */ - u64 gd_drv_cap_flags1; - u64 gd_drv_cap_flags2; - u64 gd_drv_cap_flags3; - u64 gd_drv_cap_flags4; - - /* Advisory fields */ - u64 drv_ver; - u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */ - u32 reserved; - u32 os_ver_major; - u32 os_ver_minor; - u32 os_ver_build; - u32 os_ver_platform; - u64 reserved_2; - u8 os_ver_str1[128]; - u8 os_ver_str2[128]; - u8 os_ver_str3[128]; - u8 os_ver_str4[128]; -}; /* HW DATA */ - -struct gdma_verify_ver_resp { - struct gdma_resp_hdr hdr; - u64 gdma_protocol_ver; - u64 pf_cap_flags1; - u64 pf_cap_flags2; - u64 pf_cap_flags3; - u64 pf_cap_flags4; -}; /* HW DATA */ - -/* GDMA_QUERY_MAX_RESOURCES */ -struct gdma_query_max_resources_resp { - struct gdma_resp_hdr hdr; - u32 status; - u32 max_sq; - u32 max_rq; - u32 max_cq; - u32 max_eq; - u32 max_db; - u32 max_mst; - u32 max_cq_mod_ctx; - u32 max_mod_cq; - u32 max_msix; -}; /* HW DATA */ - -/* GDMA_LIST_DEVICES */ -struct gdma_list_devices_resp { - struct gdma_resp_hdr hdr; - u32 num_of_devs; - u32 reserved; - struct gdma_dev_id devs[64]; -}; /* HW DATA */ - -/* GDMA_REGISTER_DEVICE */ -struct gdma_register_device_resp { - struct gdma_resp_hdr hdr; - u32 pdid; - u32 gpa_mkey; - u32 db_id; -}; /* HW DATA */ - -/* GDMA_CREATE_QUEUE */ -struct gdma_create_queue_req { - struct gdma_req_hdr hdr; - u32 type; - u32 reserved1; - u32 pdid; - u32 doolbell_id; - u64 gdma_region; - u32 reserved2; - u32 queue_size; - u32 log2_throttle_limit; - u32 eq_pci_msix_index; - u32 cq_mod_ctx_id; - u32 cq_parent_eq_id; - u8 rq_drop_on_overrun; - u8 rq_err_on_wqe_overflow; - u8 rq_chain_rec_wqes; - u8 sq_hw_db; - u32 reserved3; -}; /* HW DATA */ - -struct gdma_create_queue_resp { - struct gdma_resp_hdr hdr; - u32 queue_index; -}; /* HW DATA */ - -/* GDMA_DISABLE_QUEUE */ -struct gdma_disable_queue_req { - struct gdma_req_hdr hdr; - u32 type; - u32 queue_index; - u32 alloc_res_id_on_creation; -}; /* HW DATA */ - -/* GDMA_CREATE_DMA_REGION */ -struct gdma_create_dma_region_req { - struct gdma_req_hdr hdr; - - /* The total size of the DMA region */ - u64 length; - - /* The offset in the first page */ - u32 offset_in_page; - - /* enum gdma_page_type */ - u32 gdma_page_type; - - /* The total number of pages */ - u32 page_count; - - /* If page_addr_list_len is smaller than page_count, - * the remaining page addresses will be added via the - * message GDMA_DMA_REGION_ADD_PAGES. 
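A worked example of the split described in this comment (sizes assumed): registering an 8 MiB buffer with 4 KiB pages gives page_count = 2048; if the initial request only has room for, say, 500 u64 entries in page_addr_list, the driver would set page_addr_list_len = 500 and deliver the remaining 1548 addresses via GDMA_DMA_REGION_ADD_PAGES messages.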
- */ - u32 page_addr_list_len; - u64 page_addr_list[]; -}; /* HW DATA */ - -struct gdma_create_dma_region_resp { - struct gdma_resp_hdr hdr; - u64 gdma_region; -}; /* HW DATA */ - -/* GDMA_DMA_REGION_ADD_PAGES */ -struct gdma_dma_region_add_pages_req { - struct gdma_req_hdr hdr; - - u64 gdma_region; - - u32 page_addr_list_len; - u32 reserved3; - - u64 page_addr_list[]; -}; /* HW DATA */ - -/* GDMA_DESTROY_DMA_REGION */ -struct gdma_destroy_dma_region_req { - struct gdma_req_hdr hdr; - - u64 gdma_region; -}; /* HW DATA */ - -int mana_gd_verify_vf_version(struct pci_dev *pdev); - -int mana_gd_register_device(struct gdma_dev *gd); -int mana_gd_deregister_device(struct gdma_dev *gd); - -int mana_gd_post_work_request(struct gdma_queue *wq, - const struct gdma_wqe_request *wqe_req, - struct gdma_posted_wqe_info *wqe_info); - -int mana_gd_post_and_ring(struct gdma_queue *queue, - const struct gdma_wqe_request *wqe, - struct gdma_posted_wqe_info *wqe_info); - -int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r); -void mana_gd_free_res_map(struct gdma_resource *r); - -void mana_gd_wq_ring_doorbell(struct gdma_context *gc, - struct gdma_queue *queue); - -int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length, - struct gdma_mem_info *gmi); - -void mana_gd_free_memory(struct gdma_mem_info *gmi); - -int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req, - u32 resp_len, void *resp); -#endif /* _GDMA_H */ diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index f0e22954d5c0..69795bc679e7 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -6,7 +6,7 @@ #include #include -#include "mana.h" +#include static u32 mana_gd_r32(struct gdma_context *g, u64 offset) { diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index 543a5d5c304f..76829ab43d40 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* Copyright (c) 2021, Microsoft Corporation. */ -#include "gdma.h" -#include "hw_channel.h" +#include +#include static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id) { diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.h b/drivers/net/ethernet/microsoft/mana/hw_channel.h deleted file mode 100644 index 6a757a6e2732..000000000000 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.h +++ /dev/null @@ -1,195 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ -/* Copyright (c) 2021, Microsoft Corporation. */ - -#ifndef _HW_CHANNEL_H -#define _HW_CHANNEL_H - -#define DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ 4 - -#define HW_CHANNEL_MAX_REQUEST_SIZE 0x1000 -#define HW_CHANNEL_MAX_RESPONSE_SIZE 0x1000 - -#define HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH 1 - -#define HWC_INIT_DATA_CQID 1 -#define HWC_INIT_DATA_RQID 2 -#define HWC_INIT_DATA_SQID 3 -#define HWC_INIT_DATA_QUEUE_DEPTH 4 -#define HWC_INIT_DATA_MAX_REQUEST 5 -#define HWC_INIT_DATA_MAX_RESPONSE 6 -#define HWC_INIT_DATA_MAX_NUM_CQS 7 -#define HWC_INIT_DATA_PDID 8 -#define HWC_INIT_DATA_GPA_MKEY 9 -#define HWC_INIT_DATA_PF_DEST_RQ_ID 10 -#define HWC_INIT_DATA_PF_DEST_CQ_ID 11 - -/* Structures labeled with "HW DATA" are exchanged with the hardware. All of - * them are naturally aligned and hence don't need __packed. 
- */ - -union hwc_init_eq_id_db { - u32 as_uint32; - - struct { - u32 eq_id : 16; - u32 doorbell : 16; - }; -}; /* HW DATA */ - -union hwc_init_type_data { - u32 as_uint32; - - struct { - u32 value : 24; - u32 type : 8; - }; -}; /* HW DATA */ - -struct hwc_rx_oob { - u32 type : 6; - u32 eom : 1; - u32 som : 1; - u32 vendor_err : 8; - u32 reserved1 : 16; - - u32 src_virt_wq : 24; - u32 src_vfid : 8; - - u32 reserved2; - - union { - u32 wqe_addr_low; - u32 wqe_offset; - }; - - u32 wqe_addr_high; - - u32 client_data_unit : 14; - u32 reserved3 : 18; - - u32 tx_oob_data_size; - - u32 chunk_offset : 21; - u32 reserved4 : 11; -}; /* HW DATA */ - -struct hwc_tx_oob { - u32 reserved1; - - u32 reserved2; - - u32 vrq_id : 24; - u32 dest_vfid : 8; - - u32 vrcq_id : 24; - u32 reserved3 : 8; - - u32 vscq_id : 24; - u32 loopback : 1; - u32 lso_override: 1; - u32 dest_pf : 1; - u32 reserved4 : 5; - - u32 vsq_id : 24; - u32 reserved5 : 8; -}; /* HW DATA */ - -struct hwc_work_request { - void *buf_va; - void *buf_sge_addr; - u32 buf_len; - u32 msg_size; - - struct gdma_wqe_request wqe_req; - struct hwc_tx_oob tx_oob; - - struct gdma_sge sge; -}; - -/* hwc_dma_buf represents the array of in-flight WQEs. - * mem_info as know as the GDMA mapped memory is partitioned and used by - * in-flight WQEs. - * The number of WQEs is determined by the number of in-flight messages. - */ -struct hwc_dma_buf { - struct gdma_mem_info mem_info; - - u32 gpa_mkey; - - u32 num_reqs; - struct hwc_work_request reqs[]; -}; - -typedef void hwc_rx_event_handler_t(void *ctx, u32 gdma_rxq_id, - const struct hwc_rx_oob *rx_oob); - -typedef void hwc_tx_event_handler_t(void *ctx, u32 gdma_txq_id, - const struct hwc_rx_oob *rx_oob); - -struct hwc_cq { - struct hw_channel_context *hwc; - - struct gdma_queue *gdma_cq; - struct gdma_queue *gdma_eq; - struct gdma_comp *comp_buf; - u16 queue_depth; - - hwc_rx_event_handler_t *rx_event_handler; - void *rx_event_ctx; - - hwc_tx_event_handler_t *tx_event_handler; - void *tx_event_ctx; -}; - -struct hwc_wq { - struct hw_channel_context *hwc; - - struct gdma_queue *gdma_wq; - struct hwc_dma_buf *msg_buf; - u16 queue_depth; - - struct hwc_cq *hwc_cq; -}; - -struct hwc_caller_ctx { - struct completion comp_event; - void *output_buf; - u32 output_buflen; - - u32 error; /* Linux error code */ - u32 status_code; -}; - -struct hw_channel_context { - struct gdma_dev *gdma_dev; - struct device *dev; - - u16 num_inflight_msg; - u32 max_req_msg_size; - - u16 hwc_init_q_depth_max; - u32 hwc_init_max_req_msg_size; - u32 hwc_init_max_resp_msg_size; - - struct completion hwc_init_eqe_comp; - - struct hwc_wq *rxq; - struct hwc_wq *txq; - struct hwc_cq *cq; - - struct semaphore sema; - struct gdma_resource inflight_msg_res; - - u32 pf_dest_vrq_id; - u32 pf_dest_vrcq_id; - - struct hwc_caller_ctx *caller_ctx; -}; - -int mana_hwc_create_channel(struct gdma_context *gc); -void mana_hwc_destroy_channel(struct gdma_context *gc); - -int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len, - const void *req, u32 resp_len, void *resp); - -#endif /* _HW_CHANNEL_H */ diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h deleted file mode 100644 index 6e9e86fb4c02..000000000000 --- a/drivers/net/ethernet/microsoft/mana/mana.h +++ /dev/null @@ -1,650 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ -/* Copyright (c) 2021, Microsoft Corporation. 
*/ - -#ifndef _MANA_H -#define _MANA_H - -#include "gdma.h" -#include "hw_channel.h" - -/* Microsoft Azure Network Adapter (MANA)'s definitions - * - * Structures labeled with "HW DATA" are exchanged with the hardware. All of - * them are naturally aligned and hence don't need __packed. - */ - -/* MANA protocol version */ -#define MANA_MAJOR_VERSION 0 -#define MANA_MINOR_VERSION 1 -#define MANA_MICRO_VERSION 1 - -typedef u64 mana_handle_t; -#define INVALID_MANA_HANDLE ((mana_handle_t)-1) - -enum TRI_STATE { - TRI_STATE_UNKNOWN = -1, - TRI_STATE_FALSE = 0, - TRI_STATE_TRUE = 1 -}; - -/* Number of entries for hardware indirection table must be in power of 2 */ -#define MANA_INDIRECT_TABLE_SIZE 64 -#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1) - -/* The Toeplitz hash key's length in bytes: should be multiple of 8 */ -#define MANA_HASH_KEY_SIZE 40 - -#define COMP_ENTRY_SIZE 64 - -#define ADAPTER_MTU_SIZE 1500 -#define MAX_FRAME_SIZE (ADAPTER_MTU_SIZE + 14) - -#define RX_BUFFERS_PER_QUEUE 512 - -#define MAX_SEND_BUFFERS_PER_QUEUE 256 - -#define EQ_SIZE (8 * PAGE_SIZE) -#define LOG2_EQ_THROTTLE 3 - -#define MAX_PORTS_IN_MANA_DEV 256 - -struct mana_stats_rx { - u64 packets; - u64 bytes; - u64 xdp_drop; - u64 xdp_tx; - u64 xdp_redirect; - struct u64_stats_sync syncp; -}; - -struct mana_stats_tx { - u64 packets; - u64 bytes; - u64 xdp_xmit; - struct u64_stats_sync syncp; -}; - -struct mana_txq { - struct gdma_queue *gdma_sq; - - union { - u32 gdma_txq_id; - struct { - u32 reserved1 : 10; - u32 vsq_frame : 14; - u32 reserved2 : 8; - }; - }; - - u16 vp_offset; - - struct net_device *ndev; - - /* The SKBs are sent to the HW and we are waiting for the CQEs. */ - struct sk_buff_head pending_skbs; - struct netdev_queue *net_txq; - - atomic_t pending_sends; - - struct mana_stats_tx stats; -}; - -/* skb data and frags dma mappings */ -struct mana_skb_head { - dma_addr_t dma_handle[MAX_SKB_FRAGS + 1]; - - u32 size[MAX_SKB_FRAGS + 1]; -}; - -#define MANA_HEADROOM sizeof(struct mana_skb_head) - -enum mana_tx_pkt_format { - MANA_SHORT_PKT_FMT = 0, - MANA_LONG_PKT_FMT = 1, -}; - -struct mana_tx_short_oob { - u32 pkt_fmt : 2; - u32 is_outer_ipv4 : 1; - u32 is_outer_ipv6 : 1; - u32 comp_iphdr_csum : 1; - u32 comp_tcp_csum : 1; - u32 comp_udp_csum : 1; - u32 supress_txcqe_gen : 1; - u32 vcq_num : 24; - - u32 trans_off : 10; /* Transport header offset */ - u32 vsq_frame : 14; - u32 short_vp_offset : 8; -}; /* HW DATA */ - -struct mana_tx_long_oob { - u32 is_encap : 1; - u32 inner_is_ipv6 : 1; - u32 inner_tcp_opt : 1; - u32 inject_vlan_pri_tag : 1; - u32 reserved1 : 12; - u32 pcp : 3; /* 802.1Q */ - u32 dei : 1; /* 802.1Q */ - u32 vlan_id : 12; /* 802.1Q */ - - u32 inner_frame_offset : 10; - u32 inner_ip_rel_offset : 6; - u32 long_vp_offset : 12; - u32 reserved2 : 4; - - u32 reserved3; - u32 reserved4; -}; /* HW DATA */ - -struct mana_tx_oob { - struct mana_tx_short_oob s_oob; - struct mana_tx_long_oob l_oob; -}; /* HW DATA */ - -enum mana_cq_type { - MANA_CQ_TYPE_RX, - MANA_CQ_TYPE_TX, -}; - -enum mana_cqe_type { - CQE_INVALID = 0, - CQE_RX_OKAY = 1, - CQE_RX_COALESCED_4 = 2, - CQE_RX_OBJECT_FENCE = 3, - CQE_RX_TRUNCATED = 4, - - CQE_TX_OKAY = 32, - CQE_TX_SA_DROP = 33, - CQE_TX_MTU_DROP = 34, - CQE_TX_INVALID_OOB = 35, - CQE_TX_INVALID_ETH_TYPE = 36, - CQE_TX_HDR_PROCESSING_ERROR = 37, - CQE_TX_VF_DISABLED = 38, - CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39, - CQE_TX_VPORT_DISABLED = 40, - CQE_TX_VLAN_TAGGING_VIOLATION = 41, -}; - -#define MANA_CQE_COMPLETION 1 - -struct mana_cqe_header { - u32 cqe_type : 
6; - u32 client_type : 2; - u32 vendor_err : 24; -}; /* HW DATA */ - -/* NDIS HASH Types */ -#define NDIS_HASH_IPV4 BIT(0) -#define NDIS_HASH_TCP_IPV4 BIT(1) -#define NDIS_HASH_UDP_IPV4 BIT(2) -#define NDIS_HASH_IPV6 BIT(3) -#define NDIS_HASH_TCP_IPV6 BIT(4) -#define NDIS_HASH_UDP_IPV6 BIT(5) -#define NDIS_HASH_IPV6_EX BIT(6) -#define NDIS_HASH_TCP_IPV6_EX BIT(7) -#define NDIS_HASH_UDP_IPV6_EX BIT(8) - -#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX) -#define MANA_HASH_L4 \ - (NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \ - NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX) - -struct mana_rxcomp_perpkt_info { - u32 pkt_len : 16; - u32 reserved1 : 16; - u32 reserved2; - u32 pkt_hash; -}; /* HW DATA */ - -#define MANA_RXCOMP_OOB_NUM_PPI 4 - -/* Receive completion OOB */ -struct mana_rxcomp_oob { - struct mana_cqe_header cqe_hdr; - - u32 rx_vlan_id : 12; - u32 rx_vlantag_present : 1; - u32 rx_outer_iphdr_csum_succeed : 1; - u32 rx_outer_iphdr_csum_fail : 1; - u32 reserved1 : 1; - u32 rx_hashtype : 9; - u32 rx_iphdr_csum_succeed : 1; - u32 rx_iphdr_csum_fail : 1; - u32 rx_tcp_csum_succeed : 1; - u32 rx_tcp_csum_fail : 1; - u32 rx_udp_csum_succeed : 1; - u32 rx_udp_csum_fail : 1; - u32 reserved2 : 1; - - struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI]; - - u32 rx_wqe_offset; -}; /* HW DATA */ - -struct mana_tx_comp_oob { - struct mana_cqe_header cqe_hdr; - - u32 tx_data_offset; - - u32 tx_sgl_offset : 5; - u32 tx_wqe_offset : 27; - - u32 reserved[12]; -}; /* HW DATA */ - -struct mana_rxq; - -#define CQE_POLLING_BUFFER 512 - -struct mana_cq { - struct gdma_queue *gdma_cq; - - /* Cache the CQ id (used to verify if each CQE comes to the right CQ. */ - u32 gdma_id; - - /* Type of the CQ: TX or RX */ - enum mana_cq_type type; - - /* Pointer to the mana_rxq that is pushing RX CQEs to the queue. - * Only and must be non-NULL if type is MANA_CQ_TYPE_RX. - */ - struct mana_rxq *rxq; - - /* Pointer to the mana_txq that is pushing TX CQEs to the queue. - * Only and must be non-NULL if type is MANA_CQ_TYPE_TX. - */ - struct mana_txq *txq; - - /* Buffer which the CQ handler can copy the CQE's into. */ - struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER]; - - /* NAPI data */ - struct napi_struct napi; - int work_done; - int budget; -}; - -#define GDMA_MAX_RQE_SGES 15 - -struct mana_recv_buf_oob { - /* A valid GDMA work request representing the data buffer. */ - struct gdma_wqe_request wqe_req; - - void *buf_va; - dma_addr_t buf_dma_addr; - - /* SGL of the buffer going to be sent has part of the work request. */ - u32 num_sge; - struct gdma_sge sgl[GDMA_MAX_RQE_SGES]; - - /* Required to store the result of mana_gd_post_work_request. - * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the - * work queue when the WQE is consumed. 
- */ - struct gdma_posted_wqe_info wqe_inf; -}; - -struct mana_rxq { - struct gdma_queue *gdma_rq; - /* Cache the gdma receive queue id */ - u32 gdma_id; - - /* Index of RQ in the vPort, not gdma receive queue id */ - u32 rxq_idx; - - u32 datasize; - - mana_handle_t rxobj; - - struct mana_cq rx_cq; - - struct completion fence_event; - - struct net_device *ndev; - - /* Total number of receive buffers to be allocated */ - u32 num_rx_buf; - - u32 buf_index; - - struct mana_stats_rx stats; - - struct bpf_prog __rcu *bpf_prog; - struct xdp_rxq_info xdp_rxq; - struct page *xdp_save_page; - bool xdp_flush; - int xdp_rc; /* XDP redirect return code */ - - /* MUST BE THE LAST MEMBER: - * Each receive buffer has an associated mana_recv_buf_oob. - */ - struct mana_recv_buf_oob rx_oobs[]; -}; - -struct mana_tx_qp { - struct mana_txq txq; - - struct mana_cq tx_cq; - - mana_handle_t tx_object; -}; - -struct mana_ethtool_stats { - u64 stop_queue; - u64 wake_queue; -}; - -struct mana_context { - struct gdma_dev *gdma_dev; - - u16 num_ports; - - struct mana_eq *eqs; - - struct net_device *ports[MAX_PORTS_IN_MANA_DEV]; -}; - -struct mana_port_context { - struct mana_context *ac; - struct net_device *ndev; - - u8 mac_addr[ETH_ALEN]; - - enum TRI_STATE rss_state; - - mana_handle_t default_rxobj; - bool tx_shortform_allowed; - u16 tx_vp_offset; - - struct mana_tx_qp *tx_qp; - - /* Indirection Table for RX & TX. The values are queue indexes */ - u32 indir_table[MANA_INDIRECT_TABLE_SIZE]; - - /* Indirection table containing RxObject Handles */ - mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE]; - - /* Hash key used by the NIC */ - u8 hashkey[MANA_HASH_KEY_SIZE]; - - /* This points to an array of num_queues of RQ pointers. */ - struct mana_rxq **rxqs; - - struct bpf_prog *bpf_prog; - - /* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. 
*/ - unsigned int max_queues; - unsigned int num_queues; - - mana_handle_t port_handle; - mana_handle_t pf_filter_handle; - - /* Mutex for sharing access to vport_use_count */ - struct mutex vport_mutex; - int vport_use_count; - - u16 port_idx; - - bool port_is_up; - bool port_st_save; /* Saved port state */ - - struct mana_ethtool_stats eth_stats; -}; - -int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev); -int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx, - bool update_hash, bool update_tab); - -int mana_alloc_queues(struct net_device *ndev); -int mana_attach(struct net_device *ndev); -int mana_detach(struct net_device *ndev, bool from_close); - -int mana_probe(struct gdma_dev *gd, bool resuming); -void mana_remove(struct gdma_dev *gd, bool suspending); - -void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev); -int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames, - u32 flags); -u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq, - struct xdp_buff *xdp, void *buf_va, uint pkt_len); -struct bpf_prog *mana_xdp_get(struct mana_port_context *apc); -void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog); -int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf); - -extern const struct ethtool_ops mana_ethtool_ops; - -struct mana_obj_spec { - u32 queue_index; - u64 gdma_region; - u32 queue_size; - u32 attached_eq; - u32 modr_ctx_id; -}; - -enum mana_command_code { - MANA_QUERY_DEV_CONFIG = 0x20001, - MANA_QUERY_GF_STAT = 0x20002, - MANA_CONFIG_VPORT_TX = 0x20003, - MANA_CREATE_WQ_OBJ = 0x20004, - MANA_DESTROY_WQ_OBJ = 0x20005, - MANA_FENCE_RQ = 0x20006, - MANA_CONFIG_VPORT_RX = 0x20007, - MANA_QUERY_VPORT_CONFIG = 0x20008, - - /* Privileged commands for the PF mode */ - MANA_REGISTER_FILTER = 0x28000, - MANA_DEREGISTER_FILTER = 0x28001, - MANA_REGISTER_HW_PORT = 0x28003, - MANA_DEREGISTER_HW_PORT = 0x28004, -}; - -/* Query Device Configuration */ -struct mana_query_device_cfg_req { - struct gdma_req_hdr hdr; - - /* MANA Nic Driver Capability flags */ - u64 mn_drv_cap_flags1; - u64 mn_drv_cap_flags2; - u64 mn_drv_cap_flags3; - u64 mn_drv_cap_flags4; - - u32 proto_major_ver; - u32 proto_minor_ver; - u32 proto_micro_ver; - - u32 reserved; -}; /* HW DATA */ - -struct mana_query_device_cfg_resp { - struct gdma_resp_hdr hdr; - - u64 pf_cap_flags1; - u64 pf_cap_flags2; - u64 pf_cap_flags3; - u64 pf_cap_flags4; - - u16 max_num_vports; - u16 reserved; - u32 max_num_eqs; -}; /* HW DATA */ - -/* Query vPort Configuration */ -struct mana_query_vport_cfg_req { - struct gdma_req_hdr hdr; - u32 vport_index; -}; /* HW DATA */ - -struct mana_query_vport_cfg_resp { - struct gdma_resp_hdr hdr; - u32 max_num_sq; - u32 max_num_rq; - u32 num_indirection_ent; - u32 reserved1; - u8 mac_addr[6]; - u8 reserved2[2]; - mana_handle_t vport; -}; /* HW DATA */ - -/* Configure vPort */ -struct mana_config_vport_req { - struct gdma_req_hdr hdr; - mana_handle_t vport; - u32 pdid; - u32 doorbell_pageid; -}; /* HW DATA */ - -struct mana_config_vport_resp { - struct gdma_resp_hdr hdr; - u16 tx_vport_offset; - u8 short_form_allowed; - u8 reserved; -}; /* HW DATA */ - -/* Create WQ Object */ -struct mana_create_wqobj_req { - struct gdma_req_hdr hdr; - mana_handle_t vport; - u32 wq_type; - u32 reserved; - u64 wq_gdma_region; - u64 cq_gdma_region; - u32 wq_size; - u32 cq_size; - u32 cq_moderation_ctx_id; - u32 cq_parent_qid; -}; /* HW DATA */ - -struct mana_create_wqobj_resp { - struct gdma_resp_hdr hdr; - u32 wq_id; - u32 cq_id; - 
mana_handle_t wq_obj; -}; /* HW DATA */ - -/* Destroy WQ Object */ -struct mana_destroy_wqobj_req { - struct gdma_req_hdr hdr; - u32 wq_type; - u32 reserved; - mana_handle_t wq_obj_handle; -}; /* HW DATA */ - -struct mana_destroy_wqobj_resp { - struct gdma_resp_hdr hdr; -}; /* HW DATA */ - -/* Fence RQ */ -struct mana_fence_rq_req { - struct gdma_req_hdr hdr; - mana_handle_t wq_obj_handle; -}; /* HW DATA */ - -struct mana_fence_rq_resp { - struct gdma_resp_hdr hdr; -}; /* HW DATA */ - -/* Configure vPort Rx Steering */ -struct mana_cfg_rx_steer_req { - struct gdma_req_hdr hdr; - mana_handle_t vport; - u16 num_indir_entries; - u16 indir_tab_offset; - u32 rx_enable; - u32 rss_enable; - u8 update_default_rxobj; - u8 update_hashkey; - u8 update_indir_tab; - u8 reserved; - mana_handle_t default_rxobj; - u8 hashkey[MANA_HASH_KEY_SIZE]; -}; /* HW DATA */ - -struct mana_cfg_rx_steer_resp { - struct gdma_resp_hdr hdr; -}; /* HW DATA */ - -/* Register HW vPort */ -struct mana_register_hw_vport_req { - struct gdma_req_hdr hdr; - u16 attached_gfid; - u8 is_pf_default_vport; - u8 reserved1; - u8 allow_all_ether_types; - u8 reserved2; - u8 reserved3; - u8 reserved4; -}; /* HW DATA */ - -struct mana_register_hw_vport_resp { - struct gdma_resp_hdr hdr; - mana_handle_t hw_vport_handle; -}; /* HW DATA */ - -/* Deregister HW vPort */ -struct mana_deregister_hw_vport_req { - struct gdma_req_hdr hdr; - mana_handle_t hw_vport_handle; -}; /* HW DATA */ - -struct mana_deregister_hw_vport_resp { - struct gdma_resp_hdr hdr; -}; /* HW DATA */ - -/* Register filter */ -struct mana_register_filter_req { - struct gdma_req_hdr hdr; - mana_handle_t vport; - u8 mac_addr[6]; - u8 reserved1; - u8 reserved2; - u8 reserved3; - u8 reserved4; - u16 reserved5; - u32 reserved6; - u32 reserved7; - u32 reserved8; -}; /* HW DATA */ - -struct mana_register_filter_resp { - struct gdma_resp_hdr hdr; - mana_handle_t filter_handle; -}; /* HW DATA */ - -/* Deregister filter */ -struct mana_deregister_filter_req { - struct gdma_req_hdr hdr; - mana_handle_t filter_handle; -}; /* HW DATA */ - -struct mana_deregister_filter_resp { - struct gdma_resp_hdr hdr; -}; /* HW DATA */ - -#define MANA_MAX_NUM_QUEUES 64 - -#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1) - -struct mana_tx_package { - struct gdma_wqe_request wqe_req; - struct gdma_sge sgl_array[5]; - struct gdma_sge *sgl_ptr; - - struct mana_tx_oob tx_oob; - - struct gdma_posted_wqe_info wqe_info; -}; - -int mana_create_wq_obj(struct mana_port_context *apc, - mana_handle_t vport, - u32 wq_type, struct mana_obj_spec *wq_spec, - struct mana_obj_spec *cq_spec, - mana_handle_t *wq_obj); - -void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type, - mana_handle_t wq_obj); - -int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id, - u32 doorbell_pg_id); -void mana_uncfg_vport(struct mana_port_context *apc); -#endif /* _MANA_H */ diff --git a/drivers/net/ethernet/microsoft/mana/mana_auxiliary.h b/drivers/net/ethernet/microsoft/mana/mana_auxiliary.h deleted file mode 100644 index 373d59756846..000000000000 --- a/drivers/net/ethernet/microsoft/mana/mana_auxiliary.h +++ /dev/null @@ -1,10 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2022, Microsoft Corporation. 
*/ - -#include "mana.h" -#include - -struct mana_adev { - struct auxiliary_device adev; - struct gdma_dev *mdev; -}; diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c index 421fd39ff3a8..3caea631229c 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_bpf.c +++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c @@ -8,7 +8,7 @@ #include #include -#include "mana.h" +#include void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev) { diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index b6303a43fa7c..ffa2a0e2c213 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -12,8 +12,8 @@ #include #include -#include "mana.h" -#include "mana_auxiliary.h" +#include +#include static DEFINE_IDA(mana_adev_ida); diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c index c530db76880f..6f98de6d7440 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c +++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c @@ -5,7 +5,7 @@ #include #include -#include "mana.h" +#include static const struct { char name[ETH_GSTRING_LEN]; diff --git a/drivers/net/ethernet/microsoft/mana/shm_channel.c b/drivers/net/ethernet/microsoft/mana/shm_channel.c index da255da62176..5553af9c8085 100644 --- a/drivers/net/ethernet/microsoft/mana/shm_channel.c +++ b/drivers/net/ethernet/microsoft/mana/shm_channel.c @@ -6,7 +6,7 @@ #include #include -#include "shm_channel.h" +#include #define PAGE_FRAME_L48_WIDTH_BYTES 6 #define PAGE_FRAME_L48_WIDTH_BITS (PAGE_FRAME_L48_WIDTH_BYTES * 8) diff --git a/drivers/net/ethernet/microsoft/mana/shm_channel.h b/drivers/net/ethernet/microsoft/mana/shm_channel.h deleted file mode 100644 index 5199b41497ff..000000000000 --- a/drivers/net/ethernet/microsoft/mana/shm_channel.h +++ /dev/null @@ -1,21 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ -/* Copyright (c) 2021, Microsoft Corporation. */ - -#ifndef _SHM_CHANNEL_H -#define _SHM_CHANNEL_H - -struct shm_channel { - struct device *dev; - void __iomem *base; -}; - -void mana_smc_init(struct shm_channel *sc, struct device *dev, - void __iomem *base); - -int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr, - u64 cq_addr, u64 rq_addr, u64 sq_addr, - u32 eq_msix_index); - -int mana_smc_teardown_hwc(struct shm_channel *sc, bool reset_vf); - -#endif /* _SHM_CHANNEL_H */ diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h new file mode 100644 index 000000000000..72eaec2470c0 --- /dev/null +++ b/include/net/mana/gdma.h @@ -0,0 +1,689 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright (c) 2021, Microsoft Corporation. */ + +#ifndef _GDMA_H +#define _GDMA_H + +#include +#include + +#include "shm_channel.h" + +/* Structures labeled with "HW DATA" are exchanged with the hardware. All of + * them are naturally aligned and hence don't need __packed. 
+ */ + +enum gdma_request_type { + GDMA_VERIFY_VF_DRIVER_VERSION = 1, + GDMA_QUERY_MAX_RESOURCES = 2, + GDMA_LIST_DEVICES = 3, + GDMA_REGISTER_DEVICE = 4, + GDMA_DEREGISTER_DEVICE = 5, + GDMA_GENERATE_TEST_EQE = 10, + GDMA_CREATE_QUEUE = 12, + GDMA_DISABLE_QUEUE = 13, + GDMA_CREATE_DMA_REGION = 25, + GDMA_DMA_REGION_ADD_PAGES = 26, + GDMA_DESTROY_DMA_REGION = 27, +}; + +enum gdma_queue_type { + GDMA_INVALID_QUEUE, + GDMA_SQ, + GDMA_RQ, + GDMA_CQ, + GDMA_EQ, +}; + +enum gdma_work_request_flags { + GDMA_WR_NONE = 0, + GDMA_WR_OOB_IN_SGL = BIT(0), + GDMA_WR_PAD_BY_SGE0 = BIT(1), +}; + +enum gdma_eqe_type { + GDMA_EQE_COMPLETION = 3, + GDMA_EQE_TEST_EVENT = 64, + GDMA_EQE_HWC_INIT_EQ_ID_DB = 129, + GDMA_EQE_HWC_INIT_DATA = 130, + GDMA_EQE_HWC_INIT_DONE = 131, +}; + +enum { + GDMA_DEVICE_NONE = 0, + GDMA_DEVICE_HWC = 1, + GDMA_DEVICE_MANA = 2, +}; + +struct gdma_resource { + /* Protect the bitmap */ + spinlock_t lock; + + /* The bitmap size in bits. */ + u32 size; + + /* The bitmap tracks the resources. */ + unsigned long *map; +}; + +union gdma_doorbell_entry { + u64 as_uint64; + + struct { + u64 id : 24; + u64 reserved : 8; + u64 tail_ptr : 31; + u64 arm : 1; + } cq; + + struct { + u64 id : 24; + u64 wqe_cnt : 8; + u64 tail_ptr : 32; + } rq; + + struct { + u64 id : 24; + u64 reserved : 8; + u64 tail_ptr : 32; + } sq; + + struct { + u64 id : 16; + u64 reserved : 16; + u64 tail_ptr : 31; + u64 arm : 1; + } eq; +}; /* HW DATA */ + +struct gdma_msg_hdr { + u32 hdr_type; + u32 msg_type; + u16 msg_version; + u16 hwc_msg_id; + u32 msg_size; +}; /* HW DATA */ + +struct gdma_dev_id { + union { + struct { + u16 type; + u16 instance; + }; + + u32 as_uint32; + }; +}; /* HW DATA */ + +struct gdma_req_hdr { + struct gdma_msg_hdr req; + struct gdma_msg_hdr resp; /* The expected response */ + struct gdma_dev_id dev_id; + u32 activity_id; +}; /* HW DATA */ + +struct gdma_resp_hdr { + struct gdma_msg_hdr response; + struct gdma_dev_id dev_id; + u32 activity_id; + u32 status; + u32 reserved; +}; /* HW DATA */ + +struct gdma_general_req { + struct gdma_req_hdr hdr; +}; /* HW DATA */ + +#define GDMA_MESSAGE_V1 1 + +struct gdma_general_resp { + struct gdma_resp_hdr hdr; +}; /* HW DATA */ + +#define GDMA_STANDARD_HEADER_TYPE 0 + +static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code, + u32 req_size, u32 resp_size) +{ + hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE; + hdr->req.msg_type = code; + hdr->req.msg_version = GDMA_MESSAGE_V1; + hdr->req.msg_size = req_size; + + hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE; + hdr->resp.msg_type = code; + hdr->resp.msg_version = GDMA_MESSAGE_V1; + hdr->resp.msg_size = resp_size; +} + +/* The 16-byte struct is part of the GDMA work queue entry (WQE). 
*/ +struct gdma_sge { + u64 address; + u32 mem_key; + u32 size; +}; /* HW DATA */ + +struct gdma_wqe_request { + struct gdma_sge *sgl; + u32 num_sge; + + u32 inline_oob_size; + const void *inline_oob_data; + + u32 flags; + u32 client_data_unit; +}; + +enum gdma_page_type { + GDMA_PAGE_TYPE_4K, +}; + +#define GDMA_INVALID_DMA_REGION 0 + +struct gdma_mem_info { + struct device *dev; + + dma_addr_t dma_handle; + void *virt_addr; + u64 length; + + /* Allocated by the PF driver */ + u64 gdma_region; +}; + +#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8 + +struct gdma_dev { + struct gdma_context *gdma_context; + + struct gdma_dev_id dev_id; + + u32 pdid; + u32 doorbell; + u32 gpa_mkey; + + /* GDMA driver specific pointer */ + void *driver_data; + + struct auxiliary_device *adev; +}; + +#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE + +#define GDMA_CQE_SIZE 64 +#define GDMA_EQE_SIZE 16 +#define GDMA_MAX_SQE_SIZE 512 +#define GDMA_MAX_RQE_SIZE 256 + +#define GDMA_COMP_DATA_SIZE 0x3C + +#define GDMA_EVENT_DATA_SIZE 0xC + +/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */ +#define GDMA_WQE_BU_SIZE 32 + +#define INVALID_PDID UINT_MAX +#define INVALID_DOORBELL UINT_MAX +#define INVALID_MEM_KEY UINT_MAX +#define INVALID_QUEUE_ID UINT_MAX +#define INVALID_PCI_MSIX_INDEX UINT_MAX + +struct gdma_comp { + u32 cqe_data[GDMA_COMP_DATA_SIZE / 4]; + u32 wq_num; + bool is_sq; +}; + +struct gdma_event { + u32 details[GDMA_EVENT_DATA_SIZE / 4]; + u8 type; +}; + +struct gdma_queue; + +struct mana_eq { + struct gdma_queue *eq; +}; + +typedef void gdma_eq_callback(void *context, struct gdma_queue *q, + struct gdma_event *e); + +typedef void gdma_cq_callback(void *context, struct gdma_queue *q); + +/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE + * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the + * driver increases the 'head' in BUs rather than in bytes, and notifies + * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track + * the HW head, and increases the 'head' by 1 for every processed EQE/CQE. + * + * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is + * processed, the driver increases the 'tail' to indicate that WQEs have + * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ. + * + * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures + * that the EQ/CQ is big enough so they can't overflow, and the driver uses + * the owner bits mechanism to detect if the queue has become empty. + */ +struct gdma_queue { + struct gdma_dev *gdma_dev; + + enum gdma_queue_type type; + u32 id; + + struct gdma_mem_info mem_info; + + void *queue_mem_ptr; + u32 queue_size; + + bool monitor_avl_buf; + + u32 head; + u32 tail; + + /* Extra fields specific to EQ/CQ. */ + union { + struct { + bool disable_needed; + + gdma_eq_callback *callback; + void *context; + + unsigned int msix_index; + + u32 log2_throttle_limit; + } eq; + + struct { + gdma_cq_callback *callback; + void *context; + + struct gdma_queue *parent; /* For CQ/EQ relationship */ + } cq; + }; +}; + +struct gdma_queue_spec { + enum gdma_queue_type type; + bool monitor_avl_buf; + unsigned int queue_size; + + /* Extra fields specific to EQ/CQ. 
*/ + union { + struct { + gdma_eq_callback *callback; + void *context; + + unsigned long log2_throttle_limit; + } eq; + + struct { + gdma_cq_callback *callback; + void *context; + + struct gdma_queue *parent_eq; + + } cq; + }; +}; + +struct gdma_irq_context { + void (*handler)(void *arg); + void *arg; +}; + +struct gdma_context { + struct device *dev; + + /* Per-vPort max number of queues */ + unsigned int max_num_queues; + unsigned int max_num_msix; + unsigned int num_msix_usable; + struct gdma_resource msix_resource; + struct gdma_irq_context *irq_contexts; + + /* This maps a CQ index to the queue structure. */ + unsigned int max_num_cqs; + struct gdma_queue **cq_table; + + /* Protect eq_test_event and test_event_eq_id */ + struct mutex eq_test_event_mutex; + struct completion eq_test_event; + u32 test_event_eq_id; + + bool is_pf; + phys_addr_t bar0_pa; + void __iomem *bar0_va; + void __iomem *shm_base; + void __iomem *db_page_base; + phys_addr_t phys_db_page_base; + u32 db_page_size; + + /* Shared memory chanenl (used to bootstrap HWC) */ + struct shm_channel shm_channel; + + /* Hardware communication channel (HWC) */ + struct gdma_dev hwc; + + /* Azure network adapter */ + struct gdma_dev mana; +}; + +#define MAX_NUM_GDMA_DEVICES 4 + +static inline bool mana_gd_is_mana(struct gdma_dev *gd) +{ + return gd->dev_id.type == GDMA_DEVICE_MANA; +} + +static inline bool mana_gd_is_hwc(struct gdma_dev *gd) +{ + return gd->dev_id.type == GDMA_DEVICE_HWC; +} + +u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset); +u32 mana_gd_wq_avail_space(struct gdma_queue *wq); + +int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq); + +int mana_gd_create_hwc_queue(struct gdma_dev *gd, + const struct gdma_queue_spec *spec, + struct gdma_queue **queue_ptr); + +int mana_gd_create_mana_eq(struct gdma_dev *gd, + const struct gdma_queue_spec *spec, + struct gdma_queue **queue_ptr); + +int mana_gd_create_mana_wq_cq(struct gdma_dev *gd, + const struct gdma_queue_spec *spec, + struct gdma_queue **queue_ptr); + +void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue); + +int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe); + +void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit); + +struct gdma_wqe { + u32 reserved :24; + u32 last_vbytes :8; + + union { + u32 flags; + + struct { + u32 num_sge :8; + u32 inline_oob_size_div4:3; + u32 client_oob_in_sgl :1; + u32 reserved1 :4; + u32 client_data_unit :14; + u32 reserved2 :2; + }; + }; +}; /* HW DATA */ + +#define INLINE_OOB_SMALL_SIZE 8 +#define INLINE_OOB_LARGE_SIZE 24 + +#define MAX_TX_WQE_SIZE 512 +#define MAX_RX_WQE_SIZE 256 + +struct gdma_cqe { + u32 cqe_data[GDMA_COMP_DATA_SIZE / 4]; + + union { + u32 as_uint32; + + struct { + u32 wq_num : 24; + u32 is_sq : 1; + u32 reserved : 4; + u32 owner_bits : 3; + }; + } cqe_info; +}; /* HW DATA */ + +#define GDMA_CQE_OWNER_BITS 3 + +#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1) + +#define SET_ARM_BIT 1 + +#define GDMA_EQE_OWNER_BITS 3 + +union gdma_eqe_info { + u32 as_uint32; + + struct { + u32 type : 8; + u32 reserved1 : 8; + u32 client_id : 2; + u32 reserved2 : 11; + u32 owner_bits : 3; + }; +}; /* HW DATA */ + +#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1) +#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries)) + +struct gdma_eqe { + u32 details[GDMA_EVENT_DATA_SIZE / 4]; + u32 eqe_info; +}; /* HW DATA */ + +#define GDMA_REG_DB_PAGE_OFFSET 8 +#define GDMA_REG_DB_PAGE_SIZE 0x10 +#define 
GDMA_REG_SHM_OFFSET 0x18 + +#define GDMA_PF_REG_DB_PAGE_SIZE 0xD0 +#define GDMA_PF_REG_DB_PAGE_OFF 0xC8 +#define GDMA_PF_REG_SHM_OFF 0x70 + +#define GDMA_SRIOV_REG_CFG_BASE_OFF 0x108 + +#define MANA_PF_DEVICE_ID 0x00B9 +#define MANA_VF_DEVICE_ID 0x00BA + +struct gdma_posted_wqe_info { + u32 wqe_size_in_bu; +}; + +/* GDMA_GENERATE_TEST_EQE */ +struct gdma_generate_test_event_req { + struct gdma_req_hdr hdr; + u32 queue_index; +}; /* HW DATA */ + +/* GDMA_VERIFY_VF_DRIVER_VERSION */ +enum { + GDMA_PROTOCOL_V1 = 1, + GDMA_PROTOCOL_FIRST = GDMA_PROTOCOL_V1, + GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1, +}; + +#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0) + +#define GDMA_DRV_CAP_FLAGS1 GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT + +#define GDMA_DRV_CAP_FLAGS2 0 + +#define GDMA_DRV_CAP_FLAGS3 0 + +#define GDMA_DRV_CAP_FLAGS4 0 + +struct gdma_verify_ver_req { + struct gdma_req_hdr hdr; + + /* Mandatory fields required for protocol establishment */ + u64 protocol_ver_min; + u64 protocol_ver_max; + + /* Gdma Driver Capability Flags */ + u64 gd_drv_cap_flags1; + u64 gd_drv_cap_flags2; + u64 gd_drv_cap_flags3; + u64 gd_drv_cap_flags4; + + /* Advisory fields */ + u64 drv_ver; + u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */ + u32 reserved; + u32 os_ver_major; + u32 os_ver_minor; + u32 os_ver_build; + u32 os_ver_platform; + u64 reserved_2; + u8 os_ver_str1[128]; + u8 os_ver_str2[128]; + u8 os_ver_str3[128]; + u8 os_ver_str4[128]; +}; /* HW DATA */ + +struct gdma_verify_ver_resp { + struct gdma_resp_hdr hdr; + u64 gdma_protocol_ver; + u64 pf_cap_flags1; + u64 pf_cap_flags2; + u64 pf_cap_flags3; + u64 pf_cap_flags4; +}; /* HW DATA */ + +/* GDMA_QUERY_MAX_RESOURCES */ +struct gdma_query_max_resources_resp { + struct gdma_resp_hdr hdr; + u32 status; + u32 max_sq; + u32 max_rq; + u32 max_cq; + u32 max_eq; + u32 max_db; + u32 max_mst; + u32 max_cq_mod_ctx; + u32 max_mod_cq; + u32 max_msix; +}; /* HW DATA */ + +/* GDMA_LIST_DEVICES */ +struct gdma_list_devices_resp { + struct gdma_resp_hdr hdr; + u32 num_of_devs; + u32 reserved; + struct gdma_dev_id devs[64]; +}; /* HW DATA */ + +/* GDMA_REGISTER_DEVICE */ +struct gdma_register_device_resp { + struct gdma_resp_hdr hdr; + u32 pdid; + u32 gpa_mkey; + u32 db_id; +}; /* HW DATA */ + +/* GDMA_CREATE_QUEUE */ +struct gdma_create_queue_req { + struct gdma_req_hdr hdr; + u32 type; + u32 reserved1; + u32 pdid; + u32 doolbell_id; + u64 gdma_region; + u32 reserved2; + u32 queue_size; + u32 log2_throttle_limit; + u32 eq_pci_msix_index; + u32 cq_mod_ctx_id; + u32 cq_parent_eq_id; + u8 rq_drop_on_overrun; + u8 rq_err_on_wqe_overflow; + u8 rq_chain_rec_wqes; + u8 sq_hw_db; + u32 reserved3; +}; /* HW DATA */ + +struct gdma_create_queue_resp { + struct gdma_resp_hdr hdr; + u32 queue_index; +}; /* HW DATA */ + +/* GDMA_DISABLE_QUEUE */ +struct gdma_disable_queue_req { + struct gdma_req_hdr hdr; + u32 type; + u32 queue_index; + u32 alloc_res_id_on_creation; +}; /* HW DATA */ + +/* GDMA_CREATE_DMA_REGION */ +struct gdma_create_dma_region_req { + struct gdma_req_hdr hdr; + + /* The total size of the DMA region */ + u64 length; + + /* The offset in the first page */ + u32 offset_in_page; + + /* enum gdma_page_type */ + u32 gdma_page_type; + + /* The total number of pages */ + u32 page_count; + + /* If page_addr_list_len is smaller than page_count, + * the remaining page addresses will be added via the + * message GDMA_DMA_REGION_ADD_PAGES. 
+ */ + u32 page_addr_list_len; + u64 page_addr_list[]; +}; /* HW DATA */ + +struct gdma_create_dma_region_resp { + struct gdma_resp_hdr hdr; + u64 gdma_region; +}; /* HW DATA */ + +/* GDMA_DMA_REGION_ADD_PAGES */ +struct gdma_dma_region_add_pages_req { + struct gdma_req_hdr hdr; + + u64 gdma_region; + + u32 page_addr_list_len; + u32 reserved3; + + u64 page_addr_list[]; +}; /* HW DATA */ + +/* GDMA_DESTROY_DMA_REGION */ +struct gdma_destroy_dma_region_req { + struct gdma_req_hdr hdr; + + u64 gdma_region; +}; /* HW DATA */ + +int mana_gd_verify_vf_version(struct pci_dev *pdev); + +int mana_gd_register_device(struct gdma_dev *gd); +int mana_gd_deregister_device(struct gdma_dev *gd); + +int mana_gd_post_work_request(struct gdma_queue *wq, + const struct gdma_wqe_request *wqe_req, + struct gdma_posted_wqe_info *wqe_info); + +int mana_gd_post_and_ring(struct gdma_queue *queue, + const struct gdma_wqe_request *wqe, + struct gdma_posted_wqe_info *wqe_info); + +int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r); +void mana_gd_free_res_map(struct gdma_resource *r); + +void mana_gd_wq_ring_doorbell(struct gdma_context *gc, + struct gdma_queue *queue); + +int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length, + struct gdma_mem_info *gmi); + +void mana_gd_free_memory(struct gdma_mem_info *gmi); + +int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req, + u32 resp_len, void *resp); +#endif /* _GDMA_H */ diff --git a/include/net/mana/hw_channel.h b/include/net/mana/hw_channel.h new file mode 100644 index 000000000000..6a757a6e2732 --- /dev/null +++ b/include/net/mana/hw_channel.h @@ -0,0 +1,195 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright (c) 2021, Microsoft Corporation. */ + +#ifndef _HW_CHANNEL_H +#define _HW_CHANNEL_H + +#define DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ 4 + +#define HW_CHANNEL_MAX_REQUEST_SIZE 0x1000 +#define HW_CHANNEL_MAX_RESPONSE_SIZE 0x1000 + +#define HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH 1 + +#define HWC_INIT_DATA_CQID 1 +#define HWC_INIT_DATA_RQID 2 +#define HWC_INIT_DATA_SQID 3 +#define HWC_INIT_DATA_QUEUE_DEPTH 4 +#define HWC_INIT_DATA_MAX_REQUEST 5 +#define HWC_INIT_DATA_MAX_RESPONSE 6 +#define HWC_INIT_DATA_MAX_NUM_CQS 7 +#define HWC_INIT_DATA_PDID 8 +#define HWC_INIT_DATA_GPA_MKEY 9 +#define HWC_INIT_DATA_PF_DEST_RQ_ID 10 +#define HWC_INIT_DATA_PF_DEST_CQ_ID 11 + +/* Structures labeled with "HW DATA" are exchanged with the hardware. All of + * them are naturally aligned and hence don't need __packed. 
+ */ + +union hwc_init_eq_id_db { + u32 as_uint32; + + struct { + u32 eq_id : 16; + u32 doorbell : 16; + }; +}; /* HW DATA */ + +union hwc_init_type_data { + u32 as_uint32; + + struct { + u32 value : 24; + u32 type : 8; + }; +}; /* HW DATA */ + +struct hwc_rx_oob { + u32 type : 6; + u32 eom : 1; + u32 som : 1; + u32 vendor_err : 8; + u32 reserved1 : 16; + + u32 src_virt_wq : 24; + u32 src_vfid : 8; + + u32 reserved2; + + union { + u32 wqe_addr_low; + u32 wqe_offset; + }; + + u32 wqe_addr_high; + + u32 client_data_unit : 14; + u32 reserved3 : 18; + + u32 tx_oob_data_size; + + u32 chunk_offset : 21; + u32 reserved4 : 11; +}; /* HW DATA */ + +struct hwc_tx_oob { + u32 reserved1; + + u32 reserved2; + + u32 vrq_id : 24; + u32 dest_vfid : 8; + + u32 vrcq_id : 24; + u32 reserved3 : 8; + + u32 vscq_id : 24; + u32 loopback : 1; + u32 lso_override: 1; + u32 dest_pf : 1; + u32 reserved4 : 5; + + u32 vsq_id : 24; + u32 reserved5 : 8; +}; /* HW DATA */ + +struct hwc_work_request { + void *buf_va; + void *buf_sge_addr; + u32 buf_len; + u32 msg_size; + + struct gdma_wqe_request wqe_req; + struct hwc_tx_oob tx_oob; + + struct gdma_sge sge; +}; + +/* hwc_dma_buf represents the array of in-flight WQEs. + * mem_info as know as the GDMA mapped memory is partitioned and used by + * in-flight WQEs. + * The number of WQEs is determined by the number of in-flight messages. + */ +struct hwc_dma_buf { + struct gdma_mem_info mem_info; + + u32 gpa_mkey; + + u32 num_reqs; + struct hwc_work_request reqs[]; +}; + +typedef void hwc_rx_event_handler_t(void *ctx, u32 gdma_rxq_id, + const struct hwc_rx_oob *rx_oob); + +typedef void hwc_tx_event_handler_t(void *ctx, u32 gdma_txq_id, + const struct hwc_rx_oob *rx_oob); + +struct hwc_cq { + struct hw_channel_context *hwc; + + struct gdma_queue *gdma_cq; + struct gdma_queue *gdma_eq; + struct gdma_comp *comp_buf; + u16 queue_depth; + + hwc_rx_event_handler_t *rx_event_handler; + void *rx_event_ctx; + + hwc_tx_event_handler_t *tx_event_handler; + void *tx_event_ctx; +}; + +struct hwc_wq { + struct hw_channel_context *hwc; + + struct gdma_queue *gdma_wq; + struct hwc_dma_buf *msg_buf; + u16 queue_depth; + + struct hwc_cq *hwc_cq; +}; + +struct hwc_caller_ctx { + struct completion comp_event; + void *output_buf; + u32 output_buflen; + + u32 error; /* Linux error code */ + u32 status_code; +}; + +struct hw_channel_context { + struct gdma_dev *gdma_dev; + struct device *dev; + + u16 num_inflight_msg; + u32 max_req_msg_size; + + u16 hwc_init_q_depth_max; + u32 hwc_init_max_req_msg_size; + u32 hwc_init_max_resp_msg_size; + + struct completion hwc_init_eqe_comp; + + struct hwc_wq *rxq; + struct hwc_wq *txq; + struct hwc_cq *cq; + + struct semaphore sema; + struct gdma_resource inflight_msg_res; + + u32 pf_dest_vrq_id; + u32 pf_dest_vrcq_id; + + struct hwc_caller_ctx *caller_ctx; +}; + +int mana_hwc_create_channel(struct gdma_context *gc); +void mana_hwc_destroy_channel(struct gdma_context *gc); + +int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len, + const void *req, u32 resp_len, void *resp); + +#endif /* _HW_CHANNEL_H */ diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h new file mode 100644 index 000000000000..6e9e86fb4c02 --- /dev/null +++ b/include/net/mana/mana.h @@ -0,0 +1,650 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright (c) 2021, Microsoft Corporation. 
*/ + +#ifndef _MANA_H +#define _MANA_H + +#include "gdma.h" +#include "hw_channel.h" + +/* Microsoft Azure Network Adapter (MANA)'s definitions + * + * Structures labeled with "HW DATA" are exchanged with the hardware. All of + * them are naturally aligned and hence don't need __packed. + */ + +/* MANA protocol version */ +#define MANA_MAJOR_VERSION 0 +#define MANA_MINOR_VERSION 1 +#define MANA_MICRO_VERSION 1 + +typedef u64 mana_handle_t; +#define INVALID_MANA_HANDLE ((mana_handle_t)-1) + +enum TRI_STATE { + TRI_STATE_UNKNOWN = -1, + TRI_STATE_FALSE = 0, + TRI_STATE_TRUE = 1 +}; + +/* Number of entries for hardware indirection table must be in power of 2 */ +#define MANA_INDIRECT_TABLE_SIZE 64 +#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1) + +/* The Toeplitz hash key's length in bytes: should be multiple of 8 */ +#define MANA_HASH_KEY_SIZE 40 + +#define COMP_ENTRY_SIZE 64 + +#define ADAPTER_MTU_SIZE 1500 +#define MAX_FRAME_SIZE (ADAPTER_MTU_SIZE + 14) + +#define RX_BUFFERS_PER_QUEUE 512 + +#define MAX_SEND_BUFFERS_PER_QUEUE 256 + +#define EQ_SIZE (8 * PAGE_SIZE) +#define LOG2_EQ_THROTTLE 3 + +#define MAX_PORTS_IN_MANA_DEV 256 + +struct mana_stats_rx { + u64 packets; + u64 bytes; + u64 xdp_drop; + u64 xdp_tx; + u64 xdp_redirect; + struct u64_stats_sync syncp; +}; + +struct mana_stats_tx { + u64 packets; + u64 bytes; + u64 xdp_xmit; + struct u64_stats_sync syncp; +}; + +struct mana_txq { + struct gdma_queue *gdma_sq; + + union { + u32 gdma_txq_id; + struct { + u32 reserved1 : 10; + u32 vsq_frame : 14; + u32 reserved2 : 8; + }; + }; + + u16 vp_offset; + + struct net_device *ndev; + + /* The SKBs are sent to the HW and we are waiting for the CQEs. */ + struct sk_buff_head pending_skbs; + struct netdev_queue *net_txq; + + atomic_t pending_sends; + + struct mana_stats_tx stats; +}; + +/* skb data and frags dma mappings */ +struct mana_skb_head { + dma_addr_t dma_handle[MAX_SKB_FRAGS + 1]; + + u32 size[MAX_SKB_FRAGS + 1]; +}; + +#define MANA_HEADROOM sizeof(struct mana_skb_head) + +enum mana_tx_pkt_format { + MANA_SHORT_PKT_FMT = 0, + MANA_LONG_PKT_FMT = 1, +}; + +struct mana_tx_short_oob { + u32 pkt_fmt : 2; + u32 is_outer_ipv4 : 1; + u32 is_outer_ipv6 : 1; + u32 comp_iphdr_csum : 1; + u32 comp_tcp_csum : 1; + u32 comp_udp_csum : 1; + u32 supress_txcqe_gen : 1; + u32 vcq_num : 24; + + u32 trans_off : 10; /* Transport header offset */ + u32 vsq_frame : 14; + u32 short_vp_offset : 8; +}; /* HW DATA */ + +struct mana_tx_long_oob { + u32 is_encap : 1; + u32 inner_is_ipv6 : 1; + u32 inner_tcp_opt : 1; + u32 inject_vlan_pri_tag : 1; + u32 reserved1 : 12; + u32 pcp : 3; /* 802.1Q */ + u32 dei : 1; /* 802.1Q */ + u32 vlan_id : 12; /* 802.1Q */ + + u32 inner_frame_offset : 10; + u32 inner_ip_rel_offset : 6; + u32 long_vp_offset : 12; + u32 reserved2 : 4; + + u32 reserved3; + u32 reserved4; +}; /* HW DATA */ + +struct mana_tx_oob { + struct mana_tx_short_oob s_oob; + struct mana_tx_long_oob l_oob; +}; /* HW DATA */ + +enum mana_cq_type { + MANA_CQ_TYPE_RX, + MANA_CQ_TYPE_TX, +}; + +enum mana_cqe_type { + CQE_INVALID = 0, + CQE_RX_OKAY = 1, + CQE_RX_COALESCED_4 = 2, + CQE_RX_OBJECT_FENCE = 3, + CQE_RX_TRUNCATED = 4, + + CQE_TX_OKAY = 32, + CQE_TX_SA_DROP = 33, + CQE_TX_MTU_DROP = 34, + CQE_TX_INVALID_OOB = 35, + CQE_TX_INVALID_ETH_TYPE = 36, + CQE_TX_HDR_PROCESSING_ERROR = 37, + CQE_TX_VF_DISABLED = 38, + CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39, + CQE_TX_VPORT_DISABLED = 40, + CQE_TX_VLAN_TAGGING_VIOLATION = 41, +}; + +#define MANA_CQE_COMPLETION 1 + +struct mana_cqe_header { + u32 cqe_type : 
6; + u32 client_type : 2; + u32 vendor_err : 24; +}; /* HW DATA */ + +/* NDIS HASH Types */ +#define NDIS_HASH_IPV4 BIT(0) +#define NDIS_HASH_TCP_IPV4 BIT(1) +#define NDIS_HASH_UDP_IPV4 BIT(2) +#define NDIS_HASH_IPV6 BIT(3) +#define NDIS_HASH_TCP_IPV6 BIT(4) +#define NDIS_HASH_UDP_IPV6 BIT(5) +#define NDIS_HASH_IPV6_EX BIT(6) +#define NDIS_HASH_TCP_IPV6_EX BIT(7) +#define NDIS_HASH_UDP_IPV6_EX BIT(8) + +#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX) +#define MANA_HASH_L4 \ + (NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \ + NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX) + +struct mana_rxcomp_perpkt_info { + u32 pkt_len : 16; + u32 reserved1 : 16; + u32 reserved2; + u32 pkt_hash; +}; /* HW DATA */ + +#define MANA_RXCOMP_OOB_NUM_PPI 4 + +/* Receive completion OOB */ +struct mana_rxcomp_oob { + struct mana_cqe_header cqe_hdr; + + u32 rx_vlan_id : 12; + u32 rx_vlantag_present : 1; + u32 rx_outer_iphdr_csum_succeed : 1; + u32 rx_outer_iphdr_csum_fail : 1; + u32 reserved1 : 1; + u32 rx_hashtype : 9; + u32 rx_iphdr_csum_succeed : 1; + u32 rx_iphdr_csum_fail : 1; + u32 rx_tcp_csum_succeed : 1; + u32 rx_tcp_csum_fail : 1; + u32 rx_udp_csum_succeed : 1; + u32 rx_udp_csum_fail : 1; + u32 reserved2 : 1; + + struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI]; + + u32 rx_wqe_offset; +}; /* HW DATA */ + +struct mana_tx_comp_oob { + struct mana_cqe_header cqe_hdr; + + u32 tx_data_offset; + + u32 tx_sgl_offset : 5; + u32 tx_wqe_offset : 27; + + u32 reserved[12]; +}; /* HW DATA */ + +struct mana_rxq; + +#define CQE_POLLING_BUFFER 512 + +struct mana_cq { + struct gdma_queue *gdma_cq; + + /* Cache the CQ id (used to verify if each CQE comes to the right CQ). */ + u32 gdma_id; + + /* Type of the CQ: TX or RX */ + enum mana_cq_type type; + + /* Pointer to the mana_rxq that is pushing RX CQEs to the queue. + * Must be non-NULL if and only if type is MANA_CQ_TYPE_RX. + */ + struct mana_rxq *rxq; + + /* Pointer to the mana_txq that is pushing TX CQEs to the queue. + * Must be non-NULL if and only if type is MANA_CQ_TYPE_TX. + */ + struct mana_txq *txq; + + /* Buffer which the CQ handler can copy the CQEs into. */ + struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER]; + + /* NAPI data */ + struct napi_struct napi; + int work_done; + int budget; +}; + +#define GDMA_MAX_RQE_SGES 15 + +struct mana_recv_buf_oob { + /* A valid GDMA work request representing the data buffer. */ + struct gdma_wqe_request wqe_req; + + void *buf_va; + dma_addr_t buf_dma_addr; + + /* SGL of the buffer going to be sent as part of the work request. */ + u32 num_sge; + struct gdma_sge sgl[GDMA_MAX_RQE_SGES]; + + /* Required to store the result of mana_gd_post_work_request. + * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the + * work queue when the WQE is consumed. 
+ */ + struct gdma_posted_wqe_info wqe_inf; +}; + +struct mana_rxq { + struct gdma_queue *gdma_rq; + /* Cache the gdma receive queue id */ + u32 gdma_id; + + /* Index of RQ in the vPort, not gdma receive queue id */ + u32 rxq_idx; + + u32 datasize; + + mana_handle_t rxobj; + + struct mana_cq rx_cq; + + struct completion fence_event; + + struct net_device *ndev; + + /* Total number of receive buffers to be allocated */ + u32 num_rx_buf; + + u32 buf_index; + + struct mana_stats_rx stats; + + struct bpf_prog __rcu *bpf_prog; + struct xdp_rxq_info xdp_rxq; + struct page *xdp_save_page; + bool xdp_flush; + int xdp_rc; /* XDP redirect return code */ + + /* MUST BE THE LAST MEMBER: + * Each receive buffer has an associated mana_recv_buf_oob. + */ + struct mana_recv_buf_oob rx_oobs[]; +}; + +struct mana_tx_qp { + struct mana_txq txq; + + struct mana_cq tx_cq; + + mana_handle_t tx_object; +}; + +struct mana_ethtool_stats { + u64 stop_queue; + u64 wake_queue; +}; + +struct mana_context { + struct gdma_dev *gdma_dev; + + u16 num_ports; + + struct mana_eq *eqs; + + struct net_device *ports[MAX_PORTS_IN_MANA_DEV]; +}; + +struct mana_port_context { + struct mana_context *ac; + struct net_device *ndev; + + u8 mac_addr[ETH_ALEN]; + + enum TRI_STATE rss_state; + + mana_handle_t default_rxobj; + bool tx_shortform_allowed; + u16 tx_vp_offset; + + struct mana_tx_qp *tx_qp; + + /* Indirection Table for RX & TX. The values are queue indexes */ + u32 indir_table[MANA_INDIRECT_TABLE_SIZE]; + + /* Indirection table containing RxObject Handles */ + mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE]; + + /* Hash key used by the NIC */ + u8 hashkey[MANA_HASH_KEY_SIZE]; + + /* This points to an array of num_queues of RQ pointers. */ + struct mana_rxq **rxqs; + + struct bpf_prog *bpf_prog; + + /* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. 
*/ + unsigned int max_queues; + unsigned int num_queues; + + mana_handle_t port_handle; + mana_handle_t pf_filter_handle; + + /* Mutex for sharing access to vport_use_count */ + struct mutex vport_mutex; + int vport_use_count; + + u16 port_idx; + + bool port_is_up; + bool port_st_save; /* Saved port state */ + + struct mana_ethtool_stats eth_stats; +}; + +int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev); +int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx, + bool update_hash, bool update_tab); + +int mana_alloc_queues(struct net_device *ndev); +int mana_attach(struct net_device *ndev); +int mana_detach(struct net_device *ndev, bool from_close); + +int mana_probe(struct gdma_dev *gd, bool resuming); +void mana_remove(struct gdma_dev *gd, bool suspending); + +void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev); +int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames, + u32 flags); +u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq, + struct xdp_buff *xdp, void *buf_va, uint pkt_len); +struct bpf_prog *mana_xdp_get(struct mana_port_context *apc); +void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog); +int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf); + +extern const struct ethtool_ops mana_ethtool_ops; + +struct mana_obj_spec { + u32 queue_index; + u64 gdma_region; + u32 queue_size; + u32 attached_eq; + u32 modr_ctx_id; +}; + +enum mana_command_code { + MANA_QUERY_DEV_CONFIG = 0x20001, + MANA_QUERY_GF_STAT = 0x20002, + MANA_CONFIG_VPORT_TX = 0x20003, + MANA_CREATE_WQ_OBJ = 0x20004, + MANA_DESTROY_WQ_OBJ = 0x20005, + MANA_FENCE_RQ = 0x20006, + MANA_CONFIG_VPORT_RX = 0x20007, + MANA_QUERY_VPORT_CONFIG = 0x20008, + + /* Privileged commands for the PF mode */ + MANA_REGISTER_FILTER = 0x28000, + MANA_DEREGISTER_FILTER = 0x28001, + MANA_REGISTER_HW_PORT = 0x28003, + MANA_DEREGISTER_HW_PORT = 0x28004, +}; + +/* Query Device Configuration */ +struct mana_query_device_cfg_req { + struct gdma_req_hdr hdr; + + /* MANA Nic Driver Capability flags */ + u64 mn_drv_cap_flags1; + u64 mn_drv_cap_flags2; + u64 mn_drv_cap_flags3; + u64 mn_drv_cap_flags4; + + u32 proto_major_ver; + u32 proto_minor_ver; + u32 proto_micro_ver; + + u32 reserved; +}; /* HW DATA */ + +struct mana_query_device_cfg_resp { + struct gdma_resp_hdr hdr; + + u64 pf_cap_flags1; + u64 pf_cap_flags2; + u64 pf_cap_flags3; + u64 pf_cap_flags4; + + u16 max_num_vports; + u16 reserved; + u32 max_num_eqs; +}; /* HW DATA */ + +/* Query vPort Configuration */ +struct mana_query_vport_cfg_req { + struct gdma_req_hdr hdr; + u32 vport_index; +}; /* HW DATA */ + +struct mana_query_vport_cfg_resp { + struct gdma_resp_hdr hdr; + u32 max_num_sq; + u32 max_num_rq; + u32 num_indirection_ent; + u32 reserved1; + u8 mac_addr[6]; + u8 reserved2[2]; + mana_handle_t vport; +}; /* HW DATA */ + +/* Configure vPort */ +struct mana_config_vport_req { + struct gdma_req_hdr hdr; + mana_handle_t vport; + u32 pdid; + u32 doorbell_pageid; +}; /* HW DATA */ + +struct mana_config_vport_resp { + struct gdma_resp_hdr hdr; + u16 tx_vport_offset; + u8 short_form_allowed; + u8 reserved; +}; /* HW DATA */ + +/* Create WQ Object */ +struct mana_create_wqobj_req { + struct gdma_req_hdr hdr; + mana_handle_t vport; + u32 wq_type; + u32 reserved; + u64 wq_gdma_region; + u64 cq_gdma_region; + u32 wq_size; + u32 cq_size; + u32 cq_moderation_ctx_id; + u32 cq_parent_qid; +}; /* HW DATA */ + +struct mana_create_wqobj_resp { + struct gdma_resp_hdr hdr; + u32 wq_id; + u32 cq_id; + 
mana_handle_t wq_obj; +}; /* HW DATA */ + +/* Destroy WQ Object */ +struct mana_destroy_wqobj_req { + struct gdma_req_hdr hdr; + u32 wq_type; + u32 reserved; + mana_handle_t wq_obj_handle; +}; /* HW DATA */ + +struct mana_destroy_wqobj_resp { + struct gdma_resp_hdr hdr; +}; /* HW DATA */ + +/* Fence RQ */ +struct mana_fence_rq_req { + struct gdma_req_hdr hdr; + mana_handle_t wq_obj_handle; +}; /* HW DATA */ + +struct mana_fence_rq_resp { + struct gdma_resp_hdr hdr; +}; /* HW DATA */ + +/* Configure vPort Rx Steering */ +struct mana_cfg_rx_steer_req { + struct gdma_req_hdr hdr; + mana_handle_t vport; + u16 num_indir_entries; + u16 indir_tab_offset; + u32 rx_enable; + u32 rss_enable; + u8 update_default_rxobj; + u8 update_hashkey; + u8 update_indir_tab; + u8 reserved; + mana_handle_t default_rxobj; + u8 hashkey[MANA_HASH_KEY_SIZE]; +}; /* HW DATA */ + +struct mana_cfg_rx_steer_resp { + struct gdma_resp_hdr hdr; +}; /* HW DATA */ + +/* Register HW vPort */ +struct mana_register_hw_vport_req { + struct gdma_req_hdr hdr; + u16 attached_gfid; + u8 is_pf_default_vport; + u8 reserved1; + u8 allow_all_ether_types; + u8 reserved2; + u8 reserved3; + u8 reserved4; +}; /* HW DATA */ + +struct mana_register_hw_vport_resp { + struct gdma_resp_hdr hdr; + mana_handle_t hw_vport_handle; +}; /* HW DATA */ + +/* Deregister HW vPort */ +struct mana_deregister_hw_vport_req { + struct gdma_req_hdr hdr; + mana_handle_t hw_vport_handle; +}; /* HW DATA */ + +struct mana_deregister_hw_vport_resp { + struct gdma_resp_hdr hdr; +}; /* HW DATA */ + +/* Register filter */ +struct mana_register_filter_req { + struct gdma_req_hdr hdr; + mana_handle_t vport; + u8 mac_addr[6]; + u8 reserved1; + u8 reserved2; + u8 reserved3; + u8 reserved4; + u16 reserved5; + u32 reserved6; + u32 reserved7; + u32 reserved8; +}; /* HW DATA */ + +struct mana_register_filter_resp { + struct gdma_resp_hdr hdr; + mana_handle_t filter_handle; +}; /* HW DATA */ + +/* Deregister filter */ +struct mana_deregister_filter_req { + struct gdma_req_hdr hdr; + mana_handle_t filter_handle; +}; /* HW DATA */ + +struct mana_deregister_filter_resp { + struct gdma_resp_hdr hdr; +}; /* HW DATA */ + +#define MANA_MAX_NUM_QUEUES 64 + +#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1) + +struct mana_tx_package { + struct gdma_wqe_request wqe_req; + struct gdma_sge sgl_array[5]; + struct gdma_sge *sgl_ptr; + + struct mana_tx_oob tx_oob; + + struct gdma_posted_wqe_info wqe_info; +}; + +int mana_create_wq_obj(struct mana_port_context *apc, + mana_handle_t vport, + u32 wq_type, struct mana_obj_spec *wq_spec, + struct mana_obj_spec *cq_spec, + mana_handle_t *wq_obj); + +void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type, + mana_handle_t wq_obj); + +int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id, + u32 doorbell_pg_id); +void mana_uncfg_vport(struct mana_port_context *apc); +#endif /* _MANA_H */ diff --git a/include/net/mana/mana_auxiliary.h b/include/net/mana/mana_auxiliary.h new file mode 100644 index 000000000000..373d59756846 --- /dev/null +++ b/include/net/mana/mana_auxiliary.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2022, Microsoft Corporation. 
*/ + +#include "mana.h" +#include + +struct mana_adev { + struct auxiliary_device adev; + struct gdma_dev *mdev; +}; diff --git a/include/net/mana/shm_channel.h b/include/net/mana/shm_channel.h new file mode 100644 index 000000000000..5199b41497ff --- /dev/null +++ b/include/net/mana/shm_channel.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright (c) 2021, Microsoft Corporation. */ + +#ifndef _SHM_CHANNEL_H +#define _SHM_CHANNEL_H + +struct shm_channel { + struct device *dev; + void __iomem *base; +}; + +void mana_smc_init(struct shm_channel *sc, struct device *dev, + void __iomem *base); + +int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr, + u64 cq_addr, u64 rq_addr, u64 sq_addr, + u32 eq_msix_index); + +int mana_smc_teardown_hwc(struct shm_channel *sc, bool reset_vf); + +#endif /* _SHM_CHANNEL_H */ -- cgit v1.2.3 From a07b3835b895b0ef772f88c4b7c38e9fed52b1ff Mon Sep 17 00:00:00 2001 From: Cai Huoqing Date: Wed, 9 Nov 2022 15:26:41 +0800 Subject: MAINTAINERS: Update hinic maintainers from orphan HINIC has been marked orphan for 14 months, since commit "5cfe5109a1d7", but there are lots of HINIC devices in use. I have an SP582 NIC (with a hi1822 inside, which is a kind of HINIC SoC) and develop on top of the hinic driver, so if there are patches for HINIC I can test them and do some code review. I'm active in Linux contribution and, if possible, I want to take over the hinic maintainership. Add my email here to receive patches. Signed-off-by: Cai Huoqing Signed-off-by: David S. Miller --- MAINTAINERS | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index eb5fe2fd4d7a..6aea837ff9d4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9438,8 +9438,9 @@ F: Documentation/devicetree/bindings/iio/humidity/st,hts221.yaml F: drivers/iio/humidity/hts221* HUAWEI ETHERNET DRIVER +M: Cai Huoqing L: netdev@vger.kernel.org -S: Orphan +S: Maintained F: Documentation/networking/device_drivers/ethernet/huawei/hinic.rst F: drivers/net/ethernet/huawei/hinic/ -- cgit v1.2.3 From 90050f80509c7e97cd957b025b62e6e49c2c6bc0 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Sat, 5 Nov 2022 23:36:23 +0100 Subject: MAINTAINERS: update MEDIATEK ETHERNET entry Update the MEDIATEK ETHERNET driver MAINTAINERS entry, adding myself to the maintainers list. Acked-by: Felix Fietkau Acked-by: Sean Wang Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. Miller --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 6aea837ff9d4..14ee1c72d01a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12926,6 +12926,7 @@ M: Felix Fietkau M: John Crispin M: Sean Wang M: Mark Lee +M: Lorenzo Bianconi L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/mediatek/ -- cgit v1.2.3 From 06463f6e98df34908c26aa8e7a31a279646b1f51 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Wed, 9 Nov 2022 14:42:49 -0800 Subject: wifi: wl1251: drop support for platform data Remove support for configuring the device via platform data because there are no users of wl1251_platform_data left in the mainline kernel.
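For illustration (an editorial sketch, not part of this patch), the DT-only probe flow that remains after this change, condensed from the wl1251_spi_probe() hunk below with the error unwinding elided:

	/* Sketch: with platform data gone, configuration comes only from
	 * the device tree node (np and wl as in wl1251_spi_probe()). */
	struct device_node *np = spi->dev.of_node;

	if (!np)
		return -ENODEV;

	wl->use_eeprom = of_property_read_bool(np, "ti,wl1251-has-eeprom");
	wl->power_gpio = of_get_named_gpio(np, "ti,power-gpio", 0);
	if (wl->power_gpio == -EPROBE_DEFER)
		return -EPROBE_DEFER;
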
Signed-off-by: Dmitry Torokhov Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20221109224250.2885119-2-dmitry.torokhov@gmail.com --- MAINTAINERS | 1 - drivers/net/wireless/ti/Kconfig | 8 ----- drivers/net/wireless/ti/wilink_platform_data.c | 35 -------------------- drivers/net/wireless/ti/wl1251/sdio.c | 8 +---- drivers/net/wireless/ti/wl1251/spi.c | 15 ++------- drivers/net/wireless/ti/wlcore/spi.c | 1 - include/linux/wl12xx.h | 44 -------------------------- 7 files changed, 4 insertions(+), 108 deletions(-) delete mode 100644 drivers/net/wireless/ti/wilink_platform_data.c delete mode 100644 include/linux/wl12xx.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 377aedb3808b..56764f76752b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -20665,7 +20665,6 @@ W: https://wireless.wiki.kernel.org/en/users/Drivers/wl12xx W: https://wireless.wiki.kernel.org/en/users/Drivers/wl1251 T: git git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx.git F: drivers/net/wireless/ti/ -F: include/linux/wl12xx.h TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER M: John Stultz diff --git a/drivers/net/wireless/ti/Kconfig b/drivers/net/wireless/ti/Kconfig index 7c0b17a76fe2..3fcd9e395f72 100644 --- a/drivers/net/wireless/ti/Kconfig +++ b/drivers/net/wireless/ti/Kconfig @@ -18,12 +18,4 @@ source "drivers/net/wireless/ti/wl18xx/Kconfig" # keep last for automatic dependencies source "drivers/net/wireless/ti/wlcore/Kconfig" -config WILINK_PLATFORM_DATA - bool "TI WiLink platform data" - depends on WLCORE_SDIO || WL1251_SDIO - default y - help - Small platform data bit needed to pass data to the sdio modules. - - endif # WLAN_VENDOR_TI diff --git a/drivers/net/wireless/ti/wilink_platform_data.c b/drivers/net/wireless/ti/wilink_platform_data.c deleted file mode 100644 index 1de6a62d526f..000000000000 --- a/drivers/net/wireless/ti/wilink_platform_data.c +++ /dev/null @@ -1,35 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * This file is part of wl12xx - * - * Copyright (C) 2010-2011 Texas Instruments, Inc. 
- */ - -#include -#include -#include - -static struct wl1251_platform_data *wl1251_platform_data; - -int __init wl1251_set_platform_data(const struct wl1251_platform_data *data) -{ - if (wl1251_platform_data) - return -EBUSY; - if (!data) - return -EINVAL; - - wl1251_platform_data = kmemdup(data, sizeof(*data), GFP_KERNEL); - if (!wl1251_platform_data) - return -ENOMEM; - - return 0; -} - -struct wl1251_platform_data *wl1251_get_platform_data(void) -{ - if (!wl1251_platform_data) - return ERR_PTR(-ENODEV); - - return wl1251_platform_data; -} -EXPORT_SYMBOL(wl1251_get_platform_data); diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c index c9a4e9a43400..301bd0043a43 100644 --- a/drivers/net/wireless/ti/wl1251/sdio.c +++ b/drivers/net/wireless/ti/wl1251/sdio.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -197,7 +196,6 @@ static int wl1251_sdio_probe(struct sdio_func *func, struct wl1251 *wl; struct ieee80211_hw *hw; struct wl1251_sdio *wl_sdio; - const struct wl1251_platform_data *wl1251_board_data; struct device_node *np = func->dev.of_node; hw = wl1251_alloc_hw(); @@ -225,11 +223,7 @@ static int wl1251_sdio_probe(struct sdio_func *func, wl->if_priv = wl_sdio; wl->if_ops = &wl1251_sdio_ops; - wl1251_board_data = wl1251_get_platform_data(); - if (!IS_ERR(wl1251_board_data)) { - wl->irq = wl1251_board_data->irq; - wl->use_eeprom = wl1251_board_data->use_eeprom; - } else if (np) { + if (np) { wl->use_eeprom = of_property_read_bool(np, "ti,wl1251-has-eeprom"); wl->irq = of_irq_get(np, 0); if (wl->irq == -EPROBE_DEFER) { diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c index 9df38726e8b0..08d9814b49c1 100644 --- a/drivers/net/wireless/ti/wl1251/spi.c +++ b/drivers/net/wireless/ti/wl1251/spi.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -226,16 +225,13 @@ static const struct wl1251_if_operations wl1251_spi_ops = { static int wl1251_spi_probe(struct spi_device *spi) { - struct wl1251_platform_data *pdata = dev_get_platdata(&spi->dev); struct device_node *np = spi->dev.of_node; struct ieee80211_hw *hw; struct wl1251 *wl; int ret; - if (!np && !pdata) { - wl1251_error("no platform data"); + if (!np) return -ENODEV; - } hw = wl1251_alloc_hw(); if (IS_ERR(hw)) @@ -259,14 +255,9 @@ static int wl1251_spi_probe(struct spi_device *spi) goto out_free; } - if (np) { - wl->use_eeprom = of_property_read_bool(np, "ti,wl1251-has-eeprom"); - wl->power_gpio = of_get_named_gpio(np, "ti,power-gpio", 0); - } else if (pdata) { - wl->power_gpio = pdata->power_gpio; - wl->use_eeprom = pdata->use_eeprom; - } + wl->use_eeprom = of_property_read_bool(np, "ti,wl1251-has-eeprom"); + wl->power_gpio = of_get_named_gpio(np, "ti,power-gpio", 0); if (wl->power_gpio == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto out_free; diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index 7eae1ec2eb2b..2d2edddc77bd 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include diff --git a/include/linux/wl12xx.h b/include/linux/wl12xx.h deleted file mode 100644 index 03d61f1d23ab..000000000000 --- a/include/linux/wl12xx.h +++ /dev/null @@ -1,44 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * This file is part of wl12xx - * - * Copyright (C) 2009 Nokia Corporation - * - * Contact: Luciano Coelho - */ - -#ifndef 
_LINUX_WL12XX_H -#define _LINUX_WL12XX_H - -#include - -struct wl1251_platform_data { - int power_gpio; - /* SDIO only: IRQ number if WLAN_IRQ line is used, 0 for SDIO IRQs */ - int irq; - bool use_eeprom; -}; - -#ifdef CONFIG_WILINK_PLATFORM_DATA - -int wl1251_set_platform_data(const struct wl1251_platform_data *data); - -struct wl1251_platform_data *wl1251_get_platform_data(void); - -#else - -static inline -int wl1251_set_platform_data(const struct wl1251_platform_data *data) -{ - return -ENOSYS; -} - -static inline -struct wl1251_platform_data *wl1251_get_platform_data(void) -{ - return ERR_PTR(-ENODATA); -} - -#endif - -#endif -- cgit v1.2.3 From 9f63f96aac92751e85f8313f59f2c867b88ea453 Mon Sep 17 00:00:00 2001 From: Vincent Mailhol Date: Thu, 1 Dec 2022 02:46:58 +0900 Subject: Documentation: devlink: add devlink documentation for the etas_es58x driver List all the version information reported by the etas_es58x driver through devlink. Also, update MAINTAINERS with the newly created file. Signed-off-by: Vincent Mailhol Link: https://lore.kernel.org/all/20221130174658.29282-8-mailhol.vincent@wanadoo.fr [mkl: fixed version information table: "bl" -> "fw.bootloader"] Signed-off-by: Marc Kleine-Budde --- Documentation/networking/devlink/etas_es58x.rst | 36 +++++++++++++++++++++ MAINTAINERS | 1 + 2 files changed, 37 insertions(+) create mode 100644 Documentation/networking/devlink/etas_es58x.rst (limited to 'MAINTAINERS') diff --git a/Documentation/networking/devlink/etas_es58x.rst b/Documentation/networking/devlink/etas_es58x.rst new file mode 100644 index 000000000000..3b857d82a44c --- /dev/null +++ b/Documentation/networking/devlink/etas_es58x.rst @@ -0,0 +1,36 @@ +.. SPDX-License-Identifier: GPL-2.0 + +========================== +etas_es58x devlink support +========================== + +This document describes the devlink features implemented by the +``etas_es58x`` device driver. + +Info versions +============= + +The ``etas_es58x`` driver reports the following versions + +.. list-table:: devlink info versions implemented + :widths: 5 5 90 + + * - Name + - Type + - Description + * - ``fw`` + - running + - Version of the firmware running on the device. Also available + through ``ethtool -i`` as the first member of the + ``firmware-version``. + * - ``fw.bootloader`` + - running + - Version of the bootloader running on the device. Also available + through ``ethtool -i`` as the second member of the + ``firmware-version``. + * - ``board.rev`` + - fixed + - The hardware revision of the device. + * - ``serial_number`` + - fixed + - The USB serial number. Also available through ``lsusb -v``. diff --git a/MAINTAINERS b/MAINTAINERS index 955c1be1efb2..71f4f8776779 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7682,6 +7682,7 @@ ETAS ES58X CAN/USB DRIVER M: Vincent Mailhol L: linux-can@vger.kernel.org S: Maintained +F: Documentation/networking/devlink/etas_es58x.rst F: drivers/net/can/usb/etas_es58x/ ETHERNET BRIDGE -- cgit v1.2.3 From 4e426e2534ce2811380ef09ee2a050779d1b5a29 Mon Sep 17 00:00:00 2001 From: Taras Chornyi Date: Fri, 9 Dec 2022 17:45:21 +0200 Subject: MAINTAINERS: Update email address for Marvell Prestera Ethernet Switch driver Taras's Marvell email account will be shut down soon, so change it to Plvision.
Signed-off-by: Taras Chornyi Signed-off-by: Vadym Kochan Link: https://lore.kernel.org/r/20221209154521.1246881-1-vadym.kochan@plvision.eu Signed-off-by: Jakub Kicinski --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 71f4f8776779..574519bc3036 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12366,7 +12366,7 @@ F: Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst F: drivers/net/ethernet/marvell/octeontx2/af/ MARVELL PRESTERA ETHERNET SWITCH DRIVER -M: Taras Chornyi +M: Taras Chornyi S: Supported W: https://github.com/Marvell-switching/switchdev-prestera F: drivers/net/ethernet/marvell/prestera/ -- cgit v1.2.3 From 15eb1621762134bd3a0f81020359b0c7745d1080 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 9 Dec 2022 11:15:52 -0600 Subject: dt-bindings: net: Convert Socionext NetSec Ethernet to DT schema Convert the Socionext NetSec Ethernet binding to DT schema format. Signed-off-by: Rob Herring Acked-by: Jassi Brar Link: https://lore.kernel.org/r/20221209171553.3350583-1-robh@kernel.org Signed-off-by: Jakub Kicinski --- .../bindings/net/socionext,synquacer-netsec.yaml | 73 ++++++++++++++++++++++ .../devicetree/bindings/net/socionext-netsec.txt | 56 ----------------- MAINTAINERS | 2 +- 3 files changed, 74 insertions(+), 57 deletions(-) create mode 100644 Documentation/devicetree/bindings/net/socionext,synquacer-netsec.yaml delete mode 100644 Documentation/devicetree/bindings/net/socionext-netsec.txt (limited to 'MAINTAINERS') diff --git a/Documentation/devicetree/bindings/net/socionext,synquacer-netsec.yaml b/Documentation/devicetree/bindings/net/socionext,synquacer-netsec.yaml new file mode 100644 index 000000000000..a65e6aa215a7 --- /dev/null +++ b/Documentation/devicetree/bindings/net/socionext,synquacer-netsec.yaml @@ -0,0 +1,73 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/socionext,synquacer-netsec.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Socionext NetSec Ethernet Controller IP + +maintainers: + - Jassi Brar + - Ilias Apalodimas + +allOf: + - $ref: ethernet-controller.yaml# + +properties: + compatible: + const: socionext,synquacer-netsec + + reg: + items: + - description: control register area + - description: EEPROM holding the MAC address and microengine firmware + + clocks: + maxItems: 1 + + clock-names: + const: phy_ref_clk + + dma-coherent: true + + interrupts: + maxItems: 1 + + mdio: + $ref: mdio.yaml# + +required: + - compatible + - reg + - clocks + - clock-names + - interrupts + - mdio + +unevaluatedProperties: false + +examples: + - | + #include + + ethernet@522d0000 { + compatible = "socionext,synquacer-netsec"; + reg = <0x522d0000 0x10000>, <0x10000000 0x10000>; + interrupts = ; + clocks = <&clk_netsec>; + clock-names = "phy_ref_clk"; + phy-mode = "rgmii"; + max-speed = <1000>; + max-frame-size = <9000>; + phy-handle = <&phy1>; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + phy1: ethernet-phy@1 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <1>; + }; + }; + }; +... 
diff --git a/Documentation/devicetree/bindings/net/socionext-netsec.txt b/Documentation/devicetree/bindings/net/socionext-netsec.txt deleted file mode 100644 index a3c1dffaa4bb..000000000000 --- a/Documentation/devicetree/bindings/net/socionext-netsec.txt +++ /dev/null @@ -1,56 +0,0 @@ -* Socionext NetSec Ethernet Controller IP - -Required properties: -- compatible: Should be "socionext,synquacer-netsec" -- reg: Address and length of the control register area, followed by the - address and length of the EEPROM holding the MAC address and - microengine firmware -- interrupts: Should contain ethernet controller interrupt -- clocks: phandle to the PHY reference clock -- clock-names: Should be "phy_ref_clk" -- phy-mode: See ethernet.txt file in the same directory -- phy-handle: See ethernet.txt in the same directory. - -- mdio device tree subnode: When the Netsec has a phy connected to its local - mdio, there must be device tree subnode with the following - required properties: - - - #address-cells: Must be <1>. - - #size-cells: Must be <0>. - - For each phy on the mdio bus, there must be a node with the following - fields: - - compatible: Refer to phy.txt - - reg: phy id used to communicate to phy. - -Optional properties: (See ethernet.txt file in the same directory) -- dma-coherent: Boolean property, must only be present if memory - accesses performed by the device are cache coherent. -- max-speed: See ethernet.txt in the same directory. -- max-frame-size: See ethernet.txt in the same directory. - -The MAC address will be determined using the optional properties -defined in ethernet.txt. The 'phy-mode' property is required, but may -be set to the empty string if the PHY configuration is programmed by -the firmware or set by hardware straps, and needs to be preserved. - -Example: - eth0: ethernet@522d0000 { - compatible = "socionext,synquacer-netsec"; - reg = <0 0x522d0000 0x0 0x10000>, <0 0x10000000 0x0 0x10000>; - interrupts = ; - clocks = <&clk_netsec>; - clock-names = "phy_ref_clk"; - phy-mode = "rgmii"; - max-speed = <1000>; - max-frame-size = <9000>; - phy-handle = <&phy1>; - - mdio { - #address-cells = <1>; - #size-cells = <0>; - phy1: ethernet-phy@1 { - compatible = "ethernet-phy-ieee802.3-c22"; - reg = <1>; - }; - }; diff --git a/MAINTAINERS b/MAINTAINERS index 574519bc3036..0fbe5135490d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -19066,7 +19066,7 @@ M: Jassi Brar M: Ilias Apalodimas L: netdev@vger.kernel.org S: Maintained -F: Documentation/devicetree/bindings/net/socionext-netsec.txt +F: Documentation/devicetree/bindings/net/socionext,synquacer-netsec.yaml F: drivers/net/ethernet/socionext/netsec.c SOCIONEXT (SNI) Synquacer SPI DRIVER -- cgit v1.2.3 From 4e81462a45ce7d758eb29ec60126f5793593c6f7 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 9 Dec 2022 14:05:18 -0800 Subject: MAINTAINERS: Update NXP FEC maintainer Emails to Joakim Zhang bounce; update the list of maintainers per feedback from Clark Wang and designate Wei Fang as the primary maintainer.
Signed-off-by: Florian Fainelli Signed-off-by: Jakub Kicinski --- MAINTAINERS | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 1daadaa4d48b..78d8928741ed 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8183,7 +8183,10 @@ S: Maintained F: drivers/i2c/busses/i2c-cpm.c FREESCALE IMX / MXC FEC DRIVER -M: Joakim Zhang +M: Wei Fang +R: Shenwei Wang +R: Clark Wang +R: NXP Linux Team L: netdev@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/net/fsl,fec.yaml -- cgit v1.2.3 From c9176e10b2b68dfa4b94dce11ad99214ceecf714 Mon Sep 17 00:00:00 2001 From: Sven Peter Date: Fri, 4 Nov 2022 22:12:58 +0100 Subject: dt-bindings: net: Add Broadcom BCM4377 family PCIe Bluetooth These chips are combined Wi-Fi/Bluetooth radios which expose a PCI subfunction for the Bluetooth part. They are found in Apple machines such as the x86 models with the T2 chip or the arm64 models with the M1 or M2 chips. Signed-off-by: Sven Peter Reviewed-by: Rob Herring Signed-off-by: Luiz Augusto von Dentz --- .../net/bluetooth/brcm,bcm4377-bluetooth.yaml | 81 ++++++++++++++++++++++ MAINTAINERS | 1 + 2 files changed, 82 insertions(+) create mode 100644 Documentation/devicetree/bindings/net/bluetooth/brcm,bcm4377-bluetooth.yaml (limited to 'MAINTAINERS') diff --git a/Documentation/devicetree/bindings/net/bluetooth/brcm,bcm4377-bluetooth.yaml b/Documentation/devicetree/bindings/net/bluetooth/brcm,bcm4377-bluetooth.yaml new file mode 100644 index 000000000000..37cb39a3a62e --- /dev/null +++ b/Documentation/devicetree/bindings/net/bluetooth/brcm,bcm4377-bluetooth.yaml @@ -0,0 +1,81 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/bluetooth/brcm,bcm4377-bluetooth.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Broadcom BCM4377 family PCIe Bluetooth Chips + +maintainers: + - Sven Peter + +description: + This binding describes Broadcom BCM4377 family PCIe-attached bluetooth chips + usually found in Apple machines. The Wi-Fi part of the chip is described in + bindings/net/wireless/brcm,bcm4329-fmac.yaml. + +allOf: + - $ref: bluetooth-controller.yaml# + +properties: + compatible: + enum: + - pci14e4,5fa0 # BCM4377 + - pci14e4,5f69 # BCM4378 + - pci14e4,5f71 # BCM4387 + + reg: + maxItems: 1 + + brcm,board-type: + $ref: /schemas/types.yaml#/definitions/string + description: Board type of the Bluetooth chip. This is used to decouple + the overall system board from the Bluetooth module and used to construct + firmware and calibration data filenames. + On Apple platforms, this should be the Apple module-instance codename + prefixed by "apple,", e.g. "apple,atlantisb". + pattern: '^apple,.*' + + brcm,taurus-cal-blob: + $ref: /schemas/types.yaml#/definitions/uint8-array + description: A per-device calibration blob for the Bluetooth radio. This + should be filled in by the bootloader from platform configuration + data, if necessary, and will be uploaded to the device. + This blob is used if the chip stepping of the Bluetooth module does not + support beamforming. + + brcm,taurus-bf-cal-blob: + $ref: /schemas/types.yaml#/definitions/uint8-array + description: A per-device calibration blob for the Bluetooth radio. This + should be filled in by the bootloader from platform configuration + data, if necessary, and will be uploaded to the device. + This blob is used if the chip stepping of the Bluetooth module supports + beamforming. 
+ + local-bd-address: true + +required: + - compatible + - reg + - local-bd-address + - brcm,board-type + +additionalProperties: false + +examples: + - | + pcie@a0000000 { + #address-cells = <3>; + #size-cells = <2>; + reg = <0xa0000000 0x1000000>; + device_type = "pci"; + ranges = <0x43000000 0x6 0xa0000000 0xa0000000 0x0 0x20000000>; + + bluetooth@0,1 { + compatible = "pci14e4,5f69"; + reg = <0x100 0x0 0x0 0x0 0x0>; + brcm,board-type = "apple,honshu"; + /* To be filled by the bootloader */ + local-bd-address = [00 00 00 00 00 00]; + }; + }; diff --git a/MAINTAINERS b/MAINTAINERS index 0fbe5135490d..45c443aa7c60 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1903,6 +1903,7 @@ F: Documentation/devicetree/bindings/interrupt-controller/apple,* F: Documentation/devicetree/bindings/iommu/apple,dart.yaml F: Documentation/devicetree/bindings/iommu/apple,sart.yaml F: Documentation/devicetree/bindings/mailbox/apple,mailbox.yaml +F: Documentation/devicetree/bindings/net/bluetooth/brcm,bcm4377-bluetooth.yaml F: Documentation/devicetree/bindings/nvme/apple,nvme-ans.yaml F: Documentation/devicetree/bindings/nvmem/apple,efuses.yaml F: Documentation/devicetree/bindings/pci/apple,pcie.yaml -- cgit v1.2.3 From 8a06127602de70541e751a8c64a06995ee281f22 Mon Sep 17 00:00:00 2001 From: Sven Peter Date: Fri, 4 Nov 2022 22:13:03 +0100 Subject: Bluetooth: hci_bcm4377: Add new driver for BCM4377 PCIe boards Broadcom BCM4377/4378/4387 are dual WiFi/Bluetooth boards found in Apple machines. This driver adds support for the Bluetooth function which exposes a shared memory IPC protocol over PCIe to tunnel HCI traffic. Signed-off-by: Sven Peter Signed-off-by: Luiz Augusto von Dentz --- MAINTAINERS | 1 + drivers/bluetooth/Kconfig | 12 + drivers/bluetooth/Makefile | 1 + drivers/bluetooth/hci_bcm4377.c | 2514 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 2528 insertions(+) create mode 100644 drivers/bluetooth/hci_bcm4377.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 45c443aa7c60..32dd41574930 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1911,6 +1911,7 @@ F: Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml F: Documentation/devicetree/bindings/power/apple* F: Documentation/devicetree/bindings/watchdog/apple,wdt.yaml F: arch/arm64/boot/dts/apple/ +F: drivers/bluetooth/hci_bcm4377.c F: drivers/clk/clk-apple-nco.c F: drivers/dma/apple-admac.c F: drivers/i2c/busses/i2c-pasemi-core.c diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 89b4d47b7c79..5a1a7bec3c42 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -285,6 +285,18 @@ config BT_HCIBCM203X Say Y here to compile support for HCI BCM203x devices into the kernel or say M to compile it as module (bcm203x). + +config BT_HCIBCM4377 + tristate "HCI BCM4377/4378/4387 PCIe driver" + depends on PCI + select FW_LOADER + help + Support for Broadcom BCM4377/4378/4387 Bluetooth chipsets attached via + PCIe. These are usually found in Apple machines. + + Say Y here to compile support for HCI BCM4377 family devices into the + kernel or say M to compile it as module (hci_bcm4377). 
+ config BT_HCIBPA10X tristate "HCI BPA10x USB driver" depends on USB diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile index 3321a8aea4a0..e0b261f24fc9 100644 --- a/drivers/bluetooth/Makefile +++ b/drivers/bluetooth/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_BT_HCIVHCI) += hci_vhci.o obj-$(CONFIG_BT_HCIUART) += hci_uart.o obj-$(CONFIG_BT_HCIBCM203X) += bcm203x.o +obj-$(CONFIG_BT_HCIBCM4377) += hci_bcm4377.o obj-$(CONFIG_BT_HCIBPA10X) += bpa10x.o obj-$(CONFIG_BT_HCIBFUSB) += bfusb.o obj-$(CONFIG_BT_HCIDTL1) += dtl1_cs.o diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c new file mode 100644 index 000000000000..74f44562ac33 --- /dev/null +++ b/drivers/bluetooth/hci_bcm4377.c @@ -0,0 +1,2514 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Bluetooth HCI driver for Broadcom 4377/4378/4387 devices attached via PCIe + * + * Copyright (C) The Asahi Linux Contributors + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +enum bcm4377_chip { + BCM4377 = 0, + BCM4378, + BCM4387, +}; + +#define BCM4377_DEVICE_ID 0x5fa0 +#define BCM4378_DEVICE_ID 0x5f69 +#define BCM4387_DEVICE_ID 0x5f71 + +#define BCM4377_TIMEOUT 1000 + +/* + * These devices only support DMA transactions inside a 32bit window + * (possibly to avoid 64 bit arithmetic). The window size cannot exceed + * 0xffffffff but is always aligned down to the previous 0x200 byte boundary + * which effectively limits the window to [start, start+0xfffffe00]. + * We just limit the DMA window to [0, 0xfffffe00] to make sure we don't + * run into this limitation. + */ +#define BCM4377_DMA_MASK 0xfffffe00 + +#define BCM4377_PCIECFG_BAR0_WINDOW1 0x80 +#define BCM4377_PCIECFG_BAR0_WINDOW2 0x70 +#define BCM4377_PCIECFG_BAR0_CORE2_WINDOW1 0x74 +#define BCM4377_PCIECFG_BAR0_CORE2_WINDOW2 0x78 +#define BCM4377_PCIECFG_BAR2_WINDOW 0x84 + +#define BCM4377_PCIECFG_BAR0_CORE2_WINDOW1_DEFAULT 0x18011000 +#define BCM4377_PCIECFG_BAR2_WINDOW_DEFAULT 0x19000000 + +#define BCM4377_PCIECFG_SUBSYSTEM_CTRL 0x88 + +#define BCM4377_BAR0_FW_DOORBELL 0x140 +#define BCM4377_BAR0_RTI_CONTROL 0x144 + +#define BCM4377_BAR0_SLEEP_CONTROL 0x150 +#define BCM4377_BAR0_SLEEP_CONTROL_UNQUIESCE 0 +#define BCM4377_BAR0_SLEEP_CONTROL_AWAKE 2 +#define BCM4377_BAR0_SLEEP_CONTROL_QUIESCE 3 + +#define BCM4377_BAR0_DOORBELL 0x174 +#define BCM4377_BAR0_DOORBELL_VALUE GENMASK(31, 16) +#define BCM4377_BAR0_DOORBELL_IDX GENMASK(15, 8) +#define BCM4377_BAR0_DOORBELL_RING BIT(5) + +#define BCM4377_BAR0_HOST_WINDOW_LO 0x590 +#define BCM4377_BAR0_HOST_WINDOW_HI 0x594 +#define BCM4377_BAR0_HOST_WINDOW_SIZE 0x598 + +#define BCM4377_BAR2_BOOTSTAGE 0x200454 + +#define BCM4377_BAR2_FW_LO 0x200478 +#define BCM4377_BAR2_FW_HI 0x20047c +#define BCM4377_BAR2_FW_SIZE 0x200480 + +#define BCM4377_BAR2_CONTEXT_ADDR_LO 0x20048c +#define BCM4377_BAR2_CONTEXT_ADDR_HI 0x200450 + +#define BCM4377_BAR2_RTI_STATUS 0x20045c +#define BCM4377_BAR2_RTI_WINDOW_LO 0x200494 +#define BCM4377_BAR2_RTI_WINDOW_HI 0x200498 +#define BCM4377_BAR2_RTI_WINDOW_SIZE 0x20049c + +#define BCM4377_OTP_SIZE 0xe0 +#define BCM4377_OTP_SYS_VENDOR 0x15 +#define BCM4377_OTP_CIS 0x80 +#define BCM4377_OTP_VENDOR_HDR 0x00000008 +#define BCM4377_OTP_MAX_PARAM_LEN 16 + +#define BCM4377_N_TRANSFER_RINGS 9 +#define BCM4377_N_COMPLETION_RINGS 6 + +#define BCM4377_MAX_RING_SIZE 256 + +#define BCM4377_MSGID_GENERATION GENMASK(15, 8) +#define BCM4377_MSGID_ID GENMASK(7, 0) + +#define BCM4377_RING_N_ENTRIES 
128 + +#define BCM4377_CONTROL_MSG_SIZE 0x34 +#define BCM4377_XFER_RING_MAX_INPLACE_PAYLOAD_SIZE (4 * 0xff) + +#define MAX_ACL_PAYLOAD_SIZE (HCI_MAX_FRAME_SIZE + HCI_ACL_HDR_SIZE) +#define MAX_SCO_PAYLOAD_SIZE (HCI_MAX_SCO_SIZE + HCI_SCO_HDR_SIZE) +#define MAX_EVENT_PAYLOAD_SIZE (HCI_MAX_EVENT_SIZE + HCI_EVENT_HDR_SIZE) + +enum bcm4377_otp_params_type { + BCM4377_OTP_BOARD_PARAMS, + BCM4377_OTP_CHIP_PARAMS +}; + +enum bcm4377_transfer_ring_id { + BCM4377_XFER_RING_CONTROL = 0, + BCM4377_XFER_RING_HCI_H2D = 1, + BCM4377_XFER_RING_HCI_D2H = 2, + BCM4377_XFER_RING_SCO_H2D = 3, + BCM4377_XFER_RING_SCO_D2H = 4, + BCM4377_XFER_RING_ACL_H2D = 5, + BCM4377_XFER_RING_ACL_D2H = 6, +}; + +enum bcm4377_completion_ring_id { + BCM4377_ACK_RING_CONTROL = 0, + BCM4377_ACK_RING_HCI_ACL = 1, + BCM4377_EVENT_RING_HCI_ACL = 2, + BCM4377_ACK_RING_SCO = 3, + BCM4377_EVENT_RING_SCO = 4, +}; + +enum bcm4377_doorbell { + BCM4377_DOORBELL_CONTROL = 0, + BCM4377_DOORBELL_HCI_H2D = 1, + BCM4377_DOORBELL_HCI_D2H = 2, + BCM4377_DOORBELL_ACL_H2D = 3, + BCM4377_DOORBELL_ACL_D2H = 4, + BCM4377_DOORBELL_SCO = 6, +}; + +/* + * Transfer ring entry + * + * flags: Flags to indicate if the payload is appended or mapped + * len: Payload length + * payload: Optional payload DMA address + * id: Message id to recognize the answer in the completion ring entry + */ +struct bcm4377_xfer_ring_entry { +#define BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED BIT(0) +#define BCM4377_XFER_RING_FLAG_PAYLOAD_IN_FOOTER BIT(1) + u8 flags; + __le16 len; + u8 _unk0; + __le64 payload; + __le16 id; + u8 _unk1[2]; +} __packed; +static_assert(sizeof(struct bcm4377_xfer_ring_entry) == 0x10); + +/* + * Completion ring entry + * + * flags: Flags to indicate if the payload is appended or mapped. If the payload + * is mapped it can be found in the buffer of the corresponding transfer + * ring message. 
+ * ring_id: Transfer ring ID which required this message + * msg_id: Message ID specified in transfer ring entry + * len: Payload length + */ +struct bcm4377_completion_ring_entry { + u8 flags; + u8 _unk0; + __le16 ring_id; + __le16 msg_id; + __le32 len; + u8 _unk1[6]; +} __packed; +static_assert(sizeof(struct bcm4377_completion_ring_entry) == 0x10); + +enum bcm4377_control_message_type { + BCM4377_CONTROL_MSG_CREATE_XFER_RING = 1, + BCM4377_CONTROL_MSG_CREATE_COMPLETION_RING = 2, + BCM4377_CONTROL_MSG_DESTROY_XFER_RING = 3, + BCM4377_CONTROL_MSG_DESTROY_COMPLETION_RING = 4, +}; + +/* + * Control message used to create a completion ring + * + * msg_type: Must be BCM4377_CONTROL_MSG_CREATE_COMPLETION_RING + * header_size: Unknown, but probably reserved space in front of the entry + * footer_size: Number of 32 bit words reserved for payloads after the entry + * id/id_again: Completion ring index + * ring_iova: DMA address of the ring buffer + * n_elements: Number of elements inside the ring buffer + * msi: MSI index, doesn't work for all rings though and should be zero + * intmod_delay: Unknown delay + * intmod_bytes: Unknown + */ +struct bcm4377_create_completion_ring_msg { + u8 msg_type; + u8 header_size; + u8 footer_size; + u8 _unk0; + __le16 id; + __le16 id_again; + __le64 ring_iova; + __le16 n_elements; + __le32 unk; + u8 _unk1[6]; + __le16 msi; + __le16 intmod_delay; + __le32 intmod_bytes; + __le16 _unk2; + __le32 _unk3; + u8 _unk4[10]; +} __packed; +static_assert(sizeof(struct bcm4377_create_completion_ring_msg) == + BCM4377_CONTROL_MSG_SIZE); + +/* + * Control ring message used to destroy a completion ring + * + * msg_type: Must be BCM4377_CONTROL_MSG_DESTROY_COMPLETION_RING + * ring_id: Completion ring to be destroyed + */ +struct bcm4377_destroy_completion_ring_msg { + u8 msg_type; + u8 _pad0; + __le16 ring_id; + u8 _pad1[48]; +} __packed; +static_assert(sizeof(struct bcm4377_destroy_completion_ring_msg) == + BCM4377_CONTROL_MSG_SIZE); + +/* + * Control message used to create a transfer ring + * + * msg_type: Must be BCM4377_CONTROL_MSG_CREATE_XFER_RING + * header_size: Number of 32 bit words reserved for unknown content before the + * entry + * footer_size: Number of 32 bit words reserved for payloads after the entry + * ring_id/ring_id_again: Transfer ring index + * ring_iova: DMA address of the ring buffer + * n_elements: Number of elements inside the ring buffer + * completion_ring_id: Completion ring index for acknowledgements and events + * doorbell: Doorbell index used to notify device of new entries + * flags: Transfer ring flags + * - virtual: set if there is no associated shared memory and only the + * corresponding completion ring is used + * - sync: only set for the SCO rings + */ +struct bcm4377_create_transfer_ring_msg { + u8 msg_type; + u8 header_size; + u8 footer_size; + u8 _unk0; + __le16 ring_id; + __le16 ring_id_again; + __le64 ring_iova; + u8 _unk1[8]; + __le16 n_elements; + __le16 completion_ring_id; + __le16 doorbell; +#define BCM4377_XFER_RING_FLAG_VIRTUAL BIT(7) +#define BCM4377_XFER_RING_FLAG_SYNC BIT(8) + __le16 flags; + u8 _unk2[20]; +} __packed; +static_assert(sizeof(struct bcm4377_create_transfer_ring_msg) == + BCM4377_CONTROL_MSG_SIZE); + +/* + * Control ring message used to destroy a transfer ring + * + * msg_type: Must be BCM4377_CONTROL_MSG_DESTROY_XFER_RING + * ring_id: Transfer ring to be destroyed + */ +struct bcm4377_destroy_transfer_ring_msg { + u8 msg_type; + u8 _pad0; + __le16 ring_id; + u8 _pad1[48]; +} __packed; 
+static_assert(sizeof(struct bcm4377_destroy_transfer_ring_msg) == + BCM4377_CONTROL_MSG_SIZE); + +/* + * "Converged IPC" context struct used to make the device aware of all other + * shared memory structures. A pointer to this structure is configured inside + * an MMIO register. + * + * version: Protocol version, must be 2. + * size: Size of this structure, must be 0x68. + * enabled_caps: Enabled capabilities. Unknown bitfield but should be 2. + * peripheral_info_addr: DMA address for a 0x20 buffer to which the device will + * write unknown contents + * {completion,xfer}_ring_{tails,heads}_addr: DMA pointers to ring heads/tails + * n_completion_rings: Number of completion rings, the firmware only works if + * this is set to BCM4377_N_COMPLETION_RINGS. + * n_xfer_rings: Number of transfer rings, the firmware only works if + * this is set to BCM4377_N_TRANSFER_RINGS. + * control_completion_ring_addr: Control completion ring buffer DMA address + * control_xfer_ring_addr: Control transfer ring buffer DMA address + * control_xfer_ring_n_entries: Number of control transfer ring entries + * control_completion_ring_n_entries: Number of control completion ring entries + * control_xfer_ring_doorbell: Control transfer ring doorbell + * control_completion_ring_doorbell: Control completion ring doorbell, + * must be set to 0xffff + * control_xfer_ring_msi: Control transfer ring MSI index, must be 0 + * control_completion_ring_msi: Control completion ring MSI index, must be 0. + * control_xfer_ring_header_size: Number of 32 bit words reserved in front of + * every control transfer ring entry + * control_xfer_ring_footer_size: Number of 32 bit words reserved after every + * control transfer ring entry + * control_completion_ring_header_size: Number of 32 bit words reserved in front + * of every control completion ring entry + * control_completion_ring_footer_size: Number of 32 bit words reserved after + * every control completion ring entry + * scratch_pad: Optional scratch pad DMA address + * scratch_pad_size: Scratch pad size + */ +struct bcm4377_context { + __le16 version; + __le16 size; + __le32 enabled_caps; + + __le64 peripheral_info_addr; + + /* ring heads and tails */ + __le64 completion_ring_heads_addr; + __le64 xfer_ring_tails_addr; + __le64 completion_ring_tails_addr; + __le64 xfer_ring_heads_addr; + __le16 n_completion_rings; + __le16 n_xfer_rings; + + /* control ring configuration */ + __le64 control_completion_ring_addr; + __le64 control_xfer_ring_addr; + __le16 control_xfer_ring_n_entries; + __le16 control_completion_ring_n_entries; + __le16 control_xfer_ring_doorbell; + __le16 control_completion_ring_doorbell; + __le16 control_xfer_ring_msi; + __le16 control_completion_ring_msi; + u8 control_xfer_ring_header_size; + u8 control_xfer_ring_footer_size; + u8 control_completion_ring_header_size; + u8 control_completion_ring_footer_size; + + __le16 _unk0; + __le16 _unk1; + + __le64 scratch_pad; + __le32 scratch_pad_size; + + __le32 _unk3; +} __packed; +static_assert(sizeof(struct bcm4377_context) == 0x68); + +#define BCM4378_CALIBRATION_CHUNK_SIZE 0xe6 +struct bcm4378_hci_send_calibration_cmd { + u8 unk; + __le16 blocks_left; + u8 data[BCM4378_CALIBRATION_CHUNK_SIZE]; +} __packed; + +#define BCM4378_PTB_CHUNK_SIZE 0xcf +struct bcm4378_hci_send_ptb_cmd { + __le16 blocks_left; + u8 data[BCM4378_PTB_CHUNK_SIZE]; +} __packed; + +/* + * Shared memory structure used to store the ring head and tail pointers.
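+ * + * Ownership of these indexes is inferred from the code below rather than + * stated explicitly: the host advances xfer_ring_head (bcm4377_enqueue, + * bcm4377_handle_event) and completion_ring_tail + * (bcm4377_poll_completion_ring), while the device advances xfer_ring_tail + * and completion_ring_head.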
*/ +struct bcm4377_ring_state { + __le16 completion_ring_head[BCM4377_N_COMPLETION_RINGS]; + __le16 completion_ring_tail[BCM4377_N_COMPLETION_RINGS]; + __le16 xfer_ring_head[BCM4377_N_TRANSFER_RINGS]; + __le16 xfer_ring_tail[BCM4377_N_TRANSFER_RINGS]; +}; + +/* + * A transfer ring can be used in two configurations: + * 1) Send control or HCI messages to the device which are then acknowledged + * in the corresponding completion ring + * 2) Receive HCI frames from the device. In this case the transfer ring + * itself contains empty messages that are acknowledged once data is + * available from the device. If the payloads fit inside the footers + * of the completion ring the transfer ring can be configured to be + * virtual such that it has no ring buffer. + * + * ring_id: ring index hardcoded in the firmware + * doorbell: doorbell index to notify device of new entries + * payload_size: optional in-place payload size + * mapped_payload_size: optional out-of-place payload size + * completion_ring: index of corresponding completion ring + * n_entries: number of entries inside this ring + * generation: ring generation; incremented on hci_open to detect stale messages + * sync: set to true for SCO rings + * virtual: set to true if this ring has no entries and is just required to + * set up a corresponding completion ring for device->host messages + * d2h_buffers_only: set to true if this ring is only used to provide large + * buffers used by device->host messages in the completion + * ring + * allow_wait: allow waiting for messages to be acknowledged + * enabled: true once the ring has been created and can be used + * ring: ring buffer for entries (struct bcm4377_xfer_ring_entry) + * ring_dma: DMA address for ring entry buffer + * payloads: payload buffer for mapped_payload_size payloads + * payloads_dma: DMA address for payload buffer + * events: pointer to array of completions if waiting is allowed + * msgids: bitmap to keep track of used message ids + * lock: Spinlock to protect access to ring structures used in the irq handler + */ +struct bcm4377_transfer_ring { + enum bcm4377_transfer_ring_id ring_id; + enum bcm4377_doorbell doorbell; + size_t payload_size; + size_t mapped_payload_size; + u8 completion_ring; + u16 n_entries; + u8 generation; + + bool sync; + bool virtual; + bool d2h_buffers_only; + bool allow_wait; + bool enabled; + + void *ring; + dma_addr_t ring_dma; + + void *payloads; + dma_addr_t payloads_dma; + + struct completion **events; + DECLARE_BITMAP(msgids, BCM4377_MAX_RING_SIZE); + spinlock_t lock; +}; + +/* + * A completion ring can be used either to acknowledge messages sent in + * the corresponding transfer ring or to receive messages associated with the + * transfer ring. When used to receive messages the transfer ring either + * has no ring buffer and is only advanced ("virtual transfer ring") or it + * only contains empty DMA buffers to be used for the payloads.
+ * + * ring_id: completion ring id, hardcoded in firmware + * payload_size: optional payload size after each entry + * delay: unknown delay + * n_entries: number of entries in this ring + * enabled: true once the ring has been created and can be used + * ring: ring buffer for entries (struct bcm4377_completion_ring_entry) + * ring_dma: DMA address of ring buffer + * transfer_rings: bitmap of corresponding transfer ring ids + */ +struct bcm4377_completion_ring { + enum bcm4377_completion_ring_id ring_id; + u16 payload_size; + u16 delay; + u16 n_entries; + bool enabled; + + void *ring; + dma_addr_t ring_dma; + + unsigned long transfer_rings; +}; + +struct bcm4377_data; + +/* + * Chip-specific configuration struct + * + * id: Chip id (e.g. 0x4377 for BCM4377) + * otp_offset: Offset to the start of the OTP inside BAR0 + * bar0_window1: Backplane address mapped to the first window in BAR0 + * bar0_window2: Backplane address mapped to the second window in BAR0 + * bar0_core2_window2: Optional backplane address mapped to the second core's + * second window in BAR0 + * has_bar0_core2_window2: Set to true if this chip requires the second core's + * second window to be configured + * clear_pciecfg_subsystem_ctrl_bit19: Set to true if bit 19 in the + * vendor-specific subsystem control + * register has to be cleared + * disable_aspm: Set to true if ASPM must be disabled due to hardware errata + * broken_ext_scan: Set to true if the chip erroneously claims to support + * extended scanning + * broken_mws_transport_config: Set to true if the chip erroneously claims to + * support MWS Transport Configuration + * send_calibration: Optional callback to send calibration data + * send_ptb: Callback to send "PTB" regulatory/calibration data + */ +struct bcm4377_hw { + unsigned int id; + + u32 otp_offset; + + u32 bar0_window1; + u32 bar0_window2; + u32 bar0_core2_window2; + + unsigned long has_bar0_core2_window2 : 1; + unsigned long clear_pciecfg_subsystem_ctrl_bit19 : 1; + unsigned long disable_aspm : 1; + unsigned long broken_ext_scan : 1; + unsigned long broken_mws_transport_config : 1; + + int (*send_calibration)(struct bcm4377_data *bcm4377); + int (*send_ptb)(struct bcm4377_data *bcm4377, + const struct firmware *fw); +}; + +static const struct bcm4377_hw bcm4377_hw_variants[]; +static const struct dmi_system_id bcm4377_dmi_board_table[]; + +/* + * Private struct associated with each device containing global state + * + * pdev: Pointer to associated struct pci_dev + * hdev: Pointer to associated struct hci_dev + * bar0: iomem pointing to BAR0 + * bar2: iomem pointing to BAR2 + * bootstage: Current value of the bootstage + * rti_status: Current "RTI" status value + * hw: Pointer to chip-specific struct bcm4377_hw + * taurus_cal_blob: "Taurus" calibration blob used for some chips + * taurus_cal_size: "Taurus" calibration blob size + * taurus_beamforming_cal_blob: "Taurus" beamforming calibration blob used for + * some chips + * taurus_beamforming_cal_size: "Taurus" beamforming calibration blob size + * stepping: Chip stepping read from OTP; used for firmware selection + * vendor: Antenna vendor read from OTP; used for firmware selection + * board_type: Board type from FDT or DMI match; used for firmware selection + * event: Event for changed bootstage or rti_status; used for booting firmware + * ctx: "Converged IPC" context + * ctx_dma: "Converged IPC" context DMA address + * ring_state: Shared memory buffer containing ring head and tail indexes + * ring_state_dma: DMA address for ring_state + *
{control,hci_acl,sco}_ack_ring: Completion rings used to acknowledge messages + * {hci_acl,sco}_event_ring: Completion rings used for device->host messages + * control_h2d_ring: Transfer ring used for control messages + * {hci,sco,acl}_h2d_ring: Transfer ring used to transfer HCI frames + * {hci,sco,acl}_d2h_ring: Transfer ring used to receive HCI frames in the + * corresponding completion ring + */ +struct bcm4377_data { + struct pci_dev *pdev; + struct hci_dev *hdev; + + void __iomem *bar0; + void __iomem *bar2; + + u32 bootstage; + u32 rti_status; + + const struct bcm4377_hw *hw; + + const void *taurus_cal_blob; + int taurus_cal_size; + const void *taurus_beamforming_cal_blob; + int taurus_beamforming_cal_size; + + char stepping[BCM4377_OTP_MAX_PARAM_LEN]; + char vendor[BCM4377_OTP_MAX_PARAM_LEN]; + const char *board_type; + + struct completion event; + + struct bcm4377_context *ctx; + dma_addr_t ctx_dma; + + struct bcm4377_ring_state *ring_state; + dma_addr_t ring_state_dma; + + /* + * The HCI and ACL rings have to be merged because this structure is + * hardcoded in the firmware. + */ + struct bcm4377_completion_ring control_ack_ring; + struct bcm4377_completion_ring hci_acl_ack_ring; + struct bcm4377_completion_ring hci_acl_event_ring; + struct bcm4377_completion_ring sco_ack_ring; + struct bcm4377_completion_ring sco_event_ring; + + struct bcm4377_transfer_ring control_h2d_ring; + struct bcm4377_transfer_ring hci_h2d_ring; + struct bcm4377_transfer_ring hci_d2h_ring; + struct bcm4377_transfer_ring sco_h2d_ring; + struct bcm4377_transfer_ring sco_d2h_ring; + struct bcm4377_transfer_ring acl_h2d_ring; + struct bcm4377_transfer_ring acl_d2h_ring; +}; + +static void bcm4377_ring_doorbell(struct bcm4377_data *bcm4377, u8 doorbell, + u16 val) +{ + u32 db = 0; + + db |= FIELD_PREP(BCM4377_BAR0_DOORBELL_VALUE, val); + db |= FIELD_PREP(BCM4377_BAR0_DOORBELL_IDX, doorbell); + db |= BCM4377_BAR0_DOORBELL_RING; + + dev_dbg(&bcm4377->pdev->dev, "write %d to doorbell #%d (0x%x)\n", val, + doorbell, db); + iowrite32(db, bcm4377->bar0 + BCM4377_BAR0_DOORBELL); +} + +static int bcm4377_extract_msgid(struct bcm4377_data *bcm4377, + struct bcm4377_transfer_ring *ring, + u16 raw_msgid, u8 *msgid) +{ + u8 generation = FIELD_GET(BCM4377_MSGID_GENERATION, raw_msgid); + *msgid = FIELD_GET(BCM4377_MSGID_ID, raw_msgid); + + if (generation != ring->generation) { + dev_warn( + &bcm4377->pdev->dev, + "invalid message generation %d should be %d in entry for ring %d\n", + generation, ring->generation, ring->ring_id); + return -EINVAL; + } + + if (*msgid >= ring->n_entries) { + dev_warn(&bcm4377->pdev->dev, + "invalid message id in entry for ring %d: %d > %d\n", + ring->ring_id, *msgid, ring->n_entries); + return -EINVAL; + } + + return 0; +} + +static void bcm4377_handle_event(struct bcm4377_data *bcm4377, + struct bcm4377_transfer_ring *ring, + u16 raw_msgid, u8 entry_flags, u8 type, + void *payload, size_t len) +{ + struct sk_buff *skb; + u16 head; + u8 msgid; + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + if (!ring->enabled) { + dev_warn(&bcm4377->pdev->dev, + "event for disabled transfer ring %d\n", + ring->ring_id); + goto out; + } + + if (ring->d2h_buffers_only && + entry_flags & BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED) { + if (bcm4377_extract_msgid(bcm4377, ring, raw_msgid, &msgid)) + goto out; + + if (len > ring->mapped_payload_size) { + dev_warn( + &bcm4377->pdev->dev, + "invalid payload len in event for ring %d: %zu > %zu\n", + ring->ring_id, len, ring->mapped_payload_size); + goto 
out; + } + + payload = ring->payloads + msgid * ring->mapped_payload_size; + } + + skb = bt_skb_alloc(len, GFP_ATOMIC); + if (!skb) + goto out; + + memcpy(skb_put(skb, len), payload, len); + hci_skb_pkt_type(skb) = type; + hci_recv_frame(bcm4377->hdev, skb); + +out: + head = le16_to_cpu(bcm4377->ring_state->xfer_ring_head[ring->ring_id]); + head = (head + 1) % ring->n_entries; + bcm4377->ring_state->xfer_ring_head[ring->ring_id] = cpu_to_le16(head); + + bcm4377_ring_doorbell(bcm4377, ring->doorbell, head); + + spin_unlock_irqrestore(&ring->lock, flags); +} + +static void bcm4377_handle_ack(struct bcm4377_data *bcm4377, + struct bcm4377_transfer_ring *ring, + u16 raw_msgid) +{ + unsigned long flags; + u8 msgid; + + spin_lock_irqsave(&ring->lock, flags); + + if (bcm4377_extract_msgid(bcm4377, ring, raw_msgid, &msgid)) + goto unlock; + + if (!test_bit(msgid, ring->msgids)) { + dev_warn( + &bcm4377->pdev->dev, + "invalid message id in ack for ring %d: %d is not used\n", + ring->ring_id, msgid); + goto unlock; + } + + if (ring->allow_wait && ring->events[msgid]) { + complete(ring->events[msgid]); + ring->events[msgid] = NULL; + } + + bitmap_release_region(ring->msgids, msgid, ring->n_entries); + +unlock: + spin_unlock_irqrestore(&ring->lock, flags); +} + +static void bcm4377_handle_completion(struct bcm4377_data *bcm4377, + struct bcm4377_completion_ring *ring, + u16 pos) +{ + struct bcm4377_completion_ring_entry *entry; + u16 msg_id, transfer_ring; + size_t entry_size, data_len; + void *data; + + if (pos >= ring->n_entries) { + dev_warn(&bcm4377->pdev->dev, + "invalid offset %d for completion ring %d\n", pos, + ring->ring_id); + return; + } + + entry_size = sizeof(*entry) + ring->payload_size; + entry = ring->ring + pos * entry_size; + data = ring->ring + pos * entry_size + sizeof(*entry); + data_len = le32_to_cpu(entry->len); + msg_id = le16_to_cpu(entry->msg_id); + transfer_ring = le16_to_cpu(entry->ring_id); + + if ((ring->transfer_rings & BIT(transfer_ring)) == 0) { + dev_warn( + &bcm4377->pdev->dev, + "invalid entry at offset %d for transfer ring %d in completion ring %d\n", + pos, transfer_ring, ring->ring_id); + return; + } + + dev_dbg(&bcm4377->pdev->dev, + "entry in completion ring %d for transfer ring %d with msg_id %d\n", + ring->ring_id, transfer_ring, msg_id); + + switch (transfer_ring) { + case BCM4377_XFER_RING_CONTROL: + bcm4377_handle_ack(bcm4377, &bcm4377->control_h2d_ring, msg_id); + break; + case BCM4377_XFER_RING_HCI_H2D: + bcm4377_handle_ack(bcm4377, &bcm4377->hci_h2d_ring, msg_id); + break; + case BCM4377_XFER_RING_SCO_H2D: + bcm4377_handle_ack(bcm4377, &bcm4377->sco_h2d_ring, msg_id); + break; + case BCM4377_XFER_RING_ACL_H2D: + bcm4377_handle_ack(bcm4377, &bcm4377->acl_h2d_ring, msg_id); + break; + + case BCM4377_XFER_RING_HCI_D2H: + bcm4377_handle_event(bcm4377, &bcm4377->hci_d2h_ring, msg_id, + entry->flags, HCI_EVENT_PKT, data, + data_len); + break; + case BCM4377_XFER_RING_SCO_D2H: + bcm4377_handle_event(bcm4377, &bcm4377->sco_d2h_ring, msg_id, + entry->flags, HCI_SCODATA_PKT, data, + data_len); + break; + case BCM4377_XFER_RING_ACL_D2H: + bcm4377_handle_event(bcm4377, &bcm4377->acl_d2h_ring, msg_id, + entry->flags, HCI_ACLDATA_PKT, data, + data_len); + break; + + default: + dev_warn( + &bcm4377->pdev->dev, + "entry in completion ring %d for unknown transfer ring %d with msg_id %d\n", + ring->ring_id, transfer_ring, msg_id); + } +} + +static void bcm4377_poll_completion_ring(struct bcm4377_data *bcm4377, + struct bcm4377_completion_ring *ring) +{ + u16 tail; + 
__le16 *heads = bcm4377->ring_state->completion_ring_head; + __le16 *tails = bcm4377->ring_state->completion_ring_tail; + + if (!ring->enabled) + return; + + tail = le16_to_cpu(tails[ring->ring_id]); + dev_dbg(&bcm4377->pdev->dev, + "completion ring #%d: head: %d, tail: %d\n", ring->ring_id, + le16_to_cpu(heads[ring->ring_id]), tail); + + while (tail != le16_to_cpu(READ_ONCE(heads[ring->ring_id]))) { + /* + * ensure the CPU doesn't speculate through the comparison. + * otherwise it might already read the (empty) queue entry + * before the updated head has been loaded and checked. + */ + dma_rmb(); + + bcm4377_handle_completion(bcm4377, ring, tail); + + tail = (tail + 1) % ring->n_entries; + tails[ring->ring_id] = cpu_to_le16(tail); + } +} + +static irqreturn_t bcm4377_irq(int irq, void *data) +{ + struct bcm4377_data *bcm4377 = data; + u32 bootstage, rti_status; + + bootstage = ioread32(bcm4377->bar2 + BCM4377_BAR2_BOOTSTAGE); + rti_status = ioread32(bcm4377->bar2 + BCM4377_BAR2_RTI_STATUS); + + if (bootstage != bcm4377->bootstage || + rti_status != bcm4377->rti_status) { + dev_dbg(&bcm4377->pdev->dev, + "bootstage = %d -> %d, rti state = %d -> %d\n", + bcm4377->bootstage, bootstage, bcm4377->rti_status, + rti_status); + complete(&bcm4377->event); + bcm4377->bootstage = bootstage; + bcm4377->rti_status = rti_status; + } + + if (rti_status > 2) + dev_err(&bcm4377->pdev->dev, "RTI status is %d\n", rti_status); + + bcm4377_poll_completion_ring(bcm4377, &bcm4377->control_ack_ring); + bcm4377_poll_completion_ring(bcm4377, &bcm4377->hci_acl_event_ring); + bcm4377_poll_completion_ring(bcm4377, &bcm4377->hci_acl_ack_ring); + bcm4377_poll_completion_ring(bcm4377, &bcm4377->sco_ack_ring); + bcm4377_poll_completion_ring(bcm4377, &bcm4377->sco_event_ring); + + return IRQ_HANDLED; +} + +static int bcm4377_enqueue(struct bcm4377_data *bcm4377, + struct bcm4377_transfer_ring *ring, void *data, + size_t len, bool wait) +{ + unsigned long flags; + struct bcm4377_xfer_ring_entry *entry; + void *payload; + size_t offset; + u16 head, tail, new_head; + u16 raw_msgid; + int ret, msgid; + DECLARE_COMPLETION_ONSTACK(event); + + if (len > ring->payload_size && len > ring->mapped_payload_size) { + dev_warn( + &bcm4377->pdev->dev, + "payload len %zu is too large for ring %d (max is %zu or %zu)\n", + len, ring->ring_id, ring->payload_size, + ring->mapped_payload_size); + return -EINVAL; + } + if (wait && !ring->allow_wait) + return -EINVAL; + if (ring->virtual) + return -EINVAL; + + spin_lock_irqsave(&ring->lock, flags); + + head = le16_to_cpu(bcm4377->ring_state->xfer_ring_head[ring->ring_id]); + tail = le16_to_cpu(bcm4377->ring_state->xfer_ring_tail[ring->ring_id]); + + new_head = (head + 1) % ring->n_entries; + + if (new_head == tail) { + dev_warn(&bcm4377->pdev->dev, + "can't send message because ring %d is full\n", + ring->ring_id); + ret = -EINVAL; + goto out; + } + + msgid = bitmap_find_free_region(ring->msgids, ring->n_entries, 0); + if (msgid < 0) { + dev_warn(&bcm4377->pdev->dev, + "can't find message id for ring %d\n", ring->ring_id); + ret = -EINVAL; + goto out; + } + + raw_msgid = FIELD_PREP(BCM4377_MSGID_GENERATION, ring->generation); + raw_msgid |= FIELD_PREP(BCM4377_MSGID_ID, msgid); + + offset = head * (sizeof(*entry) + ring->payload_size); + entry = ring->ring + offset; + + memset(entry, 0, sizeof(*entry)); + entry->id = cpu_to_le16(raw_msgid); + entry->len = cpu_to_le16(len); + + if (len <= ring->payload_size) { + entry->flags = BCM4377_XFER_RING_FLAG_PAYLOAD_IN_FOOTER; + payload = ring->ring + 
offset + sizeof(*entry); + } else { + entry->flags = BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED; + entry->payload = cpu_to_le64(ring->payloads_dma + + msgid * ring->mapped_payload_size); + payload = ring->payloads + msgid * ring->mapped_payload_size; + } + + memcpy(payload, data, len); + + if (wait) + ring->events[msgid] = &event; + + /* + * The 4377 chips stop responding to any commands as soon as they + * have been idle for a while. Poking the sleep control register here + * makes them come alive again. + */ + iowrite32(BCM4377_BAR0_SLEEP_CONTROL_AWAKE, + bcm4377->bar0 + BCM4377_BAR0_SLEEP_CONTROL); + + dev_dbg(&bcm4377->pdev->dev, + "updating head for transfer queue #%d to %d\n", ring->ring_id, + new_head); + bcm4377->ring_state->xfer_ring_head[ring->ring_id] = + cpu_to_le16(new_head); + + if (!ring->sync) + bcm4377_ring_doorbell(bcm4377, ring->doorbell, new_head); + ret = 0; + +out: + spin_unlock_irqrestore(&ring->lock, flags); + + if (ret == 0 && wait) { + ret = wait_for_completion_interruptible_timeout( + &event, BCM4377_TIMEOUT); + if (ret == 0) + ret = -ETIMEDOUT; + else if (ret > 0) + ret = 0; + + spin_lock_irqsave(&ring->lock, flags); + ring->events[msgid] = NULL; + spin_unlock_irqrestore(&ring->lock, flags); + } + + return ret; +} + +static int bcm4377_create_completion_ring(struct bcm4377_data *bcm4377, + struct bcm4377_completion_ring *ring) +{ + struct bcm4377_create_completion_ring_msg msg; + int ret; + + if (ring->enabled) { + dev_warn(&bcm4377->pdev->dev, + "completion ring %d already enabled\n", ring->ring_id); + return 0; + } + + memset(ring->ring, 0, + ring->n_entries * (sizeof(struct bcm4377_completion_ring_entry) + + ring->payload_size)); + memset(&msg, 0, sizeof(msg)); + msg.msg_type = BCM4377_CONTROL_MSG_CREATE_COMPLETION_RING; + msg.id = cpu_to_le16(ring->ring_id); + msg.id_again = cpu_to_le16(ring->ring_id); + msg.ring_iova = cpu_to_le64(ring->ring_dma); + msg.n_elements = cpu_to_le16(ring->n_entries); + msg.intmod_bytes = cpu_to_le32(0xffffffff); + msg.unk = cpu_to_le32(0xffffffff); + msg.intmod_delay = cpu_to_le16(ring->delay); + msg.footer_size = ring->payload_size / 4; + + ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg, + sizeof(msg), true); + if (!ret) + ring->enabled = true; + + return ret; +} + +static int bcm4377_destroy_completion_ring(struct bcm4377_data *bcm4377, + struct bcm4377_completion_ring *ring) +{ + struct bcm4377_destroy_completion_ring_msg msg; + int ret; + + memset(&msg, 0, sizeof(msg)); + msg.msg_type = BCM4377_CONTROL_MSG_DESTROY_COMPLETION_RING; + msg.ring_id = cpu_to_le16(ring->ring_id); + + ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg, + sizeof(msg), true); + if (ret) + dev_warn(&bcm4377->pdev->dev, + "failed to destroy completion ring %d\n", + ring->ring_id); + + ring->enabled = false; + return ret; +} + +static int bcm4377_create_transfer_ring(struct bcm4377_data *bcm4377, + struct bcm4377_transfer_ring *ring) +{ + struct bcm4377_create_transfer_ring_msg msg; + u16 flags = 0; + int ret, i; + unsigned long spinlock_flags; + + if (ring->virtual) + flags |= BCM4377_XFER_RING_FLAG_VIRTUAL; + if (ring->sync) + flags |= BCM4377_XFER_RING_FLAG_SYNC; + + spin_lock_irqsave(&ring->lock, spinlock_flags); + memset(&msg, 0, sizeof(msg)); + msg.msg_type = BCM4377_CONTROL_MSG_CREATE_XFER_RING; + msg.ring_id = cpu_to_le16(ring->ring_id); + msg.ring_id_again = cpu_to_le16(ring->ring_id); + msg.ring_iova = cpu_to_le64(ring->ring_dma); + msg.n_elements = cpu_to_le16(ring->n_entries); + msg.completion_ring_id = 
cpu_to_le16(ring->completion_ring); + msg.doorbell = cpu_to_le16(ring->doorbell); + msg.flags = cpu_to_le16(flags); + msg.footer_size = ring->payload_size / 4; + + bcm4377->ring_state->xfer_ring_head[ring->ring_id] = 0; + bcm4377->ring_state->xfer_ring_tail[ring->ring_id] = 0; + ring->generation++; + spin_unlock_irqrestore(&ring->lock, spinlock_flags); + + ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg, + sizeof(msg), true); + + spin_lock_irqsave(&ring->lock, spinlock_flags); + + if (ring->d2h_buffers_only) { + for (i = 0; i < ring->n_entries; ++i) { + struct bcm4377_xfer_ring_entry *entry = + ring->ring + i * sizeof(*entry); + u16 raw_msgid = FIELD_PREP(BCM4377_MSGID_GENERATION, + ring->generation); + raw_msgid |= FIELD_PREP(BCM4377_MSGID_ID, i); + + memset(entry, 0, sizeof(*entry)); + entry->id = cpu_to_le16(raw_msgid); + entry->len = cpu_to_le16(ring->mapped_payload_size); + entry->flags = BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED; + entry->payload = + cpu_to_le64(ring->payloads_dma + + i * ring->mapped_payload_size); + } + } + + /* + * send some messages if this is a device->host ring to allow the device + * to reply by acknowledging them in the completion ring + */ + if (ring->virtual || ring->d2h_buffers_only) { + bcm4377->ring_state->xfer_ring_head[ring->ring_id] = + cpu_to_le16(0xf); + bcm4377_ring_doorbell(bcm4377, ring->doorbell, 0xf); + } + + ring->enabled = true; + spin_unlock_irqrestore(&ring->lock, spinlock_flags); + + return ret; +} + +static int bcm4377_destroy_transfer_ring(struct bcm4377_data *bcm4377, + struct bcm4377_transfer_ring *ring) +{ + struct bcm4377_destroy_transfer_ring_msg msg; + int ret; + + memset(&msg, 0, sizeof(msg)); + msg.msg_type = BCM4377_CONTROL_MSG_DESTROY_XFER_RING; + msg.ring_id = cpu_to_le16(ring->ring_id); + + ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg, + sizeof(msg), true); + if (ret) + dev_warn(&bcm4377->pdev->dev, + "failed to destroy transfer ring %d\n", ring->ring_id); + + ring->enabled = false; + return ret; +} + +static int __bcm4378_send_calibration_chunk(struct bcm4377_data *bcm4377, + const void *data, size_t data_len, + u16 blocks_left) +{ + struct bcm4378_hci_send_calibration_cmd cmd; + struct sk_buff *skb; + + if (data_len > sizeof(cmd.data)) + return -EINVAL; + + memset(&cmd, 0, sizeof(cmd)); + cmd.unk = 0x03; + cmd.blocks_left = cpu_to_le16(blocks_left); + memcpy(cmd.data, data, data_len); + + skb = __hci_cmd_sync(bcm4377->hdev, 0xfd97, sizeof(cmd), &cmd, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + kfree_skb(skb); + return 0; +} + +static int __bcm4378_send_calibration(struct bcm4377_data *bcm4377, + const void *data, size_t data_size) +{ + int ret; + size_t i, left, transfer_len; + size_t blocks = + DIV_ROUND_UP(data_size, (size_t)BCM4378_CALIBRATION_CHUNK_SIZE); + + if (!data) { + dev_err(&bcm4377->pdev->dev, + "no calibration data available.\n"); + return -ENOENT; + } + + for (i = 0, left = data_size; i < blocks; ++i, left -= transfer_len) { + transfer_len = + min_t(size_t, left, BCM4378_CALIBRATION_CHUNK_SIZE); + + ret = __bcm4378_send_calibration_chunk( + bcm4377, data + i * BCM4378_CALIBRATION_CHUNK_SIZE, + transfer_len, blocks - i - 1); + if (ret) { + dev_err(&bcm4377->pdev->dev, + "send calibration chunk failed with %d\n", ret); + return ret; + } + } + + return 0; +} + +static int bcm4378_send_calibration(struct bcm4377_data *bcm4377) +{ + if ((strcmp(bcm4377->stepping, "b1") == 0) || + strcmp(bcm4377->stepping, "b3") == 0) + return __bcm4378_send_calibration( + 
bcm4377, bcm4377->taurus_beamforming_cal_blob, + bcm4377->taurus_beamforming_cal_size); + else + return __bcm4378_send_calibration(bcm4377, + bcm4377->taurus_cal_blob, + bcm4377->taurus_cal_size); +} + +static int bcm4387_send_calibration(struct bcm4377_data *bcm4377) +{ + if (strcmp(bcm4377->stepping, "c2") == 0) + return __bcm4378_send_calibration( + bcm4377, bcm4377->taurus_beamforming_cal_blob, + bcm4377->taurus_beamforming_cal_size); + else + return __bcm4378_send_calibration(bcm4377, + bcm4377->taurus_cal_blob, + bcm4377->taurus_cal_size); +} + +static const struct firmware *bcm4377_request_blob(struct bcm4377_data *bcm4377, + const char *suffix) +{ + const struct firmware *fw; + char name0[64], name1[64]; + int ret; + + snprintf(name0, sizeof(name0), "brcm/brcmbt%04x%s-%s-%s.%s", + bcm4377->hw->id, bcm4377->stepping, bcm4377->board_type, + bcm4377->vendor, suffix); + snprintf(name1, sizeof(name1), "brcm/brcmbt%04x%s-%s.%s", + bcm4377->hw->id, bcm4377->stepping, bcm4377->board_type, + suffix); + dev_dbg(&bcm4377->pdev->dev, "Trying to load firmware: '%s' or '%s'\n", + name0, name1); + + ret = firmware_request_nowarn(&fw, name0, &bcm4377->pdev->dev); + if (!ret) + return fw; + ret = firmware_request_nowarn(&fw, name1, &bcm4377->pdev->dev); + if (!ret) + return fw; + + dev_err(&bcm4377->pdev->dev, + "Unable to load firmware; tried '%s' and '%s'\n", name0, name1); + return NULL; +} + +static int bcm4377_send_ptb(struct bcm4377_data *bcm4377, + const struct firmware *fw) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(bcm4377->hdev, 0xfd98, fw->size, fw->data, + HCI_INIT_TIMEOUT); + /* + * This command seems to always fail on more recent firmware versions + * (even in traces taken from the macOS driver). It's unclear why this + * happens but because the PTB file contains calibration and/or + * regulatory data and may be required on older firmware we still try to + * send it here just in case and just ignore if it fails. 
+ */ + if (!IS_ERR(skb)) + kfree_skb(skb); + return 0; +} + +static int bcm4378_send_ptb_chunk(struct bcm4377_data *bcm4377, + const void *data, size_t data_len, + u16 blocks_left) +{ + struct bcm4378_hci_send_ptb_cmd cmd; + struct sk_buff *skb; + + if (data_len > BCM4378_PTB_CHUNK_SIZE) + return -EINVAL; + + memset(&cmd, 0, sizeof(cmd)); + cmd.blocks_left = cpu_to_le16(blocks_left); + memcpy(cmd.data, data, data_len); + + skb = __hci_cmd_sync(bcm4377->hdev, 0xfe0d, sizeof(cmd), &cmd, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + kfree_skb(skb); + return 0; +} + +static int bcm4378_send_ptb(struct bcm4377_data *bcm4377, + const struct firmware *fw) +{ + size_t chunks = DIV_ROUND_UP(fw->size, (size_t)BCM4378_PTB_CHUNK_SIZE); + size_t i, left, transfer_len; + int ret; + + for (i = 0, left = fw->size; i < chunks; ++i, left -= transfer_len) { + transfer_len = min_t(size_t, left, BCM4378_PTB_CHUNK_SIZE); + + dev_dbg(&bcm4377->pdev->dev, "sending ptb chunk %zu/%zu\n", + i + 1, chunks); + ret = bcm4378_send_ptb_chunk( + bcm4377, fw->data + i * BCM4378_PTB_CHUNK_SIZE, + transfer_len, chunks - i - 1); + if (ret) { + dev_err(&bcm4377->pdev->dev, + "sending ptb chunk %zu failed (%d)", i, ret); + return ret; + } + } + + return 0; +} + +static int bcm4377_hci_open(struct hci_dev *hdev) +{ + struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev); + int ret; + + dev_dbg(&bcm4377->pdev->dev, "creating rings\n"); + + ret = bcm4377_create_completion_ring(bcm4377, + &bcm4377->hci_acl_ack_ring); + if (ret) + return ret; + ret = bcm4377_create_completion_ring(bcm4377, + &bcm4377->hci_acl_event_ring); + if (ret) + goto destroy_hci_acl_ack; + ret = bcm4377_create_completion_ring(bcm4377, &bcm4377->sco_ack_ring); + if (ret) + goto destroy_hci_acl_event; + ret = bcm4377_create_completion_ring(bcm4377, &bcm4377->sco_event_ring); + if (ret) + goto destroy_sco_ack; + dev_dbg(&bcm4377->pdev->dev, + "all completion rings successfully created!\n"); + + ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring); + if (ret) + goto destroy_sco_event; + ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring); + if (ret) + goto destroy_hci_h2d; + ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring); + if (ret) + goto destroy_hci_d2h; + ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring); + if (ret) + goto destroy_sco_h2d; + ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring); + if (ret) + goto destroy_sco_d2h; + ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->acl_d2h_ring); + if (ret) + goto destroy_acl_h2d; + dev_dbg(&bcm4377->pdev->dev, + "all transfer rings successfully created!\n"); + + return 0; + +destroy_acl_h2d: + bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring); +destroy_sco_d2h: + bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring); +destroy_sco_h2d: + bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring); +destroy_hci_d2h: + bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring); +destroy_hci_h2d: + bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring); +destroy_sco_event: + bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_event_ring); +destroy_sco_ack: + bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_ack_ring); +destroy_hci_acl_event: + bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_event_ring); +destroy_hci_acl_ack: + bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_ack_ring); + + dev_err(&bcm4377->pdev->dev, "Creating rings failed 
with %d\n", ret); + return ret; +} + +static int bcm4377_hci_close(struct hci_dev *hdev) +{ + struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev); + + dev_dbg(&bcm4377->pdev->dev, "destroying rings in hci_close\n"); + + bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->acl_d2h_ring); + bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring); + bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring); + bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring); + bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring); + bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring); + + bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_event_ring); + bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_ack_ring); + bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_event_ring); + bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_ack_ring); + + return 0; +} + +static bool bcm4377_is_valid_bdaddr(struct bcm4377_data *bcm4377, + bdaddr_t *addr) +{ + if (addr->b[0] != 0x93) + return true; + if (addr->b[1] != 0x76) + return true; + if (addr->b[2] != 0x00) + return true; + if (addr->b[4] != (bcm4377->hw->id & 0xff)) + return true; + if (addr->b[5] != (bcm4377->hw->id >> 8)) + return true; + return false; +} + +static int bcm4377_check_bdaddr(struct bcm4377_data *bcm4377) +{ + struct hci_rp_read_bd_addr *bda; + struct sk_buff *skb; + + skb = __hci_cmd_sync(bcm4377->hdev, HCI_OP_READ_BD_ADDR, 0, NULL, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + int err = PTR_ERR(skb); + + dev_err(&bcm4377->pdev->dev, "HCI_OP_READ_BD_ADDR failed (%d)", + err); + return err; + } + + if (skb->len != sizeof(*bda)) { + dev_err(&bcm4377->pdev->dev, + "HCI_OP_READ_BD_ADDR reply length invalid"); + kfree_skb(skb); + return -EIO; + } + + bda = (struct hci_rp_read_bd_addr *)skb->data; + if (!bcm4377_is_valid_bdaddr(bcm4377, &bda->bdaddr)) + set_bit(HCI_QUIRK_INVALID_BDADDR, &bcm4377->hdev->quirks); + + kfree_skb(skb); + return 0; +} + +static int bcm4377_hci_setup(struct hci_dev *hdev) +{ + struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev); + const struct firmware *fw; + int ret; + + if (bcm4377->hw->send_calibration) { + ret = bcm4377->hw->send_calibration(bcm4377); + if (ret) + return ret; + } + + fw = bcm4377_request_blob(bcm4377, "ptb"); + if (!fw) { + dev_err(&bcm4377->pdev->dev, "failed to load PTB data"); + return -ENOENT; + } + + ret = bcm4377->hw->send_ptb(bcm4377, fw); + release_firmware(fw); + if (ret) + return ret; + + return bcm4377_check_bdaddr(bcm4377); +} + +static int bcm4377_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev); + struct bcm4377_transfer_ring *ring; + int ret; + + switch (hci_skb_pkt_type(skb)) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + ring = &bcm4377->hci_h2d_ring; + break; + + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + ring = &bcm4377->acl_h2d_ring; + break; + + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + ring = &bcm4377->sco_h2d_ring; + break; + + default: + return -EILSEQ; + } + + ret = bcm4377_enqueue(bcm4377, ring, skb->data, skb->len, false); + if (ret < 0) { + hdev->stat.err_tx++; + return ret; + } + + hdev->stat.byte_tx += skb->len; + kfree_skb(skb); + return ret; +} + +static int bcm4377_hci_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev); + struct sk_buff *skb; + int err; + + skb = __hci_cmd_sync(hdev, 0xfc01, 6, bdaddr, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + 
dev_err(&bcm4377->pdev->dev, + "Change address command failed (%d)", err); + return err; + } + kfree_skb(skb); + + return 0; +} + +static int bcm4377_alloc_transfer_ring(struct bcm4377_data *bcm4377, + struct bcm4377_transfer_ring *ring) +{ + size_t entry_size; + + spin_lock_init(&ring->lock); + ring->payload_size = ALIGN(ring->payload_size, 4); + ring->mapped_payload_size = ALIGN(ring->mapped_payload_size, 4); + + if (ring->payload_size > BCM4377_XFER_RING_MAX_INPLACE_PAYLOAD_SIZE) + return -EINVAL; + if (ring->n_entries > BCM4377_MAX_RING_SIZE) + return -EINVAL; + if (ring->virtual && ring->allow_wait) + return -EINVAL; + + if (ring->d2h_buffers_only) { + if (ring->virtual) + return -EINVAL; + if (ring->payload_size) + return -EINVAL; + if (!ring->mapped_payload_size) + return -EINVAL; + } + if (ring->virtual) + return 0; + + entry_size = + ring->payload_size + sizeof(struct bcm4377_xfer_ring_entry); + ring->ring = dmam_alloc_coherent(&bcm4377->pdev->dev, + ring->n_entries * entry_size, + &ring->ring_dma, GFP_KERNEL); + if (!ring->ring) + return -ENOMEM; + + if (ring->allow_wait) { + ring->events = devm_kcalloc(&bcm4377->pdev->dev, + ring->n_entries, + sizeof(*ring->events), GFP_KERNEL); + if (!ring->events) + return -ENOMEM; + } + + if (ring->mapped_payload_size) { + ring->payloads = dmam_alloc_coherent( + &bcm4377->pdev->dev, + ring->n_entries * ring->mapped_payload_size, + &ring->payloads_dma, GFP_KERNEL); + if (!ring->payloads) + return -ENOMEM; + } + + return 0; +} + +static int bcm4377_alloc_completion_ring(struct bcm4377_data *bcm4377, + struct bcm4377_completion_ring *ring) +{ + size_t entry_size; + + ring->payload_size = ALIGN(ring->payload_size, 4); + if (ring->payload_size > BCM4377_XFER_RING_MAX_INPLACE_PAYLOAD_SIZE) + return -EINVAL; + if (ring->n_entries > BCM4377_MAX_RING_SIZE) + return -EINVAL; + + entry_size = ring->payload_size + + sizeof(struct bcm4377_completion_ring_entry); + + ring->ring = dmam_alloc_coherent(&bcm4377->pdev->dev, + ring->n_entries * entry_size, + &ring->ring_dma, GFP_KERNEL); + if (!ring->ring) + return -ENOMEM; + return 0; +} + +static int bcm4377_init_context(struct bcm4377_data *bcm4377) +{ + struct device *dev = &bcm4377->pdev->dev; + dma_addr_t peripheral_info_dma; + + bcm4377->ctx = dmam_alloc_coherent(dev, sizeof(*bcm4377->ctx), + &bcm4377->ctx_dma, GFP_KERNEL); + if (!bcm4377->ctx) + return -ENOMEM; + memset(bcm4377->ctx, 0, sizeof(*bcm4377->ctx)); + + bcm4377->ring_state = + dmam_alloc_coherent(dev, sizeof(*bcm4377->ring_state), + &bcm4377->ring_state_dma, GFP_KERNEL); + if (!bcm4377->ring_state) + return -ENOMEM; + memset(bcm4377->ring_state, 0, sizeof(*bcm4377->ring_state)); + + bcm4377->ctx->version = cpu_to_le16(1); + bcm4377->ctx->size = cpu_to_le16(sizeof(*bcm4377->ctx)); + bcm4377->ctx->enabled_caps = cpu_to_le32(2); + + /* + * The BT device will write 0x20 bytes of data to this buffer but + * the exact contents are unknown. It only needs to exist for BT + * to work such that we can just allocate and then ignore it. 
+ */ + if (!dmam_alloc_coherent(&bcm4377->pdev->dev, 0x20, + &peripheral_info_dma, GFP_KERNEL)) + return -ENOMEM; + bcm4377->ctx->peripheral_info_addr = cpu_to_le64(peripheral_info_dma); + + bcm4377->ctx->xfer_ring_heads_addr = cpu_to_le64( + bcm4377->ring_state_dma + + offsetof(struct bcm4377_ring_state, xfer_ring_head)); + bcm4377->ctx->xfer_ring_tails_addr = cpu_to_le64( + bcm4377->ring_state_dma + + offsetof(struct bcm4377_ring_state, xfer_ring_tail)); + bcm4377->ctx->completion_ring_heads_addr = cpu_to_le64( + bcm4377->ring_state_dma + + offsetof(struct bcm4377_ring_state, completion_ring_head)); + bcm4377->ctx->completion_ring_tails_addr = cpu_to_le64( + bcm4377->ring_state_dma + + offsetof(struct bcm4377_ring_state, completion_ring_tail)); + + bcm4377->ctx->n_completion_rings = + cpu_to_le16(BCM4377_N_COMPLETION_RINGS); + bcm4377->ctx->n_xfer_rings = cpu_to_le16(BCM4377_N_TRANSFER_RINGS); + + bcm4377->ctx->control_completion_ring_addr = + cpu_to_le64(bcm4377->control_ack_ring.ring_dma); + bcm4377->ctx->control_completion_ring_n_entries = + cpu_to_le16(bcm4377->control_ack_ring.n_entries); + bcm4377->ctx->control_completion_ring_doorbell = cpu_to_le16(0xffff); + bcm4377->ctx->control_completion_ring_msi = 0; + bcm4377->ctx->control_completion_ring_header_size = 0; + bcm4377->ctx->control_completion_ring_footer_size = 0; + + bcm4377->ctx->control_xfer_ring_addr = + cpu_to_le64(bcm4377->control_h2d_ring.ring_dma); + bcm4377->ctx->control_xfer_ring_n_entries = + cpu_to_le16(bcm4377->control_h2d_ring.n_entries); + bcm4377->ctx->control_xfer_ring_doorbell = + cpu_to_le16(bcm4377->control_h2d_ring.doorbell); + bcm4377->ctx->control_xfer_ring_msi = 0; + bcm4377->ctx->control_xfer_ring_header_size = 0; + bcm4377->ctx->control_xfer_ring_footer_size = + bcm4377->control_h2d_ring.payload_size / 4; + + dev_dbg(&bcm4377->pdev->dev, "context initialized at IOVA %pad", + &bcm4377->ctx_dma); + + return 0; +} + +static int bcm4377_prepare_rings(struct bcm4377_data *bcm4377) +{ + int ret; + + /* + * Even though many of these settings appear to be configurable + * when sending the "create ring" messages most of these are + * actually hardcoded in some (and quite possibly all) firmware versions + * and changing them on the host has no effect. + * Specifically, this applies to at least the doorbells, the transfer + * and completion ring ids and their mapping (e.g. both HCI and ACL + * entries will always be queued in completion rings 1 and 2 no matter + * what we configure here). + */ + bcm4377->control_ack_ring.ring_id = BCM4377_ACK_RING_CONTROL; + bcm4377->control_ack_ring.n_entries = 32; + bcm4377->control_ack_ring.transfer_rings = + BIT(BCM4377_XFER_RING_CONTROL); + + bcm4377->hci_acl_ack_ring.ring_id = BCM4377_ACK_RING_HCI_ACL; + bcm4377->hci_acl_ack_ring.n_entries = 2 * BCM4377_RING_N_ENTRIES; + bcm4377->hci_acl_ack_ring.transfer_rings = + BIT(BCM4377_XFER_RING_HCI_H2D) | BIT(BCM4377_XFER_RING_ACL_H2D); + bcm4377->hci_acl_ack_ring.delay = 1000; + + /* + * A payload size of MAX_EVENT_PAYLOAD_SIZE is enough here since large + * ACL packets will be transmitted inside buffers mapped via + * acl_d2h_ring anyway. 
+ */ + bcm4377->hci_acl_event_ring.ring_id = BCM4377_EVENT_RING_HCI_ACL; + bcm4377->hci_acl_event_ring.payload_size = MAX_EVENT_PAYLOAD_SIZE; + bcm4377->hci_acl_event_ring.n_entries = 2 * BCM4377_RING_N_ENTRIES; + bcm4377->hci_acl_event_ring.transfer_rings = + BIT(BCM4377_XFER_RING_HCI_D2H) | BIT(BCM4377_XFER_RING_ACL_D2H); + bcm4377->hci_acl_event_ring.delay = 1000; + + bcm4377->sco_ack_ring.ring_id = BCM4377_ACK_RING_SCO; + bcm4377->sco_ack_ring.n_entries = BCM4377_RING_N_ENTRIES; + bcm4377->sco_ack_ring.transfer_rings = BIT(BCM4377_XFER_RING_SCO_H2D); + + bcm4377->sco_event_ring.ring_id = BCM4377_EVENT_RING_SCO; + bcm4377->sco_event_ring.payload_size = MAX_SCO_PAYLOAD_SIZE; + bcm4377->sco_event_ring.n_entries = BCM4377_RING_N_ENTRIES; + bcm4377->sco_event_ring.transfer_rings = BIT(BCM4377_XFER_RING_SCO_D2H); + + bcm4377->control_h2d_ring.ring_id = BCM4377_XFER_RING_CONTROL; + bcm4377->control_h2d_ring.doorbell = BCM4377_DOORBELL_CONTROL; + bcm4377->control_h2d_ring.payload_size = BCM4377_CONTROL_MSG_SIZE; + bcm4377->control_h2d_ring.completion_ring = BCM4377_ACK_RING_CONTROL; + bcm4377->control_h2d_ring.allow_wait = true; + bcm4377->control_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES; + + bcm4377->hci_h2d_ring.ring_id = BCM4377_XFER_RING_HCI_H2D; + bcm4377->hci_h2d_ring.doorbell = BCM4377_DOORBELL_HCI_H2D; + bcm4377->hci_h2d_ring.payload_size = MAX_EVENT_PAYLOAD_SIZE; + bcm4377->hci_h2d_ring.completion_ring = BCM4377_ACK_RING_HCI_ACL; + bcm4377->hci_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES; + + bcm4377->hci_d2h_ring.ring_id = BCM4377_XFER_RING_HCI_D2H; + bcm4377->hci_d2h_ring.doorbell = BCM4377_DOORBELL_HCI_D2H; + bcm4377->hci_d2h_ring.completion_ring = BCM4377_EVENT_RING_HCI_ACL; + bcm4377->hci_d2h_ring.virtual = true; + bcm4377->hci_d2h_ring.n_entries = BCM4377_RING_N_ENTRIES; + + bcm4377->sco_h2d_ring.ring_id = BCM4377_XFER_RING_SCO_H2D; + bcm4377->sco_h2d_ring.doorbell = BCM4377_DOORBELL_SCO; + bcm4377->sco_h2d_ring.payload_size = MAX_SCO_PAYLOAD_SIZE; + bcm4377->sco_h2d_ring.completion_ring = BCM4377_ACK_RING_SCO; + bcm4377->sco_h2d_ring.sync = true; + bcm4377->sco_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES; + + bcm4377->sco_d2h_ring.ring_id = BCM4377_XFER_RING_SCO_D2H; + bcm4377->sco_d2h_ring.doorbell = BCM4377_DOORBELL_SCO; + bcm4377->sco_d2h_ring.completion_ring = BCM4377_EVENT_RING_SCO; + bcm4377->sco_d2h_ring.virtual = true; + bcm4377->sco_d2h_ring.sync = true; + bcm4377->sco_d2h_ring.n_entries = BCM4377_RING_N_ENTRIES; + + /* + * This ring has to use mapped_payload_size because the largest ACL + * packet doesn't fit inside the largest possible footer + */ + bcm4377->acl_h2d_ring.ring_id = BCM4377_XFER_RING_ACL_H2D; + bcm4377->acl_h2d_ring.doorbell = BCM4377_DOORBELL_ACL_H2D; + bcm4377->acl_h2d_ring.mapped_payload_size = MAX_ACL_PAYLOAD_SIZE; + bcm4377->acl_h2d_ring.completion_ring = BCM4377_ACK_RING_HCI_ACL; + bcm4377->acl_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES; + + /* + * This ring only contains empty buffers to be used by incoming + * ACL packets that do not fit inside the footer of hci_acl_event_ring + */ + bcm4377->acl_d2h_ring.ring_id = BCM4377_XFER_RING_ACL_D2H; + bcm4377->acl_d2h_ring.doorbell = BCM4377_DOORBELL_ACL_D2H; + bcm4377->acl_d2h_ring.completion_ring = BCM4377_EVENT_RING_HCI_ACL; + bcm4377->acl_d2h_ring.d2h_buffers_only = true; + bcm4377->acl_d2h_ring.mapped_payload_size = MAX_ACL_PAYLOAD_SIZE; + bcm4377->acl_d2h_ring.n_entries = BCM4377_RING_N_ENTRIES; + + /* + * no need for any cleanup since this is only called from _probe + * and only 
devres-managed allocations are used + */ + ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->control_h2d_ring); + if (ret) + return ret; + ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring); + if (ret) + return ret; + ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring); + if (ret) + return ret; + ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring); + if (ret) + return ret; + ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring); + if (ret) + return ret; + ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring); + if (ret) + return ret; + ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->acl_d2h_ring); + if (ret) + return ret; + + ret = bcm4377_alloc_completion_ring(bcm4377, + &bcm4377->control_ack_ring); + if (ret) + return ret; + ret = bcm4377_alloc_completion_ring(bcm4377, + &bcm4377->hci_acl_ack_ring); + if (ret) + return ret; + ret = bcm4377_alloc_completion_ring(bcm4377, + &bcm4377->hci_acl_event_ring); + if (ret) + return ret; + ret = bcm4377_alloc_completion_ring(bcm4377, &bcm4377->sco_ack_ring); + if (ret) + return ret; + ret = bcm4377_alloc_completion_ring(bcm4377, &bcm4377->sco_event_ring); + if (ret) + return ret; + + dev_dbg(&bcm4377->pdev->dev, "all rings allocated and prepared\n"); + + return 0; +} + +static int bcm4377_boot(struct bcm4377_data *bcm4377) +{ + const struct firmware *fw; + void *bfr; + dma_addr_t fw_dma; + int ret = 0; + u32 bootstage, rti_status; + + bootstage = ioread32(bcm4377->bar2 + BCM4377_BAR2_BOOTSTAGE); + rti_status = ioread32(bcm4377->bar2 + BCM4377_BAR2_RTI_STATUS); + + if (bootstage != 0) { + dev_err(&bcm4377->pdev->dev, "bootstage is %d and not 0\n", + bootstage); + return -EINVAL; + } + + if (rti_status != 0) { + dev_err(&bcm4377->pdev->dev, "RTI status is %d and not 0\n", + rti_status); + return -EINVAL; + } + + fw = bcm4377_request_blob(bcm4377, "bin"); + if (!fw) { + dev_err(&bcm4377->pdev->dev, "Failed to load firmware\n"); + return -ENOENT; + } + + bfr = dma_alloc_coherent(&bcm4377->pdev->dev, fw->size, &fw_dma, + GFP_KERNEL); + if (!bfr) { + ret = -ENOMEM; + goto out_release_fw; + } + + memcpy(bfr, fw->data, fw->size); + + iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_LO); + iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_HI); + iowrite32(BCM4377_DMA_MASK, + bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_SIZE); + + iowrite32(lower_32_bits(fw_dma), bcm4377->bar2 + BCM4377_BAR2_FW_LO); + iowrite32(upper_32_bits(fw_dma), bcm4377->bar2 + BCM4377_BAR2_FW_HI); + iowrite32(fw->size, bcm4377->bar2 + BCM4377_BAR2_FW_SIZE); + iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_FW_DOORBELL); + + dev_dbg(&bcm4377->pdev->dev, "waiting for firmware to boot\n"); + + ret = wait_for_completion_interruptible_timeout(&bcm4377->event, + BCM4377_TIMEOUT); + if (ret == 0) { + ret = -ETIMEDOUT; + goto out_dma_free; + } else if (ret < 0) { + goto out_dma_free; + } + + if (bcm4377->bootstage != 2) { + dev_err(&bcm4377->pdev->dev, "bootstage %d != 2\n", + bcm4377->bootstage); + ret = -ENXIO; + goto out_dma_free; + } + + dev_dbg(&bcm4377->pdev->dev, "firmware has booted (stage = %x)\n", + bcm4377->bootstage); + ret = 0; + +out_dma_free: + dma_free_coherent(&bcm4377->pdev->dev, fw->size, bfr, fw_dma); +out_release_fw: + release_firmware(fw); + return ret; +} + +static int bcm4377_setup_rti(struct bcm4377_data *bcm4377) +{ + int ret; + + dev_dbg(&bcm4377->pdev->dev, "starting RTI\n"); + iowrite32(1, bcm4377->bar0 + BCM4377_BAR0_RTI_CONTROL); + + ret = 
wait_for_completion_interruptible_timeout(&bcm4377->event, + BCM4377_TIMEOUT); + if (ret == 0) { + dev_err(&bcm4377->pdev->dev, + "timed out while waiting for RTI to transition to state 1"); + return -ETIMEDOUT; + } else if (ret < 0) { + return ret; + } + + if (bcm4377->rti_status != 1) { + dev_err(&bcm4377->pdev->dev, "RTI did not ack state 1 (%d)\n", + bcm4377->rti_status); + return -ENODEV; + } + dev_dbg(&bcm4377->pdev->dev, "RTI is in state 1\n"); + + /* allow access to the entire IOVA space again */ + iowrite32(0, bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_LO); + iowrite32(0, bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_HI); + iowrite32(BCM4377_DMA_MASK, + bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_SIZE); + + /* setup "Converged IPC" context */ + iowrite32(lower_32_bits(bcm4377->ctx_dma), + bcm4377->bar2 + BCM4377_BAR2_CONTEXT_ADDR_LO); + iowrite32(upper_32_bits(bcm4377->ctx_dma), + bcm4377->bar2 + BCM4377_BAR2_CONTEXT_ADDR_HI); + iowrite32(2, bcm4377->bar0 + BCM4377_BAR0_RTI_CONTROL); + + ret = wait_for_completion_interruptible_timeout(&bcm4377->event, + BCM4377_TIMEOUT); + if (ret == 0) { + dev_err(&bcm4377->pdev->dev, + "timed out while waiting for RTI to transition to state 2"); + return -ETIMEDOUT; + } else if (ret < 0) { + return ret; + } + + if (bcm4377->rti_status != 2) { + dev_err(&bcm4377->pdev->dev, "RTI did not ack state 2 (%d)\n", + bcm4377->rti_status); + return -ENODEV; + } + + dev_dbg(&bcm4377->pdev->dev, + "RTI is in state 2; control ring is ready\n"); + bcm4377->control_ack_ring.enabled = true; + + return 0; +} + +static int bcm4377_parse_otp_board_params(struct bcm4377_data *bcm4377, + char tag, const char *val, size_t len) +{ + if (tag != 'V') + return 0; + if (len >= sizeof(bcm4377->vendor)) + return -EINVAL; + + strscpy(bcm4377->vendor, val, len + 1); + return 0; +} + +static int bcm4377_parse_otp_chip_params(struct bcm4377_data *bcm4377, char tag, + const char *val, size_t len) +{ + size_t idx = 0; + + if (tag != 's') + return 0; + if (len >= sizeof(bcm4377->stepping)) + return -EINVAL; + + while (len != 0) { + bcm4377->stepping[idx] = tolower(val[idx]); + if (val[idx] == '\0') + return 0; + + idx++; + len--; + } + + bcm4377->stepping[idx] = '\0'; + return 0; +} + +static int bcm4377_parse_otp_str(struct bcm4377_data *bcm4377, const u8 *str, + enum bcm4377_otp_params_type type) +{ + const char *p; + int ret; + + p = skip_spaces(str); + while (*p) { + char tag = *p++; + const char *end; + size_t len; + + if (*p++ != '=') /* implicit NUL check */ + return -EINVAL; + + /* *p might be NUL here, if so end == p and len == 0 */ + end = strchrnul(p, ' '); + len = end - p; + + /* leave 1 byte for NUL in destination string */ + if (len > (BCM4377_OTP_MAX_PARAM_LEN - 1)) + return -EINVAL; + + switch (type) { + case BCM4377_OTP_BOARD_PARAMS: + ret = bcm4377_parse_otp_board_params(bcm4377, tag, p, + len); + break; + case BCM4377_OTP_CHIP_PARAMS: + ret = bcm4377_parse_otp_chip_params(bcm4377, tag, p, + len); + break; + default: + ret = -EINVAL; + break; + } + + if (ret) + return ret; + + /* Skip to next arg, if any */ + p = skip_spaces(end); + } + + return 0; +} + +static int bcm4377_parse_otp_sys_vendor(struct bcm4377_data *bcm4377, u8 *otp, + size_t size) +{ + int idx = 4; + const char *chip_params; + const char *board_params; + int ret; + + /* 4-byte header and two empty strings */ + if (size < 6) + return -EINVAL; + + if (get_unaligned_le32(otp) != BCM4377_OTP_VENDOR_HDR) + return -EINVAL; + + chip_params = &otp[idx]; + + /* Skip first string, including terminator */ + idx += 
strnlen(chip_params, size - idx) + 1; + if (idx >= size) + return -EINVAL; + + board_params = &otp[idx]; + + /* Skip to terminator of second string */ + idx += strnlen(board_params, size - idx); + if (idx >= size) + return -EINVAL; + + /* At this point both strings are guaranteed NUL-terminated */ + dev_dbg(&bcm4377->pdev->dev, + "OTP: chip_params='%s' board_params='%s'\n", chip_params, + board_params); + + ret = bcm4377_parse_otp_str(bcm4377, chip_params, + BCM4377_OTP_CHIP_PARAMS); + if (ret) + return ret; + + ret = bcm4377_parse_otp_str(bcm4377, board_params, + BCM4377_OTP_BOARD_PARAMS); + if (ret) + return ret; + + if (!bcm4377->stepping[0] || !bcm4377->vendor[0]) + return -EINVAL; + + dev_dbg(&bcm4377->pdev->dev, "OTP: stepping=%s, vendor=%s\n", + bcm4377->stepping, bcm4377->vendor); + return 0; +} + +static int bcm4377_parse_otp(struct bcm4377_data *bcm4377) +{ + u8 *otp; + int i; + int ret = -ENOENT; + + otp = kzalloc(BCM4377_OTP_SIZE, GFP_KERNEL); + if (!otp) + return -ENOMEM; + + for (i = 0; i < BCM4377_OTP_SIZE; ++i) + otp[i] = ioread8(bcm4377->bar0 + bcm4377->hw->otp_offset + i); + + i = 0; + while (i < (BCM4377_OTP_SIZE - 1)) { + u8 type = otp[i]; + u8 length = otp[i + 1]; + + if (type == 0) + break; + + if ((i + 2 + length) > BCM4377_OTP_SIZE) + break; + + switch (type) { + case BCM4377_OTP_SYS_VENDOR: + dev_dbg(&bcm4377->pdev->dev, + "OTP @ 0x%x (%d): SYS_VENDOR", i, length); + ret = bcm4377_parse_otp_sys_vendor(bcm4377, &otp[i + 2], + length); + break; + case BCM4377_OTP_CIS: + dev_dbg(&bcm4377->pdev->dev, "OTP @ 0x%x (%d): CIS", i, + length); + break; + default: + dev_dbg(&bcm4377->pdev->dev, "OTP @ 0x%x (%d): unknown", + i, length); + break; + } + + i += 2 + length; + } + + kfree(otp); + return ret; +} + +static int bcm4377_init_cfg(struct bcm4377_data *bcm4377) +{ + int ret; + u32 ctrl; + + ret = pci_write_config_dword(bcm4377->pdev, + BCM4377_PCIECFG_BAR0_WINDOW1, + bcm4377->hw->bar0_window1); + if (ret) + return ret; + + ret = pci_write_config_dword(bcm4377->pdev, + BCM4377_PCIECFG_BAR0_WINDOW2, + bcm4377->hw->bar0_window2); + if (ret) + return ret; + + ret = pci_write_config_dword( + bcm4377->pdev, BCM4377_PCIECFG_BAR0_CORE2_WINDOW1, + BCM4377_PCIECFG_BAR0_CORE2_WINDOW1_DEFAULT); + if (ret) + return ret; + + if (bcm4377->hw->has_bar0_core2_window2) { + ret = pci_write_config_dword(bcm4377->pdev, + BCM4377_PCIECFG_BAR0_CORE2_WINDOW2, + bcm4377->hw->bar0_core2_window2); + if (ret) + return ret; + } + + ret = pci_write_config_dword(bcm4377->pdev, BCM4377_PCIECFG_BAR2_WINDOW, + BCM4377_PCIECFG_BAR2_WINDOW_DEFAULT); + if (ret) + return ret; + + ret = pci_read_config_dword(bcm4377->pdev, + BCM4377_PCIECFG_SUBSYSTEM_CTRL, &ctrl); + if (ret) + return ret; + + if (bcm4377->hw->clear_pciecfg_subsystem_ctrl_bit19) + ctrl &= ~BIT(19); + ctrl |= BIT(16); + + return pci_write_config_dword(bcm4377->pdev, + BCM4377_PCIECFG_SUBSYSTEM_CTRL, ctrl); +} + +static int bcm4377_probe_dmi(struct bcm4377_data *bcm4377) +{ + const struct dmi_system_id *board_type_dmi_id; + + board_type_dmi_id = dmi_first_match(bcm4377_dmi_board_table); + if (board_type_dmi_id && board_type_dmi_id->driver_data) { + bcm4377->board_type = board_type_dmi_id->driver_data; + dev_dbg(&bcm4377->pdev->dev, + "found board type via DMI match: %s\n", + bcm4377->board_type); + } + + return 0; +} + +static int bcm4377_probe_of(struct bcm4377_data *bcm4377) +{ + struct device_node *np = bcm4377->pdev->dev.of_node; + int ret; + + if (!np) + return 0; + + ret = of_property_read_string(np, "brcm,board-type", + 
&bcm4377->board_type); + if (ret) { + dev_err(&bcm4377->pdev->dev, "no brcm,board-type property\n"); + return ret; + } + + bcm4377->taurus_beamforming_cal_blob = + of_get_property(np, "brcm,taurus-bf-cal-blob", + &bcm4377->taurus_beamforming_cal_size); + if (!bcm4377->taurus_beamforming_cal_blob) { + dev_err(&bcm4377->pdev->dev, + "no brcm,taurus-bf-cal-blob property\n"); + return -ENOENT; + } + bcm4377->taurus_cal_blob = of_get_property(np, "brcm,taurus-cal-blob", + &bcm4377->taurus_cal_size); + if (!bcm4377->taurus_cal_blob) { + dev_err(&bcm4377->pdev->dev, + "no brcm,taurus-cal-blob property\n"); + return -ENOENT; + } + + return 0; +} + +static void bcm4377_disable_aspm(struct bcm4377_data *bcm4377) +{ + pci_disable_link_state(bcm4377->pdev, + PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); + + /* + * pci_disable_link_state can fail if either CONFIG_PCIEASPM is disabled + * or if the BIOS hasn't handed over control to us. We must *always* + * disable ASPM for this device due to hardware errata though. + */ + pcie_capability_clear_word(bcm4377->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC); +} + +static void bcm4377_pci_free_irq_vectors(void *data) +{ + pci_free_irq_vectors(data); +} + +static void bcm4377_hci_free_dev(void *data) +{ + hci_free_dev(data); +} + +static void bcm4377_hci_unregister_dev(void *data) +{ + hci_unregister_dev(data); +} + +static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct bcm4377_data *bcm4377; + struct hci_dev *hdev; + int ret, irq; + + ret = dma_set_mask_and_coherent(&pdev->dev, BCM4377_DMA_MASK); + if (ret) + return ret; + + bcm4377 = devm_kzalloc(&pdev->dev, sizeof(*bcm4377), GFP_KERNEL); + if (!bcm4377) + return -ENOMEM; + + bcm4377->pdev = pdev; + bcm4377->hw = &bcm4377_hw_variants[id->driver_data]; + init_completion(&bcm4377->event); + + ret = bcm4377_prepare_rings(bcm4377); + if (ret) + return ret; + + ret = bcm4377_init_context(bcm4377); + if (ret) + return ret; + + ret = bcm4377_probe_dmi(bcm4377); + if (ret) + return ret; + ret = bcm4377_probe_of(bcm4377); + if (ret) + return ret; + if (!bcm4377->board_type) { + dev_err(&pdev->dev, "unable to determine board type\n"); + return -ENODEV; + } + + if (bcm4377->hw->disable_aspm) + bcm4377_disable_aspm(bcm4377); + + ret = pci_reset_function_locked(pdev); + if (ret) + dev_warn( + &pdev->dev, + "function level reset failed with %d; trying to continue anyway\n", + ret); + + /* + * If this number is too low and we try to access any BAR too + * early the device will crash. Experiments have shown that + * approximately 50 msec is the minimum amount we have to wait. + * Let's double that to be safe. + */ + msleep(100); + + ret = pci_enable_device(pdev); + if (ret) + return ret; + pci_set_master(pdev); + + ret = bcm4377_init_cfg(bcm4377); + if (ret) + return ret; + + bcm4377->bar0 = pcim_iomap(pdev, 0, 0); + if (!bcm4377->bar0) + return -EBUSY; + bcm4377->bar2 = pcim_iomap(pdev, 2, 0); + if (!bcm4377->bar2) + return -EBUSY; + + ret = bcm4377_parse_otp(bcm4377); + if (ret) { + dev_err(&pdev->dev, "Reading OTP failed with %d\n", ret); + return ret; + } + + /* + * Legacy interrupts result in an IRQ storm because we don't know where + * the interrupt mask and status registers for these chips are. + * MSIs are acked automatically instead. 
+ */ + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); + if (ret < 0) + return -ENODEV; + ret = devm_add_action_or_reset(&pdev->dev, bcm4377_pci_free_irq_vectors, + pdev); + if (ret) + return ret; + + irq = pci_irq_vector(pdev, 0); + if (irq <= 0) + return -ENODEV; + + ret = devm_request_irq(&pdev->dev, irq, bcm4377_irq, 0, "bcm4377", + bcm4377); + if (ret) + return ret; + + hdev = hci_alloc_dev(); + if (!hdev) + return -ENOMEM; + ret = devm_add_action_or_reset(&pdev->dev, bcm4377_hci_free_dev, hdev); + if (ret) + return ret; + + bcm4377->hdev = hdev; + + hdev->bus = HCI_PCI; + hdev->dev_type = HCI_PRIMARY; + hdev->open = bcm4377_hci_open; + hdev->close = bcm4377_hci_close; + hdev->send = bcm4377_hci_send_frame; + hdev->set_bdaddr = bcm4377_hci_set_bdaddr; + hdev->setup = bcm4377_hci_setup; + + set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); + if (bcm4377->hw->broken_mws_transport_config) + set_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &hdev->quirks); + if (bcm4377->hw->broken_ext_scan) + set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks); + + pci_set_drvdata(pdev, bcm4377); + hci_set_drvdata(hdev, bcm4377); + SET_HCIDEV_DEV(hdev, &pdev->dev); + + ret = bcm4377_boot(bcm4377); + if (ret) + return ret; + + ret = bcm4377_setup_rti(bcm4377); + if (ret) + return ret; + + ret = hci_register_dev(hdev); + if (ret) + return ret; + return devm_add_action_or_reset(&pdev->dev, bcm4377_hci_unregister_dev, + hdev); +} + +static int bcm4377_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct bcm4377_data *bcm4377 = pci_get_drvdata(pdev); + int ret; + + ret = hci_suspend_dev(bcm4377->hdev); + if (ret) + return ret; + + iowrite32(BCM4377_BAR0_SLEEP_CONTROL_QUIESCE, + bcm4377->bar0 + BCM4377_BAR0_SLEEP_CONTROL); + + return 0; +} + +static int bcm4377_resume(struct pci_dev *pdev) +{ + struct bcm4377_data *bcm4377 = pci_get_drvdata(pdev); + + iowrite32(BCM4377_BAR0_SLEEP_CONTROL_UNQUIESCE, + bcm4377->bar0 + BCM4377_BAR0_SLEEP_CONTROL); + + return hci_resume_dev(bcm4377->hdev); +} + +static const struct dmi_system_id bcm4377_dmi_board_table[] = { + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir9,1"), + }, + .driver_data = "apple,formosa", + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro15,4"), + }, + .driver_data = "apple,formosa", + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,3"), + }, + .driver_data = "apple,formosa", + }, + {} +}; + +static const struct bcm4377_hw bcm4377_hw_variants[] = { + [BCM4377] = { + .id = 0x4377, + .otp_offset = 0x4120, + .bar0_window1 = 0x1800b000, + .bar0_window2 = 0x1810c000, + .disable_aspm = true, + .broken_ext_scan = true, + .send_ptb = bcm4377_send_ptb, + }, + + [BCM4378] = { + .id = 0x4378, + .otp_offset = 0x4120, + .bar0_window1 = 0x18002000, + .bar0_window2 = 0x1810a000, + .bar0_core2_window2 = 0x18107000, + .has_bar0_core2_window2 = true, + .broken_mws_transport_config = true, + .send_calibration = bcm4378_send_calibration, + .send_ptb = bcm4378_send_ptb, + }, + + [BCM4387] = { + .id = 0x4387, + .otp_offset = 0x413c, + .bar0_window1 = 0x18002000, + .bar0_window2 = 0x18109000, + .bar0_core2_window2 = 0x18106000, + .has_bar0_core2_window2 = true, + .clear_pciecfg_subsystem_ctrl_bit19 = true, + .broken_mws_transport_config = true, + .send_calibration = bcm4387_send_calibration, + .send_ptb = bcm4378_send_ptb, + }, +}; + +#define BCM4377_DEVID_ENTRY(id) \ + { \ + 
PCI_VENDOR_ID_BROADCOM, BCM##id##_DEVICE_ID, PCI_ANY_ID, \ + PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, \ + BCM##id \ + } + +static const struct pci_device_id bcm4377_devid_table[] = { + BCM4377_DEVID_ENTRY(4377), + BCM4377_DEVID_ENTRY(4378), + BCM4377_DEVID_ENTRY(4387), + {}, +}; +MODULE_DEVICE_TABLE(pci, bcm4377_devid_table); + +static struct pci_driver bcm4377_pci_driver = { + .name = "hci_bcm4377", + .id_table = bcm4377_devid_table, + .probe = bcm4377_probe, + .suspend = bcm4377_suspend, + .resume = bcm4377_resume, +}; +module_pci_driver(bcm4377_pci_driver); + +MODULE_AUTHOR("Sven Peter "); +MODULE_DESCRIPTION("Bluetooth support for Broadcom 4377/4378/4387 devices"); +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_FIRMWARE("brcm/brcmbt4377*.bin"); +MODULE_FIRMWARE("brcm/brcmbt4377*.ptb"); +MODULE_FIRMWARE("brcm/brcmbt4378*.bin"); +MODULE_FIRMWARE("brcm/brcmbt4378*.ptb"); +MODULE_FIRMWARE("brcm/brcmbt4387*.bin"); +MODULE_FIRMWARE("brcm/brcmbt4387*.ptb"); -- cgit v1.2.3
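The completion-ring handling at the top of this patch (bcm4377_poll_completion_ring) is a textbook single-consumer scheme: the device publishes a head index, and the host chases it with a tail index, writing the new tail back after every consumed entry. Below is a minimal userspace C sketch of that loop; the names (poll_ring, handle_entry) are illustrative, not the driver's, and the single-threaded demo needs no barrier where the driver must issue dma_rmb().

#include <stdio.h>

#define N_ENTRIES 8

static void handle_entry(unsigned idx)
{
	printf("handling completion entry %u\n", idx);
}

/* chase the producer's head with our tail, wrapping at N_ENTRIES */
static void poll_ring(unsigned head, unsigned *tail)
{
	while (*tail != head) {
		/* the driver issues dma_rmb() here, between the head
		 * check and the read of the freshly published entry */
		handle_entry(*tail);
		*tail = (*tail + 1) % N_ENTRIES;
	}
}

int main(void)
{
	unsigned tail = 6;

	poll_ring(2, &tail);	/* consumes entries 6, 7, 0 and 1 */
	return 0;
}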
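bcm4377_enqueue() applies the usual "one slot sacrificed" convention: the transfer ring counts as full when advancing the head would make it equal to the tail. Each accepted message is also tagged with a 16-bit id that combines a per-ring generation counter with a slot number taken from the msgids bitmap. The sketch below reproduces that arithmetic in plain C; the bit layout (generation in the high byte) is an assumption for illustration, since the real masks come from the BCM4377_MSGID_* FIELD_PREP definitions earlier in this patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define N_ENTRIES	128
#define MSGID_GEN_SHIFT	8	/* assumed: generation in the high byte */
#define MSGID_ID_MASK	0xff	/* assumed: slot id in the low byte */

/* full when the next head position would collide with the tail */
static bool ring_full(uint16_t head, uint16_t tail)
{
	return ((head + 1) % N_ENTRIES) == tail;
}

static uint16_t make_msgid(uint8_t generation, uint8_t slot)
{
	return (uint16_t)((generation << MSGID_GEN_SHIFT) |
			  (slot & MSGID_ID_MASK));
}

int main(void)
{
	printf("full: %d\n", ring_full(127, 0));	/* 1: head would wrap onto tail */
	printf("full: %d\n", ring_full(5, 0));		/* 0: room left */
	printf("msgid: 0x%04x\n", make_msgid(3, 0x2a));	/* 0x032a */
	return 0;
}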
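Both __bcm4378_send_calibration() and bcm4378_send_ptb() stream a blob to the firmware in fixed-size pieces and tell it how many pieces remain (blocks - i - 1), so the device can recognize the final chunk without a separate end marker. Here is a self-contained sketch of that loop; CHUNK_SIZE and send_chunk() are stand-ins for the chunk-size constants and HCI vendor commands, not the driver's own API.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define CHUNK_SIZE 128	/* stand-in for BCM4378_*_CHUNK_SIZE */

static int send_chunk(const unsigned char *data, size_t len,
		      size_t blocks_left)
{
	printf("chunk of %zu byte(s), %zu block(s) left\n", len, blocks_left);
	return 0;
}

static int send_blob(const unsigned char *data, size_t size)
{
	/* DIV_ROUND_UP(size, CHUNK_SIZE) without the kernel macro */
	size_t blocks = (size + CHUNK_SIZE - 1) / CHUNK_SIZE;
	size_t i, left, len;
	int ret;

	for (i = 0, left = size; i < blocks; ++i, left -= len) {
		len = left < CHUNK_SIZE ? left : CHUNK_SIZE;
		ret = send_chunk(data + i * CHUNK_SIZE, len, blocks - i - 1);
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	unsigned char blob[300];

	memset(blob, 0xab, sizeof(blob));
	return send_blob(blob, sizeof(blob));	/* sends 128 + 128 + 44 bytes */
}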
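bcm4377_request_blob() builds two candidate firmware names, a fully qualified one including the OTP vendor string and a board-only fallback, and tries them in that order. The same construction in plain C; the chip id, stepping, board and vendor values below are made up for the demonstration.

#include <stdio.h>

int main(void)
{
	unsigned int hw_id = 0x4378;		/* illustrative values only */
	const char *stepping = "b1";
	const char *board_type = "apple,formosa";
	const char *vendor = "m";
	const char *suffix = "bin";
	char name0[64], name1[64];

	snprintf(name0, sizeof(name0), "brcm/brcmbt%04x%s-%s-%s.%s",
		 hw_id, stepping, board_type, vendor, suffix);
	snprintf(name1, sizeof(name1), "brcm/brcmbt%04x%s-%s.%s",
		 hw_id, stepping, board_type, suffix);

	/* tried in order: vendor-specific first, then the fallback */
	printf("%s\n%s\n", name0, name1);
	return 0;
}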
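bcm4377_parse_otp() walks the OTP region as a flat type/length/value stream: one type byte, one length byte, then the payload, terminated by a zero type. A userspace walk over a synthetic buffer that mirrors the bounds checks in the driver's loop; the type values here are arbitrary demo data, not BCM4377_OTP_* constants.

#include <stddef.h>
#include <stdio.h>

static void walk_otp(const unsigned char *otp, size_t size)
{
	size_t i = 0;

	while (i < size - 1) {
		unsigned char type = otp[i];
		unsigned char length = otp[i + 1];

		if (type == 0)			/* end-of-stream marker */
			break;
		if (i + 2 + length > size)	/* truncated entry */
			break;

		printf("entry @ 0x%zx: type %u, %u byte(s)\n",
		       i, type, length);
		i += 2 + length;
	}
}

int main(void)
{
	const unsigned char otp[] = {
		0x02, 0x03, 'a', 'b', 'c',	/* type 2, 3 bytes */
		0x01, 0x02, 'x', 'y',		/* type 1, 2 bytes */
		0x00, 0x00,			/* terminator + pad */
	};

	walk_otp(otp, sizeof(otp));
	return 0;
}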