author		Lendacky, Thomas <Thomas.Lendacky@amd.com>	2015-05-14 11:44:03 -0500
committer	David S. Miller <davem@davemloft.net>		2015-05-15 15:21:43 -0400
commit		34bf65dfa34369d283582cfff2ec916f62043043 (patch)
tree		baddc52c3097b2d65c030b2d5c4a1b5b6898a5fa
parent		5452b2dfe685ba6774ceca19ec1df1fa499702fb (diff)
download	linux-34bf65dfa34369d283582cfff2ec916f62043043.tar.bz2
amd-xgbe: Add netif_* message support to the driver
Add support for the network interface message level settings to
determine whether to issue some of the driver messages. Make use
of the netif_* interface where appropriate.
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/amd/xgbe/xgbe-dcb.c	17
-rw-r--r--	drivers/net/ethernet/amd/xgbe/xgbe-desc.c	33
-rw-r--r--	drivers/net/ethernet/amd/xgbe/xgbe-dev.c	70
-rw-r--r--	drivers/net/ethernet/amd/xgbe/xgbe-drv.c	101
-rw-r--r--	drivers/net/ethernet/amd/xgbe/xgbe-main.c	15
-rw-r--r--	drivers/net/ethernet/amd/xgbe/xgbe-mdio.c	83
-rw-r--r--	drivers/net/ethernet/amd/xgbe/xgbe.h	23
7 files changed, 191 insertions, 151 deletions
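
The mechanism this patch adopts is the standard netdevice message-level facility: the driver keeps a bit mask (msg_enable) in its private data, initializes it from a module parameter with netif_msg_init(), and then gates its debug output with the netif_dbg()/netif_msg_*() helpers instead of compile-time DBGPR and #ifdef dump switches. Below is a minimal sketch of that pattern, separate from the diff itself; struct example_priv and the function names are illustrative stand-ins, not taken from the driver.

/*
 * Minimal sketch of the message-level pattern used by this patch;
 * not part of the diff below.
 */
#include <linux/module.h>
#include <linux/netdevice.h>

static int debug = -1;
module_param(debug, int, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, " Network interface message level setting");

static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
				      NETIF_MSG_IFUP);

struct example_priv {
	struct net_device *netdev;
	u32 msg_enable;			/* consulted by netif_msg_*() */
};

static void example_probe_init(struct example_priv *priv)
{
	/* debug == -1 (the default) selects default_msg_level */
	priv->msg_enable = netif_msg_init(debug, default_msg_level);
}

static void example_tx_debug(struct example_priv *priv)
{
	/* printed only when NETIF_MSG_TX_QUEUED is set in msg_enable */
	netif_dbg(priv, tx_queued, priv->netdev, "descriptor queued\n");

	/* expensive dumps are gated explicitly, replacing #ifdef blocks */
	if (netif_msg_pktdata(priv))
		netdev_dbg(priv->netdev, "packet dump would go here\n");
}

The debug parameter, its permissions, and the default_msg_level mask above mirror what the patch adds to xgbe-main.c; everything else in the sketch is generic.
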
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c index 8a50b01c2686..a6b9899e285f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c @@ -150,9 +150,12 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev, tc_ets = 0; tc_ets_weight = 0; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - DBGPR(" TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i, - ets->tc_tx_bw[i], ets->tc_rx_bw[i], ets->tc_tsa[i]); - DBGPR(" PRIO%u: TC=%hhu\n", i, ets->prio_tc[i]); + netif_dbg(pdata, drv, netdev, + "TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i, + ets->tc_tx_bw[i], ets->tc_rx_bw[i], + ets->tc_tsa[i]); + netif_dbg(pdata, drv, netdev, "PRIO%u: TC=%hhu\n", i, + ets->prio_tc[i]); if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && (i >= pdata->hw_feat.tc_cnt)) @@ -214,8 +217,9 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev, { struct xgbe_prv_data *pdata = netdev_priv(netdev); - DBGPR(" cap=%hhu, en=%hhx, mbc=%hhu, delay=%hhu\n", - pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay); + netif_dbg(pdata, drv, netdev, + "cap=%hhu, en=%#hhx, mbc=%hhu, delay=%hhu\n", + pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay); if (!pdata->pfc) { pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc), @@ -238,9 +242,10 @@ static u8 xgbe_dcb_getdcbx(struct net_device *netdev) static u8 xgbe_dcb_setdcbx(struct net_device *netdev, u8 dcbx) { + struct xgbe_prv_data *pdata = netdev_priv(netdev); u8 support = xgbe_dcb_getdcbx(netdev); - DBGPR(" DCBX=%#hhx\n", dcbx); + netif_dbg(pdata, drv, netdev, "DCBX=%#hhx\n", dcbx); if (dcbx & ~support) return 1; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index d81fc6bd4759..87b73d43391c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c @@ -208,8 +208,9 @@ static int xgbe_init_ring(struct xgbe_prv_data *pdata, if (!ring->rdata) return -ENOMEM; - DBGPR(" rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n", - ring->rdesc, ring->rdesc_dma, ring->rdata); + netif_dbg(pdata, drv, pdata->netdev, + "rdesc=%p, rdesc_dma=%pad, rdata=%p\n", + ring->rdesc, &ring->rdesc_dma, ring->rdata); DBGPR("<--xgbe_init_ring\n"); @@ -226,7 +227,9 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata) channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++, channel++) { - DBGPR(" %s - tx_ring:\n", channel->name); + netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n", + channel->name); + ret = xgbe_init_ring(pdata, channel->tx_ring, pdata->tx_desc_count); if (ret) { @@ -235,12 +238,14 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata) goto err_ring; } - DBGPR(" %s - rx_ring:\n", channel->name); + netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n", + channel->name); + ret = xgbe_init_ring(pdata, channel->rx_ring, pdata->rx_desc_count); if (ret) { netdev_alert(pdata->netdev, - "error initializing Tx ring\n"); + "error initializing Rx ring\n"); goto err_ring; } } @@ -518,8 +523,6 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) rdata = XGBE_GET_DESC_DATA(ring, cur_index); if (tso) { - DBGPR(" TSO packet\n"); - /* Map the TSO header */ skb_dma = dma_map_single(pdata->dev, skb->data, packet->header_len, DMA_TO_DEVICE); @@ -529,6 +532,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) } rdata->skb_dma = skb_dma; rdata->skb_dma_len = packet->header_len; + netif_dbg(pdata, tx_queued, pdata->netdev, + "skb header: 
index=%u, dma=%pad, len=%u\n", + cur_index, &skb_dma, packet->header_len); offset = packet->header_len; @@ -550,8 +556,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) } rdata->skb_dma = skb_dma; rdata->skb_dma_len = len; - DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n", - cur_index, skb_dma, len); + netif_dbg(pdata, tx_queued, pdata->netdev, + "skb data: index=%u, dma=%pad, len=%u\n", + cur_index, &skb_dma, len); datalen -= len; offset += len; @@ -563,7 +570,8 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - DBGPR(" mapping frag %u\n", i); + netif_dbg(pdata, tx_queued, pdata->netdev, + "mapping frag %u\n", i); frag = &skb_shinfo(skb)->frags[i]; offset = 0; @@ -582,8 +590,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) rdata->skb_dma = skb_dma; rdata->skb_dma_len = len; rdata->mapped_as_page = 1; - DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n", - cur_index, skb_dma, len); + netif_dbg(pdata, tx_queued, pdata->netdev, + "skb frag: index=%u, dma=%pad, len=%u\n", + cur_index, &skb_dma, len); datalen -= len; offset += len; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 6f593a552f9f..dab3a1ed566b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -710,7 +710,8 @@ static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata, if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val) return 0; - DBGPR(" %s promiscuous mode\n", enable ? "entering" : "leaving"); + netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n", + enable ? "entering" : "leaving"); XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val); return 0; @@ -724,7 +725,8 @@ static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata, if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val) return 0; - DBGPR(" %s allmulti mode\n", enable ? "entering" : "leaving"); + netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n", + enable ? 
"entering" : "leaving"); XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val); return 0; @@ -749,8 +751,9 @@ static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata, mac_addr[0] = ha->addr[4]; mac_addr[1] = ha->addr[5]; - DBGPR(" adding mac address %pM at 0x%04x\n", ha->addr, - *mac_reg); + netif_dbg(pdata, drv, pdata->netdev, + "adding mac address %pM at %#x\n", + ha->addr, *mac_reg); XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1); } @@ -1322,7 +1325,8 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata) for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { switch (ets->tc_tsa[i]) { case IEEE_8021QAZ_TSA_STRICT: - DBGPR(" TC%u using SP\n", i); + netif_dbg(pdata, drv, pdata->netdev, + "TC%u using SP\n", i); XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, MTL_TSA_SP); break; @@ -1330,7 +1334,8 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata) weight = total_weight * ets->tc_tx_bw[i] / 100; weight = clamp(weight, min_weight, total_weight); - DBGPR(" TC%u using DWRR (weight %u)\n", i, weight); + netif_dbg(pdata, drv, pdata->netdev, + "TC%u using DWRR (weight %u)\n", i, weight); XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, MTL_TSA_ETS); XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, @@ -1359,7 +1364,8 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata) } mask &= 0xff; - DBGPR(" TC%u PFC mask=%#x\n", tc, mask); + netif_dbg(pdata, drv, pdata->netdev, "TC%u PFC mask=%#x\n", + tc, mask); reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG)); reg_val = XGMAC_IOREAD(pdata, reg); @@ -1457,8 +1463,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) /* Create a context descriptor if this is a TSO packet */ if (tso_context || vlan_context) { if (tso_context) { - DBGPR(" TSO context descriptor, mss=%u\n", - packet->mss); + netif_dbg(pdata, tx_queued, pdata->netdev, + "TSO context descriptor, mss=%u\n", + packet->mss); /* Set the MSS size */ XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2, @@ -1476,8 +1483,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) } if (vlan_context) { - DBGPR(" VLAN context descriptor, ctag=%u\n", - packet->vlan_ctag); + netif_dbg(pdata, tx_queued, pdata->netdev, + "VLAN context descriptor, ctag=%u\n", + packet->vlan_ctag); /* Mark it as a CONTEXT descriptor */ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, @@ -1596,9 +1604,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) rdesc = rdata->rdesc; XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); -#ifdef XGMAC_ENABLE_TX_DESC_DUMP - xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1); -#endif + if (netif_msg_tx_queued(pdata)) + xgbe_dump_tx_desc(pdata, ring, start_index, + packet->rdesc_count, 1); /* Make sure ownership is written to the descriptor */ dma_wmb(); @@ -1640,9 +1648,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel) /* Make sure descriptor fields are read after reading the OWN bit */ dma_rmb(); -#ifdef XGMAC_ENABLE_RX_DESC_DUMP - xgbe_dump_rx_desc(ring, rdesc, ring->cur); -#endif + if (netif_msg_rx_status(pdata)) + xgbe_dump_rx_desc(pdata, ring, ring->cur); if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) { /* Timestamp Context Descriptor */ @@ -1713,7 +1720,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel) /* Check for errors (only valid in last descriptor) */ err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES); etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT); - DBGPR(" err=%u, etlt=%#x\n", err, etlt); + netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt); if 
(!err || !etlt) { /* No error if err is 0 or etlt is 0 */ @@ -1724,7 +1731,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel) packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0, RX_NORMAL_DESC0, OVT); - DBGPR(" vlan-ctag=0x%04x\n", packet->vlan_ctag); + netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n", + packet->vlan_ctag); } } else { if ((etlt == 0x05) || (etlt == 0x06)) @@ -2032,9 +2040,9 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata) for (i = 0; i < pdata->tx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size); - netdev_notice(pdata->netdev, - "%d Tx hardware queues, %d byte fifo per queue\n", - pdata->tx_q_count, ((fifo_size + 1) * 256)); + netif_info(pdata, drv, pdata->netdev, + "%d Tx hardware queues, %d byte fifo per queue\n", + pdata->tx_q_count, ((fifo_size + 1) * 256)); } static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) @@ -2048,9 +2056,9 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) for (i = 0; i < pdata->rx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size); - netdev_notice(pdata->netdev, - "%d Rx hardware queues, %d byte fifo per queue\n", - pdata->rx_q_count, ((fifo_size + 1) * 256)); + netif_info(pdata, drv, pdata->netdev, + "%d Rx hardware queues, %d byte fifo per queue\n", + pdata->rx_q_count, ((fifo_size + 1) * 256)); } static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata) @@ -2069,14 +2077,16 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata) for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) { for (j = 0; j < qptc; j++) { - DBGPR(" TXq%u mapped to TC%u\n", queue, i); + netif_dbg(pdata, drv, pdata->netdev, + "TXq%u mapped to TC%u\n", queue, i); XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, Q2TCMAP, i); pdata->q2tc_map[queue++] = i; } if (i < qptc_extra) { - DBGPR(" TXq%u mapped to TC%u\n", queue, i); + netif_dbg(pdata, drv, pdata->netdev, + "TXq%u mapped to TC%u\n", queue, i); XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, Q2TCMAP, i); pdata->q2tc_map[queue++] = i; @@ -2094,13 +2104,15 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata) for (i = 0, prio = 0; i < prio_queues;) { mask = 0; for (j = 0; j < ppq; j++) { - DBGPR(" PRIO%u mapped to RXq%u\n", prio, i); + netif_dbg(pdata, drv, pdata->netdev, + "PRIO%u mapped to RXq%u\n", prio, i); mask |= (1 << prio); pdata->prio2q_map[prio++] = i; } if (i < ppq_extra) { - DBGPR(" PRIO%u mapped to RXq%u\n", prio, i); + netif_dbg(pdata, drv, pdata->netdev, + "PRIO%u mapped to RXq%u\n", prio, i); mask |= (1 << prio); pdata->prio2q_map[prio++] = i; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index d45fd1b1fb7d..cc5af67357f5 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -183,9 +183,10 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata) channel->rx_ring = rx_ring++; } - DBGPR(" %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n", - channel->name, channel->queue_index, channel->dma_regs, - channel->dma_irq, channel->tx_ring, channel->rx_ring); + netif_dbg(pdata, drv, pdata->netdev, + "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n", + channel->name, channel->dma_regs, channel->dma_irq, + channel->tx_ring, channel->rx_ring); } pdata->channel = channel_mem; @@ -235,7 +236,8 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel, struct xgbe_prv_data *pdata = channel->pdata; if (count > xgbe_tx_avail_desc(ring)) { - 
DBGPR(" Tx queue stopped, not enough descriptors available\n"); + netif_info(pdata, drv, pdata->netdev, + "Tx queue stopped, not enough descriptors available\n"); netif_stop_subqueue(pdata->netdev, channel->queue_index); ring->tx.queue_stopped = 1; @@ -330,7 +332,7 @@ static irqreturn_t xgbe_isr(int irq, void *data) if (!dma_isr) goto isr_done; - DBGPR(" DMA_ISR = %08x\n", dma_isr); + netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr); for (i = 0; i < pdata->channel_count; i++) { if (!(dma_isr & (1 << i))) @@ -339,7 +341,8 @@ static irqreturn_t xgbe_isr(int irq, void *data) channel = pdata->channel + i; dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); - DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr); + netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n", + i, dma_ch_isr); /* The TI or RI interrupt bits may still be set even if using * per channel DMA interrupts. Check to be sure those are not @@ -386,8 +389,6 @@ static irqreturn_t xgbe_isr(int irq, void *data) } } - DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR)); - isr_done: return IRQ_HANDLED; } @@ -448,7 +449,6 @@ static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata) if (!channel->tx_ring) break; - DBGPR(" %s adding tx timer\n", channel->name); setup_timer(&channel->tx_timer, xgbe_tx_timer, (unsigned long)channel); } @@ -468,7 +468,6 @@ static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata) if (!channel->tx_ring) break; - DBGPR(" %s deleting tx timer\n", channel->name); del_timer_sync(&channel->tx_timer); } @@ -848,8 +847,9 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) ret = -ENODEV; goto err_phy_connect; } - DBGPR(" phy_connect_direct succeeded for PHY %s, link=%d\n", - dev_name(&phydev->dev), phydev->link); + netif_dbg(pdata, ifup, pdata->netdev, + "phy_connect_direct succeeded for PHY %s\n", + dev_name(&phydev->dev)); return 0; @@ -1478,7 +1478,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) ret = NETDEV_TX_OK; if (skb->len == 0) { - netdev_err(netdev, "empty skb received from stack\n"); + netif_err(pdata, tx_err, netdev, + "empty skb received from stack\n"); dev_kfree_skb_any(skb); goto tx_netdev_return; } @@ -1494,7 +1495,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) ret = xgbe_prep_tso(skb, packet); if (ret) { - netdev_err(netdev, "error processing TSO packet\n"); + netif_err(pdata, tx_err, netdev, + "error processing TSO packet\n"); dev_kfree_skb_any(skb); goto tx_netdev_return; } @@ -1513,9 +1515,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) /* Configure required descriptor fields for transmission */ hw_if->dev_xmit(channel); -#ifdef XGMAC_ENABLE_TX_PKT_DUMP - xgbe_print_pkt(netdev, skb, true); -#endif + if (netif_msg_pktdata(pdata)) + xgbe_print_pkt(netdev, skb, true); /* Stop the queue in advance if there may not be enough descriptors */ xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS); @@ -1710,7 +1711,8 @@ static int xgbe_setup_tc(struct net_device *netdev, u8 tc) (pdata->q2tc_map[queue] == i)) queue++; - DBGPR(" TC%u using TXq%u-%u\n", i, offset, queue - 1); + netif_dbg(pdata, drv, netdev, "TC%u using TXq%u-%u\n", + i, offset, queue - 1); netdev_set_tc_queue(netdev, i, queue - offset, offset); offset = queue; } @@ -1877,9 +1879,8 @@ static int xgbe_tx_poll(struct xgbe_channel *channel) * bit */ dma_rmb(); -#ifdef XGMAC_ENABLE_TX_DESC_DUMP - xgbe_dump_tx_desc(ring, ring->dirty, 1, 0); -#endif + if (netif_msg_tx_done(pdata)) + xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 
0); if (hw_if->is_last_desc(rdesc)) { tx_packets += rdata->tx.packets; @@ -1983,7 +1984,8 @@ read_again: if (error || packet->errors) { if (packet->errors) - DBGPR("Error in received packet\n"); + netif_err(pdata, rx_err, netdev, + "error in received packet\n"); dev_kfree_skb(skb); goto next_packet; } @@ -2033,14 +2035,14 @@ skip_data: max_len += VLAN_HLEN; if (skb->len > max_len) { - DBGPR("packet length exceeds configured MTU\n"); + netif_err(pdata, rx_err, netdev, + "packet length exceeds configured MTU\n"); dev_kfree_skb(skb); goto next_packet; } -#ifdef XGMAC_ENABLE_RX_PKT_DUMP - xgbe_print_pkt(netdev, skb, false); -#endif + if (netif_msg_pktdata(pdata)) + xgbe_print_pkt(netdev, skb, false); skb_checksum_none_assert(skb); if (XGMAC_GET_BITS(packet->attributes, @@ -2164,8 +2166,8 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget) return processed; } -void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx, - unsigned int count, unsigned int flag) +void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring, + unsigned int idx, unsigned int count, unsigned int flag) { struct xgbe_ring_data *rdata; struct xgbe_ring_desc *rdesc; @@ -2173,20 +2175,29 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx, while (count--) { rdata = XGBE_GET_DESC_DATA(ring, idx); rdesc = rdata->rdesc; - pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx, - (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE", - le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1), - le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3)); + netdev_dbg(pdata->netdev, + "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx, + (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE", + le32_to_cpu(rdesc->desc0), + le32_to_cpu(rdesc->desc1), + le32_to_cpu(rdesc->desc2), + le32_to_cpu(rdesc->desc3)); idx++; } } -void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc, +void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring, unsigned int idx) { - pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx, - le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1), - le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3)); + struct xgbe_ring_data *rdata; + struct xgbe_ring_desc *rdesc; + + rdata = XGBE_GET_DESC_DATA(ring, idx); + rdesc = rdata->rdesc; + netdev_dbg(pdata->netdev, + "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", + idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1), + le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3)); } void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx) @@ -2196,21 +2207,21 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx) unsigned char buffer[128]; unsigned int i, j; - netdev_alert(netdev, "\n************** SKB dump ****************\n"); + netdev_dbg(netdev, "\n************** SKB dump ****************\n"); - netdev_alert(netdev, "%s packet of %d bytes\n", - (tx_rx ? "TX" : "RX"), skb->len); + netdev_dbg(netdev, "%s packet of %d bytes\n", + (tx_rx ? 
"TX" : "RX"), skb->len); - netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest); - netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source); - netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto)); + netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest); + netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source); + netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto)); for (i = 0, j = 0; i < skb->len;) { j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx", buf[i++]); if ((i % 32) == 0) { - netdev_alert(netdev, " 0x%04x: %s\n", i - 32, buffer); + netdev_dbg(netdev, " %#06x: %s\n", i - 32, buffer); j = 0; } else if ((i % 16) == 0) { buffer[j++] = ' '; @@ -2220,7 +2231,7 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx) } } if (i % 32) - netdev_alert(netdev, " 0x%04x: %s\n", i - (i % 32), buffer); + netdev_dbg(netdev, " %#06x: %s\n", i - (i % 32), buffer); - netdev_alert(netdev, "\n************** SKB dump ****************\n"); + netdev_dbg(netdev, "\n************** SKB dump ****************\n"); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 714905384900..ae869d41cec8 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -136,6 +136,13 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(XGBE_DRV_VERSION); MODULE_DESCRIPTION(XGBE_DRV_DESC); +static int debug = -1; +module_param(debug, int, S_IWUSR | S_IRUGO); +MODULE_PARM_DESC(debug, " Network interface message level setting"); + +static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN | + NETIF_MSG_IFUP); + static void xgbe_default_config(struct xgbe_prv_data *pdata) { DBGPR("-->xgbe_default_config\n"); @@ -289,6 +296,8 @@ static int xgbe_probe(struct platform_device *pdev) mutex_init(&pdata->rss_mutex); spin_lock_init(&pdata->tstamp_lock); + pdata->msg_enable = netif_msg_init(debug, default_msg_level); + /* Check if we should use ACPI or DT */ pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 
0 : 1; @@ -318,7 +327,8 @@ static int xgbe_probe(struct platform_device *pdev) ret = PTR_ERR(pdata->xgmac_regs); goto err_io; } - DBGPR(" xgmac_regs = %p\n", pdata->xgmac_regs); + if (netif_msg_probe(pdata)) + dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); pdata->xpcs_regs = devm_ioremap_resource(dev, res); @@ -327,7 +337,8 @@ static int xgbe_probe(struct platform_device *pdev) ret = PTR_ERR(pdata->xpcs_regs); goto err_io; } - DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs); + if (netif_msg_probe(pdata)) + dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs); /* Retrieve the MAC address */ ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY, diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 59e267f3f1b7..532a67f728e0 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -159,48 +159,48 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg, void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata) { struct device *dev = pdata->dev; - struct phy_device *phydev = pdata->mii->phy_map[XGBE_PRTAD]; + struct phy_device *phydev = pdata->phydev; int i; - dev_alert(dev, "\n************* PHY Reg dump **********************\n"); - - dev_alert(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1, - XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1)); - dev_alert(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1, - XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1)); - dev_alert(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1, - XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1)); - dev_alert(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2, - XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2)); - dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1, - XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1)); - dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2, - XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2)); - - dev_alert(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1, - XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1)); - dev_alert(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1, - XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1)); - dev_alert(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n", - MDIO_AN_ADVERTISE, - XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE)); - dev_alert(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n", - MDIO_AN_ADVERTISE + 1, - XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1)); - dev_alert(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n", - MDIO_AN_ADVERTISE + 2, - XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2)); - dev_alert(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n", - MDIO_AN_COMP_STAT, - XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT)); - - dev_alert(dev, "MMD Device Mask = %#x\n", - phydev->c45_ids.devices_in_package); + dev_dbg(dev, "\n************* PHY Reg dump **********************\n"); + + dev_dbg(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1, + XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1)); + dev_dbg(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1, + XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1)); + dev_dbg(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1, + XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1)); + dev_dbg(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2, + XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2)); + dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1, + XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1)); + dev_dbg(dev, "Devices 
in Package (%#04x)= %#04x\n", MDIO_DEVS2, + XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2)); + + dev_dbg(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1, + XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1)); + dev_dbg(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1, + XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1)); + dev_dbg(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n", + MDIO_AN_ADVERTISE, + XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE)); + dev_dbg(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n", + MDIO_AN_ADVERTISE + 1, + XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1)); + dev_dbg(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n", + MDIO_AN_ADVERTISE + 2, + XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2)); + dev_dbg(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n", + MDIO_AN_COMP_STAT, + XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT)); + + dev_dbg(dev, "MMD Device Mask = %#x\n", + phydev->c45_ids.devices_in_package); for (i = 0; i < ARRAY_SIZE(phydev->c45_ids.device_ids); i++) - dev_alert(dev, " MMD %d: ID = %#08x\n", i, - phydev->c45_ids.device_ids[i]); + dev_dbg(dev, " MMD %d: ID = %#08x\n", i, + phydev->c45_ids.device_ids[i]); - dev_alert(dev, "\n*************************************************\n"); + dev_dbg(dev, "\n*************************************************\n"); } int xgbe_mdio_register(struct xgbe_prv_data *pdata) @@ -230,7 +230,9 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata) dev_err(pdata->dev, "mdiobus_register failed\n"); goto err_mdiobus_alloc; } - DBGPR(" mdiobus_register succeeded for %s\n", pdata->mii_bus_id); + if (netif_msg_drv(pdata)) + dev_dbg(pdata->dev, "mdiobus_register succeeded for %s\n", + pdata->mii_bus_id); /* Probe the PCS using Clause 45 */ phydev = get_phy_device(mii, XGBE_PRTAD, true); @@ -275,7 +277,8 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata) pdata->phydev = phydev; - DBGPHY_REGS(pdata); + if (netif_msg_drv(pdata)) + xgbe_dump_phy_registers(pdata); DBGPR("<--xgbe_mdio_register\n"); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index ef3d1e577787..8313b0761b8f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -793,6 +793,9 @@ struct xgbe_prv_data { /* Keeps track of power mode */ unsigned int power_down; + /* Network interface message level setting */ + u32 msg_enable; + #ifdef CONFIG_DEBUG_FS struct dentry *xgbe_debugfs; @@ -818,9 +821,9 @@ void xgbe_mdio_unregister(struct xgbe_prv_data *); void xgbe_dump_phy_registers(struct xgbe_prv_data *); void xgbe_ptp_register(struct xgbe_prv_data *); void xgbe_ptp_unregister(struct xgbe_prv_data *); -void xgbe_dump_tx_desc(struct xgbe_ring *, unsigned int, unsigned int, - unsigned int); -void xgbe_dump_rx_desc(struct xgbe_ring *, struct xgbe_ring_desc *, +void xgbe_dump_tx_desc(struct xgbe_prv_data *, struct xgbe_ring *, + unsigned int, unsigned int, unsigned int); +void xgbe_dump_rx_desc(struct xgbe_prv_data *, struct xgbe_ring *, unsigned int); void xgbe_print_pkt(struct net_device *, struct sk_buff *, bool); void xgbe_get_all_hw_features(struct xgbe_prv_data *); @@ -837,18 +840,6 @@ static inline void xgbe_debugfs_init(struct xgbe_prv_data *pdata) {} static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {} #endif /* CONFIG_DEBUG_FS */ -/* NOTE: Uncomment for TX and RX DESCRIPTOR DUMP in KERNEL LOG */ -#if 0 -#define XGMAC_ENABLE_TX_DESC_DUMP -#define XGMAC_ENABLE_RX_DESC_DUMP -#endif - -/* NOTE: Uncomment for TX and RX PACKET DUMP in KERNEL LOG */ -#if 0 -#define 
XGMAC_ENABLE_TX_PKT_DUMP -#define XGMAC_ENABLE_RX_PKT_DUMP -#endif - /* NOTE: Uncomment for function trace log messages in KERNEL LOG */ #if 0 #define YDEBUG @@ -858,10 +849,8 @@ static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {} /* For debug prints */ #ifdef YDEBUG #define DBGPR(x...) pr_alert(x) -#define DBGPHY_REGS(x...) xgbe_dump_phy_registers(x) #else #define DBGPR(x...) do { } while (0) -#define DBGPHY_REGS(x...) do { } while (0) #endif #ifdef YDEBUG_MDIO |
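
Drivers that store msg_enable this way usually also let it be changed at runtime through ethtool's msglvl interface (ethtool -s <dev> msglvl <value>), provided they expose .get_msglevel/.set_msglevel callbacks. This patch does not add those callbacks for amd-xgbe, so the following is only a hedged sketch of how they are commonly wired up, reusing the illustrative example_priv structure from the sketch above.

/* Hypothetical ethtool message-level callbacks; NOT part of this patch. */
#include <linux/ethtool.h>
#include <linux/netdevice.h>

static u32 example_get_msglevel(struct net_device *netdev)
{
	struct example_priv *priv = netdev_priv(netdev);

	return priv->msg_enable;
}

static void example_set_msglevel(struct net_device *netdev, u32 msglevel)
{
	struct example_priv *priv = netdev_priv(netdev);

	priv->msg_enable = msglevel;
}

static const struct ethtool_ops example_ethtool_ops = {
	.get_msglevel	= example_get_msglevel,
	.set_msglevel	= example_set_msglevel,
};
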