author | David S. Miller <davem@davemloft.net> | 2013-08-16 15:37:26 -0700
---|---|---
committer | David S. Miller <davem@davemloft.net> | 2013-08-16 15:37:26 -0700
commit | 2ff1cf12c9fe70e75e600404e6a4274b19d293ed (patch) |
tree | beafddac0a8098e3f07d2ec60e44a2a7d006e605 /drivers/net/ethernet |
parent | 16b304f3404f8e0243d5ee2b70b68767b7b59b2b (diff) |
parent | 0f7dd1aa8f959216f1faa71513b9d3c1a9065e5a (diff) |
download | linux-2ff1cf12c9fe70e75e600404e6a4274b19d293ed.tar.bz2 |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'drivers/net/ethernet')
25 files changed, 344 insertions, 222 deletions
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index f1b121ee5525..55d79cb53a79 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -199,7 +199,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget) struct arc_emac_priv *priv = netdev_priv(ndev); unsigned int work_done; - for (work_done = 0; work_done <= budget; work_done++) { + for (work_done = 0; work_done < budget; work_done++) { unsigned int *last_rx_bd = &priv->last_rx_bd; struct net_device_stats *stats = &priv->stats; struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 7ecb44ad24fb..126dec4342e6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1502,6 +1502,7 @@ struct bnx2x { #define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) #define IS_VF_FLAG (1 << 22) #define INTERRUPTS_ENABLED_FLAG (1 << 23) +#define BC_SUPPORTS_RMMOD_CMD (1 << 24) #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) @@ -1830,6 +1831,8 @@ struct bnx2x { int fp_array_size; u32 dump_preset_idx; + bool stats_started; + struct semaphore stats_sema; }; /* Tx queues may be less or equal to Rx queues */ @@ -2451,4 +2454,6 @@ enum bnx2x_pci_bus_speed { BNX2X_PCI_LINK_SPEED_5000 = 5000, BNX2X_PCI_LINK_SPEED_8000 = 8000 }; + +void bnx2x_set_local_cmng(struct bnx2x *bp); #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 0c94df47e0e8..f9122f2d6b65 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -753,6 +753,10 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) bnx2x_pfc_set_pfc(bp); bnx2x_dcbx_update_ets_params(bp); + + /* ets may affect cmng configuration: reinit it in hw */ + bnx2x_set_local_cmng(bp); + bnx2x_dcbx_resume_hw_tx(bp); return; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 5018e52ae2ad..32767f6aa33f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1300,6 +1300,9 @@ struct drv_func_mb { #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000 + #define DRV_MSG_CODE_RMMOD 0xdb000000 + #define REQ_BC_VER_4_RMMOD_CMD 0x0007080f + #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 #define REQ_BC_VER_4_SET_MF_BW 0x00060202 #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 @@ -1372,6 +1375,8 @@ struct drv_func_mb { #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000 + #define FW_MSG_CODE_RMMOD_ACK 0xdb100000 + #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 39d7db58bf86..7f4ec80f0cb3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -2476,7 +2476,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) input.port_rate = bp->link_vars.line_speed; - if (cmng_type == CMNG_FNS_MINMAX) { + if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) { int vn; /* read mf conf from shmem */ @@ -2533,6 +2533,21 @@ static void storm_memset_cmng(struct bnx2x *bp, } } +/* init cmng mode in HW according to local configuration */ +void bnx2x_set_local_cmng(struct bnx2x *bp) +{ + int cmng_fns = 
bnx2x_get_cmng_fns_mode(bp); + + if (cmng_fns != CMNG_FNS_NONE) { + bnx2x_cmng_fns_init(bp, false, cmng_fns); + storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); + } else { + /* rate shaping and fairness are disabled */ + DP(NETIF_MSG_IFUP, + "single function mode without fairness\n"); + } +} + /* This function is called upon link interrupt */ static void bnx2x_link_attn(struct bnx2x *bp) { @@ -2568,17 +2583,8 @@ static void bnx2x_link_attn(struct bnx2x *bp) bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); } - if (bp->link_vars.link_up && bp->link_vars.line_speed) { - int cmng_fns = bnx2x_get_cmng_fns_mode(bp); - - if (cmng_fns != CMNG_FNS_NONE) { - bnx2x_cmng_fns_init(bp, false, cmng_fns); - storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); - } else - /* rate shaping and fairness are disabled */ - DP(NETIF_MSG_IFUP, - "single function mode without fairness\n"); - } + if (bp->link_vars.link_up && bp->link_vars.line_speed) + bnx2x_set_local_cmng(bp); __bnx2x_link_report(bp); @@ -10360,6 +10366,10 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp) bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; + + bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ? + BC_SUPPORTS_RMMOD_CMD : 0; + boot_mode = SHMEM_RD(bp, dev_info.port_feature_config[BP_PORT(bp)].mba_config) & PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; @@ -11522,6 +11532,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) mutex_init(&bp->port.phy_mutex); mutex_init(&bp->fw_mb_mutex); spin_lock_init(&bp->stats_lock); + sema_init(&bp->stats_sema, 1); INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); @@ -12825,13 +12836,17 @@ static void __bnx2x_remove(struct pci_dev *pdev, bnx2x_dcbnl_update_applist(bp, true); #endif + if (IS_PF(bp) && + !BP_NOMCP(bp) && + (bp->flags & BC_SUPPORTS_RMMOD_CMD)) + bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0); + /* Close the interface - either directly or implicitly */ if (remove_netdev) { unregister_netdev(dev); } else { rtnl_lock(); - if (netif_running(dev)) - bnx2x_close(dev); + dev_close(dev); rtnl_unlock(); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 6291324913e9..1d925fd9cdc6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -3471,7 +3471,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp) alloc_mem_err: BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, sizeof(struct bnx2x_vf_mbx_msg)); - BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, + BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, sizeof(union pf_vf_bulletin)); return -ENOMEM; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 98366abd02bd..d63d1327b051 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -221,7 +221,8 @@ static int bnx2x_stats_comp(struct bnx2x *bp) * Statistics service functions */ -static void bnx2x_stats_pmf_update(struct bnx2x *bp) +/* should be called under stats_sema */ +static void __bnx2x_stats_pmf_update(struct bnx2x *bp) { struct dmae_command *dmae; u32 opcode; @@ -518,7 +519,8 @@ static void bnx2x_func_stats_init(struct bnx2x *bp) *stats_comp = 0; } -static void bnx2x_stats_start(struct bnx2x *bp) +/* should be called under stats_sema */ +static void __bnx2x_stats_start(struct bnx2x *bp) { /* vfs travel through here as part of the statistics 
FSM, but no action * is required @@ -534,13 +536,34 @@ static void bnx2x_stats_start(struct bnx2x *bp) bnx2x_hw_stats_post(bp); bnx2x_storm_stats_post(bp); + + bp->stats_started = true; +} + +static void bnx2x_stats_start(struct bnx2x *bp) +{ + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); + __bnx2x_stats_start(bp); + up(&bp->stats_sema); } static void bnx2x_stats_pmf_start(struct bnx2x *bp) { + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); bnx2x_stats_comp(bp); - bnx2x_stats_pmf_update(bp); - bnx2x_stats_start(bp); + __bnx2x_stats_pmf_update(bp); + __bnx2x_stats_start(bp); + up(&bp->stats_sema); +} + +static void bnx2x_stats_pmf_update(struct bnx2x *bp) +{ + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); + __bnx2x_stats_pmf_update(bp); + up(&bp->stats_sema); } static void bnx2x_stats_restart(struct bnx2x *bp) @@ -550,8 +573,11 @@ static void bnx2x_stats_restart(struct bnx2x *bp) */ if (IS_VF(bp)) return; + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); bnx2x_stats_comp(bp); - bnx2x_stats_start(bp); + __bnx2x_stats_start(bp); + up(&bp->stats_sema); } static void bnx2x_bmac_stats_update(struct bnx2x *bp) @@ -888,9 +914,7 @@ static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp) /* Make sure we use the value of the counter * used for sending the last stats ramrod. */ - spin_lock_bh(&bp->stats_lock); cur_stats_counter = bp->stats_counter - 1; - spin_unlock_bh(&bp->stats_lock); /* are storm stats valid? */ if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { @@ -1227,12 +1251,18 @@ static void bnx2x_stats_update(struct bnx2x *bp) { u32 *stats_comp = bnx2x_sp(bp, stats_comp); - if (bnx2x_edebug_stats_stopped(bp)) + /* we run update from timer context, so give up + * if somebody is in the middle of transition + */ + if (down_trylock(&bp->stats_sema)) return; + if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started) + goto out; + if (IS_PF(bp)) { if (*stats_comp != DMAE_COMP_VAL) - return; + goto out; if (bp->port.pmf) bnx2x_hw_stats_update(bp); @@ -1242,7 +1272,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) BNX2X_ERR("storm stats were not updated for 3 times\n"); bnx2x_panic(); } - return; + goto out; } } else { /* vf doesn't collect HW statistics, and doesn't get completions @@ -1256,7 +1286,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) /* vf is done */ if (IS_VF(bp)) - return; + goto out; if (netif_msg_timer(bp)) { struct bnx2x_eth_stats *estats = &bp->eth_stats; @@ -1267,6 +1297,9 @@ static void bnx2x_stats_update(struct bnx2x *bp) bnx2x_hw_stats_post(bp); bnx2x_storm_stats_post(bp); + +out: + up(&bp->stats_sema); } static void bnx2x_port_stats_stop(struct bnx2x *bp) @@ -1332,6 +1365,11 @@ static void bnx2x_stats_stop(struct bnx2x *bp) { int update = 0; + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); + + bp->stats_started = false; + bnx2x_stats_comp(bp); if (bp->port.pmf) @@ -1348,6 +1386,8 @@ static void bnx2x_stats_stop(struct bnx2x *bp) bnx2x_hw_stats_post(bp); bnx2x_stats_comp(bp); } + + up(&bp->stats_sema); } static void bnx2x_stats_do_nothing(struct bnx2x *bp) @@ -1376,15 +1416,17 @@ static const struct { void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) { enum bnx2x_stats_state state; + void (*action)(struct bnx2x *bp); if (unlikely(bp->panic)) return; spin_lock_bh(&bp->stats_lock); state = bp->stats_state; 
bp->stats_state = bnx2x_stats_stm[state][event].next_state; + action = bnx2x_stats_stm[state][event].action; spin_unlock_bh(&bp->stats_lock); - bnx2x_stats_stm[state][event].action(bp); + action(bp); if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ff28081146f2..95b8995187d7 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -17858,8 +17858,10 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, done: if (state == pci_channel_io_perm_failure) { - tg3_napi_enable(tp); - dev_close(netdev); + if (netdev) { + tg3_napi_enable(tp); + dev_close(netdev); + } err = PCI_ERS_RESULT_DISCONNECT; } else { pci_disable_device(pdev); @@ -17889,7 +17891,8 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) rtnl_lock(); if (pci_enable_device(pdev)) { - netdev_err(netdev, "Cannot re-enable PCI device after reset.\n"); + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); goto done; } @@ -17897,7 +17900,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) pci_restore_state(pdev); pci_save_state(pdev); - if (!netif_running(netdev)) { + if (!netdev || !netif_running(netdev)) { rc = PCI_ERS_RESULT_RECOVERED; goto done; } @@ -17909,7 +17912,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) rc = PCI_ERS_RESULT_RECOVERED; done: - if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) { + if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) { tg3_napi_enable(tp); dev_close(netdev); } diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 687ec4a8bb48..9c89dc8fe105 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -455,11 +455,6 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, q->pg_chunk.offset = 0; mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, 0, q->alloc_size, PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) { - __free_pages(q->pg_chunk.page, order); - q->pg_chunk.page = NULL; - return -EIO; - } q->pg_chunk.mapping = mapping; } sd->pg_chunk = q->pg_chunk; @@ -954,75 +949,40 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb) return flits_to_desc(flits); } - -/* map_skb - map a packet main body and its page fragments - * @pdev: the PCI device - * @skb: the packet - * @addr: placeholder to save the mapped addresses - * - * map the main body of an sk_buff and its page fragments, if any. 
- */ -static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb, - dma_addr_t *addr) -{ - const skb_frag_t *fp, *end; - const struct skb_shared_info *si; - - *addr = pci_map_single(pdev, skb->data, skb_headlen(skb), - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, *addr)) - goto out_err; - - si = skb_shinfo(skb); - end = &si->frags[si->nr_frags]; - - for (fp = si->frags; fp < end; fp++) { - *++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp), - DMA_TO_DEVICE); - if (pci_dma_mapping_error(pdev, *addr)) - goto unwind; - } - return 0; - -unwind: - while (fp-- > si->frags) - dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp), - DMA_TO_DEVICE); - - pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE); -out_err: - return -ENOMEM; -} - /** - * write_sgl - populate a scatter/gather list for a packet + * make_sgl - populate a scatter/gather list for a packet * @skb: the packet * @sgp: the SGL to populate * @start: start address of skb main body data to include in the SGL * @len: length of skb main body data to include in the SGL - * @addr: the list of the mapped addresses + * @pdev: the PCI device * - * Copies the scatter/gather list for the buffers that make up a packet + * Generates a scatter/gather list for the buffers that make up a packet * and returns the SGL size in 8-byte words. The caller must size the SGL * appropriately. */ -static inline unsigned int write_sgl(const struct sk_buff *skb, +static inline unsigned int make_sgl(const struct sk_buff *skb, struct sg_ent *sgp, unsigned char *start, - unsigned int len, const dma_addr_t *addr) + unsigned int len, struct pci_dev *pdev) { - unsigned int i, j = 0, k = 0, nfrags; + dma_addr_t mapping; + unsigned int i, j = 0, nfrags; if (len) { + mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE); sgp->len[0] = cpu_to_be32(len); - sgp->addr[j++] = cpu_to_be64(addr[k++]); + sgp->addr[0] = cpu_to_be64(mapping); + j = 1; } nfrags = skb_shinfo(skb)->nr_frags; for (i = 0; i < nfrags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), + DMA_TO_DEVICE); sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); - sgp->addr[j] = cpu_to_be64(addr[k++]); + sgp->addr[j] = cpu_to_be64(mapping); j ^= 1; if (j == 0) ++sgp; @@ -1178,7 +1138,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, const struct port_info *pi, unsigned int pidx, unsigned int gen, struct sge_txq *q, unsigned int ndesc, - unsigned int compl, const dma_addr_t *addr) + unsigned int compl) { unsigned int flits, sgl_flits, cntrl, tso_info; struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; @@ -1236,7 +1196,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, } sgp = ndesc == 1 ? 
(struct sg_ent *)&d->flit[flits] : sgl; - sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr); + sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev); write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), @@ -1267,7 +1227,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) struct netdev_queue *txq; struct sge_qset *qs; struct sge_txq *q; - dma_addr_t addr[MAX_SKB_FRAGS + 1]; /* * The chip min packet length is 9 octets but play safe and reject @@ -1296,11 +1255,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } - if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) { - dev_kfree_skb(skb); - return NETDEV_TX_OK; - } - q->in_use += ndesc; if (unlikely(credits - ndesc < q->stop_thres)) { t3_stop_tx_queue(txq, qs, q); @@ -1358,7 +1312,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) if (likely(!skb_shared(skb))) skb_orphan(skb); - write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr); + write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl); check_ring_tx_db(adap, q); return NETDEV_TX_OK; } @@ -1623,8 +1577,7 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev, */ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, struct sge_txq *q, unsigned int pidx, - unsigned int gen, unsigned int ndesc, - const dma_addr_t *addr) + unsigned int gen, unsigned int ndesc) { unsigned int sgl_flits, flits; struct work_request_hdr *from; @@ -1645,9 +1598,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, flits = skb_transport_offset(skb) / 8; sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; - sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb), - skb_tail_pointer(skb) - - skb_transport_header(skb), addr); + sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), + skb->tail - skb->transport_header, + adap->pdev); if (need_skb_unmap()) { setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); skb->destructor = deferred_unmap_destructor; @@ -1705,11 +1658,6 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); goto again; } - if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) { - spin_unlock(&q->lock); - return NET_XMIT_SUCCESS; - } - gen = q->gen; q->in_use += ndesc; pidx = q->pidx; @@ -1720,7 +1668,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); } spin_unlock(&q->lock); - write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head); + write_ofld_wr(adap, skb, q, pidx, gen, ndesc); check_ring_tx_db(adap, q); return NET_XMIT_SUCCESS; } @@ -1738,7 +1686,6 @@ static void restart_offloadq(unsigned long data) struct sge_txq *q = &qs->txq[TXQ_OFLD]; const struct port_info *pi = netdev_priv(qs->netdev); struct adapter *adap = pi->adapter; - unsigned int written = 0; spin_lock(&q->lock); again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); @@ -1758,14 +1705,10 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); break; } - if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) - break; - gen = q->gen; q->in_use += ndesc; pidx = q->pidx; q->pidx += ndesc; - written += ndesc; if (q->pidx >= q->size) { q->pidx -= q->size; q->gen ^= 1; @@ -1773,8 +1716,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); __skb_unlink(skb, &q->sendq); spin_unlock(&q->lock); - write_ofld_wr(adap, skb, q, pidx, gen, ndesc, - (dma_addr_t *)skb->head); + write_ofld_wr(adap, skb, q, pidx, gen, ndesc); 
spin_lock(&q->lock); } spin_unlock(&q->lock); @@ -1784,9 +1726,8 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); set_bit(TXQ_LAST_PKT_DB, &q->flags); #endif wmb(); - if (likely(written)) - t3_write_reg(adap, A_SG_KDOORBELL, - F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); + t3_write_reg(adap, A_SG_KDOORBELL, + F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); } /** diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 5175f9729970..85923e2d63b9 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -3168,6 +3168,9 @@ int be_cmd_get_func_config(struct be_adapter *adapter) adapter->max_event_queues = le16_to_cpu(desc->eq_count); adapter->if_cap_flags = le32_to_cpu(desc->cap_flags); + + /* Clear flags that driver is not interested in */ + adapter->if_cap_flags &= BE_IF_CAP_FLAGS_WANT; } err: mutex_unlock(&adapter->mbox_lock); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 9b916086c335..6237192a55d1 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -564,6 +564,12 @@ enum be_if_flags { BE_IF_FLAGS_MULTICAST = 0x1000 }; +#define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\ + BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\ + BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\ + BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\ + BE_IF_FLAGS_UNTAGGED) + /* An RX interface is an object with one or more MAC addresses and * filtering capabilities. */ struct be_cmd_req_if_create { diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index c896079728e1..ef94a591f9e5 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base) } /* Allocate and setup a new buffer for receiving */ -static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, - struct sk_buff *skb, unsigned int bufsize) +static int skge_rx_setup(struct skge_port *skge, struct skge_element *e, + struct sk_buff *skb, unsigned int bufsize) { struct skge_rx_desc *rd = e->desc; - u64 map; + dma_addr_t map; map = pci_map_single(skge->hw->pdev, skb->data, bufsize, PCI_DMA_FROMDEVICE); - rd->dma_lo = map; - rd->dma_hi = map >> 32; + if (pci_dma_mapping_error(skge->hw->pdev, map)) + return -1; + + rd->dma_lo = lower_32_bits(map); + rd->dma_hi = upper_32_bits(map); e->skb = skb; rd->csum1_start = ETH_HLEN; rd->csum2_start = ETH_HLEN; @@ -953,6 +956,7 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; dma_unmap_addr_set(e, mapaddr, map); dma_unmap_len_set(e, maplen, bufsize); + return 0; } /* Resume receiving using existing skb, @@ -1014,7 +1018,10 @@ static int skge_rx_fill(struct net_device *dev) return -ENOMEM; skb_reserve(skb, NET_IP_ALIGN); - skge_rx_setup(skge, e, skb, skge->rx_buf_size); + if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) { + dev_kfree_skb(skb); + return -EIO; + } } while ((e = e->next) != ring->start); ring->to_clean = ring->start; @@ -2544,7 +2551,7 @@ static int skge_up(struct net_device *dev) BUG_ON(skge->dma & 7); - if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) { + if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) { dev_err(&hw->pdev->dev, 
"pci_alloc_consistent region crosses 4G boundary\n"); err = -EINVAL; goto free_pci_mem; @@ -2729,7 +2736,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, struct skge_tx_desc *td; int i; u32 control, len; - u64 map; + dma_addr_t map; if (skb_padto(skb, ETH_ZLEN)) return NETDEV_TX_OK; @@ -2743,11 +2750,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, e->skb = skb; len = skb_headlen(skb); map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(hw->pdev, map)) + goto mapping_error; + dma_unmap_addr_set(e, mapaddr, map); dma_unmap_len_set(e, maplen, len); - td->dma_lo = map; - td->dma_hi = map >> 32; + td->dma_lo = lower_32_bits(map); + td->dma_hi = upper_32_bits(map); if (skb->ip_summed == CHECKSUM_PARTIAL) { const int offset = skb_checksum_start_offset(skb); @@ -2778,14 +2788,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); + if (dma_mapping_error(&hw->pdev->dev, map)) + goto mapping_unwind; e = e->next; e->skb = skb; tf = e->desc; BUG_ON(tf->control & BMU_OWN); - tf->dma_lo = map; - tf->dma_hi = (u64) map >> 32; + tf->dma_lo = lower_32_bits(map); + tf->dma_hi = upper_32_bits(map); dma_unmap_addr_set(e, mapaddr, map); dma_unmap_len_set(e, maplen, skb_frag_size(frag)); @@ -2815,6 +2827,26 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, } return NETDEV_TX_OK; + +mapping_unwind: + e = skge->tx_ring.to_use; + pci_unmap_single(hw->pdev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_TODEVICE); + while (i-- > 0) { + e = e->next; + pci_unmap_page(hw->pdev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_TODEVICE); + } + +mapping_error: + if (net_ratelimit()) + dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); + dev_kfree_skb(skb); + return NETDEV_TX_OK; } @@ -3045,11 +3077,13 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, pci_dma_sync_single_for_cpu(skge->hw->pdev, dma_unmap_addr(e, mapaddr), - len, PCI_DMA_FROMDEVICE); + dma_unmap_len(e, maplen), + PCI_DMA_FROMDEVICE); skb_copy_from_linear_data(e->skb, skb->data, len); pci_dma_sync_single_for_device(skge->hw->pdev, dma_unmap_addr(e, mapaddr), - len, PCI_DMA_FROMDEVICE); + dma_unmap_len(e, maplen), + PCI_DMA_FROMDEVICE); skge_rx_reuse(e, skge->rx_buf_size); } else { struct sk_buff *nskb; @@ -3058,13 +3092,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, if (!nskb) goto resubmit; + if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { + dev_kfree_skb(nskb); + goto resubmit; + } + pci_unmap_single(skge->hw->pdev, dma_unmap_addr(e, mapaddr), dma_unmap_len(e, maplen), PCI_DMA_FROMDEVICE); skb = e->skb; prefetch(skb->data); - skge_rx_setup(skge, e, nskb, skge->rx_buf_size); } skb_put(skb, len); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index c571de85d0f9..5472cbd34028 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -46,7 +46,7 @@ #include "mlx5_core.h" enum { - CMD_IF_REV = 4, + CMD_IF_REV = 5, }; enum { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index c02cbcfd0fb8..443cc4d7b024 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -268,7 +268,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) case MLX5_EVENT_TYPE_PAGE_REQUEST: 
{ u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); - s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages); + s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages); mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages); mlx5_core_req_pages_handler(dev, func_id, npages); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 72a5222447f5..f012658b6a92 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -113,7 +113,7 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; caps->log_max_mcg = out->hca_cap.log_max_mcg; - caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg); + caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff; caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f); caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f); caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 748f10a155c4..3e6670c4a7cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -55,33 +55,9 @@ enum { }; static DEFINE_SPINLOCK(health_lock); - static LIST_HEAD(health_list); static struct work_struct health_work; -static health_handler_t reg_handler; -int mlx5_register_health_report_handler(health_handler_t handler) -{ - spin_lock_irq(&health_lock); - if (reg_handler) { - spin_unlock_irq(&health_lock); - return -EEXIST; - } - reg_handler = handler; - spin_unlock_irq(&health_lock); - - return 0; -} -EXPORT_SYMBOL(mlx5_register_health_report_handler); - -void mlx5_unregister_health_report_handler(void) -{ - spin_lock_irq(&health_lock); - reg_handler = NULL; - spin_unlock_irq(&health_lock); -} -EXPORT_SYMBOL(mlx5_unregister_health_report_handler); - static void health_care(struct work_struct *work) { struct mlx5_core_health *health, *n; @@ -98,11 +74,8 @@ static void health_care(struct work_struct *work) priv = container_of(health, struct mlx5_priv, health); dev = container_of(priv, struct mlx5_core_dev, priv); mlx5_core_warn(dev, "handling bad device here\n"); + /* nothing yet */ spin_lock_irq(&health_lock); - if (reg_handler) - reg_handler(dev->pdev, health->health, - sizeof(health->health)); - list_del_init(&health->list); spin_unlock_irq(&health_lock); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 4a3e137931a3..3a2408d44820 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -43,10 +43,16 @@ enum { MLX5_PAGES_TAKE = 2 }; +enum { + MLX5_BOOT_PAGES = 1, + MLX5_INIT_PAGES = 2, + MLX5_POST_INIT_PAGES = 3 +}; + struct mlx5_pages_req { struct mlx5_core_dev *dev; u32 func_id; - s16 npages; + s32 npages; struct work_struct work; }; @@ -64,27 +70,23 @@ struct mlx5_query_pages_inbox { struct mlx5_query_pages_outbox { struct mlx5_outbox_hdr hdr; - __be16 num_boot_pages; + __be16 rsvd; __be16 func_id; - __be16 init_pages; - __be16 num_pages; + __be32 num_pages; }; struct mlx5_manage_pages_inbox { struct mlx5_inbox_hdr hdr; - __be16 rsvd0; + __be16 rsvd; __be16 func_id; - __be16 rsvd1; - __be16 num_entries; - u8 rsvd2[16]; + __be32 num_entries; __be64 pas[0]; }; struct 
mlx5_manage_pages_outbox { struct mlx5_outbox_hdr hdr; - u8 rsvd0[2]; - __be16 num_entries; - u8 rsvd1[20]; + __be32 num_entries; + u8 rsvd[4]; __be64 pas[0]; }; @@ -146,7 +148,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr) } static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, - s16 *pages, s16 *init_pages, u16 *boot_pages) + s32 *npages, int boot) { struct mlx5_query_pages_inbox in; struct mlx5_query_pages_outbox out; @@ -155,6 +157,8 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, memset(&in, 0, sizeof(in)); memset(&out, 0, sizeof(out)); in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); + in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); if (err) return err; @@ -162,15 +166,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, if (out.hdr.status) return mlx5_cmd_status_to_err(&out.hdr); - if (pages) - *pages = be16_to_cpu(out.num_pages); - - if (init_pages) - *init_pages = be16_to_cpu(out.init_pages); - - if (boot_pages) - *boot_pages = be16_to_cpu(out.num_boot_pages); - + *npages = be32_to_cpu(out.num_pages); *func_id = be16_to_cpu(out.func_id); return err; @@ -224,7 +220,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); in->func_id = cpu_to_be16(func_id); - in->num_entries = cpu_to_be16(npages); + in->num_entries = cpu_to_be32(npages); err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); mlx5_core_dbg(dev, "err %d\n", err); if (err) { @@ -292,7 +288,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); in.func_id = cpu_to_be16(func_id); - in.num_entries = cpu_to_be16(npages); + in.num_entries = cpu_to_be32(npages); mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); if (err) { @@ -306,7 +302,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, goto out_free; } - num_claimed = be16_to_cpu(out->num_entries); + num_claimed = be32_to_cpu(out->num_entries); if (nclaimed) *nclaimed = num_claimed; @@ -345,7 +341,7 @@ static void pages_work_handler(struct work_struct *work) } void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, - s16 npages) + s32 npages) { struct mlx5_pages_req *req; @@ -364,20 +360,18 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) { - u16 uninitialized_var(boot_pages); - s16 uninitialized_var(init_pages); u16 uninitialized_var(func_id); + s32 uninitialized_var(npages); int err; - err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages, - &boot_pages); + err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot); if (err) return err; + mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n", + npages, boot ? "boot" : "init", func_id); - mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n", - init_pages, boot_pages, func_id); - return give_pages(dev, func_id, boot ? 
boot_pages : init_pages, 0); + return give_pages(dev, func_id, npages, 0); } static int optimal_reclaimed_pages(void) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 78ca75576b9e..e53caaaf1303 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -3272,6 +3272,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) u8 val; int ret, max_sds_rings = adapter->max_sds_rings; + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { + netdev_info(netdev, "Device is resetting\n"); + return -EBUSY; + } + if (qlcnic_get_diag_lock(adapter)) { netdev_info(netdev, "Device in diagnostics mode\n"); return -EBUSY; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 17c26a1158a0..c97e2e07b279 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -633,7 +633,8 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) return -EIO; } - qlcnic_set_drv_version(adapter); + if (adapter->portnum == 0) + qlcnic_set_drv_version(adapter); qlcnic_83xx_idc_attach_driver(adapter); return 0; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index b8242bc0293b..a780b73e8b2b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2161,7 +2161,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_out_disable_mbx_intr; - qlcnic_set_drv_version(adapter); + if (adapter->portnum == 0) + qlcnic_set_drv_version(adapter); pci_set_drvdata(pdev, adapter); @@ -3083,7 +3084,8 @@ done: adapter->fw_fail_cnt = 0; adapter->flags &= ~QLCNIC_FW_HANG; clear_bit(__QLCNIC_RESETTING, &adapter->state); - qlcnic_set_drv_version(adapter); + if (adapter->portnum == 0) + qlcnic_set_drv_version(adapter); if (!qlcnic_clr_drv_state(adapter)) qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 10ed82b3baca..660c3f5b2237 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -170,9 +170,9 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter, if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) { err = qlcnic_get_beacon_state(adapter, &h_beacon_state); - if (!err) { - dev_info(&adapter->pdev->dev, - "Failed to get current beacon state\n"); + if (err) { + netdev_err(adapter->netdev, + "Failed to get current beacon state\n"); } else { if (h_beacon_state == QLCNIC_BEACON_DISABLE) ahw->beacon_state = 0; diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 6f35f8404d68..d2e591955bdd 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -524,6 +524,7 @@ rx_status_loop: PCI_DMA_FROMDEVICE); if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { dev->stats.rx_dropped++; + kfree_skb(new_skb); goto rx_next; } diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index c9d942a5c335..1ef9d8a555aa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -33,10 +33,15 @@ static 
unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) struct stmmac_priv *priv = (struct stmmac_priv *)p; unsigned int txsize = priv->dma_tx_size; unsigned int entry = priv->cur_tx % txsize; - struct dma_desc *desc = priv->dma_tx + entry; + struct dma_desc *desc; unsigned int nopaged_len = skb_headlen(skb); unsigned int bmax, len; + if (priv->extend_desc) + desc = (struct dma_desc *)(priv->dma_etx + entry); + else + desc = priv->dma_tx + entry; + if (priv->plat->enh_desc) bmax = BUF_SIZE_8KiB; else @@ -54,7 +59,11 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) STMMAC_RING_MODE); wmb(); entry = (++priv->cur_tx) % txsize; - desc = priv->dma_tx + entry; + + if (priv->extend_desc) + desc = (struct dma_desc *)(priv->dma_etx + entry); + else + desc = priv->dma_tx + entry; desc->des2 = dma_map_single(priv->device, skb->data + bmax, len, DMA_TO_DEVICE); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f2ccb36e8685..0a9bb9d30c3f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -939,15 +939,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, GFP_KERNEL); - if (unlikely(skb == NULL)) { + if (!skb) { pr_err("%s: Rx init fails; skb is NULL\n", __func__); - return 1; + return -ENOMEM; } skb_reserve(skb, NET_IP_ALIGN); priv->rx_skbuff[i] = skb; priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, priv->dma_buf_sz, DMA_FROM_DEVICE); + if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) { + pr_err("%s: DMA mapping error\n", __func__); + dev_kfree_skb_any(skb); + return -EINVAL; + } p->des2 = priv->rx_skbuff_dma[i]; @@ -958,6 +963,16 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, return 0; } +static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i) +{ + if (priv->rx_skbuff[i]) { + dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], + priv->dma_buf_sz, DMA_FROM_DEVICE); + dev_kfree_skb_any(priv->rx_skbuff[i]); + } + priv->rx_skbuff[i] = NULL; +} + /** * init_dma_desc_rings - init the RX/TX descriptor rings * @dev: net device structure @@ -965,13 +980,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, * and allocates the socket buffers. It suppors the chained and ring * modes. */ -static void init_dma_desc_rings(struct net_device *dev) +static int init_dma_desc_rings(struct net_device *dev) { int i; struct stmmac_priv *priv = netdev_priv(dev); unsigned int txsize = priv->dma_tx_size; unsigned int rxsize = priv->dma_rx_size; unsigned int bfsize = 0; + int ret = -ENOMEM; /* Set the max buffer size according to the DESC mode * and the MTU. Note that RING mode allows 16KiB bsize. 
@@ -992,34 +1008,60 @@ static void init_dma_desc_rings(struct net_device *dev) dma_extended_desc), &priv->dma_rx_phy, GFP_KERNEL); + if (!priv->dma_erx) + goto err_dma; + priv->dma_etx = dma_alloc_coherent(priv->device, txsize * sizeof(struct dma_extended_desc), &priv->dma_tx_phy, GFP_KERNEL); - if ((!priv->dma_erx) || (!priv->dma_etx)) - return; + if (!priv->dma_etx) { + dma_free_coherent(priv->device, priv->dma_rx_size * + sizeof(struct dma_extended_desc), + priv->dma_erx, priv->dma_rx_phy); + goto err_dma; + } } else { priv->dma_rx = dma_alloc_coherent(priv->device, rxsize * sizeof(struct dma_desc), &priv->dma_rx_phy, GFP_KERNEL); + if (!priv->dma_rx) + goto err_dma; + priv->dma_tx = dma_alloc_coherent(priv->device, txsize * sizeof(struct dma_desc), &priv->dma_tx_phy, GFP_KERNEL); - if ((!priv->dma_rx) || (!priv->dma_tx)) - return; + if (!priv->dma_tx) { + dma_free_coherent(priv->device, priv->dma_rx_size * + sizeof(struct dma_desc), + priv->dma_rx, priv->dma_rx_phy); + goto err_dma; + } } priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), GFP_KERNEL); + if (!priv->rx_skbuff_dma) + goto err_rx_skbuff_dma; + priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), GFP_KERNEL); + if (!priv->rx_skbuff) + goto err_rx_skbuff; + priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t), GFP_KERNEL); + if (!priv->tx_skbuff_dma) + goto err_tx_skbuff_dma; + priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), GFP_KERNEL); + if (!priv->tx_skbuff) + goto err_tx_skbuff; + if (netif_msg_probe(priv)) { pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); @@ -1034,8 +1076,9 @@ static void init_dma_desc_rings(struct net_device *dev) else p = priv->dma_rx + i; - if (stmmac_init_rx_buffers(priv, p, i)) - break; + ret = stmmac_init_rx_buffers(priv, p, i); + if (ret) + goto err_init_rx_buffers; if (netif_msg_probe(priv)) pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], @@ -1081,20 +1124,44 @@ static void init_dma_desc_rings(struct net_device *dev) if (netif_msg_hw(priv)) stmmac_display_rings(priv); + + return 0; +err_init_rx_buffers: + while (--i >= 0) + stmmac_free_rx_buffers(priv, i); + kfree(priv->tx_skbuff); +err_tx_skbuff: + kfree(priv->tx_skbuff_dma); +err_tx_skbuff_dma: + kfree(priv->rx_skbuff); +err_rx_skbuff: + kfree(priv->rx_skbuff_dma); +err_rx_skbuff_dma: + if (priv->extend_desc) { + dma_free_coherent(priv->device, priv->dma_tx_size * + sizeof(struct dma_extended_desc), + priv->dma_etx, priv->dma_tx_phy); + dma_free_coherent(priv->device, priv->dma_rx_size * + sizeof(struct dma_extended_desc), + priv->dma_erx, priv->dma_rx_phy); + } else { + dma_free_coherent(priv->device, + priv->dma_tx_size * sizeof(struct dma_desc), + priv->dma_tx, priv->dma_tx_phy); + dma_free_coherent(priv->device, + priv->dma_rx_size * sizeof(struct dma_desc), + priv->dma_rx, priv->dma_rx_phy); + } +err_dma: + return ret; } static void dma_free_rx_skbufs(struct stmmac_priv *priv) { int i; - for (i = 0; i < priv->dma_rx_size; i++) { - if (priv->rx_skbuff[i]) { - dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], - priv->dma_buf_sz, DMA_FROM_DEVICE); - dev_kfree_skb_any(priv->rx_skbuff[i]); - } - priv->rx_skbuff[i] = NULL; - } + for (i = 0; i < priv->dma_rx_size; i++) + stmmac_free_rx_buffers(priv, i); } static void dma_free_tx_skbufs(struct stmmac_priv *priv) @@ -1560,12 +1627,17 @@ static int stmmac_open(struct net_device *dev) priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); priv->dma_rx_size = 
STMMAC_ALIGN(dma_rxsize); priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); - init_dma_desc_rings(dev); + + ret = init_dma_desc_rings(dev); + if (ret < 0) { + pr_err("%s: DMA descriptors initialization failed\n", __func__); + goto dma_desc_error; + } /* DMA initialization and SW reset */ ret = stmmac_init_dma_engine(priv); if (ret < 0) { - pr_err("%s: DMA initialization failed\n", __func__); + pr_err("%s: DMA engine initialization failed\n", __func__); goto init_error; } @@ -1672,6 +1744,7 @@ wolirq_error: init_error: free_dma_desc_resources(priv); +dma_desc_error: if (priv->phydev) phy_disconnect(priv->phydev); phy_error: diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index ef776310fab1..d022bf936572 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2100,7 +2100,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } - netif_rx(skb); + netif_receive_skb(skb); stats->rx_bytes += pkt_len; stats->rx_packets++; @@ -2904,6 +2904,7 @@ out: return ret; err_iounmap: + netif_napi_del(&vptr->napi); iounmap(regs); err_free_dev: free_netdev(netdev); @@ -2924,6 +2925,7 @@ static int velocity_remove(struct device *dev) struct velocity_info *vptr = netdev_priv(netdev); unregister_netdev(netdev); + netif_napi_del(&vptr->napi); iounmap(vptr->mac_regs); free_netdev(netdev); velocity_nics--; |
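
A few recurring patterns in this merge are worth spelling out as standalone sketches. The first hunk, in arc/emac_main.c, fixes a NAPI budget off-by-one: a poll loop written `work_done <= budget` can process `budget + 1` packets, more than the core accounted for. Below is a minimal sketch of the contract the fix restores; every `foo_*` identifier is a hypothetical stand-in, and only the `napi_*` calls are real kernel API.

```c
#include <linux/netdevice.h>

struct foo_priv {
	struct napi_struct napi;
	/* ... ring state ... */
};

/* Hypothetical driver primitives: receive one packet, unmask RX IRQs. */
bool foo_rx_one(struct foo_priv *priv);
void foo_enable_rx_irq(struct foo_priv *priv);

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int work_done;

	/* '<', not '<=': never consume more than 'budget' packets. */
	for (work_done = 0; work_done < budget; work_done++) {
		if (!foo_rx_one(priv))	/* ring drained early */
			break;
	}

	if (work_done < budget) {
		napi_complete(napi);	/* back to interrupt mode */
		foo_enable_rx_irq(priv);
	}
	return work_done;
}
```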
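
The bnx2x statistics changes serialize the stats state machine behind a new `stats_sema` semaphore plus a `stats_started` flag: blocking transition paths take the semaphore with a bounded `down_timeout()`, while the timer-driven update path only trylocks and skips a tick if a transition is in flight. A condensed sketch with hypothetical `foo_*` names; note that, as in the patch itself, a timeout is only logged and the path presses on.

```c
#include <linux/semaphore.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

struct foo_dev {
	struct semaphore stats_sema;	/* serializes FSM transitions */
	bool stats_started;
};

void foo_stats_stop(struct foo_dev *dev)
{
	/* Bounded wait; on timeout the patch merely logs and continues. */
	if (down_timeout(&dev->stats_sema, HZ / 10))
		pr_err("unable to acquire stats lock\n");

	dev->stats_started = false;
	/* ... post the final stats requests and wait for completion ... */

	up(&dev->stats_sema);
}

/* Runs from timer context, so it must not sleep: trylock or give up. */
void foo_stats_update(struct foo_dev *dev)
{
	if (down_trylock(&dev->stats_sema))
		return;			/* transition in progress: skip this tick */

	if (dev->stats_started) {
		/* ... harvest hardware counters ... */
	}

	up(&dev->stats_sema);
}
```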
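
The last bnx2x_stats.c hunk closes a related race in `bnx2x_stats_handle()`: the action callback is now snapshotted under `stats_lock` together with the state transition, rather than looked up again after the lock is dropped, so the state written and the action invoked always come from the same transition. The idiom, sketched with a hypothetical transition table:

```c
#include <linux/spinlock.h>

enum foo_state { FOO_ST_DISABLED, FOO_ST_ENABLED, FOO_ST_MAX };
enum foo_event { FOO_EV_UPDATE, FOO_EV_STOP, FOO_EV_MAX };

struct foo_dev {
	spinlock_t stats_lock;
	enum foo_state stats_state;
};

/* Transition table; populated with the driver's real handlers. */
static struct {
	void (*action)(struct foo_dev *dev);
	enum foo_state next_state;
} foo_stm[FOO_ST_MAX][FOO_EV_MAX];

void foo_stats_handle(struct foo_dev *dev, enum foo_event event)
{
	enum foo_state state;
	void (*action)(struct foo_dev *dev);

	spin_lock_bh(&dev->stats_lock);
	state = dev->stats_state;
	dev->stats_state = foo_stm[state][event].next_state;
	action = foo_stm[state][event].action;	/* snapshot under the lock */
	spin_unlock_bh(&dev->stats_lock);

	action(dev);	/* invoke without the lock held */
}
```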
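
The tg3 hunks harden the PCI error-recovery callbacks against a NULL `netdev` (AER can fire when no interface is bound to the function) and switch the failure message to `dev_err()` against the PCI device, which is always valid. A condensed sketch, assuming the driver stores its netdev as PCI drvdata the way tg3 does:

```c
#include <linux/pci.h>
#include <linux/netdevice.h>

static pci_ers_result_t foo_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (pci_enable_device(pdev)) {
		/* netdev may already be gone; log against the PCI device. */
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	/* Guard every netdev dereference on this path. */
	if (!netdev || !netif_running(netdev))
		return PCI_ERS_RESULT_RECOVERED;

	/* ... reinitialize the hardware and restart the interface ... */
	return PCI_ERS_RESULT_RECOVERED;
}
```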
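
Several drivers in this merge (cxgb3, skge, stmmac, 8139cp) gain checks on DMA-mapping results. The skge transmit path shows the full shape of the fix: every `pci_map_single()`/`skb_frag_dma_map()` result is tested, and a mid-frame failure unwinds the mappings already made before dropping the packet. A generic sketch of the pattern with hypothetical `foo_*` ring helpers; the `dma_*` and skb calls are real API.

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

struct foo_priv {
	struct device *dmadev;		/* &pdev->dev in a real driver */
	/* ... ring state ... */
};

/* Hypothetical ring helpers: post one descriptor / unmap the first n. */
void foo_post_desc(struct foo_priv *priv, dma_addr_t map, unsigned int len);
void foo_unmap_posted(struct foo_priv *priv, int n);

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);
	dma_addr_t map;
	int i;

	map = dma_map_single(priv->dmadev, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dmadev, map))
		goto drop;
	foo_post_desc(priv, map, skb_headlen(skb));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		map = skb_frag_dma_map(priv->dmadev, frag, 0,
				       skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dmadev, map))
			goto unwind;
		foo_post_desc(priv, map, skb_frag_size(frag));
	}
	/* ... write the doorbell ... */
	return NETDEV_TX_OK;

unwind:
	foo_unmap_posted(priv, i + 1);	/* the head plus i mapped frags */
drop:
	dev_kfree_skb_any(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;		/* the skb is consumed either way */
}
```

The 8139cp hunk is the receive-side counterpart of the same rule: when mapping a freshly allocated replacement skb fails, the replacement must be freed rather than leaked before the old buffer is reused.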
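
skge also stops open-coding `map >> 32` on a `u64` and uses `lower_32_bits()`/`upper_32_bits()` on a proper `dma_addr_t` instead; the helpers are correct whatever width `dma_addr_t` has on the platform, and they make the 4G-boundary test in `skge_up()` read naturally. Sketch (hypothetical `foo_*` descriptor):

```c
#include <linux/kernel.h>
#include <linux/types.h>

struct foo_desc {
	u32 dma_lo;
	u32 dma_hi;
};

static void foo_fill_desc(struct foo_desc *rd, dma_addr_t map)
{
	rd->dma_lo = lower_32_bits(map);
	rd->dma_hi = upper_32_bits(map);
}

/* A buffer stays inside one 4GiB window iff the upper halves of its
 * first and one-past-last addresses match. */
static bool foo_crosses_4g(dma_addr_t base, size_t size)
{
	return upper_32_bits(base) != upper_32_bits(base + size);
}
```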
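
The mlx5 hunks widen the firmware page-count fields from 16 to 32 bits across the QUERY_PAGES and MANAGE_PAGES commands, which is also why `CMD_IF_REV` moves from 4 to 5: the command layout changed. A hypothetical mirror of the new query output, with the host-side read widened to match (the count stays signed because, in this protocol, a negative value asks the driver to return pages):

```c
#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical mirror of the reworked QUERY_PAGES output layout. */
struct foo_query_pages_out {
	__be16 rsvd;
	__be16 func_id;
	__be32 num_pages;	/* was __be16: larger requests truncated */
};

static s32 foo_num_pages(const struct foo_query_pages_out *out)
{
	return (s32)be32_to_cpu(out->num_pages);
}
```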
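
Finally, the stmmac rework turns `init_dma_desc_rings()` from a `void` function that could bail out silently (leaving half the rings allocated) into one that returns an error code and frees everything it set up, in reverse order. That is the canonical kernel goto-ladder; a skeleton with hypothetical `foo_*` allocators:

```c
struct foo_priv;	/* opaque for the sketch */

/* Hypothetical per-resource allocation and release primitives. */
int foo_alloc_rx_ring(struct foo_priv *priv);
int foo_alloc_tx_ring(struct foo_priv *priv);
int foo_fill_rx_buffers(struct foo_priv *priv);
void foo_free_rx_ring(struct foo_priv *priv);
void foo_free_tx_ring(struct foo_priv *priv);

int foo_init_rings(struct foo_priv *priv)
{
	int ret;

	ret = foo_alloc_rx_ring(priv);
	if (ret)
		goto err;
	ret = foo_alloc_tx_ring(priv);
	if (ret)
		goto err_free_rx;
	ret = foo_fill_rx_buffers(priv);
	if (ret)
		goto err_free_tx;
	return 0;

	/* Labels in reverse allocation order: failing at step N releases
	 * exactly the N - 1 resources already set up. */
err_free_tx:
	foo_free_tx_ring(priv);
err_free_rx:
	foo_free_rx_ring(priv);
err:
	return ret;
}
```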