Diffstat (limited to 'drivers/net/ethernet/realtek')
 drivers/net/ethernet/realtek/8139cp.c | 10 ++++++++--
 drivers/net/ethernet/realtek/r8169.c  | 16 ++++++++++------
 2 files changed, 18 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index abc79076f867..b3287c0fe279 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -958,6 +958,11 @@ static inline void cp_start_hw (struct cp_private *cp)
cpw8(Cmd, RxOn | TxOn);
}
+static void cp_enable_irq(struct cp_private *cp)
+{
+ cpw16_f(IntrMask, cp_intr_mask);
+}
+
static void cp_init_hw (struct cp_private *cp)
{
struct net_device *dev = cp->dev;
@@ -997,8 +1002,6 @@ static void cp_init_hw (struct cp_private *cp)
cpw16(MultiIntr, 0);
- cpw16_f(IntrMask, cp_intr_mask);
-
cpw8_f(Cfg9346, Cfg9346_Lock);
}
@@ -1130,6 +1133,8 @@ static int cp_open (struct net_device *dev)
if (rc)
goto err_out_hw;
+ cp_enable_irq(cp);
+
netif_carrier_off(dev);
mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
netif_start_queue(dev);
@@ -2031,6 +2036,7 @@ static int cp_resume (struct pci_dev *pdev)
/* FIXME: sh*t may happen if the Rx ring buffer is depleted */
cp_init_rings_index (cp);
cp_init_hw (cp);
+ cp_enable_irq(cp);
netif_start_queue (dev);
spin_lock_irqsave (&cp->lock, flags);
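
The 8139cp.c hunks above pull the IntrMask write out of cp_init_hw() into a new
cp_enable_irq() helper, so both cp_open() and cp_resume() unmask interrupts only
after the rest of their setup has run. A minimal user-space sketch of that
ordering follows; the register offset, the 0xffff mask value and the stubbed
bodies are placeholders, only the cp_init_hw()/cp_enable_irq() split and its
position at the end of the open/resume path mirror the patch.

#include <stdint.h>
#include <stdio.h>

#define IntrMask     0x3C        /* illustrative register offset            */
#define cp_intr_mask 0xFFFF      /* placeholder; the driver ORs event bits  */

struct cp_private { uint16_t regs[0x100]; };   /* fake register file        */

static void cpw16_f(struct cp_private *cp, int reg, uint16_t v)
{
	cp->regs[reg] = v;       /* real driver: MMIO write + read-back flush */
}

static void cp_init_hw(struct cp_private *cp)
{
	/* ... reset, ring addresses, Rx/Tx enable ...
	 * note: no IntrMask write here any more, interrupts stay masked
	 */
	(void)cp;
}

static void cp_enable_irq(struct cp_private *cp)
{
	cpw16_f(cp, IntrMask, cp_intr_mask);   /* unmask only once ready      */
}

static void cp_open_like_path(struct cp_private *cp)
{
	cp_init_hw(cp);          /* hardware up, interrupts still masked      */
	/* request_irq() / ring setup would happen here in the driver         */
	cp_enable_irq(cp);       /* last step: allow interrupts to fire       */
}

int main(void)
{
	struct cp_private cp = { { 0 } };

	cp_open_like_path(&cp);
	printf("IntrMask = 0x%04x\n", (unsigned)cp.regs[IntrMask]);
	return 0;
}
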
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index f54509377efa..ce6b44d1f252 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -61,8 +61,12 @@
#define R8169_MSG_DEFAULT \
(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
-#define TX_BUFFS_AVAIL(tp) \
- (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
+#define TX_SLOTS_AVAIL(tp) \
+ (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
+
+/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
+#define TX_FRAGS_READY_FOR(tp,nr_frags) \
+ (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
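
Arithmetically the new macros stop and wake the queue at the same fill level as
the old TX_BUFFS_AVAIL(); the rework makes the accounting explicit: cur_tx and
dirty_tx are free-running counters, dirty_tx + NUM_TX_DESC - cur_tx is the
number of free descriptors, and a skb needs one descriptor for its linear head
plus one per fragment. A standalone sketch of that arithmetic with made-up
counter values:

#include <stdio.h>

#define NUM_TX_DESC 64                    /* ring size, as in the driver     */

struct tx_state { unsigned int cur_tx, dirty_tx; };

/* Old form: the "+1 for the head descriptor" was folded into a "- 1". */
#define TX_BUFFS_AVAIL(tp) \
	((tp)->dirty_tx + NUM_TX_DESC - (tp)->cur_tx - 1)

/* New form: free slots, plus an explicit "head + nr_frags fit" predicate. */
#define TX_SLOTS_AVAIL(tp) \
	((tp)->dirty_tx + NUM_TX_DESC - (tp)->cur_tx)
#define TX_FRAGS_READY_FOR(tp, nr_frags) \
	(TX_SLOTS_AVAIL(tp) >= (nr_frags) + 1)

int main(void)
{
	/* 70 + 64 - 130 = 4 free descriptors */
	struct tx_state tp = { .cur_tx = 130, .dirty_tx = 70 };
	unsigned int nr_frags = 3;        /* head + 3 frags = 4 descriptors   */

	/* Both tests accept or reject the same skbs; the new one states the
	 * "nr_frags + 1" requirement directly instead of hiding it in the
	 * macro's "- 1".
	 */
	printf("old: %d  new: %d\n",
	       !(TX_BUFFS_AVAIL(&tp) < nr_frags),
	       TX_FRAGS_READY_FOR(&tp, nr_frags));
	return 0;
}
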
@@ -5115,7 +5119,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
u32 opts[2];
int frags;
- if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
+ if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
goto err_stop_0;
}
@@ -5169,7 +5173,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
mmiowb();
- if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
+ if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
* not miss a ring update when it notices a stopped queue.
*/
@@ -5183,7 +5187,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
* can't.
*/
smp_mb();
- if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
+ if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
netif_wake_queue(dev);
}
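
The two hunks above are the producer half of the flow-control protocol: after
queueing a packet, stop the queue if a worst-case skb would no longer fit, then
re-check after a full barrier so a completion racing with the stop is not lost.
Reduced to that control flow, with user-space stand-ins for the kernel
primitives so the sketch compiles on its own (the stubs and values are
illustrative, not the driver's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_TX_DESC   64
#define MAX_SKB_FRAGS 17                       /* illustrative value         */

struct tx_ring { unsigned int cur_tx, dirty_tx; bool queue_stopped; };

#define TX_SLOTS_AVAIL(tp)       ((tp)->dirty_tx + NUM_TX_DESC - (tp)->cur_tx)
#define TX_FRAGS_READY_FOR(tp,n) (TX_SLOTS_AVAIL(tp) >= (n) + 1)

static void netif_stop_queue(struct tx_ring *tp) { tp->queue_stopped = true;  }
static void netif_wake_queue(struct tx_ring *tp) { tp->queue_stopped = false; }
#define smp_mb() atomic_thread_fence(memory_order_seq_cst)

/* Producer side (rtl8169_start_xmit): after queueing one skb, stop the queue
 * if a worst-case skb would no longer fit, then re-check after a full barrier
 * so a completion that raced with the stop is not missed.
 */
static void xmit_one(struct tx_ring *tp, unsigned int nr_frags)
{
	if (!TX_FRAGS_READY_FOR(tp, nr_frags)) {
		fprintf(stderr, "BUG! Tx Ring full when queue awake!\n");
		return;
	}

	tp->cur_tx += nr_frags + 1;    /* head descriptor + one per fragment */

	if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
		netif_stop_queue(tp);
		smp_mb();              /* pairs with smp_mb() in rtl_tx()     */
		if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
			netif_wake_queue(tp);
	}
}

int main(void)
{
	struct tx_ring tp = { 0 };

	for (int i = 0; i < 5 && !tp.queue_stopped; i++)
		xmit_one(&tp, 16);     /* 17 descriptors per iteration        */
	printf("stopped=%d slots=%u\n", tp.queue_stopped, TX_SLOTS_AVAIL(&tp));
	return 0;
}
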
@@ -5306,7 +5310,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
*/
smp_mb();
if (netif_queue_stopped(dev) &&
- (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
+ TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
netif_wake_queue(dev);
}
/*
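
The final hunk is the consumer half of the same protocol: rtl_tx() gives
descriptors back by advancing dirty_tx and only checks the stopped flag after a
barrier that pairs with the one in rtl8169_start_xmit(). A reduced
single-threaded sketch under the same kind of stand-ins (the real code runs
concurrently from NAPI context, which is not modelled here):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_TX_DESC   64
#define MAX_SKB_FRAGS 17                       /* illustrative value         */

struct tx_ring { unsigned int cur_tx, dirty_tx; bool queue_stopped; };

#define TX_SLOTS_AVAIL(tp)       ((tp)->dirty_tx + NUM_TX_DESC - (tp)->cur_tx)
#define TX_FRAGS_READY_FOR(tp,n) (TX_SLOTS_AVAIL(tp) >= (n) + 1)
#define smp_mb()                 atomic_thread_fence(memory_order_seq_cst)

static bool netif_queue_stopped(struct tx_ring *tp) { return tp->queue_stopped; }
static void netif_wake_queue(struct tx_ring *tp)    { tp->queue_stopped = false; }

/* Consumer side (rtl_tx): reclaim completed descriptors, publish the new
 * dirty_tx, then look at the stopped flag only after a full barrier, so a
 * producer that stopped the queue just before the update is woken either here
 * or by its own post-barrier re-check -- never by neither.
 */
static void tx_complete(struct tx_ring *tp, unsigned int done)
{
	tp->dirty_tx += done;          /* descriptors given back to the ring  */

	smp_mb();                      /* pairs with smp_mb() in start_xmit() */
	if (netif_queue_stopped(tp) &&
	    TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
		netif_wake_queue(tp);
}

int main(void)
{
	/* Queue stopped with a nearly full ring; completing 20 descriptors
	 * frees enough room for a worst-case skb, so the queue is woken.
	 */
	struct tx_ring tp = { .cur_tx = 60, .dirty_tx = 0,
			      .queue_stopped = true };

	tx_complete(&tp, 20);
	printf("stopped=%d slots=%u\n", tp.queue_stopped, TX_SLOTS_AVAIL(&tp));
	return 0;
}
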