-rw-r--r--   include/linux/netdevice.h  | 7
-rw-r--r--   net/core/dev.c             | 4
-rw-r--r--   net/core/netpoll.c         | 4
-rw-r--r--   net/core/pktgen.c          | 6
-rw-r--r--   net/sched/sch_teql.c       | 4
5 files changed, 15 insertions, 10 deletions
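
This patch changes netif_subqueue_stopped() to take the sk_buff itself rather than a raw queue index, so callers no longer reach into skb->queue_mapping by hand; the index-based check survives as __netif_subqueue_stopped() for the one caller (sch_teql) that already holds the index. A minimal sketch of the resulting helper pair, assuming the pre-existing per-subqueue XOFF test, which this diff only renames and does not show in full:

static inline int __netif_subqueue_stopped(const struct net_device *dev,
					   u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	/* Assumed unchanged body of the old netif_subqueue_stopped():
	 * test the per-subqueue XOFF bit. */
	return test_bit(__LINK_STATE_XOFF,
			&dev->egress_subqueue[queue_index].state);
#else
	return 0;
#endif
}

/* New wrapper added by this patch: derive the index from the skb. */
static inline int netif_subqueue_stopped(const struct net_device *dev,
					 struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
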
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 6f85db3535e2..4a3f54e358e5 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -996,7 +996,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
*
* Check individual transmit queue of a device with multiple transmit queues.
*/
-static inline int netif_subqueue_stopped(const struct net_device *dev,
+static inline int __netif_subqueue_stopped(const struct net_device *dev,
u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
@@ -1007,6 +1007,11 @@ static inline int netif_subqueue_stopped(const struct net_device *dev,
#endif
}
+static inline int netif_subqueue_stopped(const struct net_device *dev,
+ struct sk_buff *skb)
+{
+ return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
+}
/**
* netif_wake_subqueue - allow sending packets on subqueue
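
For the callers converted below (dev.c, netpoll.c, pktgen.c) the change is mechanical: wherever the transmit path peeked at skb->queue_mapping, it now hands the skb to the helper. An illustrative before/after of that pattern, modelled on the dev.c hunk rather than quoted from it:

	/* before: the caller extracts the queue index itself */
	if (netif_queue_stopped(dev) ||
	    netif_subqueue_stopped(dev, skb->queue_mapping))
		return NETDEV_TX_BUSY;

	/* after: the helper hides queue_mapping behind skb_get_queue_mapping() */
	if (netif_queue_stopped(dev) ||
	    netif_subqueue_stopped(dev, skb))
		return NETDEV_TX_BUSY;
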
diff --git a/net/core/dev.c b/net/core/dev.c
index 1672cc134853..872658927e47 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1553,7 +1553,7 @@ gso:
return rc;
}
if (unlikely((netif_queue_stopped(dev) ||
- netif_subqueue_stopped(dev, skb->queue_mapping)) &&
+ netif_subqueue_stopped(dev, skb)) &&
skb->next))
return NETDEV_TX_BUSY;
} while (skb->next);
@@ -1692,7 +1692,7 @@ gso:
HARD_TX_LOCK(dev, cpu);
if (!netif_queue_stopped(dev) &&
- !netif_subqueue_stopped(dev, skb->queue_mapping)) {
+ !netif_subqueue_stopped(dev, skb)) {
rc = 0;
if (!dev_hard_start_xmit(skb, dev)) {
HARD_TX_UNLOCK(dev);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 95daba624967..bf8d18f1b013 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -67,7 +67,7 @@ static void queue_process(struct work_struct *work)
local_irq_save(flags);
netif_tx_lock(dev);
if ((netif_queue_stopped(dev) ||
- netif_subqueue_stopped(dev, skb->queue_mapping)) ||
+ netif_subqueue_stopped(dev, skb)) ||
dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
skb_queue_head(&npinfo->txq, skb);
netif_tx_unlock(dev);
@@ -269,7 +269,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
tries > 0; --tries) {
if (netif_tx_trylock(dev)) {
if (!netif_queue_stopped(dev) &&
- !netif_subqueue_stopped(dev, skb->queue_mapping))
+ !netif_subqueue_stopped(dev, skb))
status = dev->hard_start_xmit(skb, dev);
netif_tx_unlock(dev);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index b78235e5a7f4..de33f36947e9 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3383,7 +3383,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
if ((netif_queue_stopped(odev) ||
(pkt_dev->skb &&
- netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping))) ||
+ netif_subqueue_stopped(odev, pkt_dev->skb))) ||
need_resched()) {
idle_start = getCurUs();
@@ -3400,7 +3400,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->idle_acc += getCurUs() - idle_start;
if (netif_queue_stopped(odev) ||
- netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) {
+ netif_subqueue_stopped(odev, pkt_dev->skb)) {
pkt_dev->next_tx_us = getCurUs(); /* TODO */
pkt_dev->next_tx_ns = 0;
goto out; /* Try the next interface */
@@ -3429,7 +3429,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
netif_tx_lock_bh(odev);
if (!netif_queue_stopped(odev) &&
- !netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) {
+ !netif_subqueue_stopped(odev, pkt_dev->skb)) {
atomic_inc(&(pkt_dev->skb->users));
retry_now:
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index a9fad7162b5d..421281d9dd1d 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -284,7 +284,7 @@ restart:
if (slave->qdisc_sleeping != q)
continue;
if (netif_queue_stopped(slave) ||
- netif_subqueue_stopped(slave, subq) ||
+ __netif_subqueue_stopped(slave, subq) ||
!netif_running(slave)) {
busy = 1;
continue;
@@ -294,7 +294,7 @@ restart:
case 0:
if (netif_tx_trylock(slave)) {
if (!netif_queue_stopped(slave) &&
- !netif_subqueue_stopped(slave, subq) &&
+ !__netif_subqueue_stopped(slave, subq) &&
slave->hard_start_xmit(skb, slave) == 0) {
netif_tx_unlock(slave);
master->slaves = NEXT_SLAVE(q);
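
sch_teql.c is the one caller switched to the double-underscore variant: it resolves the subqueue index from the skb queued on the master device and then tests that same index against each slave, so passing the raw index avoids recomputing it for every slave. A hedged sketch of that pattern (subq is derived earlier in the real function; it is shown inline here purely for illustration):

	/* Illustrative only: check one slave for the master skb's subqueue. */
	u16 subq = skb_get_queue_mapping(skb);

	if (netif_queue_stopped(slave) ||
	    __netif_subqueue_stopped(slave, subq) ||
	    !netif_running(slave)) {
		busy = 1;
		continue;
	}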