author      Pradeep A Dalvi <netdev@pradeepdalvi.com>      2012-02-05 02:49:09 +0000
committer   David S. Miller <davem@davemloft.net>          2012-02-06 11:41:20 -0500
commit      1d266430546acf01438ae42d0a7370db4817e2ad (patch)
tree        149268ba8ad2b3288ae001e5ab28146e4361a6d0 /drivers/net/ethernet/adaptec
parent      3238a9be4d7ad95c741bcfe6c147406eeef62d95 (diff)
download    linux-1d266430546acf01438ae42d0a7370db4817e2ad.tar.bz2
netdev: ethernet dev_alloc_skb to netdev_alloc_skb
Replaced deprecated dev_alloc_skb with netdev_alloc_skb in drivers/net/ethernet
  - Removed extra skb->dev = dev after netdev_alloc_skb

Signed-off-by: Pradeep A Dalvi <netdev@pradeepdalvi.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
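The shape of the conversion, as a minimal sketch rather than code taken from starfire.c (the helper name my_rx_refill and its buf_sz parameter are made up for illustration): dev_alloc_skb() returns an skb with no device attached, so drivers followed it with an explicit skb->dev = dev; netdev_alloc_skb() takes the net_device as its first argument and associates the skb with it, which is why the manual assignments disappear in the hunks below.

/* Hypothetical RX-refill helper, not from starfire.c, showing the
 * old and new allocation patterns side by side. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int my_rx_refill(struct net_device *dev, unsigned int buf_sz)
{
        struct sk_buff *skb;

        /* Old pattern:
         *      skb = dev_alloc_skb(buf_sz);
         *      if (skb == NULL)
         *              return -ENOMEM;
         *      skb->dev = dev;         // device had to be set by hand
         */

        /* New pattern: netdev_alloc_skb() takes the device and sets
         * skb->dev itself, so the separate assignment is dropped. */
        skb = netdev_alloc_skb(dev, buf_sz);
        if (skb == NULL)
                return -ENOMEM;

        /* ... map skb->data for DMA and post it to the RX ring ... */
        dev_kfree_skb(skb);     /* placeholder so the sketch balances alloc/free */
        return 0;
}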
Diffstat (limited to 'drivers/net/ethernet/adaptec')
-rw-r--r--   drivers/net/ethernet/adaptec/starfire.c   8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 11fc2eccb0fd..d896816512ca 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1179,12 +1179,11 @@ static void init_ring(struct net_device *dev)
 	/* Fill in the Rx buffers. Handle allocation failure gracefully. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
 		np->rx_info[i].skb = skb;
 		if (skb == NULL)
 			break;
 		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
-		skb->dev = dev;		/* Mark as being used by this device. */
 		/* Grrr, we cannot offset to correctly align the IP header. */
 		np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
 	}
@@ -1472,7 +1471,7 @@ static int __netdev_rx(struct net_device *dev, int *quota)
 		/* Check if the packet is long enough to accept without copying
 		   to a minimally-sized skbuff. */
 		if (pkt_len < rx_copybreak &&
-		    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+		    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 			skb_reserve(skb, 2);	/* 16 byte align the IP header */
 			pci_dma_sync_single_for_cpu(np->pci_dev,
 						    np->rx_info[entry].mapping,
@@ -1596,13 +1595,12 @@ static void refill_rx_ring(struct net_device *dev)
 	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
 		entry = np->dirty_rx % RX_RING_SIZE;
 		if (np->rx_info[entry].skb == NULL) {
-			skb = dev_alloc_skb(np->rx_buf_sz);
+			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
 			np->rx_info[entry].skb = skb;
 			if (skb == NULL)
 				break;	/* Better luck next round. */
 			np->rx_info[entry].mapping =
 				pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
-			skb->dev = dev;	/* Mark as being used by this device. */
 			np->rx_ring[entry].rxaddr =
 				cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
 		}