author    James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-11-22 12:06:44 -0600
committer James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-11-22 12:06:44 -0600
commit    0bd2af46839ad6262d25714a6ec0365db9d6b98f (patch)
tree      dcced72d230d69fd0c5816ac6dd03ab84799a93e /drivers/net
parent    e138a5d2356729b8752e88520cc1525fae9794ac (diff)
parent    f26b90440cd74c78fe10c9bd5160809704a9627c (diff)
download  linux-0bd2af46839ad6262d25714a6ec0365db9d6b98f.tar.bz2
Merge ../scsi-rc-fixes-2.6
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig | 21
-rw-r--r--  drivers/net/arcnet/com20020.c | 7
-rw-r--r--  drivers/net/arm/ep93xx_eth.c | 40
-rw-r--r--  drivers/net/au1000_eth.c | 2
-rw-r--r--  drivers/net/b44.c | 14
-rw-r--r--  drivers/net/bonding/bond_alb.c | 4
-rw-r--r--  drivers/net/bonding/bond_main.c | 5
-rw-r--r--  drivers/net/cris/eth_v10.c | 2
-rw-r--r--  drivers/net/e100.c | 54
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c | 4
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 6
-rw-r--r--  drivers/net/e1000/e1000_main.c | 52
-rw-r--r--  drivers/net/ehea/ehea.h | 16
-rw-r--r--  drivers/net/ehea/ehea_ethtool.c | 2
-rw-r--r--  drivers/net/ehea/ehea_main.c | 35
-rw-r--r--  drivers/net/ehea/ehea_phyp.c | 575
-rw-r--r--  drivers/net/ehea/ehea_phyp.h | 6
-rw-r--r--  drivers/net/ehea/ehea_qmr.c | 17
-rw-r--r--  drivers/net/eth16i.c | 2
-rw-r--r--  drivers/net/forcedeth.c | 31
-rw-r--r--  drivers/net/ibmveth.c | 64
-rw-r--r--  drivers/net/ioc3-eth.c | 4
-rw-r--r--  drivers/net/irda/stir4200.c | 3
-rw-r--r--  drivers/net/loopback.c | 39
-rw-r--r--  drivers/net/mv643xx_eth.c | 4
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 1
-rw-r--r--  drivers/net/myri_code.h | 8
-rw-r--r--  drivers/net/myri_sbus.c | 4
-rw-r--r--  drivers/net/r8169.c | 45
-rw-r--r--  drivers/net/s2io.c | 11
-rw-r--r--  drivers/net/sb1250-mac.c | 2
-rw-r--r--  drivers/net/skge.c | 223
-rw-r--r--  drivers/net/skge.h | 25
-rw-r--r--  drivers/net/sky2.c | 278
-rw-r--r--  drivers/net/sky2.h | 60
-rw-r--r--  drivers/net/smc91x.h | 36
-rw-r--r--  drivers/net/spider_net.c | 246
-rw-r--r--  drivers/net/spider_net.h | 35
-rw-r--r--  drivers/net/spider_net_ethtool.c | 6
-rw-r--r--  drivers/net/sun3_82586.c | 2
-rw-r--r--  drivers/net/sun3lance.c | 6
-rw-r--r--  drivers/net/sunbmac.c | 2
-rw-r--r--  drivers/net/tg3.c | 67
-rw-r--r--  drivers/net/tokenring/proteon.c | 9
-rw-r--r--  drivers/net/tokenring/skisa.c | 9
-rw-r--r--  drivers/net/tulip/de2104x.c | 8
-rw-r--r--  drivers/net/ucc_geth.c | 633
-rw-r--r--  drivers/net/ucc_geth.h | 248
-rw-r--r--  drivers/net/ucc_geth_phy.c | 26
-rw-r--r--  drivers/net/ucc_geth_phy.h | 2
-rw-r--r--  drivers/net/wan/Kconfig | 2
-rw-r--r--  drivers/net/wan/n2.c | 6
-rw-r--r--  drivers/net/wan/pc300_drv.c | 24
-rw-r--r--  drivers/net/wireless/airo.c | 105
-rw-r--r--  drivers/net/wireless/atmel.c | 2
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_dma.c | 28
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_dma.h | 18
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_leds.c | 9
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_leds.h | 6
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.c | 72
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_wx.c | 2
-rw-r--r--  drivers/net/wireless/hostap/hostap_cs.c | 7
-rw-r--r--  drivers/net/wireless/hostap/hostap_plx.c | 4
-rw-r--r--  drivers/net/wireless/orinoco.c | 16
-rw-r--r--  drivers/net/wireless/ray_cs.c | 1
-rw-r--r--  drivers/net/wireless/zd1201.c | 6
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.c | 2
67 files changed, 1840 insertions(+), 1471 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ab92cc794c64..6e863aa9894c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -486,7 +486,7 @@ config SGI_IOC3_ETH_HW_TX_CSUM
config MIPS_SIM_NET
tristate "MIPS simulator Network device (EXPERIMENTAL)"
- depends on NETDEVICES && MIPS_SIM && EXPERIMENTAL
+ depends on MIPS_SIM && EXPERIMENTAL
help
The MIPSNET device is a simple Ethernet network device which is
emulated by the MIPS Simulator.
@@ -2112,7 +2112,7 @@ config SKGE
config SKY2
tristate "SysKonnect Yukon2 support (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
+ depends on PCI
select CRC32
---help---
This driver supports Gigabit Ethernet adapters based on the
@@ -2120,8 +2120,8 @@ config SKY2
Marvell 88E8021/88E8022/88E8035/88E8036/88E8038/88E8050/88E8052/
88E8053/88E8055/88E8061/88E8062, SysKonnect SK-9E21D/SK-9S21
- This driver does not support the original Yukon chipset: a seperate
- driver, skge, is provided for Yukon-based adapters.
+ There is companion driver for the older Marvell Yukon and
+ Genesis based adapters: skge.
To compile this driver as a module, choose M here: the module
will be called sky2. This is recommended.
@@ -2288,7 +2288,7 @@ config UGETH_TX_ON_DEMOND
config UGETH_HAS_GIGA
bool
- depends on UCC_GETH && MPC836x
+ depends on UCC_GETH && PPC_MPC836x
config MV643XX_ETH
tristate "MV-643XX Ethernet support"
@@ -2467,7 +2467,7 @@ config ISERIES_VETH
config RIONET
tristate "RapidIO Ethernet over messaging driver support"
- depends on NETDEVICES && RAPIDIO
+ depends on RAPIDIO
config RIONET_TX_SIZE
int "Number of outbound queue entries"
@@ -2717,6 +2717,7 @@ config PPP_MPPE
select CRYPTO
select CRYPTO_SHA1
select CRYPTO_ARC4
+ select CRYPTO_ECB
---help---
Support for the MPPE Encryption protocol, as employed by the
Microsoft Point-to-Point Tunneling Protocol.
@@ -2832,7 +2833,7 @@ config NET_FC
"SCSI generic support".
config SHAPER
- tristate "Traffic Shaper (EXPERIMENTAL)"
+ tristate "Traffic Shaper (OBSOLETE)"
depends on EXPERIMENTAL
---help---
The traffic shaper is a virtual network device that allows you to
@@ -2841,9 +2842,9 @@ config SHAPER
these virtual devices. See
<file:Documentation/networking/shaper.txt> for more information.
- An alternative to this traffic shaper is the experimental
- Class-Based Queuing (CBQ) scheduling support which you get if you
- say Y to "QoS and/or fair queuing" above.
+ An alternative to this traffic shaper are traffic schedulers which
+ you'll get if you say Y to "QoS and/or fair queuing" in
+ "Networking options".
To compile this driver as a module, choose M here: the module
will be called shaper. If unsure, say N.
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 0dc70c7b7940..aa9dd8f11269 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -337,13 +337,16 @@ static void com20020_set_mc_list(struct net_device *dev)
}
}
-#ifdef MODULE
-
+#if defined(CONFIG_ARCNET_COM20020_PCI_MODULE) || \
+ defined(CONFIG_ARCNET_COM20020_ISA_MODULE)
EXPORT_SYMBOL(com20020_check);
EXPORT_SYMBOL(com20020_found);
+#endif
MODULE_LICENSE("GPL");
+#ifdef MODULE
+
int init_module(void)
{
BUGLVL(D_NORMAL) printk(VERSION);
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 127561c782fd..8ebd68e2af98 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -193,12 +193,9 @@ static struct net_device_stats *ep93xx_get_stats(struct net_device *dev)
static int ep93xx_rx(struct net_device *dev, int *budget)
{
struct ep93xx_priv *ep = netdev_priv(dev);
- int tail_offset;
int rx_done;
int processed;
- tail_offset = rdl(ep, REG_RXSTSQCURADD) - ep->descs_dma_addr;
-
rx_done = 0;
processed = 0;
while (*budget > 0) {
@@ -211,36 +208,28 @@ static int ep93xx_rx(struct net_device *dev, int *budget)
entry = ep->rx_pointer;
rstat = ep->descs->rstat + entry;
- if ((void *)rstat - (void *)ep->descs == tail_offset) {
+
+ rstat0 = rstat->rstat0;
+ rstat1 = rstat->rstat1;
+ if (!(rstat0 & RSTAT0_RFP) || !(rstat1 & RSTAT1_RFP)) {
rx_done = 1;
break;
}
- rstat0 = rstat->rstat0;
- rstat1 = rstat->rstat1;
rstat->rstat0 = 0;
rstat->rstat1 = 0;
- if (!(rstat0 & RSTAT0_RFP))
- printk(KERN_CRIT "ep93xx_rx: buffer not done "
- " %.8x %.8x\n", rstat0, rstat1);
if (!(rstat0 & RSTAT0_EOF))
printk(KERN_CRIT "ep93xx_rx: not end-of-frame "
" %.8x %.8x\n", rstat0, rstat1);
if (!(rstat0 & RSTAT0_EOB))
printk(KERN_CRIT "ep93xx_rx: not end-of-buffer "
" %.8x %.8x\n", rstat0, rstat1);
- if (!(rstat1 & RSTAT1_RFP))
- printk(KERN_CRIT "ep93xx_rx: buffer1 not done "
- " %.8x %.8x\n", rstat0, rstat1);
if ((rstat1 & RSTAT1_BUFFER_INDEX) >> 16 != entry)
printk(KERN_CRIT "ep93xx_rx: entry mismatch "
" %.8x %.8x\n", rstat0, rstat1);
if (!(rstat0 & RSTAT0_RWE)) {
- printk(KERN_NOTICE "ep93xx_rx: receive error "
- " %.8x %.8x\n", rstat0, rstat1);
-
ep->stats.rx_errors++;
if (rstat0 & RSTAT0_OE)
ep->stats.rx_fifo_errors++;
@@ -301,13 +290,8 @@ err:
static int ep93xx_have_more_rx(struct ep93xx_priv *ep)
{
- struct ep93xx_rstat *rstat;
- int tail_offset;
-
- rstat = ep->descs->rstat + ep->rx_pointer;
- tail_offset = rdl(ep, REG_RXSTSQCURADD) - ep->descs_dma_addr;
-
- return !((void *)rstat - (void *)ep->descs == tail_offset);
+ struct ep93xx_rstat *rstat = ep->descs->rstat + ep->rx_pointer;
+ return !!((rstat->rstat0 & RSTAT0_RFP) && (rstat->rstat1 & RSTAT1_RFP));
}
static int ep93xx_poll(struct net_device *dev, int *budget)
@@ -347,7 +331,7 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
struct ep93xx_priv *ep = netdev_priv(dev);
int entry;
- if (unlikely(skb->len) > MAX_PKT_SIZE) {
+ if (unlikely(skb->len > MAX_PKT_SIZE)) {
ep->stats.tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -379,10 +363,8 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
static void ep93xx_tx_complete(struct net_device *dev)
{
struct ep93xx_priv *ep = netdev_priv(dev);
- int tail_offset;
int wake;
- tail_offset = rdl(ep, REG_TXSTSQCURADD) - ep->descs_dma_addr;
wake = 0;
spin_lock(&ep->tx_pending_lock);
@@ -393,15 +375,13 @@ static void ep93xx_tx_complete(struct net_device *dev)
entry = ep->tx_clean_pointer;
tstat = ep->descs->tstat + entry;
- if ((void *)tstat - (void *)ep->descs == tail_offset)
- break;
tstat0 = tstat->tstat0;
+ if (!(tstat0 & TSTAT0_TXFP))
+ break;
+
tstat->tstat0 = 0;
- if (!(tstat0 & TSTAT0_TXFP))
- printk(KERN_CRIT "ep93xx_tx_complete: buffer not done "
- " %.8x\n", tstat0);
if (tstat0 & TSTAT0_FA)
printk(KERN_CRIT "ep93xx_tx_complete: frame aborted "
" %.8x\n", tstat0);
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 4873dc610d22..7db3c8af0894 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -102,7 +102,7 @@ static void enable_mac(struct net_device *, int);
// externs
extern int get_ethernet_addr(char *ethernet_addr);
extern void str2eaddr(unsigned char *ea, unsigned char *str);
-extern char * __init prom_getcmdline(void);
+extern char * prom_getcmdline(void);
/*
* Theory of operation
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index b124eee4eb10..474a4e3438db 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -908,8 +908,9 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id)
istat = br32(bp, B44_ISTAT);
imask = br32(bp, B44_IMASK);
- /* ??? What the fuck is the purpose of the interrupt mask
- * ??? register if we have to mask it out by hand anyways?
+ /* The interrupt mask register controls which interrupt bits
+ * will actually raise an interrupt to the CPU when set by hw/firmware,
+ * but doesn't mask off the bits.
*/
istat &= imask;
if (istat) {
@@ -1706,14 +1707,15 @@ static void __b44_set_rx_mode(struct net_device *dev)
__b44_set_mac_addr(bp);
- if (dev->flags & IFF_ALLMULTI)
+ if ((dev->flags & IFF_ALLMULTI) ||
+ (dev->mc_count > B44_MCAST_TABLE_SIZE))
val |= RXCONFIG_ALLMULTI;
else
i = __b44_load_mcast(bp, dev);
- for (; i < 64; i++) {
+ for (; i < 64; i++)
__b44_cam_write(bp, zero, i);
- }
+
bw32(bp, B44_RXCONFIG, val);
val = br32(bp, B44_CAM_CTRL);
bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
@@ -2055,7 +2057,7 @@ static int b44_read_eeprom(struct b44 *bp, u8 *data)
u16 *ptr = (u16 *) data;
for (i = 0; i < 128; i += 2)
- ptr[i / 2] = readw(bp->regs + 4096 + i);
+ ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
return 0;
}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index e83bc825f6af..32923162179e 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1433,7 +1433,7 @@ void bond_alb_monitor(struct bonding *bond)
* write lock to protect from other code that also
* sets the promiscuity.
*/
- write_lock(&bond->curr_slave_lock);
+ write_lock_bh(&bond->curr_slave_lock);
if (bond_info->primary_is_promisc &&
(++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) {
@@ -1448,7 +1448,7 @@ void bond_alb_monitor(struct bonding *bond)
bond_info->primary_is_promisc = 0;
}
- write_unlock(&bond->curr_slave_lock);
+ write_unlock_bh(&bond->curr_slave_lock);
if (bond_info->rlb_rebalance) {
bond_info->rlb_rebalance = 0;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c0bbddae4ec4..17a461152d39 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4692,6 +4692,8 @@ static int bond_check_params(struct bond_params *params)
return 0;
}
+static struct lock_class_key bonding_netdev_xmit_lock_key;
+
/* Create a new bond based on the specified name and bonding parameters.
* Caller must NOT hold rtnl_lock; we need to release it here before we
* set up our sysfs entries.
@@ -4727,6 +4729,9 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
if (res < 0) {
goto out_bond;
}
+
+ lockdep_set_class(&bond_dev->_xmit_lock, &bonding_netdev_xmit_lock_key);
+
if (newbond)
*newbond = bond_dev->priv;
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 966b563e42bb..a03d781f6d0a 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -509,6 +509,8 @@ etrax_ethernet_init(void)
* does not share cacheline with any other data (to avoid cache bug)
*/
RxDescList[i].skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
+ if (!RxDescList[i].skb)
+ return -ENOMEM;
RxDescList[i].descr.ctrl = 0;
RxDescList[i].descr.sw_len = MAX_MEDIA_DATA_SIZE;
RxDescList[i].descr.next = virt_to_phys(&RxDescList[i + 1]);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 27d5d2f02533..19ab3441269c 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2039,7 +2039,6 @@ static int e100_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-#ifdef CONFIG_PM
static int e100_asf(struct nic *nic)
{
/* ASF can be enabled from eeprom */
@@ -2048,7 +2047,6 @@ static int e100_asf(struct nic *nic)
!(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
}
-#endif
static int e100_up(struct nic *nic)
{
@@ -2715,34 +2713,35 @@ static void __devexit e100_remove(struct pci_dev *pdev)
}
}
+#ifdef CONFIG_PM
static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct nic *nic = netdev_priv(netdev);
+#ifdef CONFIG_E100_NAPI
if (netif_running(netdev))
- e100_down(nic);
- e100_hw_reset(nic);
- netif_device_detach(netdev);
+ netif_poll_disable(nic->netdev);
+#endif
+ del_timer_sync(&nic->watchdog);
+ netif_carrier_off(nic->netdev);
-#ifdef CONFIG_PM
pci_save_state(pdev);
- if (nic->flags & (wol_magic | e100_asf(nic)))
-#else
- if (nic->flags & (wol_magic))
-#endif
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
- else
- /* disable PME */
- pci_enable_wake(pdev, 0, 0);
+
+ if ((nic->flags & wol_magic) | e100_asf(nic)) {
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ } else {
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+ }
pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
-#ifdef CONFIG_PM
static int e100_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -2764,7 +2763,26 @@ static int e100_resume(struct pci_dev *pdev)
static void e100_shutdown(struct pci_dev *pdev)
{
- e100_suspend(pdev, PMSG_SUSPEND);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct nic *nic = netdev_priv(netdev);
+
+#ifdef CONFIG_E100_NAPI
+ if (netif_running(netdev))
+ netif_poll_disable(nic->netdev);
+#endif
+ del_timer_sync(&nic->watchdog);
+ netif_carrier_off(nic->netdev);
+
+ if ((nic->flags & wol_magic) | e100_asf(nic)) {
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ } else {
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+ }
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
}
/* ------------------ PCI Error Recovery infrastructure -------------- */
@@ -2848,9 +2866,9 @@ static struct pci_driver e100_driver = {
.id_table = e100_id_table,
.probe = e100_probe,
.remove = __devexit_p(e100_remove),
+#ifdef CONFIG_PM
/* Power Management hooks */
.suspend = e100_suspend,
-#ifdef CONFIG_PM
.resume = e100_resume,
#endif
.shutdown = e100_shutdown,
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 773821e4cf57..c564adbd669b 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -461,7 +461,8 @@ e1000_get_regs(struct net_device *netdev,
regs_buff[24] = (uint32_t)phy_data; /* phy local receiver status */
regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
if (hw->mac_type >= e1000_82540 &&
- hw->media_type == e1000_media_type_copper) {
+ hw->mac_type < e1000_82571 &&
+ hw->media_type == e1000_media_type_copper) {
regs_buff[26] = E1000_READ_REG(hw, MANC);
}
}
@@ -1690,6 +1691,7 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wol
int retval = 1; /* fail by default */
switch (hw->device_id) {
+ case E1000_DEV_ID_82542:
case E1000_DEV_ID_82543GC_FIBER:
case E1000_DEV_ID_82543GC_COPPER:
case E1000_DEV_ID_82544EI_FIBER:
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 112447fd8bf2..449a60303e07 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -1961,9 +1961,9 @@ struct e1000_hw {
#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
/* Transmit Descriptor Control */
-#define E1000_TXDCTL_PTHRESH 0x000000FF /* TXDCTL Prefetch Threshold */
-#define E1000_TXDCTL_HTHRESH 0x0000FF00 /* TXDCTL Host Threshold */
-#define E1000_TXDCTL_WTHRESH 0x00FF0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index ce0d35fe3947..726ec5e88ab2 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -35,7 +35,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
-#define DRV_VERSION "7.2.9-k2"DRIVERNAPI
+#define DRV_VERSION "7.2.9-k4"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@@ -699,7 +699,10 @@ e1000_reset(struct e1000_adapter *adapter)
phy_data);
}
- if ((adapter->en_mng_pt) && (adapter->hw.mac_type < e1000_82571)) {
+ if ((adapter->en_mng_pt) &&
+ (adapter->hw.mac_type >= e1000_82540) &&
+ (adapter->hw.mac_type < e1000_82571) &&
+ (adapter->hw.media_type == e1000_media_type_copper)) {
manc = E1000_READ_REG(&adapter->hw, MANC);
manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST);
E1000_WRITE_REG(&adapter->hw, MANC, manc);
@@ -1076,8 +1079,9 @@ e1000_remove(struct pci_dev *pdev)
flush_scheduled_work();
- if (adapter->hw.mac_type < e1000_82571 &&
- adapter->hw.media_type == e1000_media_type_copper) {
+ if (adapter->hw.mac_type >= e1000_82540 &&
+ adapter->hw.mac_type < e1000_82571 &&
+ adapter->hw.media_type == e1000_media_type_copper) {
manc = E1000_READ_REG(&adapter->hw, MANC);
if (manc & E1000_MANC_SMBUS_EN) {
manc |= E1000_MANC_ARP_EN;
@@ -1804,9 +1808,11 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
* followed by the page buffers. Therefore, skb->data is
* sized to hold the largest protocol header.
*/
+ /* allocations using alloc_page take too long for regular MTU
+ * so only enable packet split for jumbo frames */
pages = PAGE_USE_COUNT(adapter->netdev->mtu);
- if ((adapter->hw.mac_type > e1000_82547_rev_2) && (pages <= 3) &&
- PAGE_SIZE <= 16384)
+ if ((adapter->hw.mac_type >= e1000_82571) && (pages <= 3) &&
+ PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE))
adapter->rx_ps_pages = pages;
else
adapter->rx_ps_pages = 0;
@@ -2986,6 +2992,11 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+ /* 82571 and newer doesn't need the workaround that limited descriptor
+ * length to 4kB */
+ if (adapter->hw.mac_type >= e1000_82571)
+ max_per_txd = 8192;
+
#ifdef NETIF_F_TSO
mss = skb_shinfo(skb)->gso_size;
/* The controller does a simple calculation to
@@ -3775,9 +3786,6 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
length = le16_to_cpu(rx_desc->length);
- /* adjust length to remove Ethernet CRC */
- length -= 4;
-
if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
/* All receives must fit into a single buffer */
E1000_DBG("%s: Receive packet consumed multiple"
@@ -3805,6 +3813,10 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
}
}
+ /* adjust length to remove Ethernet CRC, this must be
+ * done after the TBI_ACCEPT workaround above */
+ length -= 4;
+
/* code added for copybreak, this should improve
* performance for small packets with large amounts
* of reassembly being done in the stack */
@@ -4773,8 +4785,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
pci_enable_wake(pdev, PCI_D3cold, 0);
}
- if (adapter->hw.mac_type < e1000_82571 &&
- adapter->hw.media_type == e1000_media_type_copper) {
+ if (adapter->hw.mac_type >= e1000_82540 &&
+ adapter->hw.mac_type < e1000_82571 &&
+ adapter->hw.media_type == e1000_media_type_copper) {
manc = E1000_READ_REG(&adapter->hw, MANC);
if (manc & E1000_MANC_SMBUS_EN) {
manc |= E1000_MANC_ARP_EN;
@@ -4787,6 +4800,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
if (adapter->hw.phy_type == e1000_phy_igp_3)
e1000_phy_powerdown_workaround(&adapter->hw);
+ if (netif_running(netdev))
+ e1000_free_irq(adapter);
+
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant. */
e1000_release_hw_control(adapter);
@@ -4817,6 +4833,10 @@ e1000_resume(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
+ if (netif_running(netdev) && (err = e1000_request_irq(adapter)))
+ return err;
+
+ e1000_power_up_phy(adapter);
e1000_reset(adapter);
E1000_WRITE_REG(&adapter->hw, WUS, ~0);
@@ -4825,8 +4845,9 @@ e1000_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
- if (adapter->hw.mac_type < e1000_82571 &&
- adapter->hw.media_type == e1000_media_type_copper) {
+ if (adapter->hw.mac_type >= e1000_82540 &&
+ adapter->hw.mac_type < e1000_82571 &&
+ adapter->hw.media_type == e1000_media_type_copper) {
manc = E1000_READ_REG(&adapter->hw, MANC);
manc &= ~(E1000_MANC_ARP_EN);
E1000_WRITE_REG(&adapter->hw, MANC, manc);
@@ -4914,10 +4935,6 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
- /* Perform card reset only on one instance of the card */
- if (PCI_FUNC (pdev->devfn) != 0)
- return PCI_ERS_RESULT_RECOVERED;
-
e1000_reset(adapter);
E1000_WRITE_REG(&adapter->hw, WUS, ~0);
@@ -4948,6 +4965,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
if (adapter->hw.mac_type >= e1000_82540 &&
+ adapter->hw.mac_type < e1000_82571 &&
adapter->hw.media_type == e1000_media_type_copper) {
manc = E1000_READ_REG(&adapter->hw, MANC);
manc &= ~(E1000_MANC_ARP_EN);
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 23b451a8ae12..39ad9f73d1ec 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -39,7 +39,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0028"
+#define DRV_VERSION "EHEA_0043"
#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -50,6 +50,7 @@
#define EHEA_MAX_ENTRIES_SQ 32767
#define EHEA_MIN_ENTRIES_QP 127
+#define EHEA_SMALL_QUEUES
#define EHEA_NUM_TX_QP 1
#ifdef EHEA_SMALL_QUEUES
@@ -59,11 +60,11 @@
#define EHEA_DEF_ENTRIES_RQ2 1023
#define EHEA_DEF_ENTRIES_RQ3 1023
#else
-#define EHEA_MAX_CQE_COUNT 32000
-#define EHEA_DEF_ENTRIES_SQ 16000
-#define EHEA_DEF_ENTRIES_RQ1 32080
-#define EHEA_DEF_ENTRIES_RQ2 4020
-#define EHEA_DEF_ENTRIES_RQ3 4020
+#define EHEA_MAX_CQE_COUNT 4080
+#define EHEA_DEF_ENTRIES_SQ 4080
+#define EHEA_DEF_ENTRIES_RQ1 8160
+#define EHEA_DEF_ENTRIES_RQ2 2040
+#define EHEA_DEF_ENTRIES_RQ3 2040
#endif
#define EHEA_MAX_ENTRIES_EQ 20
@@ -104,9 +105,6 @@
#define EHEA_BCMC_VLANID_ALL 0x01
#define EHEA_BCMC_VLANID_SINGLE 0x00
-/* Use this define to kmallocate pHYP control blocks */
-#define H_CB_ALIGNMENT 4096
-
#define EHEA_CACHE_LINE 128
/* Memory Regions */
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c
index 82eb2fb8c75e..9f57c2e78ced 100644
--- a/drivers/net/ehea/ehea_ethtool.c
+++ b/drivers/net/ehea/ehea_ethtool.c
@@ -238,7 +238,7 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
data[i++] = port->port_res[0].swqe_refill_th;
data[i++] = port->resets;
- cb6 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb6 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb6) {
ehea_error("no mem for cb6");
return;
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index c6b31775e26b..6ad696101418 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -92,7 +92,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
memset(stats, 0, sizeof(*stats));
- cb2 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb2 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb2) {
ehea_error("no mem for cb2");
goto out;
@@ -586,8 +586,8 @@ int ehea_sense_port_attr(struct ehea_port *port)
u64 hret;
struct hcp_ehea_port_cb0 *cb0;
- cb0 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
- if (!cb0) {
+ cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC); /* May be called via */
+ if (!cb0) { /* ehea_neq_tasklet() */
ehea_error("no mem for cb0");
ret = -ENOMEM;
goto out;
@@ -670,7 +670,7 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
u64 hret;
int ret = 0;
- cb4 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb4) {
ehea_error("no mem for cb4");
ret = -ENOMEM;
@@ -765,8 +765,7 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
if (!netif_carrier_ok(port->netdev)) {
- ret = ehea_sense_port_attr(
- adapter->port[portnum]);
+ ret = ehea_sense_port_attr(port);
if (ret) {
ehea_error("failed resensing port "
"attributes");
@@ -818,7 +817,7 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
netif_stop_queue(port->netdev);
break;
default:
- ehea_error("unknown event code %x", ec);
+ ehea_error("unknown event code %x, eqe=0x%lX", ec, eqe);
break;
}
}
@@ -986,7 +985,7 @@ static int ehea_configure_port(struct ehea_port *port)
struct hcp_ehea_port_cb0 *cb0;
ret = -ENOMEM;
- cb0 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb0)
goto out;
@@ -1444,7 +1443,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
goto out;
}
- cb0 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb0) {
ehea_error("no mem for cb0");
ret = -ENOMEM;
@@ -1502,7 +1501,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
if ((enable && port->promisc) || (!enable && !port->promisc))
return;
- cb7 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
if (!cb7) {
ehea_error("no mem for cb7");
goto out;
@@ -1606,7 +1605,7 @@ static void ehea_add_multicast_entry(struct ehea_port* port, u8* mc_mac_addr)
struct ehea_mc_list *ehea_mcl_entry;
u64 hret;
- ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_KERNEL);
+ ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
if (!ehea_mcl_entry) {
ehea_error("no mem for mcl_entry");
return;
@@ -1841,7 +1840,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (netif_msg_tx_queued(port)) {
ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
- ehea_dump(swqe, sizeof(*swqe), "swqe");
+ ehea_dump(swqe, 512, "swqe");
}
ehea_post_swqe(pr->qp, swqe);
@@ -1871,7 +1870,7 @@ static void ehea_vlan_rx_register(struct net_device *dev,
port->vgrp = grp;
- cb1 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb1) {
ehea_error("no mem for cb1");
goto out;
@@ -1900,7 +1899,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
int index;
u64 hret;
- cb1 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb1) {
ehea_error("no mem for cb1");
goto out;
@@ -1936,7 +1935,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
if (port->vgrp)
port->vgrp->vlan_devices[vid] = NULL;
- cb1 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb1) {
ehea_error("no mem for cb1");
goto out;
@@ -1969,7 +1968,7 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
u64 dummy64 = 0;
struct hcp_modify_qp_cb0* cb0;
- cb0 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb0) {
ret = -ENOMEM;
goto out;
@@ -2270,7 +2269,7 @@ int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
u64 hret;
int ret;
- cb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb) {
ret = -ENOMEM;
goto out;
@@ -2341,7 +2340,7 @@ static int ehea_setup_single_port(struct ehea_port *port,
goto out;
/* Enable Jumbo frames */
- cb4 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb4) {
ehea_error("no mem for cb4");
} else {
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c
index 4a85aca4c7e9..0cfc2bc1a27b 100644
--- a/drivers/net/ehea/ehea_phyp.c
+++ b/drivers/net/ehea/ehea_phyp.c
@@ -44,71 +44,99 @@ static inline u16 get_order_of_qentries(u16 queue_entries)
#define H_ALL_RES_TYPE_MR 5
#define H_ALL_RES_TYPE_MW 6
-static long ehea_hcall_9arg_9ret(unsigned long opcode,
- unsigned long arg1, unsigned long arg2,
- unsigned long arg3, unsigned long arg4,
- unsigned long arg5, unsigned long arg6,
- unsigned long arg7, unsigned long arg8,
- unsigned long arg9, unsigned long *out1,
- unsigned long *out2,unsigned long *out3,
- unsigned long *out4,unsigned long *out5,
- unsigned long *out6,unsigned long *out7,
- unsigned long *out8,unsigned long *out9)
+static long ehea_plpar_hcall_norets(unsigned long opcode,
+ unsigned long arg1,
+ unsigned long arg2,
+ unsigned long arg3,
+ unsigned long arg4,
+ unsigned long arg5,
+ unsigned long arg6,
+ unsigned long arg7)
{
- long hret;
+ long ret;
int i, sleep_msecs;
for (i = 0; i < 5; i++) {
- hret = plpar_hcall_9arg_9ret(opcode,arg1, arg2, arg3, arg4,
- arg5, arg6, arg7, arg8, arg9, out1,
- out2, out3, out4, out5, out6, out7,
- out8, out9);
- if (H_IS_LONG_BUSY(hret)) {
- sleep_msecs = get_longbusy_msecs(hret);
+ ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
+ arg5, arg6, arg7);
+
+ if (H_IS_LONG_BUSY(ret)) {
+ sleep_msecs = get_longbusy_msecs(ret);
msleep_interruptible(sleep_msecs);
continue;
}
- if (hret < H_SUCCESS)
- ehea_error("op=%lx hret=%lx "
- "i1=%lx i2=%lx i3=%lx i4=%lx i5=%lx i6=%lx "
- "i7=%lx i8=%lx i9=%lx "
- "o1=%lx o2=%lx o3=%lx o4=%lx o5=%lx o6=%lx "
- "o7=%lx o8=%lx o9=%lx",
- opcode, hret, arg1, arg2, arg3, arg4, arg5,
- arg6, arg7, arg8, arg9, *out1, *out2, *out3,
- *out4, *out5, *out6, *out7, *out8, *out9);
- return hret;
+ if (ret < H_SUCCESS)
+ ehea_error("opcode=%lx ret=%lx"
+ " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
+ " arg5=%lx arg6=%lx arg7=%lx ",
+ opcode, ret,
+ arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7);
+
+ return ret;
}
+
return H_BUSY;
}
-u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category,
- const u64 qp_handle, const u64 sel_mask, void *cb_addr)
+static long ehea_plpar_hcall9(unsigned long opcode,
+ unsigned long *outs, /* array of 9 outputs */
+ unsigned long arg1,
+ unsigned long arg2,
+ unsigned long arg3,
+ unsigned long arg4,
+ unsigned long arg5,
+ unsigned long arg6,
+ unsigned long arg7,
+ unsigned long arg8,
+ unsigned long arg9)
{
- u64 dummy;
+ long ret;
+ int i, sleep_msecs;
- if ((((u64)cb_addr) & (PAGE_SIZE - 1)) != 0) {
- ehea_error("not on pageboundary");
- return H_PARAMETER;
+ for (i = 0; i < 5; i++) {
+ ret = plpar_hcall9(opcode, outs,
+ arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9);
+
+ if (H_IS_LONG_BUSY(ret)) {
+ sleep_msecs = get_longbusy_msecs(ret);
+ msleep_interruptible(sleep_msecs);
+ continue;
+ }
+
+ if (ret < H_SUCCESS)
+ ehea_error("opcode=%lx ret=%lx"
+ " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
+ " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
+ " arg9=%lx"
+ " out1=%lx out2=%lx out3=%lx out4=%lx"
+ " out5=%lx out6=%lx out7=%lx out8=%lx"
+ " out9=%lx",
+ opcode, ret,
+ arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9,
+ outs[0], outs[1], outs[2], outs[3],
+ outs[4], outs[5], outs[6], outs[7],
+ outs[8]);
+
+ return ret;
}
- return ehea_hcall_9arg_9ret(H_QUERY_HEA_QP,
- adapter_handle, /* R4 */
- qp_category, /* R5 */
- qp_handle, /* R6 */
- sel_mask, /* R7 */
- virt_to_abs(cb_addr), /* R8 */
- 0, 0, 0, 0, /* R9-R12 */
- &dummy, /* R4 */
- &dummy, /* R5 */
- &dummy, /* R6 */
- &dummy, /* R7 */
- &dummy, /* R8 */
- &dummy, /* R9 */
- &dummy, /* R10 */
- &dummy, /* R11 */
- &dummy); /* R12 */
+ return H_BUSY;
+}
+
+u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category,
+ const u64 qp_handle, const u64 sel_mask, void *cb_addr)
+{
+ return ehea_plpar_hcall_norets(H_QUERY_HEA_QP,
+ adapter_handle, /* R4 */
+ qp_category, /* R5 */
+ qp_handle, /* R6 */
+ sel_mask, /* R7 */
+ virt_to_abs(cb_addr), /* R8 */
+ 0, 0);
}
/* input param R5 */
@@ -180,6 +208,7 @@ u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
u64 *qp_handle, struct h_epas *h_epas)
{
u64 hret;
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
u64 allocate_controls =
EHEA_BMASK_SET(H_ALL_RES_QP_EQPO, init_attr->low_lat_rq1 ? 1 : 0)
@@ -219,45 +248,29 @@ u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ2, init_attr->rq2_threshold)
| EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ3, init_attr->rq3_threshold);
- u64 r5_out = 0;
- u64 r6_out = 0;
- u64 r7_out = 0;
- u64 r8_out = 0;
- u64 r9_out = 0;
- u64 g_la_user_out = 0;
- u64 r11_out = 0;
- u64 r12_out = 0;
-
- hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE,
- adapter_handle, /* R4 */
- allocate_controls, /* R5 */
- init_attr->send_cq_handle, /* R6 */
- init_attr->recv_cq_handle, /* R7 */
- init_attr->aff_eq_handle, /* R8 */
- r9_reg, /* R9 */
- max_r10_reg, /* R10 */
- r11_in, /* R11 */
- threshold, /* R12 */
- qp_handle, /* R4 */
- &r5_out, /* R5 */
- &r6_out, /* R6 */
- &r7_out, /* R7 */
- &r8_out, /* R8 */
- &r9_out, /* R9 */
- &g_la_user_out, /* R10 */
- &r11_out, /* R11 */
- &r12_out); /* R12 */
-
- init_attr->qp_nr = (u32)r5_out;
+ hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
+ outs,
+ adapter_handle, /* R4 */
+ allocate_controls, /* R5 */
+ init_attr->send_cq_handle, /* R6 */
+ init_attr->recv_cq_handle, /* R7 */
+ init_attr->aff_eq_handle, /* R8 */
+ r9_reg, /* R9 */
+ max_r10_reg, /* R10 */
+ r11_in, /* R11 */
+ threshold); /* R12 */
+
+ *qp_handle = outs[0];
+ init_attr->qp_nr = (u32)outs[1];
init_attr->act_nr_send_wqes =
- (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_SWQE, r6_out);
+ (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_SWQE, outs[2]);
init_attr->act_nr_rwqes_rq1 =
- (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R1WQE, r6_out);
+ (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R1WQE, outs[2]);
init_attr->act_nr_rwqes_rq2 =
- (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R2WQE, r6_out);
+ (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R2WQE, outs[2]);
init_attr->act_nr_rwqes_rq3 =
- (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R3WQE, r6_out);
+ (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R3WQE, outs[2]);
init_attr->act_wqe_size_enc_sq = init_attr->wqe_size_enc_sq;
init_attr->act_wqe_size_enc_rq1 = init_attr->wqe_size_enc_rq1;
@@ -265,25 +278,25 @@ u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
init_attr->act_wqe_size_enc_rq3 = init_attr->wqe_size_enc_rq3;
init_attr->nr_sq_pages =
- (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_SQ, r8_out);
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_SQ, outs[4]);
init_attr->nr_rq1_pages =
- (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ1, r8_out);
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ1, outs[4]);
init_attr->nr_rq2_pages =
- (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ2, r9_out);
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ2, outs[5]);
init_attr->nr_rq3_pages =
- (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ3, r9_out);
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ3, outs[5]);
init_attr->liobn_sq =
- (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_SQ, r11_out);
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_SQ, outs[7]);
init_attr->liobn_rq1 =
- (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ1, r11_out);
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ1, outs[7]);
init_attr->liobn_rq2 =
- (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ2, r12_out);
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ2, outs[8]);
init_attr->liobn_rq3 =
- (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ3, r12_out);
+ (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ3, outs[8]);
if (!hret)
- hcp_epas_ctor(h_epas, g_la_user_out, g_la_user_out);
+ hcp_epas_ctor(h_epas, outs[6], outs[6]);
return hret;
}
@@ -292,31 +305,24 @@ u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
struct ehea_cq_attr *cq_attr,
u64 *cq_handle, struct h_epas *epas)
{
- u64 hret, dummy, act_nr_of_cqes_out, act_pages_out;
- u64 g_la_privileged_out, g_la_user_out;
-
- hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE,
- adapter_handle, /* R4 */
- H_ALL_RES_TYPE_CQ, /* R5 */
- cq_attr->eq_handle, /* R6 */
- cq_attr->cq_token, /* R7 */
- cq_attr->max_nr_of_cqes, /* R8 */
- 0, 0, 0, 0, /* R9-R12 */
- cq_handle, /* R4 */
- &dummy, /* R5 */
- &dummy, /* R6 */
- &act_nr_of_cqes_out, /* R7 */
- &act_pages_out, /* R8 */
- &g_la_privileged_out, /* R9 */
- &g_la_user_out, /* R10 */
- &dummy, /* R11 */
- &dummy); /* R12 */
-
- cq_attr->act_nr_of_cqes = act_nr_of_cqes_out;
- cq_attr->nr_pages = act_pages_out;
+ u64 hret;
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+ hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
+ outs,
+ adapter_handle, /* R4 */
+ H_ALL_RES_TYPE_CQ, /* R5 */
+ cq_attr->eq_handle, /* R6 */
+ cq_attr->cq_token, /* R7 */
+ cq_attr->max_nr_of_cqes, /* R8 */
+ 0, 0, 0, 0); /* R9-R12 */
+
+ *cq_handle = outs[0];
+ cq_attr->act_nr_of_cqes = outs[3];
+ cq_attr->nr_pages = outs[4];
if (!hret)
- hcp_epas_ctor(epas, g_la_privileged_out, g_la_user_out);
+ hcp_epas_ctor(epas, outs[5], outs[6]);
return hret;
}
@@ -361,9 +367,8 @@ u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
struct ehea_eq_attr *eq_attr, u64 *eq_handle)
{
- u64 hret, dummy, eq_liobn, allocate_controls;
- u64 ist1_out, ist2_out, ist3_out, ist4_out;
- u64 act_nr_of_eqes_out, act_pages_out;
+ u64 hret, allocate_controls;
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
/* resource type */
allocate_controls =
@@ -372,27 +377,20 @@ u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
| EHEA_BMASK_SET(H_ALL_RES_EQ_INH_EQE_GEN, !eq_attr->eqe_gen)
| EHEA_BMASK_SET(H_ALL_RES_EQ_NON_NEQ_ISN, 1);
- hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE,
- adapter_handle, /* R4 */
- allocate_controls, /* R5 */
- eq_attr->max_nr_of_eqes, /* R6 */
- 0, 0, 0, 0, 0, 0, /* R7-R10 */
- eq_handle, /* R4 */
- &dummy, /* R5 */
- &eq_liobn, /* R6 */
- &act_nr_of_eqes_out, /* R7 */
- &act_pages_out, /* R8 */
- &ist1_out, /* R9 */
- &ist2_out, /* R10 */
- &ist3_out, /* R11 */
- &ist4_out); /* R12 */
-
- eq_attr->act_nr_of_eqes = act_nr_of_eqes_out;
- eq_attr->nr_pages = act_pages_out;
- eq_attr->ist1 = ist1_out;
- eq_attr->ist2 = ist2_out;
- eq_attr->ist3 = ist3_out;
- eq_attr->ist4 = ist4_out;
+ hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
+ outs,
+ adapter_handle, /* R4 */
+ allocate_controls, /* R5 */
+ eq_attr->max_nr_of_eqes, /* R6 */
+ 0, 0, 0, 0, 0, 0); /* R7-R10 */
+
+ *eq_handle = outs[0];
+ eq_attr->act_nr_of_eqes = outs[3];
+ eq_attr->nr_pages = outs[4];
+ eq_attr->ist1 = outs[5];
+ eq_attr->ist2 = outs[6];
+ eq_attr->ist3 = outs[7];
+ eq_attr->ist4 = outs[8];
return hret;
}
@@ -402,31 +400,22 @@ u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, const u8 cat,
void *cb_addr, u64 *inv_attr_id, u64 *proc_mask,
u16 *out_swr, u16 *out_rwr)
{
- u64 hret, dummy, act_out_swr, act_out_rwr;
-
- if ((((u64)cb_addr) & (PAGE_SIZE - 1)) != 0) {
- ehea_error("not on page boundary");
- return H_PARAMETER;
- }
-
- hret = ehea_hcall_9arg_9ret(H_MODIFY_HEA_QP,
- adapter_handle, /* R4 */
- (u64) cat, /* R5 */
- qp_handle, /* R6 */
- sel_mask, /* R7 */
- virt_to_abs(cb_addr), /* R8 */
- 0, 0, 0, 0, /* R9-R12 */
- inv_attr_id, /* R4 */
- &dummy, /* R5 */
- &dummy, /* R6 */
- &act_out_swr, /* R7 */
- &act_out_rwr, /* R8 */
- proc_mask, /* R9 */
- &dummy, /* R10 */
- &dummy, /* R11 */
- &dummy); /* R12 */
- *out_swr = act_out_swr;
- *out_rwr = act_out_rwr;
+ u64 hret;
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+ hret = ehea_plpar_hcall9(H_MODIFY_HEA_QP,
+ outs,
+ adapter_handle, /* R4 */
+ (u64) cat, /* R5 */
+ qp_handle, /* R6 */
+ sel_mask, /* R7 */
+ virt_to_abs(cb_addr), /* R8 */
+ 0, 0, 0, 0); /* R9-R12 */
+
+ *inv_attr_id = outs[0];
+ *out_swr = outs[3];
+ *out_rwr = outs[4];
+ *proc_mask = outs[5];
return hret;
}
@@ -435,122 +424,81 @@ u64 ehea_h_register_rpage(const u64 adapter_handle, const u8 pagesize,
const u8 queue_type, const u64 resource_handle,
const u64 log_pageaddr, u64 count)
{
- u64 dummy, reg_control;
+ u64 reg_control;
reg_control = EHEA_BMASK_SET(H_REG_RPAGE_PAGE_SIZE, pagesize)
| EHEA_BMASK_SET(H_REG_RPAGE_QT, queue_type);
- return ehea_hcall_9arg_9ret(H_REGISTER_HEA_RPAGES,
- adapter_handle, /* R4 */
- reg_control, /* R5 */
- resource_handle, /* R6 */
- log_pageaddr, /* R7 */
- count, /* R8 */
- 0, 0, 0, 0, /* R9-R12 */
- &dummy, /* R4 */
- &dummy, /* R5 */
- &dummy, /* R6 */
- &dummy, /* R7 */
- &dummy, /* R8 */
- &dummy, /* R9 */
- &dummy, /* R10 */
- &dummy, /* R11 */
- &dummy); /* R12 */
+ return ehea_plpar_hcall_norets(H_REGISTER_HEA_RPAGES,
+ adapter_handle, /* R4 */
+ reg_control, /* R5 */
+ resource_handle, /* R6 */
+ log_pageaddr, /* R7 */
+ count, /* R8 */
+ 0, 0); /* R9-R10 */
}
u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
const u64 vaddr_in, const u32 access_ctrl, const u32 pd,
struct ehea_mr *mr)
{
- u64 hret, dummy, lkey_out;
-
- hret = ehea_hcall_9arg_9ret(H_REGISTER_SMR,
- adapter_handle , /* R4 */
- orig_mr_handle, /* R5 */
- vaddr_in, /* R6 */
- (((u64)access_ctrl) << 32ULL), /* R7 */
- pd, /* R8 */
- 0, 0, 0, 0, /* R9-R12 */
- &mr->handle, /* R4 */
- &dummy, /* R5 */
- &lkey_out, /* R6 */
- &dummy, /* R7 */
- &dummy, /* R8 */
- &dummy, /* R9 */
- &dummy, /* R10 */
- &dummy, /* R11 */
- &dummy); /* R12 */
- mr->lkey = (u32)lkey_out;
+ u64 hret;
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+ hret = ehea_plpar_hcall9(H_REGISTER_SMR,
+ outs,
+ adapter_handle , /* R4 */
+ orig_mr_handle, /* R5 */
+ vaddr_in, /* R6 */
+ (((u64)access_ctrl) << 32ULL), /* R7 */
+ pd, /* R8 */
+ 0, 0, 0, 0); /* R9-R12 */
+
+ mr->handle = outs[0];
+ mr->lkey = (u32)outs[2];
return hret;
}
u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle)
{
- u64 hret, dummy, ladr_next_sq_wqe_out;
- u64 ladr_next_rq1_wqe_out, ladr_next_rq2_wqe_out, ladr_next_rq3_wqe_out;
-
- hret = ehea_hcall_9arg_9ret(H_DISABLE_AND_GET_HEA,
- adapter_handle, /* R4 */
- H_DISABLE_GET_EHEA_WQE_P, /* R5 */
- qp_handle, /* R6 */
- 0, 0, 0, 0, 0, 0, /* R7-R12 */
- &ladr_next_sq_wqe_out, /* R4 */
- &ladr_next_rq1_wqe_out, /* R5 */
- &ladr_next_rq2_wqe_out, /* R6 */
- &ladr_next_rq3_wqe_out, /* R7 */
- &dummy, /* R8 */
- &dummy, /* R9 */
- &dummy, /* R10 */
- &dummy, /* R11 */
- &dummy); /* R12 */
- return hret;
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+ return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA,
+ outs,
+ adapter_handle, /* R4 */
+ H_DISABLE_GET_EHEA_WQE_P, /* R5 */
+ qp_handle, /* R6 */
+ 0, 0, 0, 0, 0, 0); /* R7-R12 */
}
u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle)
{
- u64 dummy;
-
- return ehea_hcall_9arg_9ret(H_FREE_RESOURCE,
- adapter_handle, /* R4 */
- res_handle, /* R5 */
- 0, 0, 0, 0, 0, 0, 0, /* R6-R12 */
- &dummy, /* R4 */
- &dummy, /* R5 */
- &dummy, /* R6 */
- &dummy, /* R7 */
- &dummy, /* R8 */
- &dummy, /* R9 */
- &dummy, /* R10 */
- &dummy, /* R11 */
- &dummy); /* R12 */
+ return ehea_plpar_hcall_norets(H_FREE_RESOURCE,
+ adapter_handle, /* R4 */
+ res_handle, /* R5 */
+ 0, 0, 0, 0, 0); /* R6-R10 */
}
u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
const u64 length, const u32 access_ctrl,
const u32 pd, u64 *mr_handle, u32 *lkey)
{
- u64 hret, dummy, lkey_out;
-
- hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE,
- adapter_handle, /* R4 */
- 5, /* R5 */
- vaddr, /* R6 */
- length, /* R7 */
- (((u64) access_ctrl) << 32ULL),/* R8 */
- pd, /* R9 */
- 0, 0, 0, /* R10-R12 */
- mr_handle, /* R4 */
- &dummy, /* R5 */
- &lkey_out, /* R6 */
- &dummy, /* R7 */
- &dummy, /* R8 */
- &dummy, /* R9 */
- &dummy, /* R10 */
- &dummy, /* R11 */
- &dummy); /* R12 */
- *lkey = (u32) lkey_out;
-
+ u64 hret;
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+ hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
+ outs,
+ adapter_handle, /* R4 */
+ 5, /* R5 */
+ vaddr, /* R6 */
+ length, /* R7 */
+ (((u64) access_ctrl) << 32ULL), /* R8 */
+ pd, /* R9 */
+ 0, 0, 0); /* R10-R12 */
+
+ *mr_handle = outs[0];
+ *lkey = (u32)outs[2];
return hret;
}
@@ -558,7 +506,7 @@ u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
const u8 pagesize, const u8 queue_type,
const u64 log_pageaddr, const u64 count)
{
- if ((count > 1) && (log_pageaddr & 0xfff)) {
+ if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) {
ehea_error("not on pageboundary");
return H_PARAMETER;
}
@@ -570,23 +518,14 @@ u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr)
{
- u64 hret, dummy, cb_logaddr;
+ u64 hret, cb_logaddr;
cb_logaddr = virt_to_abs(cb_addr);
- hret = ehea_hcall_9arg_9ret(H_QUERY_HEA,
- adapter_handle, /* R4 */
- cb_logaddr, /* R5 */
- 0, 0, 0, 0, 0, 0, 0, /* R6-R12 */
- &dummy, /* R4 */
- &dummy, /* R5 */
- &dummy, /* R6 */
- &dummy, /* R7 */
- &dummy, /* R8 */
- &dummy, /* R9 */
- &dummy, /* R10 */
- &dummy, /* R11 */
- &dummy); /* R12 */
+ hret = ehea_plpar_hcall_norets(H_QUERY_HEA,
+ adapter_handle, /* R4 */
+ cb_logaddr, /* R5 */
+ 0, 0, 0, 0, 0); /* R6-R10 */
#ifdef DEBUG
ehea_dmp(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea");
#endif
@@ -597,36 +536,28 @@ u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
const u8 cb_cat, const u64 select_mask,
void *cb_addr)
{
- u64 port_info, dummy;
+ u64 port_info;
u64 cb_logaddr = virt_to_abs(cb_addr);
u64 arr_index = 0;
port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
| EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
- return ehea_hcall_9arg_9ret(H_QUERY_HEA_PORT,
- adapter_handle, /* R4 */
- port_info, /* R5 */
- select_mask, /* R6 */
- arr_index, /* R7 */
- cb_logaddr, /* R8 */
- 0, 0, 0, 0, /* R9-R12 */
- &dummy, /* R4 */
- &dummy, /* R5 */
- &dummy, /* R6 */
- &dummy, /* R7 */
- &dummy, /* R8 */
- &dummy, /* R9 */
- &dummy, /* R10 */
- &dummy, /* R11 */
- &dummy); /* R12 */
+ return ehea_plpar_hcall_norets(H_QUERY_HEA_PORT,
+ adapter_handle, /* R4 */
+ port_info, /* R5 */
+ select_mask, /* R6 */
+ arr_index, /* R7 */
+ cb_logaddr, /* R8 */
+ 0, 0); /* R9-R10 */
}
u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
const u8 cb_cat, const u64 select_mask,
void *cb_addr)
{
- u64 port_info, dummy, inv_attr_ident, proc_mask;
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
+ u64 port_info;
u64 arr_index = 0;
u64 cb_logaddr = virt_to_abs(cb_addr);
@@ -635,29 +566,21 @@ u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
#ifdef DEBUG
ehea_dump(cb_addr, sizeof(struct hcp_ehea_port_cb0), "Before HCALL");
#endif
- return ehea_hcall_9arg_9ret(H_MODIFY_HEA_PORT,
- adapter_handle, /* R4 */
- port_info, /* R5 */
- select_mask, /* R6 */
- arr_index, /* R7 */
- cb_logaddr, /* R8 */
- 0, 0, 0, 0, /* R9-R12 */
- &inv_attr_ident, /* R4 */
- &proc_mask, /* R5 */
- &dummy, /* R6 */
- &dummy, /* R7 */
- &dummy, /* R8 */
- &dummy, /* R9 */
- &dummy, /* R10 */
- &dummy, /* R11 */
- &dummy); /* R12 */
+ return ehea_plpar_hcall9(H_MODIFY_HEA_PORT,
+ outs,
+ adapter_handle, /* R4 */
+ port_info, /* R5 */
+ select_mask, /* R6 */
+ arr_index, /* R7 */
+ cb_logaddr, /* R8 */
+ 0, 0, 0, 0); /* R9-R12 */
}
u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
const u8 reg_type, const u64 mc_mac_addr,
const u16 vlan_id, const u32 hcall_id)
{
- u64 r5_port_num, r6_reg_type, r7_mc_mac_addr, r8_vlan_id, dummy;
+ u64 r5_port_num, r6_reg_type, r7_mc_mac_addr, r8_vlan_id;
u64 mac_addr = mc_mac_addr >> 16;
r5_port_num = EHEA_BMASK_SET(H_REGBCMC_PN, port_num);
@@ -665,41 +588,21 @@ u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
r7_mc_mac_addr = EHEA_BMASK_SET(H_REGBCMC_MACADDR, mac_addr);
r8_vlan_id = EHEA_BMASK_SET(H_REGBCMC_VLANID, vlan_id);
- return ehea_hcall_9arg_9ret(hcall_id,
- adapter_handle, /* R4 */
- r5_port_num, /* R5 */
- r6_reg_type, /* R6 */
- r7_mc_mac_addr, /* R7 */
- r8_vlan_id, /* R8 */
- 0, 0, 0, 0, /* R9-R12 */
- &dummy, /* R4 */
- &dummy, /* R5 */
- &dummy, /* R6 */
- &dummy, /* R7 */
- &dummy, /* R8 */
- &dummy, /* R9 */
- &dummy, /* R10 */
- &dummy, /* R11 */
- &dummy); /* R12 */
+ return ehea_plpar_hcall_norets(hcall_id,
+ adapter_handle, /* R4 */
+ r5_port_num, /* R5 */
+ r6_reg_type, /* R6 */
+ r7_mc_mac_addr, /* R7 */
+ r8_vlan_id, /* R8 */
+ 0, 0); /* R9-R12 */
}
u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle,
const u64 event_mask)
{
- u64 dummy;
-
- return ehea_hcall_9arg_9ret(H_RESET_EVENTS,
- adapter_handle, /* R4 */
- neq_handle, /* R5 */
- event_mask, /* R6 */
- 0, 0, 0, 0, 0, 0, /* R7-R12 */
- &dummy, /* R4 */
- &dummy, /* R5 */
- &dummy, /* R6 */
- &dummy, /* R7 */
- &dummy, /* R8 */
- &dummy, /* R9 */
- &dummy, /* R10 */
- &dummy, /* R11 */
- &dummy); /* R12 */
+ return ehea_plpar_hcall_norets(H_RESET_EVENTS,
+ adapter_handle, /* R4 */
+ neq_handle, /* R5 */
+ event_mask, /* R6 */
+ 0, 0, 0, 0); /* R7-R12 */
}
diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ehea/ehea_phyp.h
index fa51e3b5bb05..919f94b75933 100644
--- a/drivers/net/ehea/ehea_phyp.h
+++ b/drivers/net/ehea/ehea_phyp.h
@@ -81,14 +81,16 @@ static inline u32 get_longbusy_msecs(int long_busy_ret_code)
static inline void hcp_epas_ctor(struct h_epas *epas, u64 paddr_kernel,
u64 paddr_user)
{
- epas->kernel.addr = ioremap(paddr_kernel, PAGE_SIZE);
+ /* To support 64k pages we must round to 64k page boundary */
+ epas->kernel.addr = ioremap((paddr_kernel & PAGE_MASK), PAGE_SIZE) +
+ (paddr_kernel & ~PAGE_MASK);
epas->user.addr = paddr_user;
}
static inline void hcp_epas_dtor(struct h_epas *epas)
{
if (epas->kernel.addr)
- iounmap(epas->kernel.addr);
+ iounmap((void __iomem*)((u64)epas->kernel.addr & PAGE_MASK));
epas->user.addr = 0;
epas->kernel.addr = 0;
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 3e1862326c88..72ef7bde3346 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -209,11 +209,11 @@ int ehea_destroy_cq(struct ehea_cq *cq)
{
u64 adapter_handle, hret;
- adapter_handle = cq->adapter->handle;
-
if (!cq)
return 0;
+ adapter_handle = cq->adapter->handle;
+
/* deregister all previous registered pages */
hret = ehea_h_free_resource(adapter_handle, cq->fw_handle);
if (hret != H_SUCCESS) {
@@ -512,7 +512,7 @@ int ehea_reg_mr_adapter(struct ehea_adapter *adapter)
start = KERNELBASE;
end = (u64)high_memory;
- nr_pages = (end - start) / PAGE_SIZE;
+ nr_pages = (end - start) / EHEA_PAGESIZE;
pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!pt) {
@@ -538,9 +538,9 @@ int ehea_reg_mr_adapter(struct ehea_adapter *adapter)
if (nr_pages > 1) {
u64 num_pages = min(nr_pages, (u64)512);
for (i = 0; i < num_pages; i++)
- pt[i] = virt_to_abs((void*)(((u64)start)
- + ((k++) *
- PAGE_SIZE)));
+ pt[i] = virt_to_abs((void*)(((u64)start) +
+ ((k++) *
+ EHEA_PAGESIZE)));
hret = ehea_h_register_rpage_mr(adapter->handle,
adapter->mr.handle, 0,
@@ -548,8 +548,9 @@ int ehea_reg_mr_adapter(struct ehea_adapter *adapter)
num_pages);
nr_pages -= num_pages;
} else {
- u64 abs_adr = virt_to_abs((void*)(((u64)start)
- + (k * PAGE_SIZE)));
+ u64 abs_adr = virt_to_abs((void*)(((u64)start) +
+ (k * EHEA_PAGESIZE)));
+
hret = ehea_h_register_rpage_mr(adapter->handle,
adapter->mr.handle, 0,
0, abs_adr,1);
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index 8cc3c331aca8..b7b8bc2a6307 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -162,9 +162,9 @@ static char *version =
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
+#include <linux/io.h>
#include <asm/system.h>
-#include <asm/io.h>
#include <asm/dma.h>
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 99b7a411db28..c5ed635bce36 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -2497,6 +2497,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
u8 __iomem *base = get_hwbase(dev);
u32 events;
int i;
+ unsigned long flags;
dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
@@ -2508,16 +2509,16 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
if (!(events & np->irqmask))
break;
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
nv_tx_done(dev);
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
if (events & (NVREG_IRQ_TX_ERR)) {
dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
dev->name, events);
}
if (i > max_interrupt_work) {
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
/* disable interrupts on the nic */
writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
pci_push(base);
@@ -2527,7 +2528,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
break;
}
@@ -2601,6 +2602,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
u8 __iomem *base = get_hwbase(dev);
u32 events;
int i;
+ unsigned long flags;
dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
@@ -2614,14 +2616,14 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
nv_rx_process(dev, dev->weight);
if (nv_alloc_rx(dev)) {
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
}
if (i > max_interrupt_work) {
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
/* disable interrupts on the nic */
writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
pci_push(base);
@@ -2631,7 +2633,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
break;
}
}
@@ -2648,6 +2650,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
u8 __iomem *base = get_hwbase(dev);
u32 events;
int i;
+ unsigned long flags;
dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
@@ -2660,14 +2663,14 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
break;
if (events & NVREG_IRQ_LINK) {
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
nv_link_irq(dev);
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
}
if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
nv_linkchange(dev);
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
np->link_timeout = jiffies + LINK_TIMEOUT;
}
if (events & (NVREG_IRQ_UNKNOWN)) {
@@ -2675,7 +2678,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
dev->name, events);
}
if (i > max_interrupt_work) {
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
/* disable interrupts on the nic */
writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
pci_push(base);
@@ -2685,7 +2688,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
break;
}
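
The forcedeth hunks above switch the per-vector handlers (tx, rx, other) from spin_lock_irq()/spin_unlock_irq() to the irqsave variants, so the handlers no longer unconditionally re-enable interrupts on exit. A minimal sketch of the pattern, using hypothetical names rather than forcedeth code:

    #include <linux/spinlock.h>
    #include <linux/interrupt.h>

    static DEFINE_SPINLOCK(demo_lock);		/* hypothetical per-device lock */

    static irqreturn_t demo_irq(int irq, void *dev_id)
    {
    	unsigned long flags;

    	/* irqsave/irqrestore preserves the caller's interrupt state, so the
    	 * critical section is safe whether IRQs were enabled or not on entry */
    	spin_lock_irqsave(&demo_lock, flags);
    	/* ... update shared device state ... */
    	spin_unlock_irqrestore(&demo_lock, flags);

    	return IRQ_HANDLED;
    }
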
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 4bac3cd8f235..44c9f993dcc4 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -212,7 +212,8 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
break;
}
- free_index = pool->consumer_index++ % pool->size;
+ free_index = pool->consumer_index;
+ pool->consumer_index = (pool->consumer_index + 1) % pool->size;
index = pool->free_map[free_index];
ibmveth_assert(index != IBM_VETH_INVALID_MAP);
@@ -238,7 +239,10 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
if(lpar_rc != H_SUCCESS) {
pool->free_map[free_index] = index;
pool->skbuff[index] = NULL;
- pool->consumer_index--;
+ if (pool->consumer_index == 0)
+ pool->consumer_index = pool->size - 1;
+ else
+ pool->consumer_index--;
dma_unmap_single(&adapter->vdev->dev,
pool->dma_addr[index], pool->buff_size,
DMA_FROM_DEVICE);
@@ -325,7 +329,10 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64
adapter->rx_buff_pool[pool].buff_size,
DMA_FROM_DEVICE);
- free_index = adapter->rx_buff_pool[pool].producer_index++ % adapter->rx_buff_pool[pool].size;
+ free_index = adapter->rx_buff_pool[pool].producer_index;
+ adapter->rx_buff_pool[pool].producer_index
+ = (adapter->rx_buff_pool[pool].producer_index + 1)
+ % adapter->rx_buff_pool[pool].size;
adapter->rx_buff_pool[pool].free_map[free_index] = index;
mb();
@@ -437,6 +444,31 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
&adapter->rx_buff_pool[i]);
}
+static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
+ union ibmveth_buf_desc rxq_desc, u64 mac_address)
+{
+ int rc, try_again = 1;
+
+ /* After a kexec the adapter will still be open, so our attempt to
+ * open it will fail. So if we get a failure we free the adapter and
+ * try again, but only once. */
+retry:
+ rc = h_register_logical_lan(adapter->vdev->unit_address,
+ adapter->buffer_list_dma, rxq_desc.desc,
+ adapter->filter_list_dma, mac_address);
+
+ if (rc != H_SUCCESS && try_again) {
+ do {
+ rc = h_free_logical_lan(adapter->vdev->unit_address);
+ } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
+
+ try_again = 0;
+ goto retry;
+ }
+
+ return rc;
+}
+
static int ibmveth_open(struct net_device *netdev)
{
struct ibmveth_adapter *adapter = netdev->priv;
@@ -502,12 +534,9 @@ static int ibmveth_open(struct net_device *netdev)
ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
+ h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
- lpar_rc = h_register_logical_lan(adapter->vdev->unit_address,
- adapter->buffer_list_dma,
- rxq_desc.desc,
- adapter->filter_list_dma,
- mac_address);
+ lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
if(lpar_rc != H_SUCCESS) {
ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
@@ -905,6 +934,14 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void ibmveth_poll_controller(struct net_device *dev)
+{
+ ibmveth_replenish_task(dev->priv);
+ ibmveth_interrupt(dev->irq, dev);
+}
+#endif
+
static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
int rc, i;
@@ -977,6 +1014,9 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
netdev->ethtool_ops = &netdev_ethtool_ops;
netdev->change_mtu = ibmveth_change_mtu;
SET_NETDEV_DEV(netdev, &dev->dev);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ netdev->poll_controller = ibmveth_poll_controller;
+#endif
netdev->features |= NETIF_F_LLTX;
spin_lock_init(&adapter->stats_lock);
@@ -1132,7 +1172,9 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
struct proc_dir_entry *entry;
if (ibmveth_proc_dir) {
- entry = create_proc_entry(adapter->netdev->name, S_IFREG, ibmveth_proc_dir);
+ char u_addr[10];
+ sprintf(u_addr, "%x", adapter->vdev->unit_address);
+ entry = create_proc_entry(u_addr, S_IFREG, ibmveth_proc_dir);
if (!entry) {
ibmveth_error_printk("Cannot create adapter proc entry");
} else {
@@ -1147,7 +1189,9 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
if (ibmveth_proc_dir) {
- remove_proc_entry(adapter->netdev->name, ibmveth_proc_dir);
+ char u_addr[10];
+ sprintf(u_addr, "%x", adapter->vdev->unit_address);
+ remove_proc_entry(u_addr, ibmveth_proc_dir);
}
}
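
The ibmveth hunks above stop the consumer/producer indices from growing without bound: the old code advanced with a bare post-increment and only applied the modulo when reading, while the new code stores the wrapped value and steps back explicitly on the error path. A hedged sketch of the same ring-index handling, with hypothetical structures:

    /* Illustrative only; not the actual ibmveth pool layout. */
    struct demo_pool {
    	unsigned int consumer_index;	/* always kept in [0, size) */
    	unsigned int size;
    };

    static unsigned int demo_pool_take_slot(struct demo_pool *pool)
    {
    	unsigned int slot = pool->consumer_index;

    	/* advance with an explicit wrap instead of an ever-growing counter */
    	pool->consumer_index = (pool->consumer_index + 1) % pool->size;
    	return slot;
    }

    static void demo_pool_undo_take(struct demo_pool *pool)
    {
    	/* step back one slot, wrapping at zero instead of underflowing */
    	if (pool->consumer_index == 0)
    		pool->consumer_index = pool->size - 1;
    	else
    		pool->consumer_index--;
    }
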
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index e963dbf816be..f56b00ee385e 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -1017,7 +1017,7 @@ static void ioc3_init(struct net_device *dev)
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3 *ioc3 = ip->regs;
- del_timer(&ip->ioc3_timer); /* Kill if running */
+ del_timer_sync(&ip->ioc3_timer); /* Kill if running */
ioc3_w_emcr(EMCR_RST); /* Reset */
(void) ioc3_r_emcr(); /* Flush WB */
@@ -1081,7 +1081,7 @@ static int ioc3_close(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
- del_timer(&ip->ioc3_timer);
+ del_timer_sync(&ip->ioc3_timer);
netif_stop_queue(dev);
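
The ioc3-eth hunks above replace del_timer() with del_timer_sync() on the init and close paths, so a timer handler still running on another CPU has finished before the hardware is reset. A small sketch of the idiom, with hypothetical names:

    #include <linux/timer.h>

    struct demo_priv {
    	struct timer_list watchdog;	/* hypothetical periodic timer */
    };

    static void demo_shutdown(struct demo_priv *priv)
    {
    	/* del_timer() only removes a pending timer; del_timer_sync() also
    	 * waits for a handler already executing elsewhere, so the state it
    	 * touches can be torn down safely afterwards */
    	del_timer_sync(&priv->watchdog);
    }
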
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index be8a66e702b0..3b4c47875935 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -15,8 +15,7 @@
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation; either version 2 of the License, or
-* (at your option) any later version.
+* the Free Software Foundation; either version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 4178b4b1d2df..82c10dec1b5a 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -58,7 +58,11 @@
#include <linux/tcp.h>
#include <linux/percpu.h>
-static DEFINE_PER_CPU(struct net_device_stats, loopback_stats);
+struct pcpu_lstats {
+ unsigned long packets;
+ unsigned long bytes;
+};
+static DEFINE_PER_CPU(struct pcpu_lstats, pcpu_lstats);
#define LOOPBACK_OVERHEAD (128 + MAX_HEADER + 16 + 16)
@@ -128,7 +132,7 @@ static void emulate_large_send_offload(struct sk_buff *skb)
*/
static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct net_device_stats *lb_stats;
+ struct pcpu_lstats *lb_stats;
skb_orphan(skb);
@@ -149,16 +153,14 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
dev->last_rx = jiffies;
- lb_stats = &per_cpu(loopback_stats, get_cpu());
- lb_stats->rx_bytes += skb->len;
- lb_stats->tx_bytes = lb_stats->rx_bytes;
- lb_stats->rx_packets++;
- lb_stats->tx_packets = lb_stats->rx_packets;
- put_cpu();
+ /* it's OK to use __get_cpu_var() because BHs are off */
+ lb_stats = &__get_cpu_var(pcpu_lstats);
+ lb_stats->bytes += skb->len;
+ lb_stats->packets++;
netif_rx(skb);
- return(0);
+ return 0;
}
static struct net_device_stats loopback_stats;
@@ -166,20 +168,21 @@ static struct net_device_stats loopback_stats;
static struct net_device_stats *get_stats(struct net_device *dev)
{
struct net_device_stats *stats = &loopback_stats;
+ unsigned long bytes = 0;
+ unsigned long packets = 0;
int i;
- memset(stats, 0, sizeof(struct net_device_stats));
-
for_each_possible_cpu(i) {
- struct net_device_stats *lb_stats;
+ const struct pcpu_lstats *lb_stats;
- lb_stats = &per_cpu(loopback_stats, i);
- stats->rx_bytes += lb_stats->rx_bytes;
- stats->tx_bytes += lb_stats->tx_bytes;
- stats->rx_packets += lb_stats->rx_packets;
- stats->tx_packets += lb_stats->tx_packets;
+ lb_stats = &per_cpu(pcpu_lstats, i);
+ bytes += lb_stats->bytes;
+ packets += lb_stats->packets;
}
-
+ stats->rx_packets = packets;
+ stats->tx_packets = packets;
+ stats->rx_bytes = bytes;
+ stats->tx_bytes = bytes;
return stats;
}
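
The loopback hunks above shrink the per-CPU statistics to just a packet and byte count, update them via __get_cpu_var() (legitimate here because BHs are off in the transmit path), and fold the per-CPU values together only when the stats are read. A sketch of the pattern with hypothetical names:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    struct demo_lstats {
    	unsigned long packets;
    	unsigned long bytes;
    };
    static DEFINE_PER_CPU(struct demo_lstats, demo_lstats);

    static void demo_count_packet(unsigned int len)
    {
    	/* safe without get_cpu()/put_cpu() only if preemption (or BHs)
    	 * is already disabled on this path */
    	struct demo_lstats *s = &__get_cpu_var(demo_lstats);

    	s->packets++;
    	s->bytes += len;
    }

    static void demo_fold_stats(unsigned long *packets, unsigned long *bytes)
    {
    	int cpu;

    	*packets = *bytes = 0;
    	for_each_possible_cpu(cpu) {
    		const struct demo_lstats *s = &per_cpu(demo_lstats, cpu);

    		*packets += s->packets;
    		*bytes += s->bytes;
    	}
    }
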
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 2ffa3a59e704..9997081c6dae 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -2155,7 +2155,7 @@ static void eth_update_mib_counters(struct mv643xx_private *mp)
for (offset = ETH_MIB_BAD_OCTETS_RECEIVED;
offset <= ETH_MIB_FRAMES_1024_TO_MAX_OCTETS;
offset += 4)
- *(u32 *)((char *)p + offset) = read_mib(mp, offset);
+ *(u32 *)((char *)p + offset) += read_mib(mp, offset);
p->good_octets_sent += read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_LOW);
p->good_octets_sent +=
@@ -2164,7 +2164,7 @@ static void eth_update_mib_counters(struct mv643xx_private *mp)
for (offset = ETH_MIB_GOOD_FRAMES_SENT;
offset <= ETH_MIB_LATE_COLLISION;
offset += 4)
- *(u32 *)((char *)p + offset) = read_mib(mp, offset);
+ *(u32 *)((char *)p + offset) += read_mib(mp, offset);
}
/*
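
The mv643xx_eth hunk above changes the MIB update from assignment to accumulation, presumably because the hardware counters clear when read and overwriting the software copy would discard the running totals. A hedged sketch of accumulating clear-on-read counters (names and the offset are hypothetical):

    #include <linux/types.h>
    #include <linux/io.h>

    struct demo_mib {
    	u64 rx_good_octets;		/* software running total */
    };

    static void demo_update_mib(struct demo_mib *mib, void __iomem *base)
    {
    	/* add the just-read (and now cleared) hardware value to the total
    	 * instead of assigning over it */
    	mib->rx_good_octets += readl(base + 0x1000 /* hypothetical offset */);
    }
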
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index fdbb0d7213b0..806081b59733 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -2416,7 +2416,6 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
* firmware image, and set tx.boundary to 4KB.
*/
-#define PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE 0x0132
#define PCI_DEVICE_ID_INTEL_E5000_PCIE23 0x25f7
#define PCI_DEVICE_ID_INTEL_E5000_PCIE47 0x25fa
diff --git a/drivers/net/myri_code.h b/drivers/net/myri_code.h
index e21ec9b2c706..ba7b8652c501 100644
--- a/drivers/net/myri_code.h
+++ b/drivers/net/myri_code.h
@@ -1,8 +1,8 @@
/* This is the Myrinet MCP code for LANai4.x */
/* Generated by cat $MYRI_HOME/lib/lanai/mcp4.dat > myri_code4.h */
-static unsigned int lanai4_code_off = 0x0000; /* half-word offset */
-static unsigned char lanai4_code[76256] __initdata = {
+static unsigned int __devinitdata lanai4_code_off = 0x0000; /* half-word offset */
+static unsigned char __devinitdata lanai4_code[76256] = {
0xF2,0x0E,
0xFE,0x00, 0xC2,0x90, 0x00,0x00, 0x07,0x88, 0x00,0x08, 0xE0,0x01, 0x01,0x4C, 0x97,0x93,
0xFF,0xFC, 0xE0,0x00, 0x00,0x14, 0x00,0x00, 0x00,0x01, 0x00,0x00, 0x00,0x00, 0x92,0x93,
@@ -4774,8 +4774,8 @@ static unsigned char lanai4_code[76256] __initdata = {
/* This is the LANai data */
-static unsigned int lanai4_data_off = 0x94F0; /* half-word offset */
-static unsigned char lanai4_data[20472] __initdata;
+static unsigned int __devinitdata lanai4_data_off = 0x94F0; /* half-word offset */
+static unsigned char __devinitdata lanai4_data[20472];
#ifdef SYMBOL_DEFINES_COMPILED
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index 466b484c9fa4..7747bfd99f91 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -168,7 +168,7 @@ static int myri_do_handshake(struct myri_eth *mp)
return 0;
}
-static int myri_load_lanai(struct myri_eth *mp)
+static int __devinit myri_load_lanai(struct myri_eth *mp)
{
struct net_device *dev = mp->dev;
struct myri_shmem __iomem *shmem = mp->shmem;
@@ -891,7 +891,7 @@ static void dump_eeprom(struct myri_eth *mp)
}
#endif
-static int __init myri_ether_init(struct sbus_dev *sdev)
+static int __devinit myri_ether_init(struct sbus_dev *sdev)
{
static int num;
static unsigned version_printed;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index f1c75751cab7..27f90b2139c0 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -214,6 +214,7 @@ static struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_2 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(0x1259, 0xc107), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
{ PCI_VENDOR_ID_LINKSYS, 0x1032,
PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
@@ -1396,41 +1397,6 @@ static void rtl8169_netpoll(struct net_device *dev)
}
#endif
-static void __rtl8169_set_mac_addr(struct net_device *dev, void __iomem *ioaddr)
-{
- unsigned int i, j;
-
- RTL_W8(Cfg9346, Cfg9346_Unlock);
- for (i = 0; i < 2; i++) {
- __le32 l = 0;
-
- for (j = 0; j < 4; j++) {
- l <<= 8;
- l |= dev->dev_addr[4*i + j];
- }
- RTL_W32(MAC0 + 4*i, cpu_to_be32(l));
- }
- RTL_W8(Cfg9346, Cfg9346_Lock);
-}
-
-static int rtl8169_set_mac_addr(struct net_device *dev, void *p)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- struct sockaddr *addr = p;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
-
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
-
- if (netif_running(dev)) {
- spin_lock_irq(&tp->lock);
- __rtl8169_set_mac_addr(dev, tp->mmio_addr);
- spin_unlock_irq(&tp->lock);
- }
- return 0;
-}
-
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
void __iomem *ioaddr)
{
@@ -1680,7 +1646,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->stop = rtl8169_close;
dev->tx_timeout = rtl8169_tx_timeout;
dev->set_multicast_list = rtl8169_set_rx_mode;
- dev->set_mac_address = rtl8169_set_mac_addr;
dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
dev->irq = pdev->irq;
dev->base_addr = (unsigned long) ioaddr;
@@ -1928,8 +1893,6 @@ rtl8169_hw_start(struct net_device *dev)
/* Enable all known interrupts by setting the interrupt mask. */
RTL_W16(IntrMask, rtl8169_intr_mask);
- __rtl8169_set_mac_addr(dev, ioaddr);
-
netif_start_queue(dev);
}
@@ -2700,6 +2663,7 @@ static void rtl8169_down(struct net_device *dev)
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
unsigned int poll_locked = 0;
+ unsigned int intrmask;
rtl8169_delete_timer(dev);
@@ -2738,8 +2702,11 @@ core_down:
* 2) dev->change_mtu
* -> rtl8169_poll can not be issued again and re-enable the
* interruptions. Let's simply issue the IRQ down sequence again.
+ *
+	 * No loop if hotplugged or major error (0xffff).
*/
- if (RTL_R16(IntrMask))
+ intrmask = RTL_R16(IntrMask);
+ if (intrmask && (intrmask != 0xffff))
goto core_down;
rtl8169_tx_clear(tp);
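
The r8169 hunk above re-runs the IRQ-down sequence only while the interrupt mask reads back non-zero and not 0xffff; reads from a PCI device that has been hot-unplugged (or has failed badly) typically return all ones, so looping on that value would spin forever. A minimal sketch of the check, with a hypothetical register offset:

    #include <linux/types.h>
    #include <linux/io.h>

    static int demo_device_still_present(void __iomem *ioaddr)
    {
    	u16 intr_mask = readw(ioaddr + 0x3c);	/* hypothetical IntrMask offset */

    	/* all ones usually means the device is gone, not "interrupts enabled" */
    	return intr_mask != 0xffff;
    }
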
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index a231ab7d28dd..33569ec9dbfc 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -5985,6 +5985,11 @@ static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
((RxD3_t*)rxdp)->Buffer1_ptr = *temp1;
} else {
*skb = dev_alloc_skb(size);
+ if (!(*skb)) {
+ DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
+ dev->name);
+ return -ENOMEM;
+ }
((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 =
pci_map_single(sp->pdev, (*skb)->data,
dev->mtu + 4,
@@ -6007,7 +6012,11 @@ static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
((RxD3_t*)rxdp)->Buffer2_ptr = *temp2;
} else {
*skb = dev_alloc_skb(size);
-
+ if (!(*skb)) {
+ DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
+ dev->name);
+ return -ENOMEM;
+ }
((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 =
pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index db2324939b69..1eae16b72b4b 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2903,7 +2903,7 @@ sbmac_init_module(void)
dev = alloc_etherdev(sizeof(struct sbmac_softc));
if (!dev)
- return -ENOMEM; /* return ENOMEM */
+ return -ENOMEM;
printk(KERN_DEBUG "sbmac: configuring MAC at %lx\n", port);
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index a4a58e4e93a1..b2949035f66a 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -11,8 +11,7 @@
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * the Free Software Foundation; either version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -43,7 +42,7 @@
#include "skge.h"
#define DRV_NAME "skge"
-#define DRV_VERSION "1.8"
+#define DRV_VERSION "1.9"
#define PFX DRV_NAME " "
#define DEFAULT_TX_RING_SIZE 128
@@ -197,8 +196,8 @@ static u32 skge_supported_modes(const struct skge_hw *hw)
else if (hw->chip_id == CHIP_ID_YUKON)
supported &= ~SUPPORTED_1000baseT_Half;
} else
- supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
- | SUPPORTED_Autoneg;
+ supported = SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half
+ | SUPPORTED_FIBRE | SUPPORTED_Autoneg;
return supported;
}
@@ -487,31 +486,37 @@ static void skge_get_pauseparam(struct net_device *dev,
{
struct skge_port *skge = netdev_priv(dev);
- ecmd->tx_pause = (skge->flow_control == FLOW_MODE_LOC_SEND)
- || (skge->flow_control == FLOW_MODE_SYMMETRIC);
- ecmd->rx_pause = (skge->flow_control == FLOW_MODE_REM_SEND)
- || (skge->flow_control == FLOW_MODE_SYMMETRIC);
+ ecmd->rx_pause = (skge->flow_control == FLOW_MODE_SYMMETRIC)
+ || (skge->flow_control == FLOW_MODE_SYM_OR_REM);
+ ecmd->tx_pause = ecmd->rx_pause || (skge->flow_control == FLOW_MODE_LOC_SEND);
- ecmd->autoneg = skge->autoneg;
+ ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause;
}
static int skge_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *ecmd)
{
struct skge_port *skge = netdev_priv(dev);
+ struct ethtool_pauseparam old;
- skge->autoneg = ecmd->autoneg;
- if (ecmd->rx_pause && ecmd->tx_pause)
- skge->flow_control = FLOW_MODE_SYMMETRIC;
- else if (ecmd->rx_pause && !ecmd->tx_pause)
- skge->flow_control = FLOW_MODE_REM_SEND;
- else if (!ecmd->rx_pause && ecmd->tx_pause)
- skge->flow_control = FLOW_MODE_LOC_SEND;
- else
- skge->flow_control = FLOW_MODE_NONE;
+ skge_get_pauseparam(dev, &old);
+
+ if (ecmd->autoneg != old.autoneg)
+ skge->flow_control = ecmd->autoneg ? FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC;
+ else {
+ if (ecmd->rx_pause && ecmd->tx_pause)
+ skge->flow_control = FLOW_MODE_SYMMETRIC;
+ else if (ecmd->rx_pause && !ecmd->tx_pause)
+ skge->flow_control = FLOW_MODE_SYM_OR_REM;
+ else if (!ecmd->rx_pause && ecmd->tx_pause)
+ skge->flow_control = FLOW_MODE_LOC_SEND;
+ else
+ skge->flow_control = FLOW_MODE_NONE;
+ }
if (netif_running(dev))
skge_phy_reset(skge);
+
return 0;
}
@@ -854,6 +859,23 @@ static int skge_rx_fill(struct net_device *dev)
return 0;
}
+static const char *skge_pause(enum pause_status status)
+{
+ switch(status) {
+ case FLOW_STAT_NONE:
+ return "none";
+ case FLOW_STAT_REM_SEND:
+ return "rx only";
+ case FLOW_STAT_LOC_SEND:
+		return "tx only";
+	case FLOW_STAT_SYMMETRIC:	/* Both stations may send PAUSE */
+		return "both";
+	default:
+		return "indeterminate";
+ }
+}
+
+
static void skge_link_up(struct skge_port *skge)
{
skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
@@ -862,16 +884,13 @@ static void skge_link_up(struct skge_port *skge)
netif_carrier_on(skge->netdev);
netif_wake_queue(skge->netdev);
- if (netif_msg_link(skge))
+ if (netif_msg_link(skge)) {
printk(KERN_INFO PFX
"%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
skge->netdev->name, skge->speed,
skge->duplex == DUPLEX_FULL ? "full" : "half",
- (skge->flow_control == FLOW_MODE_NONE) ? "none" :
- (skge->flow_control == FLOW_MODE_LOC_SEND) ? "tx only" :
- (skge->flow_control == FLOW_MODE_REM_SEND) ? "rx only" :
- (skge->flow_control == FLOW_MODE_SYMMETRIC) ? "tx and rx" :
- "unknown");
+ skge_pause(skge->flow_status));
+ }
}
static void skge_link_down(struct skge_port *skge)
@@ -884,6 +903,29 @@ static void skge_link_down(struct skge_port *skge)
printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name);
}
+
+static void xm_link_down(struct skge_hw *hw, int port)
+{
+ struct net_device *dev = hw->dev[port];
+ struct skge_port *skge = netdev_priv(dev);
+ u16 cmd, msk;
+
+ if (hw->phy_type == SK_PHY_XMAC) {
+ msk = xm_read16(hw, port, XM_IMSK);
+ msk |= XM_IS_INP_ASS | XM_IS_LIPA_RC | XM_IS_RX_PAGE | XM_IS_AND;
+ xm_write16(hw, port, XM_IMSK, msk);
+ }
+
+ cmd = xm_read16(hw, port, XM_MMU_CMD);
+ cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
+ xm_write16(hw, port, XM_MMU_CMD, cmd);
+ /* dummy read to ensure writing */
+ (void) xm_read16(hw, port, XM_MMU_CMD);
+
+ if (netif_carrier_ok(dev))
+ skge_link_down(skge);
+}
+
static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
{
int i;
@@ -992,7 +1034,15 @@ static const u16 phy_pause_map[] = {
[FLOW_MODE_NONE] = 0,
[FLOW_MODE_LOC_SEND] = PHY_AN_PAUSE_ASYM,
[FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP,
- [FLOW_MODE_REM_SEND] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
+ [FLOW_MODE_SYM_OR_REM] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
+};
+
+/* special defines for FIBER (88E1011S only) */
+static const u16 fiber_pause_map[] = {
+ [FLOW_MODE_NONE] = PHY_X_P_NO_PAUSE,
+ [FLOW_MODE_LOC_SEND] = PHY_X_P_ASYM_MD,
+ [FLOW_MODE_SYMMETRIC] = PHY_X_P_SYM_MD,
+ [FLOW_MODE_SYM_OR_REM] = PHY_X_P_BOTH_MD,
};
@@ -1008,14 +1058,7 @@ static void bcom_check_link(struct skge_hw *hw, int port)
status = xm_phy_read(hw, port, PHY_BCOM_STAT);
if ((status & PHY_ST_LSYNC) == 0) {
- u16 cmd = xm_read16(hw, port, XM_MMU_CMD);
- cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
- xm_write16(hw, port, XM_MMU_CMD, cmd);
- /* dummy read to ensure writing */
- (void) xm_read16(hw, port, XM_MMU_CMD);
-
- if (netif_carrier_ok(dev))
- skge_link_down(skge);
+ xm_link_down(hw, port);
return;
}
@@ -1048,20 +1091,19 @@ static void bcom_check_link(struct skge_hw *hw, int port)
return;
}
-
/* We are using IEEE 802.3z/D5.0 Table 37-4 */
switch (aux & PHY_B_AS_PAUSE_MSK) {
case PHY_B_AS_PAUSE_MSK:
- skge->flow_control = FLOW_MODE_SYMMETRIC;
+ skge->flow_status = FLOW_STAT_SYMMETRIC;
break;
case PHY_B_AS_PRR:
- skge->flow_control = FLOW_MODE_REM_SEND;
+ skge->flow_status = FLOW_STAT_REM_SEND;
break;
case PHY_B_AS_PRT:
- skge->flow_control = FLOW_MODE_LOC_SEND;
+ skge->flow_status = FLOW_STAT_LOC_SEND;
break;
default:
- skge->flow_control = FLOW_MODE_NONE;
+ skge->flow_status = FLOW_STAT_NONE;
}
skge->speed = SPEED_1000;
}
@@ -1191,17 +1233,7 @@ static void xm_phy_init(struct skge_port *skge)
if (skge->advertising & ADVERTISED_1000baseT_Full)
ctrl |= PHY_X_AN_FD;
- switch(skge->flow_control) {
- case FLOW_MODE_NONE:
- ctrl |= PHY_X_P_NO_PAUSE;
- break;
- case FLOW_MODE_LOC_SEND:
- ctrl |= PHY_X_P_ASYM_MD;
- break;
- case FLOW_MODE_SYMMETRIC:
- ctrl |= PHY_X_P_BOTH_MD;
- break;
- }
+ ctrl |= fiber_pause_map[skge->flow_control];
xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl);
@@ -1235,14 +1267,7 @@ static void xm_check_link(struct net_device *dev)
status = xm_phy_read(hw, port, PHY_XMAC_STAT);
if ((status & PHY_ST_LSYNC) == 0) {
- u16 cmd = xm_read16(hw, port, XM_MMU_CMD);
- cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
- xm_write16(hw, port, XM_MMU_CMD, cmd);
- /* dummy read to ensure writing */
- (void) xm_read16(hw, port, XM_MMU_CMD);
-
- if (netif_carrier_ok(dev))
- skge_link_down(skge);
+ xm_link_down(hw, port);
return;
}
@@ -1276,15 +1301,20 @@ static void xm_check_link(struct net_device *dev)
}
/* We are using IEEE 802.3z/D5.0 Table 37-4 */
- if (lpa & PHY_X_P_SYM_MD)
- skge->flow_control = FLOW_MODE_SYMMETRIC;
- else if ((lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD)
- skge->flow_control = FLOW_MODE_REM_SEND;
- else if ((lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD)
- skge->flow_control = FLOW_MODE_LOC_SEND;
+ if ((skge->flow_control == FLOW_MODE_SYMMETRIC ||
+ skge->flow_control == FLOW_MODE_SYM_OR_REM) &&
+ (lpa & PHY_X_P_SYM_MD))
+ skge->flow_status = FLOW_STAT_SYMMETRIC;
+ else if (skge->flow_control == FLOW_MODE_SYM_OR_REM &&
+ (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD)
+ /* Enable PAUSE receive, disable PAUSE transmit */
+ skge->flow_status = FLOW_STAT_REM_SEND;
+ else if (skge->flow_control == FLOW_MODE_LOC_SEND &&
+ (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD)
+ /* Disable PAUSE receive, enable PAUSE transmit */
+ skge->flow_status = FLOW_STAT_LOC_SEND;
else
- skge->flow_control = FLOW_MODE_NONE;
-
+ skge->flow_status = FLOW_STAT_NONE;
skge->speed = SPEED_1000;
}
@@ -1568,6 +1598,10 @@ static void genesis_mac_intr(struct skge_hw *hw, int port)
printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
skge->netdev->name, status);
+ if (hw->phy_type == SK_PHY_XMAC &&
+ (status & (XM_IS_INP_ASS | XM_IS_LIPA_RC)))
+ xm_link_down(hw, port);
+
if (status & XM_IS_TXF_UR) {
xm_write32(hw, port, XM_MODE, XM_MD_FTF);
++skge->net_stats.tx_fifo_errors;
@@ -1582,7 +1616,7 @@ static void genesis_link_up(struct skge_port *skge)
{
struct skge_hw *hw = skge->hw;
int port = skge->port;
- u16 cmd;
+ u16 cmd, msk;
u32 mode;
cmd = xm_read16(hw, port, XM_MMU_CMD);
@@ -1591,8 +1625,8 @@ static void genesis_link_up(struct skge_port *skge)
* enabling pause frame reception is required for 1000BT
* because the XMAC is not reset if the link is going down
*/
- if (skge->flow_control == FLOW_MODE_NONE ||
- skge->flow_control == FLOW_MODE_LOC_SEND)
+ if (skge->flow_status == FLOW_STAT_NONE ||
+ skge->flow_status == FLOW_STAT_LOC_SEND)
/* Disable Pause Frame Reception */
cmd |= XM_MMU_IGN_PF;
else
@@ -1602,8 +1636,8 @@ static void genesis_link_up(struct skge_port *skge)
xm_write16(hw, port, XM_MMU_CMD, cmd);
mode = xm_read32(hw, port, XM_MODE);
- if (skge->flow_control == FLOW_MODE_SYMMETRIC ||
- skge->flow_control == FLOW_MODE_LOC_SEND) {
+	if (skge->flow_status == FLOW_STAT_SYMMETRIC ||
+ skge->flow_status == FLOW_STAT_LOC_SEND) {
/*
* Configure Pause Frame Generation
* Use internal and external Pause Frame Generation.
@@ -1631,7 +1665,11 @@ static void genesis_link_up(struct skge_port *skge)
}
xm_write32(hw, port, XM_MODE, mode);
- xm_write16(hw, port, XM_IMSK, XM_DEF_MSK);
+ msk = XM_DEF_MSK;
+ if (hw->phy_type != SK_PHY_XMAC)
+ msk |= XM_IS_INP_ASS; /* disable GP0 interrupt bit */
+
+ xm_write16(hw, port, XM_IMSK, msk);
xm_read16(hw, port, XM_ISRC);
/* get MMU Command Reg. */
@@ -1779,11 +1817,17 @@ static void yukon_init(struct skge_hw *hw, int port)
adv |= PHY_M_AN_10_FD;
if (skge->advertising & ADVERTISED_10baseT_Half)
adv |= PHY_M_AN_10_HD;
- } else /* special defines for FIBER (88E1011S only) */
- adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;
- /* Set Flow-control capabilities */
- adv |= phy_pause_map[skge->flow_control];
+ /* Set Flow-control capabilities */
+ adv |= phy_pause_map[skge->flow_control];
+ } else {
+ if (skge->advertising & ADVERTISED_1000baseT_Full)
+ adv |= PHY_M_AN_1000X_AFD;
+ if (skge->advertising & ADVERTISED_1000baseT_Half)
+ adv |= PHY_M_AN_1000X_AHD;
+
+ adv |= fiber_pause_map[skge->flow_control];
+ }
/* Restart Auto-negotiation */
ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
@@ -1917,6 +1961,11 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
case FLOW_MODE_LOC_SEND:
/* disable Rx flow-control */
reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
+ break;
+ case FLOW_MODE_SYMMETRIC:
+ case FLOW_MODE_SYM_OR_REM:
+ /* enable Tx & Rx flow-control */
+ break;
}
gma_write16(hw, port, GM_GP_CTRL, reg);
@@ -2111,13 +2160,11 @@ static void yukon_link_down(struct skge_port *skge)
ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
gma_write16(hw, port, GM_GP_CTRL, ctrl);
- if (skge->flow_control == FLOW_MODE_REM_SEND) {
+ if (skge->flow_status == FLOW_STAT_REM_SEND) {
+ ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
+ ctrl |= PHY_M_AN_ASP;
/* restore Asymmetric Pause bit */
- gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
- gm_phy_read(hw, port,
- PHY_MARV_AUNE_ADV)
- | PHY_M_AN_ASP);
-
+ gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl);
}
yukon_reset(hw, port);
@@ -2164,19 +2211,19 @@ static void yukon_phy_intr(struct skge_port *skge)
/* We are using IEEE 802.3z/D5.0 Table 37-4 */
switch (phystat & PHY_M_PS_PAUSE_MSK) {
case PHY_M_PS_PAUSE_MSK:
- skge->flow_control = FLOW_MODE_SYMMETRIC;
+ skge->flow_status = FLOW_STAT_SYMMETRIC;
break;
case PHY_M_PS_RX_P_EN:
- skge->flow_control = FLOW_MODE_REM_SEND;
+ skge->flow_status = FLOW_STAT_REM_SEND;
break;
case PHY_M_PS_TX_P_EN:
- skge->flow_control = FLOW_MODE_LOC_SEND;
+ skge->flow_status = FLOW_STAT_LOC_SEND;
break;
default:
- skge->flow_control = FLOW_MODE_NONE;
+ skge->flow_status = FLOW_STAT_NONE;
}
- if (skge->flow_control == FLOW_MODE_NONE ||
+ if (skge->flow_status == FLOW_STAT_NONE ||
(skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
else
@@ -3399,7 +3446,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
/* Auto speed and flow control */
skge->autoneg = AUTONEG_ENABLE;
- skge->flow_control = FLOW_MODE_SYMMETRIC;
+ skge->flow_control = FLOW_MODE_SYM_OR_REM;
skge->duplex = -1;
skge->speed = -1;
skge->advertising = skge_supported_modes(hw);
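
The skge rework above separates the requested flow-control mode (flow_control) from the negotiated result (flow_status) and replaces the per-mode switch statements with lookup tables (phy_pause_map, fiber_pause_map) indexed by the mode enum. A sketch of the table-lookup idea; the enum mirrors the skge ordering, but the bit values are placeholders, not real PHY definitions:

    #include <linux/types.h>

    enum demo_fc_mode {
    	DEMO_FC_NONE = 1,		/* starts at 1, as in the skge enum */
    	DEMO_FC_LOC_SEND,
    	DEMO_FC_SYMMETRIC,
    	DEMO_FC_SYM_OR_REM,
    };

    static const u16 demo_pause_adv[] = {	/* placeholder advertisement bits */
    	[DEMO_FC_NONE]		= 0x0000,
    	[DEMO_FC_LOC_SEND]	= 0x0800,
    	[DEMO_FC_SYMMETRIC]	= 0x0400,
    	[DEMO_FC_SYM_OR_REM]	= 0x0c00,
    };

    static u16 demo_pause_advert_bits(enum demo_fc_mode mode)
    {
    	/* one indexed lookup replaces the per-mode switch */
    	return demo_pause_adv[mode];
    }
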
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index d0b47d46cf9d..537c0aaa1db8 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2195,7 +2195,8 @@ enum {
XM_IS_RX_COMP = 1<<0, /* Bit 0: Frame Rx Complete */
};
-#define XM_DEF_MSK (~(XM_IS_RXC_OV | XM_IS_TXC_OV | XM_IS_RXF_OV | XM_IS_TXF_UR))
+#define XM_DEF_MSK (~(XM_IS_INP_ASS | XM_IS_LIPA_RC | \
+ XM_IS_RXF_OV | XM_IS_TXF_UR))
/* XM_HW_CFG 16 bit r/w Hardware Config Register */
@@ -2426,13 +2427,24 @@ struct skge_hw {
struct mutex phy_mutex;
};
-enum {
- FLOW_MODE_NONE = 0, /* No Flow-Control */
- FLOW_MODE_LOC_SEND = 1, /* Local station sends PAUSE */
- FLOW_MODE_REM_SEND = 2, /* Symmetric or just remote */
+enum pause_control {
+ FLOW_MODE_NONE = 1, /* No Flow-Control */
+ FLOW_MODE_LOC_SEND = 2, /* Local station sends PAUSE */
FLOW_MODE_SYMMETRIC = 3, /* Both stations may send PAUSE */
+ FLOW_MODE_SYM_OR_REM = 4, /* Both stations may send PAUSE or
+ * just the remote station may send PAUSE
+ */
+};
+
+enum pause_status {
+	FLOW_STAT_INDETERMINATED=0,	/* indeterminate */
+ FLOW_STAT_NONE, /* No Flow Control */
+ FLOW_STAT_REM_SEND, /* Remote Station sends PAUSE */
+ FLOW_STAT_LOC_SEND, /* Local station sends PAUSE */
+	FLOW_STAT_SYMMETRIC,	/* Both stations may send PAUSE */
};
+
struct skge_port {
u32 msg_enable;
struct skge_hw *hw;
@@ -2445,9 +2457,10 @@ struct skge_port {
struct net_device_stats net_stats;
struct work_struct link_thread;
+ enum pause_control flow_control;
+ enum pause_status flow_status;
u8 rx_csum;
u8 blink_on;
- u8 flow_control;
u8 wol;
u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */
u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 459c845d6648..16616f5440d0 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -10,8 +10,7 @@
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * the Free Software Foundation; either version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -50,7 +49,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "1.9"
+#define DRV_VERSION "1.10"
#define PFX DRV_NAME " "
/*
@@ -96,9 +95,9 @@ static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
-static int idle_timeout = 100;
+static int idle_timeout = 0;
module_param(idle_timeout, int, 0);
-MODULE_PARM_DESC(idle_timeout, "Idle timeout workaround for lost interrupts (ms)");
+MODULE_PARM_DESC(idle_timeout, "Watchdog timer for lost interrupts (ms)");
static const struct pci_device_id sky2_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
@@ -284,6 +283,31 @@ static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
gma_write16(hw, port, GM_RX_CTRL, reg);
}
+/* flow control to advertise bits */
+static const u16 copper_fc_adv[] = {
+ [FC_NONE] = 0,
+ [FC_TX] = PHY_M_AN_ASP,
+ [FC_RX] = PHY_M_AN_PC,
+ [FC_BOTH] = PHY_M_AN_PC | PHY_M_AN_ASP,
+};
+
+/* flow control to advertise bits when using 1000BaseX */
+static const u16 fiber_fc_adv[] = {
+ [FC_BOTH] = PHY_M_P_BOTH_MD_X,
+ [FC_TX] = PHY_M_P_ASYM_MD_X,
+ [FC_RX] = PHY_M_P_SYM_MD_X,
+ [FC_NONE] = PHY_M_P_NO_PAUSE_X,
+};
+
+/* flow control to GMA disable bits */
+static const u16 gm_fc_disable[] = {
+ [FC_NONE] = GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS,
+ [FC_TX] = GM_GPCR_FC_RX_DIS,
+ [FC_RX] = GM_GPCR_FC_TX_DIS,
+ [FC_BOTH] = 0,
+};
+
+
static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
{
struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
@@ -356,16 +380,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
}
- ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
- if (sky2->autoneg == AUTONEG_DISABLE)
- ctrl &= ~PHY_CT_ANE;
- else
- ctrl |= PHY_CT_ANE;
-
- ctrl |= PHY_CT_RESET;
- gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
-
- ctrl = 0;
+ ctrl = PHY_CT_RESET;
ct1000 = 0;
adv = PHY_AN_CSMA;
reg = 0;
@@ -384,20 +399,16 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
adv |= PHY_M_AN_10_FD;
if (sky2->advertising & ADVERTISED_10baseT_Half)
adv |= PHY_M_AN_10_HD;
+
+ adv |= copper_fc_adv[sky2->flow_mode];
} else { /* special defines for FIBER (88E1040S only) */
if (sky2->advertising & ADVERTISED_1000baseT_Full)
adv |= PHY_M_AN_1000X_AFD;
if (sky2->advertising & ADVERTISED_1000baseT_Half)
adv |= PHY_M_AN_1000X_AHD;
- }
- /* Set Flow-control capabilities */
- if (sky2->tx_pause && sky2->rx_pause)
- adv |= PHY_AN_PAUSE_CAP; /* symmetric */
- else if (sky2->rx_pause && !sky2->tx_pause)
- adv |= PHY_AN_PAUSE_ASYM | PHY_AN_PAUSE_CAP;
- else if (!sky2->rx_pause && sky2->tx_pause)
- adv |= PHY_AN_PAUSE_ASYM; /* local */
+ adv |= fiber_fc_adv[sky2->flow_mode];
+ }
/* Restart Auto-negotiation */
ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
@@ -422,25 +433,17 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
if (sky2->duplex == DUPLEX_FULL) {
reg |= GM_GPCR_DUP_FULL;
ctrl |= PHY_CT_DUP_MD;
- } else if (sky2->speed != SPEED_1000 && hw->chip_id != CHIP_ID_YUKON_EC_U) {
- /* Turn off flow control for 10/100mbps */
- sky2->rx_pause = 0;
- sky2->tx_pause = 0;
- }
+ } else if (sky2->speed < SPEED_1000)
+ sky2->flow_mode = FC_NONE;
- if (!sky2->rx_pause)
- reg |= GM_GPCR_FC_RX_DIS;
- if (!sky2->tx_pause)
- reg |= GM_GPCR_FC_TX_DIS;
+ reg |= gm_fc_disable[sky2->flow_mode];
/* Forward pause packets to GMAC? */
- if (sky2->tx_pause || sky2->rx_pause)
+ if (sky2->flow_mode & FC_RX)
sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
else
sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
-
- ctrl |= PHY_CT_RESET;
}
gma_write16(hw, port, GM_GP_CTRL, reg);
@@ -683,7 +686,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
- sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
+ sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 512/8);
sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
if (hw->dev[port]->mtu > ETH_DATA_LEN) {
/* set Tx GMAC FIFO Almost Empty Threshold */
@@ -695,16 +698,10 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
}
-/* Assign Ram Buffer allocation.
- * start and end are in units of 4k bytes
- * ram registers are in units of 64bit words
- */
-static void sky2_ramset(struct sky2_hw *hw, u16 q, u8 startk, u8 endk)
+/* Assign Ram Buffer allocation in units of 64bit (8 bytes) */
+static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 end)
{
- u32 start, end;
-
- start = startk * 4096/8;
- end = (endk * 4096/8) - 1;
+ pr_debug(PFX "q %d %#x %#x\n", q, start, end);
sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
sky2_write32(hw, RB_ADDR(q, RB_START), start);
@@ -713,7 +710,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u8 startk, u8 endk)
sky2_write32(hw, RB_ADDR(q, RB_RP), start);
if (q == Q_R1 || q == Q_R2) {
- u32 space = (endk - startk) * 4096/8;
+ u32 space = end - start + 1;
u32 tp = space - space/4;
/* On receive queue's set the thresholds
@@ -1195,19 +1192,16 @@ static int sky2_up(struct net_device *dev)
sky2_mac_init(hw, port);
- /* Determine available ram buffer space (in 4K blocks).
- * Note: not sure about the FE setting below yet
- */
- if (hw->chip_id == CHIP_ID_YUKON_FE)
- ramsize = 4;
- else
- ramsize = sky2_read8(hw, B2_E_0);
+ /* Determine available ram buffer space in qwords. */
+ ramsize = sky2_read8(hw, B2_E_0) * 4096/8;
- /* Give transmitter one third (rounded up) */
- rxspace = ramsize - (ramsize + 2) / 3;
+ if (ramsize > 6*1024/8)
+ rxspace = ramsize - (ramsize + 2) / 3;
+ else
+ rxspace = ramsize / 2;
- sky2_ramset(hw, rxqaddr[port], 0, rxspace);
- sky2_ramset(hw, txqaddr[port], rxspace, ramsize);
+ sky2_ramset(hw, rxqaddr[port], 0, rxspace-1);
+ sky2_ramset(hw, txqaddr[port], rxspace, ramsize-1);
/* Make sure SyncQ is disabled */
sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
@@ -1499,6 +1493,11 @@ static int sky2_down(struct net_device *dev)
/* Stop more packets from being queued */
netif_stop_queue(dev);
+ /* Disable port IRQ */
+ imask = sky2_read32(hw, B0_IMSK);
+ imask &= ~portirq_msk[port];
+ sky2_write32(hw, B0_IMSK, imask);
+
sky2_gmac_reset(hw, port);
/* Stop transmitter */
@@ -1549,11 +1548,6 @@ static int sky2_down(struct net_device *dev)
sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
- /* Disable port IRQ */
- imask = sky2_read32(hw, B0_IMSK);
- imask &= ~portirq_msk[port];
- sky2_write32(hw, B0_IMSK, imask);
-
sky2_phy_power(hw, port, 0);
/* turn off LED's */
@@ -1605,6 +1599,12 @@ static void sky2_link_up(struct sky2_port *sky2)
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
u16 reg;
+ static const char *fc_name[] = {
+ [FC_NONE] = "none",
+ [FC_TX] = "tx",
+ [FC_RX] = "rx",
+ [FC_BOTH] = "both",
+ };
/* enable Rx/Tx */
reg = gma_read16(hw, port, GM_GP_CTRL);
@@ -1648,8 +1648,7 @@ static void sky2_link_up(struct sky2_port *sky2)
"%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
sky2->netdev->name, sky2->speed,
sky2->duplex == DUPLEX_FULL ? "full" : "half",
- (sky2->tx_pause && sky2->rx_pause) ? "both" :
- sky2->tx_pause ? "tx" : sky2->rx_pause ? "rx" : "none");
+ fc_name[sky2->flow_status]);
}
static void sky2_link_down(struct sky2_port *sky2)
@@ -1664,7 +1663,7 @@ static void sky2_link_down(struct sky2_port *sky2)
reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
gma_write16(hw, port, GM_GP_CTRL, reg);
- if (sky2->rx_pause && !sky2->tx_pause) {
+ if (sky2->flow_status == FC_RX) {
/* restore Asymmetric Pause bit */
gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
gm_phy_read(hw, port, PHY_MARV_AUNE_ADV)
@@ -1683,6 +1682,14 @@ static void sky2_link_down(struct sky2_port *sky2)
sky2_phy_init(hw, port);
}
+static enum flow_control sky2_flow(int rx, int tx)
+{
+ if (rx)
+ return tx ? FC_BOTH : FC_RX;
+ else
+ return tx ? FC_TX : FC_NONE;
+}
+
static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
{
struct sky2_hw *hw = sky2->hw;
@@ -1703,39 +1710,20 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
}
sky2->speed = sky2_phy_speed(hw, aux);
- if (sky2->speed == SPEED_1000) {
- u16 ctl2 = gm_phy_read(hw, port, PHY_MARV_1000T_CTRL);
- u16 lpa2 = gm_phy_read(hw, port, PHY_MARV_1000T_STAT);
- if (lpa2 & PHY_B_1000S_MSF) {
- printk(KERN_ERR PFX "%s: master/slave fault",
- sky2->netdev->name);
- return -1;
- }
-
- if ((ctl2 & PHY_M_1000C_AFD) && (lpa2 & PHY_B_1000S_LP_FD))
- sky2->duplex = DUPLEX_FULL;
- else
- sky2->duplex = DUPLEX_HALF;
- } else {
- u16 adv = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
- if ((aux & adv) & PHY_AN_FULL)
- sky2->duplex = DUPLEX_FULL;
- else
- sky2->duplex = DUPLEX_HALF;
- }
+ sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
/* Pause bits are offset (9..8) */
if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)
aux >>= 6;
- sky2->rx_pause = (aux & PHY_M_PS_RX_P_EN) != 0;
- sky2->tx_pause = (aux & PHY_M_PS_TX_P_EN) != 0;
+ sky2->flow_status = sky2_flow(aux & PHY_M_PS_RX_P_EN,
+ aux & PHY_M_PS_TX_P_EN);
- if (sky2->duplex == DUPLEX_HALF && sky2->speed != SPEED_1000
+ if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000
&& hw->chip_id != CHIP_ID_YUKON_EC_U)
- sky2->rx_pause = sky2->tx_pause = 0;
+ sky2->flow_status = FC_NONE;
- if (sky2->rx_pause || sky2->tx_pause)
+ if (aux & PHY_M_PS_RX_P_EN)
sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
else
sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
@@ -1750,13 +1738,13 @@ static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
struct sky2_port *sky2 = netdev_priv(dev);
u16 istatus, phystat;
+ if (!netif_running(dev))
+ return;
+
spin_lock(&sky2->phy_lock);
istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
- if (!netif_running(dev))
- goto out;
-
if (netif_msg_intr(sky2))
printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
sky2->netdev->name, istatus, phystat);
@@ -1907,7 +1895,7 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
length, PCI_DMA_FROMDEVICE);
re->skb->ip_summed = CHECKSUM_NONE;
- __skb_put(skb, length);
+ skb_put(skb, length);
}
return skb;
}
@@ -1970,7 +1958,7 @@ static struct sk_buff *receive_new(struct sky2_port *sky2,
if (skb_shinfo(skb)->nr_frags)
skb_put_frags(skb, hdr_space, length);
else
- skb_put(skb, hdr_space);
+ skb_put(skb, length);
return skb;
}
@@ -2016,6 +2004,10 @@ oversize:
error:
++sky2->net_stats.rx_errors;
+ if (status & GMR_FS_RX_FF_OV) {
+ sky2->net_stats.rx_fifo_errors++;
+ goto resubmit;
+ }
if (netif_msg_rx_err(sky2) && net_ratelimit())
printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
@@ -2027,8 +2019,6 @@ error:
sky2->net_stats.rx_frame_errors++;
if (status & GMR_FS_CRC_ERR)
sky2->net_stats.rx_crc_errors++;
- if (status & GMR_FS_RX_FF_OV)
- sky2->net_stats.rx_fifo_errors++;
goto resubmit;
}
@@ -2220,8 +2210,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
/* PCI-Express uncorrectable Error occurred */
u32 pex_err;
- pex_err = sky2_pci_read32(hw,
- hw->err_cap + PCI_ERR_UNCOR_STATUS);
+ pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT);
if (net_ratelimit())
printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
@@ -2229,20 +2218,15 @@ static void sky2_hw_intr(struct sky2_hw *hw)
/* clear the interrupt */
sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
- sky2_pci_write32(hw,
- hw->err_cap + PCI_ERR_UNCOR_STATUS,
- 0xffffffffUL);
+ sky2_pci_write32(hw, PEX_UNC_ERR_STAT,
+ 0xffffffffUL);
sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
-
- /* In case of fatal error mask off to keep from getting stuck */
- if (pex_err & (PCI_ERR_UNC_POISON_TLP | PCI_ERR_UNC_FCP
- | PCI_ERR_UNC_DLP)) {
+ if (pex_err & PEX_FATAL_ERRORS) {
u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
hwmsk &= ~Y2_IS_PCI_EXP;
sky2_write32(hw, B0_HWE_IMSK, hwmsk);
}
-
}
if (status & Y2_HWE_L1_MASK)
@@ -2423,7 +2407,6 @@ static int sky2_reset(struct sky2_hw *hw)
u16 status;
u8 t8;
int i;
- u32 msk;
sky2_write8(hw, B0_CTST, CS_RST_CLR);
@@ -2464,13 +2447,9 @@ static int sky2_reset(struct sky2_hw *hw)
sky2_write8(hw, B0_CTST, CS_MRST_CLR);
/* clear any PEX errors */
- if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) {
- hw->err_cap = pci_find_ext_capability(hw->pdev, PCI_EXT_CAP_ID_ERR);
- if (hw->err_cap)
- sky2_pci_write32(hw,
- hw->err_cap + PCI_ERR_UNCOR_STATUS,
- 0xffffffffUL);
- }
+ if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
+ sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);
+
hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
hw->ports = 1;
@@ -2527,10 +2506,7 @@ static int sky2_reset(struct sky2_hw *hw)
sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
}
- msk = Y2_HWE_ALL_MASK;
- if (!hw->err_cap)
- msk &= ~Y2_IS_PCI_EXP;
- sky2_write32(hw, B0_HWE_IMSK, msk);
+ sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK);
for (i = 0; i < hw->ports; i++)
sky2_gmac_reset(hw, i);
@@ -2762,7 +2738,7 @@ static int sky2_nway_reset(struct net_device *dev)
{
struct sky2_port *sky2 = netdev_priv(dev);
- if (sky2->autoneg != AUTONEG_ENABLE)
+ if (!netif_running(dev) || sky2->autoneg != AUTONEG_ENABLE)
return -EINVAL;
sky2_phy_reinit(sky2);
@@ -2864,6 +2840,14 @@ static int sky2_set_mac_address(struct net_device *dev, void *p)
return 0;
}
+static void inline sky2_add_filter(u8 filter[8], const u8 *addr)
+{
+ u32 bit;
+
+ bit = ether_crc(ETH_ALEN, addr) & 63;
+ filter[bit >> 3] |= 1 << (bit & 7);
+}
+
static void sky2_set_multicast(struct net_device *dev)
{
struct sky2_port *sky2 = netdev_priv(dev);
@@ -2872,7 +2856,10 @@ static void sky2_set_multicast(struct net_device *dev)
struct dev_mc_list *list = dev->mc_list;
u16 reg;
u8 filter[8];
+ int rx_pause;
+ static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };
+ rx_pause = (sky2->flow_status == FC_RX || sky2->flow_status == FC_BOTH);
memset(filter, 0, sizeof(filter));
reg = gma_read16(hw, port, GM_RX_CTRL);
@@ -2880,18 +2867,19 @@ static void sky2_set_multicast(struct net_device *dev)
if (dev->flags & IFF_PROMISC) /* promiscuous */
reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
- else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > 16) /* all multicast */
+ else if (dev->flags & IFF_ALLMULTI)
memset(filter, 0xff, sizeof(filter));
- else if (dev->mc_count == 0) /* no multicast */
+ else if (dev->mc_count == 0 && !rx_pause)
reg &= ~GM_RXCR_MCF_ENA;
else {
int i;
reg |= GM_RXCR_MCF_ENA;
- for (i = 0; list && i < dev->mc_count; i++, list = list->next) {
- u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f;
- filter[bit / 8] |= 1 << (bit % 8);
- }
+ if (rx_pause)
+ sky2_add_filter(filter, pause_mc_addr);
+
+ for (i = 0; list && i < dev->mc_count; i++, list = list->next)
+ sky2_add_filter(filter, list->dmi_addr);
}
gma_write16(hw, port, GM_MC_ADDR_H1,
@@ -3004,8 +2992,20 @@ static void sky2_get_pauseparam(struct net_device *dev,
{
struct sky2_port *sky2 = netdev_priv(dev);
- ecmd->tx_pause = sky2->tx_pause;
- ecmd->rx_pause = sky2->rx_pause;
+ switch (sky2->flow_mode) {
+ case FC_NONE:
+ ecmd->tx_pause = ecmd->rx_pause = 0;
+ break;
+ case FC_TX:
+ ecmd->tx_pause = 1, ecmd->rx_pause = 0;
+ break;
+ case FC_RX:
+ ecmd->tx_pause = 0, ecmd->rx_pause = 1;
+ break;
+ case FC_BOTH:
+ ecmd->tx_pause = ecmd->rx_pause = 1;
+ }
+
ecmd->autoneg = sky2->autoneg;
}
@@ -3015,10 +3015,10 @@ static int sky2_set_pauseparam(struct net_device *dev,
struct sky2_port *sky2 = netdev_priv(dev);
sky2->autoneg = ecmd->autoneg;
- sky2->tx_pause = ecmd->tx_pause != 0;
- sky2->rx_pause = ecmd->rx_pause != 0;
+ sky2->flow_mode = sky2_flow(ecmd->rx_pause, ecmd->tx_pause);
- sky2_phy_reinit(sky2);
+ if (netif_running(dev))
+ sky2_phy_reinit(sky2);
return 0;
}
@@ -3238,7 +3238,11 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
dev->poll = sky2_poll;
dev->weight = NAPI_WEIGHT;
#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = sky2_netpoll;
+ /* Network console (only works on port 0)
+ * because netpoll makes assumptions about NAPI
+ */
+ if (port == 0)
+ dev->poll_controller = sky2_netpoll;
#endif
sky2 = netdev_priv(dev);
@@ -3248,8 +3252,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
/* Auto speed and flow control */
sky2->autoneg = AUTONEG_ENABLE;
- sky2->tx_pause = 1;
- sky2->rx_pause = 1;
+ sky2->flow_mode = FC_BOTH;
+
sky2->duplex = -1;
sky2->speed = -1;
sky2->advertising = sky2_supported_modes(hw);
@@ -3340,9 +3344,8 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
if (!hw->msi_detected) {
/* MSI test failed, go back to INTx mode */
- printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
- "switching to INTx mode. Please report this failure to "
- "the PCI maintainer and include system chipset information.\n",
+ printk(KERN_INFO PFX "%s: No interrupt generated using MSI, "
+ "switching to INTx mode.\n",
pci_name(pdev));
err = -EOPNOTSUPP;
@@ -3350,6 +3353,7 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
}
sky2_write32(hw, B0_IMSK, 0);
+ sky2_read32(hw, B0_IMSK);
free_irq(pdev->irq, hw);
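
Among the sky2 changes above, sky2_add_filter() hashes each multicast address into one of 64 buckets with ether_crc() and sets the matching bit in the 8-byte hardware filter; when Rx flow control is active, the PAUSE multicast address is added the same way. A short sketch of that hashing step (only the helper name is hypothetical):

    #include <linux/types.h>
    #include <linux/crc32.h>
    #include <linux/if_ether.h>

    static void demo_add_filter(u8 filter[8], const u8 addr[ETH_ALEN])
    {
    	/* hash into one of 64 buckets and set that bit of the 64-bit filter */
    	unsigned int bit = ether_crc(ETH_ALEN, addr) & 63;

    	filter[bit >> 3] |= 1 << (bit & 7);
    }
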
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index f66109a96d95..6d2a23f66c9a 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -6,15 +6,24 @@
#define ETH_JUMBO_MTU 9000 /* Maximum MTU supported */
-/* PCI device specific config registers */
+/* PCI config registers */
enum {
PCI_DEV_REG1 = 0x40,
PCI_DEV_REG2 = 0x44,
+ PCI_DEV_STATUS = 0x7c,
PCI_DEV_REG3 = 0x80,
PCI_DEV_REG4 = 0x84,
PCI_DEV_REG5 = 0x88,
};
+enum {
+ PEX_DEV_CAP = 0xe4,
+ PEX_DEV_CTRL = 0xe8,
+ PEX_DEV_STA = 0xea,
+ PEX_LNK_STAT = 0xf2,
+ PEX_UNC_ERR_STAT= 0x104,
+};
+
/* Yukon-2 */
enum pci_dev_reg_1 {
PCI_Y2_PIG_ENA = 1<<31, /* Enable Plug-in-Go (YUKON-2) */
@@ -63,6 +72,39 @@ enum pci_dev_reg_4 {
PCI_STATUS_REC_MASTER_ABORT | \
PCI_STATUS_REC_TARGET_ABORT | \
PCI_STATUS_PARITY)
+
+enum pex_dev_ctrl {
+ PEX_DC_MAX_RRS_MSK = 7<<12, /* Bit 14..12: Max. Read Request Size */
+ PEX_DC_EN_NO_SNOOP = 1<<11,/* Enable No Snoop */
+ PEX_DC_EN_AUX_POW = 1<<10,/* Enable AUX Power */
+ PEX_DC_EN_PHANTOM = 1<<9, /* Enable Phantom Functions */
+ PEX_DC_EN_EXT_TAG = 1<<8, /* Enable Extended Tag Field */
+ PEX_DC_MAX_PLS_MSK = 7<<5, /* Bit 7.. 5: Max. Payload Size Mask */
+ PEX_DC_EN_REL_ORD = 1<<4, /* Enable Relaxed Ordering */
+ PEX_DC_EN_UNS_RQ_RP = 1<<3, /* Enable Unsupported Request Reporting */
+ PEX_DC_EN_FAT_ER_RP = 1<<2, /* Enable Fatal Error Reporting */
+ PEX_DC_EN_NFA_ER_RP = 1<<1, /* Enable Non-Fatal Error Reporting */
+ PEX_DC_EN_COR_ER_RP = 1<<0, /* Enable Correctable Error Reporting */
+};
+#define PEX_DC_MAX_RD_RQ_SIZE(x) (((x)<<12) & PEX_DC_MAX_RRS_MSK)
+
+/* PEX_UNC_ERR_STAT PEX Uncorrectable Errors Status Register (Yukon-2) */
+enum pex_err {
+ PEX_UNSUP_REQ = 1<<20, /* Unsupported Request Error */
+
+ PEX_MALFOR_TLP = 1<<18, /* Malformed TLP */
+
+ PEX_UNEXP_COMP = 1<<16, /* Unexpected Completion */
+
+ PEX_COMP_TO = 1<<14, /* Completion Timeout */
+ PEX_FLOW_CTRL_P = 1<<13, /* Flow Control Protocol Error */
+ PEX_POIS_TLP = 1<<12, /* Poisoned TLP */
+
+ PEX_DATA_LINK_P = 1<<4, /* Data Link Protocol Error */
+ PEX_FATAL_ERRORS= (PEX_MALFOR_TLP | PEX_FLOW_CTRL_P | PEX_DATA_LINK_P),
+};
+
+
enum csr_regs {
B0_RAP = 0x0000,
B0_CTST = 0x0004,
@@ -1534,7 +1576,7 @@ enum {
GMR_FS_ANY_ERR = GMR_FS_RX_FF_OV | GMR_FS_CRC_ERR |
GMR_FS_FRAGMENT | GMR_FS_LONG_ERR |
- GMR_FS_MII_ERR | GMR_FS_BAD_FC |
+ GMR_FS_MII_ERR | GMR_FS_GOOD_FC | GMR_FS_BAD_FC |
GMR_FS_UN_SIZE | GMR_FS_JABBER,
};
@@ -1786,6 +1828,13 @@ struct rx_ring_info {
dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT];
};
+enum flow_control {
+ FC_NONE = 0,
+ FC_TX = 1,
+ FC_RX = 2,
+ FC_BOTH = 3,
+};
+
struct sky2_port {
struct sky2_hw *hw;
struct net_device *netdev;
@@ -1818,13 +1867,13 @@ struct sky2_port {
dma_addr_t rx_le_map;
dma_addr_t tx_le_map;
- u32 advertising; /* ADVERTISED_ bits */
+ u16 advertising; /* ADVERTISED_ bits */
u16 speed; /* SPEED_1000, SPEED_100, ... */
u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */
u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */
- u8 rx_pause;
- u8 tx_pause;
u8 rx_csum;
+ enum flow_control flow_mode;
+ enum flow_control flow_status;
struct net_device_stats net_stats;
@@ -1836,7 +1885,6 @@ struct sky2_hw {
struct net_device *dev[2];
int pm_cap;
- int err_cap;
u8 chip_id;
u8 chip_rev;
u8 pmd_type;
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 636dbfcdf8cb..a8640169fc77 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -398,6 +398,42 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
#define SMC_IRQ_FLAGS (0)
+#elif defined(CONFIG_ARCH_VERSATILE)
+
+#define SMC_CAN_USE_8BIT 1
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 1
+#define SMC_NOWAIT 1
+
+#define SMC_inb(a, r) readb((a) + (r))
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_inl(a, r) readl((a) + (r))
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outw(v, a, r) writew(v, (a) + (r))
+#define SMC_outl(v, a, r) writel(v, (a) + (r))
+#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
+#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
+
+#define SMC_IRQ_FLAGS (0)
+
#else
#define SMC_CAN_USE_8BIT 1
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 46a009085f7c..418138dd6c68 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -55,12 +55,13 @@ MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
"<Jens.Osterkamp@de.ibm.com>");
MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
MODULE_LICENSE("GPL");
+MODULE_VERSION(VERSION);
static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
-module_param(rx_descriptors, int, 0644);
-module_param(tx_descriptors, int, 0644);
+module_param(rx_descriptors, int, 0444);
+module_param(tx_descriptors, int, 0444);
MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
"in rx chains");
@@ -300,7 +301,7 @@ static int
spider_net_init_chain(struct spider_net_card *card,
struct spider_net_descr_chain *chain,
struct spider_net_descr *start_descr,
- int direction, int no)
+ int no)
{
int i;
struct spider_net_descr *descr;
@@ -315,7 +316,7 @@ spider_net_init_chain(struct spider_net_card *card,
buf = pci_map_single(card->pdev, descr,
SPIDER_NET_DESCR_SIZE,
- direction);
+ PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(buf))
goto iommu_error;
@@ -329,11 +330,6 @@ spider_net_init_chain(struct spider_net_card *card,
(descr-1)->next = start_descr;
start_descr->prev = descr-1;
- descr = start_descr;
- if (direction == PCI_DMA_FROMDEVICE)
- for (i=0; i < no; i++, descr++)
- descr->next_descr_addr = descr->next->bus_addr;
-
spin_lock_init(&chain->lock);
chain->head = start_descr;
chain->tail = start_descr;
@@ -346,7 +342,7 @@ iommu_error:
if (descr->bus_addr)
pci_unmap_single(card->pdev, descr->bus_addr,
SPIDER_NET_DESCR_SIZE,
- direction);
+ PCI_DMA_BIDIRECTIONAL);
return -ENOMEM;
}
@@ -362,15 +358,15 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
struct spider_net_descr *descr;
descr = card->rx_chain.head;
- while (descr->next != card->rx_chain.head) {
+ do {
if (descr->skb) {
dev_kfree_skb(descr->skb);
pci_unmap_single(card->pdev, descr->buf_addr,
SPIDER_NET_MAX_FRAME,
- PCI_DMA_FROMDEVICE);
+ PCI_DMA_BIDIRECTIONAL);
}
descr = descr->next;
- }
+ } while (descr != card->rx_chain.head);
}
/**
@@ -645,26 +641,41 @@ static int
spider_net_prepare_tx_descr(struct spider_net_card *card,
struct sk_buff *skb)
{
- struct spider_net_descr *descr = card->tx_chain.head;
+ struct spider_net_descr *descr;
dma_addr_t buf;
+ unsigned long flags;
+ int length;
- buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
+ length = skb->len;
+ if (length < ETH_ZLEN) {
+ if (skb_pad(skb, ETH_ZLEN-length))
+ return 0;
+ length = ETH_ZLEN;
+ }
+
+ buf = pci_map_single(card->pdev, skb->data, length, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(buf)) {
if (netif_msg_tx_err(card) && net_ratelimit())
pr_err("could not iommu-map packet (%p, %i). "
- "Dropping packet\n", skb->data, skb->len);
+ "Dropping packet\n", skb->data, length);
card->spider_stats.tx_iommu_map_error++;
return -ENOMEM;
}
+ spin_lock_irqsave(&card->tx_chain.lock, flags);
+ descr = card->tx_chain.head;
+ card->tx_chain.head = descr->next;
+
descr->buf_addr = buf;
- descr->buf_size = skb->len;
+ descr->buf_size = length;
descr->next_descr_addr = 0;
descr->skb = skb;
descr->data_status = 0;
descr->dmac_cmd_status =
SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
+ spin_unlock_irqrestore(&card->tx_chain.lock, flags);
+
if (skb->protocol == htons(ETH_P_IP))
switch (skb->nh.iph->protocol) {
case IPPROTO_TCP:
@@ -675,32 +686,51 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
break;
}
+ /* Chain the bus address, so that the DMA engine finds this descr. */
descr->prev->next_descr_addr = descr->bus_addr;
+ card->netdev->trans_start = jiffies; /* set netdev watchdog timer */
return 0;
}
-/**
- * spider_net_release_tx_descr - processes a used tx descriptor
- * @card: card structure
- * @descr: descriptor to release
- *
- * releases a used tx descriptor (unmapping, freeing of skb)
- */
-static inline void
-spider_net_release_tx_descr(struct spider_net_card *card)
+static int
+spider_net_set_low_watermark(struct spider_net_card *card)
{
+ unsigned long flags;
+ int status;
+ int cnt=0;
+ int i;
struct spider_net_descr *descr = card->tx_chain.tail;
- struct sk_buff *skb;
- card->tx_chain.tail = card->tx_chain.tail->next;
- descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
+ /* Measure the length of the queue. Measurement does not
+ * need to be precise -- does not need a lock. */
+ while (descr != card->tx_chain.head) {
+ status = descr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
+ if (status == SPIDER_NET_DESCR_NOT_IN_USE)
+ break;
+ descr = descr->next;
+ cnt++;
+ }
- /* unmap the skb */
- skb = descr->skb;
- pci_unmap_single(card->pdev, descr->buf_addr, skb->len,
- PCI_DMA_TODEVICE);
- dev_kfree_skb_any(skb);
+ /* If TX queue is short, don't even bother with interrupts */
+ if (cnt < card->num_tx_desc/4)
+ return cnt;
+
+ /* Set the low watermark three-quarters of the way into the queue. */
+ descr = card->tx_chain.tail;
+ cnt = (cnt*3)/4;
+ for (i=0;i<cnt; i++)
+ descr = descr->next;
+
+ /* Set the new watermark, clear the old watermark */
+ spin_lock_irqsave(&card->tx_chain.lock, flags);
+ descr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
+ if (card->low_watermark && card->low_watermark != descr)
+ card->low_watermark->dmac_cmd_status =
+ card->low_watermark->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
+ card->low_watermark = descr;
+ spin_unlock_irqrestore(&card->tx_chain.lock, flags);
+ return cnt;
}
/**
@@ -719,21 +749,29 @@ static int
spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
{
struct spider_net_descr_chain *chain = &card->tx_chain;
+ struct spider_net_descr *descr;
+ struct sk_buff *skb;
+ u32 buf_addr;
+ unsigned long flags;
int status;
- spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR);
-
while (chain->tail != chain->head) {
- status = spider_net_get_descr_status(chain->tail);
+ spin_lock_irqsave(&chain->lock, flags);
+ descr = chain->tail;
+
+ status = spider_net_get_descr_status(descr);
switch (status) {
case SPIDER_NET_DESCR_COMPLETE:
card->netdev_stats.tx_packets++;
- card->netdev_stats.tx_bytes += chain->tail->skb->len;
+ card->netdev_stats.tx_bytes += descr->skb->len;
break;
case SPIDER_NET_DESCR_CARDOWNED:
- if (!brutal)
+ if (!brutal) {
+ spin_unlock_irqrestore(&chain->lock, flags);
return 1;
+ }
+
/* fallthrough, if we release the descriptors
* brutally (then we don't care about
* SPIDER_NET_DESCR_CARDOWNED) */
@@ -750,11 +788,25 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
default:
card->netdev_stats.tx_dropped++;
- return 1;
+ if (!brutal) {
+ spin_unlock_irqrestore(&chain->lock, flags);
+ return 1;
+ }
}
- spider_net_release_tx_descr(card);
- }
+ chain->tail = descr->next;
+ descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
+ skb = descr->skb;
+ buf_addr = descr->buf_addr;
+ spin_unlock_irqrestore(&chain->lock, flags);
+
+ /* unmap the skb */
+ if (skb) {
+ int len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
+ pci_unmap_single(card->pdev, buf_addr, len, PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ }
+ }
return 0;
}
@@ -763,8 +815,12 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
* @card: card structure
* @descr: descriptor address to enable TX processing at
*
- * spider_net_kick_tx_dma writes the current tx chain head as start address
- * of the tx descriptor chain and enables the transmission DMA engine
+ * This routine will start the transmit DMA running if
+ * it is not already running. This routine need only be
+ * called when queueing a new packet to an empty tx queue.
+ * Writes the current tx chain head as start address
+ * of the tx descriptor chain and enables the transmission
+ * DMA engine.
*/
static inline void
spider_net_kick_tx_dma(struct spider_net_card *card)
@@ -804,65 +860,43 @@ out:
static int
spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
+ int cnt;
struct spider_net_card *card = netdev_priv(netdev);
struct spider_net_descr_chain *chain = &card->tx_chain;
- struct spider_net_descr *descr = chain->head;
- unsigned long flags;
- int result;
-
- spin_lock_irqsave(&chain->lock, flags);
spider_net_release_tx_chain(card, 0);
- if (chain->head->next == chain->tail->prev) {
- card->netdev_stats.tx_dropped++;
- result = NETDEV_TX_LOCKED;
- goto out;
- }
+ if ((chain->head->next == chain->tail->prev) ||
+ (spider_net_prepare_tx_descr(card, skb) != 0)) {
- if (spider_net_get_descr_status(descr) != SPIDER_NET_DESCR_NOT_IN_USE) {
card->netdev_stats.tx_dropped++;
- result = NETDEV_TX_LOCKED;
- goto out;
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
}
- if (spider_net_prepare_tx_descr(card, skb) != 0) {
- card->netdev_stats.tx_dropped++;
- result = NETDEV_TX_BUSY;
- goto out;
- }
-
- result = NETDEV_TX_OK;
-
- spider_net_kick_tx_dma(card);
- card->tx_chain.head = card->tx_chain.head->next;
-
-out:
- spin_unlock_irqrestore(&chain->lock, flags);
- netif_wake_queue(netdev);
- return result;
+ cnt = spider_net_set_low_watermark(card);
+ if (cnt < 5)
+ spider_net_kick_tx_dma(card);
+ return NETDEV_TX_OK;
}
/**
* spider_net_cleanup_tx_ring - cleans up the TX ring
* @card: card structure
*
- * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
- * interrupts to cleanup our TX ring) and returns sent packets to the stack
- * by freeing them
+ * spider_net_cleanup_tx_ring is called either by the tx_timer
+ * or from the NAPI polling routine.
+ * This routine releases resources associated with transmitted
+ * packets, including updating the queue tail pointer.
*/
static void
spider_net_cleanup_tx_ring(struct spider_net_card *card)
{
- unsigned long flags;
-
- spin_lock_irqsave(&card->tx_chain.lock, flags);
-
if ((spider_net_release_tx_chain(card, 0) != 0) &&
- (card->netdev->flags & IFF_UP))
+ (card->netdev->flags & IFF_UP)) {
spider_net_kick_tx_dma(card);
-
- spin_unlock_irqrestore(&card->tx_chain.lock, flags);
+ netif_wake_queue(card->netdev);
+ }
}
/**
@@ -1053,6 +1087,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
int packets_to_do, packets_done = 0;
int no_more_packets = 0;
+ spider_net_cleanup_tx_ring(card);
packets_to_do = min(*budget, netdev->quota);
while (packets_to_do) {
@@ -1243,12 +1278,15 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
case SPIDER_NET_PHYINT:
case SPIDER_NET_GMAC2INT:
case SPIDER_NET_GMAC1INT:
- case SPIDER_NET_GIPSINT:
case SPIDER_NET_GFIFOINT:
case SPIDER_NET_DMACINT:
case SPIDER_NET_GSYSINT:
break; */
+ case SPIDER_NET_GIPSINT:
+ show_error = 0;
+ break;
+
case SPIDER_NET_GPWOPCMPINT:
/* PHY write operation completed */
show_error = 0;
@@ -1307,9 +1345,10 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
case SPIDER_NET_GDTDCEINT:
/* chain end. If a descriptor should be sent, kick off
* tx dma
- if (card->tx_chain.tail == card->tx_chain.head)
+ if (card->tx_chain.tail != card->tx_chain.head)
spider_net_kick_tx_dma(card);
- show_error = 0; */
+ */
+ show_error = 0;
break;
/* case SPIDER_NET_G1TMCNTINT: not used. print a message */
@@ -1354,7 +1393,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
if (netif_msg_intr(card))
pr_err("got descriptor chain end interrupt, "
"restarting DMAC %c.\n",
- 'D'+i-SPIDER_NET_GDDDCEINT);
+ 'D'-(i-SPIDER_NET_GDDDCEINT)/3);
spider_net_refill_rx_chain(card);
spider_net_enable_rxdmac(card);
show_error = 0;
@@ -1423,8 +1462,9 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
}
if ((show_error) && (netif_msg_intr(card)))
- pr_err("Got error interrupt, GHIINT0STS = 0x%08x, "
+ pr_err("Got error interrupt on %s, GHIINT0STS = 0x%08x, "
"GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
+ card->netdev->name,
status_reg, error_reg1, error_reg2);
/* clear interrupt sources */
@@ -1460,6 +1500,8 @@ spider_net_interrupt(int irq, void *ptr)
spider_net_rx_irq_off(card);
netif_rx_schedule(netdev);
}
+ if (status_reg & SPIDER_NET_TXINT)
+ netif_rx_schedule(netdev);
if (status_reg & SPIDER_NET_ERRINT )
spider_net_handle_error_irq(card, status_reg);
@@ -1599,7 +1641,7 @@ spider_net_enable_card(struct spider_net_card *card)
SPIDER_NET_INT2_MASK_VALUE);
spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_GDTDCEIDIS);
+ SPIDER_NET_GDTBSTA | SPIDER_NET_GDTDCEIDIS);
}
/**
@@ -1615,17 +1657,26 @@ int
spider_net_open(struct net_device *netdev)
{
struct spider_net_card *card = netdev_priv(netdev);
- int result;
+ struct spider_net_descr *descr;
+ int i, result;
result = -ENOMEM;
if (spider_net_init_chain(card, &card->tx_chain, card->descr,
- PCI_DMA_TODEVICE, card->tx_desc))
+ card->num_tx_desc))
goto alloc_tx_failed;
+
+ card->low_watermark = NULL;
+
+ /* rx_chain is after tx_chain, so offset is descr + tx_count */
if (spider_net_init_chain(card, &card->rx_chain,
- card->descr + card->rx_desc,
- PCI_DMA_FROMDEVICE, card->rx_desc))
+ card->descr + card->num_tx_desc,
+ card->num_rx_desc))
goto alloc_rx_failed;
+ descr = card->rx_chain.head;
+ for (i=0; i < card->num_rx_desc; i++, descr++)
+ descr->next_descr_addr = descr->next->bus_addr;
+
/* allocate rx skbs */
if (spider_net_alloc_rx_skbs(card))
goto alloc_skbs_failed;
@@ -1878,10 +1929,7 @@ spider_net_stop(struct net_device *netdev)
spider_net_disable_rxdmac(card);
/* release chains */
- if (spin_trylock(&card->tx_chain.lock)) {
- spider_net_release_tx_chain(card, 1);
- spin_unlock(&card->tx_chain.lock);
- }
+ spider_net_release_tx_chain(card, 1);
spider_net_free_chain(card, &card->tx_chain);
spider_net_free_chain(card, &card->rx_chain);
@@ -2012,8 +2060,8 @@ spider_net_setup_netdev(struct spider_net_card *card)
card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
- card->tx_desc = tx_descriptors;
- card->rx_desc = rx_descriptors;
+ card->num_tx_desc = tx_descriptors;
+ card->num_rx_desc = rx_descriptors;
spider_net_setup_netdev_ops(netdev);
@@ -2252,6 +2300,8 @@ static struct pci_driver spider_net_driver = {
*/
static int __init spider_net_init(void)
{
+ printk(KERN_INFO "Spidernet version %s.\n", VERSION);
+
if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
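The spider_net changes above replace per-packet TX cleanup with a low-watermark scheme: the driver measures the in-flight queue without locking, then flags a single descriptor about three-quarters of the way down so the hardware raises one completion interrupt per batch. A minimal sketch of the same idea, assuming a plain circular descriptor array instead of the driver's linked chain (all names here are illustrative):

#include <stddef.h>

struct desc {
	int irq_on_complete;	/* "watermark" flag: raise an IRQ when this one finishes */
};

/*
 * Flag one descriptor, roughly three-quarters of the way from the queue
 * tail to its head, so the hardware raises a single completion interrupt
 * per batch instead of one per packet.  Short queues are cheaper to poll,
 * so they get no interrupt at all.
 */
static size_t pick_watermark(struct desc *ring, size_t nring,
			     size_t tail, size_t head)
{
	size_t queued = (head + nring - tail) % nring;
	size_t i, mark;

	if (queued < nring / 4)
		return head;		/* short queue: rely on polling */

	mark = (tail + (queued * 3) / 4) % nring;
	for (i = 0; i < nring; i++)
		ring[i].irq_on_complete = (i == mark);
	return mark;
}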
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index a59deda2f95e..b3b46119b424 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -24,6 +24,8 @@
#ifndef _SPIDER_NET_H
#define _SPIDER_NET_H
+#define VERSION "1.1 A"
+
#include "sungem_phy.h"
extern int spider_net_stop(struct net_device *netdev);
@@ -47,7 +49,7 @@ extern char spider_net_driver_name[];
#define SPIDER_NET_TX_DESCRIPTORS_MIN 16
#define SPIDER_NET_TX_DESCRIPTORS_MAX 512
-#define SPIDER_NET_TX_TIMER 20
+#define SPIDER_NET_TX_TIMER (HZ/5)
#define SPIDER_NET_RX_CSUM_DEFAULT 1
@@ -189,7 +191,9 @@ extern char spider_net_driver_name[];
#define SPIDER_NET_MACMODE_VALUE 0x00000001
#define SPIDER_NET_BURSTLMT_VALUE 0x00000200 /* about 16 us */
-/* 1(0) enable r/tx dma
+/* DMAC control register GDMACCNTR
+ *
+ * 1(0) enable r/tx dma
* 0000000 fixed to 0
*
* 000000 fixed to 0
@@ -198,6 +202,7 @@ extern char spider_net_driver_name[];
*
* 000000 fixed to 0
* 00 burst alignment: 128 bytes
+ * 11 burst alignment: 1024 bytes
*
* 00000 fixed to 0
* 0 descr writeback size 32 bytes
@@ -208,10 +213,13 @@ extern char spider_net_driver_name[];
#define SPIDER_NET_DMA_RX_VALUE 0x80000000
#define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003
/* to set TX_DMA_EN */
-#define SPIDER_NET_TX_DMA_EN 0x80000000
-#define SPIDER_NET_GDTDCEIDIS 0x00000002
-#define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \
- SPIDER_NET_GDTDCEIDIS
+#define SPIDER_NET_TX_DMA_EN 0x80000000
+#define SPIDER_NET_GDTBSTA 0x00000300
+#define SPIDER_NET_GDTDCEIDIS 0x00000002
+#define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \
+ SPIDER_NET_GDTBSTA | \
+ SPIDER_NET_GDTDCEIDIS
+
#define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003
/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */
@@ -320,13 +328,10 @@ enum spider_net_int2_status {
SPIDER_NET_GRISPDNGINT
};
-#define SPIDER_NET_TXINT ( (1 << SPIDER_NET_GTTEDINT) | \
- (1 << SPIDER_NET_GDTDCEINT) | \
- (1 << SPIDER_NET_GDTFDCINT) )
+#define SPIDER_NET_TXINT ( (1 << SPIDER_NET_GDTFDCINT) )
-/* we rely on flagged descriptor interrupts*/
-#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) | \
- (1 << SPIDER_NET_GRMFLLINT) )
+/* We rely on flagged descriptor interrupts */
+#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) )
#define SPIDER_NET_ERRINT ( 0xffffffff & \
(~SPIDER_NET_TXINT) & \
@@ -349,6 +354,7 @@ enum spider_net_int2_status {
#define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */
#define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */
#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000
+#define SPIDER_NET_DESCR_TXDESFLG 0x00800000
struct spider_net_descr {
/* as defined by the hardware */
@@ -433,6 +439,7 @@ struct spider_net_card {
struct spider_net_descr_chain tx_chain;
struct spider_net_descr_chain rx_chain;
+ struct spider_net_descr *low_watermark;
struct net_device_stats netdev_stats;
@@ -448,8 +455,8 @@ struct spider_net_card {
/* for ethtool */
int msg_enable;
- int rx_desc;
- int tx_desc;
+ int num_rx_desc;
+ int num_tx_desc;
struct spider_net_extra_stats spider_stats;
struct spider_net_descr descr[0];
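One detail worth noting in the header above: SPIDER_NET_DMA_TX_VALUE is still defined as a bare OR of flags without surrounding parentheses. That happens to be harmless in its current uses, but object-like macros built from operators are conventionally parenthesized so they expand safely inside larger expressions. A generic illustration (constants are arbitrary):

#define FLAGS_UNSAFE	0x80000000 | 0x00000300		/* expands textually */
#define FLAGS_SAFE	(0x80000000 | 0x00000300)	/* survives any surrounding expression */

/* val & FLAGS_UNSAFE parses as (val & 0x80000000) | 0x00000300,
 * while val & FLAGS_SAFE masks against the whole constant. */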
diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c
index 589e43658dee..91b995102915 100644
--- a/drivers/net/spider_net_ethtool.c
+++ b/drivers/net/spider_net_ethtool.c
@@ -76,7 +76,7 @@ spider_net_ethtool_get_drvinfo(struct net_device *netdev,
/* clear and fill out info */
memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
strncpy(drvinfo->driver, spider_net_driver_name, 32);
- strncpy(drvinfo->version, "0.1", 32);
+ strncpy(drvinfo->version, VERSION, 32);
strcpy(drvinfo->fw_version, "no information");
strncpy(drvinfo->bus_info, pci_name(card->pdev), 32);
}
@@ -158,9 +158,9 @@ spider_net_ethtool_get_ringparam(struct net_device *netdev,
struct spider_net_card *card = netdev->priv;
ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX;
- ering->tx_pending = card->tx_desc;
+ ering->tx_pending = card->num_tx_desc;
ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX;
- ering->rx_pending = card->rx_desc;
+ ering->rx_pending = card->num_rx_desc;
}
static int spider_net_get_stats_count(struct net_device *netdev)
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index d1d1885b0295..a3220a96524f 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -330,7 +330,7 @@ out2:
out1:
free_netdev(dev);
out:
- iounmap((void *)ioaddr);
+ iounmap((void __iomem *)ioaddr);
return ERR_PTR(err);
}
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 91c76544e4dd..b865db363ba0 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -286,7 +286,7 @@ struct net_device * __init sun3lance_probe(int unit)
out1:
#ifdef CONFIG_SUN3
- iounmap((void *)dev->base_addr);
+ iounmap((void __iomem *)dev->base_addr);
#endif
out:
free_netdev(dev);
@@ -326,7 +326,7 @@ static int __init lance_probe( struct net_device *dev)
ioaddr_probe[1] = tmp2;
#ifdef CONFIG_SUN3
- iounmap((void *)ioaddr);
+ iounmap((void __iomem *)ioaddr);
#endif
return 0;
}
@@ -956,7 +956,7 @@ void cleanup_module(void)
{
unregister_netdev(sun3lance_dev);
#ifdef CONFIG_SUN3
- iounmap((void *)sun3lance_dev->base_addr);
+ iounmap((void __iomem *)sun3lance_dev->base_addr);
#endif
free_netdev(sun3lance_dev);
}
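The sun3_82586 and sun3lance changes above only add __iomem casts so sparse stops warning about plain pointers being handed to iounmap(). A cleaner pattern, where applicable, is to keep the mapped address as a void __iomem * from the moment ioremap() returns it, so no cast is ever needed. A sketch under that assumption (names are placeholders, not the sun3 code):

#include <linux/errno.h>
#include <linux/io.h>

static void __iomem *example_regs;	/* MMIO cookie, accessed only via readl/writel helpers */

static int example_map(unsigned long phys, unsigned long len)
{
	example_regs = ioremap(phys, len);
	return example_regs ? 0 : -ENOMEM;
}

static void example_unmap(void)
{
	if (example_regs) {
		iounmap(example_regs);	/* no cast needed, the type is already __iomem */
		example_regs = NULL;
	}
}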
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 6439b0cef1e4..18f88853e1e5 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -42,7 +42,7 @@
#define DRV_RELDATE "11/24/03"
#define DRV_AUTHOR "David S. Miller (davem@redhat.com)"
-static char version[] __initdata =
+static char version[] =
DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
MODULE_VERSION(DRV_VERSION);
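Dropping __initdata from the sunbmac version string matters because __initdata objects are released once boot-time initialization is over; any reference that can run later, for instance from a probe path, would then point into freed memory. A minimal illustration of the difference, with names invented for the example:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

/* Discarded once boot finishes: safe only for __init-time users. */
static char boot_banner[] __initdata = "example: boot-time banner\n";

/* Kept for the module's whole lifetime: safe from paths that may
 * run long after init memory has been freed. */
static char version[] = "example.c: v1.0\n";

static int __init example_init(void)
{
	printk(KERN_INFO "%s", boot_banner);	/* fine, still inside __init */
	printk(KERN_INFO "%s", version);	/* fine at any time */
	return 0;
}
module_init(example_init);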
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 327836b1014e..c20bb998e0e5 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.66"
-#define DRV_MODULE_RELDATE "September 23, 2006"
+#define DRV_MODULE_VERSION "3.69"
+#define DRV_MODULE_RELDATE "November 15, 2006"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -129,7 +129,7 @@
#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
/* minimum number of free TX descriptors required to wake up TX process */
-#define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
+#define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
@@ -3075,10 +3075,10 @@ static void tg3_tx(struct tg3 *tp)
smp_mb();
if (unlikely(netif_queue_stopped(tp->dev) &&
- (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
+ (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
netif_tx_lock(tp->dev);
if (netif_queue_stopped(tp->dev) &&
- (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
+ (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
netif_wake_queue(tp->dev);
netif_tx_unlock(tp->dev);
}
@@ -3928,7 +3928,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
tp->tx_prod = entry;
if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
netif_stop_queue(dev);
- if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
+ if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
netif_wake_queue(tp->dev);
}
@@ -4143,7 +4143,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
tp->tx_prod = entry;
if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
netif_stop_queue(dev);
- if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
+ if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
netif_wake_queue(tp->dev);
}
@@ -4728,10 +4728,11 @@ static int tg3_poll_fw(struct tg3 *tp)
u32 val;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
- for (i = 0; i < 400; i++) {
+ /* Wait up to 20ms for init done. */
+ for (i = 0; i < 200; i++) {
if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
return 0;
- udelay(10);
+ udelay(100);
}
return -ENODEV;
}
@@ -6014,7 +6015,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tg3_abort_hw(tp, 1);
}
- if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
+ if (reset_phy)
tg3_phy_reset(tp);
err = tg3_chip_reset(tp);
@@ -6574,7 +6575,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
}
- err = tg3_setup_phy(tp, reset_phy);
+ err = tg3_setup_phy(tp, 0);
if (err)
return err;
@@ -6978,8 +6979,10 @@ static int tg3_open(struct net_device *dev)
tg3_full_lock(tp, 0);
err = tg3_set_power_state(tp, PCI_D0);
- if (err)
+ if (err) {
+ tg3_full_unlock(tp);
return err;
+ }
tg3_disable_ints(tp);
tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
@@ -8106,7 +8109,10 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
(ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
- (ering->tx_pending > TG3_TX_RING_SIZE - 1))
+ (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
+ (ering->tx_pending <= MAX_SKB_FRAGS) ||
+ ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG) &&
+ (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
return -EINVAL;
if (netif_running(dev)) {
@@ -10209,7 +10215,7 @@ skip_phy_reset:
static void __devinit tg3_read_partno(struct tg3 *tp)
{
unsigned char vpd_data[256];
- int i;
+ unsigned int i;
u32 magic;
if (tg3_nvram_read_swab(tp, 0x0, &magic))
@@ -10255,9 +10261,9 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
}
/* Now parse and find the part number. */
- for (i = 0; i < 256; ) {
+ for (i = 0; i < 254; ) {
unsigned char val = vpd_data[i];
- int block_end;
+ unsigned int block_end;
if (val == 0x82 || val == 0x91) {
i = (i + 3 +
@@ -10273,21 +10279,26 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
(vpd_data[i + 1] +
(vpd_data[i + 2] << 8)));
i += 3;
- while (i < block_end) {
+
+ if (block_end > 256)
+ goto out_not_found;
+
+ while (i < (block_end - 2)) {
if (vpd_data[i + 0] == 'P' &&
vpd_data[i + 1] == 'N') {
int partno_len = vpd_data[i + 2];
- if (partno_len > 24)
+ i += 3;
+ if (partno_len > 24 || (partno_len + i) > 256)
goto out_not_found;
memcpy(tp->board_part_number,
- &vpd_data[i + 3],
- partno_len);
+ &vpd_data[i], partno_len);
/* Success. */
return;
}
+ i += 3 + vpd_data[i + 2];
}
/* Part number not found. */
@@ -10357,7 +10368,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
u32 pci_state_reg, grc_misc_cfg;
u32 val;
u16 pci_cmd;
- int err;
+ int err, pcie_cap;
/* Force memory write invalidate off. If we leave it on,
* then on 5700_BX chips we have to enable a workaround.
@@ -10532,8 +10543,19 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
- if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
+ pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
+ if (pcie_cap != 0) {
tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ u16 lnkctl;
+
+ pci_read_config_word(tp->pdev,
+ pcie_cap + PCI_EXP_LNKCTL,
+ &lnkctl);
+ if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
+ tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
+ }
+ }
/* If we have an AMD 762 or VIA K8T800 chipset, write
* reordering to the mailbox registers done by the host
@@ -11800,6 +11822,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
} else {
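Several of the tg3 changes share one theme: values that used to be fixed fractions of TG3_TX_RING_SIZE are now derived from the user-configurable tx_pending, so the queue-wakeup threshold and the ethtool ring-size validation have to stay consistent with each other. A hedged sketch of that invariant follows (the names and the MAX_SKB_FRAGS stand-in are illustrative, not tg3 internals):

#include <stdbool.h>

#define MAX_SKB_FRAGS_EXAMPLE 18	/* stand-in for the real MAX_SKB_FRAGS */

/* Wake the queue once a quarter of the configured ring is free again. */
static unsigned int tx_wakeup_thresh(unsigned int tx_pending)
{
	return tx_pending / 4;
}

/*
 * A requested TX ring size is only usable if one maximally fragmented
 * skb still fits with room to spare; otherwise the queue could stall
 * with the ring "full" of a single packet.
 */
static bool tx_ring_size_ok(unsigned int tx_pending, bool hw_tso_bug)
{
	if (tx_pending <= MAX_SKB_FRAGS_EXAMPLE)
		return false;
	if (hw_tso_bug && tx_pending <= MAX_SKB_FRAGS_EXAMPLE * 3)
		return false;
	return true;
}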
diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
index 4f756960db2a..cb7dbb63c9d9 100644
--- a/drivers/net/tokenring/proteon.c
+++ b/drivers/net/tokenring/proteon.c
@@ -370,6 +370,10 @@ static int __init proteon_init(void)
dev->dma = dma[i];
pdev = platform_device_register_simple("proteon",
i, NULL, 0);
+ if (IS_ERR(pdev)) {
+ free_netdev(dev);
+ continue;
+ }
err = setup_card(dev, &pdev->dev);
if (!err) {
proteon_dev[i] = pdev;
@@ -385,9 +389,10 @@ static int __init proteon_init(void)
/* Probe for cards. */
if (num == 0) {
printk(KERN_NOTICE "proteon.c: No cards found.\n");
- return (-ENODEV);
+ platform_driver_unregister(&proteon_driver);
+ return -ENODEV;
}
- return (0);
+ return 0;
}
static void __exit proteon_cleanup(void)
diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
index d6ba41cf3110..33afea31d87b 100644
--- a/drivers/net/tokenring/skisa.c
+++ b/drivers/net/tokenring/skisa.c
@@ -380,6 +380,10 @@ static int __init sk_isa_init(void)
dev->dma = dma[i];
pdev = platform_device_register_simple("skisa",
i, NULL, 0);
+ if (IS_ERR(pdev)) {
+ free_netdev(dev);
+ continue;
+ }
err = setup_card(dev, &pdev->dev);
if (!err) {
sk_isa_dev[i] = pdev;
@@ -395,9 +399,10 @@ static int __init sk_isa_init(void)
/* Probe for cards. */
if (num == 0) {
printk(KERN_NOTICE "skisa.c: No cards found.\n");
- return (-ENODEV);
+ platform_driver_unregister(&sk_isa_driver);
+ return -ENODEV;
}
- return (0);
+ return 0;
}
static void __exit sk_isa_cleanup(void)
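The proteon and skisa fixes above follow the same two-part pattern: platform_device_register_simple() reports failure through an ERR_PTR rather than NULL, so its result must be tested with IS_ERR() before use, and when no card is found the module init path has to unregister the platform driver it registered earlier. A condensed, self-contained sketch of that pattern (the driver name and setup helper are placeholders):

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver example_driver = {
	.driver = { .name = "example" },
};

/* Stand-in for the per-card hardware probe. */
static int example_setup_card(struct device *dev)
{
	return -ENODEV;
}

static int __init example_isa_init(void)
{
	struct platform_device *pdev;
	int err, num = 0;

	err = platform_driver_register(&example_driver);
	if (err)
		return err;

	/* One iteration of the per-slot probe loop. */
	pdev = platform_device_register_simple("example", 0, NULL, 0);
	if (IS_ERR(pdev)) {
		/* failure comes back as an ERR_PTR, never as NULL */
	} else if (example_setup_card(&pdev->dev) == 0) {
		num++;
	} else {
		platform_device_unregister(pdev);
	}

	if (num == 0) {
		/* nothing found: undo the earlier registration, too */
		platform_driver_unregister(&example_driver);
		return -ENODEV;
	}
	return 0;
}
module_init(example_isa_init);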
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 2cfd9634895a..f6b3a94e97bf 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -1730,7 +1730,7 @@ static void __init de21040_get_media_info(struct de_private *de)
}
/* Note: this routine returns extra data bits for size detection. */
-static unsigned __init tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
+static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
{
int i;
unsigned retval = 0;
@@ -1926,7 +1926,7 @@ bad_srom:
goto fill_defaults;
}
-static int __init de_init_one (struct pci_dev *pdev,
+static int __devinit de_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct net_device *dev;
@@ -2082,7 +2082,7 @@ err_out_free:
return rc;
}
-static void __exit de_remove_one (struct pci_dev *pdev)
+static void __devexit de_remove_one (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct de_private *de = dev->priv;
@@ -2164,7 +2164,7 @@ static struct pci_driver de_driver = {
.name = DRV_NAME,
.id_table = de_pci_tbl,
.probe = de_init_one,
- .remove = __exit_p(de_remove_one),
+ .remove = __devexit_p(de_remove_one),
#ifdef CONFIG_PM
.suspend = de_suspend,
.resume = de_resume,
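The de2104x annotations change __init/__exit into __devinit/__devexit on the PCI probe and remove paths. On a hotplug-capable bus these functions can run long after boot, so they must not live in sections that are discarded when init memory is freed; __devexit_p() likewise degrades to NULL when the remove path is compiled out. A skeleton of the convention as it stood in this kernel generation (all names invented for the example):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>

static struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* placeholder vendor/device IDs */
	{ }
};

/* May run long after boot, e.g. for a hot-added device, so it cannot
 * be placed in the discarded __init section. */
static int __devinit example_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	return pci_enable_device(pdev);
}

static void __devexit example_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver example_pci_driver = {
	.name		= "example",
	.id_table	= example_ids,
	.probe		= example_probe,
	/* __devexit_p() becomes NULL when hotplug support is compiled out */
	.remove		= __devexit_p(example_remove),
};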
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 12cd7b561f35..b37888011067 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2,14 +2,11 @@
* Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved.
*
* Author: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
*
* Description:
* QE UCC Gigabit Ethernet Driver
*
- * Changelog:
- * Jul 6, 2006 Li Yang <LeoLi@freescale.com>
- * - Rearrange code and style fixes
- *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
@@ -31,9 +28,9 @@
#include <linux/dma-mapping.h>
#include <linux/fsl_devices.h>
#include <linux/ethtool.h>
-#include <linux/platform_device.h>
#include <linux/mii.h>
+#include <asm/of_device.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm/io.h>
@@ -70,7 +67,7 @@
static DEFINE_SPINLOCK(ugeth_lock);
-static ucc_geth_info_t ugeth_primary_info = {
+static struct ucc_geth_info ugeth_primary_info = {
.uf_info = {
.bd_mem_part = MEM_PART_SYSTEM,
.rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
@@ -163,7 +160,7 @@ static ucc_geth_info_t ugeth_primary_info = {
.riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
};
-static ucc_geth_info_t ugeth_info[8];
+static struct ucc_geth_info ugeth_info[8];
#ifdef DEBUG
static void mem_disp(u8 *addr, int size)
@@ -219,8 +216,8 @@ static struct list_head *dequeue(struct list_head *lh)
}
}
-static int get_interface_details(enet_interface_e enet_interface,
- enet_speed_e *speed,
+static int get_interface_details(enum enet_interface enet_interface,
+ enum enet_speed *speed,
int *r10m,
int *rmm,
int *rpm,
@@ -283,7 +280,7 @@ static int get_interface_details(enet_interface_e enet_interface,
return 0;
}
-static struct sk_buff *get_new_skb(ucc_geth_private_t *ugeth, u8 *bd)
+static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, u8 *bd)
{
struct sk_buff *skb = NULL;
@@ -303,21 +300,19 @@ static struct sk_buff *get_new_skb(ucc_geth_private_t *ugeth, u8 *bd)
skb->dev = ugeth->dev;
- BD_BUFFER_SET(bd,
+ out_be32(&((struct qe_bd *)bd)->buf,
dma_map_single(NULL,
skb->data,
ugeth->ug_info->uf_info.max_rx_buf_length +
UCC_GETH_RX_DATA_BUF_ALIGNMENT,
DMA_FROM_DEVICE));
- BD_STATUS_AND_LENGTH_SET(bd,
- (R_E | R_I |
- (BD_STATUS_AND_LENGTH(bd) & R_W)));
+ out_be32((u32 *)bd, (R_E | R_I | (in_be32((u32 *)bd) & R_W)));
return skb;
}
-static int rx_bd_buffer_set(ucc_geth_private_t *ugeth, u8 rxQ)
+static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
{
u8 *bd;
u32 bd_status;
@@ -328,7 +323,7 @@ static int rx_bd_buffer_set(ucc_geth_private_t *ugeth, u8 rxQ)
i = 0;
do {
- bd_status = BD_STATUS_AND_LENGTH(bd);
+ bd_status = in_be32((u32*)bd);
skb = get_new_skb(ugeth, bd);
if (!skb) /* If can not allocate data buffer,
@@ -338,19 +333,19 @@ static int rx_bd_buffer_set(ucc_geth_private_t *ugeth, u8 rxQ)
ugeth->rx_skbuff[rxQ][i] = skb;
/* advance the BD pointer */
- bd += UCC_GETH_SIZE_OF_BD;
+ bd += sizeof(struct qe_bd);
i++;
} while (!(bd_status & R_W));
return 0;
}
-static int fill_init_enet_entries(ucc_geth_private_t *ugeth,
+static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
volatile u32 *p_start,
u8 num_entries,
u32 thread_size,
u32 thread_alignment,
- qe_risc_allocation_e risc,
+ enum qe_risc_allocation risc,
int skip_page_for_first_entry)
{
u32 init_enet_offset;
@@ -383,10 +378,10 @@ static int fill_init_enet_entries(ucc_geth_private_t *ugeth,
return 0;
}
-static int return_init_enet_entries(ucc_geth_private_t *ugeth,
+static int return_init_enet_entries(struct ucc_geth_private *ugeth,
volatile u32 *p_start,
u8 num_entries,
- qe_risc_allocation_e risc,
+ enum qe_risc_allocation risc,
int skip_page_for_first_entry)
{
u32 init_enet_offset;
@@ -416,11 +411,11 @@ static int return_init_enet_entries(ucc_geth_private_t *ugeth,
}
#ifdef DEBUG
-static int dump_init_enet_entries(ucc_geth_private_t *ugeth,
+static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
volatile u32 *p_start,
u8 num_entries,
u32 thread_size,
- qe_risc_allocation_e risc,
+ enum qe_risc_allocation risc,
int skip_page_for_first_entry)
{
u32 init_enet_offset;
@@ -456,14 +451,14 @@ static int dump_init_enet_entries(ucc_geth_private_t *ugeth,
#endif
#ifdef CONFIG_UGETH_FILTERING
-static enet_addr_container_t *get_enet_addr_container(void)
+static struct enet_addr_container *get_enet_addr_container(void)
{
- enet_addr_container_t *enet_addr_cont;
+ struct enet_addr_container *enet_addr_cont;
/* allocate memory */
- enet_addr_cont = kmalloc(sizeof(enet_addr_container_t), GFP_KERNEL);
+ enet_addr_cont = kmalloc(sizeof(struct enet_addr_container), GFP_KERNEL);
if (!enet_addr_cont) {
- ugeth_err("%s: No memory for enet_addr_container_t object.",
+ ugeth_err("%s: No memory for enet_addr_container object.",
__FUNCTION__);
return NULL;
}
@@ -472,45 +467,43 @@ static enet_addr_container_t *get_enet_addr_container(void)
}
#endif /* CONFIG_UGETH_FILTERING */
-static void put_enet_addr_container(enet_addr_container_t *enet_addr_cont)
+static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont)
{
kfree(enet_addr_cont);
}
+static int set_mac_addr(__be16 __iomem *reg, u8 *mac)
+{
+ out_be16(&reg[0], ((u16)mac[5] << 8) | mac[4]);
+ out_be16(&reg[1], ((u16)mac[3] << 8) | mac[2]);
+ out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]);
+}
+
#ifdef CONFIG_UGETH_FILTERING
-static int hw_add_addr_in_paddr(ucc_geth_private_t *ugeth,
- enet_addr_t *p_enet_addr, u8 paddr_num)
+static int hw_add_addr_in_paddr(struct ucc_geth_private *ugeth,
+ u8 *p_enet_addr, u8 paddr_num)
{
- ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
+ struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
if (!(paddr_num < NUM_OF_PADDRS)) {
- ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__);
+ ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
return -EINVAL;
}
p_82xx_addr_filt =
- (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
+ (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
addressfiltering;
/* Ethernet frames are defined in Little Endian mode, */
/* therefore to insert the address we reverse the bytes. */
- out_be16(&p_82xx_addr_filt->paddr[paddr_num].h,
- (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) |
- (u16) (*p_enet_addr)[4]));
- out_be16(&p_82xx_addr_filt->paddr[paddr_num].m,
- (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) |
- (u16) (*p_enet_addr)[2]));
- out_be16(&p_82xx_addr_filt->paddr[paddr_num].l,
- (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) |
- (u16) (*p_enet_addr)[0]));
-
+ set_mac_addr(&p_82xx_addr_filt->paddr[paddr_num].h, p_enet_addr);
return 0;
}
#endif /* CONFIG_UGETH_FILTERING */
-static int hw_clear_addr_in_paddr(ucc_geth_private_t *ugeth, u8 paddr_num)
+static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
{
- ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
+ struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
if (!(paddr_num < NUM_OF_PADDRS)) {
ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__);
@@ -518,7 +511,7 @@ static int hw_clear_addr_in_paddr(ucc_geth_private_t *ugeth, u8 paddr_num)
}
p_82xx_addr_filt =
- (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
+ (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
addressfiltering;
/* Writing address ff.ff.ff.ff.ff.ff disables address
@@ -530,14 +523,14 @@ static int hw_clear_addr_in_paddr(ucc_geth_private_t *ugeth, u8 paddr_num)
return 0;
}
-static void hw_add_addr_in_hash(ucc_geth_private_t *ugeth,
- enet_addr_t *p_enet_addr)
+static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
+ u8 *p_enet_addr)
{
- ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
+ struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
u32 cecr_subblock;
p_82xx_addr_filt =
- (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
+ (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
addressfiltering;
cecr_subblock =
@@ -546,25 +539,18 @@ static void hw_add_addr_in_hash(ucc_geth_private_t *ugeth,
/* Ethernet frames are defined in Little Endian mode,
therefor to insert */
/* the address to the hash (Big Endian mode), we reverse the bytes.*/
- out_be16(&p_82xx_addr_filt->taddr.h,
- (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) |
- (u16) (*p_enet_addr)[4]));
- out_be16(&p_82xx_addr_filt->taddr.m,
- (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) |
- (u16) (*p_enet_addr)[2]));
- out_be16(&p_82xx_addr_filt->taddr.l,
- (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) |
- (u16) (*p_enet_addr)[0]));
+
+ set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);
qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
- (u8) QE_CR_PROTOCOL_ETHERNET, 0);
+ QE_CR_PROTOCOL_ETHERNET, 0);
}
#ifdef CONFIG_UGETH_MAGIC_PACKET
-static void magic_packet_detection_enable(ucc_geth_private_t *ugeth)
+static void magic_packet_detection_enable(struct ucc_geth_private *ugeth)
{
- ucc_fast_private_t *uccf;
- ucc_geth_t *ug_regs;
+ struct ucc_fast_private *uccf;
+ struct ucc_geth *ug_regs;
u32 maccfg2, uccm;
uccf = ugeth->uccf;
@@ -581,10 +567,10 @@ static void magic_packet_detection_enable(ucc_geth_private_t *ugeth)
out_be32(&ug_regs->maccfg2, maccfg2);
}
-static void magic_packet_detection_disable(ucc_geth_private_t *ugeth)
+static void magic_packet_detection_disable(struct ucc_geth_private *ugeth)
{
- ucc_fast_private_t *uccf;
- ucc_geth_t *ug_regs;
+ struct ucc_fast_private *uccf;
+ struct ucc_geth *ug_regs;
u32 maccfg2, uccm;
uccf = ugeth->uccf;
@@ -602,26 +588,26 @@ static void magic_packet_detection_disable(ucc_geth_private_t *ugeth)
}
#endif /* MAGIC_PACKET */
-static inline int compare_addr(enet_addr_t *addr1, enet_addr_t *addr2)
+static inline int compare_addr(u8 **addr1, u8 **addr2)
{
return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
}
#ifdef DEBUG
-static void get_statistics(ucc_geth_private_t *ugeth,
- ucc_geth_tx_firmware_statistics_t *
+static void get_statistics(struct ucc_geth_private *ugeth,
+ struct ucc_geth_tx_firmware_statistics *
tx_firmware_statistics,
- ucc_geth_rx_firmware_statistics_t *
+ struct ucc_geth_rx_firmware_statistics *
rx_firmware_statistics,
- ucc_geth_hardware_statistics_t *hardware_statistics)
+ struct ucc_geth_hardware_statistics *hardware_statistics)
{
- ucc_fast_t *uf_regs;
- ucc_geth_t *ug_regs;
- ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram;
- ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram;
+ struct ucc_fast *uf_regs;
+ struct ucc_geth *ug_regs;
+ struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
+ struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;
ug_regs = ugeth->ug_regs;
- uf_regs = (ucc_fast_t *) ug_regs;
+ uf_regs = (struct ucc_fast *) ug_regs;
p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
@@ -727,7 +713,7 @@ static void get_statistics(ucc_geth_private_t *ugeth,
}
}
-static void dump_bds(ucc_geth_private_t *ugeth)
+static void dump_bds(struct ucc_geth_private *ugeth)
{
int i;
int length;
@@ -736,7 +722,7 @@ static void dump_bds(ucc_geth_private_t *ugeth)
if (ugeth->p_tx_bd_ring[i]) {
length =
(ugeth->ug_info->bdRingLenTx[i] *
- UCC_GETH_SIZE_OF_BD);
+ sizeof(struct qe_bd));
ugeth_info("TX BDs[%d]", i);
mem_disp(ugeth->p_tx_bd_ring[i], length);
}
@@ -745,14 +731,14 @@ static void dump_bds(ucc_geth_private_t *ugeth)
if (ugeth->p_rx_bd_ring[i]) {
length =
(ugeth->ug_info->bdRingLenRx[i] *
- UCC_GETH_SIZE_OF_BD);
+ sizeof(struct qe_bd));
ugeth_info("RX BDs[%d]", i);
mem_disp(ugeth->p_rx_bd_ring[i], length);
}
}
}
-static void dump_regs(ucc_geth_private_t *ugeth)
+static void dump_regs(struct ucc_geth_private *ugeth)
{
int i;
@@ -893,7 +879,7 @@ static void dump_regs(ucc_geth_private_t *ugeth)
ugeth_info("Base address: 0x%08x",
(u32) & ugeth->p_thread_data_tx[i]);
mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
- sizeof(ucc_geth_thread_data_tx_t));
+ sizeof(struct ucc_geth_thread_data_tx));
}
}
if (ugeth->p_thread_data_rx) {
@@ -927,7 +913,7 @@ static void dump_regs(ucc_geth_private_t *ugeth)
ugeth_info("Base address: 0x%08x",
(u32) & ugeth->p_thread_data_rx[i]);
mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
- sizeof(ucc_geth_thread_data_rx_t));
+ sizeof(struct ucc_geth_thread_data_rx));
}
}
if (ugeth->p_exf_glbl_param) {
@@ -1105,7 +1091,7 @@ static void dump_regs(ucc_geth_private_t *ugeth)
ugeth_info("Base address: 0x%08x",
(u32) & ugeth->p_send_q_mem_reg->sqqd[i]);
mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
- sizeof(ucc_geth_send_queue_qd_t));
+ sizeof(struct ucc_geth_send_queue_qd));
}
}
if (ugeth->p_scheduler) {
@@ -1187,7 +1173,7 @@ static void dump_regs(ucc_geth_private_t *ugeth)
qe_muram_addr(in_be32
(&ugeth->p_rx_bd_qs_tbl[i].
bdbaseptr)),
- sizeof(ucc_geth_rx_prefetched_bds_t));
+ sizeof(struct ucc_geth_rx_prefetched_bds));
}
}
if (ugeth->p_init_enet_param_shadow) {
@@ -1198,7 +1184,7 @@ static void dump_regs(ucc_geth_private_t *ugeth)
mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
sizeof(*ugeth->p_init_enet_param_shadow));
- size = sizeof(ucc_geth_thread_rx_pram_t);
+ size = sizeof(struct ucc_geth_thread_rx_pram);
if (ugeth->ug_info->rxExtendedFiltering) {
size +=
THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
@@ -1216,7 +1202,7 @@ static void dump_regs(ucc_geth_private_t *ugeth)
&(ugeth->p_init_enet_param_shadow->
txthread[0]),
ENET_INIT_PARAM_MAX_ENTRIES_TX,
- sizeof(ucc_geth_thread_tx_pram_t),
+ sizeof(struct ucc_geth_thread_tx_pram),
ugeth->ug_info->riscTx, 0);
dump_init_enet_entries(ugeth,
&(ugeth->p_init_enet_param_shadow->
@@ -1578,12 +1564,12 @@ static int init_min_frame_len(u16 min_frame_length,
return 0;
}
-static int adjust_enet_interface(ucc_geth_private_t *ugeth)
+static int adjust_enet_interface(struct ucc_geth_private *ugeth)
{
- ucc_geth_info_t *ug_info;
- ucc_geth_t *ug_regs;
- ucc_fast_t *uf_regs;
- enet_speed_e speed;
+ struct ucc_geth_info *ug_info;
+ struct ucc_geth *ug_regs;
+ struct ucc_fast *uf_regs;
+ enum enet_speed speed;
int ret_val, rpm = 0, tbi = 0, r10m = 0, rmm =
0, limited_to_full_duplex = 0;
u32 upsmr, maccfg2, utbipar, tbiBaseAddress;
@@ -1691,8 +1677,8 @@ static int adjust_enet_interface(ucc_geth_private_t *ugeth)
*/
static void adjust_link(struct net_device *dev)
{
- ucc_geth_private_t *ugeth = netdev_priv(dev);
- ucc_geth_t *ug_regs;
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ struct ucc_geth *ug_regs;
u32 tempval;
struct ugeth_mii_info *mii_info = ugeth->mii_info;
@@ -1722,7 +1708,7 @@ static void adjust_link(struct net_device *dev)
if (mii_info->speed != ugeth->oldspeed) {
switch (mii_info->speed) {
case 1000:
-#ifdef CONFIG_MPC836x
+#ifdef CONFIG_PPC_MPC836x
/* FIXME: This code is for 100Mbs BUG fixing,
remove this when it is fixed!!! */
if (ugeth->ug_info->enet_interface ==
@@ -1768,7 +1754,7 @@ remove this when it is fixed!!! */
break;
case 100:
case 10:
-#ifdef CONFIG_MPC836x
+#ifdef CONFIG_PPC_MPC836x
/* FIXME: This code is for 100Mbs BUG fixing,
remove this lines when it will be fixed!!! */
ugeth->ug_info->enet_interface = ENET_100_RGMII;
@@ -1827,9 +1813,9 @@ remove this lines when it will be fixed!!! */
*/
static int init_phy(struct net_device *dev)
{
- ucc_geth_private_t *ugeth = netdev_priv(dev);
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
struct phy_info *curphy;
- ucc_mii_mng_t *mii_regs;
+ struct ucc_mii_mng *mii_regs;
struct ugeth_mii_info *mii_info;
int err;
@@ -1914,17 +1900,17 @@ static int init_phy(struct net_device *dev)
}
#ifdef CONFIG_UGETH_TX_ON_DEMOND
-static int ugeth_transmit_on_demand(ucc_geth_private_t *ugeth)
+static int ugeth_transmit_on_demand(struct ucc_geth_private *ugeth)
{
- ucc_fast_transmit_on_demand(ugeth->uccf);
+ struct ucc_fastransmit_on_demand(ugeth->uccf);
return 0;
}
#endif
-static int ugeth_graceful_stop_tx(ucc_geth_private_t *ugeth)
+static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
{
- ucc_fast_private_t *uccf;
+ struct ucc_fast_private *uccf;
u32 cecr_subblock;
u32 temp;
@@ -1940,7 +1926,7 @@ static int ugeth_graceful_stop_tx(ucc_geth_private_t *ugeth)
cecr_subblock =
ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
- (u8) QE_CR_PROTOCOL_ETHERNET, 0);
+ QE_CR_PROTOCOL_ETHERNET, 0);
/* Wait for command to complete */
do {
@@ -1952,9 +1938,9 @@ static int ugeth_graceful_stop_tx(ucc_geth_private_t *ugeth)
return 0;
}
-static int ugeth_graceful_stop_rx(ucc_geth_private_t * ugeth)
+static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth)
{
- ucc_fast_private_t *uccf;
+ struct ucc_fast_private *uccf;
u32 cecr_subblock;
u8 temp;
@@ -1973,7 +1959,7 @@ static int ugeth_graceful_stop_rx(ucc_geth_private_t * ugeth)
ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
ucc_num);
qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
- (u8) QE_CR_PROTOCOL_ETHERNET, 0);
+ QE_CR_PROTOCOL_ETHERNET, 0);
temp = ugeth->p_rx_glbl_pram->rxgstpack;
} while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX));
@@ -1983,41 +1969,40 @@ static int ugeth_graceful_stop_rx(ucc_geth_private_t * ugeth)
return 0;
}
-static int ugeth_restart_tx(ucc_geth_private_t *ugeth)
+static int ugeth_restart_tx(struct ucc_geth_private *ugeth)
{
- ucc_fast_private_t *uccf;
+ struct ucc_fast_private *uccf;
u32 cecr_subblock;
uccf = ugeth->uccf;
cecr_subblock =
ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
- qe_issue_cmd(QE_RESTART_TX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
- 0);
+ qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0);
uccf->stopped_tx = 0;
return 0;
}
-static int ugeth_restart_rx(ucc_geth_private_t *ugeth)
+static int ugeth_restart_rx(struct ucc_geth_private *ugeth)
{
- ucc_fast_private_t *uccf;
+ struct ucc_fast_private *uccf;
u32 cecr_subblock;
uccf = ugeth->uccf;
cecr_subblock =
ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
- qe_issue_cmd(QE_RESTART_RX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
+ qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
0);
uccf->stopped_rx = 0;
return 0;
}
-static int ugeth_enable(ucc_geth_private_t *ugeth, comm_dir_e mode)
+static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
{
- ucc_fast_private_t *uccf;
+ struct ucc_fast_private *uccf;
int enabled_tx, enabled_rx;
uccf = ugeth->uccf;
@@ -2044,9 +2029,9 @@ static int ugeth_enable(ucc_geth_private_t *ugeth, comm_dir_e mode)
}
-static int ugeth_disable(ucc_geth_private_t * ugeth, comm_dir_e mode)
+static int ugeth_disable(struct ucc_geth_private * ugeth, enum comm_dir mode)
{
- ucc_fast_private_t *uccf;
+ struct ucc_fast_private *uccf;
uccf = ugeth->uccf;
@@ -2069,7 +2054,7 @@ static int ugeth_disable(ucc_geth_private_t * ugeth, comm_dir_e mode)
return 0;
}
-static void ugeth_dump_regs(ucc_geth_private_t *ugeth)
+static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
{
#ifdef DEBUG
ucc_fast_dump_regs(ugeth->uccf);
@@ -2079,9 +2064,9 @@ static void ugeth_dump_regs(ucc_geth_private_t *ugeth)
}
#ifdef CONFIG_UGETH_FILTERING
-static int ugeth_ext_filtering_serialize_tad(ucc_geth_tad_params_t *
+static int ugeth_ext_filtering_serialize_tad(struct ucc_geth_tad_params *
p_UccGethTadParams,
- qe_fltr_tad_t *qe_fltr_tad)
+ struct qe_fltr_tad *qe_fltr_tad)
{
u16 temp;
@@ -2119,11 +2104,11 @@ static int ugeth_ext_filtering_serialize_tad(ucc_geth_tad_params_t *
return 0;
}
-static enet_addr_container_t
- *ugeth_82xx_filtering_get_match_addr_in_hash(ucc_geth_private_t *ugeth,
- enet_addr_t *p_enet_addr)
+static struct enet_addr_container_t
+ *ugeth_82xx_filtering_get_match_addr_in_hash(struct ucc_geth_private *ugeth,
+ struct enet_addr *p_enet_addr)
{
- enet_addr_container_t *enet_addr_cont;
+ struct enet_addr_container *enet_addr_cont;
struct list_head *p_lh;
u16 i, num;
int32_t j;
@@ -2144,7 +2129,7 @@ static enet_addr_container_t
for (i = 0; i < num; i++) {
enet_addr_cont =
- (enet_addr_container_t *)
+ (struct enet_addr_container *)
ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
for (j = ENET_NUM_OCTETS_PER_ADDRESS - 1; j >= 0; j--) {
if ((*p_enet_addr)[j] != (enet_addr_cont->address)[j])
@@ -2157,11 +2142,11 @@ static enet_addr_container_t
return NULL;
}
-static int ugeth_82xx_filtering_add_addr_in_hash(ucc_geth_private_t *ugeth,
- enet_addr_t *p_enet_addr)
+static int ugeth_82xx_filtering_add_addr_in_hash(struct ucc_geth_private *ugeth,
+ struct enet_addr *p_enet_addr)
{
- ucc_geth_enet_address_recognition_location_e location;
- enet_addr_container_t *enet_addr_cont;
+ enum ucc_geth_enet_address_recognition_location location;
+ struct enet_addr_container *enet_addr_cont;
struct list_head *p_lh;
u8 i;
u32 limit;
@@ -2196,18 +2181,17 @@ static int ugeth_82xx_filtering_add_addr_in_hash(ucc_geth_private_t *ugeth,
enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
++(*p_counter);
- hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address));
-
+ hw_add_addr_in_hash(ugeth, enet_addr_cont->address);
return 0;
}
-static int ugeth_82xx_filtering_clear_addr_in_hash(ucc_geth_private_t *ugeth,
- enet_addr_t *p_enet_addr)
+static int ugeth_82xx_filtering_clear_addr_in_hash(struct ucc_geth_private *ugeth,
+ struct enet_addr *p_enet_addr)
{
- ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
- enet_addr_container_t *enet_addr_cont;
- ucc_fast_private_t *uccf;
- comm_dir_e comm_dir;
+ struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
+ struct enet_addr_container *enet_addr_cont;
+ struct ucc_fast_private *uccf;
+ enum comm_dir comm_dir;
u16 i, num;
struct list_head *p_lh;
u32 *addr_h, *addr_l;
@@ -2216,7 +2200,7 @@ static int ugeth_82xx_filtering_clear_addr_in_hash(ucc_geth_private_t *ugeth,
uccf = ugeth->uccf;
p_82xx_addr_filt =
- (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
+ (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
addressfiltering;
if (!
@@ -2256,9 +2240,9 @@ static int ugeth_82xx_filtering_clear_addr_in_hash(ucc_geth_private_t *ugeth,
num = --(*p_counter);
for (i = 0; i < num; i++) {
enet_addr_cont =
- (enet_addr_container_t *)
+ (struct enet_addr_container *)
ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
- hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address));
+ hw_add_addr_in_hash(ugeth, enet_addr_cont->address);
enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
}
@@ -2269,14 +2253,14 @@ static int ugeth_82xx_filtering_clear_addr_in_hash(ucc_geth_private_t *ugeth,
}
#endif /* CONFIG_UGETH_FILTERING */
-static int ugeth_82xx_filtering_clear_all_addr_in_hash(ucc_geth_private_t *
+static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private *
ugeth,
- enet_addr_type_e
+ enum enet_addr_type
enet_addr_type)
{
- ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
- ucc_fast_private_t *uccf;
- comm_dir_e comm_dir;
+ struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
+ struct ucc_fast_private *uccf;
+ enum comm_dir comm_dir;
struct list_head *p_lh;
u16 i, num;
u32 *addr_h, *addr_l;
@@ -2285,7 +2269,7 @@ static int ugeth_82xx_filtering_clear_all_addr_in_hash(ucc_geth_private_t *
uccf = ugeth->uccf;
p_82xx_addr_filt =
- (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
+ (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
addressfiltering;
if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
@@ -2331,8 +2315,8 @@ static int ugeth_82xx_filtering_clear_all_addr_in_hash(ucc_geth_private_t *
}
#ifdef CONFIG_UGETH_FILTERING
-static int ugeth_82xx_filtering_add_addr_in_paddr(ucc_geth_private_t *ugeth,
- enet_addr_t *p_enet_addr,
+static int ugeth_82xx_filtering_add_addr_in_paddr(struct ucc_geth_private *ugeth,
+ struct enet_addr *p_enet_addr,
u8 paddr_num)
{
int i;
@@ -2352,14 +2336,14 @@ static int ugeth_82xx_filtering_add_addr_in_paddr(ucc_geth_private_t *ugeth,
}
#endif /* CONFIG_UGETH_FILTERING */
-static int ugeth_82xx_filtering_clear_addr_in_paddr(ucc_geth_private_t *ugeth,
+static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth,
u8 paddr_num)
{
ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
}
-static void ucc_geth_memclean(ucc_geth_private_t *ugeth)
+static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
{
u16 i, j;
u8 *bd;
@@ -2433,8 +2417,8 @@ static void ucc_geth_memclean(ucc_geth_private_t *ugeth)
for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
if (ugeth->tx_skbuff[i][j]) {
dma_unmap_single(NULL,
- BD_BUFFER_ARG(bd),
- (BD_STATUS_AND_LENGTH(bd) &
+ ((qe_bd_t *)bd)->buf,
+ (in_be32((u32 *)bd) &
BD_LENGTH_MASK),
DMA_TO_DEVICE);
dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
@@ -2460,18 +2444,17 @@ static void ucc_geth_memclean(ucc_geth_private_t *ugeth)
bd = ugeth->p_rx_bd_ring[i];
for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
if (ugeth->rx_skbuff[i][j]) {
- dma_unmap_single(NULL, BD_BUFFER(bd),
- ugeth->ug_info->
- uf_info.
- max_rx_buf_length +
- UCC_GETH_RX_DATA_BUF_ALIGNMENT,
- DMA_FROM_DEVICE);
-
- dev_kfree_skb_any(ugeth->
- rx_skbuff[i][j]);
+ dma_unmap_single(NULL,
+ ((struct qe_bd *)bd)->buf,
+ ugeth->ug_info->
+ uf_info.max_rx_buf_length +
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(
+ ugeth->rx_skbuff[i][j]);
ugeth->rx_skbuff[i][j] = NULL;
}
- bd += UCC_GETH_SIZE_OF_BD;
+ bd += sizeof(struct qe_bd);
}
kfree(ugeth->rx_skbuff[i]);
@@ -2496,11 +2479,11 @@ static void ucc_geth_memclean(ucc_geth_private_t *ugeth)
static void ucc_geth_set_multi(struct net_device *dev)
{
- ucc_geth_private_t *ugeth;
+ struct ucc_geth_private *ugeth;
struct dev_mc_list *dmi;
- ucc_fast_t *uf_regs;
- ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
- enet_addr_t tempaddr;
+ struct ucc_fast *uf_regs;
+ struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
+ u8 tempaddr[6];
u8 *mcptr, *tdptr;
int i, j;
@@ -2517,7 +2500,7 @@ static void ucc_geth_set_multi(struct net_device *dev)
uf_regs->upsmr &= ~UPSMR_PRO;
p_82xx_addr_filt =
- (ucc_geth_82xx_address_filtering_pram_t *) ugeth->
+ (struct ucc_geth_82xx_address_filtering_pram *) ugeth->
p_rx_glbl_pram->addressfiltering;
if (dev->flags & IFF_ALLMULTI) {
@@ -2546,23 +2529,22 @@ static void ucc_geth_set_multi(struct net_device *dev)
* copy bytes MSB first from dmi_addr.
*/
mcptr = (u8 *) dmi->dmi_addr + 5;
- tdptr = (u8 *) & tempaddr;
+ tdptr = (u8 *) tempaddr;
for (j = 0; j < 6; j++)
*tdptr++ = *mcptr--;
/* Ask CPM to run CRC and set bit in
* filter mask.
*/
- hw_add_addr_in_hash(ugeth, &tempaddr);
-
+ hw_add_addr_in_hash(ugeth, tempaddr);
}
}
}
}
-static void ucc_geth_stop(ucc_geth_private_t *ugeth)
+static void ucc_geth_stop(struct ucc_geth_private *ugeth)
{
- ucc_geth_t *ug_regs = ugeth->ug_regs;
+ struct ucc_geth *ug_regs = ugeth->ug_regs;
u32 tempval;
ugeth_vdbg("%s: IN", __FUNCTION__);
@@ -2605,15 +2587,15 @@ static void ucc_geth_stop(ucc_geth_private_t *ugeth)
ucc_geth_memclean(ugeth);
}
-static int ucc_geth_startup(ucc_geth_private_t *ugeth)
+static int ucc_geth_startup(struct ucc_geth_private *ugeth)
{
- ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
- ucc_geth_init_pram_t *p_init_enet_pram;
- ucc_fast_private_t *uccf;
- ucc_geth_info_t *ug_info;
- ucc_fast_info_t *uf_info;
- ucc_fast_t *uf_regs;
- ucc_geth_t *ug_regs;
+ struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
+ struct ucc_geth_init_pram *p_init_enet_pram;
+ struct ucc_fast_private *uccf;
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_info *uf_info;
+ struct ucc_fast *uf_regs;
+ struct ucc_geth *ug_regs;
int ret_val = -EINVAL;
u32 remoder = UCC_GETH_REMODER_INIT;
u32 init_enet_pram_offset, cecr_subblock, command, maccfg1;
@@ -2788,7 +2770,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
uf_regs = uccf->uf_regs;
- ug_regs = (ucc_geth_t *) (uccf->uf_regs);
+ ug_regs = (struct ucc_geth *) (uccf->uf_regs);
ugeth->ug_regs = ug_regs;
init_default_reg_vals(&uf_regs->upsmr,
@@ -2869,10 +2851,10 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
/* Allocate in multiple of
UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
according to spec */
- length = ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD)
+ length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd))
/ UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
* UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
- if ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD) %
+ if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
@@ -2904,13 +2886,13 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
}
/* Zero unused end of bd ring, according to spec */
memset(ugeth->p_tx_bd_ring[j] +
- ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD, 0,
- length - ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD);
+ ug_info->bdRingLenTx[j] * sizeof(struct qe_bd), 0,
+ length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
}
/* Allocate Rx bds */
for (j = 0; j < ug_info->numQueuesRx; j++) {
- length = ug_info->bdRingLenRx[j] * UCC_GETH_SIZE_OF_BD;
+ length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
u32 align = 4;
if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
@@ -2960,12 +2942,15 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
- BD_BUFFER_CLEAR(bd);
- BD_STATUS_AND_LENGTH_SET(bd, 0);
- bd += UCC_GETH_SIZE_OF_BD;
+ /* clear bd buffer */
+ out_be32(&((struct qe_bd *)bd)->buf, 0);
+ /* set bd status and length */
+ out_be32((u32 *)bd, 0);
+ bd += sizeof(struct qe_bd);
}
- bd -= UCC_GETH_SIZE_OF_BD;
- BD_STATUS_AND_LENGTH_SET(bd, T_W);/* for last BD set Wrap bit */
+ bd -= sizeof(struct qe_bd);
+ /* set bd status and length */
+ out_be32((u32 *)bd, T_W); /* for last BD set Wrap bit */
}
/* Init Rx bds */
@@ -2989,12 +2974,15 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
ugeth->skb_currx[j] = 0;
bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
- BD_STATUS_AND_LENGTH_SET(bd, R_I);
- BD_BUFFER_CLEAR(bd);
- bd += UCC_GETH_SIZE_OF_BD;
+ /* set bd status and length */
+ out_be32((u32 *)bd, R_I);
+ /* clear bd buffer */
+ out_be32(&((struct qe_bd *)bd)->buf, 0);
+ bd += sizeof(struct qe_bd);
}
- bd -= UCC_GETH_SIZE_OF_BD;
- BD_STATUS_AND_LENGTH_SET(bd, R_W);/* for last BD set Wrap bit */
+ bd -= sizeof(struct qe_bd);
+ /* set bd status and length */
+ out_be32((u32 *)bd, R_W); /* for last BD set Wrap bit */
}
/*
@@ -3003,7 +2991,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
/* Tx global PRAM */
/* Allocate global tx parameter RAM page */
ugeth->tx_glbl_pram_offset =
- qe_muram_alloc(sizeof(ucc_geth_tx_global_pram_t),
+ qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
if (IS_MURAM_ERR(ugeth->tx_glbl_pram_offset)) {
ugeth_err
@@ -3013,10 +3001,10 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
return -ENOMEM;
}
ugeth->p_tx_glbl_pram =
- (ucc_geth_tx_global_pram_t *) qe_muram_addr(ugeth->
+ (struct ucc_geth_tx_global_pram *) qe_muram_addr(ugeth->
tx_glbl_pram_offset);
/* Zero out p_tx_glbl_pram */
- memset(ugeth->p_tx_glbl_pram, 0, sizeof(ucc_geth_tx_global_pram_t));
+ memset(ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
/* Fill global PRAM */
@@ -3024,7 +3012,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
/* Size varies with number of Tx threads */
ugeth->thread_dat_tx_offset =
qe_muram_alloc(numThreadsTxNumerical *
- sizeof(ucc_geth_thread_data_tx_t) +
+ sizeof(struct ucc_geth_thread_data_tx) +
32 * (numThreadsTxNumerical == 1),
UCC_GETH_THREAD_DATA_ALIGNMENT);
if (IS_MURAM_ERR(ugeth->thread_dat_tx_offset)) {
@@ -3036,7 +3024,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
}
ugeth->p_thread_data_tx =
- (ucc_geth_thread_data_tx_t *) qe_muram_addr(ugeth->
+ (struct ucc_geth_thread_data_tx *) qe_muram_addr(ugeth->
thread_dat_tx_offset);
out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
@@ -3053,7 +3041,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
/* Size varies with number of Tx queues */
ugeth->send_q_mem_reg_offset =
qe_muram_alloc(ug_info->numQueuesTx *
- sizeof(ucc_geth_send_queue_qd_t),
+ sizeof(struct ucc_geth_send_queue_qd),
UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
if (IS_MURAM_ERR(ugeth->send_q_mem_reg_offset)) {
ugeth_err
@@ -3064,7 +3052,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
}
ugeth->p_send_q_mem_reg =
- (ucc_geth_send_queue_mem_region_t *) qe_muram_addr(ugeth->
+ (struct ucc_geth_send_queue_mem_region *) qe_muram_addr(ugeth->
send_q_mem_reg_offset);
out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
@@ -3073,7 +3061,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
for (i = 0; i < ug_info->numQueuesTx; i++) {
endOfRing =
ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
- 1) * UCC_GETH_SIZE_OF_BD;
+ 1) * sizeof(struct qe_bd);
if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
(u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
@@ -3096,7 +3084,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
if (ug_info->numQueuesTx > 1) {
/* scheduler exists only if more than 1 tx queue */
ugeth->scheduler_offset =
- qe_muram_alloc(sizeof(ucc_geth_scheduler_t),
+ qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
UCC_GETH_SCHEDULER_ALIGNMENT);
if (IS_MURAM_ERR(ugeth->scheduler_offset)) {
ugeth_err
@@ -3107,12 +3095,12 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
}
ugeth->p_scheduler =
- (ucc_geth_scheduler_t *) qe_muram_addr(ugeth->
+ (struct ucc_geth_scheduler *) qe_muram_addr(ugeth->
scheduler_offset);
out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
ugeth->scheduler_offset);
/* Zero out p_scheduler */
- memset(ugeth->p_scheduler, 0, sizeof(ucc_geth_scheduler_t));
+ memset(ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
/* Set values in scheduler */
out_be32(&ugeth->p_scheduler->mblinterval,
@@ -3144,7 +3132,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
ugeth->tx_fw_statistics_pram_offset =
qe_muram_alloc(sizeof
- (ucc_geth_tx_firmware_statistics_pram_t),
+ (struct ucc_geth_tx_firmware_statistics_pram),
UCC_GETH_TX_STATISTICS_ALIGNMENT);
if (IS_MURAM_ERR(ugeth->tx_fw_statistics_pram_offset)) {
ugeth_err
@@ -3154,11 +3142,11 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
return -ENOMEM;
}
ugeth->p_tx_fw_statistics_pram =
- (ucc_geth_tx_firmware_statistics_pram_t *)
+ (struct ucc_geth_tx_firmware_statistics_pram *)
qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
/* Zero out p_tx_fw_statistics_pram */
memset(ugeth->p_tx_fw_statistics_pram,
- 0, sizeof(ucc_geth_tx_firmware_statistics_pram_t));
+ 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram));
}
/* temoder */
@@ -3183,7 +3171,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
/* Rx global PRAM */
/* Allocate global rx parameter RAM page */
ugeth->rx_glbl_pram_offset =
- qe_muram_alloc(sizeof(ucc_geth_rx_global_pram_t),
+ qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
if (IS_MURAM_ERR(ugeth->rx_glbl_pram_offset)) {
ugeth_err
@@ -3193,10 +3181,10 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
return -ENOMEM;
}
ugeth->p_rx_glbl_pram =
- (ucc_geth_rx_global_pram_t *) qe_muram_addr(ugeth->
+ (struct ucc_geth_rx_global_pram *) qe_muram_addr(ugeth->
rx_glbl_pram_offset);
/* Zero out p_rx_glbl_pram */
- memset(ugeth->p_rx_glbl_pram, 0, sizeof(ucc_geth_rx_global_pram_t));
+ memset(ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
/* Fill global PRAM */
@@ -3204,7 +3192,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
/* Size varies with number of Rx threads */
ugeth->thread_dat_rx_offset =
qe_muram_alloc(numThreadsRxNumerical *
- sizeof(ucc_geth_thread_data_rx_t),
+ sizeof(struct ucc_geth_thread_data_rx),
UCC_GETH_THREAD_DATA_ALIGNMENT);
if (IS_MURAM_ERR(ugeth->thread_dat_rx_offset)) {
ugeth_err
@@ -3215,7 +3203,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
}
ugeth->p_thread_data_rx =
- (ucc_geth_thread_data_rx_t *) qe_muram_addr(ugeth->
+ (struct ucc_geth_thread_data_rx *) qe_muram_addr(ugeth->
thread_dat_rx_offset);
out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
@@ -3227,7 +3215,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
ugeth->rx_fw_statistics_pram_offset =
qe_muram_alloc(sizeof
- (ucc_geth_rx_firmware_statistics_pram_t),
+ (struct ucc_geth_rx_firmware_statistics_pram),
UCC_GETH_RX_STATISTICS_ALIGNMENT);
if (IS_MURAM_ERR(ugeth->rx_fw_statistics_pram_offset)) {
ugeth_err
@@ -3237,11 +3225,11 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
return -ENOMEM;
}
ugeth->p_rx_fw_statistics_pram =
- (ucc_geth_rx_firmware_statistics_pram_t *)
+ (struct ucc_geth_rx_firmware_statistics_pram *)
qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
/* Zero out p_rx_fw_statistics_pram */
memset(ugeth->p_rx_fw_statistics_pram, 0,
- sizeof(ucc_geth_rx_firmware_statistics_pram_t));
+ sizeof(struct ucc_geth_rx_firmware_statistics_pram));
}
/* intCoalescingPtr */
@@ -3249,7 +3237,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
/* Size varies with number of Rx queues */
ugeth->rx_irq_coalescing_tbl_offset =
qe_muram_alloc(ug_info->numQueuesRx *
- sizeof(ucc_geth_rx_interrupt_coalescing_entry_t),
+ sizeof(struct ucc_geth_rx_interrupt_coalescing_entry),
UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
if (IS_MURAM_ERR(ugeth->rx_irq_coalescing_tbl_offset)) {
ugeth_err
@@ -3260,7 +3248,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
}
ugeth->p_rx_irq_coalescing_tbl =
- (ucc_geth_rx_interrupt_coalescing_table_t *)
+ (struct ucc_geth_rx_interrupt_coalescing_table *)
qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
ugeth->rx_irq_coalescing_tbl_offset);
@@ -3300,7 +3288,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
l3qt = 0;
for (i = 0; i < 8; i++)
l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
- out_be32(&ugeth->p_rx_glbl_pram->l3qt[j], l3qt);
+ out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt);
}
/* vlantype */
@@ -3316,8 +3304,8 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
/* Size varies with number of Rx queues */
ugeth->rx_bd_qs_tbl_offset =
qe_muram_alloc(ug_info->numQueuesRx *
- (sizeof(ucc_geth_rx_bd_queues_entry_t) +
- sizeof(ucc_geth_rx_prefetched_bds_t)),
+ (sizeof(struct ucc_geth_rx_bd_queues_entry) +
+ sizeof(struct ucc_geth_rx_prefetched_bds)),
UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
if (IS_MURAM_ERR(ugeth->rx_bd_qs_tbl_offset)) {
ugeth_err
@@ -3328,14 +3316,14 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
}
ugeth->p_rx_bd_qs_tbl =
- (ucc_geth_rx_bd_queues_entry_t *) qe_muram_addr(ugeth->
+ (struct ucc_geth_rx_bd_queues_entry *) qe_muram_addr(ugeth->
rx_bd_qs_tbl_offset);
out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
/* Zero out p_rx_bd_qs_tbl */
memset(ugeth->p_rx_bd_qs_tbl,
0,
- ug_info->numQueuesRx * (sizeof(ucc_geth_rx_bd_queues_entry_t) +
- sizeof(ucc_geth_rx_prefetched_bds_t)));
+ ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) +
+ sizeof(struct ucc_geth_rx_prefetched_bds)));
/* Setup the table */
/* Assume BD rings are already established */
@@ -3406,7 +3394,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
/* Allocate memory for extended filtering Mode Global
Parameters */
ugeth->exf_glbl_param_offset =
- qe_muram_alloc(sizeof(ucc_geth_exf_global_pram_t),
+ qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram),
UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
if (IS_MURAM_ERR(ugeth->exf_glbl_param_offset)) {
ugeth_err
@@ -3417,7 +3405,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
}
ugeth->p_exf_glbl_param =
- (ucc_geth_exf_global_pram_t *) qe_muram_addr(ugeth->
+ (struct ucc_geth_exf_global_pram *) qe_muram_addr(ugeth->
exf_glbl_param_offset);
out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
ugeth->exf_glbl_param_offset);
@@ -3439,7 +3427,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
INIT_LIST_HEAD(&ugeth->ind_hash_q);
}
p_82xx_addr_filt =
- (ucc_geth_82xx_address_filtering_pram_t *) ugeth->
+ (struct ucc_geth_82xx_address_filtering_pram *) ugeth->
p_rx_glbl_pram->addressfiltering;
ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
@@ -3462,7 +3450,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
* allocated resources can be released when the channel is freed.
*/
if (!(ugeth->p_init_enet_param_shadow =
- (ucc_geth_init_pram_t *) kmalloc(sizeof(ucc_geth_init_pram_t),
+ (struct ucc_geth_init_pram *) kmalloc(sizeof(struct ucc_geth_init_pram),
GFP_KERNEL))) {
ugeth_err
("%s: Can not allocate memory for"
@@ -3472,7 +3460,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
}
/* Zero out *p_init_enet_param_shadow */
memset((char *)ugeth->p_init_enet_param_shadow,
- 0, sizeof(ucc_geth_init_pram_t));
+ 0, sizeof(struct ucc_geth_init_pram));
/* Fill shadow InitEnet command parameter structure */
@@ -3506,7 +3494,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
}
ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
ug_info->largestexternallookupkeysize;
- size = sizeof(ucc_geth_thread_rx_pram_t);
+ size = sizeof(struct ucc_geth_thread_rx_pram);
if (ug_info->rxExtendedFiltering) {
size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
if (ug_info->largestexternallookupkeysize ==
@@ -3537,7 +3525,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
fill_init_enet_entries(ugeth,
&(ugeth->p_init_enet_param_shadow->
txthread[0]), numThreadsTxNumerical,
- sizeof(ucc_geth_thread_tx_pram_t),
+ sizeof(struct ucc_geth_thread_tx_pram),
UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
ug_info->riscTx, 0)) != 0) {
ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
@@ -3557,7 +3545,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
}
/* Allocate InitEnet command parameter structure */
- init_enet_pram_offset = qe_muram_alloc(sizeof(ucc_geth_init_pram_t), 4);
+ init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4);
if (IS_MURAM_ERR(init_enet_pram_offset)) {
ugeth_err
("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
@@ -3566,7 +3554,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
return -ENOMEM;
}
p_init_enet_pram =
- (ucc_geth_init_pram_t *) qe_muram_addr(init_enet_pram_offset);
+ (struct ucc_geth_init_pram *) qe_muram_addr(init_enet_pram_offset);
/* Copy shadow InitEnet command parameter structure into PRAM */
p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1;
@@ -3591,7 +3579,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
/* Issue QE command */
cecr_subblock =
ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
- qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
+ qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
init_enet_pram_offset);
/* Free InitEnet command parameter */
@@ -3603,7 +3591,7 @@ static int ucc_geth_startup(ucc_geth_private_t *ugeth)
/* returns a net_device_stats structure pointer */
static struct net_device_stats *ucc_geth_get_stats(struct net_device *dev)
{
- ucc_geth_private_t *ugeth = netdev_priv(dev);
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
return &(ugeth->stats);
}
@@ -3614,7 +3602,7 @@ static struct net_device_stats *ucc_geth_get_stats(struct net_device *dev)
* starting over will fix the problem. */
static void ucc_geth_timeout(struct net_device *dev)
{
- ucc_geth_private_t *ugeth = netdev_priv(dev);
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
ugeth_vdbg("%s: IN", __FUNCTION__);
@@ -3634,7 +3622,7 @@ static void ucc_geth_timeout(struct net_device *dev)
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- ucc_geth_private_t *ugeth = netdev_priv(dev);
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
u8 *bd; /* BD pointer */
u32 bd_status;
u8 txQ = 0;
@@ -3647,7 +3635,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Start from the next BD that should be filled */
bd = ugeth->txBd[txQ];
- bd_status = BD_STATUS_AND_LENGTH(bd);
+ bd_status = in_be32((u32 *)bd);
/* Save the skb pointer so we can free it later */
ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
@@ -3657,20 +3645,21 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
/* set up the buffer descriptor */
- BD_BUFFER_SET(bd,
+ out_be32(&((struct qe_bd *)bd)->buf,
dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE));
- //printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data);
+ /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */
bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
- BD_STATUS_AND_LENGTH_SET(bd, bd_status);
+ /* set bd status and length */
+ out_be32((u32 *)bd, bd_status);
dev->trans_start = jiffies;
/* Move to next BD in the ring */
if (!(bd_status & T_W))
- ugeth->txBd[txQ] = bd + UCC_GETH_SIZE_OF_BD;
+ ugeth->txBd[txQ] = bd + sizeof(struct qe_bd);
else
ugeth->txBd[txQ] = ugeth->p_tx_bd_ring[txQ];
@@ -3695,7 +3684,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
-static int ucc_geth_rx(ucc_geth_private_t *ugeth, u8 rxQ, int rx_work_limit)
+static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
{
struct sk_buff *skb;
u8 *bd;
@@ -3709,11 +3698,11 @@ static int ucc_geth_rx(ucc_geth_private_t *ugeth, u8 rxQ, int rx_work_limit)
/* collect received buffers */
bd = ugeth->rxBd[rxQ];
- bd_status = BD_STATUS_AND_LENGTH(bd);
+ bd_status = in_be32((u32 *)bd);
/* while there are received buffers and BD is full (~R_E) */
while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
- bdBuffer = (u8 *) BD_BUFFER(bd);
+ bdBuffer = (u8 *) in_be32(&((struct qe_bd *)bd)->buf);
length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
@@ -3768,9 +3757,9 @@ static int ucc_geth_rx(ucc_geth_private_t *ugeth, u8 rxQ, int rx_work_limit)
if (bd_status & R_W)
bd = ugeth->p_rx_bd_ring[rxQ];
else
- bd += UCC_GETH_SIZE_OF_BD;
+ bd += sizeof(struct qe_bd);
- bd_status = BD_STATUS_AND_LENGTH(bd);
+ bd_status = in_be32((u32 *)bd);
}
ugeth->rxBd[rxQ] = bd;
@@ -3781,12 +3770,12 @@ static int ucc_geth_rx(ucc_geth_private_t *ugeth, u8 rxQ, int rx_work_limit)
static int ucc_geth_tx(struct net_device *dev, u8 txQ)
{
/* Start from the next BD that should be filled */
- ucc_geth_private_t *ugeth = netdev_priv(dev);
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
u8 *bd; /* BD pointer */
u32 bd_status;
bd = ugeth->confBd[txQ];
- bd_status = BD_STATUS_AND_LENGTH(bd);
+ bd_status = in_be32((u32 *)bd);
/* Normal processing. */
while ((bd_status & T_R) == 0) {
@@ -3813,7 +3802,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
/* Advance the confirmation BD pointer */
if (!(bd_status & T_W))
- ugeth->confBd[txQ] += UCC_GETH_SIZE_OF_BD;
+ ugeth->confBd[txQ] += sizeof(struct qe_bd);
else
ugeth->confBd[txQ] = ugeth->p_tx_bd_ring[txQ];
}
@@ -3823,7 +3812,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
#ifdef CONFIG_UGETH_NAPI
static int ucc_geth_poll(struct net_device *dev, int *budget)
{
- ucc_geth_private_t *ugeth = netdev_priv(dev);
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
int howmany;
int rx_work_limit = *budget;
u8 rxQ = 0;
@@ -3847,9 +3836,9 @@ static int ucc_geth_poll(struct net_device *dev, int *budget)
static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
{
struct net_device *dev = (struct net_device *)info;
- ucc_geth_private_t *ugeth = netdev_priv(dev);
- ucc_fast_private_t *uccf;
- ucc_geth_info_t *ug_info;
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ struct ucc_fast_private *uccf;
+ struct ucc_geth_info *ug_info;
register u32 ucce = 0;
register u32 bit_mask = UCCE_RXBF_SINGLE_MASK;
register u32 tx_mask = UCCE_TXBF_SINGLE_MASK;
@@ -3912,7 +3901,7 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
static irqreturn_t phy_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
- ucc_geth_private_t *ugeth = netdev_priv(dev);
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
ugeth_vdbg("%s: IN", __FUNCTION__);
@@ -3932,8 +3921,8 @@ static irqreturn_t phy_interrupt(int irq, void *dev_id)
static void ugeth_phy_change(void *data)
{
struct net_device *dev = (struct net_device *)data;
- ucc_geth_private_t *ugeth = netdev_priv(dev);
- ucc_geth_t *ug_regs;
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ struct ucc_geth *ug_regs;
int result = 0;
ugeth_vdbg("%s: IN", __FUNCTION__);
@@ -3963,7 +3952,7 @@ static void ugeth_phy_change(void *data)
static void ugeth_phy_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
- ucc_geth_private_t *ugeth = netdev_priv(dev);
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
schedule_work(&ugeth->tq);
@@ -3979,7 +3968,7 @@ static void ugeth_phy_timer(unsigned long data)
static void ugeth_phy_startup_timer(unsigned long data)
{
struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data;
- ucc_geth_private_t *ugeth = netdev_priv(mii_info->dev);
+ struct ucc_geth_private *ugeth = netdev_priv(mii_info->dev);
static int secondary = UGETH_AN_TIMEOUT;
int result;
@@ -4034,7 +4023,7 @@ static void ugeth_phy_startup_timer(unsigned long data)
/* Returns 0 for success. */
static int ucc_geth_open(struct net_device *dev)
{
- ucc_geth_private_t *ugeth = netdev_priv(dev);
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
int err;
ugeth_vdbg("%s: IN", __FUNCTION__);
@@ -4111,7 +4100,7 @@ static int ucc_geth_open(struct net_device *dev)
/* Stops the kernel queue, and halts the controller */
static int ucc_geth_close(struct net_device *dev)
{
- ucc_geth_private_t *ugeth = netdev_priv(dev);
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
ugeth_vdbg("%s: IN", __FUNCTION__);
@@ -4130,30 +4119,53 @@ static int ucc_geth_close(struct net_device *dev)
const struct ethtool_ops ucc_geth_ethtool_ops = { };
-static int ucc_geth_probe(struct device *device)
+static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *match)
{
- struct platform_device *pdev = to_platform_device(device);
- struct ucc_geth_platform_data *ugeth_pdata;
+ struct device *device = &ofdev->dev;
+ struct device_node *np = ofdev->node;
struct net_device *dev = NULL;
struct ucc_geth_private *ugeth = NULL;
struct ucc_geth_info *ug_info;
- int err;
+ struct resource res;
+ struct device_node *phy;
+ int err, ucc_num, phy_interface;
static int mii_mng_configured = 0;
+ const phandle *ph;
+ const unsigned int *prop;
ugeth_vdbg("%s: IN", __FUNCTION__);
- ugeth_pdata = (struct ucc_geth_platform_data *)pdev->dev.platform_data;
+ prop = get_property(np, "device-id", NULL);
+ ucc_num = *prop - 1;
+ if ((ucc_num < 0) || (ucc_num > 7))
+ return -ENODEV;
+
+ ug_info = &ugeth_info[ucc_num];
+ ug_info->uf_info.ucc_num = ucc_num;
+ prop = get_property(np, "rx-clock", NULL);
+ ug_info->uf_info.rx_clock = *prop;
+ prop = get_property(np, "tx-clock", NULL);
+ ug_info->uf_info.tx_clock = *prop;
+ err = of_address_to_resource(np, 0, &res);
+ if (err)
+ return -EINVAL;
+
+ ug_info->uf_info.regs = res.start;
+ ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
+
+ ph = get_property(np, "phy-handle", NULL);
+ phy = of_find_node_by_phandle(*ph);
- ug_info = &ugeth_info[pdev->id];
- ug_info->uf_info.ucc_num = pdev->id;
- ug_info->uf_info.rx_clock = ugeth_pdata->rx_clock;
- ug_info->uf_info.tx_clock = ugeth_pdata->tx_clock;
- ug_info->uf_info.regs = ugeth_pdata->phy_reg_addr;
- ug_info->uf_info.irq = platform_get_irq(pdev, 0);
- ug_info->phy_address = ugeth_pdata->phy_id;
- ug_info->enet_interface = ugeth_pdata->phy_interface;
- ug_info->board_flags = ugeth_pdata->board_flags;
- ug_info->phy_interrupt = ugeth_pdata->phy_interrupt;
+ if (phy == NULL)
+ return -ENODEV;
+
+ prop = get_property(phy, "reg", NULL);
+ ug_info->phy_address = *prop;
+ prop = get_property(phy, "interface", NULL);
+ ug_info->enet_interface = *prop;
+ ug_info->phy_interrupt = irq_of_parse_and_map(phy, 0);
+ ug_info->board_flags = (ug_info->phy_interrupt == NO_IRQ)?
+ 0:FSL_UGETH_BRD_HAS_PHY_INTR;
printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n",
ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
@@ -4161,12 +4173,44 @@ static int ucc_geth_probe(struct device *device)
if (ug_info == NULL) {
ugeth_err("%s: [%d] Missing additional data!", __FUNCTION__,
- pdev->id);
+ ucc_num);
return -ENODEV;
}
+ /* FIXME: Work around for early chip rev. */
+ /* There's a bug in initial chip rev(s) in the RGMII ac */
+ /* timing. */
+ /* The following compensates by writing to the reserved */
+ /* QE Port Output Hold Registers (CPOH1?). */
+ prop = get_property(phy, "interface", NULL);
+ phy_interface = *prop;
+ if ((phy_interface == ENET_1000_RGMII) ||
+ (phy_interface == ENET_100_RGMII) ||
+ (phy_interface == ENET_10_RGMII)) {
+ struct device_node *soc;
+ phys_addr_t immrbase = -1;
+ u32 *tmp_reg;
+ u32 tmp_val;
+
+ soc = of_find_node_by_type(NULL, "soc");
+ if (soc) {
+ unsigned int size;
+ const void *prop = get_property(soc, "reg", &size);
+ immrbase = of_translate_address(soc, prop);
+ of_node_put(soc);
+ }
+
+ tmp_reg = (u32 *) ioremap(immrbase + 0x14A8, 0x4);
+ tmp_val = in_be32(tmp_reg);
+ if (ucc_num == 1)
+ out_be32(tmp_reg, tmp_val | 0x00003000);
+ else if (ucc_num == 2)
+ out_be32(tmp_reg, tmp_val | 0x0c000000);
+ iounmap(tmp_reg);
+ }
+
if (!mii_mng_configured) {
- ucc_set_qe_mux_mii_mng(ug_info->uf_info.ucc_num);
+ ucc_set_qe_mux_mii_mng(ucc_num);
mii_mng_configured = 1;
}
@@ -4213,13 +4257,14 @@ static int ucc_geth_probe(struct device *device)
ugeth->ug_info = ug_info;
ugeth->dev = dev;
- memcpy(dev->dev_addr, ugeth_pdata->mac_addr, 6);
+ memcpy(dev->dev_addr, get_property(np, "mac-address", NULL), 6);
return 0;
}
-static int ucc_geth_remove(struct device *device)
+static int ucc_geth_remove(struct of_device* ofdev)
{
+ struct device *device = &ofdev->dev;
struct net_device *dev = dev_get_drvdata(device);
struct ucc_geth_private *ugeth = netdev_priv(dev);
@@ -4230,28 +4275,38 @@ static int ucc_geth_remove(struct device *device)
return 0;
}
-/* Structure for a device driver */
-static struct device_driver ucc_geth_driver = {
- .name = DRV_NAME,
- .bus = &platform_bus_type,
- .probe = ucc_geth_probe,
- .remove = ucc_geth_remove,
+static struct of_device_id ucc_geth_match[] = {
+ {
+ .type = "network",
+ .compatible = "ucc_geth",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, ucc_geth_match);
+
+static struct of_platform_driver ucc_geth_driver = {
+ .name = DRV_NAME,
+ .match_table = ucc_geth_match,
+ .probe = ucc_geth_probe,
+ .remove = ucc_geth_remove,
};
static int __init ucc_geth_init(void)
{
int i;
+
printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
for (i = 0; i < 8; i++)
memcpy(&(ugeth_info[i]), &ugeth_primary_info,
sizeof(ugeth_primary_info));
- return driver_register(&ucc_geth_driver);
+ return of_register_driver(&ucc_geth_driver);
}
static void __exit ucc_geth_exit(void)
{
- driver_unregister(&ucc_geth_driver);
+ of_unregister_driver(&ucc_geth_driver);
}
module_init(ucc_geth_init);
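
For readers following the bus conversion above: the driver stops being a platform_device probed via platform_data and becomes an of_platform driver matched against the device tree, pulling its configuration from properties such as "device-id", "rx-clock", "tx-clock" and "phy-handle". Below is a minimal sketch of that registration pattern using the same 2.6.19-era of_platform API visible in the hunks above; every name here is illustrative, not part of the patch, and the header locations are an assumption about this tree's powerpc port.

#include <asm/of_device.h>      /* assumed header location in this tree */
#include <asm/prom.h>           /* get_property() and friends */

static int example_of_probe(struct of_device *ofdev,
                            const struct of_device_id *match)
{
        struct device_node *np = ofdev->node;
        const unsigned int *prop;

        /* Configuration now comes from device-tree properties rather
         * than from platform_data handed over by board code. */
        prop = get_property(np, "device-id", NULL);
        if (prop == NULL)
                return -ENODEV;
        return 0;
}

static int example_of_remove(struct of_device *ofdev)
{
        return 0;
}

static struct of_device_id example_match[] = {
        { .type = "network", .compatible = "example_eth", },
        {},
};

static struct of_platform_driver example_driver = {
        .name        = "example_eth",
        .match_table = example_match,
        .probe       = example_of_probe,
        .remove      = example_of_remove,
};

/* Registered with of_register_driver(&example_driver) and torn down with
 * of_unregister_driver(), mirroring ucc_geth_init()/ucc_geth_exit() above. */
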
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index 005965f5dd9b..a66561253593 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -36,24 +36,24 @@
#define ENET_INIT_PARAM_MAX_ENTRIES_RX 9
#define ENET_INIT_PARAM_MAX_ENTRIES_TX 8
-typedef struct ucc_mii_mng {
+struct ucc_mii_mng {
u32 miimcfg; /* MII management configuration reg */
u32 miimcom; /* MII management command reg */
u32 miimadd; /* MII management address reg */
u32 miimcon; /* MII management control reg */
u32 miimstat; /* MII management status reg */
u32 miimind; /* MII management indication reg */
-} __attribute__ ((packed)) ucc_mii_mng_t;
+} __attribute__ ((packed));
-typedef struct ucc_geth {
- ucc_fast_t uccf;
+struct ucc_geth {
+ struct ucc_fast uccf;
u32 maccfg1; /* mac configuration reg. 1 */
u32 maccfg2; /* mac configuration reg. 2 */
u32 ipgifg; /* interframe gap reg. */
u32 hafdup; /* half-duplex reg. */
u8 res1[0x10];
- ucc_mii_mng_t miimng; /* MII management structure */
+ struct ucc_mii_mng miimng; /* MII management structure */
u32 ifctl; /* interface control reg */
u32 ifstat; /* interface status reg */
u32 macstnaddr1; /* mac station address part 1 reg */
@@ -111,7 +111,7 @@ typedef struct ucc_geth {
u32 scar; /* Statistics carry register */
u32 scam; /* Statistics carry mask register */
u8 res5[0x200 - 0x1c4];
-} __attribute__ ((packed)) ucc_geth_t;
+} __attribute__ ((packed));
/* UCC GETH TEMODR Register */
#define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics
@@ -508,39 +508,39 @@ typedef struct ucc_geth {
/* UCC GETH UDSR (Data Synchronization Register) */
#define UDSR_MAGIC 0x067E
-typedef struct ucc_geth_thread_data_tx {
+struct ucc_geth_thread_data_tx {
u8 res0[104];
-} __attribute__ ((packed)) ucc_geth_thread_data_tx_t;
+} __attribute__ ((packed));
-typedef struct ucc_geth_thread_data_rx {
+struct ucc_geth_thread_data_rx {
u8 res0[40];
-} __attribute__ ((packed)) ucc_geth_thread_data_rx_t;
+} __attribute__ ((packed));
/* Send Queue Queue-Descriptor */
-typedef struct ucc_geth_send_queue_qd {
+struct ucc_geth_send_queue_qd {
u32 bd_ring_base; /* pointer to BD ring base address */
u8 res0[0x8];
u32 last_bd_completed_address;/* initialize to last entry in BD ring */
u8 res1[0x30];
-} __attribute__ ((packed)) ucc_geth_send_queue_qd_t;
+} __attribute__ ((packed));
-typedef struct ucc_geth_send_queue_mem_region {
- ucc_geth_send_queue_qd_t sqqd[NUM_TX_QUEUES];
-} __attribute__ ((packed)) ucc_geth_send_queue_mem_region_t;
+struct ucc_geth_send_queue_mem_region {
+ struct ucc_geth_send_queue_qd sqqd[NUM_TX_QUEUES];
+} __attribute__ ((packed));
-typedef struct ucc_geth_thread_tx_pram {
+struct ucc_geth_thread_tx_pram {
u8 res0[64];
-} __attribute__ ((packed)) ucc_geth_thread_tx_pram_t;
+} __attribute__ ((packed));
-typedef struct ucc_geth_thread_rx_pram {
+struct ucc_geth_thread_rx_pram {
u8 res0[128];
-} __attribute__ ((packed)) ucc_geth_thread_rx_pram_t;
+} __attribute__ ((packed));
#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64
#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64
#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16 96
-typedef struct ucc_geth_scheduler {
+struct ucc_geth_scheduler {
u16 cpucount0; /* CPU packet counter */
u16 cpucount1; /* CPU packet counter */
u16 cecount0; /* QE packet counter */
@@ -574,9 +574,9 @@ typedef struct ucc_geth_scheduler {
/**< weight factor for queues */
u32 minw; /* temporary variable handled by QE */
u8 res1[0x70 - 0x64];
-} __attribute__ ((packed)) ucc_geth_scheduler_t;
+} __attribute__ ((packed));
-typedef struct ucc_geth_tx_firmware_statistics_pram {
+struct ucc_geth_tx_firmware_statistics_pram {
u32 sicoltx; /* single collision */
u32 mulcoltx; /* multiple collision */
u32 latecoltxfr; /* late collision */
@@ -596,9 +596,9 @@ typedef struct ucc_geth_tx_firmware_statistics_pram {
and 1518 octets */
u32 txpktsjumbo; /* total packets (including bad) between 1024
and MAXLength octets */
-} __attribute__ ((packed)) ucc_geth_tx_firmware_statistics_pram_t;
+} __attribute__ ((packed));
-typedef struct ucc_geth_rx_firmware_statistics_pram {
+struct ucc_geth_rx_firmware_statistics_pram {
u32 frrxfcser; /* frames with crc error */
u32 fraligner; /* frames with alignment error */
u32 inrangelenrxer; /* in range length error */
@@ -630,33 +630,33 @@ typedef struct ucc_geth_rx_firmware_statistics_pram {
replaced */
u32 insertvlan; /* total frames that had their VLAN tag
inserted */
-} __attribute__ ((packed)) ucc_geth_rx_firmware_statistics_pram_t;
+} __attribute__ ((packed));
-typedef struct ucc_geth_rx_interrupt_coalescing_entry {
+struct ucc_geth_rx_interrupt_coalescing_entry {
u32 interruptcoalescingmaxvalue; /* interrupt coalescing max
value */
u32 interruptcoalescingcounter; /* interrupt coalescing counter,
initialize to
interruptcoalescingmaxvalue */
-} __attribute__ ((packed)) ucc_geth_rx_interrupt_coalescing_entry_t;
+} __attribute__ ((packed));
-typedef struct ucc_geth_rx_interrupt_coalescing_table {
- ucc_geth_rx_interrupt_coalescing_entry_t coalescingentry[NUM_RX_QUEUES];
+struct ucc_geth_rx_interrupt_coalescing_table {
+ struct ucc_geth_rx_interrupt_coalescing_entry coalescingentry[NUM_RX_QUEUES];
/**< interrupt coalescing entry */
-} __attribute__ ((packed)) ucc_geth_rx_interrupt_coalescing_table_t;
+} __attribute__ ((packed));
-typedef struct ucc_geth_rx_prefetched_bds {
- qe_bd_t bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */
-} __attribute__ ((packed)) ucc_geth_rx_prefetched_bds_t;
+struct ucc_geth_rx_prefetched_bds {
+ struct qe_bd bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */
+} __attribute__ ((packed));
-typedef struct ucc_geth_rx_bd_queues_entry {
+struct ucc_geth_rx_bd_queues_entry {
u32 bdbaseptr; /* BD base pointer */
u32 bdptr; /* BD pointer */
u32 externalbdbaseptr; /* external BD base pointer */
u32 externalbdptr; /* external BD pointer */
-} __attribute__ ((packed)) ucc_geth_rx_bd_queues_entry_t;
+} __attribute__ ((packed));
-typedef struct ucc_geth_tx_global_pram {
+struct ucc_geth_tx_global_pram {
u16 temoder;
u8 res0[0x38 - 0x02];
u32 sqptr; /* a base pointer to send queue memory region */
@@ -670,15 +670,15 @@ typedef struct ucc_geth_tx_global_pram {
u32 tqptr; /* a base pointer to the Tx Queues Memory
Region */
u8 res2[0x80 - 0x74];
-} __attribute__ ((packed)) ucc_geth_tx_global_pram_t;
+} __attribute__ ((packed));
/* structure representing Extended Filtering Global Parameters in PRAM */
-typedef struct ucc_geth_exf_global_pram {
+struct ucc_geth_exf_global_pram {
u32 l2pcdptr; /* individual address filter, high */
u8 res0[0x10 - 0x04];
-} __attribute__ ((packed)) ucc_geth_exf_global_pram_t;
+} __attribute__ ((packed));
-typedef struct ucc_geth_rx_global_pram {
+struct ucc_geth_rx_global_pram {
u32 remoder; /* ethernet mode reg. */
u32 rqptr; /* base pointer to the Rx Queues Memory Region*/
u32 res0[0x1];
@@ -710,12 +710,12 @@ typedef struct ucc_geth_rx_global_pram {
u32 exfGlobalParam; /* base address for extended filtering global
parameters */
u8 res6[0x100 - 0xC4]; /* Initialize to zero */
-} __attribute__ ((packed)) ucc_geth_rx_global_pram_t;
+} __attribute__ ((packed));
#define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01
/* structure representing InitEnet command */
-typedef struct ucc_geth_init_pram {
+struct ucc_geth_init_pram {
u8 resinit1;
u8 resinit2;
u8 resinit3;
@@ -729,7 +729,7 @@ typedef struct ucc_geth_init_pram {
u32 txglobal; /* tx global */
u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */
u8 res3[0x1];
-} __attribute__ ((packed)) ucc_geth_init_pram_t;
+} __attribute__ ((packed));
#define ENET_INIT_PARAM_RGF_SHIFT (32 - 4)
#define ENET_INIT_PARAM_TGF_SHIFT (32 - 8)
@@ -746,27 +746,27 @@ typedef struct ucc_geth_init_pram {
#define ENET_INIT_PARAM_MAGIC_RES_INIT5 0x0400
/* structure representing 82xx Address Filtering Enet Address in PRAM */
-typedef struct ucc_geth_82xx_enet_address {
+struct ucc_geth_82xx_enet_address {
u8 res1[0x2];
u16 h; /* address (MSB) */
u16 m; /* address */
u16 l; /* address (LSB) */
-} __attribute__ ((packed)) ucc_geth_82xx_enet_address_t;
+} __attribute__ ((packed));
/* structure representing 82xx Address Filtering PRAM */
-typedef struct ucc_geth_82xx_address_filtering_pram {
+struct ucc_geth_82xx_address_filtering_pram {
u32 iaddr_h; /* individual address filter, high */
u32 iaddr_l; /* individual address filter, low */
u32 gaddr_h; /* group address filter, high */
u32 gaddr_l; /* group address filter, low */
- ucc_geth_82xx_enet_address_t taddr;
- ucc_geth_82xx_enet_address_t paddr[NUM_OF_PADDRS];
+ struct ucc_geth_82xx_enet_address taddr;
+ struct ucc_geth_82xx_enet_address paddr[NUM_OF_PADDRS];
u8 res0[0x40 - 0x38];
-} __attribute__ ((packed)) ucc_geth_82xx_address_filtering_pram_t;
+} __attribute__ ((packed));
/* GETH Tx firmware statistics structure, used when calling
UCC_GETH_GetStatistics. */
-typedef struct ucc_geth_tx_firmware_statistics {
+struct ucc_geth_tx_firmware_statistics {
u32 sicoltx; /* single collision */
u32 mulcoltx; /* multiple collision */
u32 latecoltxfr; /* late collision */
@@ -786,11 +786,11 @@ typedef struct ucc_geth_tx_firmware_statistics {
and 1518 octets */
u32 txpktsjumbo; /* total packets (including bad) between 1024
and MAXLength octets */
-} __attribute__ ((packed)) ucc_geth_tx_firmware_statistics_t;
+} __attribute__ ((packed));
/* GETH Rx firmware statistics structure, used when calling
UCC_GETH_GetStatistics. */
-typedef struct ucc_geth_rx_firmware_statistics {
+struct ucc_geth_rx_firmware_statistics {
u32 frrxfcser; /* frames with crc error */
u32 fraligner; /* frames with alignment error */
u32 inrangelenrxer; /* in range length error */
@@ -822,11 +822,11 @@ typedef struct ucc_geth_rx_firmware_statistics {
replaced */
u32 insertvlan; /* total frames that had their VLAN tag
inserted */
-} __attribute__ ((packed)) ucc_geth_rx_firmware_statistics_t;
+} __attribute__ ((packed));
/* GETH hardware statistics structure, used when calling
UCC_GETH_GetStatistics. */
-typedef struct ucc_geth_hardware_statistics {
+struct ucc_geth_hardware_statistics {
u32 tx64; /* Total number of frames (including bad
frames) transmitted that were exactly of the
minimal length (64 for untagged, 68 for
@@ -871,7 +871,7 @@ typedef struct ucc_geth_hardware_statistics {
u32 rbca; /* Total number of frames received successfully
that had destination address equal to the
broadcast address */
-} __attribute__ ((packed)) ucc_geth_hardware_statistics_t;
+} __attribute__ ((packed));
/* UCC GETH Tx errors returned via TxConf callback */
#define TX_ERRORS_DEF 0x0200
@@ -1013,21 +1013,21 @@ typedef struct ucc_geth_hardware_statistics {
(MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_112)
/* Ethernet speed */
-typedef enum enet_speed {
+enum enet_speed {
ENET_SPEED_10BT, /* 10 Base T */
ENET_SPEED_100BT, /* 100 Base T */
ENET_SPEED_1000BT /* 1000 Base T */
-} enet_speed_e;
+};
/* Ethernet Address Type. */
-typedef enum enet_addr_type {
+enum enet_addr_type {
ENET_ADDR_TYPE_INDIVIDUAL,
ENET_ADDR_TYPE_GROUP,
ENET_ADDR_TYPE_BROADCAST
-} enet_addr_type_e;
+};
/* TBI / MII Set Register */
-typedef enum enet_tbi_mii_reg {
+enum enet_tbi_mii_reg {
ENET_TBI_MII_CR = 0x00, /* Control (CR ) */
ENET_TBI_MII_SR = 0x01, /* Status (SR ) */
ENET_TBI_MII_ANA = 0x04, /* AN advertisement (ANA ) */
@@ -1040,10 +1040,10 @@ typedef enum enet_tbi_mii_reg {
ENET_TBI_MII_EXST = 0x0F, /* Extended status (EXST ) */
ENET_TBI_MII_JD = 0x10, /* Jitter diagnostics (JD ) */
ENET_TBI_MII_TBICON = 0x11 /* TBI control (TBICON ) */
-} enet_tbi_mii_reg_e;
+};
/* UCC GETH 82xx Ethernet Address Recognition Location */
-typedef enum ucc_geth_enet_address_recognition_location {
+enum ucc_geth_enet_address_recognition_location {
UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_STATION_ADDRESS,/* station
address */
UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_FIRST, /* additional
@@ -1065,10 +1065,10 @@ typedef enum ucc_geth_enet_address_recognition_location {
UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH, /* group hash */
UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH /* individual
hash */
-} ucc_geth_enet_address_recognition_location_e;
+};
/* UCC GETH vlan operation tagged */
-typedef enum ucc_geth_vlan_operation_tagged {
+enum ucc_geth_vlan_operation_tagged {
UCC_GETH_VLAN_OPERATION_TAGGED_NOP = 0x0, /* Tagged - nop */
UCC_GETH_VLAN_OPERATION_TAGGED_REPLACE_VID_PORTION_OF_Q_TAG
= 0x1, /* Tagged - replace vid portion of q tag */
@@ -1076,18 +1076,18 @@ typedef enum ucc_geth_vlan_operation_tagged {
= 0x2, /* Tagged - if vid0 replace vid with default value */
UCC_GETH_VLAN_OPERATION_TAGGED_EXTRACT_Q_TAG_FROM_FRAME
= 0x3 /* Tagged - extract q tag from frame */
-} ucc_geth_vlan_operation_tagged_e;
+};
/* UCC GETH vlan operation non-tagged */
-typedef enum ucc_geth_vlan_operation_non_tagged {
+enum ucc_geth_vlan_operation_non_tagged {
UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP = 0x0, /* Non tagged - nop */
UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT = 0x1 /* Non tagged -
q tag insert
*/
-} ucc_geth_vlan_operation_non_tagged_e;
+};
/* UCC GETH Rx Quality of Service Mode */
-typedef enum ucc_geth_qos_mode {
+enum ucc_geth_qos_mode {
UCC_GETH_QOS_MODE_DEFAULT = 0x0, /* default queue */
UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L2_CRITERIA = 0x1, /* queue
determined
@@ -1097,11 +1097,11 @@ typedef enum ucc_geth_qos_mode {
determined
by L3
criteria */
-} ucc_geth_qos_mode_e;
+};
/* UCC GETH Statistics Gathering Mode - These are bit flags, 'or' them together
for combined functionality */
-typedef enum ucc_geth_statistics_gathering_mode {
+enum ucc_geth_statistics_gathering_mode {
UCC_GETH_STATISTICS_GATHERING_MODE_NONE = 0x00000000, /* No
statistics
gathering */
@@ -1122,10 +1122,10 @@ typedef enum ucc_geth_statistics_gathering_mode {
statistics
gathering
*/
-} ucc_geth_statistics_gathering_mode_e;
+};
/* UCC GETH Pad and CRC Mode - Note, Padding without CRC is not possible */
-typedef enum ucc_geth_maccfg2_pad_and_crc_mode {
+enum ucc_geth_maccfg2_pad_and_crc_mode {
UCC_GETH_PAD_AND_CRC_MODE_NONE
= MACCFG2_PAD_AND_CRC_MODE_NONE, /* Neither Padding
short frames
@@ -1135,61 +1135,59 @@ typedef enum ucc_geth_maccfg2_pad_and_crc_mode {
CRC only */
UCC_GETH_PAD_AND_CRC_MODE_PAD_AND_CRC =
MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC
-} ucc_geth_maccfg2_pad_and_crc_mode_e;
+};
/* UCC GETH upsmr Flow Control Mode */
-typedef enum ucc_geth_flow_control_mode {
+enum ucc_geth_flow_control_mode {
UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE = 0x00000000, /* No automatic
flow control
*/
UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_PAUSE_WHEN_EMERGENCY
= 0x00004000 /* Send pause frame when RxFIFO reaches its
emergency threshold */
-} ucc_geth_flow_control_mode_e;
+};
/* UCC GETH number of threads */
-typedef enum ucc_geth_num_of_threads {
+enum ucc_geth_num_of_threads {
UCC_GETH_NUM_OF_THREADS_1 = 0x1, /* 1 */
UCC_GETH_NUM_OF_THREADS_2 = 0x2, /* 2 */
UCC_GETH_NUM_OF_THREADS_4 = 0x0, /* 4 */
UCC_GETH_NUM_OF_THREADS_6 = 0x3, /* 6 */
UCC_GETH_NUM_OF_THREADS_8 = 0x4 /* 8 */
-} ucc_geth_num_of_threads_e;
+};
/* UCC GETH number of station addresses */
-typedef enum ucc_geth_num_of_station_addresses {
+enum ucc_geth_num_of_station_addresses {
UCC_GETH_NUM_OF_STATION_ADDRESSES_1, /* 1 */
UCC_GETH_NUM_OF_STATION_ADDRESSES_5 /* 5 */
-} ucc_geth_num_of_station_addresses_e;
-
-typedef u8 enet_addr_t[ENET_NUM_OCTETS_PER_ADDRESS];
+};
/* UCC GETH 82xx Ethernet Address Container */
-typedef struct enet_addr_container {
- enet_addr_t address; /* ethernet address */
- ucc_geth_enet_address_recognition_location_e location; /* location in
+struct enet_addr_container {
+ u8 address[ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */
+ enum ucc_geth_enet_address_recognition_location location; /* location in
82xx address
recognition
hardware */
struct list_head node;
-} enet_addr_container_t;
+};
-#define ENET_ADDR_CONT_ENTRY(ptr) list_entry(ptr, enet_addr_container_t, node)
+#define ENET_ADDR_CONT_ENTRY(ptr) list_entry(ptr, struct enet_addr_container, node)
/* UCC GETH Termination Action Descriptor (TAD) structure. */
-typedef struct ucc_geth_tad_params {
+struct ucc_geth_tad_params {
int rx_non_dynamic_extended_features_mode;
int reject_frame;
- ucc_geth_vlan_operation_tagged_e vtag_op;
- ucc_geth_vlan_operation_non_tagged_e vnontag_op;
- ucc_geth_qos_mode_e rqos;
+ enum ucc_geth_vlan_operation_tagged vtag_op;
+ enum ucc_geth_vlan_operation_non_tagged vnontag_op;
+ enum ucc_geth_qos_mode rqos;
u8 vpri;
u16 vid;
-} ucc_geth_tad_params_t;
+};
/* GETH protocol initialization structure */
-typedef struct ucc_geth_info {
- ucc_fast_info_t uf_info;
+struct ucc_geth_info {
+ struct ucc_fast_info uf_info;
u8 numQueuesTx;
u8 numQueuesRx;
int ipCheckSumCheck;
@@ -1251,51 +1249,51 @@ typedef struct ucc_geth_info {
u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX];
u16 bdRingLenTx[NUM_TX_QUEUES];
u16 bdRingLenRx[NUM_RX_QUEUES];
- enet_interface_e enet_interface;
- ucc_geth_num_of_station_addresses_e numStationAddresses;
- qe_fltr_largest_external_tbl_lookup_key_size_e
+ enum enet_interface enet_interface;
+ enum ucc_geth_num_of_station_addresses numStationAddresses;
+ enum qe_fltr_largest_external_tbl_lookup_key_size
largestexternallookupkeysize;
- ucc_geth_statistics_gathering_mode_e statisticsMode;
- ucc_geth_vlan_operation_tagged_e vlanOperationTagged;
- ucc_geth_vlan_operation_non_tagged_e vlanOperationNonTagged;
- ucc_geth_qos_mode_e rxQoSMode;
- ucc_geth_flow_control_mode_e aufc;
- ucc_geth_maccfg2_pad_and_crc_mode_e padAndCrc;
- ucc_geth_num_of_threads_e numThreadsTx;
- ucc_geth_num_of_threads_e numThreadsRx;
- qe_risc_allocation_e riscTx;
- qe_risc_allocation_e riscRx;
-} ucc_geth_info_t;
+ enum ucc_geth_statistics_gathering_mode statisticsMode;
+ enum ucc_geth_vlan_operation_tagged vlanOperationTagged;
+ enum ucc_geth_vlan_operation_non_tagged vlanOperationNonTagged;
+ enum ucc_geth_qos_mode rxQoSMode;
+ enum ucc_geth_flow_control_mode aufc;
+ enum ucc_geth_maccfg2_pad_and_crc_mode padAndCrc;
+ enum ucc_geth_num_of_threads numThreadsTx;
+ enum ucc_geth_num_of_threads numThreadsRx;
+ enum qe_risc_allocation riscTx;
+ enum qe_risc_allocation riscRx;
+};
/* structure representing UCC GETH */
-typedef struct ucc_geth_private {
- ucc_geth_info_t *ug_info;
- ucc_fast_private_t *uccf;
+struct ucc_geth_private {
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_private *uccf;
struct net_device *dev;
struct net_device_stats stats; /* linux network statistics */
- ucc_geth_t *ug_regs;
- ucc_geth_init_pram_t *p_init_enet_param_shadow;
- ucc_geth_exf_global_pram_t *p_exf_glbl_param;
+ struct ucc_geth *ug_regs;
+ struct ucc_geth_init_pram *p_init_enet_param_shadow;
+ struct ucc_geth_exf_global_pram *p_exf_glbl_param;
u32 exf_glbl_param_offset;
- ucc_geth_rx_global_pram_t *p_rx_glbl_pram;
+ struct ucc_geth_rx_global_pram *p_rx_glbl_pram;
u32 rx_glbl_pram_offset;
- ucc_geth_tx_global_pram_t *p_tx_glbl_pram;
+ struct ucc_geth_tx_global_pram *p_tx_glbl_pram;
u32 tx_glbl_pram_offset;
- ucc_geth_send_queue_mem_region_t *p_send_q_mem_reg;
+ struct ucc_geth_send_queue_mem_region *p_send_q_mem_reg;
u32 send_q_mem_reg_offset;
- ucc_geth_thread_data_tx_t *p_thread_data_tx;
+ struct ucc_geth_thread_data_tx *p_thread_data_tx;
u32 thread_dat_tx_offset;
- ucc_geth_thread_data_rx_t *p_thread_data_rx;
+ struct ucc_geth_thread_data_rx *p_thread_data_rx;
u32 thread_dat_rx_offset;
- ucc_geth_scheduler_t *p_scheduler;
+ struct ucc_geth_scheduler *p_scheduler;
u32 scheduler_offset;
- ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram;
+ struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
u32 tx_fw_statistics_pram_offset;
- ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram;
+ struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;
u32 rx_fw_statistics_pram_offset;
- ucc_geth_rx_interrupt_coalescing_table_t *p_rx_irq_coalescing_tbl;
+ struct ucc_geth_rx_interrupt_coalescing_table *p_rx_irq_coalescing_tbl;
u32 rx_irq_coalescing_tbl_offset;
- ucc_geth_rx_bd_queues_entry_t *p_rx_bd_qs_tbl;
+ struct ucc_geth_rx_bd_queues_entry *p_rx_bd_qs_tbl;
u32 rx_bd_qs_tbl_offset;
u8 *p_tx_bd_ring[NUM_TX_QUEUES];
u32 tx_bd_ring_offset[NUM_TX_QUEUES];
@@ -1308,7 +1306,7 @@ typedef struct ucc_geth_private {
u16 cpucount[NUM_TX_QUEUES];
volatile u16 *p_cpucount[NUM_TX_QUEUES];
int indAddrRegUsed[NUM_OF_PADDRS];
- enet_addr_t paddr[NUM_OF_PADDRS];
+ u8 paddr[NUM_OF_PADDRS][ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */
u8 numGroupAddrInHash;
u8 numIndAddrInHash;
u8 numIndAddrInReg;
@@ -1334,6 +1332,6 @@ typedef struct ucc_geth_private {
int oldspeed;
int oldduplex;
int oldlink;
-} ucc_geth_private_t;
+};
#endif /* __UCC_GETH_H__ */
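
The header changes above are almost entirely mechanical: every `typedef struct foo { ... } foo_t;` becomes a plain `struct foo { ... };`, the typedef'd enums likewise lose their `_e` aliases, and users spell the tag out, which is the form Documentation/CodingStyle prefers. A tiny sketch of the before/after shape follows; the struct name is made up and serves only to show the pattern.

#include <linux/types.h>
#include <asm/io.h>

/* Old style, as removed above:
 *
 *     typedef struct example_regs {
 *             u32 cfg;
 *     } __attribute__ ((packed)) example_regs_t;
 *
 * New style, as introduced above: */
struct example_regs {
        u32 cfg;        /* some memory-mapped configuration register */
} __attribute__ ((packed));

/* Callers now write the tag explicitly: */
static inline u32 example_read_cfg(struct example_regs __iomem *regs)
{
        return in_be32(&regs->cfg);     /* big-endian MMIO read, as this driver uses */
}
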
diff --git a/drivers/net/ucc_geth_phy.c b/drivers/net/ucc_geth_phy.c
index 67260eb3188a..5360ec05eaa3 100644
--- a/drivers/net/ucc_geth_phy.c
+++ b/drivers/net/ucc_geth_phy.c
@@ -42,7 +42,6 @@
#include "ucc_geth.h"
#include "ucc_geth_phy.h"
-#include <platforms/83xx/mpc8360e_pb.h>
#define ugphy_printk(level, format, arg...) \
printk(level format "\n", ## arg)
@@ -72,16 +71,14 @@ static int genmii_read_status(struct ugeth_mii_info *mii_info);
u16 phy_read(struct ugeth_mii_info *mii_info, u16 regnum);
void phy_write(struct ugeth_mii_info *mii_info, u16 regnum, u16 val);
-static u8 *bcsr_regs = NULL;
-
/* Write value to the PHY for this device to the register at regnum, */
/* waiting until the write is done before it returns. All PHY */
/* configuration has to be done through the TSEC1 MIIM regs */
void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value)
{
- ucc_geth_private_t *ugeth = netdev_priv(dev);
- ucc_mii_mng_t *mii_regs;
- enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum;
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ struct ucc_mii_mng *mii_regs;
+ enum enet_tbi_mii_reg mii_reg = (enum enet_tbi_mii_reg) regnum;
u32 tmp_reg;
ugphy_vdbg("%s: IN", __FUNCTION__);
@@ -116,9 +113,9 @@ void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value)
/* configuration has to be done through the TSEC1 MIIM regs */
int read_phy_reg(struct net_device *dev, int mii_id, int regnum)
{
- ucc_geth_private_t *ugeth = netdev_priv(dev);
- ucc_mii_mng_t *mii_regs;
- enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum;
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ struct ucc_mii_mng *mii_regs;
+ enum enet_tbi_mii_reg mii_reg = (enum enet_tbi_mii_reg) regnum;
u32 tmp_reg;
u16 value;
@@ -634,11 +631,6 @@ static void dm9161_close(struct ugeth_mii_info *mii_info)
static int dm9161_ack_interrupt(struct ugeth_mii_info *mii_info)
{
-/* FIXME: This lines are for BUG fixing in the mpc8325.
-Remove this from here when it's fixed */
- if (bcsr_regs == NULL)
- bcsr_regs = (u8 *) ioremap(BCSR_PHYS_ADDR, BCSR_SIZE);
- bcsr_regs[14] |= 0x40;
ugphy_vdbg("%s: IN", __FUNCTION__);
/* Clear the interrupts by reading the reg */
@@ -650,12 +642,6 @@ Remove this from here when it's fixed */
static int dm9161_config_intr(struct ugeth_mii_info *mii_info)
{
-/* FIXME: This lines are for BUG fixing in the mpc8325.
-Remove this from here when it's fixed */
- if (bcsr_regs == NULL) {
- bcsr_regs = (u8 *) ioremap(BCSR_PHYS_ADDR, BCSR_SIZE);
- bcsr_regs[14] &= ~0x40;
- }
ugphy_vdbg("%s: IN", __FUNCTION__);
if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
diff --git a/drivers/net/ucc_geth_phy.h b/drivers/net/ucc_geth_phy.h
index 2f98b8f1bb0a..f5740783670f 100644
--- a/drivers/net/ucc_geth_phy.h
+++ b/drivers/net/ucc_geth_phy.h
@@ -126,7 +126,7 @@ struct ugeth_mii_info {
/* And management functions */
struct phy_info *phyinfo;
- ucc_mii_mng_t *mii_regs;
+ struct ucc_mii_mng *mii_regs;
/* forced speed & duplex (no autoneg)
* partner speed & duplex & pause (autoneg)
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 58b7efbb0750..b5d0d7fb647a 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -127,7 +127,7 @@ config LANMEDIA
# There is no way to detect a Sealevel board. Force it modular
config SEALEVEL_4021
tristate "Sealevel Systems 4021 support"
- depends on WAN && ISA && m && ISA_DMA_API
+ depends on WAN && ISA && m && ISA_DMA_API && INET
help
This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index dcf46add3adf..5c322dfb79f6 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -500,7 +500,7 @@ static int __init n2_init(void)
#ifdef MODULE
printk(KERN_INFO "n2: no card initialized\n");
#endif
- return -ENOSYS; /* no parameters specified, abort */
+ return -EINVAL; /* no parameters specified, abort */
}
printk(KERN_INFO "%s\n", version);
@@ -538,11 +538,11 @@ static int __init n2_init(void)
n2_run(io, irq, ram, valid[0], valid[1]);
if (*hw == '\x0')
- return first_card ? 0 : -ENOSYS;
+ return first_card ? 0 : -EINVAL;
}while(*hw++ == ':');
printk(KERN_ERR "n2: invalid hardware parameters\n");
- return first_card ? 0 : -ENOSYS;
+ return first_card ? 0 : -EINVAL;
}
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 5823e3bca178..36d1c3ff7078 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -2867,7 +2867,6 @@ static int ch_config(pc300dev_t * d)
uclong clktype = chan->conf.phys_settings.clock_type;
ucshort encoding = chan->conf.proto_settings.encoding;
ucshort parity = chan->conf.proto_settings.parity;
- int tmc, br;
ucchar md0, md2;
/* Reset the channel */
@@ -2940,8 +2939,12 @@ static int ch_config(pc300dev_t * d)
case PC300_RSV:
case PC300_X21:
if (clktype == CLOCK_INT || clktype == CLOCK_TXINT) {
+ int tmc, br;
+
/* Calculate the clkrate parameters */
tmc = clock_rate_calc(clkrate, card->hw.clock, &br);
+ if (tmc < 0)
+ return -EIO;
cpc_writeb(scabase + M_REG(TMCT, ch), tmc);
cpc_writeb(scabase + M_REG(TXS, ch),
(TXS_DTRXC | TXS_IBRG | br));
@@ -3097,14 +3100,16 @@ static int cpc_attach(struct net_device *dev, unsigned short encoding,
return 0;
}
-static void cpc_opench(pc300dev_t * d)
+static int cpc_opench(pc300dev_t * d)
{
pc300ch_t *chan = (pc300ch_t *) d->chan;
pc300_t *card = (pc300_t *) chan->card;
- int ch = chan->channel;
+ int ch = chan->channel, rc;
void __iomem *scabase = card->hw.scabase;
- ch_config(d);
+ rc = ch_config(d);
+ if (rc)
+ return rc;
rx_config(d);
@@ -3113,6 +3118,8 @@ static void cpc_opench(pc300dev_t * d)
/* Assert RTS and DTR */
cpc_writeb(scabase + M_REG(CTL, ch),
cpc_readb(scabase + M_REG(CTL, ch)) & ~(CTL_RTS | CTL_DTR));
+
+ return 0;
}
static void cpc_closech(pc300dev_t * d)
@@ -3168,9 +3175,16 @@ int cpc_open(struct net_device *dev)
}
sprintf(ifr.ifr_name, "%s", dev->name);
- cpc_opench(d);
+ result = cpc_opench(d);
+ if (result)
+ goto err_out;
+
netif_start_queue(dev);
return 0;
+
+err_out:
+ hdlc_close(dev);
+ return result;
}
static int cpc_close(struct net_device *dev)
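
The pc300 hunks above turn cpc_opench() from a void function into one that reports failure, so a clock-rate calculation error is no longer silently programmed into the SCA registers and cpc_open() can unwind with hdlc_close(). The sketch below reduces that shape to its skeleton; the helper names are hypothetical, only hdlc_open()/hdlc_close() and netif_start_queue() are the real calls used in this subsystem.

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/hdlc.h>

static int example_clock_calc(void)
{
        return -1;      /* stand-in: pretend the requested rate cannot be met */
}

static int example_configure(void)
{
        int tmc = example_clock_calc();

        if (tmc < 0)
                return -EIO;    /* propagate instead of writing a bogus divisor */
        /* ... program the divisor here ... */
        return 0;
}

static int example_open(struct net_device *dev)
{
        int rc = hdlc_open(dev);

        if (rc)
                return rc;
        rc = example_configure();
        if (rc) {
                hdlc_close(dev);        /* undo hdlc_open() before failing */
                return rc;
        }
        netif_start_queue(dev);
        return 0;
}
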
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 0a33c8a56e13..efcdaf1c5f73 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2897,6 +2897,8 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
goto err_out_map;
}
ai->wifidev = init_wifidev(ai, dev);
+ if (!ai->wifidev)
+ goto err_out_reg;
set_bit(FLAG_REGISTERED,&ai->flags);
airo_print_info(dev->name, "MAC enabled %x:%x:%x:%x:%x:%x",
@@ -2908,11 +2910,18 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
for( i = 0; i < MAX_FIDS; i++ )
ai->fids[i] = transmit_allocate(ai,AIRO_DEF_MTU,i>=MAX_FIDS/2);
- setup_proc_entry( dev, dev->priv ); /* XXX check for failure */
+ if (setup_proc_entry(dev, dev->priv) < 0)
+ goto err_out_wifi;
+
netif_start_queue(dev);
SET_MODULE_OWNER(dev);
return dev;
+err_out_wifi:
+ unregister_netdev(ai->wifidev);
+ free_netdev(ai->wifidev);
+err_out_reg:
+ unregister_netdev(dev);
err_out_map:
if (test_bit(FLAG_MPI,&ai->flags) && pci) {
pci_free_consistent(pci, PCI_SHARED_LEN, ai->shared, ai->shared_dma);
@@ -3089,7 +3098,8 @@ static int airo_thread(void *data) {
set_bit(JOB_AUTOWEP, &ai->jobs);
break;
}
- if (!kthread_should_stop()) {
+ if (!kthread_should_stop() &&
+ !freezing(current)) {
unsigned long wake_at;
if (!ai->expires || !ai->scan_timeout) {
wake_at = max(ai->expires,
@@ -3101,7 +3111,8 @@ static int airo_thread(void *data) {
schedule_timeout(wake_at - jiffies);
continue;
}
- } else if (!kthread_should_stop()) {
+ } else if (!kthread_should_stop() &&
+ !freezing(current)) {
schedule();
continue;
}
@@ -4495,91 +4506,128 @@ static int setup_proc_entry( struct net_device *dev,
apriv->proc_entry = create_proc_entry(apriv->proc_name,
S_IFDIR|airo_perm,
airo_entry);
- apriv->proc_entry->uid = proc_uid;
- apriv->proc_entry->gid = proc_gid;
- apriv->proc_entry->owner = THIS_MODULE;
+ if (!apriv->proc_entry)
+ goto fail;
+ apriv->proc_entry->uid = proc_uid;
+ apriv->proc_entry->gid = proc_gid;
+ apriv->proc_entry->owner = THIS_MODULE;
/* Setup the StatsDelta */
entry = create_proc_entry("StatsDelta",
S_IFREG | (S_IRUGO&proc_perm),
apriv->proc_entry);
- entry->uid = proc_uid;
- entry->gid = proc_gid;
+ if (!entry)
+ goto fail_stats_delta;
+ entry->uid = proc_uid;
+ entry->gid = proc_gid;
entry->data = dev;
- entry->owner = THIS_MODULE;
+ entry->owner = THIS_MODULE;
SETPROC_OPS(entry, proc_statsdelta_ops);
/* Setup the Stats */
entry = create_proc_entry("Stats",
S_IFREG | (S_IRUGO&proc_perm),
apriv->proc_entry);
- entry->uid = proc_uid;
- entry->gid = proc_gid;
+ if (!entry)
+ goto fail_stats;
+ entry->uid = proc_uid;
+ entry->gid = proc_gid;
entry->data = dev;
- entry->owner = THIS_MODULE;
+ entry->owner = THIS_MODULE;
SETPROC_OPS(entry, proc_stats_ops);
/* Setup the Status */
entry = create_proc_entry("Status",
S_IFREG | (S_IRUGO&proc_perm),
apriv->proc_entry);
- entry->uid = proc_uid;
- entry->gid = proc_gid;
+ if (!entry)
+ goto fail_status;
+ entry->uid = proc_uid;
+ entry->gid = proc_gid;
entry->data = dev;
- entry->owner = THIS_MODULE;
+ entry->owner = THIS_MODULE;
SETPROC_OPS(entry, proc_status_ops);
/* Setup the Config */
entry = create_proc_entry("Config",
S_IFREG | proc_perm,
apriv->proc_entry);
- entry->uid = proc_uid;
- entry->gid = proc_gid;
+ if (!entry)
+ goto fail_config;
+ entry->uid = proc_uid;
+ entry->gid = proc_gid;
entry->data = dev;
- entry->owner = THIS_MODULE;
+ entry->owner = THIS_MODULE;
SETPROC_OPS(entry, proc_config_ops);
/* Setup the SSID */
entry = create_proc_entry("SSID",
S_IFREG | proc_perm,
apriv->proc_entry);
- entry->uid = proc_uid;
- entry->gid = proc_gid;
+ if (!entry)
+ goto fail_ssid;
+ entry->uid = proc_uid;
+ entry->gid = proc_gid;
entry->data = dev;
- entry->owner = THIS_MODULE;
+ entry->owner = THIS_MODULE;
SETPROC_OPS(entry, proc_SSID_ops);
/* Setup the APList */
entry = create_proc_entry("APList",
S_IFREG | proc_perm,
apriv->proc_entry);
- entry->uid = proc_uid;
- entry->gid = proc_gid;
+ if (!entry)
+ goto fail_aplist;
+ entry->uid = proc_uid;
+ entry->gid = proc_gid;
entry->data = dev;
- entry->owner = THIS_MODULE;
+ entry->owner = THIS_MODULE;
SETPROC_OPS(entry, proc_APList_ops);
/* Setup the BSSList */
entry = create_proc_entry("BSSList",
S_IFREG | proc_perm,
apriv->proc_entry);
+ if (!entry)
+ goto fail_bsslist;
entry->uid = proc_uid;
entry->gid = proc_gid;
entry->data = dev;
- entry->owner = THIS_MODULE;
+ entry->owner = THIS_MODULE;
SETPROC_OPS(entry, proc_BSSList_ops);
/* Setup the WepKey */
entry = create_proc_entry("WepKey",
S_IFREG | proc_perm,
apriv->proc_entry);
- entry->uid = proc_uid;
- entry->gid = proc_gid;
+ if (!entry)
+ goto fail_wepkey;
+ entry->uid = proc_uid;
+ entry->gid = proc_gid;
entry->data = dev;
- entry->owner = THIS_MODULE;
+ entry->owner = THIS_MODULE;
SETPROC_OPS(entry, proc_wepkey_ops);
return 0;
+
+fail_wepkey:
+ remove_proc_entry("BSSList", apriv->proc_entry);
+fail_bsslist:
+ remove_proc_entry("APList", apriv->proc_entry);
+fail_aplist:
+ remove_proc_entry("SSID", apriv->proc_entry);
+fail_ssid:
+ remove_proc_entry("Config", apriv->proc_entry);
+fail_config:
+ remove_proc_entry("Status", apriv->proc_entry);
+fail_status:
+ remove_proc_entry("Stats", apriv->proc_entry);
+fail_stats:
+ remove_proc_entry("StatsDelta", apriv->proc_entry);
+fail_stats_delta:
+ remove_proc_entry(apriv->proc_name, airo_entry);
+fail:
+ return -ENOMEM;
}
static int takedown_proc_entry( struct net_device *dev,
@@ -5924,7 +5972,6 @@ static int airo_get_essid(struct net_device *dev,
/* Get the current SSID */
memcpy(extra, status_rid.SSID, status_rid.SSIDlen);
- extra[status_rid.SSIDlen] = '\0';
/* If none, we may want to get the one that was set */
/* Push it out ! */
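
The setup_proc_entry() rework above follows the usual kernel unwind-ladder idiom: each create_proc_entry() result is checked, and a failure jumps to a label that removes only the entries already created, in reverse order, before returning -ENOMEM. Reduced to its skeleton it looks like the sketch below; the entry names are placeholders, not the driver's.

#include <linux/errno.h>
#include <linux/stat.h>
#include <linux/proc_fs.h>

static int example_setup_proc(struct proc_dir_entry *parent)
{
        struct proc_dir_entry *e;

        e = create_proc_entry("first", S_IFREG | S_IRUGO, parent);
        if (!e)
                goto fail;
        e = create_proc_entry("second", S_IFREG | S_IRUGO, parent);
        if (!e)
                goto fail_second;
        e = create_proc_entry("third", S_IFREG | S_IRUGO, parent);
        if (!e)
                goto fail_third;
        return 0;

fail_third:
        remove_proc_entry("second", parent);    /* undo in reverse order */
fail_second:
        remove_proc_entry("first", parent);
fail:
        return -ENOMEM;
}
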
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 31eed85de60f..0c07b8b7250d 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -1678,11 +1678,9 @@ static int atmel_get_essid(struct net_device *dev,
/* Get the current SSID */
if (priv->new_SSID_size != 0) {
memcpy(extra, priv->new_SSID, priv->new_SSID_size);
- extra[priv->new_SSID_size] = '\0';
dwrq->length = priv->new_SSID_size;
} else {
memcpy(extra, priv->SSID, priv->SSID_size);
- extra[priv->SSID_size] = '\0';
dwrq->length = priv->SSID_size;
}
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
index 76e3aed4b471..978ed099e285 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
@@ -705,11 +705,30 @@ int bcm43xx_dma_init(struct bcm43xx_private *bcm)
struct bcm43xx_dmaring *ring;
int err = -ENOMEM;
int dma64 = 0;
- u32 sbtmstatehi;
+ u64 mask = bcm43xx_get_supported_dma_mask(bcm);
+ int nobits;
- sbtmstatehi = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATEHIGH);
- if (sbtmstatehi & BCM43xx_SBTMSTATEHIGH_DMA64BIT)
+ if (mask == DMA_64BIT_MASK) {
dma64 = 1;
+ nobits = 64;
+ } else if (mask == DMA_32BIT_MASK)
+ nobits = 32;
+ else
+ nobits = 30;
+ err = pci_set_dma_mask(bcm->pci_dev, mask);
+ err |= pci_set_consistent_dma_mask(bcm->pci_dev, mask);
+ if (err) {
+#ifdef CONFIG_BCM43XX_PIO
+ printk(KERN_WARNING PFX "DMA not supported on this device."
+ " Falling back to PIO.\n");
+ bcm->__using_pio = 1;
+ return -ENOSYS;
+#else
+ printk(KERN_ERR PFX "FATAL: DMA not supported and PIO not configured. "
+ "Please recompile the driver with PIO support.\n");
+ return -ENODEV;
+#endif /* CONFIG_BCM43XX_PIO */
+ }
/* setup TX DMA channels. */
ring = bcm43xx_setup_dmaring(bcm, 0, 1, dma64);
@@ -755,8 +774,7 @@ int bcm43xx_dma_init(struct bcm43xx_private *bcm)
dma->rx_ring3 = ring;
}
- dprintk(KERN_INFO PFX "%s DMA initialized\n",
- dma64 ? "64-bit" : "32-bit");
+ dprintk(KERN_INFO PFX "%d-bit DMA initialized\n", nobits);
err = 0;
out:
return err;
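The reworked bcm43xx_dma_init() above first asks the hardware which DMA mask it can support, maps that to a bit width for the log message, and only then programs the mask into the PCI layer; if setting the mask fails it returns -ENOSYS so the caller can fall back to the PIO path (see the wireless_core_init hunk further down), or -ENODEV when PIO support is compiled out. A condensed, standalone sketch of that error path (stand-in function, simplified arguments):

#include <errno.h>
#include <stdio.h>

/* Illustrative only: same shape as the new error path, not the driver code. */
static int dma_init_sketch(int mask_bits, int set_mask_failed, int pio_built_in)
{
	if (set_mask_failed) {
		if (pio_built_in) {
			fprintf(stderr, "DMA not supported, falling back to PIO\n");
			return -ENOSYS;	/* caller retries with the PIO path */
		}
		fprintf(stderr, "DMA not supported and PIO not configured\n");
		return -ENODEV;
	}
	/* ... ring setup for 64-, 32- or 30-bit addressing ... */
	printf("%d-bit DMA initialized\n", mask_bits);
	return 0;
}

int main(void)
{
	return dma_init_sketch(32, 0, 1) ? 1 : 0;
}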
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.h b/drivers/net/wireless/bcm43xx/bcm43xx_dma.h
index e04bcaddd1d0..d1105e569a41 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.h
@@ -4,6 +4,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
+#include <linux/dma-mapping.h>
#include <linux/linkage.h>
#include <asm/atomic.h>
@@ -314,6 +315,23 @@ int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
struct ieee80211_txb *txb);
void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring);
+/* Helper function that returns the dma mask for this device. */
+static inline
+u64 bcm43xx_get_supported_dma_mask(struct bcm43xx_private *bcm)
+{
+ int dma64 = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATEHIGH) &
+ BCM43xx_SBTMSTATEHIGH_DMA64BIT;
+ u16 mmio_base = bcm43xx_dmacontroller_base(dma64, 0);
+ u32 mask = BCM43xx_DMA32_TXADDREXT_MASK;
+
+ if (dma64)
+ return DMA_64BIT_MASK;
+ bcm43xx_write32(bcm, mmio_base + BCM43xx_DMA32_TXCTL, mask);
+ if (bcm43xx_read32(bcm, mmio_base + BCM43xx_DMA32_TXCTL) & mask)
+ return DMA_32BIT_MASK;
+ return DMA_30BIT_MASK;
+}
+
#else /* CONFIG_BCM43XX_DMA */
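The new bcm43xx_get_supported_dma_mask() helper above picks between the three masks by checking the core's 64-bit DMA capability bit first and, failing that, writing the 32-bit address-extension bits into the DMA32 TX control register and reading them back: if the bits stick, 32-bit addressing works, otherwise only 30-bit. The same write/read-back probe in isolation, against a fake register (the mask value is a stand-in, not the real register bit):

#include <stdint.h>
#include <stdio.h>

#define ADDREXT_MASK 0x00030000u	/* stand-in for BCM43xx_DMA32_TXADDREXT_MASK */

static uint32_t fake_txctl;		/* models the MMIO DMA32 TX control register */

/* This fake device latches the address-extension bits, i.e. it is 32-bit capable. */
static void reg_write(uint32_t v) { fake_txctl = v; }
static uint32_t reg_read(void)    { return fake_txctl; }

static int probe_dma_bits(int has_dma64_bit)
{
	if (has_dma64_bit)
		return 64;			/* 64-bit engine advertised directly */
	reg_write(ADDREXT_MASK);		/* try to set the extension bits */
	if (reg_read() & ADDREXT_MASK)		/* they stuck: 32-bit addressing */
		return 32;
	return 30;				/* bits ignored: 30-bit only */
}

int main(void)
{
	printf("%d-bit DMA\n", probe_dma_bits(0));
	return 0;
}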
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_leds.c b/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
index c3f90c8563d9..7d383a27b927 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
@@ -189,20 +189,24 @@ void bcm43xx_leds_update(struct bcm43xx_private *bcm, int activity)
case BCM43xx_LED_INACTIVE:
continue;
case BCM43xx_LED_OFF:
+ case BCM43xx_LED_BCM4303_3:
break;
case BCM43xx_LED_ON:
turn_on = 1;
break;
case BCM43xx_LED_ACTIVITY:
+ case BCM43xx_LED_BCM4303_0:
turn_on = activity;
break;
case BCM43xx_LED_RADIO_ALL:
turn_on = radio->enabled;
break;
case BCM43xx_LED_RADIO_A:
+ case BCM43xx_LED_BCM4303_2:
turn_on = (radio->enabled && phy->type == BCM43xx_PHYTYPE_A);
break;
case BCM43xx_LED_RADIO_B:
+ case BCM43xx_LED_BCM4303_1:
turn_on = (radio->enabled &&
(phy->type == BCM43xx_PHYTYPE_B ||
phy->type == BCM43xx_PHYTYPE_G));
@@ -242,7 +246,7 @@ void bcm43xx_leds_update(struct bcm43xx_private *bcm, int activity)
//TODO
break;
case BCM43xx_LED_ASSOC:
- if (bcm->softmac->associated)
+ if (bcm->softmac->associnfo.associated)
turn_on = 1;
break;
#ifdef CONFIG_BCM43XX_DEBUG
@@ -257,7 +261,8 @@ void bcm43xx_leds_update(struct bcm43xx_private *bcm, int activity)
continue;
#endif /* CONFIG_BCM43XX_DEBUG */
default:
- assert(0);
+ dprintkl(KERN_INFO PFX "Bad value in leds_update,"
+ " led->behaviour: 0x%x\n", led->behaviour);
};
if (led->activelow)
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_leds.h b/drivers/net/wireless/bcm43xx/bcm43xx_leds.h
index d3716cf3aebc..811e14a81198 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_leds.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_leds.h
@@ -46,6 +46,12 @@ enum { /* LED behaviour values */
BCM43xx_LED_TEST_BLINKSLOW,
BCM43xx_LED_TEST_BLINKMEDIUM,
BCM43xx_LED_TEST_BLINKFAST,
+
+ /* Misc values for BCM4303 */
+ BCM43xx_LED_BCM4303_0 = 0x2B,
+ BCM43xx_LED_BCM4303_1 = 0x78,
+ BCM43xx_LED_BCM4303_2 = 0x2E,
+ BCM43xx_LED_BCM4303_3 = 0x19,
};
int bcm43xx_leds_init(struct bcm43xx_private *bcm);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index bad3452ea893..a1b783813d8e 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -746,7 +746,7 @@ int bcm43xx_sprom_write(struct bcm43xx_private *bcm, const u16 *sprom)
if (err)
goto err_ctlreg;
spromctl |= 0x10; /* SPROM WRITE enable. */
- bcm43xx_pci_write_config32(bcm, BCM43xx_PCICFG_SPROMCTL, spromctl);
+ err = bcm43xx_pci_write_config32(bcm, BCM43xx_PCICFG_SPROMCTL, spromctl);
if (err)
goto err_ctlreg;
/* We must burn lots of CPU cycles here, but that does not
@@ -768,7 +768,7 @@ int bcm43xx_sprom_write(struct bcm43xx_private *bcm, const u16 *sprom)
mdelay(20);
}
spromctl &= ~0x10; /* SPROM WRITE enable. */
- bcm43xx_pci_write_config32(bcm, BCM43xx_PCICFG_SPROMCTL, spromctl);
+ err = bcm43xx_pci_write_config32(bcm, BCM43xx_PCICFG_SPROMCTL, spromctl);
if (err)
goto err_ctlreg;
mdelay(500);
@@ -1463,6 +1463,23 @@ static void handle_irq_transmit_status(struct bcm43xx_private *bcm)
}
}
+static void drain_txstatus_queue(struct bcm43xx_private *bcm)
+{
+ u32 dummy;
+
+ if (bcm->current_core->rev < 5)
+ return;
+ /* Read all entries from the microcode TXstatus FIFO
+ * and throw them away.
+ */
+ while (1) {
+ dummy = bcm43xx_read32(bcm, BCM43xx_MMIO_XMITSTAT_0);
+ if (!dummy)
+ break;
+ dummy = bcm43xx_read32(bcm, BCM43xx_MMIO_XMITSTAT_1);
+ }
+}
+
static void bcm43xx_generate_noise_sample(struct bcm43xx_private *bcm)
{
bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x408, 0x7F7F);
@@ -2925,10 +2942,13 @@ static int bcm43xx_wireless_core_init(struct bcm43xx_private *bcm,
bcm43xx_write16(bcm, 0x043C, 0x000C);
if (active_wlcore) {
- if (bcm43xx_using_pio(bcm))
+ if (bcm43xx_using_pio(bcm)) {
err = bcm43xx_pio_init(bcm);
- else
+ } else {
err = bcm43xx_dma_init(bcm);
+ if (err == -ENOSYS)
+ err = bcm43xx_pio_init(bcm);
+ }
if (err)
goto err_chip_cleanup;
}
@@ -3160,17 +3180,30 @@ static int estimate_periodic_work_badness(unsigned int state)
static void bcm43xx_periodic_work_handler(void *d)
{
struct bcm43xx_private *bcm = d;
+ struct net_device *net_dev = bcm->net_dev;
unsigned long flags;
u32 savedirqs = 0;
int badness;
+ unsigned long orig_trans_start = 0;
+ mutex_lock(&bcm->mutex);
badness = estimate_periodic_work_badness(bcm->periodic_state);
if (badness > BADNESS_LIMIT) {
/* Periodic work will take a long time, so we want it to
* be preemptible.
*/
- mutex_lock(&bcm->mutex);
- netif_tx_disable(bcm->net_dev);
+
+ netif_tx_lock_bh(net_dev);
+ /* We must fake a started transmission here, as we are going to
+ * disable TX. If we did not fake a TX, it would be possible to
+ * trigger the netdev watchdog if the last real TX is already
+ * some time in the past (slightly less than 5 secs).
+ */
+ orig_trans_start = net_dev->trans_start;
+ net_dev->trans_start = jiffies;
+ netif_stop_queue(net_dev);
+ netif_tx_unlock_bh(net_dev);
+
spin_lock_irqsave(&bcm->irq_lock, flags);
bcm43xx_mac_suspend(bcm);
if (bcm43xx_using_pio(bcm))
@@ -3182,7 +3215,6 @@ static void bcm43xx_periodic_work_handler(void *d)
/* Periodic work should take short time, so we want low
* locking overhead.
*/
- mutex_lock(&bcm->mutex);
spin_lock_irqsave(&bcm->irq_lock, flags);
}
@@ -3196,6 +3228,7 @@ static void bcm43xx_periodic_work_handler(void *d)
bcm43xx_pio_thaw_txqueues(bcm);
bcm43xx_mac_enable(bcm);
netif_wake_queue(bcm->net_dev);
+ net_dev->trans_start = orig_trans_start;
}
mmiowb();
spin_unlock_irqrestore(&bcm->irq_lock, flags);
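The periodic-work hunk above saves trans_start, bumps it to the current jiffies, and stops the queue under the TX lock before the long maintenance run, then restores the saved timestamp once the queue is woken again; the faked timestamp keeps the netdev watchdog (which fires when the queue is stopped and trans_start is older than the watchdog timeout) from triggering while TX is deliberately held off. A toy model of why that works (not kernel code):

#include <stdio.h>

/* Toy model: the watchdog fires when the queue is stopped and the last
 * recorded TX start is older than the timeout. */
struct toy_dev {
	unsigned long trans_start;
	int queue_stopped;
};

static int watchdog_would_fire(const struct toy_dev *d,
			       unsigned long now, unsigned long timeout)
{
	return d->queue_stopped && (now - d->trans_start) > timeout;
}

int main(void)
{
	unsigned long now = 490;		/* pretend jiffies */
	struct toy_dev dev = { .trans_start = 0, .queue_stopped = 0 };

	/* Long maintenance work is about to stop TX: fake a fresh TX first. */
	unsigned long saved = dev.trans_start;
	dev.trans_start = now;
	dev.queue_stopped = 1;
	printf("fires with faked trans_start: %d\n",
	       watchdog_would_fire(&dev, now + 10, 450));	/* 0 */

	dev.trans_start = saved;		/* what it would look like unfaked */
	printf("fires with real trans_start:  %d\n",
	       watchdog_would_fire(&dev, now + 10, 450));	/* 1 */

	/* Work done: wake the queue and restore the real timestamp. */
	dev.queue_stopped = 0;
	dev.trans_start = saved;
	return 0;
}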
@@ -3516,6 +3549,7 @@ int bcm43xx_select_wireless_core(struct bcm43xx_private *bcm,
bcm43xx_macfilter_clear(bcm, BCM43xx_MACFILTER_ASSOC);
bcm43xx_macfilter_set(bcm, BCM43xx_MACFILTER_SELF, (u8 *)(bcm->net_dev->dev_addr));
bcm43xx_security_init(bcm);
+ drain_txstatus_queue(bcm);
ieee80211softmac_start(bcm->net_dev);
/* Let's go! Be careful after enabling the IRQs.
@@ -3993,8 +4027,6 @@ static int bcm43xx_init_private(struct bcm43xx_private *bcm,
struct net_device *net_dev,
struct pci_dev *pci_dev)
{
- int err;
-
bcm43xx_set_status(bcm, BCM43xx_STAT_UNINIT);
bcm->ieee = netdev_priv(net_dev);
bcm->softmac = ieee80211_priv(net_dev);
@@ -4012,22 +4044,8 @@ static int bcm43xx_init_private(struct bcm43xx_private *bcm,
(void (*)(unsigned long))bcm43xx_interrupt_tasklet,
(unsigned long)bcm);
tasklet_disable_nosync(&bcm->isr_tasklet);
- if (modparam_pio) {
+ if (modparam_pio)
bcm->__using_pio = 1;
- } else {
- err = pci_set_dma_mask(pci_dev, DMA_30BIT_MASK);
- err |= pci_set_consistent_dma_mask(pci_dev, DMA_30BIT_MASK);
- if (err) {
-#ifdef CONFIG_BCM43XX_PIO
- printk(KERN_WARNING PFX "DMA not supported. Falling back to PIO.\n");
- bcm->__using_pio = 1;
-#else
- printk(KERN_ERR PFX "FATAL: DMA not supported and PIO not configured. "
- "Recompile the driver with PIO support, please.\n");
- return -ENODEV;
-#endif /* CONFIG_BCM43XX_PIO */
- }
- }
bcm->rts_threshold = BCM43xx_DEFAULT_RTS_THRESHOLD;
/* default to sw encryption for now */
@@ -4208,7 +4226,11 @@ static int bcm43xx_resume(struct pci_dev *pdev)
dprintk(KERN_INFO PFX "Resuming...\n");
pci_set_power_state(pdev, 0);
- pci_enable_device(pdev);
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR PFX "Failure with pci_enable_device!\n");
+ return err;
+ }
pci_restore_state(pdev);
bcm43xx_chipset_attach(bcm);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
index 9b7b15cf6561..d27016f8c736 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
@@ -847,7 +847,7 @@ static struct iw_statistics *bcm43xx_get_wireless_stats(struct net_device *net_d
unsigned long flags;
wstats = &bcm->stats.wstats;
- if (!mac->associated) {
+ if (!mac->associnfo.associated) {
wstats->miss.beacon = 0;
// bcm->ieee->ieee_stats.tx_retry_limit_exceeded = 0; // FIXME: should this be cleared here?
wstats->discard.retries = 0;
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 686d895116de..f63909e4bc32 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -887,6 +887,13 @@ static struct pcmcia_device_id hostap_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID123(
"U.S. Robotics", "IEEE 802.11b PC-CARD", "Version 01.02",
0xc7b8df9d, 0x1700d087, 0x4b74baa0),
+ PCMCIA_DEVICE_PROD_ID123(
+ "Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio",
+ "Ver. 1.00",
+ 0x5cd01705, 0x4271660f, 0x9d08ee12),
+ PCMCIA_DEVICE_PROD_ID123(
+ "corega", "WL PCCL-11", "ISL37300P",
+ 0xa21501a, 0x59868926, 0xc9049a39),
PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, hostap_cs_ids);
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index 6dfa041be66d..bc81b13a5a2a 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -364,7 +364,7 @@ static int prism2_plx_check_cis(void __iomem *attr_mem, int attr_len,
pos = 0;
while (pos < CIS_MAX_LEN - 1 && cis[pos] != CISTPL_END) {
- if (pos + cis[pos + 1] >= CIS_MAX_LEN)
+ if (pos + 2 + cis[pos + 1] > CIS_MAX_LEN)
goto cis_error;
switch (cis[pos]) {
@@ -391,7 +391,7 @@ static int prism2_plx_check_cis(void __iomem *attr_mem, int attr_len,
break;
case CISTPL_MANFID:
- if (cis[pos + 1] < 5)
+ if (cis[pos + 1] < 4)
goto cis_error;
manfid1 = cis[pos + 2] + (cis[pos + 3] << 8);
manfid2 = cis[pos + 4] + (cis[pos + 5] << 8);
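The hostap_plx hunk tightens the CIS parser's bounds checks. A tuple is one code byte at pos, one link (data-length) byte at pos + 1, and data from pos + 2 onwards, so the tuple ends at index pos + 1 + cis[pos + 1]; the old test let the data run up to two bytes past CIS_MAX_LEN. Likewise CISTPL_MANFID carries two 16-bit IDs in four data bytes, so a link field of 4 is already sufficient and requiring 5 rejected valid cards. A standalone check with the corrected arithmetic (hypothetical helper):

#include <stdio.h>

#define CIS_MAX_LEN 256

/* Returns 1 when the tuple starting at pos fits entirely inside the buffer:
 * code byte at pos, link byte at pos + 1, data at pos + 2 .. pos + 1 + link. */
static int tuple_fits(const unsigned char *cis, int pos)
{
	return pos + 2 + cis[pos + 1] <= CIS_MAX_LEN;
}

int main(void)
{
	unsigned char cis[CIS_MAX_LEN] = { 0 };

	cis[253] = 0x20;	/* tuple code byte */
	cis[254] = 4;		/* link: data would occupy indices 255..258 */
	printf("fits: %d\n", tuple_fits(cis, 253));	/* 0 - would overflow */
	return 0;
}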
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index b779c7dcc1a8..336cabac13b3 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -2457,6 +2457,7 @@ void free_orinocodev(struct net_device *dev)
/* Wireless extensions */
/********************************************************************/
+/* Returns: < 0 -> error code; >= 0 -> length */
static int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
char buf[IW_ESSID_MAX_SIZE+1])
{
@@ -2501,9 +2502,9 @@ static int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
len = le16_to_cpu(essidbuf.len);
BUG_ON(len > IW_ESSID_MAX_SIZE);
- memset(buf, 0, IW_ESSID_MAX_SIZE+1);
+ memset(buf, 0, IW_ESSID_MAX_SIZE);
memcpy(buf, p, len);
- buf[len] = '\0';
+ err = len;
fail_unlock:
orinoco_unlock(priv, &flags);
@@ -3027,17 +3028,18 @@ static int orinoco_ioctl_getessid(struct net_device *dev,
if (netif_running(dev)) {
err = orinoco_hw_get_essid(priv, &active, essidbuf);
- if (err)
+ if (err < 0)
return err;
+ erq->length = err;
} else {
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
- memcpy(essidbuf, priv->desired_essid, IW_ESSID_MAX_SIZE + 1);
+ memcpy(essidbuf, priv->desired_essid, IW_ESSID_MAX_SIZE);
+ erq->length = strlen(priv->desired_essid);
orinoco_unlock(priv, &flags);
}
erq->flags = 1;
- erq->length = strlen(essidbuf);
return 0;
}
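orinoco_hw_get_essid() now folds the length into its return value: a negative value is an errno, anything else is the number of SSID bytes copied, so the ioctl stores that length directly instead of calling strlen() on data that may legitimately contain embedded zero bytes. A standalone sketch of the length-or-errno convention (stand-in function, fixed sample data):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define IW_ESSID_MAX_SIZE 32

/* Illustrative stand-in: < 0 -> error code, >= 0 -> length copied. */
static int get_essid_sketch(char buf[IW_ESSID_MAX_SIZE], int hw_ok)
{
	static const char ssid[] = { 'a', 'b', 0, 'c' };	/* embedded NUL is valid */

	if (!hw_ok)
		return -EBUSY;
	memset(buf, 0, IW_ESSID_MAX_SIZE);
	memcpy(buf, ssid, sizeof(ssid));
	return (int)sizeof(ssid);
}

int main(void)
{
	char essid[IW_ESSID_MAX_SIZE];
	int len = get_essid_sketch(essid, 1);

	if (len < 0)
		return 1;
	printf("length = %d (strlen would wrongly report %zu)\n", len, strlen(essid));
	return 0;
}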
@@ -3075,10 +3077,10 @@ static int orinoco_ioctl_getnick(struct net_device *dev,
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
- memcpy(nickbuf, priv->nick, IW_ESSID_MAX_SIZE+1);
+ memcpy(nickbuf, priv->nick, IW_ESSID_MAX_SIZE);
orinoco_unlock(priv, &flags);
- nrq->length = strlen(nickbuf);
+ nrq->length = strlen(priv->nick);
return 0;
}
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 0b381d77015c..7fbfc9e41d07 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -1198,7 +1198,6 @@ static int ray_get_essid(struct net_device *dev,
/* Get the essid that was set */
memcpy(extra, local->sparm.b5.a_current_ess_id, IW_ESSID_MAX_SIZE);
- extra[IW_ESSID_MAX_SIZE] = '\0';
/* Push it out ! */
dwrq->length = strlen(extra);
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 30057a335a7b..36b29ff05814 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -193,10 +193,8 @@ static void zd1201_usbrx(struct urb *urb)
struct sk_buff *skb;
unsigned char type;
- if (!zd) {
- free = 1;
- goto exit;
- }
+ if (!zd)
+ return;
switch(urb->status) {
case -EILSEQ:
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 2d12837052b0..a7d29bddb298 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -1099,7 +1099,7 @@ static void link_led_handler(void *p)
int r;
spin_lock_irq(&mac->lock);
- is_associated = sm->associated != 0;
+ is_associated = sm->associnfo.associated != 0;
spin_unlock_irq(&mac->lock);
r = zd_chip_control_leds(chip,