Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/8139cp.c | 14
-rw-r--r--  drivers/net/8139too.c | 14
-rw-r--r--  drivers/net/Kconfig | 33
-rw-r--r--  drivers/net/Makefile | 2
-rw-r--r--  drivers/net/au1000_eth.c | 206
-rw-r--r--  drivers/net/cassini.c | 9
-rw-r--r--  drivers/net/e1000/Makefile | 3
-rw-r--r--  drivers/net/e1000/e1000.h | 6
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c | 41
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 115
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 7
-rw-r--r--  drivers/net/e1000/e1000_main.c | 265
-rw-r--r--  drivers/net/e1000/e1000_osdep.h | 3
-rw-r--r--  drivers/net/e1000/e1000_param.c | 3
-rw-r--r--  drivers/net/epic100.c | 56
-rw-r--r--  drivers/net/forcedeth.c | 173
-rw-r--r--  drivers/net/ibmlana.c | 20
-rw-r--r--  drivers/net/ibmlana.h | 6
-rw-r--r--  drivers/net/ibmveth.c | 291
-rw-r--r--  drivers/net/ibmveth.h | 11
-rw-r--r--  drivers/net/ixgb/Makefile | 2
-rw-r--r--  drivers/net/ixgb/ixgb.h | 12
-rw-r--r--  drivers/net/ixgb/ixgb_ee.c | 2
-rw-r--r--  drivers/net/ixgb/ixgb_ee.h | 2
-rw-r--r--  drivers/net/ixgb/ixgb_ethtool.c | 57
-rw-r--r--  drivers/net/ixgb/ixgb_hw.c | 2
-rw-r--r--  drivers/net/ixgb/ixgb_hw.h | 3
-rw-r--r--  drivers/net/ixgb/ixgb_ids.h | 6
-rw-r--r--  drivers/net/ixgb/ixgb_main.c | 304
-rw-r--r--  drivers/net/ixgb/ixgb_osdep.h | 2
-rw-r--r--  drivers/net/ixgb/ixgb_param.c | 26
-rw-r--r--  drivers/net/myri10ge/Makefile | 5
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 2869
-rw-r--r--  drivers/net/myri10ge/myri10ge_mcp.h | 205
-rw-r--r--  drivers/net/myri10ge/myri10ge_mcp_gen_header.h | 58
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c | 42
-rw-r--r--  drivers/net/phy/Kconfig | 6
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/smsc.c | 101
-rw-r--r--  drivers/net/r8169.c | 1
-rw-r--r--  drivers/net/s2io-regs.h | 32
-rw-r--r--  drivers/net/s2io.c | 1476
-rw-r--r--  drivers/net/s2io.h | 59
-rw-r--r--  drivers/net/sis900.c | 26
-rw-r--r--  drivers/net/sis900.h | 10
-rw-r--r--  drivers/net/skge.c | 232
-rw-r--r--  drivers/net/skge.h | 6
-rw-r--r--  drivers/net/smc911x.c | 2307
-rw-r--r--  drivers/net/smc911x.h | 835
-rw-r--r--  drivers/net/smc91x.h | 18
-rw-r--r--  drivers/net/sungem_phy.c | 6
-rw-r--r--  drivers/net/tulip/de2104x.c | 58
-rw-r--r--  drivers/net/tulip/de4x5.c | 716
-rw-r--r--  drivers/net/tulip/de4x5.h | 14
-rw-r--r--  drivers/net/tulip/dmfe.c | 2
-rw-r--r--  drivers/net/tulip/eeprom.c | 8
-rw-r--r--  drivers/net/tulip/interrupt.c | 126
-rw-r--r--  drivers/net/tulip/media.c | 2
-rw-r--r--  drivers/net/tulip/tulip.h | 2
-rw-r--r--  drivers/net/tulip/tulip_core.c | 6
-rw-r--r--  drivers/net/tulip/uli526x.c | 80
-rw-r--r--  drivers/net/tulip/winbond-840.c | 26
-rw-r--r--  drivers/net/tulip/xircom_cb.c | 208
-rw-r--r--  drivers/net/via-velocity.h | 2
-rw-r--r--  drivers/net/wan/pci200syn.c | 27
-rw-r--r--  drivers/net/wireless/Kconfig | 47
-rw-r--r--  drivers/net/wireless/Makefile | 2
-rw-r--r--  drivers/net/wireless/airo.c | 271
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx.h | 1
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c | 2
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.c | 48
-rw-r--r--  drivers/net/wireless/hermes.c | 66
-rw-r--r--  drivers/net/wireless/hermes.h | 43
-rw-r--r--  drivers/net/wireless/hostap/hostap_80211_tx.c | 1
-rw-r--r--  drivers/net/wireless/hostap/hostap_ap.c | 11
-rw-r--r--  drivers/net/wireless/hostap/hostap_cs.c | 6
-rw-r--r--  drivers/net/wireless/hostap/hostap_main.c | 2
-rw-r--r--  drivers/net/wireless/ipw2200.c | 849
-rw-r--r--  drivers/net/wireless/ipw2200.h | 83
-rw-r--r--  drivers/net/wireless/orinoco.c | 251
-rw-r--r--  drivers/net/wireless/orinoco.h | 19
-rw-r--r--  drivers/net/wireless/orinoco_cs.c | 42
-rw-r--r--  drivers/net/wireless/orinoco_nortel.c | 171
-rw-r--r--  drivers/net/wireless/orinoco_pci.c | 210
-rw-r--r--  drivers/net/wireless/orinoco_pci.h | 104
-rw-r--r--  drivers/net/wireless/orinoco_plx.c | 223
-rw-r--r--  drivers/net/wireless/orinoco_tmd.c | 99
-rw-r--r--  drivers/net/wireless/spectrum_cs.c | 81
-rw-r--r--  drivers/net/wireless/zd1201.c | 1929
-rw-r--r--  drivers/net/wireless/zd1201.h | 148
90 files changed, 13042 insertions(+), 2935 deletions(-)
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 066e22b01a94..46d8c01437e9 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -19,11 +19,11 @@
See the file COPYING in this distribution for more information.
Contributors:
-
+
Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
PCI suspend/resume - Felipe Damasio <felipewd@terra.com.br>
LinkChg interrupt - Felipe Damasio <felipewd@terra.com.br>
-
+
TODO:
* Test Tx checksumming thoroughly
* Implement dev->tx_timeout
@@ -461,7 +461,7 @@ static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
static inline void cp_set_rxbufsize (struct cp_private *cp)
{
unsigned int mtu = cp->dev->mtu;
-
+
if (mtu > ETH_DATA_LEN)
/* MTU + ethernet header + FCS + optional VLAN tag */
cp->rx_buf_sz = mtu + ETH_HLEN + 8;
@@ -510,7 +510,7 @@ static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
static inline unsigned int cp_rx_csum_ok (u32 status)
{
unsigned int protocol = (status >> 16) & 0x3;
-
+
if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
return 1;
else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
@@ -1061,7 +1061,7 @@ static void cp_init_hw (struct cp_private *cp)
cpw8(Config3, PARMEnable);
cp->wol_enabled = 0;
- cpw8(Config5, cpr8(Config5) & PMEStatus);
+ cpw8(Config5, cpr8(Config5) & PMEStatus);
cpw32_f(HiTxRingAddr, 0);
cpw32_f(HiTxRingAddr + 4, 0);
@@ -1351,7 +1351,7 @@ static void netdev_get_wol (struct cp_private *cp,
WAKE_MCAST | WAKE_UCAST;
/* We don't need to go on if WOL is disabled */
if (!cp->wol_enabled) return;
-
+
options = cpr8 (Config3);
if (options & LinkUp) wol->wolopts |= WAKE_PHY;
if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC;
@@ -1919,7 +1919,7 @@ static int cp_resume (struct pci_dev *pdev)
mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);
spin_unlock_irqrestore (&cp->lock, flags);
-
+
return 0;
}
#endif /* CONFIG_PM */
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index feae7832fc84..abd6261465f1 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -165,7 +165,7 @@ static int multicast_filter_limit = 32;
static int debug = -1;
/*
- * Receive ring size
+ * Receive ring size
* Warning: 64K ring has hardware issues and may lock up.
*/
#if defined(CONFIG_SH_DREAMCAST)
@@ -257,7 +257,7 @@ static struct pci_device_id rtl8139_pci_tbl[] = {
{0x018a, 0x0106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
{0x126c, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
{0x1743, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
- {0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+ {0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
#ifdef CONFIG_SH_SECUREEDGE5410
/* Bogus 8139 silicon reports 8129 without external PROM :-( */
@@ -1824,7 +1824,7 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
int tmp_work;
#endif
- if (netif_msg_rx_err (tp))
+ if (netif_msg_rx_err (tp))
printk(KERN_DEBUG "%s: Ethernet frame had errors, status %8.8x.\n",
dev->name, rx_status);
tp->stats.rx_errors++;
@@ -1944,7 +1944,7 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
RTL_R16 (RxBufAddr),
RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));
- while (netif_running(dev) && received < budget
+ while (netif_running(dev) && received < budget
&& (RTL_R8 (ChipCmd) & RxBufEmpty) == 0) {
u32 ring_offset = cur_rx % RX_BUF_LEN;
u32 rx_status;
@@ -2031,7 +2031,7 @@ no_early_rx:
netif_receive_skb (skb);
} else {
- if (net_ratelimit())
+ if (net_ratelimit())
printk (KERN_WARNING
"%s: Memory squeeze, dropping packet.\n",
dev->name);
@@ -2158,13 +2158,13 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance,
status = RTL_R16 (IntrStatus);
/* shared irq? */
- if (unlikely((status & rtl8139_intr_mask) == 0))
+ if (unlikely((status & rtl8139_intr_mask) == 0))
goto out;
handled = 1;
/* h/w no longer present (hotplug?) or major error, bail */
- if (unlikely(status == 0xFFFF))
+ if (unlikely(status == 0xFFFF))
goto out;
/* close possible race's with dev_close */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index bdaaad8f2123..f499a3bc629f 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -865,6 +865,22 @@ config DM9000
<file:Documentation/networking/net-modules.txt>. The module will be
called dm9000.
+config SMC911X
+ tristate "SMSC LAN911[5678] support"
+ select CRC32
+ select MII
+ depends on NET_ETHERNET
+ help
+ This is a driver for SMSC's LAN911x series of Ethernet chipsets
+ including the new LAN9115, LAN9116, LAN9117, and LAN9118.
+ Say Y if you want it compiled into the kernel,
+ and read the Ethernet-HOWTO, available from
+ <http://www.linuxdoc.org/docs.html#howto>.
+
+ This driver is also available as a module. The module will be
+ called smc911x. If you want to compile it as a module, say M
+ here and read <file:Documentation/modules.txt>
+
config NET_VENDOR_RACAL
bool "Racal-Interlan (Micom) NI cards"
depends on NET_ETHERNET && ISA
@@ -2311,6 +2327,23 @@ config S2IO_NAPI
If in doubt, say N.
+config MYRI10GE
+ tristate "Myricom Myri-10G Ethernet support"
+ depends on PCI
+ select FW_LOADER
+ select CRC32
+ ---help---
+ This driver supports Myricom Myri-10G Dual Protocol interface in
+ Ethernet mode. If the eeprom on your board is not recent enough,
+ you will need a newer firmware image.
+ You may get this image or more information, at:
+
+ <http://www.myri.com/Myri-10G/>
+
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called myri10ge.
+
endmenu
source "drivers/net/tokenring/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index b90468aea077..1eced3287507 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -192,7 +192,9 @@ obj-$(CONFIG_R8169) += r8169.o
obj-$(CONFIG_AMD8111_ETH) += amd8111e.o
obj-$(CONFIG_IBMVETH) += ibmveth.o
obj-$(CONFIG_S2IO) += s2io.o
+obj-$(CONFIG_MYRI10GE) += myri10ge/
obj-$(CONFIG_SMC91X) += smc91x.o
+obj-$(CONFIG_SMC911X) += smc911x.o
obj-$(CONFIG_DM9000) += dm9000.o
obj-$(CONFIG_FEC_8XX) += fec_8xx/
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 14dbad14afb6..e1fe960d71b3 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -2,7 +2,7 @@
*
* Alchemy Au1x00 ethernet driver
*
- * Copyright 2001,2002,2003 MontaVista Software Inc.
+ * Copyright 2001-2003, 2006 MontaVista Software Inc.
* Copyright 2002 TimeSys Corp.
* Added ethtool/mii-tool support,
* Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
@@ -68,7 +68,7 @@ static int au1000_debug = 5;
static int au1000_debug = 3;
#endif
-#define DRV_NAME "au1000eth"
+#define DRV_NAME "au1000_eth"
#define DRV_VERSION "1.5"
#define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
#define DRV_DESC "Au1xxx on-chip Ethernet driver"
@@ -80,7 +80,7 @@ MODULE_LICENSE("GPL");
// prototypes
static void hard_stop(struct net_device *);
static void enable_rx_tx(struct net_device *dev);
-static struct net_device * au1000_probe(u32 ioaddr, int irq, int port_num);
+static struct net_device * au1000_probe(int port_num);
static int au1000_init(struct net_device *);
static int au1000_open(struct net_device *);
static int au1000_close(struct net_device *);
@@ -1160,12 +1160,27 @@ setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
}
static struct {
- int port;
u32 base_addr;
u32 macen_addr;
int irq;
struct net_device *dev;
-} iflist[2];
+} iflist[2] = {
+#ifdef CONFIG_SOC_AU1000
+ {AU1000_ETH0_BASE, AU1000_MAC0_ENABLE, AU1000_MAC0_DMA_INT},
+ {AU1000_ETH1_BASE, AU1000_MAC1_ENABLE, AU1000_MAC1_DMA_INT}
+#endif
+#ifdef CONFIG_SOC_AU1100
+ {AU1100_ETH0_BASE, AU1100_MAC0_ENABLE, AU1100_MAC0_DMA_INT}
+#endif
+#ifdef CONFIG_SOC_AU1500
+ {AU1500_ETH0_BASE, AU1500_MAC0_ENABLE, AU1500_MAC0_DMA_INT},
+ {AU1500_ETH1_BASE, AU1500_MAC1_ENABLE, AU1500_MAC1_DMA_INT}
+#endif
+#ifdef CONFIG_SOC_AU1550
+ {AU1550_ETH0_BASE, AU1550_MAC0_ENABLE, AU1550_MAC0_DMA_INT},
+ {AU1550_ETH1_BASE, AU1550_MAC1_ENABLE, AU1550_MAC1_DMA_INT}
+#endif
+};
static int num_ifs;
@@ -1176,58 +1191,14 @@ static int num_ifs;
*/
static int __init au1000_init_module(void)
{
- struct cpuinfo_mips *c = &current_cpu_data;
int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
struct net_device *dev;
int i, found_one = 0;
- switch (c->cputype) {
-#ifdef CONFIG_SOC_AU1000
- case CPU_AU1000:
- num_ifs = 2 - ni;
- iflist[0].base_addr = AU1000_ETH0_BASE;
- iflist[1].base_addr = AU1000_ETH1_BASE;
- iflist[0].macen_addr = AU1000_MAC0_ENABLE;
- iflist[1].macen_addr = AU1000_MAC1_ENABLE;
- iflist[0].irq = AU1000_MAC0_DMA_INT;
- iflist[1].irq = AU1000_MAC1_DMA_INT;
- break;
-#endif
-#ifdef CONFIG_SOC_AU1100
- case CPU_AU1100:
- num_ifs = 1 - ni;
- iflist[0].base_addr = AU1100_ETH0_BASE;
- iflist[0].macen_addr = AU1100_MAC0_ENABLE;
- iflist[0].irq = AU1100_MAC0_DMA_INT;
- break;
-#endif
-#ifdef CONFIG_SOC_AU1500
- case CPU_AU1500:
- num_ifs = 2 - ni;
- iflist[0].base_addr = AU1500_ETH0_BASE;
- iflist[1].base_addr = AU1500_ETH1_BASE;
- iflist[0].macen_addr = AU1500_MAC0_ENABLE;
- iflist[1].macen_addr = AU1500_MAC1_ENABLE;
- iflist[0].irq = AU1500_MAC0_DMA_INT;
- iflist[1].irq = AU1500_MAC1_DMA_INT;
- break;
-#endif
-#ifdef CONFIG_SOC_AU1550
- case CPU_AU1550:
- num_ifs = 2 - ni;
- iflist[0].base_addr = AU1550_ETH0_BASE;
- iflist[1].base_addr = AU1550_ETH1_BASE;
- iflist[0].macen_addr = AU1550_MAC0_ENABLE;
- iflist[1].macen_addr = AU1550_MAC1_ENABLE;
- iflist[0].irq = AU1550_MAC0_DMA_INT;
- iflist[1].irq = AU1550_MAC1_DMA_INT;
- break;
-#endif
- default:
- num_ifs = 0;
- }
+ num_ifs = NUM_ETH_INTERFACES - ni;
+
for(i = 0; i < num_ifs; i++) {
- dev = au1000_probe(iflist[i].base_addr, iflist[i].irq, i);
+ dev = au1000_probe(i);
iflist[i].dev = dev;
if (dev)
found_one++;
@@ -1436,8 +1407,7 @@ static struct ethtool_ops au1000_ethtool_ops = {
.get_link = au1000_get_link
};
-static struct net_device *
-au1000_probe(u32 ioaddr, int irq, int port_num)
+static struct net_device * au1000_probe(int port_num)
{
static unsigned version_printed = 0;
struct au1000_private *aup = NULL;
@@ -1445,94 +1415,95 @@ au1000_probe(u32 ioaddr, int irq, int port_num)
db_dest_t *pDB, *pDBfree;
char *pmac, *argptr;
char ethaddr[6];
- int i, err;
+ int irq, i, err;
+ u32 base, macen;
+
+ if (port_num >= NUM_ETH_INTERFACES)
+ return NULL;
- if (!request_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE, "Au1x00 ENET"))
+ base = CPHYSADDR(iflist[port_num].base_addr );
+ macen = CPHYSADDR(iflist[port_num].macen_addr);
+ irq = iflist[port_num].irq;
+
+ if (!request_mem_region( base, MAC_IOSIZE, "Au1x00 ENET") ||
+ !request_mem_region(macen, 4, "Au1x00 ENET"))
return NULL;
- if (version_printed++ == 0)
+ if (version_printed++ == 0)
printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
dev = alloc_etherdev(sizeof(struct au1000_private));
if (!dev) {
- printk (KERN_ERR "au1000 eth: alloc_etherdev failed\n");
+ printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
return NULL;
}
- if ((err = register_netdev(dev))) {
- printk(KERN_ERR "Au1x_eth Cannot register net device err %d\n",
- err);
+ if ((err = register_netdev(dev)) != 0) {
+ printk(KERN_ERR "%s: Cannot register net device, error %d\n",
+ DRV_NAME, err);
free_netdev(dev);
return NULL;
}
- printk("%s: Au1x Ethernet found at 0x%x, irq %d\n",
- dev->name, ioaddr, irq);
+ printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n",
+ dev->name, base, irq);
aup = dev->priv;
/* Allocate the data buffers */
/* Snooping works fine with eth on all au1xxx */
- aup->vaddr = (u32)dma_alloc_noncoherent(NULL,
- MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
- &aup->dma_addr,
- 0);
+ aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
+ (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ &aup->dma_addr, 0);
if (!aup->vaddr) {
free_netdev(dev);
- release_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE);
+ release_mem_region( base, MAC_IOSIZE);
+ release_mem_region(macen, 4);
return NULL;
}
/* aup->mac is the base address of the MAC's registers */
- aup->mac = (volatile mac_reg_t *)((unsigned long)ioaddr);
+ aup->mac = (volatile mac_reg_t *)iflist[port_num].base_addr;
+
/* Setup some variables for quick register address access */
- if (ioaddr == iflist[0].base_addr)
- {
- /* check env variables first */
- if (!get_ethernet_addr(ethaddr)) {
+ aup->enable = (volatile u32 *)iflist[port_num].macen_addr;
+ aup->mac_id = port_num;
+ au_macs[port_num] = aup;
+
+ if (port_num == 0) {
+ /* Check the environment variables first */
+ if (get_ethernet_addr(ethaddr) == 0)
memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
- } else {
+ else {
/* Check command line */
argptr = prom_getcmdline();
- if ((pmac = strstr(argptr, "ethaddr=")) == NULL) {
- printk(KERN_INFO "%s: No mac address found\n",
- dev->name);
- /* use the hard coded mac addresses */
- } else {
+ if ((pmac = strstr(argptr, "ethaddr=")) == NULL)
+ printk(KERN_INFO "%s: No MAC address found\n",
+ dev->name);
+ /* Use the hard coded MAC addresses */
+ else {
str2eaddr(ethaddr, pmac + strlen("ethaddr="));
memcpy(au1000_mac_addr, ethaddr,
- sizeof(au1000_mac_addr));
+ sizeof(au1000_mac_addr));
}
}
- aup->enable = (volatile u32 *)
- ((unsigned long)iflist[0].macen_addr);
- memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
+
setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
- aup->mac_id = 0;
- au_macs[0] = aup;
- }
- else
- if (ioaddr == iflist[1].base_addr)
- {
- aup->enable = (volatile u32 *)
- ((unsigned long)iflist[1].macen_addr);
- memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
- dev->dev_addr[4] += 0x10;
+ } else if (port_num == 1)
setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
- aup->mac_id = 1;
- au_macs[1] = aup;
- }
- else
- {
- printk(KERN_ERR "%s: bad ioaddr\n", dev->name);
- }
- /* bring the device out of reset, otherwise probing the mii
- * will hang */
+ /*
+ * Assign to the Ethernet ports two consecutive MAC addresses
+ * to match those that are printed on their stickers
+ */
+ memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
+ dev->dev_addr[5] += port_num;
+
+ /* Bring the device out of reset, otherwise probing the MII will hang */
*aup->enable = MAC_EN_CLOCK_ENABLE;
au_sync_delay(2);
- *aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
- MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
+ *aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2 |
+ MAC_EN_CLOCK_ENABLE;
au_sync_delay(2);
aup->mii = kmalloc(sizeof(struct mii_phy), GFP_KERNEL);
@@ -1581,7 +1552,7 @@ au1000_probe(u32 ioaddr, int irq, int port_num)
}
spin_lock_init(&aup->lock);
- dev->base_addr = ioaddr;
+ dev->base_addr = base;
dev->irq = irq;
dev->open = au1000_open;
dev->hard_start_xmit = au1000_tx;
@@ -1615,13 +1586,12 @@ err_out:
if (aup->tx_db_inuse[i])
ReleaseDB(aup, aup->tx_db_inuse[i]);
}
- dma_free_noncoherent(NULL,
- MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
- (void *)aup->vaddr,
- aup->dma_addr);
+ dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ (void *)aup->vaddr, aup->dma_addr);
unregister_netdev(dev);
free_netdev(dev);
- release_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE);
+ release_mem_region( base, MAC_IOSIZE);
+ release_mem_region(macen, 4);
return NULL;
}
@@ -1806,20 +1776,18 @@ static void __exit au1000_cleanup_module(void)
aup = (struct au1000_private *) dev->priv;
unregister_netdev(dev);
kfree(aup->mii);
- for (j = 0; j < NUM_RX_DMA; j++) {
+ for (j = 0; j < NUM_RX_DMA; j++)
if (aup->rx_db_inuse[j])
ReleaseDB(aup, aup->rx_db_inuse[j]);
- }
- for (j = 0; j < NUM_TX_DMA; j++) {
+ for (j = 0; j < NUM_TX_DMA; j++)
if (aup->tx_db_inuse[j])
ReleaseDB(aup, aup->tx_db_inuse[j]);
- }
- dma_free_noncoherent(NULL,
- MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
- (void *)aup->vaddr,
- aup->dma_addr);
+ dma_free_noncoherent(NULL, MAX_BUF_SIZE *
+ (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ (void *)aup->vaddr, aup->dma_addr);
+ release_mem_region(dev->base_addr, MAC_IOSIZE);
+ release_mem_region(CPHYSADDR(iflist[i].macen_addr), 4);
free_netdev(dev);
- release_mem_region(CPHYSADDR(iflist[i].base_addr), MAC_IOSIZE);
}
}
}
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index ac48f7543500..39f36aa05aa8 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -4877,7 +4877,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
static int cas_version_printed = 0;
- unsigned long casreg_base, casreg_len;
+ unsigned long casreg_len;
struct net_device *dev;
struct cas *cp;
int i, err, pci_using_dac;
@@ -4972,7 +4972,6 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
pci_using_dac = 0;
}
- casreg_base = pci_resource_start(pdev, 0);
casreg_len = pci_resource_len(pdev, 0);
cp = netdev_priv(dev);
@@ -5024,7 +5023,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
cp->timer_ticks = 0;
/* give us access to cassini registers */
- cp->regs = ioremap(casreg_base, casreg_len);
+ cp->regs = pci_iomap(pdev, 0, casreg_len);
if (cp->regs == 0UL) {
printk(KERN_ERR PFX "Cannot map device registers, "
"aborting.\n");
@@ -5123,7 +5122,7 @@ err_out_iounmap:
cas_shutdown(cp);
mutex_unlock(&cp->pm_mutex);
- iounmap(cp->regs);
+ pci_iounmap(pdev, cp->regs);
err_out_free_res:
@@ -5171,7 +5170,7 @@ static void __devexit cas_remove_one(struct pci_dev *pdev)
#endif
pci_free_consistent(pdev, sizeof(struct cas_init_block),
cp->init_block, cp->block_dvma);
- iounmap(cp->regs);
+ pci_iounmap(pdev, cp->regs);
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile
index ca9f89552da3..5dea2b7dea4d 100644
--- a/drivers/net/e1000/Makefile
+++ b/drivers/net/e1000/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
#
-# Copyright(c) 1999 - 2003 Intel Corporation. All rights reserved.
+# Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
@@ -22,6 +22,7 @@
#
# Contact Information:
# Linux NICS <linux.nics@intel.com>
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
#
################################################################################
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 281de41d030a..2bc34fbfa69c 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -22,6 +22,7 @@
Contact Information:
Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
@@ -114,6 +115,8 @@ struct e1000_adapter;
/* Supported Rx Buffer Sizes */
#define E1000_RXBUFFER_128 128 /* Used for packet split */
#define E1000_RXBUFFER_256 256 /* Used for packet split */
+#define E1000_RXBUFFER_512 512
+#define E1000_RXBUFFER_1024 1024
#define E1000_RXBUFFER_2048 2048
#define E1000_RXBUFFER_4096 4096
#define E1000_RXBUFFER_8192 8192
@@ -334,7 +337,6 @@ struct e1000_adapter {
boolean_t have_msi;
#endif
/* to not mess up cache alignment, always add to the bottom */
- boolean_t txb2b;
#ifdef NETIF_F_TSO
boolean_t tso_force;
#endif
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index d1c705b412c2..6ed7f599eba3 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -22,6 +22,7 @@
Contact Information:
Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
@@ -864,8 +865,8 @@ static int
e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
{
struct net_device *netdev = adapter->netdev;
- uint32_t mask, i=0, shared_int = TRUE;
- uint32_t irq = adapter->pdev->irq;
+ uint32_t mask, i=0, shared_int = TRUE;
+ uint32_t irq = adapter->pdev->irq;
*data = 0;
@@ -891,22 +892,22 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
/* Interrupt to test */
mask = 1 << i;
- if (!shared_int) {
- /* Disable the interrupt to be reported in
- * the cause register and then force the same
- * interrupt and see if one gets posted. If
- * an interrupt was posted to the bus, the
- * test failed.
- */
- adapter->test_icr = 0;
- E1000_WRITE_REG(&adapter->hw, IMC, mask);
- E1000_WRITE_REG(&adapter->hw, ICS, mask);
- msec_delay(10);
-
- if (adapter->test_icr & mask) {
- *data = 3;
- break;
- }
+ if (!shared_int) {
+ /* Disable the interrupt to be reported in
+ * the cause register and then force the same
+ * interrupt and see if one gets posted. If
+ * an interrupt was posted to the bus, the
+ * test failed.
+ */
+ adapter->test_icr = 0;
+ E1000_WRITE_REG(&adapter->hw, IMC, mask);
+ E1000_WRITE_REG(&adapter->hw, ICS, mask);
+ msec_delay(10);
+
+ if (adapter->test_icr & mask) {
+ *data = 3;
+ break;
+ }
}
/* Enable the interrupt to be reported in
@@ -925,7 +926,7 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
break;
}
- if (!shared_int) {
+ if (!shared_int) {
/* Disable the other interrupts to be reported in
* the cause register and then force the other
* interrupts and see if any get posted. If
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 523c2c9fc0ac..3959039b16ec 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -22,6 +22,7 @@
Contact Information:
Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
@@ -764,7 +765,7 @@ e1000_init_hw(struct e1000_hw *hw)
}
if (hw->mac_type == e1000_82573) {
- e1000_enable_tx_pkt_filtering(hw);
+ e1000_enable_tx_pkt_filtering(hw);
}
switch (hw->mac_type) {
@@ -860,7 +861,7 @@ e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
if(eeprom_data != EEPROM_RESERVED_WORD) {
/* Adjust SERDES output amplitude only. */
- eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
+ eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
if(ret_val)
return ret_val;
@@ -1227,7 +1228,7 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
if (hw->phy_reset_disable)
return E1000_SUCCESS;
-
+
ret_val = e1000_phy_reset(hw);
if (ret_val) {
DEBUGOUT("Error Resetting the PHY\n");
@@ -1369,7 +1370,7 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
DEBUGFUNC("e1000_copper_link_ggp_setup");
if(!hw->phy_reset_disable) {
-
+
/* Enable CRS on TX for half-duplex operation. */
ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
&phy_data);
@@ -1518,7 +1519,7 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw)
if(hw->phy_reset_disable)
return E1000_SUCCESS;
-
+
/* Enable CRS on TX. This must be set for half-duplex operation. */
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
if(ret_val)
@@ -1664,7 +1665,7 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
* collision distance in the Transmit Control Register.
* 2) Set up flow control on the MAC to that established with
* the link partner.
-* 3) Config DSP to improve Gigabit link quality for some PHY revisions.
+* 3) Config DSP to improve Gigabit link quality for some PHY revisions.
*
* hw - Struct containing variables accessed by shared code
******************************************************************************/
@@ -1673,7 +1674,7 @@ e1000_copper_link_postconfig(struct e1000_hw *hw)
{
int32_t ret_val;
DEBUGFUNC("e1000_copper_link_postconfig");
-
+
if(hw->mac_type >= e1000_82544) {
e1000_config_collision_dist(hw);
} else {
@@ -1697,7 +1698,7 @@ e1000_copper_link_postconfig(struct e1000_hw *hw)
return ret_val;
}
}
-
+
return E1000_SUCCESS;
}
@@ -1753,11 +1754,11 @@ e1000_setup_copper_link(struct e1000_hw *hw)
}
if(hw->autoneg) {
- /* Setup autoneg and flow control advertisement
- * and perform autonegotiation */
+ /* Setup autoneg and flow control advertisement
+ * and perform autonegotiation */
ret_val = e1000_copper_link_autoneg(hw);
if(ret_val)
- return ret_val;
+ return ret_val;
} else {
/* PHY will be set to 10H, 10F, 100H,or 100F
* depending on value from forced_speed_duplex. */
@@ -1785,7 +1786,7 @@ e1000_setup_copper_link(struct e1000_hw *hw)
ret_val = e1000_copper_link_postconfig(hw);
if(ret_val)
return ret_val;
-
+
DEBUGOUT("Valid link established!!!\n");
return E1000_SUCCESS;
}
@@ -1983,7 +1984,7 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
- ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
+ ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
if(ret_val)
return ret_val;
@@ -2272,7 +2273,7 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
DEBUGFUNC("e1000_config_mac_to_phy");
- /* 82544 or newer MAC, Auto Speed Detection takes care of
+ /* 82544 or newer MAC, Auto Speed Detection takes care of
* MAC speed/duplex configuration.*/
if (hw->mac_type >= e1000_82544)
return E1000_SUCCESS;
@@ -2291,9 +2292,9 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
if(ret_val)
return ret_val;
- if(phy_data & M88E1000_PSSR_DPLX)
+ if(phy_data & M88E1000_PSSR_DPLX)
ctrl |= E1000_CTRL_FD;
- else
+ else
ctrl &= ~E1000_CTRL_FD;
e1000_config_collision_dist(hw);
@@ -2492,10 +2493,10 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
*/
if(hw->original_fc == e1000_fc_full) {
hw->fc = e1000_fc_full;
- DEBUGOUT("Flow Control = FULL.\r\n");
+ DEBUGOUT("Flow Control = FULL.\n");
} else {
hw->fc = e1000_fc_rx_pause;
- DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
}
}
/* For receiving PAUSE frames ONLY.
@@ -2511,7 +2512,7 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
hw->fc = e1000_fc_tx_pause;
- DEBUGOUT("Flow Control = TX PAUSE frames only.\r\n");
+ DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
}
/* For transmitting PAUSE frames ONLY.
*
@@ -2526,7 +2527,7 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
!(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
hw->fc = e1000_fc_rx_pause;
- DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
}
/* Per the IEEE spec, at this point flow control should be
* disabled. However, we want to consider that we could
@@ -2552,10 +2553,10 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
hw->original_fc == e1000_fc_tx_pause) ||
hw->fc_strict_ieee) {
hw->fc = e1000_fc_none;
- DEBUGOUT("Flow Control = NONE.\r\n");
+ DEBUGOUT("Flow Control = NONE.\n");
} else {
hw->fc = e1000_fc_rx_pause;
- DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
}
/* Now we need to do one last check... If we auto-
@@ -2580,7 +2581,7 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
return ret_val;
}
} else {
- DEBUGOUT("Copper PHY and Auto Neg has not completed.\r\n");
+ DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
}
}
return E1000_SUCCESS;
@@ -2763,7 +2764,7 @@ e1000_check_for_link(struct e1000_hw *hw)
hw->autoneg_failed = 1;
return 0;
}
- DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\r\n");
+ DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
/* Disable auto-negotiation in the TXCW register */
E1000_WRITE_REG(hw, TXCW, (hw->txcw & ~E1000_TXCW_ANE));
@@ -2788,7 +2789,7 @@ e1000_check_for_link(struct e1000_hw *hw)
else if(((hw->media_type == e1000_media_type_fiber) ||
(hw->media_type == e1000_media_type_internal_serdes)) &&
(ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
- DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\r\n");
+ DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
E1000_WRITE_REG(hw, TXCW, hw->txcw);
E1000_WRITE_REG(hw, CTRL, (ctrl & ~E1000_CTRL_SLU));
@@ -2851,13 +2852,13 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
if(status & E1000_STATUS_FD) {
*duplex = FULL_DUPLEX;
- DEBUGOUT("Full Duplex\r\n");
+ DEBUGOUT("Full Duplex\n");
} else {
*duplex = HALF_DUPLEX;
- DEBUGOUT(" Half Duplex\r\n");
+ DEBUGOUT(" Half Duplex\n");
}
} else {
- DEBUGOUT("1000 Mbs, Full Duplex\r\n");
+ DEBUGOUT("1000 Mbs, Full Duplex\n");
*speed = SPEED_1000;
*duplex = FULL_DUPLEX;
}
@@ -2883,7 +2884,7 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
}
}
- if ((hw->mac_type == e1000_80003es2lan) &&
+ if ((hw->mac_type == e1000_80003es2lan) &&
(hw->media_type == e1000_media_type_copper)) {
if (*speed == SPEED_1000)
ret_val = e1000_configure_kmrn_for_1000(hw);
@@ -3159,7 +3160,7 @@ e1000_read_phy_reg(struct e1000_hw *hw,
if (e1000_swfw_sync_acquire(hw, swfw))
return -E1000_ERR_SWFW_SYNC;
- if((hw->phy_type == e1000_phy_igp ||
+ if((hw->phy_type == e1000_phy_igp ||
hw->phy_type == e1000_phy_igp_2) &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
@@ -3298,7 +3299,7 @@ e1000_write_phy_reg(struct e1000_hw *hw,
if (e1000_swfw_sync_acquire(hw, swfw))
return -E1000_ERR_SWFW_SYNC;
- if((hw->phy_type == e1000_phy_igp ||
+ if((hw->phy_type == e1000_phy_igp ||
hw->phy_type == e1000_phy_igp_2) &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
@@ -3496,22 +3497,22 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
}
/* Read the device control register and assert the E1000_CTRL_PHY_RST
* bit. Then, take it out of reset.
- * For pre-e1000_82571 hardware, we delay for 10ms between the assert
+ * For pre-e1000_82571 hardware, we delay for 10ms between the assert
* and deassert. For e1000_82571 hardware and later, we instead delay
* for 50us between and 10ms after the deassertion.
*/
ctrl = E1000_READ_REG(hw, CTRL);
E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PHY_RST);
E1000_WRITE_FLUSH(hw);
-
- if (hw->mac_type < e1000_82571)
+
+ if (hw->mac_type < e1000_82571)
msec_delay(10);
else
udelay(100);
-
+
E1000_WRITE_REG(hw, CTRL, ctrl);
E1000_WRITE_FLUSH(hw);
-
+
if (hw->mac_type >= e1000_82571)
msec_delay(10);
e1000_swfw_sync_release(hw, swfw);
@@ -3815,7 +3816,7 @@ e1000_phy_m88_get_info(struct e1000_hw *hw,
/* Check polarity status */
ret_val = e1000_check_polarity(hw, &polarity);
if(ret_val)
- return ret_val;
+ return ret_val;
phy_info->cable_polarity = polarity;
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
@@ -4540,14 +4541,14 @@ e1000_read_eeprom_eerd(struct e1000_hw *hw,
E1000_WRITE_REG(hw, EERD, eerd);
error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ);
-
+
if(error) {
break;
}
data[i] = (E1000_READ_REG(hw, EERD) >> E1000_EEPROM_RW_REG_DATA);
-
+
}
-
+
return error;
}
@@ -4573,24 +4574,24 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw,
return -E1000_ERR_SWFW_SYNC;
for (i = 0; i < words; i++) {
- register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) |
- ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) |
+ register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) |
+ ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) |
E1000_EEPROM_RW_REG_START;
error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
if(error) {
break;
- }
+ }
E1000_WRITE_REG(hw, EEWR, register_value);
-
+
error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
-
+
if(error) {
break;
- }
+ }
}
-
+
e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
return error;
}
@@ -4610,7 +4611,7 @@ e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
for(i = 0; i < attempts; i++) {
if(eerd == E1000_EEPROM_POLL_READ)
reg = E1000_READ_REG(hw, EERD);
- else
+ else
reg = E1000_READ_REG(hw, EEWR);
if(reg & E1000_EEPROM_RW_REG_DONE) {
@@ -5135,7 +5136,7 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
uint32_t i;
uint32_t num_rar_entry;
uint32_t num_mta_entry;
-
+
DEBUGFUNC("e1000_mc_addr_list_update");
/* Set the new number of MC addresses that we are being requested to use. */
@@ -6240,7 +6241,7 @@ e1000_check_polarity(struct e1000_hw *hw,
* 1 - Downshift ocured.
*
* returns: - E1000_ERR_XXX
- * E1000_SUCCESS
+ * E1000_SUCCESS
*
* For phy's older then IGP, this function reads the Downshift bit in the Phy
* Specific Status register. For IGP phy's, it reads the Downgrade bit in the
@@ -6255,7 +6256,7 @@ e1000_check_downshift(struct e1000_hw *hw)
DEBUGFUNC("e1000_check_downshift");
- if(hw->phy_type == e1000_phy_igp ||
+ if(hw->phy_type == e1000_phy_igp ||
hw->phy_type == e1000_phy_igp_2) {
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
&phy_data);
@@ -6684,8 +6685,8 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
} else {
-
- phy_data |= IGP02E1000_PM_D0_LPLU;
+
+ phy_data |= IGP02E1000_PM_D0_LPLU;
ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
if (ret_val)
return ret_val;
@@ -6777,7 +6778,7 @@ int32_t
e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer)
{
uint8_t i;
- uint32_t offset = E1000_MNG_DHCP_COOKIE_OFFSET;
+ uint32_t offset = E1000_MNG_DHCP_COOKIE_OFFSET;
uint8_t length = E1000_MNG_DHCP_COOKIE_LENGTH;
length = (length >> 2);
@@ -6796,7 +6797,7 @@ e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer)
* and also checks whether the previous command is completed.
* It busy waits in case of previous command is not completed.
*
- * returns: - E1000_ERR_HOST_INTERFACE_COMMAND in case if is not ready or
+ * returns: - E1000_ERR_HOST_INTERFACE_COMMAND in case if is not ready or
* timeout
* - E1000_SUCCESS for success.
****************************************************************************/
@@ -6820,7 +6821,7 @@ e1000_mng_enable_host_if(struct e1000_hw * hw)
msec_delay_irq(1);
}
- if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+ if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
DEBUGOUT("Previous command timeout failed .\n");
return -E1000_ERR_HOST_INTERFACE_COMMAND;
}
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 150e45e30f87..467c9ed944f8 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -22,6 +22,7 @@
Contact Information:
Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
@@ -374,7 +375,7 @@ struct e1000_host_mng_dhcp_cookie{
};
#endif
-int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer,
+int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer,
uint16_t length);
boolean_t e1000_check_mng_mode(struct e1000_hw *hw);
boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
@@ -1801,7 +1802,7 @@ struct e1000_hw {
* value2 = [0..64512], default=4096
* value3 = [0..64512], default=0
*/
-
+
#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 97e71a4fe8eb..115eff25d8c1 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -22,51 +22,13 @@
Contact Information:
Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "e1000.h"
-/* Change Log
- * 7.0.33 3-Feb-2006
- * o Added another fix for the pass false carrier bit
- * 7.0.32 24-Jan-2006
- * o Need to rebuild with noew version number for the pass false carrier
- * fix in e1000_hw.c
- * 7.0.30 18-Jan-2006
- * o fixup for tso workaround to disable it for pci-x
- * o fix mem leak on 82542
- * o fixes for 10 Mb/s connections and incorrect stats
- * 7.0.28 01/06/2006
- * o hardware workaround to only set "speed mode" bit for 1G link.
- * 7.0.26 12/23/2005
- * o wake on lan support modified for device ID 10B5
- * o fix dhcp + vlan issue not making it to the iAMT firmware
- * 7.0.24 12/9/2005
- * o New hardware support for the Gigabit NIC embedded in the south bridge
- * o Fixes to the recycling logic (skb->tail) from IBM LTC
- * 6.3.9 12/16/2005
- * o incorporate fix for recycled skbs from IBM LTC
- * 6.3.7 11/18/2005
- * o Honor eeprom setting for enabling/disabling Wake On Lan
- * 6.3.5 11/17/2005
- * o Fix memory leak in rx ring handling for PCI Express adapters
- * 6.3.4 11/8/05
- * o Patch from Jesper Juhl to remove redundant NULL checks for kfree
- * 6.3.2 9/20/05
- * o Render logic that sets/resets DRV_LOAD as inline functions to
- * avoid code replication. If f/w is AMT then set DRV_LOAD only when
- * network interface is open.
- * o Handle DRV_LOAD set/reset in cases where AMT uses VLANs.
- * o Adjust PBA partioning for Jumbo frames using MTU size and not
- * rx_buffer_len
- * 6.3.1 9/19/05
- * o Use adapter->tx_timeout_factor in Tx Hung Detect logic
- * (e1000_clean_tx_irq)
- * o Support for 8086:10B5 device (Quad Port)
- */
-
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#ifndef CONFIG_E1000_NAPI
@@ -74,9 +36,9 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
-#define DRV_VERSION "7.0.33-k2"DRIVERNAPI
+#define DRV_VERSION "7.0.38-k4"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
-static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
+static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
/* e1000_pci_tbl - PCI Device ID Table
*
@@ -208,8 +170,8 @@ static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct net_device *dev);
static void e1000_smartspeed(struct e1000_adapter *adapter);
-static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
- struct sk_buff *skb);
+static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+ struct sk_buff *skb);
static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
@@ -293,7 +255,7 @@ module_exit(e1000_exit_module);
* @adapter: board private structure
**/
-static inline void
+static void
e1000_irq_disable(struct e1000_adapter *adapter)
{
atomic_inc(&adapter->irq_sem);
@@ -307,7 +269,7 @@ e1000_irq_disable(struct e1000_adapter *adapter)
* @adapter: board private structure
**/
-static inline void
+static void
e1000_irq_enable(struct e1000_adapter *adapter)
{
if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
@@ -348,10 +310,10 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
* For ASF and Pass Through versions of f/w this means that the
* driver is no longer loaded. For AMT version (only with 82573) i
* of the f/w this means that the netowrk i/f is closed.
- *
+ *
**/
-static inline void
+static void
e1000_release_hw_control(struct e1000_adapter *adapter)
{
uint32_t ctrl_ext;
@@ -361,6 +323,7 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
switch (adapter->hw.mac_type) {
case e1000_82571:
case e1000_82572:
+ case e1000_80003es2lan:
ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
@@ -379,13 +342,13 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
* @adapter: address of board private structure
*
* e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that
- * the driver is loaded. For AMT version (only with 82573)
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
* of the f/w this means that the netowrk i/f is open.
- *
+ *
**/
-static inline void
+static void
e1000_get_hw_control(struct e1000_adapter *adapter)
{
uint32_t ctrl_ext;
@@ -394,6 +357,7 @@ e1000_get_hw_control(struct e1000_adapter *adapter)
switch (adapter->hw.mac_type) {
case e1000_82571:
case e1000_82572:
+ case e1000_80003es2lan:
ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
@@ -421,7 +385,7 @@ e1000_up(struct e1000_adapter *adapter)
uint16_t mii_reg;
e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
if (mii_reg & MII_CR_POWER_DOWN)
- e1000_phy_reset(&adapter->hw);
+ e1000_phy_hw_reset(&adapter->hw);
}
e1000_set_multi(netdev);
@@ -711,8 +675,8 @@ e1000_probe(struct pci_dev *pdev,
DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
/* if ksp3, indicate if it's port a being setup */
- if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 &&
- e1000_ksp3_port_a == 0)
+ if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 &&
+ e1000_ksp3_port_a == 0)
adapter->ksp3_port_a = 1;
e1000_ksp3_port_a++;
/* Reset for multiple KP3 adapters */
@@ -740,9 +704,9 @@ e1000_probe(struct pci_dev *pdev,
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- /* hard_start_xmit is safe against parallel locking */
- netdev->features |= NETIF_F_LLTX;
-
+ /* hard_start_xmit is safe against parallel locking */
+ netdev->features |= NETIF_F_LLTX;
+
adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
/* before reading the EEPROM, reset the controller to
@@ -972,8 +936,8 @@ e1000_sw_init(struct e1000_adapter *adapter)
pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
- adapter->rx_buffer_len = E1000_RXBUFFER_2048;
- adapter->rx_ps_bsize0 = E1000_RXBUFFER_256;
+ adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE;
+ adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
hw->max_frame_size = netdev->mtu +
ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
@@ -1181,7 +1145,7 @@ e1000_close(struct net_device *netdev)
* @start: address of beginning of memory
* @len: length of memory
**/
-static inline boolean_t
+static boolean_t
e1000_check_64k_bound(struct e1000_adapter *adapter,
void *start, unsigned long len)
{
@@ -1599,14 +1563,21 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
rctl |= E1000_RCTL_LPE;
/* Setup buffer sizes */
- if (adapter->hw.mac_type >= e1000_82571) {
- /* We can now specify buffers in 1K increments.
- * BSIZE and BSEX are ignored in this case. */
- rctl |= adapter->rx_buffer_len << 0x11;
- } else {
- rctl &= ~E1000_RCTL_SZ_4096;
- rctl |= E1000_RCTL_BSEX;
- switch (adapter->rx_buffer_len) {
+ rctl &= ~E1000_RCTL_SZ_4096;
+ rctl |= E1000_RCTL_BSEX;
+ switch (adapter->rx_buffer_len) {
+ case E1000_RXBUFFER_256:
+ rctl |= E1000_RCTL_SZ_256;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case E1000_RXBUFFER_512:
+ rctl |= E1000_RCTL_SZ_512;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case E1000_RXBUFFER_1024:
+ rctl |= E1000_RCTL_SZ_1024;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
case E1000_RXBUFFER_2048:
default:
rctl |= E1000_RCTL_SZ_2048;
@@ -1621,7 +1592,6 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
case E1000_RXBUFFER_16384:
rctl |= E1000_RCTL_SZ_16384;
break;
- }
}
#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
@@ -1715,7 +1685,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
if (hw->mac_type >= e1000_82571) {
ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
/* Reset delay timers after every interrupt */
- ctrl_ext |= E1000_CTRL_EXT_CANC;
+ ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
#ifdef CONFIG_E1000_NAPI
/* Auto-Mask interrupts upon ICR read. */
ctrl_ext |= E1000_CTRL_EXT_IAME;
@@ -1807,7 +1777,7 @@ e1000_free_all_tx_resources(struct e1000_adapter *adapter)
e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
}
-static inline void
+static void
e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
struct e1000_buffer *buffer_info)
{
@@ -2247,6 +2217,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
if (link) {
if (!netif_carrier_ok(netdev)) {
+ boolean_t txb2b = 1;
e1000_get_speed_and_duplex(&adapter->hw,
&adapter->link_speed,
&adapter->link_duplex);
@@ -2260,23 +2231,22 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
* and adjust the timeout factor */
netdev->tx_queue_len = adapter->tx_queue_len;
adapter->tx_timeout_factor = 1;
- adapter->txb2b = 1;
switch (adapter->link_speed) {
case SPEED_10:
- adapter->txb2b = 0;
+ txb2b = 0;
netdev->tx_queue_len = 10;
adapter->tx_timeout_factor = 8;
break;
case SPEED_100:
- adapter->txb2b = 0;
+ txb2b = 0;
netdev->tx_queue_len = 100;
/* maybe add some timeout factor ? */
break;
}
- if ((adapter->hw.mac_type == e1000_82571 ||
+ if ((adapter->hw.mac_type == e1000_82571 ||
adapter->hw.mac_type == e1000_82572) &&
- adapter->txb2b == 0) {
+ txb2b == 0) {
#define SPEED_MODE_BIT (1 << 21)
uint32_t tarc0;
tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
@@ -2400,7 +2370,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT 16
-static inline int
+static int
e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
struct sk_buff *skb)
{
@@ -2422,7 +2392,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
mss = skb_shinfo(skb)->tso_size;
- if (skb->protocol == ntohs(ETH_P_IP)) {
+ if (skb->protocol == htons(ETH_P_IP)) {
skb->nh.iph->tot_len = 0;
skb->nh.iph->check = 0;
skb->h.th->check =
@@ -2480,7 +2450,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
return FALSE;
}
-static inline boolean_t
+static boolean_t
e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
struct sk_buff *skb)
{
@@ -2516,7 +2486,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
#define E1000_MAX_TXD_PWR 12
#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
-static inline int
+static int
e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
struct sk_buff *skb, unsigned int first, unsigned int max_per_txd,
unsigned int nr_frags, unsigned int mss)
@@ -2625,7 +2595,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
return count;
}
-static inline void
+static void
e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
int tx_flags, int count)
{
@@ -2689,7 +2659,7 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
#define E1000_FIFO_HDR 0x10
#define E1000_82547_PAD_LEN 0x3E0
-static inline int
+static int
e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
{
uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
@@ -2716,7 +2686,7 @@ no_fifo_stall_required:
}
#define MINIMUM_DHCP_PACKET_SIZE 282
-static inline int
+static int
e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
{
struct e1000_hw *hw = &adapter->hw;
@@ -2764,7 +2734,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
unsigned int nr_frags = 0;
unsigned int mss = 0;
int count = 0;
- int tso;
+ int tso;
unsigned int f;
len -= skb->data_len;
@@ -2777,7 +2747,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
#ifdef NETIF_F_TSO
mss = skb_shinfo(skb)->tso_size;
- /* The controller does a simple calculation to
+ /* The controller does a simple calculation to
* make sure there is enough room in the FIFO before
* initiating the DMA for each buffer. The calc is:
* 4 = ceil(buffer len/mss). To make sure we don't
@@ -2800,7 +2770,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
case e1000_82573:
pull_size = min((unsigned int)4, skb->data_len);
if (!__pskb_pull_tail(skb, pull_size)) {
- printk(KERN_ERR
+ printk(KERN_ERR
"__pskb_pull_tail failed.\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -2901,7 +2871,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* Old method was to assume IPv4 packet by default if TSO was enabled.
* 82571 hardware supports TSO capabilities for IPv6 as well...
* no longer assume, we must. */
- if (likely(skb->protocol == ntohs(ETH_P_IP)))
+ if (likely(skb->protocol == htons(ETH_P_IP)))
tx_flags |= E1000_TX_FLAGS_IPV4;
e1000_tx_queue(adapter, tx_ring, tx_flags,
@@ -2982,8 +2952,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
/* Adapter-specific max frame size limits. */
switch (adapter->hw.mac_type) {
- case e1000_82542_rev2_0:
- case e1000_82542_rev2_1:
+ case e1000_undefined ... e1000_82542_rev2_1:
if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
return -EINVAL;
@@ -3017,27 +2986,32 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
break;
}
-
- if (adapter->hw.mac_type > e1000_82547_rev_2) {
- adapter->rx_buffer_len = max_frame;
- E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
- } else {
- if(unlikely((adapter->hw.mac_type < e1000_82543) &&
- (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
- DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
- "on 82542\n");
- return -EINVAL;
- } else {
- if(max_frame <= E1000_RXBUFFER_2048)
- adapter->rx_buffer_len = E1000_RXBUFFER_2048;
- else if(max_frame <= E1000_RXBUFFER_4096)
- adapter->rx_buffer_len = E1000_RXBUFFER_4096;
- else if(max_frame <= E1000_RXBUFFER_8192)
- adapter->rx_buffer_len = E1000_RXBUFFER_8192;
- else if(max_frame <= E1000_RXBUFFER_16384)
- adapter->rx_buffer_len = E1000_RXBUFFER_16384;
- }
- }
+ /* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+ * means we reserve 2 more, this pushes us to allocate from the next
+ * larger slab size
+ * i.e. RXBUFFER_2048 --> size-4096 slab */
+
+ if (max_frame <= E1000_RXBUFFER_256)
+ adapter->rx_buffer_len = E1000_RXBUFFER_256;
+ else if (max_frame <= E1000_RXBUFFER_512)
+ adapter->rx_buffer_len = E1000_RXBUFFER_512;
+ else if (max_frame <= E1000_RXBUFFER_1024)
+ adapter->rx_buffer_len = E1000_RXBUFFER_1024;
+ else if (max_frame <= E1000_RXBUFFER_2048)
+ adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+ else if (max_frame <= E1000_RXBUFFER_4096)
+ adapter->rx_buffer_len = E1000_RXBUFFER_4096;
+ else if (max_frame <= E1000_RXBUFFER_8192)
+ adapter->rx_buffer_len = E1000_RXBUFFER_8192;
+ else if (max_frame <= E1000_RXBUFFER_16384)
+ adapter->rx_buffer_len = E1000_RXBUFFER_16384;
+
+ /* adjust allocation if LPE protects us, and we aren't using SBP */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+ if (!adapter->hw.tbi_compatibility_on &&
+ ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
+ (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
+ adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
netdev->mtu = new_mtu;
@@ -3165,7 +3139,6 @@ e1000_update_stats(struct e1000_adapter *adapter)
adapter->stats.crcerrs + adapter->stats.algnerrc +
adapter->stats.ruc + adapter->stats.roc +
adapter->stats.cexterr;
- adapter->net_stats.rx_dropped = 0;
adapter->net_stats.rx_length_errors = adapter->stats.ruc +
adapter->stats.roc;
adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
@@ -3391,13 +3364,15 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
tx_ring->next_to_clean = i;
- spin_lock(&tx_ring->tx_lock);
-
+#define TX_WAKE_THRESHOLD 32
if (unlikely(cleaned && netif_queue_stopped(netdev) &&
- netif_carrier_ok(netdev)))
- netif_wake_queue(netdev);
-
- spin_unlock(&tx_ring->tx_lock);
+ netif_carrier_ok(netdev))) {
+ spin_lock(&tx_ring->tx_lock);
+ if (netif_queue_stopped(netdev) &&
+ (E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))
+ netif_wake_queue(netdev);
+ spin_unlock(&tx_ring->tx_lock);
+ }
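The reworked wake path only takes tx_lock after a cheap unlocked test, and it re-checks netif_queue_stopped() plus a free-descriptor threshold under the lock, so a racing xmit cannot wake the queue prematurely or wake it with almost no descriptors free. A hedged sketch of the same check/lock/re-check pattern; my_ring, ring_free_count() and WAKE_THRESHOLD are illustrative stand-ins for the driver's tx ring, E1000_DESC_UNUSED() and TX_WAKE_THRESHOLD:

	#include <linux/netdevice.h>
	#include <linux/spinlock.h>

	#define WAKE_THRESHOLD 32

	struct my_ring {
		spinlock_t lock;
		unsigned int count;		/* total descriptors */
		unsigned int next_to_use;
		unsigned int next_to_clean;
	};

	static unsigned int ring_free_count(struct my_ring *r)
	{
		/* unused = clean - use - 1, wrapped around the ring size */
		return (r->next_to_clean > r->next_to_use ? 0 : r->count) +
		       r->next_to_clean - r->next_to_use - 1;
	}

	static void maybe_wake_queue(struct net_device *dev, struct my_ring *ring)
	{
		if (!netif_queue_stopped(dev) || !netif_carrier_ok(dev))
			return;			/* cheap unlocked early-out */

		spin_lock(&ring->lock);
		/* re-check under the lock: the xmit path may have restarted
		 * the queue or consumed descriptors since the test above */
		if (netif_queue_stopped(dev) &&
		    ring_free_count(ring) >= WAKE_THRESHOLD)
			netif_wake_queue(dev);
		spin_unlock(&ring->lock);
	}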
if (adapter->detect_tx_hung) {
/* Detect a transmit hang in hardware, this serializes the
@@ -3445,7 +3420,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
* @sk_buff: socket buffer with received data
**/
-static inline void
+static void
e1000_rx_checksum(struct e1000_adapter *adapter,
uint32_t status_err, uint32_t csum,
struct sk_buff *skb)
@@ -3567,7 +3542,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
flags);
length--;
} else {
- dev_kfree_skb_irq(skb);
+ /* recycle */
+ buffer_info->skb = skb;
goto next_desc;
}
}
@@ -3675,6 +3651,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
i = rx_ring->next_to_clean;
rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
+ buffer_info = &rx_ring->buffer_info[i];
while (staterr & E1000_RXD_STAT_DD) {
buffer_info = &rx_ring->buffer_info[i];
@@ -3733,9 +3710,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
/* page alloc/put takes too long and effects small packet
* throughput, so unsplit small packets and save the alloc/put*/
- if (l1 && ((length + l1) < E1000_CB_LENGTH)) {
+ if (l1 && ((length + l1) <= adapter->rx_ps_bsize0)) {
u8 *vaddr;
- /* there is no documentation about how to call
+ /* there is no documentation about how to call
* kmap_atomic, so we can't hold the mapping
* very long */
pci_dma_sync_single_for_cpu(pdev,
@@ -4155,7 +4132,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
spin_unlock_irqrestore(&adapter->stats_lock, flags);
return -EIO;
}
- if (adapter->hw.phy_type == e1000_media_type_copper) {
+ if (adapter->hw.media_type == e1000_media_type_copper) {
switch (data->reg_num) {
case PHY_CTRL:
if (mii_reg & MII_CR_POWER_DOWN)
@@ -4514,21 +4491,13 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
- retval = pci_enable_wake(pdev, PCI_D3hot, 1);
- if (retval)
- DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
- retval = pci_enable_wake(pdev, PCI_D3cold, 1);
- if (retval)
- DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ pci_enable_wake(pdev, PCI_D3cold, 1);
} else {
E1000_WRITE_REG(&adapter->hw, WUC, 0);
E1000_WRITE_REG(&adapter->hw, WUFC, 0);
- retval = pci_enable_wake(pdev, PCI_D3hot, 0);
- if (retval)
- DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
- retval = pci_enable_wake(pdev, PCI_D3cold, 0);
- if (retval)
- DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
}
if (adapter->hw.mac_type >= e1000_82540 &&
@@ -4537,13 +4506,8 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
if (manc & E1000_MANC_SMBUS_EN) {
manc |= E1000_MANC_ARP_EN;
E1000_WRITE_REG(&adapter->hw, MANC, manc);
- retval = pci_enable_wake(pdev, PCI_D3hot, 1);
- if (retval)
- DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
- retval = pci_enable_wake(pdev, PCI_D3cold, 1);
- if (retval)
- DPRINTK(PROBE, ERR,
- "Error enabling D3 cold wake\n");
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ pci_enable_wake(pdev, PCI_D3cold, 1);
}
}
@@ -4553,9 +4517,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
pci_disable_device(pdev);
- retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
- if (retval)
- DPRINTK(PROBE, ERR, "Error in setting power state\n");
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
@@ -4566,22 +4528,15 @@ e1000_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- int retval;
uint32_t manc, ret_val;
- retval = pci_set_power_state(pdev, PCI_D0);
- if (retval)
- DPRINTK(PROBE, ERR, "Error in setting power state\n");
+ pci_set_power_state(pdev, PCI_D0);
e1000_pci_restore_state(adapter);
ret_val = pci_enable_device(pdev);
pci_set_master(pdev);
- retval = pci_enable_wake(pdev, PCI_D3hot, 0);
- if (retval)
- DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
- retval = pci_enable_wake(pdev, PCI_D3cold, 0);
- if (retval)
- DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
e1000_reset(adapter);
E1000_WRITE_REG(&adapter->hw, WUS, ~0);
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index 9790db974dc1..048d052be29d 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -22,6 +22,7 @@
Contact Information:
Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index e0a4d37d1b85..e55f8969a0fb 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -22,6 +22,7 @@
Contact Information:
Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 2f7b86837fe8..8d680ce600d7 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -21,15 +21,15 @@
http://www.scyld.com/network/epic100.html
---------------------------------------------------------------------
-
+
Linux kernel-specific changes:
-
+
LK1.1.2 (jgarzik):
* Merge becker version 1.09 (4/08/2000)
LK1.1.3:
* Major bugfix to 1.09 driver (Francis Romieu)
-
+
LK1.1.4 (jgarzik):
* Merge becker test version 1.09 (5/29/2000)
@@ -66,7 +66,7 @@
LK1.1.14 (Kryzsztof Halasa):
* fix spurious bad initializations
* pound phy a la SMSC's app note on the subject
-
+
AC1.1.14ac
* fix power up/down for ethtool that broke in 1.11
@@ -244,7 +244,7 @@ static struct pci_device_id epic_pci_tbl[] = {
};
MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
-
+
#ifndef USE_IO_OPS
#undef inb
#undef inw
@@ -370,7 +370,7 @@ static int epic_close(struct net_device *dev);
static struct net_device_stats *epic_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
-
+
static int __devinit epic_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent)
@@ -392,9 +392,9 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
version, version2, version3);
#endif
-
+
card_idx++;
-
+
ret = pci_enable_device(pdev);
if (ret)
goto out;
@@ -405,7 +405,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
ret = -ENODEV;
goto err_out_disable;
}
-
+
pci_set_master(pdev);
ret = pci_request_regions(pdev, DRV_NAME);
@@ -498,7 +498,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
ep->pci_dev = pdev;
ep->chip_id = chip_idx;
ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
- ep->irq_mask =
+ ep->irq_mask =
(ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
| CntFull | TxUnderrun | EpicNapiEvent;
@@ -587,7 +587,7 @@ err_out_disable:
pci_disable_device(pdev);
goto out;
}
-
+
/* Serial EEPROM section. */
/* EEPROM_Ctrl bits. */
@@ -709,7 +709,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
outw(value, ioaddr + MIIData);
outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
- for (i = 10000; i > 0; i--) {
+ for (i = 10000; i > 0; i--) {
barrier();
if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
break;
@@ -717,7 +717,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
return;
}
-
+
static int epic_open(struct net_device *dev)
{
struct epic_private *ep = dev->priv;
@@ -760,7 +760,7 @@ static int epic_open(struct net_device *dev)
#endif
udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
-
+
for (i = 0; i < 3; i++)
outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
@@ -803,7 +803,7 @@ static int epic_open(struct net_device *dev)
/* Enable interrupts by setting the interrupt mask. */
outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
- | CntFull | TxUnderrun
+ | CntFull | TxUnderrun
| RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
if (debug > 1)
@@ -831,7 +831,7 @@ static void epic_pause(struct net_device *dev)
struct epic_private *ep = dev->priv;
netif_stop_queue (dev);
-
+
/* Disable interrupts by clearing the interrupt mask. */
outl(0x00000000, ioaddr + INTMASK);
/* Stop the chip's Tx and Rx DMA processes. */
@@ -987,7 +987,7 @@ static void epic_init_ring(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) {
ep->rx_ring[i].rxstatus = 0;
ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
- ep->rx_ring[i].next = ep->rx_ring_dma +
+ ep->rx_ring[i].next = ep->rx_ring_dma +
(i+1)*sizeof(struct epic_rx_desc);
ep->rx_skbuff[i] = NULL;
}
@@ -1002,7 +1002,7 @@ static void epic_init_ring(struct net_device *dev)
break;
skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* 16 byte align the IP header. */
- ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
+ ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
}
@@ -1013,7 +1013,7 @@ static void epic_init_ring(struct net_device *dev)
for (i = 0; i < TX_RING_SIZE; i++) {
ep->tx_skbuff[i] = NULL;
ep->tx_ring[i].txstatus = 0x0000;
- ep->tx_ring[i].next = ep->tx_ring_dma +
+ ep->tx_ring[i].next = ep->tx_ring_dma +
(i+1)*sizeof(struct epic_tx_desc);
}
ep->tx_ring[i-1].next = ep->tx_ring_dma;
@@ -1026,7 +1026,7 @@ static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
int entry, free_count;
u32 ctrl_word;
unsigned long flags;
-
+
if (skb->len < ETH_ZLEN) {
skb = skb_padto(skb, ETH_ZLEN);
if (skb == NULL)
@@ -1042,7 +1042,7 @@ static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
entry = ep->cur_tx % TX_RING_SIZE;
ep->tx_skbuff[entry] = skb;
- ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
+ ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
skb->len, PCI_DMA_TODEVICE);
if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
@@ -1126,7 +1126,7 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep)
/* Free the original skb. */
skb = ep->tx_skbuff[entry];
- pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
+ pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
ep->tx_skbuff[entry] = NULL;
@@ -1281,8 +1281,8 @@ static int epic_rx(struct net_device *dev, int budget)
ep->rx_buf_sz,
PCI_DMA_FROMDEVICE);
} else {
- pci_unmap_single(ep->pci_dev,
- ep->rx_ring[entry].bufaddr,
+ pci_unmap_single(ep->pci_dev,
+ ep->rx_ring[entry].bufaddr,
ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
skb_put(skb = ep->rx_skbuff[entry], pkt_len);
ep->rx_skbuff[entry] = NULL;
@@ -1307,7 +1307,7 @@ static int epic_rx(struct net_device *dev, int budget)
break;
skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
- ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
+ ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
work_done++;
}
@@ -1403,7 +1403,7 @@ static int epic_close(struct net_device *dev)
ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
ep->rx_ring[i].buflength = 0;
if (skb) {
- pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
+ pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
}
@@ -1414,7 +1414,7 @@ static int epic_close(struct net_device *dev)
ep->tx_skbuff[i] = NULL;
if (!skb)
continue;
- pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
+ pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
}
@@ -1607,7 +1607,7 @@ static void __devexit epic_remove_one (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct epic_private *ep = dev->priv;
-
+
pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
unregister_netdev(dev);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index feb5b223cd60..5669b95162b3 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -107,6 +107,7 @@
* 0.52: 20 Jan 2006: Add MSI/MSIX support.
* 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
* 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
+ * 0.55: 22 Mar 2006: Add flow control (pause frame).
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
@@ -118,7 +119,7 @@
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic.
*/
-#define FORCEDETH_VERSION "0.54"
+#define FORCEDETH_VERSION "0.55"
#define DRV_NAME "forcedeth"
#include <linux/module.h>
@@ -163,6 +164,7 @@
#define DEV_HAS_MSI 0x0040 /* device supports MSI */
#define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */
+#define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */
enum {
NvRegIrqStatus = 0x000,
@@ -203,6 +205,7 @@ enum {
NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
NvRegMisc1 = 0x080,
+#define NVREG_MISC1_PAUSE_TX 0x01
#define NVREG_MISC1_HD 0x02
#define NVREG_MISC1_FORCE 0x3b0f3c
@@ -214,7 +217,8 @@ enum {
#define NVREG_XMITSTAT_BUSY 0x01
NvRegPacketFilterFlags = 0x8c,
-#define NVREG_PFF_ALWAYS 0x7F0008
+#define NVREG_PFF_PAUSE_RX 0x08
+#define NVREG_PFF_ALWAYS 0x7F0000
#define NVREG_PFF_PROMISC 0x80
#define NVREG_PFF_MYADDR 0x20
@@ -277,6 +281,9 @@ enum {
#define NVREG_TXRXCTL_VLANINS 0x00080
NvRegTxRingPhysAddrHigh = 0x148,
NvRegRxRingPhysAddrHigh = 0x14C,
+ NvRegTxPauseFrame = 0x170,
+#define NVREG_TX_PAUSEFRAME_DISABLE 0x1ff0080
+#define NVREG_TX_PAUSEFRAME_ENABLE 0x0c00030
NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
@@ -451,7 +458,7 @@ typedef union _ring_type {
#define RX_RING 128
#define TX_RING 256
-/*
+/*
* If your nic mysteriously hangs then try to reduce the limits
* to 1/0: It might be required to set NV_TX_LASTPACKET in the
* last valid ring entry. But this would be impossible to
@@ -473,7 +480,7 @@ typedef union _ring_type {
#define POLL_WAIT (1+HZ/100)
#define LINK_TIMEOUT (3*HZ)
-/*
+/*
* desc_ver values:
* The nic supports three different descriptor types:
* - DESC_VER_1: Original
@@ -506,13 +513,10 @@ typedef union _ring_type {
#define PHY_1000 0x2
#define PHY_HALF 0x100
-/* FIXME: MII defines that should be added to <linux/mii.h> */
-#define MII_1000BT_CR 0x09
-#define MII_1000BT_SR 0x0a
-#define ADVERTISE_1000FULL 0x0200
-#define ADVERTISE_1000HALF 0x0100
-#define LPA_1000FULL 0x0800
-#define LPA_1000HALF 0x0400
+#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
+#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
+#define NV_PAUSEFRAME_RX_ENABLE 0x0004
+#define NV_PAUSEFRAME_TX_ENABLE 0x0008
/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS 8
@@ -602,6 +606,9 @@ struct fe_priv {
/* msi/msi-x fields */
u32 msi_flags;
struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
+
+ /* flow control */
+ u32 pause_flags;
};
/*
@@ -612,7 +619,7 @@ static int max_interrupt_work = 5;
/*
* Optimization can be either throuput mode or cpu mode
- *
+ *
* Throughput Mode: Every tx and rx packet will generate an interrupt.
* CPU Mode: Interrupts are controlled by a timer.
*/
@@ -860,7 +867,7 @@ static int phy_init(struct net_device *dev)
/* set advertise register */
reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
- reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|0x800|0x400);
+ reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
@@ -873,14 +880,14 @@ static int phy_init(struct net_device *dev)
mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
if (mii_status & PHY_GIGABIT) {
np->gigabit = PHY_GIGABIT;
- mii_control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+ mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
mii_control_1000 &= ~ADVERTISE_1000HALF;
if (phyinterface & PHY_RGMII)
mii_control_1000 |= ADVERTISE_1000FULL;
else
mii_control_1000 &= ~ADVERTISE_1000FULL;
- if (mii_rw(dev, np->phyaddr, MII_1000BT_CR, mii_control_1000)) {
+ if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
@@ -918,6 +925,8 @@ static int phy_init(struct net_device *dev)
return PHY_ERROR;
}
}
+ /* some phys clear out pause advertisement on reset, set it back */
+ mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
/* restart auto negotiation */
mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -1110,7 +1119,7 @@ static void nv_do_rx_refill(unsigned long data)
}
}
-static void nv_init_rx(struct net_device *dev)
+static void nv_init_rx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
int i;
@@ -1174,7 +1183,7 @@ static void nv_drain_tx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
unsigned int i;
-
+
for (i = 0; i < TX_RING; i++) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
np->tx_ring.orig[i].FlagLen = 0;
@@ -1320,7 +1329,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
- }
+ }
dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
dev->name, np->next_tx, entries, tx_flags_extra);
@@ -1395,7 +1404,7 @@ static void nv_tx_done(struct net_device *dev)
} else {
np->stats.tx_packets++;
np->stats.tx_bytes += skb->len;
- }
+ }
}
}
nv_release_txskb(dev, i);
@@ -1441,7 +1450,7 @@ static void nv_tx_timeout(struct net_device *dev)
for (i=0;i<TX_RING;i+= 4) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
- i,
+ i,
le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
le32_to_cpu(np->tx_ring.orig[i].FlagLen),
le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
@@ -1452,7 +1461,7 @@ static void nv_tx_timeout(struct net_device *dev)
le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
} else {
printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
- i,
+ i,
le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
le32_to_cpu(np->tx_ring.ex[i].FlagLen),
@@ -1550,7 +1559,6 @@ static void nv_rx_process(struct net_device *dev)
u32 Flags;
u32 vlanflags = 0;
-
for (;;) {
struct sk_buff *skb;
int len;
@@ -1901,7 +1909,9 @@ static int nv_update_linkspeed(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
- int adv, lpa;
+ int adv = 0;
+ int lpa = 0;
+ int adv_lpa, adv_pause, lpa_pause;
int newls = np->linkspeed;
int newdup = np->duplex;
int mii_status;
@@ -1954,8 +1964,8 @@ static int nv_update_linkspeed(struct net_device *dev)
retval = 1;
if (np->gigabit == PHY_GIGABIT) {
- control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
- status_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_SR, MII_READ);
+ control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
+ status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
if ((control_1000 & ADVERTISE_1000FULL) &&
(status_1000 & LPA_1000FULL)) {
@@ -1973,21 +1983,21 @@ static int nv_update_linkspeed(struct net_device *dev)
dev->name, adv, lpa);
/* FIXME: handle parallel detection properly */
- lpa = lpa & adv;
- if (lpa & LPA_100FULL) {
+ adv_lpa = lpa & adv;
+ if (adv_lpa & LPA_100FULL) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
newdup = 1;
- } else if (lpa & LPA_100HALF) {
+ } else if (adv_lpa & LPA_100HALF) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
newdup = 0;
- } else if (lpa & LPA_10FULL) {
+ } else if (adv_lpa & LPA_10FULL) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 1;
- } else if (lpa & LPA_10HALF) {
+ } else if (adv_lpa & LPA_10HALF) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 0;
} else {
- dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, lpa);
+ dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 0;
}
@@ -2030,6 +2040,56 @@ set_speed:
writel(np->linkspeed, base + NvRegLinkSpeed);
pci_push(base);
+ /* setup pause frame based on advertisement and link partner */
+ np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
+
+ if (np->duplex != 0) {
+ adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
+ lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
+
+ switch (adv_pause) {
+ case (ADVERTISE_PAUSE_CAP):
+ if (lpa_pause & LPA_PAUSE_CAP) {
+ np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE;
+ }
+ break;
+ case (ADVERTISE_PAUSE_ASYM):
+ if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
+ {
+ np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+ }
+ break;
+ case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM):
+ if (lpa_pause & LPA_PAUSE_CAP)
+ {
+ np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE;
+ }
+ if (lpa_pause == LPA_PAUSE_ASYM)
+ {
+ np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+ }
+ break;
+ }
+ }
+
+ if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
+ u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
+ if (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE)
+ writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
+ else
+ writel(pff, base + NvRegPacketFilterFlags);
+ }
+ if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
+ u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
+ if (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
+ writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame);
+ writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
+ } else {
+ writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
+ writel(regmisc, base + NvRegMisc1);
+ }
+ }
+
return retval;
}
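This block resolves flow control from the local advertisement (adv) and the link-partner ability (lpa) in the usual IEEE 802.3 fashion, and only when the link is full duplex; it is also why the earlier hunk stops overwriting lpa with "lpa & adv" and introduces adv_lpa instead, keeping the raw partner word available here. A condensed re-expression of the same decision table; resolve_pause() and the FC_* flags are made-up names, while the ADVERTISE_/LPA_ bits are the <linux/mii.h> definitions the driver uses:

	#include <linux/mii.h>

	#define FC_RX 0x1	/* honour received pause frames */
	#define FC_TX 0x2	/* transmit pause frames        */

	static unsigned int resolve_pause(unsigned int adv, unsigned int lpa)
	{
		unsigned int adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		unsigned int lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
		unsigned int fc = 0;

		if (adv_pause == ADVERTISE_PAUSE_CAP && (lpa_pause & LPA_PAUSE_CAP))
			fc = FC_RX | FC_TX;		/* symmetric pause on both ends */
		else if (adv_pause == ADVERTISE_PAUSE_ASYM &&
			 lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
			fc = FC_TX;			/* we send, partner honours     */
		else if (adv_pause == (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) {
			if (lpa_pause & LPA_PAUSE_CAP)
				fc = FC_RX | FC_TX;
			else if (lpa_pause == LPA_PAUSE_ASYM)
				fc = FC_RX;		/* partner sends, we honour     */
		}
		return fc;
	}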
@@ -2090,7 +2150,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
spin_lock(&np->lock);
nv_tx_done(dev);
spin_unlock(&np->lock);
-
+
nv_rx_process(dev);
if (nv_alloc_rx(dev)) {
spin_lock(&np->lock);
@@ -2098,7 +2158,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
spin_unlock(&np->lock);
}
-
+
if (events & NVREG_IRQ_LINK) {
spin_lock(&np->lock);
nv_link_irq(dev);
@@ -2163,7 +2223,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
spin_lock_irq(&np->lock);
nv_tx_done(dev);
spin_unlock_irq(&np->lock);
-
+
if (events & (NVREG_IRQ_TX_ERR)) {
dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
dev->name, events);
@@ -2206,7 +2266,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
if (!(events & np->irqmask))
break;
-
+
nv_rx_process(dev);
if (nv_alloc_rx(dev)) {
spin_lock_irq(&np->lock);
@@ -2214,7 +2274,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
spin_unlock_irq(&np->lock);
}
-
+
if (i > max_interrupt_work) {
spin_lock_irq(&np->lock);
/* disable interrupts on the nic */
@@ -2253,7 +2313,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
if (!(events & np->irqmask))
break;
-
+
if (events & NVREG_IRQ_LINK) {
spin_lock_irq(&np->lock);
nv_link_irq(dev);
@@ -2326,7 +2386,7 @@ static void nv_do_nic_poll(unsigned long data)
np->nic_poll_irq = 0;
/* FIXME: Do we need synchronize_irq(dev->irq) here? */
-
+
writel(mask, base + NvRegIrqMask);
pci_push(base);
@@ -2441,7 +2501,7 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if (adv & ADVERTISE_100FULL)
ecmd->advertising |= ADVERTISED_100baseT_Full;
if (np->autoneg && np->gigabit == PHY_GIGABIT) {
- adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+ adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
if (adv & ADVERTISE_1000FULL)
ecmd->advertising |= ADVERTISED_1000baseT_Full;
}
@@ -2505,23 +2565,23 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
/* advertise only what has been requested */
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
- adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
if (ecmd->advertising & ADVERTISED_10baseT_Half)
adv |= ADVERTISE_10HALF;
if (ecmd->advertising & ADVERTISED_10baseT_Full)
- adv |= ADVERTISE_10FULL;
+ adv |= ADVERTISE_10FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
if (ecmd->advertising & ADVERTISED_100baseT_Half)
adv |= ADVERTISE_100HALF;
if (ecmd->advertising & ADVERTISED_100baseT_Full)
- adv |= ADVERTISE_100FULL;
+ adv |= ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
if (np->gigabit == PHY_GIGABIT) {
- adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+ adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
adv &= ~ADVERTISE_1000FULL;
if (ecmd->advertising & ADVERTISED_1000baseT_Full)
adv |= ADVERTISE_1000FULL;
- mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
+ mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
}
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -2534,22 +2594,22 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
np->autoneg = 0;
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
- adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
adv |= ADVERTISE_10HALF;
if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
- adv |= ADVERTISE_10FULL;
+ adv |= ADVERTISE_10FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
adv |= ADVERTISE_100HALF;
if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
- adv |= ADVERTISE_100FULL;
+ adv |= ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
np->fixed_mode = adv;
if (np->gigabit == PHY_GIGABIT) {
- adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+ adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
adv &= ~ADVERTISE_1000FULL;
- mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
+ mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
}
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -2829,6 +2889,9 @@ static int nv_open(struct net_device *dev)
writel(0, base + NvRegAdapterControl);
+ if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
+ writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
+
/* 2) initialize descriptor rings */
set_bufsize(dev);
oom = nv_init_ring(dev);
@@ -3114,6 +3177,12 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->msi_flags |= NV_MSI_X_CAPABLE;
}
+ np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE;
+ if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
+ np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE;
+ }
+
+
err = -ENOMEM;
np->base = ioremap(addr, np->register_size);
if (!np->base)
@@ -3260,7 +3329,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
pci_name(pci_dev));
goto out_freering;
}
-
+
/* reset it */
phy_init(dev);
@@ -3374,11 +3443,11 @@ static struct pci_device_id pci_tbl[] = {
},
{ /* MCP55 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX,
},
{ /* MCP55 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX,
},
{0,},
};
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 01ad904215a1..51fd51609ea9 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -1,4 +1,4 @@
-/*
+/*
net-3-driver for the IBM LAN Adapter/A
This is an extension to the Linux operating system, and is covered by the
@@ -11,9 +11,9 @@ This driver is based both on the SK_MCA driver, which is itself based on the
SK_G16 and 3C523 driver.
paper sources:
- 'PC Hardware: Aufbau, Funktionsweise, Programmierung' by
+ 'PC Hardware: Aufbau, Funktionsweise, Programmierung' by
Hans-Peter Messmer for the basic Microchannel stuff
-
+
'Linux Geraetetreiber' by Allesandro Rubini, Kalle Dalheimer
for help on Ethernet driver programming
@@ -27,14 +27,14 @@ paper sources:
special acknowledgements to:
- Bob Eager for helping me out with documentation from IBM
- - Jim Shorney for his endless patience with me while I was using
+ - Jim Shorney for his endless patience with me while I was using
him as a beta tester to trace down the address filter bug ;-)
Missing things:
-> set debug level via ioctl instead of compile-time switches
-> I didn't follow the development of the 2.1.x kernels, so my
- assumptions about which things changed with which kernel version
+ assumptions about which things changed with which kernel version
are probably nonsense
History:
@@ -275,7 +275,7 @@ static void InitDscrs(struct net_device *dev)
priv->rrastart = raddr = priv->txbufstart + (TXBUFCNT * PKTSIZE);
priv->rdastart = addr = priv->rrastart + (priv->rxbufcnt * sizeof(rra_t));
priv->rxbufstart = baddr = priv->rdastart + (priv->rxbufcnt * sizeof(rda_t));
-
+
for (z = 0; z < priv->rxbufcnt; z++) {
rra.startlo = baddr;
rra.starthi = 0;
@@ -570,7 +570,7 @@ static void irqrx_handler(struct net_device *dev)
lrdaaddr = priv->rdastart + (priv->lastrxdescr * sizeof(rda_t));
memcpy_fromio(&rda, priv->base + rdaaddr, sizeof(rda_t));
- /* iron out upper word halves of fields we use - SONIC will duplicate
+ /* iron out upper word halves of fields we use - SONIC will duplicate
bits 0..15 to 16..31 */
rda.status &= 0xffff;
@@ -836,9 +836,9 @@ static int ibmlana_tx(struct sk_buff *skb, struct net_device *dev)
baddr = priv->txbufstart + (priv->nexttxdescr * PKTSIZE);
memcpy_toio(priv->base + baddr, skb->data, skb->len);
- /* copy filler into RAM - in case we're filling up...
+ /* copy filler into RAM - in case we're filling up...
we're filling a bit more than necessary, but that doesn't harm
- since the buffer is far larger...
+ since the buffer is far larger...
Sorry Linus for the filler string but I couldn't resist ;-) */
if (tmplen > skb->len) {
@@ -952,7 +952,7 @@ static int ibmlana_probe(struct net_device *dev)
priv->realirq = irq;
priv->medium = medium;
spin_lock_init(&priv->lock);
-
+
/* set base + irq for this device (irq not allocated so far) */
diff --git a/drivers/net/ibmlana.h b/drivers/net/ibmlana.h
index 458ee226e537..6b58bab9e308 100644
--- a/drivers/net/ibmlana.h
+++ b/drivers/net/ibmlana.h
@@ -17,7 +17,7 @@
/* media enumeration - defined in a way that it fits onto the LAN/A's
POS registers... */
-typedef enum {
+typedef enum {
Media_10BaseT, Media_10Base5,
Media_Unknown, Media_10Base2, Media_Count
} ibmlana_medium;
@@ -27,7 +27,7 @@ typedef enum {
typedef struct {
unsigned int slot; /* MCA-Slot-# */
struct net_device_stats stat; /* packet statistics */
- int realirq; /* memorizes actual IRQ, even when
+ int realirq; /* memorizes actual IRQ, even when
currently not allocated */
ibmlana_medium medium; /* physical cannector */
u32 tdastart, txbufstart, /* addresses */
@@ -41,7 +41,7 @@ typedef struct {
spinlock_t lock;
} ibmlana_priv;
-/* this card uses quite a lot of I/O ports...luckily the MCA bus decodes
+/* this card uses quite a lot of I/O ports...luckily the MCA bus decodes
a full 64K I/O range... */
#define IBM_LANA_IORANGE 0xa0
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 52d01027d9e7..666346f6469e 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -24,7 +24,7 @@
/* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN */
/* option of the RS/6000 Platform Architechture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor. */
-/* */
+/* */
/**************************************************************************/
/*
TODO:
@@ -79,7 +79,7 @@
#else
#define ibmveth_debug_printk_no_adapter(fmt, args...)
#define ibmveth_debug_printk(fmt, args...)
-#define ibmveth_assert(expr)
+#define ibmveth_assert(expr)
#endif
static int ibmveth_open(struct net_device *dev);
@@ -96,6 +96,7 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
+static struct kobj_type ktype_veth_pool;
#ifdef CONFIG_PROC_FS
#define IBMVETH_PROC_DIR "net/ibmveth"
@@ -133,12 +134,13 @@ static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
}
/* setup the initial settings for a buffer pool */
-static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size)
+static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active)
{
pool->size = pool_size;
pool->index = pool_index;
pool->buff_size = buff_size;
pool->threshold = pool_size / 2;
+ pool->active = pool_active;
}
/* allocate and setup an buffer pool - called during open */
@@ -146,13 +148,13 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
int i;
- pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
+ pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
if(!pool->free_map) {
return -1;
}
- pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
+ pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
if(!pool->dma_addr) {
kfree(pool->free_map);
pool->free_map = NULL;
@@ -180,7 +182,6 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
atomic_set(&pool->available, 0);
pool->producer_index = 0;
pool->consumer_index = 0;
- pool->active = 0;
return 0;
}
@@ -214,7 +215,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
free_index = pool->consumer_index++ % pool->size;
index = pool->free_map[free_index];
-
+
ibmveth_assert(index != IBM_VETH_INVALID_MAP);
ibmveth_assert(pool->skbuff[index] == NULL);
@@ -231,10 +232,10 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
desc.desc = 0;
desc.fields.valid = 1;
desc.fields.length = pool->buff_size;
- desc.fields.address = dma_addr;
+ desc.fields.address = dma_addr;
lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
-
+
if(lpar_rc != H_SUCCESS) {
pool->free_map[free_index] = index;
pool->skbuff[index] = NULL;
@@ -250,13 +251,13 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
adapter->replenish_add_buff_success++;
}
}
-
+
mb();
atomic_add(buffers_added, &(pool->available));
}
/* replenish routine */
-static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
+static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
int i;
@@ -264,7 +265,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
for(i = 0; i < IbmVethNumBufferPools; i++)
if(adapter->rx_buff_pool[i].active)
- ibmveth_replenish_buffer_pool(adapter,
+ ibmveth_replenish_buffer_pool(adapter,
&adapter->rx_buff_pool[i]);
adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
@@ -301,7 +302,6 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
kfree(pool->skbuff);
pool->skbuff = NULL;
}
- pool->active = 0;
}
/* remove a buffer from a pool */
@@ -372,7 +372,7 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
-
+
if(lpar_rc != H_SUCCESS) {
ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
@@ -407,7 +407,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
}
free_page((unsigned long)adapter->buffer_list_addr);
adapter->buffer_list_addr = NULL;
- }
+ }
if(adapter->filter_list_addr != NULL) {
if(!dma_mapping_error(adapter->filter_list_dma)) {
@@ -433,7 +433,9 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
}
for(i = 0; i<IbmVethNumBufferPools; i++)
- ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]);
+ if (adapter->rx_buff_pool[i].active)
+ ibmveth_free_buffer_pool(adapter,
+ &adapter->rx_buff_pool[i]);
}
static int ibmveth_open(struct net_device *netdev)
@@ -450,10 +452,10 @@ static int ibmveth_open(struct net_device *netdev)
for(i = 0; i<IbmVethNumBufferPools; i++)
rxq_entries += adapter->rx_buff_pool[i].size;
-
+
adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
-
+
if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
ibmveth_cleanup(adapter);
@@ -489,9 +491,6 @@ static int ibmveth_open(struct net_device *netdev)
adapter->rx_queue.num_slots = rxq_entries;
adapter->rx_queue.toggle = 1;
- /* call change_mtu to init the buffer pools based in initial mtu */
- ibmveth_change_mtu(netdev, netdev->mtu);
-
memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
mac_address = mac_address >> 16;
@@ -504,7 +503,7 @@ static int ibmveth_open(struct net_device *netdev)
ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
-
+
lpar_rc = h_register_logical_lan(adapter->vdev->unit_address,
adapter->buffer_list_dma,
rxq_desc.desc,
@@ -519,7 +518,18 @@ static int ibmveth_open(struct net_device *netdev)
rxq_desc.desc,
mac_address);
ibmveth_cleanup(adapter);
- return -ENONET;
+ return -ENONET;
+ }
+
+ for(i = 0; i<IbmVethNumBufferPools; i++) {
+ if(!adapter->rx_buff_pool[i].active)
+ continue;
+ if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
+ ibmveth_error_printk("unable to alloc pool\n");
+ adapter->rx_buff_pool[i].active = 0;
+ ibmveth_cleanup(adapter);
+ return -ENOMEM ;
+ }
}
ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
@@ -547,10 +557,11 @@ static int ibmveth_close(struct net_device *netdev)
{
struct ibmveth_adapter *adapter = netdev->priv;
long lpar_rc;
-
+
ibmveth_debug_printk("close starting\n");
- netif_stop_queue(netdev);
+ if (!adapter->pool_config)
+ netif_stop_queue(netdev);
free_irq(netdev->irq, netdev);
@@ -694,7 +705,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
desc[5].desc,
correlator);
} while ((lpar_rc == H_BUSY) && (retry_count--));
-
+
if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
int i;
ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
@@ -780,7 +791,7 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
/* more work to do - return that we are not done yet */
netdev->quota -= frames_processed;
*budget -= frames_processed;
- return 1;
+ return 1;
}
/* we think we are done - reenable interrupts, then check once more to make sure we are done */
@@ -806,7 +817,7 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
}
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
-{
+{
struct net_device *netdev = dev_instance;
struct ibmveth_adapter *adapter = netdev->priv;
unsigned long lpar_rc;
@@ -862,7 +873,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
}
}
-
+
/* re-enable filtering */
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastEnableFiltering,
@@ -876,46 +887,22 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
struct ibmveth_adapter *adapter = dev->priv;
+ int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
int i;
- int prev_smaller = 1;
- if ((new_mtu < 68) ||
- (new_mtu > (pool_size[IbmVethNumBufferPools-1]) - IBMVETH_BUFF_OH))
+ if (new_mtu < IBMVETH_MAX_MTU)
return -EINVAL;
+ /* Look for an active buffer pool that can hold the new MTU */
for(i = 0; i<IbmVethNumBufferPools; i++) {
- int activate = 0;
- if (new_mtu > (pool_size[i] - IBMVETH_BUFF_OH)) {
- activate = 1;
- prev_smaller= 1;
- } else {
- if (prev_smaller)
- activate = 1;
- prev_smaller= 0;
+ if (!adapter->rx_buff_pool[i].active)
+ continue;
+ if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
+ dev->mtu = new_mtu;
+ return 0;
}
-
- if (activate && !adapter->rx_buff_pool[i].active) {
- struct ibmveth_buff_pool *pool =
- &adapter->rx_buff_pool[i];
- if(ibmveth_alloc_buffer_pool(pool)) {
- ibmveth_error_printk("unable to alloc pool\n");
- return -ENOMEM;
- }
- adapter->rx_buff_pool[i].active = 1;
- } else if (!activate && adapter->rx_buff_pool[i].active) {
- adapter->rx_buff_pool[i].active = 0;
- h_free_logical_lan_buffer(adapter->vdev->unit_address,
- (u64)pool_size[i]);
- }
-
}
-
- /* kick the interrupt handler so that the new buffer pools get
- replenished or deallocated */
- ibmveth_interrupt(dev->irq, dev, NULL);
-
- dev->mtu = new_mtu;
- return 0;
+ return -EINVAL;
}
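The rewritten ibmveth_change_mtu() no longer activates or frees pools itself: it rejects MTUs below 68 (the IBMVETH_MAX_MTU macro added in ibmveth.h acts as a lower bound here despite its name) and otherwise accepts the new MTU only if some already-active pool can hold new_mtu plus the 22-byte IBMVETH_BUFF_OH overhead (14-byte Ethernet header plus 8-byte opaque handle), leaving pool management to the new sysfs attributes. A minimal sketch of that fit test, with a stand-in pool type:

	/* Sketch of the "does an active pool fit new_mtu + overhead" test;
	 * struct pool is an illustrative stand-in for ibmveth_buff_pool. */
	#define BUFF_OH 22	/* 14-byte Ethernet header + 8-byte opaque handle */

	struct pool { int active; unsigned int buff_size; };

	static int mtu_fits(const struct pool *pools, int npools, int new_mtu)
	{
		int i, need = new_mtu + BUFF_OH;

		for (i = 0; i < npools; i++)
			if (pools[i].active && need < pools[i].buff_size)
				return 1;	/* some active pool can hold the frame */
		return 0;
	}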
static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
@@ -928,7 +915,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
unsigned int *mcastFilterSize_p;
- ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
+ ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
dev->unit_address);
mac_addr_p = (unsigned char *) vio_get_attribute(dev, VETH_MAC_ADDR, 0);
@@ -937,7 +924,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
"attribute\n", __FILE__, __LINE__);
return 0;
}
-
+
mcastFilterSize_p= (unsigned int *) vio_get_attribute(dev, VETH_MCAST_FILTER_SIZE, 0);
if(!mcastFilterSize_p) {
printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
@@ -945,7 +932,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
__FILE__, __LINE__);
return 0;
}
-
+
netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
if(!netdev)
@@ -960,13 +947,14 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
adapter->vdev = dev;
adapter->netdev = netdev;
adapter->mcastFilterSize= *mcastFilterSize_p;
-
+ adapter->pool_config = 0;
+
/* Some older boxes running PHYP non-natively have an OF that
- returns a 8-byte local-mac-address field (and the first
+ returns a 8-byte local-mac-address field (and the first
2 bytes have to be ignored) while newer boxes' OF return
- a 6-byte field. Note that IEEE 1275 specifies that
+ a 6-byte field. Note that IEEE 1275 specifies that
local-mac-address must be a 6-byte field.
- The RPA doc specifies that the first byte must be 10b, so
+ The RPA doc specifies that the first byte must be 10b, so
we'll just look for it to solve this 8 vs. 6 byte field issue */
if ((*mac_addr_p & 0x3) != 0x02)
@@ -976,7 +964,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
memcpy(&adapter->mac_addr, mac_addr_p, 6);
adapter->liobn = dev->iommu_table->it_index;
-
+
netdev->irq = dev->irq;
netdev->open = ibmveth_open;
netdev->poll = ibmveth_poll;
@@ -989,14 +977,21 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
netdev->ethtool_ops = &netdev_ethtool_ops;
netdev->change_mtu = ibmveth_change_mtu;
SET_NETDEV_DEV(netdev, &dev->dev);
- netdev->features |= NETIF_F_LLTX;
+ netdev->features |= NETIF_F_LLTX;
spin_lock_init(&adapter->stats_lock);
memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
- for(i = 0; i<IbmVethNumBufferPools; i++)
- ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
- pool_count[i], pool_size[i]);
+ for(i = 0; i<IbmVethNumBufferPools; i++) {
+ struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
+ ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+ pool_count[i], pool_size[i],
+ pool_active[i]);
+ kobj->parent = &dev->dev.kobj;
+ sprintf(kobj->name, "pool%d", i);
+ kobj->ktype = &ktype_veth_pool;
+ kobject_register(kobj);
+ }
ibmveth_debug_printk("adapter @ 0x%p\n", adapter);
@@ -1025,6 +1020,10 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
{
struct net_device *netdev = dev->dev.driver_data;
struct ibmveth_adapter *adapter = netdev->priv;
+ int i;
+
+ for(i = 0; i<IbmVethNumBufferPools; i++)
+ kobject_unregister(&adapter->rx_buff_pool[i].kobj);
unregister_netdev(netdev);
@@ -1048,7 +1047,7 @@ static void ibmveth_proc_unregister_driver(void)
remove_proc_entry(IBMVETH_PROC_DIR, NULL);
}
-static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
+static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
{
if (*pos == 0) {
return (void *)1;
@@ -1063,18 +1062,18 @@ static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
return NULL;
}
-static void ibmveth_seq_stop(struct seq_file *seq, void *v)
+static void ibmveth_seq_stop(struct seq_file *seq, void *v)
{
}
-static int ibmveth_seq_show(struct seq_file *seq, void *v)
+static int ibmveth_seq_show(struct seq_file *seq, void *v)
{
struct ibmveth_adapter *adapter = seq->private;
char *current_mac = ((char*) &adapter->netdev->dev_addr);
char *firmware_mac = ((char*) &adapter->mac_addr) ;
seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);
-
+
seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address);
seq_printf(seq, "LIOBN: 0x%lx\n", adapter->liobn);
seq_printf(seq, "Current MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
@@ -1083,7 +1082,7 @@ static int ibmveth_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "Firmware MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
firmware_mac[0], firmware_mac[1], firmware_mac[2],
firmware_mac[3], firmware_mac[4], firmware_mac[5]);
-
+
seq_printf(seq, "\nAdapter Statistics:\n");
seq_printf(seq, " TX: skbuffs linearized: %ld\n", adapter->tx_linearized);
seq_printf(seq, " multi-descriptor sends: %ld\n", adapter->tx_multidesc_send);
@@ -1095,7 +1094,7 @@ static int ibmveth_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, " add buffer failures: %ld\n", adapter->replenish_add_buff_failure);
seq_printf(seq, " invalid buffers: %ld\n", adapter->rx_invalid_buffer);
seq_printf(seq, " no buffers: %ld\n", adapter->rx_no_buffer);
-
+
return 0;
}
static struct seq_operations ibmveth_seq_ops = {
@@ -1153,11 +1152,11 @@ static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
}
#else /* CONFIG_PROC_FS */
-static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
+static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
}
-static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
+static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
}
static void ibmveth_proc_register_driver(void)
@@ -1169,6 +1168,132 @@ static void ibmveth_proc_unregister_driver(void)
}
#endif /* CONFIG_PROC_FS */
+static struct attribute veth_active_attr;
+static struct attribute veth_num_attr;
+static struct attribute veth_size_attr;
+
+static ssize_t veth_pool_show(struct kobject * kobj,
+ struct attribute * attr, char * buf)
+{
+ struct ibmveth_buff_pool *pool = container_of(kobj,
+ struct ibmveth_buff_pool,
+ kobj);
+
+ if (attr == &veth_active_attr)
+ return sprintf(buf, "%d\n", pool->active);
+ else if (attr == &veth_num_attr)
+ return sprintf(buf, "%d\n", pool->size);
+ else if (attr == &veth_size_attr)
+ return sprintf(buf, "%d\n", pool->buff_size);
+ return 0;
+}
+
+static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr,
+const char * buf, size_t count)
+{
+ struct ibmveth_buff_pool *pool = container_of(kobj,
+ struct ibmveth_buff_pool,
+ kobj);
+ struct net_device *netdev =
+ container_of(kobj->parent, struct device, kobj)->driver_data;
+ struct ibmveth_adapter *adapter = netdev->priv;
+ long value = simple_strtol(buf, NULL, 10);
+ long rc;
+
+ if (attr == &veth_active_attr) {
+ if (value && !pool->active) {
+ if(ibmveth_alloc_buffer_pool(pool)) {
+ ibmveth_error_printk("unable to alloc pool\n");
+ return -ENOMEM;
+ }
+ pool->active = 1;
+ adapter->pool_config = 1;
+ ibmveth_close(netdev);
+ adapter->pool_config = 0;
+ if ((rc = ibmveth_open(netdev)))
+ return rc;
+ } else if (!value && pool->active) {
+ int mtu = netdev->mtu + IBMVETH_BUFF_OH;
+ int i;
+ /* Make sure there is a buffer pool with buffers that
+ can hold a packet of the size of the MTU */
+ for(i = 0; i<IbmVethNumBufferPools; i++) {
+ if (pool == &adapter->rx_buff_pool[i])
+ continue;
+ if (!adapter->rx_buff_pool[i].active)
+ continue;
+ if (mtu < adapter->rx_buff_pool[i].buff_size) {
+ pool->active = 0;
+ h_free_logical_lan_buffer(adapter->
+ vdev->
+ unit_address,
+ pool->
+ buff_size);
+ }
+ }
+ if (pool->active) {
+ ibmveth_error_printk("no active pool >= MTU\n");
+ return -EPERM;
+ }
+ }
+ } else if (attr == &veth_num_attr) {
+ if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
+ return -EINVAL;
+ else {
+ adapter->pool_config = 1;
+ ibmveth_close(netdev);
+ adapter->pool_config = 0;
+ pool->size = value;
+ if ((rc = ibmveth_open(netdev)))
+ return rc;
+ }
+ } else if (attr == &veth_size_attr) {
+ if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE)
+ return -EINVAL;
+ else {
+ adapter->pool_config = 1;
+ ibmveth_close(netdev);
+ adapter->pool_config = 0;
+ pool->buff_size = value;
+ if ((rc = ibmveth_open(netdev)))
+ return rc;
+ }
+ }
+
+ /* kick the interrupt handler to allocate/deallocate pools */
+ ibmveth_interrupt(netdev->irq, netdev, NULL);
+ return count;
+}
+
+
+#define ATTR(_name, _mode) \
+ struct attribute veth_##_name##_attr = { \
+ .name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE \
+ };
+
+static ATTR(active, 0644);
+static ATTR(num, 0644);
+static ATTR(size, 0644);
+
+static struct attribute * veth_pool_attrs[] = {
+ &veth_active_attr,
+ &veth_num_attr,
+ &veth_size_attr,
+ NULL,
+};
+
+static struct sysfs_ops veth_pool_ops = {
+ .show = veth_pool_show,
+ .store = veth_pool_store,
+};
+
+static struct kobj_type ktype_veth_pool = {
+ .release = NULL,
+ .sysfs_ops = &veth_pool_ops,
+ .default_attrs = veth_pool_attrs,
+};
+
+
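With the per-pool kobjects registered in ibmveth_probe(), each pool appears under the vio device as pool0..pool4 with active, num and size attributes, and veth_pool_store() bounces the interface (close/open with pool_config set) so a change takes effect with freshly allocated pools. A hedged userspace sketch of driving one of those attributes; the unit address in the path is a placeholder, as the real location depends on where the vio device sits in sysfs:

	/* Userspace sketch: bring a buffer pool online via its sysfs file.
	 * The "30000002" unit address below is illustrative only. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/devices/vio/30000002/pool2/active", "w");

		if (!f)
			return 1;
		fputs("1\n", f);	/* veth_pool_store() allocates the pool
					 * and reopens the interface */
		return fclose(f);
	}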
static struct vio_device_id ibmveth_device_table[] __devinitdata= {
{ "network", "IBM,l-lan"},
{ "", "" }
@@ -1198,7 +1323,7 @@ static void __exit ibmveth_module_exit(void)
{
vio_unregister_driver(&ibmveth_driver);
ibmveth_proc_unregister_driver();
-}
+}
module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index 46919a814fca..8385bf836507 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -75,10 +75,13 @@
#define IbmVethNumBufferPools 5
#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
+#define IBMVETH_MAX_MTU 68
+#define IBMVETH_MAX_POOL_COUNT 4096
+#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
-/* pool_size should be sorted */
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 768, 256, 256, 256 };
+static int pool_active[] = { 1, 1, 0, 0, 0};
#define IBM_VETH_INVALID_MAP ((u16)0xffff)
@@ -94,6 +97,7 @@ struct ibmveth_buff_pool {
dma_addr_t *dma_addr;
struct sk_buff **skbuff;
int active;
+ struct kobject kobj;
};
struct ibmveth_rx_q {
@@ -118,6 +122,7 @@ struct ibmveth_adapter {
dma_addr_t filter_list_dma;
struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools];
struct ibmveth_rx_q rx_queue;
+ int pool_config;
/* adapter specific stats */
u64 replenish_task_cycles;
@@ -134,7 +139,7 @@ struct ibmveth_adapter {
spinlock_t stats_lock;
};
-struct ibmveth_buf_desc_fields {
+struct ibmveth_buf_desc_fields {
u32 valid : 1;
u32 toggle : 1;
u32 reserved : 6;
@@ -143,7 +148,7 @@ struct ibmveth_buf_desc_fields {
};
union ibmveth_buf_desc {
- u64 desc;
+ u64 desc;
struct ibmveth_buf_desc_fields fields;
};
diff --git a/drivers/net/ixgb/Makefile b/drivers/net/ixgb/Makefile
index 7c7aff1ea7d5..a8a2d3d03567 100644
--- a/drivers/net/ixgb/Makefile
+++ b/drivers/net/ixgb/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
#
-# Copyright(c) 1999 - 2002 Intel Corporation. All rights reserved.
+# Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index c83271b38621..a83ef28dadb0 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -84,7 +84,12 @@ struct ixgb_adapter;
#define IXGB_DBG(args...)
#endif
-#define IXGB_ERR(args...) printk(KERN_ERR "ixgb: " args)
+#define PFX "ixgb: "
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+ (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+ printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+ __FUNCTION__ , ## args))
+
/* TX/RX descriptor defines */
#define DEFAULT_TXD 256
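The new DPRINTK macro above gates its printk on the NETIF_MSG_* bits in adapter->msg_enable and prefixes every message with the netdev name and the calling function, mirroring the e1000 convention. A hedged example of a call site and its approximate expansion (the message text is made up for illustration):

	/* illustrative call site */
	DPRINTK(PROBE, ERR, "EEPROM checksum is not valid\n");
	/* ...expands to roughly:
	 * (void)((NETIF_MSG_PROBE & adapter->msg_enable) &&
	 *        printk(KERN_ERR "ixgb: " "%s: %s: " "EEPROM checksum is not valid\n",
	 *               adapter->netdev->name, __FUNCTION__));
	 */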
@@ -175,6 +180,7 @@ struct ixgb_adapter {
uint64_t hw_csum_tx_good;
uint64_t hw_csum_tx_error;
uint32_t tx_int_delay;
+ uint32_t tx_timeout_count;
boolean_t tx_int_delay_enable;
boolean_t detect_tx_hung;
@@ -192,7 +198,9 @@ struct ixgb_adapter {
/* structs defined in ixgb_hw.h */
struct ixgb_hw hw;
+ u16 msg_enable;
struct ixgb_hw_stats stats;
+ uint32_t alloc_rx_buff_failed;
#ifdef CONFIG_PCI_MSI
boolean_t have_msi;
#endif
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index 661a46b95a61..8357c5590bfb 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/net/ixgb/ixgb_ee.h b/drivers/net/ixgb/ixgb_ee.h
index 5190aa8761a2..bf6fa220f38e 100644
--- a/drivers/net/ixgb/ixgb_ee.h
+++ b/drivers/net/ixgb/ixgb_ee.h
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index d38ade5f2f4e..cf19b898ba9b 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -44,6 +44,8 @@ extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
extern void ixgb_update_stats(struct ixgb_adapter *adapter);
+#define IXGB_ALL_RAR_ENTRIES 16
+
struct ixgb_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
@@ -76,6 +78,7 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
{"tx_heartbeat_errors", IXGB_STAT(net_stats.tx_heartbeat_errors)},
{"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)},
{"tx_deferred_ok", IXGB_STAT(stats.dc)},
+ {"tx_timeout_count", IXGB_STAT(tx_timeout_count) },
{"rx_long_length_errors", IXGB_STAT(stats.roc)},
{"rx_short_length_errors", IXGB_STAT(stats.ruc)},
#ifdef NETIF_F_TSO
@@ -117,6 +120,16 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
return 0;
}
+static void ixgb_set_speed_duplex(struct net_device *netdev)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ /* be optimistic about our link, since we were up before */
+ adapter->link_speed = 10000;
+ adapter->link_duplex = FULL_DUPLEX;
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+}
+
static int
ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
@@ -130,12 +143,7 @@ ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ixgb_down(adapter, TRUE);
ixgb_reset(adapter);
ixgb_up(adapter);
- /* be optimistic about our link, since we were up before */
- adapter->link_speed = 10000;
- adapter->link_duplex = FULL_DUPLEX;
- netif_carrier_on(netdev);
- netif_wake_queue(netdev);
-
+ ixgb_set_speed_duplex(netdev);
} else
ixgb_reset(adapter);
@@ -183,11 +191,7 @@ ixgb_set_pauseparam(struct net_device *netdev,
if(netif_running(adapter->netdev)) {
ixgb_down(adapter, TRUE);
ixgb_up(adapter);
- /* be optimistic about our link, since we were up before */
- adapter->link_speed = 10000;
- adapter->link_duplex = FULL_DUPLEX;
- netif_carrier_on(netdev);
- netif_wake_queue(netdev);
+ ixgb_set_speed_duplex(netdev);
} else
ixgb_reset(adapter);
@@ -212,11 +216,7 @@ ixgb_set_rx_csum(struct net_device *netdev, uint32_t data)
if(netif_running(netdev)) {
ixgb_down(adapter,TRUE);
ixgb_up(adapter);
- /* be optimistic about our link, since we were up before */
- adapter->link_speed = 10000;
- adapter->link_duplex = FULL_DUPLEX;
- netif_carrier_on(netdev);
- netif_wake_queue(netdev);
+ ixgb_set_speed_duplex(netdev);
} else
ixgb_reset(adapter);
return 0;
@@ -251,6 +251,19 @@ ixgb_set_tso(struct net_device *netdev, uint32_t data)
}
#endif /* NETIF_F_TSO */
+static uint32_t
+ixgb_get_msglevel(struct net_device *netdev)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void
+ixgb_set_msglevel(struct net_device *netdev, uint32_t data)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
#define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_
static int
@@ -303,7 +316,7 @@ ixgb_get_regs(struct net_device *netdev,
*reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */
/* there are 16 RAR entries in hardware, we only use 3 */
- for(i = 0; i < 16; i++) {
+ for(i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) {
*reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */
*reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */
}
@@ -593,11 +606,7 @@ ixgb_set_ringparam(struct net_device *netdev,
adapter->tx_ring = tx_new;
if((err = ixgb_up(adapter)))
return err;
- /* be optimistic about our link, since we were up before */
- adapter->link_speed = 10000;
- adapter->link_duplex = FULL_DUPLEX;
- netif_carrier_on(netdev);
- netif_wake_queue(netdev);
+ ixgb_set_speed_duplex(netdev);
}
return 0;
@@ -714,6 +723,8 @@ static struct ethtool_ops ixgb_ethtool_ops = {
.set_tx_csum = ixgb_set_tx_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
+ .get_msglevel = ixgb_get_msglevel,
+ .set_msglevel = ixgb_set_msglevel,
#ifdef NETIF_F_TSO
.get_tso = ethtool_op_get_tso,
.set_tso = ixgb_set_tso,
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index 620cad48bdea..f7fa10e47fa2 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h
index 382c6300ccc2..cb4568915ada 100644
--- a/drivers/net/ixgb/ixgb_hw.h
+++ b/drivers/net/ixgb/ixgb_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -57,6 +57,7 @@ typedef enum {
typedef enum {
ixgb_media_type_unknown = 0,
ixgb_media_type_fiber = 1,
+ ixgb_media_type_copper = 2,
ixgb_num_media_types
} ixgb_media_type;
diff --git a/drivers/net/ixgb/ixgb_ids.h b/drivers/net/ixgb/ixgb_ids.h
index aee207eaa287..40a085f94c7b 100644
--- a/drivers/net/ixgb/ixgb_ids.h
+++ b/drivers/net/ixgb/ixgb_ids.h
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -43,6 +43,8 @@
#define IXGB_SUBDEVICE_ID_A11F 0xA11F
#define IXGB_SUBDEVICE_ID_A01F 0xA01F
-#endif /* #ifndef _IXGB_IDS_H_ */
+#define IXGB_DEVICE_ID_82597EX_CX4 0x109E
+#define IXGB_SUBDEVICE_ID_A00C 0xA00C
+#endif /* #ifndef _IXGB_IDS_H_ */
/* End of File */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index cfd67d812f0d..57006fb8840e 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -28,22 +28,6 @@
#include "ixgb.h"
-/* Change Log
- * 1.0.96 04/19/05
- * - Make needlessly global code static -- bunk@stusta.de
- * - ethtool cleanup -- shemminger@osdl.org
- * - Support for MODULE_VERSION -- linville@tuxdriver.com
- * - add skb_header_cloned check to the tso path -- herbert@apana.org.au
- * 1.0.88 01/05/05
- * - include fix to the condition that determines when to quit NAPI - Robert Olsson
- * - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down
- * 1.0.84 10/26/04
- * - reset buffer_info->dma in Tx resource cleanup logic
- * 1.0.83 10/12/04
- * - sparse cleanup - shemminger@osdl.org
- * - fix tx resource cleanup logic
- */
-
char ixgb_driver_name[] = "ixgb";
static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
@@ -52,9 +36,9 @@ static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
-#define DRV_VERSION "1.0.100-k2"DRIVERNAPI
+#define DRV_VERSION "1.0.109-k2"DRIVERNAPI
char ixgb_driver_version[] = DRV_VERSION;
-static char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
+static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
/* ixgb_pci_tbl - PCI Device ID Table
*
@@ -67,6 +51,8 @@ static char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
static struct pci_device_id ixgb_pci_tbl[] = {
{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
@@ -148,6 +134,11 @@ MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+static int debug = DEFAULT_DEBUG_LEVEL_SHIFT;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
/* some defines for controlling descriptor fetches in h/w */
#define RXDCTL_WTHRESH_DEFAULT 16 /* chip writes back at this many or RXT0 */
#define RXDCTL_PTHRESH_DEFAULT 0 /* chip considers prefetch below
@@ -196,7 +187,7 @@ module_exit(ixgb_exit_module);
* @adapter: board private structure
**/
-static inline void
+static void
ixgb_irq_disable(struct ixgb_adapter *adapter)
{
atomic_inc(&adapter->irq_sem);
@@ -210,7 +201,7 @@ ixgb_irq_disable(struct ixgb_adapter *adapter)
* @adapter: board private structure
**/
-static inline void
+static void
ixgb_irq_enable(struct ixgb_adapter *adapter)
{
if(atomic_dec_and_test(&adapter->irq_sem)) {
@@ -231,6 +222,7 @@ ixgb_up(struct ixgb_adapter *adapter)
/* hardware has been reset, we need to reload some things */
+ ixgb_rar_set(hw, netdev->dev_addr, 0);
ixgb_set_multi(netdev);
ixgb_restore_vlan(adapter);
@@ -240,6 +232,9 @@ ixgb_up(struct ixgb_adapter *adapter)
ixgb_configure_rx(adapter);
ixgb_alloc_rx_buffers(adapter);
+ /* disable interrupts and get the hardware into a known state */
+ IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
+
#ifdef CONFIG_PCI_MSI
{
boolean_t pcix = (IXGB_READ_REG(&adapter->hw, STATUS) &
@@ -249,7 +244,7 @@ ixgb_up(struct ixgb_adapter *adapter)
if (!pcix)
adapter->have_msi = FALSE;
else if((err = pci_enable_msi(adapter->pdev))) {
- printk (KERN_ERR
+ DPRINTK(PROBE, ERR,
"Unable to allocate MSI interrupt Error: %d\n", err);
adapter->have_msi = FALSE;
/* proceed to try to request regular interrupt */
@@ -259,11 +254,11 @@ ixgb_up(struct ixgb_adapter *adapter)
#endif
if((err = request_irq(adapter->pdev->irq, &ixgb_intr,
SA_SHIRQ | SA_SAMPLE_RANDOM,
- netdev->name, netdev)))
+ netdev->name, netdev))) {
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate interrupt Error: %d\n", err);
return err;
-
- /* disable interrupts and get the hardware into a known state */
- IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
+ }
if((hw->max_frame_size != max_frame) ||
(hw->max_frame_size !=
@@ -285,11 +280,12 @@ ixgb_up(struct ixgb_adapter *adapter)
}
mod_timer(&adapter->watchdog_timer, jiffies);
- ixgb_irq_enable(adapter);
#ifdef CONFIG_IXGB_NAPI
netif_poll_enable(netdev);
#endif
+ ixgb_irq_enable(adapter);
+
return 0;
}
@@ -326,7 +322,7 @@ ixgb_reset(struct ixgb_adapter *adapter)
ixgb_adapter_stop(&adapter->hw);
if(!ixgb_init_hw(&adapter->hw))
- IXGB_DBG("ixgb_init_hw failed.\n");
+ DPRINTK(PROBE, ERR, "ixgb_init_hw failed.\n");
}
/**
@@ -363,7 +359,8 @@ ixgb_probe(struct pci_dev *pdev,
} else {
if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
(err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
- IXGB_ERR("No usable DMA configuration, aborting\n");
+ printk(KERN_ERR
+ "ixgb: No usable DMA configuration, aborting\n");
goto err_dma_mask;
}
pci_using_dac = 0;
@@ -388,6 +385,7 @@ ixgb_probe(struct pci_dev *pdev,
adapter->netdev = netdev;
adapter->pdev = pdev;
adapter->hw.back = adapter;
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);
mmio_start = pci_resource_start(pdev, BAR_0);
mmio_len = pci_resource_len(pdev, BAR_0);
@@ -416,7 +414,7 @@ ixgb_probe(struct pci_dev *pdev,
netdev->change_mtu = &ixgb_change_mtu;
ixgb_set_ethtool_ops(netdev);
netdev->tx_timeout = &ixgb_tx_timeout;
- netdev->watchdog_timeo = HZ;
+ netdev->watchdog_timeo = 5 * HZ;
#ifdef CONFIG_IXGB_NAPI
netdev->poll = &ixgb_clean;
netdev->weight = 64;
@@ -428,6 +426,7 @@ ixgb_probe(struct pci_dev *pdev,
netdev->poll_controller = ixgb_netpoll;
#endif
+ strcpy(netdev->name, pci_name(pdev));
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len;
netdev->base_addr = adapter->hw.io_base;
@@ -449,6 +448,9 @@ ixgb_probe(struct pci_dev *pdev,
#ifdef NETIF_F_TSO
netdev->features |= NETIF_F_TSO;
#endif
+#ifdef NETIF_F_LLTX
+ netdev->features |= NETIF_F_LLTX;
+#endif
if(pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
@@ -456,7 +458,7 @@ ixgb_probe(struct pci_dev *pdev,
/* make sure the EEPROM is good */
if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
- printk(KERN_ERR "The EEPROM Checksum Is Not Valid\n");
+ DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
}
@@ -465,6 +467,7 @@ ixgb_probe(struct pci_dev *pdev,
memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
if(!is_valid_ether_addr(netdev->perm_addr)) {
+ DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
err = -EIO;
goto err_eeprom;
}
@@ -478,6 +481,7 @@ ixgb_probe(struct pci_dev *pdev,
INIT_WORK(&adapter->tx_timeout_task,
(void (*)(void *))ixgb_tx_timeout_task, netdev);
+ strcpy(netdev->name, "eth%d");
if((err = register_netdev(netdev)))
goto err_register;
@@ -486,8 +490,7 @@ ixgb_probe(struct pci_dev *pdev,
netif_carrier_off(netdev);
netif_stop_queue(netdev);
- printk(KERN_INFO "%s: Intel(R) PRO/10GbE Network Connection\n",
- netdev->name);
+ DPRINTK(PROBE, INFO, "Intel(R) PRO/10GbE Network Connection\n");
ixgb_check_options(adapter);
/* reset the hardware with the new settings */
@@ -557,17 +560,17 @@ ixgb_sw_init(struct ixgb_adapter *adapter)
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_id = pdev->subsystem_device;
- adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
-
hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
+ adapter->rx_buffer_len = hw->max_frame_size;
if((hw->device_id == IXGB_DEVICE_ID_82597EX)
- ||(hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
- ||(hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
+ || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4)
+ || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
+ || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
hw->mac_type = ixgb_82597;
else {
/* should never have loaded on this device */
- printk(KERN_ERR "ixgb: unsupported device id\n");
+ DPRINTK(PROBE, ERR, "unsupported device id\n");
}
/* enable flow control to be programmed */
@@ -665,6 +668,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
size = sizeof(struct ixgb_buffer) * txdr->count;
txdr->buffer_info = vmalloc(size);
if(!txdr->buffer_info) {
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate transmit descriptor ring memory\n");
return -ENOMEM;
}
memset(txdr->buffer_info, 0, size);
@@ -677,6 +682,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
if(!txdr->desc) {
vfree(txdr->buffer_info);
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate transmit descriptor memory\n");
return -ENOMEM;
}
memset(txdr->desc, 0, txdr->size);
@@ -750,6 +757,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
size = sizeof(struct ixgb_buffer) * rxdr->count;
rxdr->buffer_info = vmalloc(size);
if(!rxdr->buffer_info) {
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate receive descriptor ring\n");
return -ENOMEM;
}
memset(rxdr->buffer_info, 0, size);
@@ -763,6 +772,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
if(!rxdr->desc) {
vfree(rxdr->buffer_info);
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate receive descriptors\n");
return -ENOMEM;
}
memset(rxdr->desc, 0, rxdr->size);
@@ -794,21 +805,14 @@ ixgb_setup_rctl(struct ixgb_adapter *adapter)
rctl |= IXGB_RCTL_SECRC;
- switch (adapter->rx_buffer_len) {
- case IXGB_RXBUFFER_2048:
- default:
+ if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
rctl |= IXGB_RCTL_BSIZE_2048;
- break;
- case IXGB_RXBUFFER_4096:
+ else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
rctl |= IXGB_RCTL_BSIZE_4096;
- break;
- case IXGB_RXBUFFER_8192:
+ else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
rctl |= IXGB_RCTL_BSIZE_8192;
- break;
- case IXGB_RXBUFFER_16384:
+ else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
rctl |= IXGB_RCTL_BSIZE_16384;
- break;
- }
IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
}
@@ -898,22 +902,25 @@ ixgb_free_tx_resources(struct ixgb_adapter *adapter)
adapter->tx_ring.desc = NULL;
}
-static inline void
+static void
ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
struct ixgb_buffer *buffer_info)
{
struct pci_dev *pdev = adapter->pdev;
- if(buffer_info->dma) {
- pci_unmap_page(pdev,
- buffer_info->dma,
- buffer_info->length,
- PCI_DMA_TODEVICE);
- buffer_info->dma = 0;
- }
- if(buffer_info->skb) {
+
+ if (buffer_info->dma)
+ pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
+ PCI_DMA_TODEVICE);
+
+ if (buffer_info->skb)
dev_kfree_skb_any(buffer_info->skb);
- buffer_info->skb = NULL;
- }
+
+ buffer_info->skb = NULL;
+ buffer_info->dma = 0;
+ buffer_info->time_stamp = 0;
+ /* these fields must always be initialized in tx
+ * buffer_info->length = 0;
+ * buffer_info->next_to_watch = 0; */
}
/**
@@ -1112,8 +1119,8 @@ ixgb_watchdog(unsigned long data)
if(adapter->hw.link_up) {
if(!netif_carrier_ok(netdev)) {
- printk(KERN_INFO "ixgb: %s NIC Link is Up %d Mbps %s\n",
- netdev->name, 10000, "Full Duplex");
+ DPRINTK(LINK, INFO,
+ "NIC Link is Up 10000 Mbps Full Duplex\n");
adapter->link_speed = 10000;
adapter->link_duplex = FULL_DUPLEX;
netif_carrier_on(netdev);
@@ -1123,9 +1130,7 @@ ixgb_watchdog(unsigned long data)
if(netif_carrier_ok(netdev)) {
adapter->link_speed = 0;
adapter->link_duplex = 0;
- printk(KERN_INFO
- "ixgb: %s NIC Link is Down\n",
- netdev->name);
+ DPRINTK(LINK, INFO, "NIC Link is Down\n");
netif_carrier_off(netdev);
netif_stop_queue(netdev);
@@ -1158,7 +1163,7 @@ ixgb_watchdog(unsigned long data)
#define IXGB_TX_FLAGS_VLAN 0x00000002
#define IXGB_TX_FLAGS_TSO 0x00000004
-static inline int
+static int
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
#ifdef NETIF_F_TSO
@@ -1220,7 +1225,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
return 0;
}
-static inline boolean_t
+static boolean_t
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
struct ixgb_context_desc *context_desc;
@@ -1258,7 +1263,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
#define IXGB_MAX_TXD_PWR 14
#define IXGB_MAX_DATA_PER_TXD (1<<IXGB_MAX_TXD_PWR)
-static inline int
+static int
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
unsigned int first)
{
@@ -1284,6 +1289,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
size,
PCI_DMA_TODEVICE);
buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = 0;
len -= size;
offset += size;
@@ -1309,6 +1315,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
size,
PCI_DMA_TODEVICE);
buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = 0;
len -= size;
offset += size;
@@ -1323,7 +1330,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
return count;
}
-static inline void
+static void
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
{
struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
@@ -1395,13 +1402,26 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return 0;
}
+#ifdef NETIF_F_LLTX
+ local_irq_save(flags);
+ if (!spin_trylock(&adapter->tx_lock)) {
+ /* Collision - tell upper layer to requeue */
+ local_irq_restore(flags);
+ return NETDEV_TX_LOCKED;
+ }
+#else
spin_lock_irqsave(&adapter->tx_lock, flags);
+#endif
+
if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
netif_stop_queue(netdev);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
- return 1;
+ return NETDEV_TX_BUSY;
}
+
+#ifndef NETIF_F_LLTX
spin_unlock_irqrestore(&adapter->tx_lock, flags);
+#endif
if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
tx_flags |= IXGB_TX_FLAGS_VLAN;
@@ -1413,10 +1433,13 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tso = ixgb_tso(adapter, skb);
if (tso < 0) {
dev_kfree_skb_any(skb);
+#ifdef NETIF_F_LLTX
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+#endif
return NETDEV_TX_OK;
}
- if (tso)
+ if (likely(tso))
tx_flags |= IXGB_TX_FLAGS_TSO;
else if(ixgb_tx_csum(adapter, skb))
tx_flags |= IXGB_TX_FLAGS_CSUM;
@@ -1426,7 +1449,15 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
netdev->trans_start = jiffies;
- return 0;
+#ifdef NETIF_F_LLTX
+ /* Make sure there is space in the ring for the next send. */
+ if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED))
+ netif_stop_queue(netdev);
+
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+
+#endif
+ return NETDEV_TX_OK;
}
/**
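Note: the three ixgb_xmit_frame() hunks above switch the driver to the NETIF_F_LLTX model: it takes its own tx_lock with a trylock and returns NETDEV_TX_LOCKED on collision so the stack requeues, uses the proper NETDEV_TX_* return codes, and re-checks ring space before dropping the lock. A rough user-space analogue of that locking pattern; the return codes and counters below are demo stand-ins.

#include <pthread.h>
#include <stdio.h>

enum { TX_OK = 0, TX_BUSY = 1, TX_LOCKED = 2 };	/* stand-ins for NETDEV_TX_* */

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static int descriptors_free = 8;

static int xmit_frame(void)
{
	if (pthread_mutex_trylock(&tx_lock) != 0)
		return TX_LOCKED;		/* collision: caller requeues */

	if (descriptors_free == 0) {
		pthread_mutex_unlock(&tx_lock);
		return TX_BUSY;			/* ring full: stop the queue */
	}
	descriptors_free--;			/* "queue" the packet */
	pthread_mutex_unlock(&tx_lock);
	return TX_OK;
}

int main(void)
{
	printf("xmit -> %d\n", xmit_frame());
	return 0;
}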
@@ -1448,6 +1479,7 @@ ixgb_tx_timeout_task(struct net_device *netdev)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
+ adapter->tx_timeout_count++;
ixgb_down(adapter, TRUE);
ixgb_up(adapter);
}
@@ -1486,28 +1518,15 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu)
if((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
|| (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
- IXGB_ERR("Invalid MTU setting\n");
+ DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu);
return -EINVAL;
}
- if((max_frame <= IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
- || (max_frame <= IXGB_RXBUFFER_2048)) {
- adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
-
- } else if(max_frame <= IXGB_RXBUFFER_4096) {
- adapter->rx_buffer_len = IXGB_RXBUFFER_4096;
-
- } else if(max_frame <= IXGB_RXBUFFER_8192) {
- adapter->rx_buffer_len = IXGB_RXBUFFER_8192;
-
- } else {
- adapter->rx_buffer_len = IXGB_RXBUFFER_16384;
- }
+ adapter->rx_buffer_len = max_frame;
netdev->mtu = new_mtu;
- if(old_max_frame != max_frame && netif_running(netdev)) {
-
+ if ((old_max_frame != max_frame) && netif_running(netdev)) {
ixgb_down(adapter, TRUE);
ixgb_up(adapter);
}
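Note: with this change, together with the earlier ixgb_setup_rctl() rework, the receive buffer length simply tracks the frame size and the rctl code rounds it up to the nearest hardware bucket. A small sketch of that sizing, assuming the usual 14-byte Ethernet header and 4-byte FCS and the IXGB_RXBUFFER_* bucket values:

#include <stdio.h>

enum { ENET_HEADER_SIZE = 14, ENET_FCS_LENGTH = 4 };

/* Round a buffer length up to the smallest hardware receive bucket. */
static int pick_bsize(int rx_buffer_len)
{
	if (rx_buffer_len <= 2048)
		return 2048;
	else if (rx_buffer_len <= 4096)
		return 4096;
	else if (rx_buffer_len <= 8192)
		return 8192;
	return 16384;
}

int main(void)
{
	int mtu = 9000;	/* jumbo frame example */
	int max_frame = mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;

	printf("mtu=%d -> rx_buffer_len=%d -> hw bucket=%d\n",
	       mtu, max_frame, pick_bsize(max_frame));
	return 0;
}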
@@ -1765,23 +1784,43 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
tx_ring->next_to_clean = i;
- spin_lock(&adapter->tx_lock);
- if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
- (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE)) {
-
- netif_wake_queue(netdev);
+ if (unlikely(netif_queue_stopped(netdev))) {
+ spin_lock(&adapter->tx_lock);
+ if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
+ (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE))
+ netif_wake_queue(netdev);
+ spin_unlock(&adapter->tx_lock);
}
- spin_unlock(&adapter->tx_lock);
if(adapter->detect_tx_hung) {
/* detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of i */
adapter->detect_tx_hung = FALSE;
- if(tx_ring->buffer_info[i].dma &&
- time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
+ if (tx_ring->buffer_info[eop].dma &&
+ time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
&& !(IXGB_READ_REG(&adapter->hw, STATUS) &
- IXGB_STATUS_TXOFF))
+ IXGB_STATUS_TXOFF)) {
+ /* detected Tx unit hang */
+ DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%x>\n"
+ " jiffies <%lx>\n"
+ " next_to_watch.status <%x>\n",
+ IXGB_READ_REG(&adapter->hw, TDH),
+ IXGB_READ_REG(&adapter->hw, TDT),
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_ring->buffer_info[eop].time_stamp,
+ eop,
+ jiffies,
+ eop_desc->status);
netif_stop_queue(netdev);
+ }
}
return cleaned;
@@ -1794,7 +1833,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
* @sk_buff: socket buffer with received data
**/
-static inline void
+static void
ixgb_rx_checksum(struct ixgb_adapter *adapter,
struct ixgb_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -1858,6 +1897,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
#endif
status = rx_desc->status;
skb = buffer_info->skb;
+ buffer_info->skb = NULL;
prefetch(skb->data);
@@ -1902,6 +1942,26 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
goto rxdesc_done;
}
+ /* code added for copybreak, this should improve
+ * performance for small packets with large amounts
+ * of reassembly being done in the stack */
+#define IXGB_CB_LENGTH 256
+ if (length < IXGB_CB_LENGTH) {
+ struct sk_buff *new_skb =
+ dev_alloc_skb(length + NET_IP_ALIGN);
+ if (new_skb) {
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ new_skb->dev = netdev;
+ memcpy(new_skb->data - NET_IP_ALIGN,
+ skb->data - NET_IP_ALIGN,
+ length + NET_IP_ALIGN);
+ /* save the skb in buffer_info as good */
+ buffer_info->skb = skb;
+ skb = new_skb;
+ }
+ }
+ /* end copybreak code */
+
/* Good Receive */
skb_put(skb, length);
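Note: the copybreak block above copies small frames into a freshly allocated, right-sized skb so the large DMA-mapped buffer can stay on the ring and be re-posted by ixgb_alloc_rx_buffers(). A stripped-down user-space sketch of the idea; the 256-byte threshold matches IXGB_CB_LENGTH, everything else stands in for skb handling.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define COPYBREAK 256

int main(void)
{
	char ring_buffer[2048];		/* large, DMA-mapped receive buffer */
	int length = 60;		/* a small received frame */

	memset(ring_buffer, 0xab, sizeof(ring_buffer));

	if (length < COPYBREAK) {
		char *small = malloc(length);	/* right-sized copy for the stack */

		if (small) {
			memcpy(small, ring_buffer, length);
			printf("copied %d bytes; ring buffer recycled\n", length);
			free(small);
			return 0;
		}
	}
	printf("large frame: handing the full buffer up the stack\n");
	return 0;
}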
@@ -1931,7 +1991,6 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
rxdesc_done:
/* clean up descriptor, might be written over by hw */
rx_desc->status = 0;
- buffer_info->skb = NULL;
/* use prefetched values */
rx_desc = next_rxd;
@@ -1971,12 +2030,18 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
/* leave three descriptors unused */
while(--cleancount > 2) {
- rx_desc = IXGB_RX_DESC(*rx_ring, i);
-
- skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
+ /* recycle! it's good for you */
+ if (!(skb = buffer_info->skb))
+ skb = dev_alloc_skb(adapter->rx_buffer_len
+ + NET_IP_ALIGN);
+ else {
+ skb_trim(skb, 0);
+ goto map_skb;
+ }
- if(unlikely(!skb)) {
+ if (unlikely(!skb)) {
/* Better luck next round */
+ adapter->alloc_rx_buff_failed++;
break;
}
@@ -1990,33 +2055,36 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
- buffer_info->dma =
- pci_map_single(pdev,
- skb->data,
- adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+map_skb:
+ buffer_info->dma = pci_map_single(pdev,
+ skb->data,
+ adapter->rx_buffer_len,
+ PCI_DMA_FROMDEVICE);
+ rx_desc = IXGB_RX_DESC(*rx_ring, i);
rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
/* guarantee DD bit not set now before h/w gets descriptor
* this is the rest of the workaround for h/w double
* writeback. */
rx_desc->status = 0;
- if((i & ~(num_group_tail_writes- 1)) == i) {
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64). */
- wmb();
-
- IXGB_WRITE_REG(&adapter->hw, RDT, i);
- }
if(++i == rx_ring->count) i = 0;
buffer_info = &rx_ring->buffer_info[i];
}
- rx_ring->next_to_use = i;
+ if (likely(rx_ring->next_to_use != i)) {
+ rx_ring->next_to_use = i;
+ if (unlikely(i-- == 0))
+ i = (rx_ring->count - 1);
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs, such
+ * as IA-64). */
+ wmb();
+ IXGB_WRITE_REG(&adapter->hw, RDT, i);
+ }
}
/**
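Note: the ixgb_alloc_rx_buffers() rework above moves the RDT (receive descriptor tail) write out of the fill loop: descriptors are prepared in a batch, a single write barrier orders the memory writes, and one register write then points the hardware at the last valid descriptor. A simplified sketch of that pattern; the register write is simulated with a printf.

#include <stdio.h>

#define RING_SIZE 16

static unsigned int ring[RING_SIZE];
static unsigned int next_to_use;

static void write_tail(unsigned int idx)
{
	/* in the driver: wmb(); IXGB_WRITE_REG(&adapter->hw, RDT, idx); */
	printf("RDT <- %u (after memory barrier)\n", idx);
}

int main(void)
{
	unsigned int i = next_to_use, filled = 5;

	while (filled--) {
		ring[i] = 0xdeadbeefu;		/* stand-in for a descriptor */
		if (++i == RING_SIZE)
			i = 0;
	}
	if (next_to_use != i) {			/* only touch hardware if we added any */
		next_to_use = i;
		write_tail(i ? i - 1 : RING_SIZE - 1);	/* last valid descriptor */
	}
	return 0;
}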
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h
index dba20481ee80..ee982feac64d 100644
--- a/drivers/net/ixgb/ixgb_osdep.h
+++ b/drivers/net/ixgb/ixgb_osdep.h
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
index 8a83dfdf746d..39fbed29a3df 100644
--- a/drivers/net/ixgb/ixgb_param.c
+++ b/drivers/net/ixgb/ixgb_param.c
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -76,7 +76,7 @@ IXGB_PARAM(RxDescriptors, "Number of receive descriptors");
* - 2 - Tx only, generate PAUSE frames but ignore them on receive
* - 3 - Full Flow Control Support
*
- * Default Value: Read flow control settings from the EEPROM
+ * Default Value: 2 - Tx only (silicon bug avoidance)
*/
IXGB_PARAM(FlowControl, "Flow Control setting");
@@ -137,7 +137,7 @@ IXGB_PARAM(RxFCLowThresh, "Receive Flow Control Low Threshold");
*
* Valid Range: 1 - 65535
*
- * Default Value: 256 (0x100)
+ * Default Value: 65535 (0xffff) (we'll send an xon if we recover)
*/
IXGB_PARAM(FCReqTimeout, "Flow Control Request Timeout");
@@ -165,8 +165,6 @@ IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable");
#define XSUMRX_DEFAULT OPTION_ENABLED
-#define FLOW_CONTROL_FULL ixgb_fc_full
-#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
#define DEFAULT_FCRTL 0x28000
#define DEFAULT_FCRTH 0x30000
#define MIN_FCRTL 0
@@ -174,9 +172,9 @@ IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable");
#define MIN_FCRTH 8
#define MAX_FCRTH 0x3FFF0
-#define DEFAULT_FCPAUSE 0x100 /* this may be too long */
#define MIN_FCPAUSE 1
#define MAX_FCPAUSE 0xffff
+#define DEFAULT_FCPAUSE 0xFFFF /* this may be too long */
struct ixgb_option {
enum { enable_option, range_option, list_option } type;
@@ -336,7 +334,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
.type = list_option,
.name = "Flow Control",
.err = "reading default settings from EEPROM",
- .def = ixgb_fc_full,
+ .def = ixgb_fc_tx_pause,
.arg = { .l = { .nr = LIST_LEN(fc_list),
.p = fc_list }}
};
@@ -365,8 +363,8 @@ ixgb_check_options(struct ixgb_adapter *adapter)
} else {
adapter->hw.fc.high_water = opt.def;
}
- if(!(adapter->hw.fc.type & ixgb_fc_rx_pause) )
- printk (KERN_INFO
+ if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
+ printk (KERN_INFO
"Ignoring RxFCHighThresh when no RxFC\n");
}
{ /* Receive Flow Control Low Threshold */
@@ -385,8 +383,8 @@ ixgb_check_options(struct ixgb_adapter *adapter)
} else {
adapter->hw.fc.low_water = opt.def;
}
- if(!(adapter->hw.fc.type & ixgb_fc_rx_pause) )
- printk (KERN_INFO
+ if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
+ printk (KERN_INFO
"Ignoring RxFCLowThresh when no RxFC\n");
}
{ /* Flow Control Pause Time Request*/
@@ -406,12 +404,12 @@ ixgb_check_options(struct ixgb_adapter *adapter)
} else {
adapter->hw.fc.pause_time = opt.def;
}
- if(!(adapter->hw.fc.type & ixgb_fc_rx_pause) )
- printk (KERN_INFO
+ if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
+ printk (KERN_INFO
"Ignoring FCReqTimeout when no RxFC\n");
}
/* high low and spacing check for rx flow control thresholds */
- if (adapter->hw.fc.type & ixgb_fc_rx_pause) {
+ if (adapter->hw.fc.type & ixgb_fc_tx_pause) {
/* high must be greater than low */
if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
/* set defaults */
diff --git a/drivers/net/myri10ge/Makefile b/drivers/net/myri10ge/Makefile
new file mode 100644
index 000000000000..5df891647aee
--- /dev/null
+++ b/drivers/net/myri10ge/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Myricom Myri-10G ethernet driver
+#
+
+obj-$(CONFIG_MYRI10GE) += myri10ge.o
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
new file mode 100644
index 000000000000..e1feb58bd661
--- /dev/null
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -0,0 +1,2869 @@
+/*************************************************************************
+ * myri10ge.c: Myricom Myri-10G Ethernet driver.
+ *
+ * Copyright (C) 2005, 2006 Myricom, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Myricom, Inc. nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *
+ * If the eeprom on your board is not recent enough, you will need to get a
+ * newer firmware image at:
+ * http://www.myri.com/scs/download-Myri10GE.html
+ *
+ * Contact Information:
+ * <help@myri.com>
+ * Myricom, Inc., 325N Santa Anita Avenue, Arcadia, CA 91006
+ *************************************************************************/
+
+#include <linux/tcp.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/inet.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+#include <linux/timer.h>
+#include <linux/vmalloc.h>
+#include <linux/crc32.h>
+#include <linux/moduleparam.h>
+#include <linux/io.h>
+#include <net/checksum.h>
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#ifdef CONFIG_MTRR
+#include <asm/mtrr.h>
+#endif
+
+#include "myri10ge_mcp.h"
+#include "myri10ge_mcp_gen_header.h"
+
+#define MYRI10GE_VERSION_STR "1.0.0"
+
+MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
+MODULE_AUTHOR("Maintainer: help@myri.com");
+MODULE_VERSION(MYRI10GE_VERSION_STR);
+MODULE_LICENSE("Dual BSD/GPL");
+
+#define MYRI10GE_MAX_ETHER_MTU 9014
+
+#define MYRI10GE_ETH_STOPPED 0
+#define MYRI10GE_ETH_STOPPING 1
+#define MYRI10GE_ETH_STARTING 2
+#define MYRI10GE_ETH_RUNNING 3
+#define MYRI10GE_ETH_OPEN_FAILED 4
+
+#define MYRI10GE_EEPROM_STRINGS_SIZE 256
+#define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2)
+
+#define MYRI10GE_NO_CONFIRM_DATA 0xffffffff
+#define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff
+
+struct myri10ge_rx_buffer_state {
+ struct sk_buff *skb;
+ DECLARE_PCI_UNMAP_ADDR(bus)
+ DECLARE_PCI_UNMAP_LEN(len)
+};
+
+struct myri10ge_tx_buffer_state {
+ struct sk_buff *skb;
+ int last;
+ DECLARE_PCI_UNMAP_ADDR(bus)
+ DECLARE_PCI_UNMAP_LEN(len)
+};
+
+struct myri10ge_cmd {
+ u32 data0;
+ u32 data1;
+ u32 data2;
+};
+
+struct myri10ge_rx_buf {
+ struct mcp_kreq_ether_recv __iomem *lanai; /* lanai ptr for recv ring */
+ u8 __iomem *wc_fifo; /* w/c rx dma addr fifo address */
+ struct mcp_kreq_ether_recv *shadow; /* host shadow of recv ring */
+ struct myri10ge_rx_buffer_state *info;
+ int cnt;
+ int alloc_fail;
+ int mask; /* number of rx slots -1 */
+};
+
+struct myri10ge_tx_buf {
+ struct mcp_kreq_ether_send __iomem *lanai; /* lanai ptr for sendq */
+ u8 __iomem *wc_fifo; /* w/c send fifo address */
+ struct mcp_kreq_ether_send *req_list; /* host shadow of sendq */
+ char *req_bytes;
+ struct myri10ge_tx_buffer_state *info;
+ int mask; /* number of transmit slots -1 */
+ int boundary; /* boundary transmits cannot cross */
+ int req ____cacheline_aligned; /* transmit slots submitted */
+ int pkt_start; /* packets started */
+ int done ____cacheline_aligned; /* transmit slots completed */
+ int pkt_done; /* packets completed */
+};
+
+struct myri10ge_rx_done {
+ struct mcp_slot *entry;
+ dma_addr_t bus;
+ int cnt;
+ int idx;
+};
+
+struct myri10ge_priv {
+ int running; /* running? */
+ int csum_flag; /* rx_csums? */
+ struct myri10ge_tx_buf tx; /* transmit ring */
+ struct myri10ge_rx_buf rx_small;
+ struct myri10ge_rx_buf rx_big;
+ struct myri10ge_rx_done rx_done;
+ int small_bytes;
+ struct net_device *dev;
+ struct net_device_stats stats;
+ u8 __iomem *sram;
+ int sram_size;
+ unsigned long board_span;
+ unsigned long iomem_base;
+ u32 __iomem *irq_claim;
+ u32 __iomem *irq_deassert;
+ char *mac_addr_string;
+ struct mcp_cmd_response *cmd;
+ dma_addr_t cmd_bus;
+ struct mcp_irq_data *fw_stats;
+ dma_addr_t fw_stats_bus;
+ struct pci_dev *pdev;
+ int msi_enabled;
+ unsigned int link_state;
+ unsigned int rdma_tags_available;
+ int intr_coal_delay;
+ u32 __iomem *intr_coal_delay_ptr;
+ int mtrr;
+ int wake_queue;
+ int stop_queue;
+ int down_cnt;
+ wait_queue_head_t down_wq;
+ struct work_struct watchdog_work;
+ struct timer_list watchdog_timer;
+ int watchdog_tx_done;
+ int watchdog_resets;
+ int tx_linearized;
+ int pause;
+ char *fw_name;
+ char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
+ char fw_version[128];
+ u8 mac_addr[6]; /* eeprom mac address */
+ unsigned long serial_number;
+ int vendor_specific_offset;
+ u32 devctl;
+ u16 msi_flags;
+ u32 pm_state[16];
+ u32 read_dma;
+ u32 write_dma;
+ u32 read_write_dma;
+};
+
+static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
+static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
+
+static char *myri10ge_fw_name = NULL;
+module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name\n");
+
+static int myri10ge_ecrc_enable = 1;
+module_param(myri10ge_ecrc_enable, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E\n");
+
+static int myri10ge_max_intr_slots = 1024;
+module_param(myri10ge_max_intr_slots, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_max_intr_slots, "Interrupt queue slots\n");
+
+static int myri10ge_small_bytes = -1; /* -1 == auto */
+module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets\n");
+
+static int myri10ge_msi = 1; /* enable msi by default */
+module_param(myri10ge_msi, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts\n");
+
+static int myri10ge_intr_coal_delay = 25;
+module_param(myri10ge_intr_coal_delay, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay\n");
+
+static int myri10ge_flow_control = 1;
+module_param(myri10ge_flow_control, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter\n");
+
+static int myri10ge_deassert_wait = 1;
+module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(myri10ge_deassert_wait,
+ "Wait when deasserting legacy interrupts\n");
+
+static int myri10ge_force_firmware = 0;
+module_param(myri10ge_force_firmware, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_force_firmware,
+ "Force firmware to assume aligned completions\n");
+
+static int myri10ge_skb_cross_4k = 0;
+module_param(myri10ge_skb_cross_4k, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(myri10ge_skb_cross_4k,
+ "Can a small skb cross a 4KB boundary?\n");
+
+static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
+module_param(myri10ge_initial_mtu, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU\n");
+
+static int myri10ge_napi_weight = 64;
+module_param(myri10ge_napi_weight, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight\n");
+
+static int myri10ge_watchdog_timeout = 1;
+module_param(myri10ge_watchdog_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout\n");
+
+static int myri10ge_max_irq_loops = 1048576;
+module_param(myri10ge_max_irq_loops, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_max_irq_loops,
+ "Set stuck legacy IRQ detection threshold\n");
+
+#define MYRI10GE_FW_OFFSET 1024*1024
+#define MYRI10GE_HIGHPART_TO_U32(X) \
+(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
+#define MYRI10GE_LOWPART_TO_U32(X) ((u32)(X))
+
+#define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8)
+
+static int
+myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
+ struct myri10ge_cmd *data, int atomic)
+{
+ struct mcp_cmd *buf;
+ char buf_bytes[sizeof(*buf) + 8];
+ struct mcp_cmd_response *response = mgp->cmd;
+ char __iomem *cmd_addr = mgp->sram + MXGEFW_CMD_OFFSET;
+ u32 dma_low, dma_high, result, value;
+ int sleep_total = 0;
+
+ /* ensure buf is aligned to 8 bytes */
+ buf = (struct mcp_cmd *)ALIGN((unsigned long)buf_bytes, 8);
+
+ buf->data0 = htonl(data->data0);
+ buf->data1 = htonl(data->data1);
+ buf->data2 = htonl(data->data2);
+ buf->cmd = htonl(cmd);
+ dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
+ dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
+
+ buf->response_addr.low = htonl(dma_low);
+ buf->response_addr.high = htonl(dma_high);
+ response->result = MYRI10GE_NO_RESPONSE_RESULT;
+ mb();
+ myri10ge_pio_copy(cmd_addr, buf, sizeof(*buf));
+
+ /* wait up to 15ms. Longest command is the DMA benchmark,
+ * which is capped at 5ms, but runs from a timeout handler
+ * that runs every 7.8ms. So a 15ms timeout leaves us with
+ * a 2.2ms margin
+ */
+ if (atomic) {
+ /* if atomic is set, do not sleep,
+ * and try to get the completion quickly
+ * (1ms will be enough for those commands) */
+ for (sleep_total = 0;
+ sleep_total < 1000
+ && response->result == MYRI10GE_NO_RESPONSE_RESULT;
+ sleep_total += 10)
+ udelay(10);
+ } else {
+ /* use msleep for most commands */
+ for (sleep_total = 0;
+ sleep_total < 15
+ && response->result == MYRI10GE_NO_RESPONSE_RESULT;
+ sleep_total++)
+ msleep(1);
+ }
+
+ result = ntohl(response->result);
+ value = ntohl(response->data);
+ if (result != MYRI10GE_NO_RESPONSE_RESULT) {
+ if (result == 0) {
+ data->data0 = value;
+ return 0;
+ } else {
+ dev_err(&mgp->pdev->dev,
+ "command %d failed, result = %d\n",
+ cmd, result);
+ return -ENXIO;
+ }
+ }
+
+ dev_err(&mgp->pdev->dev, "command %d timed out, result = %d\n",
+ cmd, result);
+ return -EAGAIN;
+}
+
+/*
+ * The eeprom strings on the lanaiX have the format
+ * SN=x\0
+ * MAC=x:x:x:x:x:x\0
+ * PT:ddd mmm xx xx:xx:xx xx\0
+ * PV:ddd mmm xx xx:xx:xx xx\0
+ */
+static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp)
+{
+ char *ptr, *limit;
+ int i;
+
+ ptr = mgp->eeprom_strings;
+ limit = mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE;
+
+ while (*ptr != '\0' && ptr < limit) {
+ if (memcmp(ptr, "MAC=", 4) == 0) {
+ ptr += 4;
+ mgp->mac_addr_string = ptr;
+ for (i = 0; i < 6; i++) {
+ if ((ptr + 2) > limit)
+ goto abort;
+ mgp->mac_addr[i] =
+ simple_strtoul(ptr, &ptr, 16);
+ ptr += 1;
+ }
+ }
+ if (memcmp((const void *)ptr, "SN=", 3) == 0) {
+ ptr += 3;
+ mgp->serial_number = simple_strtoul(ptr, &ptr, 10);
+ }
+ while (ptr < limit && *ptr++) ;
+ }
+
+ return 0;
+
+abort:
+ dev_err(&mgp->pdev->dev, "failed to parse eeprom_strings\n");
+ return -ENXIO;
+}
+
+/*
+ * Enable or disable periodic RDMAs from the host to make certain
+ * chipsets resend dropped PCIe messages
+ */
+
+static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
+{
+ char __iomem *submit;
+ u32 buf[16];
+ u32 dma_low, dma_high;
+ int i;
+
+ /* clear confirmation addr */
+ mgp->cmd->data = 0;
+ mb();
+
+ /* send a rdma command to the PCIe engine, and wait for the
+ * response in the confirmation address. The firmware should
+ * write a -1 there to indicate it is alive and well
+ */
+ dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
+ dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
+
+ buf[0] = htonl(dma_high); /* confirm addr MSW */
+ buf[1] = htonl(dma_low); /* confirm addr LSW */
+ buf[2] = htonl(MYRI10GE_NO_CONFIRM_DATA); /* confirm data */
+ buf[3] = htonl(dma_high); /* dummy addr MSW */
+ buf[4] = htonl(dma_low); /* dummy addr LSW */
+ buf[5] = htonl(enable); /* enable? */
+
+ submit = mgp->sram + 0xfc01c0;
+
+ myri10ge_pio_copy(submit, &buf, sizeof(buf));
+ for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++)
+ msleep(1);
+ if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA)
+ dev_err(&mgp->pdev->dev, "dummy rdma %s failed\n",
+ (enable ? "enable" : "disable"));
+}
+
+static int
+myri10ge_validate_firmware(struct myri10ge_priv *mgp,
+ struct mcp_gen_header *hdr)
+{
+ struct device *dev = &mgp->pdev->dev;
+ int major, minor;
+
+ /* check firmware type */
+ if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) {
+ dev_err(dev, "Bad firmware type: 0x%x\n", ntohl(hdr->mcp_type));
+ return -EINVAL;
+ }
+
+ /* save firmware version for ethtool */
+ strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version));
+
+ sscanf(mgp->fw_version, "%d.%d", &major, &minor);
+
+ if (!(major == MXGEFW_VERSION_MAJOR && minor == MXGEFW_VERSION_MINOR)) {
+ dev_err(dev, "Found firmware version %s\n", mgp->fw_version);
+ dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR,
+ MXGEFW_VERSION_MINOR);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
+{
+ unsigned crc, reread_crc;
+ const struct firmware *fw;
+ struct device *dev = &mgp->pdev->dev;
+ struct mcp_gen_header *hdr;
+ size_t hdr_offset;
+ int status;
+
+ if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) {
+ dev_err(dev, "Unable to load %s firmware image via hotplug\n",
+ mgp->fw_name);
+ status = -EINVAL;
+ goto abort_with_nothing;
+ }
+
+ /* check size */
+
+ if (fw->size >= mgp->sram_size - MYRI10GE_FW_OFFSET ||
+ fw->size < MCP_HEADER_PTR_OFFSET + 4) {
+ dev_err(dev, "Firmware size invalid:%d\n", (int)fw->size);
+ status = -EINVAL;
+ goto abort_with_fw;
+ }
+
+ /* check id */
+ hdr_offset = ntohl(*(u32 *) (fw->data + MCP_HEADER_PTR_OFFSET));
+ if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw->size) {
+ dev_err(dev, "Bad firmware file\n");
+ status = -EINVAL;
+ goto abort_with_fw;
+ }
+ hdr = (void *)(fw->data + hdr_offset);
+
+ status = myri10ge_validate_firmware(mgp, hdr);
+ if (status != 0)
+ goto abort_with_fw;
+
+ crc = crc32(~0, fw->data, fw->size);
+ if (mgp->tx.boundary == 2048) {
+ /* Avoid PCI burst on chipset with unaligned completions. */
+ int i;
+ __iomem u32 *ptr = (__iomem u32 *) (mgp->sram +
+ MYRI10GE_FW_OFFSET);
+ for (i = 0; i < fw->size / 4; i++) {
+ __raw_writel(((u32 *) fw->data)[i], ptr + i);
+ wmb();
+ }
+ } else {
+ myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET, fw->data,
+ fw->size);
+ }
+ /* corruption checking is good for parity recovery and buggy chipset */
+ memcpy_fromio(fw->data, mgp->sram + MYRI10GE_FW_OFFSET, fw->size);
+ reread_crc = crc32(~0, fw->data, fw->size);
+ if (crc != reread_crc) {
+ dev_err(dev, "CRC failed(fw-len=%u), got 0x%x (expect 0x%x)\n",
+ (unsigned)fw->size, reread_crc, crc);
+ status = -EIO;
+ goto abort_with_fw;
+ }
+ *size = (u32) fw->size;
+
+abort_with_fw:
+ release_firmware(fw);
+
+abort_with_nothing:
+ return status;
+}
+
+static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
+{
+ struct mcp_gen_header *hdr;
+ struct device *dev = &mgp->pdev->dev;
+ const size_t bytes = sizeof(struct mcp_gen_header);
+ size_t hdr_offset;
+ int status;
+
+ /* find running firmware header */
+ hdr_offset = ntohl(__raw_readl(mgp->sram + MCP_HEADER_PTR_OFFSET));
+
+ if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > mgp->sram_size) {
+ dev_err(dev, "Running firmware has bad header offset (%d)\n",
+ (int)hdr_offset);
+ return -EIO;
+ }
+
+ /* copy header of running firmware from SRAM to host memory to
+ * validate firmware */
+ hdr = kmalloc(bytes, GFP_KERNEL);
+ if (hdr == NULL) {
+ dev_err(dev, "could not malloc firmware hdr\n");
+ return -ENOMEM;
+ }
+ memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
+ status = myri10ge_validate_firmware(mgp, hdr);
+ kfree(hdr);
+ return status;
+}
+
+static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
+{
+ char __iomem *submit;
+ u32 buf[16];
+ u32 dma_low, dma_high, size;
+ int status, i;
+
+ size = 0;
+ status = myri10ge_load_hotplug_firmware(mgp, &size);
+ if (status) {
+ dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n");
+
+ /* Do not attempt to adopt firmware if there
+ * was a bad crc */
+ if (status == -EIO)
+ return status;
+
+ status = myri10ge_adopt_running_firmware(mgp);
+ if (status != 0) {
+ dev_err(&mgp->pdev->dev,
+ "failed to adopt running firmware\n");
+ return status;
+ }
+ dev_info(&mgp->pdev->dev,
+ "Successfully adopted running firmware\n");
+ if (mgp->tx.boundary == 4096) {
+ dev_warn(&mgp->pdev->dev,
+ "Using firmware currently running on NIC"
+ ". For optimal\n");
+ dev_warn(&mgp->pdev->dev,
+ "performance consider loading optimized "
+ "firmware\n");
+ dev_warn(&mgp->pdev->dev, "via hotplug\n");
+ }
+
+ mgp->fw_name = "adopted";
+ mgp->tx.boundary = 2048;
+ return status;
+ }
+
+ /* clear confirmation addr */
+ mgp->cmd->data = 0;
+ mb();
+
+ /* send a reload command to the bootstrap MCP, and wait for the
+ * response in the confirmation address. The firmware should
+ * write a -1 there to indicate it is alive and well
+ */
+ dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
+ dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
+
+ buf[0] = htonl(dma_high); /* confirm addr MSW */
+ buf[1] = htonl(dma_low); /* confirm addr LSW */
+ buf[2] = htonl(MYRI10GE_NO_CONFIRM_DATA); /* confirm data */
+
+ /* FIX: All newest firmware should un-protect the bottom of
+ * the sram before handoff. However, the very first interfaces
+ * do not. Therefore the handoff copy must skip the first 8 bytes
+ */
+ buf[3] = htonl(MYRI10GE_FW_OFFSET + 8); /* where the code starts */
+ buf[4] = htonl(size - 8); /* length of code */
+ buf[5] = htonl(8); /* where to copy to */
+ buf[6] = htonl(0); /* where to jump to */
+
+ submit = mgp->sram + 0xfc0000;
+
+ myri10ge_pio_copy(submit, &buf, sizeof(buf));
+ mb();
+ msleep(1);
+ mb();
+ i = 0;
+ while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20) {
+ msleep(1);
+ i++;
+ }
+ if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) {
+ dev_err(&mgp->pdev->dev, "handoff failed\n");
+ return -ENXIO;
+ }
+ dev_info(&mgp->pdev->dev, "handoff confirmed\n");
+ myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096);
+
+ return 0;
+}
+
+static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
+{
+ struct myri10ge_cmd cmd;
+ int status;
+
+ cmd.data0 = ((addr[0] << 24) | (addr[1] << 16)
+ | (addr[2] << 8) | addr[3]);
+
+ cmd.data1 = ((addr[4] << 8) | (addr[5]));
+
+ status = myri10ge_send_cmd(mgp, MXGEFW_SET_MAC_ADDRESS, &cmd, 0);
+ return status;
+}
+
+static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
+{
+ struct myri10ge_cmd cmd;
+ int status, ctl;
+
+ ctl = pause ? MXGEFW_ENABLE_FLOW_CONTROL : MXGEFW_DISABLE_FLOW_CONTROL;
+ status = myri10ge_send_cmd(mgp, ctl, &cmd, 0);
+
+ if (status) {
+ printk(KERN_ERR
+ "myri10ge: %s: Failed to set flow control mode\n",
+ mgp->dev->name);
+ return status;
+ }
+ mgp->pause = pause;
+ return 0;
+}
+
+static void
+myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic)
+{
+ struct myri10ge_cmd cmd;
+ int status, ctl;
+
+ ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;
+ status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic);
+ if (status)
+ printk(KERN_ERR "myri10ge: %s: Failed to set promisc mode\n",
+ mgp->dev->name);
+}
+
+static int myri10ge_reset(struct myri10ge_priv *mgp)
+{
+ struct myri10ge_cmd cmd;
+ int status;
+ size_t bytes;
+ u32 len;
+
+ /* try to send a reset command to the card to see if it
+ * is alive */
+ memset(&cmd, 0, sizeof(cmd));
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
+ if (status != 0) {
+ dev_err(&mgp->pdev->dev, "failed reset\n");
+ return -ENXIO;
+ }
+
+ /* Now exchange information about interrupts */
+
+ bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry);
+ memset(mgp->rx_done.entry, 0, bytes);
+ cmd.data0 = (u32) bytes;
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
+ cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus);
+ cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus);
+ status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA, &cmd, 0);
+
+ status |=
+ myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
+ mgp->irq_claim = (__iomem u32 *) (mgp->sram + cmd.data0);
+ if (!mgp->msi_enabled) {
+ status |= myri10ge_send_cmd
+ (mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, &cmd, 0);
+ mgp->irq_deassert = (__iomem u32 *) (mgp->sram + cmd.data0);
+
+ }
+ status |= myri10ge_send_cmd
+ (mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0);
+ mgp->intr_coal_delay_ptr = (__iomem u32 *) (mgp->sram + cmd.data0);
+ if (status != 0) {
+ dev_err(&mgp->pdev->dev, "failed set interrupt parameters\n");
+ return status;
+ }
+ __raw_writel(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
+
+ /* Run a small DMA test.
+ * The magic multipliers to the length tell the firmware
+ * to do DMA read, write, or read+write tests. The
+ * results are returned in cmd.data0. The upper 16
+ * bits of the return is the number of transfers completed.
+ * The lower 16 bits is the time in 0.5us ticks that the
+ * transfers took to complete.
+ */
+
+ len = mgp->tx.boundary;
+
+ cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus);
+ cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus);
+ cmd.data2 = len * 0x10000;
+ status = myri10ge_send_cmd(mgp, MXGEFW_DMA_TEST, &cmd, 0);
+ if (status == 0)
+ mgp->read_dma = ((cmd.data0 >> 16) * len * 2) /
+ (cmd.data0 & 0xffff);
+ else
+ dev_warn(&mgp->pdev->dev, "DMA read benchmark failed: %d\n",
+ status);
+ cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus);
+ cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus);
+ cmd.data2 = len * 0x1;
+ status = myri10ge_send_cmd(mgp, MXGEFW_DMA_TEST, &cmd, 0);
+ if (status == 0)
+ mgp->write_dma = ((cmd.data0 >> 16) * len * 2) /
+ (cmd.data0 & 0xffff);
+ else
+ dev_warn(&mgp->pdev->dev, "DMA write benchmark failed: %d\n",
+ status);
+
+ cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus);
+ cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus);
+ cmd.data2 = len * 0x10001;
+ status = myri10ge_send_cmd(mgp, MXGEFW_DMA_TEST, &cmd, 0);
+ if (status == 0)
+ mgp->read_write_dma = ((cmd.data0 >> 16) * len * 2 * 2) /
+ (cmd.data0 & 0xffff);
+ else
+ dev_warn(&mgp->pdev->dev,
+ "DMA read/write benchmark failed: %d\n", status);
+
+ memset(mgp->rx_done.entry, 0, bytes);
+
+ /* reset mcp/driver shared state back to 0 */
+ mgp->tx.req = 0;
+ mgp->tx.done = 0;
+ mgp->tx.pkt_start = 0;
+ mgp->tx.pkt_done = 0;
+ mgp->rx_big.cnt = 0;
+ mgp->rx_small.cnt = 0;
+ mgp->rx_done.idx = 0;
+ mgp->rx_done.cnt = 0;
+ status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
+ myri10ge_change_promisc(mgp, 0, 0);
+ myri10ge_change_pause(mgp, mgp->pause);
+ return status;
+}
+
+static inline void
+myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
+ struct mcp_kreq_ether_recv *src)
+{
+ u32 low;
+
+ low = src->addr_low;
+ src->addr_low = DMA_32BIT_MASK;
+ myri10ge_pio_copy(dst, src, 8 * sizeof(*src));
+ mb();
+ src->addr_low = low;
+ __raw_writel(low, &dst->addr_low);
+ mb();
+}
+
+/*
+ * Set of routines to get a new receive buffer. Any buffer which
+ * crosses a 4KB boundary must start on a 4KB boundary due to PCIe
+ * wdma restrictions. We also try to align any smaller allocation to
+ * at least a 16 byte boundary for efficiency. We assume the linux
+ * memory allocator works by powers of 2, and will not return memory
+ * smaller than 2KB which crosses a 4KB boundary. If it does, we fall
+ * back to allocating 2x as much space as required.
+ *
+ * We intend to replace large (>4KB) skb allocations by using
+ * pages directly and building a fraglist in the near future.
+ */
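+
+/* Illustration of the rule above (hypothetical addresses): a 2KB
+ * buffer starting at offset 0x800 within a page ends at 0xfff and
+ * stays inside one 4KB page, so it is acceptable; the same 2KB buffer
+ * starting at offset 0xc00 would spill into the next page, and so must
+ * either begin on a 4KB boundary or be re-allocated via the "safe"
+ * path below.
+ */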
+
+static inline struct sk_buff *myri10ge_alloc_big(int bytes)
+{
+ struct sk_buff *skb;
+ unsigned long data, roundup;
+
+ skb = dev_alloc_skb(bytes + 4096 + MXGEFW_PAD);
+ if (skb == NULL)
+ return NULL;
+
+ /* Correct skb->truesize so that socket buffer
+ * accounting is not confused by the rounding we must
+ * do to satisfy alignment constraints.
+ */
+ skb->truesize -= 4096;
+
+ data = (unsigned long)(skb->data);
+ roundup = (-data) & (4095);
+ skb_reserve(skb, roundup);
+ return skb;
+}
+
+/* Allocate 2x as much space as required and use whichever portion
+ * does not cross a 4KB boundary */
+static inline struct sk_buff *myri10ge_alloc_small_safe(unsigned int bytes)
+{
+ struct sk_buff *skb;
+ unsigned long data, boundary;
+
+ skb = dev_alloc_skb(2 * (bytes + MXGEFW_PAD) - 1);
+ if (unlikely(skb == NULL))
+ return NULL;
+
+ /* Correct skb->truesize so that socket buffer
+ * accounting is not confused by the rounding we must
+ * do to satisfy alignment constraints.
+ */
+ skb->truesize -= bytes + MXGEFW_PAD;
+
+ data = (unsigned long)(skb->data);
+ boundary = (data + 4095UL) & ~4095UL;
+ if ((boundary - data) >= (bytes + MXGEFW_PAD))
+ return skb;
+
+ skb_reserve(skb, boundary - data);
+ return skb;
+}
+
+/* Allocate just enough space, and verify that the allocated
+ * space does not cross a 4KB boundary */
+static inline struct sk_buff *myri10ge_alloc_small(int bytes)
+{
+ struct sk_buff *skb;
+ unsigned long roundup, data, end;
+
+ skb = dev_alloc_skb(bytes + 16 + MXGEFW_PAD);
+ if (unlikely(skb == NULL))
+ return NULL;
+
+ /* Round allocated buffer to 16 byte boundary */
+ data = (unsigned long)(skb->data);
+ roundup = (-data) & 15UL;
+ skb_reserve(skb, roundup);
+ /* Verify that the data buffer does not cross a page boundary */
+ data = (unsigned long)(skb->data);
+ end = data + bytes + MXGEFW_PAD - 1;
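+ /* Example (hypothetical addresses): with data = 0x12f80 and
+ * bytes + MXGEFW_PAD = 0x100, end = 0x1307f lands in the next 4KB
+ * page while data is not page aligned, so the check below fires and
+ * we fall back to myri10ge_alloc_small_safe().
+ */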
+ if (unlikely(((end >> 12) != (data >> 12)) && (data & 4095UL))) {
+ printk(KERN_NOTICE
+ "myri10ge_alloc_small: small skb crossed 4KB boundary\n");
+ myri10ge_skb_cross_4k = 1;
+ dev_kfree_skb_any(skb);
+ skb = myri10ge_alloc_small_safe(bytes);
+ }
+ return skb;
+}
+
+static inline int
+myri10ge_getbuf(struct myri10ge_rx_buf *rx, struct pci_dev *pdev, int bytes,
+ int idx)
+{
+ struct sk_buff *skb;
+ dma_addr_t bus;
+ int len, retval = 0;
+
+ bytes += VLAN_HLEN; /* account for 802.1q vlan tag */
+
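+ /* Sizing note (illustrative, assuming the default settings): with a
+ * 1500-byte MTU the small ring uses bytes = 128 + VLAN_HLEN, which
+ * takes the myri10ge_alloc_small() path, while a big-ring buffer for
+ * a jumbo MTU exceeds 4096 - 16 bytes and is allocated with
+ * myri10ge_alloc_big().
+ */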
+ if ((bytes + MXGEFW_PAD) > (4096 - 16) /* linux overhead */ )
+ skb = myri10ge_alloc_big(bytes);
+ else if (myri10ge_skb_cross_4k)
+ skb = myri10ge_alloc_small_safe(bytes);
+ else
+ skb = myri10ge_alloc_small(bytes);
+
+ if (unlikely(skb == NULL)) {
+ rx->alloc_fail++;
+ retval = -ENOBUFS;
+ goto done;
+ }
+
+ /* set len so that it only covers the area we
+ * need mapped for DMA */
+ len = bytes + MXGEFW_PAD;
+
+ bus = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
+ rx->info[idx].skb = skb;
+ pci_unmap_addr_set(&rx->info[idx], bus, bus);
+ pci_unmap_len_set(&rx->info[idx], len, len);
+ rx->shadow[idx].addr_low = htonl(MYRI10GE_LOWPART_TO_U32(bus));
+ rx->shadow[idx].addr_high = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
+
+done:
+ /* copy 8 descriptors (64-bytes) to the mcp at a time */
+ if ((idx & 7) == 7) {
+ if (rx->wc_fifo == NULL)
+ myri10ge_submit_8rx(&rx->lanai[idx - 7],
+ &rx->shadow[idx - 7]);
+ else {
+ mb();
+ myri10ge_pio_copy(rx->wc_fifo,
+ &rx->shadow[idx - 7], 64);
+ }
+ }
+ return retval;
+}
+
+static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, u16 hw_csum)
+{
+ struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);
+
+ if ((skb->protocol == ntohs(ETH_P_8021Q)) &&
+ (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
+ vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
+ skb->csum = hw_csum;
+ skb->ip_summed = CHECKSUM_HW;
+ }
+}
+
+static inline unsigned long
+myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
+ int bytes, int len, int csum)
+{
+ dma_addr_t bus;
+ struct sk_buff *skb;
+ int idx, unmap_len;
+
+ idx = rx->cnt & rx->mask;
+ rx->cnt++;
+
+ /* save a pointer to the received skb */
+ skb = rx->info[idx].skb;
+ bus = pci_unmap_addr(&rx->info[idx], bus);
+ unmap_len = pci_unmap_len(&rx->info[idx], len);
+
+ /* try to replace the received skb */
+ if (myri10ge_getbuf(rx, mgp->pdev, bytes, idx)) {
+ /* drop the frame -- the old skbuff is recycled */
+ mgp->stats.rx_dropped += 1;
+ return 0;
+ }
+
+ /* unmap the recvd skb */
+ pci_unmap_single(mgp->pdev, bus, unmap_len, PCI_DMA_FROMDEVICE);
+
+ /* the mcp implicitly skips the first MXGEFW_PAD bytes so that the
+ * packet ends up properly aligned */
+ skb_reserve(skb, MXGEFW_PAD);
+
+ /* set the length of the frame */
+ skb_put(skb, len);
+
+ skb->protocol = eth_type_trans(skb, mgp->dev);
+ skb->dev = mgp->dev;
+ if (mgp->csum_flag) {
+ if ((skb->protocol == ntohs(ETH_P_IP)) ||
+ (skb->protocol == ntohs(ETH_P_IPV6))) {
+ skb->csum = ntohs((u16) csum);
+ skb->ip_summed = CHECKSUM_HW;
+ } else
+ myri10ge_vlan_ip_csum(skb, ntohs((u16) csum));
+ }
+
+ netif_receive_skb(skb);
+ mgp->dev->last_rx = jiffies;
+ return 1;
+}
+
+static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index)
+{
+ struct pci_dev *pdev = mgp->pdev;
+ struct myri10ge_tx_buf *tx = &mgp->tx;
+ struct sk_buff *skb;
+ int idx, len;
+ int limit = 0;
+
+ while (tx->pkt_done != mcp_index) {
+ idx = tx->done & tx->mask;
+ skb = tx->info[idx].skb;
+
+ /* Mark as free */
+ tx->info[idx].skb = NULL;
+ if (tx->info[idx].last) {
+ tx->pkt_done++;
+ tx->info[idx].last = 0;
+ }
+ tx->done++;
+ len = pci_unmap_len(&tx->info[idx], len);
+ pci_unmap_len_set(&tx->info[idx], len, 0);
+ if (skb) {
+ mgp->stats.tx_bytes += skb->len;
+ mgp->stats.tx_packets++;
+ dev_kfree_skb_irq(skb);
+ if (len)
+ pci_unmap_single(pdev,
+ pci_unmap_addr(&tx->info[idx],
+ bus), len,
+ PCI_DMA_TODEVICE);
+ } else {
+ if (len)
+ pci_unmap_page(pdev,
+ pci_unmap_addr(&tx->info[idx],
+ bus), len,
+ PCI_DMA_TODEVICE);
+ }
+
+ /* limit potential for livelock by only handling
+ * 2 full tx rings per call */
+ if (unlikely(++limit > 2 * tx->mask))
+ break;
+ }
+ /* start the queue if we've stopped it */
+ if (netif_queue_stopped(mgp->dev)
+ && tx->req - tx->done < (tx->mask >> 1)) {
+ mgp->wake_queue++;
+ netif_wake_queue(mgp->dev);
+ }
+}
+
+static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit)
+{
+ struct myri10ge_rx_done *rx_done = &mgp->rx_done;
+ unsigned long rx_bytes = 0;
+ unsigned long rx_packets = 0;
+ unsigned long rx_ok;
+
+ int idx = rx_done->idx;
+ int cnt = rx_done->cnt;
+ u16 length;
+ u16 checksum;
+
+ while (rx_done->entry[idx].length != 0 && *limit != 0) {
+ length = ntohs(rx_done->entry[idx].length);
+ rx_done->entry[idx].length = 0;
+ checksum = ntohs(rx_done->entry[idx].checksum);
+ if (length <= mgp->small_bytes)
+ rx_ok = myri10ge_rx_done(mgp, &mgp->rx_small,
+ mgp->small_bytes,
+ length, checksum);
+ else
+ rx_ok = myri10ge_rx_done(mgp, &mgp->rx_big,
+ mgp->dev->mtu + ETH_HLEN,
+ length, checksum);
+ rx_packets += rx_ok;
+ rx_bytes += rx_ok * (unsigned long)length;
+ cnt++;
+ idx = cnt & (myri10ge_max_intr_slots - 1);
+
+ /* limit potential for livelock by only handling a
+ * limited number of frames. */
+ (*limit)--;
+ }
+ rx_done->idx = idx;
+ rx_done->cnt = cnt;
+ mgp->stats.rx_packets += rx_packets;
+ mgp->stats.rx_bytes += rx_bytes;
+}
+
+static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
+{
+ struct mcp_irq_data *stats = mgp->fw_stats;
+
+ if (unlikely(stats->stats_updated)) {
+ if (mgp->link_state != stats->link_up) {
+ mgp->link_state = stats->link_up;
+ if (mgp->link_state) {
+ printk(KERN_INFO "myri10ge: %s: link up\n",
+ mgp->dev->name);
+ netif_carrier_on(mgp->dev);
+ } else {
+ printk(KERN_INFO "myri10ge: %s: link down\n",
+ mgp->dev->name);
+ netif_carrier_off(mgp->dev);
+ }
+ }
+ if (mgp->rdma_tags_available !=
+ ntohl(mgp->fw_stats->rdma_tags_available)) {
+ mgp->rdma_tags_available =
+ ntohl(mgp->fw_stats->rdma_tags_available);
+ printk(KERN_WARNING "myri10ge: %s: RDMA timed out! "
+ "%d tags left\n", mgp->dev->name,
+ mgp->rdma_tags_available);
+ }
+ mgp->down_cnt += stats->link_down;
+ if (stats->link_down)
+ wake_up(&mgp->down_wq);
+ }
+}
+
+static int myri10ge_poll(struct net_device *netdev, int *budget)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+ struct myri10ge_rx_done *rx_done = &mgp->rx_done;
+ int limit, orig_limit, work_done;
+
+ /* process as many rx events as NAPI will allow */
+ limit = min(*budget, netdev->quota);
+ orig_limit = limit;
+ myri10ge_clean_rx_done(mgp, &limit);
+ work_done = orig_limit - limit;
+ *budget -= work_done;
+ netdev->quota -= work_done;
+
+ if (rx_done->entry[rx_done->idx].length == 0 || !netif_running(netdev)) {
+ netif_rx_complete(netdev);
+ __raw_writel(htonl(3), mgp->irq_claim);
+ return 0;
+ }
+ return 1;
+}
+
+static irqreturn_t myri10ge_intr(int irq, void *arg, struct pt_regs *regs)
+{
+ struct myri10ge_priv *mgp = arg;
+ struct mcp_irq_data *stats = mgp->fw_stats;
+ struct myri10ge_tx_buf *tx = &mgp->tx;
+ u32 send_done_count;
+ int i;
+
+ /* make sure it is our IRQ, and that the DMA has finished */
+ if (unlikely(!stats->valid))
+ return (IRQ_NONE);
+
+ /* low bit indicates receives are present, so schedule
+ * napi poll handler */
+ if (stats->valid & 1)
+ netif_rx_schedule(mgp->dev);
+
+ if (!mgp->msi_enabled) {
+ __raw_writel(0, mgp->irq_deassert);
+ if (!myri10ge_deassert_wait)
+ stats->valid = 0;
+ mb();
+ } else
+ stats->valid = 0;
+
+ /* Wait for IRQ line to go low, if using INTx */
+ i = 0;
+ while (1) {
+ i++;
+ /* check for transmit completes and receives */
+ send_done_count = ntohl(stats->send_done_count);
+ if (send_done_count != tx->pkt_done)
+ myri10ge_tx_done(mgp, (int)send_done_count);
+ if (unlikely(i > myri10ge_max_irq_loops)) {
+ printk(KERN_WARNING "myri10ge: %s: irq stuck?\n",
+ mgp->dev->name);
+ stats->valid = 0;
+ schedule_work(&mgp->watchdog_work);
+ }
+ if (likely(stats->valid == 0))
+ break;
+ cpu_relax();
+ barrier();
+ }
+
+ myri10ge_check_statblock(mgp);
+
+ __raw_writel(htonl(3), mgp->irq_claim + 1);
+ return (IRQ_HANDLED);
+}
+
+static int
+myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->speed = SPEED_10000;
+ cmd->duplex = DUPLEX_FULL;
+ return 0;
+}
+
+static void
+myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+
+ strlcpy(info->driver, "myri10ge", sizeof(info->driver));
+ strlcpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
+ strlcpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version));
+ strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info));
+}
+
+static int
+myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+ coal->rx_coalesce_usecs = mgp->intr_coal_delay;
+ return 0;
+}
+
+static int
+myri10ge_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+
+ mgp->intr_coal_delay = coal->rx_coalesce_usecs;
+ __raw_writel(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
+ return 0;
+}
+
+static void
+myri10ge_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+
+ pause->autoneg = 0;
+ pause->rx_pause = mgp->pause;
+ pause->tx_pause = mgp->pause;
+}
+
+static int
+myri10ge_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+
+ if (pause->tx_pause != mgp->pause)
+ return myri10ge_change_pause(mgp, pause->tx_pause);
+ if (pause->rx_pause != mgp->pause)
+ return myri10ge_change_pause(mgp, pause->tx_pause);
+ if (pause->autoneg != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static void
+myri10ge_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+
+ ring->rx_mini_max_pending = mgp->rx_small.mask + 1;
+ ring->rx_max_pending = mgp->rx_big.mask + 1;
+ ring->rx_jumbo_max_pending = 0;
+ ring->tx_max_pending = mgp->rx_small.mask + 1;
+ ring->rx_mini_pending = ring->rx_mini_max_pending;
+ ring->rx_pending = ring->rx_max_pending;
+ ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
+ ring->tx_pending = ring->tx_max_pending;
+}
+
+static u32 myri10ge_get_rx_csum(struct net_device *netdev)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+ if (mgp->csum_flag)
+ return 1;
+ else
+ return 0;
+}
+
+static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+ if (csum_enabled)
+ mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
+ else
+ mgp->csum_flag = 0;
+ return 0;
+}
+
+static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
+ "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
+ "rx_length_errors", "rx_over_errors", "rx_crc_errors",
+ "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
+ "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
+ "tx_heartbeat_errors", "tx_window_errors",
+ /* device-specific stats */
+ "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
+ "serial_number", "tx_pkt_start", "tx_pkt_done",
+ "tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt",
+ "wake_queue", "stop_queue", "watchdog_resets", "tx_linearized",
+ "link_up", "dropped_link_overflow", "dropped_link_error_or_filtered",
+ "dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
+ "dropped_no_big_buffer"
+};
+
+#define MYRI10GE_NET_STATS_LEN 21
+#define MYRI10GE_STATS_LEN (sizeof(myri10ge_gstrings_stats) / ETH_GSTRING_LEN)
+
+static void
+myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, *myri10ge_gstrings_stats,
+ sizeof(myri10ge_gstrings_stats));
+ break;
+ }
+}
+
+static int myri10ge_get_stats_count(struct net_device *netdev)
+{
+ return MYRI10GE_STATS_LEN;
+}
+
+static void
+myri10ge_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 * data)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
+ data[i] = ((unsigned long *)&mgp->stats)[i];
+
+ data[i++] = (unsigned int)mgp->read_dma;
+ data[i++] = (unsigned int)mgp->write_dma;
+ data[i++] = (unsigned int)mgp->read_write_dma;
+ data[i++] = (unsigned int)mgp->serial_number;
+ data[i++] = (unsigned int)mgp->tx.pkt_start;
+ data[i++] = (unsigned int)mgp->tx.pkt_done;
+ data[i++] = (unsigned int)mgp->tx.req;
+ data[i++] = (unsigned int)mgp->tx.done;
+ data[i++] = (unsigned int)mgp->rx_small.cnt;
+ data[i++] = (unsigned int)mgp->rx_big.cnt;
+ data[i++] = (unsigned int)mgp->wake_queue;
+ data[i++] = (unsigned int)mgp->stop_queue;
+ data[i++] = (unsigned int)mgp->watchdog_resets;
+ data[i++] = (unsigned int)mgp->tx_linearized;
+ data[i++] = (unsigned int)ntohl(mgp->fw_stats->link_up);
+ data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_link_overflow);
+ data[i++] =
+ (unsigned int)ntohl(mgp->fw_stats->dropped_link_error_or_filtered);
+ data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_runt);
+ data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_overrun);
+ data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_small_buffer);
+ data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_big_buffer);
+}
+
+static struct ethtool_ops myri10ge_ethtool_ops = {
+ .get_settings = myri10ge_get_settings,
+ .get_drvinfo = myri10ge_get_drvinfo,
+ .get_coalesce = myri10ge_get_coalesce,
+ .set_coalesce = myri10ge_set_coalesce,
+ .get_pauseparam = myri10ge_get_pauseparam,
+ .set_pauseparam = myri10ge_set_pauseparam,
+ .get_ringparam = myri10ge_get_ringparam,
+ .get_rx_csum = myri10ge_get_rx_csum,
+ .set_rx_csum = myri10ge_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_hw_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+#ifdef NETIF_F_TSO
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ethtool_op_set_tso,
+#endif
+ .get_strings = myri10ge_get_strings,
+ .get_stats_count = myri10ge_get_stats_count,
+ .get_ethtool_stats = myri10ge_get_ethtool_stats
+};
+
+static int myri10ge_allocate_rings(struct net_device *dev)
+{
+ struct myri10ge_priv *mgp;
+ struct myri10ge_cmd cmd;
+ int tx_ring_size, rx_ring_size;
+ int tx_ring_entries, rx_ring_entries;
+ int i, status;
+ size_t bytes;
+
+ mgp = netdev_priv(dev);
+
+ /* get ring sizes */
+
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
+ tx_ring_size = cmd.data0;
+ status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
+ rx_ring_size = cmd.data0;
+
+ tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send);
+ rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr);
+ mgp->tx.mask = tx_ring_entries - 1;
+ mgp->rx_small.mask = mgp->rx_big.mask = rx_ring_entries - 1;
+
+ /* allocate the host shadow rings */
+
+ bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4)
+ * sizeof(*mgp->tx.req_list);
+ mgp->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
+ if (mgp->tx.req_bytes == NULL)
+ goto abort_with_nothing;
+
+ /* ensure req_list entries are aligned to 8 bytes */
+ mgp->tx.req_list = (struct mcp_kreq_ether_send *)
+ ALIGN((unsigned long)mgp->tx.req_bytes, 8);
+
+ bytes = rx_ring_entries * sizeof(*mgp->rx_small.shadow);
+ mgp->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
+ if (mgp->rx_small.shadow == NULL)
+ goto abort_with_tx_req_bytes;
+
+ bytes = rx_ring_entries * sizeof(*mgp->rx_big.shadow);
+ mgp->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
+ if (mgp->rx_big.shadow == NULL)
+ goto abort_with_rx_small_shadow;
+
+ /* allocate the host info rings */
+
+ bytes = tx_ring_entries * sizeof(*mgp->tx.info);
+ mgp->tx.info = kzalloc(bytes, GFP_KERNEL);
+ if (mgp->tx.info == NULL)
+ goto abort_with_rx_big_shadow;
+
+ bytes = rx_ring_entries * sizeof(*mgp->rx_small.info);
+ mgp->rx_small.info = kzalloc(bytes, GFP_KERNEL);
+ if (mgp->rx_small.info == NULL)
+ goto abort_with_tx_info;
+
+ bytes = rx_ring_entries * sizeof(*mgp->rx_big.info);
+ mgp->rx_big.info = kzalloc(bytes, GFP_KERNEL);
+ if (mgp->rx_big.info == NULL)
+ goto abort_with_rx_small_info;
+
+ /* Fill the receive rings */
+
+ for (i = 0; i <= mgp->rx_small.mask; i++) {
+ status = myri10ge_getbuf(&mgp->rx_small, mgp->pdev,
+ mgp->small_bytes, i);
+ if (status) {
+ printk(KERN_ERR
+ "myri10ge: %s: alloced only %d small bufs\n",
+ dev->name, i);
+ goto abort_with_rx_small_ring;
+ }
+ }
+
+ for (i = 0; i <= mgp->rx_big.mask; i++) {
+ status =
+ myri10ge_getbuf(&mgp->rx_big, mgp->pdev,
+ dev->mtu + ETH_HLEN, i);
+ if (status) {
+ printk(KERN_ERR
+ "myri10ge: %s: alloced only %d big bufs\n",
+ dev->name, i);
+ goto abort_with_rx_big_ring;
+ }
+ }
+
+ return 0;
+
+abort_with_rx_big_ring:
+ for (i = 0; i <= mgp->rx_big.mask; i++) {
+ if (mgp->rx_big.info[i].skb != NULL)
+ dev_kfree_skb_any(mgp->rx_big.info[i].skb);
+ if (pci_unmap_len(&mgp->rx_big.info[i], len))
+ pci_unmap_single(mgp->pdev,
+ pci_unmap_addr(&mgp->rx_big.info[i],
+ bus),
+ pci_unmap_len(&mgp->rx_big.info[i],
+ len),
+ PCI_DMA_FROMDEVICE);
+ }
+
+abort_with_rx_small_ring:
+ for (i = 0; i <= mgp->rx_small.mask; i++) {
+ if (mgp->rx_small.info[i].skb != NULL)
+ dev_kfree_skb_any(mgp->rx_small.info[i].skb);
+ if (pci_unmap_len(&mgp->rx_small.info[i], len))
+ pci_unmap_single(mgp->pdev,
+ pci_unmap_addr(&mgp->rx_small.info[i],
+ bus),
+ pci_unmap_len(&mgp->rx_small.info[i],
+ len),
+ PCI_DMA_FROMDEVICE);
+ }
+ kfree(mgp->rx_big.info);
+
+abort_with_rx_small_info:
+ kfree(mgp->rx_small.info);
+
+abort_with_tx_info:
+ kfree(mgp->tx.info);
+
+abort_with_rx_big_shadow:
+ kfree(mgp->rx_big.shadow);
+
+abort_with_rx_small_shadow:
+ kfree(mgp->rx_small.shadow);
+
+abort_with_tx_req_bytes:
+ kfree(mgp->tx.req_bytes);
+ mgp->tx.req_bytes = NULL;
+ mgp->tx.req_list = NULL;
+
+abort_with_nothing:
+ return status;
+}
+
+static void myri10ge_free_rings(struct net_device *dev)
+{
+ struct myri10ge_priv *mgp;
+ struct sk_buff *skb;
+ struct myri10ge_tx_buf *tx;
+ int i, len, idx;
+
+ mgp = netdev_priv(dev);
+
+ for (i = 0; i <= mgp->rx_big.mask; i++) {
+ if (mgp->rx_big.info[i].skb != NULL)
+ dev_kfree_skb_any(mgp->rx_big.info[i].skb);
+ if (pci_unmap_len(&mgp->rx_big.info[i], len))
+ pci_unmap_single(mgp->pdev,
+ pci_unmap_addr(&mgp->rx_big.info[i],
+ bus),
+ pci_unmap_len(&mgp->rx_big.info[i],
+ len),
+ PCI_DMA_FROMDEVICE);
+ }
+
+ for (i = 0; i <= mgp->rx_small.mask; i++) {
+ if (mgp->rx_small.info[i].skb != NULL)
+ dev_kfree_skb_any(mgp->rx_small.info[i].skb);
+ if (pci_unmap_len(&mgp->rx_small.info[i], len))
+ pci_unmap_single(mgp->pdev,
+ pci_unmap_addr(&mgp->rx_small.info[i],
+ bus),
+ pci_unmap_len(&mgp->rx_small.info[i],
+ len),
+ PCI_DMA_FROMDEVICE);
+ }
+
+ tx = &mgp->tx;
+ while (tx->done != tx->req) {
+ idx = tx->done & tx->mask;
+ skb = tx->info[idx].skb;
+
+ /* Mark as free */
+ tx->info[idx].skb = NULL;
+ tx->done++;
+ len = pci_unmap_len(&tx->info[idx], len);
+ pci_unmap_len_set(&tx->info[idx], len, 0);
+ if (skb) {
+ mgp->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ if (len)
+ pci_unmap_single(mgp->pdev,
+ pci_unmap_addr(&tx->info[idx],
+ bus), len,
+ PCI_DMA_TODEVICE);
+ } else {
+ if (len)
+ pci_unmap_page(mgp->pdev,
+ pci_unmap_addr(&tx->info[idx],
+ bus), len,
+ PCI_DMA_TODEVICE);
+ }
+ }
+ kfree(mgp->rx_big.info);
+
+ kfree(mgp->rx_small.info);
+
+ kfree(mgp->tx.info);
+
+ kfree(mgp->rx_big.shadow);
+
+ kfree(mgp->rx_small.shadow);
+
+ kfree(mgp->tx.req_bytes);
+ mgp->tx.req_bytes = NULL;
+ mgp->tx.req_list = NULL;
+}
+
+static int myri10ge_open(struct net_device *dev)
+{
+ struct myri10ge_priv *mgp;
+ struct myri10ge_cmd cmd;
+ int status, big_pow2;
+
+ mgp = netdev_priv(dev);
+
+ if (mgp->running != MYRI10GE_ETH_STOPPED)
+ return -EBUSY;
+
+ mgp->running = MYRI10GE_ETH_STARTING;
+ status = myri10ge_reset(mgp);
+ if (status != 0) {
+ printk(KERN_ERR "myri10ge: %s: failed reset\n", dev->name);
+ mgp->running = MYRI10GE_ETH_STOPPED;
+ return -ENXIO;
+ }
+
+ /* decide what small buffer size to use. For good TCP rx
+ * performance, it is important to not receive 1514 byte
+ * frames into jumbo buffers, as it confuses the socket buffer
+ * accounting code, leading to drops and erratic performance.
+ */
+
+ if (dev->mtu <= ETH_DATA_LEN)
+ mgp->small_bytes = 128; /* enough for a TCP header */
+ else
+ mgp->small_bytes = ETH_FRAME_LEN; /* enough for an ETH_DATA_LEN frame */
+
+ /* Override the small buffer size? */
+ if (myri10ge_small_bytes > 0)
+ mgp->small_bytes = myri10ge_small_bytes;
+
+ /* If the user sets an obscenely small MTU, adjust the small
+ * bytes down to nearly nothing */
+ if (mgp->small_bytes >= (dev->mtu + ETH_HLEN))
+ mgp->small_bytes = 64;
+
+ /* get the lanai pointers to the send and receive rings */
+
+ status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0);
+ mgp->tx.lanai =
+ (struct mcp_kreq_ether_send __iomem *)(mgp->sram + cmd.data0);
+
+ status |=
+ myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd, 0);
+ mgp->rx_small.lanai =
+ (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0);
+
+ status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
+ mgp->rx_big.lanai =
+ (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0);
+
+ if (status != 0) {
+ printk(KERN_ERR
+ "myri10ge: %s: failed to get ring sizes or locations\n",
+ dev->name);
+ mgp->running = MYRI10GE_ETH_STOPPED;
+ return -ENXIO;
+ }
+
+ if (mgp->mtrr >= 0) {
+ mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + 0x200000;
+ mgp->rx_small.wc_fifo = (u8 __iomem *) mgp->sram + 0x300000;
+ mgp->rx_big.wc_fifo = (u8 __iomem *) mgp->sram + 0x340000;
+ } else {
+ mgp->tx.wc_fifo = NULL;
+ mgp->rx_small.wc_fifo = NULL;
+ mgp->rx_big.wc_fifo = NULL;
+ }
+
+ status = myri10ge_allocate_rings(dev);
+ if (status != 0)
+ goto abort_with_nothing;
+
+ /* Firmware needs the big buffer size as a power of 2. Lie and
+ * tell it the buffer is larger, because we only use one
+ * buffer per packet, and the MTU will prevent overruns.
+ */
+ big_pow2 = dev->mtu + ETH_HLEN + MXGEFW_PAD;
+ while ((big_pow2 & (big_pow2 - 1)) != 0)
+ big_pow2++;
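+ /* e.g. a 1500-byte MTU gives 1500 + ETH_HLEN + MXGEFW_PAD = 1516,
+ * which the loop above rounds up to the next power of 2, 2048 */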
+
+ /* now give firmware buffers sizes, and MTU */
+ cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_MTU, &cmd, 0);
+ cmd.data0 = mgp->small_bytes;
+ status |=
+ myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd, 0);
+ cmd.data0 = big_pow2;
+ status |=
+ myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0);
+ if (status) {
+ printk(KERN_ERR "myri10ge: %s: Couldn't set buffer sizes\n",
+ dev->name);
+ goto abort_with_rings;
+ }
+
+ cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->fw_stats_bus);
+ cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->fw_stats_bus);
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA, &cmd, 0);
+ if (status) {
+ printk(KERN_ERR "myri10ge: %s: Couldn't set stats DMA\n",
+ dev->name);
+ goto abort_with_rings;
+ }
+
+ mgp->link_state = -1;
+ mgp->rdma_tags_available = 15;
+
+ netif_poll_enable(mgp->dev); /* must happen prior to any irq */
+
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
+ if (status) {
+ printk(KERN_ERR "myri10ge: %s: Couldn't bring up link\n",
+ dev->name);
+ goto abort_with_rings;
+ }
+
+ mgp->wake_queue = 0;
+ mgp->stop_queue = 0;
+ mgp->running = MYRI10GE_ETH_RUNNING;
+ mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ;
+ add_timer(&mgp->watchdog_timer);
+ netif_wake_queue(dev);
+ return 0;
+
+abort_with_rings:
+ myri10ge_free_rings(dev);
+
+abort_with_nothing:
+ mgp->running = MYRI10GE_ETH_STOPPED;
+ return -ENOMEM;
+}
+
+static int myri10ge_close(struct net_device *dev)
+{
+ struct myri10ge_priv *mgp;
+ struct myri10ge_cmd cmd;
+ int status, old_down_cnt;
+
+ mgp = netdev_priv(dev);
+
+ if (mgp->running != MYRI10GE_ETH_RUNNING)
+ return 0;
+
+ if (mgp->tx.req_bytes == NULL)
+ return 0;
+
+ del_timer_sync(&mgp->watchdog_timer);
+ mgp->running = MYRI10GE_ETH_STOPPING;
+ netif_poll_disable(mgp->dev);
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
+ old_down_cnt = mgp->down_cnt;
+ mb();
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0);
+ if (status)
+ printk(KERN_ERR "myri10ge: %s: Couldn't bring down link\n",
+ dev->name);
+
+ wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt, HZ);
+ if (old_down_cnt == mgp->down_cnt)
+ printk(KERN_ERR "myri10ge: %s never got down irq\n", dev->name);
+
+ netif_tx_disable(dev);
+
+ myri10ge_free_rings(dev);
+
+ mgp->running = MYRI10GE_ETH_STOPPED;
+ return 0;
+}
+
+/* copy an array of struct mcp_kreq_ether_send's to the mcp. Copy
+ * backwards one at a time and handle ring wraps */
+
+static inline void
+myri10ge_submit_req_backwards(struct myri10ge_tx_buf *tx,
+ struct mcp_kreq_ether_send *src, int cnt)
+{
+ int idx, starting_slot;
+ starting_slot = tx->req;
+ while (cnt > 1) {
+ cnt--;
+ idx = (starting_slot + cnt) & tx->mask;
+ myri10ge_pio_copy(&tx->lanai[idx], &src[cnt], sizeof(*src));
+ mb();
+ }
+}
+
+/*
+ * copy an array of struct mcp_kreq_ether_send's to the mcp. Copy
+ * at most 32 bytes at a time, so as to avoid involving the software
+ * pio handler in the nic. We re-write the first segment's flags
+ * to mark them valid only after writing the entire chain.
+ */
+
+static inline void
+myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
+ int cnt)
+{
+ int idx, i;
+ struct mcp_kreq_ether_send __iomem *dstp, *dst;
+ struct mcp_kreq_ether_send *srcp;
+ u8 last_flags;
+
+ idx = tx->req & tx->mask;
+
+ last_flags = src->flags;
+ src->flags = 0;
+ mb();
+ dst = dstp = &tx->lanai[idx];
+ srcp = src;
+
+ if ((idx + cnt) < tx->mask) {
+ for (i = 0; i < (cnt - 1); i += 2) {
+ myri10ge_pio_copy(dstp, srcp, 2 * sizeof(*src));
+ mb(); /* force write every 32 bytes */
+ srcp += 2;
+ dstp += 2;
+ }
+ } else {
+ /* submit all but the first request, and ensure
+ * that it is submitted below */
+ myri10ge_submit_req_backwards(tx, src, cnt);
+ i = 0;
+ }
+ if (i < cnt) {
+ /* submit the first request */
+ myri10ge_pio_copy(dstp, srcp, sizeof(*src));
+ mb(); /* barrier before setting valid flag */
+ }
+
+ /* re-write the last 32-bits with the valid flags */
+ src->flags = last_flags;
+ __raw_writel(*((u32 *) src + 3), (u32 __iomem *) dst + 3);
+ tx->req += cnt;
+ mb();
+}
+
+static inline void
+myri10ge_submit_req_wc(struct myri10ge_tx_buf *tx,
+ struct mcp_kreq_ether_send *src, int cnt)
+{
+ tx->req += cnt;
+ mb();
+ while (cnt >= 4) {
+ myri10ge_pio_copy(tx->wc_fifo, src, 64);
+ mb();
+ src += 4;
+ cnt -= 4;
+ }
+ if (cnt > 0) {
+ /* pad it to 64 bytes. The src is 64 bytes bigger than it
+ * needs to be so that we don't overrun it */
+ myri10ge_pio_copy(tx->wc_fifo + (cnt << 18), src, 64);
+ mb();
+ }
+}
+
+/*
+ * Transmit a packet. We need to split the packet so that a single
+ * segment does not cross myri10ge->tx.boundary, so this makes segment
+ * counting tricky.  Rather than trying to count segments up front,
+ * we simply give up if too few send descriptors are currently
+ * available to hold a reasonably fragmented packet.  If we run out
+ * of descriptors while preparing a packet for DMA, we just linearize
+ * it and try again.
+ */
+
+static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct myri10ge_priv *mgp = netdev_priv(dev);
+ struct mcp_kreq_ether_send *req;
+ struct myri10ge_tx_buf *tx = &mgp->tx;
+ struct skb_frag_struct *frag;
+ dma_addr_t bus;
+ u32 low, high_swapped;
+ unsigned int len;
+ int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
+ u16 pseudo_hdr_offset, cksum_offset;
+ int cum_len, seglen, boundary, rdma_count;
+ u8 flags, odd_flag;
+
+again:
+ req = tx->req_list;
+ avail = tx->mask - 1 - (tx->req - tx->done);
+
+ mss = 0;
+ max_segments = MXGEFW_MAX_SEND_DESC;
+
+#ifdef NETIF_F_TSO
+ if (skb->len > (dev->mtu + ETH_HLEN)) {
+ mss = skb_shinfo(skb)->tso_size;
+ if (mss != 0)
+ max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
+ }
+#endif /*NETIF_F_TSO */
+
+ if ((unlikely(avail < max_segments))) {
+ /* we are out of transmit resources */
+ mgp->stop_queue++;
+ netif_stop_queue(dev);
+ return 1;
+ }
+
+ /* Setup checksum offloading, if needed */
+ cksum_offset = 0;
+ pseudo_hdr_offset = 0;
+ odd_flag = 0;
+ flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
+ if (likely(skb->ip_summed == CHECKSUM_HW)) {
+ cksum_offset = (skb->h.raw - skb->data);
+ pseudo_hdr_offset = (skb->h.raw + skb->csum) - skb->data;
+ /* If the headers are excessively large, then we must
+ * fall back to a software checksum */
+ if (unlikely(cksum_offset > 255 || pseudo_hdr_offset > 127)) {
+ if (skb_checksum_help(skb, 0))
+ goto drop;
+ cksum_offset = 0;
+ pseudo_hdr_offset = 0;
+ } else {
+ pseudo_hdr_offset = htons(pseudo_hdr_offset);
+ odd_flag = MXGEFW_FLAGS_ALIGN_ODD;
+ flags |= MXGEFW_FLAGS_CKSUM;
+ }
+ }
+
+ cum_len = 0;
+
+#ifdef NETIF_F_TSO
+ if (mss) { /* TSO */
+ /* this removes any CKSUM flag from before */
+ flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST);
+
+ /* negative cum_len signifies to the
+ * send loop that we are still in the
+ * header portion of the TSO packet.
+ * TSO header must be at most 134 bytes long */
+ cum_len = -((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
+
+ /* for TSO, pseudo_hdr_offset holds mss.
+ * The firmware figures out where to put
+ * the checksum by parsing the header. */
+ pseudo_hdr_offset = htons(mss);
+ } else
+#endif /*NETIF_F_TSO */
+ /* Mark small packets, and pad out tiny packets */
+ if (skb->len <= MXGEFW_SEND_SMALL_SIZE) {
+ flags |= MXGEFW_FLAGS_SMALL;
+
+ /* pad frames to at least ETH_ZLEN bytes */
+ if (unlikely(skb->len < ETH_ZLEN)) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL) {
+ /* The packet is gone, so we must
+ * return 0 */
+ mgp->stats.tx_dropped += 1;
+ return 0;
+ }
+ /* adjust the len to account for the zero pad
+ * so that the nic can know how long it is */
+ skb->len = ETH_ZLEN;
+ }
+ }
+
+ /* map the skb for DMA */
+ len = skb->len - skb->data_len;
+ idx = tx->req & tx->mask;
+ tx->info[idx].skb = skb;
+ bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ pci_unmap_addr_set(&tx->info[idx], bus, bus);
+ pci_unmap_len_set(&tx->info[idx], len, len);
+
+ frag_cnt = skb_shinfo(skb)->nr_frags;
+ frag_idx = 0;
+ count = 0;
+ rdma_count = 0;
+
+ /* "rdma_count" is the number of RDMAs belonging to the
+ * current packet BEFORE the current send request. For
+ * non-TSO packets, this is equal to "count".
+ * For TSO packets, rdma_count needs to be reset
+ * to 0 after a segment cut.
+ *
+ * The rdma_count field of the send request is
+ * the number of RDMAs of the packet starting at
+ * that request. For TSO send requests with one or more cuts
+ * in the middle, this is the number of RDMAs starting
+ * after the last cut in the request. All previous
+ * segments before the last cut implicitly have 1 RDMA.
+ *
+ * Since the number of RDMAs is not known beforehand,
+ * it must be filled-in retroactively - after each
+ * segmentation cut or at the end of the entire packet.
+ */
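+
+ /* Illustration (added note): for a non-TSO packet mapped into three
+ * send requests, no segmentation cut ever resets rdma_count, so the
+ * "(req - rdma_count)->rdma_count = rdma_count" statement after the
+ * loop retroactively writes 3 into the first request.
+ */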
+
+ while (1) {
+ /* Break the SKB or Fragment up into pieces which
+ * do not cross mgp->tx.boundary */
+ low = MYRI10GE_LOWPART_TO_U32(bus);
+ high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
+ while (len) {
+ u8 flags_next;
+ int cum_len_next;
+
+ if (unlikely(count == max_segments))
+ goto abort_linearize;
+
+ boundary = (low + tx->boundary) & ~(tx->boundary - 1);
+ seglen = boundary - low;
+ if (seglen > len)
+ seglen = len;
+ flags_next = flags & ~MXGEFW_FLAGS_FIRST;
+ cum_len_next = cum_len + seglen;
+#ifdef NETIF_F_TSO
+ if (mss) { /* TSO */
+ (req - rdma_count)->rdma_count = rdma_count + 1;
+
+ if (likely(cum_len >= 0)) { /* payload */
+ int next_is_first, chop;
+
+ chop = (cum_len_next > mss);
+ cum_len_next = cum_len_next % mss;
+ next_is_first = (cum_len_next == 0);
+ flags |= chop * MXGEFW_FLAGS_TSO_CHOP;
+ flags_next |= next_is_first *
+ MXGEFW_FLAGS_FIRST;
+ rdma_count |= -(chop | next_is_first);
+ rdma_count += chop & !next_is_first;
+ } else if (likely(cum_len_next >= 0)) { /* header ends */
+ int small;
+
+ rdma_count = -1;
+ cum_len_next = 0;
+ seglen = -cum_len;
+ small = (mss <= MXGEFW_SEND_SMALL_SIZE);
+ flags_next = MXGEFW_FLAGS_TSO_PLD |
+ MXGEFW_FLAGS_FIRST |
+ (small * MXGEFW_FLAGS_SMALL);
+ }
+ }
+#endif /* NETIF_F_TSO */
+ req->addr_high = high_swapped;
+ req->addr_low = htonl(low);
+ req->pseudo_hdr_offset = pseudo_hdr_offset;
+ req->pad = 0; /* complete solid 16-byte block; does this matter? */
+ req->rdma_count = 1;
+ req->length = htons(seglen);
+ req->cksum_offset = cksum_offset;
+ req->flags = flags | ((cum_len & 1) * odd_flag);
+
+ low += seglen;
+ len -= seglen;
+ cum_len = cum_len_next;
+ flags = flags_next;
+ req++;
+ count++;
+ rdma_count++;
+ if (unlikely(cksum_offset > seglen))
+ cksum_offset -= seglen;
+ else
+ cksum_offset = 0;
+ }
+ if (frag_idx == frag_cnt)
+ break;
+
+ /* map next fragment for DMA */
+ idx = (count + tx->req) & tx->mask;
+ frag = &skb_shinfo(skb)->frags[frag_idx];
+ frag_idx++;
+ len = frag->size;
+ bus = pci_map_page(mgp->pdev, frag->page, frag->page_offset,
+ len, PCI_DMA_TODEVICE);
+ pci_unmap_addr_set(&tx->info[idx], bus, bus);
+ pci_unmap_len_set(&tx->info[idx], len, len);
+ }
+
+ (req - rdma_count)->rdma_count = rdma_count;
+#ifdef NETIF_F_TSO
+ if (mss)
+ do {
+ req--;
+ req->flags |= MXGEFW_FLAGS_TSO_LAST;
+ } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
+ MXGEFW_FLAGS_FIRST)));
+#endif
+ idx = ((count - 1) + tx->req) & tx->mask;
+ tx->info[idx].last = 1;
+ if (tx->wc_fifo == NULL)
+ myri10ge_submit_req(tx, tx->req_list, count);
+ else
+ myri10ge_submit_req_wc(tx, tx->req_list, count);
+ tx->pkt_start++;
+ if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
+ mgp->stop_queue++;
+ netif_stop_queue(dev);
+ }
+ dev->trans_start = jiffies;
+ return 0;
+
+abort_linearize:
+ /* Free any DMA resources we've alloced and clear out the skb
+ * slot so as to not trip up assertions, and to avoid a
+ * double-free if linearizing fails */
+
+ last_idx = (idx + 1) & tx->mask;
+ idx = tx->req & tx->mask;
+ tx->info[idx].skb = NULL;
+ do {
+ len = pci_unmap_len(&tx->info[idx], len);
+ if (len) {
+ if (tx->info[idx].skb != NULL)
+ pci_unmap_single(mgp->pdev,
+ pci_unmap_addr(&tx->info[idx],
+ bus), len,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(mgp->pdev,
+ pci_unmap_addr(&tx->info[idx],
+ bus), len,
+ PCI_DMA_TODEVICE);
+ pci_unmap_len_set(&tx->info[idx], len, 0);
+ tx->info[idx].skb = NULL;
+ }
+ idx = (idx + 1) & tx->mask;
+ } while (idx != last_idx);
+ if (skb_shinfo(skb)->tso_size) {
+ printk(KERN_ERR
+ "myri10ge: %s: TSO but wanted to linearize?!?!?\n",
+ mgp->dev->name);
+ goto drop;
+ }
+
+ if (skb_linearize(skb, GFP_ATOMIC))
+ goto drop;
+
+ mgp->tx_linearized++;
+ goto again;
+
+drop:
+ dev_kfree_skb_any(skb);
+ mgp->stats.tx_dropped += 1;
+ return 0;
+
+}
+
+static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
+{
+ struct myri10ge_priv *mgp = netdev_priv(dev);
+ return &mgp->stats;
+}
+
+static void myri10ge_set_multicast_list(struct net_device *dev)
+{
+ /* can be called from atomic contexts,
+ * pass 1 to force atomicity in myri10ge_send_cmd() */
+ myri10ge_change_promisc(netdev_priv(dev), dev->flags & IFF_PROMISC, 1);
+}
+
+static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sa = addr;
+ struct myri10ge_priv *mgp = netdev_priv(dev);
+ int status;
+
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EADDRNOTAVAIL;
+
+ status = myri10ge_update_mac_address(mgp, sa->sa_data);
+ if (status != 0) {
+ printk(KERN_ERR
+ "myri10ge: %s: changing mac address failed with %d\n",
+ dev->name, status);
+ return status;
+ }
+
+ /* change the dev structure */
+ memcpy(dev->dev_addr, sa->sa_data, 6);
+ return 0;
+}
+
+static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct myri10ge_priv *mgp = netdev_priv(dev);
+ int error = 0;
+
+ if ((new_mtu < 68) || (ETH_HLEN + new_mtu > MYRI10GE_MAX_ETHER_MTU)) {
+ printk(KERN_ERR "myri10ge: %s: new mtu (%d) is not valid\n",
+ dev->name, new_mtu);
+ return -EINVAL;
+ }
+ printk(KERN_INFO "%s: changing mtu from %d to %d\n",
+ dev->name, dev->mtu, new_mtu);
+ if (mgp->running) {
+ /* if we change the mtu on an active device, we must
+ * reset the device so the firmware sees the change */
+ myri10ge_close(dev);
+ dev->mtu = new_mtu;
+ myri10ge_open(dev);
+ } else
+ dev->mtu = new_mtu;
+
+ return error;
+}
+
+/*
+ * Enable ECRC to align PCI-E Completion packets on an 8-byte boundary.
+ * Only do it if the bridge is a root port since we don't want to disturb
+ * any other device, except if forced with myri10ge_ecrc_enable > 1.
+ */
+
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_PCIE 0x005d
+
+static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
+{
+ struct pci_dev *bridge = mgp->pdev->bus->self;
+ struct device *dev = &mgp->pdev->dev;
+ unsigned cap;
+ unsigned err_cap;
+ u16 val;
+ u8 ext_type;
+ int ret;
+
+ if (!myri10ge_ecrc_enable || !bridge)
+ return;
+
+ /* check that the bridge is a root port */
+ cap = pci_find_capability(bridge, PCI_CAP_ID_EXP);
+ pci_read_config_word(bridge, cap + PCI_CAP_FLAGS, &val);
+ ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
+ if (ext_type != PCI_EXP_TYPE_ROOT_PORT) {
+ if (myri10ge_ecrc_enable > 1) {
+ struct pci_dev *old_bridge = bridge;
+
+ /* Walk the hierarchy up to the root port
+ * where ECRC has to be enabled */
+ do {
+ bridge = bridge->bus->self;
+ if (!bridge) {
+ dev_err(dev,
+ "Failed to find root port"
+ " to force ECRC\n");
+ return;
+ }
+ cap =
+ pci_find_capability(bridge, PCI_CAP_ID_EXP);
+ pci_read_config_word(bridge,
+ cap + PCI_CAP_FLAGS, &val);
+ ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
+ } while (ext_type != PCI_EXP_TYPE_ROOT_PORT);
+
+ dev_info(dev,
+ "Forcing ECRC on non-root port %s"
+ " (enabling on root port %s)\n",
+ pci_name(old_bridge), pci_name(bridge));
+ } else {
+ dev_err(dev,
+ "Not enabling ECRC on non-root port %s\n",
+ pci_name(bridge));
+ return;
+ }
+ }
+
+ cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
+ /* nvidia ext cap is not always linked in ext cap chain */
+ if (!cap
+ && bridge->vendor == PCI_VENDOR_ID_NVIDIA
+ && bridge->device == PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_PCIE)
+ cap = 0x160;
+
+ if (!cap)
+ return;
+
+ ret = pci_read_config_dword(bridge, cap + PCI_ERR_CAP, &err_cap);
+ if (ret) {
+ dev_err(dev, "failed reading ext-conf-space of %s\n",
+ pci_name(bridge));
+ dev_err(dev, "\t pci=nommconf in use? "
+ "or buggy/incomplete/absent ACPI MCFG attr?\n");
+ return;
+ }
+ if (!(err_cap & PCI_ERR_CAP_ECRC_GENC))
+ return;
+
+ err_cap |= PCI_ERR_CAP_ECRC_GENE;
+ pci_write_config_dword(bridge, cap + PCI_ERR_CAP, err_cap);
+ dev_info(dev, "Enabled ECRC on upstream bridge %s\n", pci_name(bridge));
+ mgp->tx.boundary = 4096;
+ mgp->fw_name = myri10ge_fw_aligned;
+}
+
+/*
+ * The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput
+ * when the PCI-E Completion packets are aligned on an 8-byte
+ * boundary. Some PCI-E chip sets always align Completion packets; on
+ * the ones that do not, the alignment can be enforced by enabling
+ * ECRC generation (if supported).
+ *
+ * When PCI-E Completion packets are not aligned, it is actually more
+ * efficient to limit Read-DMA transactions to 2KB, rather than 4KB.
+ *
+ * If the driver can neither enable ECRC nor verify that it has
+ * already been enabled, then it must use a firmware image which works
+ * around unaligned completion packets (myri10ge_ethp_z8e.dat), and it
+ * should also ensure that it never gives the device a Read-DMA which is
+ * larger than 2KB by setting the tx.boundary to 2KB. If ECRC is
+ * enabled, then the driver should use the aligned (myri10ge_eth_z8e.dat)
+ * firmware image, and set tx.boundary to 4KB.
+ */
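+
+/* In short (summarizing the note above): when completions are aligned
+ * the driver uses tx.boundary = 4096 and the aligned firmware image
+ * (myri10ge_eth_z8e.dat); otherwise it uses tx.boundary = 2048 and the
+ * unaligned image (myri10ge_ethp_z8e.dat). myri10ge_select_firmware()
+ * below makes that choice, presumably via the myri10ge_fw_aligned and
+ * myri10ge_fw_unaligned defaults defined earlier in this file.
+ */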
+
+#define PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE 0x0132
+
+static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
+{
+ struct pci_dev *bridge = mgp->pdev->bus->self;
+
+ mgp->tx.boundary = 2048;
+ mgp->fw_name = myri10ge_fw_unaligned;
+
+ if (myri10ge_force_firmware == 0) {
+ myri10ge_enable_ecrc(mgp);
+
+ /* Check to see if the upstream bridge is known to
+ * provide aligned completions */
+ if (bridge
+ /* ServerWorks HT2000/HT1000 */
+ && bridge->vendor == PCI_VENDOR_ID_SERVERWORKS
+ && bridge->device ==
+ PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE) {
+ dev_info(&mgp->pdev->dev,
+ "Assuming aligned completions (0x%x:0x%x)\n",
+ bridge->vendor, bridge->device);
+ mgp->tx.boundary = 4096;
+ mgp->fw_name = myri10ge_fw_aligned;
+ }
+ } else {
+ if (myri10ge_force_firmware == 1) {
+ dev_info(&mgp->pdev->dev,
+ "Assuming aligned completions (forced)\n");
+ mgp->tx.boundary = 4096;
+ mgp->fw_name = myri10ge_fw_aligned;
+ } else {
+ dev_info(&mgp->pdev->dev,
+ "Assuming unaligned completions (forced)\n");
+ mgp->tx.boundary = 2048;
+ mgp->fw_name = myri10ge_fw_unaligned;
+ }
+ }
+ if (myri10ge_fw_name != NULL) {
+ dev_info(&mgp->pdev->dev, "overriding firmware to %s\n",
+ myri10ge_fw_name);
+ mgp->fw_name = myri10ge_fw_name;
+ }
+}
+
+static void myri10ge_save_state(struct myri10ge_priv *mgp)
+{
+ struct pci_dev *pdev = mgp->pdev;
+ int cap;
+
+ pci_save_state(pdev);
+ /* now save PCIe and MSI state that Linux will not
+ * save for us */
+ cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &mgp->devctl);
+ cap = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+ pci_read_config_word(pdev, cap + PCI_MSI_FLAGS, &mgp->msi_flags);
+}
+
+static void myri10ge_restore_state(struct myri10ge_priv *mgp)
+{
+ struct pci_dev *pdev = mgp->pdev;
+ int cap;
+
+ /* restore PCIe and MSI state that linux will not */
+ cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ pci_write_config_dword(pdev, cap + PCI_CAP_ID_EXP, mgp->devctl);
+ cap = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+ pci_write_config_word(pdev, cap + PCI_MSI_FLAGS, mgp->msi_flags);
+
+ pci_restore_state(pdev);
+}
+
+#ifdef CONFIG_PM
+
+static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct myri10ge_priv *mgp;
+ struct net_device *netdev;
+
+ mgp = pci_get_drvdata(pdev);
+ if (mgp == NULL)
+ return -EINVAL;
+ netdev = mgp->dev;
+
+ netif_device_detach(netdev);
+ if (netif_running(netdev)) {
+ printk(KERN_INFO "myri10ge: closing %s\n", netdev->name);
+ rtnl_lock();
+ myri10ge_close(netdev);
+ rtnl_unlock();
+ }
+ myri10ge_dummy_rdma(mgp, 0);
+ free_irq(pdev->irq, mgp);
+ myri10ge_save_state(mgp);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+
+static int myri10ge_resume(struct pci_dev *pdev)
+{
+ struct myri10ge_priv *mgp;
+ struct net_device *netdev;
+ int status;
+ u16 vendor;
+
+ mgp = pci_get_drvdata(pdev);
+ if (mgp == NULL)
+ return -EINVAL;
+ netdev = mgp->dev;
+ pci_set_power_state(pdev, 0); /* zeros conf space as a side effect */
+ msleep(5); /* give card time to respond */
+ pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
+ if (vendor == 0xffff) {
+ printk(KERN_ERR "myri10ge: %s: device disappeared!\n",
+ mgp->dev->name);
+ return -EIO;
+ }
+ myri10ge_restore_state(mgp);
+ pci_enable_device(pdev);
+ pci_set_master(pdev);
+
+ status = request_irq(pdev->irq, myri10ge_intr, SA_SHIRQ,
+ netdev->name, mgp);
+ if (status != 0) {
+ dev_err(&pdev->dev, "failed to allocate IRQ\n");
+ goto abort_with_msi;
+ }
+
+ myri10ge_reset(mgp);
+ myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096);
+
+ /* Save configuration space to be restored if the
+ * nic resets due to a parity error */
+ myri10ge_save_state(mgp);
+
+ if (netif_running(netdev)) {
+ rtnl_lock();
+ myri10ge_open(netdev);
+ rtnl_unlock();
+ }
+ netif_device_attach(netdev);
+
+ return 0;
+
+abort_with_msi:
+ return -EIO;
+
+}
+
+#endif /* CONFIG_PM */
+
+static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
+{
+ struct pci_dev *pdev = mgp->pdev;
+ int vs = mgp->vendor_specific_offset;
+ u32 reboot;
+
+ /* enter read32 mode */
+ pci_write_config_byte(pdev, vs + 0x10, 0x3);
+
+ /* read REBOOT_STATUS (0xfffffff0) */
+ pci_write_config_dword(pdev, vs + 0x18, 0xfffffff0);
+ pci_read_config_dword(pdev, vs + 0x14, &reboot);
+ return reboot;
+}
+
+/*
+ * This watchdog is used to check whether the board has suffered
+ * from a parity error and needs to be recovered.
+ */
+static void myri10ge_watchdog(void *arg)
+{
+ struct myri10ge_priv *mgp = arg;
+ u32 reboot;
+ int status;
+ u16 cmd, vendor;
+
+ mgp->watchdog_resets++;
+ pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
+ if ((cmd & PCI_COMMAND_MASTER) == 0) {
+ /* Bus master DMA disabled? Check to see
+ * if the card rebooted due to a parity error.
+ * For now, just report it */
+ reboot = myri10ge_read_reboot(mgp);
+ printk(KERN_ERR
+ "myri10ge: %s: NIC rebooted (0x%x), resetting\n",
+ mgp->dev->name, reboot);
+ /*
+ * A rebooted nic will come back with config space as
+ * it was after power was applied to PCIe bus.
+ * Attempt to restore config space which was saved
+ * when the driver was loaded, or the last time the
+ * nic was resumed from power saving mode.
+ */
+ myri10ge_restore_state(mgp);
+ } else {
+ /* if we get back -1's from our slot, perhaps somebody
+ * powered off our card. Don't try to reset it in
+ * this case */
+ if (cmd == 0xffff) {
+ pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
+ if (vendor == 0xffff) {
+ printk(KERN_ERR
+ "myri10ge: %s: device disappeared!\n",
+ mgp->dev->name);
+ return;
+ }
+ }
+ /* Perhaps it is a software error. Try to reset */
+
+ printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n",
+ mgp->dev->name);
+ printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n",
+ mgp->dev->name, mgp->tx.req, mgp->tx.done,
+ mgp->tx.pkt_start, mgp->tx.pkt_done,
+ (int)ntohl(mgp->fw_stats->send_done_count));
+ msleep(2000);
+ printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n",
+ mgp->dev->name, mgp->tx.req, mgp->tx.done,
+ mgp->tx.pkt_start, mgp->tx.pkt_done,
+ (int)ntohl(mgp->fw_stats->send_done_count));
+ }
+ rtnl_lock();
+ myri10ge_close(mgp->dev);
+ status = myri10ge_load_firmware(mgp);
+ if (status != 0)
+ printk(KERN_ERR "myri10ge: %s: failed to load firmware\n",
+ mgp->dev->name);
+ else
+ myri10ge_open(mgp->dev);
+ rtnl_unlock();
+}
+
+/*
+ * We use our own timer routine rather than relying upon
+ * netdev->tx_timeout because we have a very large hardware transmit
+ * queue. Due to the large queue, the netdev->tx_timeout function
+ * cannot detect a NIC with a parity error in a timely fashion if the
+ * NIC is lightly loaded.
+ */
+static void myri10ge_watchdog_timer(unsigned long arg)
+{
+ struct myri10ge_priv *mgp;
+
+ mgp = (struct myri10ge_priv *)arg;
+ if (mgp->tx.req != mgp->tx.done &&
+ mgp->tx.done == mgp->watchdog_tx_done)
+ /* nic seems like it might be stuck.. */
+ schedule_work(&mgp->watchdog_work);
+ else
+ /* rearm timer */
+ mod_timer(&mgp->watchdog_timer,
+ jiffies + myri10ge_watchdog_timeout * HZ);
+
+ mgp->watchdog_tx_done = mgp->tx.done;
+}
+
+static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct myri10ge_priv *mgp;
+ struct device *dev = &pdev->dev;
+ size_t bytes;
+ int i;
+ int status = -ENXIO;
+ int cap;
+ int dac_enabled;
+ u16 val;
+
+ netdev = alloc_etherdev(sizeof(*mgp));
+ if (netdev == NULL) {
+ dev_err(dev, "Could not allocate ethernet device\n");
+ return -ENOMEM;
+ }
+
+ mgp = netdev_priv(netdev);
+ memset(mgp, 0, sizeof(*mgp));
+ mgp->dev = netdev;
+ mgp->pdev = pdev;
+ mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
+ mgp->pause = myri10ge_flow_control;
+ mgp->intr_coal_delay = myri10ge_intr_coal_delay;
+ init_waitqueue_head(&mgp->down_wq);
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "pci_enable_device call failed\n");
+ status = -ENODEV;
+ goto abort_with_netdev;
+ }
+ myri10ge_select_firmware(mgp);
+
+ /* Find the vendor-specific cap so we can check
+ * the reboot register later on */
+ mgp->vendor_specific_offset
+ = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
+
+ /* Set our max read request to 4KB */
+ cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (cap < 64) {
+ dev_err(&pdev->dev, "Bad PCI_CAP_ID_EXP location %d\n", cap);
+ goto abort_with_netdev;
+ }
+ status = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &val);
+ if (status != 0) {
+ dev_err(&pdev->dev, "Error %d reading PCI_EXP_DEVCTL\n",
+ status);
+ goto abort_with_netdev;
+ }
+ val = (val & ~PCI_EXP_DEVCTL_READRQ) | (5 << 12);
+ status = pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, val);
+ if (status != 0) {
+ dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n",
+ status);
+ goto abort_with_netdev;
+ }
+
+ pci_set_master(pdev);
+ dac_enabled = 1;
+ status = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+ if (status != 0) {
+ dac_enabled = 0;
+ dev_err(&pdev->dev,
+ "64-bit pci address mask was refused, trying 32-bit");
+ status = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ }
+ if (status != 0) {
+ dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
+ goto abort_with_netdev;
+ }
+ mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
+ &mgp->cmd_bus, GFP_KERNEL);
+ if (mgp->cmd == NULL)
+ goto abort_with_netdev;
+
+ mgp->fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->fw_stats),
+ &mgp->fw_stats_bus, GFP_KERNEL);
+ if (mgp->fw_stats == NULL)
+ goto abort_with_cmd;
+
+ mgp->board_span = pci_resource_len(pdev, 0);
+ mgp->iomem_base = pci_resource_start(pdev, 0);
+ mgp->mtrr = -1;
+#ifdef CONFIG_MTRR
+ mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span,
+ MTRR_TYPE_WRCOMB, 1);
+#endif
+ /* Hack. need to get rid of these magic numbers */
+ mgp->sram_size =
+ 2 * 1024 * 1024 - (2 * (48 * 1024) + (32 * 1024)) - 0x100;
+ if (mgp->sram_size > mgp->board_span) {
+ dev_err(&pdev->dev, "board span %ld bytes too small\n",
+ mgp->board_span);
+ goto abort_with_wc;
+ }
+ mgp->sram = ioremap(mgp->iomem_base, mgp->board_span);
+ if (mgp->sram == NULL) {
+ dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
+ mgp->board_span, mgp->iomem_base);
+ status = -ENXIO;
+ goto abort_with_wc;
+ }
+ memcpy_fromio(mgp->eeprom_strings,
+ mgp->sram + mgp->sram_size - MYRI10GE_EEPROM_STRINGS_SIZE,
+ MYRI10GE_EEPROM_STRINGS_SIZE);
+ memset(mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE - 2, 0, 2);
+ status = myri10ge_read_mac_addr(mgp);
+ if (status)
+ goto abort_with_ioremap;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ netdev->dev_addr[i] = mgp->mac_addr[i];
+
+ /* allocate rx done ring */
+ bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry);
+ mgp->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
+ &mgp->rx_done.bus, GFP_KERNEL);
+ if (mgp->rx_done.entry == NULL)
+ goto abort_with_ioremap;
+ memset(mgp->rx_done.entry, 0, bytes);
+
+ status = myri10ge_load_firmware(mgp);
+ if (status != 0) {
+ dev_err(&pdev->dev, "failed to load firmware\n");
+ goto abort_with_rx_done;
+ }
+
+ status = myri10ge_reset(mgp);
+ if (status != 0) {
+ dev_err(&pdev->dev, "failed reset\n");
+ goto abort_with_firmware;
+ }
+
+ if (myri10ge_msi) {
+ status = pci_enable_msi(pdev);
+ if (status != 0)
+ dev_err(&pdev->dev,
+ "Error %d setting up MSI; falling back to xPIC\n",
+ status);
+ else
+ mgp->msi_enabled = 1;
+ }
+
+ status = request_irq(pdev->irq, myri10ge_intr, SA_SHIRQ,
+ netdev->name, mgp);
+ if (status != 0) {
+ dev_err(&pdev->dev, "failed to allocate IRQ\n");
+ goto abort_with_firmware;
+ }
+
+ pci_set_drvdata(pdev, mgp);
+ if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU)
+ myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
+ if ((myri10ge_initial_mtu + ETH_HLEN) < 68)
+ myri10ge_initial_mtu = 68;
+ netdev->mtu = myri10ge_initial_mtu;
+ netdev->open = myri10ge_open;
+ netdev->stop = myri10ge_close;
+ netdev->hard_start_xmit = myri10ge_xmit;
+ netdev->get_stats = myri10ge_get_stats;
+ netdev->base_addr = mgp->iomem_base;
+ netdev->irq = pdev->irq;
+ netdev->change_mtu = myri10ge_change_mtu;
+ netdev->set_multicast_list = myri10ge_set_multicast_list;
+ netdev->set_mac_address = myri10ge_set_mac_address;
+ netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
+ if (dac_enabled)
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->poll = myri10ge_poll;
+ netdev->weight = myri10ge_napi_weight;
+
+ /* Save configuration space to be restored if the
+ * nic resets due to a parity error */
+ myri10ge_save_state(mgp);
+ /* Restore state immediately since pci_save_msi_state disables MSI */
+ myri10ge_restore_state(mgp);
+
+ /* Setup the watchdog timer */
+ setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
+ (unsigned long)mgp);
+
+ SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
+ INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog, mgp);
+ status = register_netdev(netdev);
+ if (status != 0) {
+ dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
+ goto abort_with_irq;
+ }
+
+ printk(KERN_INFO "myri10ge: %s: %s IRQ %d, tx bndry %d, fw %s, WC %s\n",
+ netdev->name, (mgp->msi_enabled ? "MSI" : "xPIC"),
+ pdev->irq, mgp->tx.boundary, mgp->fw_name,
+ (mgp->mtrr >= 0 ? "Enabled" : "Disabled"));
+
+ return 0;
+
+abort_with_irq:
+ free_irq(pdev->irq, mgp);
+ if (mgp->msi_enabled)
+ pci_disable_msi(pdev);
+
+abort_with_firmware:
+ myri10ge_dummy_rdma(mgp, 0);
+
+abort_with_rx_done:
+ bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry);
+ dma_free_coherent(&pdev->dev, bytes,
+ mgp->rx_done.entry, mgp->rx_done.bus);
+
+abort_with_ioremap:
+ iounmap(mgp->sram);
+
+abort_with_wc:
+#ifdef CONFIG_MTRR
+ if (mgp->mtrr >= 0)
+ mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
+#endif
+ dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats),
+ mgp->fw_stats, mgp->fw_stats_bus);
+
+abort_with_cmd:
+ dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
+ mgp->cmd, mgp->cmd_bus);
+
+abort_with_netdev:
+
+ free_netdev(netdev);
+ return status;
+}
+
+/*
+ * myri10ge_remove
+ *
+ * Does what is necessary to shutdown one Myrinet device. Called
+ * once for each Myrinet card by the kernel when a module is
+ * unloaded.
+ */
+static void myri10ge_remove(struct pci_dev *pdev)
+{
+ struct myri10ge_priv *mgp;
+ struct net_device *netdev;
+ size_t bytes;
+
+ mgp = pci_get_drvdata(pdev);
+ if (mgp == NULL)
+ return;
+
+ flush_scheduled_work();
+ netdev = mgp->dev;
+ unregister_netdev(netdev);
+ free_irq(pdev->irq, mgp);
+ if (mgp->msi_enabled)
+ pci_disable_msi(pdev);
+
+ myri10ge_dummy_rdma(mgp, 0);
+
+ bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry);
+ dma_free_coherent(&pdev->dev, bytes,
+ mgp->rx_done.entry, mgp->rx_done.bus);
+
+ iounmap(mgp->sram);
+
+#ifdef CONFIG_MTRR
+ if (mgp->mtrr >= 0)
+ mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
+#endif
+ dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats),
+ mgp->fw_stats, mgp->fw_stats_bus);
+
+ dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
+ mgp->cmd, mgp->cmd_bus);
+
+ free_netdev(netdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008
+
+static struct pci_device_id myri10ge_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
+ {0},
+};
+
+static struct pci_driver myri10ge_driver = {
+ .name = "myri10ge",
+ .probe = myri10ge_probe,
+ .remove = myri10ge_remove,
+ .id_table = myri10ge_pci_tbl,
+#ifdef CONFIG_PM
+ .suspend = myri10ge_suspend,
+ .resume = myri10ge_resume,
+#endif
+};
+
+static __init int myri10ge_init_module(void)
+{
+ printk(KERN_INFO "%s: Version %s\n", myri10ge_driver.name,
+ MYRI10GE_VERSION_STR);
+ return pci_register_driver(&myri10ge_driver);
+}
+
+module_init(myri10ge_init_module);
+
+static __exit void myri10ge_cleanup_module(void)
+{
+ pci_unregister_driver(&myri10ge_driver);
+}
+
+module_exit(myri10ge_cleanup_module);
diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/myri10ge/myri10ge_mcp.h
new file mode 100644
index 000000000000..0a6cae6cb186
--- /dev/null
+++ b/drivers/net/myri10ge/myri10ge_mcp.h
@@ -0,0 +1,205 @@
+#ifndef __MYRI10GE_MCP_H__
+#define __MYRI10GE_MCP_H__
+
+#define MXGEFW_VERSION_MAJOR 1
+#define MXGEFW_VERSION_MINOR 4
+
+/* 8 Bytes */
+struct mcp_dma_addr {
+ u32 high;
+ u32 low;
+};
+
+/* 4 Bytes */
+struct mcp_slot {
+ u16 checksum;
+ u16 length;
+};
+
+/* 64 Bytes */
+struct mcp_cmd {
+ u32 cmd;
+ u32 data0; /* will be low portion if data > 32 bits */
+ /* 8 */
+ u32 data1; /* will be high portion if data > 32 bits */
+ u32 data2; /* currently unused.. */
+ /* 16 */
+ struct mcp_dma_addr response_addr;
+ /* 24 */
+ u8 pad[40];
+};
+
+/* 8 Bytes */
+struct mcp_cmd_response {
+ u32 data;
+ u32 result;
+};
+
+/*
+ * flags used in mcp_kreq_ether_send_t:
+ *
+ * The SMALL flag is only needed in the first segment. It is raised
+ * for packets whose total length is less than or equal to 512 bytes.
+ *
+ * The CKSUM flag must be set in all segments.
+ *
+ * The PADDED flag is set if the packet needs to be padded, and it
+ * must be set for all segments.
+ *
+ * The MXGEFW_FLAGS_ALIGN_ODD flag must be set if the cumulative
+ * length of all previous segments was odd; a short usage sketch
+ * follows struct mcp_kreq_ether_send below.
+ */
+
+#define MXGEFW_FLAGS_SMALL 0x1
+#define MXGEFW_FLAGS_TSO_HDR 0x1
+#define MXGEFW_FLAGS_FIRST 0x2
+#define MXGEFW_FLAGS_ALIGN_ODD 0x4
+#define MXGEFW_FLAGS_CKSUM 0x8
+#define MXGEFW_FLAGS_TSO_LAST 0x8
+#define MXGEFW_FLAGS_NO_TSO 0x10
+#define MXGEFW_FLAGS_TSO_CHOP 0x10
+#define MXGEFW_FLAGS_TSO_PLD 0x20
+
+#define MXGEFW_SEND_SMALL_SIZE 1520
+#define MXGEFW_MAX_MTU 9400
+
+union mcp_pso_or_cumlen {
+ u16 pseudo_hdr_offset;
+ u16 cum_len;
+};
+
+#define MXGEFW_MAX_SEND_DESC 12
+#define MXGEFW_PAD 2
+
+/* 16 Bytes */
+struct mcp_kreq_ether_send {
+ u32 addr_high;
+ u32 addr_low;
+ u16 pseudo_hdr_offset;
+ u16 length;
+ u8 pad;
+ u8 rdma_count;
+ u8 cksum_offset; /* where to start computing cksum */
+ u8 flags; /* as defined above */
+};
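/*
 * Illustrative sketch: one way a driver might fill a single-segment send
 * descriptor using the flags documented above.  The helper name and the
 * 512-byte SMALL threshold (taken from the flag comment) are assumptions
 * for illustration only; byte-order conversion for the NIC is omitted.
 */
static inline void example_fill_send_desc(struct mcp_kreq_ether_send *req,
					  u64 bus_addr, u16 len,
					  u8 cksum_offset)
{
	memset(req, 0, sizeof(*req));
	req->addr_high = (u32) (bus_addr >> 32);
	req->addr_low = (u32) bus_addr;
	req->length = len;
	req->cksum_offset = cksum_offset;	/* where checksumming starts */
	req->flags = MXGEFW_FLAGS_FIRST | MXGEFW_FLAGS_CKSUM;
	if (len <= 512)		/* SMALL is only needed in the first segment */
		req->flags |= MXGEFW_FLAGS_SMALL;
}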
+
+/* 8 Bytes */
+struct mcp_kreq_ether_recv {
+ u32 addr_high;
+ u32 addr_low;
+};
+
+/* Commands */
+
+#define MXGEFW_CMD_OFFSET 0xf80000
+
+enum myri10ge_mcp_cmd_type {
+ MXGEFW_CMD_NONE = 0,
+ /* Reset the mcp, it is left in a safe state, waiting
+ * for the driver to set all its parameters */
+ MXGEFW_CMD_RESET,
+
+ /* get the version number of the current firmware
+ * (it may be available in the eeprom strings) */
+ MXGEFW_GET_MCP_VERSION,
+
+ /* Parameters which must be set by the driver before it can
+ * issue MXGEFW_CMD_ETHERNET_UP. They persist until the next
+ * MXGEFW_CMD_RESET is issued */
+
+ MXGEFW_CMD_SET_INTRQ_DMA,
+ MXGEFW_CMD_SET_BIG_BUFFER_SIZE, /* in bytes, power of 2 */
+ MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, /* in bytes */
+
+ /* Parameters which refer to lanai SRAM addresses where the
+ * driver must issue PIO writes for various things */
+
+ MXGEFW_CMD_GET_SEND_OFFSET,
+ MXGEFW_CMD_GET_SMALL_RX_OFFSET,
+ MXGEFW_CMD_GET_BIG_RX_OFFSET,
+ MXGEFW_CMD_GET_IRQ_ACK_OFFSET,
+ MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
+
+ /* Parameters which refer to rings stored on the MCP,
+ * and whose size is controlled by the mcp */
+
+ MXGEFW_CMD_GET_SEND_RING_SIZE, /* in bytes */
+ MXGEFW_CMD_GET_RX_RING_SIZE, /* in bytes */
+
+ /* Parameters which refer to rings stored in the host,
+ * and whose size is controlled by the host. Note that
+ * all must be physically contiguous and must contain
+ * a power of 2 number of entries. */
+
+ MXGEFW_CMD_SET_INTRQ_SIZE, /* in bytes */
+
+ /* command to bring ethernet interface up. Above parameters
+ * (plus mtu & mac address) must have been exchanged prior
+ * to issuing this command */
+ MXGEFW_CMD_ETHERNET_UP,
+
+ /* command to bring ethernet interface down. No further sends
+ * or receives may be processed until an MXGEFW_CMD_ETHERNET_UP
+ * is issued, and all interrupt queues must be flushed prior
+ * to ack'ing this command */
+
+ MXGEFW_CMD_ETHERNET_DOWN,
+
+ /* commands the driver may issue live, without resetting
+ * the nic. Note that increasing the mtu "live" should
+ * only be done if the driver has already supplied buffers
+ * sufficiently large to handle the new mtu. Decreasing
+ * the mtu live is safe */
+
+ MXGEFW_CMD_SET_MTU,
+ MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, /* in microseconds */
+ MXGEFW_CMD_SET_STATS_INTERVAL, /* in microseconds */
+ MXGEFW_CMD_SET_STATS_DMA,
+
+ MXGEFW_ENABLE_PROMISC,
+ MXGEFW_DISABLE_PROMISC,
+ MXGEFW_SET_MAC_ADDRESS,
+
+ MXGEFW_ENABLE_FLOW_CONTROL,
+ MXGEFW_DISABLE_FLOW_CONTROL,
+
+ /* do a DMA test
+ * data0,data1 = DMA address
+ * data2 = RDMA length (MSH), WDMA length (LSH)
+ * command return data = repetitions (MSH), 0.5-ms ticks (LSH)
+ * (a short sketch of building this command follows the enum)
+ */
+ MXGEFW_DMA_TEST
+};
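/*
 * Illustrative sketch: building an MXGEFW_DMA_TEST request in a struct
 * mcp_cmd, packing data2 as described above (RDMA length in the most
 * significant half, WDMA length in the least significant half).  How the
 * command is delivered to the NIC and the response polled is driver
 * specific and omitted; the helper name and byte-order handling are
 * assumptions for illustration only.
 */
static inline void example_build_dma_test(struct mcp_cmd *cmd, u64 test_bus,
					  u16 rdma_len, u16 wdma_len,
					  u64 response_bus)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->cmd = MXGEFW_DMA_TEST;
	cmd->data0 = (u32) test_bus;		/* low half of the DMA address */
	cmd->data1 = (u32) (test_bus >> 32);	/* high half of the DMA address */
	cmd->data2 = ((u32) rdma_len << 16) | wdma_len;
	cmd->response_addr.high = (u32) (response_bus >> 32);
	cmd->response_addr.low = (u32) response_bus;
}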
+
+enum myri10ge_mcp_cmd_status {
+ MXGEFW_CMD_OK = 0,
+ MXGEFW_CMD_UNKNOWN,
+ MXGEFW_CMD_ERROR_RANGE,
+ MXGEFW_CMD_ERROR_BUSY,
+ MXGEFW_CMD_ERROR_EMPTY,
+ MXGEFW_CMD_ERROR_CLOSED,
+ MXGEFW_CMD_ERROR_HASH_ERROR,
+ MXGEFW_CMD_ERROR_BAD_PORT,
+ MXGEFW_CMD_ERROR_RESOURCES
+};
+
+/* 40 Bytes */
+struct mcp_irq_data {
+ u32 send_done_count;
+
+ u32 link_up;
+ u32 dropped_link_overflow;
+ u32 dropped_link_error_or_filtered;
+ u32 dropped_runt;
+ u32 dropped_overrun;
+ u32 dropped_no_small_buffer;
+ u32 dropped_no_big_buffer;
+ u32 rdma_tags_available;
+
+ u8 tx_stopped;
+ u8 link_down;
+ u8 stats_updated;
+ u8 valid;
+};
+
+#endif /* __MYRI10GE_MCP_H__ */
diff --git a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
new file mode 100644
index 000000000000..487f7792fd46
--- /dev/null
+++ b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
@@ -0,0 +1,58 @@
+#ifndef __MYRI10GE_MCP_GEN_HEADER_H__
+#define __MYRI10GE_MCP_GEN_HEADER_H__
+
+/* this file defines a standard header used as a first entry point to
+ * exchange information between the firmware (MCP) and the driver. The
+ * header structure can be anywhere in the mcp. It will usually be in
+ * the .data section, because some fields need to be initialized at
+ * compile time.
+ * The 32-bit word at offset MCP_HEADER_PTR_OFFSET in the mcp must
+ * contain the location of the header.
+ *
+ * Typically a MCP will start with the following:
+ * .text
+ * .space 52 ! to help catch MEMORY_INT errors
+ * bt start ! jump to real code
+ * nop
+ * .long _gen_mcp_header
+ *
+ * The source will have a definition like:
+ *
+ * mcp_gen_header_t gen_mcp_header = {
+ * .header_length = sizeof(mcp_gen_header_t),
+ * .mcp_type = MCP_TYPE_XXX,
+ * .version = "something $Id: mcp_gen_header.h,v 1.2 2006/05/13 10:04:35 bgoglin Exp $",
+ * .mcp_globals = (unsigned)&Globals
+ * };
+ */
+
+#define MCP_HEADER_PTR_OFFSET 0x3c
+
+#define MCP_TYPE_MX 0x4d582020 /* "MX " */
+#define MCP_TYPE_PCIE 0x70636965 /* "PCIE" pcie-only MCP */
+#define MCP_TYPE_ETH 0x45544820 /* "ETH " */
+#define MCP_TYPE_MCP0 0x4d435030 /* "MCP0" */
+
+struct mcp_gen_header {
+ /* the first 4 fields are filled at compile time */
+ unsigned header_length;
+ unsigned mcp_type;
+ char version[128];
+ unsigned mcp_globals; /* pointer to mcp-type specific structure */
+
+ /* filled by the MCP at run-time */
+ unsigned sram_size;
+ unsigned string_specs; /* either the original STRING_SPECS or a superset */
+ unsigned string_specs_len;
+
+ /* Fields above this comment are guaranteed to be present.
+ *
+ * Fields below this comment are extensions added in later versions
+ * of this struct; drivers should compare the header_length against
+ * offsetof(field) to check whether a given MCP implements them
+ * (a short sketch follows this struct).
+ *
+ * Never remove any field. Keep everything naturally aligned.
+ */
+};
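/*
 * Illustrative sketch: locating the generated header in MCP SRAM and
 * testing for one of the optional trailing fields, as described above.
 * 'sram' is assumed to be the ioremap()ed base of the MCP image; the
 * helper name is an assumption, and byte-order conversion of the header
 * fields is omitted.
 */
static inline int example_mcp_has_string_specs(void __iomem *sram)
{
	struct mcp_gen_header hdr;
	u32 hdr_offset;

	/* the 32-bit word at MCP_HEADER_PTR_OFFSET holds the header location */
	hdr_offset = readl(sram + MCP_HEADER_PTR_OFFSET);
	memcpy_fromio(&hdr, sram + hdr_offset, sizeof(hdr));

	/* a trailing field is present only if header_length reaches past it */
	return hdr.header_length > offsetof(struct mcp_gen_header, string_specs);
}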
+
+#endif /* __MYRI10GE_MCP_GEN_HEADER_H__ */
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index d090df413049..661bfe54ff5d 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -12,7 +12,7 @@
Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
pcnet_cs.c 1.153 2003/11/09 18:53:09
-
+
The network driver code is based on Donald Becker's NE2000 code:
Written 1992,1993 by Donald Becker.
@@ -146,7 +146,7 @@ typedef struct hw_info_t {
#define MII_PHYID_REG2 0x03
static hw_info_t hw_info[] = {
- { /* Accton EN2212 */ 0x0ff0, 0x00, 0x00, 0xe8, DELAY_OUTPUT },
+ { /* Accton EN2212 */ 0x0ff0, 0x00, 0x00, 0xe8, DELAY_OUTPUT },
{ /* Allied Telesis LA-PCM */ 0x0ff0, 0x00, 0x00, 0xf4, 0 },
{ /* APEX MultiCard */ 0x03f4, 0x00, 0x20, 0xe5, 0 },
{ /* ASANTE FriendlyNet */ 0x4910, 0x00, 0x00, 0x94,
@@ -193,7 +193,7 @@ static hw_info_t hw_info[] = {
{ /* NE2000 Compatible */ 0x0ff0, 0x00, 0xa0, 0x0c, 0 },
{ /* Network General Sniffer */ 0x0ff0, 0x00, 0x00, 0x65,
HAS_MISC_REG | HAS_IBM_MISC },
- { /* Panasonic VEL211 */ 0x0ff0, 0x00, 0x80, 0x45,
+ { /* Panasonic VEL211 */ 0x0ff0, 0x00, 0x80, 0x45,
HAS_MISC_REG | HAS_IBM_MISC },
{ /* PreMax PE-200 */ 0x07f0, 0x00, 0x20, 0xe0, 0 },
{ /* RPTI EP400 */ 0x0110, 0x00, 0x40, 0x95, 0 },
@@ -330,7 +330,7 @@ static hw_info_t *get_hwinfo(struct pcmcia_device *link)
for (j = 0; j < 6; j++)
dev->dev_addr[j] = readb(base + (j<<1));
}
-
+
iounmap(virt);
j = pcmcia_release_window(link->win);
if (j != CS_SUCCESS)
@@ -490,7 +490,7 @@ static int try_io_port(struct pcmcia_device *link)
if (link->io.NumPorts2 > 0) {
/* for master/slave multifunction cards */
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
- link->irq.Attributes =
+ link->irq.Attributes =
IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
}
} else {
@@ -543,19 +543,19 @@ static int pcnet_config(struct pcmcia_device *link)
manfid = le16_to_cpu(buf[0]);
prodid = le16_to_cpu(buf[1]);
}
-
+
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
tuple.Attributes = 0;
CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
while (last_ret == CS_SUCCESS) {
cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
cistpl_io_t *io = &(parse.cftable_entry.io);
-
+
if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
pcmcia_parse_tuple(link, &tuple, &parse) != 0 ||
cfg->index == 0 || cfg->io.nwin == 0)
goto next_entry;
-
+
link->conf.ConfigIndex = cfg->index;
/* For multifunction cards, by convention, we configure the
network function with window 0, and serial with window 1 */
@@ -584,7 +584,7 @@ static int pcnet_config(struct pcmcia_device *link)
}
CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
-
+
if (link->io.NumPorts2 == 8) {
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
@@ -592,7 +592,7 @@ static int pcnet_config(struct pcmcia_device *link)
if ((manfid == MANFID_IBM) &&
(prodid == PRODID_IBM_HOME_AND_AWAY))
link->conf.ConfigIndex |= 0x10;
-
+
CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
dev->irq = link->irq.AssignedIRQ;
dev->base_addr = link->io.BasePort1;
@@ -614,7 +614,7 @@ static int pcnet_config(struct pcmcia_device *link)
hw_info = get_ax88190(link);
if (hw_info == NULL)
hw_info = get_hwired(link);
-
+
if (hw_info == NULL) {
printk(KERN_NOTICE "pcnet_cs: unable to read hardware net"
" address for io base %#3lx\n", dev->base_addr);
@@ -631,7 +631,7 @@ static int pcnet_config(struct pcmcia_device *link)
info->flags &= ~USE_BIG_BUF;
if (!use_big_buf)
info->flags &= ~USE_BIG_BUF;
-
+
if (info->flags & USE_BIG_BUF) {
start_pg = SOCKET_START_PG;
stop_pg = SOCKET_STOP_PG;
@@ -929,7 +929,7 @@ static void set_misc_reg(struct net_device *dev)
kio_addr_t nic_base = dev->base_addr;
pcnet_dev_t *info = PRIV(dev);
u_char tmp;
-
+
if (info->flags & HAS_MISC_REG) {
tmp = inb_p(nic_base + PCNET_MISC) & ~3;
if (dev->if_port == 2)
@@ -1022,7 +1022,7 @@ static int pcnet_close(struct net_device *dev)
ei_close(dev);
free_irq(dev->irq, dev);
-
+
link->open--;
netif_stop_queue(dev);
del_timer_sync(&info->watchdog);
@@ -1054,12 +1054,12 @@ static void pcnet_reset_8390(struct net_device *dev)
udelay(100);
}
outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */
-
+
if (i == 100)
printk(KERN_ERR "%s: pcnet_reset_8390() did not complete.\n",
dev->name);
set_misc_reg(dev);
-
+
} /* pcnet_reset_8390 */
/*====================================================================*/
@@ -1233,7 +1233,7 @@ static void dma_get_8390_hdr(struct net_device *dev,
dev->name, ei_status.dmaing, ei_status.irqlock);
return;
}
-
+
ei_status.dmaing |= 0x01;
outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD);
outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
@@ -1458,7 +1458,7 @@ static void shmem_get_8390_hdr(struct net_device *dev,
void __iomem *xfer_start = ei_status.mem + (TX_PAGES<<8)
+ (ring_page << 8)
- (ei_status.rx_start_page << 8);
-
+
copyin(hdr, xfer_start, sizeof(struct e8390_pkt_hdr));
/* Fix for big endian systems */
hdr->count = le16_to_cpu(hdr->count);
@@ -1473,7 +1473,7 @@ static void shmem_block_input(struct net_device *dev, int count,
unsigned long offset = (TX_PAGES<<8) + ring_offset
- (ei_status.rx_start_page << 8);
char *buf = skb->data;
-
+
if (offset + count > ei_status.priv) {
/* We must wrap the input move. */
int semi_count = ei_status.priv - offset;
@@ -1541,7 +1541,7 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
info->base = NULL; link->win = NULL;
goto failed;
}
-
+
ei_status.mem = info->base + offset;
ei_status.priv = req.Size;
dev->mem_start = (u_long)ei_status.mem;
@@ -1768,6 +1768,8 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "NE2K.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "tamarack.cis"),
+ PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0",
+ 0xb4be14e3, 0x43ac239b, 0x0877b627),
PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, pcnet_ids);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index fa39b944bc46..cda3e53d6917 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -45,5 +45,11 @@ config CICADA_PHY
---help---
Currently supports the cis8204
+config SMSC_PHY
+ tristate "Drivers for SMSC PHYs"
+ depends on PHYLIB
+ ---help---
+ Currently supports the LAN83C185 PHY
+
endmenu
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index e4116a5fbb4c..d9614134cc06 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_DAVICOM_PHY) += davicom.o
obj-$(CONFIG_CICADA_PHY) += cicada.o
obj-$(CONFIG_LXT_PHY) += lxt.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
+obj-$(CONFIG_SMSC_PHY) += smsc.o
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
new file mode 100644
index 000000000000..25e31fb5cb31
--- /dev/null
+++ b/drivers/net/phy/smsc.c
@@ -0,0 +1,101 @@
+/*
+ * drivers/net/phy/smsc.c
+ *
+ * Driver for SMSC PHYs
+ *
+ * Author: Herbert Valerio Riedel
+ *
+ * Copyright (c) 2006 Herbert Valerio Riedel <hvr@gnu.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+
+#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */
+#define MII_LAN83C185_IM 30 /* Interrupt Mask */
+
+#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */
+#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */
+#define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */
+#define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */
+#define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */
+#define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */
+#define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */
+
+#define MII_LAN83C185_ISF_INT_ALL (0x0e)
+
+#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \
+ (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4)
+
+
+static int lan83c185_config_intr(struct phy_device *phydev)
+{
+ int rc = phy_write (phydev, MII_LAN83C185_IM,
+ ((PHY_INTERRUPT_ENABLED == phydev->interrupts)
+ ? MII_LAN83C185_ISF_INT_PHYLIB_EVENTS
+ : 0));
+
+ return rc < 0 ? rc : 0;
+}
+
+static int lan83c185_ack_interrupt(struct phy_device *phydev)
+{
+ int rc = phy_read (phydev, MII_LAN83C185_ISF);
+
+ return rc < 0 ? rc : 0;
+}
+
+static int lan83c185_config_init(struct phy_device *phydev)
+{
+ return lan83c185_ack_interrupt (phydev);
+}
+
+
+static struct phy_driver lan83c185_driver = {
+ .phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
+ .phy_id_mask = 0xfffffff0,
+ .name = "SMSC LAN83C185",
+
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
+ | SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+
+ /* basic functions */
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .config_init = lan83c185_config_init,
+
+ /* IRQ related */
+ .ack_interrupt = lan83c185_ack_interrupt,
+ .config_intr = lan83c185_config_intr,
+
+ .driver = { .owner = THIS_MODULE, }
+};
+
+static int __init smsc_init(void)
+{
+ return phy_driver_register (&lan83c185_driver);
+}
+
+static void __exit smsc_exit(void)
+{
+ phy_driver_unregister (&lan83c185_driver);
+}
+
+MODULE_DESCRIPTION("SMSC PHY driver");
+MODULE_AUTHOR("Herbert Valerio Riedel");
+MODULE_LICENSE("GPL");
+
+module_init(smsc_init);
+module_exit(smsc_exit);
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 0ad3310290f1..9945cc6b8d90 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -184,6 +184,7 @@ static const struct {
static struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), },
{ PCI_DEVICE(0x16ec, 0x0116), },
{ PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024, },
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
index 00179bc3437f..0ef525899566 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -167,6 +167,7 @@ typedef struct _XENA_dev_config {
u8 unused4[0x08];
u64 gpio_int_reg;
+#define GPIO_INT_REG_DP_ERR_INT BIT(0)
#define GPIO_INT_REG_LINK_DOWN BIT(1)
#define GPIO_INT_REG_LINK_UP BIT(2)
u64 gpio_int_mask;
@@ -187,7 +188,7 @@ typedef struct _XENA_dev_config {
/* PIC Control registers */
u64 pic_control;
#define PIC_CNTL_RX_ALARM_MAP_1 BIT(0)
-#define PIC_CNTL_SHARED_SPLITS(n) vBIT(n,11,4)
+#define PIC_CNTL_SHARED_SPLITS(n) vBIT(n,11,5)
u64 swapper_ctrl;
#define SWAPPER_CTRL_PIF_R_FE BIT(0)
@@ -267,6 +268,21 @@ typedef struct _XENA_dev_config {
/* General Configuration */
u64 mdio_control;
+#define MDIO_MMD_INDX_ADDR(val) vBIT(val, 0, 16)
+#define MDIO_MMD_DEV_ADDR(val) vBIT(val, 19, 5)
+#define MDIO_MMD_PMA_DEV_ADDR 0x1
+#define MDIO_MMD_PMD_DEV_ADDR 0x1
+#define MDIO_MMD_WIS_DEV_ADDR 0x2
+#define MDIO_MMD_PCS_DEV_ADDR 0x3
+#define MDIO_MMD_PHYXS_DEV_ADDR 0x4
+#define MDIO_MMS_PRT_ADDR(val) vBIT(val, 27, 5)
+#define MDIO_CTRL_START_TRANS(val) vBIT(val, 56, 4)
+#define MDIO_OP(val) vBIT(val, 60, 2)
+#define MDIO_OP_ADDR_TRANS 0x0
+#define MDIO_OP_WRITE_TRANS 0x1
+#define MDIO_OP_READ_POST_INC_TRANS 0x2
+#define MDIO_OP_READ_TRANS 0x3
+#define MDIO_MDIO_DATA(val) vBIT(val, 32, 16)
u64 dtx_control;
@@ -284,9 +300,13 @@ typedef struct _XENA_dev_config {
u64 gpio_control;
#define GPIO_CTRL_GPIO_0 BIT(8)
u64 misc_control;
+#define EXT_REQ_EN BIT(1)
#define MISC_LINK_STABILITY_PRD(val) vBIT(val,29,3)
- u8 unused7_1[0x240 - 0x208];
+ u8 unused7_1[0x230 - 0x208];
+
+ u64 pic_control2;
+ u64 ini_dperr_ctrl;
u64 wreq_split_mask;
#define WREQ_SPLIT_MASK_SET_MASK(val) vBIT(val, 52, 12)
@@ -493,6 +513,7 @@ typedef struct _XENA_dev_config {
#define PRC_CTRL_NO_SNOOP_DESC BIT(22)
#define PRC_CTRL_NO_SNOOP_BUFF BIT(23)
#define PRC_CTRL_BIMODAL_INTERRUPT BIT(37)
+#define PRC_CTRL_GROUP_READS BIT(38)
#define PRC_CTRL_RXD_BACKOFF_INTERVAL(val) vBIT(val,40,24)
u64 prc_alarm_action;
@@ -541,7 +562,12 @@ typedef struct _XENA_dev_config {
#define RX_PA_CFG_IGNORE_LLC_CTRL BIT(3)
#define RX_PA_CFG_IGNORE_L2_ERR BIT(6)
- u8 unused12[0x700 - 0x1D8];
+ u64 unused_11_1;
+
+ u64 ring_bump_counter1;
+ u64 ring_bump_counter2;
+
+ u8 unused12[0x700 - 0x1F0];
u64 rxdma_debug_ctrl;
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 79208f434ac1..cac9fdd2e1d5 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -26,15 +26,22 @@
*
* The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables.
+ *
* rx_ring_num : This can be used to program the number of receive rings used
* in the driver.
- * rx_ring_sz: This defines the number of descriptors each ring can have. This
- * is also an array of size 8.
+ * rx_ring_sz: This defines the number of receive blocks each ring can have.
+ * This is also an array of size 8.
* rx_ring_mode: This defines the operation mode of all 8 rings. The valid
* values are 1, 2 and 3.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
* tx_fifo_len: This too is an array of 8. Each element defines the number of
* Tx descriptors that can be associated with each corresponding FIFO.
+ * intr_type: This defines the type of interrupt. The values can be 0(INTA),
+ * 1(MSI), 2(MSI_X). Default value is '0(INTA)'
+ * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
+ * Possible values: '1' to enable, '0' to disable. Default is '0'.
+ * lro_max_pkts: This parameter defines the maximum number of packets that
+ * can be aggregated into a single large packet.
************************************************************************/
#include <linux/config.h>
@@ -70,7 +77,7 @@
#include "s2io.h"
#include "s2io-regs.h"
-#define DRV_VERSION "2.0.11.2"
+#define DRV_VERSION "2.0.14.2"
/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
@@ -106,18 +113,14 @@ static inline int RXD_IS_UP2DT(RxD_t *rxdp)
#define LOW 2
static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
{
- int level = 0;
mac_info_t *mac_control;
mac_control = &sp->mac_control;
- if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
- level = LOW;
- if (rxb_size <= rxd_count[sp->rxd_mode]) {
- level = PANIC;
- }
- }
-
- return level;
+ if (rxb_size <= rxd_count[sp->rxd_mode])
+ return PANIC;
+ else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
+ return LOW;
+ return 0;
}
/* Ethtool related variables and Macros. */
@@ -136,7 +139,11 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
{"tmac_mcst_frms"},
{"tmac_bcst_frms"},
{"tmac_pause_ctrl_frms"},
+ {"tmac_ttl_octets"},
+ {"tmac_ucst_frms"},
+ {"tmac_nucst_frms"},
{"tmac_any_err_frms"},
+ {"tmac_ttl_less_fb_octets"},
{"tmac_vld_ip_octets"},
{"tmac_vld_ip"},
{"tmac_drop_ip"},
@@ -151,13 +158,27 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
{"rmac_vld_mcst_frms"},
{"rmac_vld_bcst_frms"},
{"rmac_in_rng_len_err_frms"},
+ {"rmac_out_rng_len_err_frms"},
{"rmac_long_frms"},
{"rmac_pause_ctrl_frms"},
+ {"rmac_unsup_ctrl_frms"},
+ {"rmac_ttl_octets"},
+ {"rmac_accepted_ucst_frms"},
+ {"rmac_accepted_nucst_frms"},
{"rmac_discarded_frms"},
+ {"rmac_drop_events"},
+ {"rmac_ttl_less_fb_octets"},
+ {"rmac_ttl_frms"},
{"rmac_usized_frms"},
{"rmac_osized_frms"},
{"rmac_frag_frms"},
{"rmac_jabber_frms"},
+ {"rmac_ttl_64_frms"},
+ {"rmac_ttl_65_127_frms"},
+ {"rmac_ttl_128_255_frms"},
+ {"rmac_ttl_256_511_frms"},
+ {"rmac_ttl_512_1023_frms"},
+ {"rmac_ttl_1024_1518_frms"},
{"rmac_ip"},
{"rmac_ip_octets"},
{"rmac_hdr_err_ip"},
@@ -166,12 +187,82 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
{"rmac_tcp"},
{"rmac_udp"},
{"rmac_err_drp_udp"},
+ {"rmac_xgmii_err_sym"},
+ {"rmac_frms_q0"},
+ {"rmac_frms_q1"},
+ {"rmac_frms_q2"},
+ {"rmac_frms_q3"},
+ {"rmac_frms_q4"},
+ {"rmac_frms_q5"},
+ {"rmac_frms_q6"},
+ {"rmac_frms_q7"},
+ {"rmac_full_q0"},
+ {"rmac_full_q1"},
+ {"rmac_full_q2"},
+ {"rmac_full_q3"},
+ {"rmac_full_q4"},
+ {"rmac_full_q5"},
+ {"rmac_full_q6"},
+ {"rmac_full_q7"},
{"rmac_pause_cnt"},
+ {"rmac_xgmii_data_err_cnt"},
+ {"rmac_xgmii_ctrl_err_cnt"},
{"rmac_accepted_ip"},
{"rmac_err_tcp"},
+ {"rd_req_cnt"},
+ {"new_rd_req_cnt"},
+ {"new_rd_req_rtry_cnt"},
+ {"rd_rtry_cnt"},
+ {"wr_rtry_rd_ack_cnt"},
+ {"wr_req_cnt"},
+ {"new_wr_req_cnt"},
+ {"new_wr_req_rtry_cnt"},
+ {"wr_rtry_cnt"},
+ {"wr_disc_cnt"},
+ {"rd_rtry_wr_ack_cnt"},
+ {"txp_wr_cnt"},
+ {"txd_rd_cnt"},
+ {"txd_wr_cnt"},
+ {"rxd_rd_cnt"},
+ {"rxd_wr_cnt"},
+ {"txf_rd_cnt"},
+ {"rxf_wr_cnt"},
+ {"rmac_ttl_1519_4095_frms"},
+ {"rmac_ttl_4096_8191_frms"},
+ {"rmac_ttl_8192_max_frms"},
+ {"rmac_ttl_gt_max_frms"},
+ {"rmac_osized_alt_frms"},
+ {"rmac_jabber_alt_frms"},
+ {"rmac_gt_max_alt_frms"},
+ {"rmac_vlan_frms"},
+ {"rmac_len_discard"},
+ {"rmac_fcs_discard"},
+ {"rmac_pf_discard"},
+ {"rmac_da_discard"},
+ {"rmac_red_discard"},
+ {"rmac_rts_discard"},
+ {"rmac_ingm_full_discard"},
+ {"link_fault_cnt"},
{"\n DRIVER STATISTICS"},
{"single_bit_ecc_errs"},
{"double_bit_ecc_errs"},
+ {"parity_err_cnt"},
+ {"serious_err_cnt"},
+ {"soft_reset_cnt"},
+ {"fifo_full_cnt"},
+ {"ring_full_cnt"},
+ ("alarm_transceiver_temp_high"),
+ ("alarm_transceiver_temp_low"),
+ ("alarm_laser_bias_current_high"),
+ ("alarm_laser_bias_current_low"),
+ ("alarm_laser_output_power_high"),
+ ("alarm_laser_output_power_low"),
+ ("warn_transceiver_temp_high"),
+ ("warn_transceiver_temp_low"),
+ ("warn_laser_bias_current_high"),
+ ("warn_laser_bias_current_low"),
+ ("warn_laser_output_power_high"),
+ ("warn_laser_output_power_low"),
("lro_aggregated_pkts"),
("lro_flush_both_count"),
("lro_out_of_sequence_pkts"),
@@ -220,9 +311,7 @@ static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
* the XAUI.
*/
-#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
#define END_SIGN 0x0
-
static const u64 herc_act_dtx_cfg[] = {
/* Set address */
0x8000051536750000ULL, 0x80000515367500E0ULL,
@@ -244,37 +333,19 @@ static const u64 herc_act_dtx_cfg[] = {
END_SIGN
};
-static const u64 xena_mdio_cfg[] = {
- /* Reset PMA PLL */
- 0xC001010000000000ULL, 0xC0010100000000E0ULL,
- 0xC0010100008000E4ULL,
- /* Remove Reset from PMA PLL */
- 0xC001010000000000ULL, 0xC0010100000000E0ULL,
- 0xC0010100000000E4ULL,
- END_SIGN
-};
-
static const u64 xena_dtx_cfg[] = {
+ /* Set address */
0x8000051500000000ULL, 0x80000515000000E0ULL,
- 0x80000515D93500E4ULL, 0x8001051500000000ULL,
- 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
- 0x8002051500000000ULL, 0x80020515000000E0ULL,
- 0x80020515F21000E4ULL,
- /* Set PADLOOPBACKN */
- 0x8002051500000000ULL, 0x80020515000000E0ULL,
- 0x80020515B20000E4ULL, 0x8003051500000000ULL,
- 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
- 0x8004051500000000ULL, 0x80040515000000E0ULL,
- 0x80040515B20000E4ULL, 0x8005051500000000ULL,
- 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
- SWITCH_SIGN,
- /* Remove PADLOOPBACKN */
+ /* Write data */
+ 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
+ /* Set address */
+ 0x8001051500000000ULL, 0x80010515000000E0ULL,
+ /* Write data */
+ 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
+ /* Set address */
0x8002051500000000ULL, 0x80020515000000E0ULL,
- 0x80020515F20000E4ULL, 0x8003051500000000ULL,
- 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
- 0x8004051500000000ULL, 0x80040515000000E0ULL,
- 0x80040515F20000E4ULL, 0x8005051500000000ULL,
- 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
+ /* Write data */
+ 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
END_SIGN
};
@@ -303,15 +374,15 @@ static const u64 fix_mac[] = {
/* Module Loadable parameters. */
static unsigned int tx_fifo_num = 1;
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
- {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
+ {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_num = 1;
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
- {[0 ...(MAX_RX_RINGS - 1)] = 0 };
+ {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = 0 };
static unsigned int rx_ring_mode = 1;
static unsigned int use_continuous_tx_intrs = 1;
-static unsigned int rmac_pause_time = 65535;
+static unsigned int rmac_pause_time = 0x100;
static unsigned int mc_pause_threshold_q0q3 = 187;
static unsigned int mc_pause_threshold_q4q7 = 187;
static unsigned int shared_splits;
@@ -549,11 +620,6 @@ static int init_shared_mem(struct s2io_nic *nic)
rx_blocks->block_dma_addr +
(rxd_size[nic->rxd_mode] * l);
}
-
- mac_control->rings[i].rx_blocks[j].block_virt_addr =
- tmp_v_addr;
- mac_control->rings[i].rx_blocks[j].block_dma_addr =
- tmp_p_addr;
}
/* Interlinking all Rx Blocks */
for (j = 0; j < blk_cnt; j++) {
@@ -772,7 +838,21 @@ static int s2io_verify_pci_mode(nic_t *nic)
return mode;
}
+#define NEC_VENID 0x1033
+#define NEC_DEVID 0x0125
+static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
+{
+ struct pci_dev *tdev = NULL;
+ while ((tdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
+ if ((tdev->vendor == NEC_VENID) && (tdev->device == NEC_DEVID)) {
+ if (tdev->bus == s2io_pdev->bus->parent)
+ return 1;
+ }
+ }
+ return 0;
+}
+static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
/**
* s2io_print_pci_mode -
*/
@@ -789,6 +869,14 @@ static int s2io_print_pci_mode(nic_t *nic)
if ( val64 & PCI_MODE_UNKNOWN_MODE)
return -1; /* Unknown PCI mode */
+ config->bus_speed = bus_speed[mode];
+
+ if (s2io_on_nec_bridge(nic->pdev)) {
+ DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
+ nic->dev->name);
+ return mode;
+ }
+
if (val64 & PCI_MODE_32_BITS) {
DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
} else {
@@ -798,35 +886,27 @@ static int s2io_print_pci_mode(nic_t *nic)
switch(mode) {
case PCI_MODE_PCI_33:
DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
- config->bus_speed = 33;
break;
case PCI_MODE_PCI_66:
DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
- config->bus_speed = 133;
break;
case PCI_MODE_PCIX_M1_66:
DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
- config->bus_speed = 133; /* Herc doubles the clock rate */
break;
case PCI_MODE_PCIX_M1_100:
DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
- config->bus_speed = 200;
break;
case PCI_MODE_PCIX_M1_133:
DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
- config->bus_speed = 266;
break;
case PCI_MODE_PCIX_M2_66:
DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
- config->bus_speed = 133;
break;
case PCI_MODE_PCIX_M2_100:
DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
- config->bus_speed = 200;
break;
case PCI_MODE_PCIX_M2_133:
DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
- config->bus_speed = 266;
break;
default:
return -1; /* Unsupported bus speed */
@@ -854,7 +934,7 @@ static int init_nic(struct s2io_nic *nic)
int i, j;
mac_info_t *mac_control;
struct config_param *config;
- int mdio_cnt = 0, dtx_cnt = 0;
+ int dtx_cnt = 0;
unsigned long long mem_share;
int mem_size;
@@ -901,20 +981,6 @@ static int init_nic(struct s2io_nic *nic)
val64 = dev->mtu;
writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
- /*
- * Configuring the XAUI Interface of Xena.
- * ***************************************
- * To Configure the Xena's XAUI, one has to write a series
- * of 64 bit values into two registers in a particular
- * sequence. Hence a macro 'SWITCH_SIGN' has been defined
- * which will be defined in the array of configuration values
- * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
- * to switch writing from one regsiter to another. We continue
- * writing these values until we encounter the 'END_SIGN' macro.
- * For example, After making a series of 21 writes into
- * dtx_control register the 'SWITCH_SIGN' appears and hence we
- * start writing into mdio_control until we encounter END_SIGN.
- */
if (nic->device_type & XFRAME_II_DEVICE) {
while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
@@ -924,35 +990,11 @@ static int init_nic(struct s2io_nic *nic)
dtx_cnt++;
}
} else {
- while (1) {
- dtx_cfg:
- while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
- if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
- dtx_cnt++;
- goto mdio_cfg;
- }
- SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
- &bar0->dtx_control, UF);
- val64 = readq(&bar0->dtx_control);
- dtx_cnt++;
- }
- mdio_cfg:
- while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
- if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
- mdio_cnt++;
- goto dtx_cfg;
- }
- SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
- &bar0->mdio_control, UF);
- val64 = readq(&bar0->mdio_control);
- mdio_cnt++;
- }
- if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
- (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
- break;
- } else {
- goto dtx_cfg;
- }
+ while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
+ SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
+ &bar0->dtx_control, UF);
+ val64 = readq(&bar0->dtx_control);
+ dtx_cnt++;
}
}
@@ -994,11 +1036,6 @@ static int init_nic(struct s2io_nic *nic)
}
}
- /* Enable Tx FIFO partition 0. */
- val64 = readq(&bar0->tx_fifo_partition_0);
- val64 |= BIT(0); /* To enable the FIFO partition. */
- writeq(val64, &bar0->tx_fifo_partition_0);
-
/*
* Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
* SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
@@ -1177,6 +1214,11 @@ static int init_nic(struct s2io_nic *nic)
break;
}
+ /* Enable Tx FIFO partition 0. */
+ val64 = readq(&bar0->tx_fifo_partition_0);
+ val64 |= (TX_FIFO_PARTITION_EN);
+ writeq(val64, &bar0->tx_fifo_partition_0);
+
/* Filling the Rx round robin registers as per the
* number of Rings and steering based on QoS.
*/
@@ -1545,19 +1587,26 @@ static int init_nic(struct s2io_nic *nic)
val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
writeq(val64, &bar0->pic_control);
+ if (nic->config.bus_speed == 266) {
+ writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
+ writeq(0x0, &bar0->read_retry_delay);
+ writeq(0x0, &bar0->write_retry_delay);
+ }
+
/*
* Programming the Herc to split every write transaction
* that does not start on an ADB to reduce disconnects.
*/
if (nic->device_type == XFRAME_II_DEVICE) {
- val64 = WREQ_SPLIT_MASK_SET_MASK(255);
- writeq(val64, &bar0->wreq_split_mask);
- }
-
- /* Setting Link stability period to 64 ms */
- if (nic->device_type == XFRAME_II_DEVICE) {
- val64 = MISC_LINK_STABILITY_PRD(3);
+ val64 = EXT_REQ_EN | MISC_LINK_STABILITY_PRD(3);
writeq(val64, &bar0->misc_control);
+ val64 = readq(&bar0->pic_control2);
+ val64 &= ~(BIT(13)|BIT(14)|BIT(15));
+ writeq(val64, &bar0->pic_control2);
+ }
+ if (strstr(nic->product_name, "CX4")) {
+ val64 = TMAC_AVG_IPG(0x17);
+ writeq(val64, &bar0->tmac_avg_ipg);
}
return SUCCESS;
@@ -1948,6 +1997,10 @@ static int start_nic(struct s2io_nic *nic)
val64 |= PRC_CTRL_RC_ENABLED;
else
val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
+ if (nic->device_type == XFRAME_II_DEVICE)
+ val64 |= PRC_CTRL_GROUP_READS;
+ val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
+ val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
writeq(val64, &bar0->prc_ctrl_n[i]);
}
@@ -2018,6 +2071,13 @@ static int start_nic(struct s2io_nic *nic)
val64 |= ADAPTER_EOI_TX_ON;
writeq(val64, &bar0->adapter_control);
+ if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
+ /*
+ * Don't see link state interrupts initially on some switches,
+ * so directly scheduling the link state task here.
+ */
+ schedule_work(&nic->set_link_task);
+ }
/* SXE-002: Initialize link and activity LED */
subid = nic->pdev->subsystem_device;
if (((subid & 0xFF) >= 0x07) &&
@@ -2029,12 +2089,6 @@ static int start_nic(struct s2io_nic *nic)
writeq(val64, (void __iomem *)bar0 + 0x2700);
}
- /*
- * Don't see link state interrupts on certain switches, so
- * directly scheduling a link state task from here.
- */
- schedule_work(&nic->set_link_task);
-
return SUCCESS;
}
/**
@@ -2134,7 +2188,7 @@ static void stop_nic(struct s2io_nic *nic)
{
XENA_dev_config_t __iomem *bar0 = nic->bar0;
register u64 val64 = 0;
- u16 interruptible, i;
+ u16 interruptible;
mac_info_t *mac_control;
struct config_param *config;
@@ -2147,12 +2201,10 @@ static void stop_nic(struct s2io_nic *nic)
interruptible |= TX_MAC_INTR | RX_MAC_INTR;
en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
- /* Disable PRCs */
- for (i = 0; i < config->rx_ring_num; i++) {
- val64 = readq(&bar0->prc_ctrl_n[i]);
- val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
- writeq(val64, &bar0->prc_ctrl_n[i]);
- }
+ /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
+ val64 = readq(&bar0->adapter_control);
+ val64 &= ~(ADAPTER_CNTL_EN);
+ writeq(val64, &bar0->adapter_control);
}
static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
@@ -2231,13 +2283,12 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
atomic_read(&nic->rx_bufs_left[ring_no]);
+ block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
+ off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
while (alloc_tab < alloc_cnt) {
block_no = mac_control->rings[ring_no].rx_curr_put_info.
block_index;
- block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
- block_index;
off = mac_control->rings[ring_no].rx_curr_put_info.offset;
- off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
rxdp = mac_control->rings[ring_no].
rx_blocks[block_no].rxds[off].virt_addr;
@@ -2307,9 +2358,9 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
memset(rxdp, 0, sizeof(RxD1_t));
skb_reserve(skb, NET_IP_ALIGN);
((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single
- (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
- rxdp->Control_2 &= (~MASK_BUFFER0_SIZE_1);
- rxdp->Control_2 |= SET_BUFFER0_SIZE_1(size);
+ (nic->pdev, skb->data, size - NET_IP_ALIGN,
+ PCI_DMA_FROMDEVICE);
+ rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
} else if (nic->rxd_mode >= RXD_MODE_3A) {
/*
@@ -2516,7 +2567,7 @@ static int s2io_poll(struct net_device *dev, int *budget)
mac_info_t *mac_control;
struct config_param *config;
XENA_dev_config_t __iomem *bar0 = nic->bar0;
- u64 val64;
+ u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
int i;
atomic_inc(&nic->isr_cnt);
@@ -2528,8 +2579,8 @@ static int s2io_poll(struct net_device *dev, int *budget)
nic->pkts_to_process = dev->quota;
org_pkts_to_process = nic->pkts_to_process;
- val64 = readq(&bar0->rx_traffic_int);
writeq(val64, &bar0->rx_traffic_int);
+ val64 = readl(&bar0->rx_traffic_int);
for (i = 0; i < config->rx_ring_num; i++) {
rx_intr_handler(&mac_control->rings[i]);
@@ -2554,7 +2605,8 @@ static int s2io_poll(struct net_device *dev, int *budget)
}
}
/* Re enable the Rx interrupts. */
- en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
+ writeq(0x0, &bar0->rx_traffic_mask);
+ val64 = readl(&bar0->rx_traffic_mask);
atomic_dec(&nic->isr_cnt);
return 0;
@@ -2666,6 +2718,7 @@ static void rx_intr_handler(ring_info_t *ring_data)
((RxD3_t*)rxdp)->Buffer2_ptr,
dev->mtu, PCI_DMA_FROMDEVICE);
}
+ prefetch(skb->data);
rx_osm_handler(ring_data, rxdp);
get_info.offset++;
ring_data->rx_curr_get_info.offset = get_info.offset;
@@ -2737,6 +2790,10 @@ static void tx_intr_handler(fifo_info_t *fifo_data)
if (txdlp->Control_1 & TXD_T_CODE) {
unsigned long long err;
err = txdlp->Control_1 & TXD_T_CODE;
+ if (err & 0x1) {
+ nic->mac_control.stats_info->sw_stat.
+ parity_err_cnt++;
+ }
if ((err >> 48) == 0xA) {
DBG_PRINT(TX_DBG, "TxD returned due \
to loss of link\n");
@@ -2760,7 +2817,8 @@ to loss of link\n");
dev_kfree_skb_irq(skb);
get_info.offset++;
- get_info.offset %= get_info.fifo_len + 1;
+ if (get_info.offset == get_info.fifo_len + 1)
+ get_info.offset = 0;
txdlp = (TxD_t *) fifo_data->list_info
[get_info.offset].list_virt_addr;
fifo_data->tx_curr_get_info.offset =
@@ -2774,6 +2832,256 @@ to loss of link\n");
}
/**
+ * s2io_mdio_write - Function to write into MDIO registers
+ * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
+ * @addr : address value
+ * @value : data value
+ * @dev : pointer to net_device structure
+ * Description:
+ * This function is used to write values into the MDIO registers.
+ * Return value: NONE
+ */
+static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
+{
+ u64 val64 = 0x0;
+ nic_t *sp = dev->priv;
+ XENA_dev_config_t *bar0 = (XENA_dev_config_t *)sp->bar0;
+
+ /* address transaction */
+ val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
+ | MDIO_MMD_DEV_ADDR(mmd_type)
+ | MDIO_MMS_PRT_ADDR(0x0);
+ writeq(val64, &bar0->mdio_control);
+ val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
+ writeq(val64, &bar0->mdio_control);
+ udelay(100);
+
+ /* Data transaction */
+ val64 = 0x0;
+ val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
+ | MDIO_MMD_DEV_ADDR(mmd_type)
+ | MDIO_MMS_PRT_ADDR(0x0)
+ | MDIO_MDIO_DATA(value)
+ | MDIO_OP(MDIO_OP_WRITE_TRANS);
+ writeq(val64, &bar0->mdio_control);
+ val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
+ writeq(val64, &bar0->mdio_control);
+ udelay(100);
+
+ val64 = 0x0;
+ val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
+ | MDIO_MMD_DEV_ADDR(mmd_type)
+ | MDIO_MMS_PRT_ADDR(0x0)
+ | MDIO_OP(MDIO_OP_READ_TRANS);
+ writeq(val64, &bar0->mdio_control);
+ val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
+ writeq(val64, &bar0->mdio_control);
+ udelay(100);
+
+}
+
+/**
+ * s2io_mdio_read - Function to read from MDIO registers
+ * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
+ * @addr : address value
+ * @dev : pointer to net_device structure
+ * Description:
+ * This function is used to read values from the MDIO registers.
+ * Return value: the value read from the addressed register
+ */
+static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
+{
+ u64 val64 = 0x0;
+ u64 rval64 = 0x0;
+ nic_t *sp = dev->priv;
+ XENA_dev_config_t *bar0 = (XENA_dev_config_t *)sp->bar0;
+
+ /* address transaction */
+ val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
+ | MDIO_MMD_DEV_ADDR(mmd_type)
+ | MDIO_MMS_PRT_ADDR(0x0);
+ writeq(val64, &bar0->mdio_control);
+ val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
+ writeq(val64, &bar0->mdio_control);
+ udelay(100);
+
+ /* Data transaction */
+ val64 = 0x0;
+ val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
+ | MDIO_MMD_DEV_ADDR(mmd_type)
+ | MDIO_MMS_PRT_ADDR(0x0)
+ | MDIO_OP(MDIO_OP_READ_TRANS);
+ writeq(val64, &bar0->mdio_control);
+ val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
+ writeq(val64, &bar0->mdio_control);
+ udelay(100);
+
+ /* Read the value from regs */
+ rval64 = readq(&bar0->mdio_control);
+ rval64 = rval64 & 0xFFFF0000;
+ rval64 = rval64 >> 16;
+ return rval64;
+}
+/**
+ * s2io_chk_xpak_counter - Function to check the status of the xpak counters
+ * @counter : counter value to be updated
+ * @regs_stat : xpak alarm register status word
+ * @index : index of this counter's 2-bit field within @regs_stat
+ * @flag : flag to indicate the status
+ * @type : counter type
+ * Description:
+ * This function checks and updates the status of the xpak counters.
+ * Return value: NONE
+ */
+
+static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
+{
+ u64 mask = 0x3;
+ u64 val64;
+ int i;
+ for(i = 0; i <index; i++)
+ mask = mask << 0x2;
+
+ if(flag > 0)
+ {
+ *counter = *counter + 1;
+ val64 = *regs_stat & mask;
+ val64 = val64 >> (index * 0x2);
+ val64 = val64 + 1;
+ if(val64 == 3)
+ {
+ switch(type)
+ {
+ case 1:
+ DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
+ "service. Excessive temperatures may "
+ "result in premature transceiver "
+ "failure \n");
+ break;
+ case 2:
+ DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
+ "service Excessive bias currents may "
+ "indicate imminent laser diode "
+ "failure \n");
+ break;
+ case 3:
+ DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
+ "service Excessive laser output "
+ "power may saturate far-end "
+ "receiver\n");
+ break;
+ default:
+ DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
+ "type \n");
+ }
+ val64 = 0x0;
+ }
+ val64 = val64 << (index * 0x2);
+ *regs_stat = (*regs_stat & (~mask)) | (val64);
+
+ } else {
+ *regs_stat = *regs_stat & (~mask);
+ }
+}
+
+/**
+ * s2io_updt_xpak_counter - Function to update the xpak counters
+ * @dev : pointer to net_device struct
+ * Description:
+ * This function updates the status of the xpak counters.
+ * Return value: NONE
+ */
+static void s2io_updt_xpak_counter(struct net_device *dev)
+{
+ u16 flag = 0x0;
+ u16 type = 0x0;
+ u16 val16 = 0x0;
+ u64 val64 = 0x0;
+ u64 addr = 0x0;
+
+ nic_t *sp = dev->priv;
+ StatInfo_t *stat_info = sp->mac_control.stats_info;
+
+ /* Check the communication with the MDIO slave */
+ addr = 0x0000;
+ val64 = 0x0;
+ val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
+ if((val64 == 0xFFFF) || (val64 == 0x0000))
+ {
+ DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
+ "Returned %llx\n", (unsigned long long)val64);
+ return;
+ }
+
+ /* Check for the expected value of 0x2040 at PMA address 0x0000 */
+ if(val64 != 0x2040)
+ {
+ DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
+ DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
+ (unsigned long long)val64);
+ return;
+ }
+
+ /* Loading the DOM register to MDIO register */
+ addr = 0xA100;
+ s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
+ val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
+
+ /* Reading the Alarm flags */
+ addr = 0xA070;
+ val64 = 0x0;
+ val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
+
+ flag = CHECKBIT(val64, 0x7);
+ type = 1;
+ s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
+ &stat_info->xpak_stat.xpak_regs_stat,
+ 0x0, flag, type);
+
+ if(CHECKBIT(val64, 0x6))
+ stat_info->xpak_stat.alarm_transceiver_temp_low++;
+
+ flag = CHECKBIT(val64, 0x3);
+ type = 2;
+ s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
+ &stat_info->xpak_stat.xpak_regs_stat,
+ 0x2, flag, type);
+
+ if(CHECKBIT(val64, 0x2))
+ stat_info->xpak_stat.alarm_laser_bias_current_low++;
+
+ flag = CHECKBIT(val64, 0x1);
+ type = 3;
+ s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
+ &stat_info->xpak_stat.xpak_regs_stat,
+ 0x4, flag, type);
+
+ if(CHECKBIT(val64, 0x0))
+ stat_info->xpak_stat.alarm_laser_output_power_low++;
+
+ /* Reading the Warning flags */
+ addr = 0xA074;
+ val64 = 0x0;
+ val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
+
+ if(CHECKBIT(val64, 0x7))
+ stat_info->xpak_stat.warn_transceiver_temp_high++;
+
+ if(CHECKBIT(val64, 0x6))
+ stat_info->xpak_stat.warn_transceiver_temp_low++;
+
+ if(CHECKBIT(val64, 0x3))
+ stat_info->xpak_stat.warn_laser_bias_current_high++;
+
+ if(CHECKBIT(val64, 0x2))
+ stat_info->xpak_stat.warn_laser_bias_current_low++;
+
+ if(CHECKBIT(val64, 0x1))
+ stat_info->xpak_stat.warn_laser_output_power_high++;
+
+ if(CHECKBIT(val64, 0x0))
+ stat_info->xpak_stat.warn_laser_output_power_low++;
+}
+
+/**
* alarm_intr_handler - Alarm Interrrupt handler
* @nic: device private variable
* Description: If the interrupt was neither because of Rx packet or Tx
@@ -2790,6 +3098,18 @@ static void alarm_intr_handler(struct s2io_nic *nic)
struct net_device *dev = (struct net_device *) nic->dev;
XENA_dev_config_t __iomem *bar0 = nic->bar0;
register u64 val64 = 0, err_reg = 0;
+ u64 cnt;
+ int i;
+ nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
+ /* Handling the XPAK counters update */
+ if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
+ /* waiting for an hour */
+ nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
+ } else {
+ s2io_updt_xpak_counter(dev);
+ /* reset the count to zero */
+ nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
+ }
/* Handling link status change error Intr */
if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
@@ -2816,6 +3136,8 @@ static void alarm_intr_handler(struct s2io_nic *nic)
MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
netif_stop_queue(dev);
schedule_work(&nic->rst_timer_task);
+ nic->mac_control.stats_info->sw_stat.
+ soft_reset_cnt++;
}
}
} else {
@@ -2827,11 +3149,13 @@ static void alarm_intr_handler(struct s2io_nic *nic)
/* In case of a serious error, the device will be Reset. */
val64 = readq(&bar0->serr_source);
if (val64 & SERR_SOURCE_ANY) {
+ nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
(unsigned long long)val64);
netif_stop_queue(dev);
schedule_work(&nic->rst_timer_task);
+ nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
}
/*
@@ -2849,6 +3173,35 @@ static void alarm_intr_handler(struct s2io_nic *nic)
ac = readq(&bar0->adapter_control);
schedule_work(&nic->set_link_task);
}
+ /* Check for data parity error */
+ val64 = readq(&bar0->pic_int_status);
+ if (val64 & PIC_INT_GPIO) {
+ val64 = readq(&bar0->gpio_int_reg);
+ if (val64 & GPIO_INT_REG_DP_ERR_INT) {
+ nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
+ schedule_work(&nic->rst_timer_task);
+ nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
+ }
+ }
+
+ /* Check for ring full counter */
+ if (nic->device_type & XFRAME_II_DEVICE) {
+ val64 = readq(&bar0->ring_bump_counter1);
+ for (i=0; i<4; i++) {
+ cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
+ cnt >>= 64 - ((i+1)*16);
+ nic->mac_control.stats_info->sw_stat.ring_full_cnt
+ += cnt;
+ }
+
+ val64 = readq(&bar0->ring_bump_counter2);
+ for (i=0; i<4; i++) {
+ cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
+ cnt >>= 64 - ((i+1)*16);
+ nic->mac_control.stats_info->sw_stat.ring_full_cnt
+ += cnt;
+ }
+ }
/* Other type of interrupts are not being handled now, TODO */
}
@@ -2864,23 +3217,26 @@ static void alarm_intr_handler(struct s2io_nic *nic)
* SUCCESS on success and FAILURE on failure.
*/
-static int wait_for_cmd_complete(nic_t * sp)
+static int wait_for_cmd_complete(void *addr, u64 busy_bit)
{
- XENA_dev_config_t __iomem *bar0 = sp->bar0;
int ret = FAILURE, cnt = 0;
u64 val64;
while (TRUE) {
- val64 = readq(&bar0->rmac_addr_cmd_mem);
- if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
+ val64 = readq(addr);
+ if (!(val64 & busy_bit)) {
ret = SUCCESS;
break;
}
- msleep(50);
+
+ if(in_interrupt())
+ mdelay(50);
+ else
+ msleep(50);
+
if (cnt++ > 10)
break;
}
-
return ret;
}
@@ -2919,6 +3275,9 @@ static void s2io_reset(nic_t * sp)
* PCI write to sw_reset register is done by this time.
*/
msleep(250);
+ if (strstr(sp->product_name, "CX4")) {
+ msleep(750);
+ }
/* Restore the PCI state saved during initialization. */
pci_restore_state(sp->pdev);
@@ -3137,7 +3496,7 @@ static void restore_xmsi_data(nic_t *nic)
u64 val64;
int i;
- for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
+ for (i=0; i< nic->avail_msix_vectors; i++) {
writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
writeq(nic->msix_info[i].data, &bar0->xmsi_data);
val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
@@ -3156,7 +3515,7 @@ static void store_xmsi_data(nic_t *nic)
int i;
/* Store and display */
- for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
+ for (i=0; i< nic->avail_msix_vectors; i++) {
val64 = (BIT(15) | vBIT(i, 26, 6));
writeq(val64, &bar0->xmsi_access);
if (wait_for_msix_trans(nic, i)) {
@@ -3284,15 +3643,24 @@ static int s2io_enable_msi_x(nic_t *nic)
writeq(tx_mat, &bar0->tx_mat0_n[7]);
}
+ nic->avail_msix_vectors = 0;
ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
+ /* We fail init if error or we get less vectors than min required */
+ if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
+ nic->avail_msix_vectors = ret;
+ ret = pci_enable_msix(nic->pdev, nic->entries, ret);
+ }
if (ret) {
DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
kfree(nic->entries);
kfree(nic->s2io_entries);
nic->entries = NULL;
nic->s2io_entries = NULL;
+ nic->avail_msix_vectors = 0;
return -ENOMEM;
}
+ if (!nic->avail_msix_vectors)
+ nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
/*
* To enable MSI-X, MSI also needs to be enabled, due to a bug
@@ -3325,8 +3693,6 @@ static int s2io_open(struct net_device *dev)
{
nic_t *sp = dev->priv;
int err = 0;
- int i;
- u16 msi_control; /* Temp variable */
/*
* Make sure you have link off by default every time
@@ -3336,11 +3702,14 @@ static int s2io_open(struct net_device *dev)
sp->last_link_state = 0;
/* Initialize H/W and enable interrupts */
- if (s2io_card_up(sp)) {
+ err = s2io_card_up(sp);
+ if (err) {
DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
dev->name);
- err = -ENODEV;
- goto hw_init_failed;
+ if (err == -ENODEV)
+ goto hw_init_failed;
+ else
+ goto hw_enable_failed;
}
/* Store the values of the MSIX table in the nic_t structure */
@@ -3357,6 +3726,8 @@ failed\n", dev->name);
}
}
if (sp->intr_type == MSI_X) {
+ int i;
+
for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
sprintf(sp->desc1, "%s:MSI-X-%d-TX",
@@ -3409,24 +3780,26 @@ setting_mac_address_failed:
isr_registration_failed:
del_timer_sync(&sp->alarm_timer);
if (sp->intr_type == MSI_X) {
- if (sp->device_type == XFRAME_II_DEVICE) {
- for (i=1; (sp->s2io_entries[i].in_use ==
- MSIX_REGISTERED_SUCCESS); i++) {
- int vector = sp->entries[i].vector;
- void *arg = sp->s2io_entries[i].arg;
+ int i;
+ u16 msi_control; /* Temp variable */
- free_irq(vector, arg);
- }
- pci_disable_msix(sp->pdev);
+ for (i=1; (sp->s2io_entries[i].in_use ==
+ MSIX_REGISTERED_SUCCESS); i++) {
+ int vector = sp->entries[i].vector;
+ void *arg = sp->s2io_entries[i].arg;
- /* Temp */
- pci_read_config_word(sp->pdev, 0x42, &msi_control);
- msi_control &= 0xFFFE; /* Disable MSI */
- pci_write_config_word(sp->pdev, 0x42, msi_control);
+ free_irq(vector, arg);
}
+ pci_disable_msix(sp->pdev);
+
+ /* Temp */
+ pci_read_config_word(sp->pdev, 0x42, &msi_control);
+ msi_control &= 0xFFFE; /* Disable MSI */
+ pci_write_config_word(sp->pdev, 0x42, msi_control);
}
else if (sp->intr_type == MSI)
pci_disable_msi(sp->pdev);
+hw_enable_failed:
s2io_reset(sp);
hw_init_failed:
if (sp->intr_type == MSI_X) {
@@ -3454,35 +3827,12 @@ hw_init_failed:
static int s2io_close(struct net_device *dev)
{
nic_t *sp = dev->priv;
- int i;
- u16 msi_control;
flush_scheduled_work();
netif_stop_queue(dev);
/* Reset card, kill tasklet and free Tx and Rx buffers. */
- s2io_card_down(sp);
-
- if (sp->intr_type == MSI_X) {
- if (sp->device_type == XFRAME_II_DEVICE) {
- for (i=1; (sp->s2io_entries[i].in_use ==
- MSIX_REGISTERED_SUCCESS); i++) {
- int vector = sp->entries[i].vector;
- void *arg = sp->s2io_entries[i].arg;
+ s2io_card_down(sp, 1);
- free_irq(vector, arg);
- }
- pci_read_config_word(sp->pdev, 0x42, &msi_control);
- msi_control &= 0xFFFE; /* Disable MSI */
- pci_write_config_word(sp->pdev, 0x42, msi_control);
-
- pci_disable_msix(sp->pdev);
- }
- }
- else {
- free_irq(sp->pdev->irq, dev);
- if (sp->intr_type == MSI)
- pci_disable_msi(sp->pdev);
- }
sp->device_close_flag = TRUE; /* Device is shut down. */
return 0;
}
@@ -3545,7 +3895,8 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
/* Avoid "put" pointer going beyond "get" pointer */
- if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
+ if (txdp->Host_Control ||
+ ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
netif_stop_queue(dev);
dev_kfree_skb(skb);
@@ -3655,11 +4006,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
mmiowb();
put_off++;
- put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
+ if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
+ put_off = 0;
mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
/* Avoid "put" pointer going beyond "get" pointer */
- if (((put_off + 1) % queue_len) == get_off) {
+ if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
+ sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
DBG_PRINT(TX_DBG,
"No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
put_off, get_off);
@@ -3795,7 +4148,6 @@ s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs)
atomic_dec(&sp->isr_cnt);
return IRQ_HANDLED;
}
-
static void s2io_txpic_intr_handle(nic_t *sp)
{
XENA_dev_config_t __iomem *bar0 = sp->bar0;
@@ -3806,41 +4158,56 @@ static void s2io_txpic_intr_handle(nic_t *sp)
val64 = readq(&bar0->gpio_int_reg);
if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
(val64 & GPIO_INT_REG_LINK_UP)) {
+ /*
+ * This is an unstable state, so clear both up/down
+ * interrupts and let the adapter re-evaluate the link state.
+ */
val64 |= GPIO_INT_REG_LINK_DOWN;
val64 |= GPIO_INT_REG_LINK_UP;
writeq(val64, &bar0->gpio_int_reg);
- goto masking;
- }
-
- if (((sp->last_link_state == LINK_UP) &&
- (val64 & GPIO_INT_REG_LINK_DOWN)) ||
- ((sp->last_link_state == LINK_DOWN) &&
- (val64 & GPIO_INT_REG_LINK_UP))) {
val64 = readq(&bar0->gpio_int_mask);
- val64 |= GPIO_INT_MASK_LINK_DOWN;
- val64 |= GPIO_INT_MASK_LINK_UP;
+ val64 &= ~(GPIO_INT_MASK_LINK_UP |
+ GPIO_INT_MASK_LINK_DOWN);
writeq(val64, &bar0->gpio_int_mask);
- s2io_set_link((unsigned long)sp);
}
-masking:
- if (sp->last_link_state == LINK_UP) {
- /*enable down interrupt */
- val64 = readq(&bar0->gpio_int_mask);
- /* unmasks link down intr */
- val64 &= ~GPIO_INT_MASK_LINK_DOWN;
- /* masks link up intr */
- val64 |= GPIO_INT_MASK_LINK_UP;
- writeq(val64, &bar0->gpio_int_mask);
- } else {
- /*enable UP Interrupt */
- val64 = readq(&bar0->gpio_int_mask);
- /* unmasks link up interrupt */
- val64 &= ~GPIO_INT_MASK_LINK_UP;
- /* masks link down interrupt */
- val64 |= GPIO_INT_MASK_LINK_DOWN;
- writeq(val64, &bar0->gpio_int_mask);
+ else if (val64 & GPIO_INT_REG_LINK_UP) {
+ val64 = readq(&bar0->adapter_status);
+ if (verify_xena_quiescence(sp, val64,
+ sp->device_enabled_once)) {
+ /* Enable Adapter */
+ val64 = readq(&bar0->adapter_control);
+ val64 |= ADAPTER_CNTL_EN;
+ writeq(val64, &bar0->adapter_control);
+ val64 |= ADAPTER_LED_ON;
+ writeq(val64, &bar0->adapter_control);
+ if (!sp->device_enabled_once)
+ sp->device_enabled_once = 1;
+
+ s2io_link(sp, LINK_UP);
+ /*
+ * unmask link down interrupt and mask link-up
+ * intr
+ */
+ val64 = readq(&bar0->gpio_int_mask);
+ val64 &= ~GPIO_INT_MASK_LINK_DOWN;
+ val64 |= GPIO_INT_MASK_LINK_UP;
+ writeq(val64, &bar0->gpio_int_mask);
+
+ }
+ } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
+ val64 = readq(&bar0->adapter_status);
+ if (verify_xena_quiescence(sp, val64,
+ sp->device_enabled_once)) {
+ s2io_link(sp, LINK_DOWN);
+ /* Link is down, so unmask the link-up interrupt */
+ val64 = readq(&bar0->gpio_int_mask);
+ val64 &= ~GPIO_INT_MASK_LINK_UP;
+ val64 |= GPIO_INT_MASK_LINK_DOWN;
+ writeq(val64, &bar0->gpio_int_mask);
+ }
}
}
+ val64 = readq(&bar0->gpio_int_mask);
}
/**
@@ -3863,7 +4230,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
nic_t *sp = dev->priv;
XENA_dev_config_t __iomem *bar0 = sp->bar0;
int i;
- u64 reason = 0, val64;
+ u64 reason = 0, val64, org_mask;
mac_info_t *mac_control;
struct config_param *config;
@@ -3887,43 +4254,41 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
return IRQ_NONE;
}
+ val64 = 0xFFFFFFFFFFFFFFFFULL;
+ /* Store current mask before masking all interrupts */
+ org_mask = readq(&bar0->general_int_mask);
+ writeq(val64, &bar0->general_int_mask);
+
#ifdef CONFIG_S2IO_NAPI
if (reason & GEN_INTR_RXTRAFFIC) {
if (netif_rx_schedule_prep(dev)) {
- en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
- DISABLE_INTRS);
+ writeq(val64, &bar0->rx_traffic_mask);
__netif_rx_schedule(dev);
}
}
#else
- /* If Intr is because of Rx Traffic */
- if (reason & GEN_INTR_RXTRAFFIC) {
- /*
- * rx_traffic_int reg is an R1 register, writing all 1's
- * will ensure that the actual interrupt causing bit get's
- * cleared and hence a read can be avoided.
- */
- val64 = 0xFFFFFFFFFFFFFFFFULL;
- writeq(val64, &bar0->rx_traffic_int);
- for (i = 0; i < config->rx_ring_num; i++) {
- rx_intr_handler(&mac_control->rings[i]);
- }
+ /*
+ * The Rx handler is called by default, without checking for the
+ * cause of the interrupt.
+ * rx_traffic_int reg is an R1 register, so writing all 1's
+ * ensures that the actual interrupt-causing bit gets
+ * cleared and hence a read can be avoided.
+ */
+ writeq(val64, &bar0->rx_traffic_int);
+ for (i = 0; i < config->rx_ring_num; i++) {
+ rx_intr_handler(&mac_control->rings[i]);
}
#endif
- /* If Intr is because of Tx Traffic */
- if (reason & GEN_INTR_TXTRAFFIC) {
- /*
- * tx_traffic_int reg is an R1 register, writing all 1's
- * will ensure that the actual interrupt causing bit get's
- * cleared and hence a read can be avoided.
- */
- val64 = 0xFFFFFFFFFFFFFFFFULL;
- writeq(val64, &bar0->tx_traffic_int);
+ /*
+ * tx_traffic_int reg is an R1 register, so writing all 1's
+ * ensures that the actual interrupt-causing bit gets
+ * cleared and hence a read can be avoided.
+ */
+ writeq(val64, &bar0->tx_traffic_int);
- for (i = 0; i < config->tx_fifo_num; i++)
- tx_intr_handler(&mac_control->fifos[i]);
- }
+ for (i = 0; i < config->tx_fifo_num; i++)
+ tx_intr_handler(&mac_control->fifos[i]);
if (reason & GEN_INTR_TXPIC)
s2io_txpic_intr_handle(sp);
@@ -3949,6 +4314,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
DBG_PRINT(ERR_DBG, " in ISR!!\n");
clear_bit(0, (&sp->tasklet_status));
atomic_dec(&sp->isr_cnt);
+ writeq(org_mask, &bar0->general_int_mask);
return IRQ_HANDLED;
}
clear_bit(0, (&sp->tasklet_status));
@@ -3964,7 +4330,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
}
}
#endif
-
+ writeq(org_mask, &bar0->general_int_mask);
atomic_dec(&sp->isr_cnt);
return IRQ_HANDLED;
}
@@ -4067,7 +4433,8 @@ static void s2io_set_multicast(struct net_device *dev)
RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
writeq(val64, &bar0->rmac_addr_cmd_mem);
/* Wait till command completes */
- wait_for_cmd_complete(sp);
+ wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
sp->m_cast_flg = 1;
sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
@@ -4082,7 +4449,8 @@ static void s2io_set_multicast(struct net_device *dev)
RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
writeq(val64, &bar0->rmac_addr_cmd_mem);
/* Wait till command completes */
- wait_for_cmd_complete(sp);
+ wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
sp->m_cast_flg = 0;
sp->all_multi_pos = 0;
@@ -4147,7 +4515,8 @@ static void s2io_set_multicast(struct net_device *dev)
writeq(val64, &bar0->rmac_addr_cmd_mem);
/* Wait for command completes */
- if (wait_for_cmd_complete(sp)) {
+ if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
DBG_PRINT(ERR_DBG, "%s: Adding ",
dev->name);
DBG_PRINT(ERR_DBG, "Multicasts failed\n");
@@ -4177,7 +4546,8 @@ static void s2io_set_multicast(struct net_device *dev)
writeq(val64, &bar0->rmac_addr_cmd_mem);
/* Wait for command completes */
- if (wait_for_cmd_complete(sp)) {
+ if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
DBG_PRINT(ERR_DBG, "%s: Adding ",
dev->name);
DBG_PRINT(ERR_DBG, "Multicasts failed\n");
@@ -4222,7 +4592,8 @@ static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
RMAC_ADDR_CMD_MEM_OFFSET(0);
writeq(val64, &bar0->rmac_addr_cmd_mem);
/* Wait till command completes */
- if (wait_for_cmd_complete(sp)) {
+ if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
return FAILURE;
}
@@ -4619,6 +4990,44 @@ static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
}
return ret;
}
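+
+/* Read the adapter's product name from its PCI Vital Product Data (VPD) area */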
+static void s2io_vpd_read(nic_t *nic)
+{
+ u8 vpd_data[256], data;
+ int i = 0, cnt, fail = 0;
+ int vpd_addr = 0x80;
+
+ if (nic->device_type == XFRAME_II_DEVICE) {
+ strcpy(nic->product_name, "Xframe II 10GbE network adapter");
+ vpd_addr = 0x80;
+ } else {
+ strcpy(nic->product_name, "Xframe I 10GbE network adapter");
+ vpd_addr = 0x50;
+ }
+
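+ /*
+ * Read the VPD area four bytes at a time: write the VPD address,
+ * clear the completion flag, then poll until the device indicates
+ * that the data register holds valid data.
+ */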
+ for (i = 0; i < 256; i += 4) {
+ pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
+ pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
+ pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
+ for (cnt = 0; cnt < 5; cnt++) {
+ msleep(2);
+ pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
+ if (data == 0x80)
+ break;
+ }
+ if (cnt >= 5) {
+ DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
+ fail = 1;
+ break;
+ }
+ pci_read_config_dword(nic->pdev, (vpd_addr + 4),
+ (u32 *)&vpd_data[i]);
+ }
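+ /* vpd_data[1] holds the product name length; the string itself starts at vpd_data[3] */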
+ if ((!fail) && (vpd_data[1] < VPD_PRODUCT_NAME_LEN)) {
+ memset(nic->product_name, 0, vpd_data[1]);
+ memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
+ }
+}
/**
* s2io_ethtool_geeprom - reads the value stored in the Eeprom.
@@ -4931,8 +5340,10 @@ static int s2io_link_test(nic_t * sp, uint64_t * data)
u64 val64;
val64 = readq(&bar0->adapter_status);
- if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
+ if (!(LINK_IS_UP(val64)))
*data = 1;
+ else
+ *data = 0;
return 0;
}
@@ -5112,7 +5523,6 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
int i = 0;
nic_t *sp = dev->priv;
StatInfo_t *stat_info = sp->mac_control.stats_info;
- u64 tmp;
s2io_updt_stats(sp);
tmp_stats[i++] =
@@ -5129,9 +5539,19 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
(u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
le32_to_cpu(stat_info->tmac_bcst_frms);
tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
+ le32_to_cpu(stat_info->tmac_ttl_octets);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
+ le32_to_cpu(stat_info->tmac_ucst_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
+ le32_to_cpu(stat_info->tmac_nucst_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
le32_to_cpu(stat_info->tmac_any_err_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
tmp_stats[i++] =
(u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
@@ -5163,11 +5583,27 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
(u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
le32_to_cpu(stat_info->rmac_vld_bcst_frms);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
+ le32_to_cpu(stat_info->rmac_ttl_octets);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
+ << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
+ << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
le32_to_cpu(stat_info->rmac_discarded_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
+ << 32 | le32_to_cpu(stat_info->rmac_drop_events);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
le32_to_cpu(stat_info->rmac_usized_frms);
@@ -5180,40 +5616,129 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
tmp_stats[i++] =
(u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
le32_to_cpu(stat_info->rmac_jabber_frms);
- tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
le32_to_cpu(stat_info->rmac_ip);
tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
- tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
le32_to_cpu(stat_info->rmac_drop_ip);
- tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
le32_to_cpu(stat_info->rmac_icmp);
tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
- tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
le32_to_cpu(stat_info->rmac_udp);
tmp_stats[i++] =
(u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
le32_to_cpu(stat_info->rmac_err_drp_udp);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
+ tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
+ tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
+ tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
+ tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
+ tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
+ tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
+ tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
+ tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
tmp_stats[i++] =
(u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
le32_to_cpu(stat_info->rmac_pause_cnt);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
tmp_stats[i++] =
(u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
le32_to_cpu(stat_info->rmac_accepted_ip);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
+ tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
+ tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
+ tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
tmp_stats[i++] = 0;
tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
+ tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
+ tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
+ tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
+ tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
+ tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
+ tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
+ tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
+ tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
+ tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
+ tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
+ tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
+ tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
+ tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
+ tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
+ tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
+ tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
+ tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
tmp_stats[i++] = stat_info->sw_stat.sending_both;
tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
- tmp = 0;
if (stat_info->sw_stat.num_aggregations) {
- tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
- do_div(tmp, stat_info->sw_stat.num_aggregations);
+ u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
+ int count = 0;
+ /*
+ * Since 64-bit divide does not work on all platforms,
+ * do repeated subtraction.
+ */
+ while (tmp >= stat_info->sw_stat.num_aggregations) {
+ tmp -= stat_info->sw_stat.num_aggregations;
+ count++;
+ }
+ tmp_stats[i++] = count;
}
- tmp_stats[i++] = tmp;
+ else
+ tmp_stats[i++] = 0;
}
static int s2io_ethtool_get_regs_len(struct net_device *dev)
@@ -5351,7 +5876,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
dev->mtu = new_mtu;
if (netif_running(dev)) {
- s2io_card_down(sp);
+ s2io_card_down(sp, 0);
netif_stop_queue(dev);
if (s2io_card_up(sp)) {
DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
@@ -5489,12 +6014,172 @@ static void s2io_set_link(unsigned long data)
clear_bit(0, &(nic->link_state));
}
-static void s2io_card_down(nic_t * sp)
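+/*
+ * Attach (or re-attach) receive buffers to a descriptor according to the
+ * current ring mode, re-using an already mapped buffer when one is available.
+ */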
+static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
+ struct sk_buff **skb, u64 *temp0, u64 *temp1,
+ u64 *temp2, int size)
+{
+ struct net_device *dev = sp->dev;
+ struct sk_buff *frag_list;
+
+ if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
+ /* allocate skb */
+ if (*skb) {
+ DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
+ /*
+ * As the Rx frames are not going to be processed,
+ * reuse the same mapped address for the RxD
+ * buffer pointer.
+ */
+ ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0;
+ } else {
+ *skb = dev_alloc_skb(size);
+ if (!(*skb)) {
+ DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
+ DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
+ return -ENOMEM ;
+ }
+ /* Store the mapped addr in a temp variable
+ * so that it can be used for the next RxD whose
+ * Host_Control is NULL.
+ */
+ ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0 =
+ pci_map_single( sp->pdev, (*skb)->data,
+ size - NET_IP_ALIGN,
+ PCI_DMA_FROMDEVICE);
+ rxdp->Host_Control = (unsigned long) (*skb);
+ }
+ } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
+ /* Two buffer Mode */
+ if (*skb) {
+ ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2;
+ ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0;
+ ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1;
+ } else {
+ *skb = dev_alloc_skb(size);
+ ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 =
+ pci_map_single(sp->pdev, (*skb)->data,
+ dev->mtu + 4,
+ PCI_DMA_FROMDEVICE);
+ ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 =
+ pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
+ PCI_DMA_FROMDEVICE);
+ rxdp->Host_Control = (unsigned long) (*skb);
+
+ /* Buffer-1 will be a dummy buffer, not used */
+ ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 =
+ pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
+ PCI_DMA_FROMDEVICE);
+ }
+ } else if ((rxdp->Host_Control == 0)) {
+ /* Three buffer mode */
+ if (*skb) {
+ ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0;
+ ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1;
+ ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2;
+ } else {
+ *skb = dev_alloc_skb(size);
+
+ ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 =
+ pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
+ PCI_DMA_FROMDEVICE);
+ /* Buffer-1 receives L3/L4 headers */
+ ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 =
+ pci_map_single( sp->pdev, (*skb)->data,
+ l3l4hdr_size + 4,
+ PCI_DMA_FROMDEVICE);
+ /*
+ * skb_shinfo(skb)->frag_list will have L4
+ * data payload
+ */
+ skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
+ ALIGN_SIZE);
+ if (skb_shinfo(*skb)->frag_list == NULL) {
+ DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
+ dev->name);
+ return -ENOMEM ;
+ }
+ frag_list = skb_shinfo(*skb)->frag_list;
+ frag_list->next = NULL;
+ /*
+ * Buffer-2 receives L4 data payload
+ */
+ ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 =
+ pci_map_single( sp->pdev, frag_list->data,
+ dev->mtu, PCI_DMA_FROMDEVICE);
+ }
+ }
+ return 0;
+}
+static void set_rxd_buffer_size(nic_t *sp, RxD_t *rxdp, int size)
+{
+ struct net_device *dev = sp->dev;
+ if (sp->rxd_mode == RXD_MODE_1) {
+ rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
+ } else if (sp->rxd_mode == RXD_MODE_3B) {
+ rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
+ rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
+ rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
+ } else {
+ rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
+ rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
+ rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
+ }
+}
+
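+/*
+ * Give every receive descriptor back to the hardware with a valid buffer
+ * attached, so the Rx rings do not run dry while the card is being
+ * brought down.
+ */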
+static int rxd_owner_bit_reset(nic_t *sp)
+{
+ int i, j, k, blk_cnt = 0, size;
+ mac_info_t * mac_control = &sp->mac_control;
+ struct config_param *config = &sp->config;
+ struct net_device *dev = sp->dev;
+ RxD_t *rxdp = NULL;
+ struct sk_buff *skb = NULL;
+ buffAdd_t *ba = NULL;
+ u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
+
+ /* Calculate the size based on ring mode */
+ size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
+ HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
+ if (sp->rxd_mode == RXD_MODE_1)
+ size += NET_IP_ALIGN;
+ else if (sp->rxd_mode == RXD_MODE_3B)
+ size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
+ else
+ size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
+
+ for (i = 0; i < config->rx_ring_num; i++) {
+ blk_cnt = config->rx_cfg[i].num_rxd /
+ (rxd_count[sp->rxd_mode] + 1);
+
+ for (j = 0; j < blk_cnt; j++) {
+ for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
+ rxdp = mac_control->rings[i].
+ rx_blocks[j].rxds[k].virt_addr;
+ if (sp->rxd_mode >= RXD_MODE_3A)
+ ba = &mac_control->rings[i].ba[j][k];
+ set_rxd_buffer_pointer(sp, rxdp, ba,
+ &skb, (u64 *)&temp0_64,
+ (u64 *)&temp1_64,
+ (u64 *)&temp2_64, size);
+
+ set_rxd_buffer_size(sp, rxdp, size);
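+ /* make sure the buffer pointers and sizes are written out before the ownership flip below */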
+ wmb();
+ /* flip the Ownership bit to Hardware */
+ rxdp->Control_1 |= RXD_OWN_XENA;
+ }
+ }
+ }
+ return 0;
+}
+
+static void s2io_card_down(nic_t * sp, int flag)
{
int cnt = 0;
XENA_dev_config_t __iomem *bar0 = sp->bar0;
unsigned long flags;
register u64 val64 = 0;
+ struct net_device *dev = sp->dev;
del_timer_sync(&sp->alarm_timer);
/* If s2io_set_link task is executing, wait till it completes. */
@@ -5505,12 +6190,51 @@ static void s2io_card_down(nic_t * sp)
/* disable Tx and Rx traffic on the NIC */
stop_nic(sp);
+ if (flag) {
+ if (sp->intr_type == MSI_X) {
+ int i;
+ u16 msi_control;
+
+ for (i = 1; (sp->s2io_entries[i].in_use ==
+ MSIX_REGISTERED_SUCCESS); i++) {
+ int vector = sp->entries[i].vector;
+ void *arg = sp->s2io_entries[i].arg;
+
+ free_irq(vector, arg);
+ }
+ pci_read_config_word(sp->pdev, 0x42, &msi_control);
+ msi_control &= 0xFFFE; /* Disable MSI */
+ pci_write_config_word(sp->pdev, 0x42, msi_control);
+ pci_disable_msix(sp->pdev);
+ } else {
+ free_irq(sp->pdev->irq, dev);
+ if (sp->intr_type == MSI)
+ pci_disable_msi(sp->pdev);
+ }
+ }
+ /* Waiting till all Interrupt handlers are complete */
+ cnt = 0;
+ do {
+ msleep(10);
+ if (!atomic_read(&sp->isr_cnt))
+ break;
+ cnt++;
+ } while(cnt < 5);
/* Kill tasklet. */
tasklet_kill(&sp->task);
/* Check if the device is Quiescent and then Reset the NIC */
do {
+ /* As per the HW requirement we need to replenish the
+ * receive buffers to avoid a ring bump. Since there is
+ * no intention of processing the Rx frames at this point, we
+ * just set the ownership bit of the RxDs in each Rx
+ * ring back to the HW and set the appropriate buffer size
+ * based on the ring mode.
+ */
+ rxd_owner_bit_reset(sp);
+
val64 = readq(&bar0->adapter_status);
if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
break;
@@ -5528,15 +6252,6 @@ static void s2io_card_down(nic_t * sp)
} while (1);
s2io_reset(sp);
- /* Waiting till all Interrupt handlers are complete */
- cnt = 0;
- do {
- msleep(10);
- if (!atomic_read(&sp->isr_cnt))
- break;
- cnt++;
- } while(cnt < 5);
-
spin_lock_irqsave(&sp->tx_lock, flags);
/* Free all Tx buffers */
free_tx_buffers(sp);
@@ -5637,7 +6352,7 @@ static void s2io_restart_nic(unsigned long data)
struct net_device *dev = (struct net_device *) data;
nic_t *sp = dev->priv;
- s2io_card_down(sp);
+ s2io_card_down(sp, 0);
if (s2io_card_up(sp)) {
DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
dev->name);
@@ -5667,6 +6382,7 @@ static void s2io_tx_watchdog(struct net_device *dev)
if (netif_carrier_ok(dev)) {
schedule_work(&sp->rst_timer_task);
+ sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
}
}
@@ -5695,18 +6411,33 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
((unsigned long) rxdp->Host_Control);
int ring_no = ring_data->ring_no;
u16 l3_csum, l4_csum;
+ unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
lro_t *lro;
skb->dev = dev;
- if (rxdp->Control_1 & RXD_T_CODE) {
- unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
- DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
- dev->name, err);
- dev_kfree_skb(skb);
- sp->stats.rx_crc_errors++;
- atomic_dec(&sp->rx_bufs_left[ring_no]);
- rxdp->Host_Control = 0;
- return 0;
+
+ if (err) {
+ /* Check for parity error */
+ if (err & 0x1) {
+ sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
+ }
+
+ /*
+ * Drop the packet on a bad transfer code. The exception is
+ * 0x5, which could be due to an unsupported IPv6 extension header.
+ * In that case we let the stack handle the packet.
+ * Note that since the checksum will then be incorrect,
+ * the stack will validate it.
+ */
+ if (err && ((err >> 48) != 0x5)) {
+ DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
+ dev->name, err);
+ sp->stats.rx_crc_errors++;
+ dev_kfree_skb(skb);
+ atomic_dec(&sp->rx_bufs_left[ring_no]);
+ rxdp->Host_Control = 0;
+ return 0;
+ }
}
/* Updating statistics */
@@ -5792,6 +6523,9 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
clear_lro_session(lro);
goto send_up;
case 0: /* sessions exceeded */
+ case -1: /* non-TCP or not
+ * L2 aggregatable
+ */
case 5: /*
* First pkt in session not
* L3/L4 aggregatable
@@ -5918,13 +6652,6 @@ static void s2io_init_pci(nic_t * sp)
pci_write_config_word(sp->pdev, PCI_COMMAND,
(pci_cmd | PCI_COMMAND_PARITY));
pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
-
- /* Forcibly disabling relaxed ordering capability of the card. */
- pcix_cmd &= 0xfffd;
- pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
- pcix_cmd);
- pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
- &(pcix_cmd));
}
MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
@@ -5954,6 +6681,55 @@ module_param(intr_type, int, 0);
module_param(lro, int, 0);
module_param(lro_max_pkts, int, 0);
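+
+/* Validate module parameters and fall back to safe defaults where necessary */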
+static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
+{
+ if (tx_fifo_num > 8) {
+ DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
+ "supported\n");
+ DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
+ tx_fifo_num = 8;
+ }
+ if (rx_ring_num > 8) {
+ DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
+ "supported\n");
+ DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
+ rx_ring_num = 8;
+ }
+#ifdef CONFIG_S2IO_NAPI
+ if (*dev_intr_type != INTA) {
+ DBG_PRINT(ERR_DBG, "s2io: NAPI cannot be enabled when "
+ "MSI/MSI-X is enabled. Defaulting to INTA\n");
+ *dev_intr_type = INTA;
+ }
+#endif
+#ifndef CONFIG_PCI_MSI
+ if (*dev_intr_type != INTA) {
+ DBG_PRINT(ERR_DBG, "s2io: This kernel does not support "
+ "MSI/MSI-X. Defaulting to INTA\n");
+ *dev_intr_type = INTA;
+ }
+#else
+ if (*dev_intr_type > MSI_X) {
+ DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
+ "Defaulting to INTA\n");
+ *dev_intr_type = INTA;
+ }
+#endif
+ if ((*dev_intr_type == MSI_X) &&
+ ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
+ (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
+ DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
+ "Defaulting to INTA\n");
+ *dev_intr_type = INTA;
+ }
+ if (rx_ring_mode > 3) {
+ DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
+ DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
+ rx_ring_mode = 3;
+ }
+ return SUCCESS;
+}
+
/**
* s2io_init_nic - Initialization of the adapter .
* @pdev : structure containing the PCI related information of the device.
@@ -5984,15 +6760,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
int mode;
u8 dev_intr_type = intr_type;
-#ifdef CONFIG_S2IO_NAPI
- if (dev_intr_type != INTA) {
- DBG_PRINT(ERR_DBG, "NAPI cannot be enabled when MSI/MSI-X \
-is enabled. Defaulting to INTA\n");
- dev_intr_type = INTA;
- }
- else
- DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
-#endif
+ if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
+ return ret;
if ((ret = pci_enable_device(pdev))) {
DBG_PRINT(ERR_DBG,
@@ -6017,14 +6786,6 @@ is enabled. Defaulting to INTA\n");
pci_disable_device(pdev);
return -ENOMEM;
}
-
- if ((dev_intr_type == MSI_X) &&
- ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
- (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
- DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. \
-Defaulting to INTA\n");
- dev_intr_type = INTA;
- }
if (dev_intr_type != MSI_X) {
if (pci_request_regions(pdev, s2io_driver_name)) {
DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
@@ -6100,8 +6861,6 @@ Defaulting to INTA\n");
config = &sp->config;
/* Tx side parameters. */
- if (tx_fifo_len[0] == 0)
- tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
config->tx_fifo_num = tx_fifo_num;
for (i = 0; i < MAX_TX_FIFOS; i++) {
config->tx_cfg[i].fifo_len = tx_fifo_len[i];
@@ -6125,8 +6884,6 @@ Defaulting to INTA\n");
config->max_txds = MAX_SKB_FRAGS + 2;
/* Rx side parameters. */
- if (rx_ring_sz[0] == 0)
- rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
config->rx_ring_num = rx_ring_num;
for (i = 0; i < MAX_RX_RINGS; i++) {
config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
@@ -6267,8 +7024,8 @@ Defaulting to INTA\n");
val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
writeq(val64, &bar0->rmac_addr_cmd_mem);
- wait_for_cmd_complete(sp);
-
+ wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+ RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
tmp64 = readq(&bar0->rmac_addr_data0_mem);
mac_down = (u32) tmp64;
mac_up = (u32) (tmp64 >> 32);
@@ -6322,82 +7079,63 @@ Defaulting to INTA\n");
ret = -ENODEV;
goto register_failed;
}
-
- if (sp->device_type & XFRAME_II_DEVICE) {
- DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
- dev->name);
- DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
+ s2io_vpd_read(sp);
+ DBG_PRINT(ERR_DBG, "%s: Neterion %s ", dev->name, sp->product_name);
+ DBG_PRINT(ERR_DBG, "(rev %d), Driver version %s\n",
get_xena_rev_id(sp->pdev),
s2io_driver_version);
- switch(sp->intr_type) {
- case INTA:
- DBG_PRINT(ERR_DBG, ", Intr type INTA");
- break;
- case MSI:
- DBG_PRINT(ERR_DBG, ", Intr type MSI");
- break;
- case MSI_X:
- DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
- break;
- }
-
- DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
- DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n");
+ DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
sp->def_mac_addr[0].mac_addr[0],
sp->def_mac_addr[0].mac_addr[1],
sp->def_mac_addr[0].mac_addr[2],
sp->def_mac_addr[0].mac_addr[3],
sp->def_mac_addr[0].mac_addr[4],
sp->def_mac_addr[0].mac_addr[5]);
+ if (sp->device_type & XFRAME_II_DEVICE) {
mode = s2io_print_pci_mode(sp);
if (mode < 0) {
- DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
+ DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
ret = -EBADSLT;
+ unregister_netdev(dev);
goto set_swap_failed;
}
- } else {
- DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
- dev->name);
- DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
- get_xena_rev_id(sp->pdev),
- s2io_driver_version);
- switch(sp->intr_type) {
- case INTA:
- DBG_PRINT(ERR_DBG, ", Intr type INTA");
- break;
- case MSI:
- DBG_PRINT(ERR_DBG, ", Intr type MSI");
- break;
- case MSI_X:
- DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
- break;
- }
- DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
- DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
- sp->def_mac_addr[0].mac_addr[0],
- sp->def_mac_addr[0].mac_addr[1],
- sp->def_mac_addr[0].mac_addr[2],
- sp->def_mac_addr[0].mac_addr[3],
- sp->def_mac_addr[0].mac_addr[4],
- sp->def_mac_addr[0].mac_addr[5]);
}
- if (sp->rxd_mode == RXD_MODE_3B)
- DBG_PRINT(ERR_DBG, "%s: 2-Buffer mode support has been "
- "enabled\n",dev->name);
- if (sp->rxd_mode == RXD_MODE_3A)
- DBG_PRINT(ERR_DBG, "%s: 3-Buffer mode support has been "
- "enabled\n",dev->name);
-
+ switch(sp->rxd_mode) {
+ case RXD_MODE_1:
+ DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
+ dev->name);
+ break;
+ case RXD_MODE_3B:
+ DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
+ dev->name);
+ break;
+ case RXD_MODE_3A:
+ DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
+ dev->name);
+ break;
+ }
+#ifdef CONFIG_S2IO_NAPI
+ DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
+#endif
+ switch(sp->intr_type) {
+ case INTA:
+ DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
+ break;
+ case MSI:
+ DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
+ break;
+ case MSI_X:
+ DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
+ break;
+ }
if (sp->lro)
DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
- dev->name);
+ dev->name);
/* Initialize device name */
- strcpy(sp->name, dev->name);
- if (sp->device_type & XFRAME_II_DEVICE)
- strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
- else
- strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
+ sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
/* Initialize bimodal Interrupts */
sp->config.bimodal = bimodal;
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 0a0b5b29d81e..3203732a668d 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -31,6 +31,8 @@
#define SUCCESS 0
#define FAILURE -1
+#define CHECKBIT(value, nbit) ((value) & (1 << (nbit)))
+
/* Maximum time to flicker LED when asked to identify NIC using ethtool */
#define MAX_FLICKER_TIME 60000 /* 60 Secs */
@@ -78,6 +80,11 @@ static int debug_level = ERR_DBG;
typedef struct {
unsigned long long single_ecc_errs;
unsigned long long double_ecc_errs;
+ unsigned long long parity_err_cnt;
+ unsigned long long serious_err_cnt;
+ unsigned long long soft_reset_cnt;
+ unsigned long long fifo_full_cnt;
+ unsigned long long ring_full_cnt;
/* LRO statistics */
unsigned long long clubbed_frms_cnt;
unsigned long long sending_both;
@@ -87,6 +94,25 @@ typedef struct {
unsigned long long num_aggregations;
} swStat_t;
+/* Xpak related alarms and warnings */
+typedef struct {
+ u64 alarm_transceiver_temp_high;
+ u64 alarm_transceiver_temp_low;
+ u64 alarm_laser_bias_current_high;
+ u64 alarm_laser_bias_current_low;
+ u64 alarm_laser_output_power_high;
+ u64 alarm_laser_output_power_low;
+ u64 warn_transceiver_temp_high;
+ u64 warn_transceiver_temp_low;
+ u64 warn_laser_bias_current_high;
+ u64 warn_laser_bias_current_low;
+ u64 warn_laser_output_power_high;
+ u64 warn_laser_output_power_low;
+ u64 xpak_regs_stat;
+ u32 xpak_timer_count;
+} xpakStat_t;
+
/* The statistics block of Xena */
typedef struct stat_block {
/* Tx MAC statistics counters. */
@@ -263,7 +289,9 @@ typedef struct stat_block {
u32 rmac_accepted_ip_oflow;
u32 reserved_14;
u32 link_fault_cnt;
+ u8 buffer[20];
swStat_t sw_stat;
+ xpakStat_t xpak_stat;
} StatInfo_t;
/*
@@ -659,7 +687,8 @@ typedef struct {
} usr_addr_t;
/* Default Tunable parameters of the NIC. */
-#define DEFAULT_FIFO_LEN 4096
+#define DEFAULT_FIFO_0_LEN 4096
+#define DEFAULT_FIFO_1_7_LEN 512
#define SMALL_BLK_CNT 30
#define LARGE_BLK_CNT 100
@@ -732,7 +761,7 @@ struct s2io_nic {
int device_close_flag;
int device_enabled_once;
- char name[50];
+ char name[60];
struct tasklet_struct task;
volatile unsigned long tasklet_status;
@@ -803,6 +832,8 @@ struct s2io_nic {
char desc1[35];
char desc2[35];
+ int avail_msix_vectors; /* No. of MSI-X vectors granted by system */
+
struct msix_info_st msix_info[0x3f];
#define XFRAME_I_DEVICE 1
@@ -824,6 +855,8 @@ struct s2io_nic {
spinlock_t rx_lock;
atomic_t isr_cnt;
u64 *ufo_in_band_v;
+#define VPD_PRODUCT_NAME_LEN 50
+ u8 product_name[VPD_PRODUCT_NAME_LEN];
};
#define RESET_ERROR 1;
@@ -848,28 +881,32 @@ static inline void writeq(u64 val, void __iomem *addr)
writel((u32) (val), addr);
writel((u32) (val >> 32), (addr + 4));
}
+#endif
-/* In 32 bit modes, some registers have to be written in a
- * particular order to expect correct hardware operation. The
- * macro SPECIAL_REG_WRITE is used to perform such ordered
- * writes. Defines UF (Upper First) and LF (Lower First) will
- * be used to specify the required write order.
+/*
+ * Some registers have to be written in a particular order to
+ * ensure correct hardware operation. The macro SPECIAL_REG_WRITE
+ * is used to perform such ordered writes. The defines UF (Upper First)
+ * and LF (Lower First) specify the required write order.
*/
#define UF 1
#define LF 2
static inline void SPECIAL_REG_WRITE(u64 val, void __iomem *addr, int order)
{
+ u32 ret;
+
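+ /*
+ * The read-back after each 32-bit write flushes the posted write,
+ * keeping the two halves ordered on the bus.
+ */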
if (order == LF) {
writel((u32) (val), addr);
+ ret = readl(addr);
writel((u32) (val >> 32), (addr + 4));
+ ret = readl(addr + 4);
} else {
writel((u32) (val >> 32), (addr + 4));
+ ret = readl(addr + 4);
writel((u32) (val), addr);
+ ret = readl(addr);
}
}
-#else
-#define SPECIAL_REG_WRITE(val, addr, dummy) writeq(val, addr)
-#endif
/* Interrupt related values of Xena */
@@ -965,7 +1002,7 @@ static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
static struct ethtool_ops netdev_ethtool_ops;
static void s2io_set_link(unsigned long data);
static int s2io_set_swapper(nic_t * sp);
-static void s2io_card_down(nic_t *nic);
+static void s2io_card_down(nic_t *nic, int flag);
static int s2io_card_up(nic_t *nic);
static int get_xena_rev_id(struct pci_dev *pdev);
static void restore_xmsi_data(nic_t *nic);
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index f5a3bf4d959a..d05874172209 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1,6 +1,6 @@
/* sis900.c: A SiS 900/7016 PCI Fast Ethernet driver for Linux.
Copyright 1999 Silicon Integrated System Corporation
- Revision: 1.08.09 Sep. 19 2005
+ Revision: 1.08.10 Apr. 2 2006
Modified from the driver which is originally written by Donald Becker.
@@ -17,9 +17,10 @@
SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution,
preliminary Rev. 1.0 Jan. 18, 1998
+ Rev 1.08.10 Apr. 2 2006 Daniele Venzano add vlan (jumbo packets) support
Rev 1.08.09 Sep. 19 2005 Daniele Venzano add Wake on LAN support
Rev 1.08.08 Jan. 22 2005 Daniele Venzano use netif_msg for debugging messages
- Rev 1.08.07 Nov. 2 2003 Daniele Venzano <webvenza@libero.it> add suspend/resume support
+ Rev 1.08.07 Nov. 2 2003 Daniele Venzano <venza@brownhat.org> add suspend/resume support
Rev 1.08.06 Sep. 24 2002 Mufasa Yang bug fix for Tx timeout & add SiS963 support
Rev 1.08.05 Jun. 6 2002 Mufasa Yang bug fix for read_eeprom & Tx descriptor over-boundary
Rev 1.08.04 Apr. 25 2002 Mufasa Yang <mufasa@sis.com.tw> added SiS962 support
@@ -77,7 +78,7 @@
#include "sis900.h"
#define SIS900_MODULE_NAME "sis900"
-#define SIS900_DRV_VERSION "v1.08.09 Sep. 19 2005"
+#define SIS900_DRV_VERSION "v1.08.10 Apr. 2 2006"
static char version[] __devinitdata =
KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n";
@@ -1402,6 +1403,11 @@ static void sis900_set_mode (long ioaddr, int speed, int duplex)
rx_flags |= RxATX;
}
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+ /* Can accept Jumbo packet */
+ rx_flags |= RxAJAB;
+#endif
+
outl (tx_flags, ioaddr + txcfg);
outl (rx_flags, ioaddr + rxcfg);
}
@@ -1714,18 +1720,26 @@ static int sis900_rx(struct net_device *net_dev)
while (rx_status & OWN) {
unsigned int rx_size;
+ unsigned int data_size;
if (--rx_work_limit < 0)
break;
- rx_size = (rx_status & DSIZE) - CRC_SIZE;
+ data_size = rx_status & DSIZE;
+ rx_size = data_size - CRC_SIZE;
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+ /* The "TOOLONG" flag means a jumbo packet was received. */
+ if ((rx_status & TOOLONG) && data_size <= MAX_FRAME_SIZE)
+ rx_status &= ~((unsigned int) TOOLONG);
+#endif
if (rx_status & (ABORT|OVERRUN|TOOLONG|RUNT|RXISERR|CRCERR|FAERR)) {
/* corrupted packet received */
if (netif_msg_rx_err(sis_priv))
printk(KERN_DEBUG "%s: Corrupted packet "
- "received, buffer status = 0x%8.8x.\n",
- net_dev->name, rx_status);
+ "received, buffer status = 0x%8.8x/%d.\n",
+ net_dev->name, rx_status, data_size);
sis_priv->stats.rx_errors++;
if (rx_status & OVERRUN)
sis_priv->stats.rx_over_errors++;
diff --git a/drivers/net/sis900.h b/drivers/net/sis900.h
index 50323941e3c0..4834e3a15694 100644
--- a/drivers/net/sis900.h
+++ b/drivers/net/sis900.h
@@ -310,8 +310,14 @@ enum sis630_revision_id {
#define CRC_SIZE 4
#define MAC_HEADER_SIZE 14
-#define TX_BUF_SIZE 1536
-#define RX_BUF_SIZE 1536
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define MAX_FRAME_SIZE (1518 + 4)
+#else
+#define MAX_FRAME_SIZE 1518
+#endif /* CONFIG_VLAN_8021Q */
+
+#define TX_BUF_SIZE (MAX_FRAME_SIZE+18)
+#define RX_BUF_SIZE (MAX_FRAME_SIZE+18)
#define NUM_TX_DESC 16 /* Number of Tx descriptor registers. */
#define NUM_RX_DESC 16 /* Number of Rx descriptor registers. */
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 5ca5a1b546a1..536dd1cf7f79 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -44,12 +44,13 @@
#include "skge.h"
#define DRV_NAME "skge"
-#define DRV_VERSION "1.5"
+#define DRV_VERSION "1.6"
#define PFX DRV_NAME " "
#define DEFAULT_TX_RING_SIZE 128
#define DEFAULT_RX_RING_SIZE 512
#define MAX_TX_RING_SIZE 1024
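+/* wake the Tx queue only once a worst-case fragmented frame worth of descriptors is free */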
+#define TX_LOW_WATER (MAX_SKB_FRAGS + 1)
#define MAX_RX_RING_SIZE 4096
#define RX_COPY_THRESHOLD 128
#define RX_BUF_SIZE 1536
@@ -401,7 +402,7 @@ static int skge_set_ring_param(struct net_device *dev,
int err;
if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
- p->tx_pending < MAX_SKB_FRAGS+1 || p->tx_pending > MAX_TX_RING_SIZE)
+ p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE)
return -EINVAL;
skge->rx_ring.count = p->rx_pending;
@@ -603,7 +604,7 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
struct skge_hw *hw = skge->hw;
int port = skge->port;
- spin_lock_bh(&hw->phy_lock);
+ mutex_lock(&hw->phy_mutex);
if (hw->chip_id == CHIP_ID_GENESIS) {
switch (mode) {
case LED_MODE_OFF:
@@ -663,7 +664,7 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
PHY_M_LED_MO_RX(MO_LED_ON));
}
}
- spin_unlock_bh(&hw->phy_lock);
+ mutex_unlock(&hw->phy_mutex);
}
/* blink LED's for finding board */
@@ -2038,7 +2039,7 @@ static void skge_phy_reset(struct skge_port *skge)
netif_stop_queue(skge->netdev);
netif_carrier_off(skge->netdev);
- spin_lock_bh(&hw->phy_lock);
+ mutex_lock(&hw->phy_mutex);
if (hw->chip_id == CHIP_ID_GENESIS) {
genesis_reset(hw, port);
genesis_mac_init(hw, port);
@@ -2046,7 +2047,7 @@ static void skge_phy_reset(struct skge_port *skge)
yukon_reset(hw, port);
yukon_init(hw, port);
}
- spin_unlock_bh(&hw->phy_lock);
+ mutex_unlock(&hw->phy_mutex);
}
/* Basic MII support */
@@ -2067,12 +2068,12 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* fallthru */
case SIOCGMIIREG: {
u16 val = 0;
- spin_lock_bh(&hw->phy_lock);
+ mutex_lock(&hw->phy_mutex);
if (hw->chip_id == CHIP_ID_GENESIS)
err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
else
err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
- spin_unlock_bh(&hw->phy_lock);
+ mutex_unlock(&hw->phy_mutex);
data->val_out = val;
break;
}
@@ -2081,14 +2082,14 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- spin_lock_bh(&hw->phy_lock);
+ mutex_lock(&hw->phy_mutex);
if (hw->chip_id == CHIP_ID_GENESIS)
err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
data->val_in);
else
err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f,
data->val_in);
- spin_unlock_bh(&hw->phy_lock);
+ mutex_unlock(&hw->phy_mutex);
break;
}
return err;
@@ -2191,12 +2192,12 @@ static int skge_up(struct net_device *dev)
goto free_rx_ring;
/* Initialize MAC */
- spin_lock_bh(&hw->phy_lock);
+ mutex_lock(&hw->phy_mutex);
if (hw->chip_id == CHIP_ID_GENESIS)
genesis_mac_init(hw, port);
else
yukon_mac_init(hw, port);
- spin_unlock_bh(&hw->phy_lock);
+ mutex_unlock(&hw->phy_mutex);
/* Configure RAMbuffers */
chunk = hw->ram_size / ((hw->ports + 1)*2);
@@ -2302,21 +2303,20 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
- struct skge_ring *ring = &skge->tx_ring;
struct skge_element *e;
struct skge_tx_desc *td;
int i;
u32 control, len;
u64 map;
+ unsigned long flags;
skb = skb_padto(skb, ETH_ZLEN);
if (!skb)
return NETDEV_TX_OK;
- if (!spin_trylock(&skge->tx_lock)) {
+ if (!spin_trylock_irqsave(&skge->tx_lock, flags))
/* Collision - tell upper layer to requeue */
return NETDEV_TX_LOCKED;
- }
if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) {
if (!netif_queue_stopped(dev)) {
@@ -2325,12 +2325,13 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
dev->name);
}
- spin_unlock(&skge->tx_lock);
+ spin_unlock_irqrestore(&skge->tx_lock, flags);
return NETDEV_TX_BUSY;
}
- e = ring->to_use;
+ e = skge->tx_ring.to_use;
td = e->desc;
+ BUG_ON(td->control & BMU_OWN);
e->skb = skb;
len = skb_headlen(skb);
map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
@@ -2371,8 +2372,10 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
frag->size, PCI_DMA_TODEVICE);
e = e->next;
- e->skb = NULL;
+ e->skb = skb;
tf = e->desc;
+ BUG_ON(tf->control & BMU_OWN);
+
tf->dma_lo = map;
tf->dma_hi = (u64) map >> 32;
pci_unmap_addr_set(e, mapaddr, map);
@@ -2389,56 +2392,68 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
- if (netif_msg_tx_queued(skge))
+ if (unlikely(netif_msg_tx_queued(skge)))
printk(KERN_DEBUG "%s: tx queued, slot %td, len %d\n",
- dev->name, e - ring->start, skb->len);
+ dev->name, e - skge->tx_ring.start, skb->len);
- ring->to_use = e->next;
- if (skge_avail(&skge->tx_ring) <= MAX_SKB_FRAGS + 1) {
+ skge->tx_ring.to_use = e->next;
+ if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
pr_debug("%s: transmit queue full\n", dev->name);
netif_stop_queue(dev);
}
- mmiowb();
- spin_unlock(&skge->tx_lock);
+ spin_unlock_irqrestore(&skge->tx_lock, flags);
dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
-static void skge_tx_complete(struct skge_port *skge, struct skge_element *last)
+
+/* Free resources associated with this ring element */
+static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
+ u32 control)
{
struct pci_dev *pdev = skge->hw->pdev;
- struct skge_element *e;
- for (e = skge->tx_ring.to_clean; e != last; e = e->next) {
- struct sk_buff *skb = e->skb;
- int i;
+ BUG_ON(!e->skb);
- e->skb = NULL;
+ /* skb header vs. fragment */
+ if (control & BMU_STF)
pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
- skb_headlen(skb), PCI_DMA_TODEVICE);
+ pci_unmap_len(e, maplen),
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
+ pci_unmap_len(e, maplen),
+ PCI_DMA_TODEVICE);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- e = e->next;
- pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
- skb_shinfo(skb)->frags[i].size,
- PCI_DMA_TODEVICE);
- }
+ if (control & BMU_EOF) {
+ if (unlikely(netif_msg_tx_done(skge)))
+ printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
+ skge->netdev->name, e - skge->tx_ring.start);
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(e->skb);
}
- skge->tx_ring.to_clean = e;
+ e->skb = NULL;
}
+/* Free all buffers in transmit ring */
static void skge_tx_clean(struct skge_port *skge)
{
+ struct skge_element *e;
+ unsigned long flags;
+
+ spin_lock_irqsave(&skge->tx_lock, flags);
+ for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
+ struct skge_tx_desc *td = e->desc;
+ skge_tx_free(skge, e, td->control);
+ td->control = 0;
+ }
- spin_lock_bh(&skge->tx_lock);
- skge_tx_complete(skge, skge->tx_ring.to_use);
+ skge->tx_ring.to_clean = e;
netif_wake_queue(skge->netdev);
- spin_unlock_bh(&skge->tx_lock);
+ spin_unlock_irqrestore(&skge->tx_lock, flags);
}
static void skge_tx_timeout(struct net_device *dev)
@@ -2664,32 +2679,28 @@ resubmit:
return NULL;
}
-static void skge_tx_done(struct skge_port *skge)
+/* Free all buffers in Tx ring which are no longer owned by device */
+static void skge_txirq(struct net_device *dev)
{
+ struct skge_port *skge = netdev_priv(dev);
struct skge_ring *ring = &skge->tx_ring;
- struct skge_element *e, *last;
+ struct skge_element *e;
+
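+ /* order the descriptor status reads below against the preceding interrupt status read */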
+ rmb();
spin_lock(&skge->tx_lock);
- last = ring->to_clean;
for (e = ring->to_clean; e != ring->to_use; e = e->next) {
struct skge_tx_desc *td = e->desc;
if (td->control & BMU_OWN)
break;
- if (td->control & BMU_EOF) {
- last = e->next;
- if (unlikely(netif_msg_tx_done(skge)))
- printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
- skge->netdev->name, e - ring->start);
- }
+ skge_tx_free(skge, e, td->control);
}
+ skge->tx_ring.to_clean = e;
- skge_tx_complete(skge, last);
-
- skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
-
- if (skge_avail(&skge->tx_ring) > MAX_SKB_FRAGS + 1)
+ if (netif_queue_stopped(skge->netdev)
+ && skge_avail(&skge->tx_ring) > TX_LOW_WATER)
netif_wake_queue(skge->netdev);
spin_unlock(&skge->tx_lock);
@@ -2704,8 +2715,6 @@ static int skge_poll(struct net_device *dev, int *budget)
int to_do = min(dev->quota, *budget);
int work_done = 0;
- skge_tx_done(skge);
-
for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
struct skge_rx_desc *rd = e->desc;
struct sk_buff *skb;
@@ -2737,10 +2746,12 @@ static int skge_poll(struct net_device *dev, int *budget)
return 1; /* not done */
netif_rx_complete(dev);
- mmiowb();
- hw->intr_mask |= skge->port == 0 ? (IS_R1_F|IS_XA1_F) : (IS_R2_F|IS_XA2_F);
+ spin_lock_irq(&hw->hw_lock);
+ hw->intr_mask |= rxirqmask[skge->port];
skge_write32(hw, B0_IMSK, hw->intr_mask);
+ mmiowb();
+ spin_unlock_irq(&hw->hw_lock);
return 0;
}
@@ -2847,16 +2858,16 @@ static void skge_error_irq(struct skge_hw *hw)
}
/*
- * Interrupt from PHY are handled in tasklet (soft irq)
+ * Interrupt from PHY are handled in work queue
* because accessing phy registers requires spin wait which might
* cause excess interrupt latency.
*/
-static void skge_extirq(unsigned long data)
+static void skge_extirq(void *arg)
{
- struct skge_hw *hw = (struct skge_hw *) data;
+ struct skge_hw *hw = arg;
int port;
- spin_lock(&hw->phy_lock);
+ mutex_lock(&hw->phy_mutex);
for (port = 0; port < hw->ports; port++) {
struct net_device *dev = hw->dev[port];
struct skge_port *skge = netdev_priv(dev);
@@ -2868,10 +2879,12 @@ static void skge_extirq(unsigned long data)
bcom_phy_intr(skge);
}
}
- spin_unlock(&hw->phy_lock);
+ mutex_unlock(&hw->phy_mutex);
+ spin_lock_irq(&hw->hw_lock);
hw->intr_mask |= IS_EXT_REG;
skge_write32(hw, B0_IMSK, hw->intr_mask);
+ spin_unlock_irq(&hw->hw_lock);
}
static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
@@ -2884,54 +2897,68 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
if (status == 0)
return IRQ_NONE;
+ spin_lock(&hw->hw_lock);
+ status &= hw->intr_mask;
if (status & IS_EXT_REG) {
hw->intr_mask &= ~IS_EXT_REG;
- tasklet_schedule(&hw->ext_tasklet);
+ schedule_work(&hw->phy_work);
}
- if (status & (IS_R1_F|IS_XA1_F)) {
- skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
- hw->intr_mask &= ~(IS_R1_F|IS_XA1_F);
- netif_rx_schedule(hw->dev[0]);
+ if (status & IS_XA1_F) {
+ skge_write8(hw, Q_ADDR(Q_XA1, Q_CSR), CSR_IRQ_CL_F);
+ skge_txirq(hw->dev[0]);
}
- if (status & (IS_R2_F|IS_XA2_F)) {
- skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
- hw->intr_mask &= ~(IS_R2_F|IS_XA2_F);
- netif_rx_schedule(hw->dev[1]);
+ if (status & IS_R1_F) {
+ skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
+ hw->intr_mask &= ~IS_R1_F;
+ netif_rx_schedule(hw->dev[0]);
}
- if (likely((status & hw->intr_mask) == 0))
- return IRQ_HANDLED;
+ if (status & IS_PA_TO_TX1)
+ skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
if (status & IS_PA_TO_RX1) {
struct skge_port *skge = netdev_priv(hw->dev[0]);
- ++skge->net_stats.rx_over_errors;
- skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
- }
- if (status & IS_PA_TO_RX2) {
- struct skge_port *skge = netdev_priv(hw->dev[1]);
++skge->net_stats.rx_over_errors;
- skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
+ skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
}
- if (status & IS_PA_TO_TX1)
- skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
-
- if (status & IS_PA_TO_TX2)
- skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
if (status & IS_MAC1)
skge_mac_intr(hw, 0);
- if (status & IS_MAC2)
- skge_mac_intr(hw, 1);
+ if (hw->dev[1]) {
+ if (status & IS_XA2_F) {
+ skge_write8(hw, Q_ADDR(Q_XA2, Q_CSR), CSR_IRQ_CL_F);
+ skge_txirq(hw->dev[1]);
+ }
+
+ if (status & IS_R2_F) {
+ skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
+ hw->intr_mask &= ~IS_R2_F;
+ netif_rx_schedule(hw->dev[1]);
+ }
+
+ if (status & IS_PA_TO_RX2) {
+ struct skge_port *skge = netdev_priv(hw->dev[1]);
+ ++skge->net_stats.rx_over_errors;
+ skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
+ }
+
+ if (status & IS_PA_TO_TX2)
+ skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
+
+ if (status & IS_MAC2)
+ skge_mac_intr(hw, 1);
+ }
if (status & IS_HW_ERR)
skge_error_irq(hw);
skge_write32(hw, B0_IMSK, hw->intr_mask);
+ spin_unlock(&hw->hw_lock);
return IRQ_HANDLED;
}
@@ -2957,7 +2984,7 @@ static int skge_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- spin_lock_bh(&hw->phy_lock);
+ mutex_lock(&hw->phy_mutex);
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
memcpy_toio(hw->regs + B2_MAC_1 + port*8,
dev->dev_addr, ETH_ALEN);
@@ -2970,7 +2997,7 @@ static int skge_set_mac_address(struct net_device *dev, void *p)
gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
}
- spin_unlock_bh(&hw->phy_lock);
+ mutex_unlock(&hw->phy_mutex);
return 0;
}
@@ -3082,6 +3109,7 @@ static int skge_reset(struct skge_hw *hw)
else
hw->ram_size = t8 * 4096;
+ spin_lock_init(&hw->hw_lock);
hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
if (hw->ports > 1)
hw->intr_mask |= IS_PORT_2;
@@ -3150,14 +3178,14 @@ static int skge_reset(struct skge_hw *hw)
skge_write32(hw, B0_IMSK, hw->intr_mask);
- spin_lock_bh(&hw->phy_lock);
+ mutex_lock(&hw->phy_mutex);
for (i = 0; i < hw->ports; i++) {
if (hw->chip_id == CHIP_ID_GENESIS)
genesis_reset(hw, i);
else
yukon_reset(hw, i);
}
- spin_unlock_bh(&hw->phy_lock);
+ mutex_unlock(&hw->phy_mutex);
return 0;
}
@@ -3305,8 +3333,8 @@ static int __devinit skge_probe(struct pci_dev *pdev,
}
hw->pdev = pdev;
- spin_lock_init(&hw->phy_lock);
- tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);
+ mutex_init(&hw->phy_mutex);
+ INIT_WORK(&hw->phy_work, skge_extirq, hw);
hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
if (!hw->regs) {
@@ -3334,6 +3362,14 @@ static int __devinit skge_probe(struct pci_dev *pdev,
if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
goto err_out_led_off;
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ printk(KERN_ERR PFX "%s: bad (zero?) ethernet address in rom\n",
+ pci_name(pdev));
+ err = -EIO;
+ goto err_out_free_netdev;
+ }
+
err = register_netdev(dev);
if (err) {
printk(KERN_ERR PFX "%s: cannot register net device\n",
@@ -3388,11 +3424,15 @@ static void __devexit skge_remove(struct pci_dev *pdev)
dev0 = hw->dev[0];
unregister_netdev(dev0);
+ spin_lock_irq(&hw->hw_lock);
+ hw->intr_mask = 0;
skge_write32(hw, B0_IMSK, 0);
+ spin_unlock_irq(&hw->hw_lock);
+
skge_write16(hw, B0_LED, LED_STAT_OFF);
skge_write8(hw, B0_CTST, CS_RST_SET);
- tasklet_kill(&hw->ext_tasklet);
+ flush_scheduled_work();
free_irq(pdev->irq, hw);
pci_release_regions(pdev);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 1f1ce88c8186..ed19ff47ce11 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2388,6 +2388,7 @@ struct skge_ring {
struct skge_hw {
void __iomem *regs;
struct pci_dev *pdev;
+ spinlock_t hw_lock;
u32 intr_mask;
struct net_device *dev[2];
@@ -2399,9 +2400,8 @@ struct skge_hw {
u32 ram_size;
u32 ram_offset;
u16 phy_addr;
-
- struct tasklet_struct ext_tasklet;
- spinlock_t phy_lock;
+ struct work_struct phy_work;
+ struct mutex phy_mutex;
};
enum {
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
new file mode 100644
index 000000000000..bdd8702ead54
--- /dev/null
+++ b/drivers/net/smc911x.c
@@ -0,0 +1,2307 @@
+/*
+ * smc911x.c
+ * This is a driver for SMSC's LAN911{5,6,7,8} single-chip Ethernet devices.
+ *
+ * Copyright (C) 2005 Sensoria Corp
+ * Derived from the unified SMC91x driver by Nicolas Pitre
+ * and the smsc911x.c reference driver by SMSC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Arguments:
+ * watchdog = TX watchdog timeout
+ * tx_fifo_kb = Size of TX FIFO in KB
+ *
+ * History:
+ * 04/16/05 Dustin McIntire Initial version
+ */
+static const char version[] =
+ "smc911x.c: v1.0 04-16-2005 by Dustin McIntire <dustin@sensoria.com>\n";
+
+/* Debugging options */
+#define ENABLE_SMC_DEBUG_RX 0
+#define ENABLE_SMC_DEBUG_TX 0
+#define ENABLE_SMC_DEBUG_DMA 0
+#define ENABLE_SMC_DEBUG_PKTS 0
+#define ENABLE_SMC_DEBUG_MISC 0
+#define ENABLE_SMC_DEBUG_FUNC 0
+
+#define SMC_DEBUG_RX ((ENABLE_SMC_DEBUG_RX ? 1 : 0) << 0)
+#define SMC_DEBUG_TX ((ENABLE_SMC_DEBUG_TX ? 1 : 0) << 1)
+#define SMC_DEBUG_DMA ((ENABLE_SMC_DEBUG_DMA ? 1 : 0) << 2)
+#define SMC_DEBUG_PKTS ((ENABLE_SMC_DEBUG_PKTS ? 1 : 0) << 3)
+#define SMC_DEBUG_MISC ((ENABLE_SMC_DEBUG_MISC ? 1 : 0) << 4)
+#define SMC_DEBUG_FUNC ((ENABLE_SMC_DEBUG_FUNC ? 1 : 0) << 5)
+
+#ifndef SMC_DEBUG
+#define SMC_DEBUG ( SMC_DEBUG_RX | \
+ SMC_DEBUG_TX | \
+ SMC_DEBUG_DMA | \
+ SMC_DEBUG_PKTS | \
+ SMC_DEBUG_MISC | \
+ SMC_DEBUG_FUNC \
+ )
+#endif
+
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/workqueue.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "smc911x.h"
+
+/*
+ * Transmit timeout, default 5 seconds.
+ */
+static int watchdog = 5000;
+module_param(watchdog, int, 0400);
+MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
+
+static int tx_fifo_kb=8;
+module_param(tx_fifo_kb, int, 0400);
+MODULE_PARM_DESC(tx_fifo_kb,"transmit FIFO size in KB (1<x<15)(default=8)");
+
+MODULE_LICENSE("GPL");
+
+/*
+ * The internal workings of the driver. If you are changing anything
+ * here with the SMC stuff, you should have the datasheet and know
+ * what you are doing.
+ */
+#define CARDNAME "smc911x"
+
+/*
+ * Use power-down feature of the chip
+ */
+#define POWER_DOWN 1
+
+
+/* store this information for the driver.. */
+struct smc911x_local {
+ /*
+ * If I have to wait until the DMA is finished and ready to reload a
+ * packet, I will store the skbuff here. Then, the DMA will send it
+ * out and free it.
+ */
+ struct sk_buff *pending_tx_skb;
+
+ /*
+ * these are things that the kernel wants me to keep, so users
+ * can find out semi-useless statistics of how well the card is
+ * performing
+ */
+ struct net_device_stats stats;
+
+ /* version/revision of the SMC911x chip */
+ u16 version;
+ u16 revision;
+
+ /* FIFO sizes */
+ int tx_fifo_kb;
+ int tx_fifo_size;
+ int rx_fifo_size;
+ int afc_cfg;
+
+ /* Contains the current active receive/phy mode */
+ int ctl_rfduplx;
+ int ctl_rspeed;
+
+ u32 msg_enable;
+ u32 phy_type;
+ struct mii_if_info mii;
+
+ /* work queue */
+ struct work_struct phy_configure;
+ int work_pending;
+
+ int tx_throttle;
+ spinlock_t lock;
+
+#ifdef SMC_USE_DMA
+ /* DMA needs the physical address of the chip */
+ u_long physaddr;
+ int rxdma;
+ int txdma;
+ int rxdma_active;
+ int txdma_active;
+ struct sk_buff *current_rx_skb;
+ struct sk_buff *current_tx_skb;
+ struct device *dev;
+#endif
+};
+
+#if SMC_DEBUG > 0
+#define DBG(n, args...) \
+ do { \
+ if (SMC_DEBUG & (n)) \
+ printk(args); \
+ } while (0)
+
+#define PRINTK(args...) printk(args)
+#else
+#define DBG(n, args...) do { } while (0)
+#define PRINTK(args...) printk(KERN_DEBUG args)
+#endif
+
+#if SMC_DEBUG_PKTS > 0
+static void PRINT_PKT(u_char *buf, int length)
+{
+ int i;
+ int remainder;
+ int lines;
+
+ lines = length / 16;
+ remainder = length % 16;
+
+ for (i = 0; i < lines ; i ++) {
+ int cur;
+ for (cur = 0; cur < 8; cur++) {
+ u_char a, b;
+ a = *buf++;
+ b = *buf++;
+ printk("%02x%02x ", a, b);
+ }
+ printk("\n");
+ }
+ for (i = 0; i < remainder/2 ; i++) {
+ u_char a, b;
+ a = *buf++;
+ b = *buf++;
+ printk("%02x%02x ", a, b);
+ }
+ printk("\n");
+}
+#else
+#define PRINT_PKT(x...) do { } while (0)
+#endif
+
+
+/* this enables an interrupt in the interrupt mask register */
+#define SMC_ENABLE_INT(x) do { \
+ unsigned int __mask; \
+ unsigned long __flags; \
+ spin_lock_irqsave(&lp->lock, __flags); \
+ __mask = SMC_GET_INT_EN(); \
+ __mask |= (x); \
+ SMC_SET_INT_EN(__mask); \
+ spin_unlock_irqrestore(&lp->lock, __flags); \
+} while (0)
+
+/* this disables an interrupt from the interrupt mask register */
+#define SMC_DISABLE_INT(x) do { \
+ unsigned int __mask; \
+ unsigned long __flags; \
+ spin_lock_irqsave(&lp->lock, __flags); \
+ __mask = SMC_GET_INT_EN(); \
+ __mask &= ~(x); \
+ SMC_SET_INT_EN(__mask); \
+ spin_unlock_irqrestore(&lp->lock, __flags); \
+} while (0)
+
+/*
+ * this does a soft reset on the device
+ */
+static void smc911x_reset(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned int reg, timeout=0, resets=1;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ /* Take out of PM setting first */
+ if ((SMC_GET_PMT_CTRL() & PMT_CTRL_READY_) == 0) {
+ /* Write to the bytetest will take out of powerdown */
+ SMC_SET_BYTE_TEST(0);
+ timeout=10;
+ do {
+ udelay(10);
+ reg = SMC_GET_PMT_CTRL() & PMT_CTRL_READY_;
+ } while ( timeout-- && !reg);
+ if (timeout == 0) {
+ PRINTK("%s: smc911x_reset timeout waiting for PM restore\n", dev->name);
+ return;
+ }
+ }
+
+ /* Disable all interrupts */
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_SET_INT_EN(0);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ while (resets--) {
+ SMC_SET_HW_CFG(HW_CFG_SRST_);
+ timeout=10;
+ do {
+ udelay(10);
+ reg = SMC_GET_HW_CFG();
+ /* If chip indicates reset timeout then try again */
+ if (reg & HW_CFG_SRST_TO_) {
+ PRINTK("%s: chip reset timeout, retrying...\n", dev->name);
+ resets++;
+ break;
+ }
+ } while ( timeout-- && (reg & HW_CFG_SRST_));
+ }
+ if (timeout == 0) {
+ PRINTK("%s: smc911x_reset timeout waiting for reset\n", dev->name);
+ return;
+ }
+
+ /* make sure EEPROM has finished loading before setting GPIO_CFG */
+ timeout=1000;
+ while ( timeout-- && (SMC_GET_E2P_CMD() & E2P_CMD_EPC_BUSY_)) {
+ udelay(10);
+ }
+ if (timeout == 0){
+ PRINTK("%s: smc911x_reset timeout waiting for EEPROM busy\n", dev->name);
+ return;
+ }
+
+ /* Initialize interrupts */
+ SMC_SET_INT_EN(0);
+ SMC_ACK_INT(-1);
+
+ /* Reset the FIFO level and flow control settings */
+ SMC_SET_HW_CFG((lp->tx_fifo_kb & 0xF) << 16);
+//TODO: Figure out what the appropriate pause time is
+ SMC_SET_FLOW(FLOW_FCPT_ | FLOW_FCEN_);
+ SMC_SET_AFC_CFG(lp->afc_cfg);
+
+
+ /* Set to LED outputs */
+ SMC_SET_GPIO_CFG(0x70070000);
+
+ /*
+ * Deassert IRQ for 1*10us for edge type interrupts
+ * and drive IRQ pin push-pull
+ */
+ SMC_SET_IRQ_CFG( (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_ );
+
+ /* clear anything saved */
+ if (lp->pending_tx_skb != NULL) {
+ dev_kfree_skb (lp->pending_tx_skb);
+ lp->pending_tx_skb = NULL;
+ lp->stats.tx_errors++;
+ lp->stats.tx_aborted_errors++;
+ }
+}
+
+/*
+ * Enable Interrupts, Receive, and Transmit
+ */
+static void smc911x_enable(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned mask, cfg, cr;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ SMC_SET_MAC_ADDR(dev->dev_addr);
+
+ /* Enable TX */
+ cfg = SMC_GET_HW_CFG();
+ cfg &= HW_CFG_TX_FIF_SZ_ | 0xFFF;
+ cfg |= HW_CFG_SF_;
+ SMC_SET_HW_CFG(cfg);
+ SMC_SET_FIFO_TDA(0xFF);
+ /* Update TX stats on every 64 packets sent or every 1 sec */
+ SMC_SET_FIFO_TSL(64);
+ SMC_SET_GPT_CFG(GPT_CFG_TIMER_EN_ | 10000);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_GET_MAC_CR(cr);
+ cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_;
+ SMC_SET_MAC_CR(cr);
+ SMC_SET_TX_CFG(TX_CFG_TX_ON_);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ /* Add 2 byte padding to start of packets */
+ SMC_SET_RX_CFG((2<<8) & RX_CFG_RXDOFF_);
+
+ /* Turn on receiver and enable RX */
+ if (cr & MAC_CR_RXEN_)
+ DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_SET_MAC_CR( cr | MAC_CR_RXEN_ );
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ /* Interrupt on every received packet */
+ SMC_SET_FIFO_RSA(0x01);
+ SMC_SET_FIFO_RSL(0x00);
+
+ /* now, enable interrupts */
+ mask = INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_ | INT_EN_RSFL_EN_ |
+ INT_EN_GPT_INT_EN_ | INT_EN_RXDFH_INT_EN_ | INT_EN_RXE_EN_ |
+ INT_EN_PHY_INT_EN_;
+ if (IS_REV_A(lp->revision))
+ mask|=INT_EN_RDFL_EN_;
+ else {
+ mask|=INT_EN_RDFO_EN_;
+ }
+ SMC_ENABLE_INT(mask);
+}
+
+/*
+ * this puts the device in an inactive state
+ */
+static void smc911x_shutdown(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned cr;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __FUNCTION__);
+
+ /* Disable IRQ's */
+ SMC_SET_INT_EN(0);
+
+ /* Turn off Rx and TX */
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_GET_MAC_CR(cr);
+ cr &= ~(MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_);
+ SMC_SET_MAC_CR(cr);
+ SMC_SET_TX_CFG(TX_CFG_STOP_TX_);
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+static inline void smc911x_drop_pkt(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ unsigned int fifo_count, timeout, reg;
+
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __FUNCTION__);
+ fifo_count = SMC_GET_RX_FIFO_INF() & 0xFFFF;
+ if (fifo_count <= 4) {
+ /* Manually dump the packet data */
+ while (fifo_count--)
+ SMC_GET_RX_FIFO();
+ } else {
+ /* Fast forward through the bad packet */
+ SMC_SET_RX_DP_CTRL(RX_DP_CTRL_FFWD_BUSY_);
+ timeout=50;
+ do {
+ udelay(10);
+ reg = SMC_GET_RX_DP_CTRL() & RX_DP_CTRL_FFWD_BUSY_;
+ } while ( timeout-- && reg);
+ if (timeout == 0) {
+ PRINTK("%s: timeout waiting for RX fast forward\n", dev->name);
+ }
+ }
+}
+
+/*
+ * This is the procedure to handle the receipt of a packet.
+ * It should be called after checking for packet presence in
+ * the RX status FIFO. It must be called with the spin lock
+ * already held.
+ */
+static inline void smc911x_rcv(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ unsigned int pkt_len, status;
+ struct sk_buff *skb;
+ unsigned char *data;
+
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n",
+ dev->name, __FUNCTION__);
+ status = SMC_GET_RX_STS_FIFO();
+ DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x \n",
+ dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff);
+ pkt_len = (status & RX_STS_PKT_LEN_) >> 16;
+ if (status & RX_STS_ES_) {
+ /* Deal with a bad packet */
+ lp->stats.rx_errors++;
+ if (status & RX_STS_CRC_ERR_)
+ lp->stats.rx_crc_errors++;
+ else {
+ if (status & RX_STS_LEN_ERR_)
+ lp->stats.rx_length_errors++;
+ if (status & RX_STS_MCAST_)
+ lp->stats.multicast++;
+ }
+ /* Remove the bad packet data from the RX FIFO */
+ smc911x_drop_pkt(dev);
+ } else {
+ /* Receive a valid packet */
+ /* Alloc a buffer with extra room for DMA alignment */
+ skb=dev_alloc_skb(pkt_len+32);
+ if (unlikely(skb == NULL)) {
+ PRINTK( "%s: Low memory, rcvd packet dropped.\n",
+ dev->name);
+ lp->stats.rx_dropped++;
+ smc911x_drop_pkt(dev);
+ return;
+ }
+ /* Align IP header to 32 bits
+ * Note that the device is configured to add a 2
+ * byte padding to the packet start, so we really
+ * want to write to the original data pointer */
+ data = skb->data;
+ skb_reserve(skb, 2);
+ skb_put(skb,pkt_len-4);
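+ /*
+ * Illustration: for a frame with pkt_len = 64 (the -4 above
+ * drops what appears to be the 4 byte CRC), the skb is
+ * allocated with 32 bytes of slack, skb_reserve(skb, 2)
+ * skips the 2 byte RX offset programmed into RX_CFG so the
+ * IP header is 32-bit aligned, and skb_put() exposes
+ * 64 - 4 = 60 bytes of frame data to the stack.
+ */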
+#ifdef SMC_USE_DMA
+ {
+ unsigned int fifo;
+ /* Lower the FIFO threshold if possible */
+ fifo = SMC_GET_FIFO_INT();
+ if (fifo & 0xFF) fifo--;
+ DBG(SMC_DEBUG_RX, "%s: Setting RX stat FIFO threshold to %d\n",
+ dev->name, fifo & 0xff);
+ SMC_SET_FIFO_INT(fifo);
+ /* Setup RX DMA */
+ SMC_SET_RX_CFG(RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_));
+ lp->rxdma_active = 1;
+ lp->current_rx_skb = skb;
+ SMC_PULL_DATA(data, (pkt_len+2+15) & ~15);
+ /* Packet processing deferred to DMA RX interrupt */
+ }
+#else
+ SMC_SET_RX_CFG(RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_));
+ SMC_PULL_DATA(data, pkt_len+2+3);
+
+ DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name);
+ PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64);
+ dev->last_rx = jiffies;
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len-4;
+#endif
+ }
+}
+
+/*
+ * This is called to actually send a packet to the chip.
+ */
+static void smc911x_hardware_send_pkt(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ struct sk_buff *skb;
+ unsigned int cmdA, cmdB, len;
+ unsigned char *buf;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __FUNCTION__);
+ BUG_ON(lp->pending_tx_skb == NULL);
+
+ skb = lp->pending_tx_skb;
+ lp->pending_tx_skb = NULL;
+
+ /* cmdA [25:24] data alignment [20:16] start offset [10:0] buffer length */
+ /* cmdB [31:16] pkt tag [10:0] length */
+#ifdef SMC_USE_DMA
+ /* 16 byte buffer alignment mode */
+ buf = (char*)((u32)(skb->data) & ~0xF);
+ len = (skb->len + 0xF + ((u32)skb->data & 0xF)) & ~0xF;
+ cmdA = (1<<24) | (((u32)skb->data & 0xF)<<16) |
+ TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
+ skb->len;
+#else
+ buf = (char*)((u32)skb->data & ~0x3);
+ len = (skb->len + 3 + ((u32)skb->data & 3)) & ~0x3;
+ cmdA = (((u32)skb->data & 0x3) << 16) |
+ TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
+ skb->len;
+#endif
+ /* tag is packet length so we can use this in stats update later */
+ cmdB = (skb->len << 16) | (skb->len & 0x7FF);
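+ /*
+ * Illustration (non-DMA case, values chosen arbitrarily):
+ * for a 60 byte skb whose data pointer ends in ...2, buf is
+ * rounded down by 2 bytes, len = (60 + 3 + 2) & ~0x3 = 64,
+ * cmdA = (2 << 16) | TX_CMD_A_INT_FIRST_SEG_ |
+ * TX_CMD_A_INT_LAST_SEG_ | 60, and
+ * cmdB = (60 << 16) | 60 = 0x003C003C.
+ */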
+
+ DBG(SMC_DEBUG_TX, "%s: TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n",
+ dev->name, len, len, buf, cmdA, cmdB);
+ SMC_SET_TX_FIFO(cmdA);
+ SMC_SET_TX_FIFO(cmdB);
+
+ DBG(SMC_DEBUG_PKTS, "%s: Transmitted packet\n", dev->name);
+ PRINT_PKT(buf, len <= 64 ? len : 64);
+
+ /* Send pkt via PIO or DMA */
+#ifdef SMC_USE_DMA
+ lp->current_tx_skb = skb;
+ SMC_PUSH_DATA(buf, len);
+ /* DMA complete IRQ will free buffer and set jiffies */
+#else
+ SMC_PUSH_DATA(buf, len);
+ dev->trans_start = jiffies;
+ dev_kfree_skb(skb);
+#endif
+ spin_lock_irqsave(&lp->lock, flags);
+ if (!lp->tx_throttle) {
+ netif_wake_queue(dev);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+ SMC_ENABLE_INT(INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_);
+}
+
+/*
+ * Since I am not sure if I will have enough room in the chip's ram
+ * to store the packet, I call this routine which either sends it
+ * now, or sets the card up to generate an interrupt when it is ready
+ * for the packet.
+ */
+static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ unsigned int free;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
+ dev->name, __FUNCTION__);
+
+ BUG_ON(lp->pending_tx_skb != NULL);
+
+ free = SMC_GET_TX_FIFO_INF() & TX_FIFO_INF_TDFREE_;
+ DBG(SMC_DEBUG_TX, "%s: TX free space %d\n", dev->name, free);
+
+ /* Turn off the flow when running out of space in FIFO */
+ if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) {
+ DBG(SMC_DEBUG_TX, "%s: Disabling data flow due to low FIFO space (%d)\n",
+ dev->name, free);
+ spin_lock_irqsave(&lp->lock, flags);
+ /* Reenable when at least 1 packet of size MTU present */
+ SMC_SET_FIFO_TDA((SMC911X_TX_FIFO_LOW_THRESHOLD)/64);
+ lp->tx_throttle = 1;
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+
+ /* Drop packets when we run out of space in TX FIFO
+ * Account for overhead required for:
+ *
+ * Tx command words 8 bytes
+ * Start offset 15 bytes
+ * End padding 15 bytes
+ */
+ if (unlikely(free < (skb->len + 8 + 15 + 15))) {
+ printk("%s: No Tx free space %d < %d\n",
+ dev->name, free, skb->len);
+ lp->pending_tx_skb = NULL;
+ lp->stats.tx_errors++;
+ lp->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+#ifdef SMC_USE_DMA
+ {
+ /* If the DMA is already running then defer this packet Tx until
+ * the DMA IRQ starts it
+ */
+ spin_lock_irqsave(&lp->lock, flags);
+ if (lp->txdma_active) {
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Tx DMA running, deferring packet\n", dev->name);
+ lp->pending_tx_skb = skb;
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return 0;
+ } else {
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Activating Tx DMA\n", dev->name);
+ lp->txdma_active = 1;
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+#endif
+ lp->pending_tx_skb = skb;
+ smc911x_hardware_send_pkt(dev);
+
+ return 0;
+}
+
+/*
+ * This handles a TX status interrupt, which is only called when:
+ * - a TX error occurred, or
+ * - TX of a packet completed.
+ */
+static void smc911x_tx(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned int tx_status;
+
+ DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
+ dev->name, __FUNCTION__);
+
+ /* Collect the TX status */
+ while (((SMC_GET_TX_FIFO_INF() & TX_FIFO_INF_TSUSED_) >> 16) != 0) {
+ DBG(SMC_DEBUG_TX, "%s: Tx stat FIFO used 0x%04x\n",
+ dev->name,
+ (SMC_GET_TX_FIFO_INF() & TX_FIFO_INF_TSUSED_) >> 16);
+ tx_status = SMC_GET_TX_STS_FIFO();
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes+=tx_status>>16;
+ DBG(SMC_DEBUG_TX, "%s: Tx FIFO tag 0x%04x status 0x%04x\n",
+ dev->name, (tx_status & 0xffff0000) >> 16,
+ tx_status & 0x0000ffff);
+ /* count Tx errors, but ignore lost carrier errors when in
+ * full-duplex mode */
+ if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx &&
+ !(tx_status & 0x00000306))) {
+ lp->stats.tx_errors++;
+ }
+ if (tx_status & TX_STS_MANY_COLL_) {
+ lp->stats.collisions+=16;
+ lp->stats.tx_aborted_errors++;
+ } else {
+ lp->stats.collisions+=(tx_status & TX_STS_COLL_CNT_) >> 3;
+ }
+ /* carrier error only has meaning for half-duplex communication */
+ if ((tx_status & (TX_STS_LOC_ | TX_STS_NO_CARR_)) &&
+ !lp->ctl_rfduplx) {
+ lp->stats.tx_carrier_errors++;
+ }
+ if (tx_status & TX_STS_LATE_COLL_) {
+ lp->stats.collisions++;
+ lp->stats.tx_aborted_errors++;
+ }
+ }
+}
+
+
+/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
+/*
+ * Reads a register from the MII Management serial interface
+ */
+
+static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg)
+{
+ unsigned long ioaddr = dev->base_addr;
+ unsigned int phydata;
+
+ SMC_GET_MII(phyreg, phyaddr, phydata);
+
+ DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
+ __FUNCTION__, phyaddr, phyreg, phydata);
+ return phydata;
+}
+
+
+/*
+ * Writes a register to the MII Management serial interface
+ */
+static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg,
+ int phydata)
+{
+ unsigned long ioaddr = dev->base_addr;
+
+ DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
+ __FUNCTION__, phyaddr, phyreg, phydata);
+
+ SMC_SET_MII(phyreg, phyaddr, phydata);
+}
+
+/*
+ * Finds and reports the PHY address (the 115 and 117 have an external
+ * PHY interface; the 118 has an internal PHY only)
+ */
+static void smc911x_phy_detect(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ struct smc911x_local *lp = netdev_priv(dev);
+ int phyaddr;
+ unsigned int cfg, id1, id2;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ lp->phy_type = 0;
+
+ /*
+ * Scan all 32 PHY addresses if necessary, starting at
+ * PHY#1 to PHY#31, and then PHY#0 last.
+ */
+ switch(lp->version) {
+ case 0x115:
+ case 0x117:
+ cfg = SMC_GET_HW_CFG();
+ if (cfg & HW_CFG_EXT_PHY_DET_) {
+ cfg &= ~HW_CFG_PHY_CLK_SEL_;
+ cfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_;
+ SMC_SET_HW_CFG(cfg);
+ udelay(10); /* Wait for clocks to stop */
+
+ cfg |= HW_CFG_EXT_PHY_EN_;
+ SMC_SET_HW_CFG(cfg);
+ udelay(10); /* Wait for clocks to stop */
+
+ cfg &= ~HW_CFG_PHY_CLK_SEL_;
+ cfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_;
+ SMC_SET_HW_CFG(cfg);
+ udelay(10); /* Wait for clocks to stop */
+
+ cfg |= HW_CFG_SMI_SEL_;
+ SMC_SET_HW_CFG(cfg);
+
+ for (phyaddr = 1; phyaddr < 32; ++phyaddr) {
+
+ /* Read the PHY identifiers */
+ SMC_GET_PHY_ID1(phyaddr & 31, id1);
+ SMC_GET_PHY_ID2(phyaddr & 31, id2);
+
+ /* Make sure it is a valid identifier */
+ if (id1 != 0x0000 && id1 != 0xffff &&
+ id1 != 0x8000 && id2 != 0x0000 &&
+ id2 != 0xffff && id2 != 0x8000) {
+ /* Save the PHY's address */
+ lp->mii.phy_id = phyaddr & 31;
+ lp->phy_type = id1 << 16 | id2;
+ break;
+ }
+ }
+ }
+ default:
+ /* Internal media only */
+ SMC_GET_PHY_ID1(1, id1);
+ SMC_GET_PHY_ID2(1, id2);
+ /* Save the PHY's address */
+ lp->mii.phy_id = 1;
+ lp->phy_type = id1 << 16 | id2;
+ }
+
+ DBG(SMC_DEBUG_MISC, "%s: phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%d\n",
+ dev->name, id1, id2, lp->mii.phy_id);
+}
+
+/*
+ * Sets the PHY to a configuration as determined by the user.
+ * Called with spin_lock held.
+ */
+static int smc911x_phy_fixed(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ int phyaddr = lp->mii.phy_id;
+ int bmcr;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ /* Enter Link Disable state */
+ SMC_GET_PHY_BMCR(phyaddr, bmcr);
+ bmcr |= BMCR_PDOWN;
+ SMC_SET_PHY_BMCR(phyaddr, bmcr);
+
+ /*
+ * Set our fixed capabilities
+ * Disable auto-negotiation
+ */
+ bmcr &= ~BMCR_ANENABLE;
+ if (lp->ctl_rfduplx)
+ bmcr |= BMCR_FULLDPLX;
+
+ if (lp->ctl_rspeed == 100)
+ bmcr |= BMCR_SPEED100;
+
+ /* Write our capabilities to the phy control register */
+ SMC_SET_PHY_BMCR(phyaddr, bmcr);
+
+ /* Re-Configure the Receive/Phy Control register */
+ bmcr &= ~BMCR_PDOWN;
+ SMC_SET_PHY_BMCR(phyaddr, bmcr);
+
+ return 1;
+}
+
+/*
+ * smc911x_phy_reset - reset the phy
+ * @dev: net device
+ * @phy: phy address
+ *
+ * Issue a software reset for the specified PHY and
+ * wait up to 100ms for the reset to complete. We should
+ * not access the PHY for 50ms after issuing the reset.
+ *
+ * The time to wait appears to be dependent on the PHY.
+ *
+ */
+static int smc911x_phy_reset(struct net_device *dev, int phy)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ int timeout;
+ unsigned long flags;
+ unsigned int reg;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __FUNCTION__);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ reg = SMC_GET_PMT_CTRL();
+ reg &= ~0xfffff030;
+ reg |= PMT_CTRL_PHY_RST_;
+ SMC_SET_PMT_CTRL(reg);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ for (timeout = 2; timeout; timeout--) {
+ msleep(50);
+ spin_lock_irqsave(&lp->lock, flags);
+ reg = SMC_GET_PMT_CTRL();
+ spin_unlock_irqrestore(&lp->lock, flags);
+ if (!(reg & PMT_CTRL_PHY_RST_)) {
+ /* extra delay required because the phy may
+ * not be completed with its reset
+ * when PHY_BCR_RESET_ is cleared. 256us
+ * should suffice, but use 500us to be safe
+ */
+ udelay(500);
+ break;
+ }
+ }
+
+ return reg & PMT_CTRL_PHY_RST_;
+}
+
+/*
+ * smc911x_phy_powerdown - powerdown phy
+ * @dev: net device
+ * @phy: phy address
+ *
+ * Power down the specified PHY
+ */
+static void smc911x_phy_powerdown(struct net_device *dev, int phy)
+{
+ unsigned long ioaddr = dev->base_addr;
+ unsigned int bmcr;
+
+ /* Enter Link Disable state */
+ SMC_GET_PHY_BMCR(phy, bmcr);
+ bmcr |= BMCR_PDOWN;
+ SMC_SET_PHY_BMCR(phy, bmcr);
+}
+
+/*
+ * smc911x_phy_check_media - check the media status and adjust BMCR
+ * @dev: net device
+ * @init: set true for initialisation
+ *
+ * Select duplex mode depending on negotiation state. This
+ * also updates our carrier state.
+ */
+static void smc911x_phy_check_media(struct net_device *dev, int init)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ int phyaddr = lp->mii.phy_id;
+ unsigned int bmcr, cr;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
+ /* duplex state has changed */
+ SMC_GET_PHY_BMCR(phyaddr, bmcr);
+ SMC_GET_MAC_CR(cr);
+ if (lp->mii.full_duplex) {
+ DBG(SMC_DEBUG_MISC, "%s: Configuring for full-duplex mode\n", dev->name);
+ bmcr |= BMCR_FULLDPLX;
+ cr |= MAC_CR_RCVOWN_;
+ } else {
+ DBG(SMC_DEBUG_MISC, "%s: Configuring for half-duplex mode\n", dev->name);
+ bmcr &= ~BMCR_FULLDPLX;
+ cr &= ~MAC_CR_RCVOWN_;
+ }
+ SMC_SET_PHY_BMCR(phyaddr, bmcr);
+ SMC_SET_MAC_CR(cr);
+ }
+}
+
+/*
+ * Configures the specified PHY through the MII management interface
+ * using Autonegotiation.
+ * Calls smc911x_phy_fixed() if the user has requested a certain config.
+ * If RPC ANEG bit is set, the media selection is dependent purely on
+ * the selection by the MII (either in the MII BMCR reg or the result
+ * of autonegotiation.) If the RPC ANEG bit is cleared, the selection
+ * is controlled by the RPC SPEED and RPC DPLX bits.
+ */
+static void smc911x_phy_configure(void *data)
+{
+ struct net_device *dev = data;
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ int phyaddr = lp->mii.phy_id;
+ int my_phy_caps; /* My PHY capabilities */
+ int my_ad_caps; /* My Advertised capabilities */
+ int status;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __FUNCTION__);
+
+ /*
+ * We should not be called if phy_type is zero.
+ */
+ if (lp->phy_type == 0)
+ goto smc911x_phy_configure_exit;
+
+ if (smc911x_phy_reset(dev, phyaddr)) {
+ printk("%s: PHY reset timed out\n", dev->name);
+ goto smc911x_phy_configure_exit;
+ }
+ spin_lock_irqsave(&lp->lock, flags);
+
+ /*
+ * Enable PHY Interrupts (for register 18)
+ * Interrupts listed here are enabled
+ */
+ SMC_SET_PHY_INT_MASK(phyaddr, PHY_INT_MASK_ENERGY_ON_ |
+ PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_REMOTE_FAULT_ |
+ PHY_INT_MASK_LINK_DOWN_);
+
+ /* If the user requested no auto neg, then go set his request */
+ if (lp->mii.force_media) {
+ smc911x_phy_fixed(dev);
+ goto smc911x_phy_configure_exit;
+ }
+
+ /* Copy our capabilities from MII_BMSR to MII_ADVERTISE */
+ SMC_GET_PHY_BMSR(phyaddr, my_phy_caps);
+ if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
+ printk(KERN_INFO "Auto negotiation NOT supported\n");
+ smc911x_phy_fixed(dev);
+ goto smc911x_phy_configure_exit;
+ }
+
+ /* CSMA capable w/ both pauses */
+ my_ad_caps = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+
+ if (my_phy_caps & BMSR_100BASE4)
+ my_ad_caps |= ADVERTISE_100BASE4;
+ if (my_phy_caps & BMSR_100FULL)
+ my_ad_caps |= ADVERTISE_100FULL;
+ if (my_phy_caps & BMSR_100HALF)
+ my_ad_caps |= ADVERTISE_100HALF;
+ if (my_phy_caps & BMSR_10FULL)
+ my_ad_caps |= ADVERTISE_10FULL;
+ if (my_phy_caps & BMSR_10HALF)
+ my_ad_caps |= ADVERTISE_10HALF;
+
+ /* Disable capabilities not selected by our user */
+ if (lp->ctl_rspeed != 100)
+ my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF);
+
+ if (!lp->ctl_rfduplx)
+ my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL);
+
+ /* Update our Auto-Neg Advertisement Register */
+ SMC_SET_PHY_MII_ADV(phyaddr, my_ad_caps);
+ lp->mii.advertising = my_ad_caps;
+
+ /*
+ * Read the register back. Without this, it appears that when
+ * auto-negotiation is restarted, sometimes it isn't ready and
+ * the link does not come up.
+ */
+ udelay(10);
+ SMC_GET_PHY_MII_ADV(phyaddr, status);
+
+ DBG(SMC_DEBUG_MISC, "%s: phy caps=0x%04x\n", dev->name, my_phy_caps);
+ DBG(SMC_DEBUG_MISC, "%s: phy advertised caps=0x%04x\n", dev->name, my_ad_caps);
+
+ /* Restart auto-negotiation process in order to advertise my caps */
+ SMC_SET_PHY_BMCR(phyaddr, BMCR_ANENABLE | BMCR_ANRESTART);
+
+ smc911x_phy_check_media(dev, 1);
+
+smc911x_phy_configure_exit:
+ spin_unlock_irqrestore(&lp->lock, flags);
+ lp->work_pending = 0;
+}
+
+/*
+ * smc911x_phy_interrupt
+ *
+ * Purpose: Handle interrupts relating to PHY register 18. This is
+ * called from the "hard" interrupt handler under our private spinlock.
+ */
+static void smc911x_phy_interrupt(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ int phyaddr = lp->mii.phy_id;
+ int status;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ if (lp->phy_type == 0)
+ return;
+
+ smc911x_phy_check_media(dev, 0);
+ /* read to clear status bits */
+ SMC_GET_PHY_INT_SRC(phyaddr,status);
+ DBG(SMC_DEBUG_MISC, "%s: PHY interrupt status 0x%04x\n",
+ dev->name, status & 0xffff);
+ DBG(SMC_DEBUG_MISC, "%s: AFC_CFG 0x%08x\n",
+ dev->name, SMC_GET_AFC_CFG());
+}
+
+/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/
+
+/*
+ * This is the main routine of the driver, to handle the device when
+ * it needs some attention.
+ */
+static irqreturn_t smc911x_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ unsigned long ioaddr = dev->base_addr;
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned int status, mask, timeout;
+ unsigned int rx_overrun=0, cr, pkts;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ /* Spurious interrupt check */
+ if ((SMC_GET_IRQ_CFG() & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) !=
+ (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) {
+ return IRQ_NONE;
+ }
+
+ mask = SMC_GET_INT_EN();
+ SMC_SET_INT_EN(0);
+
+ /* set a timeout value, so I don't stay here forever */
+ timeout = 8;
+
+
+ do {
+ status = SMC_GET_INT();
+
+ DBG(SMC_DEBUG_MISC, "%s: INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n",
+ dev->name, status, mask, status & ~mask);
+
+ status &= mask;
+ if (!status)
+ break;
+
+ /* Handle SW interrupt condition */
+ if (status & INT_STS_SW_INT_) {
+ SMC_ACK_INT(INT_STS_SW_INT_);
+ mask &= ~INT_EN_SW_INT_EN_;
+ }
+ /* Handle various error conditions */
+ if (status & INT_STS_RXE_) {
+ SMC_ACK_INT(INT_STS_RXE_);
+ lp->stats.rx_errors++;
+ }
+ if (status & INT_STS_RXDFH_INT_) {
+ SMC_ACK_INT(INT_STS_RXDFH_INT_);
+ lp->stats.rx_dropped+=SMC_GET_RX_DROP();
+ }
+ /* Undocumented interrupt-what is the right thing to do here? */
+ if (status & INT_STS_RXDF_INT_) {
+ SMC_ACK_INT(INT_STS_RXDF_INT_);
+ }
+
+ /* Rx Data FIFO exceeds set level */
+ if (status & INT_STS_RDFL_) {
+ if (IS_REV_A(lp->revision)) {
+ rx_overrun=1;
+ SMC_GET_MAC_CR(cr);
+ cr &= ~MAC_CR_RXEN_;
+ SMC_SET_MAC_CR(cr);
+ DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name);
+ lp->stats.rx_errors++;
+ lp->stats.rx_fifo_errors++;
+ }
+ SMC_ACK_INT(INT_STS_RDFL_);
+ }
+ if (status & INT_STS_RDFO_) {
+ if (!IS_REV_A(lp->revision)) {
+ SMC_GET_MAC_CR(cr);
+ cr &= ~MAC_CR_RXEN_;
+ SMC_SET_MAC_CR(cr);
+ rx_overrun=1;
+ DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name);
+ lp->stats.rx_errors++;
+ lp->stats.rx_fifo_errors++;
+ }
+ SMC_ACK_INT(INT_STS_RDFO_);
+ }
+ /* Handle receive condition */
+ if ((status & INT_STS_RSFL_) || rx_overrun) {
+ unsigned int fifo;
+ DBG(SMC_DEBUG_RX, "%s: RX irq\n", dev->name);
+ fifo = SMC_GET_RX_FIFO_INF();
+ pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16;
+ DBG(SMC_DEBUG_RX, "%s: Rx FIFO pkts %d, bytes %d\n",
+ dev->name, pkts, fifo & 0xFFFF );
+ if (pkts != 0) {
+#ifdef SMC_USE_DMA
+ unsigned int fifo;
+ if (lp->rxdma_active){
+ DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA,
+ "%s: RX DMA active\n", dev->name);
+ /* The DMA is already running so up the IRQ threshold */
+ fifo = SMC_GET_FIFO_INT() & ~0xFF;
+ fifo |= pkts & 0xFF;
+ DBG(SMC_DEBUG_RX,
+ "%s: Setting RX stat FIFO threshold to %d\n",
+ dev->name, fifo & 0xff);
+ SMC_SET_FIFO_INT(fifo);
+ } else
+#endif
+ smc911x_rcv(dev);
+ }
+ SMC_ACK_INT(INT_STS_RSFL_);
+ }
+ /* Handle transmit FIFO available */
+ if (status & INT_STS_TDFA_) {
+ DBG(SMC_DEBUG_TX, "%s: TX data FIFO space available irq\n", dev->name);
+ SMC_SET_FIFO_TDA(0xFF);
+ lp->tx_throttle = 0;
+#ifdef SMC_USE_DMA
+ if (!lp->txdma_active)
+#endif
+ netif_wake_queue(dev);
+ SMC_ACK_INT(INT_STS_TDFA_);
+ }
+ /* Handle transmit done condition */
+#if 1
+ if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) {
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC,
+ "%s: Tx stat FIFO limit (%d) /GPT irq\n",
+ dev->name, (SMC_GET_FIFO_INT() & 0x00ff0000) >> 16);
+ smc911x_tx(dev);
+ SMC_SET_GPT_CFG(GPT_CFG_TIMER_EN_ | 10000);
+ SMC_ACK_INT(INT_STS_TSFL_);
+ SMC_ACK_INT(INT_STS_TSFL_ | INT_STS_GPT_INT_);
+ }
+#else
+ if (status & INT_STS_TSFL_) {
+ DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq \n", dev->name, );
+ smc911x_tx(dev);
+ SMC_ACK_INT(INT_STS_TSFL_);
+ }
+
+ if (status & INT_STS_GPT_INT_) {
+ DBG(SMC_DEBUG_RX, "%s: IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n",
+ dev->name,
+ SMC_GET_IRQ_CFG(),
+ SMC_GET_FIFO_INT(),
+ SMC_GET_RX_CFG());
+ DBG(SMC_DEBUG_RX, "%s: Rx Stat FIFO Used 0x%02x "
+ "Data FIFO Used 0x%04x Stat FIFO 0x%08x\n",
+ dev->name,
+ (SMC_GET_RX_FIFO_INF() & 0x00ff0000) >> 16,
+ SMC_GET_RX_FIFO_INF() & 0xffff,
+ SMC_GET_RX_STS_FIFO_PEEK());
+ SMC_SET_GPT_CFG(GPT_CFG_TIMER_EN_ | 10000);
+ SMC_ACK_INT(INT_STS_GPT_INT_);
+ }
+#endif
+
+ /* Handle PHY interrupt condition */
+ if (status & INT_STS_PHY_INT_) {
+ DBG(SMC_DEBUG_MISC, "%s: PHY irq\n", dev->name);
+ smc911x_phy_interrupt(dev);
+ SMC_ACK_INT(INT_STS_PHY_INT_);
+ }
+ } while (--timeout);
+
+ /* restore mask state */
+ SMC_SET_INT_EN(mask);
+
+ DBG(SMC_DEBUG_MISC, "%s: Interrupt done (%d loops)\n",
+ dev->name, 8-timeout);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ DBG(3, "%s: Interrupt done (%d loops)\n", dev->name, 8-timeout);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef SMC_USE_DMA
+static void
+smc911x_tx_dma_irq(int dma, void *data, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct smc911x_local *lp = netdev_priv(dev);
+ struct sk_buff *skb = lp->current_tx_skb;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: TX DMA irq handler\n", dev->name);
+ /* Clear the DMA interrupt sources */
+ SMC_DMA_ACK_IRQ(dev, dma);
+ BUG_ON(skb == NULL);
+ dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
+ dev->trans_start = jiffies;
+ dev_kfree_skb_irq(skb);
+ lp->current_tx_skb = NULL;
+ if (lp->pending_tx_skb != NULL)
+ smc911x_hardware_send_pkt(dev);
+ else {
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA,
+ "%s: No pending Tx packets. DMA disabled\n", dev->name);
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->txdma_active = 0;
+ if (!lp->tx_throttle) {
+ netif_wake_queue(dev);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+
+ DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA,
+ "%s: TX DMA irq completed\n", dev->name);
+}
+static void
+smc911x_rx_dma_irq(int dma, void *data, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)data;
+ unsigned long ioaddr = dev->base_addr;
+ struct smc911x_local *lp = netdev_priv(dev);
+ struct sk_buff *skb = lp->current_rx_skb;
+ unsigned long flags;
+ unsigned int pkts;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+ DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, "%s: RX DMA irq handler\n", dev->name);
+ /* Clear the DMA interrupt sources */
+ SMC_DMA_ACK_IRQ(dev, dma);
+ dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE);
+ BUG_ON(skb == NULL);
+ lp->current_rx_skb = NULL;
+ PRINT_PKT(skb->data, skb->len);
+ dev->last_rx = jiffies;
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += skb->len;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ pkts = (SMC_GET_RX_FIFO_INF() & RX_FIFO_INF_RXSUSED_) >> 16;
+ if (pkts != 0) {
+ smc911x_rcv(dev);
+ }else {
+ lp->rxdma_active = 0;
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+ DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA,
+ "%s: RX DMA irq completed. DMA RX FIFO PKTS %d\n",
+ dev->name, pkts);
+}
+#endif /* SMC_USE_DMA */
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void smc911x_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ smc911x_interrupt(dev->irq, dev, NULL);
+ enable_irq(dev->irq);
+}
+#endif
+
+/* Our watchdog timed out. Called by the networking layer */
+static void smc911x_timeout(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ int status, mask;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ status = SMC_GET_INT();
+ mask = SMC_GET_INT_EN();
+ spin_unlock_irqrestore(&lp->lock, flags);
+ DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x \n",
+ dev->name, status, mask);
+
+ /* Dump the current TX FIFO contents and restart */
+ mask = SMC_GET_TX_CFG();
+ SMC_SET_TX_CFG(mask | TX_CFG_TXS_DUMP_ | TX_CFG_TXD_DUMP_);
+ /*
+ * Reconfiguring the PHY doesn't seem like a bad idea here, but
+ * smc911x_phy_configure() calls msleep() which calls schedule_timeout()
+ * which calls schedule(). Hence we use a work queue.
+ */
+ if (lp->phy_type != 0) {
+ if (schedule_work(&lp->phy_configure)) {
+ lp->work_pending = 1;
+ }
+ }
+
+ /* We can accept TX packets again */
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+/*
+ * This routine will, depending on the values passed to it,
+ * either make it accept multicast packets, go into
+ * promiscuous mode (for TCPDUMP and cousins) or accept
+ * a select set of multicast packets
+ */
+static void smc911x_set_multicast_list(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ unsigned int multicast_table[2];
+ unsigned int mcr, update_multicast = 0;
+ unsigned long flags;
+ /* table for flipping the order of 5 bits */
+ static const unsigned char invert5[] =
+ {0x00, 0x10, 0x08, 0x18, 0x04, 0x14, 0x0C, 0x1C,
+ 0x02, 0x12, 0x0A, 0x1A, 0x06, 0x16, 0x0E, 0x1E,
+ 0x01, 0x11, 0x09, 0x19, 0x05, 0x15, 0x0D, 0x1D,
+ 0x03, 0x13, 0x0B, 0x1B, 0x07, 0x17, 0x0F, 0x1F};
+
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_GET_MAC_CR(mcr);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ if (dev->flags & IFF_PROMISC) {
+
+ DBG(SMC_DEBUG_MISC, "%s: RCR_PRMS\n", dev->name);
+ mcr |= MAC_CR_PRMS_;
+ }
+ /*
+ * Here, I am setting this to accept all multicast packets.
+ * I don't need to zero the multicast table, because the flag is
+ * checked before the table is used.
+ */
+ else if (dev->flags & IFF_ALLMULTI || dev->mc_count > 16) {
+ DBG(SMC_DEBUG_MISC, "%s: RCR_ALMUL\n", dev->name);
+ mcr |= MAC_CR_MCPAS_;
+ }
+
+ /*
+ * This sets the internal hardware table to filter out unwanted
+ * multicast packets before they take up memory.
+ *
+ * The SMC chip uses a hash table where the high 6 bits of the CRC of
+ * address are the offset into the table. If that bit is 1, then the
+ * multicast packet is accepted. Otherwise, it's dropped silently.
+ *
+ * To use the 6 bits as an offset into the table, the high 1 bit is
+ * the number of the 32 bit register, while the low 5 bits are the bit
+ * within that register.
+ */
+ else if (dev->mc_count) {
+ int i;
+ struct dev_mc_list *cur_addr;
+
+ /* Set the Hash Perfect filtering mode */
+ mcr |= MAC_CR_HPFILT_;
+
+ /* start with a table of all zeros: reject all */
+ memset(multicast_table, 0, sizeof(multicast_table));
+
+ cur_addr = dev->mc_list;
+ for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
+ int position;
+
+ /* do we have a pointer here? */
+ if (!cur_addr)
+ break;
+ /* make sure this is a multicast address -
+ shouldn't this be a given if we have it here ? */
+ if (!(*cur_addr->dmi_addr & 1))
+ continue;
+
+ /* only use the low order bits */
+ position = crc32_le(~0, cur_addr->dmi_addr, 6) & 0x3f;
+
+ /* do some messy swapping to put the bit in the right spot */
+ multicast_table[invert5[position&0x1F]&0x1] |=
+ (1<<invert5[(position>>1)&0x1F]);
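+ /*
+ * Illustration (value chosen arbitrarily): if position is
+ * 0x2C, then invert5[0x2C & 0x1F] = invert5[0x0C] = 0x06, so
+ * the word index is 0x06 & 0x1 = 0, and
+ * invert5[(0x2C >> 1) & 0x1F] = invert5[0x16] = 0x0D, so bit
+ * 13 of multicast_table[0] gets set.
+ */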
+ }
+
+ /* be sure I get rid of flags I might have set */
+ mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
+
+ /* now, the table can be loaded into the chipset */
+ update_multicast = 1;
+ } else {
+ DBG(SMC_DEBUG_MISC, "%s: ~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n",
+ dev->name);
+ mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
+
+ /*
+ * since I'm disabling all multicast entirely, I need to
+ * clear the multicast list
+ */
+ memset(multicast_table, 0, sizeof(multicast_table));
+ update_multicast = 1;
+ }
+
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_SET_MAC_CR(mcr);
+ if (update_multicast) {
+ DBG(SMC_DEBUG_MISC,
+ "%s: update mcast hash table 0x%08x 0x%08x\n",
+ dev->name, multicast_table[0], multicast_table[1]);
+ SMC_SET_HASHL(multicast_table[0]);
+ SMC_SET_HASHH(multicast_table[1]);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+
+/*
+ * Open and Initialize the board
+ *
+ * Set up everything, reset the card, etc..
+ */
+static int
+smc911x_open(struct net_device *dev)
+{
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ /*
+ * Check that the address is valid. If its not, refuse
+ * to bring the device up. The user must specify an
+ * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
+ */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ PRINTK("%s: no valid ethernet hw addr\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* reset the hardware */
+ smc911x_reset(dev);
+
+ /* Configure the PHY, initialize the link state */
+ smc911x_phy_configure(dev);
+
+ /* Turn on Tx + Rx */
+ smc911x_enable(dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/*
+ * smc911x_close
+ *
+ * this makes the board clean up everything that it can
+ * and not talk to the outside world. Caused by
+ * an 'ifconfig ethX down'
+ */
+static int smc911x_close(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+
+ /* clear everything */
+ smc911x_shutdown(dev);
+
+ if (lp->phy_type != 0) {
+ /* We need to ensure that no calls to
+ * smc911x_phy_configure are pending.
+
+ * flush_scheduled_work() cannot be called because we
+ * are running with the netlink semaphore held (from
+ * devinet_ioctl()) and the pending work queue
+ * contains linkwatch_event() (scheduled by
+ * netif_carrier_off() above). linkwatch_event() also
+ * wants the netlink semaphore.
+ */
+ while (lp->work_pending)
+ schedule();
+ smc911x_phy_powerdown(dev, lp->mii.phy_id);
+ }
+
+ if (lp->pending_tx_skb) {
+ dev_kfree_skb(lp->pending_tx_skb);
+ lp->pending_tx_skb = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * Get the current statistics.
+ * This may be called with the card open or closed.
+ */
+static struct net_device_stats *smc911x_query_statistics(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+
+ return &lp->stats;
+}
+
+/*
+ * Ethtool support
+ */
+static int
+smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ int ret, status;
+ unsigned long flags;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+ cmd->maxtxpkt = 1;
+ cmd->maxrxpkt = 1;
+
+ if (lp->phy_type != 0) {
+ spin_lock_irqsave(&lp->lock, flags);
+ ret = mii_ethtool_gset(&lp->mii, cmd);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ } else {
+ cmd->supported = SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_TP | SUPPORTED_AUI;
+
+ if (lp->ctl_rspeed == 10)
+ cmd->speed = SPEED_10;
+ else if (lp->ctl_rspeed == 100)
+ cmd->speed = SPEED_100;
+
+ cmd->autoneg = AUTONEG_DISABLE;
+ if (lp->mii.phy_id==1)
+ cmd->transceiver = XCVR_INTERNAL;
+ else
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->port = 0;
+ SMC_GET_PHY_SPECIAL(lp->mii.phy_id, status);
+ cmd->duplex =
+ (status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int
+smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int ret;
+ unsigned long flags;
+
+ if (lp->phy_type != 0) {
+ spin_lock_irqsave(&lp->lock, flags);
+ ret = mii_ethtool_sset(&lp->mii, cmd);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ } else {
+ if (cmd->autoneg != AUTONEG_DISABLE ||
+ cmd->speed != SPEED_10 ||
+ (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
+ (cmd->port != PORT_TP && cmd->port != PORT_AUI))
+ return -EINVAL;
+
+ lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;
+
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static void
+smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strncpy(info->driver, CARDNAME, sizeof(info->driver));
+ strncpy(info->version, version, sizeof(info->version));
+ strncpy(info->bus_info, dev->class_dev.dev->bus_id, sizeof(info->bus_info));
+}
+
+static int smc911x_ethtool_nwayreset(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int ret = -EINVAL;
+ unsigned long flags;
+
+ if (lp->phy_type != 0) {
+ spin_lock_irqsave(&lp->lock, flags);
+ ret = mii_nway_restart(&lp->mii);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+
+ return ret;
+}
+
+static u32 smc911x_ethtool_getmsglevel(struct net_device *dev)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ return lp->msg_enable;
+}
+
+static void smc911x_ethtool_setmsglevel(struct net_device *dev, u32 level)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ lp->msg_enable = level;
+}
+
+static int smc911x_ethtool_getregslen(struct net_device *dev)
+{
+ /* System regs + MAC regs + PHY regs */
+ return (((E2P_CMD - ID_REV)/4 + 1) +
+ (WUCSR - MAC_CR)+1 + 32) * sizeof(u32);
+}
+
+static void smc911x_ethtool_getregs(struct net_device *dev,
+ struct ethtool_regs* regs, void *buf)
+{
+ unsigned long ioaddr = dev->base_addr;
+ struct smc911x_local *lp = netdev_priv(dev);
+ unsigned long flags;
+ u32 reg,i,j=0;
+ u32 *data = (u32*)buf;
+
+ regs->version = lp->version;
+ for(i=ID_REV;i<=E2P_CMD;i+=4) {
+ data[j++] = SMC_inl(ioaddr,i);
+ }
+ for(i=MAC_CR;i<=WUCSR;i++) {
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_GET_MAC_CSR(i, reg);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ data[j++] = reg;
+ }
+ for(i=0;i<=31;i++) {
+ spin_lock_irqsave(&lp->lock, flags);
+ SMC_GET_MII(i, lp->mii.phy_id, reg);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ data[j++] = reg & 0xFFFF;
+ }
+}
+
+static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ unsigned int timeout;
+ int e2p_cmd;
+
+ e2p_cmd = SMC_GET_E2P_CMD();
+ for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) {
+ if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) {
+ PRINTK("%s: %s timeout waiting for EEPROM to respond\n",
+ dev->name, __FUNCTION__);
+ return -EFAULT;
+ }
+ mdelay(1);
+ e2p_cmd = SMC_GET_E2P_CMD();
+ }
+ if (timeout == 0) {
+ PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n",
+ dev->name, __FUNCTION__);
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev,
+ int cmd, int addr)
+{
+ unsigned long ioaddr = dev->base_addr;
+ int ret;
+
+ if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
+ return ret;
+ SMC_SET_E2P_CMD(E2P_CMD_EPC_BUSY_ |
+ ((cmd) & (0x7<<28)) |
+ ((addr) & 0xFF));
+ return 0;
+}
+
+static inline int smc911x_ethtool_read_eeprom_byte(struct net_device *dev,
+ u8 *data)
+{
+ unsigned long ioaddr = dev->base_addr;
+ int ret;
+
+ if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
+ return ret;
+ *data = SMC_GET_E2P_DATA();
+ return 0;
+}
+
+static inline int smc911x_ethtool_write_eeprom_byte(struct net_device *dev,
+ u8 data)
+{
+ unsigned long ioaddr = dev->base_addr;
+ int ret;
+
+ if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
+ return ret;
+ SMC_SET_E2P_DATA(data);
+ return 0;
+}
+
+static int smc911x_ethtool_geteeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ u8 eebuf[SMC911X_EEPROM_LEN];
+ int i, ret;
+
+ for(i=0;i<SMC911X_EEPROM_LEN;i++) {
+ if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_READ_, i ))!=0)
+ return ret;
+ if ((ret=smc911x_ethtool_read_eeprom_byte(dev, &eebuf[i]))!=0)
+ return ret;
+ }
+ memcpy(data, eebuf+eeprom->offset, eeprom->len);
+ return 0;
+}
+
+static int smc911x_ethtool_seteeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ int i, ret;
+
+ /* Enable erase */
+ if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_EWEN_, 0 ))!=0)
+ return ret;
+ for(i=eeprom->offset;i<(eeprom->offset+eeprom->len);i++) {
+ /* erase byte */
+ if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_ERASE_, i ))!=0)
+ return ret;
+ /* write byte */
+ if ((ret=smc911x_ethtool_write_eeprom_byte(dev, *data))!=0)
+ return ret;
+ if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_WRITE_, i ))!=0)
+ return ret;
+ }
+ return 0;
+}
+
+static int smc911x_ethtool_geteeprom_len(struct net_device *dev)
+{
+ return SMC911X_EEPROM_LEN;
+}
+
+static struct ethtool_ops smc911x_ethtool_ops = {
+ .get_settings = smc911x_ethtool_getsettings,
+ .set_settings = smc911x_ethtool_setsettings,
+ .get_drvinfo = smc911x_ethtool_getdrvinfo,
+ .get_msglevel = smc911x_ethtool_getmsglevel,
+ .set_msglevel = smc911x_ethtool_setmsglevel,
+ .nway_reset = smc911x_ethtool_nwayreset,
+ .get_link = ethtool_op_get_link,
+ .get_regs_len = smc911x_ethtool_getregslen,
+ .get_regs = smc911x_ethtool_getregs,
+ .get_eeprom_len = smc911x_ethtool_geteeprom_len,
+ .get_eeprom = smc911x_ethtool_geteeprom,
+ .set_eeprom = smc911x_ethtool_seteeprom,
+};
+
+/*
+ * smc911x_findirq
+ *
+ * This routine has a simple purpose -- make the SMC chip generate an
+ * interrupt, so an auto-detect routine can detect it, and find the IRQ,
+ */
+static int __init smc911x_findirq(unsigned long ioaddr)
+{
+ int timeout = 20;
+ unsigned long cookie;
+
+ DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__);
+
+ cookie = probe_irq_on();
+
+ /*
+ * Force a SW interrupt
+ */
+
+ SMC_SET_INT_EN(INT_EN_SW_INT_EN_);
+
+ /*
+ * Wait until positive that the interrupt has been generated
+ */
+ do {
+ int int_status;
+ udelay(10);
+ int_status = SMC_GET_INT_EN();
+ if (int_status & INT_EN_SW_INT_EN_)
+ break; /* got the interrupt */
+ } while (--timeout);
+
+ /*
+ * there is really nothing that I can do here if timeout fails,
+ * as probe_irq_off() will return 0 anyway, which is what I
+ * want in this case. Plus, the clean up is needed in both
+ * cases.
+ */
+
+ /* and disable all interrupts again */
+ SMC_SET_INT_EN(0);
+
+ /* and return what I found */
+ return probe_irq_off(cookie);
+}
+
+/*
+ * Function: smc911x_probe(unsigned long ioaddr)
+ *
+ * Purpose:
+ * Tests to see if a given ioaddr points to an SMC911x chip.
+ * Returns a 0 on success
+ *
+ * Algorithm:
+ * (1) see if the endian word is OK
+ * (2) see if I recognize the chip ID in the appropriate register
+ *
+ * Here I do typical initialization tasks.
+ *
+ * o Initialize the structure if needed
+ * o print out my vanity message if not done so already
+ * o print out what type of hardware is detected
+ * o print out the ethernet address
+ * o find the IRQ
+ * o set up my private data
+ * o configure the dev structure with my subroutines
+ * o actually GRAB the irq.
+ * o GRAB the region
+ */
+static int __init smc911x_probe(struct net_device *dev, unsigned long ioaddr)
+{
+ struct smc911x_local *lp = netdev_priv(dev);
+ int i, retval;
+ unsigned int val, chip_id, revision;
+ const char *version_string;
+
+ DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+ /* First, see if the endian word is recognized */
+ val = SMC_GET_BYTE_TEST();
+ DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val);
+ if (val != 0x87654321) {
+ printk(KERN_ERR "Invalid chip endian 0x08%x\n",val);
+ retval = -ENODEV;
+ goto err_out;
+ }
+
+ /*
+ * check if the chip ID register is something that I
+ * recognize. These might need to be added to later,
+ * as future revisions could be added.
+ */
+ chip_id = SMC_GET_PN();
+ DBG(SMC_DEBUG_MISC, "%s: id probe returned 0x%04x\n", CARDNAME, chip_id);
+ for(i=0;chip_ids[i].id != 0; i++) {
+ if (chip_ids[i].id == chip_id) break;
+ }
+ if (!chip_ids[i].id) {
+ printk(KERN_ERR "Unknown chip ID %04x\n", chip_id);
+ retval = -ENODEV;
+ goto err_out;
+ }
+ version_string = chip_ids[i].name;
+
+ revision = SMC_GET_REV();
+ DBG(SMC_DEBUG_MISC, "%s: revision = 0x%04x\n", CARDNAME, revision);
+
+ /* At this point I'll assume that the chip is an SMC911x. */
+ DBG(SMC_DEBUG_MISC, "%s: Found a %s\n", CARDNAME, chip_ids[i].name);
+
+ /* Validate the TX FIFO size requested */
+ if ((tx_fifo_kb < 2) || (tx_fifo_kb > 14)) {
+ printk(KERN_ERR "Invalid TX FIFO size requested %d\n", tx_fifo_kb);
+ retval = -EINVAL;
+ goto err_out;
+ }
+
+ /* fill in some of the fields */
+ dev->base_addr = ioaddr;
+ lp->version = chip_ids[i].id;
+ lp->revision = revision;
+ lp->tx_fifo_kb = tx_fifo_kb;
+ /* Reverse calculate the RX FIFO size from the TX */
+ lp->tx_fifo_size=(lp->tx_fifo_kb<<10) - 512;
+ lp->rx_fifo_size= ((0x4000 - 512 - lp->tx_fifo_size) / 16) * 15;
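+ /*
+ * Sanity check of the arithmetic above: with the default
+ * tx_fifo_kb = 8, tx_fifo_size = 8192 - 512 = 7680 and
+ * rx_fifo_size = ((16384 - 512 - 7680) / 16) * 15 = 7680,
+ * matching the "7680 Rx Data Fifo Size" case below.
+ */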
+
+ /* Set the automatic flow control values */
+ switch(lp->tx_fifo_kb) {
+ /*
+ * AFC_HI is about ((Rx Data Fifo Size)*2/3)/64
+ * AFC_LO is AFC_HI/2
+ * BACK_DUR is about 5uS*(AFC_LO) rounded down
+ */
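+ /*
+ * Example, assuming AFC_HI sits in bits [23:16] and AFC_LO in
+ * bits [15:8] of AFC_CFG: for tx_fifo_kb = 2 the Rx FIFO is
+ * 13440 bytes, so AFC_HI is about (13440 * 2 / 3) / 64 = 140
+ * = 0x8C and AFC_LO = 70 = 0x46, matching 0x008C46AF below.
+ */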
+ case 2:/* 13440 Rx Data Fifo Size */
+ lp->afc_cfg=0x008C46AF;break;
+ case 3:/* 12480 Rx Data Fifo Size */
+ lp->afc_cfg=0x0082419F;break;
+ case 4:/* 11520 Rx Data Fifo Size */
+ lp->afc_cfg=0x00783C9F;break;
+ case 5:/* 10560 Rx Data Fifo Size */
+ lp->afc_cfg=0x006E374F;break;
+ case 6:/* 9600 Rx Data Fifo Size */
+ lp->afc_cfg=0x0064328F;break;
+ case 7:/* 8640 Rx Data Fifo Size */
+ lp->afc_cfg=0x005A2D7F;break;
+ case 8:/* 7680 Rx Data Fifo Size */
+ lp->afc_cfg=0x0050287F;break;
+ case 9:/* 6720 Rx Data Fifo Size */
+ lp->afc_cfg=0x0046236F;break;
+ case 10:/* 5760 Rx Data Fifo Size */
+ lp->afc_cfg=0x003C1E6F;break;
+ case 11:/* 4800 Rx Data Fifo Size */
+ lp->afc_cfg=0x0032195F;break;
+ /*
+ * AFC_HI is ~1520 bytes less than RX Data Fifo Size
+ * AFC_LO is AFC_HI/2
+ * BACK_DUR is about 5uS*(AFC_LO) rounded down
+ */
+ case 12:/* 3840 Rx Data Fifo Size */
+ lp->afc_cfg=0x0024124F;break;
+ case 13:/* 2880 Rx Data Fifo Size */
+ lp->afc_cfg=0x0015073F;break;
+ case 14:/* 1920 Rx Data Fifo Size */
+ lp->afc_cfg=0x0006032F;break;
+ default:
+ PRINTK("%s: ERROR -- no AFC_CFG setting found",
+ dev->name);
+ break;
+ }
+
+ DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX,
+ "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME,
+ lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg);
+
+ spin_lock_init(&lp->lock);
+
+ /* Get the MAC address */
+ SMC_GET_MAC_ADDR(dev->dev_addr);
+
+ /* now, reset the chip, and put it into a known state */
+ smc911x_reset(dev);
+
+ /*
+ * If dev->irq is 0, the IRQ line has to be probed for
+ * (see smc911x_findirq()).
+ *
+ * A caller-specified IRQ is trusted as-is; no checking is done.
+ */
+ if (dev->irq < 1) {
+ int trials;
+
+ trials = 3;
+ while (trials--) {
+ dev->irq = smc911x_findirq(ioaddr);
+ if (dev->irq)
+ break;
+ /* kick the card and try again */
+ smc911x_reset(dev);
+ }
+ }
+ if (dev->irq == 0) {
+ printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n",
+ dev->name);
+ retval = -ENODEV;
+ goto err_out;
+ }
+ dev->irq = irq_canonicalize(dev->irq);
+
+ /* Fill in the fields of the device structure with ethernet values. */
+ ether_setup(dev);
+
+ dev->open = smc911x_open;
+ dev->stop = smc911x_close;
+ dev->hard_start_xmit = smc911x_hard_start_xmit;
+ dev->tx_timeout = smc911x_timeout;
+ dev->watchdog_timeo = msecs_to_jiffies(watchdog);
+ dev->get_stats = smc911x_query_statistics;
+ dev->set_multicast_list = smc911x_set_multicast_list;
+ dev->ethtool_ops = &smc911x_ethtool_ops;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = smc911x_poll_controller;
+#endif
+
+ INIT_WORK(&lp->phy_configure, smc911x_phy_configure, dev);
+ lp->mii.phy_id_mask = 0x1f;
+ lp->mii.reg_num_mask = 0x1f;
+ lp->mii.force_media = 0;
+ lp->mii.full_duplex = 0;
+ lp->mii.dev = dev;
+ lp->mii.mdio_read = smc911x_phy_read;
+ lp->mii.mdio_write = smc911x_phy_write;
+
+ /*
+ * Locate the phy, if any.
+ */
+ smc911x_phy_detect(dev);
+
+ /* Set default parameters */
+ lp->msg_enable = NETIF_MSG_LINK;
+ lp->ctl_rfduplx = 1;
+ lp->ctl_rspeed = 100;
+
+ /* Grab the IRQ */
+ retval = request_irq(dev->irq, &smc911x_interrupt, SA_SHIRQ, dev->name, dev);
+ if (retval)
+ goto err_out;
+
+ set_irq_type(dev->irq, IRQT_FALLING);
+
+#ifdef SMC_USE_DMA
+ lp->rxdma = SMC_DMA_REQUEST(dev, smc911x_rx_dma_irq);
+ lp->txdma = SMC_DMA_REQUEST(dev, smc911x_tx_dma_irq);
+ lp->rxdma_active = 0;
+ lp->txdma_active = 0;
+ dev->dma = lp->rxdma;
+#endif
+
+ retval = register_netdev(dev);
+ if (retval == 0) {
+ /* now, print out the card info, in a short format.. */
+ printk("%s: %s (rev %d) at %#lx IRQ %d",
+ dev->name, version_string, lp->revision,
+ dev->base_addr, dev->irq);
+
+#ifdef SMC_USE_DMA
+ if (lp->rxdma != -1)
+ printk(" RXDMA %d ", lp->rxdma);
+
+ if (lp->txdma != -1)
+ printk("TXDMA %d", lp->txdma);
+#endif
+ printk("\n");
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ printk("%s: Invalid ethernet MAC address. Please "
+ "set using ifconfig\n", dev->name);
+ } else {
+ /* Print the Ethernet address */
+ printk("%s: Ethernet addr: ", dev->name);
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x\n", dev->dev_addr[5]);
+ }
+
+ if (lp->phy_type == 0) {
+ PRINTK("%s: No PHY found\n", dev->name);
+ } else if ((lp->phy_type & ~0xff) == LAN911X_INTERNAL_PHY_ID) {
+ PRINTK("%s: LAN911x Internal PHY\n", dev->name);
+ } else {
+ PRINTK("%s: External PHY 0x%08x\n", dev->name, lp->phy_type);
+ }
+ }
+
+err_out:
+#ifdef SMC_USE_DMA
+ if (retval) {
+ if (lp->rxdma != -1) {
+ SMC_DMA_FREE(dev, lp->rxdma);
+ }
+ if (lp->txdma != -1) {
+ SMC_DMA_FREE(dev, lp->txdma);
+ }
+ }
+#endif
+ return retval;
+}
+
+/*
+ * smc911x_drv_probe(pdev)
+ *
+ * Output:
+ * 0 --> there is a device
+ * anything else, error
+ */
+static int smc911x_drv_probe(struct platform_device *pdev)
+{
+ struct net_device *ndev;
+ struct resource *res;
+ void __iomem *addr;
+ int ret;
+
+ DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /*
+ * Request the regions.
+ */
+ if (!request_mem_region(res->start, SMC911X_IO_EXTENT, CARDNAME)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ndev = alloc_etherdev(sizeof(struct smc911x_local));
+ if (!ndev) {
+ printk("%s: could not allocate device.\n", CARDNAME);
+ ret = -ENOMEM;
+ goto release_1;
+ }
+ SET_MODULE_OWNER(ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ndev->dma = (unsigned char)-1;
+ ndev->irq = platform_get_irq(pdev, 0);
+
+ addr = ioremap(res->start, SMC911X_IO_EXTENT);
+ if (!addr) {
+ ret = -ENOMEM;
+ goto release_both;
+ }
+
+ platform_set_drvdata(pdev, ndev);
+ ret = smc911x_probe(ndev, (unsigned long)addr);
+ if (ret != 0) {
+ platform_set_drvdata(pdev, NULL);
+ iounmap(addr);
+release_both:
+ free_netdev(ndev);
+release_1:
+ release_mem_region(res->start, SMC911X_IO_EXTENT);
+out:
+ printk("%s: not found (%d).\n", CARDNAME, ret);
+ }
+#ifdef SMC_USE_DMA
+ else {
+ struct smc911x_local *lp = netdev_priv(ndev);
+ lp->physaddr = res->start;
+ lp->dev = &pdev->dev;
+ }
+#endif
+
+ return ret;
+}
+
+static int smc911x_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct resource *res;
+
+ DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__);
+ platform_set_drvdata(pdev, NULL);
+
+ unregister_netdev(ndev);
+
+ free_irq(ndev->irq, ndev);
+
+#ifdef SMC_USE_DMA
+ {
+ struct smc911x_local *lp = netdev_priv(ndev);
+ if (lp->rxdma != -1) {
+ SMC_DMA_FREE(ndev, lp->rxdma);
+ }
+ if (lp->txdma != -1) {
+ SMC_DMA_FREE(ndev, lp->txdma);
+ }
+ }
+#endif
+ iounmap((void *)ndev->base_addr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, SMC911X_IO_EXTENT);
+
+ free_netdev(ndev);
+ return 0;
+}
+
+static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct net_device *ndev = platform_get_drvdata(dev);
+ unsigned long ioaddr = ndev->base_addr;
+
+ DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__);
+ if (ndev) {
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ smc911x_shutdown(ndev);
+#if POWER_DOWN
+ /* Set D2 - Energy detect only setting */
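+ /* PM_MODE_ occupies bits 13:12 of PMT_CTRL, so 2<<12 selects D2 */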
+ SMC_SET_PMT_CTRL(2<<12);
+#endif
+ }
+ }
+ return 0;
+}
+
+static int smc911x_drv_resume(struct platform_device *dev)
+{
+ struct net_device *ndev = platform_get_drvdata(dev);
+
+ DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__);
+ if (ndev) {
+ struct smc911x_local *lp = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ smc911x_reset(ndev);
+ smc911x_enable(ndev);
+ if (lp->phy_type != 0)
+ smc911x_phy_configure(ndev);
+ netif_device_attach(ndev);
+ }
+ }
+ return 0;
+}
+
+static struct platform_driver smc911x_driver = {
+ .probe = smc911x_drv_probe,
+ .remove = smc911x_drv_remove,
+ .suspend = smc911x_drv_suspend,
+ .resume = smc911x_drv_resume,
+ .driver = {
+ .name = CARDNAME,
+ },
+};
+
+static int __init smc911x_init(void)
+{
+ return platform_driver_register(&smc911x_driver);
+}
+
+static void __exit smc911x_cleanup(void)
+{
+ platform_driver_unregister(&smc911x_driver);
+}
+
+module_init(smc911x_init);
+module_exit(smc911x_cleanup);
diff --git a/drivers/net/smc911x.h b/drivers/net/smc911x.h
new file mode 100644
index 000000000000..962a710459fc
--- /dev/null
+++ b/drivers/net/smc911x.h
@@ -0,0 +1,835 @@
+/*------------------------------------------------------------------------
+ . smc911x.h - macros for SMSC's LAN911{5,6,7,8} single-chip Ethernet device.
+ .
+ . Copyright (C) 2005 Sensoria Corp.
+ . Derived from the unified SMC91x driver by Nicolas Pitre
+ .
+ . This program is free software; you can redistribute it and/or modify
+ . it under the terms of the GNU General Public License as published by
+ . the Free Software Foundation; either version 2 of the License, or
+ . (at your option) any later version.
+ .
+ . This program is distributed in the hope that it will be useful,
+ . but WITHOUT ANY WARRANTY; without even the implied warranty of
+ . MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ . GNU General Public License for more details.
+ .
+ . You should have received a copy of the GNU General Public License
+ . along with this program; if not, write to the Free Software
+ . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ .
+ . Information contained in this file was obtained from the LAN9118
+ . manual from SMC. To get a copy, if you really want one, you can find
+ . information under www.smsc.com.
+ .
+ . Authors
+ . Dustin McIntire <dustin@sensoria.com>
+ .
+ ---------------------------------------------------------------------------*/
+#ifndef _SMC911X_H_
+#define _SMC911X_H_
+
+/*
+ * Use the DMA feature on PXA chips
+ */
+#ifdef CONFIG_ARCH_PXA
+ #define SMC_USE_PXA_DMA 1
+ #define SMC_USE_16BIT 0
+ #define SMC_USE_32BIT 1
+#endif
+
+
+/*
+ * Define the bus width specific IO macros
+ */
+
+#if SMC_USE_16BIT
+#define SMC_inb(a, r) readb((a) + (r))
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_inl(a, r) ((SMC_inw(a, r) & 0xFFFF)+(SMC_inw(a+2, r)<<16))
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outw(v, a, r) writew(v, (a) + (r))
+#define SMC_outl(v, a, r) \
+ do{ \
+ writew(v & 0xFFFF, (a) + (r)); \
+ writew(v >> 16, (a) + (r) + 2); \
+ } while (0)
+#define SMC_insl(a, r, p, l) readsw((short*)((a) + (r)), p, l*2)
+#define SMC_outsl(a, r, p, l) writesw((short*)((a) + (r)), p, l*2)
+
+#elif SMC_USE_32BIT
+#define SMC_inb(a, r) readb((a) + (r))
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_inl(a, r) readl((a) + (r))
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outl(v, a, r) writel(v, (a) + (r))
+#define SMC_insl(a, r, p, l) readsl((int*)((a) + (r)), p, l)
+#define SMC_outsl(a, r, p, l) writesl((int*)((a) + (r)), p, l)
+
+#endif /* SMC_USE_16BIT */
+
+
+
+#if SMC_USE_PXA_DMA
+#define SMC_USE_DMA
+
+/*
+ * Define the request and free functions
+ * These are unfortunately architecture specific as no generic allocation
+ * mechanism exists
+ */
+#define SMC_DMA_REQUEST(dev, handler) \
+ pxa_request_dma(dev->name, DMA_PRIO_LOW, handler, dev)
+
+#define SMC_DMA_FREE(dev, dma) \
+ pxa_free_dma(dma)
+
+#define SMC_DMA_ACK_IRQ(dev, dma) \
+{ \
+ if (DCSR(dma) & DCSR_BUSERR) { \
+ printk("%s: DMA %d bus error!\n", dev->name, dma); \
+ } \
+ DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR; \
+}
+
+/*
+ * Use a DMA for RX and TX packets.
+ */
+#include <linux/dma-mapping.h>
+#include <asm/dma.h>
+#include <asm/arch/pxa-regs.h>
+
+static dma_addr_t rx_dmabuf, tx_dmabuf;
+static int rx_dmalen, tx_dmalen;
+
+#ifdef SMC_insl
+#undef SMC_insl
+#define SMC_insl(a, r, p, l) \
+ smc_pxa_dma_insl(lp->dev, a, lp->physaddr, r, lp->rxdma, p, l)
+
+static inline void
+smc_pxa_dma_insl(struct device *dev, u_long ioaddr, u_long physaddr,
+ int reg, int dma, u_char *buf, int len)
+{
+ /* 64 bit alignment is required for memory to memory DMA */
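+ /* if the buffer is 4-byte but not 8-byte aligned, read one word by PIO first */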
+ if ((long)buf & 4) {
+ *((u32 *)buf) = SMC_inl(ioaddr, reg);
+ buf += 4;
+ len--;
+ }
+
+ len *= 4;
+ rx_dmabuf = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
+ rx_dmalen = len;
+ DCSR(dma) = DCSR_NODESC;
+ DTADR(dma) = rx_dmabuf;
+ DSADR(dma) = physaddr + reg;
+ DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
+ DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & rx_dmalen));
+ DCSR(dma) = DCSR_NODESC | DCSR_RUN;
+}
+#endif
+
+#ifdef SMC_insw
+#undef SMC_insw
+#define SMC_insw(a, r, p, l) \
+ smc_pxa_dma_insw(lp->dev, a, lp->physaddr, r, lp->rxdma, p, l)
+
+static inline void
+smc_pxa_dma_insw(struct device *dev, u_long ioaddr, u_long physaddr,
+ int reg, int dma, u_char *buf, int len)
+{
+ /* 64 bit alignment is required for memory to memory DMA */
+ while ((long)buf & 6) {
+ *((u16 *)buf) = SMC_inw(ioaddr, reg);
+ buf += 2;
+ len--;
+ }
+
+ len *= 2;
+ rx_dmabuf = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
+ rx_dmalen = len;
+ DCSR(dma) = DCSR_NODESC;
+ DTADR(dma) = rx_dmabuf;
+ DSADR(dma) = physaddr + reg;
+ DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
+ DCMD_WIDTH2 | DCMD_ENDIRQEN | (DCMD_LENGTH & rx_dmalen));
+ DCSR(dma) = DCSR_NODESC | DCSR_RUN;
+}
+#endif
+
+#ifdef SMC_outsl
+#undef SMC_outsl
+#define SMC_outsl(a, r, p, l) \
+ smc_pxa_dma_outsl(lp->dev, a, lp->physaddr, r, lp->txdma, p, l)
+
+static inline void
+smc_pxa_dma_outsl(struct device *dev, u_long ioaddr, u_long physaddr,
+ int reg, int dma, u_char *buf, int len)
+{
+ /* 64 bit alignment is required for memory to memory DMA */
+ if ((long)buf & 4) {
+ SMC_outl(*((u32 *)buf), ioaddr, reg);
+ buf += 4;
+ len--;
+ }
+
+ len *= 4;
+ tx_dmabuf = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
+ tx_dmalen = len;
+ DCSR(dma) = DCSR_NODESC;
+ DSADR(dma) = tx_dmabuf;
+ DTADR(dma) = physaddr + reg;
+ DCMD(dma) = (DCMD_INCSRCADDR | DCMD_BURST32 |
+ DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & tx_dmalen));
+ DCSR(dma) = DCSR_NODESC | DCSR_RUN;
+}
+#endif
+
+#ifdef SMC_outsw
+#undef SMC_outsw
+#define SMC_outsw(a, r, p, l) \
+ smc_pxa_dma_outsw(lp->dev, a, lp->physaddr, r, lp->txdma, p, l)
+
+static inline void
+smc_pxa_dma_outsw(struct device *dev, u_long ioaddr, u_long physaddr,
+ int reg, int dma, u_char *buf, int len)
+{
+ /* 64 bit alignment is required for memory to memory DMA */
+ while ((long)buf & 6) {
+ SMC_outw(*((u16 *)buf), ioaddr, reg);
+ buf += 2;
+ len--;
+ }
+
+ len *= 2;
+ tx_dmabuf = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
+ tx_dmalen = len;
+ DCSR(dma) = DCSR_NODESC;
+ DSADR(dma) = tx_dmabuf;
+ DTADR(dma) = physaddr + reg;
+ DCMD(dma) = (DCMD_INCSRCADDR | DCMD_BURST32 |
+ DCMD_WIDTH2 | DCMD_ENDIRQEN | (DCMD_LENGTH & tx_dmalen));
+ DCSR(dma) = DCSR_NODESC | DCSR_RUN;
+}
+#endif
+
+#endif /* SMC_USE_PXA_DMA */
+
+
+/* Chip Parameters and Register Definitions */
+
+#define SMC911X_TX_FIFO_LOW_THRESHOLD (1536*2)
+
+#define SMC911X_IO_EXTENT 0x100
+
+#define SMC911X_EEPROM_LEN 7
+
+/* Below are the register offsets and bit definitions
+ * of the Lan911x memory space
+ */
+#define RX_DATA_FIFO (0x00)
+
+#define TX_DATA_FIFO (0x20)
+#define TX_CMD_A_INT_ON_COMP_ (0x80000000)
+#define TX_CMD_A_INT_BUF_END_ALGN_ (0x03000000)
+#define TX_CMD_A_INT_4_BYTE_ALGN_ (0x00000000)
+#define TX_CMD_A_INT_16_BYTE_ALGN_ (0x01000000)
+#define TX_CMD_A_INT_32_BYTE_ALGN_ (0x02000000)
+#define TX_CMD_A_INT_DATA_OFFSET_ (0x001F0000)
+#define TX_CMD_A_INT_FIRST_SEG_ (0x00002000)
+#define TX_CMD_A_INT_LAST_SEG_ (0x00001000)
+#define TX_CMD_A_BUF_SIZE_ (0x000007FF)
+#define TX_CMD_B_PKT_TAG_ (0xFFFF0000)
+#define TX_CMD_B_ADD_CRC_DISABLE_ (0x00002000)
+#define TX_CMD_B_DISABLE_PADDING_ (0x00001000)
+#define TX_CMD_B_PKT_BYTE_LENGTH_ (0x000007FF)
+
+#define RX_STATUS_FIFO (0x40)
+#define RX_STS_PKT_LEN_ (0x3FFF0000)
+#define RX_STS_ES_ (0x00008000)
+#define RX_STS_BCST_ (0x00002000)
+#define RX_STS_LEN_ERR_ (0x00001000)
+#define RX_STS_RUNT_ERR_ (0x00000800)
+#define RX_STS_MCAST_ (0x00000400)
+#define RX_STS_TOO_LONG_ (0x00000080)
+#define RX_STS_COLL_ (0x00000040)
+#define RX_STS_ETH_TYPE_ (0x00000020)
+#define RX_STS_WDOG_TMT_ (0x00000010)
+#define RX_STS_MII_ERR_ (0x00000008)
+#define RX_STS_DRIBBLING_ (0x00000004)
+#define RX_STS_CRC_ERR_ (0x00000002)
+#define RX_STATUS_FIFO_PEEK (0x44)
+#define TX_STATUS_FIFO (0x48)
+#define TX_STS_TAG_ (0xFFFF0000)
+#define TX_STS_ES_ (0x00008000)
+#define TX_STS_LOC_ (0x00000800)
+#define TX_STS_NO_CARR_ (0x00000400)
+#define TX_STS_LATE_COLL_ (0x00000200)
+#define TX_STS_MANY_COLL_ (0x00000100)
+#define TX_STS_COLL_CNT_ (0x00000078)
+#define TX_STS_MANY_DEFER_ (0x00000004)
+#define TX_STS_UNDERRUN_ (0x00000002)
+#define TX_STS_DEFERRED_ (0x00000001)
+#define TX_STATUS_FIFO_PEEK (0x4C)
+#define ID_REV (0x50)
+#define ID_REV_CHIP_ID_ (0xFFFF0000) /* RO */
+#define ID_REV_REV_ID_ (0x0000FFFF) /* RO */
+
+#define INT_CFG (0x54)
+#define INT_CFG_INT_DEAS_ (0xFF000000) /* R/W */
+#define INT_CFG_INT_DEAS_CLR_ (0x00004000)
+#define INT_CFG_INT_DEAS_STS_ (0x00002000)
+#define INT_CFG_IRQ_INT_ (0x00001000) /* RO */
+#define INT_CFG_IRQ_EN_ (0x00000100) /* R/W */
+#define INT_CFG_IRQ_POL_ (0x00000010) /* R/W Not Affected by SW Reset */
+#define INT_CFG_IRQ_TYPE_ (0x00000001) /* R/W Not Affected by SW Reset */
+
+#define INT_STS (0x58)
+#define INT_STS_SW_INT_ (0x80000000) /* R/WC */
+#define INT_STS_TXSTOP_INT_ (0x02000000) /* R/WC */
+#define INT_STS_RXSTOP_INT_ (0x01000000) /* R/WC */
+#define INT_STS_RXDFH_INT_ (0x00800000) /* R/WC */
+#define INT_STS_RXDF_INT_ (0x00400000) /* R/WC */
+#define INT_STS_TX_IOC_ (0x00200000) /* R/WC */
+#define INT_STS_RXD_INT_ (0x00100000) /* R/WC */
+#define INT_STS_GPT_INT_ (0x00080000) /* R/WC */
+#define INT_STS_PHY_INT_ (0x00040000) /* RO */
+#define INT_STS_PME_INT_ (0x00020000) /* R/WC */
+#define INT_STS_TXSO_ (0x00010000) /* R/WC */
+#define INT_STS_RWT_ (0x00008000) /* R/WC */
+#define INT_STS_RXE_ (0x00004000) /* R/WC */
+#define INT_STS_TXE_ (0x00002000) /* R/WC */
+//#define INT_STS_ERX_ (0x00001000) /* R/WC */
+#define INT_STS_TDFU_ (0x00000800) /* R/WC */
+#define INT_STS_TDFO_ (0x00000400) /* R/WC */
+#define INT_STS_TDFA_ (0x00000200) /* R/WC */
+#define INT_STS_TSFF_ (0x00000100) /* R/WC */
+#define INT_STS_TSFL_ (0x00000080) /* R/WC */
+//#define INT_STS_RXDF_ (0x00000040) /* R/WC */
+#define INT_STS_RDFO_ (0x00000040) /* R/WC */
+#define INT_STS_RDFL_ (0x00000020) /* R/WC */
+#define INT_STS_RSFF_ (0x00000010) /* R/WC */
+#define INT_STS_RSFL_ (0x00000008) /* R/WC */
+#define INT_STS_GPIO2_INT_ (0x00000004) /* R/WC */
+#define INT_STS_GPIO1_INT_ (0x00000002) /* R/WC */
+#define INT_STS_GPIO0_INT_ (0x00000001) /* R/WC */
+
+#define INT_EN (0x5C)
+#define INT_EN_SW_INT_EN_ (0x80000000) /* R/W */
+#define INT_EN_TXSTOP_INT_EN_ (0x02000000) /* R/W */
+#define INT_EN_RXSTOP_INT_EN_ (0x01000000) /* R/W */
+#define INT_EN_RXDFH_INT_EN_ (0x00800000) /* R/W */
+//#define INT_EN_RXDF_INT_EN_ (0x00400000) /* R/W */
+#define INT_EN_TIOC_INT_EN_ (0x00200000) /* R/W */
+#define INT_EN_RXD_INT_EN_ (0x00100000) /* R/W */
+#define INT_EN_GPT_INT_EN_ (0x00080000) /* R/W */
+#define INT_EN_PHY_INT_EN_ (0x00040000) /* R/W */
+#define INT_EN_PME_INT_EN_ (0x00020000) /* R/W */
+#define INT_EN_TXSO_EN_ (0x00010000) /* R/W */
+#define INT_EN_RWT_EN_ (0x00008000) /* R/W */
+#define INT_EN_RXE_EN_ (0x00004000) /* R/W */
+#define INT_EN_TXE_EN_ (0x00002000) /* R/W */
+//#define INT_EN_ERX_EN_ (0x00001000) /* R/W */
+#define INT_EN_TDFU_EN_ (0x00000800) /* R/W */
+#define INT_EN_TDFO_EN_ (0x00000400) /* R/W */
+#define INT_EN_TDFA_EN_ (0x00000200) /* R/W */
+#define INT_EN_TSFF_EN_ (0x00000100) /* R/W */
+#define INT_EN_TSFL_EN_ (0x00000080) /* R/W */
+//#define INT_EN_RXDF_EN_ (0x00000040) /* R/W */
+#define INT_EN_RDFO_EN_ (0x00000040) /* R/W */
+#define INT_EN_RDFL_EN_ (0x00000020) /* R/W */
+#define INT_EN_RSFF_EN_ (0x00000010) /* R/W */
+#define INT_EN_RSFL_EN_ (0x00000008) /* R/W */
+#define INT_EN_GPIO2_INT_ (0x00000004) /* R/W */
+#define INT_EN_GPIO1_INT_ (0x00000002) /* R/W */
+#define INT_EN_GPIO0_INT_ (0x00000001) /* R/W */
+
+#define BYTE_TEST (0x64)
+#define FIFO_INT (0x68)
+#define FIFO_INT_TX_AVAIL_LEVEL_ (0xFF000000) /* R/W */
+#define FIFO_INT_TX_STS_LEVEL_ (0x00FF0000) /* R/W */
+#define FIFO_INT_RX_AVAIL_LEVEL_ (0x0000FF00) /* R/W */
+#define FIFO_INT_RX_STS_LEVEL_ (0x000000FF) /* R/W */
+
+#define RX_CFG (0x6C)
+#define RX_CFG_RX_END_ALGN_ (0xC0000000) /* R/W */
+#define RX_CFG_RX_END_ALGN4_ (0x00000000) /* R/W */
+#define RX_CFG_RX_END_ALGN16_ (0x40000000) /* R/W */
+#define RX_CFG_RX_END_ALGN32_ (0x80000000) /* R/W */
+#define RX_CFG_RX_DMA_CNT_ (0x0FFF0000) /* R/W */
+#define RX_CFG_RX_DUMP_ (0x00008000) /* R/W */
+#define RX_CFG_RXDOFF_ (0x00001F00) /* R/W */
+//#define RX_CFG_RXBAD_ (0x00000001) /* R/W */
+
+#define TX_CFG (0x70)
+//#define TX_CFG_TX_DMA_LVL_ (0xE0000000) /* R/W */
+//#define TX_CFG_TX_DMA_CNT_ (0x0FFF0000) /* R/W Self Clearing */
+#define TX_CFG_TXS_DUMP_ (0x00008000) /* Self Clearing */
+#define TX_CFG_TXD_DUMP_ (0x00004000) /* Self Clearing */
+#define TX_CFG_TXSAO_ (0x00000004) /* R/W */
+#define TX_CFG_TX_ON_ (0x00000002) /* R/W */
+#define TX_CFG_STOP_TX_ (0x00000001) /* Self Clearing */
+
+#define HW_CFG (0x74)
+#define HW_CFG_TTM_ (0x00200000) /* R/W */
+#define HW_CFG_SF_ (0x00100000) /* R/W */
+#define HW_CFG_TX_FIF_SZ_ (0x000F0000) /* R/W */
+#define HW_CFG_TR_ (0x00003000) /* R/W */
+#define HW_CFG_PHY_CLK_SEL_ (0x00000060) /* R/W */
+#define HW_CFG_PHY_CLK_SEL_INT_PHY_ (0x00000000) /* R/W */
+#define HW_CFG_PHY_CLK_SEL_EXT_PHY_ (0x00000020) /* R/W */
+#define HW_CFG_PHY_CLK_SEL_CLK_DIS_ (0x00000040) /* R/W */
+#define HW_CFG_SMI_SEL_ (0x00000010) /* R/W */
+#define HW_CFG_EXT_PHY_DET_ (0x00000008) /* RO */
+#define HW_CFG_EXT_PHY_EN_ (0x00000004) /* R/W */
+#define HW_CFG_32_16_BIT_MODE_ (0x00000004) /* RO */
+#define HW_CFG_SRST_TO_ (0x00000002) /* RO */
+#define HW_CFG_SRST_ (0x00000001) /* Self Clearing */
+
+#define RX_DP_CTRL (0x78)
+#define RX_DP_CTRL_RX_FFWD_ (0x80000000) /* R/W */
+#define RX_DP_CTRL_FFWD_BUSY_ (0x80000000) /* RO */
+
+#define RX_FIFO_INF (0x7C)
+#define RX_FIFO_INF_RXSUSED_ (0x00FF0000) /* RO */
+#define RX_FIFO_INF_RXDUSED_ (0x0000FFFF) /* RO */
+
+#define TX_FIFO_INF (0x80)
+#define TX_FIFO_INF_TSUSED_ (0x00FF0000) /* RO */
+#define TX_FIFO_INF_TDFREE_ (0x0000FFFF) /* RO */
+
+#define PMT_CTRL (0x84)
+#define PMT_CTRL_PM_MODE_ (0x00003000) /* Self Clearing */
+#define PMT_CTRL_PHY_RST_ (0x00000400) /* Self Clearing */
+#define PMT_CTRL_WOL_EN_ (0x00000200) /* R/W */
+#define PMT_CTRL_ED_EN_ (0x00000100) /* R/W */
+#define PMT_CTRL_PME_TYPE_ (0x00000040) /* R/W Not Affected by SW Reset */
+#define PMT_CTRL_WUPS_ (0x00000030) /* R/WC */
+#define PMT_CTRL_WUPS_NOWAKE_ (0x00000000) /* R/WC */
+#define PMT_CTRL_WUPS_ED_ (0x00000010) /* R/WC */
+#define PMT_CTRL_WUPS_WOL_ (0x00000020) /* R/WC */
+#define PMT_CTRL_WUPS_MULTI_ (0x00000030) /* R/WC */
+#define PMT_CTRL_PME_IND_ (0x00000008) /* R/W */
+#define PMT_CTRL_PME_POL_ (0x00000004) /* R/W */
+#define PMT_CTRL_PME_EN_ (0x00000002) /* R/W Not Affected by SW Reset */
+#define PMT_CTRL_READY_ (0x00000001) /* RO */
+
+#define GPIO_CFG (0x88)
+#define GPIO_CFG_LED3_EN_ (0x40000000) /* R/W */
+#define GPIO_CFG_LED2_EN_ (0x20000000) /* R/W */
+#define GPIO_CFG_LED1_EN_ (0x10000000) /* R/W */
+#define GPIO_CFG_GPIO2_INT_POL_ (0x04000000) /* R/W */
+#define GPIO_CFG_GPIO1_INT_POL_ (0x02000000) /* R/W */
+#define GPIO_CFG_GPIO0_INT_POL_ (0x01000000) /* R/W */
+#define GPIO_CFG_EEPR_EN_ (0x00700000) /* R/W */
+#define GPIO_CFG_GPIOBUF2_ (0x00040000) /* R/W */
+#define GPIO_CFG_GPIOBUF1_ (0x00020000) /* R/W */
+#define GPIO_CFG_GPIOBUF0_ (0x00010000) /* R/W */
+#define GPIO_CFG_GPIODIR2_ (0x00000400) /* R/W */
+#define GPIO_CFG_GPIODIR1_ (0x00000200) /* R/W */
+#define GPIO_CFG_GPIODIR0_ (0x00000100) /* R/W */
+#define GPIO_CFG_GPIOD4_ (0x00000010) /* R/W */
+#define GPIO_CFG_GPIOD3_ (0x00000008) /* R/W */
+#define GPIO_CFG_GPIOD2_ (0x00000004) /* R/W */
+#define GPIO_CFG_GPIOD1_ (0x00000002) /* R/W */
+#define GPIO_CFG_GPIOD0_ (0x00000001) /* R/W */
+
+#define GPT_CFG (0x8C)
+#define GPT_CFG_TIMER_EN_ (0x20000000) /* R/W */
+#define GPT_CFG_GPT_LOAD_ (0x0000FFFF) /* R/W */
+
+#define GPT_CNT (0x90)
+#define GPT_CNT_GPT_CNT_ (0x0000FFFF) /* RO */
+
+#define ENDIAN (0x98)
+#define FREE_RUN (0x9C)
+#define RX_DROP (0xA0)
+#define MAC_CSR_CMD (0xA4)
+#define MAC_CSR_CMD_CSR_BUSY_ (0x80000000) /* Self Clearing */
+#define MAC_CSR_CMD_R_NOT_W_ (0x40000000) /* R/W */
+#define MAC_CSR_CMD_CSR_ADDR_ (0x000000FF) /* R/W */
+
+#define MAC_CSR_DATA (0xA8)
+#define AFC_CFG (0xAC)
+#define AFC_CFG_AFC_HI_ (0x00FF0000) /* R/W */
+#define AFC_CFG_AFC_LO_ (0x0000FF00) /* R/W */
+#define AFC_CFG_BACK_DUR_ (0x000000F0) /* R/W */
+#define AFC_CFG_FCMULT_ (0x00000008) /* R/W */
+#define AFC_CFG_FCBRD_ (0x00000004) /* R/W */
+#define AFC_CFG_FCADD_ (0x00000002) /* R/W */
+#define AFC_CFG_FCANY_ (0x00000001) /* R/W */
+
+#define E2P_CMD (0xB0)
+#define E2P_CMD_EPC_BUSY_ (0x80000000) /* Self Clearing */
+#define E2P_CMD_EPC_CMD_ (0x70000000) /* R/W */
+#define E2P_CMD_EPC_CMD_READ_ (0x00000000) /* R/W */
+#define E2P_CMD_EPC_CMD_EWDS_ (0x10000000) /* R/W */
+#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000) /* R/W */
+#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000) /* R/W */
+#define E2P_CMD_EPC_CMD_WRAL_ (0x40000000) /* R/W */
+#define E2P_CMD_EPC_CMD_ERASE_ (0x50000000) /* R/W */
+#define E2P_CMD_EPC_CMD_ERAL_ (0x60000000) /* R/W */
+#define E2P_CMD_EPC_CMD_RELOAD_ (0x70000000) /* R/W */
+#define E2P_CMD_EPC_TIMEOUT_ (0x00000200) /* RO */
+#define E2P_CMD_MAC_ADDR_LOADED_ (0x00000100) /* RO */
+#define E2P_CMD_EPC_ADDR_ (0x000000FF) /* R/W */
+
+#define E2P_DATA (0xB4)
+#define E2P_DATA_EEPROM_DATA_ (0x000000FF) /* R/W */
+/* end of LAN register offsets and bit definitions */
+
+/*
+ ****************************************************************************
+ ****************************************************************************
+ * MAC Control and Status Register (Indirect Address)
+ * Offset (through the MAC_CSR CMD and DATA port)
+ ****************************************************************************
+ ****************************************************************************
+ *
+ */
+#define MAC_CR (0x01) /* R/W */
+
+/* MAC_CR - MAC Control Register */
+#define MAC_CR_RXALL_ (0x80000000)
+// TODO: delete this bit? It is not described in the data sheet.
+#define MAC_CR_HBDIS_ (0x10000000)
+#define MAC_CR_RCVOWN_ (0x00800000)
+#define MAC_CR_LOOPBK_ (0x00200000)
+#define MAC_CR_FDPX_ (0x00100000)
+#define MAC_CR_MCPAS_ (0x00080000)
+#define MAC_CR_PRMS_ (0x00040000)
+#define MAC_CR_INVFILT_ (0x00020000)
+#define MAC_CR_PASSBAD_ (0x00010000)
+#define MAC_CR_HFILT_ (0x00008000)
+#define MAC_CR_HPFILT_ (0x00002000)
+#define MAC_CR_LCOLL_ (0x00001000)
+#define MAC_CR_BCAST_ (0x00000800)
+#define MAC_CR_DISRTY_ (0x00000400)
+#define MAC_CR_PADSTR_ (0x00000100)
+#define MAC_CR_BOLMT_MASK_ (0x000000C0)
+#define MAC_CR_DFCHK_ (0x00000020)
+#define MAC_CR_TXEN_ (0x00000008)
+#define MAC_CR_RXEN_ (0x00000004)
+
+#define ADDRH (0x02) /* R/W mask 0x0000FFFFUL */
+#define ADDRL (0x03) /* R/W mask 0xFFFFFFFFUL */
+#define HASHH (0x04) /* R/W */
+#define HASHL (0x05) /* R/W */
+
+#define MII_ACC (0x06) /* R/W */
+#define MII_ACC_PHY_ADDR_ (0x0000F800)
+#define MII_ACC_MIIRINDA_ (0x000007C0)
+#define MII_ACC_MII_WRITE_ (0x00000002)
+#define MII_ACC_MII_BUSY_ (0x00000001)
+
+#define MII_DATA (0x07) /* R/W mask 0x0000FFFFUL */
+
+#define FLOW (0x08) /* R/W */
+#define FLOW_FCPT_ (0xFFFF0000)
+#define FLOW_FCPASS_ (0x00000004)
+#define FLOW_FCEN_ (0x00000002)
+#define FLOW_FCBSY_ (0x00000001)
+
+#define VLAN1 (0x09) /* R/W mask 0x0000FFFFUL */
+#define VLAN1_VTI1_ (0x0000ffff)
+
+#define VLAN2 (0x0A) /* R/W mask 0x0000FFFFUL */
+#define VLAN2_VTI2_ (0x0000ffff)
+
+#define WUFF (0x0B) /* WO */
+
+#define WUCSR (0x0C) /* R/W */
+#define WUCSR_GUE_ (0x00000200)
+#define WUCSR_WUFR_ (0x00000040)
+#define WUCSR_MPR_ (0x00000020)
+#define WUCSR_WAKE_EN_ (0x00000004)
+#define WUCSR_MPEN_ (0x00000002)
+
+/*
+ ****************************************************************************
+ * Chip Specific MII Defines
+ ****************************************************************************
+ *
+ * Phy register offsets and bit definitions
+ *
+ */
+
+#define PHY_MODE_CTRL_STS ((u32)17) /* Mode Control/Status Register */
+//#define MODE_CTRL_STS_FASTRIP_ ((u16)0x4000)
+#define MODE_CTRL_STS_EDPWRDOWN_ ((u16)0x2000)
+//#define MODE_CTRL_STS_LOWSQEN_ ((u16)0x0800)
+//#define MODE_CTRL_STS_MDPREBP_ ((u16)0x0400)
+//#define MODE_CTRL_STS_FARLOOPBACK_ ((u16)0x0200)
+//#define MODE_CTRL_STS_FASTEST_ ((u16)0x0100)
+//#define MODE_CTRL_STS_REFCLKEN_ ((u16)0x0010)
+//#define MODE_CTRL_STS_PHYADBP_ ((u16)0x0008)
+//#define MODE_CTRL_STS_FORCE_G_LINK_ ((u16)0x0004)
+#define MODE_CTRL_STS_ENERGYON_ ((u16)0x0002)
+
+#define PHY_INT_SRC ((u32)29)
+#define PHY_INT_SRC_ENERGY_ON_ ((u16)0x0080)
+#define PHY_INT_SRC_ANEG_COMP_ ((u16)0x0040)
+#define PHY_INT_SRC_REMOTE_FAULT_ ((u16)0x0020)
+#define PHY_INT_SRC_LINK_DOWN_ ((u16)0x0010)
+#define PHY_INT_SRC_ANEG_LP_ACK_ ((u16)0x0008)
+#define PHY_INT_SRC_PAR_DET_FAULT_ ((u16)0x0004)
+#define PHY_INT_SRC_ANEG_PGRX_ ((u16)0x0002)
+
+#define PHY_INT_MASK ((u32)30)
+#define PHY_INT_MASK_ENERGY_ON_ ((u16)0x0080)
+#define PHY_INT_MASK_ANEG_COMP_ ((u16)0x0040)
+#define PHY_INT_MASK_REMOTE_FAULT_ ((u16)0x0020)
+#define PHY_INT_MASK_LINK_DOWN_ ((u16)0x0010)
+#define PHY_INT_MASK_ANEG_LP_ACK_ ((u16)0x0008)
+#define PHY_INT_MASK_PAR_DET_FAULT_ ((u16)0x0004)
+#define PHY_INT_MASK_ANEG_PGRX_ ((u16)0x0002)
+
+#define PHY_SPECIAL ((u32)31)
+#define PHY_SPECIAL_ANEG_DONE_ ((u16)0x1000)
+#define PHY_SPECIAL_RES_ ((u16)0x0040)
+#define PHY_SPECIAL_RES_MASK_ ((u16)0x0FE1)
+#define PHY_SPECIAL_SPD_ ((u16)0x001C)
+#define PHY_SPECIAL_SPD_10HALF_ ((u16)0x0004)
+#define PHY_SPECIAL_SPD_10FULL_ ((u16)0x0014)
+#define PHY_SPECIAL_SPD_100HALF_ ((u16)0x0008)
+#define PHY_SPECIAL_SPD_100FULL_ ((u16)0x0018)
+
+#define LAN911X_INTERNAL_PHY_ID (0x0007C000)
+
+/* Chip ID values */
+#define CHIP_9115 0x115
+#define CHIP_9116 0x116
+#define CHIP_9117 0x117
+#define CHIP_9118 0x118
+
+struct chip_id {
+ u16 id;
+ char *name;
+};
+
+static const struct chip_id chip_ids[] = {
+ { CHIP_9115, "LAN9115" },
+ { CHIP_9116, "LAN9116" },
+ { CHIP_9117, "LAN9117" },
+ { CHIP_9118, "LAN9118" },
+ { 0, NULL },
+};
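+/*
+ * smc911x_probe() walks this table until the terminating zero entry
+ * to match the value read from the ID_REV register.
+ */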
+
+#define IS_REV_A(x) ((x & 0xFFFF)==0)
+
+/*
+ * Macros to abstract register access according to the data bus
+ * capabilities. Please use those and not the in/out primitives.
+ */
+/* FIFO read/write macros */
+#define SMC_PUSH_DATA(p, l) SMC_outsl( ioaddr, TX_DATA_FIFO, p, (l) >> 2 )
+#define SMC_PULL_DATA(p, l) SMC_insl ( ioaddr, RX_DATA_FIFO, p, (l) >> 2 )
+#define SMC_SET_TX_FIFO(x) SMC_outl( x, ioaddr, TX_DATA_FIFO )
+#define SMC_GET_RX_FIFO() SMC_inl( ioaddr, RX_DATA_FIFO )
+
+
+/* I/O mapped register read/write macros */
+#define SMC_GET_TX_STS_FIFO() SMC_inl( ioaddr, TX_STATUS_FIFO )
+#define SMC_GET_RX_STS_FIFO() SMC_inl( ioaddr, RX_STATUS_FIFO )
+#define SMC_GET_RX_STS_FIFO_PEEK() SMC_inl( ioaddr, RX_STATUS_FIFO_PEEK )
+#define SMC_GET_PN() (SMC_inl( ioaddr, ID_REV ) >> 16)
+#define SMC_GET_REV() (SMC_inl( ioaddr, ID_REV ) & 0xFFFF)
+#define SMC_GET_IRQ_CFG() SMC_inl( ioaddr, INT_CFG )
+#define SMC_SET_IRQ_CFG(x) SMC_outl( x, ioaddr, INT_CFG )
+#define SMC_GET_INT() SMC_inl( ioaddr, INT_STS )
+#define SMC_ACK_INT(x) SMC_outl( x, ioaddr, INT_STS )
+#define SMC_GET_INT_EN() SMC_inl( ioaddr, INT_EN )
+#define SMC_SET_INT_EN(x) SMC_outl( x, ioaddr, INT_EN )
+#define SMC_GET_BYTE_TEST() SMC_inl( ioaddr, BYTE_TEST )
+#define SMC_SET_BYTE_TEST(x) SMC_outl( x, ioaddr, BYTE_TEST )
+#define SMC_GET_FIFO_INT() SMC_inl( ioaddr, FIFO_INT )
+#define SMC_SET_FIFO_INT(x) SMC_outl( x, ioaddr, FIFO_INT )
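+/*
+ * The helpers below each update one byte-wide level field of FIFO_INT
+ * (TX data available, TX status, RX data available and RX status levels)
+ * with a read-modify-write done under local_irq_save() so the other
+ * fields are preserved.
+ */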
+#define SMC_SET_FIFO_TDA(x) \
+ do { \
+ unsigned long __flags; \
+ int __mask; \
+ local_irq_save(__flags); \
+ __mask = SMC_GET_FIFO_INT() & ~(0xFF<<24); \
+ SMC_SET_FIFO_INT( __mask | (x)<<24 ); \
+ local_irq_restore(__flags); \
+ } while (0)
+#define SMC_SET_FIFO_TSL(x) \
+ do { \
+ unsigned long __flags; \
+ int __mask; \
+ local_irq_save(__flags); \
+ __mask = SMC_GET_FIFO_INT() & ~(0xFF<<16); \
+ SMC_SET_FIFO_INT( __mask | (((x) & 0xFF)<<16)); \
+ local_irq_restore(__flags); \
+ } while (0)
+#define SMC_SET_FIFO_RSA(x) \
+ do { \
+ unsigned long __flags; \
+ int __mask; \
+ local_irq_save(__flags); \
+ __mask = SMC_GET_FIFO_INT() & ~(0xFF<<8); \
+ SMC_SET_FIFO_INT( __mask | (((x) & 0xFF)<<8)); \
+ local_irq_restore(__flags); \
+ } while (0)
+#define SMC_SET_FIFO_RSL(x) \
+ do { \
+ unsigned long __flags; \
+ int __mask; \
+ local_irq_save(__flags); \
+ __mask = SMC_GET_FIFO_INT() & ~0xFF; \
+ SMC_SET_FIFO_INT( __mask | ((x) & 0xFF)); \
+ local_irq_restore(__flags); \
+ } while (0)
+#define SMC_GET_RX_CFG() SMC_inl( ioaddr, RX_CFG )
+#define SMC_SET_RX_CFG(x) SMC_outl( x, ioaddr, RX_CFG )
+#define SMC_GET_TX_CFG() SMC_inl( ioaddr, TX_CFG )
+#define SMC_SET_TX_CFG(x) SMC_outl( x, ioaddr, TX_CFG )
+#define SMC_GET_HW_CFG() SMC_inl( ioaddr, HW_CFG )
+#define SMC_SET_HW_CFG(x) SMC_outl( x, ioaddr, HW_CFG )
+#define SMC_GET_RX_DP_CTRL() SMC_inl( ioaddr, RX_DP_CTRL )
+#define SMC_SET_RX_DP_CTRL(x) SMC_outl( x, ioaddr, RX_DP_CTRL )
+#define SMC_GET_PMT_CTRL() SMC_inl( ioaddr, PMT_CTRL )
+#define SMC_SET_PMT_CTRL(x) SMC_outl( x, ioaddr, PMT_CTRL )
+#define SMC_GET_GPIO_CFG() SMC_inl( ioaddr, GPIO_CFG )
+#define SMC_SET_GPIO_CFG(x) SMC_outl( x, ioaddr, GPIO_CFG )
+#define SMC_GET_RX_FIFO_INF() SMC_inl( ioaddr, RX_FIFO_INF )
+#define SMC_SET_RX_FIFO_INF(x) SMC_outl( x, ioaddr, RX_FIFO_INF )
+#define SMC_GET_TX_FIFO_INF() SMC_inl( ioaddr, TX_FIFO_INF )
+#define SMC_SET_TX_FIFO_INF(x) SMC_outl( x, ioaddr, TX_FIFO_INF )
+#define SMC_GET_GPT_CFG() SMC_inl( ioaddr, GPT_CFG )
+#define SMC_SET_GPT_CFG(x) SMC_outl( x, ioaddr, GPT_CFG )
+#define SMC_GET_RX_DROP() SMC_inl( ioaddr, RX_DROP )
+#define SMC_SET_RX_DROP(x) SMC_outl( x, ioaddr, RX_DROP )
+#define SMC_GET_MAC_CMD() SMC_inl( ioaddr, MAC_CSR_CMD )
+#define SMC_SET_MAC_CMD(x) SMC_outl( x, ioaddr, MAC_CSR_CMD )
+#define SMC_GET_MAC_DATA() SMC_inl( ioaddr, MAC_CSR_DATA )
+#define SMC_SET_MAC_DATA(x) SMC_outl( x, ioaddr, MAC_CSR_DATA )
+#define SMC_GET_AFC_CFG() SMC_inl( ioaddr, AFC_CFG )
+#define SMC_SET_AFC_CFG(x) SMC_outl( x, ioaddr, AFC_CFG )
+#define SMC_GET_E2P_CMD() SMC_inl( ioaddr, E2P_CMD )
+#define SMC_SET_E2P_CMD(x) SMC_outl( x, ioaddr, E2P_CMD )
+#define SMC_GET_E2P_DATA() SMC_inl( ioaddr, E2P_DATA )
+#define SMC_SET_E2P_DATA(x) SMC_outl( x, ioaddr, E2P_DATA )
+
+/* MAC register read/write macros */
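+/*
+ * The MAC CSRs are accessed indirectly: wait for the MAC_CSR_CMD busy
+ * bit to clear, write the CSR index (plus R_NOT_W_ for a read) with the
+ * busy bit set, wait for busy to clear again, then move the value
+ * through MAC_CSR_DATA.
+ */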
+#define SMC_GET_MAC_CSR(a,v) \
+ do { \
+ while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_); \
+ SMC_SET_MAC_CMD(MAC_CSR_CMD_CSR_BUSY_ | \
+ MAC_CSR_CMD_R_NOT_W_ | (a) ); \
+ while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_); \
+ v = SMC_GET_MAC_DATA(); \
+ } while (0)
+#define SMC_SET_MAC_CSR(a,v) \
+ do { \
+ while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_); \
+ SMC_SET_MAC_DATA(v); \
+ SMC_SET_MAC_CMD(MAC_CSR_CMD_CSR_BUSY_ | (a) ); \
+ while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_); \
+ } while (0)
+#define SMC_GET_MAC_CR(x) SMC_GET_MAC_CSR( MAC_CR, x )
+#define SMC_SET_MAC_CR(x) SMC_SET_MAC_CSR( MAC_CR, x )
+#define SMC_GET_ADDRH(x) SMC_GET_MAC_CSR( ADDRH, x )
+#define SMC_SET_ADDRH(x) SMC_SET_MAC_CSR( ADDRH, x )
+#define SMC_GET_ADDRL(x) SMC_GET_MAC_CSR( ADDRL, x )
+#define SMC_SET_ADDRL(x) SMC_SET_MAC_CSR( ADDRL, x )
+#define SMC_GET_HASHH(x) SMC_GET_MAC_CSR( HASHH, x )
+#define SMC_SET_HASHH(x) SMC_SET_MAC_CSR( HASHH, x )
+#define SMC_GET_HASHL(x) SMC_GET_MAC_CSR( HASHL, x )
+#define SMC_SET_HASHL(x) SMC_SET_MAC_CSR( HASHL, x )
+#define SMC_GET_MII_ACC(x) SMC_GET_MAC_CSR( MII_ACC, x )
+#define SMC_SET_MII_ACC(x) SMC_SET_MAC_CSR( MII_ACC, x )
+#define SMC_GET_MII_DATA(x) SMC_GET_MAC_CSR( MII_DATA, x )
+#define SMC_SET_MII_DATA(x) SMC_SET_MAC_CSR( MII_DATA, x )
+#define SMC_GET_FLOW(x) SMC_GET_MAC_CSR( FLOW, x )
+#define SMC_SET_FLOW(x) SMC_SET_MAC_CSR( FLOW, x )
+#define SMC_GET_VLAN1(x) SMC_GET_MAC_CSR( VLAN1, x )
+#define SMC_SET_VLAN1(x) SMC_SET_MAC_CSR( VLAN1, x )
+#define SMC_GET_VLAN2(x) SMC_GET_MAC_CSR( VLAN2, x )
+#define SMC_SET_VLAN2(x) SMC_SET_MAC_CSR( VLAN2, x )
+#define SMC_SET_WUFF(x) SMC_SET_MAC_CSR( WUFF, x )
+#define SMC_GET_WUCSR(x) SMC_GET_MAC_CSR( WUCSR, x )
+#define SMC_SET_WUCSR(x) SMC_SET_MAC_CSR( WUCSR, x )
+
+/* PHY register read/write macros */
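+/*
+ * MII_ACC layout: PHY address in bits 15:11 and register index in bits
+ * 10:6, hence the (phy)<<11 and (a)<<6 below; bit 1 selects a write and
+ * bit 0 is the busy/start bit these macros poll on.
+ */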
+#define SMC_GET_MII(a,phy,v) \
+ do { \
+ u32 __v; \
+ do { \
+ SMC_GET_MII_ACC(__v); \
+ } while ( __v & MII_ACC_MII_BUSY_ ); \
+ SMC_SET_MII_ACC( ((phy)<<11) | ((a)<<6) | \
+ MII_ACC_MII_BUSY_); \
+ do { \
+ SMC_GET_MII_ACC(__v); \
+ } while ( __v & MII_ACC_MII_BUSY_ ); \
+ SMC_GET_MII_DATA(v); \
+ } while (0)
+#define SMC_SET_MII(a,phy,v) \
+ do { \
+ u32 __v; \
+ do { \
+ SMC_GET_MII_ACC(__v); \
+ } while ( __v & MII_ACC_MII_BUSY_ ); \
+ SMC_SET_MII_DATA(v); \
+ SMC_SET_MII_ACC( ((phy)<<11) | ((a)<<6) | \
+ MII_ACC_MII_BUSY_ | \
+ MII_ACC_MII_WRITE_ ); \
+ do { \
+ SMC_GET_MII_ACC(__v); \
+ } while ( __v & MII_ACC_MII_BUSY_ ); \
+ } while (0)
+#define SMC_GET_PHY_BMCR(phy,x) SMC_GET_MII( MII_BMCR, phy, x )
+#define SMC_SET_PHY_BMCR(phy,x) SMC_SET_MII( MII_BMCR, phy, x )
+#define SMC_GET_PHY_BMSR(phy,x) SMC_GET_MII( MII_BMSR, phy, x )
+#define SMC_GET_PHY_ID1(phy,x) SMC_GET_MII( MII_PHYSID1, phy, x )
+#define SMC_GET_PHY_ID2(phy,x) SMC_GET_MII( MII_PHYSID2, phy, x )
+#define SMC_GET_PHY_MII_ADV(phy,x) SMC_GET_MII( MII_ADVERTISE, phy, x )
+#define SMC_SET_PHY_MII_ADV(phy,x) SMC_SET_MII( MII_ADVERTISE, phy, x )
+#define SMC_GET_PHY_MII_LPA(phy,x) SMC_GET_MII( MII_LPA, phy, x )
+#define SMC_SET_PHY_MII_LPA(phy,x) SMC_SET_MII( MII_LPA, phy, x )
+#define SMC_GET_PHY_CTRL_STS(phy,x) SMC_GET_MII( PHY_MODE_CTRL_STS, phy, x )
+#define SMC_SET_PHY_CTRL_STS(phy,x) SMC_SET_MII( PHY_MODE_CTRL_STS, phy, x )
+#define SMC_GET_PHY_INT_SRC(phy,x) SMC_GET_MII( PHY_INT_SRC, phy, x )
+#define SMC_SET_PHY_INT_SRC(phy,x) SMC_SET_MII( PHY_INT_SRC, phy, x )
+#define SMC_GET_PHY_INT_MASK(phy,x) SMC_GET_MII( PHY_INT_MASK, phy, x )
+#define SMC_SET_PHY_INT_MASK(phy,x) SMC_SET_MII( PHY_INT_MASK, phy, x )
+#define SMC_GET_PHY_SPECIAL(phy,x) SMC_GET_MII( PHY_SPECIAL, phy, x )
+
+
+
+/* Misc read/write macros */
+
+#ifndef SMC_GET_MAC_ADDR
+#define SMC_GET_MAC_ADDR(addr) \
+ do { \
+ unsigned int __v; \
+ \
+ SMC_GET_MAC_CSR(ADDRL, __v); \
+ addr[0] = __v; addr[1] = __v >> 8; \
+ addr[2] = __v >> 16; addr[3] = __v >> 24; \
+ SMC_GET_MAC_CSR(ADDRH, __v); \
+ addr[4] = __v; addr[5] = __v >> 8; \
+ } while (0)
+#endif
+
+#define SMC_SET_MAC_ADDR(addr) \
+ do { \
+ SMC_SET_MAC_CSR(ADDRL, \
+ addr[0] | \
+ (addr[1] << 8) | \
+ (addr[2] << 16) | \
+ (addr[3] << 24)); \
+ SMC_SET_MAC_CSR(ADDRH, addr[4]|(addr[5] << 8));\
+ } while (0)
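+/*
+ * ADDRL holds MAC address bytes 0-3 (byte 0 in the low 8 bits) and
+ * ADDRH bytes 4-5, mirroring the unpacking in SMC_GET_MAC_ADDR above.
+ */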
+
+
+/* issue an EEPROM command for the given address and wait for completion */
+#define SMC_WRITE_EEPROM_CMD(cmd, addr) \
+ do { \
+ while (SMC_GET_E2P_CMD() & E2P_CMD_EPC_BUSY_); \
+ SMC_SET_E2P_CMD(E2P_CMD_EPC_BUSY_ | (cmd) | (addr)); \
+ while (SMC_GET_E2P_CMD() & E2P_CMD_EPC_BUSY_); \
+ } while (0)
+
+#endif /* _SMC911X_H_ */
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index e1be1af51201..f72a4f57905a 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -129,6 +129,24 @@
#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
+#elif defined(CONFIG_MACH_LOGICPD_PXA270)
+
+#define SMC_CAN_USE_8BIT 0
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 0
+#define SMC_IO_SHIFT 0
+#define SMC_NOWAIT 1
+#define SMC_USE_PXA_DMA 1
+
+#define SMC_inb(a, r) readb((a) + (r))
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_inl(a, r) readl((a) + (r))
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outw(v, a, r) writew(v, (a) + (r))
+#define SMC_outl(v, a, r) writel(v, (a) + (r))
+#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
+#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
+
#elif defined(CONFIG_ARCH_INNOKOM) || \
defined(CONFIG_MACH_MAINSTONE) || \
defined(CONFIG_ARCH_PXA_IDP) || \
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index b2ddd5e79303..9282b4b0c022 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -345,9 +345,9 @@ static int bcm5421_enable_fiber(struct mii_phy* phy)
static int bcm5461_enable_fiber(struct mii_phy* phy)
{
- phy_write(phy, MII_NCONFIG, 0xfc0c);
- phy_write(phy, MII_BMCR, 0x4140);
- phy_write(phy, MII_NCONFIG, 0xfc0b);
+ phy_write(phy, MII_NCONFIG, 0xfc0c);
+ phy_write(phy, MII_BMCR, 0x4140);
+ phy_write(phy, MII_NCONFIG, 0xfc0b);
phy_write(phy, MII_BMCR, 0x0140);
return 0;
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index e3dd144d326b..5f743b972949 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -227,12 +227,12 @@ enum {
SROMC0InfoLeaf = 27,
MediaBlockMask = 0x3f,
MediaCustomCSRs = (1 << 6),
-
+
/* PCIPM bits */
PM_Sleep = (1 << 31),
PM_Snooze = (1 << 30),
PM_Mask = PM_Sleep | PM_Snooze,
-
+
/* SIAStatus bits */
NWayState = (1 << 14) | (1 << 13) | (1 << 12),
NWayRestart = (1 << 12),
@@ -858,7 +858,7 @@ static void de_stop_rxtx (struct de_private *de)
return;
cpu_relax();
}
-
+
printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name);
}
@@ -931,7 +931,7 @@ static void de_set_media (struct de_private *de)
macmode |= FullDuplex;
else
macmode &= ~FullDuplex;
-
+
if (netif_msg_link(de)) {
printk(KERN_INFO "%s: set link %s\n"
KERN_INFO "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n"
@@ -966,9 +966,9 @@ static void de21040_media_timer (unsigned long data)
u32 status = dr32(SIAStatus);
unsigned int carrier;
unsigned long flags;
-
+
carrier = (status & NetCxnErr) ? 0 : 1;
-
+
if (carrier) {
if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
goto no_link_yet;
@@ -985,7 +985,7 @@ static void de21040_media_timer (unsigned long data)
return;
}
- de_link_down(de);
+ de_link_down(de);
if (de->media_lock)
return;
@@ -1039,7 +1039,7 @@ static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
return 0;
break;
}
-
+
return 1;
}
@@ -1050,9 +1050,9 @@ static void de21041_media_timer (unsigned long data)
u32 status = dr32(SIAStatus);
unsigned int carrier;
unsigned long flags;
-
+
carrier = (status & NetCxnErr) ? 0 : 1;
-
+
if (carrier) {
if ((de->media_type == DE_MEDIA_TP_AUTO ||
de->media_type == DE_MEDIA_TP ||
@@ -1072,7 +1072,7 @@ static void de21041_media_timer (unsigned long data)
return;
}
- de_link_down(de);
+ de_link_down(de);
/* if media type locked, don't switch media */
if (de->media_lock)
@@ -1124,7 +1124,7 @@ static void de21041_media_timer (unsigned long data)
u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
de_next_media(de, next_states, ARRAY_SIZE(next_states));
}
-
+
set_media:
spin_lock_irqsave(&de->lock, flags);
de_stop_rxtx(de);
@@ -1148,7 +1148,7 @@ static void de_media_interrupt (struct de_private *de, u32 status)
mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
return;
}
-
+
BUG_ON(!(status & LinkFail));
if (netif_carrier_ok(de->dev)) {
@@ -1227,7 +1227,7 @@ static int de_init_hw (struct de_private *de)
int rc;
de_adapter_wake(de);
-
+
macmode = dr32(MacMode) & ~MacModeClear;
rc = de_reset_mac(de);
@@ -1413,7 +1413,7 @@ static int de_close (struct net_device *dev)
netif_stop_queue(dev);
netif_carrier_off(dev);
spin_unlock_irqrestore(&de->lock, flags);
-
+
free_irq(dev->irq, dev);
de_free_rings(de);
@@ -1441,7 +1441,7 @@ static void de_tx_timeout (struct net_device *dev)
spin_unlock_irq(&de->lock);
enable_irq(dev->irq);
-
+
/* Update the error counts. */
__de_get_stats(de);
@@ -1451,7 +1451,7 @@ static void de_tx_timeout (struct net_device *dev)
de_init_rings(de);
de_init_hw(de);
-
+
netif_wake_queue(dev);
}
@@ -1459,7 +1459,7 @@ static void __de_get_regs(struct de_private *de, u8 *buf)
{
int i;
u32 *rbuf = (u32 *)buf;
-
+
/* read all CSRs */
for (i = 0; i < DE_NUM_REGS; i++)
rbuf[i] = dr32(i * 8);
@@ -1474,7 +1474,7 @@ static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
ecmd->transceiver = XCVR_INTERNAL;
ecmd->phy_address = 0;
ecmd->advertising = de->media_advertise;
-
+
switch (de->media_type) {
case DE_MEDIA_AUI:
ecmd->port = PORT_AUI;
@@ -1489,7 +1489,7 @@ static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
ecmd->speed = SPEED_10;
break;
}
-
+
if (dr32(MacMode) & FullDuplex)
ecmd->duplex = DUPLEX_FULL;
else
@@ -1529,7 +1529,7 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
if (ecmd->autoneg == AUTONEG_ENABLE &&
(!(ecmd->advertising & ADVERTISED_Autoneg)))
return -EINVAL;
-
+
switch (ecmd->port) {
case PORT_AUI:
new_media = DE_MEDIA_AUI;
@@ -1554,22 +1554,22 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
return -EINVAL;
break;
}
-
+
media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;
-
+
if ((new_media == de->media_type) &&
(media_lock == de->media_lock) &&
(ecmd->advertising == de->media_advertise))
return 0; /* nothing to change */
-
+
de_link_down(de);
de_stop_rxtx(de);
-
+
de->media_type = new_media;
de->media_lock = media_lock;
de->media_advertise = ecmd->advertising;
de_set_media(de);
-
+
return 0;
}
@@ -1817,7 +1817,7 @@ static void __init de21041_get_srom_info (struct de_private *de)
case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
default: de->media_type = DE_MEDIA_TP_AUTO; break;
}
-
+
if (netif_msg_probe(de))
printk(KERN_INFO "de%d: SROM leaf offset %u, default media %s\n",
de->board_idx, ofs,
@@ -1886,7 +1886,7 @@ static void __init de21041_get_srom_info (struct de_private *de)
de->media[idx].csr13,
de->media[idx].csr14,
de->media[idx].csr15);
-
+
} else if (netif_msg_probe(de))
printk("\n");
@@ -2118,7 +2118,7 @@ static int de_suspend (struct pci_dev *pdev, pm_message_t state)
spin_unlock_irq(&de->lock);
enable_irq(dev->irq);
-
+
/* Update the error counts. */
__de_get_stats(de);
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index f56094102042..da8bd0d62a3f 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -41,11 +41,11 @@
Digital Semiconductor SROM Specification. The driver currently
recognises the following chips:
- DC21040 (no SROM)
- DC21041[A]
- DC21140[A]
- DC21142
- DC21143
+ DC21040 (no SROM)
+ DC21041[A]
+ DC21140[A]
+ DC21142
+ DC21143
So far the driver is known to work with the following cards:
@@ -55,7 +55,7 @@
SMC8432
SMC9332 (w/new SROM)
ZNYX31[45]
- ZNYX346 10/100 4 port (can act as a 10/100 bridge!)
+ ZNYX346 10/100 4 port (can act as a 10/100 bridge!)
The driver has been tested on a relatively busy network using the DE425,
DE434, DE435 and DE500 cards and benchmarked with 'ttcp': it transferred
@@ -106,7 +106,7 @@
loading by:
insmod de4x5 io=0xghh where g = bus number
- hh = device number
+ hh = device number
NB: autoprobing for modules is now supported by default. You may just
use:
@@ -120,11 +120,11 @@
4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
kernel with the de4x5 configuration turned off and reboot.
5) insmod de4x5 [io=0xghh]
- 6) run the net startup bits for your new eth?? interface(s) manually
- (usually /etc/rc.inet[12] at boot time).
+ 6) run the net startup bits for your new eth?? interface(s) manually
+ (usually /etc/rc.inet[12] at boot time).
7) enjoy!
- To unload a module, turn off the associated interface(s)
+ To unload a module, turn off the associated interface(s)
'ifconfig eth?? down' then 'rmmod de4x5'.
Automedia detection is included so that in principal you can disconnect
@@ -135,7 +135,7 @@
By default, the driver will now autodetect any DECchip based card.
Should you have a need to restrict the driver to DIGITAL only cards, you
can compile with a DEC_ONLY define, or if loading as a module, use the
- 'dec_only=1' parameter.
+ 'dec_only=1' parameter.
I've changed the timing routines to use the kernel timer and scheduling
functions so that the hangs and other assorted problems that occurred
@@ -204,7 +204,7 @@
following parameters are allowed:
fdx for full duplex
- autosense to set the media/speed; with the following
+ autosense to set the media/speed; with the following
sub-parameters:
TP, TP_NW, BNC, AUI, BNC_AUI, 100Mb, 10Mb, AUTO
@@ -235,14 +235,14 @@
this automatically or include #define DE4X5_FORCE_EISA on or before
line 1040 in the driver.
- TO DO:
+ TO DO:
------
Revision History
----------------
Version Date Description
-
+
0.1 17-Nov-94 Initial writing. ALPHA code release.
0.2 13-Jan-95 Added PCI support for DE435's.
0.21 19-Jan-95 Added auto media detection.
@@ -251,7 +251,7 @@
Add request/release_region code.
Add loadable modules support for PCI.
Clean up loadable modules support.
- 0.23 28-Feb-95 Added DC21041 and DC21140 support.
+ 0.23 28-Feb-95 Added DC21041 and DC21140 support.
Fix missed frame counter value and initialisation.
Fixed EISA probe.
0.24 11-Apr-95 Change delay routine to use <linux/udelay>.
@@ -280,7 +280,7 @@
Add kernel timer code (h/w is too flaky).
Add MII based PHY autosense.
Add new multicasting code.
- Add new autosense algorithms for media/mode
+ Add new autosense algorithms for media/mode
selection using kernel scheduling/timing.
Re-formatted.
Made changes suggested by <jeff@router.patch.net>:
@@ -307,10 +307,10 @@
Add Accton to the list of broken cards.
Fix TX under-run bug for non DC21140 chips.
Fix boot command probe bug in alloc_device() as
- reported by <koen.gadeyne@barco.com> and
+ reported by <koen.gadeyne@barco.com> and
<orava@nether.tky.hut.fi>.
Add cache locks to prevent a race condition as
- reported by <csd@microplex.com> and
+ reported by <csd@microplex.com> and
<baba@beckman.uiuc.edu>.
Upgraded alloc_device() code.
0.431 28-Jun-96 Fix potential bug in queue_pkt() from discussion
@@ -322,7 +322,7 @@
with a loopback packet.
0.442 9-Sep-96 Include AUI in dc21041 media printout. Bug reported
by <bhat@mundook.cs.mu.OZ.AU>
- 0.45 8-Dec-96 Include endian functions for PPC use, from work
+ 0.45 8-Dec-96 Include endian functions for PPC use, from work
by <cort@cs.nmt.edu> and <g.thomas@opengroup.org>.
0.451 28-Dec-96 Added fix to allow autoprobe for modules after
suggestion from <mjacob@feral.com>.
@@ -346,14 +346,14 @@
<paubert@iram.es>.
0.52 26-Apr-97 Some changes may not credit the right people -
a disk crash meant I lost some mail.
- Change RX interrupt routine to drop rather than
- defer packets to avoid hang reported by
+ Change RX interrupt routine to drop rather than
+ defer packets to avoid hang reported by
<g.thomas@opengroup.org>.
Fix srom_exec() to return for COMPACT and type 1
infoblocks.
Added DC21142 and DC21143 functions.
Added byte counters from <phil@tazenda.demon.co.uk>
- Added SA_INTERRUPT temporary fix from
+ Added SA_INTERRUPT temporary fix from
<mjacob@feral.com>.
0.53 12-Nov-97 Fix the *_probe() to include 'eth??' name during
module load: bug reported by
@@ -363,10 +363,10 @@
Make above search independent of BIOS device scan
direction.
Completed DC2114[23] autosense functions.
- 0.531 21-Dec-97 Fix DE500-XA 100Mb/s bug reported by
+ 0.531 21-Dec-97 Fix DE500-XA 100Mb/s bug reported by
<robin@intercore.com
Fix type1_infoblock() bug introduced in 0.53, from
- problem reports by
+ problem reports by
<parmee@postecss.ncrfran.france.ncr.com> and
<jo@ice.dillingen.baynet.de>.
Added argument list to set up each board from either
@@ -374,7 +374,7 @@
Added generic MII PHY functionality to deal with
newer PHY chips.
Fix the mess in 2.1.67.
- 0.532 5-Jan-98 Fix bug in mii_get_phy() reported by
+ 0.532 5-Jan-98 Fix bug in mii_get_phy() reported by
<redhat@cococo.net>.
Fix bug in pci_probe() for 64 bit systems reported
by <belliott@accessone.com>.
@@ -398,7 +398,7 @@
version. I hope nothing is broken...
Add TX done interrupt modification from suggestion
by <Austin.Donnelly@cl.cam.ac.uk>.
- Fix is_anc_capable() bug reported by
+ Fix is_anc_capable() bug reported by
<Austin.Donnelly@cl.cam.ac.uk>.
Fix type[13]_infoblock() bug: during MII search, PHY
lp->rst not run because lp->ibn not initialised -
@@ -413,7 +413,7 @@
Add an_exception() for old ZYNX346 and fix compile
warning on PPC & SPARC, from <ecd@skynet.be>.
Fix lastPCI to correctly work with compiled in
- kernels and modules from bug report by
+ kernels and modules from bug report by
<Zlatko.Calusic@CARNet.hr> et al.
0.542 15-Sep-98 Fix dc2114x_autoconf() to stop multiple messages
when media is unconnected.
@@ -425,7 +425,7 @@
0.544 8-May-99 Fix for buggy SROM in Motorola embedded boards using
a 21143 by <mmporter@home.com>.
Change PCI/EISA bus probing order.
- 0.545 28-Nov-99 Further Moto SROM bug fix from
+ 0.545 28-Nov-99 Further Moto SROM bug fix from
<mporter@eng.mcd.mot.com>
Remove double checking for DEBUG_RX in de4x5_dbg_rx()
from report by <geert@linux-m68k.org>
@@ -434,8 +434,8 @@
variable 'pb', on a non de4x5 PCI device, in this
case a PCI bridge (DEC chip 21152). The value of
'pb' is now only initialized if a de4x5 chip is
- present.
- <france@handhelds.org>
+ present.
+ <france@handhelds.org>
0.547 08-Nov-01 Use library crc32 functions by <Matt_Domsch@dell.com>
0.548 30-Aug-03 Big 2.6 cleanup. Ported to PCI/EISA probing and
generic DMA APIs. Fixed DE425 support on Alpha.
@@ -584,7 +584,7 @@ static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION);
/*
** Allow per adapter set up. For modules this is simply a command line
-** parameter, e.g.:
+** parameter, e.g.:
** insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
**
** For a compiled in driver, place e.g.
@@ -655,7 +655,7 @@ static c_char *de4x5_signatures[] = DE4X5_SIGNATURE;
** Memory Alignment. Each descriptor is 4 longwords long. To force a
** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
** DESC_ALIGN. ALIGN aligns the start address of the private memory area
-** and hence the RX descriptor ring's first entry.
+** and hence the RX descriptor ring's first entry.
*/
#define DE4X5_ALIGN4 ((u_long)4 - 1) /* 1 longword align */
#define DE4X5_ALIGN8 ((u_long)8 - 1) /* 2 longword align */
@@ -1081,8 +1081,8 @@ static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = {
mdelay(2); /* Wait for 2ms */\
}
-
-static int __devinit
+
+static int __devinit
de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
{
char name[DE4X5_NAME_LENGTH + 1];
@@ -1102,12 +1102,12 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
mdelay(10);
RESET_DE4X5;
-
+
if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
return -ENXIO; /* Hardware could not reset */
}
-
- /*
+
+ /*
** Now find out what kind of DC21040/DC21041/DC21140 board we have.
*/
lp->useSROM = FALSE;
@@ -1116,21 +1116,21 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
} else {
EISA_signature(name, gendev);
}
-
+
if (*name == '\0') { /* Not found a board signature */
return -ENXIO;
}
-
+
dev->base_addr = iobase;
printk ("%s: %s at 0x%04lx", gendev->bus_id, name, iobase);
-
+
printk(", h/w address ");
status = get_hw_addr(dev);
for (i = 0; i < ETH_ALEN - 1; i++) { /* get the ethernet addr. */
printk("%2.2x:", dev->dev_addr[i]);
}
printk("%2.2x,\n", dev->dev_addr[i]);
-
+
if (status != 0) {
printk(" which has an Ethernet PROM CRC error.\n");
return -ENXIO;
@@ -1171,10 +1171,10 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
}
lp->tx_ring = lp->rx_ring + NUM_RX_DESC;
-
+
/*
** Set up the RX descriptor ring (Intels)
- ** Allocate contiguous receive buffers, long word aligned (Alphas)
+ ** Allocate contiguous receive buffers, long word aligned (Alphas)
*/
#if !defined(__alpha__) && !defined(__powerpc__) && !defined(__sparc_v9__) && !defined(DE4X5_DO_MEMCPY)
for (i=0; i<NUM_RX_DESC; i++) {
@@ -1210,7 +1210,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
lp->rxRingSize = NUM_RX_DESC;
lp->txRingSize = NUM_TX_DESC;
-
+
/* Write the end of list marker to the descriptor lists */
lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER);
lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER);
@@ -1219,7 +1219,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
outl(lp->dma_rings, DE4X5_RRBA);
outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
DE4X5_TRBA);
-
+
/* Initialise the IRQ mask and Enable/Disable */
lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM;
lp->irq_en = IMR_NIM | IMR_AIM;
@@ -1252,7 +1252,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) {
mii_get_phy(dev);
}
-
+
#ifndef __sparc_v9__
printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
#else
@@ -1260,11 +1260,11 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
#endif
((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
}
-
+
if (de4x5_debug & DEBUG_VERSION) {
printk(version);
}
-
+
/* The DE4X5-specific entries in the device structure. */
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, gendev);
@@ -1274,23 +1274,23 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
dev->get_stats = &de4x5_get_stats;
dev->set_multicast_list = &set_multicast_list;
dev->do_ioctl = &de4x5_ioctl;
-
+
dev->mem_start = 0;
-
+
/* Fill in the generic fields of the device structure. */
if ((status = register_netdev (dev))) {
dma_free_coherent (gendev, lp->dma_size,
lp->rx_ring, lp->dma_rings);
return status;
}
-
+
/* Let the adapter sleep to save power */
yawn(dev, SLEEP);
-
+
return status;
}
-
+
static int
de4x5_open(struct net_device *dev)
{
@@ -1312,15 +1312,15 @@ de4x5_open(struct net_device *dev)
*/
yawn(dev, WAKEUP);
- /*
- ** Re-initialize the DE4X5...
+ /*
+ ** Re-initialize the DE4X5...
*/
status = de4x5_init(dev);
spin_lock_init(&lp->lock);
lp->state = OPEN;
de4x5_dbg_open(dev);
-
- if (request_irq(dev->irq, (void *)de4x5_interrupt, SA_SHIRQ,
+
+ if (request_irq(dev->irq, (void *)de4x5_interrupt, SA_SHIRQ,
lp->adapter_name, dev)) {
printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
if (request_irq(dev->irq, de4x5_interrupt, SA_INTERRUPT | SA_SHIRQ,
@@ -1340,11 +1340,11 @@ de4x5_open(struct net_device *dev)
lp->interrupt = UNMASK_INTERRUPTS;
dev->trans_start = jiffies;
-
+
START_DE4X5;
-
+
de4x5_setup_intr(dev);
-
+
if (de4x5_debug & DEBUG_OPEN) {
printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
@@ -1355,7 +1355,7 @@ de4x5_open(struct net_device *dev)
printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
}
-
+
return status;
}
@@ -1369,15 +1369,15 @@ de4x5_open(struct net_device *dev)
*/
static int
de4x5_init(struct net_device *dev)
-{
+{
/* Lock out other processes whilst setting up the hardware */
netif_stop_queue(dev);
-
+
de4x5_sw_reset(dev);
-
+
/* Autoconfigure the connected port */
autoconf_media(dev);
-
+
return 0;
}
@@ -1388,7 +1388,7 @@ de4x5_sw_reset(struct net_device *dev)
u_long iobase = dev->base_addr;
int i, j, status = 0;
s32 bmr, omr;
-
+
/* Select the MII or SRL port now and RESET the MAC */
if (!lp->useSROM) {
if (lp->phy[lp->active].id != 0) {
@@ -1399,7 +1399,7 @@ de4x5_sw_reset(struct net_device *dev)
de4x5_switch_mac_port(dev);
}
- /*
+ /*
** Set the programmable burst length to 8 longwords for all the DC21140
** Fasternet chips and 4 longwords for all others: DMA errors result
** without these values. Cache align 16 long.
@@ -1416,23 +1416,23 @@ de4x5_sw_reset(struct net_device *dev)
outl(lp->dma_rings, DE4X5_RRBA);
outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
DE4X5_TRBA);
-
+
lp->rx_new = lp->rx_old = 0;
lp->tx_new = lp->tx_old = 0;
-
+
for (i = 0; i < lp->rxRingSize; i++) {
lp->rx_ring[i].status = cpu_to_le32(R_OWN);
}
-
+
for (i = 0; i < lp->txRingSize; i++) {
lp->tx_ring[i].status = cpu_to_le32(0);
}
-
+
barrier();
/* Build the setup frame depending on filtering mode */
SetMulticastFilter(dev);
-
+
load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1);
outl(omr|OMR_ST, DE4X5_OMR);
@@ -1445,18 +1445,18 @@ de4x5_sw_reset(struct net_device *dev)
outl(omr, DE4X5_OMR); /* Stop everything! */
if (j == 0) {
- printk("%s: Setup frame timed out, status %08x\n", dev->name,
+ printk("%s: Setup frame timed out, status %08x\n", dev->name,
inl(DE4X5_STS));
status = -EIO;
}
-
+
lp->tx_new = (++lp->tx_new) % lp->txRingSize;
lp->tx_old = lp->tx_new;
return status;
}
-/*
+/*
** Writes a socket buffer address to the next available transmit descriptor.
*/
static int
@@ -1469,9 +1469,9 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
if (lp->tx_enable == NO) { /* Cannot send for now */
- return -1;
+ return -1;
}
-
+
/*
** Clean out the TX ring asynchronously to interrupts - sometimes the
** interrupts are lost by delayed descriptor status updates relative to
@@ -1482,7 +1482,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&lp->lock, flags);
/* Test if cache is already locked - requeue skb if so */
- if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
+ if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
return -1;
/* Transmit descriptor ring full or stale skb */
@@ -1509,10 +1509,10 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
lp->stats.tx_bytes += skb->len;
outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
-
+
lp->tx_new = (++lp->tx_new) % lp->txRingSize;
dev->trans_start = jiffies;
-
+
if (TX_BUFFS_AVAIL) {
netif_start_queue(dev); /* Another pkt may be queued */
}
@@ -1521,15 +1521,15 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
}
if (skb) de4x5_putb_cache(dev, skb);
}
-
+
lp->cache.lock = 0;
return status;
}
/*
-** The DE4X5 interrupt handler.
-**
+** The DE4X5 interrupt handler.
+**
** I/O Read/Writes through intermediate PCI bridges are never 'posted',
** so that the asserted interrupt always has some real data to work with -
** if these I/O accesses are ever changed to memory accesses, ensure the
@@ -1546,7 +1546,7 @@ de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs)
s32 imr, omr, sts, limit;
u_long iobase;
unsigned int handled = 0;
-
+
if (dev == NULL) {
printk ("de4x5_interrupt(): irq %d for unknown device.\n", irq);
return IRQ_NONE;
@@ -1554,35 +1554,35 @@ de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs)
lp = netdev_priv(dev);
spin_lock(&lp->lock);
iobase = dev->base_addr;
-
+
DISABLE_IRQs; /* Ensure non re-entrancy */
if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
printk("%s: Re-entering the interrupt handler.\n", dev->name);
synchronize_irq(dev->irq);
-
+
for (limit=0; limit<8; limit++) {
sts = inl(DE4X5_STS); /* Read IRQ status */
outl(sts, DE4X5_STS); /* Reset the board interrupts */
-
+
if (!(sts & lp->irq_mask)) break;/* All done */
handled = 1;
-
+
if (sts & (STS_RI | STS_RU)) /* Rx interrupt (packet[s] arrived) */
de4x5_rx(dev);
-
+
if (sts & (STS_TI | STS_TU)) /* Tx interrupt (packet sent) */
- de4x5_tx(dev);
-
+ de4x5_tx(dev);
+
if (sts & STS_LNF) { /* TP Link has failed */
lp->irq_mask &= ~IMR_LFM;
}
-
+
if (sts & STS_UNF) { /* Transmit underrun */
de4x5_txur(dev);
}
-
+
if (sts & STS_SE) { /* Bus Error */
STOP_DE4X5;
printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
@@ -1603,7 +1603,7 @@ de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs)
lp->interrupt = UNMASK_INTERRUPTS;
ENABLE_IRQs;
spin_unlock(&lp->lock);
-
+
return IRQ_RETVAL(handled);
}
@@ -1614,11 +1614,11 @@ de4x5_rx(struct net_device *dev)
u_long iobase = dev->base_addr;
int entry;
s32 status;
-
+
for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
entry=lp->rx_new) {
status = (s32)le32_to_cpu(lp->rx_ring[entry].status);
-
+
if (lp->rx_ovf) {
if (inl(DE4X5_MFC) & MFC_FOCM) {
de4x5_rx_ovfc(dev);
@@ -1629,7 +1629,7 @@ de4x5_rx(struct net_device *dev)
if (status & RD_FS) { /* Remember the start of frame */
lp->rx_old = entry;
}
-
+
if (status & RD_LS) { /* Valid frame status */
if (lp->tx_enable) lp->linkOK++;
if (status & RD_ES) { /* There was an error. */
@@ -1646,9 +1646,9 @@ de4x5_rx(struct net_device *dev)
struct sk_buff *skb;
short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status)
>> 16) - 4;
-
+
if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
- printk("%s: Insufficient memory; nuking packet.\n",
+ printk("%s: Insufficient memory; nuking packet.\n",
dev->name);
lp->stats.rx_dropped++;
} else {
@@ -1658,14 +1658,14 @@ de4x5_rx(struct net_device *dev)
skb->protocol=eth_type_trans(skb,dev);
de4x5_local_stats(dev, skb->data, pkt_len);
netif_rx(skb);
-
+
/* Update stats */
dev->last_rx = jiffies;
lp->stats.rx_packets++;
lp->stats.rx_bytes += pkt_len;
}
}
-
+
/* Change buffer ownership for this frame, back to the adapter */
for (;lp->rx_old!=entry;lp->rx_old=(++lp->rx_old)%lp->rxRingSize) {
lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
@@ -1674,13 +1674,13 @@ de4x5_rx(struct net_device *dev)
lp->rx_ring[entry].status = cpu_to_le32(R_OWN);
barrier();
}
-
+
/*
** Update entry information
*/
lp->rx_new = (++lp->rx_new) % lp->rxRingSize;
}
-
+
return 0;
}
@@ -1705,20 +1705,20 @@ de4x5_tx(struct net_device *dev)
u_long iobase = dev->base_addr;
int entry;
s32 status;
-
+
for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
status = (s32)le32_to_cpu(lp->tx_ring[entry].status);
if (status < 0) { /* Buffer not sent yet */
break;
} else if (status != 0x7fffffff) { /* Not setup frame */
if (status & TD_ES) { /* An error happened */
- lp->stats.tx_errors++;
+ lp->stats.tx_errors++;
if (status & TD_NC) lp->stats.tx_carrier_errors++;
if (status & TD_LC) lp->stats.tx_window_errors++;
if (status & TD_UF) lp->stats.tx_fifo_errors++;
if (status & TD_EC) lp->pktStats.excessive_collisions++;
if (status & TD_DE) lp->stats.tx_aborted_errors++;
-
+
if (TX_PKT_PENDING) {
outl(POLL_DEMAND, DE4X5_TPD);/* Restart a stalled TX */
}
@@ -1727,14 +1727,14 @@ de4x5_tx(struct net_device *dev)
if (lp->tx_enable) lp->linkOK++;
}
/* Update the collision counter */
- lp->stats.collisions += ((status & TD_EC) ? 16 :
+ lp->stats.collisions += ((status & TD_EC) ? 16 :
((status & TD_CC) >> 3));
/* Free the buffer. */
if (lp->tx_skb[entry] != NULL)
de4x5_free_tx_buff(lp, entry);
}
-
+
/* Update all the pointers */
lp->tx_old = (++lp->tx_old) % lp->txRingSize;
}
@@ -1746,7 +1746,7 @@ de4x5_tx(struct net_device *dev)
else
netif_start_queue(dev);
}
-
+
return 0;
}
@@ -1755,9 +1755,9 @@ de4x5_ast(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
int next_tick = DE4X5_AUTOSENSE_MS;
-
+
disable_ast(dev);
-
+
if (lp->useSROM) {
next_tick = srom_autoconf(dev);
} else if (lp->chipset == DC21140) {
@@ -1769,7 +1769,7 @@ de4x5_ast(struct net_device *dev)
}
lp->linkOK = 0;
enable_ast(dev, next_tick);
-
+
return 0;
}
@@ -1792,11 +1792,11 @@ de4x5_txur(struct net_device *dev)
}
outl(omr | OMR_ST | OMR_SR, DE4X5_OMR);
}
-
+
return 0;
}
-static int
+static int
de4x5_rx_ovfc(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -1813,7 +1813,7 @@ de4x5_rx_ovfc(struct net_device *dev)
}
outl(omr, DE4X5_OMR);
-
+
return 0;
}
@@ -1823,22 +1823,22 @@ de4x5_close(struct net_device *dev)
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
s32 imr, omr;
-
+
disable_ast(dev);
netif_stop_queue(dev);
-
+
if (de4x5_debug & DEBUG_CLOSE) {
printk("%s: Shutting down ethercard, status was %8.8x.\n",
dev->name, inl(DE4X5_STS));
}
-
- /*
+
+ /*
** We stop the DE4X5 here... mask interrupts and stop TX & RX
*/
DISABLE_IRQs;
STOP_DE4X5;
-
+
/* Free the associated irq */
free_irq(dev->irq, dev);
lp->state = CLOSED;
@@ -1846,10 +1846,10 @@ de4x5_close(struct net_device *dev)
/* Free any socket buffers */
de4x5_free_rx_buffs(dev);
de4x5_free_tx_buffs(dev);
-
+
/* Put the adapter to sleep to save power */
yawn(dev, SLEEP);
-
+
return 0;
}
@@ -1858,9 +1858,9 @@ de4x5_get_stats(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
-
+
lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
-
+
return &lp->stats;
}
@@ -1886,7 +1886,7 @@ de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
(*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) {
lp->pktStats.unicast++;
}
-
+
lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
if (lp->pktStats.bins[0] == 0) { /* Reset counters */
memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
@@ -1937,11 +1937,11 @@ set_multicast_list(struct net_device *dev)
omr = inl(DE4X5_OMR);
omr |= OMR_PR;
outl(omr, DE4X5_OMR);
- } else {
+ } else {
SetMulticastFilter(dev);
- load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
+ load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
SETUP_FRAME_LEN, (struct sk_buff *)1);
-
+
lp->tx_new = (++lp->tx_new) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
dev->trans_start = jiffies;
@@ -1969,20 +1969,20 @@ SetMulticastFilter(struct net_device *dev)
omr = inl(DE4X5_OMR);
omr &= ~(OMR_PR | OMR_PM);
pa = build_setup_frame(dev, ALL); /* Build the basic frame */
-
+
if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 14)) {
omr |= OMR_PM; /* Pass all multicasts */
} else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
for (i=0;i<dev->mc_count;i++) { /* for each address in the list */
addrs=dmi->dmi_addr;
dmi=dmi->next;
- if ((*addrs & 0x01) == 1) { /* multicast address? */
+ if ((*addrs & 0x01) == 1) { /* multicast address? */
crc = ether_crc_le(ETH_ALEN, addrs);
hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */
-
+
byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */
-
+
byte <<= 1; /* calc offset into setup frame */
if (byte & 0x02) {
byte -= 1;
@@ -1994,14 +1994,14 @@ SetMulticastFilter(struct net_device *dev)
for (j=0; j<dev->mc_count; j++) {
addrs=dmi->dmi_addr;
dmi=dmi->next;
- for (i=0; i<ETH_ALEN; i++) {
+ for (i=0; i<ETH_ALEN; i++) {
*(pa + (i&1)) = *addrs++;
if (i & 0x01) pa += 4;
}
}
}
outl(omr, DE4X5_OMR);
-
+
return;
}
@@ -2031,18 +2031,18 @@ static int __init de4x5_eisa_probe (struct device *gendev)
status = -EBUSY;
goto release_reg_1;
}
-
+
if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
status = -ENOMEM;
goto release_reg_2;
}
lp = netdev_priv(dev);
-
+
cfid = (u32) inl(PCI_CFID);
lp->cfrv = (u_short) inl(PCI_CFRV);
device = (cfid >> 8) & 0x00ffff00;
vendor = (u_short) cfid;
-
+
/* Read the EISA Configuration Registers */
regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT);
#ifdef CONFIG_ALPHA
@@ -2050,7 +2050,7 @@ static int __init de4x5_eisa_probe (struct device *gendev)
* care about the EISA configuration, and thus doesn't
* configure the PLX bridge properly. Oh well... Simply mimic
* the EISA config file to sort it out. */
-
+
/* EISA REG1: Assert DecChip 21040 HW Reset */
outb (ER1_IAM | 1, EISA_REG1);
mdelay (1);
@@ -2061,12 +2061,12 @@ static int __init de4x5_eisa_probe (struct device *gendev)
/* EISA REG3: R/W Burst Transfer Enable */
outb (ER3_BWE | ER3_BRE, EISA_REG3);
-
+
/* 32_bit slave/master, Preempt Time=23 bclks, Unlatched Interrupt */
outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0);
#endif
irq = de4x5_irq[(regval >> 1) & 0x03];
-
+
if (is_DC2114x) {
device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
}
@@ -2077,7 +2077,7 @@ static int __init de4x5_eisa_probe (struct device *gendev)
outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
outl(0x00006000, PCI_CFLT);
outl(iobase, PCI_CBIO);
-
+
DevicePresent(dev, EISA_APROM);
dev->irq = irq;
@@ -2102,7 +2102,7 @@ static int __devexit de4x5_eisa_remove (struct device *device)
dev = device->driver_data;
iobase = dev->base_addr;
-
+
unregister_netdev (dev);
free_netdev (dev);
release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
@@ -2131,11 +2131,11 @@ MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
/*
** This function searches the current bus (which is >0) for a DECchip with an
-** SROM, so that in multiport cards that have one SROM shared between multiple
+** SROM, so that in multiport cards that have one SROM shared between multiple
** DECchips, we can find the base SROM irrespective of the BIOS scan direction.
** For single port cards this is a time waster...
*/
-static void __devinit
+static void __devinit
srom_search(struct net_device *dev, struct pci_dev *pdev)
{
u_char pb;
@@ -2163,7 +2163,7 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
/* Set the device number information */
lp->device = PCI_SLOT(this_dev->devfn);
lp->bus_num = pb;
-
+
/* Set the chipset information */
if (is_DC2114x) {
device = ((cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
@@ -2176,7 +2176,7 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
/* Fetch the IRQ to be used */
irq = this_dev->irq;
if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;
-
+
/* Check if I/O accesses are enabled */
pci_read_config_word(this_dev, PCI_COMMAND, &status);
if (!(status & PCI_COMMAND_IO)) continue;
@@ -2254,7 +2254,7 @@ static int __devinit de4x5_pci_probe (struct pci_dev *pdev,
lp = netdev_priv(dev);
lp->bus = PCI;
lp->bus_num = 0;
-
+
/* Search for an SROM on this bus */
if (lp->bus_num != pb) {
lp->bus_num = pb;
@@ -2267,7 +2267,7 @@ static int __devinit de4x5_pci_probe (struct pci_dev *pdev,
/* Set the device number information */
lp->device = dev_num;
lp->bus_num = pb;
-
+
/* Set the chipset information */
if (is_DC2114x) {
device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
@@ -2283,7 +2283,7 @@ static int __devinit de4x5_pci_probe (struct pci_dev *pdev,
error = -ENODEV;
goto free_dev;
}
-
+
/* Check if I/O accesses and Bus Mastering are enabled */
pci_read_config_word(pdev, PCI_COMMAND, &status);
#ifdef __powerpc__
@@ -2322,7 +2322,7 @@ static int __devinit de4x5_pci_probe (struct pci_dev *pdev,
}
dev->irq = irq;
-
+
if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) {
goto release;
}
@@ -2377,7 +2377,7 @@ static struct pci_driver de4x5_pci_driver = {
** Auto configure the media here rather than setting the port at compile
** time. This routine is called by de4x5_init() and when a loss of media is
** detected (excessive collisions, loss of carrier, no carrier or link fail
-** [TP] or no recent receive activity) to check whether the user has been
+** [TP] or no recent receive activity) to check whether the user has been
** sneaky and changed the port on us.
*/
static int
@@ -2405,7 +2405,7 @@ autoconf_media(struct net_device *dev)
}
enable_ast(dev, next_tick);
-
+
return (lp->media);
}
@@ -2428,7 +2428,7 @@ dc21040_autoconf(struct net_device *dev)
u_long iobase = dev->base_addr;
int next_tick = DE4X5_AUTOSENSE_MS;
s32 imr;
-
+
switch (lp->media) {
case INIT:
DISABLE_IRQs;
@@ -2447,36 +2447,36 @@ dc21040_autoconf(struct net_device *dev)
lp->local_state = 0;
next_tick = dc21040_autoconf(dev);
break;
-
+
case TP:
- next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
+ next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
TP_SUSPECT, test_tp);
break;
-
+
case TP_SUSPECT:
next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
break;
-
+
case BNC:
case AUI:
case BNC_AUI:
- next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
+ next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
BNC_AUI_SUSPECT, ping_media);
break;
-
+
case BNC_AUI_SUSPECT:
next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
break;
-
+
case EXT_SIA:
- next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
+ next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
NC, EXT_SIA_SUSPECT, ping_media);
break;
-
+
case EXT_SIA_SUSPECT:
next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
break;
-
+
case NC:
/* default to TP for all */
reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
@@ -2488,13 +2488,13 @@ dc21040_autoconf(struct net_device *dev)
lp->tx_enable = NO;
break;
}
-
+
return next_tick;
}
static int
dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout,
- int next_state, int suspect_state,
+ int next_state, int suspect_state,
int (*fn)(struct net_device *, int))
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -2507,7 +2507,7 @@ dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeo
lp->local_state++;
next_tick = 500;
break;
-
+
case 1:
if (!lp->tx_enable) {
linkBad = fn(dev, timeout);
@@ -2527,7 +2527,7 @@ dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeo
}
break;
}
-
+
return next_tick;
}
@@ -2582,7 +2582,7 @@ dc21041_autoconf(struct net_device *dev)
u_long iobase = dev->base_addr;
s32 sts, irqs, irq_mask, imr, omr;
int next_tick = DE4X5_AUTOSENSE_MS;
-
+
switch (lp->media) {
case INIT:
DISABLE_IRQs;
@@ -2603,7 +2603,7 @@ dc21041_autoconf(struct net_device *dev)
lp->local_state = 0;
next_tick = dc21041_autoconf(dev);
break;
-
+
case TP_NW:
if (lp->timeout < 0) {
omr = inl(DE4X5_OMR);/* Set up full duplex for the autonegotiate */
@@ -2623,7 +2623,7 @@ dc21041_autoconf(struct net_device *dev)
next_tick = dc21041_autoconf(dev);
}
break;
-
+
case ANS:
if (!lp->tx_enable) {
irqs = STS_LNP;
@@ -2645,11 +2645,11 @@ dc21041_autoconf(struct net_device *dev)
next_tick = 3000;
}
break;
-
+
case ANS_SUSPECT:
next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
break;
-
+
case TP:
if (!lp->tx_enable) {
if (lp->timeout < 0) {
@@ -2679,11 +2679,11 @@ dc21041_autoconf(struct net_device *dev)
next_tick = 3000;
}
break;
-
+
case TP_SUSPECT:
next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
break;
-
+
case AUI:
if (!lp->tx_enable) {
if (lp->timeout < 0) {
@@ -2709,11 +2709,11 @@ dc21041_autoconf(struct net_device *dev)
next_tick = 3000;
}
break;
-
+
case AUI_SUSPECT:
next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
break;
-
+
case BNC:
switch (lp->local_state) {
case 0:
@@ -2731,7 +2731,7 @@ dc21041_autoconf(struct net_device *dev)
next_tick = dc21041_autoconf(dev);
}
break;
-
+
case 1:
if (!lp->tx_enable) {
if ((sts = ping_media(dev, 3000)) < 0) {
@@ -2751,11 +2751,11 @@ dc21041_autoconf(struct net_device *dev)
break;
}
break;
-
+
case BNC_SUSPECT:
next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
break;
-
+
case NC:
omr = inl(DE4X5_OMR); /* Set up full duplex for the autonegotiate */
outl(omr | OMR_FDX, DE4X5_OMR);
@@ -2768,7 +2768,7 @@ dc21041_autoconf(struct net_device *dev)
lp->tx_enable = NO;
break;
}
-
+
return next_tick;
}
@@ -2784,9 +2784,9 @@ dc21140m_autoconf(struct net_device *dev)
int ana, anlpa, cap, cr, slnk, sr;
int next_tick = DE4X5_AUTOSENSE_MS;
u_long imr, omr, iobase = dev->base_addr;
-
+
switch(lp->media) {
- case INIT:
+ case INIT:
if (lp->timeout < 0) {
DISABLE_IRQs;
lp->tx_enable = FALSE;
@@ -2813,7 +2813,7 @@ dc21140m_autoconf(struct net_device *dev)
lp->media = _100Mb;
} else if (lp->autosense == _10Mb) {
lp->media = _10Mb;
- } else if ((lp->autosense == AUTO) &&
+ } else if ((lp->autosense == AUTO) &&
((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
@@ -2831,7 +2831,7 @@ dc21140m_autoconf(struct net_device *dev)
next_tick = dc21140m_autoconf(dev);
}
break;
-
+
case ANS:
switch (lp->local_state) {
case 0:
@@ -2851,7 +2851,7 @@ dc21140m_autoconf(struct net_device *dev)
next_tick = dc21140m_autoconf(dev);
}
break;
-
+
case 1:
if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) {
next_tick = sr & ~TIMER_CB;
@@ -2862,7 +2862,7 @@ dc21140m_autoconf(struct net_device *dev)
lp->tmp = MII_SR_ASSC;
anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- if (!(anlpa & MII_ANLPA_RF) &&
+ if (!(anlpa & MII_ANLPA_RF) &&
(cap = anlpa & MII_ANLPA_TAF & ana)) {
if (cap & MII_ANA_100M) {
lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE);
@@ -2879,10 +2879,10 @@ dc21140m_autoconf(struct net_device *dev)
break;
}
break;
-
+
case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
if (lp->timeout < 0) {
- lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
+ lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
(~gep_rd(dev) & GEP_LNP));
SET_100Mb_PDET;
}
@@ -2899,7 +2899,7 @@ dc21140m_autoconf(struct net_device *dev)
next_tick = dc21140m_autoconf(dev);
}
break;
-
+
case _100Mb: /* Set 100Mb/s */
next_tick = 3000;
if (!lp->tx_enable) {
@@ -2933,7 +2933,7 @@ dc21140m_autoconf(struct net_device *dev)
}
}
break;
-
+
case NC:
if (lp->media != lp->c_media) {
de4x5_dbg_media(dev);
@@ -2943,7 +2943,7 @@ dc21140m_autoconf(struct net_device *dev)
lp->tx_enable = FALSE;
break;
}
-
+
return next_tick;
}
@@ -3002,7 +3002,7 @@ dc2114x_autoconf(struct net_device *dev)
lp->media = AUI;
} else {
lp->media = SPD_DET;
- if ((lp->infoblock_media == ANS) &&
+ if ((lp->infoblock_media == ANS) &&
((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
@@ -3014,7 +3014,7 @@ dc2114x_autoconf(struct net_device *dev)
next_tick = dc2114x_autoconf(dev);
}
break;
-
+
case ANS:
switch (lp->local_state) {
case 0:
@@ -3034,7 +3034,7 @@ dc2114x_autoconf(struct net_device *dev)
next_tick = dc2114x_autoconf(dev);
}
break;
-
+
case 1:
if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) {
next_tick = sr & ~TIMER_CB;
@@ -3045,7 +3045,7 @@ dc2114x_autoconf(struct net_device *dev)
lp->tmp = MII_SR_ASSC;
anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- if (!(anlpa & MII_ANLPA_RF) &&
+ if (!(anlpa & MII_ANLPA_RF) &&
(cap = anlpa & MII_ANLPA_TAF & ana)) {
if (cap & MII_ANA_100M) {
lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE);
@@ -3087,11 +3087,11 @@ dc2114x_autoconf(struct net_device *dev)
next_tick = 3000;
}
break;
-
+
case AUI_SUSPECT:
next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf);
break;
-
+
case BNC:
switch (lp->local_state) {
case 0:
@@ -3109,7 +3109,7 @@ dc2114x_autoconf(struct net_device *dev)
next_tick = dc2114x_autoconf(dev);
}
break;
-
+
case 1:
if (!lp->tx_enable) {
if ((sts = ping_media(dev, 3000)) < 0) {
@@ -3130,11 +3130,11 @@ dc2114x_autoconf(struct net_device *dev)
break;
}
break;
-
+
case BNC_SUSPECT:
next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf);
break;
-
+
case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
if (srom_map_media(dev) < 0) {
lp->tcount++;
@@ -3161,7 +3161,7 @@ dc2114x_autoconf(struct net_device *dev)
next_tick = dc2114x_autoconf(dev);
} else if (((lp->media == _100Mb) && is_100_up(dev)) ||
(((lp->media == _10Mb) || (lp->media == TP) ||
- (lp->media == BNC) || (lp->media == AUI)) &&
+ (lp->media == BNC) || (lp->media == AUI)) &&
is_10_up(dev))) {
next_tick = dc2114x_autoconf(dev);
} else {
@@ -3169,7 +3169,7 @@ dc2114x_autoconf(struct net_device *dev)
lp->media = INIT;
}
break;
-
+
case _10Mb:
next_tick = 3000;
if (!lp->tx_enable) {
@@ -3208,7 +3208,7 @@ printk("Huh?: media:%02x\n", lp->media);
lp->media = INIT;
break;
}
-
+
return next_tick;
}
@@ -3231,7 +3231,7 @@ srom_map_media(struct net_device *dev)
struct de4x5_private *lp = netdev_priv(dev);
lp->fdx = 0;
- if (lp->infoblock_media == lp->media)
+ if (lp->infoblock_media == lp->media)
return 0;
switch(lp->infoblock_media) {
@@ -3270,7 +3270,7 @@ srom_map_media(struct net_device *dev)
case SROM_100BASEFF:
if (!lp->params.fdx) return -1;
lp->fdx = TRUE;
- case SROM_100BASEF:
+ case SROM_100BASEF:
if (lp->params.fdx && !lp->fdx) return -1;
lp->media = _100Mb;
break;
@@ -3280,8 +3280,8 @@ srom_map_media(struct net_device *dev)
lp->fdx = lp->params.fdx;
break;
- default:
- printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
+ default:
+ printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
lp->infoblock_media);
return -1;
break;
@@ -3359,7 +3359,7 @@ test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14,
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
s32 sts, csr12;
-
+
if (lp->timeout < 0) {
lp->timeout = msec/100;
if (!lp->useSROM) { /* Already done if by SROM, else dc2104[01] */
@@ -3372,22 +3372,22 @@ test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14,
/* clear all pending interrupts */
sts = inl(DE4X5_STS);
outl(sts, DE4X5_STS);
-
+
/* clear csr12 NRA and SRA bits */
if ((lp->chipset == DC21041) || lp->useSROM) {
csr12 = inl(DE4X5_SISR);
outl(csr12, DE4X5_SISR);
}
}
-
+
sts = inl(DE4X5_STS) & ~TIMER_CB;
-
+
if (!(sts & irqs) && --lp->timeout) {
sts = 100 | TIMER_CB;
} else {
lp->timeout = -1;
}
-
+
return sts;
}
@@ -3397,11 +3397,11 @@ test_tp(struct net_device *dev, s32 msec)
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
int sisr;
-
+
if (lp->timeout < 0) {
lp->timeout = msec/100;
}
-
+
sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);
if (sisr && --lp->timeout) {
@@ -3409,7 +3409,7 @@ test_tp(struct net_device *dev, s32 msec)
} else {
lp->timeout = -1;
}
-
+
return sisr;
}
@@ -3436,7 +3436,7 @@ test_for_100Mb(struct net_device *dev, int msec)
lp->timeout = msec/SAMPLE_INTERVAL;
}
}
-
+
if (lp->phy[lp->active].id || lp->useSROM) {
gep = is_100_up(dev) | is_spd_100(dev);
} else {
@@ -3447,7 +3447,7 @@ test_for_100Mb(struct net_device *dev, int msec)
} else {
lp->timeout = -1;
}
-
+
return gep;
}
@@ -3459,13 +3459,13 @@ wait_for_link(struct net_device *dev)
if (lp->timeout < 0) {
lp->timeout = 1;
}
-
+
if (lp->timeout--) {
return TIMER_CB;
} else {
lp->timeout = -1;
}
-
+
return 0;
}
@@ -3479,21 +3479,21 @@ test_mii_reg(struct net_device *dev, int reg, int mask, int pol, long msec)
struct de4x5_private *lp = netdev_priv(dev);
int test;
u_long iobase = dev->base_addr;
-
+
if (lp->timeout < 0) {
lp->timeout = msec/100;
}
-
+
if (pol) pol = ~0;
reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
test = (reg ^ pol) & mask;
-
+
if (test && --lp->timeout) {
reg = 100 | TIMER_CB;
} else {
lp->timeout = -1;
}
-
+
return reg;
}
@@ -3503,7 +3503,7 @@ is_spd_100(struct net_device *dev)
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
int spd;
-
+
if (lp->useMII) {
spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
spd = ~(spd ^ lp->phy[lp->active].spd.value);
@@ -3517,7 +3517,7 @@ is_spd_100(struct net_device *dev)
spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
(lp->linkOK & ~lp->asBitValid);
}
-
+
return spd;
}
@@ -3526,7 +3526,7 @@ is_100_up(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
-
+
if (lp->useMII) {
/* Double read for sticky bits & temporary drops */
mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
@@ -3547,7 +3547,7 @@ is_10_up(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
-
+
if (lp->useMII) {
/* Double read for sticky bits & temporary drops */
mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
@@ -3570,7 +3570,7 @@ is_anc_capable(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
-
+
if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII));
} else if ((lp->chipset & ~0x00ff) == DC2114x) {
@@ -3590,24 +3590,24 @@ ping_media(struct net_device *dev, int msec)
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
int sisr;
-
+
if (lp->timeout < 0) {
lp->timeout = msec/100;
-
+
lp->tmp = lp->tx_new; /* Remember the ring position */
load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1);
lp->tx_new = (++lp->tx_new) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD);
}
-
+
sisr = inl(DE4X5_SISR);
- if ((!(sisr & SISR_NCR)) &&
- ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
+ if ((!(sisr & SISR_NCR)) &&
+ ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
(--lp->timeout)) {
sisr = 100 | TIMER_CB;
} else {
- if ((!(sisr & SISR_NCR)) &&
+ if ((!(sisr & SISR_NCR)) &&
!(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) &&
lp->timeout) {
sisr = 0;
@@ -3616,7 +3616,7 @@ ping_media(struct net_device *dev, int msec)
}
lp->timeout = -1;
}
-
+
return sisr;
}
@@ -3668,7 +3668,7 @@ de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
} else { /* Linear buffer */
memcpy(skb_put(p,len),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,len);
}
-
+
return p;
#endif
}
@@ -3751,23 +3751,23 @@ de4x5_rst_desc_ring(struct net_device *dev)
outl(lp->dma_rings, DE4X5_RRBA);
outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
DE4X5_TRBA);
-
+
lp->rx_new = lp->rx_old = 0;
lp->tx_new = lp->tx_old = 0;
-
+
for (i = 0; i < lp->rxRingSize; i++) {
lp->rx_ring[i].status = cpu_to_le32(R_OWN);
}
-
+
for (i = 0; i < lp->txRingSize; i++) {
lp->tx_ring[i].status = cpu_to_le32(0);
}
-
+
barrier();
lp->cache.save_cnt--;
START_DE4X5;
}
-
+
return;
}
@@ -3792,7 +3792,7 @@ de4x5_cache_state(struct net_device *dev, int flag)
gep_wr(lp->cache.gepc, dev);
gep_wr(lp->cache.gep, dev);
} else {
- reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
+ reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
lp->cache.csr15);
}
break;
@@ -3854,25 +3854,25 @@ test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec)
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
s32 sts, ans;
-
+
if (lp->timeout < 0) {
lp->timeout = msec/100;
outl(irq_mask, DE4X5_IMR);
-
+
/* clear all pending interrupts */
sts = inl(DE4X5_STS);
outl(sts, DE4X5_STS);
}
-
+
ans = inl(DE4X5_SISR) & SISR_ANS;
sts = inl(DE4X5_STS) & ~TIMER_CB;
-
+
if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
sts = 100 | TIMER_CB;
} else {
lp->timeout = -1;
}
-
+
return sts;
}
@@ -3882,7 +3882,7 @@ de4x5_setup_intr(struct net_device *dev)
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
s32 imr, sts;
-
+
if (inl(DE4X5_OMR) & OMR_SR) { /* Only unmask if TX/RX is enabled */
imr = 0;
UNMASK_IRQs;
@@ -3890,7 +3890,7 @@ de4x5_setup_intr(struct net_device *dev)
outl(sts, DE4X5_STS);
ENABLE_IRQs;
}
-
+
return;
}
@@ -3936,17 +3936,17 @@ create_packet(struct net_device *dev, char *frame, int len)
{
int i;
char *buf = frame;
-
+
for (i=0; i<ETH_ALEN; i++) { /* Use this source address */
*buf++ = dev->dev_addr[i];
}
for (i=0; i<ETH_ALEN; i++) { /* Use this destination address */
*buf++ = dev->dev_addr[i];
}
-
+
*buf++ = 0; /* Packet length (2 bytes) */
*buf++ = 1;
-
+
return;
}
@@ -3978,7 +3978,7 @@ static int
PCI_signature(char *name, struct de4x5_private *lp)
{
int i, status = 0, siglen = sizeof(de4x5_signatures)/sizeof(c_char *);
-
+
if (lp->chipset == DC21040) {
strcpy(name, "DE434/5");
return status;
@@ -4007,7 +4007,7 @@ PCI_signature(char *name, struct de4x5_private *lp)
} else if ((lp->chipset & ~0x00ff) == DC2114x) {
lp->useSROM = TRUE;
}
-
+
return status;
}
@@ -4024,7 +4024,7 @@ DevicePresent(struct net_device *dev, u_long aprom_addr)
{
int i, j=0;
struct de4x5_private *lp = netdev_priv(dev);
-
+
if (lp->chipset == DC21040) {
if (lp->bus == EISA) {
enet_addr_rst(aprom_addr); /* Reset Ethernet Address ROM Pointer */
@@ -4049,7 +4049,7 @@ DevicePresent(struct net_device *dev, u_long aprom_addr)
}
de4x5_dbg_srom((struct de4x5_srom *)&lp->srom);
}
-
+
return;
}
@@ -4071,11 +4071,11 @@ enet_addr_rst(u_long aprom_addr)
short sigLength=0;
s8 data;
int i, j;
-
+
dev.llsig.a = ETH_PROM_SIG;
dev.llsig.b = ETH_PROM_SIG;
sigLength = sizeof(u32) << 1;
-
+
for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
data = inb(aprom_addr);
if (dev.Sig[j] == data) { /* track signature */
@@ -4088,7 +4088,7 @@ enet_addr_rst(u_long aprom_addr)
}
}
}
-
+
return;
}
@@ -4111,7 +4111,7 @@ get_hw_addr(struct net_device *dev)
for (i=0,k=0,j=0;j<3;j++) {
k <<= 1;
if (k > 0xffff) k-=0xffff;
-
+
if (lp->bus == PCI) {
if (lp->chipset == DC21040) {
while ((tmp = inl(DE4X5_APROM)) < 0);
@@ -4133,11 +4133,11 @@ get_hw_addr(struct net_device *dev)
k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
dev->dev_addr[i++] = (u_char) tmp;
}
-
+
if (k > 0xffff) k-=0xffff;
}
if (k == 0xffff) k=0;
-
+
if (lp->bus == PCI) {
if (lp->chipset == DC21040) {
while ((tmp = inl(DE4X5_APROM)) < 0);
@@ -4156,7 +4156,7 @@ get_hw_addr(struct net_device *dev)
srom_repair(dev, broken);
#ifdef CONFIG_PPC_MULTIPLATFORM
- /*
+ /*
** If the address starts with 00 a0, we have to bit-reverse
** each byte of the address.
*/
@@ -4245,7 +4245,7 @@ test_bad_enet(struct net_device *dev, int status)
for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i];
if ((tmp == 0) || (tmp == 0x5fa)) {
- if ((lp->chipset == last.chipset) &&
+ if ((lp->chipset == last.chipset) &&
(lp->bus_num == last.bus) && (lp->bus_num > 0)) {
for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i];
for (i=ETH_ALEN-1; i>2; --i) {
@@ -4275,7 +4275,7 @@ test_bad_enet(struct net_device *dev, int status)
static int
an_exception(struct de4x5_private *lp)
{
- if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
+ if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
(*(u_short *)lp->srom.sub_system_id == 0x95e0)) {
return -1;
}
@@ -4290,11 +4290,11 @@ static short
srom_rd(u_long addr, u_char offset)
{
sendto_srom(SROM_RD | SROM_SR, addr);
-
+
srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
-
+
return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
}
@@ -4304,7 +4304,7 @@ srom_latch(u_int command, u_long addr)
sendto_srom(command, addr);
sendto_srom(command | DT_CLK, addr);
sendto_srom(command, addr);
-
+
return;
}
@@ -4314,7 +4314,7 @@ srom_command(u_int command, u_long addr)
srom_latch(command, addr);
srom_latch(command, addr);
srom_latch((command & 0x0000ff00) | DT_CS, addr);
-
+
return;
}
@@ -4322,15 +4322,15 @@ static void
srom_address(u_int command, u_long addr, u_char offset)
{
int i, a;
-
+
a = offset << 2;
for (i=0; i<6; i++, a <<= 1) {
srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr);
}
udelay(1);
-
+
i = (getfrom_srom(addr) >> 3) & 0x01;
-
+
return;
}
@@ -4340,17 +4340,17 @@ srom_data(u_int command, u_long addr)
int i;
short word = 0;
s32 tmp;
-
+
for (i=0; i<16; i++) {
sendto_srom(command | DT_CLK, addr);
tmp = getfrom_srom(addr);
sendto_srom(command, addr);
-
+
word = (word << 1) | ((tmp >> 3) & 0x01);
}
-
+
sendto_srom(command & 0x0000ff00, addr);
-
+
return word;
}
@@ -4359,13 +4359,13 @@ static void
srom_busy(u_int command, u_long addr)
{
sendto_srom((command & 0x0000ff00) | DT_CS, addr);
-
+
while (!((getfrom_srom(addr) >> 3) & 0x01)) {
mdelay(1);
}
-
+
sendto_srom(command & 0x0000ff00, addr);
-
+
return;
}
*/
@@ -4375,7 +4375,7 @@ sendto_srom(u_int command, u_long addr)
{
outl(command, addr);
udelay(1);
-
+
return;
}
@@ -4383,10 +4383,10 @@ static int
getfrom_srom(u_long addr)
{
s32 tmp;
-
+
tmp = inl(addr);
udelay(1);
-
+
return tmp;
}
@@ -4403,7 +4403,7 @@ srom_infoleaf_info(struct net_device *dev)
}
if (i == INFOLEAF_SIZE) {
lp->useSROM = FALSE;
- printk("%s: Cannot find correct chipset for SROM decoding!\n",
+ printk("%s: Cannot find correct chipset for SROM decoding!\n",
dev->name);
return -ENXIO;
}
@@ -4420,7 +4420,7 @@ srom_infoleaf_info(struct net_device *dev)
}
if (i == 0) {
lp->useSROM = FALSE;
- printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
+ printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
dev->name, lp->device);
return -ENXIO;
}
@@ -4494,9 +4494,9 @@ srom_exec(struct net_device *dev, u_char *p)
if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return;
if (lp->chipset != DC21140) RESET_SIA;
-
+
while (count--) {
- gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
+ gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
*p++ : TWIDDLE(w++)), dev);
mdelay(2); /* 2ms per action */
}
@@ -4514,13 +4514,13 @@ srom_exec(struct net_device *dev, u_char *p)
** unless I implement the DC21041 SROM functions. There's no need
** since the existing code will be satisfactory for all boards.
*/
-static int
+static int
dc21041_infoleaf(struct net_device *dev)
{
return DE4X5_AUTOSENSE_MS;
}
-static int
+static int
dc21140_infoleaf(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -4558,7 +4558,7 @@ dc21140_infoleaf(struct net_device *dev)
return next_tick & ~TIMER_CB;
}
-static int
+static int
dc21142_infoleaf(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -4593,7 +4593,7 @@ dc21142_infoleaf(struct net_device *dev)
return next_tick & ~TIMER_CB;
}
-static int
+static int
dc21143_infoleaf(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -4631,7 +4631,7 @@ dc21143_infoleaf(struct net_device *dev)
** The compact infoblock is only designed for DC21140[A] chips, so
** we'll reuse the dc21140m_autoconf function. Non MII media only.
*/
-static int
+static int
compact_infoblock(struct net_device *dev, u_char count, u_char *p)
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -4671,7 +4671,7 @@ compact_infoblock(struct net_device *dev, u_char count, u_char *p)
/*
** This block describes non MII media for the DC21140[A] only.
*/
-static int
+static int
type0_infoblock(struct net_device *dev, u_char count, u_char *p)
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -4711,7 +4711,7 @@ type0_infoblock(struct net_device *dev, u_char count, u_char *p)
/* These functions are under construction! */
-static int
+static int
type1_infoblock(struct net_device *dev, u_char count, u_char *p)
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -4750,7 +4750,7 @@ type1_infoblock(struct net_device *dev, u_char count, u_char *p)
return dc21140m_autoconf(dev);
}
-static int
+static int
type2_infoblock(struct net_device *dev, u_char count, u_char *p)
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -4791,7 +4791,7 @@ type2_infoblock(struct net_device *dev, u_char count, u_char *p)
return dc2114x_autoconf(dev);
}
-static int
+static int
type3_infoblock(struct net_device *dev, u_char count, u_char *p)
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -4833,7 +4833,7 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p)
return dc2114x_autoconf(dev);
}
-static int
+static int
type4_infoblock(struct net_device *dev, u_char count, u_char *p)
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -4878,7 +4878,7 @@ type4_infoblock(struct net_device *dev, u_char count, u_char *p)
** This block type provides information for resetting external devices
** (chips) through the General Purpose Register.
*/
-static int
+static int
type5_infoblock(struct net_device *dev, u_char count, u_char *p)
{
struct de4x5_private *lp = netdev_priv(dev);
@@ -4916,7 +4916,7 @@ mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
mii_address(phyreg, ioaddr); /* PHY Register to read */
mii_ta(MII_STRD, ioaddr); /* Turn around time - 2 MDC */
-
+
return mii_rdata(ioaddr); /* Read data */
}
@@ -4931,7 +4931,7 @@ mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */
data = mii_swap(data, 16); /* Swap data bit ordering */
mii_wdata(data, 16, ioaddr); /* Write data */
-
+
return;
}
@@ -4940,12 +4940,12 @@ mii_rdata(u_long ioaddr)
{
int i;
s32 tmp = 0;
-
+
for (i=0; i<16; i++) {
tmp <<= 1;
tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
}
-
+
return tmp;
}
@@ -4953,12 +4953,12 @@ static void
mii_wdata(int data, int len, u_long ioaddr)
{
int i;
-
+
for (i=0; i<len; i++) {
sendto_mii(MII_MWR | MII_WR, data, ioaddr);
data >>= 1;
}
-
+
return;
}
@@ -4966,13 +4966,13 @@ static void
mii_address(u_char addr, u_long ioaddr)
{
int i;
-
+
addr = mii_swap(addr, 5);
for (i=0; i<5; i++) {
sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
addr >>= 1;
}
-
+
return;
}
@@ -4980,12 +4980,12 @@ static void
mii_ta(u_long rw, u_long ioaddr)
{
if (rw == MII_STWR) {
- sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
- sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
+ sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
+ sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
} else {
getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */
}
-
+
return;
}
@@ -4993,13 +4993,13 @@ static int
mii_swap(int data, int len)
{
int i, tmp = 0;
-
+
for (i=0; i<len; i++) {
tmp <<= 1;
tmp |= (data & 1);
data >>= 1;
}
-
+
return tmp;
}
@@ -5007,13 +5007,13 @@ static void
sendto_mii(u32 command, int data, u_long ioaddr)
{
u32 j;
-
+
j = (data & 1) << 17;
outl(command | j, ioaddr);
udelay(1);
outl(command | MII_MDC | j, ioaddr);
udelay(1);
-
+
return;
}
@@ -5024,7 +5024,7 @@ getfrom_mii(u32 command, u_long ioaddr)
udelay(1);
outl(command | MII_MDC, ioaddr);
udelay(1);
-
+
return ((inl(ioaddr) >> 19) & 1);
}
@@ -5085,7 +5085,7 @@ mii_get_phy(struct net_device *dev)
u_long iobase = dev->base_addr;
int i, j, k, n, limit=sizeof(phy_info)/sizeof(struct phy_table);
int id;
-
+
lp->active = 0;
lp->useMII = TRUE;
@@ -5094,7 +5094,7 @@ mii_get_phy(struct net_device *dev)
lp->phy[lp->active].addr = i;
if (i==0) n++; /* Count cycles */
while (de4x5_reset_phy(dev)<0) udelay(100);/* Wait for reset */
- id = mii_get_oui(i, DE4X5_MII);
+ id = mii_get_oui(i, DE4X5_MII);
if ((id == 0) || (id == 65535)) continue; /* Valid ID? */
for (j=0; j<limit; j++) { /* Search PHY table */
if (id != phy_info[j].id) continue; /* ID match? */
@@ -5133,7 +5133,7 @@ mii_get_phy(struct net_device *dev)
for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++) { /*For each PHY*/
mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
-
+
de4x5_dbg_mii(dev, k);
}
}
@@ -5148,12 +5148,12 @@ build_setup_frame(struct net_device *dev, int mode)
struct de4x5_private *lp = netdev_priv(dev);
int i;
char *pa = lp->setup_frame;
-
+
/* Initialise the setup frame */
if (mode == ALL) {
memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
}
-
+
if (lp->setup_f == HASH_PERF) {
for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
*(pa + i) = dev->dev_addr[i]; /* Host address */
@@ -5170,7 +5170,7 @@ build_setup_frame(struct net_device *dev, int mode)
if (i & 0x01) pa += 4;
}
}
-
+
return pa; /* Points to the next entry */
}
@@ -5178,7 +5178,7 @@ static void
enable_ast(struct net_device *dev, u32 time_out)
{
timeout(dev, (void *)&de4x5_ast, (u_long)dev, time_out);
-
+
return;
}
@@ -5186,9 +5186,9 @@ static void
disable_ast(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
-
+
del_timer(&lp->timer);
-
+
return;
}
@@ -5207,10 +5207,10 @@ de4x5_switch_mac_port(struct net_device *dev)
omr |= lp->infoblock_csr6;
if (omr & OMR_PS) omr |= OMR_HBD;
outl(omr, DE4X5_OMR);
-
+
/* Soft Reset */
RESET_DE4X5;
-
+
/* Restore the GEP - especially for COMPACT and Type 0 Infoblocks */
if (lp->chipset == DC21140) {
gep_wr(lp->cache.gepc, dev);
@@ -5263,21 +5263,21 @@ timeout(struct net_device *dev, void (*fn)(u_long data), u_long data, u_long mse
{
struct de4x5_private *lp = netdev_priv(dev);
int dt;
-
+
/* First, cancel any pending timer events */
del_timer(&lp->timer);
-
+
/* Convert msec to ticks */
dt = (msec * HZ) / 1000;
if (dt==0) dt=1;
-
+
/* Set up timer */
init_timer(&lp->timer);
lp->timer.expires = jiffies + dt;
lp->timer.function = fn;
lp->timer.data = data;
add_timer(&lp->timer);
-
+
return;
}
@@ -5375,7 +5375,7 @@ de4x5_dbg_open(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
int i;
-
+
if (de4x5_debug & DEBUG_OPEN) {
printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
printk("\tphysical address: ");
@@ -5413,11 +5413,11 @@ de4x5_dbg_open(struct net_device *dev)
}
}
printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
- printk("Ring size: \nRX: %d\nTX: %d\n",
- (short)lp->rxRingSize,
- (short)lp->txRingSize);
+ printk("Ring size: \nRX: %d\nTX: %d\n",
+ (short)lp->rxRingSize,
+ (short)lp->txRingSize);
}
-
+
return;
}
@@ -5426,7 +5426,7 @@ de4x5_dbg_mii(struct net_device *dev, int k)
{
struct de4x5_private *lp = netdev_priv(dev);
u_long iobase = dev->base_addr;
-
+
if (de4x5_debug & DEBUG_MII) {
printk("\nMII device address: %d\n", lp->phy[k].addr);
printk("MII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII));
@@ -5445,7 +5445,7 @@ de4x5_dbg_mii(struct net_device *dev, int k)
printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
}
}
-
+
return;
}
@@ -5453,17 +5453,17 @@ static void
de4x5_dbg_media(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
-
+
if (lp->media != lp->c_media) {
if (de4x5_debug & DEBUG_MEDIA) {
printk("%s: media is %s%s\n", dev->name,
(lp->media == NC ? "unconnected, link down or incompatible connection" :
(lp->media == TP ? "TP" :
(lp->media == ANS ? "TP/Nway" :
- (lp->media == BNC ? "BNC" :
- (lp->media == AUI ? "AUI" :
- (lp->media == BNC_AUI ? "BNC/AUI" :
- (lp->media == EXT_SIA ? "EXT SIA" :
+ (lp->media == BNC ? "BNC" :
+ (lp->media == AUI ? "AUI" :
+ (lp->media == BNC_AUI ? "BNC/AUI" :
+ (lp->media == EXT_SIA ? "EXT SIA" :
(lp->media == _100Mb ? "100Mb/s" :
(lp->media == _10Mb ? "10Mb/s" :
"???"
@@ -5471,7 +5471,7 @@ de4x5_dbg_media(struct net_device *dev)
}
lp->c_media = lp->media;
}
-
+
return;
}
@@ -5554,7 +5554,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
u32 lval[36];
} tmp;
u_long flags = 0;
-
+
switch(ioc->cmd) {
case DE4X5_GET_HWADDR: /* Get the hardware address */
ioc->len = ETH_ALEN;
@@ -5575,7 +5575,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
build_setup_frame(dev, PHYS_ADDR_ONLY);
/* Set up the descriptor and give ownership to the card */
- load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
+ load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
SETUP_FRAME_LEN, (struct sk_buff *)1);
lp->tx_new = (++lp->tx_new) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
@@ -5617,8 +5617,8 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
spin_lock_irqsave(&lp->lock, flags);
memcpy(&statbuf, &lp->pktStats, ioc->len);
spin_unlock_irqrestore(&lp->lock, flags);
- if (copy_to_user(ioc->data, &statbuf, ioc->len))
- return -EFAULT;
+ if (copy_to_user(ioc->data, &statbuf, ioc->len))
+ return -EFAULT;
break;
}
case DE4X5_CLR_STATS: /* Zero out the driver statistics */
@@ -5652,9 +5652,9 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
ioc->len = j;
if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
break;
-
+
#define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */
-/*
+/*
case DE4X5_DUMP:
j = 0;
tmp.addr[j++] = dev->irq;
@@ -5664,7 +5664,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
tmp.addr[j++] = lp->rxRingSize;
tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
-
+
for (i=0;i<lp->rxRingSize-1;i++){
if (i < 3) {
tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
@@ -5677,7 +5677,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
}
tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
-
+
for (i=0;i<lp->rxRingSize-1;i++){
if (i < 3) {
tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4;
@@ -5690,14 +5690,14 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
}
tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4;
-
+
for (i=0;i<lp->rxRingSize;i++){
tmp.lval[j>>2] = le32_to_cpu(lp->rx_ring[i].status); j+=4;
}
for (i=0;i<lp->txRingSize;i++){
tmp.lval[j>>2] = le32_to_cpu(lp->tx_ring[i].status); j+=4;
}
-
+
tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
tmp.lval[j>>2] = inl(DE4X5_TPD); j+=4;
tmp.lval[j>>2] = inl(DE4X5_RPD); j+=4;
@@ -5706,18 +5706,18 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
- tmp.lval[j>>2] = lp->chipset; j+=4;
+ tmp.lval[j>>2] = lp->chipset; j+=4;
if (lp->chipset == DC21140) {
tmp.lval[j>>2] = gep_rd(dev); j+=4;
} else {
tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
+ tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
}
- tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4;
+ tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4;
if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
- tmp.lval[j>>2] = lp->active; j+=4;
+ tmp.lval[j>>2] = lp->active; j+=4;
tmp.lval[j>>2]=mii_rd(MII_CR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
tmp.lval[j>>2]=mii_rd(MII_SR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
tmp.lval[j>>2]=mii_rd(MII_ID0,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
@@ -5734,10 +5734,10 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
tmp.lval[j>>2]=mii_rd(0x14,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
}
}
-
+
tmp.addr[j++] = lp->txRingSize;
tmp.addr[j++] = netif_queue_stopped(dev);
-
+
ioc->len = j;
if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
break;
@@ -5746,7 +5746,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
default:
return -EOPNOTSUPP;
}
-
+
return status;
}
diff --git a/drivers/net/tulip/de4x5.h b/drivers/net/tulip/de4x5.h
index ad37a4074302..57226e5eb8a6 100644
--- a/drivers/net/tulip/de4x5.h
+++ b/drivers/net/tulip/de4x5.h
@@ -38,11 +38,11 @@
/*
** EISA Register Address Map
*/
-#define EISA_ID iobase+0x0c80 /* EISA ID Registers */
-#define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */
-#define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */
-#define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */
-#define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */
+#define EISA_ID iobase+0x0c80 /* EISA ID Registers */
+#define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */
+#define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */
+#define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */
+#define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */
#define EISA_CR iobase+0x0c84 /* EISA Control Register */
#define EISA_REG0 iobase+0x0c88 /* EISA Configuration Register 0 */
#define EISA_REG1 iobase+0x0c89 /* EISA Configuration Register 1 */
@@ -1008,8 +1008,8 @@ struct de4x5_ioctl {
unsigned char __user *data; /* Pointer to the data buffer */
};
-/*
-** Recognised commands for the driver
+/*
+** Recognised commands for the driver
*/
#define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */
#define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 74e9075d9c48..ba5b112093f4 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -50,7 +50,7 @@
forget to unmap PCI mapped skbs.
Alan Cox <alan@redhat.com>
- Added new PCI identifiers provided by Clear Zhang at ALi
+ Added new PCI identifiers provided by Clear Zhang at ALi
for their 1563 ethernet device.
TODO
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
index fbd9ab60b052..5ffbd5b300c0 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/tulip/eeprom.c
@@ -96,11 +96,11 @@ static const char *block_name[] __devinitdata = {
* tulip_build_fake_mediatable - Build a fake mediatable entry.
* @tp: Ptr to the tulip private data.
*
- * Some cards like the 3x5 HSC cards (J3514A) do not have a standard
+ * Some cards like the 3x5 HSC cards (J3514A) do not have a standard
* srom and can not be handled under the fixup routine. These cards
- * still need a valid mediatable entry for correct csr12 setup and
+ * still need a valid mediatable entry for correct csr12 setup and
* mii handling.
- *
+ *
* Since this is currently a parisc-linux specific function, the
* #ifdef __hppa__ should completely optimize this function away for
* non-parisc hardware.
@@ -140,7 +140,7 @@ static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp)
tp->flags |= HAS_PHY_IRQ;
tp->csr12_shadow = -1;
}
-#endif
+#endif
}
void __devinit tulip_parse_eeprom(struct net_device *dev)
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index bb3558164a5b..da4f7593c50f 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -139,22 +139,22 @@ int tulip_poll(struct net_device *dev, int *budget)
}
/* Acknowledge current RX interrupt sources. */
iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
-
-
+
+
/* If we own the next entry, it is a new packet. Send it up. */
while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
s32 status = le32_to_cpu(tp->rx_ring[entry].status);
-
-
+
+
if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
break;
-
+
if (tulip_debug > 5)
printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
dev->name, entry, status);
if (--rx_work_limit < 0)
goto not_done;
-
+
if ((status & 0x38008300) != 0x0300) {
if ((status & 0x38000300) != 0x0300) {
/* Ignore earlier buffers. */
@@ -180,7 +180,7 @@ int tulip_poll(struct net_device *dev, int *budget)
/* Omit the four octet CRC from the length. */
short pkt_len = ((status >> 16) & 0x7ff) - 4;
struct sk_buff *skb;
-
+
#ifndef final_version
if (pkt_len > 1518) {
printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
@@ -213,7 +213,7 @@ int tulip_poll(struct net_device *dev, int *budget)
} else { /* Pass up the skb already on the Rx ring. */
char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
pkt_len);
-
+
#ifndef final_version
if (tp->rx_buffers[entry].mapping !=
le32_to_cpu(tp->rx_ring[entry].buffer1)) {
@@ -225,17 +225,17 @@ int tulip_poll(struct net_device *dev, int *budget)
skb->head, temp);
}
#endif
-
+
pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
-
+
tp->rx_buffers[entry].skb = NULL;
tp->rx_buffers[entry].mapping = 0;
}
skb->protocol = eth_type_trans(skb, dev);
-
+
netif_receive_skb(skb);
-
+
dev->last_rx = jiffies;
tp->stats.rx_packets++;
tp->stats.rx_bytes += pkt_len;
@@ -245,12 +245,12 @@ int tulip_poll(struct net_device *dev, int *budget)
entry = (++tp->cur_rx) % RX_RING_SIZE;
if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
tulip_refill_rx(dev);
-
+
}
-
+
/* New ack strategy... irq does not ack Rx any longer
hopefully this helps */
-
+
/* Really bad things can happen here... If new packet arrives
* and an irq arrives (tx or just due to occasionally unset
* mask), it will be acked by irq handler, but new thread
@@ -259,28 +259,28 @@ int tulip_poll(struct net_device *dev, int *budget)
* tomorrow (night 011029). If it will not fail, we won
* finally: amount of IO did not increase at all. */
} while ((ioread32(tp->base_addr + CSR5) & RxIntr));
-
+
done:
-
+
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
-
+
/* We use this simplistic scheme for IM. It's proven by
real life installations. We can have IM enabled
- continuously but this would cause unnecessary latency.
- Unfortunately we can't use all the NET_RX_* feedback here.
- This would turn on IM for devices that are not contributing
- to backlog congestion with unnecessary latency.
-
+ continuously but this would cause unnecessary latency.
+ Unfortunately we can't use all the NET_RX_* feedback here.
+ This would turn on IM for devices that are not contributing
+ to backlog congestion with unnecessary latency.
+
We monitor the device RX-ring and have:
-
+
HW Interrupt Mitigation either ON or OFF.
-
- ON: More than 1 pkt received (per intr.) OR we are dropping
+
+ ON: More than 1 pkt received (per intr.) OR we are dropping
OFF: Only 1 pkt received
-
+
Note. We only use min and max (0, 15) settings from mit_table */
-
-
+
+
if( tp->flags & HAS_INTR_MITIGATION) {
if( received > 1 ) {
if( ! tp->mit_on ) {
@@ -297,20 +297,20 @@ done:
}
#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
-
+
dev->quota -= received;
*budget -= received;
-
+
tulip_refill_rx(dev);
-
+
/* If RX ring is not full we are out of memory. */
if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
-
+
/* Remove us from polling list and enable RX intr. */
-
+
netif_rx_complete(dev);
iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
-
+
/* The last op happens after poll completion. Which means the following:
* 1. it can race with disabling irqs in irq handler
* 2. it can race with dise/enabling irqs in other poll threads
@@ -321,9 +321,9 @@ done:
* due to races in masking and due to too late acking of already
* processed irqs. But it must not result in losing events.
*/
-
+
return 0;
-
+
not_done:
if (!received) {
@@ -331,29 +331,29 @@ done:
}
dev->quota -= received;
*budget -= received;
-
+
if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
tulip_refill_rx(dev);
-
+
if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
-
+
return 1;
-
-
+
+
oom: /* Executed with RX ints disabled */
-
-
+
+
/* Start timer, stop polling, but do not enable rx interrupts. */
mod_timer(&tp->oom_timer, jiffies+1);
-
+
/* Think: timer_pending() was an explicit signature of bug.
* Timer can be pending now but fired and completed
* before we did netif_rx_complete(). See? We would lose it. */
-
+
/* remove ourselves from the polling list */
netif_rx_complete(dev);
-
+
return 0;
}
@@ -521,9 +521,9 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
/* Let's see whether the interrupt really is for us */
csr5 = ioread32(ioaddr + CSR5);
- if (tp->flags & HAS_PHY_IRQ)
+ if (tp->flags & HAS_PHY_IRQ)
handled = phy_interrupt (dev);
-
+
if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
return IRQ_RETVAL(handled);
@@ -538,17 +538,17 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
/* Mask RX intrs and add the device to poll list. */
iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
netif_rx_schedule(dev);
-
+
if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
break;
}
-
+
/* Acknowledge the interrupt sources we handle here ASAP
the poll function does Rx and RxNoBuf acking */
-
+
iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);
-#else
+#else
/* Acknowledge all of the current interrupt sources ASAP. */
iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);
@@ -559,11 +559,11 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
}
#endif /* CONFIG_TULIP_NAPI */
-
+
if (tulip_debug > 4)
printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
dev->name, csr5, ioread32(ioaddr + CSR5));
-
+
if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
unsigned int dirty_tx;
@@ -737,17 +737,17 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
#ifdef CONFIG_TULIP_NAPI
if (rxd)
csr5 &= ~RxPollInt;
- } while ((csr5 & (TxNoBuf |
- TxDied |
- TxIntr |
+ } while ((csr5 & (TxNoBuf |
+ TxDied |
+ TxIntr |
TimerInt |
/* Abnormal intr. */
- RxDied |
- TxFIFOUnderflow |
- TxJabber |
- TPLnkFail |
+ RxDied |
+ TxFIFOUnderflow |
+ TxJabber |
+ TPLnkFail |
SytemError )) != 0);
-#else
+#else
} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
tulip_refill_rx(dev);
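For readers not familiar with the pre-2.6.24 polling interface that tulip_poll() implements in the hunk above, the sketch below models the budget/quota bookkeeping in plain user-space C. Names such as fake_poll and fake_dev are made up for illustration; this is a simplified model of the contract, not kernel code.

/* Simplified user-space model of the old ->poll(dev, budget) contract
 * used by tulip_poll(): consume at most *budget packets, subtract the
 * number actually handled from both the device quota and *budget, and
 * return 0 when the ring is drained (poll complete) or 1 when more
 * work remains.  Purely illustrative. */
#include <stdio.h>

struct fake_dev {
	int quota;         /* per-device allowance, like dev->quota */
	int ring_pending;  /* packets waiting in the RX ring        */
};

static int fake_poll(struct fake_dev *dev, int *budget)
{
	int rx_work_limit = dev->quota < *budget ? dev->quota : *budget;
	int received = 0;

	while (dev->ring_pending > 0 && rx_work_limit-- > 0) {
		dev->ring_pending--;   /* stands in for netif_receive_skb() */
		received++;
	}

	dev->quota -= received;
	*budget    -= received;

	return dev->ring_pending ? 1 : 0;   /* 1 == not done yet */
}

int main(void)
{
	struct fake_dev dev = { .quota = 64, .ring_pending = 100 };
	int budget = 300;

	while (fake_poll(&dev, &budget))
		dev.quota = 64;        /* the scheduler refills the quota */

	printf("budget left %d, pending %d\n", budget, dev.ring_pending);
	return 0;
}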
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index f53396fe79c9..e9bc2a958c14 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -140,7 +140,7 @@ void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val)
spin_unlock_irqrestore(&tp->mii_lock, flags);
return;
}
-
+
/* Establish sync by sending 32 logic ones. */
for (i = 32; i >= 0; i--) {
iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index 05d2d96f7be2..d25020da6798 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -259,7 +259,7 @@ enum t21143_csr6_bits {
There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE 32
-#define RX_RING_SIZE 128
+#define RX_RING_SIZE 128
#define MEDIA_MASK 31
#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index c67c91251d04..b3cf11d32e24 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1224,7 +1224,7 @@ out:
* Chips that have the MRM/reserved bit quirk and the burst quirk. That
* is the DM910X and the on chip ULi devices
*/
-
+
static int tulip_uli_dm_quirk(struct pci_dev *pdev)
{
if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
@@ -1297,7 +1297,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
*/
/* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
- aligned. Aries might need this too. The Saturn errata are not
+ aligned. Aries might need this too. The Saturn errata are not
pretty reading but thankfully it's an old 486 chipset.
2. The dreaded SiS496 486 chipset. Same workaround as Intel
@@ -1500,7 +1500,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
}
#endif
#ifdef CONFIG_MIPS_COBALT
- if ((pdev->bus->number == 0) &&
+ if ((pdev->bus->number == 0) &&
((PCI_SLOT(pdev->devfn) == 7) ||
(PCI_SLOT(pdev->devfn) == 12))) {
/* Cobalt MAC address in first EEPROM locations. */
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 238e9c72cb3a..8b3a28f53c3d 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -9,7 +9,7 @@
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
*/
#define DRV_NAME "uli526x"
@@ -185,7 +185,7 @@ struct uli526x_board_info {
/* NIC SROM data */
unsigned char srom[128];
- u8 init;
+ u8 init;
};
enum uli526x_offsets {
@@ -258,7 +258,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
struct uli526x_board_info *db; /* board information structure */
struct net_device *dev;
int i, err;
-
+
ULI526X_DBUG(0, "uli526x_init_one()", 0);
if (!printed_version++)
@@ -316,7 +316,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
err = -ENOMEM;
goto err_out_nomem;
}
-
+
db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
db->first_tx_desc_dma = db->desc_pool_dma_ptr;
db->buf_pool_start = db->buf_pool_ptr;
@@ -324,14 +324,14 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
db->chip_id = ent->driver_data;
db->ioaddr = pci_resource_start(pdev, 0);
-
+
db->pdev = pdev;
db->init = 1;
-
+
dev->base_addr = db->ioaddr;
dev->irq = pdev->irq;
pci_set_drvdata(pdev, dev);
-
+
/* Register some necessary functions */
dev->open = &uli526x_open;
dev->hard_start_xmit = &uli526x_start_xmit;
@@ -341,7 +341,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
dev->ethtool_ops = &netdev_ethtool_ops;
spin_lock_init(&db->lock);
-
+
/* read 64 word srom data */
for (i = 0; i < 64; i++)
((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
@@ -374,7 +374,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
goto err_out_res;
printk(KERN_INFO "%s: ULi M%04lx at pci%s,",dev->name,ent->driver_data >> 16,pci_name(pdev));
-
+
for (i = 0; i < 6; i++)
printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
printk(", irq %d.\n", dev->irq);
@@ -389,7 +389,7 @@ err_out_nomem:
if(db->desc_pool_ptr)
pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
db->desc_pool_ptr, db->desc_pool_dma_ptr);
-
+
if(db->buf_pool_ptr != NULL)
pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
db->buf_pool_ptr, db->buf_pool_dma_ptr);
@@ -433,7 +433,7 @@ static int uli526x_open(struct net_device *dev)
{
int ret;
struct uli526x_board_info *db = netdev_priv(dev);
-
+
ULI526X_DBUG(0, "uli526x_open", 0);
ret = request_irq(dev->irq, &uli526x_interrupt, SA_SHIRQ, dev->name, dev);
@@ -454,7 +454,7 @@ static int uli526x_open(struct net_device *dev)
/* CR6 operation mode decision */
db->cr6_data |= ULI526X_TXTH_256;
db->cr0_data = CR0_DEFAULT;
-
+
/* Initialize ULI526X board */
uli526x_init(dev);
@@ -604,7 +604,7 @@ static int uli526x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Restore CR7 to enable interrupt */
spin_unlock_irqrestore(&db->lock, flags);
outl(db->cr7_data, dev->base_addr + DCR7);
-
+
/* free this SKB */
dev_kfree_skb(skb);
@@ -782,7 +782,7 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
struct sk_buff *skb;
int rxlen;
u32 rdes0;
-
+
rxptr = db->rx_ready_ptr;
while(db->rx_avail_cnt) {
@@ -821,7 +821,7 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
if ( !(rdes0 & 0x8000) ||
((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
skb = rxptr->rx_skb_ptr;
-
+
/* Good packet, send to upper layer */
/* Shorst packet used new SKB */
if ( (rxlen < RX_COPY_SIZE) &&
@@ -841,7 +841,7 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
dev->last_rx = jiffies;
db->stats.rx_packets++;
db->stats.rx_bytes += rxlen;
-
+
} else {
/* Reuse SKB buffer when the packet is error */
ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
@@ -911,7 +911,7 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_MII);
-
+
ecmd->advertising = (ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
@@ -924,13 +924,13 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
ecmd->phy_address = db->phy_addr;
ecmd->transceiver = XCVR_EXTERNAL;
-
+
ecmd->speed = 10;
ecmd->duplex = DUPLEX_HALF;
-
+
if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
{
- ecmd->speed = 100;
+ ecmd->speed = 100;
}
if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
{
@@ -939,11 +939,11 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
if(db->link_failed)
{
ecmd->speed = -1;
- ecmd->duplex = -1;
+ ecmd->duplex = -1;
}
-
+
if (db->media_mode & ULI526X_AUTO)
- {
+ {
ecmd->autoneg = AUTONEG_ENABLE;
}
}
@@ -964,15 +964,15 @@ static void netdev_get_drvinfo(struct net_device *dev,
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
struct uli526x_board_info *np = netdev_priv(dev);
-
+
ULi_ethtool_gset(np, cmd);
-
+
return 0;
}
static u32 netdev_get_link(struct net_device *dev) {
struct uli526x_board_info *np = netdev_priv(dev);
-
+
if(np->link_failed)
return 0;
else
@@ -1005,11 +1005,11 @@ static void uli526x_timer(unsigned long data)
struct uli526x_board_info *db = netdev_priv(dev);
unsigned long flags;
u8 TmpSpeed=10;
-
+
//ULI526X_DBUG(0, "uli526x_timer()", 0);
spin_lock_irqsave(&db->lock, flags);
-
+
/* Dynamic reset ULI526X : system error or transmit time-out */
tmp_cr8 = inl(db->ioaddr + DCR8);
if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
@@ -1021,9 +1021,9 @@ static void uli526x_timer(unsigned long data)
/* TX polling kick monitor */
if ( db->tx_packet_cnt &&
time_after(jiffies, dev->trans_start + ULI526X_TX_KICK) ) {
- outl(0x1, dev->base_addr + DCR1); // Tx polling again
+ outl(0x1, dev->base_addr + DCR1); // Tx polling again
- // TX Timeout
+ // TX Timeout
if ( time_after(jiffies, dev->trans_start + ULI526X_TX_TIMEOUT) ) {
db->reset_TXtimeout++;
db->wait_reset = 1;
@@ -1073,7 +1073,7 @@ static void uli526x_timer(unsigned long data)
uli526x_sense_speed(db) )
db->link_failed = 1;
uli526x_process_mode(db);
-
+
if(db->link_failed==0)
{
if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
@@ -1404,7 +1404,7 @@ static u8 uli526x_sense_speed(struct uli526x_board_info * db)
phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
if ( (phy_mode & 0x24) == 0x24 ) {
-
+
phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7);
if(phy_mode&0x8000)
phy_mode = 0x8000;
@@ -1414,7 +1414,7 @@ static u8 uli526x_sense_speed(struct uli526x_board_info * db)
phy_mode = 0x2000;
else
phy_mode = 0x1000;
-
+
/* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
switch (phy_mode) {
case 0x1000: db->op_mode = ULI526X_10MHF; break;
@@ -1442,7 +1442,7 @@ static u8 uli526x_sense_speed(struct uli526x_board_info * db)
static void uli526x_set_phyxcer(struct uli526x_board_info *db)
{
u16 phy_reg;
-
+
/* Phyxcer capability setting */
phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
@@ -1457,7 +1457,7 @@ static void uli526x_set_phyxcer(struct uli526x_board_info *db)
case ULI526X_100MHF: phy_reg |= 0x80; break;
case ULI526X_100MFD: phy_reg |= 0x100; break;
}
-
+
}
/* Write new capability to Phyxcer Reg4 */
@@ -1556,7 +1556,7 @@ static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data
/* Write a word data to PHY controller */
for ( i = 0x8000; i > 0; i >>= 1)
phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
-
+
}
@@ -1574,7 +1574,7 @@ static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
return phy_readby_cr10(iobase, phy_addr, offset);
/* M5261/M5263 Chip */
ioaddr = iobase + DCR9;
-
+
/* Send 33 synchronization clock to Phy controller */
for (i = 0; i < 35; i++)
phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
@@ -1610,7 +1610,7 @@ static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
{
unsigned long ioaddr,cr10_value;
-
+
ioaddr = iobase + DCR10;
cr10_value = phy_addr;
cr10_value = (cr10_value<<5) + offset;
@@ -1629,7 +1629,7 @@ static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data)
{
unsigned long ioaddr,cr10_value;
-
+
ioaddr = iobase + DCR10;
cr10_value = phy_addr;
cr10_value = (cr10_value<<5) + offset;
@@ -1659,7 +1659,7 @@ static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id)
static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
{
u16 phy_data;
-
+
outl(0x50000 , ioaddr);
udelay(1);
phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 136a70c4d5e4..64ecf929d2ac 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -38,12 +38,12 @@
Copyright (C) 2001 Manfred Spraul
* ethtool support (jgarzik)
* Replace some MII-related magic numbers with constants (jgarzik)
-
+
TODO:
* enable pci_power_off
* Wake-On-LAN
*/
-
+
#define DRV_NAME "winbond-840"
#define DRV_VERSION "1.01-d"
#define DRV_RELDATE "Nov-17-2001"
@@ -57,7 +57,7 @@ c-help-name: Winbond W89c840 PCI Ethernet support
c-help-symbol: CONFIG_WINBOND_840
c-help: This driver is for the Winbond W89c840 chip. It also works with
c-help: the TX9882 chip on the Compex RL100-ATX board.
-c-help: More specific information and updates are available from
+c-help: More specific information and updates are available from
c-help: http://www.scyld.com/network/drivers.html
*/
@@ -207,7 +207,7 @@ Test with 'ping -s 10000' on a fast computer.
*/
-
+
/*
PCI probe table.
@@ -374,7 +374,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct ethtool_ops netdev_ethtool_ops;
static int netdev_close(struct net_device *dev);
-
+
static int __devinit w840_probe1 (struct pci_dev *pdev,
const struct pci_device_id *ent)
@@ -434,7 +434,7 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
np->mii_if.mdio_read = mdio_read;
np->mii_if.mdio_write = mdio_write;
np->base_addr = ioaddr;
-
+
pci_set_drvdata(pdev, dev);
if (dev->mem_start)
@@ -510,7 +510,7 @@ err_out_netdev:
return -ENODEV;
}
-
+
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are
often serial bit streams generated by the host processor.
The example below is for the common 93c46 EEPROM, 64 16 bit words. */
@@ -660,7 +660,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
return;
}
-
+
static int netdev_open(struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
@@ -731,7 +731,7 @@ static int update_link(struct net_device *dev)
dev->name, np->phys[0]);
netif_carrier_on(dev);
}
-
+
if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
/* If the link partner doesn't support autonegotiation
* the MII detects it's abilities with the "parallel detection".
@@ -761,7 +761,7 @@ static int update_link(struct net_device *dev)
result |= 0x20000000;
if (result != np->csr6 && debug)
printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n",
- dev->name, fasteth ? 100 : 10,
+ dev->name, fasteth ? 100 : 10,
duplex ? "full" : "half", np->phys[0]);
return result;
}
@@ -947,7 +947,7 @@ static void init_registers(struct net_device *dev)
iowrite32(i, ioaddr + PCIBusCfg);
np->csr6 = 0;
- /* 128 byte Tx threshold;
+ /* 128 byte Tx threshold;
Transmit on; Receive on; */
update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));
@@ -1584,7 +1584,7 @@ static int netdev_close(struct net_device *dev)
static void __devexit w840_remove1 (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
-
+
if (dev) {
struct netdev_private *np = netdev_priv(dev);
unregister_netdev(dev);
@@ -1640,7 +1640,7 @@ static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
spin_unlock_wait(&dev->xmit_lock);
synchronize_irq(dev->irq);
-
+
np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
/* no more hardware accesses behind this line. */
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 56344103ac23..63c2175ed138 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -1,11 +1,11 @@
/*
- * xircom_cb: A driver for the (tulip-like) Xircom Cardbus ethernet cards
+ * xircom_cb: A driver for the (tulip-like) Xircom Cardbus ethernet cards
*
* This software is (C) by the respective authors, and licensed under the GPL
* License.
*
* Written by Arjan van de Ven for Red Hat, Inc.
- * Based on work by Jeff Garzik, Doug Ledford and Donald Becker
+ * Based on work by Jeff Garzik, Doug Ledford and Donald Becker
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
@@ -93,7 +93,7 @@ struct xircom_private {
unsigned long io_port;
int open;
-
+
/* transmit_used is the rotating counter that indicates which transmit
descriptor has to be used next */
int transmit_used;
@@ -153,10 +153,10 @@ static struct pci_device_id xircom_pci_table[] = {
MODULE_DEVICE_TABLE(pci, xircom_pci_table);
static struct pci_driver xircom_ops = {
- .name = "xircom_cb",
- .id_table = xircom_pci_table,
- .probe = xircom_probe,
- .remove = xircom_remove,
+ .name = "xircom_cb",
+ .id_table = xircom_pci_table,
+ .probe = xircom_probe,
+ .remove = xircom_remove,
.suspend =NULL,
.resume =NULL
};
@@ -174,7 +174,7 @@ static void print_binary(unsigned int number)
buffer[i2++]='1';
else
buffer[i2++]='0';
- if ((i&3)==0)
+ if ((i&3)==0)
buffer[i2++]=' ';
}
printk("%s\n",buffer);
@@ -196,10 +196,10 @@ static struct ethtool_ops netdev_ethtool_ops = {
/* xircom_probe is the code that gets called on device insertion.
it sets up the hardware and registers the device to the networklayer.
-
+
TODO: Send 1 or 2 "dummy" packets here as the card seems to discard the
first two packets that get send, and pump hates that.
-
+
*/
static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
@@ -209,7 +209,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
unsigned long flags;
unsigned short tmp16;
enter("xircom_probe");
-
+
/* First do the PCI initialisation */
if (pci_enable_device(pdev))
@@ -217,24 +217,24 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
/* disable all powermanagement */
pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000);
-
+
pci_set_master(pdev); /* Why isn't this done by pci_enable_device ?*/
- /* clear PCI status, if any */
- pci_read_config_word (pdev,PCI_STATUS, &tmp16);
+ /* clear PCI status, if any */
+ pci_read_config_word (pdev,PCI_STATUS, &tmp16);
pci_write_config_word (pdev, PCI_STATUS,tmp16);
-
+
pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
-
+
if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) {
printk(KERN_ERR "xircom_probe: failed to allocate io-region\n");
return -ENODEV;
}
- /*
+ /*
Before changing the hardware, allocate the memory.
This way, we can fail gracefully if not enough memory
- is available.
+ is available.
*/
dev = alloc_etherdev(sizeof(struct xircom_private));
if (!dev) {
@@ -242,13 +242,13 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
goto device_fail;
}
private = netdev_priv(dev);
-
+
/* Allocate the send/receive buffers */
private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle);
if (private->rx_buffer == NULL) {
printk(KERN_ERR "xircom_probe: no memory for rx buffer \n");
goto rx_buf_fail;
- }
+ }
private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle);
if (private->tx_buffer == NULL) {
printk(KERN_ERR "xircom_probe: no memory for tx buffer \n");
@@ -265,11 +265,11 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
spin_lock_init(&private->lock);
dev->irq = pdev->irq;
dev->base_addr = private->io_port;
-
+
initialize_card(private);
read_mac_address(private);
setup_descriptors(private);
-
+
dev->open = &xircom_open;
dev->hard_start_xmit = &xircom_start_xmit;
dev->stop = &xircom_close;
@@ -285,19 +285,19 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
printk(KERN_ERR "xircom_probe: netdevice registration failed.\n");
goto reg_fail;
}
-
+
printk(KERN_INFO "%s: Xircom cardbus revision %i at irq %i \n", dev->name, chip_rev, pdev->irq);
/* start the transmitter to get a heartbeat */
/* TODO: send 2 dummy packets here */
transceiver_voodoo(private);
-
+
spin_lock_irqsave(&private->lock,flags);
activate_transmitter(private);
activate_receiver(private);
spin_unlock_irqrestore(&private->lock,flags);
-
+
trigger_receive(private);
-
+
leave("xircom_probe");
return 0;
@@ -332,7 +332,7 @@ static void __devexit xircom_remove(struct pci_dev *pdev)
free_netdev(dev);
pci_set_drvdata(pdev, NULL);
leave("xircom_remove");
-}
+}
static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
@@ -346,11 +346,11 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs
spin_lock(&card->lock);
status = inl(card->io_port+CSR5);
-#ifdef DEBUG
+#ifdef DEBUG
print_binary(status);
printk("tx status 0x%08x 0x%08x \n",card->tx_buffer[0],card->tx_buffer[4]);
printk("rx status 0x%08x 0x%08x \n",card->rx_buffer[0],card->rx_buffer[4]);
-#endif
+#endif
/* Handle shared irq and hotplug */
if (status == 0 || status == 0xffffffff) {
spin_unlock(&card->lock);
@@ -366,21 +366,21 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs
netif_carrier_on(dev);
else
netif_carrier_off(dev);
-
+
}
- /* Clear all remaining interrupts */
+ /* Clear all remaining interrupts */
status |= 0xffffffff; /* FIXME: make this clear only the
real existing bits */
outl(status,card->io_port+CSR5);
-
- for (i=0;i<NUMDESCRIPTORS;i++)
+
+ for (i=0;i<NUMDESCRIPTORS;i++)
investigate_write_descriptor(dev,card,i,bufferoffsets[i]);
- for (i=0;i<NUMDESCRIPTORS;i++)
+ for (i=0;i<NUMDESCRIPTORS;i++)
investigate_read_descriptor(dev,card,i,bufferoffsets[i]);
-
+
spin_unlock(&card->lock);
leave("xircom_interrupt");
return IRQ_HANDLED;
@@ -393,38 +393,38 @@ static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
int nextdescriptor;
int desc;
enter("xircom_start_xmit");
-
+
card = netdev_priv(dev);
spin_lock_irqsave(&card->lock,flags);
-
+
/* First see if we can free some descriptors */
- for (desc=0;desc<NUMDESCRIPTORS;desc++)
+ for (desc=0;desc<NUMDESCRIPTORS;desc++)
investigate_write_descriptor(dev,card,desc,bufferoffsets[desc]);
-
-
+
+
nextdescriptor = (card->transmit_used +1) % (NUMDESCRIPTORS);
desc = card->transmit_used;
-
+
/* only send the packet if the descriptor is free */
if (card->tx_buffer[4*desc]==0) {
/* Copy the packet data; zero the memory first as the card
sometimes sends more than you ask it to. */
-
+
memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536);
memcpy(&(card->tx_buffer[bufferoffsets[desc]/4]),skb->data,skb->len);
-
-
+
+
/* FIXME: The specification tells us that the length we send HAS to be a multiple of
4 bytes. */
-
+
card->tx_buffer[4*desc+1] = skb->len;
if (desc == NUMDESCRIPTORS-1)
card->tx_buffer[4*desc+1] |= (1<<25); /* bit 25: last descriptor of the ring */
card->tx_buffer[4*desc+1] |= 0xF0000000;
- /* 0xF0... means want interrupts*/
+ /* 0xF0... means want interrupts*/
card->tx_skb[desc] = skb;
-
+
wmb();
/* This gives the descriptor to the card */
card->tx_buffer[4*desc] = 0x80000000;
@@ -433,18 +433,18 @@ static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
}
card->transmit_used = nextdescriptor;
- leave("xircom-start_xmit - sent");
+ leave("xircom-start_xmit - sent");
spin_unlock_irqrestore(&card->lock,flags);
return 0;
}
-
+
/* Uh oh... no free descriptor... drop the packet */
netif_stop_queue(dev);
spin_unlock_irqrestore(&card->lock,flags);
trigger_transmit(card);
-
+
return -EIO;
}
@@ -462,7 +462,7 @@ static int xircom_open(struct net_device *dev)
leave("xircom_open - No IRQ");
return retval;
}
-
+
xircom_up(xp);
xp->open = 1;
leave("xircom_open");
@@ -473,31 +473,31 @@ static int xircom_close(struct net_device *dev)
{
struct xircom_private *card;
unsigned long flags;
-
+
enter("xircom_close");
card = netdev_priv(dev);
netif_stop_queue(dev); /* we don't want new packets */
-
+
spin_lock_irqsave(&card->lock,flags);
-
+
disable_all_interrupts(card);
-#if 0
+#if 0
/* We can enable this again once we send dummy packets on ifconfig ethX up */
deactivate_receiver(card);
deactivate_transmitter(card);
-#endif
+#endif
remove_descriptors(card);
-
+
spin_unlock_irqrestore(&card->lock,flags);
-
+
card->open = 0;
free_irq(dev->irq,dev);
-
+
leave("xircom_close");
-
+
return 0;
-
+
}
@@ -506,8 +506,8 @@ static struct net_device_stats *xircom_get_stats(struct net_device *dev)
{
struct xircom_private *card = netdev_priv(dev);
return &card->stats;
-}
-
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void xircom_poll_controller(struct net_device *dev)
@@ -540,7 +540,7 @@ static void initialize_card(struct xircom_private *card)
outl(val, card->io_port + CSR0);
- val = 0; /* Value 0x00 is a safe and conservative value
+ val = 0; /* Value 0x00 is a safe and conservative value
for the PCI configuration settings */
outl(val, card->io_port + CSR0);
@@ -617,23 +617,23 @@ static void setup_descriptors(struct xircom_private *card)
/* Rx Descr2: address of the buffer
we store the buffer at the 2nd half of the page */
-
+
address = (unsigned long) card->rx_dma_handle;
card->rx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]);
/* Rx Desc3: address of 2nd buffer -> 0 */
card->rx_buffer[i*4 + 3] = 0;
}
-
+
wmb();
/* Write the receive descriptor ring address to the card */
address = (unsigned long) card->rx_dma_handle;
- val = cpu_to_le32(address);
+ val = cpu_to_le32(address);
outl(val, card->io_port + CSR3); /* Receive descr list address */
/* transmit descriptors */
memset(card->tx_buffer, 0, 128); /* clear the descriptors */
-
+
for (i=0;i<NUMDESCRIPTORS;i++ ) {
/* Tx Descr0: Empty, we own it, no errors -> 0x00000000 */
card->tx_buffer[i*4 + 0] = 0x00000000;
@@ -641,7 +641,7 @@ static void setup_descriptors(struct xircom_private *card)
card->tx_buffer[i*4 + 1] = 1536;
if (i==NUMDESCRIPTORS-1)
card->tx_buffer[i*4 + 1] |= (1 << 25); /* bit 25 is "last descriptor" */
-
+
/* Tx Descr2: address of the buffer
we store the buffer at the 2nd half of the page */
address = (unsigned long) card->tx_dma_handle;
@@ -748,7 +748,7 @@ static int receive_active(struct xircom_private *card)
activate_receiver enables the receiver on the card.
Before being allowed to active the receiver, the receiver
must be completely de-activated. To achieve this,
-this code actually disables the receiver first; then it waits for the
+this code actually disables the receiver first; then it waits for the
receiver to become inactive, then it activates the receiver and then
it waits for the receiver to be active.
@@ -762,13 +762,13 @@ static void activate_receiver(struct xircom_private *card)
val = inl(card->io_port + CSR6); /* Operation mode */
-
+
/* If the "active" bit is set and the receiver is already
active, no need to do the expensive thing */
if ((val&2) && (receive_active(card)))
return;
-
-
+
+
val = val & ~2; /* disable the receiver */
outl(val, card->io_port + CSR6);
@@ -805,7 +805,7 @@ static void activate_receiver(struct xircom_private *card)
/*
deactivate_receiver disables the receiver on the card.
-To achieve this this code disables the receiver first;
+To achieve this this code disables the receiver first;
then it waits for the receiver to become inactive.
must be called with the lock held and interrupts disabled.
@@ -840,7 +840,7 @@ static void deactivate_receiver(struct xircom_private *card)
activate_transmitter enables the transmitter on the card.
Before being allowed to active the transmitter, the transmitter
must be completely de-activated. To achieve this,
-this code actually disables the transmitter first; then it waits for the
+this code actually disables the transmitter first; then it waits for the
transmitter to become inactive, then it activates the transmitter and then
it waits for the transmitter to be active again.
@@ -856,7 +856,7 @@ static void activate_transmitter(struct xircom_private *card)
val = inl(card->io_port + CSR6); /* Operation mode */
/* If the "active" bit is set and the receiver is already
- active, no need to do the expensive thing */
+ active, no need to do the expensive thing */
if ((val&(1<<13)) && (transmit_active(card)))
return;
@@ -896,7 +896,7 @@ static void activate_transmitter(struct xircom_private *card)
/*
deactivate_transmitter disables the transmitter on the card.
-To achieve this this code disables the transmitter first;
+To achieve this this code disables the transmitter first;
then it waits for the transmitter to become inactive.
must be called with the lock held and interrupts disabled.
@@ -990,7 +990,7 @@ static void disable_all_interrupts(struct xircom_private *card)
{
unsigned int val;
enter("enable_all_interrupts");
-
+
val = 0; /* disable all interrupts */
outl(val, card->io_port + CSR7);
@@ -1031,8 +1031,8 @@ static int enable_promisc(struct xircom_private *card)
unsigned int val;
enter("enable_promisc");
- val = inl(card->io_port + CSR6);
- val = val | (1 << 6);
+ val = inl(card->io_port + CSR6);
+ val = val | (1 << 6);
outl(val, card->io_port + CSR6);
leave("enable_promisc");
@@ -1042,7 +1042,7 @@ static int enable_promisc(struct xircom_private *card)
-/*
+/*
link_status() checks the the links status and will return 0 for no link, 10 for 10mbit link and 100 for.. guess what.
Must be called in locked state with interrupts disabled
@@ -1051,15 +1051,15 @@ static int link_status(struct xircom_private *card)
{
unsigned int val;
enter("link_status");
-
+
val = inb(card->io_port + CSR12);
-
+
if (!(val&(1<<2))) /* bit 2 is 0 for 10mbit link, 1 for not an 10mbit link */
return 10;
if (!(val&(1<<1))) /* bit 1 is 0 for 100mbit link, 1 for not an 100mbit link */
return 100;
-
- /* If we get here -> no link at all */
+
+ /* If we get here -> no link at all */
leave("link_status");
return 0;
@@ -1071,7 +1071,7 @@ static int link_status(struct xircom_private *card)
/*
read_mac_address() reads the MAC address from the NIC and stores it in the "dev" structure.
-
+
This function will take the spinlock itself and can, as a result, not be called with the lock helt.
*/
static void read_mac_address(struct xircom_private *card)
@@ -1081,7 +1081,7 @@ static void read_mac_address(struct xircom_private *card)
int i;
enter("read_mac_address");
-
+
spin_lock_irqsave(&card->lock, flags);
outl(1 << 12, card->io_port + CSR9); /* enable boot rom access */
@@ -1095,7 +1095,7 @@ static void read_mac_address(struct xircom_private *card)
outl(i + 3, card->io_port + CSR10);
data_count = inl(card->io_port + CSR9) & 0xff;
if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) {
- /*
+ /*
* This is it. We have the data we want.
*/
for (j = 0; j < 6; j++) {
@@ -1136,12 +1136,12 @@ static void transceiver_voodoo(struct xircom_private *card)
spin_lock_irqsave(&card->lock, flags);
outl(0x0008, card->io_port + CSR15);
- udelay(25);
+ udelay(25);
outl(0xa8050000, card->io_port + CSR15);
udelay(25);
outl(0xa00f0000, card->io_port + CSR15);
udelay(25);
-
+
spin_unlock_irqrestore(&card->lock, flags);
netif_start_queue(card->dev);
@@ -1163,15 +1163,15 @@ static void xircom_up(struct xircom_private *card)
spin_lock_irqsave(&card->lock, flags);
-
+
enable_link_interrupt(card);
enable_transmit_interrupt(card);
enable_receive_interrupt(card);
enable_common_interrupts(card);
enable_promisc(card);
-
+
/* The card can have received packets already, read them away now */
- for (i=0;i<NUMDESCRIPTORS;i++)
+ for (i=0;i<NUMDESCRIPTORS;i++)
investigate_read_descriptor(card->dev,card,i,bufferoffsets[i]);
@@ -1185,15 +1185,15 @@ static void xircom_up(struct xircom_private *card)
/* Bufferoffset is in BYTES */
static void investigate_read_descriptor(struct net_device *dev,struct xircom_private *card, int descnr, unsigned int bufferoffset)
{
- int status;
-
+ int status;
+
enter("investigate_read_descriptor");
status = card->rx_buffer[4*descnr];
-
+
if ((status > 0)) { /* packet received */
-
+
/* TODO: discard error packets */
-
+
short pkt_len = ((status >> 16) & 0x7ff) - 4; /* minus 4, we don't want the CRC */
struct sk_buff *skb;
@@ -1216,7 +1216,7 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri
dev->last_rx = jiffies;
card->stats.rx_packets++;
card->stats.rx_bytes += pkt_len;
-
+
out:
/* give the buffer back to the card */
card->rx_buffer[4*descnr] = 0x80000000;
@@ -1234,9 +1234,9 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p
int status;
enter("investigate_write_descriptor");
-
+
status = card->tx_buffer[4*descnr];
-#if 0
+#if 0
if (status & 0x8000) { /* Major error */
printk(KERN_ERR "Major transmit error status %x \n", status);
card->tx_buffer[4*descnr] = 0;
@@ -1258,7 +1258,7 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p
}
leave("investigate_write_descriptor");
-
+
}
@@ -1271,8 +1271,8 @@ static int __init xircom_init(void)
static void __exit xircom_exit(void)
{
pci_unregister_driver(&xircom_ops);
-}
+}
-module_init(xircom_init)
+module_init(xircom_init)
module_exit(xircom_exit)
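xircom_start_xmit() in the hunks above hands a descriptor to the hardware by writing 0x80000000 into its status word after a write barrier, and investigate_write_descriptor() later reclaims slots whose ownership bit the card has cleared. The sketch below models that ownership-bit handshake in ordinary C so the protocol is easier to follow; the "card" side is simulated and none of this is the driver's real DMA code.

/* Toy model of the descriptor ownership bit used by the xircom driver:
 * bit 31 set means the (simulated) card owns the slot, cleared means
 * the host may reuse it.  Illustrative only. */
#include <stdio.h>
#include <stdint.h>

#define NUMDESC   4
#define OWN_BIT   0x80000000u

static uint32_t desc_status[NUMDESC];

static int submit(int idx, unsigned int len)
{
	if (desc_status[idx] & OWN_BIT)
		return -1;                 /* still owned by the card */
	desc_status[idx] = OWN_BIT | len;  /* hand it to the "card"   */
	return 0;
}

static void card_completes(int idx)
{
	desc_status[idx] &= ~OWN_BIT;      /* card gives it back      */
}

int main(void)
{
	if (submit(0, 1536) == 0)
		printf("descriptor 0 queued\n");
	if (submit(0, 60) < 0)
		printf("descriptor 0 busy, queue stopped\n");
	card_completes(0);
	if (submit(0, 60) == 0)
		printf("descriptor 0 reused\n");
	return 0;
}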
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index d9a774b91ddc..f1b2640ebdc6 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -307,7 +307,7 @@ enum velocity_owner {
#define TX_QUEUE_NO 4
#define MAX_HW_MIB_COUNTER 32
-#define VELOCITY_MIN_MTU (1514-14)
+#define VELOCITY_MIN_MTU (64)
#define VELOCITY_MAX_MTU (9000)
/*
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index eba8e5cfacc2..f485a97844cc 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -50,10 +50,6 @@ static const char* devname = "PCI200SYN";
static int pci_clock_freq = 33000000;
#define CLOCK_BASE pci_clock_freq
-#define PCI_VENDOR_ID_GORAMO 0x10B5 /* uses PLX:9050 ID - this card */
-#define PCI_DEVICE_ID_PCI200SYN 0x9050 /* doesn't have its own ID */
-
-
/*
* PLX PCI9052 local configuration and shared runtime registers.
* This structure can be used to access 9052 registers (memory mapped).
@@ -262,7 +258,7 @@ static void pci200_pci_remove_one(struct pci_dev *pdev)
int i;
card_t *card = pci_get_drvdata(pdev);
- for(i = 0; i < 2; i++)
+ for (i = 0; i < 2; i++)
if (card->ports[i].card) {
struct net_device *dev = port_to_dev(&card->ports[i]);
unregister_hdlc_device(dev);
@@ -385,6 +381,15 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
" %u RX packets rings\n", ramsize / 1024, ramphys,
pdev->irq, card->tx_ring_buffers, card->rx_ring_buffers);
+ if (pdev->subsystem_device == PCI_DEVICE_ID_PLX_9050) {
+ printk(KERN_ERR "Detected PCI200SYN card with old "
+ "configuration data.\n");
+ printk(KERN_ERR "See <http://www.kernel.org/pub/"
+ "linux/utils/net/hdlc/pci200syn/> for update.\n");
+ printk(KERN_ERR "The card will stop working with"
+ " future versions of Linux if not updated.\n");
+ }
+
if (card->tx_ring_buffers < 1) {
printk(KERN_ERR "pci200syn: RAM test failed\n");
pci200_pci_remove_one(pdev);
@@ -396,7 +401,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
writew(readw(p) | 0x0040, p);
/* Allocate IRQ */
- if(request_irq(pdev->irq, sca_intr, SA_SHIRQ, devname, card)) {
+ if (request_irq(pdev->irq, sca_intr, SA_SHIRQ, devname, card)) {
printk(KERN_WARNING "pci200syn: could not allocate IRQ%d.\n",
pdev->irq);
pci200_pci_remove_one(pdev);
@@ -406,7 +411,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
sca_init(card, 0);
- for(i = 0; i < 2; i++) {
+ for (i = 0; i < 2; i++) {
port_t *port = &card->ports[i];
struct net_device *dev = port_to_dev(port);
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -425,7 +430,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
hdlc->xmit = sca_xmit;
port->settings.clock_type = CLOCK_EXT;
port->card = card;
- if(register_hdlc_device(dev)) {
+ if (register_hdlc_device(dev)) {
printk(KERN_ERR "pci200syn: unable to register hdlc "
"device\n");
port->card = NULL;
@@ -445,8 +450,10 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
static struct pci_device_id pci200_pci_tbl[] __devinitdata = {
- { PCI_VENDOR_ID_GORAMO, PCI_DEVICE_ID_PCI200SYN, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
+ PCI_DEVICE_ID_PLX_9050, 0, 0, 0 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
+ PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 },
{ 0, }
};
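The pci200syn hunk above drops the board-specific vendor/device pair and instead matches on the generic PLX 9050 bridge IDs plus the subsystem IDs, which is how 9050-based boards are told apart. A rough user-space model of that kind of table match, with a PCI_ANY_ID-style wildcard, is sketched below; the struct layout and the numeric subsystem values are illustrative examples, not the kernel's real pci_device_id handling.

/* Toy model of matching a device against a pci_device_id-style table.
 * ANY acts like PCI_ANY_ID; values are examples, not authoritative. */
#include <stdio.h>
#include <stdint.h>

#define ANY 0xffffffffu

struct id { uint32_t vendor, device, subvendor, subdevice; };

static const struct id table[] = {
	{ 0x10b5, 0x9050, 0x10b5, 0x9050 },  /* generic PLX 9050 subsystem     */
	{ 0x10b5, 0x9050, 0x10b5, 0x3196 },  /* example PCI200SYN subsystem ID */
	{ 0 }
};

static int match(const struct id *t, const struct id *dev)
{
	return (t->vendor    == ANY || t->vendor    == dev->vendor)    &&
	       (t->device    == ANY || t->device    == dev->device)    &&
	       (t->subvendor == ANY || t->subvendor == dev->subvendor) &&
	       (t->subdevice == ANY || t->subdevice == dev->subdevice);
}

int main(void)
{
	struct id dev = { 0x10b5, 0x9050, 0x10b5, 0x9050 };
	for (const struct id *t = table; t->vendor; t++)
		if (match(t, &dev))
			printf("matched entry with subdevice %04x\n",
			       (unsigned)t->subdevice);
	return 0;
}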
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index e0874cbfefea..30ec235e6935 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -235,7 +235,35 @@ config IPW2200_MONITOR
promiscuous mode via the Wireless Tool's Monitor mode. While in this
mode, no packets can be sent.
-config IPW_QOS
+config IPW2200_RADIOTAP
+ bool "Enable radiotap format 802.11 raw packet support"
+ depends on IPW2200_MONITOR
+
+config IPW2200_PROMISCUOUS
+ bool "Enable creation of a RF radiotap promiscuous interface"
+ depends on IPW2200_MONITOR
+ select IPW2200_RADIOTAP
+ ---help---
+ Enables the creation of a second interface prefixed 'rtap'.
+	  This second interface will provide every received packet in
+	  radiotap format.
+
+ This is useful for performing wireless network analysis while
+ maintaining an active association.
+
+ Example usage:
+
+ % modprobe ipw2200 rtap_iface=1
+ % ifconfig rtap0 up
+ % tethereal -i rtap0
+
+ If you do not specify 'rtap_iface=1' as a module parameter then
+ the rtap interface will not be created and you will need to turn
+ it on via sysfs:
+
+ % echo 1 > /sys/bus/pci/drivers/ipw2200/*/rtap_iface
+
+config IPW2200_QOS
bool "Enable QoS support"
depends on IPW2200 && EXPERIMENTAL
@@ -503,6 +531,23 @@ config PRISM54
say M here and read <file:Documentation/modules.txt>. The module
will be called prism54.ko.
+config USB_ZD1201
+ tristate "USB ZD1201 based Wireless device support"
+ depends on USB && NET_RADIO
+ select FW_LOADER
+ ---help---
+ Say Y if you want to use wireless LAN adapters based on the ZyDAS
+ ZD1201 chip.
+
+ This driver makes the adapter appear as a normal Ethernet interface,
+ typically on wlan0.
+
+ The zd1201 device requires external firmware to be loaded.
+ This can be found at http://linux-lc100020.sourceforge.net/
+
+ To compile this driver as a module, choose M here: the
+ module will be called zd1201.
+
source "drivers/net/wireless/hostap/Kconfig"
source "drivers/net/wireless/bcm43xx/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index c86779879361..512603de309a 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -40,3 +40,5 @@ obj-$(CONFIG_BCM43XX) += bcm43xx/
# 16-bit wireless PCMCIA client drivers
obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o
+
+obj-$(CONFIG_USB_ZD1201) += zd1201.o
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 00764ddd74d8..4069b79d8259 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -47,6 +47,7 @@
#include <linux/ioport.h>
#include <linux/pci.h>
#include <asm/uaccess.h>
+#include <net/ieee80211.h>
#include "airo.h"
@@ -467,6 +468,8 @@ static int do8bitIO = 0;
#define RID_ECHOTEST_RESULTS 0xFF71
#define RID_BSSLISTFIRST 0xFF72
#define RID_BSSLISTNEXT 0xFF73
+#define RID_WPA_BSSLISTFIRST 0xFF74
+#define RID_WPA_BSSLISTNEXT 0xFF75
typedef struct {
u16 cmd;
@@ -739,6 +742,14 @@ typedef struct {
u16 extSoftCap;
} CapabilityRid;
+
+/* Only present on firmware >= 5.30.17 */
+typedef struct {
+ u16 unknown[4];
+ u8 fixed[12]; /* WLAN management frame */
+ u8 iep[624];
+} BSSListRidExtra;
+
typedef struct {
u16 len;
u16 index; /* First is 0 and 0xffff means end of list */
@@ -767,6 +778,9 @@ typedef struct {
} fh;
u16 dsChannel;
u16 atimWindow;
+
+ /* Only present on firmware >= 5.30.17 */
+ BSSListRidExtra extra;
} BSSListRid;
typedef struct {
@@ -1140,8 +1154,6 @@ struct airo_info {
char defindex; // Used with auto wep
struct proc_dir_entry *proc_entry;
spinlock_t aux_lock;
- unsigned long flags;
-#define FLAG_PROMISC 8 /* IFF_PROMISC 0x100 - include/linux/if.h */
#define FLAG_RADIO_OFF 0 /* User disabling of MAC */
#define FLAG_RADIO_DOWN 1 /* ifup/ifdown disabling of MAC */
#define FLAG_RADIO_MASK 0x03
@@ -1151,6 +1163,7 @@ struct airo_info {
#define FLAG_UPDATE_MULTI 5
#define FLAG_UPDATE_UNI 6
#define FLAG_802_11 7
+#define FLAG_PROMISC 8 /* IFF_PROMISC 0x100 - include/linux/if.h */
#define FLAG_PENDING_XMIT 9
#define FLAG_PENDING_XMIT11 10
#define FLAG_MPI 11
@@ -1158,17 +1171,19 @@ struct airo_info {
#define FLAG_COMMIT 13
#define FLAG_RESET 14
#define FLAG_FLASHING 15
-#define JOB_MASK 0x2ff0000
-#define JOB_DIE 16
-#define JOB_XMIT 17
-#define JOB_XMIT11 18
-#define JOB_STATS 19
-#define JOB_PROMISC 20
-#define JOB_MIC 21
-#define JOB_EVENT 22
-#define JOB_AUTOWEP 23
-#define JOB_WSTATS 24
-#define JOB_SCAN_RESULTS 25
+#define FLAG_WPA_CAPABLE 16
+ unsigned long flags;
+#define JOB_DIE 0
+#define JOB_XMIT 1
+#define JOB_XMIT11 2
+#define JOB_STATS 3
+#define JOB_PROMISC 4
+#define JOB_MIC 5
+#define JOB_EVENT 6
+#define JOB_AUTOWEP 7
+#define JOB_WSTATS 8
+#define JOB_SCAN_RESULTS 9
+ unsigned long jobs;
int (*bap_read)(struct airo_info*, u16 *pu16Dst, int bytelen,
int whichbap);
unsigned short *flash;
@@ -1208,6 +1223,11 @@ struct airo_info {
#define PCI_SHARED_LEN 2*MPI_MAX_FIDS*PKTSIZE+RIDSIZE
char proc_name[IFNAMSIZ];
+ /* WPA-related stuff */
+ unsigned int bssListFirst;
+ unsigned int bssListNext;
+ unsigned int bssListRidLen;
+
struct list_head network_list;
struct list_head network_free_list;
BSSListElement *networks;
@@ -1264,7 +1284,7 @@ static void micinit(struct airo_info *ai)
{
MICRid mic_rid;
- clear_bit(JOB_MIC, &ai->flags);
+ clear_bit(JOB_MIC, &ai->jobs);
PC4500_readrid(ai, RID_MIC, &mic_rid, sizeof(mic_rid), 0);
up(&ai->sem);
@@ -1705,24 +1725,24 @@ static void emmh32_final(emmh32_context *context, u8 digest[4])
static int readBSSListRid(struct airo_info *ai, int first,
BSSListRid *list) {
int rc;
- Cmd cmd;
- Resp rsp;
+ Cmd cmd;
+ Resp rsp;
if (first == 1) {
- if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN;
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd=CMD_LISTBSS;
- if (down_interruptible(&ai->sem))
- return -ERESTARTSYS;
- issuecommand(ai, &cmd, &rsp);
- up(&ai->sem);
- /* Let the command take effect */
- ai->task = current;
- ssleep(3);
- ai->task = NULL;
- }
- rc = PC4500_readrid(ai, first ? RID_BSSLISTFIRST : RID_BSSLISTNEXT,
- list, sizeof(*list), 1);
+ if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd=CMD_LISTBSS;
+ if (down_interruptible(&ai->sem))
+ return -ERESTARTSYS;
+ issuecommand(ai, &cmd, &rsp);
+ up(&ai->sem);
+ /* Let the command take effect */
+ ai->task = current;
+ ssleep(3);
+ ai->task = NULL;
+ }
+ rc = PC4500_readrid(ai, first ? ai->bssListFirst : ai->bssListNext,
+ list, ai->bssListRidLen, 1);
list->len = le16_to_cpu(list->len);
list->index = le16_to_cpu(list->index);
@@ -2112,7 +2132,7 @@ static void airo_end_xmit(struct net_device *dev) {
int fid = priv->xmit.fid;
u32 *fids = priv->fids;
- clear_bit(JOB_XMIT, &priv->flags);
+ clear_bit(JOB_XMIT, &priv->jobs);
clear_bit(FLAG_PENDING_XMIT, &priv->flags);
status = transmit_802_3_packet (priv, fids[fid], skb->data);
up(&priv->sem);
@@ -2162,7 +2182,7 @@ static int airo_start_xmit(struct sk_buff *skb, struct net_device *dev) {
if (down_trylock(&priv->sem) != 0) {
set_bit(FLAG_PENDING_XMIT, &priv->flags);
netif_stop_queue(dev);
- set_bit(JOB_XMIT, &priv->flags);
+ set_bit(JOB_XMIT, &priv->jobs);
wake_up_interruptible(&priv->thr_wait);
} else
airo_end_xmit(dev);
@@ -2177,7 +2197,7 @@ static void airo_end_xmit11(struct net_device *dev) {
int fid = priv->xmit11.fid;
u32 *fids = priv->fids;
- clear_bit(JOB_XMIT11, &priv->flags);
+ clear_bit(JOB_XMIT11, &priv->jobs);
clear_bit(FLAG_PENDING_XMIT11, &priv->flags);
status = transmit_802_11_packet (priv, fids[fid], skb->data);
up(&priv->sem);
@@ -2233,7 +2253,7 @@ static int airo_start_xmit11(struct sk_buff *skb, struct net_device *dev) {
if (down_trylock(&priv->sem) != 0) {
set_bit(FLAG_PENDING_XMIT11, &priv->flags);
netif_stop_queue(dev);
- set_bit(JOB_XMIT11, &priv->flags);
+ set_bit(JOB_XMIT11, &priv->jobs);
wake_up_interruptible(&priv->thr_wait);
} else
airo_end_xmit11(dev);
@@ -2244,7 +2264,7 @@ static void airo_read_stats(struct airo_info *ai) {
StatsRid stats_rid;
u32 *vals = stats_rid.vals;
- clear_bit(JOB_STATS, &ai->flags);
+ clear_bit(JOB_STATS, &ai->jobs);
if (ai->power.event) {
up(&ai->sem);
return;
@@ -2272,10 +2292,10 @@ static struct net_device_stats *airo_get_stats(struct net_device *dev)
{
struct airo_info *local = dev->priv;
- if (!test_bit(JOB_STATS, &local->flags)) {
+ if (!test_bit(JOB_STATS, &local->jobs)) {
/* Get stats out of the card if available */
if (down_trylock(&local->sem) != 0) {
- set_bit(JOB_STATS, &local->flags);
+ set_bit(JOB_STATS, &local->jobs);
wake_up_interruptible(&local->thr_wait);
} else
airo_read_stats(local);
@@ -2290,7 +2310,7 @@ static void airo_set_promisc(struct airo_info *ai) {
memset(&cmd, 0, sizeof(cmd));
cmd.cmd=CMD_SETMODE;
- clear_bit(JOB_PROMISC, &ai->flags);
+ clear_bit(JOB_PROMISC, &ai->jobs);
cmd.parm0=(ai->flags&IFF_PROMISC) ? PROMISC : NOPROMISC;
issuecommand(ai, &cmd, &rsp);
up(&ai->sem);
@@ -2302,7 +2322,7 @@ static void airo_set_multicast_list(struct net_device *dev) {
if ((dev->flags ^ ai->flags) & IFF_PROMISC) {
change_bit(FLAG_PROMISC, &ai->flags);
if (down_trylock(&ai->sem) != 0) {
- set_bit(JOB_PROMISC, &ai->flags);
+ set_bit(JOB_PROMISC, &ai->jobs);
wake_up_interruptible(&ai->thr_wait);
} else
airo_set_promisc(ai);
@@ -2380,7 +2400,7 @@ void stop_airo_card( struct net_device *dev, int freeres )
}
clear_bit(FLAG_REGISTERED, &ai->flags);
}
- set_bit(JOB_DIE, &ai->flags);
+ set_bit(JOB_DIE, &ai->jobs);
kill_proc(ai->thr_pid, SIGTERM, 1);
wait_for_completion(&ai->thr_exited);
@@ -2701,14 +2721,14 @@ static int reset_card( struct net_device *dev , int lock) {
return 0;
}
-#define MAX_NETWORK_COUNT 64
+#define AIRO_MAX_NETWORK_COUNT 64
static int airo_networks_allocate(struct airo_info *ai)
{
if (ai->networks)
return 0;
ai->networks =
- kzalloc(MAX_NETWORK_COUNT * sizeof(BSSListElement),
+ kzalloc(AIRO_MAX_NETWORK_COUNT * sizeof(BSSListElement),
GFP_KERNEL);
if (!ai->networks) {
airo_print_warn(ai->dev->name, "Out of memory allocating beacons");
@@ -2732,11 +2752,33 @@ static void airo_networks_initialize(struct airo_info *ai)
INIT_LIST_HEAD(&ai->network_free_list);
INIT_LIST_HEAD(&ai->network_list);
- for (i = 0; i < MAX_NETWORK_COUNT; i++)
+ for (i = 0; i < AIRO_MAX_NETWORK_COUNT; i++)
list_add_tail(&ai->networks[i].list,
&ai->network_free_list);
}
+static int airo_test_wpa_capable(struct airo_info *ai)
+{
+ int status;
+ CapabilityRid cap_rid;
+ const char *name = ai->dev->name;
+
+ status = readCapabilityRid(ai, &cap_rid, 1);
+ if (status != SUCCESS) return 0;
+
+ /* Only firmware versions 5.30.17 or better can do WPA */
+ if ((cap_rid.softVer > 0x530)
+ || ((cap_rid.softVer == 0x530) && (cap_rid.softSubVer >= 17))) {
+ airo_print_info(name, "WPA is supported.");
+ return 1;
+ }
+
+ /* No WPA support */
+ airo_print_info(name, "WPA unsupported (only firmware versions 5.30.17"
+ " and greater support WPA. Detected %s)", cap_rid.prodVer);
+ return 0;
+}
+
static struct net_device *_init_airo_card( unsigned short irq, int port,
int is_pcmcia, struct pci_dev *pci,
struct device *dmdev )
@@ -2759,6 +2801,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
ai = dev->priv;
ai->wifidev = NULL;
ai->flags = 0;
+ ai->jobs = 0;
ai->dev = dev;
if (pci && (pci->device == 0x5000 || pci->device == 0xa504)) {
airo_print_dbg(dev->name, "Found an MPI350 card");
@@ -2838,6 +2881,18 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
set_bit(FLAG_FLASHING, &ai->flags);
}
+ /* Test for WPA support */
+ if (airo_test_wpa_capable(ai)) {
+ set_bit(FLAG_WPA_CAPABLE, &ai->flags);
+ ai->bssListFirst = RID_WPA_BSSLISTFIRST;
+ ai->bssListNext = RID_WPA_BSSLISTNEXT;
+ ai->bssListRidLen = sizeof(BSSListRid);
+ } else {
+ ai->bssListFirst = RID_BSSLISTFIRST;
+ ai->bssListNext = RID_BSSLISTNEXT;
+ ai->bssListRidLen = sizeof(BSSListRid) - sizeof(BSSListRidExtra);
+ }
+
rc = register_netdev(dev);
if (rc) {
airo_print_err(dev->name, "Couldn't register_netdev");
@@ -2875,7 +2930,7 @@ err_out_irq:
err_out_unlink:
del_airo_dev(dev);
err_out_thr:
- set_bit(JOB_DIE, &ai->flags);
+ set_bit(JOB_DIE, &ai->jobs);
kill_proc(ai->thr_pid, SIGTERM, 1);
wait_for_completion(&ai->thr_exited);
err_out_free:
@@ -2933,7 +2988,7 @@ static void airo_send_event(struct net_device *dev) {
union iwreq_data wrqu;
StatusRid status_rid;
- clear_bit(JOB_EVENT, &ai->flags);
+ clear_bit(JOB_EVENT, &ai->jobs);
PC4500_readrid(ai, RID_STATUS, &status_rid, sizeof(status_rid), 0);
up(&ai->sem);
wrqu.data.length = 0;
@@ -2947,7 +3002,7 @@ static void airo_send_event(struct net_device *dev) {
static void airo_process_scan_results (struct airo_info *ai) {
union iwreq_data wrqu;
- BSSListRid BSSList;
+ BSSListRid bss;
int rc;
BSSListElement * loop_net;
BSSListElement * tmp_net;
@@ -2960,15 +3015,15 @@ static void airo_process_scan_results (struct airo_info *ai) {
}
/* Try to read the first entry of the scan result */
- rc = PC4500_readrid(ai, RID_BSSLISTFIRST, &BSSList, sizeof(BSSList), 0);
- if((rc) || (BSSList.index == 0xffff)) {
+ rc = PC4500_readrid(ai, ai->bssListFirst, &bss, ai->bssListRidLen, 0);
+ if((rc) || (bss.index == 0xffff)) {
/* No scan results */
goto out;
}
/* Read and parse all entries */
tmp_net = NULL;
- while((!rc) && (BSSList.index != 0xffff)) {
+ while((!rc) && (bss.index != 0xffff)) {
/* Grab a network off the free list */
if (!list_empty(&ai->network_free_list)) {
tmp_net = list_entry(ai->network_free_list.next,
@@ -2977,19 +3032,19 @@ static void airo_process_scan_results (struct airo_info *ai) {
}
if (tmp_net != NULL) {
- memcpy(tmp_net, &BSSList, sizeof(tmp_net->bss));
+ memcpy(tmp_net, &bss, sizeof(tmp_net->bss));
list_add_tail(&tmp_net->list, &ai->network_list);
tmp_net = NULL;
}
/* Read next entry */
- rc = PC4500_readrid(ai, RID_BSSLISTNEXT,
- &BSSList, sizeof(BSSList), 0);
+ rc = PC4500_readrid(ai, ai->bssListNext,
+ &bss, ai->bssListRidLen, 0);
}
out:
ai->scan_timeout = 0;
- clear_bit(JOB_SCAN_RESULTS, &ai->flags);
+ clear_bit(JOB_SCAN_RESULTS, &ai->jobs);
up(&ai->sem);
/* Send an empty event to user space.
@@ -3019,10 +3074,10 @@ static int airo_thread(void *data) {
/* make swsusp happy with our thread */
try_to_freeze();
- if (test_bit(JOB_DIE, &ai->flags))
+ if (test_bit(JOB_DIE, &ai->jobs))
break;
- if (ai->flags & JOB_MASK) {
+ if (ai->jobs) {
locked = down_interruptible(&ai->sem);
} else {
wait_queue_t wait;
@@ -3031,16 +3086,16 @@ static int airo_thread(void *data) {
add_wait_queue(&ai->thr_wait, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
- if (ai->flags & JOB_MASK)
+ if (ai->jobs)
break;
if (ai->expires || ai->scan_timeout) {
if (ai->scan_timeout &&
time_after_eq(jiffies,ai->scan_timeout)){
- set_bit(JOB_SCAN_RESULTS,&ai->flags);
+ set_bit(JOB_SCAN_RESULTS, &ai->jobs);
break;
} else if (ai->expires &&
time_after_eq(jiffies,ai->expires)){
- set_bit(JOB_AUTOWEP,&ai->flags);
+ set_bit(JOB_AUTOWEP, &ai->jobs);
break;
}
if (!signal_pending(current)) {
@@ -3069,7 +3124,7 @@ static int airo_thread(void *data) {
if (locked)
continue;
- if (test_bit(JOB_DIE, &ai->flags)) {
+ if (test_bit(JOB_DIE, &ai->jobs)) {
up(&ai->sem);
break;
}
@@ -3079,23 +3134,23 @@ static int airo_thread(void *data) {
continue;
}
- if (test_bit(JOB_XMIT, &ai->flags))
+ if (test_bit(JOB_XMIT, &ai->jobs))
airo_end_xmit(dev);
- else if (test_bit(JOB_XMIT11, &ai->flags))
+ else if (test_bit(JOB_XMIT11, &ai->jobs))
airo_end_xmit11(dev);
- else if (test_bit(JOB_STATS, &ai->flags))
+ else if (test_bit(JOB_STATS, &ai->jobs))
airo_read_stats(ai);
- else if (test_bit(JOB_WSTATS, &ai->flags))
+ else if (test_bit(JOB_WSTATS, &ai->jobs))
airo_read_wireless_stats(ai);
- else if (test_bit(JOB_PROMISC, &ai->flags))
+ else if (test_bit(JOB_PROMISC, &ai->jobs))
airo_set_promisc(ai);
- else if (test_bit(JOB_MIC, &ai->flags))
+ else if (test_bit(JOB_MIC, &ai->jobs))
micinit(ai);
- else if (test_bit(JOB_EVENT, &ai->flags))
+ else if (test_bit(JOB_EVENT, &ai->jobs))
airo_send_event(dev);
- else if (test_bit(JOB_AUTOWEP, &ai->flags))
+ else if (test_bit(JOB_AUTOWEP, &ai->jobs))
timer_func(dev);
- else if (test_bit(JOB_SCAN_RESULTS, &ai->flags))
+ else if (test_bit(JOB_SCAN_RESULTS, &ai->jobs))
airo_process_scan_results(ai);
else /* Shouldn't get here, but we make sure to unlock */
up(&ai->sem);
@@ -3133,7 +3188,7 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
if ( status & EV_MIC ) {
OUT4500( apriv, EVACK, EV_MIC );
if (test_bit(FLAG_MIC_CAPABLE, &apriv->flags)) {
- set_bit(JOB_MIC, &apriv->flags);
+ set_bit(JOB_MIC, &apriv->jobs);
wake_up_interruptible(&apriv->thr_wait);
}
}
@@ -3187,7 +3242,7 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
set_bit(FLAG_UPDATE_MULTI, &apriv->flags);
if (down_trylock(&apriv->sem) != 0) {
- set_bit(JOB_EVENT, &apriv->flags);
+ set_bit(JOB_EVENT, &apriv->jobs);
wake_up_interruptible(&apriv->thr_wait);
} else
airo_send_event(dev);
@@ -5485,7 +5540,7 @@ static void timer_func( struct net_device *dev ) {
up(&apriv->sem);
/* Schedule check to see if the change worked */
- clear_bit(JOB_AUTOWEP, &apriv->flags);
+ clear_bit(JOB_AUTOWEP, &apriv->jobs);
apriv->expires = RUN_AT(HZ*3);
}
@@ -6876,7 +6931,7 @@ static int airo_get_range(struct net_device *dev,
}
range->num_txpower = i;
range->txpower_capa = IW_TXPOW_MWATT;
- range->we_version_source = 12;
+ range->we_version_source = 19;
range->we_version_compiled = WIRELESS_EXT;
range->retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME;
range->retry_flags = IW_RETRY_LIMIT;
@@ -7152,6 +7207,7 @@ static inline char *airo_translate_scan(struct net_device *dev,
u16 capabilities;
char * current_val; /* For rates */
int i;
+ char * buf;
/* First entry *MUST* be the AP MAC address */
iwe.cmd = SIOCGIWAP;
@@ -7238,8 +7294,69 @@ static inline char *airo_translate_scan(struct net_device *dev,
if((current_val - current_ev) > IW_EV_LCP_LEN)
current_ev = current_val;
- /* The other data in the scan result are not really
- * interesting, so for now drop it - Jean II */
+ /* Beacon interval */
+ buf = kmalloc(30, GFP_KERNEL);
+ if (buf) {
+ iwe.cmd = IWEVCUSTOM;
+ sprintf(buf, "bcn_int=%d", bss->beaconInterval);
+ iwe.u.data.length = strlen(buf);
+ current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, buf);
+ kfree(buf);
+ }
+
+ /* Put WPA/RSN Information Elements into the event stream */
+ if (test_bit(FLAG_WPA_CAPABLE, &ai->flags)) {
+ unsigned int num_null_ies = 0;
+ u16 length = sizeof (bss->extra.iep);
+ struct ieee80211_info_element *info_element =
+ (struct ieee80211_info_element *) &bss->extra.iep;
+
+ while ((length >= sizeof(*info_element)) && (num_null_ies < 2)) {
+ if (sizeof(*info_element) + info_element->len > length) {
+ /* Invalid element, don't continue parsing IE */
+ break;
+ }
+
+ switch (info_element->id) {
+ case MFIE_TYPE_SSID:
+ /* Two zero-length SSID elements
+ * mean we're done parsing elements */
+ if (!info_element->len)
+ num_null_ies++;
+ break;
+
+ case MFIE_TYPE_GENERIC:
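+ /* WPA IE: vendor-specific element carrying the 00:50:f2 (Microsoft) OUI, type 1 */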
+ if (info_element->len >= 4 &&
+ info_element->data[0] == 0x00 &&
+ info_element->data[1] == 0x50 &&
+ info_element->data[2] == 0xf2 &&
+ info_element->data[3] == 0x01) {
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = min(info_element->len + 2,
+ MAX_WPA_IE_LEN);
+ current_ev = iwe_stream_add_point(current_ev, end_buf,
+ &iwe, (char *) info_element);
+ }
+ break;
+
+ case MFIE_TYPE_RSN:
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = min(info_element->len + 2,
+ MAX_WPA_IE_LEN);
+ current_ev = iwe_stream_add_point(current_ev, end_buf,
+ &iwe, (char *) info_element);
+ break;
+
+ default:
+ break;
+ }
+
+ length -= sizeof(*info_element) + info_element->len;
+ info_element =
+ (struct ieee80211_info_element *)&info_element->
+ data[info_element->len];
+ }
+ }
return current_ev;
}
@@ -7521,7 +7638,7 @@ static void airo_read_wireless_stats(struct airo_info *local)
u32 *vals = stats_rid.vals;
/* Get stats out of the card */
- clear_bit(JOB_WSTATS, &local->flags);
+ clear_bit(JOB_WSTATS, &local->jobs);
if (local->power.event) {
up(&local->sem);
return;
@@ -7565,10 +7682,10 @@ static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev)
{
struct airo_info *local = dev->priv;
- if (!test_bit(JOB_WSTATS, &local->flags)) {
+ if (!test_bit(JOB_WSTATS, &local->jobs)) {
/* Get stats out of the card if available */
if (down_trylock(&local->sem) != 0) {
- set_bit(JOB_WSTATS, &local->flags);
+ set_bit(JOB_WSTATS, &local->jobs);
wake_up_interruptible(&local->thr_wait);
} else
airo_read_wireless_stats(local);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
index 2e83083935e1..e66fdb1f3cfd 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx.h
@@ -645,7 +645,6 @@ struct bcm43xx_private {
unsigned int irq;
void __iomem *mmio_addr;
- unsigned int mmio_len;
/* Do not use the lock directly. Use the bcm43xx_lock* helper
* functions, to be MMIO-safe. */
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c b/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c
index 35a4fcb6d923..7497fb16076e 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c
@@ -92,7 +92,7 @@ static ssize_t devinfo_read_file(struct file *file, char __user *userbuf,
fappend("subsystem_vendor: 0x%04x subsystem_device: 0x%04x\n",
pci_dev->subsystem_vendor, pci_dev->subsystem_device);
fappend("IRQ: %d\n", bcm->irq);
- fappend("mmio_addr: 0x%p mmio_len: %u\n", bcm->mmio_addr, bcm->mmio_len);
+ fappend("mmio_addr: 0x%p\n", bcm->mmio_addr);
fappend("chip_id: 0x%04x chip_rev: 0x%02x\n", bcm->chip_id, bcm->chip_rev);
if ((bcm->core_80211[0].rev >= 3) && (bcm43xx_read32(bcm, 0x0158) & (1 << 16)))
fappend("Radio disabled by hardware!\n");
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 7ed18cad29f7..736dde96c4a3 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -128,13 +128,15 @@ MODULE_PARM_DESC(fwpostfix, "Postfix for .fw files. Useful for debugging.");
static struct pci_device_id bcm43xx_pci_tbl[] = {
/* Broadcom 4303 802.11b */
{ PCI_VENDOR_ID_BROADCOM, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- /* Broadcom 4307 802.11b */
+ /* Broadcom 4307 802.11b */
{ PCI_VENDOR_ID_BROADCOM, 0x4307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- /* Broadcom 4318 802.11b/g */
+ /* Broadcom 4318 802.11b/g */
{ PCI_VENDOR_ID_BROADCOM, 0x4318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ /* Broadcom 4319 802.11a/b/g */
+ { PCI_VENDOR_ID_BROADCOM, 0x4319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
/* Broadcom 4306 802.11b/g */
{ PCI_VENDOR_ID_BROADCOM, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- /* Broadcom 4306 802.11a */
+ /* Broadcom 4306 802.11a */
// { PCI_VENDOR_ID_BROADCOM, 0x4321, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
/* Broadcom 4309 802.11a/b/g */
{ PCI_VENDOR_ID_BROADCOM, 0x4324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
@@ -3299,8 +3301,7 @@ static void bcm43xx_detach_board(struct bcm43xx_private *bcm)
bcm43xx_chipset_detach(bcm);
/* Do _not_ access the chip, after it is detached. */
- iounmap(bcm->mmio_addr);
-
+ pci_iounmap(pci_dev, bcm->mmio_addr);
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
@@ -3390,40 +3391,26 @@ static int bcm43xx_attach_board(struct bcm43xx_private *bcm)
struct net_device *net_dev = bcm->net_dev;
int err;
int i;
- unsigned long mmio_start, mmio_flags, mmio_len;
u32 coremask;
err = pci_enable_device(pci_dev);
if (err) {
- printk(KERN_ERR PFX "unable to wake up pci device (%i)\n", err);
+ printk(KERN_ERR PFX "pci_enable_device() failed\n");
goto out;
}
- mmio_start = pci_resource_start(pci_dev, 0);
- mmio_flags = pci_resource_flags(pci_dev, 0);
- mmio_len = pci_resource_len(pci_dev, 0);
- if (!(mmio_flags & IORESOURCE_MEM)) {
- printk(KERN_ERR PFX
- "%s, region #0 not an MMIO resource, aborting\n",
- pci_name(pci_dev));
- err = -ENODEV;
- goto err_pci_disable;
- }
err = pci_request_regions(pci_dev, KBUILD_MODNAME);
if (err) {
- printk(KERN_ERR PFX
- "could not access PCI resources (%i)\n", err);
+ printk(KERN_ERR PFX "pci_request_regions() failed\n");
goto err_pci_disable;
}
/* enable PCI bus-mastering */
pci_set_master(pci_dev);
- bcm->mmio_addr = ioremap(mmio_start, mmio_len);
+ bcm->mmio_addr = pci_iomap(pci_dev, 0, ~0UL);
if (!bcm->mmio_addr) {
- printk(KERN_ERR PFX "%s: cannot remap MMIO, aborting\n",
- pci_name(pci_dev));
+ printk(KERN_ERR PFX "pci_iomap() failed\n");
err = -EIO;
goto err_pci_release;
}
- bcm->mmio_len = mmio_len;
net_dev->base_addr = (unsigned long)bcm->mmio_addr;
bcm43xx_pci_read_config16(bcm, PCI_SUBSYSTEM_VENDOR_ID,
@@ -3517,7 +3504,7 @@ err_80211_unwind:
err_chipset_detach:
bcm43xx_chipset_detach(bcm);
err_iounmap:
- iounmap(bcm->mmio_addr);
+ pci_iounmap(pci_dev, bcm->mmio_addr);
err_pci_release:
pci_release_regions(pci_dev);
err_pci_disable:
@@ -3568,7 +3555,7 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
unsigned long flags;
int keyidx;
- dprintk(KERN_INFO PFX "set security called\n");
+ dprintk(KERN_INFO PFX "set security called");
bcm43xx_lock_mmio(bcm, flags);
@@ -3581,24 +3568,25 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
if (sec->flags & SEC_ACTIVE_KEY) {
secinfo->active_key = sec->active_key;
- dprintk(KERN_INFO PFX " .active_key = %d\n", sec->active_key);
+ dprintk(", .active_key = %d", sec->active_key);
}
if (sec->flags & SEC_UNICAST_GROUP) {
secinfo->unicast_uses_group = sec->unicast_uses_group;
- dprintk(KERN_INFO PFX " .unicast_uses_group = %d\n", sec->unicast_uses_group);
+ dprintk(", .unicast_uses_group = %d", sec->unicast_uses_group);
}
if (sec->flags & SEC_LEVEL) {
secinfo->level = sec->level;
- dprintk(KERN_INFO PFX " .level = %d\n", sec->level);
+ dprintk(", .level = %d", sec->level);
}
if (sec->flags & SEC_ENABLED) {
secinfo->enabled = sec->enabled;
- dprintk(KERN_INFO PFX " .enabled = %d\n", sec->enabled);
+ dprintk(", .enabled = %d", sec->enabled);
}
if (sec->flags & SEC_ENCRYPT) {
secinfo->encrypt = sec->encrypt;
- dprintk(KERN_INFO PFX " .encrypt = %d\n", sec->encrypt);
+ dprintk(", .encrypt = %d", sec->encrypt);
}
+ dprintk("\n");
if (bcm->initialized && !bcm->ieee->host_encrypt) {
if (secinfo->enabled) {
/* upload WEP keys to hardware */
diff --git a/drivers/net/wireless/hermes.c b/drivers/net/wireless/hermes.c
index 346c6febb033..2aa2f389c0d5 100644
--- a/drivers/net/wireless/hermes.c
+++ b/drivers/net/wireless/hermes.c
@@ -121,12 +121,6 @@ void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing)
hw->iobase = address;
hw->reg_spacing = reg_spacing;
hw->inten = 0x0;
-
-#ifdef HERMES_DEBUG_BUFFER
- hw->dbufp = 0;
- memset(&hw->dbuf, 0xff, sizeof(hw->dbuf));
- memset(&hw->profile, 0, sizeof(hw->profile));
-#endif
}
int hermes_init(hermes_t *hw)
@@ -347,19 +341,6 @@ static int hermes_bap_seek(hermes_t *hw, int bap, u16 id, u16 offset)
reg = hermes_read_reg(hw, oreg);
}
-#ifdef HERMES_DEBUG_BUFFER
- hw->profile[HERMES_BAP_BUSY_TIMEOUT - k]++;
-
- if (k < HERMES_BAP_BUSY_TIMEOUT) {
- struct hermes_debug_entry *e =
- &hw->dbuf[(hw->dbufp++) % HERMES_DEBUG_BUFSIZE];
- e->bap = bap;
- e->id = id;
- e->offset = offset;
- e->cycles = HERMES_BAP_BUSY_TIMEOUT - k;
- }
-#endif
-
if (reg & HERMES_OFFSET_BUSY)
return -ETIMEDOUT;
@@ -419,8 +400,7 @@ int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
}
/* Write a block of data to the chip's buffer, via the
- * BAP. Synchronization/serialization is the caller's problem. len
- * must be even.
+ * BAP. Synchronization/serialization is the caller's problem.
*
* Returns: < 0 on internal failure (errno), 0 on success, > 0 on error from firmware
*/
@@ -430,7 +410,7 @@ int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
int err = 0;
- if ( (len < 0) || (len % 2) )
+ if (len < 0)
return -EINVAL;
err = hermes_bap_seek(hw, bap, id, offset);
@@ -438,49 +418,12 @@ int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
goto out;
/* Actually do the transfer */
- hermes_write_words(hw, dreg, buf, len/2);
+ hermes_write_bytes(hw, dreg, buf, len);
out:
return err;
}
-/* Write a block of data to the chip's buffer with padding if
- * neccessary, via the BAP. Synchronization/serialization is the
- * caller's problem. len must be even.
- *
- * Returns: < 0 on internal failure (errno), 0 on success, > 0 on error from firmware
- */
-int hermes_bap_pwrite_pad(hermes_t *hw, int bap, const void *buf, unsigned data_len, int len,
- u16 id, u16 offset)
-{
- int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
- int err = 0;
-
- if (len < 0 || len % 2 || data_len > len)
- return -EINVAL;
-
- err = hermes_bap_seek(hw, bap, id, offset);
- if (err)
- goto out;
-
- /* Transfer all the complete words of data */
- hermes_write_words(hw, dreg, buf, data_len/2);
- /* If there is an odd byte left over pad and transfer it */
- if (data_len & 1) {
- u8 end[2];
- end[1] = 0;
- end[0] = ((unsigned char *)buf)[data_len - 1];
- hermes_write_words(hw, dreg, end, 1);
- data_len ++;
- }
- /* Now send zeros for the padding */
- if (data_len < len)
- hermes_clear_words(hw, dreg, (len - data_len) / 2);
- /* Complete */
- out:
- return err;
-}
-
/* Read a Length-Type-Value record from the card.
*
* If length is NULL, we ignore the length read from the card, and
@@ -553,7 +496,7 @@ int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
count = length - 1;
- hermes_write_words(hw, dreg, value, count);
+ hermes_write_bytes(hw, dreg, value, count << 1);
err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS | HERMES_CMD_WRITE,
rid, NULL);
@@ -568,7 +511,6 @@ EXPORT_SYMBOL(hermes_allocate);
EXPORT_SYMBOL(hermes_bap_pread);
EXPORT_SYMBOL(hermes_bap_pwrite);
-EXPORT_SYMBOL(hermes_bap_pwrite_pad);
EXPORT_SYMBOL(hermes_read_ltv);
EXPORT_SYMBOL(hermes_write_ltv);
diff --git a/drivers/net/wireless/hermes.h b/drivers/net/wireless/hermes.h
index 7644f72a9f4e..8e3f0e3edb58 100644
--- a/drivers/net/wireless/hermes.h
+++ b/drivers/net/wireless/hermes.h
@@ -328,16 +328,6 @@ struct hermes_multicast {
u8 addr[HERMES_MAX_MULTICAST][ETH_ALEN];
} __attribute__ ((packed));
-// #define HERMES_DEBUG_BUFFER 1
-#define HERMES_DEBUG_BUFSIZE 4096
-struct hermes_debug_entry {
- int bap;
- u16 id, offset;
- int cycles;
-};
-
-#ifdef __KERNEL__
-
/* Timeouts */
#define HERMES_BAP_BUSY_TIMEOUT (10000) /* In iterations of ~1us */
@@ -347,14 +337,7 @@ typedef struct hermes {
int reg_spacing;
#define HERMES_16BIT_REGSPACING 0
#define HERMES_32BIT_REGSPACING 1
-
u16 inten; /* Which interrupts should be enabled? */
-
-#ifdef HERMES_DEBUG_BUFFER
- struct hermes_debug_entry dbuf[HERMES_DEBUG_BUFSIZE];
- unsigned long dbufp;
- unsigned long profile[HERMES_BAP_BUSY_TIMEOUT+1];
-#endif
} hermes_t;
/* Register access convenience macros */
@@ -376,8 +359,6 @@ int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
u16 id, u16 offset);
int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
u16 id, u16 offset);
-int hermes_bap_pwrite_pad(hermes_t *hw, int bap, const void *buf,
- unsigned data_len, int len, u16 id, u16 offset);
int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned buflen,
u16 *length, void *buf);
int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
@@ -425,10 +406,13 @@ static inline void hermes_read_words(struct hermes *hw, int off, void *buf, unsi
ioread16_rep(hw->iobase + off, buf, count);
}
-static inline void hermes_write_words(struct hermes *hw, int off, const void *buf, unsigned count)
+static inline void hermes_write_bytes(struct hermes *hw, int off,
+ const char *buf, unsigned count)
{
off = off << hw->reg_spacing;
- iowrite16_rep(hw->iobase + off, buf, count);
+ iowrite16_rep(hw->iobase + off, buf, count >> 1);
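+ /* Any odd trailing byte is written with a single 8-bit access */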
+ if (unlikely(count & 1))
+ iowrite8(buf[count - 1], hw->iobase + off);
}
static inline void hermes_clear_words(struct hermes *hw, int off, unsigned count)
@@ -462,21 +446,4 @@ static inline int hermes_write_wordrec(hermes_t *hw, int bap, u16 rid, u16 word)
return HERMES_WRITE_RECORD(hw, bap, rid, &rec);
}
-#else /* ! __KERNEL__ */
-
-/* These are provided for the benefit of userspace drivers and testing programs
- which use ioperm() or iopl() */
-
-#define hermes_read_reg(base, off) (inw((base) + (off)))
-#define hermes_write_reg(base, off, val) (outw((val), (base) + (off)))
-
-#define hermes_read_regn(base, name) (hermes_read_reg((base), HERMES_##name))
-#define hermes_write_regn(base, name, val) (hermes_write_reg((base), HERMES_##name, (val)))
-
-/* Note that for the next two, the count is in 16-bit words, not bytes */
-#define hermes_read_data(base, off, buf, count) (insw((base) + (off), (buf), (count)))
-#define hermes_write_data(base, off, buf, count) (outsw((base) + (off), (buf), (count)))
-
-#endif /* ! __KERNEL__ */
-
#endif /* _HERMES_H */
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c
index 06a5214145e3..4a5be70c0419 100644
--- a/drivers/net/wireless/hostap/hostap_80211_tx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_tx.c
@@ -534,5 +534,4 @@ int hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
-EXPORT_SYMBOL(hostap_dump_tx_80211);
EXPORT_SYMBOL(hostap_master_start_xmit);
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 06c3fa32b310..ba13125024cb 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -3276,17 +3276,6 @@ EXPORT_SYMBOL(hostap_init_data);
EXPORT_SYMBOL(hostap_init_ap_proc);
EXPORT_SYMBOL(hostap_free_data);
EXPORT_SYMBOL(hostap_check_sta_fw_version);
-EXPORT_SYMBOL(hostap_handle_sta_tx);
-EXPORT_SYMBOL(hostap_handle_sta_release);
EXPORT_SYMBOL(hostap_handle_sta_tx_exc);
-EXPORT_SYMBOL(hostap_update_sta_ps);
-EXPORT_SYMBOL(hostap_handle_sta_rx);
-EXPORT_SYMBOL(hostap_is_sta_assoc);
-EXPORT_SYMBOL(hostap_is_sta_authorized);
-EXPORT_SYMBOL(hostap_add_sta);
-EXPORT_SYMBOL(hostap_update_rates);
-EXPORT_SYMBOL(hostap_add_wds_links);
-EXPORT_SYMBOL(hostap_wds_link_oper);
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-EXPORT_SYMBOL(hostap_deauth_all_stas);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 55bed923fbe9..db03dc2646df 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -881,6 +881,12 @@ static struct pcmcia_device_id hostap_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID12(
"ZoomAir 11Mbps High", "Rate wireless Networking",
0x273fe3db, 0x32a1eaee),
+ PCMCIA_DEVICE_PROD_ID123(
+ "Pretec", "CompactWLAN Card 802.11b", "2.5",
+ 0x1cadd3e5, 0xe697636c, 0x7a5bfcf1),
+ PCMCIA_DEVICE_PROD_ID123(
+ "U.S. Robotics", "IEEE 802.11b PC-CARD", "Version 01.02",
+ 0xc7b8df9d, 0x1700d087, 0x4b74baa0),
PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, hostap_cs_ids);
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 8dd4c4446a64..93786f4218f0 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -1125,11 +1125,9 @@ EXPORT_SYMBOL(hostap_set_auth_algs);
EXPORT_SYMBOL(hostap_dump_rx_header);
EXPORT_SYMBOL(hostap_dump_tx_header);
EXPORT_SYMBOL(hostap_80211_header_parse);
-EXPORT_SYMBOL(hostap_80211_prism_header_parse);
EXPORT_SYMBOL(hostap_80211_get_hdrlen);
EXPORT_SYMBOL(hostap_get_stats);
EXPORT_SYMBOL(hostap_setup_dev);
-EXPORT_SYMBOL(hostap_proc);
EXPORT_SYMBOL(hostap_set_multicast_list_queue);
EXPORT_SYMBOL(hostap_set_hostapd);
EXPORT_SYMBOL(hostap_set_hostapd_sta);
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index bca89cff85a6..39f82f219749 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -33,7 +33,44 @@
#include "ipw2200.h"
#include <linux/version.h>
-#define IPW2200_VERSION "git-1.1.1"
+
+#ifndef KBUILD_EXTMOD
+#define VK "k"
+#else
+#define VK
+#endif
+
+#ifdef CONFIG_IPW2200_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+#ifdef CONFIG_IPW2200_MONITOR
+#define VM "m"
+#else
+#define VM
+#endif
+
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+#define VP "p"
+#else
+#define VP
+#endif
+
+#ifdef CONFIG_IPW2200_RADIOTAP
+#define VR "r"
+#else
+#define VR
+#endif
+
+#ifdef CONFIG_IPW2200_QOS
+#define VQ "q"
+#else
+#define VQ
+#endif
+
+#define IPW2200_VERSION "1.1.2" VK VD VM VP VR VQ
#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
#define DRV_VERSION IPW2200_VERSION
@@ -46,7 +83,9 @@ MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
static int cmdlog = 0;
+#ifdef CONFIG_IPW2200_DEBUG
static int debug = 0;
+#endif
static int channel = 0;
static int mode = 0;
@@ -61,8 +100,14 @@ static int roaming = 1;
static const char ipw_modes[] = {
'a', 'b', 'g', '?'
};
+static int antenna = CFG_SYS_ANTENNA_BOTH;
-#ifdef CONFIG_IPW_QOS
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
+#endif
+
+
+#ifdef CONFIG_IPW2200_QOS
static int qos_enable = 0;
static int qos_burst_enable = 0;
static int qos_no_ack_mask = 0;
@@ -126,7 +171,7 @@ static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_q
*qos_param);
static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
*qos_param);
-#endif /* CONFIG_IPW_QOS */
+#endif /* CONFIG_IPW2200_QOS */
static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
static void ipw_remove_current_network(struct ipw_priv *priv);
@@ -1269,6 +1314,105 @@ static ssize_t show_cmd_log(struct device *d,
static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+static void ipw_prom_free(struct ipw_priv *priv);
+static int ipw_prom_alloc(struct ipw_priv *priv);
+static ssize_t store_rtap_iface(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ipw_priv *priv = dev_get_drvdata(d);
+ int rc = 0;
+
+ if (count < 1)
+ return -EINVAL;
+
+ switch (buf[0]) {
+ case '0':
+ if (!rtap_iface)
+ return count;
+
+ if (netif_running(priv->prom_net_dev)) {
+ IPW_WARNING("Interface is up. Cannot unregister.\n");
+ return count;
+ }
+
+ ipw_prom_free(priv);
+ rtap_iface = 0;
+ break;
+
+ case '1':
+ if (rtap_iface)
+ return count;
+
+ rc = ipw_prom_alloc(priv);
+ if (!rc)
+ rtap_iface = 1;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (rc) {
+ IPW_ERROR("Failed to register promiscuous network "
+ "device (error %d).\n", rc);
+ }
+
+ return count;
+}
+
+static ssize_t show_rtap_iface(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipw_priv *priv = dev_get_drvdata(d);
+ if (rtap_iface)
+ return sprintf(buf, "%s", priv->prom_net_dev->name);
+ else {
+ buf[0] = '-';
+ buf[1] = '1';
+ buf[2] = '\0';
+ return 3;
+ }
+}
+
+static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
+ store_rtap_iface);
+
+static ssize_t store_rtap_filter(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ipw_priv *priv = dev_get_drvdata(d);
+
+ if (!priv->prom_priv) {
+ IPW_ERROR("Attempting to set filter without "
+ "rtap_iface enabled.\n");
+ return -EPERM;
+ }
+
+ priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
+
+ IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
+ BIT_ARG16(priv->prom_priv->filter));
+
+ return count;
+}
+
+static ssize_t show_rtap_filter(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipw_priv *priv = dev_get_drvdata(d);
+ return sprintf(buf, "0x%04X",
+ priv->prom_priv ? priv->prom_priv->filter : 0);
+}
+
+static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
+ store_rtap_filter);
+#endif
+
static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
char *buf)
{
@@ -2025,16 +2169,11 @@ static int ipw_send_host_complete(struct ipw_priv *priv)
return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
}
-static int ipw_send_system_config(struct ipw_priv *priv,
- struct ipw_sys_config *config)
+static int ipw_send_system_config(struct ipw_priv *priv)
{
- if (!priv || !config) {
- IPW_ERROR("Invalid args\n");
- return -1;
- }
-
- return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG, sizeof(*config),
- config);
+ return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
+ sizeof(priv->sys_config),
+ &priv->sys_config);
}
static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
@@ -3104,10 +3243,10 @@ static int ipw_reset_nic(struct ipw_priv *priv)
struct ipw_fw {
- u32 ver;
- u32 boot_size;
- u32 ucode_size;
- u32 fw_size;
+ __le32 ver;
+ __le32 boot_size;
+ __le32 ucode_size;
+ __le32 fw_size;
u8 data[0];
};
@@ -3131,8 +3270,8 @@ static int ipw_get_fw(struct ipw_priv *priv,
fw = (void *)(*raw)->data;
- if ((*raw)->size < sizeof(*fw) +
- fw->boot_size + fw->ucode_size + fw->fw_size) {
+ if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
+ le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
IPW_ERROR("%s is too small or corrupt (%zd)\n",
name, (*raw)->size);
return -EINVAL;
@@ -3237,8 +3376,9 @@ static int ipw_load(struct ipw_priv *priv)
fw = (void *)raw->data;
boot_img = &fw->data[0];
- ucode_img = &fw->data[fw->boot_size];
- fw_img = &fw->data[fw->boot_size + fw->ucode_size];
+ ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
+ fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
+ le32_to_cpu(fw->ucode_size)];
if (rc < 0)
goto error;
@@ -3272,7 +3412,7 @@ static int ipw_load(struct ipw_priv *priv)
IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
/* DMA the initial boot firmware into the device */
- rc = ipw_load_firmware(priv, boot_img, fw->boot_size);
+ rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
if (rc < 0) {
IPW_ERROR("Unable to load boot firmware: %d\n", rc);
goto error;
@@ -3294,7 +3434,7 @@ static int ipw_load(struct ipw_priv *priv)
ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
/* DMA the ucode into the device */
- rc = ipw_load_ucode(priv, ucode_img, fw->ucode_size);
+ rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
if (rc < 0) {
IPW_ERROR("Unable to load ucode: %d\n", rc);
goto error;
@@ -3304,7 +3444,7 @@ static int ipw_load(struct ipw_priv *priv)
ipw_stop_nic(priv);
/* DMA bss firmware into the device */
- rc = ipw_load_firmware(priv, fw_img, fw->fw_size);
+ rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
if (rc < 0) {
IPW_ERROR("Unable to load firmware: %d\n", rc);
goto error;
@@ -3700,7 +3840,17 @@ static void ipw_bg_disassociate(void *data)
static void ipw_system_config(void *data)
{
struct ipw_priv *priv = data;
- ipw_send_system_config(priv, &priv->sys_config);
+
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+ if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
+ priv->sys_config.accept_all_data_frames = 1;
+ priv->sys_config.accept_non_directed_frames = 1;
+ priv->sys_config.accept_all_mgmt_bcpr = 1;
+ priv->sys_config.accept_all_mgmt_frames = 1;
+ }
+#endif
+
+ ipw_send_system_config(priv);
}
struct ipw_status_code {
@@ -3771,6 +3921,13 @@ static void inline average_init(struct average *avg)
memset(avg, 0, sizeof(*avg));
}
+#define DEPTH_RSSI 8
+#define DEPTH_NOISE 16
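+/* Running exponential average: weight the previous average by (depth-1)/depth */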
+static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
+{
+ return ((depth-1)*prev_avg + val)/depth;
+}
+
static void average_add(struct average *avg, s16 val)
{
avg->sum -= avg->entries[avg->pos];
@@ -3800,8 +3957,8 @@ static void ipw_reset_stats(struct ipw_priv *priv)
priv->quality = 0;
average_init(&priv->average_missed_beacons);
- average_init(&priv->average_rssi);
- average_init(&priv->average_noise);
+ priv->exp_avg_rssi = -60;
+ priv->exp_avg_noise = -85 + 0x100;
priv->last_rate = 0;
priv->last_missed_beacons = 0;
@@ -4008,7 +4165,7 @@ static void ipw_gather_stats(struct ipw_priv *priv)
IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
tx_quality, tx_failures_delta, tx_packets_delta);
- rssi = average_value(&priv->average_rssi);
+ rssi = priv->exp_avg_rssi;
signal_quality =
(100 *
(priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
@@ -4185,7 +4342,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
queue_work(priv->workqueue,
&priv->system_config);
-#ifdef CONFIG_IPW_QOS
+#ifdef CONFIG_IPW2200_QOS
#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl))
if ((priv->status & STATUS_AUTH) &&
@@ -4482,6 +4639,24 @@ static void ipw_rx_notification(struct ipw_priv *priv,
&& priv->status & STATUS_ASSOCIATED)
queue_delayed_work(priv->workqueue,
&priv->request_scan, HZ);
+
+ /* Send an empty event to user space.
+ * We don't send the received data on the event because
+ * it would require us to do complex transcoding, and
+ * we want to minimise the work done in the irq handler.
+ * Use a request to extract the data.
+ * Also, we generate this even for any scan, regardless
+ * of how the scan was initiated. User space can just
+ * sync on periodic scan to get fresh data...
+ * Jean II */
+ if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) {
+ union iwreq_data wrqu;
+
+ wrqu.data.length = 0;
+ wrqu.data.flags = 0;
+ wireless_send_event(priv->net_dev, SIOCGIWSCAN,
+ &wrqu, NULL);
+ }
break;
}
@@ -4577,11 +4752,10 @@ static void ipw_rx_notification(struct ipw_priv *priv,
case HOST_NOTIFICATION_NOISE_STATS:{
if (notif->size == sizeof(u32)) {
- priv->last_noise =
- (u8) (le32_to_cpu(notif->u.noise.value) &
- 0xff);
- average_add(&priv->average_noise,
- priv->last_noise);
+ priv->exp_avg_noise =
+ exponential_average(priv->exp_avg_noise,
+ (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
+ DEPTH_NOISE);
break;
}
@@ -6170,8 +6344,6 @@ static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
{
/* make sure WPA is enabled */
ipw_wpa_enable(priv, 1);
-
- ipw_disassociate(priv);
}
static int ipw_set_rsn_capa(struct ipw_priv *priv,
@@ -6365,6 +6537,7 @@ static int ipw_wx_set_auth(struct net_device *dev,
case IW_AUTH_WPA_ENABLED:
ret = ipw_wpa_enable(priv, param->value);
+ ipw_disassociate(priv);
break;
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
@@ -6506,7 +6679,7 @@ static int ipw_wx_set_mlme(struct net_device *dev,
return 0;
}
-#ifdef CONFIG_IPW_QOS
+#ifdef CONFIG_IPW2200_QOS
/* QoS */
/*
@@ -6853,61 +7026,55 @@ static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
return from_priority_to_tx_queue[priority] - 1;
}
-/*
-* add QoS parameter to the TX command
-*/
-static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
- u16 priority,
- struct tfd_data *tfd, u8 unicast)
+static int ipw_is_qos_active(struct net_device *dev,
+ struct sk_buff *skb)
{
- int ret = 0;
- int tx_queue_id = 0;
+ struct ipw_priv *priv = ieee80211_priv(dev);
struct ieee80211_qos_data *qos_data = NULL;
int active, supported;
- unsigned long flags;
+ u8 *daddr = skb->data + ETH_ALEN;
+ int unicast = !is_multicast_ether_addr(daddr);
if (!(priv->status & STATUS_ASSOCIATED))
return 0;
qos_data = &priv->assoc_network->qos_data;
- spin_lock_irqsave(&priv->ieee->lock, flags);
-
if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
if (unicast == 0)
qos_data->active = 0;
else
qos_data->active = qos_data->supported;
}
-
active = qos_data->active;
supported = qos_data->supported;
-
- spin_unlock_irqrestore(&priv->ieee->lock, flags);
-
IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
"unicast %d\n",
priv->qos_data.qos_enable, active, supported, unicast);
- if (active && priv->qos_data.qos_enable) {
- ret = from_priority_to_tx_queue[priority];
- tx_queue_id = ret - 1;
- IPW_DEBUG_QOS("QoS packet priority is %d \n", priority);
- if (priority <= 7) {
- tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
- tfd->tfd.tfd_26.mchdr.qos_ctrl = priority;
- tfd->tfd.tfd_26.mchdr.frame_ctl |=
- IEEE80211_STYPE_QOS_DATA;
-
- if (priv->qos_data.qos_no_ack_mask &
- (1UL << tx_queue_id)) {
- tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
- tfd->tfd.tfd_26.mchdr.qos_ctrl |=
- CTRL_QOS_NO_ACK;
- }
- }
- }
+ if (active && priv->qos_data.qos_enable)
+ return 1;
- return ret;
+ return 0;
+
+}
+/*
+* add QoS parameter to the TX command
+*/
+static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
+ u16 priority,
+ struct tfd_data *tfd)
+{
+ int tx_queue_id = 0;
+
+
+ tx_queue_id = from_priority_to_tx_queue[priority] - 1;
+ tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
+
+ if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
+ tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
+ tfd->tfd.tfd_26.mchdr.qos_ctrl |= CTRL_QOS_NO_ACK;
+ }
+ return 0;
}
/*
@@ -6977,7 +7144,7 @@ static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos
qos_param);
}
-#endif /* CONFIG_IPW_QOS */
+#endif /* CONFIG_IPW2200_QOS */
static int ipw_associate_network(struct ipw_priv *priv,
struct ieee80211_network *network,
@@ -7116,7 +7283,7 @@ static int ipw_associate_network(struct ipw_priv *priv,
else
priv->sys_config.answer_broadcast_ssid_probe = 0;
- err = ipw_send_system_config(priv, &priv->sys_config);
+ err = ipw_send_system_config(priv);
if (err) {
IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
return err;
@@ -7141,7 +7308,7 @@ static int ipw_associate_network(struct ipw_priv *priv,
priv->assoc_network = network;
-#ifdef CONFIG_IPW_QOS
+#ifdef CONFIG_IPW2200_QOS
ipw_qos_association(priv, network);
#endif
@@ -7415,7 +7582,7 @@ static void ipw_handle_data_packet(struct ipw_priv *priv,
}
}
-#ifdef CONFIG_IEEE80211_RADIOTAP
+#ifdef CONFIG_IPW2200_RADIOTAP
static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
struct ipw_rx_mem_buffer *rxb,
struct ieee80211_rx_stats *stats)
@@ -7432,15 +7599,7 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
/* Magic struct that slots into the radiotap header -- no reason
* to build this manually element by element, we can write it much
* more efficiently than we can parse it. ORDER MATTERS HERE */
- struct ipw_rt_hdr {
- struct ieee80211_radiotap_header rt_hdr;
- u8 rt_flags; /* radiotap packet flags */
- u8 rt_rate; /* rate in 500kb/s */
- u16 rt_channel; /* channel in mhz */
- u16 rt_chbitmask; /* channel bitfield */
- s8 rt_dbmsignal; /* signal in dbM, kluged to signed */
- u8 rt_antenna; /* antenna number */
- } *ipw_rt;
+ struct ipw_rt_hdr *ipw_rt;
short len = le16_to_cpu(pkt->u.frame.length);
@@ -7494,9 +7653,11 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
/* Big bitfield of all the fields we provide in radiotap */
ipw_rt->rt_hdr.it_present =
((1 << IEEE80211_RADIOTAP_FLAGS) |
+ (1 << IEEE80211_RADIOTAP_TSFT) |
(1 << IEEE80211_RADIOTAP_RATE) |
(1 << IEEE80211_RADIOTAP_CHANNEL) |
(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
+ (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
(1 << IEEE80211_RADIOTAP_ANTENNA));
/* Zero the flags, we'll add to them as we go */
@@ -7582,6 +7743,217 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
}
#endif
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+#define ieee80211_is_probe_response(fc) \
+ ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
+ (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
+
+#define ieee80211_is_management(fc) \
+ ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
+
+#define ieee80211_is_control(fc) \
+ ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
+
+#define ieee80211_is_data(fc) \
+ ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
+
+#define ieee80211_is_assoc_request(fc) \
+ ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
+
+#define ieee80211_is_reassoc_request(fc) \
+ ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
+
+static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
+ struct ipw_rx_mem_buffer *rxb,
+ struct ieee80211_rx_stats *stats)
+{
+ struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
+ struct ipw_rx_frame *frame = &pkt->u.frame;
+ struct ipw_rt_hdr *ipw_rt;
+
+ /* First cache any information we need before we overwrite
+ * the information provided in the skb from the hardware */
+ struct ieee80211_hdr *hdr;
+ u16 channel = frame->received_channel;
+ u8 phy_flags = frame->antennaAndPhy;
+ s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
+ s8 noise = frame->noise;
+ u8 rate = frame->rate;
+ short len = le16_to_cpu(pkt->u.frame.length);
+ u64 tsf = 0;
+ struct sk_buff *skb;
+ int hdr_only = 0;
+ u16 filter = priv->prom_priv->filter;
+
+ /* If the filter is set to not include Rx frames then return */
+ if (filter & IPW_PROM_NO_RX)
+ return;
+
+ /* We received data from the HW, so stop the watchdog */
+ priv->prom_net_dev->trans_start = jiffies;
+
+ if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
+ priv->prom_priv->ieee->stats.rx_errors++;
+ IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
+ return;
+ }
+
+ /* We only process data packets if the interface is open */
+ if (unlikely(!netif_running(priv->prom_net_dev))) {
+ priv->prom_priv->ieee->stats.rx_dropped++;
+ IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
+ return;
+ }
+
+ /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
+ * that now */
+ if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
+ /* FIXME: Should alloc bigger skb instead */
+ priv->prom_priv->ieee->stats.rx_dropped++;
+ IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
+ return;
+ }
+
+ hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
+ if (ieee80211_is_management(hdr->frame_ctl)) {
+ if (filter & IPW_PROM_NO_MGMT)
+ return;
+ if (filter & IPW_PROM_MGMT_HEADER_ONLY)
+ hdr_only = 1;
+ } else if (ieee80211_is_control(hdr->frame_ctl)) {
+ if (filter & IPW_PROM_NO_CTL)
+ return;
+ if (filter & IPW_PROM_CTL_HEADER_ONLY)
+ hdr_only = 1;
+ } else if (ieee80211_is_data(hdr->frame_ctl)) {
+ if (filter & IPW_PROM_NO_DATA)
+ return;
+ if (filter & IPW_PROM_DATA_HEADER_ONLY)
+ hdr_only = 1;
+ }
+
+ /* Copy the SKB since this is for the promiscuous side */
+ skb = skb_copy(rxb->skb, GFP_ATOMIC);
+ if (skb == NULL) {
+ IPW_ERROR("skb_clone failed for promiscuous copy.\n");
+ return;
+ }
+
+ /* copy the frame data to write after where the radiotap header goes */
+ ipw_rt = (void *)skb->data;
+
+ if (hdr_only)
+ len = ieee80211_get_hdrlen(hdr->frame_ctl);
+
+ memcpy(ipw_rt->payload, hdr, len);
+
+ /* Zero the radiotap static buffer ... We only need to zero the bytes
+ * NOT part of our real header, saves a little time.
+ *
+ * No longer necessary since we fill in all our data. Purge before
+ * merging patch officially.
+ * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
+ * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
+ */
+
+ ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
+ ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
+ ipw_rt->rt_hdr.it_len = sizeof(*ipw_rt); /* total header+data */
+
+ /* Set the size of the skb to the size of the frame */
+ skb_put(skb, ipw_rt->rt_hdr.it_len + len);
+
+ /* Big bitfield of all the fields we provide in radiotap */
+ ipw_rt->rt_hdr.it_present =
+ ((1 << IEEE80211_RADIOTAP_FLAGS) |
+ (1 << IEEE80211_RADIOTAP_TSFT) |
+ (1 << IEEE80211_RADIOTAP_RATE) |
+ (1 << IEEE80211_RADIOTAP_CHANNEL) |
+ (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
+ (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
+ (1 << IEEE80211_RADIOTAP_ANTENNA));
+
+ /* Zero the flags, we'll add to them as we go */
+ ipw_rt->rt_flags = 0;
+
+ ipw_rt->rt_tsf = tsf;
+
+ /* Convert to DBM */
+ ipw_rt->rt_dbmsignal = signal;
+ ipw_rt->rt_dbmnoise = noise;
+
+ /* Convert the channel data and set the flags */
+ ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
+ if (channel > 14) { /* 802.11a */
+ ipw_rt->rt_chbitmask =
+ cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
+ } else if (phy_flags & (1 << 5)) { /* 802.11b */
+ ipw_rt->rt_chbitmask =
+ cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
+ } else { /* 802.11g */
+ ipw_rt->rt_chbitmask =
+ (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
+ }
+
+ /* set the rate in multiples of 500k/s */
+ switch (rate) {
+ case IPW_TX_RATE_1MB:
+ ipw_rt->rt_rate = 2;
+ break;
+ case IPW_TX_RATE_2MB:
+ ipw_rt->rt_rate = 4;
+ break;
+ case IPW_TX_RATE_5MB:
+ ipw_rt->rt_rate = 10;
+ break;
+ case IPW_TX_RATE_6MB:
+ ipw_rt->rt_rate = 12;
+ break;
+ case IPW_TX_RATE_9MB:
+ ipw_rt->rt_rate = 18;
+ break;
+ case IPW_TX_RATE_11MB:
+ ipw_rt->rt_rate = 22;
+ break;
+ case IPW_TX_RATE_12MB:
+ ipw_rt->rt_rate = 24;
+ break;
+ case IPW_TX_RATE_18MB:
+ ipw_rt->rt_rate = 36;
+ break;
+ case IPW_TX_RATE_24MB:
+ ipw_rt->rt_rate = 48;
+ break;
+ case IPW_TX_RATE_36MB:
+ ipw_rt->rt_rate = 72;
+ break;
+ case IPW_TX_RATE_48MB:
+ ipw_rt->rt_rate = 96;
+ break;
+ case IPW_TX_RATE_54MB:
+ ipw_rt->rt_rate = 108;
+ break;
+ default:
+ ipw_rt->rt_rate = 0;
+ break;
+ }
+
+ /* antenna number */
+ ipw_rt->rt_antenna = (phy_flags & 3);
+
+ /* set the preamble flag if we have it */
+ if (phy_flags & (1 << 6))
+ ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
+
+ IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
+
+ if (!ieee80211_rx(priv->prom_priv->ieee, skb, stats)) {
+ priv->prom_priv->ieee->stats.rx_errors++;
+ dev_kfree_skb_any(skb);
+ }
+}
+#endif
+
static int is_network_packet(struct ipw_priv *priv,
struct ieee80211_hdr_4addr *header)
{
@@ -7808,15 +8180,21 @@ static void ipw_rx(struct ipw_priv *priv)
priv->rx_packets++;
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+ if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
+ ipw_handle_promiscuous_rx(priv, rxb, &stats);
+#endif
+
#ifdef CONFIG_IPW2200_MONITOR
if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
-#ifdef CONFIG_IEEE80211_RADIOTAP
- ipw_handle_data_packet_monitor(priv,
- rxb,
- &stats);
+#ifdef CONFIG_IPW2200_RADIOTAP
+
+ ipw_handle_data_packet_monitor(priv,
+ rxb,
+ &stats);
#else
- ipw_handle_data_packet(priv, rxb,
- &stats);
+ ipw_handle_data_packet(priv, rxb,
+ &stats);
#endif
break;
}
@@ -7837,9 +8215,9 @@ static void ipw_rx(struct ipw_priv *priv)
if (network_packet && priv->assoc_network) {
priv->assoc_network->stats.rssi =
stats.rssi;
- average_add(&priv->average_rssi,
- stats.rssi);
- priv->last_rx_rssi = stats.rssi;
+ priv->exp_avg_rssi =
+ exponential_average(priv->exp_avg_rssi,
+ stats.rssi, DEPTH_RSSI);
}
IPW_DEBUG_RX("Frame: len=%u\n",
@@ -7982,10 +8360,10 @@ static int ipw_sw_reset(struct ipw_priv *priv, int option)
IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
/* TODO: Validate that provided channel is in range */
}
-#ifdef CONFIG_IPW_QOS
+#ifdef CONFIG_IPW2200_QOS
ipw_qos_init(priv, qos_enable, qos_burst_enable,
burst_duration_CCK, burst_duration_OFDM);
-#endif /* CONFIG_IPW_QOS */
+#endif /* CONFIG_IPW2200_QOS */
switch (mode) {
case 1:
@@ -7996,7 +8374,7 @@ static int ipw_sw_reset(struct ipw_priv *priv, int option)
#ifdef CONFIG_IPW2200_MONITOR
case 2:
priv->ieee->iw_mode = IW_MODE_MONITOR;
-#ifdef CONFIG_IEEE80211_RADIOTAP
+#ifdef CONFIG_IPW2200_RADIOTAP
priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
#else
priv->net_dev->type = ARPHRD_IEEE80211;
@@ -8251,7 +8629,7 @@ static int ipw_wx_set_mode(struct net_device *dev,
priv->net_dev->type = ARPHRD_ETHER;
if (wrqu->mode == IW_MODE_MONITOR)
-#ifdef CONFIG_IEEE80211_RADIOTAP
+#ifdef CONFIG_IPW2200_RADIOTAP
priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
#else
priv->net_dev->type = ARPHRD_IEEE80211;
@@ -8379,7 +8757,8 @@ static int ipw_wx_get_range(struct net_device *dev,
/* Event capability (kernel + driver) */
range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
- IW_EVENT_CAPA_MASK(SIOCGIWAP));
+ IW_EVENT_CAPA_MASK(SIOCGIWAP) |
+ IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
range->event_capa[1] = IW_EVENT_CAPA_K_1;
range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
@@ -8734,6 +9113,7 @@ static int ipw_wx_get_rate(struct net_device *dev,
struct ipw_priv *priv = ieee80211_priv(dev);
mutex_lock(&priv->mutex);
wrqu->bitrate.value = priv->last_rate;
+ wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
mutex_unlock(&priv->mutex);
IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
return 0;
@@ -9351,7 +9731,7 @@ static int ipw_wx_set_monitor(struct net_device *dev,
IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
if (enable) {
if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
-#ifdef CONFIG_IEEE80211_RADIOTAP
+#ifdef CONFIG_IPW2200_RADIOTAP
priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
#else
priv->net_dev->type = ARPHRD_IEEE80211;
@@ -9579,8 +9959,8 @@ static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
}
wstats->qual.qual = priv->quality;
- wstats->qual.level = average_value(&priv->average_rssi);
- wstats->qual.noise = average_value(&priv->average_noise);
+ wstats->qual.level = priv->exp_avg_rssi;
+ wstats->qual.noise = priv->exp_avg_noise;
wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
@@ -9608,7 +9988,9 @@ static void init_sys_config(struct ipw_sys_config *sys_config)
sys_config->disable_unicast_decryption = 1;
sys_config->exclude_multicast_unencrypted = 0;
sys_config->disable_multicast_decryption = 1;
- sys_config->antenna_diversity = CFG_SYS_ANTENNA_SLOW_DIV;
+ if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
+ antenna = CFG_SYS_ANTENNA_BOTH;
+ sys_config->antenna_diversity = antenna;
sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
sys_config->dot11g_auto_detection = 0;
sys_config->enable_cts_to_self = 0;
@@ -9647,11 +10029,11 @@ we need to heavily modify the ieee80211_skb_to_txb.
static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
int pri)
{
- struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)
+ struct ieee80211_hdr_3addrqos *hdr = (struct ieee80211_hdr_3addrqos *)
txb->fragments[0]->data;
int i = 0;
struct tfd_frame *tfd;
-#ifdef CONFIG_IPW_QOS
+#ifdef CONFIG_IPW2200_QOS
int tx_id = ipw_get_tx_queue_number(priv, pri);
struct clx2_tx_queue *txq = &priv->txq[tx_id];
#else
@@ -9662,9 +10044,9 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
u16 remaining_bytes;
int fc;
+ hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
switch (priv->ieee->iw_mode) {
case IW_MODE_ADHOC:
- hdr_len = IEEE80211_3ADDR_LEN;
unicast = !is_multicast_ether_addr(hdr->addr1);
id = ipw_find_station(priv, hdr->addr1);
if (id == IPW_INVALID_STATION) {
@@ -9681,7 +10063,6 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
case IW_MODE_INFRA:
default:
unicast = !is_multicast_ether_addr(hdr->addr3);
- hdr_len = IEEE80211_3ADDR_LEN;
id = 0;
break;
}
@@ -9759,9 +10140,10 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
/* No hardware encryption */
tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
-#ifdef CONFIG_IPW_QOS
- ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data), unicast);
-#endif /* CONFIG_IPW_QOS */
+#ifdef CONFIG_IPW2200_QOS
+ if (fc & IEEE80211_STYPE_QOS_DATA)
+ ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
+#endif /* CONFIG_IPW2200_QOS */
/* payload */
tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
@@ -9841,12 +10223,12 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
static int ipw_net_is_queue_full(struct net_device *dev, int pri)
{
struct ipw_priv *priv = ieee80211_priv(dev);
-#ifdef CONFIG_IPW_QOS
+#ifdef CONFIG_IPW2200_QOS
int tx_id = ipw_get_tx_queue_number(priv, pri);
struct clx2_tx_queue *txq = &priv->txq[tx_id];
#else
struct clx2_tx_queue *txq = &priv->txq[0];
-#endif /* CONFIG_IPW_QOS */
+#endif /* CONFIG_IPW2200_QOS */
if (ipw_queue_space(&txq->q) < txq->q.high_mark)
return 1;
@@ -9854,6 +10236,88 @@ static int ipw_net_is_queue_full(struct net_device *dev, int pri)
return 0;
}
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
+ struct ieee80211_txb *txb)
+{
+ struct ieee80211_rx_stats dummystats;
+ struct ieee80211_hdr *hdr;
+ u8 n;
+ u16 filter = priv->prom_priv->filter;
+ int hdr_only = 0;
+
+ if (filter & IPW_PROM_NO_TX)
+ return;
+
+ memset(&dummystats, 0, sizeof(dummystats));
+
+ /* Filtering of fragment chains is done against the first fragment */
+ hdr = (void *)txb->fragments[0]->data;
+ if (ieee80211_is_management(hdr->frame_ctl)) {
+ if (filter & IPW_PROM_NO_MGMT)
+ return;
+ if (filter & IPW_PROM_MGMT_HEADER_ONLY)
+ hdr_only = 1;
+ } else if (ieee80211_is_control(hdr->frame_ctl)) {
+ if (filter & IPW_PROM_NO_CTL)
+ return;
+ if (filter & IPW_PROM_CTL_HEADER_ONLY)
+ hdr_only = 1;
+ } else if (ieee80211_is_data(hdr->frame_ctl)) {
+ if (filter & IPW_PROM_NO_DATA)
+ return;
+ if (filter & IPW_PROM_DATA_HEADER_ONLY)
+ hdr_only = 1;
+ }
+
+ for(n=0; n<txb->nr_frags; ++n) {
+ struct sk_buff *src = txb->fragments[n];
+ struct sk_buff *dst;
+ struct ieee80211_radiotap_header *rt_hdr;
+ int len;
+
+ if (hdr_only) {
+ hdr = (void *)src->data;
+ len = ieee80211_get_hdrlen(hdr->frame_ctl);
+ } else
+ len = src->len;
+
+ dst = alloc_skb(
+ len + IEEE80211_RADIOTAP_HDRLEN, GFP_ATOMIC);
+ if (!dst) continue;
+
+ rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
+
+ rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
+ rt_hdr->it_pad = 0;
+ rt_hdr->it_present = 0; /* after all, it's just an idea */
+ rt_hdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
+
+ *(u16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
+ ieee80211chan2mhz(priv->channel));
+ if (priv->channel > 14) /* 802.11a */
+ *(u16*)skb_put(dst, sizeof(u16)) =
+ cpu_to_le16(IEEE80211_CHAN_OFDM |
+ IEEE80211_CHAN_5GHZ);
+ else if (priv->ieee->mode == IEEE_B) /* 802.11b */
+ *(u16*)skb_put(dst, sizeof(u16)) =
+ cpu_to_le16(IEEE80211_CHAN_CCK |
+ IEEE80211_CHAN_2GHZ);
+ else /* 802.11g */
+ *(u16*)skb_put(dst, sizeof(u16)) =
+ cpu_to_le16(IEEE80211_CHAN_OFDM |
+ IEEE80211_CHAN_2GHZ);
+
+ rt_hdr->it_len = dst->len;
+
+ memcpy(skb_put(dst, len), src->data, len);
+
+ if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats))
+ dev_kfree_skb_any(dst);
+ }
+}
+#endif
+
static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
struct net_device *dev, int pri)
{
@@ -9871,6 +10335,11 @@ static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
goto fail_unlock;
}
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+ if (rtap_iface && netif_running(priv->prom_net_dev))
+ ipw_handle_promiscuous_tx(priv, txb);
+#endif
+
ret = ipw_tx_skb(priv, txb, pri);
if (ret == NETDEV_TX_OK)
__ipw_led_activity_on(priv);
@@ -10169,10 +10638,10 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv)
INIT_WORK(&priv->merge_networks,
(void (*)(void *))ipw_merge_adhoc_network, priv);
-#ifdef CONFIG_IPW_QOS
+#ifdef CONFIG_IPW2200_QOS
INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate,
priv);
-#endif /* CONFIG_IPW_QOS */
+#endif /* CONFIG_IPW2200_QOS */
tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
ipw_irq_tasklet, (unsigned long)priv);
@@ -10318,12 +10787,21 @@ static int ipw_config(struct ipw_priv *priv)
|= CFG_BT_COEXISTENCE_OOB;
}
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+ if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
+ priv->sys_config.accept_all_data_frames = 1;
+ priv->sys_config.accept_non_directed_frames = 1;
+ priv->sys_config.accept_all_mgmt_bcpr = 1;
+ priv->sys_config.accept_all_mgmt_frames = 1;
+ }
+#endif
+
if (priv->ieee->iw_mode == IW_MODE_ADHOC)
priv->sys_config.answer_broadcast_ssid_probe = 1;
else
priv->sys_config.answer_broadcast_ssid_probe = 0;
- if (ipw_send_system_config(priv, &priv->sys_config))
+ if (ipw_send_system_config(priv))
goto error;
init_supported_rates(priv, &priv->rates);
@@ -10335,10 +10813,10 @@ static int ipw_config(struct ipw_priv *priv)
if (ipw_send_rts_threshold(priv, priv->rts_threshold))
goto error;
}
-#ifdef CONFIG_IPW_QOS
+#ifdef CONFIG_IPW2200_QOS
IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
ipw_qos_activate(priv, NULL);
-#endif /* CONFIG_IPW_QOS */
+#endif /* CONFIG_IPW2200_QOS */
if (ipw_set_random_seed(priv))
goto error;
@@ -10639,6 +11117,7 @@ static int ipw_up(struct ipw_priv *priv)
if (priv->cmdlog == NULL) {
IPW_ERROR("Error allocating %d command log entries.\n",
cmdlog);
+ return -ENOMEM;
} else {
memset(priv->cmdlog, 0, sizeof(*priv->cmdlog) * cmdlog);
priv->cmdlog_len = cmdlog;
@@ -10860,6 +11339,10 @@ static struct attribute *ipw_sysfs_entries[] = {
&dev_attr_led.attr,
&dev_attr_speed_scan.attr,
&dev_attr_net_stats.attr,
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+ &dev_attr_rtap_iface.attr,
+ &dev_attr_rtap_filter.attr,
+#endif
NULL
};
@@ -10868,6 +11351,109 @@ static struct attribute_group ipw_attribute_group = {
.attrs = ipw_sysfs_entries,
};
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+static int ipw_prom_open(struct net_device *dev)
+{
+ struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
+ struct ipw_priv *priv = prom_priv->priv;
+
+ IPW_DEBUG_INFO("prom dev->open\n");
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
+
+ if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
+ priv->sys_config.accept_all_data_frames = 1;
+ priv->sys_config.accept_non_directed_frames = 1;
+ priv->sys_config.accept_all_mgmt_bcpr = 1;
+ priv->sys_config.accept_all_mgmt_frames = 1;
+
+ ipw_send_system_config(priv);
+ }
+
+ return 0;
+}
+
+static int ipw_prom_stop(struct net_device *dev)
+{
+ struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
+ struct ipw_priv *priv = prom_priv->priv;
+
+ IPW_DEBUG_INFO("prom dev->stop\n");
+
+ if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
+ priv->sys_config.accept_all_data_frames = 0;
+ priv->sys_config.accept_non_directed_frames = 0;
+ priv->sys_config.accept_all_mgmt_bcpr = 0;
+ priv->sys_config.accept_all_mgmt_frames = 0;
+
+ ipw_send_system_config(priv);
+ }
+
+ return 0;
+}
+
+static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ IPW_DEBUG_INFO("prom dev->xmit\n");
+ netif_stop_queue(dev);
+ return -EOPNOTSUPP;
+}
+
+static struct net_device_stats *ipw_prom_get_stats(struct net_device *dev)
+{
+ struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
+ return &prom_priv->ieee->stats;
+}
+
+static int ipw_prom_alloc(struct ipw_priv *priv)
+{
+ int rc = 0;
+
+ if (priv->prom_net_dev)
+ return -EPERM;
+
+ priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
+ if (priv->prom_net_dev == NULL)
+ return -ENOMEM;
+
+ priv->prom_priv = ieee80211_priv(priv->prom_net_dev);
+ priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
+ priv->prom_priv->priv = priv;
+
+ strcpy(priv->prom_net_dev->name, "rtap%d");
+
+ priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
+ priv->prom_net_dev->open = ipw_prom_open;
+ priv->prom_net_dev->stop = ipw_prom_stop;
+ priv->prom_net_dev->get_stats = ipw_prom_get_stats;
+ priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit;
+
+ priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
+
+ rc = register_netdev(priv->prom_net_dev);
+ if (rc) {
+ free_ieee80211(priv->prom_net_dev);
+ priv->prom_net_dev = NULL;
+ return rc;
+ }
+
+ return 0;
+}
+
+static void ipw_prom_free(struct ipw_priv *priv)
+{
+ if (!priv->prom_net_dev)
+ return;
+
+ unregister_netdev(priv->prom_net_dev);
+ free_ieee80211(priv->prom_net_dev);
+
+ priv->prom_net_dev = NULL;
+}
+
+#endif
+
+
static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = 0;
@@ -10959,11 +11545,12 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->ieee->set_security = shim__set_security;
priv->ieee->is_queue_full = ipw_net_is_queue_full;
-#ifdef CONFIG_IPW_QOS
+#ifdef CONFIG_IPW2200_QOS
+ priv->ieee->is_qos_active = ipw_is_qos_active;
priv->ieee->handle_probe_response = ipw_handle_beacon;
priv->ieee->handle_beacon = ipw_handle_probe_response;
priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
-#endif /* CONFIG_IPW_QOS */
+#endif /* CONFIG_IPW2200_QOS */
priv->ieee->perfect_rssi = -20;
priv->ieee->worst_rssi = -85;
@@ -10997,6 +11584,18 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_remove_sysfs;
}
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+ if (rtap_iface) {
+ err = ipw_prom_alloc(priv);
+ if (err) {
+ IPW_ERROR("Failed to register promiscuous network "
+ "device (error %d).\n", err);
+ unregister_netdev(priv->net_dev);
+ goto out_remove_sysfs;
+ }
+ }
+#endif
+
printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
"channels, %d 802.11a channels)\n",
priv->ieee->geo.name, priv->ieee->geo.bg_channels,
@@ -11076,6 +11675,10 @@ static void ipw_pci_remove(struct pci_dev *pdev)
priv->error = NULL;
}
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+ ipw_prom_free(priv);
+#endif
+
free_irq(pdev->irq, priv);
iounmap(priv->hw_base);
pci_release_regions(pdev);
@@ -11200,7 +11803,12 @@ MODULE_PARM_DESC(debug, "debug output mask");
module_param(channel, int, 0444);
MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
-#ifdef CONFIG_IPW_QOS
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+module_param(rtap_iface, int, 0444);
+MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
+#endif
+
+#ifdef CONFIG_IPW2200_QOS
module_param(qos_enable, int, 0444);
MODULE_PARM_DESC(qos_enable, "enable all QoS functionalitis");
@@ -11215,7 +11823,7 @@ MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
module_param(burst_duration_OFDM, int, 0444);
MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
-#endif /* CONFIG_IPW_QOS */
+#endif /* CONFIG_IPW2200_QOS */
#ifdef CONFIG_IPW2200_MONITOR
module_param(mode, int, 0444);
@@ -11238,5 +11846,8 @@ MODULE_PARM_DESC(cmdlog,
module_param(roaming, int, 0444);
MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
+module_param(antenna, int, 0444);
+MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");
+
module_exit(ipw_exit);
module_init(ipw_init);
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h
index 4b9804900702..6044c0be2c80 100644
--- a/drivers/net/wireless/ipw2200.h
+++ b/drivers/net/wireless/ipw2200.h
@@ -789,7 +789,7 @@ struct ipw_sys_config {
u8 bt_coexist_collision_thr;
u8 silence_threshold;
u8 accept_all_mgmt_bcpr;
- u8 accept_all_mgtm_frames;
+ u8 accept_all_mgmt_frames;
u8 pass_noise_stats_to_host;
u8 reserved3;
} __attribute__ ((packed));
@@ -1122,6 +1122,52 @@ struct ipw_fw_error {
u8 payload[0];
} __attribute__ ((packed));
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+
+enum ipw_prom_filter {
+ IPW_PROM_CTL_HEADER_ONLY = (1 << 0),
+ IPW_PROM_MGMT_HEADER_ONLY = (1 << 1),
+ IPW_PROM_DATA_HEADER_ONLY = (1 << 2),
+ IPW_PROM_ALL_HEADER_ONLY = 0xf, /* bits 0..3 */
+ IPW_PROM_NO_TX = (1 << 4),
+ IPW_PROM_NO_RX = (1 << 5),
+ IPW_PROM_NO_CTL = (1 << 6),
+ IPW_PROM_NO_MGMT = (1 << 7),
+ IPW_PROM_NO_DATA = (1 << 8),
+};
+
+struct ipw_priv;
+struct ipw_prom_priv {
+ struct ipw_priv *priv;
+ struct ieee80211_device *ieee;
+ enum ipw_prom_filter filter;
+ int tx_packets;
+ int rx_packets;
+};
+#endif
+
+#if defined(CONFIG_IPW2200_RADIOTAP) || defined(CONFIG_IPW2200_PROMISCUOUS)
+/* Magic struct that slots into the radiotap header -- no reason
+ * to build this manually element by element, we can write it much
+ * more efficiently than we can parse it. ORDER MATTERS HERE
+ *
+ * When sent to us via the simulated Rx interface in sysfs, the entire
+ * structure is provided regardless of any bits unset.
+ */
+struct ipw_rt_hdr {
+ struct ieee80211_radiotap_header rt_hdr;
+ u64 rt_tsf; /* TSF */
+ u8 rt_flags; /* radiotap packet flags */
+ u8 rt_rate; /* rate in 500kb/s */
+ u16 rt_channel; /* channel in mhz */
+ u16 rt_chbitmask; /* channel bitfield */
+ s8 rt_dbmsignal; /* signal in dBm, kluged to signed */
+ s8 rt_dbmnoise;
+ u8 rt_antenna; /* antenna number */
+ u8 payload[0]; /* payload... */
+} __attribute__ ((packed));
+#endif
+
struct ipw_priv {
/* ieee device used by generic ieee processing code */
struct ieee80211_device *ieee;
@@ -1133,6 +1179,12 @@ struct ipw_priv {
struct pci_dev *pci_dev;
struct net_device *net_dev;
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+ /* Promiscuous mode */
+ struct ipw_prom_priv *prom_priv;
+ struct net_device *prom_net_dev;
+#endif
+
/* pci hardware address support */
void __iomem *hw_base;
unsigned long hw_len;
@@ -1153,11 +1205,9 @@ struct ipw_priv {
u32 config;
u32 capability;
- u8 last_rx_rssi;
- u8 last_noise;
struct average average_missed_beacons;
- struct average average_rssi;
- struct average average_noise;
+ s16 exp_avg_rssi;
+ s16 exp_avg_noise;
u32 port_type;
int rx_bufs_min; /**< minimum number of bufs in Rx queue */
int rx_pend_max; /**< maximum pending buffers for one IRQ */
@@ -1308,6 +1358,29 @@ struct ipw_priv {
/* debug macros */
+/* Debug and printf string expansion helpers for printing bitfields */
+#define BIT_FMT8 "%c%c%c%c-%c%c%c%c"
+#define BIT_FMT16 BIT_FMT8 ":" BIT_FMT8
+#define BIT_FMT32 BIT_FMT16 " " BIT_FMT16
+
+#define BITC(x,y) (((x>>y)&1)?'1':'0')
+#define BIT_ARG8(x) \
+BITC(x,7),BITC(x,6),BITC(x,5),BITC(x,4),\
+BITC(x,3),BITC(x,2),BITC(x,1),BITC(x,0)
+
+#define BIT_ARG16(x) \
+BITC(x,15),BITC(x,14),BITC(x,13),BITC(x,12),\
+BITC(x,11),BITC(x,10),BITC(x,9),BITC(x,8),\
+BIT_ARG8(x)
+
+#define BIT_ARG32(x) \
+BITC(x,31),BITC(x,30),BITC(x,29),BITC(x,28),\
+BITC(x,27),BITC(x,26),BITC(x,25),BITC(x,24),\
+BITC(x,23),BITC(x,22),BITC(x,21),BITC(x,20),\
+BITC(x,19),BITC(x,18),BITC(x,17),BITC(x,16),\
+BIT_ARG16(x)
+
+
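A quick usage sketch for the bitfield helpers above (the variable is made up; only BIT_FMT32/BIT_ARG32 come from this patch):

	u32 inta = 0x80000001;	/* hypothetical interrupt status */

	printk(KERN_DEBUG "inta = " BIT_FMT32 "\n", BIT_ARG32(inta));
	/* prints: inta = 1000-0000:0000-0000 0000-0000:0000-0001 */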
#ifdef CONFIG_IPW2200_DEBUG
#define IPW_DEBUG(level, fmt, args...) \
do { if (ipw_debug_level & (level)) \
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index c2d0b09e0418..b563decf599e 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -201,41 +201,12 @@ static struct {
/* Data types */
/********************************************************************/
-/* Used in Event handling.
- * We avoid nested structures as they break on ARM -- Moustafa */
-struct hermes_tx_descriptor_802_11 {
- /* hermes_tx_descriptor */
- __le16 status;
- __le16 reserved1;
- __le16 reserved2;
- __le32 sw_support;
- u8 retry_count;
- u8 tx_rate;
- __le16 tx_control;
-
- /* ieee80211_hdr */
+/* Beginning of the Tx descriptor, used in TxExc handling */
+struct hermes_txexc_data {
+ struct hermes_tx_descriptor desc;
__le16 frame_ctl;
__le16 duration_id;
u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 addr4[ETH_ALEN];
-
- __le16 data_len;
-
- /* ethhdr */
- u8 h_dest[ETH_ALEN]; /* destination eth addr */
- u8 h_source[ETH_ALEN]; /* source ether addr */
- __be16 h_proto; /* packet type ID field */
-
- /* p8022_hdr */
- u8 dsap;
- u8 ssap;
- u8 ctrl;
- u8 oui[3];
-
- __be16 ethertype;
} __attribute__ ((packed));
/* Rx frame header except compatibility 802.3 header */
@@ -450,53 +421,39 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
hermes_t *hw = &priv->hw;
int err = 0;
u16 txfid = priv->txfid;
- char *p;
struct ethhdr *eh;
- int len, data_len, data_off;
+ int data_off;
struct hermes_tx_descriptor desc;
unsigned long flags;
- TRACE_ENTER(dev->name);
-
if (! netif_running(dev)) {
printk(KERN_ERR "%s: Tx on stopped device!\n",
dev->name);
- TRACE_EXIT(dev->name);
- return 1;
+ return NETDEV_TX_BUSY;
}
if (netif_queue_stopped(dev)) {
printk(KERN_DEBUG "%s: Tx while transmitter busy!\n",
dev->name);
- TRACE_EXIT(dev->name);
- return 1;
+ return NETDEV_TX_BUSY;
}
if (orinoco_lock(priv, &flags) != 0) {
printk(KERN_ERR "%s: orinoco_xmit() called while hw_unavailable\n",
dev->name);
- TRACE_EXIT(dev->name);
- return 1;
+ return NETDEV_TX_BUSY;
}
if (! netif_carrier_ok(dev) || (priv->iw_mode == IW_MODE_MONITOR)) {
/* Oops, the firmware hasn't established a connection,
silently drop the packet (this seems to be the
safest approach). */
- stats->tx_errors++;
- orinoco_unlock(priv, &flags);
- dev_kfree_skb(skb);
- TRACE_EXIT(dev->name);
- return 0;
+ goto drop;
}
- /* Length of the packet body */
- /* FIXME: what if the skb is smaller than this? */
- len = max_t(int, ALIGN(skb->len, 2), ETH_ZLEN);
- skb = skb_padto(skb, len);
- if (skb == NULL)
- goto fail;
- len -= ETH_HLEN;
+ /* Check packet length */
+ if (skb->len < ETH_HLEN)
+ goto drop;
eh = (struct ethhdr *)skb->data;
@@ -507,8 +464,7 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
if (net_ratelimit())
printk(KERN_ERR "%s: Error %d writing Tx descriptor "
"to BAP\n", dev->name, err);
- stats->tx_errors++;
- goto fail;
+ goto busy;
}
/* Clear the 802.11 header and data length fields - some
@@ -519,50 +475,38 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
/* Encapsulate Ethernet-II frames */
if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */
- struct header_struct hdr;
- data_len = len;
- data_off = HERMES_802_3_OFFSET + sizeof(hdr);
- p = skb->data + ETH_HLEN;
-
- /* 802.3 header */
- memcpy(hdr.dest, eh->h_dest, ETH_ALEN);
- memcpy(hdr.src, eh->h_source, ETH_ALEN);
- hdr.len = htons(data_len + ENCAPS_OVERHEAD);
-
- /* 802.2 header */
- memcpy(&hdr.dsap, &encaps_hdr, sizeof(encaps_hdr));
-
- hdr.ethertype = eh->h_proto;
- err = hermes_bap_pwrite(hw, USER_BAP, &hdr, sizeof(hdr),
- txfid, HERMES_802_3_OFFSET);
+ struct header_struct {
+ struct ethhdr eth; /* 802.3 header */
+ u8 encap[6]; /* 802.2 header */
+ } __attribute__ ((packed)) hdr;
+
+ /* Strip destination and source from the data */
+ skb_pull(skb, 2 * ETH_ALEN);
+ data_off = HERMES_802_2_OFFSET + sizeof(encaps_hdr);
+
+ /* And move them to a separate header */
+ memcpy(&hdr.eth, eh, 2 * ETH_ALEN);
+ hdr.eth.h_proto = htons(sizeof(encaps_hdr) + skb->len);
+ memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr));
+
+ err = hermes_bap_pwrite(hw, USER_BAP, &hdr, sizeof(hdr),
+ txfid, HERMES_802_3_OFFSET);
if (err) {
if (net_ratelimit())
printk(KERN_ERR "%s: Error %d writing packet "
"header to BAP\n", dev->name, err);
- stats->tx_errors++;
- goto fail;
+ goto busy;
}
- /* Actual xfer length - allow for padding */
- len = ALIGN(data_len, 2);
- if (len < ETH_ZLEN - ETH_HLEN)
- len = ETH_ZLEN - ETH_HLEN;
} else { /* IEEE 802.3 frame */
- data_len = len + ETH_HLEN;
data_off = HERMES_802_3_OFFSET;
- p = skb->data;
- /* Actual xfer length - round up for odd length packets */
- len = ALIGN(data_len, 2);
- if (len < ETH_ZLEN)
- len = ETH_ZLEN;
}
- err = hermes_bap_pwrite_pad(hw, USER_BAP, p, data_len, len,
+ err = hermes_bap_pwrite(hw, USER_BAP, skb->data, skb->len,
txfid, data_off);
if (err) {
printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
dev->name, err);
- stats->tx_errors++;
- goto fail;
+ goto busy;
}
/* Finally, we actually initiate the send */
@@ -575,25 +519,27 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
if (net_ratelimit())
printk(KERN_ERR "%s: Error %d transmitting packet\n",
dev->name, err);
- stats->tx_errors++;
- goto fail;
+ goto busy;
}
dev->trans_start = jiffies;
- stats->tx_bytes += data_off + data_len;
+ stats->tx_bytes += data_off + skb->len;
+ goto ok;
- orinoco_unlock(priv, &flags);
+ drop:
+ stats->tx_errors++;
+ stats->tx_dropped++;
+ ok:
+ orinoco_unlock(priv, &flags);
dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
- TRACE_EXIT(dev->name);
-
- return 0;
- fail:
- TRACE_EXIT(dev->name);
-
+ busy:
+ if (err == -EIO)
+ schedule_work(&priv->reset_work);
orinoco_unlock(priv, &flags);
- return err;
+ return NETDEV_TX_BUSY;
}
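For readers following the reworked encapsulation above, a hedged sketch of the buffer layout orinoco_xmit() now writes for an Ethernet-II frame; the struct and its name are purely illustrative (the driver really issues two separate hermes_bap_pwrite() calls), only the offsets and the SNAP bytes follow from the code:

/* Illustrative layout only -- not a structure used by the driver */
struct ethII_tx_layout_example {
	/* first pwrite, at HERMES_802_3_OFFSET ("hdr" above) */
	u8	dest[ETH_ALEN];
	u8	src[ETH_ALEN];
	__be16	len_802_3;	/* sizeof(encaps_hdr) + skb->len */
	u8	snap[6];	/* encaps_hdr: AA AA 03 00 00 00 */
	/* second pwrite, at data_off = HERMES_802_2_OFFSET +
	 * sizeof(encaps_hdr), i.e. right after the SNAP header */
	__be16	ethertype;	/* original eh->h_proto */
	u8	payload[0];
} __attribute__ ((packed));

For plain 802.3 frames the untouched skb simply goes to HERMES_802_3_OFFSET.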
static void __orinoco_ev_alloc(struct net_device *dev, hermes_t *hw)
@@ -629,7 +575,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
struct net_device_stats *stats = &priv->stats;
u16 fid = hermes_read_regn(hw, TXCOMPLFID);
u16 status;
- struct hermes_tx_descriptor_802_11 hdr;
+ struct hermes_txexc_data hdr;
int err = 0;
if (fid == DUMMY_FID)
@@ -637,8 +583,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
/* Read part of the frame header - we need status and addr1 */
err = hermes_bap_pread(hw, IRQ_BAP, &hdr,
- offsetof(struct hermes_tx_descriptor_802_11,
- addr2),
+ sizeof(struct hermes_txexc_data),
fid, 0);
hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
@@ -658,7 +603,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
 * exceeded, because that's the only status that really means
 * that this particular node went away.
 * Other errors mean that *we* screwed up. - Jean II */
- status = le16_to_cpu(hdr.status);
+ status = le16_to_cpu(hdr.desc.status);
if (status & (HERMES_TXSTAT_RETRYERR | HERMES_TXSTAT_AGEDERR)) {
union iwreq_data wrqu;
@@ -1398,16 +1343,12 @@ int __orinoco_down(struct net_device *dev)
return 0;
}
-int orinoco_reinit_firmware(struct net_device *dev)
+static int orinoco_allocate_fid(struct net_device *dev)
{
struct orinoco_private *priv = netdev_priv(dev);
struct hermes *hw = &priv->hw;
int err;
- err = hermes_init(hw);
- if (err)
- return err;
-
err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid);
if (err == -EIO && priv->nicbuf_size > TX_NICBUF_SIZE_BUG) {
/* Try workaround for old Symbol firmware bug */
@@ -1426,6 +1367,19 @@ int orinoco_reinit_firmware(struct net_device *dev)
return err;
}
+int orinoco_reinit_firmware(struct net_device *dev)
+{
+ struct orinoco_private *priv = netdev_priv(dev);
+ struct hermes *hw = &priv->hw;
+ int err;
+
+ err = hermes_init(hw);
+ if (!err)
+ err = orinoco_allocate_fid(dev);
+
+ return err;
+}
+
static int __orinoco_hw_set_bitrate(struct orinoco_private *priv)
{
hermes_t *hw = &priv->hw;
@@ -2272,14 +2226,12 @@ static int orinoco_init(struct net_device *dev)
u16 reclen;
int len;
- TRACE_ENTER(dev->name);
-
/* No need to lock, the hw_unavailable flag is already set in
* alloc_orinocodev() */
priv->nicbuf_size = IEEE80211_FRAME_LEN + ETH_HLEN;
/* Initialize the firmware */
- err = orinoco_reinit_firmware(dev);
+ err = hermes_init(hw);
if (err != 0) {
printk(KERN_ERR "%s: failed to initialize firmware (err = %d)\n",
dev->name, err);
@@ -2337,6 +2289,13 @@ static int orinoco_init(struct net_device *dev)
printk(KERN_DEBUG "%s: Station name \"%s\"\n", dev->name, priv->nick);
+ err = orinoco_allocate_fid(dev);
+ if (err) {
+ printk(KERN_ERR "%s: failed to allocate NIC buffer!\n",
+ dev->name);
+ goto out;
+ }
+
/* Get allowed channels */
err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CHANNELLIST,
&priv->channel_mask);
@@ -2427,7 +2386,6 @@ static int orinoco_init(struct net_device *dev)
printk(KERN_DEBUG "%s: ready\n", dev->name);
out:
- TRACE_EXIT(dev->name);
return err;
}
@@ -2795,8 +2753,6 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev,
int numrates;
int i, k;
- TRACE_ENTER(dev->name);
-
rrq->length = sizeof(struct iw_range);
memset(range, 0, sizeof(struct iw_range));
@@ -2886,8 +2842,6 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev,
IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP);
- TRACE_EXIT(dev->name);
-
return 0;
}
@@ -3069,8 +3023,6 @@ static int orinoco_ioctl_getessid(struct net_device *dev,
int err = 0;
unsigned long flags;
- TRACE_ENTER(dev->name);
-
if (netif_running(dev)) {
err = orinoco_hw_get_essid(priv, &active, essidbuf);
if (err)
@@ -3085,8 +3037,6 @@ static int orinoco_ioctl_getessid(struct net_device *dev,
erq->flags = 1;
erq->length = strlen(essidbuf) + 1;
- TRACE_EXIT(dev->name);
-
return 0;
}
@@ -4347,69 +4297,6 @@ static struct ethtool_ops orinoco_ethtool_ops = {
};
/********************************************************************/
-/* Debugging */
-/********************************************************************/
-
-#if 0
-static void show_rx_frame(struct orinoco_rxframe_hdr *frame)
-{
- printk(KERN_DEBUG "RX descriptor:\n");
- printk(KERN_DEBUG " status = 0x%04x\n", frame->desc.status);
- printk(KERN_DEBUG " time = 0x%08x\n", frame->desc.time);
- printk(KERN_DEBUG " silence = 0x%02x\n", frame->desc.silence);
- printk(KERN_DEBUG " signal = 0x%02x\n", frame->desc.signal);
- printk(KERN_DEBUG " rate = 0x%02x\n", frame->desc.rate);
- printk(KERN_DEBUG " rxflow = 0x%02x\n", frame->desc.rxflow);
- printk(KERN_DEBUG " reserved = 0x%08x\n", frame->desc.reserved);
-
- printk(KERN_DEBUG "IEEE 802.11 header:\n");
- printk(KERN_DEBUG " frame_ctl = 0x%04x\n",
- frame->p80211.frame_ctl);
- printk(KERN_DEBUG " duration_id = 0x%04x\n",
- frame->p80211.duration_id);
- printk(KERN_DEBUG " addr1 = %02x:%02x:%02x:%02x:%02x:%02x\n",
- frame->p80211.addr1[0], frame->p80211.addr1[1],
- frame->p80211.addr1[2], frame->p80211.addr1[3],
- frame->p80211.addr1[4], frame->p80211.addr1[5]);
- printk(KERN_DEBUG " addr2 = %02x:%02x:%02x:%02x:%02x:%02x\n",
- frame->p80211.addr2[0], frame->p80211.addr2[1],
- frame->p80211.addr2[2], frame->p80211.addr2[3],
- frame->p80211.addr2[4], frame->p80211.addr2[5]);
- printk(KERN_DEBUG " addr3 = %02x:%02x:%02x:%02x:%02x:%02x\n",
- frame->p80211.addr3[0], frame->p80211.addr3[1],
- frame->p80211.addr3[2], frame->p80211.addr3[3],
- frame->p80211.addr3[4], frame->p80211.addr3[5]);
- printk(KERN_DEBUG " seq_ctl = 0x%04x\n",
- frame->p80211.seq_ctl);
- printk(KERN_DEBUG " addr4 = %02x:%02x:%02x:%02x:%02x:%02x\n",
- frame->p80211.addr4[0], frame->p80211.addr4[1],
- frame->p80211.addr4[2], frame->p80211.addr4[3],
- frame->p80211.addr4[4], frame->p80211.addr4[5]);
- printk(KERN_DEBUG " data_len = 0x%04x\n",
- frame->p80211.data_len);
-
- printk(KERN_DEBUG "IEEE 802.3 header:\n");
- printk(KERN_DEBUG " dest = %02x:%02x:%02x:%02x:%02x:%02x\n",
- frame->p8023.h_dest[0], frame->p8023.h_dest[1],
- frame->p8023.h_dest[2], frame->p8023.h_dest[3],
- frame->p8023.h_dest[4], frame->p8023.h_dest[5]);
- printk(KERN_DEBUG " src = %02x:%02x:%02x:%02x:%02x:%02x\n",
- frame->p8023.h_source[0], frame->p8023.h_source[1],
- frame->p8023.h_source[2], frame->p8023.h_source[3],
- frame->p8023.h_source[4], frame->p8023.h_source[5]);
- printk(KERN_DEBUG " len = 0x%04x\n", frame->p8023.h_proto);
-
- printk(KERN_DEBUG "IEEE 802.2 LLC/SNAP header:\n");
- printk(KERN_DEBUG " DSAP = 0x%02x\n", frame->p8022.dsap);
- printk(KERN_DEBUG " SSAP = 0x%02x\n", frame->p8022.ssap);
- printk(KERN_DEBUG " ctrl = 0x%02x\n", frame->p8022.ctrl);
- printk(KERN_DEBUG " OUI = %02x:%02x:%02x\n",
- frame->p8022.oui[0], frame->p8022.oui[1], frame->p8022.oui[2]);
- printk(KERN_DEBUG " ethertype = 0x%04x\n", frame->ethertype);
-}
-#endif /* 0 */
-
-/********************************************************************/
/* Module initialization */
/********************************************************************/
diff --git a/drivers/net/wireless/orinoco.h b/drivers/net/wireless/orinoco.h
index f5d856db92a1..16db3e14b7d2 100644
--- a/drivers/net/wireless/orinoco.h
+++ b/drivers/net/wireless/orinoco.h
@@ -7,7 +7,7 @@
#ifndef _ORINOCO_H
#define _ORINOCO_H
-#define DRIVER_VERSION "0.15rc3"
+#define DRIVER_VERSION "0.15"
#include <linux/netdevice.h>
#include <linux/wireless.h>
@@ -30,20 +30,6 @@ struct orinoco_key {
char data[ORINOCO_MAX_KEY_SIZE];
} __attribute__ ((packed));
-struct header_struct {
- /* 802.3 */
- u8 dest[ETH_ALEN];
- u8 src[ETH_ALEN];
- __be16 len;
- /* 802.2 */
- u8 dsap;
- u8 ssap;
- u8 ctrl;
- /* SNAP */
- u8 oui[3];
- unsigned short ethertype;
-} __attribute__ ((packed));
-
typedef enum {
FIRMWARE_TYPE_AGERE,
FIRMWARE_TYPE_INTERSIL,
@@ -132,9 +118,6 @@ extern int orinoco_debug;
#define DEBUG(n, args...) do { } while (0)
#endif /* ORINOCO_DEBUG */
-#define TRACE_ENTER(devname) DEBUG(2, "%s: -> %s()\n", devname, __FUNCTION__);
-#define TRACE_EXIT(devname) DEBUG(2, "%s: <- %s()\n", devname, __FUNCTION__);
-
/********************************************************************/
/* Exported prototypes */
/********************************************************************/
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c
index 434f7d7ad841..b2aec4d9fbb1 100644
--- a/drivers/net/wireless/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco_cs.c
@@ -147,14 +147,11 @@ static void orinoco_cs_detach(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ if (link->dev_node)
+ unregister_netdev(dev);
+
orinoco_cs_release(link);
- DEBUG(0, PFX "detach: link=%p link->dev_node=%p\n", link, link->dev_node);
- if (link->dev_node) {
- DEBUG(0, PFX "About to unregister net device %p\n",
- dev);
- unregister_netdev(dev);
- }
free_orinocodev(dev);
} /* orinoco_cs_detach */
@@ -178,13 +175,10 @@ orinoco_cs_config(struct pcmcia_device *link)
int last_fn, last_ret;
u_char buf[64];
config_info_t conf;
- cisinfo_t info;
tuple_t tuple;
cisparse_t parse;
void __iomem *mem;
- CS_CHECK(ValidateCIS, pcmcia_validate_cis(link, &info));
-
/*
* This reads the card's CONFIG tuple to find its
* configuration registers.
@@ -234,12 +228,6 @@ orinoco_cs_config(struct pcmcia_device *link)
goto next_entry;
link->conf.ConfigIndex = cfg->index;
- /* Does this card need audio output? */
- if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
- link->conf.Attributes |= CONF_ENABLE_SPKR;
- link->conf.Status = CCSR_AUDIO_ENA;
- }
-
/* Use power settings for Vcc and Vpp if present */
/* Note that the CIS values need to be rescaled */
if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) {
@@ -355,19 +343,10 @@ orinoco_cs_config(struct pcmcia_device *link)
net_device has been registered */
/* Finally, report what we've done */
- printk(KERN_DEBUG "%s: index 0x%02x: ",
- dev->name, link->conf.ConfigIndex);
- if (link->conf.Vpp)
- printk(", Vpp %d.%d", link->conf.Vpp / 10,
- link->conf.Vpp % 10);
- printk(", irq %d", link->irq.AssignedIRQ);
- if (link->io.NumPorts1)
- printk(", io 0x%04x-0x%04x", link->io.BasePort1,
- link->io.BasePort1 + link->io.NumPorts1 - 1);
- if (link->io.NumPorts2)
- printk(" & 0x%04x-0x%04x", link->io.BasePort2,
- link->io.BasePort2 + link->io.NumPorts2 - 1);
- printk("\n");
+ printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io "
+ "0x%04x-0x%04x\n", dev->name, dev->class_dev.dev->bus_id,
+ link->irq.AssignedIRQ, link->io.BasePort1,
+ link->io.BasePort1 + link->io.NumPorts1 - 1);
return 0;
@@ -436,7 +415,6 @@ static int orinoco_cs_resume(struct pcmcia_device *link)
struct orinoco_private *priv = netdev_priv(dev);
struct orinoco_pccard *card = priv->card;
int err = 0;
- unsigned long flags;
if (! test_bit(0, &card->hard_reset_in_progress)) {
err = orinoco_reinit_firmware(dev);
@@ -446,7 +424,7 @@ static int orinoco_cs_resume(struct pcmcia_device *link)
return -EIO;
}
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock(&priv->lock);
netif_device_attach(dev);
priv->hw_unavailable--;
@@ -458,10 +436,10 @@ static int orinoco_cs_resume(struct pcmcia_device *link)
dev->name, err);
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock(&priv->lock);
}
- return 0;
+ return err;
}
diff --git a/drivers/net/wireless/orinoco_nortel.c b/drivers/net/wireless/orinoco_nortel.c
index d1a670b35338..74b9d5b2ba9e 100644
--- a/drivers/net/wireless/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco_nortel.c
@@ -1,9 +1,8 @@
/* orinoco_nortel.c
- *
+ *
* Driver for Prism II devices which would usually be driven by orinoco_cs,
* but are connected to the PCI bus by a PCI-to-PCMCIA adapter used in
* Nortel emobility, Symbol LA-4113 and Symbol LA-4123.
- * but are connected to the PCI bus by a Nortel PCI-PCMCIA-Adapter.
*
* Copyright (C) 2002 Tobias Hoffmann
* (C) 2003 Christoph Jungegger <disdos@traum404.de>
@@ -50,67 +49,62 @@
#include <pcmcia/cisreg.h>
#include "orinoco.h"
+#include "orinoco_pci.h"
#define COR_OFFSET (0xe0) /* COR attribute offset of Prism2 PC card */
#define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */
-/* Nortel specific data */
-struct nortel_pci_card {
- unsigned long iobase1;
- unsigned long iobase2;
-};
-
/*
- * Do a soft reset of the PCI card using the Configuration Option Register
+ * Do a soft reset of the card using the Configuration Option Register
* We need this to get going...
* This is the part of the code that is strongly inspired from wlan-ng
*
* Note bis : Don't try to access HERMES_CMD during the reset phase.
* It just won't work !
*/
-static int nortel_pci_cor_reset(struct orinoco_private *priv)
+static int orinoco_nortel_cor_reset(struct orinoco_private *priv)
{
- struct nortel_pci_card *card = priv->card;
+ struct orinoco_pci_card *card = priv->card;
- /* Assert the reset until the card notice */
- outw_p(8, card->iobase1 + 2);
- inw(card->iobase2 + COR_OFFSET);
- outw_p(0x80, card->iobase2 + COR_OFFSET);
+ /* Assert the reset until the card notices */
+ iowrite16(8, card->bridge_io + 2);
+ ioread16(card->attr_io + COR_OFFSET);
+ iowrite16(0x80, card->attr_io + COR_OFFSET);
mdelay(1);
/* Give time for the card to recover from this hard effort */
- outw_p(0, card->iobase2 + COR_OFFSET);
- outw_p(0, card->iobase2 + COR_OFFSET);
+ iowrite16(0, card->attr_io + COR_OFFSET);
+ iowrite16(0, card->attr_io + COR_OFFSET);
mdelay(1);
- /* set COR as usual */
- outw_p(COR_VALUE, card->iobase2 + COR_OFFSET);
- outw_p(COR_VALUE, card->iobase2 + COR_OFFSET);
+ /* Set COR as usual */
+ iowrite16(COR_VALUE, card->attr_io + COR_OFFSET);
+ iowrite16(COR_VALUE, card->attr_io + COR_OFFSET);
mdelay(1);
- outw_p(0x228, card->iobase1 + 2);
+ iowrite16(0x228, card->bridge_io + 2);
return 0;
}
-static int nortel_pci_hw_init(struct nortel_pci_card *card)
+static int orinoco_nortel_hw_init(struct orinoco_pci_card *card)
{
int i;
u32 reg;
- /* setup bridge */
- if (inw(card->iobase1) & 1) {
+ /* Setup bridge */
+ if (ioread16(card->bridge_io) & 1) {
printk(KERN_ERR PFX "brg1 answer1 wrong\n");
return -EBUSY;
}
- outw_p(0x118, card->iobase1 + 2);
- outw_p(0x108, card->iobase1 + 2);
+ iowrite16(0x118, card->bridge_io + 2);
+ iowrite16(0x108, card->bridge_io + 2);
mdelay(30);
- outw_p(0x8, card->iobase1 + 2);
+ iowrite16(0x8, card->bridge_io + 2);
for (i = 0; i < 30; i++) {
mdelay(30);
- if (inw(card->iobase1) & 0x10) {
+ if (ioread16(card->bridge_io) & 0x10) {
break;
}
}
@@ -118,42 +112,42 @@ static int nortel_pci_hw_init(struct nortel_pci_card *card)
printk(KERN_ERR PFX "brg1 timed out\n");
return -EBUSY;
}
- if (inw(card->iobase2 + 0xe0) & 1) {
+ if (ioread16(card->attr_io + COR_OFFSET) & 1) {
printk(KERN_ERR PFX "brg2 answer1 wrong\n");
return -EBUSY;
}
- if (inw(card->iobase2 + 0xe2) & 1) {
+ if (ioread16(card->attr_io + COR_OFFSET + 2) & 1) {
printk(KERN_ERR PFX "brg2 answer2 wrong\n");
return -EBUSY;
}
- if (inw(card->iobase2 + 0xe4) & 1) {
+ if (ioread16(card->attr_io + COR_OFFSET + 4) & 1) {
printk(KERN_ERR PFX "brg2 answer3 wrong\n");
return -EBUSY;
}
- /* set the PCMCIA COR-Register */
- outw_p(COR_VALUE, card->iobase2 + COR_OFFSET);
+ /* Set the PCMCIA COR register */
+ iowrite16(COR_VALUE, card->attr_io + COR_OFFSET);
mdelay(1);
- reg = inw(card->iobase2 + COR_OFFSET);
+ reg = ioread16(card->attr_io + COR_OFFSET);
if (reg != COR_VALUE) {
printk(KERN_ERR PFX "Error setting COR value (reg=%x)\n",
reg);
return -EBUSY;
}
- /* set leds */
- outw_p(1, card->iobase1 + 10);
+ /* Set LEDs */
+ iowrite16(1, card->bridge_io + 10);
return 0;
}
-static int nortel_pci_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int orinoco_nortel_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int err;
struct orinoco_private *priv;
- struct nortel_pci_card *card;
+ struct orinoco_pci_card *card;
struct net_device *dev;
- void __iomem *iomem;
+ void __iomem *hermes_io, *bridge_io, *attr_io;
err = pci_enable_device(pdev);
if (err) {
@@ -162,19 +156,34 @@ static int nortel_pci_init_one(struct pci_dev *pdev,
}
err = pci_request_regions(pdev, DRIVER_NAME);
- if (err != 0) {
+ if (err) {
printk(KERN_ERR PFX "Cannot obtain PCI resources\n");
goto fail_resources;
}
- iomem = pci_iomap(pdev, 2, 0);
- if (!iomem) {
- err = -ENOMEM;
- goto fail_map_io;
+ bridge_io = pci_iomap(pdev, 0, 0);
+ if (!bridge_io) {
+ printk(KERN_ERR PFX "Cannot map bridge registers\n");
+ err = -EIO;
+ goto fail_map_bridge;
+ }
+
+ attr_io = pci_iomap(pdev, 1, 0);
+ if (!attr_io) {
+ printk(KERN_ERR PFX "Cannot map PCMCIA attributes\n");
+ err = -EIO;
+ goto fail_map_attr;
+ }
+
+ hermes_io = pci_iomap(pdev, 2, 0);
+ if (!hermes_io) {
+ printk(KERN_ERR PFX "Cannot map chipset registers\n");
+ err = -EIO;
+ goto fail_map_hermes;
}
/* Allocate network device */
- dev = alloc_orinocodev(sizeof(*card), nortel_pci_cor_reset);
+ dev = alloc_orinocodev(sizeof(*card), orinoco_nortel_cor_reset);
if (!dev) {
printk(KERN_ERR PFX "Cannot allocate network device\n");
err = -ENOMEM;
@@ -183,16 +192,12 @@ static int nortel_pci_init_one(struct pci_dev *pdev,
priv = netdev_priv(dev);
card = priv->card;
- card->iobase1 = pci_resource_start(pdev, 0);
- card->iobase2 = pci_resource_start(pdev, 1);
- dev->base_addr = pci_resource_start(pdev, 2);
+ card->bridge_io = bridge_io;
+ card->attr_io = attr_io;
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- hermes_struct_init(&priv->hw, iomem, HERMES_16BIT_REGSPACING);
-
- printk(KERN_DEBUG PFX "Detected Nortel PCI device at %s irq:%d, "
- "io addr:0x%lx\n", pci_name(pdev), pdev->irq, dev->base_addr);
+ hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING);
err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ,
dev->name, dev);
@@ -201,21 +206,19 @@ static int nortel_pci_init_one(struct pci_dev *pdev,
err = -EBUSY;
goto fail_irq;
}
- dev->irq = pdev->irq;
- err = nortel_pci_hw_init(card);
+ err = orinoco_nortel_hw_init(card);
if (err) {
printk(KERN_ERR PFX "Hardware initialization failed\n");
goto fail;
}
- err = nortel_pci_cor_reset(priv);
+ err = orinoco_nortel_cor_reset(priv);
if (err) {
printk(KERN_ERR PFX "Initial reset failed\n");
goto fail;
}
-
err = register_netdev(dev);
if (err) {
printk(KERN_ERR PFX "Cannot register network device\n");
@@ -223,6 +226,8 @@ static int nortel_pci_init_one(struct pci_dev *pdev,
}
pci_set_drvdata(pdev, dev);
+ printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s\n", dev->name,
+ pci_name(pdev));
return 0;
@@ -234,9 +239,15 @@ static int nortel_pci_init_one(struct pci_dev *pdev,
free_orinocodev(dev);
fail_alloc:
- pci_iounmap(pdev, iomem);
+ pci_iounmap(pdev, hermes_io);
- fail_map_io:
+ fail_map_hermes:
+ pci_iounmap(pdev, attr_io);
+
+ fail_map_attr:
+ pci_iounmap(pdev, bridge_io);
+
+ fail_map_bridge:
pci_release_regions(pdev);
fail_resources:
@@ -245,26 +256,27 @@ static int nortel_pci_init_one(struct pci_dev *pdev,
return err;
}
-static void __devexit nortel_pci_remove_one(struct pci_dev *pdev)
+static void __devexit orinoco_nortel_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct orinoco_private *priv = netdev_priv(dev);
- struct nortel_pci_card *card = priv->card;
+ struct orinoco_pci_card *card = priv->card;
- /* clear leds */
- outw_p(0, card->iobase1 + 10);
+ /* Clear LEDs */
+ iowrite16(0, card->bridge_io + 10);
unregister_netdev(dev);
- free_irq(dev->irq, dev);
+ free_irq(pdev->irq, dev);
pci_set_drvdata(pdev, NULL);
free_orinocodev(dev);
pci_iounmap(pdev, priv->hw.iobase);
+ pci_iounmap(pdev, card->attr_io);
+ pci_iounmap(pdev, card->bridge_io);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
-
-static struct pci_device_id nortel_pci_id_table[] = {
+static struct pci_device_id orinoco_nortel_id_table[] = {
/* Nortel emobility PCI */
{0x126c, 0x8030, PCI_ANY_ID, PCI_ANY_ID,},
/* Symbol LA-4123 PCI */
@@ -272,13 +284,15 @@ static struct pci_device_id nortel_pci_id_table[] = {
{0,},
};
-MODULE_DEVICE_TABLE(pci, nortel_pci_id_table);
+MODULE_DEVICE_TABLE(pci, orinoco_nortel_id_table);
-static struct pci_driver nortel_pci_driver = {
- .name = DRIVER_NAME,
- .id_table = nortel_pci_id_table,
- .probe = nortel_pci_init_one,
- .remove = __devexit_p(nortel_pci_remove_one),
+static struct pci_driver orinoco_nortel_driver = {
+ .name = DRIVER_NAME,
+ .id_table = orinoco_nortel_id_table,
+ .probe = orinoco_nortel_init_one,
+ .remove = __devexit_p(orinoco_nortel_remove_one),
+ .suspend = orinoco_pci_suspend,
+ .resume = orinoco_pci_resume,
};
static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
@@ -288,20 +302,19 @@ MODULE_DESCRIPTION
("Driver for wireless LAN cards using the Nortel PCI bridge");
MODULE_LICENSE("Dual MPL/GPL");
-static int __init nortel_pci_init(void)
+static int __init orinoco_nortel_init(void)
{
printk(KERN_DEBUG "%s\n", version);
- return pci_module_init(&nortel_pci_driver);
+ return pci_module_init(&orinoco_nortel_driver);
}
-static void __exit nortel_pci_exit(void)
+static void __exit orinoco_nortel_exit(void)
{
- pci_unregister_driver(&nortel_pci_driver);
- ssleep(1);
+ pci_unregister_driver(&orinoco_nortel_driver);
}
-module_init(nortel_pci_init);
-module_exit(nortel_pci_exit);
+module_init(orinoco_nortel_init);
+module_exit(orinoco_nortel_exit);
/*
* Local variables:
diff --git a/drivers/net/wireless/orinoco_pci.c b/drivers/net/wireless/orinoco_pci.c
index 5362c214fc8e..1c105f40f8d5 100644
--- a/drivers/net/wireless/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco_pci.c
@@ -1,11 +1,11 @@
/* orinoco_pci.c
*
- * Driver for Prism II devices that have a direct PCI interface
- * (i.e., not in a Pcmcia or PLX bridge)
- *
- * Specifically here we're talking about the Linksys WMP11
+ * Driver for Prism 2.5/3 devices that have a direct PCI interface
+ * (i.e. these are not PCMCIA cards in a PCMCIA-to-PCI bridge).
+ * The card contains only one PCI region, which contains all the usual
+ * hermes registers, as well as the COR register.
*
- * Current maintainers (as of 29 September 2003) are:
+ * Current maintainers are:
* Pavel Roskin <proski AT gnu.org>
* and David Gibson <hermes AT gibson.dropbear.id.au>
*
@@ -41,54 +41,6 @@
* under either the MPL or the GPL.
*/
-/*
- * Theory of operation...
- * -------------------
- * Maybe you had a look in orinoco_plx. Well, this is totally different...
- *
- * The card contains only one PCI region, which contains all the usual
- * hermes registers.
- *
- * The driver will memory map this region in normal memory. Because
- * the hermes registers are mapped in normal memory and not in ISA I/O
- * post space, we can't use the usual inw/outw macros and we need to
- * use readw/writew.
- * This slight difference force us to compile our own version of
- * hermes.c with the register access macro changed. That's a bit
- * hackish but works fine.
- *
- * Note that the PCI region is pretty big (4K). That's much more than
- * the usual set of hermes register (0x0 -> 0x3E). I've got a strong
- * suspicion that the whole memory space of the adapter is in fact in
- * this region. Accessing directly the adapter memory instead of going
- * through the usual register would speed up significantely the
- * operations...
- *
- * Finally, the card looks like this :
------------------------
- Bus 0, device 14, function 0:
- Network controller: PCI device 1260:3873 (Harris Semiconductor) (rev 1).
- IRQ 11.
- Master Capable. Latency=248.
- Prefetchable 32 bit memory at 0xffbcc000 [0xffbccfff].
------------------------
-00:0e.0 Network controller: Harris Semiconductor: Unknown device 3873 (rev 01)
- Subsystem: Unknown device 1737:3874
- Control: I/O+ Mem+ BusMaster+ SpecCycle- MemWINV- VGASnoop- ParErr- Stepping- SERR- FastB2B-
- Status: Cap+ 66Mhz- UDF- FastB2B+ ParErr- DEVSEL=medium >TAbort- <TAbort- <MAbort- >SERR- <PERR-
- Latency: 248 set, cache line size 08
- Interrupt: pin A routed to IRQ 11
- Region 0: Memory at ffbcc000 (32-bit, prefetchable) [size=4K]
- Capabilities: [dc] Power Management version 2
- Flags: PMEClk- AuxPwr- DSI- D1+ D2+ PME+
- Status: D0 PME-Enable- DSel=0 DScale=0 PME-
------------------------
- *
- * That's all..
- *
- * Jean II
- */
-
#define DRIVER_NAME "orinoco_pci"
#define PFX DRIVER_NAME ": "
@@ -100,12 +52,14 @@
#include <linux/pci.h>
#include "orinoco.h"
+#include "orinoco_pci.h"
-/* All the magic there is from wlan-ng */
-/* Magic offset of the reset register of the PCI card */
+/* Offset of the COR register of the PCI card */
#define HERMES_PCI_COR (0x26)
-/* Magic bitmask to reset the card */
+
+/* Bitmask to reset the card */
#define HERMES_PCI_COR_MASK (0x0080)
+
/* Magic timeouts for doing the reset.
* Those times are straight from wlan-ng, and it is claimed that they
* are necessary. Alan will kill me. Take your time and grab a coffee. */
@@ -113,13 +67,8 @@
#define HERMES_PCI_COR_OFFT (500) /* ms */
#define HERMES_PCI_COR_BUSYT (500) /* ms */
-/* Orinoco PCI specific data */
-struct orinoco_pci_card {
- void __iomem *pci_ioaddr;
-};
-
/*
- * Do a soft reset of the PCI card using the Configuration Option Register
+ * Do a soft reset of the card using the Configuration Option Register
* We need this to get going...
* This is the part of the code that is strongly inspired from wlan-ng
*
@@ -131,14 +80,13 @@ struct orinoco_pci_card {
* Note bis : Don't try to access HERMES_CMD during the reset phase.
* It just won't work !
*/
-static int
-orinoco_pci_cor_reset(struct orinoco_private *priv)
+static int orinoco_pci_cor_reset(struct orinoco_private *priv)
{
hermes_t *hw = &priv->hw;
- unsigned long timeout;
- u16 reg;
+ unsigned long timeout;
+ u16 reg;
- /* Assert the reset until the card notice */
+ /* Assert the reset until the card notices */
hermes_write_regn(hw, PCI_COR, HERMES_PCI_COR_MASK);
mdelay(HERMES_PCI_COR_ONT);
@@ -163,19 +111,14 @@ orinoco_pci_cor_reset(struct orinoco_private *priv)
return 0;
}
-/*
- * Initialise a card. Mostly similar to PLX code.
- */
static int orinoco_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- int err = 0;
- unsigned long pci_iorange;
- u16 __iomem *pci_ioaddr = NULL;
- unsigned long pci_iolen;
- struct orinoco_private *priv = NULL;
+ int err;
+ struct orinoco_private *priv;
struct orinoco_pci_card *card;
- struct net_device *dev = NULL;
+ struct net_device *dev;
+ void __iomem *hermes_io;
err = pci_enable_device(pdev);
if (err) {
@@ -184,39 +127,32 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
}
err = pci_request_regions(pdev, DRIVER_NAME);
- if (err != 0) {
+ if (err) {
printk(KERN_ERR PFX "Cannot obtain PCI resources\n");
goto fail_resources;
}
- /* Resource 0 is mapped to the hermes registers */
- pci_iorange = pci_resource_start(pdev, 0);
- pci_iolen = pci_resource_len(pdev, 0);
- pci_ioaddr = ioremap(pci_iorange, pci_iolen);
- if (!pci_iorange) {
- printk(KERN_ERR PFX "Cannot remap hardware registers\n");
- goto fail_map;
+ hermes_io = pci_iomap(pdev, 0, 0);
+ if (!hermes_io) {
+ printk(KERN_ERR PFX "Cannot remap chipset registers\n");
+ err = -EIO;
+ goto fail_map_hermes;
}
/* Allocate network device */
dev = alloc_orinocodev(sizeof(*card), orinoco_pci_cor_reset);
- if (! dev) {
+ if (!dev) {
+ printk(KERN_ERR PFX "Cannot allocate network device\n");
err = -ENOMEM;
goto fail_alloc;
}
priv = netdev_priv(dev);
card = priv->card;
- card->pci_ioaddr = pci_ioaddr;
- dev->mem_start = pci_iorange;
- dev->mem_end = pci_iorange + pci_iolen - 1;
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- hermes_struct_init(&priv->hw, pci_ioaddr, HERMES_32BIT_REGSPACING);
-
- printk(KERN_DEBUG PFX "Detected device %s, mem:0x%lx-0x%lx, irq %d\n",
- pci_name(pdev), dev->mem_start, dev->mem_end, pdev->irq);
+ hermes_struct_init(&priv->hw, hermes_io, HERMES_32BIT_REGSPACING);
err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ,
dev->name, dev);
@@ -225,9 +161,7 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
err = -EBUSY;
goto fail_irq;
}
- dev->irq = pdev->irq;
- /* Perform a COR reset to start the card */
err = orinoco_pci_cor_reset(priv);
if (err) {
printk(KERN_ERR PFX "Initial reset failed\n");
@@ -236,11 +170,13 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
err = register_netdev(dev);
if (err) {
- printk(KERN_ERR PFX "Failed to register net device\n");
+ printk(KERN_ERR PFX "Cannot register network device\n");
goto fail;
}
pci_set_drvdata(pdev, dev);
+ printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s\n", dev->name,
+ pci_name(pdev));
return 0;
@@ -252,9 +188,9 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
free_orinocodev(dev);
fail_alloc:
- iounmap(pci_ioaddr);
+ pci_iounmap(pdev, hermes_io);
- fail_map:
+ fail_map_hermes:
pci_release_regions(pdev);
fail_resources:
@@ -267,87 +203,17 @@ static void __devexit orinoco_pci_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct orinoco_private *priv = netdev_priv(dev);
- struct orinoco_pci_card *card = priv->card;
unregister_netdev(dev);
- free_irq(dev->irq, dev);
+ free_irq(pdev->irq, dev);
pci_set_drvdata(pdev, NULL);
free_orinocodev(dev);
- iounmap(card->pci_ioaddr);
+ pci_iounmap(pdev, priv->hw.iobase);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
-static int orinoco_pci_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct orinoco_private *priv = netdev_priv(dev);
- unsigned long flags;
- int err;
-
-
- err = orinoco_lock(priv, &flags);
- if (err) {
- printk(KERN_ERR "%s: hw_unavailable on orinoco_pci_suspend\n",
- dev->name);
- return err;
- }
-
- err = __orinoco_down(dev);
- if (err)
- printk(KERN_WARNING "%s: orinoco_pci_suspend(): Error %d downing interface\n",
- dev->name, err);
-
- netif_device_detach(dev);
-
- priv->hw_unavailable++;
-
- orinoco_unlock(priv, &flags);
-
- pci_save_state(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
-
- return 0;
-}
-
-static int orinoco_pci_resume(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct orinoco_private *priv = netdev_priv(dev);
- unsigned long flags;
- int err;
-
- printk(KERN_DEBUG "%s: Orinoco-PCI waking up\n", dev->name);
-
- pci_set_power_state(pdev, 0);
- pci_restore_state(pdev);
-
- err = orinoco_reinit_firmware(dev);
- if (err) {
- printk(KERN_ERR "%s: Error %d re-initializing firmware on orinoco_pci_resume()\n",
- dev->name, err);
- return err;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- netif_device_attach(dev);
-
- priv->hw_unavailable--;
-
- if (priv->open && (! priv->hw_unavailable)) {
- err = __orinoco_up(dev);
- if (err)
- printk(KERN_ERR "%s: Error %d restarting card on orinoco_pci_resume()\n",
- dev->name, err);
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
-static struct pci_device_id orinoco_pci_pci_id_table[] = {
+static struct pci_device_id orinoco_pci_id_table[] = {
/* Intersil Prism 3 */
{0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID,},
/* Intersil Prism 2.5 */
@@ -357,11 +223,11 @@ static struct pci_device_id orinoco_pci_pci_id_table[] = {
{0,},
};
-MODULE_DEVICE_TABLE(pci, orinoco_pci_pci_id_table);
+MODULE_DEVICE_TABLE(pci, orinoco_pci_id_table);
static struct pci_driver orinoco_pci_driver = {
.name = DRIVER_NAME,
- .id_table = orinoco_pci_pci_id_table,
+ .id_table = orinoco_pci_id_table,
.probe = orinoco_pci_init_one,
.remove = __devexit_p(orinoco_pci_remove_one),
.suspend = orinoco_pci_suspend,
diff --git a/drivers/net/wireless/orinoco_pci.h b/drivers/net/wireless/orinoco_pci.h
new file mode 100644
index 000000000000..7eb1e08113e0
--- /dev/null
+++ b/drivers/net/wireless/orinoco_pci.h
@@ -0,0 +1,104 @@
+/* orinoco_pci.h
+ *
+ * Common code for all Orinoco drivers for PCI devices, including
+ * both native PCI and PCMCIA-to-PCI bridges.
+ *
+ * Copyright (C) 2005, Pavel Roskin.
+ * See orinoco.c for license.
+ */
+
+#ifndef _ORINOCO_PCI_H
+#define _ORINOCO_PCI_H
+
+#include <linux/netdevice.h>
+
+/* Driver specific data */
+struct orinoco_pci_card {
+ void __iomem *bridge_io;
+ void __iomem *attr_io;
+};
+
+#ifdef CONFIG_PM
+static int orinoco_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct orinoco_private *priv = netdev_priv(dev);
+ unsigned long flags;
+ int err;
+
+ err = orinoco_lock(priv, &flags);
+ if (err) {
+ printk(KERN_ERR "%s: cannot lock hardware for suspend\n",
+ dev->name);
+ return err;
+ }
+
+ err = __orinoco_down(dev);
+ if (err)
+ printk(KERN_WARNING "%s: error %d bringing interface down "
+ "for suspend\n", dev->name, err);
+
+ netif_device_detach(dev);
+
+ priv->hw_unavailable++;
+
+ orinoco_unlock(priv, &flags);
+
+ free_irq(pdev->irq, dev);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int orinoco_pci_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct orinoco_private *priv = netdev_priv(dev);
+ unsigned long flags;
+ int err;
+
+ pci_set_power_state(pdev, 0);
+ pci_enable_device(pdev);
+ pci_restore_state(pdev);
+
+ err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ,
+ dev->name, dev);
+ if (err) {
+ printk(KERN_ERR "%s: cannot re-allocate IRQ on resume\n",
+ dev->name);
+ pci_disable_device(pdev);
+ return -EBUSY;
+ }
+
+ err = orinoco_reinit_firmware(dev);
+ if (err) {
+ printk(KERN_ERR "%s: error %d re-initializing firmware "
+ "on resume\n", dev->name, err);
+ return err;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ netif_device_attach(dev);
+
+ priv->hw_unavailable--;
+
+ if (priv->open && (! priv->hw_unavailable)) {
+ err = __orinoco_up(dev);
+ if (err)
+ printk(KERN_ERR "%s: Error %d restarting card on resume\n",
+ dev->name, err);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+#else
+#define orinoco_pci_suspend NULL
+#define orinoco_pci_resume NULL
+#endif
+
+#endif /* _ORINOCO_PCI_H */
diff --git a/drivers/net/wireless/orinoco_plx.c b/drivers/net/wireless/orinoco_plx.c
index 210e73776545..84f696c77551 100644
--- a/drivers/net/wireless/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco_plx.c
@@ -3,7 +3,7 @@
* Driver for Prism II devices which would usually be driven by orinoco_cs,
* but are connected to the PCI bus by a PLX9052.
*
- * Current maintainers (as of 29 September 2003) are:
+ * Current maintainers are:
* Pavel Roskin <proski AT gnu.org>
* and David Gibson <hermes AT gibson.dropbear.id.au>
*
@@ -30,38 +30,18 @@
* other provisions required by the GPL. If you do not delete the
* provisions above, a recipient may use your version of this file
* under either the MPL or the GPL.
-
- * Caution: this is experimental and probably buggy. For success and
- * failure reports for different cards and adaptors, see
- * orinoco_plx_pci_id_table near the end of the file. If you have a
- * card we don't have the PCI id for, and looks like it should work,
- * drop me mail with the id and "it works"/"it doesn't work".
- *
- * Note: if everything gets detected fine but it doesn't actually send
- * or receive packets, your first port of call should probably be to
- * try newer firmware in the card. Especially if you're doing Ad-Hoc
- * modes.
- *
- * The actual driving is done by orinoco.c, this is just resource
- * allocation stuff. The explanation below is courtesy of Ryan Niemi
- * on the linux-wlan-ng list at
- * http://archives.neohapsis.com/archives/dev/linux-wlan/2001-q1/0026.html
*
- * The PLX9052-based cards (WL11000 and several others) are a
- * different beast than the usual PCMCIA-based PRISM2 configuration
- * expected by wlan-ng. Here's the general details on how the WL11000
- * PCI adapter works:
+ * Here are the general details on how the PLX9052 adapter works:
*
* - Two PCI I/O address spaces, one 0x80 long which contains the
* PLX9052 registers, and one that's 0x40 long mapped to the PCMCIA
* slot I/O address space.
*
- * - One PCI memory address space, mapped to the PCMCIA memory space
+ * - One PCI memory address space, mapped to the PCMCIA attribute space
* (containing the CIS).
*
- * After identifying the I/O and memory space, you can read through
- * the memory space to confirm the CIS's device ID or manufacturer ID
- * to make sure it's the expected card. qKeep in mind that the PCMCIA
+ * Using the latter, you can read through the CIS data to make sure the
+ * card is compatible with the driver. Keep in mind that the PCMCIA
* spec specifies the CIS as the lower 8 bits of each word read from
* the CIS, so to read the bytes of the CIS, read every other byte
* (0,2,4,...). Passing that test, you need to enable the I/O address
@@ -71,7 +51,7 @@
* within the PCI memory space. Write 0x41 to the COR register to
* enable I/O mode and to select level triggered interrupts. To
* confirm you actually succeeded, read the COR register back and make
- * sure it actually got set to 0x41, incase you have an unexpected
+ * sure it actually got set to 0x41, in case you have an unexpected
* card inserted.
*
* Following that, you can treat the second PCI I/O address space (the
@@ -101,16 +81,6 @@
* that, I've hot-swapped a number of times during debugging and
* driver development for various reasons (stuck WAIT# line after the
* radio card's firmware locks up).
- *
- * Hope this is enough info for someone to add PLX9052 support to the
- * wlan-ng card. In the case of the WL11000, the PCI ID's are
- * 0x1639/0x0200, with matching subsystem ID's. Other PLX9052-based
- * manufacturers other than Eumitcom (or on cards other than the
- * WL11000) may have different PCI ID's.
- *
- * If anyone needs any more specific info, let me know. I haven't had
- * time to implement support myself yet, and with the way things are
- * going, might not have time for a while..
*/
#define DRIVER_NAME "orinoco_plx"
@@ -125,6 +95,7 @@
#include <pcmcia/cisreg.h>
#include "orinoco.h"
+#include "orinoco_pci.h"
#define COR_OFFSET (0x3e0) /* COR attribute offset of Prism2 PC card */
#define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */
@@ -134,30 +105,20 @@
#define PLX_INTCSR 0x4c /* Interrupt Control & Status Register */
#define PLX_INTCSR_INTEN (1<<6) /* Interrupt Enable bit */
-static const u8 cis_magic[] = {
- 0x01, 0x03, 0x00, 0x00, 0xff, 0x17, 0x04, 0x67
-};
-
-/* Orinoco PLX specific data */
-struct orinoco_plx_card {
- void __iomem *attr_mem;
-};
-
/*
* Do a soft reset of the card using the Configuration Option Register
*/
static int orinoco_plx_cor_reset(struct orinoco_private *priv)
{
hermes_t *hw = &priv->hw;
- struct orinoco_plx_card *card = priv->card;
- u8 __iomem *attr_mem = card->attr_mem;
+ struct orinoco_pci_card *card = priv->card;
unsigned long timeout;
u16 reg;
- writeb(COR_VALUE | COR_RESET, attr_mem + COR_OFFSET);
+ iowrite8(COR_VALUE | COR_RESET, card->attr_io + COR_OFFSET);
mdelay(1);
- writeb(COR_VALUE, attr_mem + COR_OFFSET);
+ iowrite8(COR_VALUE, card->attr_io + COR_OFFSET);
mdelay(1);
/* Just in case, wait more until the card is no longer busy */
@@ -168,7 +129,7 @@ static int orinoco_plx_cor_reset(struct orinoco_private *priv)
reg = hermes_read_regn(hw, CMD);
}
- /* Did we timeout ? */
+ /* Still busy? */
if (reg & HERMES_CMD_BUSY) {
printk(KERN_ERR PFX "Busy timeout\n");
return -ETIMEDOUT;
@@ -177,20 +138,55 @@ static int orinoco_plx_cor_reset(struct orinoco_private *priv)
return 0;
}
+static int orinoco_plx_hw_init(struct orinoco_pci_card *card)
+{
+ int i;
+ u32 csr_reg;
+ static const u8 cis_magic[] = {
+ 0x01, 0x03, 0x00, 0x00, 0xff, 0x17, 0x04, 0x67
+ };
+
+ printk(KERN_DEBUG PFX "CIS: ");
+ for (i = 0; i < 16; i++) {
+ printk("%02X:", ioread8(card->attr_io + (i << 1)));
+ }
+ printk("\n");
+
+ /* Verify whether a supported PC card is present */
+	/* FIXME: we probably need to be smarter about this */
+ for (i = 0; i < sizeof(cis_magic); i++) {
+ if (cis_magic[i] != ioread8(card->attr_io + (i << 1))) {
+ printk(KERN_ERR PFX "The CIS value of Prism2 PC "
+ "card is unexpected\n");
+ return -ENODEV;
+ }
+ }
+
+ /* bjoern: We need to tell the card to enable interrupts, in
+ case the serial eprom didn't do this already. See the
+ PLX9052 data book, p8-1 and 8-24 for reference. */
+ csr_reg = ioread32(card->bridge_io + PLX_INTCSR);
+ if (!(csr_reg & PLX_INTCSR_INTEN)) {
+ csr_reg |= PLX_INTCSR_INTEN;
+ iowrite32(csr_reg, card->bridge_io + PLX_INTCSR);
+ csr_reg = ioread32(card->bridge_io + PLX_INTCSR);
+ if (!(csr_reg & PLX_INTCSR_INTEN)) {
+ printk(KERN_ERR PFX "Cannot enable interrupts\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
static int orinoco_plx_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- int err = 0;
- u8 __iomem *attr_mem = NULL;
- u32 csr_reg, plx_addr;
- struct orinoco_private *priv = NULL;
- struct orinoco_plx_card *card;
- unsigned long pccard_ioaddr = 0;
- unsigned long pccard_iolen = 0;
- struct net_device *dev = NULL;
- void __iomem *mem;
- int i;
+ int err;
+ struct orinoco_private *priv;
+ struct orinoco_pci_card *card;
+ struct net_device *dev;
+ void __iomem *hermes_io, *attr_io, *bridge_io;
err = pci_enable_device(pdev);
if (err) {
@@ -199,30 +195,30 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
}
err = pci_request_regions(pdev, DRIVER_NAME);
- if (err != 0) {
+ if (err) {
printk(KERN_ERR PFX "Cannot obtain PCI resources\n");
goto fail_resources;
}
- /* Resource 1 is mapped to PLX-specific registers */
- plx_addr = pci_resource_start(pdev, 1);
+ bridge_io = pci_iomap(pdev, 1, 0);
+ if (!bridge_io) {
+ printk(KERN_ERR PFX "Cannot map bridge registers\n");
+ err = -EIO;
+ goto fail_map_bridge;
+ }
- /* Resource 2 is mapped to the PCMCIA attribute memory */
- attr_mem = ioremap(pci_resource_start(pdev, 2),
- pci_resource_len(pdev, 2));
- if (!attr_mem) {
- printk(KERN_ERR PFX "Cannot remap PCMCIA space\n");
+ attr_io = pci_iomap(pdev, 2, 0);
+ if (!attr_io) {
+ printk(KERN_ERR PFX "Cannot map PCMCIA attributes\n");
+ err = -EIO;
goto fail_map_attr;
}
- /* Resource 3 is mapped to the PCMCIA I/O address space */
- pccard_ioaddr = pci_resource_start(pdev, 3);
- pccard_iolen = pci_resource_len(pdev, 3);
-
- mem = pci_iomap(pdev, 3, 0);
- if (!mem) {
- err = -ENOMEM;
- goto fail_map_io;
+ hermes_io = pci_iomap(pdev, 3, 0);
+ if (!hermes_io) {
+ printk(KERN_ERR PFX "Cannot map chipset registers\n");
+ err = -EIO;
+ goto fail_map_hermes;
}
/* Allocate network device */
@@ -235,16 +231,12 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
priv = netdev_priv(dev);
card = priv->card;
- card->attr_mem = attr_mem;
- dev->base_addr = pccard_ioaddr;
+ card->bridge_io = bridge_io;
+ card->attr_io = attr_io;
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- hermes_struct_init(&priv->hw, mem, HERMES_16BIT_REGSPACING);
-
- printk(KERN_DEBUG PFX "Detected Orinoco/Prism2 PLX device "
- "at %s irq:%d, io addr:0x%lx\n", pci_name(pdev), pdev->irq,
- pccard_ioaddr);
+ hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING);
err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ,
dev->name, dev);
@@ -253,20 +245,11 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
err = -EBUSY;
goto fail_irq;
}
- dev->irq = pdev->irq;
- /* bjoern: We need to tell the card to enable interrupts, in
- case the serial eprom didn't do this already. See the
- PLX9052 data book, p8-1 and 8-24 for reference. */
- csr_reg = inl(plx_addr + PLX_INTCSR);
- if (!(csr_reg & PLX_INTCSR_INTEN)) {
- csr_reg |= PLX_INTCSR_INTEN;
- outl(csr_reg, plx_addr + PLX_INTCSR);
- csr_reg = inl(plx_addr + PLX_INTCSR);
- if (!(csr_reg & PLX_INTCSR_INTEN)) {
- printk(KERN_ERR PFX "Cannot enable interrupts\n");
- goto fail;
- }
+ err = orinoco_plx_hw_init(card);
+ if (err) {
+ printk(KERN_ERR PFX "Hardware initialization failed\n");
+ goto fail;
}
err = orinoco_plx_cor_reset(priv);
@@ -275,23 +258,6 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
goto fail;
}
- printk(KERN_DEBUG PFX "CIS: ");
- for (i = 0; i < 16; i++) {
- printk("%02X:", readb(attr_mem + 2*i));
- }
- printk("\n");
-
- /* Verify whether a supported PC card is present */
- /* FIXME: we probably need to be smarted about this */
- for (i = 0; i < sizeof(cis_magic); i++) {
- if (cis_magic[i] != readb(attr_mem +2*i)) {
- printk(KERN_ERR PFX "The CIS value of Prism2 PC "
- "card is unexpected\n");
- err = -EIO;
- goto fail;
- }
- }
-
err = register_netdev(dev);
if (err) {
printk(KERN_ERR PFX "Cannot register network device\n");
@@ -299,6 +265,8 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
}
pci_set_drvdata(pdev, dev);
+ printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s\n", dev->name,
+ pci_name(pdev));
return 0;
@@ -310,12 +278,15 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
free_orinocodev(dev);
fail_alloc:
- pci_iounmap(pdev, mem);
+ pci_iounmap(pdev, hermes_io);
- fail_map_io:
- iounmap(attr_mem);
+ fail_map_hermes:
+ pci_iounmap(pdev, attr_io);
fail_map_attr:
+ pci_iounmap(pdev, bridge_io);
+
+ fail_map_bridge:
pci_release_regions(pdev);
fail_resources:
@@ -328,23 +299,20 @@ static void __devexit orinoco_plx_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct orinoco_private *priv = netdev_priv(dev);
- struct orinoco_plx_card *card = priv->card;
- u8 __iomem *attr_mem = card->attr_mem;
-
- BUG_ON(! dev);
+ struct orinoco_pci_card *card = priv->card;
unregister_netdev(dev);
- free_irq(dev->irq, dev);
+ free_irq(pdev->irq, dev);
pci_set_drvdata(pdev, NULL);
free_orinocodev(dev);
pci_iounmap(pdev, priv->hw.iobase);
- iounmap(attr_mem);
+ pci_iounmap(pdev, card->attr_io);
+ pci_iounmap(pdev, card->bridge_io);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
-
-static struct pci_device_id orinoco_plx_pci_id_table[] = {
+static struct pci_device_id orinoco_plx_id_table[] = {
{0x111a, 0x1023, PCI_ANY_ID, PCI_ANY_ID,}, /* Siemens SpeedStream SS1023 */
{0x1385, 0x4100, PCI_ANY_ID, PCI_ANY_ID,}, /* Netgear MA301 */
{0x15e8, 0x0130, PCI_ANY_ID, PCI_ANY_ID,}, /* Correga - does this work? */
@@ -362,13 +330,15 @@ static struct pci_device_id orinoco_plx_pci_id_table[] = {
{0,},
};
-MODULE_DEVICE_TABLE(pci, orinoco_plx_pci_id_table);
+MODULE_DEVICE_TABLE(pci, orinoco_plx_id_table);
static struct pci_driver orinoco_plx_driver = {
.name = DRIVER_NAME,
- .id_table = orinoco_plx_pci_id_table,
+ .id_table = orinoco_plx_id_table,
.probe = orinoco_plx_init_one,
.remove = __devexit_p(orinoco_plx_remove_one),
+ .suspend = orinoco_pci_suspend,
+ .resume = orinoco_pci_resume,
};
static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
@@ -388,7 +358,6 @@ static int __init orinoco_plx_init(void)
static void __exit orinoco_plx_exit(void)
{
pci_unregister_driver(&orinoco_plx_driver);
- ssleep(1);
}
module_init(orinoco_plx_init);
diff --git a/drivers/net/wireless/orinoco_tmd.c b/drivers/net/wireless/orinoco_tmd.c
index 5e68b7026186..d2b4decb7a7d 100644
--- a/drivers/net/wireless/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco_tmd.c
@@ -1,5 +1,5 @@
/* orinoco_tmd.c
- *
+ *
* Driver for Prism II devices which would usually be driven by orinoco_cs,
* but are connected to the PCI bus by a TMD7160.
*
@@ -26,25 +26,13 @@
* other provisions required by the GPL. If you do not delete the
* provisions above, a recipient may use your version of this file
* under either the MPL or the GPL.
-
- * Caution: this is experimental and probably buggy. For success and
- * failure reports for different cards and adaptors, see
- * orinoco_tmd_pci_id_table near the end of the file. If you have a
- * card we don't have the PCI id for, and looks like it should work,
- * drop me mail with the id and "it works"/"it doesn't work".
- *
- * Note: if everything gets detected fine but it doesn't actually send
- * or receive packets, your first port of call should probably be to
- * try newer firmware in the card. Especially if you're doing Ad-Hoc
- * modes
*
* The actual driving is done by orinoco.c, this is just resource
* allocation stuff.
*
* This driver is modeled after the orinoco_plx driver. The main
- * difference is that the TMD chip has only IO port ranges and no
- * memory space, i.e. no access to the CIS. Compared to the PLX chip,
- * the io range functionalities are exchanged.
+ * difference is that the TMD chip has only IO port ranges and doesn't
+ * provide access to the PCMCIA attribute space.
*
* Pheecom sells cards with the TMD chip as "ASIC version"
*/
@@ -61,32 +49,26 @@
#include <pcmcia/cisreg.h>
#include "orinoco.h"
+#include "orinoco_pci.h"
#define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */
#define COR_RESET (0x80) /* reset bit in the COR register */
#define TMD_RESET_TIME (500) /* milliseconds */
-/* Orinoco TMD specific data */
-struct orinoco_tmd_card {
- u32 tmd_io;
-};
-
-
/*
* Do a soft reset of the card using the Configuration Option Register
*/
static int orinoco_tmd_cor_reset(struct orinoco_private *priv)
{
hermes_t *hw = &priv->hw;
- struct orinoco_tmd_card *card = priv->card;
- u32 addr = card->tmd_io;
+ struct orinoco_pci_card *card = priv->card;
unsigned long timeout;
u16 reg;
- outb(COR_VALUE | COR_RESET, addr);
+ iowrite8(COR_VALUE | COR_RESET, card->bridge_io);
mdelay(1);
- outb(COR_VALUE, addr);
+ iowrite8(COR_VALUE, card->bridge_io);
mdelay(1);
/* Just in case, wait more until the card is no longer busy */
@@ -97,7 +79,7 @@ static int orinoco_tmd_cor_reset(struct orinoco_private *priv)
reg = hermes_read_regn(hw, CMD);
}
- /* Did we timeout ? */
+ /* Still busy? */
if (reg & HERMES_CMD_BUSY) {
printk(KERN_ERR PFX "Busy timeout\n");
return -ETIMEDOUT;
@@ -110,11 +92,11 @@ static int orinoco_tmd_cor_reset(struct orinoco_private *priv)
static int orinoco_tmd_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- int err = 0;
- struct orinoco_private *priv = NULL;
- struct orinoco_tmd_card *card;
- struct net_device *dev = NULL;
- void __iomem *mem;
+ int err;
+ struct orinoco_private *priv;
+ struct orinoco_pci_card *card;
+ struct net_device *dev;
+ void __iomem *hermes_io, *bridge_io;
err = pci_enable_device(pdev);
if (err) {
@@ -123,20 +105,28 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
}
err = pci_request_regions(pdev, DRIVER_NAME);
- if (err != 0) {
+ if (err) {
printk(KERN_ERR PFX "Cannot obtain PCI resources\n");
goto fail_resources;
}
- mem = pci_iomap(pdev, 2, 0);
- if (! mem) {
- err = -ENOMEM;
- goto fail_iomap;
+ bridge_io = pci_iomap(pdev, 1, 0);
+ if (!bridge_io) {
+ printk(KERN_ERR PFX "Cannot map bridge registers\n");
+ err = -EIO;
+ goto fail_map_bridge;
+ }
+
+ hermes_io = pci_iomap(pdev, 2, 0);
+ if (!hermes_io) {
+ printk(KERN_ERR PFX "Cannot map chipset registers\n");
+ err = -EIO;
+ goto fail_map_hermes;
}
/* Allocate network device */
dev = alloc_orinocodev(sizeof(*card), orinoco_tmd_cor_reset);
- if (! dev) {
+ if (!dev) {
printk(KERN_ERR PFX "Cannot allocate network device\n");
err = -ENOMEM;
goto fail_alloc;
@@ -144,16 +134,11 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
priv = netdev_priv(dev);
card = priv->card;
- card->tmd_io = pci_resource_start(pdev, 1);
- dev->base_addr = pci_resource_start(pdev, 2);
+ card->bridge_io = bridge_io;
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- hermes_struct_init(&priv->hw, mem, HERMES_16BIT_REGSPACING);
-
- printk(KERN_DEBUG PFX "Detected Orinoco/Prism2 TMD device "
- "at %s irq:%d, io addr:0x%lx\n", pci_name(pdev), pdev->irq,
- dev->base_addr);
+ hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING);
err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ,
dev->name, dev);
@@ -162,7 +147,6 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
err = -EBUSY;
goto fail_irq;
}
- dev->irq = pdev->irq;
err = orinoco_tmd_cor_reset(priv);
if (err) {
@@ -177,6 +161,8 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
}
pci_set_drvdata(pdev, dev);
+ printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s\n", dev->name,
+ pci_name(pdev));
return 0;
@@ -188,9 +174,12 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
free_orinocodev(dev);
fail_alloc:
- pci_iounmap(pdev, mem);
+ pci_iounmap(pdev, hermes_io);
+
+ fail_map_hermes:
+ pci_iounmap(pdev, bridge_io);
- fail_iomap:
+ fail_map_bridge:
pci_release_regions(pdev);
fail_resources:
@@ -203,31 +192,32 @@ static void __devexit orinoco_tmd_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct orinoco_private *priv = dev->priv;
-
- BUG_ON(! dev);
+ struct orinoco_pci_card *card = priv->card;
unregister_netdev(dev);
- free_irq(dev->irq, dev);
+ free_irq(pdev->irq, dev);
pci_set_drvdata(pdev, NULL);
free_orinocodev(dev);
pci_iounmap(pdev, priv->hw.iobase);
+ pci_iounmap(pdev, card->bridge_io);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
-
-static struct pci_device_id orinoco_tmd_pci_id_table[] = {
+static struct pci_device_id orinoco_tmd_id_table[] = {
{0x15e8, 0x0131, PCI_ANY_ID, PCI_ANY_ID,}, /* NDC and OEMs, e.g. pheecom */
{0,},
};
-MODULE_DEVICE_TABLE(pci, orinoco_tmd_pci_id_table);
+MODULE_DEVICE_TABLE(pci, orinoco_tmd_id_table);
static struct pci_driver orinoco_tmd_driver = {
.name = DRIVER_NAME,
- .id_table = orinoco_tmd_pci_id_table,
+ .id_table = orinoco_tmd_id_table,
.probe = orinoco_tmd_init_one,
.remove = __devexit_p(orinoco_tmd_remove_one),
+ .suspend = orinoco_pci_suspend,
+ .resume = orinoco_pci_resume,
};
static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
@@ -245,7 +235,6 @@ static int __init orinoco_tmd_init(void)
static void __exit orinoco_tmd_exit(void)
{
pci_unregister_driver(&orinoco_tmd_driver);
- ssleep(1);
}
module_init(orinoco_tmd_init);
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c
index f7b77ce54d7b..7f9aa139c347 100644
--- a/drivers/net/wireless/spectrum_cs.c
+++ b/drivers/net/wireless/spectrum_cs.c
@@ -1,6 +1,6 @@
/*
* Driver for 802.11b cards using RAM-loadable Symbol firmware, such as
- * Symbol Wireless Networker LA4100, CompactFlash cards by Socket
+ * Symbol Wireless Networker LA4137, CompactFlash cards by Socket
* Communications and Intel PRO/Wireless 2011B.
*
* The driver implements Symbol firmware download. The rest is handled
@@ -120,8 +120,8 @@ static void spectrum_cs_release(struct pcmcia_device *link);
* Each block has the following structure.
*/
struct dblock {
- __le32 _addr; /* adapter address where to write the block */
- __le16 _len; /* length of the data only, in bytes */
+ __le32 addr; /* adapter address where to write the block */
+ __le16 len; /* length of the data only, in bytes */
char data[0]; /* data to be written */
} __attribute__ ((packed));
@@ -131,9 +131,9 @@ struct dblock {
* items with matching ID should be written.
*/
struct pdr {
- __le32 _id; /* record ID */
- __le32 _addr; /* adapter address where to write the data */
- __le32 _len; /* expected length of the data, in bytes */
+ __le32 id; /* record ID */
+ __le32 addr; /* adapter address where to write the data */
+ __le32 len; /* expected length of the data, in bytes */
char next[0]; /* next PDR starts here */
} __attribute__ ((packed));
@@ -144,8 +144,8 @@ struct pdr {
* be plugged into the secondary firmware.
*/
struct pdi {
- __le16 _len; /* length of ID and data, in words */
- __le16 _id; /* record ID */
+ __le16 len; /* length of ID and data, in words */
+ __le16 id; /* record ID */
char data[0]; /* plug data */
} __attribute__ ((packed));
@@ -154,44 +154,44 @@ struct pdi {
static inline u32
dblock_addr(const struct dblock *blk)
{
- return le32_to_cpu(blk->_addr);
+ return le32_to_cpu(blk->addr);
}
static inline u32
dblock_len(const struct dblock *blk)
{
- return le16_to_cpu(blk->_len);
+ return le16_to_cpu(blk->len);
}
static inline u32
pdr_id(const struct pdr *pdr)
{
- return le32_to_cpu(pdr->_id);
+ return le32_to_cpu(pdr->id);
}
static inline u32
pdr_addr(const struct pdr *pdr)
{
- return le32_to_cpu(pdr->_addr);
+ return le32_to_cpu(pdr->addr);
}
static inline u32
pdr_len(const struct pdr *pdr)
{
- return le32_to_cpu(pdr->_len);
+ return le32_to_cpu(pdr->len);
}
static inline u32
pdi_id(const struct pdi *pdi)
{
- return le16_to_cpu(pdi->_id);
+ return le16_to_cpu(pdi->id);
}
/* Return length of the data only, in bytes */
static inline u32
pdi_len(const struct pdi *pdi)
{
- return 2 * (le16_to_cpu(pdi->_len) - 1);
+ return 2 * (le16_to_cpu(pdi->len) - 1);
}
@@ -343,8 +343,7 @@ spectrum_plug_pdi(hermes_t *hw, struct pdr *first_pdr, struct pdi *pdi)
/* do the actual plugging */
spectrum_aux_setaddr(hw, pdr_addr(pdr));
- hermes_write_words(hw, HERMES_AUXDATA, pdi->data,
- pdi_len(pdi) / 2);
+ hermes_write_bytes(hw, HERMES_AUXDATA, pdi->data, pdi_len(pdi));
return 0;
}
@@ -424,8 +423,8 @@ spectrum_load_blocks(hermes_t *hw, const struct dblock *first_block)
while (dblock_addr(blk) != BLOCK_END) {
spectrum_aux_setaddr(hw, blkaddr);
- hermes_write_words(hw, HERMES_AUXDATA, blk->data,
- blklen / 2);
+ hermes_write_bytes(hw, HERMES_AUXDATA, blk->data,
+ blklen);
blk = (struct dblock *) &blk->data[blklen];
blkaddr = dblock_addr(blk);
@@ -626,14 +625,11 @@ static void spectrum_cs_detach(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ if (link->dev_node)
+ unregister_netdev(dev);
+
spectrum_cs_release(link);
- DEBUG(0, PFX "detach: link=%p link->dev_node=%p\n", link, link->dev_node);
- if (link->dev_node) {
- DEBUG(0, PFX "About to unregister net device %p\n",
- dev);
- unregister_netdev(dev);
- }
free_orinocodev(dev);
} /* spectrum_cs_detach */
@@ -653,13 +649,10 @@ spectrum_cs_config(struct pcmcia_device *link)
int last_fn, last_ret;
u_char buf[64];
config_info_t conf;
- cisinfo_t info;
tuple_t tuple;
cisparse_t parse;
void __iomem *mem;
- CS_CHECK(ValidateCIS, pcmcia_validate_cis(link, &info));
-
/*
* This reads the card's CONFIG tuple to find its
* configuration registers.
@@ -709,12 +702,6 @@ spectrum_cs_config(struct pcmcia_device *link)
goto next_entry;
link->conf.ConfigIndex = cfg->index;
- /* Does this card need audio output? */
- if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
- link->conf.Attributes |= CONF_ENABLE_SPKR;
- link->conf.Status = CCSR_AUDIO_ENA;
- }
-
/* Use power settings for Vcc and Vpp if present */
/* Note that the CIS values need to be rescaled */
if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) {
@@ -835,19 +822,10 @@ spectrum_cs_config(struct pcmcia_device *link)
net_device has been registered */
/* Finally, report what we've done */
- printk(KERN_DEBUG "%s: index 0x%02x: ",
- dev->name, link->conf.ConfigIndex);
- if (link->conf.Vpp)
- printk(", Vpp %d.%d", link->conf.Vpp / 10,
- link->conf.Vpp % 10);
- printk(", irq %d", link->irq.AssignedIRQ);
- if (link->io.NumPorts1)
- printk(", io 0x%04x-0x%04x", link->io.BasePort1,
- link->io.BasePort1 + link->io.NumPorts1 - 1);
- if (link->io.NumPorts2)
- printk(" & 0x%04x-0x%04x", link->io.BasePort2,
- link->io.BasePort2 + link->io.NumPorts2 - 1);
- printk("\n");
+ printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io "
+ "0x%04x-0x%04x\n", dev->name, dev->class_dev.dev->bus_id,
+ link->irq.AssignedIRQ, link->io.BasePort1,
+ link->io.BasePort1 + link->io.NumPorts1 - 1);
return 0;
@@ -888,11 +866,10 @@ spectrum_cs_suspend(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
struct orinoco_private *priv = netdev_priv(dev);
- unsigned long flags;
int err = 0;
/* Mark the device as stopped, to block IO until later */
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock(&priv->lock);
err = __orinoco_down(dev);
if (err)
@@ -902,9 +879,9 @@ spectrum_cs_suspend(struct pcmcia_device *link)
netif_device_detach(dev);
priv->hw_unavailable++;
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock(&priv->lock);
- return 0;
+ return err;
}
static int
@@ -932,7 +909,7 @@ static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
" David Gibson <hermes@gibson.dropbear.id.au>, et al)";
static struct pcmcia_device_id spectrum_cs_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(0x026c, 0x0001), /* Symbol Spectrum24 LA4100 */
+ PCMCIA_DEVICE_MANF_CARD(0x026c, 0x0001), /* Symbol Spectrum24 LA4137 */
PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0001), /* Socket Communications CF */
PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless LAN PC Card", 0x816cc815, 0x6fbf459a), /* 2011B, not 2011 */
PCMCIA_DEVICE_NULL,
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
new file mode 100644
index 000000000000..662ecc8a33ff
--- /dev/null
+++ b/drivers/net/wireless/zd1201.c
@@ -0,0 +1,1929 @@
+/*
+ * Driver for ZyDAS zd1201 based wireless USB devices.
+ *
+ * Copyright (c) 2004, 2005 Jeroen Vreeken (pe1rxq@amsat.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * Parts of this driver have been derived from a wlan-ng version
+ * modified by ZyDAS. They also made documentation available, thanks!
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <net/iw_handler.h>
+#include <linux/string.h>
+#include <linux/if_arp.h>
+#include <linux/firmware.h>
+#include <net/ieee80211.h>
+#include "zd1201.h"
+
+static struct usb_device_id zd1201_table[] = {
+ {USB_DEVICE(0x0586, 0x3400)}, /* Peabird Wireless USB Adapter */
+ {USB_DEVICE(0x0ace, 0x1201)}, /* ZyDAS ZD1201 Wireless USB Adapter */
+ {USB_DEVICE(0x050d, 0x6051)}, /* Belkin F5D6051 usb adapter */
+ {USB_DEVICE(0x0db0, 0x6823)}, /* MSI UB11B usb adapter */
+ {USB_DEVICE(0x1044, 0x8005)}, /* GIGABYTE GN-WLBZ201 usb adapter */
+ {}
+};
+
+static int ap; /* Are we an AP or a normal station? */
+
+#define ZD1201_VERSION "0.15"
+
+MODULE_AUTHOR("Jeroen Vreeken <pe1rxq@amsat.org>");
+MODULE_DESCRIPTION("Driver for ZyDAS ZD1201 based USB Wireless adapters");
+MODULE_VERSION(ZD1201_VERSION);
+MODULE_LICENSE("GPL");
+module_param(ap, int, 0);
+MODULE_PARM_DESC(ap, "If non-zero Access Point firmware will be loaded");
+MODULE_DEVICE_TABLE(usb, zd1201_table);
+
+
+static int zd1201_fw_upload(struct usb_device *dev, int apfw)
+{
+ const struct firmware *fw_entry;
+ char *data;
+ unsigned long len;
+ int err;
+ unsigned char ret;
+ char *buf;
+ char *fwfile;
+
+ if (apfw)
+ fwfile = "zd1201-ap.fw";
+ else
+ fwfile = "zd1201.fw";
+
+ err = request_firmware(&fw_entry, fwfile, &dev->dev);
+ if (err) {
+ dev_err(&dev->dev, "Failed to load %s firmware file!\n", fwfile);
+ dev_err(&dev->dev, "Make sure the hotplug firmware loader is installed.\n");
+ dev_err(&dev->dev, "Goto http://linux-lc100020.sourceforge.net for more info.\n");
+ return err;
+ }
+
+ data = fw_entry->data;
+ len = fw_entry->size;
+
+ buf = kmalloc(1024, GFP_ATOMIC);
+ if (!buf) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ while (len > 0) {
+ int translen = (len > 1024) ? 1024 : len;
+ memcpy(buf, data, translen);
+
+ err = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0,
+ USB_DIR_OUT | 0x40, 0, 0, buf, translen,
+ ZD1201_FW_TIMEOUT);
+ if (err < 0)
+ goto exit;
+
+ len -= translen;
+ data += translen;
+ }
+
+ err = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x2,
+ USB_DIR_OUT | 0x40, 0, 0, NULL, 0, ZD1201_FW_TIMEOUT);
+ if (err < 0)
+ goto exit;
+
+ err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
+ USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT);
+ if (err < 0)
+ goto exit;
+
+ if (ret & 0x80) {
+ err = -EIO;
+ goto exit;
+ }
+
+ err = 0;
+exit:
+ kfree(buf);
+ release_firmware(fw_entry);
+ return err;
+}
+
+static void zd1201_usbfree(struct urb *urb, struct pt_regs *regs)
+{
+ struct zd1201 *zd = urb->context;
+
+ switch(urb->status) {
+ case -EILSEQ:
+ case -ENODEV:
+ case -ETIMEDOUT:
+ case -ENOENT:
+ case -EPIPE:
+ case -EOVERFLOW:
+ case -ESHUTDOWN:
+ dev_warn(&zd->usb->dev, "%s: urb failed: %d\n",
+ zd->dev->name, urb->status);
+ }
+
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ return;
+}
+
+/* cmdreq message:
+ u32 type
+ u16 cmd
+ u16 parm0
+ u16 parm1
+ u16 parm2
+ u8 pad[4]
+
+ total: 4 + 2 + 2 + 2 + 2 + 4 = 16
+*/
+static int zd1201_docmd(struct zd1201 *zd, int cmd, int parm0,
+ int parm1, int parm2)
+{
+ unsigned char *command;
+ int ret;
+ struct urb *urb;
+
+ command = kmalloc(16, GFP_ATOMIC);
+ if (!command)
+ return -ENOMEM;
+
+ *((__le32*)command) = cpu_to_le32(ZD1201_USB_CMDREQ);
+ *((__le16*)&command[4]) = cpu_to_le16(cmd);
+ *((__le16*)&command[6]) = cpu_to_le16(parm0);
+ *((__le16*)&command[8]) = cpu_to_le16(parm1);
+ *((__le16*)&command[10])= cpu_to_le16(parm2);
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ kfree(command);
+ return -ENOMEM;
+ }
+ usb_fill_bulk_urb(urb, zd->usb, usb_sndbulkpipe(zd->usb, zd->endp_out2),
+ command, 16, zd1201_usbfree, zd);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ kfree(command);
+ usb_free_urb(urb);
+ }
+
+ return ret;
+}
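The 16-byte command request assembled by hand above can be pictured as a packed structure. The following is an illustrative sketch only, not part of the patch; the struct and field names are invented here, but the layout and sizes mirror the cmdreq comment and the code in zd1201_docmd().

/* Sketch only: equivalent view of the cmdreq buffer sent on the endp_out2
 * bulk pipe.  All multi-byte fields are little-endian on the wire. */
struct zd1201_cmdreq_sketch {
        __le32 type;            /* ZD1201_USB_CMDREQ */
        __le16 cmd;             /* e.g. ZD1201_CMDCODE_ACCESS */
        __le16 parm0;
        __le16 parm1;
        __le16 parm2;
        u8     pad[4];          /* unused, left uninitialized by the driver */
} __attribute__ ((packed));     /* sizeof == 16, matching the kmalloc(16) */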
+
+/* Callback after sending out a packet */
+static void zd1201_usbtx(struct urb *urb, struct pt_regs *regs)
+{
+ struct zd1201 *zd = urb->context;
+ netif_wake_queue(zd->dev);
+ return;
+}
+
+/* Incoming data */
+static void zd1201_usbrx(struct urb *urb, struct pt_regs *regs)
+{
+ struct zd1201 *zd = urb->context;
+ int free = 0;
+ unsigned char *data = urb->transfer_buffer;
+ struct sk_buff *skb;
+ unsigned char type;
+
+ if (!zd) {
+ free = 1;
+ goto exit;
+ }
+
+ switch(urb->status) {
+ case -EILSEQ:
+ case -ENODEV:
+ case -ETIMEDOUT:
+ case -ENOENT:
+ case -EPIPE:
+ case -EOVERFLOW:
+ case -ESHUTDOWN:
+ dev_warn(&zd->usb->dev, "%s: rx urb failed: %d\n",
+ zd->dev->name, urb->status);
+ free = 1;
+ goto exit;
+ }
+
+ if (urb->status != 0 || urb->actual_length == 0)
+ goto resubmit;
+
+ type = data[0];
+ if (type == ZD1201_PACKET_EVENTSTAT || type == ZD1201_PACKET_RESOURCE) {
+ memcpy(zd->rxdata, data, urb->actual_length);
+ zd->rxlen = urb->actual_length;
+ zd->rxdatas = 1;
+ wake_up(&zd->rxdataq);
+ }
+ /* Info frame */
+ if (type == ZD1201_PACKET_INQUIRE) {
+ int i = 0;
+ unsigned short infotype, framelen, copylen;
+ framelen = le16_to_cpu(*(__le16*)&data[4]);
+ infotype = le16_to_cpu(*(__le16*)&data[6]);
+
+ if (infotype == ZD1201_INF_LINKSTATUS) {
+ short linkstatus;
+
+ linkstatus = le16_to_cpu(*(__le16*)&data[8]);
+ switch(linkstatus) {
+ case 1:
+ netif_carrier_on(zd->dev);
+ break;
+ case 2:
+ netif_carrier_off(zd->dev);
+ break;
+ case 3:
+ netif_carrier_off(zd->dev);
+ break;
+ case 4:
+ netif_carrier_on(zd->dev);
+ break;
+ default:
+ netif_carrier_off(zd->dev);
+ }
+ goto resubmit;
+ }
+ if (infotype == ZD1201_INF_ASSOCSTATUS) {
+ short status = le16_to_cpu(*(__le16*)(data+8));
+ int event;
+ union iwreq_data wrqu;
+
+ switch (status) {
+ case ZD1201_ASSOCSTATUS_STAASSOC:
+ case ZD1201_ASSOCSTATUS_REASSOC:
+ event = IWEVREGISTERED;
+ break;
+ case ZD1201_ASSOCSTATUS_DISASSOC:
+ case ZD1201_ASSOCSTATUS_ASSOCFAIL:
+ case ZD1201_ASSOCSTATUS_AUTHFAIL:
+ default:
+ event = IWEVEXPIRED;
+ }
+ memcpy(wrqu.addr.sa_data, data+10, ETH_ALEN);
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+
+ /* Send event to user space */
+ wireless_send_event(zd->dev, event, &wrqu, NULL);
+
+ goto resubmit;
+ }
+ if (infotype == ZD1201_INF_AUTHREQ) {
+ union iwreq_data wrqu;
+
+ memcpy(wrqu.addr.sa_data, data+8, ETH_ALEN);
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+ /* There isn't an event that truly fits this request.
+ We assume that userspace will be smart enough to
+ see a new station being expired and send back an
+ authstation ioctl to authorize it. */
+ wireless_send_event(zd->dev, IWEVEXPIRED, &wrqu, NULL);
+ goto resubmit;
+ }
+ /* Other infotypes are handled outside this handler */
+ zd->rxlen = 0;
+ while (i < urb->actual_length) {
+ copylen = le16_to_cpu(*(__le16*)&data[i+2]);
+ /* Sanity check, sometimes we get junk */
+ if (copylen+zd->rxlen > sizeof(zd->rxdata))
+ break;
+ memcpy(zd->rxdata+zd->rxlen, data+i+4, copylen);
+ zd->rxlen += copylen;
+ i += 64;
+ }
+ if (i >= urb->actual_length) {
+ zd->rxdatas = 1;
+ wake_up(&zd->rxdataq);
+ }
+ goto resubmit;
+ }
+ /* Actual data */
+ if (data[urb->actual_length-1] == ZD1201_PACKET_RXDATA) {
+ int datalen = urb->actual_length-1;
+ unsigned short len, fc, seq;
+ struct hlist_node *node;
+
+ len = ntohs(*(__be16 *)&data[datalen-2]);
+ if (len>datalen)
+ len=datalen;
+ fc = le16_to_cpu(*(__le16 *)&data[datalen-16]);
+ seq = le16_to_cpu(*(__le16 *)&data[datalen-24]);
+
+ if (zd->monitor) {
+ if (datalen < 24)
+ goto resubmit;
+ if (!(skb = dev_alloc_skb(datalen+24)))
+ goto resubmit;
+
+ memcpy(skb_put(skb, 2), &data[datalen-16], 2);
+ memcpy(skb_put(skb, 2), &data[datalen-2], 2);
+ memcpy(skb_put(skb, 6), &data[datalen-14], 6);
+ memcpy(skb_put(skb, 6), &data[datalen-22], 6);
+ memcpy(skb_put(skb, 6), &data[datalen-8], 6);
+ memcpy(skb_put(skb, 2), &data[datalen-24], 2);
+ memcpy(skb_put(skb, len), data, len);
+ skb->dev = zd->dev;
+ skb->dev->last_rx = jiffies;
+ skb->protocol = eth_type_trans(skb, zd->dev);
+ zd->stats.rx_packets++;
+ zd->stats.rx_bytes += skb->len;
+ netif_rx(skb);
+ goto resubmit;
+ }
+
+ if ((seq & IEEE80211_SCTL_FRAG) ||
+ (fc & IEEE80211_FCTL_MOREFRAGS)) {
+ struct zd1201_frag *frag = NULL;
+ char *ptr;
+
+ if (datalen<14)
+ goto resubmit;
+ if ((seq & IEEE80211_SCTL_FRAG) == 0) {
+ frag = kmalloc(sizeof(*frag), GFP_ATOMIC);
+ if (!frag)
+ goto resubmit;
+ skb = dev_alloc_skb(IEEE80211_DATA_LEN +14+2);
+ if (!skb) {
+ kfree(frag);
+ goto resubmit;
+ }
+ frag->skb = skb;
+ frag->seq = seq & IEEE80211_SCTL_SEQ;
+ skb_reserve(skb, 2);
+ memcpy(skb_put(skb, 12), &data[datalen-14], 12);
+ memcpy(skb_put(skb, 2), &data[6], 2);
+ memcpy(skb_put(skb, len), data+8, len);
+ hlist_add_head(&frag->fnode, &zd->fraglist);
+ goto resubmit;
+ }
+ hlist_for_each_entry(frag, node, &zd->fraglist, fnode)
+ if (frag->seq == (seq&IEEE80211_SCTL_SEQ))
+ break;
+ if (!frag)
+ goto resubmit;
+ skb = frag->skb;
+ ptr = skb_put(skb, len);
+ if (ptr)
+ memcpy(ptr, data+8, len);
+ if (fc & IEEE80211_FCTL_MOREFRAGS)
+ goto resubmit;
+ hlist_del_init(&frag->fnode);
+ kfree(frag);
+ } else {
+ if (datalen<14)
+ goto resubmit;
+ skb = dev_alloc_skb(len + 14 + 2);
+ if (!skb)
+ goto resubmit;
+ skb_reserve(skb, 2);
+ memcpy(skb_put(skb, 12), &data[datalen-14], 12);
+ memcpy(skb_put(skb, 2), &data[6], 2);
+ memcpy(skb_put(skb, len), data+8, len);
+ }
+ skb->dev = zd->dev;
+ skb->dev->last_rx = jiffies;
+ skb->protocol = eth_type_trans(skb, zd->dev);
+ zd->stats.rx_packets++;
+ zd->stats.rx_bytes += skb->len;
+ netif_rx(skb);
+ }
+resubmit:
+ memset(data, 0, ZD1201_RXSIZE);
+
+ urb->status = 0;
+ urb->dev = zd->usb;
+ if(usb_submit_urb(urb, GFP_ATOMIC))
+ free = 1;
+
+exit:
+ if (free) {
+ zd->rxlen = 0;
+ zd->rxdatas = 1;
+ wake_up(&zd->rxdataq);
+ kfree(urb->transfer_buffer);
+ }
+ return;
+}
+
+static int zd1201_getconfig(struct zd1201 *zd, int rid, void *riddata,
+ unsigned int riddatalen)
+{
+ int err;
+ int i = 0;
+ int code;
+ int rid_fid;
+ int length;
+ unsigned char *pdata;
+
+ zd->rxdatas = 0;
+ err = zd1201_docmd(zd, ZD1201_CMDCODE_ACCESS, rid, 0, 0);
+ if (err)
+ return err;
+
+ wait_event_interruptible(zd->rxdataq, zd->rxdatas);
+ if (!zd->rxlen)
+ return -EIO;
+
+ code = le16_to_cpu(*(__le16*)(&zd->rxdata[4]));
+ rid_fid = le16_to_cpu(*(__le16*)(&zd->rxdata[6]));
+ length = le16_to_cpu(*(__le16*)(&zd->rxdata[8]));
+ if (length > zd->rxlen)
+ length = zd->rxlen-6;
+
+ /* If access bit is not on, then error */
+ if ((code & ZD1201_ACCESSBIT) != ZD1201_ACCESSBIT || rid_fid != rid )
+ return -EINVAL;
+
+ /* Length reported in the response must match the caller's buffer */
+ if (riddatalen != (length - 4)) {
+ dev_dbg(&zd->usb->dev, "riddatalen mismatches, expected=%u, (packet=%u) length=%u, rid=0x%04X, rid_fid=0x%04X\n",
+ riddatalen, zd->rxlen, length, rid, rid_fid);
+ return -ENODATA;
+ }
+
+ zd->rxdatas = 0;
+ /* Issue SetRxRid command */
+ err = zd1201_docmd(zd, ZD1201_CMDCODE_SETRXRID, rid, 0, length);
+ if (err)
+ return err;
+
+ /* Receive RID record from resource packets */
+ wait_event_interruptible(zd->rxdataq, zd->rxdatas);
+ if (!zd->rxlen)
+ return -EIO;
+
+ if (zd->rxdata[zd->rxlen - 1] != ZD1201_PACKET_RESOURCE) {
+ dev_dbg(&zd->usb->dev, "Packet type mismatch: 0x%x not 0x3\n",
+ zd->rxdata[zd->rxlen-1]);
+ return -EINVAL;
+ }
+
+ /* Set the data pointer and received data length */
+ pdata = zd->rxdata;
+ length = zd->rxlen;
+
+ do {
+ int actual_length;
+
+ actual_length = (length > 64) ? 64 : length;
+
+ if (pdata[0] != 0x3) {
+ dev_dbg(&zd->usb->dev, "Rx Resource packet type error: %02X\n",
+ pdata[0]);
+ return -EINVAL;
+ }
+
+ if (actual_length != 64) {
+ /* Trim the last packet type byte */
+ actual_length--;
+ }
+
+ /* Skip the 4 bytes header (RID length and RID) */
+ if (i == 0) {
+ pdata += 8;
+ actual_length -= 8;
+ } else {
+ pdata += 4;
+ actual_length -= 4;
+ }
+
+ memcpy(riddata, pdata, actual_length);
+ riddata += actual_length;
+ pdata += actual_length;
+ length -= 64;
+ i++;
+ } while (length > 0);
+
+ return 0;
+}
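The do-while loop above reassembles a RID read from 64-byte resource packets, stripping a different amount of header from each. The helper below is a sketch only (the function name is invented and nothing here is part of the patch); it restates how many payload bytes one packet contributes.

/* Sketch only: payload bytes recovered from one resource packet during a
 * RID read, mirroring the arithmetic in zd1201_getconfig(). */
static inline int zd1201_resource_payload_sketch(int remaining, int first)
{
        int n = (remaining > 64) ? 64 : remaining;

        if (n != 64)            /* short last packet: drop the trailing type byte */
                n--;
        return n - (first ? 8 : 4);     /* 8 header bytes in the first packet, 4 later */
}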
+
+/*
+ * resreq:
+ * byte type
+ * byte sequence
+ * u16 reserved
+ * byte data[12]
+ * total: 16
+ */
+static int zd1201_setconfig(struct zd1201 *zd, int rid, void *buf, int len, int wait)
+{
+ int err;
+ unsigned char *request;
+ int reqlen;
+ char seq=0;
+ struct urb *urb;
+ gfp_t gfp_mask = wait ? GFP_NOIO : GFP_ATOMIC;
+
+ len += 4; /* first 4 are for header */
+
+ zd->rxdatas = 0;
+ zd->rxlen = 0;
+ for (seq=0; len > 0; seq++) {
+ request = kmalloc(16, gfp_mask);
+ if (!request)
+ return -ENOMEM;
+ urb = usb_alloc_urb(0, gfp_mask);
+ if (!urb) {
+ kfree(request);
+ return -ENOMEM;
+ }
+ memset(request, 0, 16);
+ reqlen = len>12 ? 12 : len;
+ request[0] = ZD1201_USB_RESREQ;
+ request[1] = seq;
+ request[2] = 0;
+ request[3] = 0;
+ if (request[1] == 0) {
+ /* add header */
+ *(__le16*)&request[4] = cpu_to_le16((len-2+1)/2);
+ *(__le16*)&request[6] = cpu_to_le16(rid);
+ memcpy(request+8, buf, reqlen-4);
+ buf += reqlen-4;
+ } else {
+ memcpy(request+4, buf, reqlen);
+ buf += reqlen;
+ }
+
+ len -= reqlen;
+
+ usb_fill_bulk_urb(urb, zd->usb, usb_sndbulkpipe(zd->usb,
+ zd->endp_out2), request, 16, zd1201_usbfree, zd);
+ err = usb_submit_urb(urb, gfp_mask);
+ if (err)
+ goto err;
+ }
+
+ request = kmalloc(16, gfp_mask);
+ if (!request)
+ return -ENOMEM;
+ urb = usb_alloc_urb(0, gfp_mask);
+ if (!urb) {
+ kfree(request);
+ return -ENOMEM;
+ }
+ *((__le32*)request) = cpu_to_le32(ZD1201_USB_CMDREQ);
+ *((__le16*)&request[4]) =
+ cpu_to_le16(ZD1201_CMDCODE_ACCESS|ZD1201_ACCESSBIT);
+ *((__le16*)&request[6]) = cpu_to_le16(rid);
+ *((__le16*)&request[8]) = cpu_to_le16(0);
+ *((__le16*)&request[10]) = cpu_to_le16(0);
+ usb_fill_bulk_urb(urb, zd->usb, usb_sndbulkpipe(zd->usb, zd->endp_out2),
+ request, 16, zd1201_usbfree, zd);
+ err = usb_submit_urb(urb, gfp_mask);
+ if (err)
+ goto err;
+
+ if (wait) {
+ wait_event_interruptible(zd->rxdataq, zd->rxdatas);
+ if (!zd->rxlen || le16_to_cpu(*(__le16*)&zd->rxdata[6]) != rid) {
+ dev_dbg(&zd->usb->dev, "wrong or no RID received\n");
+ }
+ }
+
+ return 0;
+err:
+ kfree(request);
+ usb_free_urb(urb);
+ return err;
+}
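For orientation, zd1201_setconfig() splits a RID write into 16-byte resource requests: the first fragment carries the 4-byte RID header plus 8 payload bytes, each later fragment carries 12 payload bytes, and a final cmdreq commits the write. The sketch below (invented helper name, not part of the patch) restates the resulting fragment count.

/* Sketch only: number of resource-request URBs issued for a RID write of
 * 'payload' bytes, excluding the trailing ZD1201_CMDCODE_ACCESS command. */
static inline int zd1201_setconfig_fragments_sketch(int payload)
{
        return (payload + 4 + 11) / 12; /* ceil((payload + RID header) / 12) */
}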
+
+static inline int zd1201_getconfig16(struct zd1201 *zd, int rid, short *val)
+{
+ int err;
+ __le16 zdval;
+
+ err = zd1201_getconfig(zd, rid, &zdval, sizeof(__le16));
+ if (err)
+ return err;
+ *val = le16_to_cpu(zdval);
+ return 0;
+}
+
+static inline int zd1201_setconfig16(struct zd1201 *zd, int rid, short val)
+{
+ __le16 zdval = cpu_to_le16(val);
+ return (zd1201_setconfig(zd, rid, &zdval, sizeof(__le16), 1));
+}
+
+static int zd1201_drvr_start(struct zd1201 *zd)
+{
+ int err, i;
+ short max;
+ __le16 zdmax;
+ unsigned char *buffer;
+
+ buffer = kzalloc(ZD1201_RXSIZE, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ usb_fill_bulk_urb(zd->rx_urb, zd->usb,
+ usb_rcvbulkpipe(zd->usb, zd->endp_in), buffer, ZD1201_RXSIZE,
+ zd1201_usbrx, zd);
+
+ err = usb_submit_urb(zd->rx_urb, GFP_KERNEL);
+ if (err)
+ goto err_buffer;
+
+ err = zd1201_docmd(zd, ZD1201_CMDCODE_INIT, 0, 0, 0);
+ if (err)
+ goto err_urb;
+
+ err = zd1201_getconfig(zd, ZD1201_RID_CNFMAXTXBUFFERNUMBER, &zdmax,
+ sizeof(__le16));
+ if (err)
+ goto err_urb;
+
+ max = le16_to_cpu(zdmax);
+ for (i=0; i<max; i++) {
+ err = zd1201_docmd(zd, ZD1201_CMDCODE_ALLOC, 1514, 0, 0);
+ if (err)
+ goto err_urb;
+ }
+
+ return 0;
+
+err_urb:
+ usb_kill_urb(zd->rx_urb);
+ return err;
+err_buffer:
+ kfree(buffer);
+ return err;
+}
+
+/* Magic alert: The firmware doesn't seem to like the MAC state being
+ * toggled in promisc (aka monitor) mode.
+ * (It works a number of times, but will halt eventually)
+ * So we turn promiscuous mode off before disabling and back on after enabling, if needed.
+ */
+static int zd1201_enable(struct zd1201 *zd)
+{
+ int err;
+
+ if (zd->mac_enabled)
+ return 0;
+
+ err = zd1201_docmd(zd, ZD1201_CMDCODE_ENABLE, 0, 0, 0);
+ if (!err)
+ zd->mac_enabled = 1;
+
+ if (zd->monitor)
+ err = zd1201_setconfig16(zd, ZD1201_RID_PROMISCUOUSMODE, 1);
+
+ return err;
+}
+
+static int zd1201_disable(struct zd1201 *zd)
+{
+ int err;
+
+ if (!zd->mac_enabled)
+ return 0;
+ if (zd->monitor) {
+ err = zd1201_setconfig16(zd, ZD1201_RID_PROMISCUOUSMODE, 0);
+ if (err)
+ return err;
+ }
+
+ err = zd1201_docmd(zd, ZD1201_CMDCODE_DISABLE, 0, 0, 0);
+ if (!err)
+ zd->mac_enabled = 0;
+ return err;
+}
+
+static int zd1201_mac_reset(struct zd1201 *zd)
+{
+ if (!zd->mac_enabled)
+ return 0;
+ zd1201_disable(zd);
+ return zd1201_enable(zd);
+}
+
+static int zd1201_join(struct zd1201 *zd, char *essid, int essidlen)
+{
+ int err, val;
+ char buf[IW_ESSID_MAX_SIZE+2];
+
+ err = zd1201_disable(zd);
+ if (err)
+ return err;
+
+ val = ZD1201_CNFAUTHENTICATION_OPENSYSTEM;
+ val |= ZD1201_CNFAUTHENTICATION_SHAREDKEY;
+ err = zd1201_setconfig16(zd, ZD1201_RID_CNFAUTHENTICATION, val);
+ if (err)
+ return err;
+
+ *(__le16 *)buf = cpu_to_le16(essidlen);
+ memcpy(buf+2, essid, essidlen);
+ if (!zd->ap) { /* Normal station */
+ err = zd1201_setconfig(zd, ZD1201_RID_CNFDESIREDSSID, buf,
+ IW_ESSID_MAX_SIZE+2, 1);
+ if (err)
+ return err;
+ } else { /* AP */
+ err = zd1201_setconfig(zd, ZD1201_RID_CNFOWNSSID, buf,
+ IW_ESSID_MAX_SIZE+2, 1);
+ if (err)
+ return err;
+ }
+
+ err = zd1201_setconfig(zd, ZD1201_RID_CNFOWNMACADDR,
+ zd->dev->dev_addr, zd->dev->addr_len, 1);
+ if (err)
+ return err;
+
+ err = zd1201_enable(zd);
+ if (err)
+ return err;
+
+ msleep(100);
+ return 0;
+}
+
+static int zd1201_net_open(struct net_device *dev)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+
+ /* Start MAC with wildcard if no essid set */
+ if (!zd->mac_enabled)
+ zd1201_join(zd, zd->essid, zd->essidlen);
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int zd1201_net_stop(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return 0;
+}
+
+/*
+ RFC 1042 encapsulates Ethernet frames in 802.11 frames
+ by prefixing them with an LLC header (0xaa, 0xaa, 0x03) followed by a
+ SNAP OID of 0 (0x00, 0x00, 0x00). The ZD1201 additionally requires a
+ possible pad byte, a copy of the Ethernet addresses, the length of the
+ standard RFC 1042 packet and a command byte (which is nul for tx).
+
+ tx frame (from Wlan NG):
+ RFC 1042:
+ llc 0xAA 0xAA 0x03 (802.2 LLC)
+ snap 0x00 0x00 0x00 (Ethernet encapsulated)
+ type 2 bytes, Ethernet type field
+ payload (minus eth header)
+ Zydas specific:
+ padding 1B if (skb->len+8+1)%64==0
+ Eth MAC addr 12 bytes, Ethernet MAC addresses
+ length 2 bytes, RFC 1042 packet length
+ (llc+snap+type+payload)
+ zd 1 null byte, zd1201 packet type
+ */
+static int zd1201_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ unsigned char *txbuf = zd->txdata;
+ int txbuflen, pad = 0, err;
+ struct urb *urb = zd->tx_urb;
+
+ if (!zd->mac_enabled || zd->monitor) {
+ zd->stats.tx_dropped++;
+ kfree_skb(skb);
+ return 0;
+ }
+ netif_stop_queue(dev);
+
+ txbuflen = skb->len + 8 + 1;
+ if (txbuflen%64 == 0) {
+ pad = 1;
+ txbuflen++;
+ }
+ txbuf[0] = 0xAA;
+ txbuf[1] = 0xAA;
+ txbuf[2] = 0x03;
+ txbuf[3] = 0x00; /* rfc1042 */
+ txbuf[4] = 0x00;
+ txbuf[5] = 0x00;
+
+ memcpy(txbuf+6, skb->data+12, skb->len-12);
+ if (pad)
+ txbuf[skb->len-12+6]=0;
+ memcpy(txbuf+skb->len-12+6+pad, skb->data, 12);
+ *(__be16*)&txbuf[skb->len+6+pad] = htons(skb->len-12+6);
+ txbuf[txbuflen-1] = 0;
+
+ usb_fill_bulk_urb(urb, zd->usb, usb_sndbulkpipe(zd->usb, zd->endp_out),
+ txbuf, txbuflen, zd1201_usbtx, zd);
+
+ err = usb_submit_urb(zd->tx_urb, GFP_ATOMIC);
+ if (err) {
+ zd->stats.tx_errors++;
+ netif_start_queue(dev);
+ return err;
+ }
+ zd->stats.tx_packets++;
+ zd->stats.tx_bytes += skb->len;
+ dev->trans_start = jiffies;
+ kfree_skb(skb);
+
+ return 0;
+}
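Restating the size arithmetic from the frame-format comment above as a sketch (helper name invented, not part of the patch): the transmit buffer is 9 bytes larger than the Ethernet frame (6 bytes of LLC/SNAP, the 2-byte length field and the final command byte), plus one pad byte whenever the unpadded total would be an exact multiple of 64.

/* Sketch only: URB length produced by zd1201_hard_start_xmit() for an
 * Ethernet frame of skb_len bytes; the MAC addresses are relocated, not
 * duplicated, so only LLC/SNAP + length + command byte are added. */
static inline int zd1201_txbuf_len_sketch(int skb_len)
{
        int len = skb_len + 8 + 1;

        if (len % 64 == 0)      /* ZyDAS padding rule */
                len++;
        return len;
}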
+
+static void zd1201_tx_timeout(struct net_device *dev)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+
+ if (!zd)
+ return;
+ dev_warn(&zd->usb->dev, "%s: TX timeout, shooting down urb\n",
+ dev->name);
+ usb_unlink_urb(zd->tx_urb);
+ zd->stats.tx_errors++;
+ /* Restart the timeout to quiet the watchdog: */
+ dev->trans_start = jiffies;
+}
+
+static int zd1201_set_mac_address(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ int err;
+
+ if (!zd)
+ return -ENODEV;
+
+ err = zd1201_setconfig(zd, ZD1201_RID_CNFOWNMACADDR,
+ addr->sa_data, dev->addr_len, 1);
+ if (err)
+ return err;
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ return zd1201_mac_reset(zd);
+}
+
+static struct net_device_stats *zd1201_get_stats(struct net_device *dev)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+
+ return &zd->stats;
+}
+
+static struct iw_statistics *zd1201_get_wireless_stats(struct net_device *dev)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+
+ return &zd->iwstats;
+}
+
+static void zd1201_set_multicast(struct net_device *dev)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ struct dev_mc_list *mc = dev->mc_list;
+ unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI];
+ int i;
+
+ if (dev->mc_count > ZD1201_MAXMULTI)
+ return;
+
+ for (i=0; i<dev->mc_count; i++) {
+ memcpy(reqbuf+i*ETH_ALEN, mc->dmi_addr, ETH_ALEN);
+ mc = mc->next;
+ }
+ zd1201_setconfig(zd, ZD1201_RID_CNFGROUPADDRESS, reqbuf,
+ dev->mc_count*ETH_ALEN, 0);
+
+}
+
+static int zd1201_config_commit(struct net_device *dev,
+ struct iw_request_info *info, struct iw_point *data, char *essid)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+
+ return zd1201_mac_reset(zd);
+}
+
+static int zd1201_get_name(struct net_device *dev,
+ struct iw_request_info *info, char *name, char *extra)
+{
+ strcpy(name, "IEEE 802.11b");
+ return 0;
+}
+
+static int zd1201_set_freq(struct net_device *dev,
+ struct iw_request_info *info, struct iw_freq *freq, char *extra)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ short channel = 0;
+ int err;
+
+ if (freq->e == 0)
+ channel = freq->m;
+ else {
+ if (freq->m >= 2482)
+ channel = 14;
+ else if (freq->m >= 2407)
+ channel = (freq->m-2407)/5;
+ }
+
+ err = zd1201_setconfig16(zd, ZD1201_RID_CNFOWNCHANNEL, channel);
+ if (err)
+ return err;
+
+ zd1201_mac_reset(zd);
+
+ return 0;
+}
+
+static int zd1201_get_freq(struct net_device *dev,
+ struct iw_request_info *info, struct iw_freq *freq, char *extra)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ short channel;
+ int err;
+
+ err = zd1201_getconfig16(zd, ZD1201_RID_CNFOWNCHANNEL, &channel);
+ if (err)
+ return err;
+ freq->e = 0;
+ freq->m = channel;
+
+ return 0;
+}
+
+static int zd1201_set_mode(struct net_device *dev,
+ struct iw_request_info *info, __u32 *mode, char *extra)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ short porttype, monitor = 0;
+ unsigned char buffer[IW_ESSID_MAX_SIZE+2];
+ int err;
+
+ if (zd->ap) {
+ if (*mode != IW_MODE_MASTER)
+ return -EINVAL;
+ return 0;
+ }
+
+ err = zd1201_setconfig16(zd, ZD1201_RID_PROMISCUOUSMODE, 0);
+ if (err)
+ return err;
+ zd->dev->type = ARPHRD_ETHER;
+ switch(*mode) {
+ case IW_MODE_MONITOR:
+ monitor = 1;
+ zd->dev->type = ARPHRD_IEEE80211;
+ /* Make sure we are no longer associated by
+ setting an 'impossible' essid.
+ (otherwise we would confuse the firmware)
+ */
+ zd1201_join(zd, "\0-*#\0", 5);
+ /* Put port in pIBSS */
+ case 8: /* No pseudo-IBSS in wireless extensions (yet) */
+ porttype = ZD1201_PORTTYPE_PSEUDOIBSS;
+ break;
+ case IW_MODE_ADHOC:
+ porttype = ZD1201_PORTTYPE_IBSS;
+ break;
+ case IW_MODE_INFRA:
+ porttype = ZD1201_PORTTYPE_BSS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = zd1201_setconfig16(zd, ZD1201_RID_CNFPORTTYPE, porttype);
+ if (err)
+ return err;
+ if (zd->monitor && !monitor) {
+ zd1201_disable(zd);
+ *(__le16 *)buffer = cpu_to_le16(zd->essidlen);
+ memcpy(buffer+2, zd->essid, zd->essidlen);
+ err = zd1201_setconfig(zd, ZD1201_RID_CNFDESIREDSSID,
+ buffer, IW_ESSID_MAX_SIZE+2, 1);
+ if (err)
+ return err;
+ }
+ zd->monitor = monitor;
+ /* If monitor mode is set we don't actually turn it on here since it
+ * is done during mac reset anyway (see zd1201_enable).
+ */
+ zd1201_mac_reset(zd);
+
+ return 0;
+}
+
+static int zd1201_get_mode(struct net_device *dev,
+ struct iw_request_info *info, __u32 *mode, char *extra)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ short porttype;
+ int err;
+
+ err = zd1201_getconfig16(zd, ZD1201_RID_CNFPORTTYPE, &porttype);
+ if (err)
+ return err;
+ switch(porttype) {
+ case ZD1201_PORTTYPE_IBSS:
+ *mode = IW_MODE_ADHOC;
+ break;
+ case ZD1201_PORTTYPE_BSS:
+ *mode = IW_MODE_INFRA;
+ break;
+ case ZD1201_PORTTYPE_WDS:
+ *mode = IW_MODE_REPEAT;
+ break;
+ case ZD1201_PORTTYPE_PSEUDOIBSS:
+ *mode = 8;/* No Pseudo-IBSS... */
+ break;
+ case ZD1201_PORTTYPE_AP:
+ *mode = IW_MODE_MASTER;
+ break;
+ default:
+ dev_dbg(&zd->usb->dev, "Unknown porttype: %d\n",
+ porttype);
+ *mode = IW_MODE_AUTO;
+ }
+ if (zd->monitor)
+ *mode = IW_MODE_MONITOR;
+
+ return 0;
+}
+
+static int zd1201_get_range(struct net_device *dev,
+ struct iw_request_info *info, struct iw_point *wrq, char *extra)
+{
+ struct iw_range *range = (struct iw_range *)extra;
+
+ wrq->length = sizeof(struct iw_range);
+ memset(range, 0, sizeof(struct iw_range));
+ range->we_version_compiled = WIRELESS_EXT;
+ range->we_version_source = WIRELESS_EXT;
+
+ range->max_qual.qual = 128;
+ range->max_qual.level = 128;
+ range->max_qual.noise = 128;
+ range->max_qual.updated = 7;
+
+ range->encoding_size[0] = 5;
+ range->encoding_size[1] = 13;
+ range->num_encoding_sizes = 2;
+ range->max_encoding_tokens = ZD1201_NUMKEYS;
+
+ range->num_bitrates = 4;
+ range->bitrate[0] = 1000000;
+ range->bitrate[1] = 2000000;
+ range->bitrate[2] = 5500000;
+ range->bitrate[3] = 11000000;
+
+ range->min_rts = 0;
+ range->min_frag = ZD1201_FRAGMIN;
+ range->max_rts = ZD1201_RTSMAX;
+ range->max_frag = ZD1201_FRAGMAX;
+
+ return 0;
+}
+
+/* Little bit of magic here: we only get the quality if we poll
+ * for it, and we never get an actual request to trigger such
+ * a poll. Therefore we 'assume' that the user will soon ask for
+ * the stats after asking the bssid.
+ */
+static int zd1201_get_wap(struct net_device *dev,
+ struct iw_request_info *info, struct sockaddr *ap_addr, char *extra)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ unsigned char buffer[6];
+
+ if (!zd1201_getconfig(zd, ZD1201_RID_COMMSQUALITY, buffer, 6)) {
+ /* Unfortunately the quality and noise reported are useless.
+ They seem to be accumulators that increase until you
+ read them; unless we poll on a fixed interval we can't
+ use them.
+ */
+ /*zd->iwstats.qual.qual = le16_to_cpu(((__le16 *)buffer)[0]);*/
+ zd->iwstats.qual.level = le16_to_cpu(((__le16 *)buffer)[1]);
+ /*zd->iwstats.qual.noise = le16_to_cpu(((__le16 *)buffer)[2]);*/
+ zd->iwstats.qual.updated = 2;
+ }
+
+ return zd1201_getconfig(zd, ZD1201_RID_CURRENTBSSID, ap_addr->sa_data, 6);
+}
+
+static int zd1201_set_scan(struct net_device *dev,
+ struct iw_request_info *info, struct iw_point *srq, char *extra)
+{
+ /* We do everything in get_scan */
+ return 0;
+}
+
+static int zd1201_get_scan(struct net_device *dev,
+ struct iw_request_info *info, struct iw_point *srq, char *extra)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ int err, i, j, enabled_save;
+ struct iw_event iwe;
+ char *cev = extra;
+ char *end_buf = extra + IW_SCAN_MAX_DATA;
+
+ /* No scanning in AP mode */
+ if (zd->ap)
+ return -EOPNOTSUPP;
+
+ /* Scan doesn't seem to work if disabled */
+ enabled_save = zd->mac_enabled;
+ zd1201_enable(zd);
+
+ zd->rxdatas = 0;
+ err = zd1201_docmd(zd, ZD1201_CMDCODE_INQUIRE,
+ ZD1201_INQ_SCANRESULTS, 0, 0);
+ if (err)
+ return err;
+
+ wait_event_interruptible(zd->rxdataq, zd->rxdatas);
+ if (!zd->rxlen)
+ return -EIO;
+
+ if (le16_to_cpu(*(__le16*)&zd->rxdata[2]) != ZD1201_INQ_SCANRESULTS)
+ return -EIO;
+
+ for(i=8; i<zd->rxlen; i+=62) {
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(iwe.u.ap_addr.sa_data, zd->rxdata+i+6, 6);
+ cev = iwe_stream_add_event(cev, end_buf, &iwe, IW_EV_ADDR_LEN);
+
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.length = zd->rxdata[i+16];
+ iwe.u.data.flags = 1;
+ cev = iwe_stream_add_point(cev, end_buf, &iwe, zd->rxdata+i+18);
+
+ iwe.cmd = SIOCGIWMODE;
+ if (zd->rxdata[i+14]&0x01)
+ iwe.u.mode = IW_MODE_MASTER;
+ else
+ iwe.u.mode = IW_MODE_ADHOC;
+ cev = iwe_stream_add_event(cev, end_buf, &iwe, IW_EV_UINT_LEN);
+
+ iwe.cmd = SIOCGIWFREQ;
+ iwe.u.freq.m = zd->rxdata[i+0];
+ iwe.u.freq.e = 0;
+ cev = iwe_stream_add_event(cev, end_buf, &iwe, IW_EV_FREQ_LEN);
+
+ iwe.cmd = SIOCGIWRATE;
+ iwe.u.bitrate.fixed = 0;
+ iwe.u.bitrate.disabled = 0;
+ for (j=0; j<10; j++) if (zd->rxdata[i+50+j]) {
+ iwe.u.bitrate.value = (zd->rxdata[i+50+j]&0x7f)*500000;
+ cev=iwe_stream_add_event(cev, end_buf, &iwe,
+ IW_EV_PARAM_LEN);
+ }
+
+ iwe.cmd = SIOCGIWENCODE;
+ iwe.u.data.length = 0;
+ if (zd->rxdata[i+14]&0x10)
+ iwe.u.data.flags = IW_ENCODE_ENABLED;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ cev = iwe_stream_add_point(cev, end_buf, &iwe, NULL);
+
+ iwe.cmd = IWEVQUAL;
+ iwe.u.qual.qual = zd->rxdata[i+4];
+ iwe.u.qual.noise= zd->rxdata[i+2]/10-100;
+ iwe.u.qual.level = (256+zd->rxdata[i+4]*100)/255-100;
+ iwe.u.qual.updated = 7;
+ cev = iwe_stream_add_event(cev, end_buf, &iwe, IW_EV_QUAL_LEN);
+ }
+
+ if (!enabled_save)
+ zd1201_disable(zd);
+
+ srq->length = cev - extra;
+ srq->flags = 0;
+
+ return 0;
+}
+
+static int zd1201_set_essid(struct net_device *dev,
+ struct iw_request_info *info, struct iw_point *data, char *essid)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+
+ if (data->length > IW_ESSID_MAX_SIZE)
+ return -EINVAL;
+ if (data->length < 1)
+ data->length = 1;
+ zd->essidlen = data->length-1;
+ memset(zd->essid, 0, IW_ESSID_MAX_SIZE+1);
+ memcpy(zd->essid, essid, data->length);
+ return zd1201_join(zd, zd->essid, zd->essidlen);
+}
+
+static int zd1201_get_essid(struct net_device *dev,
+ struct iw_request_info *info, struct iw_point *data, char *essid)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+
+ memcpy(essid, zd->essid, zd->essidlen);
+ data->flags = 1;
+ data->length = zd->essidlen;
+
+ return 0;
+}
+
+static int zd1201_get_nick(struct net_device *dev, struct iw_request_info *info,
+ struct iw_point *data, char *nick)
+{
+ strcpy(nick, "zd1201");
+ data->flags = 1;
+ data->length = strlen(nick);
+ return 0;
+}
+
+static int zd1201_set_rate(struct net_device *dev,
+ struct iw_request_info *info, struct iw_param *rrq, char *extra)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ short rate;
+ int err;
+
+ switch (rrq->value) {
+ case 1000000:
+ rate = ZD1201_RATEB1;
+ break;
+ case 2000000:
+ rate = ZD1201_RATEB2;
+ break;
+ case 5500000:
+ rate = ZD1201_RATEB5;
+ break;
+ case 11000000:
+ default:
+ rate = ZD1201_RATEB11;
+ break;
+ }
+ if (!rrq->fixed) { /* Also enable all lower bitrates */
+ rate |= rate-1;
+ }
+
+ err = zd1201_setconfig16(zd, ZD1201_RID_TXRATECNTL, rate);
+ if (err)
+ return err;
+
+ return zd1201_mac_reset(zd);
+}
+
+static int zd1201_get_rate(struct net_device *dev,
+ struct iw_request_info *info, struct iw_param *rrq, char *extra)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ short rate;
+ int err;
+
+ err = zd1201_getconfig16(zd, ZD1201_RID_CURRENTTXRATE, &rate);
+ if (err)
+ return err;
+
+ switch(rate) {
+ case 1:
+ rrq->value = 1000000;
+ break;
+ case 2:
+ rrq->value = 2000000;
+ break;
+ case 5:
+ rrq->value = 5500000;
+ break;
+ case 11:
+ rrq->value = 11000000;
+ break;
+ default:
+ rrq->value = 0;
+ }
+ rrq->fixed = 0;
+ rrq->disabled = 0;
+
+ return 0;
+}
+
+static int zd1201_set_rts(struct net_device *dev, struct iw_request_info *info,
+ struct iw_param *rts, char *extra)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ int err;
+ short val = rts->value;
+
+ if (rts->disabled || !rts->fixed)
+ val = ZD1201_RTSMAX;
+ if (val > ZD1201_RTSMAX)
+ return -EINVAL;
+ if (val < 0)
+ return -EINVAL;
+
+ err = zd1201_setconfig16(zd, ZD1201_RID_CNFRTSTHRESHOLD, val);
+ if (err)
+ return err;
+ return zd1201_mac_reset(zd);
+}
+
+static int zd1201_get_rts(struct net_device *dev, struct iw_request_info *info,
+ struct iw_param *rts, char *extra)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ short rtst;
+ int err;
+
+ err = zd1201_getconfig16(zd, ZD1201_RID_CNFRTSTHRESHOLD, &rtst);
+ if (err)
+ return err;
+ rts->value = rtst;
+ rts->disabled = (rts->value == ZD1201_RTSMAX);
+ rts->fixed = 1;
+
+ return 0;
+}
+
+static int zd1201_set_frag(struct net_device *dev, struct iw_request_info *info,
+ struct iw_param *frag, char *extra)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ int err;
+ short val = frag->value;
+
+ if (frag->disabled || !frag->fixed)
+ val = ZD1201_FRAGMAX;
+ if (val > ZD1201_FRAGMAX)
+ return -EINVAL;
+ if (val < ZD1201_FRAGMIN)
+ return -EINVAL;
+ if (val & 1)
+ return -EINVAL;
+ err = zd1201_setconfig16(zd, ZD1201_RID_CNFFRAGTHRESHOLD, val);
+ if (err)
+ return err;
+ return zd1201_mac_reset(zd);
+}
+
+static int zd1201_get_frag(struct net_device *dev, struct iw_request_info *info,
+ struct iw_param *frag, char *extra)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ short fragt;
+ int err;
+
+ err = zd1201_getconfig16(zd, ZD1201_RID_CNFFRAGTHRESHOLD, &fragt);
+ if (err)
+ return err;
+ frag->value = fragt;
+ frag->disabled = (frag->value == ZD1201_FRAGMAX);
+ frag->fixed = 1;
+
+ return 0;
+}
+
+static int zd1201_set_retry(struct net_device *dev,
+ struct iw_request_info *info, struct iw_param *rrq, char *extra)
+{
+ return 0;
+}
+
+static int zd1201_get_retry(struct net_device *dev,
+ struct iw_request_info *info, struct iw_param *rrq, char *extra)
+{
+ return 0;
+}
+
+static int zd1201_set_encode(struct net_device *dev,
+ struct iw_request_info *info, struct iw_point *erq, char *key)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ short i;
+ int err, rid;
+
+ if (erq->length > ZD1201_MAXKEYLEN)
+ return -EINVAL;
+
+ i = (erq->flags & IW_ENCODE_INDEX)-1;
+ if (i == -1) {
+ err = zd1201_getconfig16(zd,ZD1201_RID_CNFDEFAULTKEYID,&i);
+ if (err)
+ return err;
+ } else {
+ err = zd1201_setconfig16(zd, ZD1201_RID_CNFDEFAULTKEYID, i);
+ if (err)
+ return err;
+ }
+
+ if (i < 0 || i >= ZD1201_NUMKEYS)
+ return -EINVAL;
+
+ rid = ZD1201_RID_CNFDEFAULTKEY0 + i;
+ err = zd1201_setconfig(zd, rid, key, erq->length, 1);
+ if (err)
+ return err;
+ zd->encode_keylen[i] = erq->length;
+ memcpy(zd->encode_keys[i], key, erq->length);
+
+ i=0;
+ if (!(erq->flags & IW_ENCODE_DISABLED & IW_ENCODE_MODE)) {
+ i |= 0x01;
+ zd->encode_enabled = 1;
+ } else
+ zd->encode_enabled = 0;
+ if (erq->flags & IW_ENCODE_RESTRICTED & IW_ENCODE_MODE) {
+ i |= 0x02;
+ zd->encode_restricted = 1;
+ } else
+ zd->encode_restricted = 0;
+ err = zd1201_setconfig16(zd, ZD1201_RID_CNFWEBFLAGS, i);
+ if (err)
+ return err;
+
+ if (zd->encode_enabled)
+ i = ZD1201_CNFAUTHENTICATION_SHAREDKEY;
+ else
+ i = ZD1201_CNFAUTHENTICATION_OPENSYSTEM;
+ err = zd1201_setconfig16(zd, ZD1201_RID_CNFAUTHENTICATION, i);
+ if (err)
+ return err;
+
+ return zd1201_mac_reset(zd);
+}
+
+static int zd1201_get_encode(struct net_device *dev,
+ struct iw_request_info *info, struct iw_point *erq, char *key)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ short i;
+ int err;
+
+ if (zd->encode_enabled)
+ erq->flags = IW_ENCODE_ENABLED;
+ else
+ erq->flags = IW_ENCODE_DISABLED;
+ if (zd->encode_restricted)
+ erq->flags |= IW_ENCODE_RESTRICTED;
+ else
+ erq->flags |= IW_ENCODE_OPEN;
+
+ i = (erq->flags & IW_ENCODE_INDEX) -1;
+ if (i == -1) {
+ err = zd1201_getconfig16(zd, ZD1201_RID_CNFDEFAULTKEYID, &i);
+ if (err)
+ return err;
+ }
+ if (i<0 || i>= ZD1201_NUMKEYS)
+ return -EINVAL;
+
+ erq->flags |= i+1;
+
+ erq->length = zd->encode_keylen[i];
+ memcpy(key, zd->encode_keys[i], erq->length);
+
+ return 0;
+}
+
+static int zd1201_set_power(struct net_device *dev,
+ struct iw_request_info *info, struct iw_param *vwrq, char *extra)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ short enabled, duration, level;
+ int err;
+
+ enabled = vwrq->disabled ? 0 : 1;
+ if (enabled) {
+ if (vwrq->flags & IW_POWER_PERIOD) {
+ duration = vwrq->value;
+ err = zd1201_setconfig16(zd,
+ ZD1201_RID_CNFMAXSLEEPDURATION, duration);
+ if (err)
+ return err;
+ goto out;
+ }
+ if (vwrq->flags & IW_POWER_TIMEOUT) {
+ err = zd1201_getconfig16(zd,
+ ZD1201_RID_CNFMAXSLEEPDURATION, &duration);
+ if (err)
+ return err;
+ level = vwrq->value * 4 / duration;
+ if (level > 4)
+ level = 4;
+ if (level < 0)
+ level = 0;
+ err = zd1201_setconfig16(zd, ZD1201_RID_CNFPMEPS,
+ level);
+ if (err)
+ return err;
+ goto out;
+ }
+ return -EINVAL;
+ }
+out:
+ return zd1201_setconfig16(zd, ZD1201_RID_CNFPMENABLED, enabled);
+}
+
+static int zd1201_get_power(struct net_device *dev,
+ struct iw_request_info *info, struct iw_param *vwrq, char *extra)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ short enabled, level, duration;
+ int err;
+
+ err = zd1201_getconfig16(zd, ZD1201_RID_CNFPMENABLED, &enabled);
+ if (err)
+ return err;
+ err = zd1201_getconfig16(zd, ZD1201_RID_CNFPMEPS, &level);
+ if (err)
+ return err;
+ err = zd1201_getconfig16(zd, ZD1201_RID_CNFMAXSLEEPDURATION, &duration);
+ if (err)
+ return err;
+ vwrq->disabled = enabled ? 0 : 1;
+ if (vwrq->flags & IW_POWER_TYPE) {
+ if (vwrq->flags & IW_POWER_PERIOD) {
+ vwrq->value = duration;
+ vwrq->flags = IW_POWER_PERIOD;
+ } else {
+ vwrq->value = duration * level / 4;
+ vwrq->flags = IW_POWER_TIMEOUT;
+ }
+ }
+ if (vwrq->flags & IW_POWER_MODE) {
+ if (enabled && level)
+ vwrq->flags = IW_POWER_UNICAST_R;
+ else
+ vwrq->flags = IW_POWER_ALL_R;
+ }
+
+ return 0;
+}
+
+
+static const iw_handler zd1201_iw_handler[] =
+{
+ (iw_handler) zd1201_config_commit, /* SIOCSIWCOMMIT */
+ (iw_handler) zd1201_get_name, /* SIOCGIWNAME */
+ (iw_handler) NULL, /* SIOCSIWNWID */
+ (iw_handler) NULL, /* SIOCGIWNWID */
+ (iw_handler) zd1201_set_freq, /* SIOCSIWFREQ */
+ (iw_handler) zd1201_get_freq, /* SIOCGIWFREQ */
+ (iw_handler) zd1201_set_mode, /* SIOCSIWMODE */
+ (iw_handler) zd1201_get_mode, /* SIOCGIWMODE */
+ (iw_handler) NULL, /* SIOCSIWSENS */
+ (iw_handler) NULL, /* SIOCGIWSENS */
+ (iw_handler) NULL, /* SIOCSIWRANGE */
+ (iw_handler) zd1201_get_range, /* SIOCGIWRANGE */
+ (iw_handler) NULL, /* SIOCSIWPRIV */
+ (iw_handler) NULL, /* SIOCGIWPRIV */
+ (iw_handler) NULL, /* SIOCSIWSTATS */
+ (iw_handler) NULL, /* SIOCGIWSTATS */
+ (iw_handler) NULL, /* SIOCSIWSPY */
+ (iw_handler) NULL, /* SIOCGIWSPY */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL/*zd1201_set_wap*/, /* SIOCSIWAP */
+ (iw_handler) zd1201_get_wap, /* SIOCGIWAP */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* SIOCGIWAPLIST */
+ (iw_handler) zd1201_set_scan, /* SIOCSIWSCAN */
+ (iw_handler) zd1201_get_scan, /* SIOCGIWSCAN */
+ (iw_handler) zd1201_set_essid, /* SIOCSIWESSID */
+ (iw_handler) zd1201_get_essid, /* SIOCGIWESSID */
+ (iw_handler) NULL, /* SIOCSIWNICKN */
+ (iw_handler) zd1201_get_nick, /* SIOCGIWNICKN */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) zd1201_set_rate, /* SIOCSIWRATE */
+ (iw_handler) zd1201_get_rate, /* SIOCGIWRATE */
+ (iw_handler) zd1201_set_rts, /* SIOCSIWRTS */
+ (iw_handler) zd1201_get_rts, /* SIOCGIWRTS */
+ (iw_handler) zd1201_set_frag, /* SIOCSIWFRAG */
+ (iw_handler) zd1201_get_frag, /* SIOCGIWFRAG */
+ (iw_handler) NULL, /* SIOCSIWTXPOW */
+ (iw_handler) NULL, /* SIOCGIWTXPOW */
+ (iw_handler) zd1201_set_retry, /* SIOCSIWRETRY */
+ (iw_handler) zd1201_get_retry, /* SIOCGIWRETRY */
+ (iw_handler) zd1201_set_encode, /* SIOCSIWENCODE */
+ (iw_handler) zd1201_get_encode, /* SIOCGIWENCODE */
+ (iw_handler) zd1201_set_power, /* SIOCSIWPOWER */
+ (iw_handler) zd1201_get_power, /* SIOCGIWPOWER */
+};
+
+static int zd1201_set_hostauth(struct net_device *dev,
+ struct iw_request_info *info, struct iw_param *rrq, char *extra)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+
+ if (!zd->ap)
+ return -EOPNOTSUPP;
+
+ return zd1201_setconfig16(zd, ZD1201_RID_CNFHOSTAUTH, rrq->value);
+}
+
+static int zd1201_get_hostauth(struct net_device *dev,
+ struct iw_request_info *info, struct iw_param *rrq, char *extra)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ short hostauth;
+ int err;
+
+ if (!zd->ap)
+ return -EOPNOTSUPP;
+
+ err = zd1201_getconfig16(zd, ZD1201_RID_CNFHOSTAUTH, &hostauth);
+ if (err)
+ return err;
+ rrq->value = hostauth;
+ rrq->fixed = 1;
+
+ return 0;
+}
+
+static int zd1201_auth_sta(struct net_device *dev,
+ struct iw_request_info *info, struct sockaddr *sta, char *extra)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ unsigned char buffer[10];
+
+ if (!zd->ap)
+ return -EOPNOTSUPP;
+
+ memcpy(buffer, sta->sa_data, ETH_ALEN);
+ *(short*)(buffer+6) = 0; /* 0==success, 1==failure */
+ *(short*)(buffer+8) = 0;
+
+ return zd1201_setconfig(zd, ZD1201_RID_AUTHENTICATESTA, buffer, 10, 1);
+}
+
+static int zd1201_set_maxassoc(struct net_device *dev,
+ struct iw_request_info *info, struct iw_param *rrq, char *extra)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ int err;
+
+ if (!zd->ap)
+ return -EOPNOTSUPP;
+
+ err = zd1201_setconfig16(zd, ZD1201_RID_CNFMAXASSOCSTATIONS, rrq->value);
+ if (err)
+ return err;
+ return 0;
+}
+
+static int zd1201_get_maxassoc(struct net_device *dev,
+ struct iw_request_info *info, struct iw_param *rrq, char *extra)
+{
+ struct zd1201 *zd = (struct zd1201 *)dev->priv;
+ short maxassoc;
+ int err;
+
+ if (!zd->ap)
+ return -EOPNOTSUPP;
+
+ err = zd1201_getconfig16(zd, ZD1201_RID_CNFMAXASSOCSTATIONS, &maxassoc);
+ if (err)
+ return err;
+ rrq->value = maxassoc;
+ rrq->fixed = 1;
+
+ return 0;
+}
+
+static const iw_handler zd1201_private_handler[] = {
+ (iw_handler) zd1201_set_hostauth, /* ZD1201SIWHOSTAUTH */
+ (iw_handler) zd1201_get_hostauth, /* ZD1201GIWHOSTAUTH */
+ (iw_handler) zd1201_auth_sta, /* ZD1201SIWAUTHSTA */
+ (iw_handler) NULL, /* nothing to get */
+ (iw_handler) zd1201_set_maxassoc, /* ZD1201SIMAXASSOC */
+ (iw_handler) zd1201_get_maxassoc, /* ZD1201GIMAXASSOC */
+};
+
+static const struct iw_priv_args zd1201_private_args[] = {
+ { ZD1201SIWHOSTAUTH, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "sethostauth" },
+ { ZD1201GIWHOSTAUTH, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethostauth" },
+ { ZD1201SIWAUTHSTA, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "authstation" },
+ { ZD1201SIWMAXASSOC, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "setmaxassoc" },
+ { ZD1201GIWMAXASSOC, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getmaxassoc" },
+};
+
+static const struct iw_handler_def zd1201_iw_handlers = {
+ .num_standard = ARRAY_SIZE(zd1201_iw_handler),
+ .num_private = ARRAY_SIZE(zd1201_private_handler),
+ .num_private_args = ARRAY_SIZE(zd1201_private_args),
+ .standard = (iw_handler *)zd1201_iw_handler,
+ .private = (iw_handler *)zd1201_private_handler,
+ .private_args = (struct iw_priv_args *) zd1201_private_args,
+ .get_wireless_stats = zd1201_get_wireless_stats,
+};
+
+static int zd1201_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct zd1201 *zd;
+ struct usb_device *usb;
+ int err;
+ short porttype;
+ char buf[IW_ESSID_MAX_SIZE+2];
+
+ usb = interface_to_usbdev(interface);
+
+ zd = kzalloc(sizeof(struct zd1201), GFP_KERNEL);
+ if (!zd)
+ return -ENOMEM;
+ zd->ap = ap;
+ zd->usb = usb;
+ zd->removed = 0;
+ init_waitqueue_head(&zd->rxdataq);
+ INIT_HLIST_HEAD(&zd->fraglist);
+
+ err = zd1201_fw_upload(usb, zd->ap);
+ if (err) {
+ dev_err(&usb->dev, "zd1201 firmware upload failed: %d\n", err);
+ goto err_zd;
+ }
+
+ zd->endp_in = 1;
+ zd->endp_out = 1;
+ zd->endp_out2 = 2;
+ zd->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ zd->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!zd->rx_urb || !zd->tx_urb) {
+ err = -ENOMEM;
+ goto err_zd;
+ }
+
+ mdelay(100);
+ err = zd1201_drvr_start(zd);
+ if (err)
+ goto err_zd;
+
+ err = zd1201_setconfig16(zd, ZD1201_RID_CNFMAXDATALEN, 2312);
+ if (err)
+ goto err_start;
+
+ err = zd1201_setconfig16(zd, ZD1201_RID_TXRATECNTL,
+ ZD1201_RATEB1 | ZD1201_RATEB2 | ZD1201_RATEB5 | ZD1201_RATEB11);
+ if (err)
+ goto err_start;
+
+ zd->dev = alloc_etherdev(0);
+ if (!zd->dev) {
+ err = -ENOMEM;
+ goto err_start;
+ }
+
+ zd->dev->priv = zd;
+ zd->dev->open = zd1201_net_open;
+ zd->dev->stop = zd1201_net_stop;
+ zd->dev->get_stats = zd1201_get_stats;
+ zd->dev->wireless_handlers =
+ (struct iw_handler_def *)&zd1201_iw_handlers;
+ zd->dev->hard_start_xmit = zd1201_hard_start_xmit;
+ zd->dev->watchdog_timeo = ZD1201_TX_TIMEOUT;
+ zd->dev->tx_timeout = zd1201_tx_timeout;
+ zd->dev->set_multicast_list = zd1201_set_multicast;
+ zd->dev->set_mac_address = zd1201_set_mac_address;
+ strcpy(zd->dev->name, "wlan%d");
+
+ err = zd1201_getconfig(zd, ZD1201_RID_CNFOWNMACADDR,
+ zd->dev->dev_addr, zd->dev->addr_len);
+ if (err)
+ goto err_net;
+
+ /* Set wildcard essid to match zd->essid */
+ *(__le16 *)buf = cpu_to_le16(0);
+ err = zd1201_setconfig(zd, ZD1201_RID_CNFDESIREDSSID, buf,
+ IW_ESSID_MAX_SIZE+2, 1);
+ if (err)
+ goto err_net;
+
+ if (zd->ap)
+ porttype = ZD1201_PORTTYPE_AP;
+ else
+ porttype = ZD1201_PORTTYPE_BSS;
+ err = zd1201_setconfig16(zd, ZD1201_RID_CNFPORTTYPE, porttype);
+ if (err)
+ goto err_net;
+
+ SET_NETDEV_DEV(zd->dev, &usb->dev);
+
+ err = register_netdev(zd->dev);
+ if (err)
+ goto err_net;
+ dev_info(&usb->dev, "%s: ZD1201 USB Wireless interface\n",
+ zd->dev->name);
+
+ usb_set_intfdata(interface, zd);
+ return 0;
+
+err_net:
+ free_netdev(zd->dev);
+err_start:
+ /* Leave the device in reset state */
+ zd1201_docmd(zd, ZD1201_CMDCODE_INIT, 0, 0, 0);
+err_zd:
+ if (zd->tx_urb)
+ usb_free_urb(zd->tx_urb);
+ if (zd->rx_urb)
+ usb_free_urb(zd->rx_urb);
+ kfree(zd);
+ return err;
+}
+
+static void zd1201_disconnect(struct usb_interface *interface)
+{
+ struct zd1201 *zd=(struct zd1201 *)usb_get_intfdata(interface);
+ struct hlist_node *node, *node2;
+ struct zd1201_frag *frag;
+
+ if (!zd)
+ return;
+ usb_set_intfdata(interface, NULL);
+ if (zd->dev) {
+ unregister_netdev(zd->dev);
+ free_netdev(zd->dev);
+ }
+
+ hlist_for_each_entry_safe(frag, node, node2, &zd->fraglist, fnode) {
+ hlist_del_init(&frag->fnode);
+ kfree_skb(frag->skb);
+ kfree(frag);
+ }
+
+ if (zd->tx_urb) {
+ usb_kill_urb(zd->tx_urb);
+ usb_free_urb(zd->tx_urb);
+ }
+ if (zd->rx_urb) {
+ usb_kill_urb(zd->rx_urb);
+ usb_free_urb(zd->rx_urb);
+ }
+ kfree(zd);
+}
+
+#ifdef CONFIG_PM
+
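+/*
+ * Remember whether the MAC was enabled at suspend time so that resume
+ * can restore the previous state.
+ */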
+static int zd1201_suspend(struct usb_interface *interface,
+ pm_message_t message)
+{
+ struct zd1201 *zd = usb_get_intfdata(interface);
+
+ netif_device_detach(zd->dev);
+
+ zd->was_enabled = zd->mac_enabled;
+
+ if (zd->was_enabled)
+ return zd1201_disable(zd);
+ else
+ return 0;
+}
+
+static int zd1201_resume(struct usb_interface *interface)
+{
+ struct zd1201 *zd = usb_get_intfdata(interface);
+
+ if (!zd || !zd->dev)
+ return -ENODEV;
+
+ netif_device_attach(zd->dev);
+
+ if (zd->was_enabled)
+ return zd1201_enable(zd);
+ else
+ return 0;
+}
+
+#else
+
+#define zd1201_suspend NULL
+#define zd1201_resume NULL
+
+#endif
+
+static struct usb_driver zd1201_usb = {
+ .name = "zd1201",
+ .probe = zd1201_probe,
+ .disconnect = zd1201_disconnect,
+ .id_table = zd1201_table,
+ .suspend = zd1201_suspend,
+ .resume = zd1201_resume,
+};
+
+static int __init zd1201_init(void)
+{
+ return usb_register(&zd1201_usb);
+}
+
+static void __exit zd1201_cleanup(void)
+{
+ usb_deregister(&zd1201_usb);
+}
+
+module_init(zd1201_init);
+module_exit(zd1201_cleanup);
diff --git a/drivers/net/wireless/zd1201.h b/drivers/net/wireless/zd1201.h
new file mode 100644
index 000000000000..235f0ee34b24
--- /dev/null
+++ b/drivers/net/wireless/zd1201.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2004, 2005 Jeroen Vreeken (pe1rxq@amsat.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * Parts of this driver have been derived from a wlan-ng version
+ * modified by ZyDAS.
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
+ */
+
+#ifndef _INCLUDE_ZD1201_H_
+#define _INCLUDE_ZD1201_H_
+
+#define ZD1201_NUMKEYS 4
+#define ZD1201_MAXKEYLEN 13
+#define ZD1201_MAXMULTI 16
+#define ZD1201_FRAGMAX 2500
+#define ZD1201_FRAGMIN 256
+#define ZD1201_RTSMAX 2500
+
+#define ZD1201_RXSIZE 3000
+
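+/* Per-device state, allocated in zd1201_probe() and stored as USB interface data */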
+struct zd1201 {
+ struct usb_device *usb;
+ int removed;
+ struct net_device *dev;
+ struct net_device_stats stats;
+ struct iw_statistics iwstats;
+
+ int endp_in;
+ int endp_out;
+ int endp_out2;
+ struct urb *rx_urb;
+ struct urb *tx_urb;
+
+ unsigned char rxdata[ZD1201_RXSIZE];
+ int rxlen;
+ wait_queue_head_t rxdataq;
+ int rxdatas;
+ struct hlist_head fraglist;
+ unsigned char txdata[ZD1201_RXSIZE];
+
+ int ap;
+ char essid[IW_ESSID_MAX_SIZE+1];
+ int essidlen;
+ int mac_enabled;
+ int was_enabled;
+ int monitor;
+ int encode_enabled;
+ int encode_restricted;
+ unsigned char encode_keys[ZD1201_NUMKEYS][ZD1201_MAXKEYLEN];
+ int encode_keylen[ZD1201_NUMKEYS];
+};
+
+struct zd1201_frag {
+ struct hlist_node fnode;
+ int seq;
+ struct sk_buff *skb;
+};
+
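+/* Private wireless extension ioctls: host-based authentication and association limits */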
+#define ZD1201SIWHOSTAUTH SIOCIWFIRSTPRIV
+#define ZD1201GIWHOSTAUTH (ZD1201SIWHOSTAUTH+1)
+#define ZD1201SIWAUTHSTA (SIOCIWFIRSTPRIV+2)
+#define ZD1201SIWMAXASSOC (SIOCIWFIRSTPRIV+4)
+#define ZD1201GIWMAXASSOC (ZD1201SIWMAXASSOC+1)
+
+#define ZD1201_FW_TIMEOUT (1000)
+
+#define ZD1201_TX_TIMEOUT (2000)
+
+#define ZD1201_USB_CMDREQ 0
+#define ZD1201_USB_RESREQ 1
+
+#define ZD1201_CMDCODE_INIT 0x00
+#define ZD1201_CMDCODE_ENABLE 0x01
+#define ZD1201_CMDCODE_DISABLE 0x02
+#define ZD1201_CMDCODE_ALLOC 0x0a
+#define ZD1201_CMDCODE_INQUIRE 0x11
+#define ZD1201_CMDCODE_SETRXRID 0x17
+#define ZD1201_CMDCODE_ACCESS 0x21
+
+#define ZD1201_PACKET_EVENTSTAT 0x0
+#define ZD1201_PACKET_RXDATA 0x1
+#define ZD1201_PACKET_INQUIRE 0x2
+#define ZD1201_PACKET_RESOURCE 0x3
+
+#define ZD1201_ACCESSBIT 0x0100
+
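+/* Resource IDs (RIDs) read and written through the firmware configuration interface */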
+#define ZD1201_RID_CNFPORTTYPE 0xfc00
+#define ZD1201_RID_CNFOWNMACADDR 0xfc01
+#define ZD1201_RID_CNFDESIREDSSID 0xfc02
+#define ZD1201_RID_CNFOWNCHANNEL 0xfc03
+#define ZD1201_RID_CNFOWNSSID 0xfc04
+#define ZD1201_RID_CNFMAXDATALEN 0xfc07
+#define ZD1201_RID_CNFPMENABLED 0xfc09
+#define ZD1201_RID_CNFPMEPS 0xfc0a
+#define ZD1201_RID_CNFMAXSLEEPDURATION 0xfc0c
+#define ZD1201_RID_CNFDEFAULTKEYID 0xfc23
+#define ZD1201_RID_CNFDEFAULTKEY0 0xfc24
+#define ZD1201_RID_CNFDEFAULTKEY1 0xfc25
+#define ZD1201_RID_CNFDEFAULTKEY2 0xfc26
+#define ZD1201_RID_CNFDEFAULTKEY3 0xfc27
+#define ZD1201_RID_CNFWEBFLAGS 0xfc28
+#define ZD1201_RID_CNFAUTHENTICATION 0xfc2a
+#define ZD1201_RID_CNFMAXASSOCSTATIONS 0xfc2b
+#define ZD1201_RID_CNFHOSTAUTH 0xfc2e
+#define ZD1201_RID_CNFGROUPADDRESS 0xfc80
+#define ZD1201_RID_CNFFRAGTHRESHOLD 0xfc82
+#define ZD1201_RID_CNFRTSTHRESHOLD 0xfc83
+#define ZD1201_RID_TXRATECNTL 0xfc84
+#define ZD1201_RID_PROMISCUOUSMODE 0xfc85
+#define ZD1201_RID_CNFBASICRATES 0xfcb3
+#define ZD1201_RID_AUTHENTICATESTA 0xfce3
+#define ZD1201_RID_CURRENTBSSID 0xfd42
+#define ZD1201_RID_COMMSQUALITY 0xfd43
+#define ZD1201_RID_CURRENTTXRATE 0xfd44
+#define ZD1201_RID_CNFMAXTXBUFFERNUMBER 0xfda0
+#define ZD1201_RID_CURRENTCHANNEL 0xfdc1
+
+#define ZD1201_INQ_SCANRESULTS 0xf101
+
+#define ZD1201_INF_LINKSTATUS 0xf200
+#define ZD1201_INF_ASSOCSTATUS 0xf201
+#define ZD1201_INF_AUTHREQ 0xf202
+
+#define ZD1201_ASSOCSTATUS_STAASSOC 0x1
+#define ZD1201_ASSOCSTATUS_REASSOC 0x2
+#define ZD1201_ASSOCSTATUS_DISASSOC 0x3
+#define ZD1201_ASSOCSTATUS_ASSOCFAIL 0x4
+#define ZD1201_ASSOCSTATUS_AUTHFAIL 0x5
+
+#define ZD1201_PORTTYPE_IBSS 0
+#define ZD1201_PORTTYPE_BSS 1
+#define ZD1201_PORTTYPE_WDS 2
+#define ZD1201_PORTTYPE_PSEUDOIBSS 3
+#define ZD1201_PORTTYPE_AP 6
+
+#define ZD1201_RATEB1 1
+#define ZD1201_RATEB2 2
+#define ZD1201_RATEB5 4 /* 5.5 really, but 5 is shorter :) */
+#define ZD1201_RATEB11 8
+
+#define ZD1201_CNFAUTHENTICATION_OPENSYSTEM 0x0001
+#define ZD1201_CNFAUTHENTICATION_SHAREDKEY 0x0002
+
+#endif /* _INCLUDE_ZD1201_H_ */