-rw-r--r--  drivers/net/bnx2.c                           |    6
-rw-r--r--  drivers/net/bonding/bond_alb.c               |   12
-rw-r--r--  drivers/net/bonding/bond_main.c              |   30
-rw-r--r--  drivers/net/e1000/e1000_main.c               |    4
-rw-r--r--  drivers/net/e1000e/netdev.c                  |    2
-rw-r--r--  drivers/net/ehea/ehea.h                      |    2
-rw-r--r--  drivers/net/ehea/ehea_main.c                 |   31
-rw-r--r--  drivers/net/igb/igb.h                        |    2
-rw-r--r--  drivers/net/igb/igb_ethtool.c                |    8
-rw-r--r--  drivers/net/igb/igb_main.c                   |   26
-rw-r--r--  drivers/net/igbvf/ethtool.c                  |   36
-rw-r--r--  drivers/net/igbvf/igbvf.h                    |    6
-rw-r--r--  drivers/net/igbvf/netdev.c                   |   17
-rw-r--r--  drivers/net/ixgb/ixgb_main.c                 |    2
-rw-r--r--  drivers/net/ixgbe/ixgbe.h                    |    7
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_nl.c             |   16
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c            |    2
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c               |  369
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h               |    1
-rw-r--r--  drivers/net/lasi_82596.c                     |    6
-rw-r--r--  drivers/net/mv643xx_eth.c                    |   87
-rw-r--r--  drivers/net/netxen/netxen_nic.h              |   12
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.c           |  156
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.h           |   10
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c         |   15
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c         |   55
-rw-r--r--  drivers/net/netxen/netxen_nic_niu.c          |  154
-rw-r--r--  drivers/net/r6040.c                          |   12
-rw-r--r--  drivers/net/smsc911x.c                       |   48
-rw-r--r--  drivers/net/usb/Kconfig                      |   14
-rw-r--r--  drivers/net/usb/Makefile                     |    1
-rw-r--r--  drivers/net/usb/cdc_eem.c                    |  381
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.c       |   21
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c  |    1
-rw-r--r--  drivers/pci/quirks.c                         |    2
-rw-r--r--  include/linux/etherdevice.h                  |   27
-rw-r--r--  include/linux/netdevice.h                    |   38
-rw-r--r--  include/linux/netfilter/xt_LED.h             |    2
-rw-r--r--  include/linux/netfilter/xt_cluster.h         |    2
-rw-r--r--  include/linux/usb/cdc.h                      |    3
-rw-r--r--  include/net/bluetooth/hci_core.h             |    1
-rw-r--r--  include/net/tcp.h                            |   58
-rw-r--r--  net/Kconfig                                  |    6
-rw-r--r--  net/bluetooth/hci_conn.c                     |    2
-rw-r--r--  net/bluetooth/hci_sysfs.c                    |   73
-rw-r--r--  net/core/dev.c                               |  250
-rw-r--r--  net/core/skbuff.c                            |    2
-rw-r--r--  net/ipv4/inet_timewait_sock.c                |   23
-rw-r--r--  net/ipv4/tcp_input.c                         |   10
-rw-r--r--  net/ipv4/tcp_ipv4.c                          |    2
-rw-r--r--  net/ipv6/netfilter/ip6t_ipv6header.c         |    6
-rw-r--r--  net/mac80211/rc80211_minstrel.c              |    2
-rw-r--r--  net/mac80211/rc80211_pid_algo.c              |   73
-rw-r--r--  net/mac80211/tx.c                            |    2
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c         |   48
-rw-r--r--  net/netfilter/xt_cluster.c                   |    8
-rw-r--r--  net/sched/sch_fifo.c                         |    2
-rw-r--r--  net/wimax/op-msg.c                           |   11
-rw-r--r--  net/wimax/stack.c                            |   17
-rw-r--r--  net/wireless/reg.c                           |   17
-rw-r--r--  net/wireless/scan.c                          |    1
61 files changed, 1515 insertions(+), 725 deletions(-)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index d47839184a06..b0cb29d4cc01 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -54,8 +54,8 @@
#define DRV_MODULE_NAME "bnx2"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "2.0.0"
-#define DRV_MODULE_RELDATE "April 2, 2009"
+#define DRV_MODULE_VERSION "2.0.1"
+#define DRV_MODULE_RELDATE "May 6, 2009"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-4.6.16.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-4.6.16.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-4.6.17.fw"
@@ -2600,6 +2600,7 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
/* Tell compiler that status block fields can change. */
barrier();
cons = *bnapi->hw_tx_cons_ptr;
+ barrier();
if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
cons++;
return cons;
@@ -2879,6 +2880,7 @@ bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
/* Tell compiler that status block fields can change. */
barrier();
cons = *bnapi->hw_rx_cons_ptr;
+ barrier();
if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
cons++;
return cons;
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 553a89919778..46d312bedfb8 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1706,10 +1706,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
* Called with RTNL
*/
int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
- __releases(&bond->curr_slave_lock)
- __releases(&bond->lock)
__acquires(&bond->lock)
- __acquires(&bond->curr_slave_lock)
+ __releases(&bond->lock)
{
struct bonding *bond = netdev_priv(bond_dev);
struct sockaddr *sa = addr;
@@ -1745,9 +1743,6 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
}
}
- write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
-
if (swap_slave) {
alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
@@ -1755,16 +1750,15 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr,
bond->alb_info.rlb_enabled);
+ read_lock(&bond->lock);
alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
if (bond->alb_info.rlb_enabled) {
/* inform clients mac address has changed */
rlb_req_update_slave_clients(bond, bond->curr_active_slave);
}
+ read_unlock(&bond->lock);
}
- read_lock(&bond->lock);
- write_lock_bh(&bond->curr_slave_lock);
-
return 0;
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e3af662b0559..815191dd03c3 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3459,8 +3459,28 @@ static void bond_destroy_proc_dir(void)
bond_proc_dir = NULL;
}
}
+
+#else /* !CONFIG_PROC_FS */
+
+static int bond_create_proc_entry(struct bonding *bond)
+{
+}
+
+static void bond_remove_proc_entry(struct bonding *bond)
+{
+}
+
+static void bond_create_proc_dir(void)
+{
+}
+
+static void bond_destroy_proc_dir(void)
+{
+}
+
#endif /* CONFIG_PROC_FS */
+
/*-------------------------- netdev event handling --------------------------*/
/*
@@ -3468,10 +3488,8 @@ static void bond_destroy_proc_dir(void)
*/
static int bond_event_changename(struct bonding *bond)
{
-#ifdef CONFIG_PROC_FS
bond_remove_proc_entry(bond);
bond_create_proc_entry(bond);
-#endif
down_write(&(bonding_rwsem));
bond_destroy_sysfs_entry(bond);
bond_create_sysfs_entry(bond);
@@ -4637,9 +4655,7 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params)
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER);
-#ifdef CONFIG_PROC_FS
bond_create_proc_entry(bond);
-#endif
list_add_tail(&bond->bond_list, &bond_dev_list);
return 0;
@@ -4677,9 +4693,7 @@ static void bond_deinit(struct net_device *bond_dev)
bond_work_cancel_all(bond);
-#ifdef CONFIG_PROC_FS
bond_remove_proc_entry(bond);
-#endif
}
/* Unregister and free all bond devices.
@@ -4698,9 +4712,7 @@ static void bond_free_all(void)
bond_destroy(bond);
}
-#ifdef CONFIG_PROC_FS
bond_destroy_proc_dir();
-#endif
}
/*------------------------- Module initialization ---------------------------*/
@@ -5196,9 +5208,7 @@ static int __init bonding_init(void)
goto out;
}
-#ifdef CONFIG_PROC_FS
bond_create_proc_dir();
-#endif
init_rwsem(&bonding_rwsem);
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 71d4fe15976a..9a32d0c73cb3 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2646,6 +2646,8 @@ static void e1000_watchdog(unsigned long data)
* (Do the reset outside of interrupt context). */
adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task);
+ /* return immediately since reset is imminent */
+ return;
}
}
@@ -3739,7 +3741,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
struct e1000_hw *hw = &adapter->hw;
u32 rctl, icr = er32(ICR);
- if (unlikely((!icr) || test_bit(__E1000_RESETTING, &adapter->flags)))
+ if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags)))
return IRQ_NONE; /* Not our interrupt */
/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index da6b37e05bea..ccaaee0951cf 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -3651,6 +3651,8 @@ link_up:
*/
adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task);
+ /* return immediately since reset is imminent */
+ return;
}
}
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 6e317caf429c..16a41389575a 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0100"
+#define DRV_VERSION "EHEA_0101"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 37a190d85fcd..147c4b088fb3 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -545,14 +545,17 @@ static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
x &= (arr_len - 1);
pref = skb_array[x];
- prefetchw(pref);
- prefetchw(pref + EHEA_CACHE_LINE);
-
- pref = (skb_array[x]->data);
- prefetch(pref);
- prefetch(pref + EHEA_CACHE_LINE);
- prefetch(pref + EHEA_CACHE_LINE * 2);
- prefetch(pref + EHEA_CACHE_LINE * 3);
+ if (pref) {
+ prefetchw(pref);
+ prefetchw(pref + EHEA_CACHE_LINE);
+
+ pref = (skb_array[x]->data);
+ prefetch(pref);
+ prefetch(pref + EHEA_CACHE_LINE);
+ prefetch(pref + EHEA_CACHE_LINE * 2);
+ prefetch(pref + EHEA_CACHE_LINE * 3);
+ }
+
skb = skb_array[skb_index];
skb_array[skb_index] = NULL;
return skb;
@@ -569,12 +572,14 @@ static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
x &= (arr_len - 1);
pref = skb_array[x];
- prefetchw(pref);
- prefetchw(pref + EHEA_CACHE_LINE);
+ if (pref) {
+ prefetchw(pref);
+ prefetchw(pref + EHEA_CACHE_LINE);
- pref = (skb_array[x]->data);
- prefetchw(pref);
- prefetchw(pref + EHEA_CACHE_LINE);
+ pref = (skb_array[x]->data);
+ prefetchw(pref);
+ prefetchw(pref + EHEA_CACHE_LINE);
+ }
skb = skb_array[wqe_index];
skb_array[wqe_index] = NULL;
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 4e8464b9df2e..154c5acc6fce 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -238,7 +238,6 @@ struct igb_adapter {
u64 hw_csum_err;
u64 hw_csum_good;
u32 alloc_rx_buff_failed;
- bool rx_csum;
u32 gorc;
u64 gorc_old;
u16 rx_ps_hdr_size;
@@ -286,6 +285,7 @@ struct igb_adapter {
#define IGB_FLAG_DCA_ENABLED (1 << 1)
#define IGB_FLAG_QUAD_PORT_A (1 << 2)
#define IGB_FLAG_NEED_CTX_IDX (1 << 3)
+#define IGB_FLAG_RX_CSUM_DISABLED (1 << 4)
enum e1000_state_t {
__IGB_TESTING,
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index b1367ce6586e..b8551a57dd3f 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -275,13 +275,17 @@ static int igb_set_pauseparam(struct net_device *netdev,
static u32 igb_get_rx_csum(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- return adapter->rx_csum;
+ return !(adapter->flags & IGB_FLAG_RX_CSUM_DISABLED);
}
static int igb_set_rx_csum(struct net_device *netdev, u32 data)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- adapter->rx_csum = data;
+
+ if (data)
+ adapter->flags &= ~IGB_FLAG_RX_CSUM_DISABLED;
+ else
+ adapter->flags |= IGB_FLAG_RX_CSUM_DISABLED;
return 0;
}
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index bca7e9f76be4..ffd731539997 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1395,8 +1395,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
igb_validate_mdi_setting(hw);
- adapter->rx_csum = 1;
-
/* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
* enable the ACPI Magic Packet filter
*/
@@ -2012,7 +2010,7 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u32 rctl;
u32 srrctl = 0;
- int i, j;
+ int i;
rctl = rd32(E1000_RCTL);
@@ -2077,8 +2075,6 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
if (adapter->vfs_allocated_count) {
u32 vmolr;
- j = adapter->rx_ring[0].reg_idx;
-
/* set all queue drop enable bits */
wr32(E1000_QDE, ALL_QUEUES);
srrctl |= E1000_SRRCTL_DROP_EN;
@@ -2086,16 +2082,16 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
/* disable queue 0 to prevent tail write w/o re-config */
wr32(E1000_RXDCTL(0), 0);
- vmolr = rd32(E1000_VMOLR(j));
+ vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count));
if (rctl & E1000_RCTL_LPE)
vmolr |= E1000_VMOLR_LPE;
- if (adapter->num_rx_queues > 0)
+ if (adapter->num_rx_queues > 1)
vmolr |= E1000_VMOLR_RSSE;
- wr32(E1000_VMOLR(j), vmolr);
+ wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr);
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- j = adapter->rx_ring[i].reg_idx;
+ int j = adapter->rx_ring[i].reg_idx;
wr32(E1000_SRRCTL(j), srrctl);
}
@@ -2249,13 +2245,12 @@ static void igb_configure_rx(struct igb_adapter *adapter)
rxcsum = rd32(E1000_RXCSUM);
/* Disable raw packet checksumming */
rxcsum |= E1000_RXCSUM_PCSD;
- /* Don't need to set TUOFL or IPOFL, they default to 1 */
- if (!adapter->rx_csum)
- rxcsum &= ~(E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPOFL);
- else if (adapter->hw.mac.type == e1000_82576)
+
+ if (adapter->hw.mac.type == e1000_82576)
/* Enable Receive Checksum Offload for SCTP */
rxcsum |= E1000_RXCSUM_CRCOFL;
+ /* Don't need to set TUOFL or IPOFL, they default to 1 */
wr32(E1000_RXCSUM, rxcsum);
/* Set the default pool for the PF's first queue */
@@ -2714,6 +2709,8 @@ link_up:
* (Do the reset outside of interrupt context). */
adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task);
+ /* return immediately since reset is imminent */
+ return;
}
}
@@ -4455,7 +4452,8 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
skb->ip_summed = CHECKSUM_NONE;
/* Ignore Checksum bit is set or checksum is disabled through ethtool */
- if ((status_err & E1000_RXD_STAT_IXSM) || !adapter->rx_csum)
+ if ((status_err & E1000_RXD_STAT_IXSM) ||
+ (adapter->flags & IGB_FLAG_RX_CSUM_DISABLED))
return;
/* TCP/UDP checksum error bit is set */
if (status_err &
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index 1dcaa6905312..ee17a097d1ca 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -133,6 +133,24 @@ static int igbvf_set_pauseparam(struct net_device *netdev,
return -EOPNOTSUPP;
}
+static u32 igbvf_get_rx_csum(struct net_device *netdev)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+ return !(adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED);
+}
+
+static int igbvf_set_rx_csum(struct net_device *netdev, u32 data)
+{
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ if (data)
+ adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED;
+ else
+ adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED;
+
+ return 0;
+}
+
static u32 igbvf_get_tx_csum(struct net_device *netdev)
{
return ((netdev->features & NETIF_F_IP_CSUM) != 0);
@@ -150,8 +168,6 @@ static int igbvf_set_tx_csum(struct net_device *netdev, u32 data)
static int igbvf_set_tso(struct net_device *netdev, u32 data)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
- int i;
- struct net_device *v_netdev;
if (data) {
netdev->features |= NETIF_F_TSO;
@@ -159,24 +175,10 @@ static int igbvf_set_tso(struct net_device *netdev, u32 data)
} else {
netdev->features &= ~NETIF_F_TSO;
netdev->features &= ~NETIF_F_TSO6;
- /* disable TSO on all VLANs if they're present */
- if (!adapter->vlgrp)
- goto tso_out;
- for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
- v_netdev = vlan_group_get_device(adapter->vlgrp, i);
- if (!v_netdev)
- continue;
-
- v_netdev->features &= ~NETIF_F_TSO;
- v_netdev->features &= ~NETIF_F_TSO6;
- vlan_group_set_device(adapter->vlgrp, i, v_netdev);
- }
}
-tso_out:
dev_info(&adapter->pdev->dev, "TSO is %s\n",
data ? "Enabled" : "Disabled");
- adapter->flags |= FLAG_TSO_FORCE;
return 0;
}
@@ -517,6 +519,8 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
.set_ringparam = igbvf_set_ringparam,
.get_pauseparam = igbvf_get_pauseparam,
.set_pauseparam = igbvf_set_pauseparam,
+ .get_rx_csum = igbvf_get_rx_csum,
+ .set_rx_csum = igbvf_set_rx_csum,
.get_tx_csum = igbvf_get_tx_csum,
.set_tx_csum = igbvf_set_tx_csum,
.get_sg = ethtool_op_get_sg,
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index 4bff35e46871..2ad6cd756539 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -286,11 +286,7 @@ struct igbvf_info {
};
/* hardware capability, feature, and workaround flags */
-#define FLAG_HAS_HW_VLAN_FILTER (1 << 0)
-#define FLAG_HAS_JUMBO_FRAMES (1 << 1)
-#define FLAG_MSI_ENABLED (1 << 2)
-#define FLAG_RX_CSUM_ENABLED (1 << 3)
-#define FLAG_TSO_FORCE (1 << 4)
+#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0)
#define IGBVF_RX_DESC_ADV(R, i) \
(&((((R).desc))[i].rx_desc))
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index b774666ad3cf..44a8eef03a74 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -58,8 +58,7 @@ static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);
static struct igbvf_info igbvf_vf_info = {
.mac = e1000_vfadapt,
- .flags = FLAG_HAS_JUMBO_FRAMES
- | FLAG_RX_CSUM_ENABLED,
+ .flags = 0,
.pba = 10,
.init_ops = e1000_init_function_pointers_vf,
};
@@ -107,8 +106,10 @@ static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
skb->ip_summed = CHECKSUM_NONE;
/* Ignore Checksum bit is set or checksum is disabled through ethtool */
- if ((status_err & E1000_RXD_STAT_IXSM))
+ if ((status_err & E1000_RXD_STAT_IXSM) ||
+ (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
return;
+
/* TCP/UDP checksum error bit is set */
if (status_err &
(E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
@@ -116,6 +117,7 @@ static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
adapter->hw_csum_err++;
return;
}
+
/* It must be a TCP or UDP packet with a valid checksum */
if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2351,15 +2353,6 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
- /* Jumbo frame size limits */
- if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
- if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
- dev_err(&adapter->pdev->dev,
- "Jumbo Frames not supported.\n");
- return -EINVAL;
- }
- }
-
#define MAX_STD_JUMBO_FRAME_SIZE 9234
if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index cb9ecc48f6d0..04cb81a739c2 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1140,6 +1140,8 @@ ixgb_watchdog(unsigned long data)
* to get done, so reset controller to flush Tx.
* (Do the reset outside of interrupt context). */
schedule_work(&adapter->tx_timeout_task);
+ /* return immediately since reset is imminent */
+ return;
}
}
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 4b44a8efac8c..d743d0ed5c2e 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -187,6 +187,7 @@ struct ixgbe_q_vector {
u8 tx_itr;
u8 rx_itr;
u32 eitr;
+ u32 v_idx; /* vector index in list */
};
/* Helper macros to switch between ints/sec and what the register uses.
@@ -230,7 +231,7 @@ struct ixgbe_adapter {
struct vlan_group *vlgrp;
u16 bd_number;
struct work_struct reset_task;
- struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS];
+ struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
struct ixgbe_dcb_config dcb_cfg;
struct ixgbe_dcb_config temp_dcb_cfg;
@@ -367,10 +368,8 @@ extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *)
extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
-extern void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter);
extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
-void ixgbe_napi_add_all(struct ixgbe_adapter *adapter);
-void ixgbe_napi_del_all(struct ixgbe_adapter *adapter);
+extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
extern void ixgbe_write_eitr(struct ixgbe_adapter *, int, u32);
#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index bd0a0c276952..99e0c106e671 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -124,13 +124,7 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
- ixgbe_reset_interrupt_capability(adapter);
- ixgbe_napi_del_all(adapter);
- INIT_LIST_HEAD(&netdev->napi_list);
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
- adapter->tx_ring = NULL;
- adapter->rx_ring = NULL;
+ ixgbe_clear_interrupt_scheme(adapter);
adapter->hw.fc.requested_mode = ixgbe_fc_pfc;
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
@@ -144,13 +138,7 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
adapter->hw.fc.requested_mode = ixgbe_fc_default;
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
- ixgbe_reset_interrupt_capability(adapter);
- ixgbe_napi_del_all(adapter);
- INIT_LIST_HEAD(&netdev->napi_list);
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
- adapter->tx_ring = NULL;
- adapter->rx_ring = NULL;
+ ixgbe_clear_interrupt_scheme(adapter);
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index d822c92058c3..c0167d617b1e 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1114,7 +1114,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
}
for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
- struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
if (q_vector->txr_count && !q_vector->rxr_count)
/* tx vector gets half the rate */
q_vector->eitr = (adapter->eitr_param >> 1);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index be5eabce9e35..efb175b1e438 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -461,6 +461,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
**/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
struct sk_buff *skb, u8 status,
+ struct ixgbe_ring *ring,
union ixgbe_adv_rx_desc *rx_desc)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
@@ -468,7 +469,7 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
bool is_vlan = (status & IXGBE_RXD_STAT_VP);
u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
- skb_record_rx_queue(skb, q_vector - &adapter->q_vector[0]);
+ skb_record_rx_queue(skb, ring->queue_index);
if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
if (adapter->vlgrp && is_vlan && (tag != 0))
vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
@@ -782,7 +783,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
total_rx_packets++;
skb->protocol = eth_type_trans(skb, adapter->netdev);
- ixgbe_receive_skb(q_vector, skb, staterr, rx_desc);
+ ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
next_desc:
rx_desc->wb.upper.status_error = 0;
@@ -835,7 +836,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
* corresponding register.
*/
for (v_idx = 0; v_idx < q_vectors; v_idx++) {
- q_vector = &adapter->q_vector[v_idx];
+ q_vector = adapter->q_vector[v_idx];
/* XXX for_each_bit(...) */
r_idx = find_first_bit(q_vector->rxr_idx,
adapter->num_rx_queues);
@@ -984,8 +985,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
struct ixgbe_adapter *adapter = q_vector->adapter;
u32 new_itr;
u8 current_itr, ret_itr;
- int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
- sizeof(struct ixgbe_q_vector);
+ int i, r_idx, v_idx = q_vector->v_idx;
struct ixgbe_ring *rx_ring, *tx_ring;
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
@@ -1303,19 +1303,21 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
int r_idx)
{
- a->q_vector[v_idx].adapter = a;
- set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
- a->q_vector[v_idx].rxr_count++;
- a->rx_ring[r_idx].v_idx = 1 << v_idx;
+ struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+
+ set_bit(r_idx, q_vector->rxr_idx);
+ q_vector->rxr_count++;
+ a->rx_ring[r_idx].v_idx = (u64)1 << v_idx;
}
static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
- int r_idx)
+ int t_idx)
{
- a->q_vector[v_idx].adapter = a;
- set_bit(r_idx, a->q_vector[v_idx].txr_idx);
- a->q_vector[v_idx].txr_count++;
- a->tx_ring[r_idx].v_idx = 1 << v_idx;
+ struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+
+ set_bit(t_idx, q_vector->txr_idx);
+ q_vector->txr_count++;
+ a->tx_ring[t_idx].v_idx = (u64)1 << v_idx;
}
/**
@@ -1411,7 +1413,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
(!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
&ixgbe_msix_clean_many)
for (vector = 0; vector < q_vectors; vector++) {
- handler = SET_HANDLER(&adapter->q_vector[vector]);
+ handler = SET_HANDLER(adapter->q_vector[vector]);
if(handler == &ixgbe_msix_clean_rx) {
sprintf(adapter->name[vector], "%s-%s-%d",
@@ -1427,7 +1429,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
err = request_irq(adapter->msix_entries[vector].vector,
handler, 0, adapter->name[vector],
- &(adapter->q_vector[vector]));
+ adapter->q_vector[vector]);
if (err) {
DPRINTK(PROBE, ERR,
"request_irq failed for MSIX interrupt "
@@ -1450,7 +1452,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
free_queue_irqs:
for (i = vector - 1; i >= 0; i--)
free_irq(adapter->msix_entries[--vector].vector,
- &(adapter->q_vector[i]));
+ adapter->q_vector[i]);
adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
@@ -1461,7 +1463,7 @@ out:
static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
{
- struct ixgbe_q_vector *q_vector = adapter->q_vector;
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
u8 current_itr;
u32 new_itr = q_vector->eitr;
struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
@@ -1539,6 +1541,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
struct net_device *netdev = data;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
u32 eicr;
/*
@@ -1566,13 +1569,13 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_fan_failure(adapter, eicr);
- if (napi_schedule_prep(&adapter->q_vector[0].napi)) {
+ if (napi_schedule_prep(&(q_vector->napi))) {
adapter->tx_ring[0].total_packets = 0;
adapter->tx_ring[0].total_bytes = 0;
adapter->rx_ring[0].total_packets = 0;
adapter->rx_ring[0].total_bytes = 0;
/* would disable interrupts here but EIAM disabled it */
- __napi_schedule(&adapter->q_vector[0].napi);
+ __napi_schedule(&(q_vector->napi));
}
return IRQ_HANDLED;
@@ -1583,7 +1586,7 @@ static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
for (i = 0; i < q_vectors; i++) {
- struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
q_vector->rxr_count = 0;
@@ -1634,7 +1637,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
i--;
for (; i >= 0; i--) {
free_irq(adapter->msix_entries[i].vector,
- &(adapter->q_vector[i]));
+ adapter->q_vector[i]);
}
ixgbe_reset_q_vectors(adapter);
@@ -1737,7 +1740,18 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
unsigned long mask;
if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- queue0 = index;
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+ if (dcb_i == 8)
+ queue0 = index >> 4;
+ else if (dcb_i == 4)
+ queue0 = index >> 5;
+ else
+ dev_err(&adapter->pdev->dev, "Invalid DCB "
+ "configuration\n");
+ } else {
+ queue0 = index;
+ }
} else {
mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
queue0 = index & mask;
@@ -1751,28 +1765,20 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
+ srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+ IXGBE_SRRCTL_BSIZEHDR_MASK;
+
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- u16 bufsz = IXGBE_RXBUFFER_2048;
- /* grow the amount we can receive on large page machines */
- if (bufsz < (PAGE_SIZE / 2))
- bufsz = (PAGE_SIZE / 2);
- /* cap the bufsz at our largest descriptor size */
- bufsz = min((u16)IXGBE_MAX_RXBUFFER, bufsz);
-
- srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
+ srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#else
+ srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#endif
srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
- srrctl |= ((IXGBE_RX_HDR_SIZE <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK);
} else {
+ srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
-
- if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
- srrctl |= IXGBE_RXBUFFER_2048 >>
- IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- else
- srrctl |= rx_ring->rx_buf_len >>
- IXGBE_SRRCTL_BSIZEPKT_SHIFT;
}
IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
@@ -1812,7 +1818,8 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
IXGBE_PSRTYPE_UDPHDR |
IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_IPV6HDR;
+ IXGBE_PSRTYPE_IPV6HDR |
+ IXGBE_PSRTYPE_L2HDR;
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
}
} else {
@@ -2135,7 +2142,7 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
struct napi_struct *napi;
- q_vector = &adapter->q_vector[q_idx];
+ q_vector = adapter->q_vector[q_idx];
if (!q_vector->rxr_count)
continue;
napi = &q_vector->napi;
@@ -2158,7 +2165,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
q_vectors = 1;
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
- q_vector = &adapter->q_vector[q_idx];
+ q_vector = adapter->q_vector[q_idx];
if (!q_vector->rxr_count)
continue;
napi_disable(&q_vector->napi);
@@ -2451,6 +2458,17 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
ixgbe_irq_enable(adapter);
/*
+ * If this adapter has a fan, check to see if we had a failure
+ * before we enabled the interrupt.
+ */
+ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
+ u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ DPRINTK(DRV, CRIT,
+ "Fan has stopped, replace the adapter\n");
+ }
+
+ /*
* For hot-pluggable SFP+ devices, a new SFP+ module may have
* arrived before interrupts were enabled. We need to kick off
* the SFP+ module setup first, then try to bring up link.
@@ -2498,8 +2516,6 @@ int ixgbe_up(struct ixgbe_adapter *adapter)
/* hardware has been reset, we need to reload some things */
ixgbe_configure(adapter);
- ixgbe_napi_add_all(adapter);
-
return ixgbe_up_complete(adapter);
}
@@ -2877,9 +2893,6 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
- adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
- adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
- ixgbe_set_num_queues(adapter);
} else {
adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
/*
@@ -3103,31 +3116,20 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
* mean we disable MSI-X capabilities of the adapter. */
adapter->msix_entries = kcalloc(v_budget,
sizeof(struct msix_entry), GFP_KERNEL);
- if (!adapter->msix_entries) {
- adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
- adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
- ixgbe_set_num_queues(adapter);
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
- err = ixgbe_alloc_queues(adapter);
- if (err) {
- DPRINTK(PROBE, ERR, "Unable to allocate memory "
- "for queues\n");
- goto out;
- }
-
- goto try_msi;
- }
+ if (adapter->msix_entries) {
+ for (vector = 0; vector < v_budget; vector++)
+ adapter->msix_entries[vector].entry = vector;
- for (vector = 0; vector < v_budget; vector++)
- adapter->msix_entries[vector].entry = vector;
+ ixgbe_acquire_msix_vectors(adapter, v_budget);
- ixgbe_acquire_msix_vectors(adapter, v_budget);
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ goto out;
+ }
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- goto out;
+ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+ adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+ ixgbe_set_num_queues(adapter);
-try_msi:
err = pci_enable_msi(adapter->pdev);
if (!err) {
adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
@@ -3142,6 +3144,87 @@ out:
return err;
}
+/**
+ * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ struct ixgbe_q_vector *q_vector;
+ int napi_vectors;
+ int (*poll)(struct napi_struct *, int);
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ napi_vectors = adapter->num_rx_queues;
+ poll = &ixgbe_clean_rxonly;
+ } else {
+ num_q_vectors = 1;
+ napi_vectors = 1;
+ poll = &ixgbe_poll;
+ }
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
+ if (!q_vector)
+ goto err_out;
+ q_vector->adapter = adapter;
+ q_vector->v_idx = q_idx;
+ q_vector->eitr = adapter->eitr_param;
+ if (q_idx < napi_vectors)
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ (*poll), 64);
+ adapter->q_vector[q_idx] = q_vector;
+ }
+
+ return 0;
+
+err_out:
+ while (q_idx) {
+ q_idx--;
+ q_vector = adapter->q_vector[q_idx];
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ adapter->q_vector[q_idx] = NULL;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ int napi_vectors;
+
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ napi_vectors = adapter->num_rx_queues;
+ } else {
+ num_q_vectors = 1;
+ napi_vectors = 1;
+ }
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
+
+ adapter->q_vector[q_idx] = NULL;
+ if (q_idx < napi_vectors)
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ }
+}
+
void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -3173,18 +3256,25 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
/* Number of supported queues */
ixgbe_set_num_queues(adapter);
- err = ixgbe_alloc_queues(adapter);
- if (err) {
- DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
- goto err_alloc_queues;
- }
-
err = ixgbe_set_interrupt_capability(adapter);
if (err) {
DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
goto err_set_interrupt;
}
+ err = ixgbe_alloc_q_vectors(adapter);
+ if (err) {
+ DPRINTK(PROBE, ERR, "Unable to allocate memory for queue "
+ "vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ err = ixgbe_alloc_queues(adapter);
+ if (err) {
+ DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+ goto err_alloc_queues;
+ }
+
DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
"Tx Queue count = %u\n",
(adapter->num_rx_queues > 1) ? "Enabled" :
@@ -3194,11 +3284,30 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
return 0;
+err_alloc_queues:
+ ixgbe_free_q_vectors(adapter);
+err_alloc_q_vectors:
+ ixgbe_reset_interrupt_capability(adapter);
err_set_interrupt:
+ return err;
+}
+
+/**
+ * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @adapter: board private structure to clear interrupt scheme on
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
+{
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
-err_alloc_queues:
- return err;
+ adapter->tx_ring = NULL;
+ adapter->rx_ring = NULL;
+
+ ixgbe_free_q_vectors(adapter);
+ ixgbe_reset_interrupt_capability(adapter);
}
/**
@@ -3284,9 +3393,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->ring_feature[RING_F_RSS].indices = rss;
adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
- if (hw->mac.type == ixgbe_mac_82598EB)
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ if (hw->device_id == IXGBE_DEV_ID_82598AT)
+ adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
- else if (hw->mac.type == ixgbe_mac_82599EB) {
+ } else if (hw->mac.type == ixgbe_mac_82599EB) {
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
adapter->flags |= IXGBE_FLAG_RSC_CAPABLE;
adapter->flags |= IXGBE_FLAG_RSC_ENABLED;
@@ -3619,8 +3730,6 @@ static int ixgbe_open(struct net_device *netdev)
ixgbe_configure(adapter);
- ixgbe_napi_add_all(adapter);
-
err = ixgbe_request_irq(adapter);
if (err)
goto err_req_irq;
@@ -3672,55 +3781,6 @@ static int ixgbe_close(struct net_device *netdev)
return 0;
}
-/**
- * ixgbe_napi_add_all - prep napi structs for use
- * @adapter: private struct
- *
- * helper function to napi_add each possible q_vector->napi
- */
-void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
-{
- int q_idx, q_vectors;
- struct net_device *netdev = adapter->netdev;
- int (*poll)(struct napi_struct *, int);
-
- /* check if we already have our netdev->napi_list populated */
- if (&netdev->napi_list != netdev->napi_list.next)
- return;
-
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- poll = &ixgbe_clean_rxonly;
- /* Only enable as many vectors as we have rx queues. */
- q_vectors = adapter->num_rx_queues;
- } else {
- poll = &ixgbe_poll;
- /* only one q_vector for legacy modes */
- q_vectors = 1;
- }
-
- for (q_idx = 0; q_idx < q_vectors; q_idx++) {
- struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
- netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
- }
-}
-
-void ixgbe_napi_del_all(struct ixgbe_adapter *adapter)
-{
- int q_idx;
- int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
- /* legacy and MSI only use one vector */
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
- q_vectors = 1;
-
- for (q_idx = 0; q_idx < q_vectors; q_idx++) {
- struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
- if (!q_vector->rxr_count)
- continue;
- netif_napi_del(&q_vector->napi);
- }
-}
-
#ifdef CONFIG_PM
static int ixgbe_resume(struct pci_dev *pdev)
{
@@ -3730,7 +3790,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- err = pci_enable_device(pdev);
+
+ err = pci_enable_device_mem(pdev);
if (err) {
printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
"suspend\n");
@@ -3782,11 +3843,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
ixgbe_free_all_tx_resources(adapter);
ixgbe_free_all_rx_resources(adapter);
}
- ixgbe_reset_interrupt_capability(adapter);
- ixgbe_napi_del_all(adapter);
- INIT_LIST_HEAD(&netdev->napi_list);
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
+ ixgbe_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
@@ -4000,7 +4057,7 @@ static void ixgbe_watchdog(unsigned long data)
int i;
for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++)
- eics |= (1 << i);
+ eics |= ((u64)1 << i);
/* Cause software interrupt to ensure rx rings are cleaned */
switch (hw->mac.type) {
@@ -4739,7 +4796,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
int i, err, pci_using_dac;
u32 part_num, eec;
- err = pci_enable_device(pdev);
+ err = pci_enable_device_mem(pdev);
if (err)
return err;
@@ -4759,9 +4816,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
pci_using_dac = 0;
}
- err = pci_request_regions(pdev, ixgbe_driver_name);
+ err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
+ IORESOURCE_MEM), ixgbe_driver_name);
if (err) {
- dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed 0x%x\n", err);
goto err_pci_reg;
}
@@ -4869,6 +4928,17 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err)
goto err_sw_init;
+ /*
+ * If there is a fan on this device and it has failed log the
+ * failure.
+ */
+ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
+ u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ DPRINTK(PROBE, CRIT,
+ "Fan has stopped, replace the adapter\n");
+ }
+
/* reset_hw fills in the perm_addr as well */
err = hw->mac.ops.reset_hw(hw);
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
@@ -5012,8 +5082,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
err_register:
ixgbe_release_hw_control(adapter);
err_hw_init:
+ ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
- ixgbe_reset_interrupt_capability(adapter);
err_eeprom:
clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
del_timer_sync(&adapter->sfp_timer);
@@ -5024,7 +5094,8 @@ err_eeprom:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_release_regions(pdev);
+ pci_release_selected_regions(pdev, pci_select_bars(pdev,
+ IORESOURCE_MEM));
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -5071,16 +5142,15 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
- ixgbe_reset_interrupt_capability(adapter);
+ ixgbe_clear_interrupt_scheme(adapter);
ixgbe_release_hw_control(adapter);
iounmap(adapter->hw.hw_addr);
- pci_release_regions(pdev);
+ pci_release_selected_regions(pdev, pci_select_bars(pdev,
+ IORESOURCE_MEM));
DPRINTK(PROBE, INFO, "complete\n");
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
free_netdev(netdev);
@@ -5108,6 +5178,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
netif_device_detach(netdev);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
if (netif_running(netdev))
ixgbe_down(adapter);
pci_disable_device(pdev);
@@ -5129,7 +5202,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
pci_ers_result_t result;
int err;
- if (pci_enable_device(pdev)) {
+ if (pci_enable_device_mem(pdev)) {
DPRINTK(PROBE, ERR,
"Cannot re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index e49e8af59eda..9fd79a05ff0f 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1604,6 +1604,7 @@
#define IXGBE_PSRTYPE_UDPHDR 0x00000020
#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
+#define IXGBE_PSRTYPE_L2HDR 0x00001000
/* SRRCTL bit definitions */
#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
diff --git a/drivers/net/lasi_82596.c b/drivers/net/lasi_82596.c
index efbae4b8398e..a0c578585a50 100644
--- a/drivers/net/lasi_82596.c
+++ b/drivers/net/lasi_82596.c
@@ -161,12 +161,12 @@ lan_init_chip(struct parisc_device *dev)
if (!dev->irq) {
printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
- __FILE__, dev->hpa.start);
+ __FILE__, (unsigned long)dev->hpa.start);
return -ENODEV;
}
- printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", dev->hpa.start,
- dev->irq);
+ printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n",
+ (unsigned long)dev->hpa.start, dev->irq);
netdevice = alloc_etherdev(sizeof(struct i596_private));
if (!netdevice)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index a400d7115f78..d5838528791f 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -88,7 +88,24 @@ static char mv643xx_eth_driver_version[] = "1.4";
#define MAC_ADDR_LOW 0x0014
#define MAC_ADDR_HIGH 0x0018
#define SDMA_CONFIG 0x001c
+#define TX_BURST_SIZE_16_64BIT 0x01000000
+#define TX_BURST_SIZE_4_64BIT 0x00800000
+#define BLM_TX_NO_SWAP 0x00000020
+#define BLM_RX_NO_SWAP 0x00000010
+#define RX_BURST_SIZE_16_64BIT 0x00000008
+#define RX_BURST_SIZE_4_64BIT 0x00000004
#define PORT_SERIAL_CONTROL 0x003c
+#define SET_MII_SPEED_TO_100 0x01000000
+#define SET_GMII_SPEED_TO_1000 0x00800000
+#define SET_FULL_DUPLEX_MODE 0x00200000
+#define MAX_RX_PACKET_9700BYTE 0x000a0000
+#define DISABLE_AUTO_NEG_SPEED_GMII 0x00002000
+#define DO_NOT_FORCE_LINK_FAIL 0x00000400
+#define SERIAL_PORT_CONTROL_RESERVED 0x00000200
+#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL 0x00000008
+#define DISABLE_AUTO_NEG_FOR_DUPLEX 0x00000004
+#define FORCE_LINK_PASS 0x00000002
+#define SERIAL_PORT_ENABLE 0x00000001
#define PORT_STATUS 0x0044
#define TX_FIFO_EMPTY 0x00000400
#define TX_IN_PROGRESS 0x00000080
@@ -106,7 +123,9 @@ static char mv643xx_eth_driver_version[] = "1.4";
#define TX_BW_BURST 0x005c
#define INT_CAUSE 0x0060
#define INT_TX_END 0x07f80000
+#define INT_TX_END_0 0x00080000
#define INT_RX 0x000003fc
+#define INT_RX_0 0x00000004
#define INT_EXT 0x00000002
#define INT_CAUSE_EXT 0x0064
#define INT_EXT_LINK_PHY 0x00110000
@@ -135,15 +154,8 @@ static char mv643xx_eth_driver_version[] = "1.4";
/*
- * SDMA configuration register.
+ * SDMA configuration register default value.
*/
-#define RX_BURST_SIZE_4_64BIT (2 << 1)
-#define RX_BURST_SIZE_16_64BIT (4 << 1)
-#define BLM_RX_NO_SWAP (1 << 4)
-#define BLM_TX_NO_SWAP (1 << 5)
-#define TX_BURST_SIZE_4_64BIT (2 << 22)
-#define TX_BURST_SIZE_16_64BIT (4 << 22)
-
#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
(RX_BURST_SIZE_4_64BIT | \
@@ -160,22 +172,11 @@ static char mv643xx_eth_driver_version[] = "1.4";
/*
- * Port serial control register.
+ * Misc definitions.
*/
-#define SET_MII_SPEED_TO_100 (1 << 24)
-#define SET_GMII_SPEED_TO_1000 (1 << 23)
-#define SET_FULL_DUPLEX_MODE (1 << 21)
-#define MAX_RX_PACKET_9700BYTE (5 << 17)
-#define DISABLE_AUTO_NEG_SPEED_GMII (1 << 13)
-#define DO_NOT_FORCE_LINK_FAIL (1 << 10)
-#define SERIAL_PORT_CONTROL_RESERVED (1 << 9)
-#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL (1 << 3)
-#define DISABLE_AUTO_NEG_FOR_DUPLEX (1 << 2)
-#define FORCE_LINK_PASS (1 << 1)
-#define SERIAL_PORT_ENABLE (1 << 0)
-
-#define DEFAULT_RX_QUEUE_SIZE 128
-#define DEFAULT_TX_QUEUE_SIZE 256
+#define DEFAULT_RX_QUEUE_SIZE 128
+#define DEFAULT_TX_QUEUE_SIZE 256
+#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
/*
@@ -393,6 +394,7 @@ struct mv643xx_eth_private {
struct work_struct tx_timeout_task;
struct napi_struct napi;
+ u32 int_mask;
u8 oom;
u8 work_link;
u8 work_tx;
@@ -651,23 +653,20 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
refilled = 0;
while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
struct sk_buff *skb;
- int unaligned;
int rx;
struct rx_desc *rx_desc;
skb = __skb_dequeue(&mp->rx_recycle);
if (skb == NULL)
- skb = dev_alloc_skb(mp->skb_size +
- dma_get_cache_alignment() - 1);
+ skb = dev_alloc_skb(mp->skb_size);
if (skb == NULL) {
mp->oom = 1;
goto oom;
}
- unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
- if (unaligned)
- skb_reserve(skb, dma_get_cache_alignment() - unaligned);
+ if (SKB_DMA_REALIGN)
+ skb_reserve(skb, SKB_DMA_REALIGN);
refilled++;
rxq->rx_desc_count++;
@@ -966,8 +965,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
if (skb != NULL) {
if (skb_queue_len(&mp->rx_recycle) <
mp->rx_ring_size &&
- skb_recycle_check(skb, mp->skb_size +
- dma_get_cache_alignment() - 1))
+ skb_recycle_check(skb, mp->skb_size))
__skb_queue_head(&mp->rx_recycle, skb);
else
dev_kfree_skb(skb);
@@ -1807,7 +1805,6 @@ static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
int port_num;
u32 accept;
- int i;
oom:
port_num = mp->port_num;
@@ -2064,15 +2061,16 @@ static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
u32 int_cause;
u32 int_cause_ext;
- int_cause = rdlp(mp, INT_CAUSE) & (INT_TX_END | INT_RX | INT_EXT);
+ int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
if (int_cause == 0)
return 0;
int_cause_ext = 0;
- if (int_cause & INT_EXT)
+ if (int_cause & INT_EXT) {
+ int_cause &= ~INT_EXT;
int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
+ }
- int_cause &= INT_TX_END | INT_RX;
if (int_cause) {
wrlp(mp, INT_CAUSE, ~int_cause);
mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
@@ -2179,6 +2177,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
if (mp->work_link) {
mp->work_link = 0;
handle_link_event(mp);
+ work_done++;
continue;
}
@@ -2217,7 +2216,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
if (mp->oom)
mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
napi_complete(napi);
- wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
+ wrlp(mp, INT_MASK, mp->int_mask);
}
return work_done;
@@ -2338,6 +2337,14 @@ static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
* size field are ignored by the hardware.
*/
mp->skb_size = (skb_size + 7) & ~7;
+
+ /*
+ * If NET_SKB_PAD is smaller than a cache line,
+ * netdev_alloc_skb() will cause skb->data to be misaligned
+ * to a cache line boundary. If this is the case, include
+ * some extra space to allow re-aligning the data area.
+ */
+ mp->skb_size += SKB_DMA_REALIGN;
}
static int mv643xx_eth_open(struct net_device *dev)
@@ -2363,6 +2370,8 @@ static int mv643xx_eth_open(struct net_device *dev)
skb_queue_head_init(&mp->rx_recycle);
+ mp->int_mask = INT_EXT;
+
for (i = 0; i < mp->rxq_count; i++) {
err = rxq_init(mp, i);
if (err) {
@@ -2372,6 +2381,7 @@ static int mv643xx_eth_open(struct net_device *dev)
}
rxq_refill(mp->rxq + i, INT_MAX);
+ mp->int_mask |= INT_RX_0 << i;
}
if (mp->oom) {
@@ -2386,12 +2396,13 @@ static int mv643xx_eth_open(struct net_device *dev)
txq_deinit(mp->txq + i);
goto out_free;
}
+ mp->int_mask |= INT_TX_END_0 << i;
}
port_start(mp);
wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
- wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
+ wrlp(mp, INT_MASK, mp->int_mask);
return 0;
@@ -2535,7 +2546,7 @@ static void mv643xx_eth_netpoll(struct net_device *dev)
mv643xx_eth_irq(dev->irq, dev);
- wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
+ wrlp(mp, INT_MASK, mp->int_mask);
}
#endif
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 8dacfbb003e2..9350c8663fd8 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -1019,8 +1019,8 @@ typedef struct {
#define NETXEN_MAC_DEL 2
typedef struct nx_mac_list_s {
- struct nx_mac_list_s *next;
- uint8_t mac_addr[MAX_ADDR_LEN];
+ struct list_head list;
+ uint8_t mac_addr[ETH_ALEN+2];
} nx_mac_list_t;
/*
@@ -1213,7 +1213,7 @@ struct netxen_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
- nx_mac_list_t *mac_list;
+ struct list_head mac_list;
u32 curr_window;
u32 crb_win;
@@ -1264,9 +1264,10 @@ struct netxen_adapter {
int (*enable_phy_interrupts) (struct netxen_adapter *);
int (*disable_phy_interrupts) (struct netxen_adapter *);
- int (*macaddr_set) (struct netxen_adapter *, netxen_ethernet_macaddr_t);
+ int (*macaddr_set) (struct netxen_adapter *, u8 *);
int (*set_mtu) (struct netxen_adapter *, int);
int (*set_promisc) (struct netxen_adapter *, u32);
+ void (*set_multi) (struct net_device *);
int (*phy_read) (struct netxen_adapter *, long reg, u32 *);
int (*phy_write) (struct netxen_adapter *, long reg, u32 val);
int (*init_port) (struct netxen_adapter *, int);
@@ -1331,6 +1332,9 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter,
int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu);
int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu);
+int netxen_p2_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr);
+int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr);
+
#define NXRD32(adapter, off) \
(adapter->hw_read_wx(adapter, off))
#define NXWR32(adapter, off, val) \
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 9f5ced3eaf9d..86c9e78ec39e 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -321,27 +321,6 @@ static unsigned crb_hub_agt[64] =
#define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */
-int netxen_nic_set_mac(struct net_device *netdev, void *p)
-{
- struct netxen_adapter *adapter = netdev_priv(netdev);
- struct sockaddr *addr = p;
-
- if (netif_running(netdev))
- return -EBUSY;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-
- /* For P3, MAC addr is not set in NIU */
- if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
- if (adapter->macaddr_set)
- adapter->macaddr_set(adapter, addr->sa_data);
-
- return 0;
-}
-
#define NETXEN_UNICAST_ADDR(port, index) \
(NETXEN_UNICAST_ADDR_BASE+(port*32)+(index*8))
#define NETXEN_MCAST_ADDR(port, index) \
@@ -470,45 +449,6 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
netxen_nic_set_mcast_addr(adapter, index, null_addr);
}
-static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
- u8 *addr, nx_mac_list_t **add_list, nx_mac_list_t **del_list)
-{
- nx_mac_list_t *cur, *prev;
-
- /* if in del_list, move it to adapter->mac_list */
- for (cur = *del_list, prev = NULL; cur;) {
- if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
- if (prev == NULL)
- *del_list = cur->next;
- else
- prev->next = cur->next;
- cur->next = adapter->mac_list;
- adapter->mac_list = cur;
- return 0;
- }
- prev = cur;
- cur = cur->next;
- }
-
- /* make sure to add each mac address only once */
- for (cur = adapter->mac_list; cur; cur = cur->next) {
- if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
- return 0;
- }
- /* not in del_list, create new entry and add to add_list */
- cur = kmalloc(sizeof(*cur), in_atomic()? GFP_ATOMIC : GFP_KERNEL);
- if (cur == NULL) {
- printk(KERN_ERR "%s: cannot allocate memory. MAC filtering may"
- "not work properly from now.\n", __func__);
- return -1;
- }
-
- memcpy(cur->mac_addr, addr, ETH_ALEN);
- cur->next = *add_list;
- *add_list = cur;
- return 0;
-}
-
static int
netxen_send_cmd_descs(struct netxen_adapter *adapter,
struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
@@ -526,7 +466,7 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
producer = tx_ring->producer;
consumer = tx_ring->sw_consumer;
- if (nr_desc > find_diff_among(producer, consumer, tx_ring->num_desc)) {
+ if (nr_desc >= find_diff_among(producer, consumer, tx_ring->num_desc)) {
netif_tx_unlock_bh(adapter->netdev);
return -EBUSY;
}
@@ -555,14 +495,12 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
return 0;
}
-static int nx_p3_sre_macaddr_change(struct net_device *dev,
- u8 *addr, unsigned op)
+static int
+nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op)
{
- struct netxen_adapter *adapter = netdev_priv(dev);
nx_nic_req_t req;
nx_mac_req_t *mac_req;
u64 word;
- int rv;
memset(&req, 0, sizeof(nx_nic_req_t));
req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23);
@@ -574,28 +512,51 @@ static int nx_p3_sre_macaddr_change(struct net_device *dev,
mac_req->op = op;
memcpy(mac_req->mac_addr, addr, 6);
- rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
- if (rv != 0) {
- printk(KERN_ERR "ERROR. Could not send mac update\n");
- return rv;
+ return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+}
+
+static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
+ u8 *addr, struct list_head *del_list)
+{
+ struct list_head *head;
+ nx_mac_list_t *cur;
+
+ /* look up if already exists */
+ list_for_each(head, del_list) {
+ cur = list_entry(head, nx_mac_list_t, list);
+
+ if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
+ list_move_tail(head, &adapter->mac_list);
+ return 0;
+ }
}
- return 0;
+ cur = kzalloc(sizeof(nx_mac_list_t), GFP_ATOMIC);
+ if (cur == NULL) {
+ printk(KERN_ERR "%s: failed to add mac address filter\n",
+ adapter->netdev->name);
+ return -ENOMEM;
+ }
+ memcpy(cur->mac_addr, addr, ETH_ALEN);
+ list_add_tail(&cur->list, &adapter->mac_list);
+ return nx_p3_sre_macaddr_change(adapter,
+ cur->mac_addr, NETXEN_MAC_ADD);
}
void netxen_p3_nic_set_multi(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
- nx_mac_list_t *cur, *next, *del_list, *add_list = NULL;
struct dev_mc_list *mc_ptr;
u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
u32 mode = VPORT_MISS_MODE_DROP;
+ LIST_HEAD(del_list);
+ struct list_head *head;
+ nx_mac_list_t *cur;
- del_list = adapter->mac_list;
- adapter->mac_list = NULL;
+ list_splice_tail_init(&adapter->mac_list, &del_list);
- nx_p3_nic_add_mac(adapter, netdev->dev_addr, &add_list, &del_list);
- nx_p3_nic_add_mac(adapter, bcast_addr, &add_list, &del_list);
+ nx_p3_nic_add_mac(adapter, netdev->dev_addr, &del_list);
+ nx_p3_nic_add_mac(adapter, bcast_addr, &del_list);
if (netdev->flags & IFF_PROMISC) {
mode = VPORT_MISS_MODE_ACCEPT_ALL;
@@ -611,25 +572,20 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
if (netdev->mc_count > 0) {
for (mc_ptr = netdev->mc_list; mc_ptr;
mc_ptr = mc_ptr->next) {
- nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr,
- &add_list, &del_list);
+ nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, &del_list);
}
}
send_fw_cmd:
adapter->set_promisc(adapter, mode);
- for (cur = del_list; cur;) {
- nx_p3_sre_macaddr_change(netdev, cur->mac_addr, NETXEN_MAC_DEL);
- next = cur->next;
+ head = &del_list;
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, nx_mac_list_t, list);
+
+ nx_p3_sre_macaddr_change(adapter,
+ cur->mac_addr, NETXEN_MAC_DEL);
+ list_del(&cur->list);
kfree(cur);
- cur = next;
- }
- for (cur = add_list; cur;) {
- nx_p3_sre_macaddr_change(netdev, cur->mac_addr, NETXEN_MAC_ADD);
- next = cur->next;
- cur->next = adapter->mac_list;
- adapter->mac_list = cur;
- cur = next;
}
}
@@ -654,17 +610,25 @@ int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
void netxen_p3_free_mac_list(struct netxen_adapter *adapter)
{
- nx_mac_list_t *cur, *next;
-
- cur = adapter->mac_list;
-
- while (cur) {
- next = cur->next;
+ nx_mac_list_t *cur;
+ struct list_head *head = &adapter->mac_list;
+
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, nx_mac_list_t, list);
+ nx_p3_sre_macaddr_change(adapter,
+ cur->mac_addr, NETXEN_MAC_DEL);
+ list_del(&cur->list);
kfree(cur);
- cur = next;
}
}
+int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
+{
+ /* assuming caller has already copied new addr to netdev */
+ netxen_p3_nic_set_multi(adapter->netdev);
+ return 0;
+}
+
#define NETXEN_CONFIG_INTR_COALESCE 3
/*
@@ -752,7 +716,7 @@ int netxen_linkevent_request(struct netxen_adapter *adapter, int enable)
word = NX_NIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
req.req_hdr = cpu_to_le64(word);
- req.words[0] = cpu_to_le64(enable);
+ req.words[0] = cpu_to_le64(enable | (enable << 8));
rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
if (rv != 0) {
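The P3 MAC-filter handling above now keeps the programmed filters on adapter->mac_list and refreshes them by splicing the whole list onto a temporary del_list, re-adding every address that is still wanted and deleting whatever remains. A minimal user-space sketch of that refresh idiom, using a plain singly linked list and printf() in place of the NETXEN_MAC_ADD/NETXEN_MAC_DEL firmware requests (names and example addresses are illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mac_entry {
	unsigned char mac[6];
	struct mac_entry *next;
};

static struct mac_entry *mac_list;	/* filters currently programmed */

/* Keep @mac: move it back from *del_list if present, else allocate and "ADD". */
static void want_mac(const unsigned char *mac, struct mac_entry **del_list)
{
	struct mac_entry **pp, *e;

	for (pp = del_list; (e = *pp) != NULL; pp = &e->next) {
		if (!memcmp(e->mac, mac, 6)) {
			*pp = e->next;		/* still wanted: recycle the entry */
			e->next = mac_list;
			mac_list = e;
			return;
		}
	}
	e = calloc(1, sizeof(*e));
	if (!e)
		return;
	memcpy(e->mac, mac, 6);
	e->next = mac_list;
	mac_list = e;
	printf("ADD %02x\n", mac[0]);		/* stands in for NETXEN_MAC_ADD */
}

int main(void)
{
	const unsigned char a[6] = { 0, 1, 2, 3, 4, 5 };
	const unsigned char b[6] = { 6, 7, 8, 9, 10, 11 };
	struct mac_entry *del_list = NULL, *e;

	want_mac(a, &del_list);			/* prints ADD 00 */
	want_mac(b, &del_list);			/* prints ADD 06 */

	del_list = mac_list;			/* refresh: splice everything onto del_list */
	mac_list = NULL;
	want_mac(a, &del_list);			/* still wanted: recycled, no ADD */

	while ((e = del_list) != NULL) {	/* leftovers are deleted and freed */
		del_list = e->next;
		printf("DEL %02x\n", e->mac[0]);	/* stands in for NETXEN_MAC_DEL */
		free(e);
	}
	return 0;
}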
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h
index f20c96591a87..d4e833339781 100644
--- a/drivers/net/netxen/netxen_nic_hw.h
+++ b/drivers/net/netxen/netxen_nic_hw.h
@@ -42,8 +42,6 @@ struct netxen_adapter;
void netxen_nic_set_link_parameters(struct netxen_adapter *adapter);
-typedef u8 netxen_ethernet_macaddr_t[6];
-
/* Nibble or Byte mode for phy interface (GbE mode only) */
#define _netxen_crb_get_bit(var, bit) ((var >> bit) & 0x1)
@@ -395,14 +393,6 @@ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,
int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
u32 mode);
-/* set the MAC address for a given MAC */
-int netxen_niu_macaddr_set(struct netxen_adapter *adapter,
- netxen_ethernet_macaddr_t addr);
-
-/* XG version */
-int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter,
- netxen_ethernet_macaddr_t addr);
-
/* Generic enable for GbE ports. Will detect the speed of the link. */
int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port);
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index d18216779a09..4a51c31330da 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -319,13 +319,15 @@ err_out:
void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
{
+ adapter->macaddr_set = netxen_p2_nic_set_mac_addr;
+ adapter->set_multi = netxen_p2_nic_set_multi;
+
switch (adapter->ahw.port_type) {
case NETXEN_NIC_GBE:
adapter->enable_phy_interrupts =
netxen_niu_gbe_enable_phy_interrupts;
adapter->disable_phy_interrupts =
netxen_niu_gbe_disable_phy_interrupts;
- adapter->macaddr_set = netxen_niu_macaddr_set;
adapter->set_mtu = netxen_nic_set_mtu_gb;
adapter->set_promisc = netxen_niu_set_promiscuous_mode;
adapter->phy_read = netxen_niu_gbe_phy_read;
@@ -339,7 +341,6 @@ void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
netxen_niu_xgbe_enable_phy_interrupts;
adapter->disable_phy_interrupts =
netxen_niu_xgbe_disable_phy_interrupts;
- adapter->macaddr_set = netxen_niu_xg_macaddr_set;
adapter->set_mtu = netxen_nic_set_mtu_xgb;
adapter->init_port = netxen_niu_xg_init_port;
adapter->set_promisc = netxen_niu_xg_set_promiscuous_mode;
@@ -353,6 +354,8 @@ void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
adapter->set_mtu = nx_fw_cmd_set_mtu;
adapter->set_promisc = netxen_p3_nic_set_promisc;
+ adapter->macaddr_set = netxen_p3_nic_set_mac_addr;
+ adapter->set_multi = netxen_p3_nic_set_multi;
}
}
@@ -1319,10 +1322,11 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
break;
}
- if (count) {
- tx_ring->sw_consumer = sw_consumer;
+ tx_ring->sw_consumer = sw_consumer;
+
+ if (count && netif_running(netdev)) {
smp_mb();
- if (netif_queue_stopped(netdev) && netif_running(netdev)) {
+ if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
netif_tx_lock(netdev);
netif_wake_queue(netdev);
smp_mb();
@@ -1450,7 +1454,6 @@ netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
rds_ring->producer = producer;
NXWR32(adapter, rds_ring->crb_rcv_producer,
(producer - 1) & (rds_ring->num_desc - 1));
- wmb();
}
spin_unlock(&rds_ring->lock);
}
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 5d79c19a6ec0..83dadfd78c3c 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -442,20 +442,38 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
if (!is_valid_ether_addr(netdev->perm_addr))
dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
- else
- adapter->macaddr_set(adapter, netdev->dev_addr);
return 0;
}
+int netxen_nic_set_mac(struct net_device *netdev, void *p)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ if (netif_running(netdev)) {
+ netif_device_detach(netdev);
+ netxen_napi_disable(adapter);
+ }
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ adapter->macaddr_set(adapter, addr->sa_data);
+
+ if (netif_running(netdev)) {
+ netif_device_attach(netdev);
+ netxen_napi_enable(adapter);
+ }
+ return 0;
+}
+
static void netxen_set_multicast_list(struct net_device *dev)
{
struct netxen_adapter *adapter = netdev_priv(dev);
- if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
- netxen_p3_nic_set_multi(dev);
- else
- netxen_p2_nic_set_multi(dev);
+ adapter->set_multi(dev);
}
static const struct net_device_ops netxen_netdev_ops = {
@@ -782,16 +800,13 @@ netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
netxen_nic_driver_name, adapter->portnum);
return err;
}
- adapter->macaddr_set(adapter, netdev->dev_addr);
-
- netxen_nic_set_link_parameters(adapter);
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ adapter->macaddr_set(adapter, netdev->dev_addr);
- netxen_set_multicast_list(netdev);
- if (adapter->set_mtu)
- adapter->set_mtu(adapter, netdev->mtu);
+ adapter->set_multi(netdev);
+ adapter->set_mtu(adapter, netdev->mtu);
adapter->ahw.linkup = 0;
- mod_timer(&adapter->watchdog_timer, jiffies);
netxen_napi_enable(adapter);
@@ -800,6 +815,10 @@ netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
netxen_linkevent_request(adapter, 1);
+ else
+ netxen_nic_set_link_parameters(adapter);
+
+ mod_timer(&adapter->watchdog_timer, jiffies);
return 0;
}
@@ -809,11 +828,15 @@ netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
{
netif_carrier_off(netdev);
netif_stop_queue(netdev);
+ smp_mb();
netxen_napi_disable(adapter);
if (adapter->stop_port)
adapter->stop_port(adapter);
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netxen_p3_free_mac_list(adapter);
+
netxen_release_tx_buffers(adapter);
FLUSH_SCHEDULED_WORK();
@@ -960,6 +983,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
rwlock_init(&adapter->adapter_lock);
spin_lock_init(&adapter->tx_clean_lock);
+ INIT_LIST_HEAD(&adapter->mac_list);
err = netxen_setup_pci_map(adapter);
if (err)
@@ -1113,9 +1137,6 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
netxen_nic_detach(adapter);
-
- if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
- netxen_p3_free_mac_list(adapter);
}
if (adapter->portnum == 0)
@@ -1340,7 +1361,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
producer = tx_ring->producer;
smp_mb();
consumer = tx_ring->sw_consumer;
- if ((no_of_desc+2) > find_diff_among(producer, consumer, num_txd)) {
+ if ((no_of_desc+2) >= find_diff_among(producer, consumer, num_txd)) {
netif_stop_queue(netdev);
smp_mb();
return NETDEV_TX_BUSY;
diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c
index 5e2698bf575a..5941c79be723 100644
--- a/drivers/net/netxen/netxen_nic_niu.c
+++ b/drivers/net/netxen/netxen_nic_niu.c
@@ -402,76 +402,6 @@ int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port)
return 0;
}
-/*
- * Return the current station MAC address.
- * Note that the passed-in value must already be in network byte order.
- */
-static int netxen_niu_macaddr_get(struct netxen_adapter *adapter,
- netxen_ethernet_macaddr_t * addr)
-{
- u32 stationhigh;
- u32 stationlow;
- int phy = adapter->physical_port;
- u8 val[8];
-
- if (addr == NULL)
- return -EINVAL;
- if ((phy < 0) || (phy > 3))
- return -EINVAL;
-
- stationhigh = NXRD32(adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy));
- stationlow = NXRD32(adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy));
- ((__le32 *)val)[1] = cpu_to_le32(stationhigh);
- ((__le32 *)val)[0] = cpu_to_le32(stationlow);
-
- memcpy(addr, val + 2, 6);
-
- return 0;
-}
-
-/*
- * Set the station MAC address.
- * Note that the passed-in value must already be in network byte order.
- */
-int netxen_niu_macaddr_set(struct netxen_adapter *adapter,
- netxen_ethernet_macaddr_t addr)
-{
- u8 temp[4];
- u32 val;
- int phy = adapter->physical_port;
- unsigned char mac_addr[6];
- int i;
-
- if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
- return 0;
-
- for (i = 0; i < 10; i++) {
- temp[0] = temp[1] = 0;
- memcpy(temp + 2, addr, 2);
- val = le32_to_cpu(*(__le32 *)temp);
- if (NXWR32(adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy), val))
- return -EIO;
-
- memcpy(temp, ((u8 *) addr) + 2, sizeof(__le32));
- val = le32_to_cpu(*(__le32 *)temp);
- if (NXWR32(adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy), val))
- return -2;
-
- netxen_niu_macaddr_get(adapter,
- (netxen_ethernet_macaddr_t *) mac_addr);
- if (memcmp(mac_addr, addr, 6) == 0)
- break;
- }
-
- if (i == 10) {
- printk(KERN_ERR "%s: cannot set Mac addr for %s\n",
- netxen_nic_driver_name, adapter->netdev->name);
- printk(KERN_ERR "MAC address set: %pM.\n", addr);
- printk(KERN_ERR "MAC address get: %pM.\n", mac_addr);
- }
- return 0;
-}
-
/* Disable a GbE interface */
int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter)
{
@@ -561,57 +491,6 @@ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,
return 0;
}
-/*
- * Set the MAC address for an XG port
- * Note that the passed-in value must already be in network byte order.
- */
-int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter,
- netxen_ethernet_macaddr_t addr)
-{
- int phy = adapter->physical_port;
- u8 temp[4];
- u32 val;
-
- if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
- return 0;
-
- if ((phy < 0) || (phy > NETXEN_NIU_MAX_XG_PORTS))
- return -EIO;
-
- temp[0] = temp[1] = 0;
- switch (phy) {
- case 0:
- memcpy(temp + 2, addr, 2);
- val = le32_to_cpu(*(__le32 *)temp);
- if (NXWR32(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1, val))
- return -EIO;
-
- memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32));
- val = le32_to_cpu(*(__le32 *)temp);
- if (NXWR32(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI, val))
- return -EIO;
- break;
-
- case 1:
- memcpy(temp + 2, addr, 2);
- val = le32_to_cpu(*(__le32 *)temp);
- if (NXWR32(adapter, NETXEN_NIU_XG1_STATION_ADDR_0_1, val))
- return -EIO;
-
- memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32));
- val = le32_to_cpu(*(__le32 *)temp);
- if (NXWR32(adapter, NETXEN_NIU_XG1_STATION_ADDR_0_HI, val))
- return -EIO;
- break;
-
- default:
- printk(KERN_ERR "Unknown port %d\n", phy);
- break;
- }
-
- return 0;
-}
-
int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
u32 mode)
{
@@ -636,3 +515,36 @@ int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
return 0;
}
+
+int netxen_p2_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
+{
+ u32 mac_hi, mac_lo;
+ u32 reg_hi, reg_lo;
+
+ u8 phy = adapter->physical_port;
+ u8 phy_count = (adapter->ahw.port_type == NETXEN_NIC_XGBE) ?
+ NETXEN_NIU_MAX_XG_PORTS : NETXEN_NIU_MAX_GBE_PORTS;
+
+ if (phy >= phy_count)
+ return -EINVAL;
+
+ mac_lo = ((u32)addr[0] << 16) | ((u32)addr[1] << 24);
+ mac_hi = addr[2] | ((u32)addr[3] << 8) |
+ ((u32)addr[4] << 16) | ((u32)addr[5] << 24);
+
+ if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+ reg_lo = NETXEN_NIU_XGE_STATION_ADDR_0_1 + (0x10000 * phy);
+ reg_hi = NETXEN_NIU_XGE_STATION_ADDR_0_HI + (0x10000 * phy);
+ } else {
+ reg_lo = NETXEN_NIU_GB_STATION_ADDR_1(phy);
+ reg_hi = NETXEN_NIU_GB_STATION_ADDR_0(phy);
+ }
+
+ /* write twice to flush */
+ if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi))
+ return -EIO;
+ if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi))
+ return -EIO;
+
+ return 0;
+}
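netxen_p2_nic_set_mac_addr() above packs the six address bytes into the two NIU station-address registers: bytes 0-1 land in the top half of the low register and bytes 2-5 in the high register, least-significant byte first. A quick stand-alone check of that packing with an arbitrary example address (the NXWR32 register writes themselves are not reproduced):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t mac_lo, mac_hi;

	mac_lo = ((uint32_t)addr[0] << 16) | ((uint32_t)addr[1] << 24);
	mac_hi = addr[2] | ((uint32_t)addr[3] << 8) |
		 ((uint32_t)addr[4] << 16) | ((uint32_t)addr[5] << 24);

	/* for 00:11:22:33:44:55 this prints lo=0x11000000 hi=0x55443322 */
	printf("lo=0x%08x hi=0x%08x\n", mac_lo, mac_hi);
	return 0;
}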
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 6f97b47d74a6..1508b124e3d8 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -49,8 +49,8 @@
#include <asm/processor.h>
#define DRV_NAME "r6040"
-#define DRV_VERSION "0.22"
-#define DRV_RELDATE "25Mar2009"
+#define DRV_VERSION "0.23"
+#define DRV_RELDATE "05May2009"
/* PHY CHIP Address */
#define PHY1_ADDR 1 /* For MAC1 */
@@ -742,6 +742,14 @@ static int r6040_up(struct net_device *dev)
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
int ret;
+ u16 val;
+
+ /* Check presence of a second PHY */
+ val = r6040_phy_read(ioaddr, lp->phy_addr, 2);
+ if (val == 0xFFFF) {
+ printk(KERN_ERR DRV_NAME " no second PHY attached\n");
+ return -EIO;
+ }
/* Initialise and alloc RX/TX buffers */
r6040_init_txbufs(dev);
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 5113b26fc2d9..3cff84078a9e 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1976,7 +1976,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
retval = -ENODEV;
goto out_0;
}
- res_size = res->end - res->start;
+ res_size = res->end - res->start + 1;
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq_res) {
@@ -2109,12 +2109,58 @@ out_0:
return retval;
}
+#ifdef CONFIG_PM
+/* This implementation assumes the device remains powered on its VDDVARIO
+ * pins during suspend. */
+
+static int smsc911x_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct smsc911x_data *pdata = netdev_priv(dev);
+
+ /* enable wake on LAN, energy detection and the external PME
+ * signal. */
+ smsc911x_reg_write(pdata, PMT_CTRL,
+ PMT_CTRL_PM_MODE_D1_ | PMT_CTRL_WOL_EN_ |
+ PMT_CTRL_ED_EN_ | PMT_CTRL_PME_EN_);
+
+ return 0;
+}
+
+static int smsc911x_resume(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ unsigned int to = 100;
+
+ /* Note 3.11 from the datasheet:
+ * "When the LAN9220 is in a power saving state, a write of any
+ * data to the BYTE_TEST register will wake-up the device."
+ */
+ smsc911x_reg_write(pdata, BYTE_TEST, 0);
+
+ /* poll the READY bit in PMT_CTRL. Any other access to the device is
+ * forbidden while this bit isn't set. Try for 100ms and return -EIO
+ * if it failed. */
+ while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to)
+ udelay(1000);
+
+ return (to == 0) ? -EIO : 0;
+}
+
+#else
+#define smsc911x_suspend NULL
+#define smsc911x_resume NULL
+#endif
+
static struct platform_driver smsc911x_driver = {
.probe = smsc911x_drv_probe,
.remove = smsc911x_drv_remove,
.driver = {
.name = SMSC_CHIPNAME,
},
+ .suspend = smsc911x_suspend,
+ .resume = smsc911x_resume,
};
/* Entry point for loading the module */
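The resume handler added above wakes the chip with a dummy BYTE_TEST write and then polls PMT_CTRL_READY_ for roughly 100 ms before allowing any other register access, returning -EIO on timeout. A rough user-space sketch of that bounded-poll pattern (the register accessor and ready bit below are placeholders, not the smsc911x API):

#include <stdio.h>

#define READY_BIT 0x1u

static unsigned int fake_pmt_ctrl;	/* stands in for the chip's PMT_CTRL */

static unsigned int reg_read(void)	/* placeholder for smsc911x_reg_read() */
{
	static int polls;

	if (++polls == 3)		/* pretend READY comes up after 3 reads */
		fake_pmt_ctrl |= READY_BIT;
	return fake_pmt_ctrl;
}

int main(void)
{
	unsigned int to = 100;		/* ~100 x 1 ms of budget, as in the driver */

	/* the wake-up write (BYTE_TEST) would go here, then poll READY */
	while (!(reg_read() & READY_BIT) && --to)
		;			/* the driver udelay(1000)s between reads */

	if (to == 0)
		printf("timeout, would return -EIO\n");
	else
		printf("device ready with %u polls to spare\n", to);
	return 0;
}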
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index e00b5b1f6743..3717569828bf 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -180,6 +180,20 @@ config USB_NET_CDCETHER
IEEE 802 "local assignment" bit is set in the address, a "usbX"
name is used instead.
+config USB_NET_CDC_EEM
+ tristate "CDC EEM support"
+ depends on USB_USBNET && EXPERIMENTAL
+ help
+ This option supports devices conforming to the Communication Device
+ Class (CDC) Ethernet Emulation Model, a specification that's easy to
+ implement in device firmware. The CDC EEM specifications are available
+ from <http://www.usb.org/>.
+
+ This driver creates an interface named "ethX", where X depends on
+ what other networking devices you have in use. However, if the
+ IEEE 802 "local assignment" bit is set in the address, a "usbX"
+ name is used instead.
+
config USB_NET_DM9601
tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices"
depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index f4402a06e52c..b870b0b1cbe0 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_USB_RTL8150) += rtl8150.o
obj-$(CONFIG_USB_HSO) += hso.o
obj-$(CONFIG_USB_NET_AX8817X) += asix.o
obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
+obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o
obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
new file mode 100644
index 000000000000..80e01778dd3b
--- /dev/null
+++ b/drivers/net/usb/cdc_eem.c
@@ -0,0 +1,381 @@
+/*
+ * USB CDC EEM network interface driver
+ * Copyright (C) 2009 Oberthur Technologies
+ * by Omar Laazimani, Olivier Condemine
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ctype.h>
+#include <linux/ethtool.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/usbnet.h>
+
+
+/*
+ * This driver is an implementation of the CDC "Ethernet Emulation
+ * Model" (EEM) specification, which encapsulates Ethernet frames
+ * for transport over USB using a simpler USB device model than the
+ * previous CDC "Ethernet Control Model" (ECM, or "CDC Ethernet").
+ *
+ * For details, see www.usb.org/developers/devclass_docs/CDC_EEM10.pdf
+ *
+ * This version has been tested with GIGAntIC WuaoW SIM Smart Card on 2.6.24,
+ * 2.6.27 and 2.6.30rc2 kernels.
+ * It has also been validated on Openmoko Om 2008.12 (based on the 2.6.24 kernel).
+ * Built on 23-April-2009.
+ */
+
+#define EEM_HEAD 2 /* 2 byte header */
+
+/*-------------------------------------------------------------------------*/
+
+static void eem_linkcmd_complete(struct urb *urb)
+{
+ dev_kfree_skb(urb->context);
+ usb_free_urb(urb);
+}
+
+static void eem_linkcmd(struct usbnet *dev, struct sk_buff *skb)
+{
+ struct urb *urb;
+ int status;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ goto fail;
+
+ usb_fill_bulk_urb(urb, dev->udev, dev->out,
+ skb->data, skb->len, eem_linkcmd_complete, skb);
+
+ status = usb_submit_urb(urb, GFP_ATOMIC);
+ if (status) {
+ usb_free_urb(urb);
+fail:
+ dev_kfree_skb(skb);
+ devwarn(dev, "link cmd failure\n");
+ return;
+ }
+}
+
+static int eem_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int status = 0;
+
+ status = usbnet_get_endpoints(dev, intf);
+ if (status < 0) {
+ usb_set_intfdata(intf, NULL);
+ usb_driver_release_interface(driver_of(intf), intf);
+ return status;
+ }
+
+ /* no jumbogram (16K) support for now */
+
+ dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN;
+
+ return 0;
+}
+
+/*
+ * EEM permits packing multiple Ethernet frames into USB transfers
+ * (a "bundle"), but for TX we don't try to do that.
+ */
+static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags)
+{
+ struct sk_buff *skb2 = NULL;
+ u16 len = skb->len;
+ u32 crc = 0;
+ int padlen = 0;
+
+ /* When ((len + EEM_HEAD + ETH_FCS_LEN) % dev->maxpacket) is
+	 * zero, append a two-byte zero-length EEM packet at the end.
+ * Else the framework would add invalid single byte padding,
+ * since it can't know whether ZLPs will be handled right by
+ * all the relevant hardware and software.
+ */
+ if (!((len + EEM_HEAD + ETH_FCS_LEN) % dev->maxpacket))
+ padlen += 2;
+
+ if (!skb_cloned(skb)) {
+ int headroom = skb_headroom(skb);
+ int tailroom = skb_tailroom(skb);
+
+ if ((tailroom >= ETH_FCS_LEN + padlen)
+ && (headroom >= EEM_HEAD))
+ goto done;
+
+ if ((headroom + tailroom)
+ > (EEM_HEAD + ETH_FCS_LEN + padlen)) {
+ skb->data = memmove(skb->head +
+ EEM_HEAD,
+ skb->data,
+ skb->len);
+ skb_set_tail_pointer(skb, len);
+ goto done;
+ }
+ }
+
+ skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags);
+ if (!skb2)
+ return NULL;
+
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+
+done:
+ /* we don't use the "no Ethernet CRC" option */
+ crc = crc32_le(~0, skb->data, skb->len);
+ crc = ~crc;
+
+ put_unaligned_le32(crc, skb_put(skb, 4));
+
+ /* EEM packet header format:
+ * b0..13: length of ethernet frame
+ * b14: bmCRC (1 == valid Ethernet CRC)
+ * b15: bmType (0 == data)
+ */
+ len = skb->len;
+ put_unaligned_le16(BIT(14) | len, skb_push(skb, 2));
+
+ /* Bundle a zero length EEM packet if needed */
+ if (padlen)
+ put_unaligned_le16(0, skb_put(skb, 2));
+
+ return skb;
+}
+
+static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ /*
+ * Our task here is to strip off framing, leaving skb with one
+ * data frame for the usbnet framework code to process. But we
+ * may have received multiple EEM payloads, or command payloads.
+ * So we must process _everything_ as if it's a header, except
+ * maybe the last data payload
+ *
+ * REVISIT the framework needs updating so that when we consume
+ * all payloads (the last or only message was a command, or a
+ * zero length EEM packet) that is not accounted as an rx_error.
+ */
+ do {
+ struct sk_buff *skb2 = NULL;
+ u16 header;
+ u16 len = 0;
+
+ /* incomplete EEM header? */
+ if (skb->len < EEM_HEAD)
+ return 0;
+
+ /*
+ * EEM packet header format:
+		 * b0..14: EEM type dependent (Data or Command)
+ * b15: bmType
+ */
+ header = get_unaligned_le16(skb->data);
+ skb_pull(skb, EEM_HEAD);
+
+ /*
+		 * The bmType bit denotes whether the EEM
+		 * packet carries data or a (link) command:
+ * bmType = 0 : EEM data payload
+ * bmType = 1 : EEM (link) command
+ */
+ if (header & BIT(15)) {
+ u16 bmEEMCmd;
+
+ /*
+ * EEM (link) command packet:
+ * b0..10: bmEEMCmdParam
+ * b11..13: bmEEMCmd
+ * b14: bmReserved (must be 0)
+ * b15: 1 (EEM command)
+ */
+ if (header & BIT(14)) {
+ devdbg(dev, "reserved command %04x\n", header);
+ continue;
+ }
+
+ bmEEMCmd = (header >> 11) & 0x7;
+ switch (bmEEMCmd) {
+
+ /* Responding to echo requests is mandatory. */
+ case 0: /* Echo command */
+ len = header & 0x7FF;
+
+ /* bogus command? */
+ if (skb->len < len)
+ return 0;
+
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!skb2))
+ goto next;
+ skb_trim(skb2, len);
+ put_unaligned_le16(BIT(15) | (1 << 11) | len,
+ skb_push(skb2, 2));
+ eem_linkcmd(dev, skb2);
+ break;
+
+ /*
+ * Host may choose to ignore hints.
+ * - suspend: peripheral ready to suspend
+ * - response: suggest N millisec polling
+ * - response complete: suggest N sec polling
+ */
+ case 2: /* Suspend hint */
+ case 3: /* Response hint */
+ case 4: /* Response complete hint */
+ continue;
+
+ /*
+ * Hosts should never receive host-to-peripheral
+ * or reserved command codes; or responses to an
+ * echo command we didn't send.
+ */
+ case 1: /* Echo response */
+ case 5: /* Tickle */
+ default: /* reserved */
+ devwarn(dev, "unexpected link command %d\n",
+ bmEEMCmd);
+ continue;
+ }
+
+ } else {
+ u32 crc, crc2;
+ int is_last;
+
+ /* zero length EEM packet? */
+ if (header == 0)
+ continue;
+
+ /*
+ * EEM data packet header :
+ * b0..13: length of ethernet frame
+ * b14: bmCRC
+ * b15: 0 (EEM data)
+ */
+ len = header & 0x3FFF;
+
+ /* bogus EEM payload? */
+ if (skb->len < len)
+ return 0;
+
+ /* bogus ethernet frame? */
+ if (len < (ETH_HLEN + ETH_FCS_LEN))
+ goto next;
+
+ /*
+ * Treat the last payload differently: framework
+ * code expects our "fixup" to have stripped off
+ * headers, so "skb" is a data packet (or error).
+ * Else if it's not the last payload, keep "skb"
+ * for further processing.
+ */
+ is_last = (len == skb->len);
+ if (is_last)
+ skb2 = skb;
+ else {
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!skb2))
+ return 0;
+ }
+
+ crc = get_unaligned_le32(skb2->data
+ + len - ETH_FCS_LEN);
+ skb_trim(skb2, len - ETH_FCS_LEN);
+
+ /*
+			 * The bmCRC bit indicates whether the CRC field in
+			 * the Ethernet frame contains a calculated CRC:
+ * bmCRC = 1 : CRC is calculated
+ * bmCRC = 0 : CRC = 0xDEADBEEF
+ */
+ if (header & BIT(14))
+ crc2 = ~crc32_le(~0, skb2->data, len);
+ else
+ crc2 = 0xdeadbeef;
+
+ if (is_last)
+ return crc == crc2;
+
+ if (unlikely(crc != crc2)) {
+ dev->stats.rx_errors++;
+ dev_kfree_skb_any(skb2);
+ } else
+ usbnet_skb_return(dev, skb2);
+ }
+
+next:
+ skb_pull(skb, len);
+ } while (skb->len);
+
+ return 1;
+}
+
+static const struct driver_info eem_info = {
+ .description = "CDC EEM Device",
+ .flags = FLAG_ETHER,
+ .bind = eem_bind,
+ .rx_fixup = eem_rx_fixup,
+ .tx_fixup = eem_tx_fixup,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static const struct usb_device_id products[] = {
+{
+ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_EEM,
+ USB_CDC_PROTO_EEM),
+ .driver_info = (unsigned long) &eem_info,
+},
+{
+ /* EMPTY == end of list */
+},
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver eem_driver = {
+ .name = "cdc_eem",
+ .id_table = products,
+ .probe = usbnet_probe,
+ .disconnect = usbnet_disconnect,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+};
+
+
+static int __init eem_init(void)
+{
+ return usb_register(&eem_driver);
+}
+module_init(eem_init);
+
+static void __exit eem_exit(void)
+{
+ usb_deregister(&eem_driver);
+}
+module_exit(eem_exit);
+
+MODULE_AUTHOR("Omar Laazimani <omar.oberthur@gmail.com>");
+MODULE_DESCRIPTION("USB CDC EEM");
+MODULE_LICENSE("GPL");
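eem_tx_fixup() and eem_rx_fixup() above build and parse the two-byte EEM header in place: bits 0..13 carry the frame length (CRC included), bit 14 flags a real Ethernet CRC, and bit 15 distinguishes data from link commands. A stand-alone check of that bit layout, with lengths picked purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define BIT(n)	(1u << (n))

int main(void)
{
	/* a 60-byte Ethernet frame plus the 4-byte CRC appended by eem_tx_fixup() */
	uint16_t len = 60 + 4;
	uint16_t hdr = BIT(14) | len;		/* bmType=0 (data), bmCRC=1, length */

	printf("data header = 0x%04x\n", hdr);		/* 0x4040 */

	/* receive side: the same fields picked apart as in eem_rx_fixup() */
	printf("is command: %d\n", !!(hdr & BIT(15)));	/* 0 */
	printf("crc valid : %d\n", !!(hdr & BIT(14)));	/* 1 */
	printf("length    : %d\n", hdr & 0x3FFF);	/* 64 */

	/* echo response to a received echo command with a 16-byte payload */
	uint16_t echo = BIT(15) | (1 << 11) | 16;
	printf("echo resp = 0x%04x\n", echo);		/* 0x8810 */
	return 0;
}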
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 28dd225269ad..0eb939c40ac1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -719,6 +719,14 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
{
unsigned long flags;
int ret = 0;
+ __le16 key_flags = 0;
+
+ key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ if (sta_id == priv->hw_params.bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
@@ -738,6 +746,9 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
"no space for a new key");
+ priv->stations[sta_id].sta.key.key_flags = key_flags;
+
+
/* This copy is acutally not needed: we get the key with each TX */
memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
@@ -754,9 +765,7 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
{
u8 sta_id = IWL_INVALID_STATION;
unsigned long flags;
- __le16 key_flags = 0;
int i;
- DECLARE_MAC_BUF(mac);
sta_id = priv->cfg->ops->smgmt->find_station(priv, addr);
if (sta_id == IWL_INVALID_STATION) {
@@ -771,16 +780,8 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
return;
}
- key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
- key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
- key_flags &= ~STA_KEY_FLG_INVALID;
-
- if (sta_id == priv->hw_params.bcast_sta_id)
- key_flags |= STA_KEY_MULTICAST_MSK;
-
spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].sta.key.key_flags = key_flags;
priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
for (i = 0; i < 5; i++)
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 9f5191a84a13..f6c1489a0c4a 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -1460,7 +1460,6 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
rxq->bd = NULL;
rxq->rb_stts = NULL;
}
-EXPORT_SYMBOL(iwl3945_rx_queue_free);
/* Convert linear signal-to-noise ratio into dB */
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 0254741bece0..c01c1196d45e 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2460,6 +2460,8 @@ static void __devinit quirk_i82576_sriov(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c9, quirk_i82576_sriov);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov);
#endif /* CONFIG_PCI_IOV */
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index a1f17abba7dc..3d7a6687d247 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -182,6 +182,33 @@ static inline unsigned compare_ether_addr_64bits(const u8 addr1[6+2],
return compare_ether_addr(addr1, addr2);
#endif
}
+
+/**
+ * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
+ * @dev: Pointer to a device structure
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Compare the passed address with all addresses of the device. Return true
+ * if the address is one of the device's addresses.
+ *
+ * Note that this function calls compare_ether_addr_64bits(), so the address
+ * buffer must provide the 6 + 2 bytes of padding that helper expects.
+ */
+static inline bool is_etherdev_addr(const struct net_device *dev,
+ const u8 addr[6 + 2])
+{
+ struct netdev_hw_addr *ha;
+ int res = 1;
+
+ rcu_read_lock();
+ for_each_dev_addr(dev, ha) {
+ res = compare_ether_addr_64bits(addr, ha->addr);
+ if (!res)
+ break;
+ }
+ rcu_read_unlock();
+ return !res;
+}
#endif /* __KERNEL__ */
/**
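is_etherdev_addr() above walks the new per-device address list with compare_ether_addr_64bits(), which is why its kerneldoc warns about padding: the helper compares addresses as 8-byte quantities and ignores the trailing two bytes. A small host-side sketch of that idea, assuming a little-endian machine (this only illustrates the trick, it is not the kernel helper itself):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Compare two MACs stored in 8-byte buffers: XOR as one 64-bit word and mask
 * off the two padding bytes.  This is why callers must hand the helper
 * buffers of at least 6 + 2 bytes. */
static int mac_differs(const uint8_t a[8], const uint8_t b[8])
{
	uint64_t x, y;

	memcpy(&x, a, 8);
	memcpy(&y, b, 8);
	return ((x ^ y) & 0x0000ffffffffffffULL) != 0;	/* little-endian layout */
}

int main(void)
{
	uint8_t a[8] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0xaa, 0xbb };
	uint8_t b[8] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0xcc, 0xdd };

	/* only the padding bytes differ, so the addresses compare equal */
	printf("differs: %d\n", mac_differs(a, b));	/* 0 */
	b[5] = 0x66;
	printf("differs: %d\n", mac_differs(a, b));	/* 1 */
	return 0;
}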
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ff42aba403c5..2af89b662cad 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -39,6 +39,7 @@
#include <linux/device.h>
#include <linux/percpu.h>
+#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
@@ -210,6 +211,16 @@ struct dev_addr_list
#define dmi_users da_users
#define dmi_gusers da_gusers
+struct netdev_hw_addr {
+ struct list_head list;
+ unsigned char addr[MAX_ADDR_LEN];
+ unsigned char type;
+#define NETDEV_HW_ADDR_T_LAN 1
+#define NETDEV_HW_ADDR_T_SAN 2
+#define NETDEV_HW_ADDR_T_SLAVE 3
+ struct rcu_head rcu_head;
+};
+
struct hh_cache
{
struct hh_cache *hh_next; /* Next entry */
@@ -784,8 +795,11 @@ struct net_device
*/
unsigned long last_rx; /* Time of last Rx */
/* Interface address info used in eth_type_trans() */
- unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast
- because most packets are unicast) */
+ unsigned char *dev_addr; /* hw address, (before bcast
+ because most packets are
+ unicast) */
+
+ struct list_head dev_addr_list; /* list of device hw addresses */
unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
@@ -1791,6 +1805,13 @@ static inline void netif_addr_unlock_bh(struct net_device *dev)
spin_unlock_bh(&dev->addr_list_lock);
}
+/*
+ * dev_addr_list walker. Should be used only for read access. Call with
+ * rcu_read_lock held.
+ */
+#define for_each_dev_addr(dev, ha) \
+ list_for_each_entry_rcu(ha, &dev->dev_addr_list, list)
+
/* These functions live elsewhere (drivers/net/net_init.c, but related) */
extern void ether_setup(struct net_device *dev);
@@ -1803,6 +1824,19 @@ extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
alloc_netdev_mq(sizeof_priv, name, setup, 1)
extern int register_netdev(struct net_device *dev);
extern void unregister_netdev(struct net_device *dev);
+
+/* Functions used for device addresses handling */
+extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
+ unsigned char addr_type);
+extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
+ unsigned char addr_type);
+extern int dev_addr_add_multiple(struct net_device *to_dev,
+ struct net_device *from_dev,
+ unsigned char addr_type);
+extern int dev_addr_del_multiple(struct net_device *to_dev,
+ struct net_device *from_dev,
+ unsigned char addr_type);
+
/* Functions used for secondary unicast and multicast support */
extern void dev_set_rx_mode(struct net_device *dev);
extern void __dev_set_rx_mode(struct net_device *dev);
diff --git a/include/linux/netfilter/xt_LED.h b/include/linux/netfilter/xt_LED.h
index 4c91a0d770d0..f5509e7524d3 100644
--- a/include/linux/netfilter/xt_LED.h
+++ b/include/linux/netfilter/xt_LED.h
@@ -1,6 +1,8 @@
#ifndef _XT_LED_H
#define _XT_LED_H
+#include <linux/types.h>
+
struct xt_led_info {
char id[27]; /* Unique ID for this trigger in the LED class */
__u8 always_blink; /* Blink even if the LED is already on */
diff --git a/include/linux/netfilter/xt_cluster.h b/include/linux/netfilter/xt_cluster.h
index 5e0a0d07b526..886682656f09 100644
--- a/include/linux/netfilter/xt_cluster.h
+++ b/include/linux/netfilter/xt_cluster.h
@@ -12,4 +12,6 @@ struct xt_cluster_match_info {
u_int32_t flags;
};
+#define XT_CLUSTER_NODES_MAX 32
+
#endif /* _XT_CLUSTER_MATCH_H */
diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h
index 3c86ed25a04c..c24124a42ce5 100644
--- a/include/linux/usb/cdc.h
+++ b/include/linux/usb/cdc.h
@@ -17,6 +17,7 @@
#define USB_CDC_SUBCLASS_DMM 0x09
#define USB_CDC_SUBCLASS_MDLM 0x0a
#define USB_CDC_SUBCLASS_OBEX 0x0b
+#define USB_CDC_SUBCLASS_EEM 0x0c
#define USB_CDC_PROTO_NONE 0
@@ -28,6 +29,8 @@
#define USB_CDC_ACM_PROTO_AT_CDMA 6
#define USB_CDC_ACM_PROTO_VENDOR 0xff
+#define USB_CDC_PROTO_EEM 7
+
/*-------------------------------------------------------------------------*/
/*
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index be5bd713d2c9..73aead222b32 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -457,6 +457,7 @@ int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
int hci_register_sysfs(struct hci_dev *hdev);
void hci_unregister_sysfs(struct hci_dev *hdev);
+void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index b55b4891029e..19f4150f4d4d 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -41,6 +41,7 @@
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
+#include <net/dst.h>
#include <linux/seq_file.h>
@@ -543,6 +544,17 @@ static inline void tcp_fast_path_check(struct sock *sk)
tcp_fast_path_on(tp);
}
+/* Compute the actual rto_min value */
+static inline u32 tcp_rto_min(struct sock *sk)
+{
+ struct dst_entry *dst = __sk_dst_get(sk);
+ u32 rto_min = TCP_RTO_MIN;
+
+ if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
+ rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
+ return rto_min;
+}
+
/* Compute the actual receive window we are currently advertising.
* Rcv_nxt can be after the window if our peer push more data
* than the offered window.
@@ -890,30 +902,32 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
- if (!sysctl_tcp_low_latency && tp->ucopy.task) {
- __skb_queue_tail(&tp->ucopy.prequeue, skb);
- tp->ucopy.memory += skb->truesize;
- if (tp->ucopy.memory > sk->sk_rcvbuf) {
- struct sk_buff *skb1;
-
- BUG_ON(sock_owned_by_user(sk));
-
- while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
- sk_backlog_rcv(sk, skb1);
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED);
- }
-
- tp->ucopy.memory = 0;
- } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
- wake_up_interruptible(sk->sk_sleep);
- if (!inet_csk_ack_scheduled(sk))
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
- (3 * TCP_RTO_MIN) / 4,
- TCP_RTO_MAX);
+ if (sysctl_tcp_low_latency || !tp->ucopy.task)
+ return 0;
+
+ __skb_queue_tail(&tp->ucopy.prequeue, skb);
+ tp->ucopy.memory += skb->truesize;
+ if (tp->ucopy.memory > sk->sk_rcvbuf) {
+ struct sk_buff *skb1;
+
+ BUG_ON(sock_owned_by_user(sk));
+
+ while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
+ sk_backlog_rcv(sk, skb1);
+ NET_INC_STATS_BH(sock_net(sk),
+ LINUX_MIB_TCPPREQUEUEDROPPED);
}
- return 1;
+
+ tp->ucopy.memory = 0;
+ } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
+ wake_up_interruptible_poll(sk->sk_sleep,
+ POLLIN | POLLRDNORM | POLLRDBAND);
+ if (!inet_csk_ack_scheduled(sk))
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+ (3 * tcp_rto_min(sk)) / 4,
+ TCP_RTO_MAX);
}
- return 0;
+ return 1;
}
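With tcp_rto_min() now available from tcp.h, the prequeue path above arms the delayed-ACK timer with 3/4 of the effective rto_min instead of the compile-time TCP_RTO_MIN, so a route that lowers rto_min (via the rto_min route metric) also speeds up this timer. A back-of-the-envelope check, assuming HZ=1000 so that jiffies equal milliseconds and TCP_RTO_MIN keeps its usual HZ/5 default:

#include <stdio.h>

#define HZ		1000
#define TCP_RTO_MIN	(HZ / 5)		/* 200 ms worth of jiffies at HZ=1000 */

/* delayed-ACK timeout armed by tcp_prequeue(): 3/4 of the effective rto_min */
static unsigned int dack_timeout(unsigned int rto_min)
{
	return (3 * rto_min) / 4;
}

int main(void)
{
	unsigned int route_rto_min = 40;	/* e.g. a route configured with rto_min 40ms */

	printf("default  : %u ms\n", dack_timeout(TCP_RTO_MIN));	/* 150 ms */
	printf("per-route: %u ms\n", dack_timeout(route_rto_min));	/* 30 ms */
	return 0;
}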
diff --git a/net/Kconfig b/net/Kconfig
index ce77db4fcec8..c19f549c8e74 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -119,12 +119,6 @@ menuconfig NETFILTER
<file:Documentation/Changes> under "iptables" for the location of
these packages.
- Make sure to say N to "Fast switching" below if you intend to say Y
- here, as Fast switching currently bypasses netfilter.
-
- Chances are that you should say Y here if you compile a kernel which
- will run as a router and N for regular hosts. If unsure, say N.
-
if NETFILTER
config NETFILTER_DEBUG
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 375f4b4f7f79..61309b26f271 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -248,6 +248,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
+ hci_conn_init_sysfs(conn);
+
tasklet_enable(&hdev->tx_task);
return conn;
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index b7c51082ddeb..a05d45eb3ba1 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -9,7 +9,7 @@
struct class *bt_class = NULL;
EXPORT_SYMBOL_GPL(bt_class);
-static struct workqueue_struct *bluetooth;
+static struct workqueue_struct *bt_workq;
static inline char *link_typetostr(int type)
{
@@ -88,9 +88,12 @@ static struct device_type bt_link = {
static void add_conn(struct work_struct *work)
{
struct hci_conn *conn = container_of(work, struct hci_conn, work_add);
+ struct hci_dev *hdev = conn->hdev;
+
+ /* ensure previous del is complete */
+ flush_work(&conn->work_del);
- /* ensure previous add/del is complete */
- flush_workqueue(bluetooth);
+ dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
if (device_add(&conn->dev) < 0) {
BT_ERR("Failed to register connection device");
@@ -98,27 +101,6 @@ static void add_conn(struct work_struct *work)
}
}
-void hci_conn_add_sysfs(struct hci_conn *conn)
-{
- struct hci_dev *hdev = conn->hdev;
-
- BT_DBG("conn %p", conn);
-
- conn->dev.type = &bt_link;
- conn->dev.class = bt_class;
- conn->dev.parent = &hdev->dev;
-
- dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
-
- dev_set_drvdata(&conn->dev, conn);
-
- device_initialize(&conn->dev);
-
- INIT_WORK(&conn->work_add, add_conn);
-
- queue_work(bluetooth, &conn->work_add);
-}
-
/*
* The rfcomm tty device will possibly retain even when conn
* is down, and sysfs doesn't support move zombie device,
@@ -134,8 +116,11 @@ static void del_conn(struct work_struct *work)
struct hci_conn *conn = container_of(work, struct hci_conn, work_del);
struct hci_dev *hdev = conn->hdev;
- /* ensure previous add/del is complete */
- flush_workqueue(bluetooth);
+ /* ensure previous add is complete */
+ flush_work(&conn->work_add);
+
+ if (!device_is_registered(&conn->dev))
+ return;
while (1) {
struct device *dev;
@@ -152,16 +137,36 @@ static void del_conn(struct work_struct *work)
hci_dev_put(hdev);
}
-void hci_conn_del_sysfs(struct hci_conn *conn)
+void hci_conn_init_sysfs(struct hci_conn *conn)
{
+ struct hci_dev *hdev = conn->hdev;
+
BT_DBG("conn %p", conn);
- if (!device_is_registered(&conn->dev))
- return;
+ conn->dev.type = &bt_link;
+ conn->dev.class = bt_class;
+ conn->dev.parent = &hdev->dev;
+
+ dev_set_drvdata(&conn->dev, conn);
+
+ device_initialize(&conn->dev);
+ INIT_WORK(&conn->work_add, add_conn);
INIT_WORK(&conn->work_del, del_conn);
+}
+
+void hci_conn_add_sysfs(struct hci_conn *conn)
+{
+ BT_DBG("conn %p", conn);
+
+ queue_work(bt_workq, &conn->work_add);
+}
+
+void hci_conn_del_sysfs(struct hci_conn *conn)
+{
+ BT_DBG("conn %p", conn);
- queue_work(bluetooth, &conn->work_del);
+ queue_work(bt_workq, &conn->work_del);
}
static inline char *host_typetostr(int type)
@@ -438,13 +443,13 @@ void hci_unregister_sysfs(struct hci_dev *hdev)
int __init bt_sysfs_init(void)
{
- bluetooth = create_singlethread_workqueue("bluetooth");
- if (!bluetooth)
+ bt_workq = create_singlethread_workqueue("bluetooth");
+ if (!bt_workq)
return -ENOMEM;
bt_class = class_create(THIS_MODULE, "bluetooth");
if (IS_ERR(bt_class)) {
- destroy_workqueue(bluetooth);
+ destroy_workqueue(bt_workq);
return PTR_ERR(bt_class);
}
@@ -453,7 +458,7 @@ int __init bt_sysfs_init(void)
void bt_sysfs_cleanup(void)
{
- destroy_workqueue(bluetooth);
+ destroy_workqueue(bt_workq);
class_destroy(bt_class);
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 3c8073fe970a..637ea71b0a0d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3434,6 +3434,252 @@ void dev_set_rx_mode(struct net_device *dev)
netif_addr_unlock_bh(dev);
}
+/* hw addresses list handling functions */
+
+static int __hw_addr_add(struct list_head *list, unsigned char *addr,
+ int addr_len, unsigned char addr_type)
+{
+ struct netdev_hw_addr *ha;
+ int alloc_size;
+
+ if (addr_len > MAX_ADDR_LEN)
+ return -EINVAL;
+
+ alloc_size = sizeof(*ha);
+ if (alloc_size < L1_CACHE_BYTES)
+ alloc_size = L1_CACHE_BYTES;
+ ha = kmalloc(alloc_size, GFP_ATOMIC);
+ if (!ha)
+ return -ENOMEM;
+ memcpy(ha->addr, addr, addr_len);
+ ha->type = addr_type;
+ list_add_tail_rcu(&ha->list, list);
+ return 0;
+}
+
+static void ha_rcu_free(struct rcu_head *head)
+{
+ struct netdev_hw_addr *ha;
+
+ ha = container_of(head, struct netdev_hw_addr, rcu_head);
+ kfree(ha);
+}
+
+static int __hw_addr_del_ii(struct list_head *list, unsigned char *addr,
+ int addr_len, unsigned char addr_type,
+ int ignore_index)
+{
+ struct netdev_hw_addr *ha;
+ int i = 0;
+
+ list_for_each_entry(ha, list, list) {
+ if (i++ != ignore_index &&
+ !memcmp(ha->addr, addr, addr_len) &&
+ (ha->type == addr_type || !addr_type)) {
+ list_del_rcu(&ha->list);
+ call_rcu(&ha->rcu_head, ha_rcu_free);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+static int __hw_addr_add_multiple_ii(struct list_head *to_list,
+ struct list_head *from_list,
+ int addr_len, unsigned char addr_type,
+ int ignore_index)
+{
+ int err;
+ struct netdev_hw_addr *ha, *ha2;
+ unsigned char type;
+
+ list_for_each_entry(ha, from_list, list) {
+ type = addr_type ? addr_type : ha->type;
+ err = __hw_addr_add(to_list, ha->addr, addr_len, type);
+ if (err)
+ goto unroll;
+ }
+ return 0;
+
+unroll:
+ list_for_each_entry(ha2, from_list, list) {
+ if (ha2 == ha)
+ break;
+ type = addr_type ? addr_type : ha2->type;
+ __hw_addr_del_ii(to_list, ha2->addr, addr_len, type,
+ ignore_index);
+ }
+ return err;
+}
+
+static void __hw_addr_del_multiple_ii(struct list_head *to_list,
+ struct list_head *from_list,
+ int addr_len, unsigned char addr_type,
+ int ignore_index)
+{
+ struct netdev_hw_addr *ha;
+ unsigned char type;
+
+ list_for_each_entry(ha, from_list, list) {
+ type = addr_type ? addr_type : ha->type;
+		__hw_addr_del_ii(to_list, ha->addr, addr_len, type,
+ ignore_index);
+ }
+}
+
+static void __hw_addr_flush(struct list_head *list)
+{
+ struct netdev_hw_addr *ha, *tmp;
+
+ list_for_each_entry_safe(ha, tmp, list, list) {
+ list_del_rcu(&ha->list);
+ call_rcu(&ha->rcu_head, ha_rcu_free);
+ }
+}
+
+/* Device addresses handling functions */
+
+static void dev_addr_flush(struct net_device *dev)
+{
+ /* rtnl_mutex must be held here */
+
+ __hw_addr_flush(&dev->dev_addr_list);
+ dev->dev_addr = NULL;
+}
+
+static int dev_addr_init(struct net_device *dev)
+{
+ unsigned char addr[MAX_ADDR_LEN];
+ struct netdev_hw_addr *ha;
+ int err;
+
+ /* rtnl_mutex must be held here */
+
+ INIT_LIST_HEAD(&dev->dev_addr_list);
+	memset(addr, 0, sizeof(addr));
+	err = __hw_addr_add(&dev->dev_addr_list, addr, sizeof(addr),
+ NETDEV_HW_ADDR_T_LAN);
+ if (!err) {
+ /*
+ * Get the first (previously created) address from the list
+ * and set dev_addr pointer to this location.
+ */
+ ha = list_first_entry(&dev->dev_addr_list,
+ struct netdev_hw_addr, list);
+ dev->dev_addr = ha->addr;
+ }
+ return err;
+}
+
+/**
+ * dev_addr_add - Add a device address
+ * @dev: device
+ * @addr: address to add
+ * @addr_type: address type
+ *
+ * Add a device address to the device or increase the reference count if
+ * it already exists.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int dev_addr_add(struct net_device *dev, unsigned char *addr,
+ unsigned char addr_type)
+{
+ int err;
+
+ ASSERT_RTNL();
+
+ err = __hw_addr_add(&dev->dev_addr_list, addr, dev->addr_len,
+ addr_type);
+ if (!err)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ return err;
+}
+EXPORT_SYMBOL(dev_addr_add);
+
+/**
+ * dev_addr_del - Release a device address.
+ * @dev: device
+ * @addr: address to delete
+ * @addr_type: address type
+ *
+ * Release reference to a device address and remove it from the device
+ * if the reference count drops to zero.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int dev_addr_del(struct net_device *dev, unsigned char *addr,
+ unsigned char addr_type)
+{
+ int err;
+
+ ASSERT_RTNL();
+
+ err = __hw_addr_del_ii(&dev->dev_addr_list, addr, dev->addr_len,
+ addr_type, 0);
+ if (!err)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ return err;
+}
+EXPORT_SYMBOL(dev_addr_del);
+
+/**
+ * dev_addr_add_multiple - Add device addresses from another device
+ * @to_dev: device to which addresses will be added
+ * @from_dev: device from which addresses will be added
+ * @addr_type: address type - 0 means type will be used from from_dev
+ *
+ * Add the device addresses of one device to another.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int dev_addr_add_multiple(struct net_device *to_dev,
+ struct net_device *from_dev,
+ unsigned char addr_type)
+{
+ int err;
+
+ ASSERT_RTNL();
+
+ if (from_dev->addr_len != to_dev->addr_len)
+ return -EINVAL;
+ err = __hw_addr_add_multiple_ii(&to_dev->dev_addr_list,
+ &from_dev->dev_addr_list,
+ to_dev->addr_len, addr_type, 0);
+ if (!err)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
+ return err;
+}
+EXPORT_SYMBOL(dev_addr_add_multiple);
+
+/**
+ * dev_addr_del_multiple - Delete device addresses by another device
+ * @to_dev: device from which the addresses will be deleted
+ * @from_dev: device supplying the addresses to be deleted
+ * @addr_type: address type - 0 means the type will be taken from from_dev
+ *
+ * Deletes the addresses in to_dev that appear in from_dev's address list.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int dev_addr_del_multiple(struct net_device *to_dev,
+ struct net_device *from_dev,
+ unsigned char addr_type)
+{
+ ASSERT_RTNL();
+
+ if (from_dev->addr_len != to_dev->addr_len)
+ return -EINVAL;
+ __hw_addr_del_multiple_ii(&to_dev->dev_addr_list,
+ &from_dev->dev_addr_list,
+ to_dev->addr_len, addr_type, 0);
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
+ return 0;
+}
+EXPORT_SYMBOL(dev_addr_del_multiple);
+
+/* unicast and multicast addresses handling functions */
+
int __dev_addr_delete(struct dev_addr_list **list, int *count,
void *addr, int alen, int glbl)
{
@@ -4776,6 +5022,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
dev->gso_max_size = GSO_MAX_SIZE;
+ dev_addr_init(dev);
netdev_init_queues(dev);
INIT_LIST_HEAD(&dev->napi_list);
@@ -4801,6 +5048,9 @@ void free_netdev(struct net_device *dev)
kfree(dev->_tx);
+ /* Flush device addresses */
+ dev_addr_flush(dev);
+
list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
netif_napi_del(p);
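The new dev_addr_add()/dev_addr_del() calls above are meant to run under the rtnl lock and raise NETDEV_CHANGEADDR themselves. A hypothetical driver-side snippet (function names invented for illustration) registering and later dropping a secondary SAN address through this API:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Hypothetical example: publish a SAN MAC on the device's address list. */
static int example_install_san_mac(struct net_device *netdev, unsigned char *san_mac)
{
	int err;

	rtnl_lock();
	err = dev_addr_add(netdev, san_mac, NETDEV_HW_ADDR_T_SAN);
	rtnl_unlock();
	return err;		/* NETDEV_CHANGEADDR has already fired on success */
}

static void example_remove_san_mac(struct net_device *netdev, unsigned char *san_mac)
{
	rtnl_lock();
	dev_addr_del(netdev, san_mac, NETDEV_HW_ADDR_T_SAN);
	rtnl_unlock();
}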
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f091a5a845c1..d152394b2611 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -502,7 +502,9 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
shinfo->gso_segs = 0;
shinfo->gso_type = 0;
shinfo->ip6_frag_id = 0;
+ shinfo->tx_flags.flags = 0;
shinfo->frag_list = NULL;
+ memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
memset(skb, 0, offsetof(struct sk_buff, tail));
skb->data = skb->head + NET_SKB_PAD;
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 8554d0ea1719..68a8d892c711 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -49,19 +49,22 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
inet_twsk_put(tw);
}
-void inet_twsk_put(struct inet_timewait_sock *tw)
+static noinline void inet_twsk_free(struct inet_timewait_sock *tw)
{
- if (atomic_dec_and_test(&tw->tw_refcnt)) {
- struct module *owner = tw->tw_prot->owner;
- twsk_destructor((struct sock *)tw);
+ struct module *owner = tw->tw_prot->owner;
+ twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
- printk(KERN_DEBUG "%s timewait_sock %p released\n",
- tw->tw_prot->name, tw);
+ pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
#endif
- release_net(twsk_net(tw));
- kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
- module_put(owner);
- }
+ release_net(twsk_net(tw));
+ kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
+ module_put(owner);
+}
+
+void inet_twsk_put(struct inet_timewait_sock *tw)
+{
+ if (atomic_dec_and_test(&tw->tw_refcnt))
+ inet_twsk_free(tw);
}
EXPORT_SYMBOL_GPL(inet_twsk_put);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 56dcf97a97fb..eeb8a92aa416 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -597,16 +597,6 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
tcp_grow_window(sk, skb);
}
-static u32 tcp_rto_min(struct sock *sk)
-{
- struct dst_entry *dst = __sk_dst_get(sk);
- u32 rto_min = TCP_RTO_MIN;
-
- if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
- rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
- return rto_min;
-}
-
/* Called to compute a smoothed rtt estimate. The data fed to this
* routine either comes from timestamps, or from segments that were
* known _not_ to have been retransmitted [see Karn/Partridge
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index bda74e8aed7e..fc79e3416288 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1593,7 +1593,7 @@ process:
#endif
{
if (!tcp_prequeue(sk, skb))
- ret = tcp_v4_do_rcv(sk, skb);
+ ret = tcp_v4_do_rcv(sk, skb);
}
} else
sk_add_backlog(sk, skb);
diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c
index 14e6724d5672..91490ad9302c 100644
--- a/net/ipv6/netfilter/ip6t_ipv6header.c
+++ b/net/ipv6/netfilter/ip6t_ipv6header.c
@@ -50,14 +50,14 @@ ipv6header_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
struct ipv6_opt_hdr _hdr;
int hdrlen;
- /* Is there enough space for the next ext header? */
- if (len < (int)sizeof(struct ipv6_opt_hdr))
- return false;
/* No more exthdr -> evaluate */
if (nexthdr == NEXTHDR_NONE) {
temp |= MASK_NONE;
break;
}
+ /* Is there enough space for the next ext header? */
+ if (len < (int)sizeof(struct ipv6_opt_hdr))
+ return false;
/* ESP -> evaluate */
if (nexthdr == NEXTHDR_ESP) {
temp |= MASK_ESP;
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 5196006b35e0..efaf38349731 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -478,7 +478,7 @@ minstrel_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
return NULL;
for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
- sband = hw->wiphy->bands[hw->conf.channel->band];
+ sband = hw->wiphy->bands[i];
if (sband->n_bitrates > max_rates)
max_rates = sband->n_bitrates;
}
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index 2b33e3a7ee7d..6704fb55c6b2 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -319,13 +319,44 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *priv_sta)
{
struct rc_pid_sta_info *spinfo = priv_sta;
+ struct rc_pid_info *pinfo = priv;
+ struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
struct sta_info *si;
+ int i, j, tmp;
+ bool s;
/* TODO: This routine should consider using RSSI from previous packets
* as we need to have IEEE 802.1X auth succeed immediately after assoc..
* Until that method is implemented, we will use the lowest supported
* rate as a workaround. */
+ /* Sort the rates. This is optimized for the most common case (i.e.
+ * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
+ * mapping too. */
+ for (i = 0; i < sband->n_bitrates; i++) {
+ rinfo[i].index = i;
+ rinfo[i].rev_index = i;
+ if (RC_PID_FAST_START)
+ rinfo[i].diff = 0;
+ else
+ rinfo[i].diff = i * pinfo->norm_offset;
+ }
+ for (i = 1; i < sband->n_bitrates; i++) {
+ s = 0;
+ for (j = 0; j < sband->n_bitrates - i; j++)
+ if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
+ sband->bitrates[rinfo[j + 1].index].bitrate)) {
+ tmp = rinfo[j].index;
+ rinfo[j].index = rinfo[j + 1].index;
+ rinfo[j + 1].index = tmp;
+ rinfo[rinfo[j].index].rev_index = j;
+ rinfo[rinfo[j + 1].index].rev_index = j + 1;
+ s = 1;
+ }
+ if (!s)
+ break;
+ }
+
spinfo->txrate_idx = rate_lowest_index(sband, sta);
/* HACK */
si = container_of(sta, struct sta_info, sta);
@@ -338,21 +369,22 @@ static void *rate_control_pid_alloc(struct ieee80211_hw *hw,
struct rc_pid_info *pinfo;
struct rc_pid_rateinfo *rinfo;
struct ieee80211_supported_band *sband;
- int i, j, tmp;
- bool s;
+ int i, max_rates = 0;
#ifdef CONFIG_MAC80211_DEBUGFS
struct rc_pid_debugfs_entries *de;
#endif
- sband = hw->wiphy->bands[hw->conf.channel->band];
-
pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC);
if (!pinfo)
return NULL;
- /* We can safely assume that sband won't change unless we get
- * reinitialized. */
- rinfo = kmalloc(sizeof(*rinfo) * sband->n_bitrates, GFP_ATOMIC);
+ for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+ sband = hw->wiphy->bands[i];
+ if (sband->n_bitrates > max_rates)
+ max_rates = sband->n_bitrates;
+ }
+
+ rinfo = kmalloc(sizeof(*rinfo) * max_rates, GFP_ATOMIC);
if (!rinfo) {
kfree(pinfo);
return NULL;
@@ -370,33 +402,6 @@ static void *rate_control_pid_alloc(struct ieee80211_hw *hw,
pinfo->rinfo = rinfo;
pinfo->oldrate = 0;
- /* Sort the rates. This is optimized for the most common case (i.e.
- * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
- * mapping too. */
- for (i = 0; i < sband->n_bitrates; i++) {
- rinfo[i].index = i;
- rinfo[i].rev_index = i;
- if (RC_PID_FAST_START)
- rinfo[i].diff = 0;
- else
- rinfo[i].diff = i * pinfo->norm_offset;
- }
- for (i = 1; i < sband->n_bitrates; i++) {
- s = 0;
- for (j = 0; j < sband->n_bitrates - i; j++)
- if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
- sband->bitrates[rinfo[j + 1].index].bitrate)) {
- tmp = rinfo[j].index;
- rinfo[j].index = rinfo[j + 1].index;
- rinfo[j + 1].index = tmp;
- rinfo[rinfo[j].index].rev_index = j;
- rinfo[rinfo[j + 1].index].rev_index = j + 1;
- s = 1;
- }
- if (!s)
- break;
- }
-
#ifdef CONFIG_MAC80211_DEBUGFS
de = &pinfo->dentries;
de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR,
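The larger reshuffle above moves the bitrate sort out of rate_control_pid_alloc(), which only ever saw one band, into rate_control_pid_rate_init(), which is handed the station's actual sband; the allocator now merely sizes rinfo for the largest band. The sort itself is a bubble sort tuned for almost-sorted CCK+OFDM tables that also maintains a reverse mapping; a self-contained version with illustrative types:

#include <stdbool.h>

struct rateinfo { int index; int rev_index; };

static void sort_rates(struct rateinfo *ri, const int *bitrate, int n)
{
        int i, j, tmp;
        bool swapped;

        for (i = 0; i < n; i++) {
                ri[i].index = i;        /* identity mapping to start from */
                ri[i].rev_index = i;
        }
        for (i = 1; i < n; i++) {
                swapped = false;
                for (j = 0; j < n - i; j++) {
                        if (bitrate[ri[j].index] > bitrate[ri[j + 1].index]) {
                                tmp = ri[j].index;
                                ri[j].index = ri[j + 1].index;
                                ri[j + 1].index = tmp;
                                ri[ri[j].index].rev_index = j;
                                ri[ri[j + 1].index].rev_index = j + 1;
                                swapped = true;
                        }
                }
                if (!swapped)           /* already sorted: bail out early */
                        break;
        }
}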
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 36e8e2de980c..5f9a8d7af83d 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -792,7 +792,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
hdrlen = ieee80211_hdrlen(hdr->frame_control);
/* internal error, why is TX_FRAGMENTED set? */
- if (WARN_ON(skb->len <= frag_threshold))
+ if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
return TX_DROP;
/*
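The tweak above accounts for the 4-byte FCS that the hardware appends when deciding whether a frame really should have been marked for fragmentation. Reduced to a stand-alone predicate (the helper name is made up):

#define FCS_LEN 4       /* bytes of frame check sequence appended in hardware */

static int should_fragment(unsigned int frame_len, unsigned int frag_threshold)
{
        /* Compare against the on-air length, FCS included. */
        return frame_len + FCS_LEN > frag_threshold;
}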
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index f13fc57e1ecb..c523f0b8cee5 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1186,28 +1186,6 @@ ctnetlink_change_conntrack(struct nf_conn *ct, struct nlattr *cda[])
return 0;
}
-static inline void
-ctnetlink_event_report(struct nf_conn *ct, u32 pid, int report)
-{
- unsigned int events = 0;
-
- if (test_bit(IPS_EXPECTED_BIT, &ct->status))
- events |= IPCT_RELATED;
- else
- events |= IPCT_NEW;
-
- nf_conntrack_event_report(IPCT_STATUS |
- IPCT_HELPER |
- IPCT_REFRESH |
- IPCT_PROTOINFO |
- IPCT_NATSEQADJ |
- IPCT_MARK |
- events,
- ct,
- pid,
- report);
-}
-
static struct nf_conn *
ctnetlink_create_conntrack(struct nlattr *cda[],
struct nf_conntrack_tuple *otuple,
@@ -1373,6 +1351,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
err = -ENOENT;
if (nlh->nlmsg_flags & NLM_F_CREATE) {
struct nf_conn *ct;
+ enum ip_conntrack_events events;
ct = ctnetlink_create_conntrack(cda, &otuple,
&rtuple, u3);
@@ -1383,9 +1362,18 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
err = 0;
nf_conntrack_get(&ct->ct_general);
spin_unlock_bh(&nf_conntrack_lock);
- ctnetlink_event_report(ct,
- NETLINK_CB(skb).pid,
- nlmsg_report(nlh));
+ if (test_bit(IPS_EXPECTED_BIT, &ct->status))
+ events = IPCT_RELATED;
+ else
+ events = IPCT_NEW;
+
+ nf_conntrack_event_report(IPCT_STATUS |
+ IPCT_HELPER |
+ IPCT_PROTOINFO |
+ IPCT_NATSEQADJ |
+ IPCT_MARK | events,
+ ct, NETLINK_CB(skb).pid,
+ nlmsg_report(nlh));
nf_ct_put(ct);
} else
spin_unlock_bh(&nf_conntrack_lock);
@@ -1404,9 +1392,13 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
if (err == 0) {
nf_conntrack_get(&ct->ct_general);
spin_unlock_bh(&nf_conntrack_lock);
- ctnetlink_event_report(ct,
- NETLINK_CB(skb).pid,
- nlmsg_report(nlh));
+ nf_conntrack_event_report(IPCT_STATUS |
+ IPCT_HELPER |
+ IPCT_PROTOINFO |
+ IPCT_NATSEQADJ |
+ IPCT_MARK,
+ ct, NETLINK_CB(skb).pid,
+ nlmsg_report(nlh));
nf_ct_put(ct);
} else
spin_unlock_bh(&nf_conntrack_lock);
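With the ctnetlink_event_report() helper gone, each call site now builds its own event mask: the create path adds IPCT_NEW or IPCT_RELATED, the update path adds neither, and IPCT_REFRESH is no longer reported from either. A stand-alone sketch of the create-path selection, with placeholder flag values rather than the kernel enum:

enum {
        IPCT_NEW       = 1 << 0,
        IPCT_RELATED   = 1 << 1,
        IPCT_STATUS    = 1 << 2,
        IPCT_HELPER    = 1 << 3,
        IPCT_PROTOINFO = 1 << 4,
        IPCT_NATSEQADJ = 1 << 5,
        IPCT_MARK      = 1 << 6,
};

static unsigned int create_event_mask(int expected)
{
        /* Expected (related) connections report IPCT_RELATED, fresh ones IPCT_NEW. */
        unsigned int events = expected ? IPCT_RELATED : IPCT_NEW;

        return IPCT_STATUS | IPCT_HELPER | IPCT_PROTOINFO |
               IPCT_NATSEQADJ | IPCT_MARK | events;
}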
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
index 6c4847662b85..69a639f35403 100644
--- a/net/netfilter/xt_cluster.c
+++ b/net/netfilter/xt_cluster.c
@@ -135,7 +135,13 @@ static bool xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
{
struct xt_cluster_match_info *info = par->matchinfo;
- if (info->node_mask >= (1 << info->total_nodes)) {
+ if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
+ printk(KERN_ERR "xt_cluster: you have exceeded the maximum "
+ "number of cluster nodes (%u > %u)\n",
+ info->total_nodes, XT_CLUSTER_NODES_MAX);
+ return false;
+ }
+ if (info->node_mask >= (1ULL << info->total_nodes)) {
printk(KERN_ERR "xt_cluster: this node mask cannot be "
"higher than the total number of nodes\n");
return false;
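The checkentry change above adds two defences: the node count is bounded by XT_CLUSTER_NODES_MAX before it is used as a shift amount, and the mask comparison uses 1ULL so a full 32-node configuration does not overflow a 32-bit shift. A user-space sketch of the same validation (the maximum below is a stand-in):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NODES_MAX 32    /* stand-in for XT_CLUSTER_NODES_MAX */

static bool check_cluster(uint32_t node_mask, uint32_t total_nodes)
{
        if (total_nodes > NODES_MAX) {
                fprintf(stderr, "too many cluster nodes (%u > %u)\n",
                        total_nodes, NODES_MAX);
                return false;
        }
        /* 64-bit shift: well defined even when total_nodes == 32. */
        if ((uint64_t)node_mask >= (1ULL << total_nodes)) {
                fprintf(stderr, "node mask exceeds total number of nodes\n");
                return false;
        }
        return true;
}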
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 92cfc9d7e3b9..69188e8358b4 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -51,7 +51,7 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
if (sch->ops == &bfifo_qdisc_ops)
- limit *= qdisc_dev(sch)->mtu;
+ limit *= psched_mtu(qdisc_dev(sch));
q->limit = limit;
} else {
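The bfifo default above is now derived from the scheduler's view of the MTU, which also covers the link-layer header, instead of the bare device MTU. A stand-alone sketch of that sizing, using an illustrative struct and a helper mirroring what psched_mtu() computes in this series:

struct netdev {
        unsigned int mtu;
        unsigned short hard_header_len;
};

static unsigned int sched_mtu(const struct netdev *dev)
{
        /* MTU plus link-layer header, as psched_mtu() does here. */
        return dev->mtu + dev->hard_header_len;
}

static unsigned int default_bfifo_limit(const struct netdev *dev,
                                        unsigned int tx_queue_len)
{
        unsigned int limit = tx_queue_len ? tx_queue_len : 1;

        return limit * sched_mtu(dev);
}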
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
index 5d149c1b5f0d..9ad4d893a566 100644
--- a/net/wimax/op-msg.c
+++ b/net/wimax/op-msg.c
@@ -149,7 +149,8 @@ struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev,
}
result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg);
if (result < 0) {
- dev_err(dev, "no memory to add payload in attribute\n");
+ dev_err(dev, "no memory to add payload (msg %p size %zu) in "
+ "attribute: %d\n", msg, size, result);
goto error_nla_put;
}
genlmsg_end(skb, genl_msg);
@@ -299,10 +300,10 @@ int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name,
struct sk_buff *skb;
skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags);
- if (skb == NULL)
- goto error_msg_new;
- result = wimax_msg_send(wimax_dev, skb);
-error_msg_new:
+ if (IS_ERR(skb))
+ result = PTR_ERR(skb);
+ else
+ result = wimax_msg_send(wimax_dev, skb);
return result;
}
EXPORT_SYMBOL_GPL(wimax_msg);
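The caller fix above matters because wimax_msg_alloc() reports failure with an ERR_PTR-encoded errno rather than NULL, so the old NULL check could pass an error pointer on to wimax_msg_send(). A user-space imitation of the convention, with simplified stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR macros:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define PTR_ERR(ptr)    ((long)(ptr))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *msg_alloc(size_t size)
{
        void *buf = malloc(size);

        return buf ? buf : ERR_PTR(-ENOMEM);    /* errno encoded in the pointer */
}

int main(void)
{
        void *skb = msg_alloc(128);
        long result;

        if (IS_ERR(skb)) {
                result = PTR_ERR(skb);          /* propagate the encoded errno */
        } else {
                result = 0;                     /* would hand the buffer onwards here */
                free(skb);
        }
        printf("result=%ld\n", result);
        return 0;
}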
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index a0ee76b52510..933e1422b09f 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -338,8 +338,21 @@ out:
*/
void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state)
{
+ /*
+ * A driver cannot take the wimax_dev out of the
+ * __WIMAX_ST_NULL state unless by calling wimax_dev_add(). If
+ * the wimax_dev's state is still NULL, we ignore any request
+ * to change its state because it means it hasn't been yet
+ * registered.
+ *
+ * There is no need to complain about it, as routines that
+ * call this might be shared from different code paths that
+ * are called before or after wimax_dev_add() has done its
+ * job.
+ */
mutex_lock(&wimax_dev->mutex);
- __wimax_state_change(wimax_dev, new_state);
+ if (wimax_dev->state > __WIMAX_ST_NULL)
+ __wimax_state_change(wimax_dev, new_state);
mutex_unlock(&wimax_dev->mutex);
return;
}
@@ -376,7 +389,7 @@ EXPORT_SYMBOL_GPL(wimax_state_get);
void wimax_dev_init(struct wimax_dev *wimax_dev)
{
INIT_LIST_HEAD(&wimax_dev->id_table_node);
- __wimax_state_set(wimax_dev, WIMAX_ST_UNINITIALIZED);
+ __wimax_state_set(wimax_dev, __WIMAX_ST_NULL);
mutex_init(&wimax_dev->mutex);
mutex_init(&wimax_dev->mutex_reset);
}
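The guard above relies on the new initial state: wimax_dev_init() now leaves the device in __WIMAX_ST_NULL, and wimax_state_change() silently ignores requests until wimax_dev_add() has moved it past that state, since shared code paths may run on both sides of registration. A minimal sketch of the pattern with illustrative names:

#include <pthread.h>

enum st { ST_NULL = 0, ST_UNINITIALIZED, ST_DOWN, ST_READY };

struct dev {
        enum st state;
        pthread_mutex_t mutex;
};

void state_change(struct dev *d, enum st new_state)
{
        pthread_mutex_lock(&d->mutex);
        /* Not registered yet: drop the request silently, since shared code
         * paths may run both before and after registration completes. */
        if (d->state > ST_NULL)
                d->state = new_state;   /* the real code calls __wimax_state_change() */
        pthread_mutex_unlock(&d->mutex);
}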
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 9fea910204db..537af62ec42b 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -906,6 +906,7 @@ EXPORT_SYMBOL(freq_reg_info);
int freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 *bandwidth,
const struct ieee80211_reg_rule **reg_rule)
{
+ assert_cfg80211_lock();
return freq_reg_info_regd(wiphy, center_freq,
bandwidth, reg_rule, NULL);
}
@@ -1134,7 +1135,8 @@ static bool reg_is_world_roaming(struct wiphy *wiphy)
if (is_world_regdom(cfg80211_regdomain->alpha2) ||
(wiphy->regd && is_world_regdom(wiphy->regd->alpha2)))
return true;
- if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+ if (last_request &&
+ last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
wiphy->custom_regulatory)
return true;
return false;
@@ -1143,6 +1145,12 @@ static bool reg_is_world_roaming(struct wiphy *wiphy)
/* Reap the advantages of previously found beacons */
static void reg_process_beacons(struct wiphy *wiphy)
{
+ /*
+ * Means we are just firing up cfg80211, so no beacons would
+ * have been processed yet.
+ */
+ if (!last_request)
+ return;
if (!reg_is_world_roaming(wiphy))
return;
wiphy_update_beacon_reg(wiphy);
@@ -1177,6 +1185,8 @@ static void handle_channel_custom(struct wiphy *wiphy,
struct ieee80211_supported_band *sband;
struct ieee80211_channel *chan;
+ assert_cfg80211_lock();
+
sband = wiphy->bands[band];
BUG_ON(chan_idx >= sband->n_channels);
chan = &sband->channels[chan_idx];
@@ -1215,10 +1225,13 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
const struct ieee80211_regdomain *regd)
{
enum ieee80211_band band;
+
+ mutex_lock(&cfg80211_mutex);
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
if (wiphy->bands[band])
handle_band_custom(wiphy, band, regd);
}
+ mutex_unlock(&cfg80211_mutex);
}
EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
@@ -1424,7 +1437,7 @@ new_request:
return call_crda(last_request->alpha2);
}
-/* This currently only processes user and driver regulatory hints */
+/* This processes *all* regulatory hints */
static void reg_process_hint(struct regulatory_request *reg_request)
{
int r = 0;
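The regulatory changes above are defensive: lock assertions where the rules are read, a cfg80211_mutex critical section around wiphy_apply_custom_regulatory(), and early returns while last_request is still NULL because no hint has been processed yet. A rough user-space sketch of the combined pattern, with pthreads and a boolean standing in for lockdep:

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t reg_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool reg_mutex_held;                             /* crude stand-in for lockdep */
static struct request { int initiator; } *last_req;     /* NULL until the first hint */

static void assert_reg_lock(void)
{
        assert(reg_mutex_held);
}

void process_beacons(void)
{
        if (!last_req)          /* still starting up: nothing has been hinted yet */
                return;
        /* ... beacon-discovered channels would be re-enabled here ... */
}

void apply_custom_regdomain(void)
{
        pthread_mutex_lock(&reg_mutex);
        reg_mutex_held = true;
        assert_reg_lock();
        /* ... per-band channel handling would run under the lock here ... */
        reg_mutex_held = false;
        pthread_mutex_unlock(&reg_mutex);
}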
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 10b4887dfa6b..df59440290e5 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -393,6 +393,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
memcpy(ies, res->pub.information_elements, ielen);
found->ies_allocated = true;
found->pub.information_elements = ies;
+ found->pub.len_information_elements = ielen;
}
}
}
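The scan-cache fix above keeps the stored IE length in step with the pointer when a cached entry's buffer is replaced by a freshly allocated copy. A stand-alone sketch of that bookkeeping, with illustrative types and an ownership flag:

#include <stdlib.h>
#include <string.h>

struct bss_entry {
        unsigned char *ies;
        size_t len_ies;
        int ies_allocated;
};

int update_ies(struct bss_entry *found, const unsigned char *new_ies, size_t new_len)
{
        unsigned char *copy = malloc(new_len);

        if (!copy)
                return -1;
        memcpy(copy, new_ies, new_len);
        if (found->ies_allocated)
                free(found->ies);       /* only free what we allocated earlier */
        found->ies = copy;
        found->len_ies = new_len;       /* keep length and pointer in sync */
        found->ies_allocated = 1;
        return 0;
}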