Diffstat (limited to 'drivers/net/ethernet')
84 files changed, 4461 insertions, 1921 deletions
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index e1d26433d619..b7232a9b7756 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -707,7 +707,7 @@ static int ax_init_dev(struct net_device *dev) #ifdef CONFIG_AX88796_93CX6 if (ax->plat->flags & AXFLG_HAS_93CX6) { - unsigned char mac_addr[6]; + unsigned char mac_addr[ETH_ALEN]; struct eeprom_93cx6 eeprom; eeprom.data = ei_local; @@ -719,7 +719,7 @@ static int ax_init_dev(struct net_device *dev) (__le16 __force *)mac_addr, sizeof(mac_addr) >> 1); - memcpy(dev->dev_addr, mac_addr, 6); + memcpy(dev->dev_addr, mac_addr, ETH_ALEN); } #endif if (ax->plat->wordlength == 2) { diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index ed2130727643..2d8e28819779 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1521,7 +1521,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) char *chipname; struct net_device *dev; const struct pcnet32_access *a = NULL; - u8 promaddr[6]; + u8 promaddr[ETH_ALEN]; int ret = -ENODEV; /* reset the chip */ @@ -1665,10 +1665,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) } /* read PROM address and compare with CSR address */ - for (i = 0; i < 6; i++) + for (i = 0; i < ETH_ALEN; i++) promaddr[i] = inb(ioaddr + i); - if (memcmp(promaddr, dev->dev_addr, 6) || + if (memcmp(promaddr, dev->dev_addr, ETH_ALEN) || !is_valid_ether_addr(dev->dev_addr)) { if (is_valid_ether_addr(promaddr)) { if (pcnet32_debug & NETIF_MSG_PROBE) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index d80e34b8285f..7ecb44ad24fb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1331,7 +1331,7 @@ enum { BNX2X_SP_RTNL_ENABLE_SRIOV, BNX2X_SP_RTNL_VFPF_MCAST, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, - BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, + BNX2X_SP_RTNL_RX_MODE, BNX2X_SP_RTNL_HYPERVISOR_VLAN, }; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index f2d1ff10054b..2e90868a9276 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2060,7 +2060,11 @@ void bnx2x_squeeze_objects(struct bnx2x *bp) rparam.mcast_obj = &bp->mcast_obj; __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); - /* Add a DEL command... */ + /* Add a DEL command... - Since we're doing a driver cleanup only, + * we take a lock surrounding both the initial send and the CONTs, + * as we don't want a true completion to disrupt us in the middle. + */ + netif_addr_lock_bh(bp->dev); rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); if (rc < 0) BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n", @@ -2072,11 +2076,13 @@ void bnx2x_squeeze_objects(struct bnx2x *bp) if (rc < 0) { BNX2X_ERR("Failed to clean multi-cast object: %d\n", rc); + netif_addr_unlock_bh(bp->dev); return; } rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); } + netif_addr_unlock_bh(bp->dev); } #ifndef BNX2X_STOP_ON_ERROR @@ -2432,9 +2438,7 @@ int bnx2x_load_cnic(struct bnx2x *bp) } /* Initialize Rx filter. 
*/ - netif_addr_lock_bh(bp->dev); - bnx2x_set_rx_mode(bp->dev); - netif_addr_unlock_bh(bp->dev); + bnx2x_set_rx_mode_inner(bp); /* re-read iscsi info */ bnx2x_get_iscsi_info(bp); @@ -2704,9 +2708,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* Start fast path */ /* Initialize Rx filter. */ - netif_addr_lock_bh(bp->dev); - bnx2x_set_rx_mode(bp->dev); - netif_addr_unlock_bh(bp->dev); + bnx2x_set_rx_mode_inner(bp); /* Start the Tx */ switch (load_mode) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index c07a6d054cfe..38be494ffa6e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -418,6 +418,7 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set); * netif_addr_lock_bh() */ void bnx2x_set_rx_mode(struct net_device *dev); +void bnx2x_set_rx_mode_inner(struct bnx2x *bp); /** * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW. diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e06186c305d8..ab5bd6c319d3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -9628,11 +9628,9 @@ sp_rtnl_not_reset: } } - if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, - &bp->sp_rtnl_state)) { - DP(BNX2X_MSG_SP, - "sending set storm rx mode vf pf channel message from rtnl sp-task\n"); - bnx2x_vfpf_storm_rx_mode(bp); + if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) { + DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n"); + bnx2x_set_rx_mode_inner(bp); } if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN, @@ -11849,34 +11847,48 @@ static int bnx2x_set_mc_list(struct bnx2x *bp) void bnx2x_set_rx_mode(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); - u32 rx_mode = BNX2X_RX_MODE_NORMAL; if (bp->state != BNX2X_STATE_OPEN) { DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); return; + } else { + /* Schedule an SP task to handle rest of change */ + DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n"); + smp_mb__before_clear_bit(); + set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state); + smp_mb__after_clear_bit(); + schedule_delayed_work(&bp->sp_rtnl_task, 0); } +} + +void bnx2x_set_rx_mode_inner(struct bnx2x *bp) +{ + u32 rx_mode = BNX2X_RX_MODE_NORMAL; DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags); - if (dev->flags & IFF_PROMISC) + netif_addr_lock_bh(bp->dev); + + if (bp->dev->flags & IFF_PROMISC) { rx_mode = BNX2X_RX_MODE_PROMISC; - else if ((dev->flags & IFF_ALLMULTI) || - ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) && - CHIP_IS_E1(bp))) + } else if ((bp->dev->flags & IFF_ALLMULTI) || + ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) && + CHIP_IS_E1(bp))) { rx_mode = BNX2X_RX_MODE_ALLMULTI; - else { + } else { if (IS_PF(bp)) { /* some multicasts */ if (bnx2x_set_mc_list(bp) < 0) rx_mode = BNX2X_RX_MODE_ALLMULTI; + /* release bh lock, as bnx2x_set_uc_list might sleep */ + netif_addr_unlock_bh(bp->dev); if (bnx2x_set_uc_list(bp) < 0) rx_mode = BNX2X_RX_MODE_PROMISC; + netif_addr_lock_bh(bp->dev); } else { /* configuring mcast to a vf involves sleeping (when we - * wait for the pf's response). Since this function is - * called from non sleepable context we must schedule - * a work item for this purpose + * wait for the pf's response). 
*/ smp_mb__before_clear_bit(); set_bit(BNX2X_SP_RTNL_VFPF_MCAST, @@ -11894,22 +11906,20 @@ void bnx2x_set_rx_mode(struct net_device *dev) /* Schedule the rx_mode command */ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); + netif_addr_unlock_bh(bp->dev); return; } if (IS_PF(bp)) { bnx2x_set_storm_rx_mode(bp); + netif_addr_unlock_bh(bp->dev); } else { - /* configuring rx mode to storms in a vf involves sleeping (when - * we wait for the pf's response). Since this function is - * called from non sleepable context we must schedule - * a work item for this purpose + /* VF will need to request the PF to make this change, and so + * the VF needs to release the bottom-half lock prior to the + * request (as it will likely require sleep on the VF side) */ - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, - &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - schedule_delayed_work(&bp->sp_rtnl_task, 0); + netif_addr_unlock_bh(bp->dev); + bnx2x_vfpf_storm_rx_mode(bp); } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 8f03c984550f..1d46b68fb766 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -159,16 +159,6 @@ static inline void __bnx2x_exe_queue_reset_pending( } } -static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp, - struct bnx2x_exe_queue_obj *o) -{ - spin_lock_bh(&o->lock); - - __bnx2x_exe_queue_reset_pending(bp, o); - - spin_unlock_bh(&o->lock); -} - /** * bnx2x_exe_queue_step - execute one execution chunk atomically * @@ -176,7 +166,7 @@ static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp, * @o: queue * @ramrod_flags: flags * - * (Atomicity is ensured using the exe_queue->lock). + * (Should be called while holding the exe_queue->lock). */ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, struct bnx2x_exe_queue_obj *o, @@ -187,8 +177,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, memset(&spacer, 0, sizeof(spacer)); - spin_lock_bh(&o->lock); - /* Next step should not be performed until the current is finished, * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to * properly clear object internals without sending any command to the FW @@ -200,7 +188,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n"); __bnx2x_exe_queue_reset_pending(bp, o); } else { - spin_unlock_bh(&o->lock); return 1; } } @@ -228,10 +215,8 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, } /* Sanity check */ - if (!cur_len) { - spin_unlock_bh(&o->lock); + if (!cur_len) return 0; - } rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); if (rc < 0) @@ -245,7 +230,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, */ __bnx2x_exe_queue_reset_pending(bp, o); - spin_unlock_bh(&o->lock); return rc; } @@ -432,12 +416,219 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) return true; } +/** + * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details: Non-blocking implementation; should be called under execution + * queue lock. 
+ */ +static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + if (o->head_reader) { + DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n"); + return -EBUSY; + } + + DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n"); + return 0; +} + +/** + * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Should be called under execution queue lock; notice it might release + * and reclaim it during its run. + */ +static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + int rc; + unsigned long ramrod_flags = o->saved_ramrod_flags; + + DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n", + ramrod_flags); + o->head_exe_request = false; + o->saved_ramrod_flags = 0; + rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags); + if (rc != 0) { + BNX2X_ERR("execution of pending commands failed with rc %d\n", + rc); +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); +#endif + } +} + +/** + * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run + * + * @bp: device handle + * @o: vlan_mac object + * @ramrod_flags: ramrod flags of missed execution + * + * @details Should be called under execution queue lock. + */ +static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + unsigned long ramrod_flags) +{ + o->head_exe_request = true; + o->saved_ramrod_flags = ramrod_flags; + DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n", + ramrod_flags); +} + +/** + * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Should be called under execution queue lock. Notice if a pending + * execution exists, it would perform it - possibly releasing and + * reclaiming the execution queue lock. + */ +static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + /* It's possible a new pending execution was added since this writer + * executed. If so, execute again. [Ad infinitum] + */ + while (o->head_exe_request) { + DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n"); + __bnx2x_vlan_mac_h_exec_pending(bp, o); + } +} + +/** + * bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Notice if a pending execution exists, it would perform it - + * possibly releasing and reclaiming the execution queue lock. + */ +void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + spin_lock_bh(&o->exe_queue.lock); + __bnx2x_vlan_mac_h_write_unlock(bp, o); + spin_unlock_bh(&o->exe_queue.lock); +} + +/** + * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Should be called under the execution queue lock. May sleep. May + * release and reclaim execution queue lock during its run. + */ +static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + /* If we got here, we're holding lock --> no WRITER exists */ + o->head_reader++; + DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n", + o->head_reader); + + return 0; +} + +/** + * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details May sleep. 
Claims and releases execution queue lock during its run. + */ +int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + int rc; + + spin_lock_bh(&o->exe_queue.lock); + rc = __bnx2x_vlan_mac_h_read_lock(bp, o); + spin_unlock_bh(&o->exe_queue.lock); + + return rc; +} + +/** + * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Should be called under execution queue lock. Notice if a pending + * execution exists, it would be performed if this was the last + * reader, possibly releasing and reclaiming the execution queue lock. + */ +static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + if (!o->head_reader) { + BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n"); +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); +#endif + } else { + o->head_reader--; + DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n", + o->head_reader); + } + + /* It's possible a new pending execution was added, and that this reader + * was last - if so we need to execute the command. + */ + if (!o->head_reader && o->head_exe_request) { + DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n"); + + /* Writer release will do the trick */ + __bnx2x_vlan_mac_h_write_unlock(bp, o); + } +} + +/** + * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Notice if a pending execution exists, it would be performed if this + * was the last reader. Claims and releases the execution queue lock + * during its run. + */ +void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + spin_lock_bh(&o->exe_queue.lock); + __bnx2x_vlan_mac_h_read_unlock(bp, o); + spin_unlock_bh(&o->exe_queue.lock); +} + static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, int n, u8 *base, u8 stride, u8 size) { struct bnx2x_vlan_mac_registry_elem *pos; u8 *next = base; int counter = 0; + int read_lock; + + DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n"); + read_lock = bnx2x_vlan_mac_h_read_lock(bp, o); + if (read_lock != 0) + BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n"); /* traverse list */ list_for_each_entry(pos, &o->head, link) { @@ -449,6 +640,12 @@ static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, next += stride + size; } } + + if (read_lock == 0) { + DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n"); + bnx2x_vlan_mac_h_read_unlock(bp, o); + } + return counter * ETH_ALEN; } @@ -1397,6 +1594,32 @@ static int bnx2x_wait_vlan_mac(struct bnx2x *bp, return -EBUSY; } +static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + unsigned long *ramrod_flags) +{ + int rc = 0; + + spin_lock_bh(&o->exe_queue.lock); + + DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n"); + rc = __bnx2x_vlan_mac_h_write_trylock(bp, o); + + if (rc != 0) { + __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags); + + /* Calling function should not differentiate between this case + * and the case in which there is already a pending ramrod + */ + rc = 1; + } else { + rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); + } + spin_unlock_bh(&o->exe_queue.lock); + + return rc; +} + /** * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod * @@ -1414,19 +1637,27 @@ static
int bnx2x_complete_vlan_mac(struct bnx2x *bp, struct bnx2x_raw_obj *r = &o->raw; int rc; + /* Clearing the pending list & raw state should be made + * atomically (as execution flow assumes they represent the same). + */ + spin_lock_bh(&o->exe_queue.lock); + /* Reset pending list */ - bnx2x_exe_queue_reset_pending(bp, &o->exe_queue); + __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue); /* Clear pending */ r->clear_pending(r); + spin_unlock_bh(&o->exe_queue.lock); + /* If ramrod failed this is most likely a SW bug */ if (cqe->message.error) return -EINVAL; /* Run the next bulk of pending commands if requested */ if (test_bit(RAMROD_CONT, ramrod_flags)) { - rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); + rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags); + if (rc < 0) return rc; } @@ -1719,9 +1950,8 @@ static inline int bnx2x_vlan_mac_push_new_cmd( * @p: * */ -int bnx2x_config_vlan_mac( - struct bnx2x *bp, - struct bnx2x_vlan_mac_ramrod_params *p) +int bnx2x_config_vlan_mac(struct bnx2x *bp, + struct bnx2x_vlan_mac_ramrod_params *p) { int rc = 0; struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; @@ -1752,7 +1982,8 @@ int bnx2x_config_vlan_mac( /* Execute commands if required */ if (cont || test_bit(RAMROD_EXEC, ramrod_flags) || test_bit(RAMROD_COMP_WAIT, ramrod_flags)) { - rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); + rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj, + &p->ramrod_flags); if (rc < 0) return rc; } @@ -1775,8 +2006,9 @@ int bnx2x_config_vlan_mac( return rc; /* Make a next step */ - rc = bnx2x_exe_queue_step(bp, &o->exe_queue, - ramrod_flags); + rc = __bnx2x_vlan_mac_execute_step(bp, + p->vlan_mac_obj, + &p->ramrod_flags); if (rc < 0) return rc; } @@ -1806,10 +2038,11 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, unsigned long *ramrod_flags) { struct bnx2x_vlan_mac_registry_elem *pos = NULL; - int rc = 0; struct bnx2x_vlan_mac_ramrod_params p; struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; + int read_lock; + int rc = 0; /* Clear pending commands first */ @@ -1844,6 +2077,11 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, __clear_bit(RAMROD_EXEC, &p.ramrod_flags); __clear_bit(RAMROD_CONT, &p.ramrod_flags); + DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n"); + read_lock = bnx2x_vlan_mac_h_read_lock(bp, o); + if (read_lock != 0) + return read_lock; + list_for_each_entry(pos, &o->head, link) { if (pos->vlan_mac_flags == *vlan_mac_flags) { p.user_req.vlan_mac_flags = pos->vlan_mac_flags; @@ -1851,11 +2089,15 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, rc = bnx2x_config_vlan_mac(bp, &p); if (rc < 0) { BNX2X_ERR("Failed to add a new DEL command\n"); + bnx2x_vlan_mac_h_read_unlock(bp, o); return rc; } } } + DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n"); + bnx2x_vlan_mac_h_read_unlock(bp, o); + p.ramrod_flags = *ramrod_flags; __set_bit(RAMROD_CONT, &p.ramrod_flags); @@ -1887,6 +2129,9 @@ static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o, struct bnx2x_credit_pool_obj *vlans_pool) { INIT_LIST_HEAD(&o->head); + o->head_reader = 0; + o->head_exe_request = false; + o->saved_ramrod_flags = 0; o->macs_pool = macs_pool; o->vlans_pool = vlans_pool; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 798dfe996733..533a3abd8c82 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ 
-285,6 +285,12 @@ struct bnx2x_vlan_mac_obj { * entries. */ struct list_head head; + /* Implement a simple reader/writer lock on the head list. + * All these fields should only be accessed under the exe_queue lock + */ + u8 head_reader; /* Num. of readers accessing head list */ + bool head_exe_request; /* Pending execution request. */ + unsigned long saved_ramrod_flags; /* Ramrods of pending execution */ /* TODO: Add its initialization in the init functions */ struct bnx2x_exe_queue_obj exe_queue; @@ -1302,8 +1308,16 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, struct bnx2x_credit_pool_obj *macs_pool, struct bnx2x_credit_pool_obj *vlans_pool); +int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o); +void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o); +int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o); +void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o); int bnx2x_config_vlan_mac(struct bnx2x *bp, - struct bnx2x_vlan_mac_ramrod_params *p); + struct bnx2x_vlan_mac_ramrod_params *p); int bnx2x_vlan_mac_move(struct bnx2x *bp, struct bnx2x_vlan_mac_ramrod_params *p, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 95861efb5051..6291324913e9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -491,12 +491,20 @@ static inline void bnx2x_vfop_credit(struct bnx2x *bp, * and a valid credit counter */ if (!vfop->rc && args->credit) { - int cnt = 0; struct list_head *pos; + int read_lock; + int cnt = 0; + + read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj); + if (read_lock) + DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n"); list_for_each(pos, &obj->head) cnt++; + if (!read_lock) + bnx2x_vlan_mac_h_read_unlock(bp, obj); + atomic_set(args->credit, cnt); } } diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index d78d4cf140ed..4f8a5357cedc 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -1,6 +1,6 @@ /* cnic.c: Broadcom CNIC core network driver.
* - * Copyright (c) 2006-2012 Broadcom Corporation + * Copyright (c) 2006-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -1427,6 +1427,28 @@ static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type, rcu_read_unlock(); } +static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps, + int en_tcp_dack) +{ + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN; + u16 tstorm_flags = 0; + + if (time_stamps) { + xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED; + tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED; + } + if (en_tcp_dack) + tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN; + + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags); + + CNIC_WR16(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags); +} + static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) { struct cnic_local *cp = dev->cnic_priv; @@ -1506,6 +1528,10 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid), hq_bds); + cnic_bnx2x_set_tcp_options(dev, + req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE, + req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE); + return 0; } @@ -2035,9 +2061,6 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev, xstorm_buf->pseudo_header_checksum = swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0)); - if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK)) - tstorm_buf->params |= - L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE; if (kwqe3->ka_timeout) { tstorm_buf->ka_enable = 1; tstorm_buf->ka_timeout = kwqe3->ka_timeout; @@ -2084,25 +2107,6 @@ static void cnic_init_bnx2x_mac(struct cnic_dev *dev) mac[0]); } -static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts) -{ - struct cnic_local *cp = dev->cnic_priv; - struct bnx2x *bp = netdev_priv(dev->netdev); - u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN; - u16 tstorm_flags = 0; - - if (tcp_ts) { - xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED; - tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED; - } - - CNIC_WR8(dev, BAR_XSTRORM_INTMEM + - XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags); - - CNIC_WR16(dev, BAR_TSTRORM_INTMEM + - TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags); -} - static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[], u32 num, int *work) { @@ -2178,9 +2182,6 @@ static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[], CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id); - cnic_bnx2x_set_tcp_timestamp(dev, - kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP); - ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT, kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data); if (!ret) @@ -3603,6 +3604,7 @@ static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid, csk1->rcv_buf = DEF_RCV_BUF; csk1->snd_buf = DEF_SND_BUF; csk1->seed = DEF_SEED; + csk1->tcp_flags = 0; *csk = csk1; return 0; @@ -4020,15 +4022,18 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) cnic_cm_upcall(cp, csk, opcode); break; - case L5CM_RAMROD_CMD_ID_CLOSE: - if (l4kcqe->status != 0) { - netdev_warn(dev->netdev, "RAMROD CLOSE compl with " - "status 
0x%x\n", l4kcqe->status); + case L5CM_RAMROD_CMD_ID_CLOSE: { + struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe; + + if (l4kcqe->status != 0 || l5kcqe->completion_status != 0) { + netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n", + l4kcqe->status, l5kcqe->completion_status); opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; /* Fall through */ } else { break; } + } case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: case L4_KCQE_OPCODE_VALUE_RESET_COMP: @@ -4219,7 +4224,7 @@ static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) u32 port = CNIC_PORT(cp); cnic_init_bnx2x_mac(dev); - cnic_bnx2x_set_tcp_timestamp(dev, 1); + cnic_bnx2x_set_tcp_options(dev, 0, 1); CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0); @@ -5271,6 +5276,13 @@ static int cnic_register_netdev(struct cnic_dev *dev) if (err) netdev_err(dev->netdev, "register_cnic failed\n"); + /* Read iSCSI config again. On some bnx2x device, iSCSI config + * can change after firmware is downloaded. + */ + dev->max_iscsi_conn = ethdev->max_iscsi_conn; + if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI) + dev->max_iscsi_conn = 0; + return err; } @@ -5628,7 +5640,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event, dev = cnic_from_netdev(netdev); - if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) { + if (!dev && event == NETDEV_REGISTER) { /* Check for the hot-plug device */ dev = is_cnic_dev(netdev); if (dev) { @@ -5644,7 +5656,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event, else if (event == NETDEV_UNREGISTER) cnic_ulp_exit(dev); - if (event == NETDEV_UP || (new_dev && netif_running(netdev))) { + if (event == NETDEV_UP) { if (cnic_register_netdev(dev) != 0) { cnic_put(dev); goto done; @@ -5693,21 +5705,8 @@ static struct notifier_block cnic_netdev_notifier = { static void cnic_release(void) { - struct cnic_dev *dev; struct cnic_uio_dev *udev; - while (!list_empty(&cnic_dev_list)) { - dev = list_entry(cnic_dev_list.next, struct cnic_dev, list); - if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { - cnic_ulp_stop(dev); - cnic_stop_hw(dev); - } - - cnic_ulp_exit(dev); - cnic_unregister_netdev(dev); - list_del_init(&dev->list); - cnic_free_dev(dev); - } while (!list_empty(&cnic_udev_list)) { udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev, list); diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h index 62c670619ae6..e7a247473596 100644 --- a/drivers/net/ethernet/broadcom/cnic.h +++ b/drivers/net/ethernet/broadcom/cnic.h @@ -1,6 +1,6 @@ /* cnic.h: Broadcom CNIC core network driver. * - * Copyright (c) 2006-2011 Broadcom Corporation + * Copyright (c) 2006-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h index ede3db35d757..95a8e4b11c9f 100644 --- a/drivers/net/ethernet/broadcom/cnic_defs.h +++ b/drivers/net/ethernet/broadcom/cnic_defs.h @@ -1,7 +1,7 @@ /* cnic.c: Broadcom CNIC core network driver. 
* - * Copyright (c) 2006-2012 Broadcom Corporation + * Copyright (c) 2006-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -5400,8 +5400,8 @@ struct tstorm_l5cm_tcp_flags { u16 flags; #define TSTORM_L5CM_TCP_FLAGS_VLAN_ID (0xFFF<<0) #define TSTORM_L5CM_TCP_FLAGS_VLAN_ID_SHIFT 0 -#define TSTORM_L5CM_TCP_FLAGS_RSRV0 (0x1<<12) -#define TSTORM_L5CM_TCP_FLAGS_RSRV0_SHIFT 12 +#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN (0x1<<12) +#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_SHIFT 12 #define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<13) #define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 13 #define TSTORM_L5CM_TCP_FLAGS_RSRV1 (0x3<<14) diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h index ec9bb9ad4bb3..95aff7642b85 100644 --- a/drivers/net/ethernet/broadcom/cnic_if.h +++ b/drivers/net/ethernet/broadcom/cnic_if.h @@ -1,6 +1,6 @@ /* cnic_if.h: Broadcom CNIC core network driver. * - * Copyright (c) 2006-2012 Broadcom Corporation + * Copyright (c) 2006-2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -14,8 +14,8 @@ #include "bnx2x/bnx2x_mfw_req.h" -#define CNIC_MODULE_VERSION "2.5.16" -#define CNIC_MODULE_RELDATE "Dec 05, 2012" +#define CNIC_MODULE_VERSION "2.5.17" +#define CNIC_MODULE_RELDATE "July 28, 2013" #define CNIC_ULP_RDMA 0 #define CNIC_ULP_ISCSI 1 @@ -238,8 +238,8 @@ struct cnic_sock { u16 src_port; u16 dst_port; u16 vlan_id; - unsigned char old_ha[6]; - unsigned char ha[6]; + unsigned char old_ha[ETH_ALEN]; + unsigned char ha[ETH_ALEN]; u32 mtu; u32 cid; u32 l5_cid; @@ -308,7 +308,7 @@ struct cnic_dev { #define CNIC_F_BNX2_CLASS 3 #define CNIC_F_BNX2X_CLASS 4 atomic_t ref_count; - u8 mac_addr[6]; + u8 mac_addr[ETH_ALEN]; int max_iscsi_conn; int max_fcoe_conn; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ddebc7a5dda0..51bf9ca42faf 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) #define DRV_MODULE_NAME "tg3" #define TG3_MAJ_NUM 3 -#define TG3_MIN_NUM 132 +#define TG3_MIN_NUM 133 #define DRV_MODULE_VERSION \ __stringify(TG3_MAJ_NUM) "." 
__stringify(TG3_MIN_NUM) -#define DRV_MODULE_RELDATE "May 21, 2013" +#define DRV_MODULE_RELDATE "Jul 29, 2013" #define RESET_KIND_SHUTDOWN 0 #define RESET_KIND_INIT 1 @@ -4226,8 +4226,6 @@ static int tg3_power_down_prepare(struct tg3 *tp) static void tg3_power_down(struct tg3 *tp) { - tg3_power_down_prepare(tp); - pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); pci_set_power_state(tp->pdev, PCI_D3hot); } @@ -6095,10 +6093,12 @@ static u64 tg3_refclk_read(struct tg3 *tp) /* tp->lock must be held */ static void tg3_refclk_write(struct tg3 *tp, u64 newval) { - tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP); + u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); + + tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP); tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff); tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32); - tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME); + tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME); } static inline void tg3_full_lock(struct tg3 *tp, int irq_sync); @@ -6214,6 +6214,59 @@ static int tg3_ptp_settime(struct ptp_clock_info *ptp, static int tg3_ptp_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { + struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); + u32 clock_ctl; + int rval = 0; + + switch (rq->type) { + case PTP_CLK_REQ_PEROUT: + if (rq->perout.index != 0) + return -EINVAL; + + tg3_full_lock(tp, 0); + clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); + clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK; + + if (on) { + u64 nsec; + + nsec = rq->perout.start.sec * 1000000000ULL + + rq->perout.start.nsec; + + if (rq->perout.period.sec || rq->perout.period.nsec) { + netdev_warn(tp->dev, + "Device supports only a one-shot timesync output, period must be 0\n"); + rval = -EINVAL; + goto err_out; + } + + if (nsec & (1ULL << 63)) { + netdev_warn(tp->dev, + "Start value (nsec) is over limit. 
Maximum size of start is only 63 bits\n"); + rval = -EINVAL; + goto err_out; + } + + tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff)); + tw32(TG3_EAV_WATCHDOG0_MSB, + TG3_EAV_WATCHDOG0_EN | + ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK)); + + tw32(TG3_EAV_REF_CLCK_CTL, + clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0); + } else { + tw32(TG3_EAV_WATCHDOG0_MSB, 0); + tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl); + } + +err_out: + tg3_full_unlock(tp); + return rval; + + default: + break; + } + return -EOPNOTSUPP; } @@ -6223,7 +6276,7 @@ static const struct ptp_clock_info tg3_ptp_caps = { .max_adj = 250000000, .n_alarm = 0, .n_ext_ts = 0, - .n_per_out = 0, + .n_per_out = 1, .pps = 0, .adjfreq = tg3_ptp_adjfreq, .adjtime = tg3_ptp_adjtime, @@ -10367,6 +10420,9 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) if (tg3_flag(tp, 5755_PLUS)) tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; + if (tg3_asic_rev(tp) == ASIC_REV_5762) + tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX; + if (tg3_flag(tp, ENABLE_RSS)) tp->rx_mode |= RX_MODE_RSS_ENABLE | RX_MODE_RSS_ITBL_HASH_BITS_7 | @@ -11502,7 +11558,7 @@ static int tg3_close(struct net_device *dev) memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev)); memset(&tp->estats_prev, 0, sizeof(tp->estats_prev)); - tg3_power_down(tp); + tg3_power_down_prepare(tp); tg3_carrier_off(tp); @@ -11724,9 +11780,6 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, if (tg3_flag(tp, NO_NVRAM)) return -EINVAL; - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) - return -EAGAIN; - offset = eeprom->offset; len = eeprom->len; eeprom->len = 0; @@ -11784,9 +11837,6 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *buf; __be32 start, end; - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) - return -EAGAIN; - if (tg3_flag(tp, NO_NVRAM) || eeprom->magic != TG3_EEPROM_MAGIC) return -EINVAL; @@ -13515,7 +13565,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, tg3_phy_start(tp); } if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) - tg3_power_down(tp); + tg3_power_down_prepare(tp); } @@ -17547,11 +17597,6 @@ static int tg3_init_one(struct pci_dev *pdev, tg3_asic_rev(tp) == ASIC_REV_5762) tg3_flag_set(tp, PTP_CAPABLE); - if (tg3_flag(tp, 5717_PLUS)) { - /* Resume a low-power mode */ - tg3_frob_aux_power(tp, false); - } - tg3_timer_init(tp); tg3_carrier_off(tp); @@ -17755,6 +17800,23 @@ out: static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume); +static void tg3_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(dev); + + rtnl_lock(); + netif_device_detach(dev); + + if (netif_running(dev)) + dev_close(dev); + + if (system_state == SYSTEM_POWER_OFF) + tg3_power_down(tp); + + rtnl_unlock(); +} + /** * tg3_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device @@ -17911,6 +17973,7 @@ static struct pci_driver tg3_driver = { .remove = tg3_remove_one, .err_handler = &tg3_err_handler, .driver.pm = &tg3_pm_ops, + .shutdown = tg3_shutdown, }; module_pci_driver(tg3_driver); diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index cd63d1189aae..ddb8be1298ea 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -532,6 +532,7 @@ #define RX_MODE_RSS_ITBL_HASH_BITS_7 0x00700000 #define RX_MODE_RSS_ENABLE 0x00800000 #define RX_MODE_IPV6_CSUM_ENABLE 0x01000000 +#define RX_MODE_IPV4_FRAG_FIX 0x02000000 #define MAC_RX_STATUS 0x0000046c #define 
RX_STATUS_REMOTE_TX_XOFFED 0x00000001 #define RX_STATUS_XOFF_RCVD 0x00000002 @@ -1818,12 +1819,21 @@ #define TG3_EAV_REF_CLCK_CTL 0x00006908 #define TG3_EAV_REF_CLCK_CTL_STOP 0x00000002 #define TG3_EAV_REF_CLCK_CTL_RESUME 0x00000004 +#define TG3_EAV_CTL_TSYNC_GPIO_MASK (0x3 << 16) +#define TG3_EAV_CTL_TSYNC_WDOG0 (1 << 17) + +#define TG3_EAV_WATCHDOG0_LSB 0x00006918 +#define TG3_EAV_WATCHDOG0_MSB 0x0000691c +#define TG3_EAV_WATCHDOG0_EN (1 << 31) +#define TG3_EAV_WATCHDOG_MSB_MASK 0x7fffffff + #define TG3_EAV_REF_CLK_CORRECT_CTL 0x00006928 #define TG3_EAV_REF_CLK_CORRECT_EN (1 << 31) #define TG3_EAV_REF_CLK_CORRECT_NEG (1 << 30) #define TG3_EAV_REF_CLK_CORRECT_MASK 0xffffff -/* 0x690c --> 0x7000 unused */ + +/* 0x692c --> 0x7000 unused */ /* NVRAM Control registers */ #define NVRAM_CMD 0x00007000 diff --git a/drivers/net/ethernet/cisco/enic/Makefile b/drivers/net/ethernet/cisco/enic/Makefile index 9d4974bba247..e52296d9b256 100644 --- a/drivers/net/ethernet/cisco/enic/Makefile +++ b/drivers/net/ethernet/cisco/enic/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_ENIC) := enic.o enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \ - enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o + enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \ + enic_ethtool.o diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index afe9b1662b8c..2e37c63981c1 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -127,9 +127,57 @@ static inline struct device *enic_get_dev(struct enic *enic) return &(enic->pdev->dev); } +static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq) +{ + return rq; +} + +static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq) +{ + return enic->rq_count + wq; +} + +static inline unsigned int enic_legacy_io_intr(void) +{ + return 0; +} + +static inline unsigned int enic_legacy_err_intr(void) +{ + return 1; +} + +static inline unsigned int enic_legacy_notify_intr(void) +{ + return 2; +} + +static inline unsigned int enic_msix_rq_intr(struct enic *enic, + unsigned int rq) +{ + return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset; +} + +static inline unsigned int enic_msix_wq_intr(struct enic *enic, + unsigned int wq) +{ + return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset; +} + +static inline unsigned int enic_msix_err_intr(struct enic *enic) +{ + return enic->rq_count + enic->wq_count; +} + +static inline unsigned int enic_msix_notify_intr(struct enic *enic) +{ + return enic->rq_count + enic->wq_count + 1; +} + void enic_reset_addr_lists(struct enic *enic); int enic_sriov_enabled(struct enic *enic); int enic_is_valid_vf(struct enic *enic, int vf); int enic_is_dynamic(struct enic *enic); +void enic_set_ethtool_ops(struct net_device *netdev); #endif /* _ENIC_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h index 08bded051b93..129b14a4efb0 100644 --- a/drivers/net/ethernet/cisco/enic/enic_dev.h +++ b/drivers/net/ethernet/cisco/enic/enic_dev.h @@ -20,6 +20,7 @@ #define _ENIC_DEV_H_ #include "vnic_dev.h" +#include "vnic_vic.h" /* * Calls the devcmd function given by argument vnicdevcmdfn. diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c new file mode 100644 index 000000000000..47e3562f4866 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -0,0 +1,257 @@ +/** + * Copyright 2013 Cisco Systems, Inc. All rights reserved. 
+ * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include <linux/netdevice.h> +#include <linux/ethtool.h> + +#include "enic_res.h" +#include "enic.h" +#include "enic_dev.h" + +struct enic_stat { + char name[ETH_GSTRING_LEN]; + unsigned int index; +}; + +#define ENIC_TX_STAT(stat) { \ + .name = #stat, \ + .index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \ +} + +#define ENIC_RX_STAT(stat) { \ + .name = #stat, \ + .index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \ +} + +static const struct enic_stat enic_tx_stats[] = { + ENIC_TX_STAT(tx_frames_ok), + ENIC_TX_STAT(tx_unicast_frames_ok), + ENIC_TX_STAT(tx_multicast_frames_ok), + ENIC_TX_STAT(tx_broadcast_frames_ok), + ENIC_TX_STAT(tx_bytes_ok), + ENIC_TX_STAT(tx_unicast_bytes_ok), + ENIC_TX_STAT(tx_multicast_bytes_ok), + ENIC_TX_STAT(tx_broadcast_bytes_ok), + ENIC_TX_STAT(tx_drops), + ENIC_TX_STAT(tx_errors), + ENIC_TX_STAT(tx_tso), +}; + +static const struct enic_stat enic_rx_stats[] = { + ENIC_RX_STAT(rx_frames_ok), + ENIC_RX_STAT(rx_frames_total), + ENIC_RX_STAT(rx_unicast_frames_ok), + ENIC_RX_STAT(rx_multicast_frames_ok), + ENIC_RX_STAT(rx_broadcast_frames_ok), + ENIC_RX_STAT(rx_bytes_ok), + ENIC_RX_STAT(rx_unicast_bytes_ok), + ENIC_RX_STAT(rx_multicast_bytes_ok), + ENIC_RX_STAT(rx_broadcast_bytes_ok), + ENIC_RX_STAT(rx_drop), + ENIC_RX_STAT(rx_no_bufs), + ENIC_RX_STAT(rx_errors), + ENIC_RX_STAT(rx_rss), + ENIC_RX_STAT(rx_crc_errors), + ENIC_RX_STAT(rx_frames_64), + ENIC_RX_STAT(rx_frames_127), + ENIC_RX_STAT(rx_frames_255), + ENIC_RX_STAT(rx_frames_511), + ENIC_RX_STAT(rx_frames_1023), + ENIC_RX_STAT(rx_frames_1518), + ENIC_RX_STAT(rx_frames_to_max), +}; + +static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats); +static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats); + +static int enic_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct enic *enic = netdev_priv(netdev); + + ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); + ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + + if (netif_carrier_ok(netdev)) { + ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev)); + ecmd->duplex = DUPLEX_FULL; + } else { + ethtool_cmd_speed_set(ecmd, -1); + ecmd->duplex = -1; + } + + ecmd->autoneg = AUTONEG_DISABLE; + + return 0; +} + +static void enic_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct enic *enic = netdev_priv(netdev); + struct vnic_devcmd_fw_info *fw_info; + + enic_dev_fw_info(enic, &fw_info); + + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, fw_info->fw_version, + sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(enic->pdev), + 
sizeof(drvinfo->bus_info)); +} + +static void enic_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + unsigned int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < enic_n_tx_stats; i++) { + memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + for (i = 0; i < enic_n_rx_stats; i++) { + memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + } +} + +static int enic_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return enic_n_tx_stats + enic_n_rx_stats; + default: + return -EOPNOTSUPP; + } +} + +static void enic_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct enic *enic = netdev_priv(netdev); + struct vnic_stats *vstats; + unsigned int i; + + enic_dev_stats_dump(enic, &vstats); + + for (i = 0; i < enic_n_tx_stats; i++) + *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index]; + for (i = 0; i < enic_n_rx_stats; i++) + *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index]; +} + +static u32 enic_get_msglevel(struct net_device *netdev) +{ + struct enic *enic = netdev_priv(netdev); + return enic->msg_enable; +} + +static void enic_set_msglevel(struct net_device *netdev, u32 value) +{ + struct enic *enic = netdev_priv(netdev); + enic->msg_enable = value; +} + +static int enic_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ecmd) +{ + struct enic *enic = netdev_priv(netdev); + + ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs; + ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs; + + return 0; +} + +static int enic_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ecmd) +{ + struct enic *enic = netdev_priv(netdev); + u32 tx_coalesce_usecs; + u32 rx_coalesce_usecs; + unsigned int i, intr; + + tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs, + vnic_dev_get_intr_coal_timer_max(enic->vdev)); + rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs, + vnic_dev_get_intr_coal_timer_max(enic->vdev)); + + switch (vnic_dev_get_intr_mode(enic->vdev)) { + case VNIC_DEV_INTR_MODE_INTX: + if (tx_coalesce_usecs != rx_coalesce_usecs) + return -EINVAL; + + intr = enic_legacy_io_intr(); + vnic_intr_coalescing_timer_set(&enic->intr[intr], + tx_coalesce_usecs); + break; + case VNIC_DEV_INTR_MODE_MSI: + if (tx_coalesce_usecs != rx_coalesce_usecs) + return -EINVAL; + + vnic_intr_coalescing_timer_set(&enic->intr[0], + tx_coalesce_usecs); + break; + case VNIC_DEV_INTR_MODE_MSIX: + for (i = 0; i < enic->wq_count; i++) { + intr = enic_msix_wq_intr(enic, i); + vnic_intr_coalescing_timer_set(&enic->intr[intr], + tx_coalesce_usecs); + } + + for (i = 0; i < enic->rq_count; i++) { + intr = enic_msix_rq_intr(enic, i); + vnic_intr_coalescing_timer_set(&enic->intr[intr], + rx_coalesce_usecs); + } + + break; + default: + break; + } + + enic->tx_coalesce_usecs = tx_coalesce_usecs; + enic->rx_coalesce_usecs = rx_coalesce_usecs; + + return 0; +} + +static const struct ethtool_ops enic_ethtool_ops = { + .get_settings = enic_get_settings, + .get_drvinfo = enic_get_drvinfo, + .get_msglevel = enic_get_msglevel, + .set_msglevel = enic_set_msglevel, + .get_link = ethtool_op_get_link, + .get_strings = enic_get_strings, + .get_sset_count = enic_get_sset_count, + .get_ethtool_stats = enic_get_ethtool_stats, + .get_coalesce = enic_get_coalesce, + .set_coalesce = enic_set_coalesce, +}; + +void enic_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &enic_ethtool_ops); +} 
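The new enic_ethtool.c above carries over the driver's offsetof-based stat tables: each ENIC_TX_STAT/ENIC_RX_STAT entry records the position of a counter inside the firmware stats structure in u64-sized units, and enic_get_ethtool_stats() later walks that structure as a flat u64 array using those indices. A minimal standalone sketch of the pattern follows; demo_tx_stats and demo_stat are hypothetical stand-ins, not the real vnic_tx_stats layout:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in for struct vnic_tx_stats: every field is a u64. */
struct demo_tx_stats {
	uint64_t tx_frames_ok;
	uint64_t tx_bytes_ok;
	uint64_t tx_drops;
};

/* Mirrors struct enic_stat: a name plus an index in u64 units. */
struct demo_stat {
	const char *name;
	unsigned int index;
};

#define DEMO_TX_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct demo_tx_stats, stat) / sizeof(uint64_t) \
}

static const struct demo_stat demo_tx_stats_tbl[] = {
	DEMO_TX_STAT(tx_frames_ok),
	DEMO_TX_STAT(tx_bytes_ok),
	DEMO_TX_STAT(tx_drops),
};

int main(void)
{
	struct demo_tx_stats hw = { 100, 64000, 2 }; /* pretend firmware dump */
	const uint64_t *flat = (const uint64_t *)&hw;
	size_t i, n = sizeof(demo_tx_stats_tbl) / sizeof(demo_tx_stats_tbl[0]);

	/* Same walk enic_get_ethtool_stats() performs over vstats->tx. */
	for (i = 0; i < n; i++)
		printf("%s: %llu\n", demo_tx_stats_tbl[i].name,
		       (unsigned long long)flat[demo_tx_stats_tbl[i].index]);
	return 0;
}

Note the new macros divide by sizeof(u64) where the deleted enic_main.c versions hard-coded "/ 8"; the result is identical here, but spelling out the element size makes the intent of the index explicit.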
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 992ec2ee64d9..b12b32bc53a6 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -31,7 +31,6 @@ #include <linux/if.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> -#include <linux/ethtool.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/ipv6.h> @@ -73,57 +72,6 @@ MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, enic_id_table); -struct enic_stat { - char name[ETH_GSTRING_LEN]; - unsigned int offset; -}; - -#define ENIC_TX_STAT(stat) \ - { .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 } -#define ENIC_RX_STAT(stat) \ - { .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 } - -static const struct enic_stat enic_tx_stats[] = { - ENIC_TX_STAT(tx_frames_ok), - ENIC_TX_STAT(tx_unicast_frames_ok), - ENIC_TX_STAT(tx_multicast_frames_ok), - ENIC_TX_STAT(tx_broadcast_frames_ok), - ENIC_TX_STAT(tx_bytes_ok), - ENIC_TX_STAT(tx_unicast_bytes_ok), - ENIC_TX_STAT(tx_multicast_bytes_ok), - ENIC_TX_STAT(tx_broadcast_bytes_ok), - ENIC_TX_STAT(tx_drops), - ENIC_TX_STAT(tx_errors), - ENIC_TX_STAT(tx_tso), -}; - -static const struct enic_stat enic_rx_stats[] = { - ENIC_RX_STAT(rx_frames_ok), - ENIC_RX_STAT(rx_frames_total), - ENIC_RX_STAT(rx_unicast_frames_ok), - ENIC_RX_STAT(rx_multicast_frames_ok), - ENIC_RX_STAT(rx_broadcast_frames_ok), - ENIC_RX_STAT(rx_bytes_ok), - ENIC_RX_STAT(rx_unicast_bytes_ok), - ENIC_RX_STAT(rx_multicast_bytes_ok), - ENIC_RX_STAT(rx_broadcast_bytes_ok), - ENIC_RX_STAT(rx_drop), - ENIC_RX_STAT(rx_no_bufs), - ENIC_RX_STAT(rx_errors), - ENIC_RX_STAT(rx_rss), - ENIC_RX_STAT(rx_crc_errors), - ENIC_RX_STAT(rx_frames_64), - ENIC_RX_STAT(rx_frames_127), - ENIC_RX_STAT(rx_frames_255), - ENIC_RX_STAT(rx_frames_511), - ENIC_RX_STAT(rx_frames_1023), - ENIC_RX_STAT(rx_frames_1518), - ENIC_RX_STAT(rx_frames_to_max), -}; - -static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats); -static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats); - int enic_is_dynamic(struct enic *enic) { return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN; @@ -148,222 +96,6 @@ int enic_is_valid_vf(struct enic *enic, int vf) #endif } -static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq) -{ - return rq; -} - -static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq) -{ - return enic->rq_count + wq; -} - -static inline unsigned int enic_legacy_io_intr(void) -{ - return 0; -} - -static inline unsigned int enic_legacy_err_intr(void) -{ - return 1; -} - -static inline unsigned int enic_legacy_notify_intr(void) -{ - return 2; -} - -static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq) -{ - return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset; -} - -static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq) -{ - return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset; -} - -static inline unsigned int enic_msix_err_intr(struct enic *enic) -{ - return enic->rq_count + enic->wq_count; -} - -static inline unsigned int enic_msix_notify_intr(struct enic *enic) -{ - return enic->rq_count + enic->wq_count + 1; -} - -static int enic_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) -{ - struct enic *enic = netdev_priv(netdev); - - ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); - ecmd->advertising = (ADVERTISED_10000baseT_Full | 
ADVERTISED_FIBRE); - ecmd->port = PORT_FIBRE; - ecmd->transceiver = XCVR_EXTERNAL; - - if (netif_carrier_ok(netdev)) { - ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev)); - ecmd->duplex = DUPLEX_FULL; - } else { - ethtool_cmd_speed_set(ecmd, -1); - ecmd->duplex = -1; - } - - ecmd->autoneg = AUTONEG_DISABLE; - - return 0; -} - -static void enic_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - struct enic *enic = netdev_priv(netdev); - struct vnic_devcmd_fw_info *fw_info; - - enic_dev_fw_info(enic, &fw_info); - - strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); - strlcpy(drvinfo->fw_version, fw_info->fw_version, - sizeof(drvinfo->fw_version)); - strlcpy(drvinfo->bus_info, pci_name(enic->pdev), - sizeof(drvinfo->bus_info)); -} - -static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data) -{ - unsigned int i; - - switch (stringset) { - case ETH_SS_STATS: - for (i = 0; i < enic_n_tx_stats; i++) { - memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; - } - for (i = 0; i < enic_n_rx_stats; i++) { - memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; - } - break; - } -} - -static int enic_get_sset_count(struct net_device *netdev, int sset) -{ - switch (sset) { - case ETH_SS_STATS: - return enic_n_tx_stats + enic_n_rx_stats; - default: - return -EOPNOTSUPP; - } -} - -static void enic_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) -{ - struct enic *enic = netdev_priv(netdev); - struct vnic_stats *vstats; - unsigned int i; - - enic_dev_stats_dump(enic, &vstats); - - for (i = 0; i < enic_n_tx_stats; i++) - *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset]; - for (i = 0; i < enic_n_rx_stats; i++) - *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset]; -} - -static u32 enic_get_msglevel(struct net_device *netdev) -{ - struct enic *enic = netdev_priv(netdev); - return enic->msg_enable; -} - -static void enic_set_msglevel(struct net_device *netdev, u32 value) -{ - struct enic *enic = netdev_priv(netdev); - enic->msg_enable = value; -} - -static int enic_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ecmd) -{ - struct enic *enic = netdev_priv(netdev); - - ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs; - ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs; - - return 0; -} - -static int enic_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ecmd) -{ - struct enic *enic = netdev_priv(netdev); - u32 tx_coalesce_usecs; - u32 rx_coalesce_usecs; - unsigned int i, intr; - - tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs, - vnic_dev_get_intr_coal_timer_max(enic->vdev)); - rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs, - vnic_dev_get_intr_coal_timer_max(enic->vdev)); - - switch (vnic_dev_get_intr_mode(enic->vdev)) { - case VNIC_DEV_INTR_MODE_INTX: - if (tx_coalesce_usecs != rx_coalesce_usecs) - return -EINVAL; - - intr = enic_legacy_io_intr(); - vnic_intr_coalescing_timer_set(&enic->intr[intr], - tx_coalesce_usecs); - break; - case VNIC_DEV_INTR_MODE_MSI: - if (tx_coalesce_usecs != rx_coalesce_usecs) - return -EINVAL; - - vnic_intr_coalescing_timer_set(&enic->intr[0], - tx_coalesce_usecs); - break; - case VNIC_DEV_INTR_MODE_MSIX: - for (i = 0; i < enic->wq_count; i++) { - intr = enic_msix_wq_intr(enic, i); - vnic_intr_coalescing_timer_set(&enic->intr[intr], - tx_coalesce_usecs); - } - - for (i = 0; i < 
enic->rq_count; i++) { - intr = enic_msix_rq_intr(enic, i); - vnic_intr_coalescing_timer_set(&enic->intr[intr], - rx_coalesce_usecs); - } - - break; - default: - break; - } - - enic->tx_coalesce_usecs = tx_coalesce_usecs; - enic->rx_coalesce_usecs = rx_coalesce_usecs; - - return 0; -} - -static const struct ethtool_ops enic_ethtool_ops = { - .get_settings = enic_get_settings, - .get_drvinfo = enic_get_drvinfo, - .get_msglevel = enic_get_msglevel, - .set_msglevel = enic_set_msglevel, - .get_link = ethtool_op_get_link, - .get_strings = enic_get_strings, - .get_sset_count = enic_get_sset_count, - .get_ethtool_stats = enic_get_ethtool_stats, - .get_coalesce = enic_get_coalesce, - .set_coalesce = enic_set_coalesce, -}; - static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) { struct enic *enic = vnic_dev_priv(wq->vdev); @@ -2496,7 +2228,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->netdev_ops = &enic_netdev_ops; netdev->watchdog_timeo = 2 * HZ; - netdev->ethtool_ops = &enic_ethtool_ops; + enic_set_ethtool_ops(netdev); netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; if (ENIC_SETTING(enic, LOOP)) { diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index c94152f1c6be..4e8cfa2ac803 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -1304,7 +1304,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { struct tulip_private *tp; /* See note below on the multiport cards. */ - static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'}; + static unsigned char last_phys_addr[ETH_ALEN] = { + 0x00, 'L', 'i', 'n', 'u', 'x' + }; static int last_irq; static int multiport_cnt; /* For four-port boards w/one EEPROM */ int i, irq; @@ -1627,8 +1629,8 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->dev_addr[i] = last_phys_addr[i] + 1; #if defined(CONFIG_SPARC) addr = of_get_property(dp, "local-mac-address", &len); - if (addr && len == 6) - memcpy(dev->dev_addr, addr, 6); + if (addr && len == ETH_ALEN) + memcpy(dev->dev_addr, addr, ETH_ALEN); #endif #if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. 
*/ if (last_irq) diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 6e6e0a117ee2..613d8879b345 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1339,6 +1339,10 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, if (!status) { struct be_cmd_resp_if_create *resp = embedded_payload(wrb); *if_handle = le32_to_cpu(resp->interface_id); + + /* Hack to retrieve VF's pmac-id on BE3 */ + if (BE3_chip(adapter) && !be_physfn(adapter)) + adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id); } err: @@ -2606,9 +2610,44 @@ err: return status; } -/* Uses synchronous MCCQ */ +/* Set privilege(s) for a function */ +int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, + u32 domain) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_set_fn_privileges *req; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = embedded_payload(wrb); + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req), + wrb, NULL); + req->hdr.domain = domain; + if (lancer_chip(adapter)) + req->privileges_lancer = cpu_to_le32(privileges); + else + req->privileges = cpu_to_le32(privileges); + + status = be_mcc_notify_wait(adapter); +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +/* pmac_id_valid: true => pmac_id is supplied and MAC address is requested. + * pmac_id_valid: false => pmac_id or MAC address is requested. + * If pmac_id is returned, pmac_id_valid is returned as true + */ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, - bool *pmac_id_active, u32 *pmac_id, u8 domain) + bool *pmac_id_valid, u32 *pmac_id, u8 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_mac_list *req; @@ -2644,12 +2683,25 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, get_mac_list_cmd.size, wrb, &get_mac_list_cmd); req->hdr.domain = domain; req->mac_type = MAC_ADDRESS_TYPE_NETWORK; - req->perm_override = 1; + if (*pmac_id_valid) { + req->mac_id = cpu_to_le32(*pmac_id); + req->iface_id = cpu_to_le16(adapter->if_handle); + req->perm_override = 0; + } else { + req->perm_override = 1; + } status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_mac_list *resp = get_mac_list_cmd.va; + + if (*pmac_id_valid) { + memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr, + ETH_ALEN); + goto out; + } + mac_count = resp->true_mac_count + resp->pseudo_mac_count; /* Mac list returned could contain one or more active mac_ids * or one or more true or pseudo permanent mac addresses. 
@@ -2667,14 +2719,14 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, * is 6 bytes */ if (mac_addr_size == sizeof(u32)) { - *pmac_id_active = true; + *pmac_id_valid = true; mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id; *pmac_id = le32_to_cpu(mac_id); goto out; } } /* If no active mac_id found, return first mac addr */ - *pmac_id_active = false; + *pmac_id_valid = false; memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr, ETH_ALEN); } @@ -2686,6 +2738,41 @@ out: return status; } +int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac) +{ + bool active = true; + + if (BEx_chip(adapter)) + return be_cmd_mac_addr_query(adapter, mac, false, + adapter->if_handle, curr_pmac_id); + else + /* Fetch the MAC address using pmac_id */ + return be_cmd_get_mac_from_list(adapter, mac, &active, + &curr_pmac_id, 0); +} + +int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac) +{ + int status; + bool pmac_valid = false; + + memset(mac, 0, ETH_ALEN); + + if (BEx_chip(adapter)) { + if (be_physfn(adapter)) + status = be_cmd_mac_addr_query(adapter, mac, true, 0, + 0); + else + status = be_cmd_mac_addr_query(adapter, mac, false, + adapter->if_handle, 0); + } else { + status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid, + NULL, 0); + } + + return status; +} + /* Uses synchronous MCCQ */ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count, u32 domain) @@ -2729,6 +2816,25 @@ err: return status; } +/* Wrapper to delete any active MACs and provision the new mac. + * Changes to MAC_LIST are allowed iff none of the MAC addresses in the + * current list are active. + */ +int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom) +{ + bool active_mac = false; + u8 old_mac[ETH_ALEN]; + u32 pmac_id; + int status; + + status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac, + &pmac_id, dom); + if (!status && active_mac) + be_cmd_pmac_del(adapter, if_id, pmac_id, dom); + + return be_cmd_set_mac_list(adapter, mac, mac ? 
1 : 0, dom); +} + int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain, u16 intf_id) { diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 5228d88c5a02..eb541f0dfee8 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -202,6 +202,7 @@ struct be_mcc_mailbox { #define OPCODE_COMMON_READ_TRANSRECV_DATA 73 #define OPCODE_COMMON_GET_PORT_NAME 77 #define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89 +#define OPCODE_COMMON_SET_FN_PRIVILEGES 100 #define OPCODE_COMMON_GET_PHY_DETAILS 102 #define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103 #define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121 @@ -1474,6 +1475,11 @@ struct be_cmd_resp_get_fn_privileges { u32 privilege_mask; }; +struct be_cmd_req_set_fn_privileges { + struct be_cmd_req_hdr hdr; + u32 privileges; /* Used by BE3, SH-R */ + u32 privileges_lancer; /* Used by Lancer */ +}; /******************** GET/SET_MACLIST **************************/ #define BE_MAX_MAC 64 @@ -1921,11 +1927,18 @@ extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size); extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); extern int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, u32 domain); +extern int be_cmd_set_fn_privileges(struct be_adapter *adapter, + u32 privileges, u32 vf_num); extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, bool *pmac_id_active, u32 *pmac_id, u8 domain); +extern int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, + u8 *mac); +extern int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac); extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count, u32 domain); +extern int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, + u32 dom); extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain, u16 intf_id); extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 181edb522450..3df15033b91a 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -247,54 +247,54 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped) static int be_mac_addr_set(struct net_device *netdev, void *p) { struct be_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->pdev->dev; struct sockaddr *addr = p; - int status = 0; - u8 current_mac[ETH_ALEN]; - u32 pmac_id = adapter->pmac_id[0]; - bool active_mac = true; + int status; + u8 mac[ETH_ALEN]; + u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - /* For BE VF, MAC address is already activated by PF. - * Hence only operation left is updating netdev->devaddr. - * Update it if user is passing the same MAC which was used - * during configuring VF MAC from PF(Hypervisor). + /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT + * privilege or if PF did not provision the new MAC address. + * On BE3, this cmd will always fail if the VF doesn't have the + * FILTMGMT privilege. This failure is OK only if the PF programmed + * the MAC for the VF. 
*/ - if (!lancer_chip(adapter) && !be_physfn(adapter)) { - status = be_cmd_mac_addr_query(adapter, current_mac, - false, adapter->if_handle, 0); - if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN)) - goto done; - else - goto err; - } + status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data, + adapter->if_handle, &adapter->pmac_id[0], 0); + if (!status) { + curr_pmac_id = adapter->pmac_id[0]; - if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN)) - goto done; + /* Delete the old programmed MAC. This call may fail if the + * old MAC was already deleted by the PF driver. + */ + if (adapter->pmac_id[0] != old_pmac_id) + be_cmd_pmac_del(adapter, adapter->if_handle, + old_pmac_id, 0); + } - /* For Lancer check if any MAC is active. - * If active, get its mac id. + /* Decide if the new MAC is successfully activated only after + * querying the FW */ - if (lancer_chip(adapter) && !be_physfn(adapter)) - be_cmd_get_mac_from_list(adapter, current_mac, &active_mac, - &pmac_id, 0); - - status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data, - adapter->if_handle, - &adapter->pmac_id[0], 0); - + status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac); if (status) goto err; - if (active_mac) - be_cmd_pmac_del(adapter, adapter->if_handle, - pmac_id, 0); -done: + /* The MAC change did not happen, either due to lack of privilege + * or PF didn't pre-provision. + */ + if (memcmp(addr->sa_data, mac, ETH_ALEN)) { + status = -EPERM; + goto err; + } + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + dev_info(dev, "MAC address changed to %pM\n", mac); return 0; err: - dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data); + dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data); return status; } @@ -1146,9 +1146,6 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) struct be_adapter *adapter = netdev_priv(netdev); struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; int status; - bool active_mac = false; - u32 pmac_id; - u8 old_mac[ETH_ALEN]; if (!sriov_enabled(adapter)) return -EPERM; @@ -1156,20 +1153,15 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs) return -EINVAL; - if (lancer_chip(adapter)) { - status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac, - &pmac_id, vf + 1); - if (!status && active_mac) - be_cmd_pmac_del(adapter, vf_cfg->if_handle, - pmac_id, vf + 1); - - status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1); - } else { - status = be_cmd_pmac_del(adapter, vf_cfg->if_handle, - vf_cfg->pmac_id, vf + 1); + if (BEx_chip(adapter)) { + be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id, + vf + 1); status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle, &vf_cfg->pmac_id, vf + 1); + } else { + status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle, + vf + 1); } if (status) @@ -2735,13 +2727,13 @@ static int be_vf_eth_addr_config(struct be_adapter *adapter) be_vf_eth_addr_generate(adapter, mac); for_all_vfs(adapter, vf_cfg, vf) { - if (lancer_chip(adapter)) { - status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1); - } else { + if (BEx_chip(adapter)) status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle, &vf_cfg->pmac_id, vf + 1); - } + else + status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle, + vf + 1); if (status) dev_err(&adapter->pdev->dev, @@ -2759,7 +2751,7 @@ static int be_vfs_mac_query(struct be_adapter *adapter) int status, vf; u8 mac[ETH_ALEN]; struct be_vf_cfg *vf_cfg; - bool active; + bool active = false; 
for_all_vfs(adapter, vf_cfg, vf) { be_cmd_get_mac_from_list(adapter, mac, &active, @@ -2788,11 +2780,12 @@ static void be_vf_clear(struct be_adapter *adapter) pci_disable_sriov(adapter->pdev); for_all_vfs(adapter, vf_cfg, vf) { - if (lancer_chip(adapter)) - be_cmd_set_mac_list(adapter, NULL, 0, vf + 1); - else + if (BEx_chip(adapter)) be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id, vf + 1); + else + be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle, + vf + 1); be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1); } @@ -2803,7 +2796,7 @@ done: static int be_clear(struct be_adapter *adapter) { - int i = 1; + int i; if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) { cancel_delayed_work_sync(&adapter->work); @@ -2813,9 +2806,11 @@ static int be_clear(struct be_adapter *adapter) if (sriov_enabled(adapter)) be_vf_clear(adapter); - for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) + /* delete the primary mac along with the uc-mac list */ + for (i = 0; i < (adapter->uc_macs + 1); i++) be_cmd_pmac_del(adapter, adapter->if_handle, - adapter->pmac_id[i], 0); + adapter->pmac_id[i], 0); + adapter->uc_macs = 0; be_cmd_if_destroy(adapter, adapter->if_handle, 0); @@ -2880,6 +2875,7 @@ static int be_vf_setup(struct be_adapter *adapter) u16 def_vlan, lnk_speed; int status, old_vfs, vf; struct device *dev = &adapter->pdev->dev; + u32 privileges; old_vfs = pci_num_vf(adapter->pdev); if (old_vfs) { @@ -2923,6 +2919,18 @@ static int be_vf_setup(struct be_adapter *adapter) } for_all_vfs(adapter, vf_cfg, vf) { + /* Allow VFs to program MAC/VLAN filters */ + status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1); + if (!status && !(privileges & BE_PRIV_FILTMGMT)) { + status = be_cmd_set_fn_privileges(adapter, + privileges | + BE_PRIV_FILTMGMT, + vf + 1); + if (!status) + dev_info(dev, "VF%d has FILTMGMT privilege\n", + vf); + } + /* BE3 FW, by default, caps VF TX-rate to 100mbps. 
* Allow full available bandwidth */ @@ -2971,41 +2979,6 @@ static void be_setup_init(struct be_adapter *adapter) adapter->cmd_privileges = MIN_PRIVILEGES; } -static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle, - bool *active_mac, u32 *pmac_id) -{ - int status = 0; - - if (!is_zero_ether_addr(adapter->netdev->perm_addr)) { - memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN); - if (!lancer_chip(adapter) && !be_physfn(adapter)) - *active_mac = true; - else - *active_mac = false; - - return status; - } - - if (lancer_chip(adapter)) { - status = be_cmd_get_mac_from_list(adapter, mac, - active_mac, pmac_id, 0); - if (*active_mac) { - status = be_cmd_mac_addr_query(adapter, mac, false, - if_handle, *pmac_id); - } - } else if (be_physfn(adapter)) { - /* For BE3, for PF get permanent MAC */ - status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0); - *active_mac = false; - } else { - /* For BE3, for VF get soft MAC assigned by PF*/ - status = be_cmd_mac_addr_query(adapter, mac, false, - if_handle, 0); - *active_mac = true; - } - return status; -} - static void be_get_resources(struct be_adapter *adapter) { u16 dev_num_vfs; @@ -3111,14 +3084,38 @@ err: return status; } +static int be_mac_setup(struct be_adapter *adapter) +{ + u8 mac[ETH_ALEN]; + int status; + + if (is_zero_ether_addr(adapter->netdev->dev_addr)) { + status = be_cmd_get_perm_mac(adapter, mac); + if (status) + return status; + + memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); + memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); + } else { + /* Maybe the HW was reset; dev_addr must be re-programmed */ + memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN); + } + + /* On BE3 VFs this cmd may fail due to lack of privilege. + * Ignore the failure as in this case pmac_id is fetched + * in the IFACE_CREATE cmd. 
+ */ + be_cmd_pmac_add(adapter, mac, adapter->if_handle, + &adapter->pmac_id[0], 0); + return 0; +} + static int be_setup(struct be_adapter *adapter) { struct device *dev = &adapter->pdev->dev; u32 en_flags; u32 tx_fc, rx_fc; int status; - u8 mac[ETH_ALEN]; - bool active_mac; be_setup_init(adapter); @@ -3158,36 +3155,18 @@ static int be_setup(struct be_adapter *adapter) en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS; - if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) en_flags |= BE_IF_FLAGS_RSS; - en_flags = en_flags & adapter->if_cap_flags; - status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags, &adapter->if_handle, 0); if (status != 0) goto err; - memset(mac, 0, ETH_ALEN); - active_mac = false; - status = be_get_mac_addr(adapter, mac, adapter->if_handle, - &active_mac, &adapter->pmac_id[0]); - if (status != 0) + status = be_mac_setup(adapter); + if (status) goto err; - if (!active_mac) { - status = be_cmd_pmac_add(adapter, mac, adapter->if_handle, - &adapter->pmac_id[0], 0); - if (status != 0) - goto err; - } - - if (is_zero_ether_addr(adapter->netdev->dev_addr)) { - memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); - memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); - } - status = be_tx_qs_create(adapter); if (status) goto err; @@ -4253,7 +4232,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) status = pci_enable_pcie_error_reporting(pdev); if (status) - dev_err(&pdev->dev, "Could not use PCIe error reporting\n"); + dev_info(&pdev->dev, "Could not use PCIe error reporting\n"); status = be_ctrl_init(adapter); if (status) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 77ea0db0bbfc..fdf9307ba9e6 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2056,10 +2056,6 @@ fec_probe(struct platform_device *pdev) if (of_id) pdev->id_entry = of_id->data; - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r) - return -ENXIO; - /* Init network device */ ndev = alloc_etherdev(sizeof(struct fec_enet_private)); if (!ndev) @@ -2077,6 +2073,7 @@ fec_probe(struct platform_device *pdev) fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; #endif + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); fep->hwp = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(fep->hwp)) { ret = PTR_ERR(fep->hwp); @@ -2126,10 +2123,25 @@ fec_probe(struct platform_device *pdev) fep->bufdesc_ex = 0; } - clk_prepare_enable(fep->clk_ahb); - clk_prepare_enable(fep->clk_ipg); - clk_prepare_enable(fep->clk_enet_out); - clk_prepare_enable(fep->clk_ptp); + ret = clk_prepare_enable(fep->clk_ahb); + if (ret) + goto failed_clk; + + ret = clk_prepare_enable(fep->clk_ipg); + if (ret) + goto failed_clk_ipg; + + if (fep->clk_enet_out) { + ret = clk_prepare_enable(fep->clk_enet_out); + if (ret) + goto failed_clk_enet_out; + } + + if (fep->clk_ptp) { + ret = clk_prepare_enable(fep->clk_ptp); + if (ret) + goto failed_clk_ptp; + } fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); if (!IS_ERR(fep->reg_phy)) { @@ -2160,14 +2172,10 @@ fec_probe(struct platform_device *pdev) ret = irq; goto failed_irq; } - ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev); - if (ret) { - while (--i >= 0) { - irq = platform_get_irq(pdev, i); - free_irq(irq, ndev); - } + ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt, + IRQF_DISABLED, pdev->name, ndev); + if (ret) goto failed_irq; - } } ret = 
fec_enet_mii_init(pdev); @@ -2191,19 +2199,19 @@ failed_register: fec_enet_mii_remove(fep); failed_mii_init: failed_irq: - for (i = 0; i < FEC_IRQ_NUM; i++) { - irq = platform_get_irq(pdev, i); - if (irq > 0) - free_irq(irq, ndev); - } failed_init: if (fep->reg_phy) regulator_disable(fep->reg_phy); failed_regulator: - clk_disable_unprepare(fep->clk_ahb); + if (fep->clk_ptp) + clk_disable_unprepare(fep->clk_ptp); +failed_clk_ptp: + if (fep->clk_enet_out) + clk_disable_unprepare(fep->clk_enet_out); +failed_clk_enet_out: clk_disable_unprepare(fep->clk_ipg); - clk_disable_unprepare(fep->clk_enet_out); - clk_disable_unprepare(fep->clk_ptp); +failed_clk_ipg: + clk_disable_unprepare(fep->clk_ahb); failed_clk: failed_ioremap: free_netdev(ndev); @@ -2216,25 +2224,21 @@ fec_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); - int i; cancel_delayed_work_sync(&(fep->delay_work.delay_work)); unregister_netdev(ndev); fec_enet_mii_remove(fep); del_timer_sync(&fep->time_keep); - for (i = 0; i < FEC_IRQ_NUM; i++) { - int irq = platform_get_irq(pdev, i); - if (irq > 0) - free_irq(irq, ndev); - } if (fep->reg_phy) regulator_disable(fep->reg_phy); - clk_disable_unprepare(fep->clk_ptp); + if (fep->clk_ptp) + clk_disable_unprepare(fep->clk_ptp); if (fep->ptp_clock) ptp_clock_unregister(fep->ptp_clock); - clk_disable_unprepare(fep->clk_enet_out); - clk_disable_unprepare(fep->clk_ahb); + if (fep->clk_enet_out) + clk_disable_unprepare(fep->clk_enet_out); clk_disable_unprepare(fep->clk_ipg); + clk_disable_unprepare(fep->clk_ahb); free_netdev(ndev); return 0; @@ -2251,9 +2255,12 @@ fec_suspend(struct device *dev) fec_stop(ndev); netif_device_detach(ndev); } - clk_disable_unprepare(fep->clk_enet_out); - clk_disable_unprepare(fep->clk_ahb); + if (fep->clk_ptp) + clk_disable_unprepare(fep->clk_ptp); + if (fep->clk_enet_out) + clk_disable_unprepare(fep->clk_enet_out); clk_disable_unprepare(fep->clk_ipg); + clk_disable_unprepare(fep->clk_ahb); if (fep->reg_phy) regulator_disable(fep->reg_phy); @@ -2274,15 +2281,44 @@ fec_resume(struct device *dev) return ret; } - clk_prepare_enable(fep->clk_enet_out); - clk_prepare_enable(fep->clk_ahb); - clk_prepare_enable(fep->clk_ipg); + ret = clk_prepare_enable(fep->clk_ahb); + if (ret) + goto failed_clk_ahb; + + ret = clk_prepare_enable(fep->clk_ipg); + if (ret) + goto failed_clk_ipg; + + if (fep->clk_enet_out) { + ret = clk_prepare_enable(fep->clk_enet_out); + if (ret) + goto failed_clk_enet_out; + } + + if (fep->clk_ptp) { + ret = clk_prepare_enable(fep->clk_ptp); + if (ret) + goto failed_clk_ptp; + } + if (netif_running(ndev)) { fec_restart(ndev, fep->full_duplex); netif_device_attach(ndev); } return 0; + +failed_clk_ptp: + if (fep->clk_enet_out) + clk_disable_unprepare(fep->clk_enet_out); +failed_clk_enet_out: + clk_disable_unprepare(fep->clk_ipg); +failed_clk_ipg: + clk_disable_unprepare(fep->clk_ahb); +failed_clk_ahb: + if (fep->reg_phy) + regulator_disable(fep->reg_phy); + return ret; } #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 8d2db7b808b7..dbb34f7ce448 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -593,7 +593,6 @@ static int gfar_parse_group(struct device_node *np, return -EINVAL; } - grp->grp_id = priv->num_grps; grp->priv = priv; spin_lock_init(&grp->grplock); if (priv->mode == MQ_MG_MODE) { diff --git 
a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 04b552cd419d..ee19f2c138a6 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -1009,7 +1009,6 @@ struct gfar_irqinfo { * @napi: the napi poll function * @priv: back pointer to the priv structure * @regs: the ioremapped register space for this group - * @grp_id: group id for this group * @irqinfo: TX/RX/ER irq data for this group */ @@ -1018,11 +1017,10 @@ struct gfar_priv_grp { struct napi_struct napi; struct gfar_private *priv; struct gfar __iomem *regs; - unsigned int grp_id; + unsigned int rstat; unsigned long num_rx_queues; unsigned long rx_bit_map; /* cacheline 3 */ - unsigned int rstat; unsigned int tstat; unsigned long num_tx_queues; unsigned long tx_bit_map; diff --git a/drivers/net/ethernet/i825xx/sun3_82586.h b/drivers/net/ethernet/i825xx/sun3_82586.h index 93346f00486b..79aef681ac85 100644 --- a/drivers/net/ethernet/i825xx/sun3_82586.h +++ b/drivers/net/ethernet/i825xx/sun3_82586.h @@ -133,8 +133,8 @@ struct rfd_struct unsigned char last; /* Bit15,Last Frame on List / Bit14,suspend */ unsigned short next; /* linkoffset to next RFD */ unsigned short rbd_offset; /* pointeroffset to RBD-buffer */ - unsigned char dest[6]; /* ethernet-address, destination */ - unsigned char source[6]; /* ethernet-address, source */ + unsigned char dest[ETH_ALEN]; /* ethernet-address, destination */ + unsigned char source[ETH_ALEN]; /* ethernet-address, source */ unsigned short length; /* 802.3 frame-length */ unsigned short zero_dummy; /* dummy */ }; diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 5115ae76a5d1..ada6e210279f 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -1175,15 +1175,12 @@ static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ } - netif_printk(nic, hw, KERN_DEBUG, nic->netdev, - "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", - c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]); - netif_printk(nic, hw, KERN_DEBUG, nic->netdev, - "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", - c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]); - netif_printk(nic, hw, KERN_DEBUG, nic->netdev, - "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", - c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n", + c + 0); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n", + c + 8); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n", + c + 16); return 0; } diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 4c303e2a7cb3..104fcec86af3 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -2057,6 +2057,7 @@ const struct e1000_info e1000_82583_info = { | FLAG_HAS_JUMBO_FRAMES | FLAG_HAS_CTRLEXT_ON_LOAD, .flags2 = FLAG2_DISABLE_ASPM_L0S + | FLAG2_DISABLE_ASPM_L1 | FLAG2_NO_DISABLE_RX, .pba = 32, .max_hw_frame_size = DEFAULT_JUMBO, diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index ffbc08f56c40..ad0edd11015d 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -90,9 +90,6 @@ struct e1000_info; #define E1000_MNG_VLAN_NONE (-1) -/* Number of packet split data buffers (not including the 
header buffer) */ -#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) - #define DEFAULT_JUMBO 9234 /* Time to wait before putting the device into D3 if there's no link (in ms). */ diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 59c22bf18701..e4ebd7ddf5f2 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -173,7 +173,7 @@ static int e1000_get_settings(struct net_device *netdev, speed = adapter->link_speed; ecmd->duplex = adapter->link_duplex - 1; } - } else { + } else if (!pm_runtime_suspended(netdev->dev.parent)) { u32 status = er32(STATUS); if (status & E1000_STATUS_LU) { if (status & E1000_STATUS_SPEED_1000) @@ -264,6 +264,9 @@ static int e1000_set_settings(struct net_device *netdev, { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + int ret_val = 0; + + pm_runtime_get_sync(netdev->dev.parent); /* When SoL/IDER sessions are active, autoneg/speed/duplex * cannot be changed @@ -271,7 +274,8 @@ static int e1000_set_settings(struct net_device *netdev, if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) { e_err("Cannot change link characteristics when SoL/IDER is active.\n"); - return -EINVAL; + ret_val = -EINVAL; + goto out; } /* MDI setting is only allowed when autoneg enabled because @@ -279,13 +283,16 @@ static int e1000_set_settings(struct net_device *netdev, * duplex is forced. */ if (ecmd->eth_tp_mdix_ctrl) { - if (hw->phy.media_type != e1000_media_type_copper) - return -EOPNOTSUPP; + if (hw->phy.media_type != e1000_media_type_copper) { + ret_val = -EOPNOTSUPP; + goto out; + } if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && (ecmd->autoneg != AUTONEG_ENABLE)) { e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); - return -EINVAL; + ret_val = -EINVAL; + goto out; } } @@ -307,8 +314,8 @@ static int e1000_set_settings(struct net_device *netdev, u32 speed = ethtool_cmd_speed(ecmd); /* calling this overrides forced MDI setting */ if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { - clear_bit(__E1000_RESETTING, &adapter->state); - return -EINVAL; + ret_val = -EINVAL; + goto out; } } @@ -331,8 +338,10 @@ static int e1000_set_settings(struct net_device *netdev, e1000e_reset(adapter); } +out: + pm_runtime_put_sync(netdev->dev.parent); clear_bit(__E1000_RESETTING, &adapter->state); - return 0; + return ret_val; } static void e1000_get_pauseparam(struct net_device *netdev, @@ -366,6 +375,8 @@ static int e1000_set_pauseparam(struct net_device *netdev, while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) usleep_range(1000, 2000); + pm_runtime_get_sync(netdev->dev.parent); + if (adapter->fc_autoneg == AUTONEG_ENABLE) { hw->fc.requested_mode = e1000_fc_default; if (netif_running(adapter->netdev)) { @@ -398,6 +409,7 @@ static int e1000_set_pauseparam(struct net_device *netdev, } out: + pm_runtime_put_sync(netdev->dev.parent); clear_bit(__E1000_RESETTING, &adapter->state); return retval; } @@ -428,6 +440,8 @@ static void e1000_get_regs(struct net_device *netdev, u32 *regs_buff = p; u16 phy_data; + pm_runtime_get_sync(netdev->dev.parent); + memset(p, 0, E1000_REGS_LEN * sizeof(u32)); regs->version = (1 << 24) | (adapter->pdev->revision << 16) | @@ -472,6 +486,8 @@ static void e1000_get_regs(struct net_device *netdev, e1e_rphy(hw, MII_STAT1000, &phy_data); regs_buff[24] = (u32)phy_data; /* phy local receiver status */ regs_buff[25] = regs_buff[24]; /* phy remote receiver 
status */ + + pm_runtime_put_sync(netdev->dev.parent); } static int e1000_get_eeprom_len(struct net_device *netdev) @@ -504,6 +520,8 @@ static int e1000_get_eeprom(struct net_device *netdev, if (!eeprom_buff) return -ENOMEM; + pm_runtime_get_sync(netdev->dev.parent); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { ret_val = e1000_read_nvm(hw, first_word, last_word - first_word + 1, @@ -517,6 +535,8 @@ static int e1000_get_eeprom(struct net_device *netdev, } } + pm_runtime_put_sync(netdev->dev.parent); + if (ret_val) { /* a read error occurred, throw away the result */ memset(eeprom_buff, 0xff, sizeof(u16) * @@ -566,6 +586,8 @@ static int e1000_set_eeprom(struct net_device *netdev, ptr = (void *)eeprom_buff; + pm_runtime_get_sync(netdev->dev.parent); + if (eeprom->offset & 1) { /* need read/modify/write of first changed EEPROM word */ /* only the second byte of the word is being modified */ @@ -606,6 +628,7 @@ static int e1000_set_eeprom(struct net_device *netdev, ret_val = e1000e_update_nvm_checksum(hw); out: + pm_runtime_put_sync(netdev->dev.parent); kfree(eeprom_buff); return ret_val; } @@ -701,6 +724,8 @@ static int e1000_set_ringparam(struct net_device *netdev, } } + pm_runtime_get_sync(netdev->dev.parent); + e1000e_down(adapter); /* We can't just free everything and then setup again, because the @@ -739,6 +764,7 @@ err_setup_rx: e1000e_free_tx_resources(temp_tx); err_setup: e1000e_up(adapter); + pm_runtime_put_sync(netdev->dev.parent); free_temp: vfree(temp_tx); vfree(temp_rx); @@ -1732,6 +1758,8 @@ static void e1000_diag_test(struct net_device *netdev, u8 autoneg; bool if_running = netif_running(netdev); + pm_runtime_get_sync(netdev->dev.parent); + set_bit(__E1000_TESTING, &adapter->state); if (!if_running) { @@ -1817,6 +1845,8 @@ static void e1000_diag_test(struct net_device *netdev, } msleep_interruptible(4 * 1000); + + pm_runtime_put_sync(netdev->dev.parent); } static void e1000_get_wol(struct net_device *netdev, @@ -1891,6 +1921,8 @@ static int e1000_set_phys_id(struct net_device *netdev, switch (state) { case ETHTOOL_ID_ACTIVE: + pm_runtime_get_sync(netdev->dev.parent); + if (!hw->mac.ops.blink_led) return 2; /* cycle on/off twice per second */ @@ -1902,6 +1934,7 @@ static int e1000_set_phys_id(struct net_device *netdev, e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); hw->mac.ops.led_off(hw); hw->mac.ops.cleanup_led(hw); + pm_runtime_put_sync(netdev->dev.parent); break; case ETHTOOL_ID_ON: @@ -1912,6 +1945,7 @@ static int e1000_set_phys_id(struct net_device *netdev, hw->mac.ops.led_off(hw); break; } + return 0; } @@ -1950,11 +1984,15 @@ static int e1000_set_coalesce(struct net_device *netdev, adapter->itr_setting = adapter->itr & ~3; } + pm_runtime_get_sync(netdev->dev.parent); + if (adapter->itr_setting != 0) e1000e_write_itr(adapter, adapter->itr); else e1000e_write_itr(adapter, 0); + pm_runtime_put_sync(netdev->dev.parent); + return 0; } @@ -1968,7 +2006,9 @@ static int e1000_nway_reset(struct net_device *netdev) if (!adapter->hw.mac.autoneg) return -EINVAL; + pm_runtime_get_sync(netdev->dev.parent); e1000e_reinit_locked(adapter); + pm_runtime_put_sync(netdev->dev.parent); return 0; } @@ -1982,7 +2022,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, int i; char *p = NULL; + pm_runtime_get_sync(netdev->dev.parent); + e1000e_get_stats64(netdev, &net_stats); + + pm_runtime_put_sync(netdev->dev.parent); + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { switch (e1000_gstrings_stats[i].type) { case NETDEV_STATS: @@ -2033,7 +2078,11 @@ static int 
e1000_get_rxnfc(struct net_device *netdev, case ETHTOOL_GRXFH: { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - u32 mrqc = er32(MRQC); + u32 mrqc; + + pm_runtime_get_sync(netdev->dev.parent); + mrqc = er32(MRQC); + pm_runtime_put_sync(netdev->dev.parent); if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK)) return 0; @@ -2096,9 +2145,13 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata) return -EOPNOTSUPP; } + pm_runtime_get_sync(netdev->dev.parent); + ret_val = hw->phy.ops.acquire(hw); - if (ret_val) + if (ret_val) { + pm_runtime_put_sync(netdev->dev.parent); return -EBUSY; + } /* EEE Capability */ ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data); @@ -2117,14 +2170,11 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata) /* EEE PCS Status */ ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data); + if (ret_val) + goto release; if (hw->phy.type == e1000_phy_82579) phy_data <<= 8; -release: - hw->phy.ops.release(hw); - if (ret_val) - return -ENODATA; - /* Result of the EEE auto negotiation - there is no register that * has the status of the EEE negotiation so do a best-guess based * on whether Tx or Rx LPI indications have been received. @@ -2136,7 +2186,14 @@ release: edata->tx_lpi_enabled = true; edata->tx_lpi_timer = er32(LPIC) >> E1000_LPIC_LPIET_SHIFT; - return 0; +release: + hw->phy.ops.release(hw); + if (ret_val) + ret_val = -ENODATA; + + pm_runtime_put_sync(netdev->dev.parent); + + return ret_val; } static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata) @@ -2169,12 +2226,16 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata) hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled; + pm_runtime_get_sync(netdev->dev.parent); + /* reset the link */ if (netif_running(netdev)) e1000e_reinit_locked(adapter); else e1000e_reset(adapter); + pm_runtime_put_sync(netdev->dev.parent); + return 0; } @@ -2212,19 +2273,7 @@ static int e1000e_get_ts_info(struct net_device *netdev, return 0; } -static int e1000e_ethtool_begin(struct net_device *netdev) -{ - return pm_runtime_get_sync(netdev->dev.parent); -} - -static void e1000e_ethtool_complete(struct net_device *netdev) -{ - pm_runtime_put_sync(netdev->dev.parent); -} - static const struct ethtool_ops e1000_ethtool_ops = { - .begin = e1000e_ethtool_begin, - .complete = e1000e_ethtool_complete, .get_settings = e1000_get_settings, .set_settings = e1000_set_settings, .get_drvinfo = e1000_get_drvinfo, diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index a6f903a9b773..b799fd9b6aa9 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -90,6 +90,10 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_LPT_I217_V 0x153B #define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A #define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559 +#define E1000_DEV_ID_PCH_I218_LM2 0x15A0 +#define E1000_DEV_ID_PCH_I218_V2 0x15A1 +#define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */ +#define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */ #define E1000_REVISION_4 4 @@ -227,6 +231,9 @@ union e1000_rx_desc_extended { }; #define MAX_PS_BUFFERS 4 + +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) /* Receive Descriptor - Packet Split */ union e1000_rx_desc_packet_split { struct { @@ -251,7 +258,8 @@ union e1000_rx_desc_packet_split { } middle; struct { __le16 
header_status; - __le16 length[3]; /* length of buffers 1-3 */ + /* length of buffers 1-3 */ + __le16 length[PS_PAGE_BUFFERS]; } upper; __le64 reserved; } wb; /* writeback */ diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 9dde390f7e71..af08188d7e62 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -185,6 +185,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) u32 phy_id = 0; s32 ret_val; u16 retry_count; + u32 mac_reg = 0; for (retry_count = 0; retry_count < 2; retry_count++) { ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg); @@ -203,11 +204,11 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) if (hw->phy.id) { if (hw->phy.id == phy_id) - return true; + goto out; } else if (phy_id) { hw->phy.id = phy_id; hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK); - return true; + goto out; } /* In case the PHY needs to be in mdio slow mode, @@ -219,7 +220,22 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) ret_val = e1000e_get_phy_id(hw); hw->phy.ops.acquire(hw); - return !ret_val; + if (ret_val) + return false; +out: + if (hw->mac.type == e1000_pch_lpt) { + /* Unforce SMBus mode in PHY */ + e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); + phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; + e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Unforce SMBus mode in MAC */ + mac_reg = er32(CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + } + + return true; } /** @@ -233,7 +249,6 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) { u32 mac_reg, fwsm = er32(FWSM); s32 ret_val; - u16 phy_reg; /* Gate automatic PHY configuration by hardware on managed and * non-managed 82579 and newer adapters. @@ -262,22 +277,16 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; ew32(CTRL_EXT, mac_reg); + /* Wait 50 milliseconds for MAC to finish any retries + * that it might be trying to perform from previous + * attempts to acknowledge any phy read requests. 
+ */ + msleep(50); + /* fall-through */ case e1000_pch2lan: - if (e1000_phy_is_accessible_pchlan(hw)) { - if (hw->mac.type == e1000_pch_lpt) { - /* Unforce SMBus mode in PHY */ - e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); - phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; - e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg); - - /* Unforce SMBus mode in MAC */ - mac_reg = er32(CTRL_EXT); - mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; - ew32(CTRL_EXT, mac_reg); - } + if (e1000_phy_is_accessible_pchlan(hw)) break; - } /* fall-through */ case e1000_pchlan: @@ -287,6 +296,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) if (hw->phy.ops.check_reset_block(hw)) { e_dbg("Required LANPHYPC toggle blocked by ME\n"); + ret_val = -E1000_ERR_PHY; break; } @@ -298,15 +308,6 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; ew32(FEXTNVM3, mac_reg); - if (hw->mac.type == e1000_pch_lpt) { - /* Toggling LANPHYPC brings the PHY out of SMBus mode - * So ensure that the MAC is also out of SMBus mode - */ - mac_reg = er32(CTRL_EXT); - mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; - ew32(CTRL_EXT, mac_reg); - } - /* Toggle LANPHYPC Value bit */ mac_reg = er32(CTRL); mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE; @@ -325,6 +326,21 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) usleep_range(5000, 10000); } while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--); + usleep_range(30000, 60000); + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + /* Toggling LANPHYPC brings the PHY out of SMBus mode + * so ensure that the MAC is also out of SMBus mode + */ + mac_reg = er32(CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + ret_val = -E1000_ERR_PHY; } break; default: @@ -332,13 +348,14 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) } hw->phy.ops.release(hw); - - /* Reset the PHY before any access to it. Doing so, ensures - * that the PHY is in a known good state before we read/write - * PHY registers. The generic reset is sufficient here, - * because we haven't determined the PHY type yet. - */ - ret_val = e1000e_phy_hw_reset_generic(hw); + if (!ret_val) { + /* Reset the PHY before any access to it. Doing so, ensures + * that the PHY is in a known good state before we read/write + * PHY registers. The generic reset is sufficient here, + * because we haven't determined the PHY type yet. + */ + ret_val = e1000e_phy_hw_reset_generic(hw); + } out: /* Ungate automatic PHY configuration on non-managed 82579 */ @@ -793,29 +810,31 @@ release: * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications * preventing further DMA write requests. Workaround the issue by disabling * the de-assertion of the clock request when in 1Gbps mode. + * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link + * speeds in order to avoid Tx hangs. 
**/ static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link) { u32 fextnvm6 = er32(FEXTNVM6); + u32 status = er32(STATUS); s32 ret_val = 0; + u16 reg; - if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) { - u16 kmrn_reg; - + if (link && (status & E1000_STATUS_SPEED_1000)) { ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, - &kmrn_reg); + ®); if (ret_val) goto release; ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, - kmrn_reg & + reg & ~E1000_KMRNCTRLSTA_K1_ENABLE); if (ret_val) goto release; @@ -827,12 +846,45 @@ static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link) ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, - kmrn_reg); + reg); release: hw->phy.ops.release(hw); } else { /* clear FEXTNVM6 bit 8 on link down or 10/100 */ - ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); + fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK; + + if (!link || ((status & E1000_STATUS_SPEED_100) && + (status & E1000_STATUS_FD))) + goto update_fextnvm6; + + ret_val = e1e_rphy(hw, I217_INBAND_CTRL, ®); + if (ret_val) + return ret_val; + + /* Clear link status transmit timeout */ + reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK; + + if (status & E1000_STATUS_SPEED_100) { + /* Set inband Tx timeout to 5x10us for 100Half */ + reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; + + /* Do not extend the K1 entry latency for 100Half */ + fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; + } else { + /* Set inband Tx timeout to 50x10us for 10Full/Half */ + reg |= 50 << + I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; + + /* Extend the K1 entry latency for 10 Mbps */ + fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; + } + + ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg); + if (ret_val) + return ret_val; + +update_fextnvm6: + ew32(FEXTNVM6, fextnvm6); } return ret_val; @@ -993,7 +1045,9 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) /* Work-around I218 hang issue */ if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || - (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) { + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) { ret_val = e1000_k1_workaround_lpt_lp(hw, link); if (ret_val) return ret_val; @@ -4168,7 +4222,9 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) u16 phy_reg, device_id = hw->adapter->pdev->device; if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || - (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) { + (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || + (device_id == E1000_DEV_ID_PCH_I218_LM3) || + (device_id == E1000_DEV_ID_PCH_I218_V3)) { u32 fextnvm6 = er32(FEXTNVM6); ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 80034a2b297c..59865695b282 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -93,6 +93,7 @@ #define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 #define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100 +#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200 #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL @@ -197,6 +198,11 @@ #define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */ +/* Inband Control */ +#define I217_INBAND_CTRL PHY_REG(770, 
18) +#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK 0x3F00 +#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT 8 + /* PHY Low Power Idle Control */ #define I82579_LPI_CTRL PHY_REG(772, 20) #define I82579_LPI_CTRL_100_ENABLE 0x2000 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 77f81cbb601a..e6d2c0f8f76a 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -2979,17 +2979,10 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) u32 pages = 0; /* Workaround Si errata on PCHx - configure jumbo frame flow */ - if (hw->mac.type >= e1000_pch2lan) { - s32 ret_val; - - if (adapter->netdev->mtu > ETH_DATA_LEN) - ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); - else - ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); - - if (ret_val) - e_dbg("failed to enable jumbo frame workaround mode\n"); - } + if ((hw->mac.type >= e1000_pch2lan) && + (adapter->netdev->mtu > ETH_DATA_LEN) && + e1000_lv_jumbo_workaround_ich8lan(hw, true)) + e_dbg("failed to enable jumbo frame workaround mode\n"); /* Program MC offset vector base */ rctl = er32(RCTL); @@ -3826,6 +3819,8 @@ void e1000e_reset(struct e1000_adapter *adapter) break; } + pba = 14; + ew32(PBA, pba); fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH; fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL; break; @@ -4034,6 +4029,12 @@ void e1000e_down(struct e1000_adapter *adapter) adapter->link_speed = 0; adapter->link_duplex = 0; + /* Disable Si errata workaround on PCHx for jumbo frame flow */ + if ((hw->mac.type >= e1000_pch2lan) && + (adapter->netdev->mtu > ETH_DATA_LEN) && + e1000_lv_jumbo_workaround_ich8lan(hw, false)) + e_dbg("failed to disable jumbo frame workaround mode\n"); + if (!pci_channel_offline(adapter->pdev)) e1000e_reset(adapter); @@ -4683,11 +4684,11 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct e1000_phy_regs *phy = &adapter->phy_regs; - if ((er32(STATUS) & E1000_STATUS_LU) && + if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) && + (er32(STATUS) & E1000_STATUS_LU) && (adapter->hw.phy.media_type == e1000_media_type_copper)) { int ret_val; - pm_runtime_get_sync(&adapter->pdev->dev); ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); @@ -4698,7 +4699,6 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); if (ret_val) e_warn("Error reading PHY register\n"); - pm_runtime_put_sync(&adapter->pdev->dev); } else { /* Do not read PHY registers if link is not up * Set values to typical power-on defaults @@ -5995,6 +5995,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) */ e1000e_release_hw_control(adapter); + pci_clear_master(pdev); + /* The pci-e switch on some quad port adapters will report a * correctable error when the MAC transitions from D0 to D3. 
To * prevent this we need to mask off the correctable errors on the @@ -6723,10 +6725,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->hw.fc.current_mode = e1000_fc_default; adapter->hw.phy.autoneg_advertised = 0x2f; - /* ring size defaults */ - adapter->rx_ring->count = E1000_DEFAULT_RXD; - adapter->tx_ring->count = E1000_DEFAULT_TXD; - /* Initial Wake on LAN setting - If APM wake is enabled in * the EEPROM, enable the ACPI Magic Packet filter */ @@ -6976,6 +6974,10 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index a6494e5daffe..0ac6b11c6e4e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -618,9 +618,8 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7) #define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8) #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) -#define IXGBE_FLAG2_PTP_ENABLED (u32)(1 << 10) -#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 11) -#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 12) +#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10) +#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 11) /* Tx fast path data */ int num_tx_queues; @@ -754,7 +753,7 @@ enum ixgbe_state_t { __IXGBE_DOWN, __IXGBE_SERVICE_SCHED, __IXGBE_IN_SFP_INIT, - __IXGBE_READ_I2C, + __IXGBE_PTP_RUNNING, }; struct ixgbe_cb { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 4a5bfb6b3af0..a26f3fee4f35 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1018,8 +1018,17 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, u16 sfp_addr = 0; u16 sfp_data = 0; u16 sfp_stat = 0; + u16 gssr; u32 i; + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) + gssr = IXGBE_GSSR_PHY1_SM; + else + gssr = IXGBE_GSSR_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) + return IXGBE_ERR_SWFW_SYNC; + if (hw->phy.type == ixgbe_phy_nl) { /* * phy SDA/SCL registers are at addresses 0xC30A to @@ -1028,17 +1037,17 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, */ sfp_addr = (dev_addr << 8) + byte_offset; sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); - hw->phy.ops.write_reg(hw, - IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, - MDIO_MMD_PMAPMD, - sfp_addr); + hw->phy.ops.write_reg_mdi(hw, + IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, + MDIO_MMD_PMAPMD, + sfp_addr); /* Poll status */ for (i = 0; i < 100; i++) { - hw->phy.ops.read_reg(hw, - IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, - MDIO_MMD_PMAPMD, - &sfp_stat); + hw->phy.ops.read_reg_mdi(hw, + IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, + MDIO_MMD_PMAPMD, + &sfp_stat); sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) break; @@ -1052,8 +1061,8 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, } /* 
Read data */ - hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, - MDIO_MMD_PMAPMD, &sfp_data); + hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, + MDIO_MMD_PMAPMD, &sfp_data); *eeprom_data = (u8)(sfp_data >> 8); } else { @@ -1061,6 +1070,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, } out: + hw->mac.ops.release_swfw_sync(hw, gssr); return status; } @@ -1321,11 +1331,13 @@ static struct ixgbe_eeprom_operations eeprom_ops_82598 = { static struct ixgbe_phy_operations phy_ops_82598 = { .identify = &ixgbe_identify_phy_generic, - .identify_sfp = &ixgbe_identify_sfp_module_generic, + .identify_sfp = &ixgbe_identify_module_generic, .init = &ixgbe_init_phy_ops_82598, .reset = &ixgbe_reset_phy_generic, .read_reg = &ixgbe_read_phy_reg_generic, .write_reg = &ixgbe_write_phy_reg_generic, + .read_reg_mdi = &ixgbe_read_phy_reg_mdi, + .write_reg_mdi = &ixgbe_write_phy_reg_mdi, .setup_link = &ixgbe_setup_phy_link_generic, .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 0b82d38bc97d..207f68fbe3d3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -58,6 +58,10 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); +static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); static bool ixgbe_mng_enabled(struct ixgbe_hw *hw) { @@ -219,6 +223,25 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; s32 ret_val = 0; + u32 esdp; + + if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) { + /* Store flag indicating I2C bus access control unit. */ + hw->phy.qsfp_shared_i2c_bus = true; + + /* Initialize access to QSFP+ I2C bus */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= IXGBE_ESDP_SDP0_DIR; + esdp &= ~IXGBE_ESDP_SDP1_DIR; + esdp &= ~IXGBE_ESDP_SDP0; + esdp &= ~IXGBE_ESDP_SDP0_NATIVE; + esdp &= ~IXGBE_ESDP_SDP1_NATIVE; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + + phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599; + phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599; + } /* Identify the PHY or SFP module */ ret_val = phy->ops.identify(hw); @@ -397,6 +420,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82599_LS: media_type = ixgbe_media_type_fiber_lco; break; + case IXGBE_DEV_ID_82599_QSFP_SF_QP: + media_type = ixgbe_media_type_fiber_qsfp; + break; default: media_type = ixgbe_media_type_unknown; break; @@ -527,6 +553,75 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) } /** + * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber + * @hw: pointer to hardware structure + * @speed: link speed to set + * + * We set the module speed differently for fixed fiber. For other + * multi-speed devices we don't have an error value so here if we + * detect an error we just log it and exit. 
+ */ +static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed) +{ + s32 status; + u8 rs, eeprom_data; + + switch (speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + /* one bit mask same as setting on */ + rs = IXGBE_SFF_SOFT_RS_SELECT_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + rs = IXGBE_SFF_SOFT_RS_SELECT_1G; + break; + default: + hw_dbg(hw, "Invalid fixed module speed\n"); + return; + } + + /* Set RS0 */ + status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + &eeprom_data); + if (status) { + hw_dbg(hw, "Failed to read Rx Rate Select RS0\n"); + goto out; + } + + eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; + + status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + eeprom_data); + if (status) { + hw_dbg(hw, "Failed to write Rx Rate Select RS0\n"); + goto out; + } + + /* Set RS1 */ + status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + &eeprom_data); + if (status) { + hw_dbg(hw, "Failed to read Rx Rate Select RS1\n"); + goto out; + } + + eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; + + status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + eeprom_data); + if (status) { + hw_dbg(hw, "Failed to write Rx Rate Select RS1\n"); + goto out; + } +out: + return; +} + +/** + * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed @@ -573,9 +668,14 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, goto out; /* Set the module link speed */ - esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - IXGBE_WRITE_FLUSH(hw); + if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) { + ixgbe_set_fiber_fixed_speed(hw, + IXGBE_LINK_SPEED_10GB_FULL); + } else { + esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + } /* Allow module to change analog characteristics (1G->10G) */ msleep(40); @@ -625,10 +725,15 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, goto out; /* Set the module link speed */ - esdp_reg &= ~IXGBE_ESDP_SDP5; - esdp_reg |= IXGBE_ESDP_SDP5_DIR; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - IXGBE_WRITE_FLUSH(hw); + if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) { + ixgbe_set_fiber_fixed_speed(hw, + IXGBE_LINK_SPEED_1GB_FULL); + } else { + esdp_reg &= ~IXGBE_ESDP_SDP5; + esdp_reg |= IXGBE_ESDP_SDP5_DIR; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + } /* Allow module to change analog characteristics (10G->1G) */ msleep(40); @@ -1872,7 +1977,7 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) goto out; else - status = ixgbe_identify_sfp_module_generic(hw); + status = ixgbe_identify_module_generic(hw); } /* Set PHY type none if no PHY detected */ @@ -1978,10 +2083,12 @@ sfp_check: switch (hw->phy.type) { case ixgbe_phy_sfp_passive_tyco: case ixgbe_phy_sfp_passive_unknown: + case ixgbe_phy_qsfp_passive_unknown: physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; break; case ixgbe_phy_sfp_ftl_active: case ixgbe_phy_sfp_active_unknown: + case ixgbe_phy_qsfp_active_unknown: physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA; break; case ixgbe_phy_sfp_avago: @@ -1999,6 +2106,15 @@ sfp_check: else if (comp_codes_1g & 
 			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
 		break;
+	case ixgbe_phy_qsfp_intel:
+	case ixgbe_phy_qsfp_unknown:
+		hw->phy.ops.read_i2c_eeprom(hw,
+			IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g);
+		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+		break;
 	default:
 		break;
 	}
@@ -2236,6 +2352,112 @@ reset_pipeline_out:
 	return ret_val;
 }
 
+/**
+ * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: address to read from
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+				     u8 dev_addr, u8 *data)
+{
+	u32 esdp;
+	s32 status;
+	s32 timeout = 200;
+
+	if (hw->phy.qsfp_shared_i2c_bus == true) {
+		/* Acquire I2C bus ownership. */
+		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+		esdp |= IXGBE_ESDP_SDP0;
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		IXGBE_WRITE_FLUSH(hw);
+
+		while (timeout) {
+			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+			if (esdp & IXGBE_ESDP_SDP1)
+				break;
+
+			usleep_range(5000, 10000);
+			timeout--;
+		}
+
+		if (!timeout) {
+			hw_dbg(hw, "Driver can't access resource, I2C bus acquire timed out.\n");
+			status = IXGBE_ERR_I2C;
+			goto release_i2c_access;
+		}
+	}
+
+	status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+	if (hw->phy.qsfp_shared_i2c_bus == true) {
+		/* Release I2C bus ownership. */
+		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+		esdp &= ~IXGBE_ESDP_SDP0;
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+
+	return status;
+}
+
+/**
+ * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: address to write to
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+				      u8 dev_addr, u8 data)
+{
+	u32 esdp;
+	s32 status;
+	s32 timeout = 200;
+
+	if (hw->phy.qsfp_shared_i2c_bus == true) {
+		/* Acquire I2C bus ownership. */
+		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+		esdp |= IXGBE_ESDP_SDP0;
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		IXGBE_WRITE_FLUSH(hw);
+
+		while (timeout) {
+			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+			if (esdp & IXGBE_ESDP_SDP1)
+				break;
+
+			usleep_range(5000, 10000);
+			timeout--;
+		}
+
+		if (!timeout) {
+			hw_dbg(hw, "Driver can't access resource, I2C bus acquire timed out.\n");
+			status = IXGBE_ERR_I2C;
+			goto release_i2c_access;
+		}
+	}
+
+	status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+	if (hw->phy.qsfp_shared_i2c_bus == true) {
+		/* Release I2C bus ownership. 
*/ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp &= ~IXGBE_ESDP_SDP0; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + } + + return status; +} + static struct ixgbe_mac_operations mac_ops_82599 = { .init_hw = &ixgbe_init_hw_generic, .reset_hw = &ixgbe_reset_hw_82599, @@ -2300,7 +2522,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = { static struct ixgbe_phy_operations phy_ops_82599 = { .identify = &ixgbe_identify_phy_82599, - .identify_sfp = &ixgbe_identify_sfp_module_generic, + .identify_sfp = &ixgbe_identify_module_generic, .init = &ixgbe_init_phy_ops_82599, .reset = &ixgbe_reset_phy_generic, .read_reg = &ixgbe_read_phy_reg_generic, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 9bcdeb89af5a..50e62a2b1a65 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -65,17 +65,42 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); * function check the device id to see if the associated phy supports * autoneg flow control. **/ -s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) +bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) { + bool supported = false; + ixgbe_link_speed speed; + bool link_up; - switch (hw->device_id) { - case IXGBE_DEV_ID_X540T: - case IXGBE_DEV_ID_X540T1: - case IXGBE_DEV_ID_82599_T3_LOM: - return 0; + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber_fixed: + case ixgbe_media_type_fiber: + hw->mac.ops.check_link(hw, &speed, &link_up, false); + /* if link is down, assume supported */ + if (link_up) + supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? + true : false; + else + supported = true; + break; + case ixgbe_media_type_backplane: + supported = true; + break; + case ixgbe_media_type_copper: + /* only some copper devices support flow control autoneg */ + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_T3_LOM: + case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: + supported = true; + break; + default: + break; + } default: - return IXGBE_ERR_FC_NOT_SUPPORTED; + break; } + + return supported; } /** @@ -114,6 +139,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) * we link at 10G, the 1G advertisement is harmless and vice versa. 
*/ switch (hw->phy.media_type) { + case ixgbe_media_type_fiber_fixed: case ixgbe_media_type_fiber: case ixgbe_media_type_backplane: reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); @@ -234,7 +260,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) IXGBE_GSSR_MAC_CSR_SM); } else if ((hw->phy.media_type == ixgbe_media_type_copper) && - (ixgbe_device_supports_autoneg_fc(hw) == 0)) { + ixgbe_device_supports_autoneg_fc(hw)) { hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, reg_cu); } @@ -2380,6 +2406,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw) switch (hw->phy.media_type) { /* Autoneg flow control on fiber adapters */ + case ixgbe_media_type_fiber_fixed: case ixgbe_media_type_fiber: if (speed == IXGBE_LINK_SPEED_1GB_FULL) ret_val = ixgbe_fc_autoneg_fiber(hw); @@ -2392,7 +2419,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw) /* Autoneg flow control on copper adapters */ case ixgbe_media_type_copper: - if (ixgbe_device_supports_autoneg_fc(hw) == 0) + if (ixgbe_device_supports_autoneg_fc(hw)) ret_val = ixgbe_fc_autoneg_copper(hw); break; @@ -2479,42 +2506,39 @@ out: **/ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) { - u32 gssr; + u32 gssr = 0; u32 swmask = mask; u32 fwmask = mask << 5; - s32 timeout = 200; + u32 timeout = 200; + u32 i; - while (timeout) { + for (i = 0; i < timeout; i++) { /* - * SW EEPROM semaphore bit is used for access to all - * SW_FW_SYNC/GSSR bits (not just EEPROM) + * SW NVM semaphore bit is used for access to all + * SW_FW_SYNC bits (not just NVM) */ if (ixgbe_get_eeprom_semaphore(hw)) return IXGBE_ERR_SWFW_SYNC; gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); - if (!(gssr & (fwmask | swmask))) - break; - - /* - * Firmware currently using resource (fwmask) or other software - * thread currently using resource (swmask) - */ - ixgbe_release_eeprom_semaphore(hw); - usleep_range(5000, 10000); - timeout--; - } - - if (!timeout) { - hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n"); - return IXGBE_ERR_SWFW_SYNC; + if (!(gssr & (fwmask | swmask))) { + gssr |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); + ixgbe_release_eeprom_semaphore(hw); + return 0; + } else { + /* Resource is currently in use by FW or SW */ + ixgbe_release_eeprom_semaphore(hw); + usleep_range(5000, 10000); + } } - gssr |= swmask; - IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); + /* If time expired clear the bits holding the lock and retry */ + if (gssr & (fwmask | swmask)) + ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask)); - ixgbe_release_eeprom_semaphore(hw); - return 0; + usleep_range(5000, 10000); + return IXGBE_ERR_SWFW_SYNC; } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 22eee38868f1..1315b8ac7f58 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -80,7 +80,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw); s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw); s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); -s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); +bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); void ixgbe_fc_autoneg(struct ixgbe_hw *hw); s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 24e2e7aafda2..50c1e9b2fd80 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -355,10 +355,11 @@ static void ixgbe_get_pauseparam(struct net_device *netdev, struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - if (hw->fc.disable_fc_autoneg) - pause->autoneg = 0; - else + if (ixgbe_device_supports_autoneg_fc(hw) && + !hw->fc.disable_fc_autoneg) pause->autoneg = 1; + else + pause->autoneg = 0; if (hw->fc.current_mode == ixgbe_fc_rx_pause) { pause->rx_pause = 1; @@ -384,7 +385,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev, /* some devices do not support autoneg of link flow control */ if ((pause->autoneg == AUTONEG_ENABLE) && - (ixgbe_device_supports_autoneg_fc(hw) != 0)) + !ixgbe_device_supports_autoneg_fc(hw)) return -EINVAL; fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); @@ -1140,11 +1141,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, sprintf(p, "tx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; #ifdef LL_EXTENDED_STATS - sprintf(p, "tx_q_%u_napi_yield", i); + sprintf(p, "tx_queue_%u_ll_napi_yield", i); p += ETH_GSTRING_LEN; - sprintf(p, "tx_q_%u_misses", i); + sprintf(p, "tx_queue_%u_ll_misses", i); p += ETH_GSTRING_LEN; - sprintf(p, "tx_q_%u_cleaned", i); + sprintf(p, "tx_queue_%u_ll_cleaned", i); p += ETH_GSTRING_LEN; #endif /* LL_EXTENDED_STATS */ } @@ -1154,11 +1155,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, sprintf(p, "rx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; #ifdef LL_EXTENDED_STATS - sprintf(p, "rx_q_%u_ll_poll_yield", i); + sprintf(p, "rx_queue_%u_ll_poll_yield", i); p += ETH_GSTRING_LEN; - sprintf(p, "rx_q_%u_misses", i); + sprintf(p, "rx_queue_%u_ll_misses", i); p += ETH_GSTRING_LEN; - sprintf(p, "rx_q_%u_cleaned", i); + sprintf(p, "rx_queue_%u_ll_cleaned", i); p += ETH_GSTRING_LEN; #endif /* LL_EXTENDED_STATS */ } @@ -2909,33 +2910,21 @@ static int ixgbe_get_module_info(struct net_device *dev, struct ixgbe_hw *hw = &adapter->hw; u32 status; u8 sff8472_rev, addr_mode; - int ret_val = 0; bool page_swap = false; - /* avoid concurent i2c reads */ - while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) - msleep(100); - - /* used by the service task */ - set_bit(__IXGBE_READ_I2C, &adapter->state); - /* Check whether we support SFF-8472 or not */ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_SFF_8472_COMP, &sff8472_rev); - if (status != 0) { - ret_val = -EIO; - goto err_out; - } + if (status != 0) + return -EIO; /* addressing mode is not supported */ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_SFF_8472_SWAP, &addr_mode); - if (status != 0) { - ret_val = -EIO; - goto err_out; - } + if (status != 0) + return -EIO; if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n"); @@ -2952,9 +2941,7 @@ static int ixgbe_get_module_info(struct net_device *dev, modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; } -err_out: - clear_bit(__IXGBE_READ_I2C, &adapter->state); - return ret_val; + return 0; } static int ixgbe_get_module_eeprom(struct net_device *dev, @@ -2968,48 +2955,25 @@ static int ixgbe_get_module_eeprom(struct net_device *dev, int i = 0; int ret_val = 0; - /* ixgbe_get_module_info is called before this function in all - * cases, so we do not need any checks we already do above, - * and can trust ee->len to be a known value. 
- */
+	if (ee->len == 0)
+		return -EINVAL;
+
+	for (i = ee->offset; i < ee->offset + ee->len; i++) {
+		/* I2C reads can take long time */
+		if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+			return -EBUSY;
 
-	while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
-		msleep(100);
-	set_bit(__IXGBE_READ_I2C, &adapter->state);
+		if (i < ETH_MODULE_SFF_8079_LEN)
+			status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
+		else
+			status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
 
-	/* Read the first block, SFF-8079 */
-	for (i = 0; i < ETH_MODULE_SFF_8079_LEN; i++) {
-		status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
-		if (status != 0) {
-			/* Error occured while reading module */
+		if (status != 0)
 			ret_val = -EIO;
-			goto err_out;
-		}
-		data[i] = databyte;
-	}
 
-	/* If the second block is requested, check if SFF-8472 is supported. */
-	if (ee->len == ETH_MODULE_SFF_8472_LEN) {
-		if (data[IXGBE_SFF_SFF_8472_COMP] == IXGBE_SFF_SFF_8472_UNSUP)
-			return -EOPNOTSUPP;
-
-		/* Read the second block, SFF-8472 */
-		for (i = ETH_MODULE_SFF_8079_LEN;
-		     i < ETH_MODULE_SFF_8472_LEN; i++) {
-			status = hw->phy.ops.read_i2c_sff8472(hw,
-				i - ETH_MODULE_SFF_8079_LEN, &databyte);
-			if (status != 0) {
-				/* Error occured while reading module */
-				ret_val = -EIO;
-				goto err_out;
-			}
-			data[i] = databyte;
-		}
+		data[i - ee->offset] = databyte;
 	}
 
-err_out:
-	clear_bit(__IXGBE_READ_I2C, &adapter->state);
-	return ret_val;
+	return ret_val;
 }
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index be4b1fb3d0d2..128d6b885326 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -63,7 +63,7 @@ char ixgbe_default_device_descr[] =
 static char ixgbe_default_device_descr[] =
 			"Intel(R) 10 Gigabit Network Connection";
 #endif
-#define DRV_VERSION "3.13.10-k"
+#define DRV_VERSION "3.15.1-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
 				"Copyright (c) 1999-2013 Intel Corporation.";
@@ -109,6 +109,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
@@ -195,6 +196,86 @@ static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
 	return 0;
 }
 
+/**
+ * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
+ * @hw: hw specific details
+ *
+ * This function is used by probe to determine whether a device's PCI-Express
+ * bandwidth details should be gathered from the parent bus instead of from the
+ * device. Used to ensure that various locations all have the correct device ID
+ * checks.
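+ *
+ * As a sketch of the intended use (ixgbe_check_minimum_link() below
+ * contains this exact walk), a true return means probe measures the
+ * slot one hop up from the device itself:
+ *
+ *	struct pci_dev *pdev = adapter->pdev->bus->parent->self;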
+ */
+static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
+{
+	switch (hw->device_id) {
+	case IXGBE_DEV_ID_82599_SFP_SF_QP:
+	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
+				     int expected_gts)
+{
+	int max_gts = 0;
+	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
+	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
+	struct pci_dev *pdev;
+
+	/* determine whether to use the parent device
+	 */
+	if (ixgbe_pcie_from_parent(&adapter->hw))
+		pdev = adapter->pdev->bus->parent->self;
+	else
+		pdev = adapter->pdev;
+
+	if (pcie_get_minimum_link(pdev, &speed, &width) ||
+	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
+		e_dev_warn("Unable to determine PCI Express bandwidth.\n");
+		return;
+	}
+
+	switch (speed) {
+	case PCIE_SPEED_2_5GT:
+		/* 8b/10b encoding reduces max throughput by 20% */
+		max_gts = 2 * width;
+		break;
+	case PCIE_SPEED_5_0GT:
+		/* 8b/10b encoding reduces max throughput by 20% */
+		max_gts = 4 * width;
+		break;
+	case PCIE_SPEED_8_0GT:
+		/* 128b/130b encoding only reduces throughput by 1% */
+		max_gts = 8 * width;
+		break;
+	default:
+		e_dev_warn("Unable to determine PCI Express bandwidth.\n");
+		return;
+	}
+
+	e_dev_info("PCI Express bandwidth of %dGT/s available\n",
+		   max_gts);
+	e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
+		   (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
+		    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
+		    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
+		    "Unknown"),
+		   width,
+		   (speed == PCIE_SPEED_2_5GT ? "20%" :
+		    speed == PCIE_SPEED_5_0GT ? "20%" :
+		    speed == PCIE_SPEED_8_0GT ? "N/a" :
+		    "Unknown"));
+
+	if (max_gts < expected_gts) {
+		e_dev_warn("This is not sufficient for optimal performance of this card.\n");
+		e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
+			   expected_gts);
+		e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
+	}
+}
+
 static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
 {
 	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
@@ -3724,8 +3805,15 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
 		hw->addr_ctrl.user_set_promisc = true;
 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
 		vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
-		/* don't hardware filter vlans in promisc mode */
-		ixgbe_vlan_filter_disable(adapter);
+		/* Only disable hardware VLAN filtering in promiscuous mode
+		 * if SR-IOV and VMDQ are disabled - otherwise ensure
+		 * that hardware VLAN filters remain enabled.
+		 */
+		if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
+					IXGBE_FLAG_SRIOV_ENABLED)))
+			ixgbe_vlan_filter_disable(adapter);
+		else
+			ixgbe_vlan_filter_enable(adapter);
 	} else {
 		if (netdev->flags & IFF_ALLMULTI) {
 			fctrl |= IXGBE_FCTRL_MPE;
@@ -4352,7 +4440,7 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
 	if (hw->mac.san_mac_rar_index)
 		hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
 
-	if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
 		ixgbe_ptp_reset(adapter);
 }
 
@@ -4714,8 +4802,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
 	ixgbe_pbthresh_setup(adapter);
 	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
 	hw->fc.send_xon = true;
-	hw->fc.disable_fc_autoneg =
-		(ixgbe_device_supports_autoneg_fc(hw) == 0) ? false : true;
+	hw->fc.disable_fc_autoneg = !ixgbe_device_supports_autoneg_fc(hw);
 
 #ifdef CONFIG_PCI_IOV
 	/* assign number of SR-IOV VFs */
@@ -5681,7 +5768,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
 
 	adapter->last_rx_ptp_check = jiffies;
 
-	if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
 		ixgbe_ptp_start_cyclecounter(adapter);
 
 	e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
@@ -5727,7 +5814,7 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
 	if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
 		adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
 
-	if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
 		ixgbe_ptp_start_cyclecounter(adapter);
 
 	e_info(drv, "NIC Link is Down\n");
@@ -5826,10 +5913,6 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
 	    !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
 		return;
 
-	/* concurent i2c reads are not supported */
-	if (test_bit(__IXGBE_READ_I2C, &adapter->state))
-		return;
-
 	/* someone else is in init, wait until next service event */
 	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
 		return;
@@ -6038,7 +6121,7 @@ static void ixgbe_service_task(struct work_struct *work)
 	ixgbe_fdir_reinit_subtask(adapter);
 	ixgbe_check_hang_subtask(adapter);
 
-	if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) {
+	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
 		ixgbe_ptp_overflow_check(adapter);
 		ixgbe_ptp_rx_hang(adapter);
 	}
@@ -7247,6 +7330,42 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 };
 
 /**
+ * ixgbe_enumerate_functions - Get the number of ports this device has
+ * @adapter: adapter structure
+ *
+ * This function enumerates the physical functions co-located on a single slot,
+ * in order to determine how many ports a device has. This is most useful in
+ * determining the required GT/s of PCIe bandwidth necessary for optimal
+ * performance.
+ **/
+static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct list_head *entry;
+	int physfns = 0;
+
+	/* Some cards cannot use the generic method of counting PCIe
+	 * functions, so the count must be hardcoded to the correct value.
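+	 *
+	 * The count only feeds the expected_gts estimate in probe, at
+	 * roughly 10GT/s per 10G port; for example, a quad-port part needs
+	 *
+	 *	expected_gts = 4 * 10 = 40
+	 *
+	 * which a x8 PCIe 3.0 slot (max_gts = 8 * 8 = 64) satisfies.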
+	 */
+	switch (hw->device_id) {
+	case IXGBE_DEV_ID_82599_SFP_SF_QP:
+	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+		physfns = 4;
+		break;
+	default:
+		list_for_each(entry, &adapter->pdev->bus_list) {
+			struct pci_dev *pdev =
+				list_entry(entry, struct pci_dev, bus_list);
+			/* don't count virtual functions */
+			if (!pdev->is_virtfn)
+				physfns++;
+		}
+	}
+
+	return physfns;
+}
+
+/**
  * ixgbe_wol_supported - Check whether device supports WoL
  * @hw: hw specific details
  * @device_id: the device ID
@@ -7328,7 +7447,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct ixgbe_hw *hw;
 	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
 	static int cards_found;
-	int i, err, pci_using_dac;
+	int i, err, pci_using_dac, expected_gts;
 	unsigned int indices = MAX_TX_QUEUES;
 	u8 part_str[IXGBE_PBANUM_LENGTH];
 #ifdef IXGBE_FCOE
@@ -7617,7 +7736,7 @@ skip_sriov:
 	/* pick up the PCI bus settings for reporting later */
 	hw->mac.ops.get_bus_info(hw);
-	if (hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP)
+	if (ixgbe_pcie_from_parent(hw))
 		ixgbe_get_parent_bus_info(adapter);
 
 	/* print bus type/speed/width info */
@@ -7643,12 +7762,20 @@ skip_sriov:
 		e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
 			   hw->mac.type, hw->phy.type, part_str);
 
-	if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
-		e_dev_warn("PCI-Express bandwidth available for this card is "
-			   "not sufficient for optimal performance.\n");
-		e_dev_warn("For optimal performance a x8 PCI-Express slot "
-			   "is required.\n");
+	/* calculate the expected PCIe bandwidth required for optimal
+	 * performance. Note that some older parts will never have enough
+	 * bandwidth due to being older generation PCIe parts. We clamp these
+	 * parts to ensure no warning is displayed if it can't be fixed.
+	 */
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
+		break;
+	default:
+		expected_gts = ixgbe_enumerate_functions(adapter) * 10;
+		break;
 	}
+	ixgbe_check_minimum_link(adapter, expected_gts);
 
 	/* reset the hardware with the new settings */
 	err = hw->mac.ops.start_hw(hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index e5691ccbce9d..369eef526bc1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -204,7 +204,83 @@ out:
 }
 
 /**
+ * ixgbe_read_phy_reg_mdi - Reads a value from a specified PHY register
+ * without the SWFW lock
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
+ * @phy_data: Pointer to read data from PHY register
+ **/
+s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+			   u16 *phy_data)
+{
+	u32 i, data, command;
+
+	/* Setup and write the address cycle command */
+	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+		   (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+		   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+	/* Check every 10 usec to see if the address cycle completed.
+	 * The MDI Command bit will clear when the operation is
+	 * complete
+	 */
+	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+		udelay(10);
+
+		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+			break;
+	}
+
+	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+		hw_dbg(hw, "PHY address command did not complete.\n");
+		return IXGBE_ERR_PHY;
+	}
+
+	/* Address cycle complete, setup and write the read
+	 * command
+	 */
+	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+		   (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+		   (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
+
+	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+	/* Check every 10 usec to see if the address cycle
+	 * completed. The MDI Command bit will clear when the
+	 * operation is complete
+	 */
+	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+		udelay(10);
+
+		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+			break;
+	}
+
+	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+		hw_dbg(hw, "PHY read command didn't complete\n");
+		return IXGBE_ERR_PHY;
+	}
+
+	/* Read operation is complete. Get the data
+	 * from MSRWD
+	 */
+	data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+	data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
+	*phy_data = (u16)(data);
+
+	return 0;
+}
+
+/**
  * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
+ * using the SWFW lock - this function is needed in most cases
  * @hw: pointer to hardware structure
  * @reg_addr: 32 bit address of PHY register to read
  * @phy_data: Pointer to read data from PHY register
@@ -212,10 +288,7 @@ out:
 s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
 			       u32 device_type, u16 *phy_data)
 {
-	u32 command;
-	u32 i;
-	u32 data;
-	s32 status = 0;
+	s32 status;
 	u16 gssr;
 
 	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
@@ -223,86 +296,93 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
 	else
 		gssr = IXGBE_GSSR_PHY0_SM;
 
-	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
+	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
+		status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
+						phy_data);
+		hw->mac.ops.release_swfw_sync(hw, gssr);
+	} else {
 		status = IXGBE_ERR_SWFW_SYNC;
+	}
 
-	if (status == 0) {
-		/* Setup and write the address cycle command */
-		command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
-			   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-			   (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-			   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+	return status;
+}
 
-		IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+/**
+ * ixgbe_write_phy_reg_mdi - Writes a value to a specified PHY register
+ * without SWFW lock
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ **/
+s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+			    u32 device_type, u16 phy_data)
+{
+	u32 i, command;
 
-		/*
-		 * Check every 10 usec to see if the address cycle completed.
- * The MDI Command bit will clear when the operation is - * complete - */ - for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - udelay(10); + /* Put the data in the MDI single read and write data register*/ + IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); - command = IXGBE_READ_REG(hw, IXGBE_MSCA); + /* Setup and write the address cycle command */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) - break; - } + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - hw_dbg(hw, "PHY address command did not complete.\n"); - status = IXGBE_ERR_PHY; - } + /* + * Check every 10 usec to see if the address cycle completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); - if (status == 0) { - /* - * Address cycle complete, setup and write the read - * command - */ - command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.mdio.prtad << - IXGBE_MSCA_PHY_ADDR_SHIFT) | - (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); - - IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); - - /* - * Check every 10 usec to see if the address cycle - * completed. The MDI Command bit will clear when the - * operation is complete - */ - for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { - udelay(10); - - command = IXGBE_READ_REG(hw, IXGBE_MSCA); - - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) - break; - } + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { - hw_dbg(hw, "PHY read command didn't complete\n"); - status = IXGBE_ERR_PHY; - } else { - /* - * Read operation is complete. Get the data - * from MSRWD - */ - data = IXGBE_READ_REG(hw, IXGBE_MSRWD); - data >>= IXGBE_MSRWD_READ_DATA_SHIFT; - *phy_data = (u16)(data); - } - } + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY address cmd didn't complete\n"); + return IXGBE_ERR_PHY; + } - hw->mac.ops.release_swfw_sync(hw, gssr); + /* + * Address cycle complete, setup and write the write + * command + */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* Check every 10 usec to see if the address cycle + * completed. 
The MDI Command bit will clear when the
+	 * operation is complete
+	 */
+	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+		udelay(10);
+
+		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+			break;
+	}
 
+	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+		hw_dbg(hw, "PHY write cmd didn't complete\n");
+		return IXGBE_ERR_PHY;
+	}
+
+	return 0;
 }
 
 /**
  * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
+ * using SWFW lock - this function is needed in most cases
  * @hw: pointer to hardware structure
  * @reg_addr: 32 bit PHY register to write
  * @device_type: 5 bit device type
@@ -311,9 +391,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
 s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
 				u32 device_type, u16 phy_data)
 {
-	u32 command;
-	u32 i;
-	s32 status = 0;
+	s32 status;
 	u16 gssr;
 
 	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
@@ -321,74 +399,12 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
 	else
 		gssr = IXGBE_GSSR_PHY0_SM;
 
-	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
-		status = IXGBE_ERR_SWFW_SYNC;
-
-	if (status == 0) {
-		/* Put the data in the MDI single read and write data register*/
-		IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
-
-		/* Setup and write the address cycle command */
-		command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
-			   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-			   (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-			   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
-
-		IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
-		/*
-		 * Check every 10 usec to see if the address cycle completed.
-		 * The MDI Command bit will clear when the operation is
-		 * complete
-		 */
-		for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
-			udelay(10);
-
-			command = IXGBE_READ_REG(hw, IXGBE_MSCA);
-
-			if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
-				break;
-		}
-
-		if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
-			hw_dbg(hw, "PHY address cmd didn't complete\n");
-			status = IXGBE_ERR_PHY;
-		}
-
-		if (status == 0) {
-			/*
-			 * Address cycle complete, setup and write the write
-			 * command
-			 */
-			command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
-				   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-				   (hw->phy.mdio.prtad <<
-				    IXGBE_MSCA_PHY_ADDR_SHIFT) |
-				   (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
-
-			IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
-			/*
-			 * Check every 10 usec to see if the address cycle
-			 * completed. The MDI Command bit will clear when the
-			 * operation is complete
-			 */
-			for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
-				udelay(10);
-
-				command = IXGBE_READ_REG(hw, IXGBE_MSCA);
-
-				if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
-					break;
-			}
-
-			if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
-				hw_dbg(hw, "PHY address cmd didn't complete\n");
-				status = IXGBE_ERR_PHY;
-			}
-		}
-
+	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
+		status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
+						 phy_data);
 		hw->mac.ops.release_swfw_sync(hw, gssr);
+	} else {
+		status = IXGBE_ERR_SWFW_SYNC;
 	}
 
 	return status;
@@ -825,9 +841,35 @@ out:
 }
 
 /**
- * ixgbe_identify_sfp_module_generic - Identifies SFP modules
+ * ixgbe_identify_module_generic - Identifies module type
  * @hw: pointer to hardware structure
  *
+ * Determines the HW type and calls the appropriate function.
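+ *
+ * A minimal caller sketch, assuming the phy_ops tables in this series
+ * that route the .identify_sfp hook here:
+ *
+ *	s32 err = hw->phy.ops.identify_sfp(hw);
+ *
+ *	if (err == IXGBE_ERR_SFP_NOT_PRESENT)
+ *		return err;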
+ **/
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_ERR_SFP_NOT_PRESENT;
+
+	switch (hw->mac.ops.get_media_type(hw)) {
+	case ixgbe_media_type_fiber:
+		status = ixgbe_identify_sfp_module_generic(hw);
+		break;
+	case ixgbe_media_type_fiber_qsfp:
+		status = ixgbe_identify_qsfp_module_generic(hw);
+		break;
+	default:
+		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+		status = IXGBE_ERR_SFP_NOT_PRESENT;
+		break;
+	}
+
+	return status;
+}
+
+/**
+ * ixgbe_identify_sfp_module_generic - Identifies SFP modules
+ * @hw: pointer to hardware structure
+ *
  * Searches for and identifies the SFP module and assigns appropriate PHY type.
  **/
 s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
@@ -1106,6 +1148,156 @@ err_read_i2c_eeprom:
 }
 
 /**
+ * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules
+ * @hw: pointer to hardware structure
+ *
+ * Searches for and identifies the QSFP module and assigns appropriate PHY type
+ **/
+s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+{
+	struct ixgbe_adapter *adapter = hw->back;
+	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+	u32 vendor_oui = 0;
+	enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
+	u8 identifier = 0;
+	u8 comp_codes_1g = 0;
+	u8 comp_codes_10g = 0;
+	u8 oui_bytes[3] = {0, 0, 0};
+	u16 enforce_sfp = 0;
+
+	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
+		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+		status = IXGBE_ERR_SFP_NOT_PRESENT;
+		goto out;
+	}
+
+	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
+					     &identifier);
+
+	if (status != 0)
+		goto err_read_i2c_eeprom;
+
+	if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
+		hw->phy.type = ixgbe_phy_sfp_unsupported;
+		status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+		goto out;
+	}
+
+	hw->phy.id = identifier;
+
+	/* LAN ID is needed for sfp_type determination */
+	hw->mac.ops.set_lan_id(hw);
+
+	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
+					     &comp_codes_10g);
+
+	if (status != 0)
+		goto err_read_i2c_eeprom;
+
+	if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) {
+		hw->phy.type = ixgbe_phy_qsfp_passive_unknown;
+		if (hw->bus.lan_id == 0)
+			hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0;
+		else
+			hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1;
+	} else if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE) {
+		hw->phy.type = ixgbe_phy_qsfp_active_unknown;
+		if (hw->bus.lan_id == 0)
+			hw->phy.sfp_type = ixgbe_sfp_type_da_act_lmt_core0;
+		else
+			hw->phy.sfp_type = ixgbe_sfp_type_da_act_lmt_core1;
+	} else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
+				     IXGBE_SFF_10GBASELR_CAPABLE)) {
+		if (hw->bus.lan_id == 0)
+			hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0;
+		else
+			hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1;
+	} else {
+		/* unsupported module type */
+		hw->phy.type = ixgbe_phy_sfp_unsupported;
+		status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+		goto out;
+	}
+
+	if (hw->phy.sfp_type != stored_sfp_type)
+		hw->phy.sfp_setup_needed = true;
+
+	/* Determine if the QSFP+ PHY is dual speed or not. 
*/ + hw->phy.multispeed_fiber = false; + if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || + ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = true; + + /* Determine PHY vendor for optical modules */ + if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | + IXGBE_SFF_10GBASELR_CAPABLE)) { + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0, + &oui_bytes[0]); + + if (status != 0) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1, + &oui_bytes[1]); + + if (status != 0) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2, + &oui_bytes[2]); + + if (status != 0) + goto err_read_i2c_eeprom; + + vendor_oui = + ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | + (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | + (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); + + if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL) + hw->phy.type = ixgbe_phy_qsfp_intel; + else + hw->phy.type = ixgbe_phy_qsfp_unknown; + + hw->mac.ops.get_device_caps(hw, &enforce_sfp); + if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) { + /* Make sure we're a supported PHY type */ + if (hw->phy.type == ixgbe_phy_qsfp_intel) { + status = 0; + } else { + if (hw->allow_unsupported_sfp == true) { + e_warn(hw, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n"); + status = 0; + } else { + hw_dbg(hw, + "QSFP module not supported\n"); + hw->phy.type = + ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + } + } + } else { + status = 0; + } + } + +out: + return status; + +err_read_i2c_eeprom: + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + hw->phy.id = 0; + hw->phy.type = ixgbe_phy_unknown; + + return IXGBE_ERR_SFP_NOT_PRESENT; +} + +/** * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence * @hw: pointer to hardware structure * @list_offset: offset to the SFP ID list diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index 886a3431cf5b..138dadd7cf33 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -33,17 +33,25 @@ #define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 /* EEPROM byte offsets */ -#define IXGBE_SFF_IDENTIFIER 0x0 -#define IXGBE_SFF_IDENTIFIER_SFP 0x3 -#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25 -#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26 -#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27 -#define IXGBE_SFF_1GBE_COMP_CODES 0x6 -#define IXGBE_SFF_10GBE_COMP_CODES 0x3 -#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8 -#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C -#define IXGBE_SFF_SFF_8472_SWAP 0x5C -#define IXGBE_SFF_SFF_8472_COMP 0x5E +#define IXGBE_SFF_IDENTIFIER 0x0 +#define IXGBE_SFF_IDENTIFIER_SFP 0x3 +#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25 +#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26 +#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27 +#define IXGBE_SFF_1GBE_COMP_CODES 0x6 +#define IXGBE_SFF_10GBE_COMP_CODES 0x3 +#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8 +#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C +#define IXGBE_SFF_SFF_8472_SWAP 0x5C +#define IXGBE_SFF_SFF_8472_COMP 0x5E +#define IXGBE_SFF_SFF_8472_OSCB 0x6E +#define 
IXGBE_SFF_SFF_8472_ESCB 0x76 +#define IXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD +#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5 +#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6 +#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7 +#define IXGBE_SFF_QSFP_10GBE_COMP 0x83 +#define IXGBE_SFF_QSFP_1GBE_COMP 0x86 /* Bitmasks */ #define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 @@ -54,7 +62,12 @@ #define IXGBE_SFF_1GBASET_CAPABLE 0x8 #define IXGBE_SFF_10GBASESR_CAPABLE 0x10 #define IXGBE_SFF_10GBASELR_CAPABLE 0x20 +#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 +#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0 #define IXGBE_SFF_ADDRESSING_MODE 0x4 +#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 +#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 #define IXGBE_I2C_EEPROM_READ_MASK 0x100 #define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 #define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 @@ -102,6 +115,10 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data); s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data); +s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); +s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, ixgbe_link_speed speed, @@ -121,7 +138,9 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, u16 *firmware_version); s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); +s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); +s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, u16 *list_offset, u16 *data_offset); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 331987d6815c..5184e2a1a7d8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -885,8 +885,8 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) ixgbe_ptp_reset(adapter); - /* set the flag that PTP has been enabled */ - adapter->flags2 |= IXGBE_FLAG2_PTP_ENABLED; + /* enter the IXGBE_PTP_RUNNING state */ + set_bit(__IXGBE_PTP_RUNNING, &adapter->state); return; } @@ -899,10 +899,12 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) */ void ixgbe_ptp_stop(struct ixgbe_adapter *adapter) { - /* stop the overflow check task */ - adapter->flags2 &= ~(IXGBE_FLAG2_PTP_ENABLED | - IXGBE_FLAG2_PTP_PPS_ENABLED); + /* Leave the IXGBE_PTP_RUNNING state. 
*/ + if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state)) + return; + /* stop the PPS signal */ + adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; ixgbe_ptp_setup_sdp(adapter); cancel_work_sync(&adapter->ptp_tx_work); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 1e7d587c4e57..73c8e73bb6e7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -173,39 +173,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) ixgbe_disable_sriov(adapter); } -static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - struct pci_dev *vfdev; - int dev_id; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - dev_id = IXGBE_DEV_ID_82599_VF; - break; - case ixgbe_mac_X540: - dev_id = IXGBE_DEV_ID_X540_VF; - break; - default: - return false; - } - - /* loop through all the VFs to see if we own any that are assigned */ - vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL); - while (vfdev) { - /* if we don't own it we don't care */ - if (vfdev->is_virtfn && vfdev->physfn == pdev) { - /* if it is assigned we cannot release it */ - if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) - return true; - } - - vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev); - } - - return false; -} - #endif /* #ifdef CONFIG_PCI_IOV */ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) { @@ -235,7 +202,7 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) * without causing issues, so just leave the hardware * available but disabled */ - if (ixgbe_vfs_are_assigned(adapter)) { + if (pci_vfs_assigned(adapter->pdev)) { e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n"); return -EPERM; } @@ -768,6 +735,29 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter, return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0; } +static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) +{ + u32 vlvf; + s32 regindex; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the vlan id in the VLVF entries */ + for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { + vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); + if ((vlvf & VLAN_VID_MASK) == vlan) + break; + } + + /* Return a negative value if not found */ + if (regindex >= IXGBE_VLVF_ENTRIES) + regindex = -1; + + return regindex; +} + static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) { @@ -775,6 +765,9 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); int err; + s32 reg_ndx; + u32 vlvf; + u32 bits; u8 tcs = netdev_get_num_tc(adapter->netdev); if (adapter->vfinfo[vf].pf_vlan || tcs) { @@ -790,10 +783,50 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, else if (adapter->vfinfo[vf].vlan_count) adapter->vfinfo[vf].vlan_count--; + /* in case of promiscuous mode any VLAN filter set for a VF must + * also have the PF pool added to it. + */ + if (add && adapter->netdev->flags & IFF_PROMISC) + err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + err = ixgbe_set_vf_vlan(adapter, add, vid, vf); if (!err && adapter->vfinfo[vf].spoofchk_enabled) hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + /* Go through all the checks to see if the VLAN filter should + * be wiped completely. 
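+	 *
+	 * The VLVFB layout assumed below puts pools 0-31 in
+	 * VLVFB(reg_ndx * 2) and pools 32-63 in VLVFB(reg_ndx * 2 + 1),
+	 * so as a sketch, dropping the PF pool bit amounts to:
+	 *
+	 *	if (VMDQ_P(0) < 32)
+	 *		bits &= ~(1 << VMDQ_P(0));
+	 *	else
+	 *		bits &= ~(1 << (VMDQ_P(0) - 32));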
+ */ + if (!add && adapter->netdev->flags & IFF_PROMISC) { + reg_ndx = ixgbe_find_vlvf_entry(hw, vid); + if (reg_ndx < 0) + goto out; + vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx)); + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ + if (VMDQ_P(0) < 32) { + bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2)); + bits &= ~(1 << VMDQ_P(0)); + bits |= IXGBE_READ_REG(hw, + IXGBE_VLVFB(reg_ndx * 2) + 1); + } else { + bits = IXGBE_READ_REG(hw, + IXGBE_VLVFB(reg_ndx * 2) + 1); + bits &= ~(1 << (VMDQ_P(0) - 32)); + bits |= IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2)); + } + + /* If the filter was removed then ensure PF pool bit + * is cleared if the PF only added itself to the pool + * because the PF is in promiscuous mode. + */ + if ((vlvf & VLAN_VID_MASK) == vid && + !test_bit(vid, adapter->active_vlans) && !bits) + ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + } + +out: + return err; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 70c6aa3d3f95..161ff18be775 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -69,6 +69,7 @@ #define IXGBE_DEV_ID_82599_LS 0x154F #define IXGBE_DEV_ID_X540T 0x1528 #define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A +#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558 #define IXGBE_DEV_ID_X540T1 0x1560 /* VF Device IDs */ @@ -1520,9 +1521,11 @@ enum { #define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ #define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */ #define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */ +#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */ #define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */ #define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */ #define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 Native Function */ +#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */ /* LEDCTL Bit Masks */ #define IXGBE_LED_IVRT_BASE 0x00000040 @@ -2582,6 +2585,10 @@ enum ixgbe_phy_type { ixgbe_phy_sfp_ftl_active, ixgbe_phy_sfp_unknown, ixgbe_phy_sfp_intel, + ixgbe_phy_qsfp_passive_unknown, + ixgbe_phy_qsfp_active_unknown, + ixgbe_phy_qsfp_intel, + ixgbe_phy_qsfp_unknown, ixgbe_phy_sfp_unsupported, ixgbe_phy_generic }; @@ -2622,6 +2629,8 @@ enum ixgbe_sfp_type { enum ixgbe_media_type { ixgbe_media_type_unknown = 0, ixgbe_media_type_fiber, + ixgbe_media_type_fiber_fixed, + ixgbe_media_type_fiber_qsfp, ixgbe_media_type_fiber_lco, ixgbe_media_type_copper, ixgbe_media_type_backplane, @@ -2885,6 +2894,8 @@ struct ixgbe_phy_operations { s32 (*reset)(struct ixgbe_hw *); s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); + s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *); + s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16); s32 (*setup_link)(struct ixgbe_hw *); s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); @@ -2953,6 +2964,7 @@ struct ixgbe_phy_info { bool smart_speed_active; bool multispeed_fiber; bool reset_if_overtemp; + bool qsfp_shared_i2c_bus; }; #include "ixgbe_mbx.h" diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index b017818bccae..2777c70c603b 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -79,10 +79,10 @@ #define MVNETA_MAC_ADDR_HIGH 0x2418 #define MVNETA_SDMA_CONFIG 0x241c #define MVNETA_SDMA_BRST_SIZE_16 
4 -#define MVNETA_NO_DESC_SWAP 0x0 #define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1) #define MVNETA_RX_NO_DATA_SWAP BIT(4) #define MVNETA_TX_NO_DATA_SWAP BIT(5) +#define MVNETA_DESC_SWAP BIT(6) #define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22) #define MVNETA_PORT_STATUS 0x2444 #define MVNETA_TX_IN_PRGRS BIT(1) @@ -264,8 +264,7 @@ struct mvneta_port { * layout of the transmit and reception DMA descriptors, and their * layout is therefore defined by the hardware design */ -struct mvneta_tx_desc { - u32 command; /* Options used by HW for packet transmitting.*/ + #define MVNETA_TX_L3_OFF_SHIFT 0 #define MVNETA_TX_IP_HLEN_SHIFT 8 #define MVNETA_TX_L4_UDP BIT(16) @@ -280,15 +279,6 @@ struct mvneta_tx_desc { #define MVNETA_TX_L4_CSUM_FULL BIT(30) #define MVNETA_TX_L4_CSUM_NOT BIT(31) - u16 reserverd1; /* csum_l4 (for future use) */ - u16 data_size; /* Data size of transmitted packet in bytes */ - u32 buf_phys_addr; /* Physical addr of transmitted buffer */ - u32 reserved2; /* hw_cmd - (for future use, PMT) */ - u32 reserved3[4]; /* Reserved - (for future use) */ -}; - -struct mvneta_rx_desc { - u32 status; /* Info about received packet */ #define MVNETA_RXD_ERR_CRC 0x0 #define MVNETA_RXD_ERR_SUMMARY BIT(16) #define MVNETA_RXD_ERR_OVERRUN BIT(17) @@ -299,16 +289,57 @@ struct mvneta_rx_desc { #define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27)) #define MVNETA_RXD_L4_CSUM_OK BIT(30) +#if defined(__LITTLE_ENDIAN) +struct mvneta_tx_desc { + u32 command; /* Options used by HW for packet transmitting.*/ + u16 reserverd1; /* csum_l4 (for future use) */ + u16 data_size; /* Data size of transmitted packet in bytes */ + u32 buf_phys_addr; /* Physical addr of transmitted buffer */ + u32 reserved2; /* hw_cmd - (for future use, PMT) */ + u32 reserved3[4]; /* Reserved - (for future use) */ +}; + +struct mvneta_rx_desc { + u32 status; /* Info about received packet */ u16 reserved1; /* pnc_info - (for future use, PnC) */ u16 data_size; /* Size of received packet in bytes */ + u32 buf_phys_addr; /* Physical address of the buffer */ u32 reserved2; /* pnc_flow_id (for future use, PnC) */ + u32 buf_cookie; /* cookie for access to RX buffer in rx path */ u16 reserved3; /* prefetch_cmd, for future use */ u16 reserved4; /* csum_l4 - (for future use, PnC) */ + + u32 reserved5; /* pnc_extra PnC (for future use, PnC) */ + u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */ +}; +#else +struct mvneta_tx_desc { + u16 data_size; /* Data size of transmitted packet in bytes */ + u16 reserverd1; /* csum_l4 (for future use) */ + u32 command; /* Options used by HW for packet transmitting.*/ + u32 reserved2; /* hw_cmd - (for future use, PMT) */ + u32 buf_phys_addr; /* Physical addr of transmitted buffer */ + u32 reserved3[4]; /* Reserved - (for future use) */ +}; + +struct mvneta_rx_desc { + u16 data_size; /* Size of received packet in bytes */ + u16 reserved1; /* pnc_info - (for future use, PnC) */ + u32 status; /* Info about received packet */ + + u32 reserved2; /* pnc_flow_id (for future use, PnC) */ + u32 buf_phys_addr; /* Physical address of the buffer */ + + u16 reserved4; /* csum_l4 - (for future use, PnC) */ + u16 reserved3; /* prefetch_cmd, for future use */ + u32 buf_cookie; /* cookie for access to RX buffer in rx path */ + u32 reserved5; /* pnc_extra PnC (for future use, PnC) */ u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */ }; +#endif struct mvneta_tx_queue { /* Number of this TX queue, in the range 0-7 */ @@ -908,9 +939,11 @@ static void mvneta_defaults_set(struct mvneta_port *pp) /* Default 
burst size */ val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16); val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16); + val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP; - val |= (MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP | - MVNETA_NO_DESC_SWAP); +#if defined(__BIG_ENDIAN) + val |= MVNETA_DESC_SWAP; +#endif /* Assign port SDMA configuration */ mvreg_write(pp, MVNETA_SDMA_CONFIG, val); diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 299d0184f983..141322c31ae9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -809,6 +809,15 @@ int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, return -EPERM; } +int MLX4_CMD_GET_OP_REQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + return -EPERM; +} + int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -1252,6 +1261,15 @@ static struct mlx4_cmd_info cmd_info[] = { .wrapper = MLX4_CMD_UPDATE_QP_wrapper }, { + .opcode = MLX4_CMD_GET_OP_REQ, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = MLX4_CMD_GET_OP_REQ_wrapper, + }, + { .opcode = MLX4_CMD_CONF_SPECIAL_QP, .has_inbox = false, .has_outbox = false, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 7c492382da09..6dcca9817888 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -191,6 +191,39 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp); } +static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, int index, + u8 owner) +{ + __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT)); + struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE; + struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; + void *end = ring->buf + ring->buf_size; + __be32 *ptr = (__be32 *)tx_desc; + int i; + + /* Optimize the common case when there are no wraparounds */ + if (likely((void *)tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) { + /* Stamp the freed descriptor */ + for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; + i += STAMP_STRIDE) { + *ptr = stamp; + ptr += STAMP_DWORDS; + } + } else { + /* Stamp the freed descriptor */ + for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; + i += STAMP_STRIDE) { + *ptr = stamp; + ptr += STAMP_DWORDS; + if ((void *)ptr >= end) { + ptr = ring->buf; + stamp ^= cpu_to_be32(0x80000000); + } + } + } +} + static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, @@ -205,8 +238,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, void *end = ring->buf + ring->buf_size; int frags = skb_shinfo(skb)->nr_frags; int i; - __be32 *ptr = (__be32 *)tx_desc; - __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT)); struct skb_shared_hwtstamps hwts; if (timestamp) { @@ -232,12 +263,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, skb_frag_size(frag), PCI_DMA_TODEVICE); } } - /* Stamp the freed descriptor */ - for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) { - *ptr = stamp; - ptr += STAMP_DWORDS; - } - } else { if (!tx_info->inl) { if ((void *) data >= end) { @@ -263,16 +288,6 @@ static u32 mlx4_en_free_tx_desc(struct 
mlx4_en_priv *priv, ++data; } } - /* Stamp the freed descriptor */ - for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) { - *ptr = stamp; - ptr += STAMP_DWORDS; - if ((void *) ptr >= end) { - ptr = ring->buf; - stamp ^= cpu_to_be32(0x80000000); - } - } - } dev_kfree_skb_any(skb); return tx_info->nr_txbb; @@ -318,8 +333,9 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; struct mlx4_cqe *cqe; u16 index; - u16 new_index, ring_index; + u16 new_index, ring_index, stamp_index; u32 txbbs_skipped = 0; + u32 txbbs_stamp = 0; u32 cons_index = mcq->cons_index; int size = cq->size; u32 size_mask = ring->size_mask; @@ -335,6 +351,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) index = cons_index & size_mask; cqe = &buf[(index << factor) + factor]; ring_index = ring->cons & size_mask; + stamp_index = ring_index; /* Process all completed CQEs */ while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, @@ -359,6 +376,12 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) priv, ring, ring_index, !!((ring->cons + txbbs_skipped) & ring->size), timestamp); + + mlx4_en_stamp_wqe(priv, ring, stamp_index, + !!((ring->cons + txbbs_stamp) & + ring->size)); + stamp_index = ring_index; + txbbs_stamp = txbbs_skipped; packets++; bytes += ring->tx_info[ring_index].nr_bytes; } while (ring_index != new_index); diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 7e042869ef0c..0416c5b3b35c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -79,6 +79,7 @@ enum { (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \ (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \ (1ull << MLX4_EVENT_TYPE_CMD) | \ + (1ull << MLX4_EVENT_TYPE_OP_REQUIRED) | \ (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \ (1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \ (1ull << MLX4_EVENT_TYPE_FATAL_WARNING)) @@ -629,6 +630,14 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn); break; + case MLX4_EVENT_TYPE_OP_REQUIRED: + atomic_inc(&priv->opreq_count); + /* FW commands can't be executed from interrupt context + * working in deferred task + */ + queue_work(mlx4_wq, &priv->opreq_task); + break; + case MLX4_EVENT_TYPE_COMM_CHANNEL: if (!mlx4_is_master(dev)) { mlx4_warn(dev, "Received comm channel event " diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 6fc6dabc78d5..0d63daa2f422 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -1696,3 +1696,107 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port) MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); } EXPORT_SYMBOL_GPL(mlx4_wol_write); + +enum { + ADD_TO_MCG = 0x26, +}; + + +void mlx4_opreq_action(struct work_struct *work) +{ + struct mlx4_priv *priv = container_of(work, struct mlx4_priv, + opreq_task); + struct mlx4_dev *dev = &priv->dev; + int num_tasks = atomic_read(&priv->opreq_count); + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mgm *mgm; + u32 *outbox; + u32 modifier; + u16 token; + u16 type_m; + u16 type; + int err; + u32 num_qps; + struct mlx4_qp qp; + int i; + u8 rem_mcg; + u8 prot; + +#define GET_OP_REQ_MODIFIER_OFFSET 0x08 +#define GET_OP_REQ_TOKEN_OFFSET 0x14 +#define GET_OP_REQ_TYPE_OFFSET 0x1a +#define GET_OP_REQ_DATA_OFFSET 0x20 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if 
(IS_ERR(mailbox)) { + mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n"); + return; + } + outbox = mailbox->buf; + + while (num_tasks) { + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, + MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) { + mlx4_err(dev, "Failed to retrieve required operation: %d\n", + err); + return; + } + MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); + MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET); + MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET); + type_m = type >> 12; + type &= 0xfff; + + switch (type) { + case ADD_TO_MCG: + if (dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { + mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n"); + err = EPERM; + break; + } + mgm = (struct mlx4_mgm *)((u8 *)(outbox) + + GET_OP_REQ_DATA_OFFSET); + num_qps = be32_to_cpu(mgm->members_count) & + MGM_QPN_MASK; + rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1; + prot = ((u8 *)(&mgm->members_count))[0] >> 6; + + for (i = 0; i < num_qps; i++) { + qp.qpn = be32_to_cpu(mgm->qp[i]); + if (rem_mcg) + err = mlx4_multicast_detach(dev, &qp, + mgm->gid, + prot, 0); + else + err = mlx4_multicast_attach(dev, &qp, + mgm->gid, + mgm->gid[5] + , 0, prot, + NULL); + if (err) + break; + } + break; + default: + mlx4_warn(dev, "Bad type for required operation\n"); + err = EINVAL; + break; + } + err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16), + 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) { + mlx4_err(dev, "Failed to acknowledge required request: %d\n", + err); + goto out; + } + memset(outbox, 0, 0xffc); + num_tasks = atomic_dec_return(&priv->opreq_count); + } + +out: + mlx4_free_cmd_mailbox(dev, mailbox); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index fdf41665a059..a0a368b7c939 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -220,5 +220,6 @@ int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm); int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev); int mlx4_NOP(struct mlx4_dev *dev); int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg); +void mlx4_opreq_action(struct work_struct *work); #endif /* MLX4_FW_H */ diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 36be3208786a..60c9f4f103fc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1692,11 +1692,19 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) goto err_xrcd_table_free; } + if (!mlx4_is_slave(dev)) { + err = mlx4_init_mcg_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n"); + goto err_mr_table_free; + } + } + err = mlx4_init_eq_table(dev); if (err) { mlx4_err(dev, "Failed to initialize " "event queue table, aborting.\n"); - goto err_mr_table_free; + goto err_mcg_table_free; } err = mlx4_cmd_use_events(dev); @@ -1746,19 +1754,10 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) goto err_srq_table_free; } - if (!mlx4_is_slave(dev)) { - err = mlx4_init_mcg_table(dev); - if (err) { - mlx4_err(dev, "Failed to initialize " - "multicast group table, aborting.\n"); - goto err_qp_table_free; - } - } - err = mlx4_init_counters_table(dev); if (err && err != -ENOENT) { mlx4_err(dev, "Failed to initialize counters table, aborting.\n"); - goto err_mcg_table_free; + goto err_qp_table_free; } if (!mlx4_is_slave(dev)) { @@ -1803,9
+1802,6 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) err_counters_table_free: mlx4_cleanup_counters_table(dev); -err_mcg_table_free: - mlx4_cleanup_mcg_table(dev); - err_qp_table_free: mlx4_cleanup_qp_table(dev); @@ -1821,6 +1817,10 @@ err_cmd_poll: err_eq_table_free: mlx4_cleanup_eq_table(dev); +err_mcg_table_free: + if (!mlx4_is_slave(dev)) + mlx4_cleanup_mcg_table(dev); + err_mr_table_free: mlx4_cleanup_mr_table(dev); @@ -2197,6 +2197,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) } } + atomic_set(&priv->opreq_count, 0); + INIT_WORK(&priv->opreq_task, mlx4_opreq_action); + /* * Now reset the HCA before we touch the PCI capabilities or * attempt a firmware command, since a boot ROM may have left @@ -2315,12 +2318,12 @@ err_port: mlx4_cleanup_port_info(&priv->port[port]); mlx4_cleanup_counters_table(dev); - mlx4_cleanup_mcg_table(dev); mlx4_cleanup_qp_table(dev); mlx4_cleanup_srq_table(dev); mlx4_cleanup_cq_table(dev); mlx4_cmd_use_polling(dev); mlx4_cleanup_eq_table(dev); + mlx4_cleanup_mcg_table(dev); mlx4_cleanup_mr_table(dev); mlx4_cleanup_xrcd_table(dev); mlx4_cleanup_pd_table(dev); @@ -2403,12 +2406,12 @@ static void mlx4_remove_one(struct pci_dev *pdev) RES_TR_FREE_SLAVES_ONLY); mlx4_cleanup_counters_table(dev); - mlx4_cleanup_mcg_table(dev); mlx4_cleanup_qp_table(dev); mlx4_cleanup_srq_table(dev); mlx4_cleanup_cq_table(dev); mlx4_cmd_use_polling(dev); mlx4_cleanup_eq_table(dev); + mlx4_cleanup_mcg_table(dev); mlx4_cleanup_mr_table(dev); mlx4_cleanup_xrcd_table(dev); mlx4_cleanup_pd_table(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index f3e804f2a35f..55f6245efb6c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -39,19 +39,8 @@ #include "mlx4.h" -#define MGM_QPN_MASK 0x00FFFFFF -#define MGM_BLCK_LB_BIT 30 - static const u8 zero_gid[16]; /* automatically initialized to 0 */ -struct mlx4_mgm { - __be32 next_gid_index; - __be32 members_count; - u32 reserved[2]; - u8 gid[16]; - __be32 qp[MLX4_MAX_QP_PER_MGM]; -}; - int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) { return 1 << dev->oper_log_mgm_entry_size; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 17d9277e33ef..348bb8c7d9a7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -554,6 +554,17 @@ struct mlx4_mfunc { struct mlx4_mfunc_master_ctx master; }; +#define MGM_QPN_MASK 0x00FFFFFF +#define MGM_BLCK_LB_BIT 30 + +struct mlx4_mgm { + __be32 next_gid_index; + __be32 members_count; + u32 reserved[2]; + u8 gid[16]; + __be32 qp[MLX4_MAX_QP_PER_MGM]; +}; + struct mlx4_cmd { struct pci_pool *pool; void __iomem *hcr; @@ -802,6 +813,8 @@ struct mlx4_priv { u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; __be64 slave_node_guids[MLX4_MFUNC_MAX]; + atomic_t opreq_count; + struct work_struct opreq_task; }; static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 967bae8b85c5..d4cdf4dc4bc4 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -244,7 +244,7 @@ struct myri10ge_priv { int fw_ver_minor; int fw_ver_tiny; int adopted_rx_filter_bug; - u8 mac_addr[6]; /* eeprom mac address */ + u8 mac_addr[ETH_ALEN]; /* eeprom mac address */ unsigned long serial_number; int 
vendor_specific_offset; int fw_multicast_support; diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index e88bdb1aa669..dcfe58fa3b8a 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -922,7 +922,7 @@ static void __init get_mac_address(struct net_device *dev) { struct w90p910_ether *ether = netdev_priv(dev); struct platform_device *pdev; - char addr[6]; + char addr[ETH_ALEN]; pdev = ether->pdev; @@ -934,7 +934,7 @@ static void __init get_mac_address(struct net_device *dev) addr[5] = 0xa8; if (is_valid_ether_addr(addr)) - memcpy(dev->dev_addr, &addr, 0x06); + memcpy(dev->dev_addr, &addr, ETH_ALEN); else dev_err(&pdev->dev, "invalid mac address\n"); } diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 7779036690cc..6797b1075874 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -582,6 +582,19 @@ struct pch_gbe_hw_stats { }; /** + * struct pch_gbe_privdata - PCI Device ID driver data + * @phy_tx_clk_delay: Bool, configure the PHY TX delay in software + * @phy_disable_hibernate: Bool, disable PHY hibernation + * @platform_init: Platform initialization callback, called from + * probe, prior to PHY initialization. + */ +struct pch_gbe_privdata { + bool phy_tx_clk_delay; + bool phy_disable_hibernate; + int (*platform_init)(struct pci_dev *pdev); +}; + +/** * struct pch_gbe_adapter - board specific private data structure * @stats_lock: Spinlock structure for status * @ethtool_lock: Spinlock structure for ethtool @@ -604,6 +617,7 @@ struct pch_gbe_hw_stats { * @rx_buffer_len: Receive buffer length * @tx_queue_len: Transmit queue length * @have_msi: PCI MSI mode flag + * @pch_gbe_privdata: PCI Device ID driver_data */ struct pch_gbe_adapter { @@ -631,6 +645,7 @@ struct pch_gbe_adapter { int hwts_tx_en; int hwts_rx_en; struct pci_dev *ptp_pdev; + struct pch_gbe_privdata *pdata; }; #define pch_gbe_hw_to_adapter(hw) container_of(hw, struct pch_gbe_adapter, hw) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index ab1039a95bf9..e19f1be60d5e 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -23,6 +23,7 @@ #include <linux/module.h> #include <linux/net_tstamp.h> #include <linux/ptp_classify.h> +#include <linux/gpio.h> #define DRV_VERSION "1.01" const char pch_driver_version[] = DRV_VERSION; @@ -111,6 +112,8 @@ const char pch_driver_version[] = DRV_VERSION; #define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81" #define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00" +#define MINNOW_PHY_RESET_GPIO 13 + static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); @@ -682,7 +685,7 @@ static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter) } adapter->hw.phy.addr = adapter->mii.phy_id; netdev_dbg(netdev, "phy_addr = %d\n", adapter->mii.phy_id); - if (addr == 32) + if (addr == PCH_GBE_PHY_REGS_LEN) return -EAGAIN; /* Selected the phy and isolate the rest */ for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) { @@ -2635,6 +2638,9 @@ static int pch_gbe_probe(struct pci_dev *pdev, adapter->pdev = pdev; adapter->hw.back = adapter; adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR]; + adapter->pdata = (struct pch_gbe_privdata 
*)pci_id->driver_data; + if (adapter->pdata && adapter->pdata->platform_init) + adapter->pdata->platform_init(pdev); adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number, PCI_DEVFN(12, 4)); @@ -2710,6 +2716,10 @@ static int pch_gbe_probe(struct pci_dev *pdev, dev_dbg(&pdev->dev, "PCH Network Connection\n"); + /* Disable hibernation on certain platforms */ + if (adapter->pdata && adapter->pdata->phy_disable_hibernate) + pch_gbe_phy_disable_hibernate(&adapter->hw); + device_set_wakeup_enable(&pdev->dev, 1); return 0; @@ -2720,9 +2730,48 @@ err_free_netdev: return ret; } +/* The AR803X PHY on the MinnowBoard requires a physical pin to be toggled to + * ensure it is awake for probe and init. Request the line and reset the PHY. + */ +static int pch_gbe_minnow_platform_init(struct pci_dev *pdev) +{ + unsigned long flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH | GPIOF_EXPORT; + unsigned gpio = MINNOW_PHY_RESET_GPIO; + int ret; + + ret = devm_gpio_request_one(&pdev->dev, gpio, flags, + "minnow_phy_reset"); + if (ret) { + dev_err(&pdev->dev, + "ERR: Can't request PHY reset GPIO line '%d'\n", gpio); + return ret; + } + + gpio_set_value(gpio, 0); + usleep_range(1250, 1500); + gpio_set_value(gpio, 1); + usleep_range(1250, 1500); + + return ret; +} + +static struct pch_gbe_privdata pch_gbe_minnow_privdata = { + .phy_tx_clk_delay = true, + .phy_disable_hibernate = true, + .platform_init = pch_gbe_minnow_platform_init, +}; + static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = { {.vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_IOH1_GBE, + .subvendor = PCI_VENDOR_ID_CIRCUITCO, + .subdevice = PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD, + .class = (PCI_CLASS_NETWORK_ETHERNET << 8), + .class_mask = (0xFFFF00), + .driver_data = (kernel_ulong_t)&pch_gbe_minnow_privdata + }, + {.vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_IOH1_GBE, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .class = (PCI_CLASS_NETWORK_ETHERNET << 8), diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c index da079073a6c6..8b7ff75fc8e0 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c @@ -74,6 +74,15 @@ #define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ #define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ +/* AR8031 PHY Debug Registers */ +#define PHY_AR803X_ID 0x00001374 +#define PHY_AR8031_DBG_OFF 0x1D +#define PHY_AR8031_DBG_DAT 0x1E +#define PHY_AR8031_SERDES 0x05 +#define PHY_AR8031_HIBERNATE 0x0B +#define PHY_AR8031_SERDES_TX_CLK_DLY 0x0100 /* TX clock delay of 2.0ns */ +#define PHY_AR8031_PS_HIB_EN 0x8000 /* Hibernate enable */ + /* Phy Id Register (word 2) */ #define PHY_REVISION_MASK 0x000F @@ -249,6 +258,51 @@ void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw) } /** + * pch_gbe_phy_tx_clk_delay - Setup TX clock delay via the PHY + * @hw: Pointer to the HW structure + * Returns + * 0: Successful. + * -EINVAL: Invalid argument. + */ +static int pch_gbe_phy_tx_clk_delay(struct pch_gbe_hw *hw) +{ + /* The RGMII interface requires a ~2ns TX clock delay. This is typically + * done in layout with a longer trace or via PHY strapping, but can also + * be done via PHY configuration registers. 
+ */ + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + u16 mii_reg; + int ret = 0; + + switch (hw->phy.id) { + case PHY_AR803X_ID: + netdev_dbg(adapter->netdev, + "Configuring AR803X PHY for 2ns TX clock delay\n"); + pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_OFF, &mii_reg); + ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF, + PHY_AR8031_SERDES); + if (ret) + break; + + pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &mii_reg); + mii_reg |= PHY_AR8031_SERDES_TX_CLK_DLY; + ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT, + mii_reg); + break; + default: + netdev_err(adapter->netdev, + "Unknown PHY (%x), could not set TX clock delay\n", + hw->phy.id); + return -EINVAL; + } + + if (ret) + netdev_err(adapter->netdev, + "Could not configure tx clock delay for PHY\n"); + return ret; +} + +/** * pch_gbe_phy_init_setting - PHY initial setting * @hw: Pointer to the HW structure */ @@ -277,4 +331,48 @@ void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw) pch_gbe_phy_read_reg_miic(hw, PHY_PHYSP_CONTROL, &mii_reg); mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX; pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, mii_reg); + + /* Setup a TX clock delay on certain platforms */ + if (adapter->pdata && adapter->pdata->phy_tx_clk_delay) + pch_gbe_phy_tx_clk_delay(hw); +} + +/** + * pch_gbe_phy_disable_hibernate - Disable the PHY low power state + * @hw: Pointer to the HW structure + * Returns + * 0: Successful. + * -EINVAL: Invalid argument. + */ +int pch_gbe_phy_disable_hibernate(struct pch_gbe_hw *hw) +{ + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + u16 mii_reg; + int ret = 0; + + switch (hw->phy.id) { + case PHY_AR803X_ID: + netdev_dbg(adapter->netdev, + "Disabling hibernation for AR803X PHY\n"); + ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF, + PHY_AR8031_HIBERNATE); + if (ret) + break; + + pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &mii_reg); + mii_reg &= ~PHY_AR8031_PS_HIB_EN; + ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT, + mii_reg); + break; + default: + netdev_err(adapter->netdev, + "Unknown PHY (%x), could not disable hibernation\n", + hw->phy.id); + return -EINVAL; + } + + if (ret) + netdev_err(adapter->netdev, + "Could not disable PHY hibernation\n"); + return ret; } diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h index 03264dc7b5ec..0cbe69206e04 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h @@ -33,5 +33,6 @@ void pch_gbe_phy_power_up(struct pch_gbe_hw *hw); void pch_gbe_phy_power_down(struct pch_gbe_hw *hw); void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw); void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw); +int pch_gbe_phy_disable_hibernate(struct pch_gbe_hw *hw); #endif /* _PCH_GBE_PHY_H_ */ diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index a5f0b5da6149..f21ae7b6c766 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -191,7 +191,7 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac) struct device_node *dn = pci_device_to_OF_node(pdev); int len; const u8 *maddr; - u8 addr[6]; + u8 addr[ETH_ALEN]; if (!dn) { dev_dbg(&pdev->dev, @@ -201,8 +201,8 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac) maddr = of_get_property(dn, "local-mac-address", &len); - if (maddr && len == 6) { - memcpy(mac->mac_addr, maddr, 6); + if (maddr && len == ETH_ALEN) { + 
memcpy(mac->mac_addr, maddr, ETH_ALEN); return 0; } @@ -219,14 +219,15 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac) return -ENOENT; } - if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0], - &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) { + if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", + &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) + != ETH_ALEN) { dev_warn(&pdev->dev, "can't parse mac address, not configuring\n"); return -EINVAL; } - memcpy(mac->mac_addr, addr, 6); + memcpy(mac->mac_addr, addr, ETH_ALEN); return 0; } diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h index e2f4efa8ad46..f2749d46c125 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.h +++ b/drivers/net/ethernet/pasemi/pasemi_mac.h @@ -83,7 +83,7 @@ struct pasemi_mac { #define MAC_TYPE_GMAC 1 #define MAC_TYPE_XAUI 2 - u8 mac_addr[6]; + u8 mac_addr[ETH_ALEN]; struct net_lro_mgr lro_mgr; struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS]; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c index 9fbb1cdbfa47..8375cbde9969 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c @@ -536,10 +536,10 @@ static void netxen_p2_nic_set_multi(struct net_device *netdev) { struct netxen_adapter *adapter = netdev_priv(netdev); struct netdev_hw_addr *ha; - u8 null_addr[6]; + u8 null_addr[ETH_ALEN]; int i; - memset(null_addr, 0, 6); + memset(null_addr, 0, ETH_ALEN); if (netdev->flags & IFF_PROMISC) { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 221645e9f182..39351554106d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -20,7 +20,6 @@ #include <linux/tcp.h> #include <linux/skbuff.h> #include <linux/firmware.h> - #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/timer.h> @@ -38,8 +37,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 2 -#define _QLCNIC_LINUX_SUBVERSION 44 -#define QLCNIC_LINUX_VERSIONID "5.2.44" +#define _QLCNIC_LINUX_SUBVERSION 45 +#define QLCNIC_LINUX_VERSIONID "5.2.45" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) @@ -467,7 +466,7 @@ struct qlcnic_hardware_context { u32 *ext_reg_tbl; u32 mbox_aen[QLC_83XX_MBX_AEN_CNT]; u32 mbox_reg[4]; - spinlock_t mbx_lock; + struct qlcnic_mailbox *mailbox; }; struct qlcnic_adapter_stats { @@ -950,12 +949,6 @@ struct qlcnic_ipaddr { #define QLCNIC_READD_AGE 20 #define QLCNIC_LB_MAX_FILTERS 64 #define QLCNIC_LB_BUCKET_SIZE 32 - -/* QLCNIC Driver Error Code */ -#define QLCNIC_FW_NOT_RESPOND 51 -#define QLCNIC_TEST_IN_PROGRESS 52 -#define QLCNIC_UNDEFINED_ERROR 53 -#define QLCNIC_LB_CABLE_NOT_CONN 54 #define QLCNIC_ILB_MAX_RCV_LOOP 10 struct qlcnic_filter { @@ -972,6 +965,21 @@ struct qlcnic_filter_hash { u16 fbucket_size; }; +/* Mailbox specific data structures */ +struct qlcnic_mailbox { + struct workqueue_struct *work_q; + struct qlcnic_adapter *adapter; + struct qlcnic_mbx_ops *ops; + struct work_struct work; + struct completion completion; + struct list_head cmd_q; + unsigned long status; + spinlock_t queue_lock; /* Mailbox queue lock */ + spinlock_t aen_lock; /* Mailbox response/AEN lock */ + atomic_t rsp_status; + u32 num_cmds; +}; + struct qlcnic_adapter { struct qlcnic_hardware_context *ahw; struct 
qlcnic_recv_context *recv_ctx; @@ -1385,9 +1393,20 @@ struct _cdrp_cmd { }; struct qlcnic_cmd_args { - struct _cdrp_cmd req; - struct _cdrp_cmd rsp; - int op_type; + struct completion completion; + struct list_head list; + struct _cdrp_cmd req; + struct _cdrp_cmd rsp; + atomic_t rsp_status; + int pay_size; + u32 rsp_opcode; + u32 total_cmds; + u32 op_type; + u32 type; + u32 cmd_op; + u32 *hdr; /* Back channel message header */ + u32 *pay; /* Back channel message payload */ + u8 func_num; }; int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter); @@ -1600,6 +1619,20 @@ struct qlcnic_nic_template { int (*resume)(struct qlcnic_adapter *); }; +struct qlcnic_mbx_ops { + int (*enqueue_cmd) (struct qlcnic_adapter *, + struct qlcnic_cmd_args *, unsigned long *); + void (*dequeue_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *); + void (*decode_resp) (struct qlcnic_adapter *, struct qlcnic_cmd_args *); + void (*encode_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *); + void (*nofity_fw) (struct qlcnic_adapter *, u8); +}; + +int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *); +void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *); +void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx); +void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx); + /* Adapter hardware abstraction */ struct qlcnic_hardware_ops { void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 92da9980a0a0..55a597799cf8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -149,7 +149,7 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = { .get_mac_address = qlcnic_83xx_get_mac_address, .setup_intr = qlcnic_83xx_setup_intr, .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args, - .mbx_cmd = qlcnic_83xx_mbx_op, + .mbx_cmd = qlcnic_83xx_issue_cmd, .get_func_no = qlcnic_83xx_get_func_no, .api_lock = qlcnic_83xx_cam_lock, .api_unlock = qlcnic_83xx_cam_unlock, @@ -362,6 +362,10 @@ static inline void qlcnic_83xx_get_mbx_data(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { int i; + + if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP) + return; + for (i = 0; i < cmd->rsp.num; i++) cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i)); } @@ -398,24 +402,33 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter) return IRQ_HANDLED; } +static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx) +{ + atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED); + complete(&mbx->completion); +} + static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter) { - u32 resp, event; + u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED; + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; unsigned long flags; - spin_lock_irqsave(&adapter->ahw->mbx_lock, flags); - + spin_lock_irqsave(&mbx->aen_lock, flags); resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL); if (!(resp & QLCNIC_SET_OWNER)) goto out; event = readl(QLCNIC_MBX_FW(adapter->ahw, 0)); - if (event & QLCNIC_MBX_ASYNC_EVENT) + if (event & QLCNIC_MBX_ASYNC_EVENT) { __qlcnic_83xx_process_aen(adapter); - + } else { + if (atomic_read(&mbx->rsp_status) != rsp_status) + qlcnic_83xx_notify_mbx_response(mbx); + } out: qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); - spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags); + spin_unlock_irqrestore(&mbx->aen_lock, flags); } 
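The qlcnic hunk above replaces the old global mbx_lock with a dedicated aen_lock and splits firmware notifications into two classes: words with QLCNIC_MBX_ASYNC_EVENT set are handled inline as AENs, while anything else is treated as a command response that flips an atomic flag and completes the waiting mailbox worker. A minimal user-space sketch of that dispatch idea follows; it uses C11 atomics and a pthread condition variable as stand-ins for the kernel's atomic_t and completion, and every identifier in it is illustrative rather than taken from the driver:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define ASYNC_EVENT_BIT	0x8000u
enum { RSP_WAIT, RSP_ARRIVED };

static atomic_uint fw_word;		/* word the "firmware" posts */
static atomic_int rsp_status = RSP_WAIT;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t rsp_done = PTHREAD_COND_INITIALIZER;

/* Modeled loosely on qlcnic_83xx_poll_process_aen(): classify what the
 * firmware posted and either handle it inline or wake the waiter. */
static void dispatch(void)
{
	unsigned int word = atomic_exchange(&fw_word, 0);

	if (!word)
		return;
	if (word & ASYNC_EVENT_BIT) {
		printf("async event 0x%x handled inline\n", word);
	} else if (atomic_load(&rsp_status) != RSP_ARRIVED) {
		/* command response: flag it, then signal the completion */
		atomic_store(&rsp_status, RSP_ARRIVED);
		pthread_mutex_lock(&lock);
		pthread_cond_signal(&rsp_done);
		pthread_mutex_unlock(&lock);
	}
}

int main(void)
{
	atomic_store(&fw_word, 0x1234);	/* firmware posts a response */
	dispatch();
	pthread_mutex_lock(&lock);
	while (atomic_load(&rsp_status) != RSP_ARRIVED)
		pthread_cond_wait(&rsp_done, &lock);
	pthread_mutex_unlock(&lock);
	printf("response consumed\n");
	return 0;
}

The design point the patch is making is visible even in the toy: AEN processing and response notification share one lock, so a response can never race an AEN on the same mailbox word.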
irqreturn_t qlcnic_83xx_intr(int irq, void *data) @@ -515,7 +528,7 @@ int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter) } /* Enable mailbox interrupt */ - qlcnic_83xx_enable_mbx_intrpt(adapter); + qlcnic_83xx_enable_mbx_interrupt(adapter); return err; } @@ -628,7 +641,7 @@ void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter) ahw->max_uc_count = count; } -void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *adapter) +void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *adapter) { u32 val; @@ -687,6 +700,9 @@ static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter, { int i; + if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP) + return; + dev_info(&adapter->pdev->dev, "Host MBX regs(%d)\n", cmd->req.num); for (i = 0; i < cmd->req.num; i++) { @@ -705,120 +721,74 @@ static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter, pr_info("\n"); } -/* Mailbox response for mac rcode */ -u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter) +static inline void +qlcnic_83xx_poll_for_mbx_completion(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) { - u32 fw_data; - u8 mac_cmd_rcode; + struct qlcnic_hardware_context *ahw = adapter->ahw; + int opcode = LSW(cmd->req.arg[0]); + unsigned long max_loops; - fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2)); - mac_cmd_rcode = (u8)fw_data; - if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE || - mac_cmd_rcode == QLC_83XX_MAC_PRESENT || - mac_cmd_rcode == QLC_83XX_MAC_ABSENT) - return QLCNIC_RCODE_SUCCESS; - return 1; -} + max_loops = cmd->total_cmds * QLC_83XX_MBX_CMD_LOOP; -u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time) -{ - u32 data; - struct qlcnic_hardware_context *ahw = adapter->ahw; - /* wait for mailbox completion */ - do { - data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL); - if (++(*wait_time) > QLCNIC_MBX_TIMEOUT) { - data = QLCNIC_RCODE_TIMEOUT; - break; - } - mdelay(1); - } while (!data); - return data; + for (; max_loops; max_loops--) { + if (atomic_read(&cmd->rsp_status) == + QLC_83XX_MBX_RESPONSE_ARRIVED) + return; + + udelay(1); + } + + dev_err(&adapter->pdev->dev, + "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n", + __func__, opcode, cmd->type, ahw->pci_func, ahw->op_mode); + flush_workqueue(ahw->mailbox->work_q); + return; } -int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter, - struct qlcnic_cmd_args *cmd) +int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) { - int i; - u16 opcode; - u8 mbx_err_code; - unsigned long flags; + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; struct qlcnic_hardware_context *ahw = adapter->ahw; - u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, wait_time = 0; + int cmd_type, err, opcode; + unsigned long timeout; opcode = LSW(cmd->req.arg[0]); - if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) { - dev_info(&adapter->pdev->dev, - "Mailbox cmd attempted, 0x%x\n", opcode); - dev_info(&adapter->pdev->dev, "Mailbox detached\n"); - return 0; + cmd_type = cmd->type; + err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout); + if (err) { + dev_err(&adapter->pdev->dev, + "%s: Mailbox not available, cmd_op=0x%x, cmd_context=0x%x, pci_func=0x%x, op_mode=0x%x\n", + __func__, opcode, cmd->type, ahw->pci_func, + ahw->op_mode); + return err; } - spin_lock_irqsave(&adapter->ahw->mbx_lock, flags); - mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); - - if (mbx_val) { - QLCDB(adapter, DRV, - "Mailbox cmd attempted, 0x%x\n", opcode); - QLCDB(adapter, DRV, - "Mailbox not available, 
0x%x, collect FW dump\n", - mbx_val); - cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT; - spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags); - return cmd->rsp.arg[0]; - } - - /* Fill in mailbox registers */ - mbx_cmd = cmd->req.arg[0]; - writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0)); - for (i = 1; i < cmd->req.num; i++) - writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i)); - - /* Signal FW about the impending command */ - QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER); -poll: - rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time); - if (rsp != QLCNIC_RCODE_TIMEOUT) { - /* Get the FW response data */ - fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); - if (fw_data & QLCNIC_MBX_ASYNC_EVENT) { - __qlcnic_83xx_process_aen(adapter); - goto poll; - } - mbx_err_code = QLCNIC_MBX_STATUS(fw_data); - rsp_num = QLCNIC_MBX_NUM_REGS(fw_data); - opcode = QLCNIC_MBX_RSP(fw_data); - qlcnic_83xx_get_mbx_data(adapter, cmd); - - switch (mbx_err_code) { - case QLCNIC_MBX_RSP_OK: - case QLCNIC_MBX_PORT_RSP_OK: - rsp = QLCNIC_RCODE_SUCCESS; - break; - default: - if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) { - rsp = qlcnic_83xx_mac_rcode(adapter); - if (!rsp) - goto out; - } + switch (cmd_type) { + case QLC_83XX_MBX_CMD_WAIT: + if (!wait_for_completion_timeout(&cmd->completion, timeout)) { dev_err(&adapter->pdev->dev, - "MBX command 0x%x failed with err:0x%x\n", - opcode, mbx_err_code); - rsp = mbx_err_code; - qlcnic_dump_mbx(adapter, cmd); - break; + "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n", + __func__, opcode, cmd_type, ahw->pci_func, + ahw->op_mode); + flush_workqueue(mbx->work_q); } - goto out; + break; + case QLC_83XX_MBX_CMD_NO_WAIT: + return 0; + case QLC_83XX_MBX_CMD_BUSY_WAIT: + qlcnic_83xx_poll_for_mbx_completion(adapter, cmd); + break; + default: + dev_err(&adapter->pdev->dev, + "%s: Invalid mailbox command, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n", + __func__, opcode, cmd_type, ahw->pci_func, + ahw->op_mode); + qlcnic_83xx_detach_mailbox_work(adapter); } - dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n", - QLCNIC_MBX_RSP(mbx_cmd)); - rsp = QLCNIC_RCODE_TIMEOUT; -out: - /* clear fw mbx control register */ - QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER); - spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags); - return rsp; + return cmd->rsp_opcode; } int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx, @@ -828,6 +798,7 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx, u32 temp; const struct qlcnic_mailbox_metadata *mbx_tbl; + memset(mbx, 0, sizeof(struct qlcnic_cmd_args)); mbx_tbl = qlcnic_83xx_mbx_tbl; size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl); for (i = 0; i < size; i++) { @@ -850,6 +821,7 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx, memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num); temp = adapter->ahw->fw_hal_version << 29; mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp); + mbx->cmd_op = type; return 0; } } @@ -933,20 +905,23 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter) static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter) { + u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED; struct qlcnic_hardware_context *ahw = adapter->ahw; - u32 resp, event; + struct qlcnic_mailbox *mbx = ahw->mailbox; unsigned long flags; - spin_lock_irqsave(&ahw->mbx_lock, flags); - + spin_lock_irqsave(&mbx->aen_lock, flags); resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL); if (resp & QLCNIC_SET_OWNER) { event = readl(QLCNIC_MBX_FW(ahw, 0)); - if (event & QLCNIC_MBX_ASYNC_EVENT) + if 
(event & QLCNIC_MBX_ASYNC_EVENT) { __qlcnic_83xx_process_aen(adapter); + } else { + if (atomic_read(&mbx->rsp_status) != rsp_status) + qlcnic_83xx_notify_mbx_response(mbx); + } } - - spin_unlock_irqrestore(&ahw->mbx_lock, flags); + spin_unlock_irqrestore(&mbx->aen_lock, flags); } static void qlcnic_83xx_mbx_poll_work(struct work_struct *work) @@ -969,6 +944,7 @@ void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *adapter) return; INIT_DELAYED_WORK(&adapter->mbx_poll_work, qlcnic_83xx_mbx_poll_work); + queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work, 0); } void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *adapter) @@ -1355,8 +1331,10 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test, if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { /* disable and free mailbox interrupt */ - if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) + if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { + qlcnic_83xx_enable_mbx_poll(adapter); qlcnic_83xx_free_mbx_intr(adapter); + } adapter->ahw->loopback_state = 0; adapter->ahw->hw_ops->setup_link_event(adapter, 1); } @@ -1377,6 +1355,8 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev, for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &adapter->recv_ctx->sds_rings[ring]; qlcnic_83xx_disable_intr(adapter, sds_ring); + if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) + qlcnic_83xx_enable_mbx_poll(adapter); } } @@ -1386,6 +1366,7 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev, if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { err = qlcnic_83xx_setup_mbx_intr(adapter); + qlcnic_83xx_disable_mbx_poll(adapter); if (err) { dev_err(&adapter->pdev->dev, "%s: failed to setup mbx interrupt\n", @@ -1402,6 +1383,10 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev, if (netif_running(netdev)) __qlcnic_up(adapter, netdev); + + if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST && + !(adapter->flags & QLCNIC_MSIX_ENABLED)) + qlcnic_83xx_disable_mbx_poll(adapter); out: netif_device_attach(netdev); } @@ -1619,26 +1604,33 @@ static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter, int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) { - int err; + struct qlcnic_cmd_args *cmd = NULL; u32 temp = 0; - struct qlcnic_cmd_args cmd; + int err; if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) return -EIO; - err = qlcnic_alloc_mbx_args(&cmd, adapter, + cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); + if (!cmd) + return -ENOMEM; + + err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIGURE_MAC_RX_MODE); if (err) - return err; + goto out; + cmd->type = QLC_83XX_MBX_CMD_NO_WAIT; qlcnic_83xx_set_interface_id_promisc(adapter, &temp); - cmd.req.arg[1] = (mode ? 1 : 0) | temp; - err = qlcnic_issue_cmd(adapter, &cmd); - if (err) - dev_info(&adapter->pdev->dev, - "Promiscous mode config failed\n"); + cmd->req.arg[1] = (mode ? 
1 : 0) | temp; + err = qlcnic_issue_cmd(adapter, cmd); + if (!err) + return err; - qlcnic_free_mbx_args(&cmd); + qlcnic_free_mbx_args(cmd); + +out: + kfree(cmd); return err; } @@ -1651,7 +1643,7 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { netdev_warn(netdev, "Loopback test not supported in non privileged mode\n"); - return ret; + return -ENOTSUPP; } if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { @@ -1679,19 +1671,17 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) /* Poll for link up event before running traffic */ do { msleep(QLC_83XX_LB_MSLEEP_COUNT); - if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) - qlcnic_83xx_process_aen(adapter); if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { netdev_info(netdev, "Device is resetting, free LB test resources\n"); - ret = -EIO; + ret = -EBUSY; goto free_diag_res; } if (loop++ > QLC_83XX_LB_WAIT_COUNT) { netdev_info(netdev, "Firmware didn't send link up event to loopback request\n"); - ret = -QLCNIC_FW_NOT_RESPOND; + ret = -ETIMEDOUT; qlcnic_83xx_clear_lb_mode(adapter, mode); goto free_diag_res; } @@ -1728,6 +1718,15 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) return status; config = ahw->port_config; + + /* Check if port is already in loopback mode */ + if ((config & QLC_83XX_CFG_LOOPBACK_HSS) || + (config & QLC_83XX_CFG_LOOPBACK_EXT)) { + netdev_err(netdev, + "Port already in Loopback mode.\n"); + return -EINPROGRESS; + } + set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); if (mode == QLCNIC_ILB_MODE) @@ -1748,21 +1747,19 @@ /* Wait for Link and IDC Completion AEN */ do { msleep(QLC_83XX_LB_MSLEEP_COUNT); - if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) - qlcnic_83xx_process_aen(adapter); if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { netdev_info(netdev, "Device is resetting, free LB test resources\n"); clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); - return -EIO; + return -EBUSY; } if (loop++ > QLC_83XX_LB_WAIT_COUNT) { netdev_err(netdev, "Did not receive IDC completion AEN\n"); clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); qlcnic_83xx_clear_lb_mode(adapter, mode); - return -EIO; + return -ETIMEDOUT; } } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status)); @@ -1797,21 +1794,19 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) /* Wait for Link and IDC Completion AEN */ do { msleep(QLC_83XX_LB_MSLEEP_COUNT); - if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) - qlcnic_83xx_process_aen(adapter); if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { netdev_info(netdev, "Device is resetting, free LB test resources\n"); clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); - return -EIO; + return -EBUSY; } if (loop++ > QLC_83XX_LB_WAIT_COUNT) { netdev_err(netdev, "Did not receive IDC completion AEN\n"); clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); - return -EIO; + return -ETIMEDOUT; } } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status)); @@ -1950,25 +1945,31 @@ static void qlcnic_83xx_set_interface_id_macaddr(struct qlcnic_adapter *adapter, int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, u16 vlan_id, u8 op) { - int err; - u32 *buf, temp = 0; - struct qlcnic_cmd_args cmd; + struct qlcnic_cmd_args *cmd = NULL; struct qlcnic_macvlan_mbx mv; + u32 *buf, temp = 0; + int err; if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) return -EIO; - err = qlcnic_alloc_mbx_args(&cmd,
adapter, QLCNIC_CMD_CONFIG_MAC_VLAN); + cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); + if (!cmd) + return -ENOMEM; + + err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN); if (err) - return err; + goto out; + + cmd->type = QLC_83XX_MBX_CMD_NO_WAIT; if (vlan_id) op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL; - cmd.req.arg[1] = op | (1 << 8); + cmd->req.arg[1] = op | (1 << 8); qlcnic_83xx_set_interface_id_macaddr(adapter, &temp); - cmd.req.arg[1] |= temp; + cmd->req.arg[1] |= temp; mv.vlan = vlan_id; mv.mac_addr0 = addr[0]; mv.mac_addr1 = addr[1]; @@ -1976,14 +1977,15 @@ int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, mv.mac_addr3 = addr[3]; mv.mac_addr4 = addr[4]; mv.mac_addr5 = addr[5]; - buf = &cmd.req.arg[2]; + buf = &cmd->req.arg[2]; memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx)); - err = qlcnic_issue_cmd(adapter, &cmd); - if (err) - dev_err(&adapter->pdev->dev, - "MAC-VLAN %s to CAM failed, err=%d.\n", - ((op == 1) ? "add " : "delete "), err); - qlcnic_free_mbx_args(&cmd); + err = qlcnic_issue_cmd(adapter, cmd); + if (!err) + return err; + + qlcnic_free_mbx_args(cmd); +out: + kfree(cmd); return err; } @@ -2099,10 +2101,12 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter, irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data) { struct qlcnic_adapter *adapter = data; - unsigned long flags; + struct qlcnic_mailbox *mbx; u32 mask, resp, event; + unsigned long flags; - spin_lock_irqsave(&adapter->ahw->mbx_lock, flags); + mbx = adapter->ahw->mailbox; + spin_lock_irqsave(&mbx->aen_lock, flags); resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL); if (!(resp & QLCNIC_SET_OWNER)) goto out; @@ -2110,11 +2114,13 @@ irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data) event = readl(QLCNIC_MBX_FW(adapter->ahw, 0)); if (event & QLCNIC_MBX_ASYNC_EVENT) __qlcnic_83xx_process_aen(adapter); + else + qlcnic_83xx_notify_mbx_response(mbx); + out: mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK); writel(0, adapter->ahw->pci_base0 + mask); - spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags); - + spin_unlock_irqrestore(&mbx->aen_lock, flags); return IRQ_HANDLED; } @@ -3472,3 +3478,300 @@ int qlcnic_83xx_resume(struct qlcnic_adapter *adapter) idc->delay); return err; } + +void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx) +{ + INIT_COMPLETION(mbx->completion); + set_bit(QLC_83XX_MBX_READY, &mbx->status); +} + +void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx) +{ + destroy_workqueue(mbx->work_q); + kfree(mbx); +} + +static inline void +qlcnic_83xx_notify_cmd_completion(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED); + + if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) { + qlcnic_free_mbx_args(cmd); + kfree(cmd); + return; + } + complete(&cmd->completion); +} + +static inline void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter) +{ + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; + struct list_head *head = &mbx->cmd_q; + struct qlcnic_cmd_args *cmd = NULL; + + spin_lock(&mbx->queue_lock); + + while (!list_empty(head)) { + cmd = list_entry(head->next, struct qlcnic_cmd_args, list); + list_del(&cmd->list); + mbx->num_cmds--; + qlcnic_83xx_notify_cmd_completion(adapter, cmd); + } + + spin_unlock(&mbx->queue_lock); +} + +static inline int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_mailbox 
*mbx = ahw->mailbox; + u32 host_mbx_ctrl; + + if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) + return -EBUSY; + + host_mbx_ctrl = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); + if (host_mbx_ctrl) { + ahw->idc.collect_dump = 1; + return -EIO; + } + + return 0; +} + +static inline void qlcnic_83xx_signal_mbx_cmd(struct qlcnic_adapter *adapter, + u8 issue_cmd) +{ + if (issue_cmd) + QLCWRX(adapter->ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER); + else + QLCWRX(adapter->ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER); +} + +static inline void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; + + spin_lock(&mbx->queue_lock); + + list_del(&cmd->list); + mbx->num_cmds--; + + spin_unlock(&mbx->queue_lock); + + qlcnic_83xx_notify_cmd_completion(adapter, cmd); +} + +static void qlcnic_83xx_encode_mbx_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + u32 mbx_cmd, fw_hal_version, hdr_size, total_size, tmp; + struct qlcnic_hardware_context *ahw = adapter->ahw; + int i, j; + + if (cmd->op_type != QLC_83XX_MBX_POST_BC_OP) { + mbx_cmd = cmd->req.arg[0]; + writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0)); + for (i = 1; i < cmd->req.num; i++) + writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i)); + } else { + fw_hal_version = ahw->fw_hal_version; + hdr_size = sizeof(struct qlcnic_bc_hdr) / sizeof(u32); + total_size = cmd->pay_size + hdr_size; + tmp = QLCNIC_CMD_BC_EVENT_SETUP | total_size << 16; + mbx_cmd = tmp | fw_hal_version << 29; + writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0)); + + /* Back channel specific operations bits */ + mbx_cmd = 0x1 | 1 << 4; + + if (qlcnic_sriov_pf_check(adapter)) + mbx_cmd |= cmd->func_num << 5; + + writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1)); + + for (i = 2, j = 0; j < hdr_size; i++, j++) + writel(*(cmd->hdr++), QLCNIC_MBX_HOST(ahw, i)); + for (j = 0; j < cmd->pay_size; j++, i++) + writel(*(cmd->pay++), QLCNIC_MBX_HOST(ahw, i)); + } +} + +void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *adapter) +{ + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; + + clear_bit(QLC_83XX_MBX_READY, &mbx->status); + complete(&mbx->completion); + cancel_work_sync(&mbx->work); + flush_workqueue(mbx->work_q); + qlcnic_83xx_flush_mbx_queue(adapter); +} + +static inline int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd, + unsigned long *timeout) +{ + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; + + if (test_bit(QLC_83XX_MBX_READY, &mbx->status)) { + atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_WAIT); + init_completion(&cmd->completion); + cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN; + + spin_lock(&mbx->queue_lock); + + list_add_tail(&cmd->list, &mbx->cmd_q); + mbx->num_cmds++; + cmd->total_cmds = mbx->num_cmds; + *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT; + queue_work(mbx->work_q, &mbx->work); + + spin_unlock(&mbx->queue_lock); + + return 0; + } + + return -EBUSY; +} + +static inline int qlcnic_83xx_check_mac_rcode(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + u8 mac_cmd_rcode; + u32 fw_data; + + if (cmd->cmd_op == QLCNIC_CMD_CONFIG_MAC_VLAN) { + fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2)); + mac_cmd_rcode = (u8)fw_data; + if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE || + mac_cmd_rcode == QLC_83XX_MAC_PRESENT || + mac_cmd_rcode == QLC_83XX_MAC_ABSENT) { + cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS; + return QLCNIC_RCODE_SUCCESS; + } + } + + return -EINVAL; +} + +static void qlcnic_83xx_decode_mbx_rsp(struct 
qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct device *dev = &adapter->pdev->dev; + u8 mbx_err_code; + u32 fw_data; + + fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); + mbx_err_code = QLCNIC_MBX_STATUS(fw_data); + qlcnic_83xx_get_mbx_data(adapter, cmd); + + switch (mbx_err_code) { + case QLCNIC_MBX_RSP_OK: + case QLCNIC_MBX_PORT_RSP_OK: + cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS; + break; + default: + if (!qlcnic_83xx_check_mac_rcode(adapter, cmd)) + break; + + dev_err(dev, "%s: Mailbox command failed, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x, error=0x%x\n", + __func__, cmd->cmd_op, cmd->type, ahw->pci_func, + ahw->op_mode, mbx_err_code); + cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_FAILED; + qlcnic_dump_mbx(adapter, cmd); + } + + return; +} + +static void qlcnic_83xx_mailbox_worker(struct work_struct *work) +{ + struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox, + work); + struct qlcnic_adapter *adapter = mbx->adapter; + struct qlcnic_mbx_ops *mbx_ops = mbx->ops; + struct device *dev = &adapter->pdev->dev; + atomic_t *rsp_status = &mbx->rsp_status; + struct list_head *head = &mbx->cmd_q; + struct qlcnic_hardware_context *ahw; + struct qlcnic_cmd_args *cmd = NULL; + + ahw = adapter->ahw; + + while (true) { + if (qlcnic_83xx_check_mbx_status(adapter)) + return; + + atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT); + + spin_lock(&mbx->queue_lock); + + if (list_empty(head)) { + spin_unlock(&mbx->queue_lock); + return; + } + cmd = list_entry(head->next, struct qlcnic_cmd_args, list); + + spin_unlock(&mbx->queue_lock); + + mbx_ops->encode_cmd(adapter, cmd); + mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST); + + if (wait_for_completion_timeout(&mbx->completion, + QLC_83XX_MBX_TIMEOUT)) { + mbx_ops->decode_resp(adapter, cmd); + mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_COMPLETION); + } else { + dev_err(dev, "%s: Mailbox command timeout, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x\n", + __func__, cmd->cmd_op, cmd->type, ahw->pci_func, + ahw->op_mode); + clear_bit(QLC_83XX_MBX_READY, &mbx->status); + qlcnic_83xx_idc_request_reset(adapter, + QLCNIC_FORCE_FW_DUMP_KEY); + cmd->rsp_opcode = QLCNIC_RCODE_TIMEOUT; + } + mbx_ops->dequeue_cmd(adapter, cmd); + } +} + +static struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = { + .enqueue_cmd = qlcnic_83xx_enqueue_mbx_cmd, + .dequeue_cmd = qlcnic_83xx_dequeue_mbx_cmd, + .decode_resp = qlcnic_83xx_decode_mbx_rsp, + .encode_cmd = qlcnic_83xx_encode_mbx_cmd, + .nofity_fw = qlcnic_83xx_signal_mbx_cmd, +}; + +int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_mailbox *mbx; + + ahw->mailbox = kzalloc(sizeof(*mbx), GFP_KERNEL); + if (!ahw->mailbox) + return -ENOMEM; + + mbx = ahw->mailbox; + mbx->ops = &qlcnic_83xx_mbx_ops; + mbx->adapter = adapter; + + spin_lock_init(&mbx->queue_lock); + spin_lock_init(&mbx->aen_lock); + INIT_LIST_HEAD(&mbx->cmd_q); + init_completion(&mbx->completion); + + mbx->work_q = create_singlethread_workqueue("qlcnic_mailbox"); + if (mbx->work_q == NULL) { + kfree(mbx); + return -ENOMEM; + } + + INIT_WORK(&mbx->work, qlcnic_83xx_mailbox_worker); + set_bit(QLC_83XX_MBX_READY, &mbx->status); + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 272f56a2e14b..dd22ef3d85e5 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ 
b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -89,6 +89,13 @@ #define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16 +#define QLC_83XX_MBX_POST_BC_OP 0x1 +#define QLC_83XX_MBX_COMPLETION 0x0 +#define QLC_83XX_MBX_REQUEST 0x1 + +#define QLC_83XX_MBX_TIMEOUT (5 * HZ) +#define QLC_83XX_MBX_CMD_LOOP 5000000 + /* status descriptor mailbox data * @phy_addr_{low|high}: physical address of buffer * @sds_ring_size: buffer size @@ -449,6 +456,20 @@ enum qlcnic_83xx_states { #define QLC_83xx_FLASH_MAX_WAIT_USEC 100 #define QLC_83XX_FLASH_LOCK_TIMEOUT 10000 +enum qlc_83xx_mbx_cmd_type { + QLC_83XX_MBX_CMD_WAIT = 0, + QLC_83XX_MBX_CMD_NO_WAIT, + QLC_83XX_MBX_CMD_BUSY_WAIT, +}; + +enum qlc_83xx_mbx_response_states { + QLC_83XX_MBX_RESPONSE_WAIT = 0, + QLC_83XX_MBX_RESPONSE_ARRIVED, +}; + +#define QLC_83XX_MBX_RESPONSE_FAILED 0x2 +#define QLC_83XX_MBX_RESPONSE_UNKNOWN 0x3 + /* Additional registers in 83xx */ enum qlc_83xx_ext_regs { QLCNIC_GLOBAL_RESET = 0, @@ -498,7 +519,7 @@ enum qlc_83xx_ext_regs { /* 83xx funcitons */ int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *); -int qlcnic_83xx_mbx_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *); +int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *); int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8); void qlcnic_83xx_get_func_no(struct qlcnic_adapter *); int qlcnic_83xx_cam_lock(struct qlcnic_adapter *); @@ -551,7 +572,7 @@ void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *, void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *); irqreturn_t qlcnic_83xx_handle_aen(int, void *); int qlcnic_83xx_get_port_info(struct qlcnic_adapter *); -void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *); +void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *); void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *); irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *); irqreturn_t qlcnic_83xx_intr(int, void *); @@ -623,8 +644,6 @@ int qlcnic_83xx_set_led(struct net_device *, enum ethtool_phys_id_state); int qlcnic_83xx_flash_test(struct qlcnic_adapter *); int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *); int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *); -u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *); -u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *); void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *); void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *); void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 9f4b8d5f0865..cc1e32a7248f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -399,6 +399,7 @@ static void qlcnic_83xx_idc_detach_driver(struct qlcnic_adapter *adapter) struct net_device *netdev = adapter->netdev; netif_device_detach(netdev); + qlcnic_83xx_detach_mailbox_work(adapter); /* Disable mailbox interrupt */ qlcnic_83xx_disable_mbx_intr(adapter); @@ -610,6 +611,9 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) { int err; + qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox); + qlcnic_83xx_enable_mbx_interrupt(adapter); + /* register for NIC IDC AEN Events */ qlcnic_83xx_register_nic_idc_func(adapter, 1); @@ -617,7 +621,7 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) if (err) return err; - qlcnic_83xx_enable_mbx_intrpt(adapter); + 
qlcnic_83xx_enable_mbx_interrupt(adapter); if (qlcnic_83xx_configure_opmode(adapter)) { qlcnic_83xx_idc_enter_failed_state(adapter, 1); @@ -640,7 +644,6 @@ static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter) struct qlcnic_hardware_context *ahw = adapter->ahw; qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1); - set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); @@ -810,9 +813,10 @@ static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter) **/ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) { - u32 val; struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_mailbox *mbx = ahw->mailbox; int ret = 0; + u32 val; /* Perform NIC configuration based ready state entry actions */ if (ahw->idc.state_entry(adapter)) @@ -824,7 +828,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) dev_err(&adapter->pdev->dev, "Error: device temperature %d above limits\n", adapter->ahw->temp); - clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status); + clear_bit(QLC_83XX_MBX_READY, &mbx->status); set_bit(__QLCNIC_RESETTING, &adapter->state); qlcnic_83xx_idc_detach_driver(adapter); qlcnic_83xx_idc_enter_failed_state(adapter, 1); @@ -837,7 +841,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) if (ret) { adapter->flags |= QLCNIC_FW_HANG; if (!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) { - clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status); + clear_bit(QLC_83XX_MBX_READY, &mbx->status); set_bit(__QLCNIC_RESETTING, &adapter->state); qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); } @@ -845,6 +849,8 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) } if ((val & QLC_83XX_IDC_GRACEFULL_RESET) || ahw->idc.collect_dump) { + clear_bit(QLC_83XX_MBX_READY, &mbx->status); + /* Move to need reset state and prepare for reset */ qlcnic_83xx_idc_enter_need_reset_state(adapter, 1); return ret; @@ -882,12 +888,13 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) **/ static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter) { + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; int ret = 0; if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) { qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); set_bit(__QLCNIC_RESETTING, &adapter->state); - clear_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); + clear_bit(QLC_83XX_MBX_READY, &mbx->status); if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE) qlcnic_83xx_disable_vnic_mode(adapter, 1); @@ -1079,7 +1086,6 @@ static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter) adapter->ahw->idc.name = (char **)qlc_83xx_idc_states; clear_bit(__QLCNIC_RESETTING, &adapter->state); - set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); /* Check if reset recovery is disabled */ @@ -1190,6 +1196,9 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key) { u32 val; + if (qlcnic_sriov_vf_check(adapter)) + return; + if (qlcnic_83xx_lock_driver(adapter)) { dev_err(&adapter->pdev->dev, "%s:failed, please retry\n", __func__); @@ -2141,17 +2150,35 @@ static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter) int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) { struct qlcnic_hardware_context *ahw = adapter->ahw; + int err = 0; - if 
(qlcnic_sriov_vf_check(adapter)) - return qlcnic_sriov_vf_init(adapter, pci_using_dac); + ahw->msix_supported = !!qlcnic_use_msi_x; + err = qlcnic_83xx_init_mailbox_work(adapter); + if (err) + goto exit; - if (qlcnic_83xx_check_hw_status(adapter)) - return -EIO; + if (qlcnic_sriov_vf_check(adapter)) { + err = qlcnic_sriov_vf_init(adapter, pci_using_dac); + if (err) + goto detach_mbx; + else + return err; + } - /* Initilaize 83xx mailbox spinlock */ - spin_lock_init(&ahw->mbx_lock); + err = qlcnic_83xx_check_hw_status(adapter); + if (err) + goto detach_mbx; + + err = qlcnic_setup_intr(adapter, 0); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n"); + goto disable_intr; + } + + err = qlcnic_83xx_setup_mbx_intr(adapter); + if (err) + goto disable_mbx_intr; - set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); qlcnic_83xx_clear_function_resources(adapter); INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work); @@ -2162,19 +2189,33 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) if (!qlcnic_83xx_read_flash_descriptor_table(adapter)) qlcnic_83xx_read_flash_mfg_id(adapter); - if (qlcnic_83xx_idc_init(adapter)) - return -EIO; + err = qlcnic_83xx_idc_init(adapter); + if (err) + goto disable_mbx_intr; /* Configure default, SR-IOV or Virtual NIC mode of operation */ - if (qlcnic_83xx_configure_opmode(adapter)) - return -EIO; + err = qlcnic_83xx_configure_opmode(adapter); + if (err) + goto disable_mbx_intr; /* Perform operating mode specific initialization */ - if (adapter->nic_ops->init_driver(adapter)) - return -EIO; + err = adapter->nic_ops->init_driver(adapter); + if (err) + goto disable_mbx_intr; /* Periodically monitor device status */ qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work); + return 0; - return adapter->ahw->idc.err_code; +disable_mbx_intr: + qlcnic_83xx_free_mbx_intr(adapter); + +disable_intr: + qlcnic_teardown_intr(adapter); + +detach_mbx: + qlcnic_83xx_detach_mailbox_work(adapter); + qlcnic_83xx_free_mailbox(ahw->mailbox); +exit: + return err; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 7aac23ab31d1..79a5855f926c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -1006,9 +1006,9 @@ int qlcnic_loopback_test(struct net_device *netdev, u8 mode) msleep(500); qlcnic_process_rcv_ring_diag(sds_ring); if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { - netdev_info(netdev, "firmware didnt respond to loopback" - " configure request\n"); - ret = -QLCNIC_FW_NOT_RESPOND; + netdev_info(netdev, + "Firmware didn't send link up event to loopback request\n"); + ret = -ETIMEDOUT; goto free_res; } else if (adapter->ahw->diag_cnt) { ret = adapter->ahw->diag_cnt; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 6946d354f44f..f807f3b949e2 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -952,17 +952,17 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index, break; case 1: dev_info(dev, "loopback already in progress\n"); - adapter->ahw->diag_cnt = -QLCNIC_TEST_IN_PROGRESS; + adapter->ahw->diag_cnt = -EINPROGRESS; break; case 2: dev_info(dev, "loopback cable is not connected\n"); - adapter->ahw->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN; + adapter->ahw->diag_cnt = -ENODEV; break; default: dev_info(dev, "loopback configure request failed, err %x\n",
ret); - adapter->ahw->diag_cnt = -QLCNIC_UNDEFINED_ERROR; + adapter->ahw->diag_cnt = -EIO; break; } break; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index ee013fcc3322..b8242bc0293b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2145,16 +2145,12 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_warn(&pdev->dev, "Device does not support MSI interrupts\n"); - err = qlcnic_setup_intr(adapter, 0); - if (err) { - dev_err(&pdev->dev, "Failed to setup interrupt\n"); - goto err_out_disable_msi; - } - - if (qlcnic_83xx_check(adapter)) { - err = qlcnic_83xx_setup_mbx_intr(adapter); - if (err) + if (qlcnic_82xx_check(adapter)) { + err = qlcnic_setup_intr(adapter, 0); + if (err) { + dev_err(&pdev->dev, "Failed to setup interrupt\n"); goto err_out_disable_msi; + } } err = qlcnic_get_act_pci_func(adapter); @@ -2241,9 +2237,11 @@ static void qlcnic_remove(struct pci_dev *pdev) qlcnic_sriov_cleanup(adapter); if (qlcnic_83xx_check(adapter)) { - qlcnic_83xx_free_mbx_intr(adapter); qlcnic_83xx_register_nic_idc_func(adapter, 0); cancel_delayed_work_sync(&adapter->idc_aen_work); + qlcnic_83xx_free_mbx_intr(adapter); + qlcnic_83xx_detach_mailbox_work(adapter); + qlcnic_83xx_free_mailbox(ahw->mailbox); } qlcnic_detach(adapter); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 5d40045b3cea..046286a897ea 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -33,7 +33,7 @@ static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32); static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *); static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *); static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *); -static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *, +static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *); static void qlcnic_sriov_process_bc_cmd(struct work_struct *); @@ -45,7 +45,7 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = { .get_mac_address = qlcnic_83xx_get_mac_address, .setup_intr = qlcnic_83xx_setup_intr, .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args, - .mbx_cmd = qlcnic_sriov_vf_mbx_op, + .mbx_cmd = qlcnic_sriov_issue_cmd, .get_func_no = qlcnic_83xx_get_func_no, .api_lock = qlcnic_83xx_cam_lock, .api_unlock = qlcnic_83xx_cam_unlock, @@ -286,96 +286,38 @@ void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter) static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, u32 *pay, u8 pci_func, u8 size) { - u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val, wait_time = 0; struct qlcnic_hardware_context *ahw = adapter->ahw; - unsigned long flags; - u16 opcode; - u8 mbx_err_code; - int i, j; - - opcode = ((struct qlcnic_bc_hdr *)hdr)->cmd_op; - - if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) { - dev_info(&adapter->pdev->dev, - "Mailbox cmd attempted, 0x%x\n", opcode); - dev_info(&adapter->pdev->dev, "Mailbox detached\n"); - return 0; - } - - spin_lock_irqsave(&ahw->mbx_lock, flags); - - mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); - if (mbx_val) { - QLCDB(adapter, DRV, "Mailbox cmd attempted, 0x%x\n", opcode); - spin_unlock_irqrestore(&ahw->mbx_lock, flags); - return QLCNIC_RCODE_TIMEOUT; - } - /* Fill in mailbox registers */ - val = size + 
(sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); - mbx_cmd = 0x31 | (val << 16) | (adapter->ahw->fw_hal_version << 29); - - writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0)); - mbx_cmd = 0x1 | (1 << 4); + struct qlcnic_mailbox *mbx = ahw->mailbox; + struct qlcnic_cmd_args cmd; + unsigned long timeout; + int err; - if (qlcnic_sriov_pf_check(adapter)) - mbx_cmd |= (pci_func << 5); + memset(&cmd, 0, sizeof(struct qlcnic_cmd_args)); + cmd.hdr = hdr; + cmd.pay = pay; + cmd.pay_size = size; + cmd.func_num = pci_func; + cmd.op_type = QLC_83XX_MBX_POST_BC_OP; + cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op; - writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1)); - for (i = 2, j = 0; j < (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); - i++, j++) { - writel(*(hdr++), QLCNIC_MBX_HOST(ahw, i)); + err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout); + if (err) { + dev_err(&adapter->pdev->dev, + "%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n", + __func__, cmd.cmd_op, cmd.type, ahw->pci_func, + ahw->op_mode); + return err; } - for (j = 0; j < size; j++, i++) - writel(*(pay++), QLCNIC_MBX_HOST(ahw, i)); - - /* Signal FW about the impending command */ - QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER); - /* Waiting for the mailbox cmd to complete and while waiting here - * some AEN might arrive. If more than 5 seconds expire we can - * assume something is wrong. - */ -poll: - rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time); - if (rsp != QLCNIC_RCODE_TIMEOUT) { - /* Get the FW response data */ - fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); - if (fw_data & QLCNIC_MBX_ASYNC_EVENT) { - __qlcnic_83xx_process_aen(adapter); - goto poll; - } - mbx_err_code = QLCNIC_MBX_STATUS(fw_data); - rsp_num = QLCNIC_MBX_NUM_REGS(fw_data); - opcode = QLCNIC_MBX_RSP(fw_data); - - switch (mbx_err_code) { - case QLCNIC_MBX_RSP_OK: - case QLCNIC_MBX_PORT_RSP_OK: - rsp = QLCNIC_RCODE_SUCCESS; - break; - default: - if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) { - rsp = qlcnic_83xx_mac_rcode(adapter); - if (!rsp) - goto out; - } - dev_err(&adapter->pdev->dev, - "MBX command 0x%x failed with err:0x%x\n", - opcode, mbx_err_code); - rsp = mbx_err_code; - break; - } - goto out; + if (!wait_for_completion_timeout(&cmd.completion, timeout)) { + dev_err(&adapter->pdev->dev, + "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n", + __func__, cmd.cmd_op, cmd.type, ahw->pci_func, + ahw->op_mode); + flush_workqueue(mbx->work_q); } - dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n", - QLCNIC_MBX_RSP(mbx_cmd)); - rsp = QLCNIC_RCODE_TIMEOUT; -out: - /* clear fw mbx control register */ - QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER); - spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags); - return rsp; + return cmd.rsp_opcode; } static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter) @@ -522,8 +464,8 @@ static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter) static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter) { - struct qlcnic_info nic_info; struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_info nic_info; int err; err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0); @@ -637,8 +579,6 @@ int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac) struct qlcnic_hardware_context *ahw = adapter->ahw; int err; - spin_lock_init(&ahw->mbx_lock); - set_bit(QLC_83XX_MBX_READY, &ahw->idc.status); set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status); ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; 
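[Editor's note] The qlcnic_sriov_post_bc_msg() rewrite above replaces register polling under a spinlock with a queued command plus a sleep on wait_for_completion_timeout() until the mailbox worker signals completion. The sketch below models that enqueue-and-wait shape in standalone C, with POSIX primitives standing in for the kernel's completion API; every name in it is illustrative and not part of the driver.

    /* Userspace analogue of the enqueue-and-wait mailbox pattern. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    struct mbx_cmd {
            int opcode;
            int rsp;                 /* filled in by the worker */
            bool done;
            pthread_mutex_t lock;
            pthread_cond_t completion;
    };

    /* Worker side: process the command, then signal completion. */
    static void *mbx_worker(void *arg)
    {
            struct mbx_cmd *cmd = arg;

            pthread_mutex_lock(&cmd->lock);
            cmd->rsp = 0;            /* pretend the firmware answered OK */
            cmd->done = true;
            pthread_cond_signal(&cmd->completion);
            pthread_mutex_unlock(&cmd->lock);
            return NULL;
    }

    /* Caller side: enqueue, then block with a timeout instead of polling. */
    static int mbx_issue_cmd(struct mbx_cmd *cmd, int timeout_sec)
    {
            struct timespec ts;
            pthread_t worker;
            int err = 0;

            clock_gettime(CLOCK_REALTIME, &ts);
            ts.tv_sec += timeout_sec;

            pthread_create(&worker, NULL, mbx_worker, cmd);

            pthread_mutex_lock(&cmd->lock);
            while (!cmd->done && err == 0)
                    err = pthread_cond_timedwait(&cmd->completion,
                                                 &cmd->lock, &ts);
            pthread_mutex_unlock(&cmd->lock);

            pthread_join(worker, NULL);
            return err ? -1 : cmd->rsp; /* ETIMEDOUT maps to an error */
    }

    int main(void)
    {
            struct mbx_cmd cmd = {
                    .opcode = 0x31,
                    .lock = PTHREAD_MUTEX_INITIALIZER,
                    .completion = PTHREAD_COND_INITIALIZER,
            };

            printf("rsp = %d\n", mbx_issue_cmd(&cmd, 5));
            return 0;
    }

The done flag guards against the worker signaling before the caller starts waiting; this is the same lost-wakeup hazard the kernel's struct completion handles internally.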
ahw->reset_context = 0; @@ -1085,6 +1025,7 @@ static void qlcnic_sriov_process_bc_cmd(struct work_struct *work) if (test_bit(QLC_BC_VF_FLR, &vf->state)) return; + memset(&cmd, 0, sizeof(struct qlcnic_cmd_args)); trans = list_first_entry(&vf->rcv_act.wait_list, struct qlcnic_bc_trans, list); adapter = vf->adapter; @@ -1234,6 +1175,7 @@ static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov, return; } + memset(&cmd, 0, sizeof(struct qlcnic_cmd_args)); cmd_op = hdr->cmd_op; if (qlcnic_sriov_alloc_bc_trans(&trans)) return; @@ -1359,7 +1301,7 @@ int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable) if (enable) cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7); - err = qlcnic_83xx_mbx_op(adapter, &cmd); + err = qlcnic_83xx_issue_cmd(adapter, &cmd); if (err != QLCNIC_RCODE_SUCCESS) { dev_err(&adapter->pdev->dev, @@ -1391,10 +1333,11 @@ static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter, return -EIO; } -static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter, +static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_mailbox *mbx = ahw->mailbox; struct device *dev = &adapter->pdev->dev; struct qlcnic_bc_trans *trans; int err; @@ -1411,7 +1354,7 @@ static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter, goto cleanup_transaction; retry: - if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) { + if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) { rsp = -EIO; QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n", QLCNIC_MBX_RSP(cmd->req.arg[0]), func); @@ -1454,7 +1397,7 @@ err_out: if (rsp == QLCNIC_RCODE_TIMEOUT) { ahw->reset_context = 1; adapter->need_fw_reset = 1; - clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status); + clear_bit(QLC_83XX_MBX_READY, &mbx->status); } cleanup_transaction: @@ -1614,7 +1557,7 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter) int err; set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); - qlcnic_83xx_enable_mbx_intrpt(adapter); + qlcnic_83xx_enable_mbx_interrupt(adapter); err = qlcnic_sriov_cfg_bc_intr(adapter, 1); if (err) @@ -1657,8 +1600,10 @@ static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter) struct net_device *netdev = adapter->netdev; u8 i, max_ints = ahw->num_msix - 1; - qlcnic_83xx_disable_mbx_intr(adapter); netif_device_detach(netdev); + qlcnic_83xx_detach_mailbox_work(adapter); + qlcnic_83xx_disable_mbx_intr(adapter); + if (netif_running(netdev)) qlcnic_down(adapter, netdev); @@ -1702,6 +1647,7 @@ static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter) static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_mailbox *mbx = ahw->mailbox; struct device *dev = &adapter->pdev->dev; struct qlc_83xx_idc *idc = &ahw->idc; u8 func = ahw->pci_func; @@ -1712,7 +1658,7 @@ static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter) /* Skip the context reset and check if FW is hung */ if (adapter->reset_ctx_cnt < 3) { adapter->need_fw_reset = 1; - clear_bit(QLC_83XX_MBX_READY, &idc->status); + clear_bit(QLC_83XX_MBX_READY, &mbx->status); dev_info(dev, "Resetting context, wait here to check if FW is in failed state\n"); return 0; @@ -1737,7 +1683,7 @@ static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter) __func__, adapter->reset_ctx_cnt, func); set_bit(__QLCNIC_RESETTING, 
&adapter->state); adapter->need_fw_reset = 1; - clear_bit(QLC_83XX_MBX_READY, &idc->status); + clear_bit(QLC_83XX_MBX_READY, &mbx->status); qlcnic_sriov_vf_detach(adapter); adapter->need_fw_reset = 0; @@ -1787,6 +1733,7 @@ static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter) static int qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter) { + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; struct qlc_83xx_idc *idc = &adapter->ahw->idc; dev_info(&adapter->pdev->dev, "Device is in quiescent state\n"); @@ -1794,7 +1741,7 @@ qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter) set_bit(__QLCNIC_RESETTING, &adapter->state); adapter->tx_timeo_cnt = 0; adapter->reset_ctx_cnt = 0; - clear_bit(QLC_83XX_MBX_READY, &idc->status); + clear_bit(QLC_83XX_MBX_READY, &mbx->status); qlcnic_sriov_vf_detach(adapter); } @@ -1803,6 +1750,7 @@ qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter) static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter) { + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; struct qlc_83xx_idc *idc = &adapter->ahw->idc; u8 func = adapter->ahw->pci_func; @@ -1812,7 +1760,7 @@ static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter) set_bit(__QLCNIC_RESETTING, &adapter->state); adapter->tx_timeo_cnt = 0; adapter->reset_ctx_cnt = 0; - clear_bit(QLC_83XX_MBX_READY, &idc->status); + clear_bit(QLC_83XX_MBX_READY, &mbx->status); qlcnic_sriov_vf_detach(adapter); } return 0; @@ -1990,7 +1938,7 @@ int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter) int err; set_bit(QLC_83XX_MODULE_LOADED, &idc->status); - qlcnic_83xx_enable_mbx_intrpt(adapter); + qlcnic_83xx_enable_mbx_interrupt(adapter); err = qlcnic_sriov_cfg_bc_intr(adapter, 1); if (err) return err; diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h index 7e8d68263963..899433778466 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h @@ -2149,7 +2149,7 @@ struct ql_adapter { struct timer_list timer; atomic_t lb_count; /* Keep local copy of current mac address. */ - char current_mac_addr[6]; + char current_mac_addr[ETH_ALEN]; }; /* diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index 19a8a045e077..a30c4395b232 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -13,4 +13,4 @@ config SH_ETH Renesas SuperH Ethernet device driver. This driver supporting CPUs are: - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757, - R8A7740 and R8A7779. + R8A7740, R8A777x and R8A7790. 
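[Editor's note] The sh_eth diff that follows adds R8A7790 support by introducing a new sh_eth_cpu_data instance whose rmiimode bit tells the shared init path to program the newly mapped RMIIMODE register. A minimal standalone sketch of that per-SoC capability-bit pattern is shown here; the names are hypothetical, not taken from the driver.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Per-SoC feature descriptor, mirroring the sh_eth_cpu_data idea. */
    struct chip_data {
            const char *name;
            unsigned rmiimode:1;   /* chip has an RMIIMODE-style register */
    };

    static const struct chip_data r8a777x = { .name = "r8a777x", .rmiimode = 0 };
    static const struct chip_data r8a7790 = { .name = "r8a7790", .rmiimode = 1 };

    /* Stand-in for a register write; a real driver would use iowrite32(). */
    static void reg_write(const char *chip, uint32_t val, const char *reg)
    {
            printf("%s: write 0x%x to %s\n", chip, val, reg);
    }

    /* Device init consults the flag instead of matching chip names. */
    static void dev_init(const struct chip_data *cd)
    {
            if (cd->rmiimode)
                    reg_write(cd->name, 0x1, "RMIIMODE");
    }

    int main(void)
    {
            dev_init(&r8a777x);   /* no RMIIMODE write */
            dev_init(&r8a7790);   /* writes RMIIMODE = 1 */
            return 0;
    }

Keeping the quirk as a bit in a data table means a new SoC only adds a descriptor entry, which is why the diff below can support R8A7790 with one new structure and two guarded lines in the init path.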
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index a753928bab9c..9e2afe8e0c9f 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -189,6 +189,7 @@ static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = { [RMCR] = 0x0258, [TFUCR] = 0x0264, [RFOCR] = 0x0268, + [RMIIMODE] = 0x026c, [FCFTR] = 0x0270, [TRIMD] = 0x027c, }; @@ -392,6 +393,27 @@ static struct sh_eth_cpu_data r8a777x_data = { .hw_swap = 1, }; +/* R8A7790 */ +static struct sh_eth_cpu_data r8a7790_data = { + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_r8a777x, + + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, + .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, + .eesipr_value = 0x01ff009f, + + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | + EESR_ECI, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .rmiimode = 1, +}; + static void sh_eth_set_rate_sh7724(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -1124,6 +1146,9 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) if (ret) goto out; + if (mdp->cd->rmiimode) + sh_eth_write(ndev, 0x1, RMIIMODE); + /* Descriptor format */ sh_eth_ring_format(ndev); if (mdp->cd->rpadir) @@ -2749,6 +2774,7 @@ static struct platform_device_id sh_eth_id_table[] = { { "sh7763-gether", (kernel_ulong_t)&sh7763_data }, { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data }, { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data }, + { "r8a7790-ether", (kernel_ulong_t)&r8a7790_data }, { } }; MODULE_DEVICE_TABLE(platform, sh_eth_id_table); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 99995bf38c40..da93f5cf41f8 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -60,6 +60,7 @@ enum { EDOCR, TFUCR, RFOCR, + RMIIMODE, FCFTR, RPADIR, TRIMD, @@ -482,6 +483,7 @@ struct sh_eth_cpu_data { unsigned hw_crc:1; /* E-DMAC have CSMR */ unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */ unsigned shift_rd0:1; /* shift Rx descriptor word 0 right by 16 */ + unsigned rmiimode:1; /* EtherC has RMIIMODE register */ }; struct sh_eth_private { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 03de76c7a177..da8be6e63096 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -109,9 +109,6 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) const char *mac = NULL; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENODEV; - addr = devm_ioremap_resource(dev, res); if (IS_ERR(addr)) return PTR_ERR(addr); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 22a7a4336211..db6933ecb543 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -91,6 +91,7 @@ do { \ #define CPSW1_SLAVE_SIZE 0x040 #define CPSW1_CPDMA_OFFSET 0x100 #define CPSW1_STATERAM_OFFSET 0x200 +#define CPSW1_HW_STATS 0x400 #define CPSW1_CPTS_OFFSET 0x500 #define CPSW1_ALE_OFFSET 0x600 #define CPSW1_SLIVER_OFFSET 0x700 @@ -99,6 +100,7 @@ do { \ #define CPSW2_SLAVE_OFFSET 0x200 #define CPSW2_SLAVE_SIZE 0x100 #define CPSW2_CPDMA_OFFSET 0x800 +#define CPSW2_HW_STATS 0x900 #define 
CPSW2_STATERAM_OFFSET 0xa00 #define CPSW2_CPTS_OFFSET 0xc00 #define CPSW2_ALE_OFFSET 0xd00 @@ -299,6 +301,44 @@ struct cpsw_sliver_regs { u32 rx_pri_map; }; +struct cpsw_hw_stats { + u32 rxgoodframes; + u32 rxbroadcastframes; + u32 rxmulticastframes; + u32 rxpauseframes; + u32 rxcrcerrors; + u32 rxaligncodeerrors; + u32 rxoversizedframes; + u32 rxjabberframes; + u32 rxundersizedframes; + u32 rxfragments; + u32 __pad_0[2]; + u32 rxoctets; + u32 txgoodframes; + u32 txbroadcastframes; + u32 txmulticastframes; + u32 txpauseframes; + u32 txdeferredframes; + u32 txcollisionframes; + u32 txsinglecollframes; + u32 txmultcollframes; + u32 txexcessivecollisions; + u32 txlatecollisions; + u32 txunderrun; + u32 txcarriersenseerrors; + u32 txoctets; + u32 octetframes64; + u32 octetframes65t127; + u32 octetframes128t255; + u32 octetframes256t511; + u32 octetframes512t1023; + u32 octetframes1024tup; + u32 netoctets; + u32 rxsofoverruns; + u32 rxmofoverruns; + u32 rxdmaoverruns; +}; + struct cpsw_slave { void __iomem *regs; struct cpsw_sliver_regs __iomem *sliver; @@ -332,6 +372,7 @@ struct cpsw_priv { struct cpsw_platform_data data; struct cpsw_ss_regs __iomem *regs; struct cpsw_wr_regs __iomem *wr_regs; + u8 __iomem *hw_stats; struct cpsw_host_regs __iomem *host_port_regs; u32 msg_enable; u32 version; @@ -354,6 +395,94 @@ struct cpsw_priv { u32 emac_port; }; +struct cpsw_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +enum { + CPSW_STATS, + CPDMA_RX_STATS, + CPDMA_TX_STATS, +}; + +#define CPSW_STAT(m) CPSW_STATS, \ + sizeof(((struct cpsw_hw_stats *)0)->m), \ + offsetof(struct cpsw_hw_stats, m) +#define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \ + sizeof(((struct cpdma_chan_stats *)0)->m), \ + offsetof(struct cpdma_chan_stats, m) +#define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \ + sizeof(((struct cpdma_chan_stats *)0)->m), \ + offsetof(struct cpdma_chan_stats, m) + +static const struct cpsw_stats cpsw_gstrings_stats[] = { + { "Good Rx Frames", CPSW_STAT(rxgoodframes) }, + { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) }, + { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) }, + { "Pause Rx Frames", CPSW_STAT(rxpauseframes) }, + { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) }, + { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) }, + { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) }, + { "Rx Jabbers", CPSW_STAT(rxjabberframes) }, + { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) }, + { "Rx Fragments", CPSW_STAT(rxfragments) }, + { "Rx Octets", CPSW_STAT(rxoctets) }, + { "Good Tx Frames", CPSW_STAT(txgoodframes) }, + { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) }, + { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) }, + { "Pause Tx Frames", CPSW_STAT(txpauseframes) }, + { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) }, + { "Collisions", CPSW_STAT(txcollisionframes) }, + { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) }, + { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) }, + { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) }, + { "Late Collisions", CPSW_STAT(txlatecollisions) }, + { "Tx Underrun", CPSW_STAT(txunderrun) }, + { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) }, + { "Tx Octets", CPSW_STAT(txoctets) }, + { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) }, + { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) }, + { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) }, + { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) }, + 
{ "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) }, + { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) }, + { "Net Octets", CPSW_STAT(netoctets) }, + { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) }, + { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) }, + { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) }, + { "Rx DMA chan: head_enqueue", CPDMA_RX_STAT(head_enqueue) }, + { "Rx DMA chan: tail_enqueue", CPDMA_RX_STAT(tail_enqueue) }, + { "Rx DMA chan: pad_enqueue", CPDMA_RX_STAT(pad_enqueue) }, + { "Rx DMA chan: misqueued", CPDMA_RX_STAT(misqueued) }, + { "Rx DMA chan: desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) }, + { "Rx DMA chan: pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) }, + { "Rx DMA chan: runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) }, + { "Rx DMA chan: runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) }, + { "Rx DMA chan: empty_dequeue", CPDMA_RX_STAT(empty_dequeue) }, + { "Rx DMA chan: busy_dequeue", CPDMA_RX_STAT(busy_dequeue) }, + { "Rx DMA chan: good_dequeue", CPDMA_RX_STAT(good_dequeue) }, + { "Rx DMA chan: requeue", CPDMA_RX_STAT(requeue) }, + { "Rx DMA chan: teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) }, + { "Tx DMA chan: head_enqueue", CPDMA_TX_STAT(head_enqueue) }, + { "Tx DMA chan: tail_enqueue", CPDMA_TX_STAT(tail_enqueue) }, + { "Tx DMA chan: pad_enqueue", CPDMA_TX_STAT(pad_enqueue) }, + { "Tx DMA chan: misqueued", CPDMA_TX_STAT(misqueued) }, + { "Tx DMA chan: desc_alloc_fail", CPDMA_TX_STAT(desc_alloc_fail) }, + { "Tx DMA chan: pad_alloc_fail", CPDMA_TX_STAT(pad_alloc_fail) }, + { "Tx DMA chan: runt_receive_buf", CPDMA_TX_STAT(runt_receive_buff) }, + { "Tx DMA chan: runt_transmit_buf", CPDMA_TX_STAT(runt_transmit_buff) }, + { "Tx DMA chan: empty_dequeue", CPDMA_TX_STAT(empty_dequeue) }, + { "Tx DMA chan: busy_dequeue", CPDMA_TX_STAT(busy_dequeue) }, + { "Tx DMA chan: good_dequeue", CPDMA_TX_STAT(good_dequeue) }, + { "Tx DMA chan: requeue", CPDMA_TX_STAT(requeue) }, + { "Tx DMA chan: teardown_dequeue", CPDMA_TX_STAT(teardown_dequeue) }, +}; + +#define CPSW_STATS_LEN ARRAY_SIZE(cpsw_gstrings_stats) + #define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi) #define for_each_slave(priv, func, arg...) 
\ do { \ @@ -723,6 +852,69 @@ static int cpsw_set_coalesce(struct net_device *ndev, return 0; } +static int cpsw_get_sset_count(struct net_device *ndev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return CPSW_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < CPSW_STATS_LEN; i++) { + memcpy(p, cpsw_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static void cpsw_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpdma_chan_stats rx_stats; + struct cpdma_chan_stats tx_stats; + u32 val; + u8 *p; + int i; + + /* Collect Davinci CPDMA stats for Rx and Tx Channel */ + cpdma_chan_get_stats(priv->rxch, &rx_stats); + cpdma_chan_get_stats(priv->txch, &tx_stats); + + for (i = 0; i < CPSW_STATS_LEN; i++) { + switch (cpsw_gstrings_stats[i].type) { + case CPSW_STATS: + val = readl(priv->hw_stats + + cpsw_gstrings_stats[i].stat_offset); + data[i] = val; + break; + + case CPDMA_RX_STATS: + p = (u8 *)&rx_stats + + cpsw_gstrings_stats[i].stat_offset; + data[i] = *(u32 *)p; + break; + + case CPDMA_TX_STATS: + p = (u8 *)&tx_stats + + cpsw_gstrings_stats[i].stat_offset; + data[i] = *(u32 *)p; + break; + } + } +} + static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val) { static char *leader = "........................................"; @@ -1232,6 +1424,33 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev) } +static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct sockaddr *addr = (struct sockaddr *)p; + int flags = 0; + u16 vid = 0; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (priv->data.dual_emac) { + vid = priv->slaves[priv->emac_port].port_vlan; + flags = ALE_VLAN; + } + + cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port, + flags, vid); + cpsw_ale_add_ucast(priv->ale, addr->sa_data, priv->host_port, + flags, vid); + + memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); + memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); + for_each_slave(priv, cpsw_set_slave_mac, priv); + + return 0; +} + static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); @@ -1326,6 +1545,7 @@ static const struct net_device_ops cpsw_netdev_ops = { .ndo_stop = cpsw_ndo_stop, .ndo_start_xmit = cpsw_ndo_start_xmit, .ndo_change_rx_flags = cpsw_ndo_change_rx_flags, + .ndo_set_mac_address = cpsw_ndo_set_mac_address, .ndo_do_ioctl = cpsw_ndo_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = eth_change_mtu, @@ -1426,6 +1646,9 @@ static const struct ethtool_ops cpsw_ethtool_ops = { .set_settings = cpsw_set_settings, .get_coalesce = cpsw_get_coalesce, .set_coalesce = cpsw_set_coalesce, + .get_sset_count = cpsw_get_sset_count, + .get_strings = cpsw_get_strings, + .get_ethtool_stats = cpsw_get_ethtool_stats, }; static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, @@ -1623,6 +1846,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev, priv_sl2->host_port = priv->host_port; priv_sl2->host_port_regs = priv->host_port_regs; priv_sl2->wr_regs = priv->wr_regs; + priv_sl2->hw_stats = priv->hw_stats; priv_sl2->dma = priv->dma; priv_sl2->txch = priv->txch; priv_sl2->rxch = 
priv->rxch; @@ -1780,7 +2004,8 @@ static int cpsw_probe(struct platform_device *pdev) switch (priv->version) { case CPSW_VERSION_1: priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET; - priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET; + priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET; + priv->hw_stats = ss_regs + CPSW1_HW_STATS; dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET; dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET; ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET; @@ -1791,7 +2016,8 @@ static int cpsw_probe(struct platform_device *pdev) break; case CPSW_VERSION_2: priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET; - priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET; + priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET; + priv->hw_stats = ss_regs + CPSW2_HW_STATS; dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET; dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET; ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET; diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig index 098b1c42b393..4083ba8839e1 100644 --- a/drivers/net/ethernet/tile/Kconfig +++ b/drivers/net/ethernet/tile/Kconfig @@ -15,3 +15,14 @@ config TILE_NET To compile this driver as a module, choose M here: the module will be called tile_net. + +config PTP_1588_CLOCK_TILEGX + tristate "Tilera TILE-Gx mPIPE as PTP clock" + select PTP_1588_CLOCK + depends on TILE_NET + depends on TILEGX + ---help--- + This driver adds support for using the mPIPE as a PTP + clock. This clock is only useful if your PTP programs are + getting hardware time stamps on the PTP Ethernet packets + using the SO_TIMESTAMPING API. diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index f3c2d034b32c..907b5772fd55 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -36,7 +36,10 @@ #include <linux/io.h> #include <linux/ctype.h> #include <linux/ip.h> +#include <linux/ipv6.h> #include <linux/tcp.h> +#include <linux/net_tstamp.h> +#include <linux/ptp_clock_kernel.h> #include <asm/checksum.h> #include <asm/homecache.h> @@ -76,6 +79,9 @@ #define MAX_FRAGS (MAX_SKB_FRAGS + 1) +/* The "kinds" of buffer stacks (small/large/jumbo). */ +#define MAX_KINDS 3 + /* Size of completions data to allocate. * ISSUE: Probably more than needed since we don't use all the channels. */ @@ -130,29 +136,31 @@ struct tile_net_tx_wake { /* Info for a specific cpu. */ struct tile_net_info { - /* The NAPI struct. */ - struct napi_struct napi; - /* Packet queue. */ - gxio_mpipe_iqueue_t iqueue; /* Our cpu. */ int my_cpu; - /* True if iqueue is valid. */ - bool has_iqueue; - /* NAPI flags. */ - bool napi_added; - bool napi_enabled; - /* Number of small sk_buffs which must still be provided. */ - unsigned int num_needed_small_buffers; - /* Number of large sk_buffs which must still be provided. */ - unsigned int num_needed_large_buffers; /* A timer for handling egress completions. */ struct hrtimer egress_timer; /* True if "egress_timer" is scheduled. */ bool egress_timer_scheduled; - /* Comps for each egress channel. */ - struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS]; - /* Transmit wake timer for each egress channel. */ - struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS]; + struct info_mpipe { + /* Packet queue. */ + gxio_mpipe_iqueue_t iqueue; + /* The NAPI struct. */ + struct napi_struct napi; + /* Number of buffers (by kind) which must still be provided. */ + unsigned int num_needed_buffers[MAX_KINDS]; + /* instance id. 
*/ + int instance; + /* True if iqueue is valid. */ + bool has_iqueue; + /* NAPI flags. */ + bool napi_added; + bool napi_enabled; + /* Comps for each egress channel. */ + struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS]; + /* Transmit wake timer for each egress channel. */ + struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS]; + } mpipe[NR_MPIPE_MAX]; }; /* Info for egress on a particular egress channel. */ @@ -177,19 +185,67 @@ struct tile_net_priv { int loopify_channel; /* The egress channel (channel or loopify_channel). */ int echannel; - /* Total stats. */ - struct net_device_stats stats; + /* mPIPE instance, 0 or 1. */ + int instance; +#ifdef CONFIG_PTP_1588_CLOCK_TILEGX + /* The timestamp config. */ + struct hwtstamp_config stamp_cfg; +#endif }; -/* Egress info, indexed by "priv->echannel" (lazily created as needed). */ -static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS]; +static struct mpipe_data { + /* The ingress irq. */ + int ingress_irq; -/* Devices currently associated with each channel. - * NOTE: The array entry can become NULL after ifconfig down, but - * we do not free the underlying net_device structures, so it is - * safe to use a pointer after reading it from this array. - */ -static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS]; + /* The "context" for all devices. */ + gxio_mpipe_context_t context; + + /* Egress info, indexed by "priv->echannel" + * (lazily created as needed). + */ + struct tile_net_egress + egress_for_echannel[TILE_NET_CHANNELS]; + + /* Devices currently associated with each channel. + * NOTE: The array entry can become NULL after ifconfig down, but + * we do not free the underlying net_device structures, so it is + * safe to use a pointer after reading it from this array. + */ + struct net_device + *tile_net_devs_for_channel[TILE_NET_CHANNELS]; + + /* The actual memory allocated for the buffer stacks. */ + void *buffer_stack_vas[MAX_KINDS]; + + /* The amount of memory allocated for each buffer stack. */ + size_t buffer_stack_bytes[MAX_KINDS]; + + /* The first buffer stack index + * (small = +0, large = +1, jumbo = +2). + */ + int first_buffer_stack; + + /* The buckets. */ + int first_bucket; + int num_buckets; + +#ifdef CONFIG_PTP_1588_CLOCK_TILEGX + /* PTP-specific data. */ + struct ptp_clock *ptp_clock; + struct ptp_clock_info caps; + + /* Lock for ptp accessors. */ + struct mutex ptp_lock; +#endif + +} mpipe_data[NR_MPIPE_MAX] = { + [0 ... (NR_MPIPE_MAX - 1)] { + .ingress_irq = -1, + .first_buffer_stack = -1, + .first_bucket = -1, + .num_buckets = 1 + } +}; /* A mutex for "tile_net_devs_for_channel". */ static DEFINE_MUTEX(tile_net_devs_for_channel_mutex); @@ -197,34 +253,17 @@ static DEFINE_MUTEX(tile_net_devs_for_channel_mutex); /* The per-cpu info. */ static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info); -/* The "context" for all devices. */ -static gxio_mpipe_context_t context; -/* Buffer sizes and mpipe enum codes for buffer stacks. +/* The buffer size enums for each buffer stack. * See arch/tile/include/gxio/mpipe.h for the set of possible values. + * We avoid the "10384" size because it can induce "false chaining" + * on "cut-through" jumbo packets. */ -#define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128 -#define BUFFER_SIZE_SMALL 128 -#define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664 -#define BUFFER_SIZE_LARGE 1664 - -/* The small/large "buffer stacks". 
*/ -static int small_buffer_stack = -1; -static int large_buffer_stack = -1; - -/* Amount of memory allocated for each buffer stack. */ -static size_t buffer_stack_size; - -/* The actual memory allocated for the buffer stacks. */ -static void *small_buffer_stack_va; -static void *large_buffer_stack_va; - -/* The buckets. */ -static int first_bucket = -1; -static int num_buckets = 1; - -/* The ingress irq. */ -static int ingress_irq = -1; +static gxio_mpipe_buffer_size_enum_t buffer_size_enums[MAX_KINDS] = { + GXIO_MPIPE_BUFFER_SIZE_128, + GXIO_MPIPE_BUFFER_SIZE_1664, + GXIO_MPIPE_BUFFER_SIZE_16384 +}; /* Text value of tile_net.cpus if passed as a module parameter. */ static char *network_cpus_string; @@ -232,11 +271,21 @@ static char *network_cpus_string; /* The actual cpus in "network_cpus". */ static struct cpumask network_cpus_map; -/* If "loopify=LINK" was specified, this is "LINK". */ +/* If "tile_net.loopify=LINK" was specified, this is "LINK". */ static char *loopify_link_name; -/* If "tile_net.custom" was specified, this is non-NULL. */ -static char *custom_str; +/* If "tile_net.custom" was specified, this is true. */ +static bool custom_flag; + +/* If "tile_net.jumbo=NUM" was specified, this is "NUM". */ +static uint jumbo_num; + +/* Obtain mpipe instance from struct tile_net_priv given struct net_device. */ +static inline int mpipe_instance(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + return priv->instance; +} /* The "tile_net.cpus" argument specifies the cpus that are dedicated * to handle ingress packets. @@ -289,9 +338,15 @@ MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress"); /* The "tile_net.custom" argument causes us to ignore the "conventional" * classifier metadata, in particular, the "l2_offset". */ -module_param_named(custom, custom_str, charp, 0444); +module_param_named(custom, custom_flag, bool, 0444); MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier"); +/* The "tile_net.jumbo" argument causes us to support "jumbo" packets, + * and to allocate the given number of "jumbo" buffers. + */ +module_param_named(jumbo, jumbo_num, uint, 0444); +MODULE_PARM_DESC(jumbo, "the number of buffers to support jumbo packets"); + /* Atomically update a statistics field. * Note that on TILE-Gx, this operation is fire-and-forget on the * issuing core (single-cycle dispatch) and takes only a few cycles @@ -305,15 +360,16 @@ static void tile_net_stats_add(unsigned long value, unsigned long *field) } /* Allocate and push a buffer. */ -static bool tile_net_provide_buffer(bool small) +static bool tile_net_provide_buffer(int instance, int kind) { - int stack = small ? small_buffer_stack : large_buffer_stack; + struct mpipe_data *md = &mpipe_data[instance]; + gxio_mpipe_buffer_size_enum_t bse = buffer_size_enums[kind]; + size_t bs = gxio_mpipe_buffer_size_enum_to_buffer_size(bse); const unsigned long buffer_alignment = 128; struct sk_buff *skb; int len; - len = sizeof(struct sk_buff **) + buffer_alignment; - len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE); + len = sizeof(struct sk_buff **) + buffer_alignment + bs; skb = dev_alloc_skb(len); if (skb == NULL) return false; @@ -328,7 +384,7 @@ static bool tile_net_provide_buffer(bool small) /* Make sure "skb" and the back-pointer have been flushed. 
*/ wmb(); - gxio_mpipe_push_buffer(&context, stack, + gxio_mpipe_push_buffer(&md->context, md->first_buffer_stack + kind, (void *)va_to_tile_io_addr(skb->data)); return true; @@ -354,11 +410,14 @@ static struct sk_buff *mpipe_buf_to_skb(void *va) return skb; } -static void tile_net_pop_all_buffers(int stack) +static void tile_net_pop_all_buffers(int instance, int stack) { + struct mpipe_data *md = &mpipe_data[instance]; + for (;;) { tile_io_addr_t addr = - (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack); + (tile_io_addr_t)gxio_mpipe_pop_buffer(&md->context, + stack); if (addr == 0) break; dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr))); @@ -369,24 +428,111 @@ static void tile_net_pop_all_buffers(int stack) static void tile_net_provide_needed_buffers(void) { struct tile_net_info *info = &__get_cpu_var(per_cpu_info); + int instance, kind; + for (instance = 0; instance < NR_MPIPE_MAX && + info->mpipe[instance].has_iqueue; instance++) { + for (kind = 0; kind < MAX_KINDS; kind++) { + while (info->mpipe[instance].num_needed_buffers[kind] + != 0) { + if (!tile_net_provide_buffer(instance, kind)) { + pr_notice("Tile %d still needs" + " some buffers\n", + info->my_cpu); + return; + } + info->mpipe[instance]. + num_needed_buffers[kind]--; + } + } + } +} - while (info->num_needed_small_buffers != 0) { - if (!tile_net_provide_buffer(true)) - goto oops; - info->num_needed_small_buffers--; +/* Get RX timestamp, and store it in the skb. */ +static void tile_rx_timestamp(struct tile_net_priv *priv, struct sk_buff *skb, + gxio_mpipe_idesc_t *idesc) +{ +#ifdef CONFIG_PTP_1588_CLOCK_TILEGX + if (unlikely(priv->stamp_cfg.rx_filter != HWTSTAMP_FILTER_NONE)) { + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + shhwtstamps->hwtstamp = ktime_set(idesc->time_stamp_sec, + idesc->time_stamp_ns); } +#endif +} - while (info->num_needed_large_buffers != 0) { - if (!tile_net_provide_buffer(false)) - goto oops; - info->num_needed_large_buffers--; +/* Get TX timestamp, and store it in the skb. */ +static void tile_tx_timestamp(struct sk_buff *skb, int instance) +{ +#ifdef CONFIG_PTP_1588_CLOCK_TILEGX + struct skb_shared_info *shtx = skb_shinfo(skb); + if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) { + struct mpipe_data *md = &mpipe_data[instance]; + struct skb_shared_hwtstamps shhwtstamps; + struct timespec ts; + + shtx->tx_flags |= SKBTX_IN_PROGRESS; + gxio_mpipe_get_timestamp(&md->context, &ts); + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); + skb_tstamp_tx(skb, &shhwtstamps); } +#endif +} + +/* Use ioctl() to enable or disable TX or RX timestamping. */ +static int tile_hwtstamp_ioctl(struct net_device *dev, struct ifreq *rq, + int cmd) +{ +#ifdef CONFIG_PTP_1588_CLOCK_TILEGX + struct hwtstamp_config config; + struct tile_net_priv *priv = netdev_priv(dev); - return; + if (copy_from_user(&config, rq->ifr_data, sizeof(config))) + return -EFAULT; + + if (config.flags) /* reserved for future extensions */ + return -EINVAL; -oops: - /* Add a description to the page allocation failure dump. 
*/ - pr_notice("Tile %d still needs some buffers\n", info->my_cpu); + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + if (copy_to_user(rq->ifr_data, &config, sizeof(config))) + return -EFAULT; + + priv->stamp_cfg = config; + return 0; +#else + return -EOPNOTSUPP; +#endif } static inline bool filter_packet(struct net_device *dev, void *buf) @@ -409,6 +555,7 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, { struct tile_net_info *info = &__get_cpu_var(per_cpu_info); struct tile_net_priv *priv = netdev_priv(dev); + int instance = priv->instance; /* Encode the actual packet length. */ skb_put(skb, len); @@ -419,47 +566,52 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, if (idesc->cs && idesc->csum_seed_val == 0xFFFF) skb->ip_summed = CHECKSUM_UNNECESSARY; - netif_receive_skb(skb); + /* Get RX timestamp from idesc. */ + tile_rx_timestamp(priv, skb, idesc); + + napi_gro_receive(&info->mpipe[instance].napi, skb); /* Update stats. */ - tile_net_stats_add(1, &priv->stats.rx_packets); - tile_net_stats_add(len, &priv->stats.rx_bytes); + tile_net_stats_add(1, &dev->stats.rx_packets); + tile_net_stats_add(len, &dev->stats.rx_bytes); /* Need a new buffer. */ - if (idesc->size == BUFFER_SIZE_SMALL_ENUM) - info->num_needed_small_buffers++; + if (idesc->size == buffer_size_enums[0]) + info->mpipe[instance].num_needed_buffers[0]++; + else if (idesc->size == buffer_size_enums[1]) + info->mpipe[instance].num_needed_buffers[1]++; else - info->num_needed_large_buffers++; + info->mpipe[instance].num_needed_buffers[2]++; } /* Handle a packet. Return true if "processed", false if "filtered". */ -static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc) +static bool tile_net_handle_packet(int instance, gxio_mpipe_idesc_t *idesc) { struct tile_net_info *info = &__get_cpu_var(per_cpu_info); - struct net_device *dev = tile_net_devs_for_channel[idesc->channel]; + struct mpipe_data *md = &mpipe_data[instance]; + struct net_device *dev = md->tile_net_devs_for_channel[idesc->channel]; uint8_t l2_offset; void *va; void *buf; unsigned long len; bool filter; - /* Drop packets for which no buffer was available. - * NOTE: This happens under heavy load. + /* Drop packets for which no buffer was available (which can + * happen under heavy load), or for which the me/tr/ce flags + * are set (which can happen for jumbo cut-through packets, + * or with a customized classifier). 
*/ - if (idesc->be) { - struct tile_net_priv *priv = netdev_priv(dev); - tile_net_stats_add(1, &priv->stats.rx_dropped); - gxio_mpipe_iqueue_consume(&info->iqueue, idesc); - if (net_ratelimit()) - pr_info("Dropping packet (insufficient buffers).\n"); - return false; + if (idesc->be || idesc->me || idesc->tr || idesc->ce) { + if (dev) + tile_net_stats_add(1, &dev->stats.rx_errors); + goto drop; } /* Get the "l2_offset", if allowed. */ - l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc); + l2_offset = custom_flag ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc); - /* Get the raw buffer VA (includes "headroom"). */ - va = tile_io_addr_to_va((unsigned long)(long)idesc->va); + /* Get the VA (including NET_IP_ALIGN bytes of "headroom"). */ + va = tile_io_addr_to_va((unsigned long)idesc->va); /* Get the actual packet start/length. */ buf = va + l2_offset; @@ -470,7 +622,10 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc) filter = filter_packet(dev, buf); if (filter) { - gxio_mpipe_iqueue_drop(&info->iqueue, idesc); + if (dev) + tile_net_stats_add(1, &dev->stats.rx_dropped); +drop: + gxio_mpipe_iqueue_drop(&info->mpipe[instance].iqueue, idesc); } else { struct sk_buff *skb = mpipe_buf_to_skb(va); @@ -480,7 +635,7 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc) tile_net_receive_skb(dev, skb, idesc, len); } - gxio_mpipe_iqueue_consume(&info->iqueue, idesc); + gxio_mpipe_iqueue_consume(&info->mpipe[instance].iqueue, idesc); return !filter; } @@ -501,14 +656,20 @@ static int tile_net_poll(struct napi_struct *napi, int budget) struct tile_net_info *info = &__get_cpu_var(per_cpu_info); unsigned int work = 0; gxio_mpipe_idesc_t *idesc; - int i, n; - - /* Process packets. */ - while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) { + int instance, i, n; + struct mpipe_data *md; + struct info_mpipe *info_mpipe = + container_of(napi, struct info_mpipe, napi); + + instance = info_mpipe->instance; + while ((n = gxio_mpipe_iqueue_try_peek( + &info_mpipe->iqueue, + &idesc)) > 0) { for (i = 0; i < n; i++) { if (i == TILE_NET_BATCH) goto done; - if (tile_net_handle_packet(idesc + i)) { + if (tile_net_handle_packet(instance, + idesc + i)) { if (++work >= budget) goto done; } @@ -516,14 +677,16 @@ static int tile_net_poll(struct napi_struct *napi, int budget) } /* There are no packets left. */ - napi_complete(&info->napi); + napi_complete(&info_mpipe->napi); + md = &mpipe_data[instance]; /* Re-enable hypervisor interrupts. */ - gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring); + gxio_mpipe_enable_notif_ring_interrupt( + &md->context, info->mpipe[instance].iqueue.ring); /* HACK: Avoid the "rotting packet" problem. */ - if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0) - napi_schedule(&info->napi); + if (gxio_mpipe_iqueue_try_peek(&info_mpipe->iqueue, &idesc) > 0) + napi_schedule(&info_mpipe->napi); /* ISSUE: Handle completions? */ @@ -533,11 +696,11 @@ done: return work; } -/* Handle an ingress interrupt on the current cpu. */ -static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused) +/* Handle an ingress interrupt from an instance on the current cpu. 
*/ +static irqreturn_t tile_net_handle_ingress_irq(int irq, void *id) { struct tile_net_info *info = &__get_cpu_var(per_cpu_info); - napi_schedule(&info->napi); + napi_schedule(&info->mpipe[(uint64_t)id].napi); return IRQ_HANDLED; } @@ -579,7 +742,9 @@ static void tile_net_schedule_tx_wake_timer(struct net_device *dev, { struct tile_net_info *info = &per_cpu(per_cpu_info, tx_queue_idx); struct tile_net_priv *priv = netdev_priv(dev); - struct tile_net_tx_wake *tx_wake = &info->tx_wake[priv->echannel]; + int instance = priv->instance; + struct tile_net_tx_wake *tx_wake = + &info->mpipe[instance].tx_wake[priv->echannel]; hrtimer_start(&tx_wake->timer, ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL), @@ -617,7 +782,7 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t) struct tile_net_info *info = &__get_cpu_var(per_cpu_info); unsigned long irqflags; bool pending = false; - int i; + int i, instance; local_irq_save(irqflags); @@ -625,13 +790,19 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t) info->egress_timer_scheduled = false; /* Free all possible comps for this tile. */ - for (i = 0; i < TILE_NET_CHANNELS; i++) { - struct tile_net_egress *egress = &egress_for_echannel[i]; - struct tile_net_comps *comps = info->comps_for_echannel[i]; - if (comps->comp_last >= comps->comp_next) - continue; - tile_net_free_comps(egress->equeue, comps, -1, true); - pending = pending || (comps->comp_last < comps->comp_next); + for (instance = 0; instance < NR_MPIPE_MAX && + info->mpipe[instance].has_iqueue; instance++) { + for (i = 0; i < TILE_NET_CHANNELS; i++) { + struct tile_net_egress *egress = + &mpipe_data[instance].egress_for_echannel[i]; + struct tile_net_comps *comps = + info->mpipe[instance].comps_for_echannel[i]; + if (!egress || comps->comp_last >= comps->comp_next) + continue; + tile_net_free_comps(egress->equeue, comps, -1, true); + pending = pending || + (comps->comp_last < comps->comp_next); + } } /* Reschedule timer if needed. */ @@ -643,37 +814,112 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t) return HRTIMER_NORESTART; } -/* Helper function for "tile_net_update()". - * "dev" (i.e. arg) is the device being brought up or down, - * or NULL if all devices are now down. - */ -static void tile_net_update_cpu(void *arg) +#ifdef CONFIG_PTP_1588_CLOCK_TILEGX + +/* PTP clock operations. 
*/ + +static int ptp_mpipe_adjfreq(struct ptp_clock_info *ptp, s32 ppb) { - struct tile_net_info *info = &__get_cpu_var(per_cpu_info); - struct net_device *dev = arg; + int ret = 0; + struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); + mutex_lock(&md->ptp_lock); + if (gxio_mpipe_adjust_timestamp_freq(&md->context, ppb)) + ret = -EINVAL; + mutex_unlock(&md->ptp_lock); + return ret; +} - if (!info->has_iqueue) - return; +static int ptp_mpipe_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + int ret = 0; + struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); + mutex_lock(&md->ptp_lock); + if (gxio_mpipe_adjust_timestamp(&md->context, delta)) + ret = -EBUSY; + mutex_unlock(&md->ptp_lock); + return ret; +} - if (dev != NULL) { - if (!info->napi_added) { - netif_napi_add(dev, &info->napi, - tile_net_poll, TILE_NET_WEIGHT); - info->napi_added = true; - } - if (!info->napi_enabled) { - napi_enable(&info->napi); - info->napi_enabled = true; - } - enable_percpu_irq(ingress_irq, 0); - } else { - disable_percpu_irq(ingress_irq); - if (info->napi_enabled) { - napi_disable(&info->napi); - info->napi_enabled = false; - } - /* FIXME: Drain the iqueue. */ - } +static int ptp_mpipe_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +{ + int ret = 0; + struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); + mutex_lock(&md->ptp_lock); + if (gxio_mpipe_get_timestamp(&md->context, ts)) + ret = -EBUSY; + mutex_unlock(&md->ptp_lock); + return ret; +} + +static int ptp_mpipe_settime(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + int ret = 0; + struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); + mutex_lock(&md->ptp_lock); + if (gxio_mpipe_set_timestamp(&md->context, ts)) + ret = -EBUSY; + mutex_unlock(&md->ptp_lock); + return ret; +} + +static int ptp_mpipe_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *request, int on) +{ + return -EOPNOTSUPP; +} + +static struct ptp_clock_info ptp_mpipe_caps = { + .owner = THIS_MODULE, + .name = "mPIPE clock", + .max_adj = 999999999, + .n_ext_ts = 0, + .pps = 0, + .adjfreq = ptp_mpipe_adjfreq, + .adjtime = ptp_mpipe_adjtime, + .gettime = ptp_mpipe_gettime, + .settime = ptp_mpipe_settime, + .enable = ptp_mpipe_enable, +}; + +#endif /* CONFIG_PTP_1588_CLOCK_TILEGX */ + +/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */ +static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md) +{ +#ifdef CONFIG_PTP_1588_CLOCK_TILEGX + struct timespec ts; + + getnstimeofday(&ts); + gxio_mpipe_set_timestamp(&md->context, &ts); + + mutex_init(&md->ptp_lock); + md->caps = ptp_mpipe_caps; + md->ptp_clock = ptp_clock_register(&md->caps, NULL); + if (IS_ERR(md->ptp_clock)) + netdev_err(dev, "ptp_clock_register failed %ld\n", + PTR_ERR(md->ptp_clock)); +#endif +} + +/* Initialize PTP fields in a new device. */ +static void init_ptp_dev(struct tile_net_priv *priv) +{ +#ifdef CONFIG_PTP_1588_CLOCK_TILEGX + priv->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE; + priv->stamp_cfg.tx_type = HWTSTAMP_TX_OFF; +#endif +} + +/* Helper functions for "tile_net_update()". */ +static void enable_ingress_irq(void *irq) +{ + enable_percpu_irq((long)irq, 0); +} + +static void disable_ingress_irq(void *irq) +{ + disable_percpu_irq((long)irq); } /* Helper function for tile_net_open() and tile_net_stop(). 
@@ -683,19 +929,22 @@ static int tile_net_update(struct net_device *dev) { static gxio_mpipe_rules_t rules; /* too big to fit on the stack */ bool saw_channel = false; + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; int channel; int rc; int cpu; - gxio_mpipe_rules_init(&rules, &context); + saw_channel = false; + gxio_mpipe_rules_init(&rules, &md->context); for (channel = 0; channel < TILE_NET_CHANNELS; channel++) { - if (tile_net_devs_for_channel[channel] == NULL) + if (md->tile_net_devs_for_channel[channel] == NULL) continue; if (!saw_channel) { saw_channel = true; - gxio_mpipe_rules_begin(&rules, first_bucket, - num_buckets, NULL); + gxio_mpipe_rules_begin(&rules, md->first_bucket, + md->num_buckets, NULL); gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN); } gxio_mpipe_rules_add_channel(&rules, channel); @@ -706,102 +955,150 @@ static int tile_net_update(struct net_device *dev) */ rc = gxio_mpipe_rules_commit(&rules); if (rc != 0) { - netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc); + netdev_warn(dev, "gxio_mpipe_rules_commit: mpipe[%d] %d\n", + instance, rc); return -EIO; } - /* Update all cpus, sequentially (to protect "netif_napi_add()"). */ - for_each_online_cpu(cpu) - smp_call_function_single(cpu, tile_net_update_cpu, - (saw_channel ? dev : NULL), 1); + /* Update all cpus, sequentially (to protect "netif_napi_add()"). + * We use on_each_cpu to handle the IPI mask or unmask. + */ + if (!saw_channel) + on_each_cpu(disable_ingress_irq, + (void *)(long)(md->ingress_irq), 1); + for_each_online_cpu(cpu) { + struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); + + if (!info->mpipe[instance].has_iqueue) + continue; + if (saw_channel) { + if (!info->mpipe[instance].napi_added) { + netif_napi_add(dev, &info->mpipe[instance].napi, + tile_net_poll, TILE_NET_WEIGHT); + info->mpipe[instance].napi_added = true; + } + if (!info->mpipe[instance].napi_enabled) { + napi_enable(&info->mpipe[instance].napi); + info->mpipe[instance].napi_enabled = true; + } + } else { + if (info->mpipe[instance].napi_enabled) { + napi_disable(&info->mpipe[instance].napi); + info->mpipe[instance].napi_enabled = false; + } + /* FIXME: Drain the iqueue. */ + } + } + if (saw_channel) + on_each_cpu(enable_ingress_irq, + (void *)(long)(md->ingress_irq), 1); /* HACK: Allow packets to flow in the simulator. */ if (saw_channel) - sim_enable_mpipe_links(0, -1); + sim_enable_mpipe_links(instance, -1); return 0; } -/* Allocate and initialize mpipe buffer stacks, and register them in - * the mPIPE TLBs, for both small and large packet sizes. - * This routine supports tile_net_init_mpipe(), below. - */ -static int init_buffer_stacks(struct net_device *dev, int num_buffers) +/* Initialize a buffer stack. */ +static int create_buffer_stack(struct net_device *dev, + int kind, size_t num_buffers) { pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH); - int rc; + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; + size_t needed = gxio_mpipe_calc_buffer_stack_bytes(num_buffers); + int stack_idx = md->first_buffer_stack + kind; + void *va; + int i, rc; - /* Compute stack bytes; we round up to 64KB and then use - * alloc_pages() so we get the required 64KB alignment as well. + /* Round up to 64KB and then use alloc_pages() so we get the + * required 64KB alignment. */ - buffer_stack_size = - ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers), - 64 * 1024); - - /* Allocate two buffer stack indices. 
*/ - rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0); - if (rc < 0) { - netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n", - rc); - return rc; - } - small_buffer_stack = rc; - large_buffer_stack = rc + 1; + md->buffer_stack_bytes[kind] = + ALIGN(needed, 64 * 1024); - /* Allocate the small memory stack. */ - small_buffer_stack_va = - alloc_pages_exact(buffer_stack_size, GFP_KERNEL); - if (small_buffer_stack_va == NULL) { + va = alloc_pages_exact(md->buffer_stack_bytes[kind], GFP_KERNEL); + if (va == NULL) { netdev_err(dev, - "Could not alloc %zd bytes for buffer stacks\n", - buffer_stack_size); + "Could not alloc %zd bytes for buffer stack %d\n", + md->buffer_stack_bytes[kind], kind); return -ENOMEM; } - rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack, - BUFFER_SIZE_SMALL_ENUM, - small_buffer_stack_va, - buffer_stack_size, 0); + + /* Initialize the buffer stack. */ + rc = gxio_mpipe_init_buffer_stack(&md->context, stack_idx, + buffer_size_enums[kind], va, + md->buffer_stack_bytes[kind], 0); if (rc != 0) { - netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc); + netdev_err(dev, "gxio_mpipe_init_buffer_stack: mpipe[%d] %d\n", + instance, rc); + free_pages_exact(va, md->buffer_stack_bytes[kind]); return rc; } - rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack, + + md->buffer_stack_vas[kind] = va; + + rc = gxio_mpipe_register_client_memory(&md->context, stack_idx, hash_pte, 0); if (rc != 0) { netdev_err(dev, - "gxio_mpipe_register_buffer_memory failed: %d\n", - rc); + "gxio_mpipe_register_client_memory: mpipe[%d] %d\n", + instance, rc); return rc; } - /* Allocate the large buffer stack. */ - large_buffer_stack_va = - alloc_pages_exact(buffer_stack_size, GFP_KERNEL); - if (large_buffer_stack_va == NULL) { - netdev_err(dev, - "Could not alloc %zd bytes for buffer stacks\n", - buffer_stack_size); - return -ENOMEM; - } - rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack, - BUFFER_SIZE_LARGE_ENUM, - large_buffer_stack_va, - buffer_stack_size, 0); - if (rc != 0) { - netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n", - rc); - return rc; + /* Provide initial buffers. */ + for (i = 0; i < num_buffers; i++) { + if (!tile_net_provide_buffer(instance, kind)) { + netdev_err(dev, "Cannot allocate initial sk_bufs!\n"); + return -ENOMEM; + } } - rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack, - hash_pte, 0); - if (rc != 0) { + + return 0; +} + +/* Allocate and initialize mpipe buffer stacks, and register them in + * the mPIPE TLBs, for small, large, and (possibly) jumbo packet sizes. + * This routine supports tile_net_init_mpipe(), below. + */ +static int init_buffer_stacks(struct net_device *dev, + int network_cpus_count) +{ + int num_kinds = MAX_KINDS - (jumbo_num == 0); + size_t num_buffers; + int rc; + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; + + /* Allocate the buffer stacks. */ + rc = gxio_mpipe_alloc_buffer_stacks(&md->context, num_kinds, 0, 0); + if (rc < 0) { netdev_err(dev, - "gxio_mpipe_register_buffer_memory failed: %d\n", - rc); + "gxio_mpipe_alloc_buffer_stacks: mpipe[%d] %d\n", + instance, rc); return rc; } + md->first_buffer_stack = rc; - return 0; + /* Enough small/large buffers to (normally) avoid buffer errors. */ + num_buffers = + network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH); + + /* Allocate the small memory stack. */ + if (rc >= 0) + rc = create_buffer_stack(dev, 0, num_buffers); + + /* Allocate the large buffer stack. 
*/ + if (rc >= 0) + rc = create_buffer_stack(dev, 1, num_buffers); + + /* Allocate the jumbo buffer stack if needed. */ + if (rc >= 0 && jumbo_num != 0) + rc = create_buffer_stack(dev, 2, jumbo_num); + + return rc; } /* Allocate per-cpu resources (memory for completions and idescs). @@ -812,6 +1109,8 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev, { struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); int order, i, rc; + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; struct page *page; void *addr; @@ -826,7 +1125,7 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev, addr = pfn_to_kaddr(page_to_pfn(page)); memset(addr, 0, COMPS_SIZE); for (i = 0; i < TILE_NET_CHANNELS; i++) - info->comps_for_echannel[i] = + info->mpipe[instance].comps_for_echannel[i] = addr + i * sizeof(struct tile_net_comps); /* If this is a network cpu, create an iqueue. */ @@ -840,14 +1139,15 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev, return -ENOMEM; } addr = pfn_to_kaddr(page_to_pfn(page)); - rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++, - addr, NOTIF_RING_SIZE, 0); + rc = gxio_mpipe_iqueue_init(&info->mpipe[instance].iqueue, + &md->context, ring++, addr, + NOTIF_RING_SIZE, 0); if (rc < 0) { netdev_err(dev, "gxio_mpipe_iqueue_init failed: %d\n", rc); return rc; } - info->has_iqueue = true; + info->mpipe[instance].has_iqueue = true; } return ring; @@ -860,40 +1160,41 @@ static int init_notif_group_and_buckets(struct net_device *dev, int ring, int network_cpus_count) { int group, rc; + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; /* Allocate one NotifGroup. */ - rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0); + rc = gxio_mpipe_alloc_notif_groups(&md->context, 1, 0, 0); if (rc < 0) { - netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n", - rc); + netdev_err(dev, "gxio_mpipe_alloc_notif_groups: mpipe[%d] %d\n", + instance, rc); return rc; } group = rc; /* Initialize global num_buckets value. */ if (network_cpus_count > 4) - num_buckets = 256; + md->num_buckets = 256; else if (network_cpus_count > 1) - num_buckets = 16; + md->num_buckets = 16; /* Allocate some buckets, and set global first_bucket value. */ - rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0); + rc = gxio_mpipe_alloc_buckets(&md->context, md->num_buckets, 0, 0); if (rc < 0) { - netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc); + netdev_err(dev, "gxio_mpipe_alloc_buckets: mpipe[%d] %d\n", + instance, rc); return rc; } - first_bucket = rc; + md->first_bucket = rc; /* Init group and buckets. 
*/ rc = gxio_mpipe_init_notif_group_and_buckets( - &context, group, ring, network_cpus_count, - first_bucket, num_buckets, + &md->context, group, ring, network_cpus_count, + md->first_bucket, md->num_buckets, GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY); if (rc != 0) { - netdev_err( - dev, - "gxio_mpipe_init_notif_group_and_buckets failed: %d\n", - rc); + netdev_err(dev, "gxio_mpipe_init_notif_group_and_buckets: " + "mpipe[%d] %d\n", instance, rc); return rc; } @@ -907,30 +1208,39 @@ static int init_notif_group_and_buckets(struct net_device *dev, */ static int tile_net_setup_interrupts(struct net_device *dev) { - int cpu, rc; + int cpu, rc, irq; + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; + + irq = md->ingress_irq; + if (irq < 0) { + irq = create_irq(); + if (irq < 0) { + netdev_err(dev, + "create_irq failed: mpipe[%d] %d\n", + instance, irq); + return irq; + } + tile_irq_activate(irq, TILE_IRQ_PERCPU); - rc = create_irq(); - if (rc < 0) { - netdev_err(dev, "create_irq failed: %d\n", rc); - return rc; - } - ingress_irq = rc; - tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU); - rc = request_irq(ingress_irq, tile_net_handle_ingress_irq, - 0, "tile_net", NULL); - if (rc != 0) { - netdev_err(dev, "request_irq failed: %d\n", rc); - destroy_irq(ingress_irq); - ingress_irq = -1; - return rc; + rc = request_irq(irq, tile_net_handle_ingress_irq, + 0, "tile_net", (void *)((uint64_t)instance)); + + if (rc != 0) { + netdev_err(dev, "request_irq failed: mpipe[%d] %d\n", + instance, rc); + destroy_irq(irq); + return rc; + } + md->ingress_irq = irq; } for_each_online_cpu(cpu) { struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); - if (info->has_iqueue) { - gxio_mpipe_request_notif_ring_interrupt( - &context, cpu_x(cpu), cpu_y(cpu), - KERNEL_PL, ingress_irq, info->iqueue.ring); + if (info->mpipe[instance].has_iqueue) { + gxio_mpipe_request_notif_ring_interrupt(&md->context, + cpu_x(cpu), cpu_y(cpu), KERNEL_PL, irq, + info->mpipe[instance].iqueue.ring); } } @@ -938,39 +1248,45 @@ static int tile_net_setup_interrupts(struct net_device *dev) } /* Undo any state set up partially by a failed call to tile_net_init_mpipe. */ -static void tile_net_init_mpipe_fail(void) +static void tile_net_init_mpipe_fail(int instance) { - int cpu; + int kind, cpu; + struct mpipe_data *md = &mpipe_data[instance]; /* Do cleanups that require the mpipe context first. */ - if (small_buffer_stack >= 0) - tile_net_pop_all_buffers(small_buffer_stack); - if (large_buffer_stack >= 0) - tile_net_pop_all_buffers(large_buffer_stack); + for (kind = 0; kind < MAX_KINDS; kind++) { + if (md->buffer_stack_vas[kind] != NULL) { + tile_net_pop_all_buffers(instance, + md->first_buffer_stack + + kind); + } + } /* Destroy mpipe context so the hardware no longer owns any memory. 
*/ - gxio_mpipe_destroy(&context); + gxio_mpipe_destroy(&md->context); for_each_online_cpu(cpu) { struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); - free_pages((unsigned long)(info->comps_for_echannel[0]), - get_order(COMPS_SIZE)); - info->comps_for_echannel[0] = NULL; - free_pages((unsigned long)(info->iqueue.idescs), + free_pages( + (unsigned long)( + info->mpipe[instance].comps_for_echannel[0]), + get_order(COMPS_SIZE)); + info->mpipe[instance].comps_for_echannel[0] = NULL; + free_pages((unsigned long)(info->mpipe[instance].iqueue.idescs), get_order(NOTIF_RING_SIZE)); - info->iqueue.idescs = NULL; + info->mpipe[instance].iqueue.idescs = NULL; } - if (small_buffer_stack_va) - free_pages_exact(small_buffer_stack_va, buffer_stack_size); - if (large_buffer_stack_va) - free_pages_exact(large_buffer_stack_va, buffer_stack_size); + for (kind = 0; kind < MAX_KINDS; kind++) { + if (md->buffer_stack_vas[kind] != NULL) { + free_pages_exact(md->buffer_stack_vas[kind], + md->buffer_stack_bytes[kind]); + md->buffer_stack_vas[kind] = NULL; + } + } - small_buffer_stack_va = NULL; - large_buffer_stack_va = NULL; - large_buffer_stack = -1; - small_buffer_stack = -1; - first_bucket = -1; + md->first_buffer_stack = -1; + md->first_bucket = -1; } /* The first time any tilegx network device is opened, we initialize @@ -984,9 +1300,11 @@ static void tile_net_init_mpipe_fail(void) */ static int tile_net_init_mpipe(struct net_device *dev) { - int i, num_buffers, rc; + int rc; int cpu; int first_ring, ring; + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; int network_cpus_count = cpus_weight(network_cpus_map); if (!hash_default) { @@ -994,36 +1312,21 @@ static int tile_net_init_mpipe(struct net_device *dev) return -EIO; } - rc = gxio_mpipe_init(&context, 0); + rc = gxio_mpipe_init(&md->context, instance); if (rc != 0) { - netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc); + netdev_err(dev, "gxio_mpipe_init: mpipe[%d] %d\n", + instance, rc); return -EIO; } /* Set up the buffer stacks. */ - num_buffers = - network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH); - rc = init_buffer_stacks(dev, num_buffers); + rc = init_buffer_stacks(dev, network_cpus_count); if (rc != 0) goto fail; - /* Provide initial buffers. */ - rc = -ENOMEM; - for (i = 0; i < num_buffers; i++) { - if (!tile_net_provide_buffer(true)) { - netdev_err(dev, "Cannot allocate initial sk_bufs!\n"); - goto fail; - } - } - for (i = 0; i < num_buffers; i++) { - if (!tile_net_provide_buffer(false)) { - netdev_err(dev, "Cannot allocate initial sk_bufs!\n"); - goto fail; - } - } - /* Allocate one NotifRing for each network cpu. */ - rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0); + rc = gxio_mpipe_alloc_notif_rings(&md->context, + network_cpus_count, 0, 0); if (rc < 0) { netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n", rc); @@ -1050,10 +1353,13 @@ static int tile_net_init_mpipe(struct net_device *dev) if (rc != 0) goto fail; + /* Register PTP clock and set mPIPE timestamp, if configured. 
*/ + register_ptp_clock(dev, md); + return 0; fail: - tile_net_init_mpipe_fail(); + tile_net_init_mpipe_fail(instance); return rc; } @@ -1063,17 +1369,19 @@ fail: */ static int tile_net_init_egress(struct net_device *dev, int echannel) { + static int ering = -1; struct page *headers_page, *edescs_page, *equeue_page; gxio_mpipe_edesc_t *edescs; gxio_mpipe_equeue_t *equeue; unsigned char *headers; int headers_order, edescs_order, equeue_order; size_t edescs_size; - int edma; int rc = -ENOMEM; + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; /* Only initialize once. */ - if (egress_for_echannel[echannel].equeue != NULL) + if (md->egress_for_echannel[echannel].equeue != NULL) return 0; /* Allocate memory for the "headers". */ @@ -1110,28 +1418,41 @@ static int tile_net_init_egress(struct net_device *dev, int echannel) } equeue = pfn_to_kaddr(page_to_pfn(equeue_page)); - /* Allocate an edma ring. Note that in practice this can't - * fail, which is good, because we will leak an edma ring if so. - */ - rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0); - if (rc < 0) { - netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n", - rc); - goto fail_equeue; + /* Allocate an edma ring (using a one entry "free list"). */ + if (ering < 0) { + rc = gxio_mpipe_alloc_edma_rings(&md->context, 1, 0, 0); + if (rc < 0) { + netdev_warn(dev, "gxio_mpipe_alloc_edma_rings: " + "mpipe[%d] %d\n", instance, rc); + goto fail_equeue; + } + ering = rc; } - edma = rc; /* Initialize the equeue. */ - rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel, + rc = gxio_mpipe_equeue_init(equeue, &md->context, ering, echannel, edescs, edescs_size, 0); if (rc != 0) { - netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc); + netdev_err(dev, "gxio_mpipe_equeue_init: mpipe[%d] %d\n", + instance, rc); goto fail_equeue; } + /* Don't reuse the ering later. */ + ering = -1; + + if (jumbo_num != 0) { + /* Make sure "jumbo" packets can be egressed safely. */ + if (gxio_mpipe_equeue_set_snf_size(equeue, 10368) < 0) { + /* ISSUE: There is no "gxio_mpipe_equeue_destroy()". */ + netdev_warn(dev, "Jumbo packets may not be egressed" + " properly on channel %d\n", echannel); + } + } + /* Done. 
*/ - egress_for_echannel[echannel].equeue = equeue; - egress_for_echannel[echannel].headers = headers; + md->egress_for_echannel[echannel].equeue = equeue; + md->egress_for_echannel[echannel].headers = headers; return 0; fail_equeue: @@ -1151,11 +1472,25 @@ fail: static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link, const char *link_name) { - int rc = gxio_mpipe_link_open(link, &context, link_name, 0); + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; + int rc = gxio_mpipe_link_open(link, &md->context, link_name, 0); if (rc < 0) { - netdev_err(dev, "Failed to open '%s'\n", link_name); + netdev_err(dev, "Failed to open '%s', mpipe[%d], %d\n", + link_name, instance, rc); return rc; } + if (jumbo_num != 0) { + u32 attr = GXIO_MPIPE_LINK_RECEIVE_JUMBO; + rc = gxio_mpipe_link_set_attr(link, attr, 1); + if (rc != 0) { + netdev_err(dev, + "Cannot receive jumbo packets on '%s'\n", + link_name); + gxio_mpipe_link_close(link); + return rc; + } + } rc = gxio_mpipe_link_channel(link); if (rc < 0 || rc >= TILE_NET_CHANNELS) { netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc); @@ -1169,12 +1504,21 @@ static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link, static int tile_net_open(struct net_device *dev) { struct tile_net_priv *priv = netdev_priv(dev); - int cpu, rc; + int cpu, rc, instance; mutex_lock(&tile_net_devs_for_channel_mutex); - /* Do one-time initialization the first time any device is opened. */ - if (ingress_irq < 0) { + /* Get the instance info. */ + rc = gxio_mpipe_link_instance(dev->name); + if (rc < 0 || rc >= NR_MPIPE_MAX) + return -EIO; + + priv->instance = rc; + instance = rc; + if (!mpipe_data[rc].context.mmio_fast_base) { + /* Do one-time initialization per instance the first time + * any device is opened. + */ rc = tile_net_init_mpipe(dev); if (rc != 0) goto fail; @@ -1205,7 +1549,7 @@ static int tile_net_open(struct net_device *dev) if (rc != 0) goto fail; - tile_net_devs_for_channel[priv->channel] = dev; + mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = dev; rc = tile_net_update(dev); if (rc != 0) @@ -1217,7 +1561,7 @@ static int tile_net_open(struct net_device *dev) for_each_online_cpu(cpu) { struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); struct tile_net_tx_wake *tx_wake = - &info->tx_wake[priv->echannel]; + &info->mpipe[instance].tx_wake[priv->echannel]; hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); @@ -1243,7 +1587,7 @@ fail: priv->channel = -1; } priv->echannel = -1; - tile_net_devs_for_channel[priv->channel] = NULL; + mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = NULL; mutex_unlock(&tile_net_devs_for_channel_mutex); /* Don't return raw gxio error codes to generic Linux. 
*/ @@ -1255,18 +1599,20 @@ static int tile_net_stop(struct net_device *dev) { struct tile_net_priv *priv = netdev_priv(dev); int cpu; + int instance = priv->instance; + struct mpipe_data *md = &mpipe_data[instance]; for_each_online_cpu(cpu) { struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); struct tile_net_tx_wake *tx_wake = - &info->tx_wake[priv->echannel]; + &info->mpipe[instance].tx_wake[priv->echannel]; hrtimer_cancel(&tx_wake->timer); netif_stop_subqueue(dev, cpu); } mutex_lock(&tile_net_devs_for_channel_mutex); - tile_net_devs_for_channel[priv->channel] = NULL; + md->tile_net_devs_for_channel[priv->channel] = NULL; (void)tile_net_update(dev); if (priv->loopify_channel >= 0) { if (gxio_mpipe_link_close(&priv->loopify_link) != 0) @@ -1374,20 +1720,20 @@ static int tso_count_edescs(struct sk_buff *skb) return num_edescs; } -/* Prepare modified copies of the skbuff headers. - * FIXME: add support for IPv6. - */ +/* Prepare modified copies of the skbuff headers. */ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, s64 slot) { struct skb_shared_info *sh = skb_shinfo(skb); struct iphdr *ih; + struct ipv6hdr *ih6; struct tcphdr *th; unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); unsigned int data_len = skb->len - sh_len; unsigned char *data = skb->data; unsigned int ih_off, th_off, p_len; unsigned int isum_seed, tsum_seed, id, seq; + int is_ipv6; long f_id = -1; /* id of the current fragment */ long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ long f_used = 0; /* bytes used from the current fragment */ @@ -1395,18 +1741,24 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, int segment; /* Locate original headers and compute various lengths. */ - ih = ip_hdr(skb); + is_ipv6 = skb_is_gso_v6(skb); + if (is_ipv6) { + ih6 = ipv6_hdr(skb); + ih_off = skb_network_offset(skb); + } else { + ih = ip_hdr(skb); + ih_off = skb_network_offset(skb); + isum_seed = ((0xFFFF - ih->check) + + (0xFFFF - ih->tot_len) + + (0xFFFF - ih->id)); + id = ntohs(ih->id); + } + th = tcp_hdr(skb); - ih_off = skb_network_offset(skb); th_off = skb_transport_offset(skb); p_len = sh->gso_size; - /* Set up seed values for IP and TCP csum and initialize id and seq. */ - isum_seed = ((0xFFFF - ih->check) + - (0xFFFF - ih->tot_len) + - (0xFFFF - ih->id)); tsum_seed = th->check + (0xFFFF ^ htons(skb->len)); - id = ntohs(ih->id); seq = ntohl(th->seq); /* Prepare all the headers. */ @@ -1420,11 +1772,17 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, memcpy(buf, data, sh_len); /* Update copied ip header. */ - ih = (struct iphdr *)(buf + ih_off); - ih->tot_len = htons(sh_len + p_len - ih_off); - ih->id = htons(id); - ih->check = csum_long(isum_seed + ih->tot_len + - ih->id) ^ 0xffff; + if (is_ipv6) { + ih6 = (struct ipv6hdr *)(buf + ih_off); + ih6->payload_len = htons(sh_len + p_len - ih_off - + sizeof(*ih6)); + } else { + ih = (struct iphdr *)(buf + ih_off); + ih->tot_len = htons(sh_len + p_len - ih_off); + ih->id = htons(id); + ih->check = csum_long(isum_seed + ih->tot_len + + ih->id) ^ 0xffff; + } /* Update copied tcp header. 
*/ th = (struct tcphdr *)(buf + th_off); @@ -1475,8 +1833,9 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, struct sk_buff *skb, unsigned char *headers, s64 slot) { - struct tile_net_priv *priv = netdev_priv(dev); struct skb_shared_info *sh = skb_shinfo(skb); + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); unsigned int data_len = skb->len - sh_len; unsigned int p_len = sh->gso_size; @@ -1499,8 +1858,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, edesc_head.xfer_size = sh_len; /* This is only used to specify the TLB. */ - edesc_head.stack_idx = large_buffer_stack; - edesc_body.stack_idx = large_buffer_stack; + edesc_head.stack_idx = md->first_buffer_stack; + edesc_body.stack_idx = md->first_buffer_stack; /* Egress all the edescs. */ for (segment = 0; segment < sh->gso_segs; segment++) { @@ -1553,8 +1912,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, } /* Update stats. */ - tile_net_stats_add(tx_packets, &priv->stats.tx_packets); - tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes); + tile_net_stats_add(tx_packets, &dev->stats.tx_packets); + tile_net_stats_add(tx_bytes, &dev->stats.tx_bytes); } /* Do "TSO" handling for egress. @@ -1575,8 +1934,11 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) struct tile_net_info *info = &__get_cpu_var(per_cpu_info); struct tile_net_priv *priv = netdev_priv(dev); int channel = priv->echannel; - struct tile_net_egress *egress = &egress_for_echannel[channel]; - struct tile_net_comps *comps = info->comps_for_echannel[channel]; + int instance = priv->instance; + struct mpipe_data *md = &mpipe_data[instance]; + struct tile_net_egress *egress = &md->egress_for_echannel[channel]; + struct tile_net_comps *comps = + info->mpipe[instance].comps_for_echannel[channel]; gxio_mpipe_equeue_t *equeue = egress->equeue; unsigned long irqflags; int num_edescs; @@ -1640,10 +2002,13 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) { struct tile_net_info *info = &__get_cpu_var(per_cpu_info); struct tile_net_priv *priv = netdev_priv(dev); - struct tile_net_egress *egress = &egress_for_echannel[priv->echannel]; + int instance = priv->instance; + struct mpipe_data *md = &mpipe_data[instance]; + struct tile_net_egress *egress = + &md->egress_for_echannel[priv->echannel]; gxio_mpipe_equeue_t *equeue = egress->equeue; struct tile_net_comps *comps = - info->comps_for_echannel[priv->echannel]; + info->mpipe[instance].comps_for_echannel[priv->echannel]; unsigned int len = skb->len; unsigned char *data = skb->data; unsigned int num_edescs; @@ -1660,7 +2025,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb)); /* This is only used to specify the TLB. */ - edesc.stack_idx = large_buffer_stack; + edesc.stack_idx = md->first_buffer_stack; /* Prepare the edescs. */ for (i = 0; i < num_edescs; i++) { @@ -1693,13 +2058,16 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) for (i = 0; i < num_edescs; i++) gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++); + /* Store TX timestamp if needed. */ + tile_tx_timestamp(skb, instance); + /* Add a completion record. */ add_comp(equeue, comps, slot - 1, skb); /* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). 
*/ - tile_net_stats_add(1, &priv->stats.tx_packets); + tile_net_stats_add(1, &dev->stats.tx_packets); tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN), - &priv->stats.tx_bytes); + &dev->stats.tx_bytes); local_irq_restore(irqflags); @@ -1727,20 +2095,18 @@ static void tile_net_tx_timeout(struct net_device *dev) /* Ioctl commands. */ static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - return -EOPNOTSUPP; -} + if (cmd == SIOCSHWTSTAMP) + return tile_hwtstamp_ioctl(dev, rq, cmd); -/* Get system network statistics for device. */ -static struct net_device_stats *tile_net_get_stats(struct net_device *dev) -{ - struct tile_net_priv *priv = netdev_priv(dev); - return &priv->stats; + return -EOPNOTSUPP; } /* Change the MTU. */ static int tile_net_change_mtu(struct net_device *dev, int new_mtu) { - if ((new_mtu < 68) || (new_mtu > 1500)) + if (new_mtu < 68) + return -EINVAL; + if (new_mtu > ((jumbo_num != 0) ? 9000 : 1500)) return -EINVAL; dev->mtu = new_mtu; return 0; @@ -1772,9 +2138,13 @@ static int tile_net_set_mac_address(struct net_device *dev, void *p) */ static void tile_net_netpoll(struct net_device *dev) { - disable_percpu_irq(ingress_irq); - tile_net_handle_ingress_irq(ingress_irq, NULL); - enable_percpu_irq(ingress_irq, 0); + int instance = mpipe_instance(dev); + struct tile_net_info *info = &__get_cpu_var(per_cpu_info); + struct mpipe_data *md = &mpipe_data[instance]; + + disable_percpu_irq(md->ingress_irq); + napi_schedule(&info->mpipe[instance].napi); + enable_percpu_irq(md->ingress_irq, 0); } #endif @@ -1784,7 +2154,6 @@ static const struct net_device_ops tile_net_ops = { .ndo_start_xmit = tile_net_tx, .ndo_select_queue = tile_net_select_queue, .ndo_do_ioctl = tile_net_ioctl, - .ndo_get_stats = tile_net_get_stats, .ndo_change_mtu = tile_net_change_mtu, .ndo_tx_timeout = tile_net_tx_timeout, .ndo_set_mac_address = tile_net_set_mac_address, @@ -1800,14 +2169,21 @@ static const struct net_device_ops tile_net_ops = { */ static void tile_net_setup(struct net_device *dev) { + netdev_features_t features = 0; + ether_setup(dev); dev->netdev_ops = &tile_net_ops; dev->watchdog_timeo = TILE_NET_TIMEOUT; - dev->features |= NETIF_F_LLTX; - dev->features |= NETIF_F_HW_CSUM; - dev->features |= NETIF_F_SG; - dev->features |= NETIF_F_TSO; dev->mtu = 1500; + + features |= NETIF_F_HW_CSUM; + features |= NETIF_F_SG; + features |= NETIF_F_TSO; + features |= NETIF_F_TSO6; + + dev->hw_features |= features; + dev->vlan_features |= features; + dev->features |= features; } /* Allocate the device structure, register the device, and obtain the @@ -1842,6 +2218,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac) priv->channel = -1; priv->loopify_channel = -1; priv->echannel = -1; + init_ptp_dev(priv); /* Get the MAC address and set it in the device struct; this must * be done before the device is opened. If the MAC is all zeroes, @@ -1871,9 +2248,12 @@ static void tile_net_init_module_percpu(void *unused) { struct tile_net_info *info = &__get_cpu_var(per_cpu_info); int my_cpu = smp_processor_id(); + int instance; - info->has_iqueue = false; - + for (instance = 0; instance < NR_MPIPE_MAX; instance++) { + info->mpipe[instance].has_iqueue = false; + info->mpipe[instance].instance = instance; + } info->my_cpu = my_cpu; /* Initialize the egress timer. */ @@ -1890,6 +2270,8 @@ static int __init tile_net_init_module(void) pr_info("Tilera Network Driver\n"); + BUILD_BUG_ON(NR_MPIPE_MAX != 2); + mutex_init(&tile_net_devs_for_channel_mutex); /* Initialize each CPU. 
*/ diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 36435499814b..106be47716e7 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -31,6 +31,7 @@ #include <linux/in6.h> #include <linux/timer.h> #include <linux/io.h> +#include <linux/u64_stats_sync.h> #include <asm/checksum.h> #include <asm/homecache.h> @@ -88,13 +89,6 @@ /* ISSUE: This has not been thoroughly tested (except at 1500). */ #define TILE_NET_MTU 1500 -/* HACK: Define to support GSO. */ -/* ISSUE: This may actually hurt performance of the TCP blaster. */ -/* #define TILE_NET_GSO */ - -/* Define this to collapse "duplicate" acks. */ -/* #define IGNORE_DUP_ACKS */ - /* HACK: Define this to verify incoming packets. */ /* #define TILE_NET_VERIFY_INGRESS */ @@ -156,10 +150,13 @@ struct tile_netio_queue { * Statistics counters for a specific cpu and device. */ struct tile_net_stats_t { - u32 rx_packets; - u32 rx_bytes; - u32 tx_packets; - u32 tx_bytes; + struct u64_stats_sync syncp; + u64 rx_packets; /* total packets received */ + u64 tx_packets; /* total packets transmitted */ + u64 rx_bytes; /* total bytes received */ + u64 tx_bytes; /* total bytes transmitted */ + u64 rx_errors; /* packets truncated or marked bad by hw */ + u64 rx_dropped; /* packets not for us or intf not up */ }; @@ -218,8 +215,6 @@ struct tile_net_priv { int network_cpus_count; /* Credits per network cpu. */ int network_cpus_credits; - /* Network stats. */ - struct net_device_stats stats; /* For NetIO bringup retries. */ struct delayed_work retry_work; /* Quick access to per cpu data. */ @@ -627,79 +622,6 @@ static void tile_net_handle_egress_timer(unsigned long arg) } -#ifdef IGNORE_DUP_ACKS - -/* - * Help detect "duplicate" ACKs. These are sequential packets (for a - * given flow) which are exactly 66 bytes long, sharing everything but - * ID=2@0x12, Hsum=2@0x18, Ack=4@0x2a, WinSize=2@0x30, Csum=2@0x32, - * Tstamps=10@0x38. The ID's are +1, the Hsum's are -1, the Ack's are - * +N, and the Tstamps are usually identical. - * - * NOTE: Apparently truly duplicate acks (with identical "ack" values), - * should not be collapsed, as they are used for some kind of flow control. - */ -static bool is_dup_ack(char *s1, char *s2, unsigned int len) -{ - int i; - - unsigned long long ignorable = 0; - - /* Identification. */ - ignorable |= (1ULL << 0x12); - ignorable |= (1ULL << 0x13); - - /* Header checksum. */ - ignorable |= (1ULL << 0x18); - ignorable |= (1ULL << 0x19); - - /* ACK. */ - ignorable |= (1ULL << 0x2a); - ignorable |= (1ULL << 0x2b); - ignorable |= (1ULL << 0x2c); - ignorable |= (1ULL << 0x2d); - - /* WinSize. */ - ignorable |= (1ULL << 0x30); - ignorable |= (1ULL << 0x31); - - /* Checksum. */ - ignorable |= (1ULL << 0x32); - ignorable |= (1ULL << 0x33); - - for (i = 0; i < len; i++, ignorable >>= 1) { - - if ((ignorable & 1) || (s1[i] == s2[i])) - continue; - -#ifdef TILE_NET_DEBUG - /* HACK: Mention non-timestamp diffs. */ - if (i < 0x38 && i != 0x2f && - net_ratelimit()) - pr_info("Diff at 0x%x\n", i); -#endif - - return false; - } - -#ifdef TILE_NET_NO_SUPPRESS_DUP_ACKS - /* HACK: Do not suppress truly duplicate ACKs. */ - /* ISSUE: Is this actually necessary or helpful? 
*/ - if (s1[0x2a] == s2[0x2a] && - s1[0x2b] == s2[0x2b] && - s1[0x2c] == s2[0x2c] && - s1[0x2d] == s2[0x2d]) { - return false; - } -#endif - - return true; -} - -#endif - - - static void tile_net_discard_aux(struct tile_net_cpu *info, int index) { struct tile_netio_queue *queue = &info->queue; @@ -774,6 +696,7 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index); netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt); + netio_pkt_status_t pkt_status = NETIO_PKT_STATUS_M(metadata, pkt); /* Extract the packet size. FIXME: Shouldn't the second line */ /* get subtracted? Mostly moot, since it should be "zero". */ @@ -806,40 +729,25 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) #endif /* TILE_NET_DUMP_PACKETS */ #ifdef TILE_NET_VERIFY_INGRESS - if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) && - NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) { - /* Bug 6624: Includes UDP packets with a "zero" checksum. */ - pr_warning("Bad L4 checksum on %d byte packet.\n", len); - } - if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) && - NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) { + if (pkt_status == NETIO_PKT_STATUS_OVERSIZE && len >= 64) { dump_packet(buf, len, "rx"); - panic("Bad L3 checksum."); - } - switch (NETIO_PKT_STATUS_M(metadata, pkt)) { - case NETIO_PKT_STATUS_OVERSIZE: - if (len >= 64) { - dump_packet(buf, len, "rx"); - panic("Unexpected OVERSIZE."); - } - break; - case NETIO_PKT_STATUS_BAD: - pr_warning("Unexpected BAD %ld byte packet.\n", len); + panic("Unexpected OVERSIZE."); } #endif filter = 0; - /* ISSUE: Filter TCP packets with "bad" checksums? */ - - if (!(dev->flags & IFF_UP)) { + if (pkt_status == NETIO_PKT_STATUS_BAD) { + /* Handle CRC error and hardware truncation. */ + filter = 2; + } else if (!(dev->flags & IFF_UP)) { /* Filter packets received before we're up. */ filter = 1; - } else if (NETIO_PKT_STATUS_M(metadata, pkt) == NETIO_PKT_STATUS_BAD) { + } else if (NETIO_PKT_ETHERTYPE_RECOGNIZED_M(metadata, pkt) && + pkt_status == NETIO_PKT_STATUS_UNDERSIZE) { /* Filter "truncated" packets. */ - filter = 1; + filter = 2; } else if (!(dev->flags & IFF_PROMISC)) { - /* FIXME: Implement HW multicast filter. */ if (!is_multicast_ether_addr(buf)) { /* Filter packets not for our address. */ const u8 *mine = dev->dev_addr; @@ -847,9 +755,14 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) } } - if (filter) { + u64_stats_update_begin(&stats->syncp); - /* ISSUE: Update "drop" statistics? */ + if (filter != 0) { + + if (filter == 1) + stats->rx_dropped++; + else + stats->rx_errors++; tile_net_provide_linux_buffer(info, va, small); @@ -881,6 +794,8 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) stats->rx_bytes += len; } + u64_stats_update_end(&stats->syncp); + /* ISSUE: It would be nice to defer this until the packet has */ /* actually been processed. */ tile_net_return_credit(info); @@ -1907,8 +1822,10 @@ busy: kfree_skb(olds[i]); /* Update stats. */ + u64_stats_update_begin(&stats->syncp); stats->tx_packets += num_segs; stats->tx_bytes += (num_segs * sh_len) + d_len; + u64_stats_update_end(&stats->syncp); /* Make sure the egress timer is scheduled. 
*/
 	tile_net_schedule_egress_timer(info);
@@ -1936,7 +1853,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)

 	unsigned int csum_start = skb_checksum_start_offset(skb);

-	lepp_frag_t frags[LEPP_MAX_FRAGS];
+	lepp_frag_t frags[1 + MAX_SKB_FRAGS];

 	unsigned int num_frags;

@@ -1951,7 +1868,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
 	unsigned int cmd_head, cmd_tail, cmd_next;
 	unsigned int comp_tail;

-	lepp_cmd_t cmds[LEPP_MAX_FRAGS];
+	lepp_cmd_t cmds[1 + MAX_SKB_FRAGS];

 	/*
@@ -2089,8 +2006,10 @@ busy:
 		kfree_skb(olds[i]);

 	/* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */
+	u64_stats_update_begin(&stats->syncp);
 	stats->tx_packets++;
 	stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN);
+	u64_stats_update_end(&stats->syncp);

 	/* Make sure the egress timer is scheduled. */
 	tile_net_schedule_egress_timer(info);
@@ -2127,30 +2046,51 @@ static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
  *
  * Returns the address of the device statistics structure.
  */
-static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
+		struct rtnl_link_stats64 *stats)
 {
 	struct tile_net_priv *priv = netdev_priv(dev);
-	u32 rx_packets = 0;
-	u32 tx_packets = 0;
-	u32 rx_bytes = 0;
-	u32 tx_bytes = 0;
+	u64 rx_packets = 0, tx_packets = 0;
+	u64 rx_bytes = 0, tx_bytes = 0;
+	u64 rx_errors = 0, rx_dropped = 0;
 	int i;

 	for_each_online_cpu(i) {
-		if (priv->cpu[i]) {
-			rx_packets += priv->cpu[i]->stats.rx_packets;
-			rx_bytes += priv->cpu[i]->stats.rx_bytes;
-			tx_packets += priv->cpu[i]->stats.tx_packets;
-			tx_bytes += priv->cpu[i]->stats.tx_bytes;
-		}
+		struct tile_net_stats_t *cpu_stats;
+		u64 trx_packets, ttx_packets, trx_bytes, ttx_bytes;
+		u64 trx_errors, trx_dropped;
+		unsigned int start;
+
+		if (priv->cpu[i] == NULL)
+			continue;
+		cpu_stats = &priv->cpu[i]->stats;
+
+		do {
+			start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
+			trx_packets = cpu_stats->rx_packets;
+			ttx_packets = cpu_stats->tx_packets;
+			trx_bytes = cpu_stats->rx_bytes;
+			ttx_bytes = cpu_stats->tx_bytes;
+			trx_errors = cpu_stats->rx_errors;
+			trx_dropped = cpu_stats->rx_dropped;
+		} while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
+
+		rx_packets += trx_packets;
+		tx_packets += ttx_packets;
+		rx_bytes += trx_bytes;
+		tx_bytes += ttx_bytes;
+		rx_errors += trx_errors;
+		rx_dropped += trx_dropped;
 	}

-	priv->stats.rx_packets = rx_packets;
-	priv->stats.rx_bytes = rx_bytes;
-	priv->stats.tx_packets = tx_packets;
-	priv->stats.tx_bytes = tx_bytes;
+	stats->rx_packets = rx_packets;
+	stats->tx_packets = tx_packets;
+	stats->rx_bytes = rx_bytes;
+	stats->tx_bytes = tx_bytes;
+	stats->rx_errors = rx_errors;
+	stats->rx_dropped = rx_dropped;

-	return &priv->stats;
+	return stats;
 }

@@ -2287,7 +2227,7 @@ static const struct net_device_ops tile_net_ops = {
 	.ndo_stop = tile_net_stop,
 	.ndo_start_xmit = tile_net_tx,
 	.ndo_do_ioctl = tile_net_ioctl,
-	.ndo_get_stats = tile_net_get_stats,
+	.ndo_get_stats64 = tile_net_get_stats64,
 	.ndo_change_mtu = tile_net_change_mtu,
 	.ndo_tx_timeout = tile_net_tx_timeout,
 	.ndo_set_mac_address = tile_net_set_mac_address,
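The tile_net_get_stats64() rewrite above is the standard u64_stats_sync recipe for per-cpu 64-bit counters that must read consistently on 32-bit SMP. A minimal sketch of both halves of the idiom, using illustrative names rather than the driver's own:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct pcpu_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* Writer side: brackets each update so 32-bit readers can detect a
 * concurrent writer via the embedded seqcount.
 */
static void pcpu_stats_add(struct pcpu_stats *s, u64 bytes)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += bytes;
	u64_stats_update_end(&s->syncp);
}

/* Reader side: retry until a snapshot is observed with no writer in
 * between; the _bh variants match writers that run in softirq context.
 */
static void pcpu_stats_read(struct pcpu_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_bh(&s->syncp, start));
}

On 64-bit builds the seqcount machinery compiles away entirely and these become plain loads and stores, so the conversion costs nothing where tearing was never possible.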
@@ -2305,39 +2245,30 @@ static const struct net_device_ops tile_net_ops = {
  */
 static void tile_net_setup(struct net_device *dev)
 {
-	PDEBUG("tile_net_setup()\n");
+	netdev_features_t features = 0;

 	ether_setup(dev);
-
 	dev->netdev_ops = &tile_net_ops;
-
 	dev->watchdog_timeo = TILE_NET_TIMEOUT;
+	dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
+	dev->mtu = TILE_NET_MTU;

-	/* We want lockless xmit. */
-	dev->features |= NETIF_F_LLTX;
-
-	/* We support hardware tx checksums. */
-	dev->features |= NETIF_F_HW_CSUM;
-
-	/* We support scatter/gather. */
-	dev->features |= NETIF_F_SG;
-
-	/* We support TSO. */
-	dev->features |= NETIF_F_TSO;
+	features |= NETIF_F_HW_CSUM;
+	features |= NETIF_F_SG;

-#ifdef TILE_NET_GSO
-	/* We support GSO. */
-	dev->features |= NETIF_F_GSO;
-#endif
+	/* We support TSO iff the HV supports sufficient frags. */
+	if (LEPP_MAX_FRAGS >= 1 + MAX_SKB_FRAGS)
+		features |= NETIF_F_TSO;

+	/* We can't support HIGHDMA without hash_default, since we need
+	 * to be able to finv() with a VA if we don't have hash_default.
+	 */
 	if (hash_default)
-		dev->features |= NETIF_F_HIGHDMA;
-
-	/* ISSUE: We should support NETIF_F_UFO. */
+		features |= NETIF_F_HIGHDMA;

-	dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
-
-	dev->mtu = TILE_NET_MTU;
+	dev->hw_features |= features;
+	dev->vlan_features |= features;
+	dev->features |= features;
 }
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 1d6dc41f755d..ef776310fab1 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2376,6 +2376,23 @@ out_0:
 	return ret;
 }

+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * velocity_poll_controller - Velocity Poll controller function
+ * @dev: network device
+ *
+ *
+ * Used by NETCONSOLE and other diagnostic tools to allow network I/P
+ * with interrupts disabled.
+ */
+static void velocity_poll_controller(struct net_device *dev)
+{
+	disable_irq(dev->irq);
+	velocity_intr(dev->irq, dev);
+	enable_irq(dev->irq);
+}
+#endif
+
 /**
  * velocity_mii_ioctl - MII ioctl handler
  * @dev: network device
@@ -2641,6 +2658,9 @@ static const struct net_device_ops velocity_netdev_ops = {
 	.ndo_do_ioctl = velocity_ioctl,
 	.ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = velocity_poll_controller,
+#endif
 };
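Finally, the velocity_poll_controller() hunk follows the usual netpoll shape for a device whose interrupt handler can do all of its work synchronously: mask the interrupt line, invoke the handler by hand, unmask. Generically, with my_intr() and my_poll_controller() as stand-ins for a driver's real symbols:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

static irqreturn_t my_intr(int irq, void *dev_id);	/* the driver's real ISR */

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll the device with normal interrupt delivery off; netconsole uses
 * this path to move packets when it cannot wait for an interrupt.
 */
static void my_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);		/* quiesce the line */
	my_intr(dev->irq, dev);		/* drain RX/TX rings inline */
	enable_irq(dev->irq);
}
#endif

Note that disable_irq() waits for any running handler to complete, so drivers with threaded or shared interrupts typically need a more careful implementation than this minimal form.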