author    David S. Miller <davem@davemloft.net>  2016-03-11 15:20:21 -0500
committer David S. Miller <davem@davemloft.net>  2016-03-11 15:20:21 -0500
commit    2aa45984c2180eecbbc9cc4e1321243308f41a01 (patch)
tree      3f83134e8b6c13f795b1401c65b0b1cb1f337a38 /drivers/net/ethernet
parent    627caa9d319d2402f3d41ebd8dd0fde9d41a33a7 (diff)
parent    8f60bafec368191594a8a6ed5f0ce14968131754 (diff)
Merge branch 'qed-mf-updates'
Yuval Mintz says:

====================
qed: Management firmware updates

This series contains several changes to driver interaction with the
management fw. The biggest [& most significant] change here is a change
in the locking scheme and re-definition of the 'critical section' when
accessing shared resources toward the goal of interacting with the
management firmware.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
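In short: ordinary mailbox commands keep serializing on a per-PF lock (now a spinlock instead of a mutex), while [UN]LOAD_REQ, which the MFW serializes per engine, instead raises a blocking flag so that competing senders fail fast with -EBUSY rather than spin with bottom halves disabled. A simplified, self-contained sketch of that scheme (illustrative only; struct and function names here are invented, the real helpers are qed_mcp_mb_lock()/qed_mcp_mb_unlock() in the qed_mcp.c hunks below):

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Illustrative state only; the driver keeps the equivalent fields inside
 * struct qed_mcp_info (see the qed_mcp.h hunk at the end of this diff).
 */
struct mb_lock_state {
	spinlock_t lock;		/* serializes ordinary commands */
	bool block_mb_sending;		/* set while [UN]LOAD_REQ is in flight */
};

static int mb_lock(struct mb_lock_state *s, bool is_load_req, bool is_load_done)
{
	spin_lock_bh(&s->lock);

	/* [UN]LOAD_DONE marks the end of the blocked window */
	if (is_load_done)
		s->block_mb_sending = false;

	if (s->block_mb_sending) {
		spin_unlock_bh(&s->lock);
		return -EBUSY;		/* fail fast instead of spinning */
	}

	if (is_load_req) {
		/* Don't hold a BH-disabling spinlock across a command the
		 * MFW serializes engine-wide; block other senders instead.
		 */
		s->block_mb_sending = true;
		spin_unlock_bh(&s->lock);
	}

	return 0;
}

static void mb_unlock(struct mb_lock_state *s, bool is_load_req)
{
	if (!is_load_req)
		spin_unlock_bh(&s->lock);
}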
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h        3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h   11
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c  291
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h   14
4 files changed, 219 insertions(+), 100 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index e5604eec81bf..fcb8e9ba51d9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -303,6 +303,9 @@ struct qed_hwfn {
bool b_int_enabled;
bool b_int_requested;
+ /* True if the driver requests for the link */
+ bool b_drv_link_init;
+
struct qed_mcp_info *mcp_info;
struct qed_hw_cid_data *p_tx_cids;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 236db8a99ec3..a368f5e71d95 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -3621,6 +3621,9 @@ struct public_port {
u32 fc_npiv_nvram_tbl_addr;
u32 fc_npiv_nvram_tbl_size;
u32 transceiver_data;
+#define PMM_TRANSCEIVER_STATE_MASK 0x000000FF
+#define PMM_TRANSCEIVER_STATE_SHIFT 0x00000000
+#define PMM_TRANSCEIVER_STATE_PRESENT 0x00000001
};
/**************************************/
@@ -3955,6 +3958,14 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
MFW_DRV_MSG_ERROR_RECOVERY,
+ MFW_DRV_MSG_BW_UPDATE,
+ MFW_DRV_MSG_S_TAG_UPDATE,
+ MFW_DRV_MSG_GET_LAN_STATS,
+ MFW_DRV_MSG_GET_FCOE_STATS,
+ MFW_DRV_MSG_GET_ISCSI_STATS,
+ MFW_DRV_MSG_GET_RDMA_STATS,
+ MFW_DRV_MSG_FAILURE_DETECTED,
+ MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
MFW_DRV_MSG_MAX
};
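The PMM_TRANSCEIVER_STATE_* definitions above describe a byte-wide field inside transceiver_data; the handler added to qed_mcp.c below reads it with the driver's GET_FIELD() helper, which for this field boils down to the following (rough expansion, not a verbatim excerpt):

/* With SHIFT == 0 and MASK == 0xFF this simply isolates the low byte. */
u32 state = (transceiver_data >> PMM_TRANSCEIVER_STATE_SHIFT) &
	    PMM_TRANSCEIVER_STATE_MASK;
bool present = (state == PMM_TRANSCEIVER_STATE_PRESENT);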
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index f23ce734ab63..b89c9a8e1655 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -11,8 +11,8 @@
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
-#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
@@ -168,8 +168,8 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
goto err;
- /* Initialize the MFW mutex */
- mutex_init(&p_info->mutex);
+ /* Initialize the MFW spinlock */
+ spin_lock_init(&p_info->lock);
return 0;
@@ -179,6 +179,52 @@ err:
return -ENOMEM;
}
+/* Locks the MFW mailbox of a PF to ensure a single access.
+ * The lock is achieved in most cases by holding a spinlock, causing other
+ * threads to wait till a previous access is done.
+ * In some cases (currently when [UN]LOAD_REQ commands are sent), single
+ * access is achieved by setting a blocking flag, which causes competing
+ * contexts' mailbox sends to fail with -EBUSY.
+ */
+static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn,
+ u32 cmd)
+{
+ spin_lock_bh(&p_hwfn->mcp_info->lock);
+
+ /* The spinlock shouldn't be acquired when the mailbox command is
+ * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
+ * pending [UN]LOAD_REQ command of another PF together with a spinlock
+ * (i.e. interrupts are disabled) - can lead to a deadlock.
+ * It is assumed that for a single PF, no other mailbox commands can be
+ * sent from another context while sending LOAD_REQ, and that any
+ * parallel commands to UNLOAD_REQ can be cancelled.
+ */
+ if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
+ p_hwfn->mcp_info->block_mb_sending = false;
+
+ if (p_hwfn->mcp_info->block_mb_sending) {
+ DP_NOTICE(p_hwfn,
+ "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
+ cmd);
+ spin_unlock_bh(&p_hwfn->mcp_info->lock);
+ return -EBUSY;
+ }
+
+ if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
+ p_hwfn->mcp_info->block_mb_sending = true;
+ spin_unlock_bh(&p_hwfn->mcp_info->lock);
+ }
+
+ return 0;
+}
+
+static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn,
+ u32 cmd)
+{
+ if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
+ spin_unlock_bh(&p_hwfn->mcp_info->lock);
+}
+
int qed_mcp_reset(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -187,6 +233,13 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn,
u32 org_mcp_reset_seq, cnt = 0;
int rc = 0;
+ /* Ensure that only a single thread is accessing the mailbox at any
+ * given time.
+ */
+ rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
+ if (rc != 0)
+ return rc;
+
/* Set drv command along with the updated sequence */
org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
@@ -209,6 +262,8 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn,
rc = -EAGAIN;
}
+ qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
+
return rc;
}
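The hunks that follow convert every sender to the same lock/send/unlock pairing. Schematically (a sketch of what qed_mcp_cmd_and_union() below does, not a verbatim excerpt; error handling trimmed):

	rc = qed_mcp_mb_lock(p_hwfn, cmd);	/* may fail with -EBUSY */
	if (rc)
		return rc;

	rc = qed_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, &resp, &mcp_param);

	qed_mcp_mb_unlock(p_hwfn, cmd);		/* no-op for [UN]LOAD_REQ */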
@@ -275,14 +330,12 @@ static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 cmd,
- u32 param,
- u32 *o_mcp_resp,
- u32 *o_mcp_param)
+static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_mb_params *p_mb_params)
{
- int rc = 0;
+ u32 union_data_addr;
+ int rc;
/* MCP not initialized */
if (!qed_mcp_is_init(p_hwfn)) {
@@ -290,28 +343,56 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
return -EBUSY;
}
- /* Lock Mutex to ensure only single thread is
- * accessing the MCP at one time
+ union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ offsetof(struct public_drv_mb, union_data);
+
+ /* Ensure that only a single thread is accessing the mailbox at any
+ * given time.
*/
- mutex_lock(&p_hwfn->mcp_info->mutex);
- rc = qed_do_mcp_cmd(p_hwfn, p_ptt, cmd, param,
- o_mcp_resp, o_mcp_param);
- /* Release Mutex */
- mutex_unlock(&p_hwfn->mcp_info->mutex);
+ rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
+ if (rc)
+ return rc;
+
+ if (p_mb_params->p_data_src != NULL)
+ qed_memcpy_to(p_hwfn, p_ptt, union_data_addr,
+ p_mb_params->p_data_src,
+ sizeof(*p_mb_params->p_data_src));
+
+ rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
+ p_mb_params->param, &p_mb_params->mcp_resp,
+ &p_mb_params->mcp_param);
+
+ if (p_mb_params->p_data_dst != NULL)
+ qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
+ union_data_addr,
+ sizeof(*p_mb_params->p_data_dst));
+
+ qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
return rc;
}
-static void qed_mcp_set_drv_ver(struct qed_dev *cdev,
- struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param)
{
- u32 i;
+ struct qed_mcp_mb_params mb_params;
+ int rc;
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.param = param;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ *o_mcp_resp = mb_params.mcp_resp;
+ *o_mcp_param = mb_params.mcp_param;
- /* Copy version string to MCP */
- for (i = 0; i < MCP_DRV_VER_STR_SIZE_DWORD; i++)
- DRV_MB_WR(p_hwfn, p_ptt, union_data.ver_str[i],
- *(u32 *)&cdev->ver_str[i * sizeof(u32)]);
+ return 0;
}
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
@@ -319,26 +400,18 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
u32 *p_load_code)
{
struct qed_dev *cdev = p_hwfn->cdev;
- u32 param;
+ struct qed_mcp_mb_params mb_params;
+ union drv_union_data union_data;
int rc;
- if (!qed_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
- return -EBUSY;
- }
-
- /* Save driver's version to shmem */
- qed_mcp_set_drv_ver(cdev, p_hwfn, p_ptt);
-
- DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
- p_hwfn->mcp_info->drv_mb_seq,
- p_hwfn->mcp_info->drv_pulse_seq);
-
+ memset(&mb_params, 0, sizeof(mb_params));
/* Load Request */
- rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ,
- (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
- cdev->drv_type),
- p_load_code, &param);
+ mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
+ mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
+ cdev->drv_type;
+ memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE);
+ mb_params.p_data_src = &union_data;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
/* if mcp fails to respond we must abort */
if (rc) {
@@ -346,6 +419,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
return rc;
}
+ *p_load_code = mb_params.mcp_resp;
+
/* If MFW refused (e.g. other port is in diagnostic mode) we
* must abort. This can happen in the following cases:
* - Other port is in diagnostic mode
@@ -365,6 +440,33 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
return 0;
}
+static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 transceiver_state;
+
+ transceiver_state = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ transceiver_data));
+
+ DP_VERBOSE(p_hwfn,
+ (NETIF_MSG_HW | QED_MSG_SP),
+ "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
+ transceiver_state,
+ (u32)(p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ transceiver_data)));
+
+ transceiver_state = GET_FIELD(transceiver_state,
+ PMM_TRANSCEIVER_STATE);
+
+ if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT)
+ DP_NOTICE(p_hwfn, "Transceiver is present.\n");
+ else
+ DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
+}
+
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool b_reset)
@@ -390,7 +492,10 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
return;
}
- p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+ if (p_hwfn->b_drv_link_init)
+ p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+ else
+ p_link->link_up = false;
p_link->full_duplex = true;
switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
@@ -492,53 +597,43 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
bool b_up)
{
struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
- u32 param = 0, reply = 0, cmd;
- struct pmm_phy_cfg phy_cfg;
+ struct qed_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ struct pmm_phy_cfg *phy_cfg;
int rc = 0;
- u32 i;
-
- if (!qed_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
- return -EBUSY;
- }
+ u32 cmd;
/* Set the shmem configuration according to params */
- memset(&phy_cfg, 0, sizeof(phy_cfg));
+ phy_cfg = &union_data.drv_phy_cfg;
+ memset(phy_cfg, 0, sizeof(*phy_cfg));
cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
if (!params->speed.autoneg)
- phy_cfg.speed = params->speed.forced_speed;
- phy_cfg.pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
- phy_cfg.pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
- phy_cfg.pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
- phy_cfg.adv_speed = params->speed.advertised_speeds;
- phy_cfg.loopback_mode = params->loopback_mode;
-
- /* Write the requested configuration to shmem */
- for (i = 0; i < sizeof(phy_cfg); i += 4)
- qed_wr(p_hwfn, p_ptt,
- p_hwfn->mcp_info->drv_mb_addr +
- offsetof(struct public_drv_mb, union_data) + i,
- ((u32 *)&phy_cfg)[i >> 2]);
+ phy_cfg->speed = params->speed.forced_speed;
+ phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
+ phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
+ phy_cfg->pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
+ phy_cfg->adv_speed = params->speed.advertised_speeds;
+ phy_cfg->loopback_mode = params->loopback_mode;
+
+ p_hwfn->b_drv_link_init = b_up;
if (b_up) {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
- phy_cfg.speed,
- phy_cfg.pause,
- phy_cfg.adv_speed,
- phy_cfg.loopback_mode,
- phy_cfg.feature_config_flags);
+ phy_cfg->speed,
+ phy_cfg->pause,
+ phy_cfg->adv_speed,
+ phy_cfg->loopback_mode,
+ phy_cfg->feature_config_flags);
} else {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Resetting link\n");
}
- DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
- p_hwfn->mcp_info->drv_mb_seq,
- p_hwfn->mcp_info->drv_pulse_seq);
-
- /* Load Request */
- rc = qed_mcp_cmd(p_hwfn, p_ptt, cmd, 0, &reply, &param);
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.p_data_src = &union_data;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
/* if mcp fails to respond we must abort */
if (rc) {
@@ -581,6 +676,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_LINK_CHANGE:
qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
break;
+ case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
+ qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
+ break;
default:
DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
rc = -EINVAL;
@@ -801,11 +899,11 @@ int qed_mcp_drain(struct qed_hwfn *p_hwfn,
int rc;
rc = qed_mcp_cmd(p_hwfn, p_ptt,
- DRV_MSG_CODE_NIG_DRAIN, 100,
+ DRV_MSG_CODE_NIG_DRAIN, 1000,
&resp, &param);
/* Wait for the drain to complete before returning */
- msleep(120);
+ msleep(1020);
return rc;
}
@@ -831,31 +929,28 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_mcp_drv_version *p_ver)
{
- int rc = 0;
- u32 param = 0, reply = 0, i;
-
- if (!qed_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
- return -EBUSY;
- }
+ struct drv_version_stc *p_drv_version;
+ struct qed_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ __be32 val;
+ u32 i;
+ int rc;
- DRV_MB_WR(p_hwfn, p_ptt, union_data.drv_version.version,
- p_ver->version);
- /* Copy version string to shmem */
- for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / 4; i++) {
- DRV_MB_WR(p_hwfn, p_ptt,
- union_data.drv_version.name[i * sizeof(u32)],
- *(u32 *)&p_ver->name[i * sizeof(u32)]);
+ p_drv_version = &union_data.drv_version;
+ p_drv_version->version = p_ver->version;
+ for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) {
+ val = cpu_to_be32(p_ver->name[i]);
+ *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
}
- rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0, &reply,
- &param);
- if (rc) {
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
+ mb_params.p_data_src = &union_data;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
- return rc;
- }
- return 0;
+ return rc;
}
int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
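That completes the qed_mcp.c conversion: commands now funnel through qed_mcp_cmd_and_union() and a struct qed_mcp_mb_params (declared in the qed_mcp.h hunk below). This series only populates p_data_src, but the struct also carries a p_data_dst pointer for commands whose reply comes back through union_data; a hypothetical caller of that path could look roughly like this (the command name is made up for illustration):

/* Hypothetical read-back caller; DRV_MSG_CODE_EXAMPLE_GET is invented and the
 * reply layout depends on the command - shown only to illustrate p_data_dst.
 */
static int example_query(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 union drv_union_data *p_reply)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_EXAMPLE_GET;	/* hypothetical command */
	mb_params.p_data_dst = p_reply;	/* copied back from shmem union_data */
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	/* mb_params.mcp_resp / mcp_param now hold the MFW's response codes */
	return 0;
}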
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 506197d5c3dd..50917a2131a5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -11,8 +11,8 @@
#include <linux/types.h>
#include <linux/delay.h>
-#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include "qed_hsi.h"
struct qed_mcp_link_speed_params {
@@ -255,7 +255,8 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
((_p_hwfn)->cdev->num_ports_in_engines * 2))
struct qed_mcp_info {
- struct mutex mutex; /* MCP access lock */
+ spinlock_t lock;
+ bool block_mb_sending;
u32 public_base;
u32 drv_mb_addr;
u32 mfw_mb_addr;
@@ -272,6 +273,15 @@ struct qed_mcp_info {
u16 mcp_hist;
};
+struct qed_mcp_mb_params {
+ u32 cmd;
+ u32 param;
+ union drv_union_data *p_data_src;
+ union drv_union_data *p_data_dst;
+ u32 mcp_resp;
+ u32 mcp_param;
+};
+
/**
* @brief Initialize the interface with the MCP
*