Diffstat (limited to 'drivers/staging/qlge')
-rw-r--r--  drivers/staging/qlge/qlge.h            7
-rw-r--r--  drivers/staging/qlge/qlge_dbg.c      590
-rw-r--r--  drivers/staging/qlge/qlge_ethtool.c    8
-rw-r--r--  drivers/staging/qlge/qlge_main.c      69
-rw-r--r--  drivers/staging/qlge/qlge_mpi.c       59
5 files changed, 362 insertions, 371 deletions
diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h
index fc8c5ca8935d..483ce04789ed 100644
--- a/drivers/staging/qlge/qlge.h
+++ b/drivers/staging/qlge/qlge.h
@@ -2057,8 +2057,8 @@ enum {
};
struct nic_operations {
- int (*get_flash) (struct ql_adapter *);
- int (*port_initialize) (struct ql_adapter *);
+ int (*get_flash)(struct ql_adapter *qdev);
+ int (*port_initialize)(struct ql_adapter *qdev);
};
/*
@@ -2224,6 +2224,7 @@ static inline void ql_write_db_reg_relaxed(u32 val, void __iomem *addr)
static inline u32 ql_read_sh_reg(__le32 *addr)
{
u32 reg;
+
reg = le32_to_cpu(*addr);
rmb();
return reg;
@@ -2275,7 +2276,7 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev);
int ql_wait_fifo_empty(struct ql_adapter *qdev);
void ql_get_dump(struct ql_adapter *qdev, void *buff);
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
-void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
+void ql_check_lb_frame(struct ql_adapter *qdev, struct sk_buff *skb);
int ql_own_firmware(struct ql_adapter *qdev);
int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c
index 058889687907..a55bf0b3e9dc 100644
--- a/drivers/staging/qlge/qlge_dbg.c
+++ b/drivers/staging/qlge/qlge_dbg.c
@@ -42,9 +42,9 @@ static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
u32 bit, u32 err_bit)
{
u32 temp;
- int count = 10;
+ int count;
- while (count) {
+ for (count = 10; count; count--) {
temp = ql_read_other_func_reg(qdev, reg);
/* check for errors */
@@ -53,7 +53,6 @@ static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
else if (temp & bit)
return 0;
mdelay(10);
- count--;
}
return -1;
}
@@ -647,7 +646,7 @@ static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
break;
default:
- pr_err("Bad type!!! 0x%08x\n", type);
+ netdev_err(qdev->ndev, "Bad type!!! 0x%08x\n", type);
max_index = 0;
max_offset = 0;
break;
@@ -1299,7 +1298,7 @@ void ql_get_dump(struct ql_adapter *qdev, void *buff)
* If the dump has already been taken and is stored
* in our internal buffer and if force dump is set then
* just start the spool to dump it to the log file
- * and also, take a snapshot of the general regs to
+ * and also, take a snapshot of the general regs
* to the user's buffer or else take complete dump
* to the user's buffer if force is not set.
*/
@@ -1335,9 +1334,8 @@ static void ql_dump_intr_states(struct ql_adapter *qdev)
for (i = 0; i < qdev->intr_count; i++) {
ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
value = ql_read32(qdev, INTR_EN);
- pr_err("%s: Interrupt %d is %s\n",
- qdev->ndev->name, i,
- (value & INTR_EN_EN ? "enabled" : "disabled"));
+ netdev_err(qdev->ndev, "Interrupt %d is %s\n", i,
+ (value & INTR_EN_EN ? "enabled" : "disabled"));
}
}
@@ -1345,13 +1343,14 @@ static void ql_dump_intr_states(struct ql_adapter *qdev)
do { \
u32 data; \
ql_read_xgmac_reg(qdev, reg, &data); \
- pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \
+ netdev_err(qdev->ndev, "%s = 0x%.08x\n", #reg, data); \
} while (0)
void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
{
if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
- pr_err("%s: Couldn't get xgmac sem\n", __func__);
+ netdev_err(qdev->ndev, "%s: Couldn't get xgmac sem\n",
+ __func__);
return;
}
DUMP_XGMAC(qdev, PAUSE_SRC_LO);
@@ -1388,27 +1387,28 @@ static void ql_dump_cam_entries(struct ql_adapter *qdev)
return;
for (i = 0; i < 4; i++) {
if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
- pr_err("%s: Failed read of mac index register\n",
- __func__);
- return;
- } else {
- if (value[0])
- pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n",
- qdev->ndev->name, i, value[1], value[0],
- value[2]);
+ netdev_err(qdev->ndev,
+ "%s: Failed read of mac index register\n",
+ __func__);
+ break;
}
+ if (value[0])
+ netdev_err(qdev->ndev,
+ "CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n",
+ i, value[1], value[0], value[2]);
}
for (i = 0; i < 32; i++) {
if (ql_get_mac_addr_reg
(qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
- pr_err("%s: Failed read of mac index register\n",
- __func__);
- return;
- } else {
- if (value[0])
- pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n",
- qdev->ndev->name, i, value[1], value[0]);
+ netdev_err(qdev->ndev,
+ "%s: Failed read of mac index register\n",
+ __func__);
+ break;
}
+ if (value[0])
+ netdev_err(qdev->ndev,
+ "MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n",
+ i, value[1], value[0]);
}
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
@@ -1424,24 +1424,25 @@ void ql_dump_routing_entries(struct ql_adapter *qdev)
for (i = 0; i < 16; i++) {
value = 0;
if (ql_get_routing_reg(qdev, i, &value)) {
- pr_err("%s: Failed read of routing index register\n",
- __func__);
- return;
- } else {
- if (value)
- pr_err("%s: Routing Mask %d = 0x%.08x\n",
- qdev->ndev->name, i, value);
+ netdev_err(qdev->ndev,
+ "%s: Failed read of routing index register\n",
+ __func__);
+ break;
}
+ if (value)
+ netdev_err(qdev->ndev,
+ "%s: Routing Mask %d = 0x%.08x\n",
+ i, value);
}
ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}
#define DUMP_REG(qdev, reg) \
- pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg))
+ netdev_err(qdev->ndev, "%-32s= 0x%x\n", #reg, ql_read32(qdev, reg))
void ql_dump_regs(struct ql_adapter *qdev)
{
- pr_err("reg dump for function #%d\n", qdev->func);
+ netdev_err(qdev->ndev, "reg dump for function #%d\n", qdev->func);
DUMP_REG(qdev, SYS);
DUMP_REG(qdev, RST_FO);
DUMP_REG(qdev, FSC);
@@ -1506,11 +1507,12 @@ void ql_dump_regs(struct ql_adapter *qdev)
#ifdef QL_STAT_DUMP
#define DUMP_STAT(qdev, stat) \
- pr_err("%s = %ld\n", #stat, (unsigned long)(qdev)->nic_stats.stat)
+ netdev_err(qdev->ndev, "%s = %ld\n", #stat, \
+ (unsigned long)(qdev)->nic_stats.stat)
void ql_dump_stat(struct ql_adapter *qdev)
{
- pr_err("%s: Enter\n", __func__);
+ netdev_err(qdev->ndev, "%s: Enter\n", __func__);
DUMP_STAT(qdev, tx_pkts);
DUMP_STAT(qdev, tx_bytes);
DUMP_STAT(qdev, tx_mcast_pkts);
@@ -1559,11 +1561,12 @@ void ql_dump_stat(struct ql_adapter *qdev)
#ifdef QL_DEV_DUMP
#define DUMP_QDEV_FIELD(qdev, type, field) \
- pr_err("qdev->%-24s = " type "\n", #field, (qdev)->field)
+ netdev_err(qdev->ndev, "qdev->%-24s = " type "\n", #field, (qdev)->field)
#define DUMP_QDEV_DMA_FIELD(qdev, field) \
- pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field)
+ netdev_err(qdev->ndev, "qdev->%-24s = %llx\n", #field, \
+ (unsigned long long)qdev->field)
#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
- pr_err("%s[%d].%s = " type "\n", \
+ netdev_err(qdev->ndev, "%s[%d].%s = " type "\n", \
#array, index, #field, (qdev)->array[index].field)
void ql_dump_qdev(struct ql_adapter *qdev)
{
@@ -1614,99 +1617,100 @@ void ql_dump_qdev(struct ql_adapter *qdev)
#ifdef QL_CB_DUMP
void ql_dump_wqicb(struct wqicb *wqicb)
{
- pr_err("Dumping wqicb stuff...\n");
- pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
- pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags));
- pr_err("wqicb->cq_id_rss = %d\n",
- le16_to_cpu(wqicb->cq_id_rss));
- pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid));
- pr_err("wqicb->wq_addr = 0x%llx\n",
- (unsigned long long)le64_to_cpu(wqicb->addr));
- pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n",
- (unsigned long long)le64_to_cpu(wqicb->cnsmr_idx_addr));
+ netdev_err(qdev->ndev, "Dumping wqicb stuff...\n");
+ netdev_err(qdev->ndev, "wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
+ netdev_err(qdev->ndev, "wqicb->flags = %x\n",
+ le16_to_cpu(wqicb->flags));
+ netdev_err(qdev->ndev, "wqicb->cq_id_rss = %d\n",
+ le16_to_cpu(wqicb->cq_id_rss));
+ netdev_err(qdev->ndev, "wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid));
+ netdev_err(qdev->ndev, "wqicb->wq_addr = 0x%llx\n",
+ (unsigned long long)le64_to_cpu(wqicb->addr));
+ netdev_err(qdev->ndev, "wqicb->wq_cnsmr_idx_addr = 0x%llx\n",
+ (unsigned long long)le64_to_cpu(wqicb->cnsmr_idx_addr));
}
void ql_dump_tx_ring(struct tx_ring *tx_ring)
{
if (!tx_ring)
return;
- pr_err("===================== Dumping tx_ring %d ===============\n",
- tx_ring->wq_id);
- pr_err("tx_ring->base = %p\n", tx_ring->wq_base);
- pr_err("tx_ring->base_dma = 0x%llx\n",
- (unsigned long long)tx_ring->wq_base_dma);
- pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",
- tx_ring->cnsmr_idx_sh_reg,
- tx_ring->cnsmr_idx_sh_reg
+ netdev_err(qdev->ndev, "===================== Dumping tx_ring %d ===============\n",
+ tx_ring->wq_id);
+ netdev_err(qdev->ndev, "tx_ring->base = %p\n", tx_ring->wq_base);
+ netdev_err(qdev->ndev, "tx_ring->base_dma = 0x%llx\n",
+ (unsigned long long)tx_ring->wq_base_dma);
+ netdev_err(qdev->ndev, "tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",
+ tx_ring->cnsmr_idx_sh_reg,
+ tx_ring->cnsmr_idx_sh_reg
? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
- pr_err("tx_ring->size = %d\n", tx_ring->wq_size);
- pr_err("tx_ring->len = %d\n", tx_ring->wq_len);
- pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg);
- pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg);
- pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx);
- pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id);
- pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id);
- pr_err("tx_ring->q = %p\n", tx_ring->q);
- pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count));
+ netdev_err(qdev->ndev, "tx_ring->size = %d\n", tx_ring->wq_size);
+ netdev_err(qdev->ndev, "tx_ring->len = %d\n", tx_ring->wq_len);
+ netdev_err(qdev->ndev, "tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg);
+ netdev_err(qdev->ndev, "tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg);
+ netdev_err(qdev->ndev, "tx_ring->prod_idx = %d\n", tx_ring->prod_idx);
+ netdev_err(qdev->ndev, "tx_ring->cq_id = %d\n", tx_ring->cq_id);
+ netdev_err(qdev->ndev, "tx_ring->wq_id = %d\n", tx_ring->wq_id);
+ netdev_err(qdev->ndev, "tx_ring->q = %p\n", tx_ring->q);
+ netdev_err(qdev->ndev, "tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count));
}
void ql_dump_ricb(struct ricb *ricb)
{
int i;
- pr_err("===================== Dumping ricb ===============\n");
- pr_err("Dumping ricb stuff...\n");
-
- pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f);
- pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n",
- ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
- ricb->flags & RSS_L6K ? "RSS_L6K " : "",
- ricb->flags & RSS_LI ? "RSS_LI " : "",
- ricb->flags & RSS_LB ? "RSS_LB " : "",
- ricb->flags & RSS_LM ? "RSS_LM " : "",
- ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
- ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
- ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
- ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
- pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask));
+ netdev_err(qdev->ndev, "===================== Dumping ricb ===============\n");
+ netdev_err(qdev->ndev, "Dumping ricb stuff...\n");
+
+ netdev_err(qdev->ndev, "ricb->base_cq = %d\n", ricb->base_cq & 0x1f);
+ netdev_err(qdev->ndev, "ricb->flags = %s%s%s%s%s%s%s%s%s\n",
+ ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
+ ricb->flags & RSS_L6K ? "RSS_L6K " : "",
+ ricb->flags & RSS_LI ? "RSS_LI " : "",
+ ricb->flags & RSS_LB ? "RSS_LB " : "",
+ ricb->flags & RSS_LM ? "RSS_LM " : "",
+ ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
+ ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
+ ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
+ ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
+ netdev_err(qdev->ndev, "ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask));
for (i = 0; i < 16; i++)
- pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i,
- le32_to_cpu(ricb->hash_cq_id[i]));
+ netdev_err(qdev->ndev, "ricb->hash_cq_id[%d] = 0x%.08x\n", i,
+ le32_to_cpu(ricb->hash_cq_id[i]));
for (i = 0; i < 10; i++)
- pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i,
- le32_to_cpu(ricb->ipv6_hash_key[i]));
+ netdev_err(qdev->ndev, "ricb->ipv6_hash_key[%d] = 0x%.08x\n", i,
+ le32_to_cpu(ricb->ipv6_hash_key[i]));
for (i = 0; i < 4; i++)
- pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i,
- le32_to_cpu(ricb->ipv4_hash_key[i]));
+ netdev_err(qdev->ndev, "ricb->ipv4_hash_key[%d] = 0x%.08x\n", i,
+ le32_to_cpu(ricb->ipv4_hash_key[i]));
}
void ql_dump_cqicb(struct cqicb *cqicb)
{
- pr_err("Dumping cqicb stuff...\n");
-
- pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect);
- pr_err("cqicb->flags = %x\n", cqicb->flags);
- pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len));
- pr_err("cqicb->addr = 0x%llx\n",
- (unsigned long long)le64_to_cpu(cqicb->addr));
- pr_err("cqicb->prod_idx_addr = 0x%llx\n",
- (unsigned long long)le64_to_cpu(cqicb->prod_idx_addr));
- pr_err("cqicb->pkt_delay = 0x%.04x\n",
- le16_to_cpu(cqicb->pkt_delay));
- pr_err("cqicb->irq_delay = 0x%.04x\n",
- le16_to_cpu(cqicb->irq_delay));
- pr_err("cqicb->lbq_addr = 0x%llx\n",
- (unsigned long long)le64_to_cpu(cqicb->lbq_addr));
- pr_err("cqicb->lbq_buf_size = 0x%.04x\n",
- le16_to_cpu(cqicb->lbq_buf_size));
- pr_err("cqicb->lbq_len = 0x%.04x\n",
- le16_to_cpu(cqicb->lbq_len));
- pr_err("cqicb->sbq_addr = 0x%llx\n",
- (unsigned long long)le64_to_cpu(cqicb->sbq_addr));
- pr_err("cqicb->sbq_buf_size = 0x%.04x\n",
- le16_to_cpu(cqicb->sbq_buf_size));
- pr_err("cqicb->sbq_len = 0x%.04x\n",
- le16_to_cpu(cqicb->sbq_len));
+ netdev_err(qdev->ndev, "Dumping cqicb stuff...\n");
+
+ netdev_err(qdev->ndev, "cqicb->msix_vect = %d\n", cqicb->msix_vect);
+ netdev_err(qdev->ndev, "cqicb->flags = %x\n", cqicb->flags);
+ netdev_err(qdev->ndev, "cqicb->len = %d\n", le16_to_cpu(cqicb->len));
+ netdev_err(qdev->ndev, "cqicb->addr = 0x%llx\n",
+ (unsigned long long)le64_to_cpu(cqicb->addr));
+ netdev_err(qdev->ndev, "cqicb->prod_idx_addr = 0x%llx\n",
+ (unsigned long long)le64_to_cpu(cqicb->prod_idx_addr));
+ netdev_err(qdev->ndev, "cqicb->pkt_delay = 0x%.04x\n",
+ le16_to_cpu(cqicb->pkt_delay));
+ netdev_err(qdev->ndev, "cqicb->irq_delay = 0x%.04x\n",
+ le16_to_cpu(cqicb->irq_delay));
+ netdev_err(qdev->ndev, "cqicb->lbq_addr = 0x%llx\n",
+ (unsigned long long)le64_to_cpu(cqicb->lbq_addr));
+ netdev_err(qdev->ndev, "cqicb->lbq_buf_size = 0x%.04x\n",
+ le16_to_cpu(cqicb->lbq_buf_size));
+ netdev_err(qdev->ndev, "cqicb->lbq_len = 0x%.04x\n",
+ le16_to_cpu(cqicb->lbq_len));
+ netdev_err(qdev->ndev, "cqicb->sbq_addr = 0x%llx\n",
+ (unsigned long long)le64_to_cpu(cqicb->sbq_addr));
+ netdev_err(qdev->ndev, "cqicb->sbq_buf_size = 0x%.04x\n",
+ le16_to_cpu(cqicb->sbq_buf_size));
+ netdev_err(qdev->ndev, "cqicb->sbq_len = 0x%.04x\n",
+ le16_to_cpu(cqicb->sbq_len));
}
static const char *qlge_rx_ring_type_name(struct rx_ring *rx_ring)
@@ -1723,71 +1727,73 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring)
{
if (!rx_ring)
return;
- pr_err("===================== Dumping rx_ring %d ===============\n",
- rx_ring->cq_id);
- pr_err("Dumping rx_ring %d, type = %s\n", rx_ring->cq_id,
- qlge_rx_ring_type_name(rx_ring));
- pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
- pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
- pr_err("rx_ring->cq_base_dma = %llx\n",
- (unsigned long long)rx_ring->cq_base_dma);
- pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size);
- pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len);
- pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",
- rx_ring->prod_idx_sh_reg,
- rx_ring->prod_idx_sh_reg
- ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
- pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n",
- (unsigned long long)rx_ring->prod_idx_sh_reg_dma);
- pr_err("rx_ring->cnsmr_idx_db_reg = %p\n",
- rx_ring->cnsmr_idx_db_reg);
- pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx);
- pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
- pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);
-
- pr_err("rx_ring->lbq.base = %p\n", rx_ring->lbq.base);
- pr_err("rx_ring->lbq.base_dma = %llx\n",
- (unsigned long long)rx_ring->lbq.base_dma);
- pr_err("rx_ring->lbq.base_indirect = %p\n",
- rx_ring->lbq.base_indirect);
- pr_err("rx_ring->lbq.base_indirect_dma = %llx\n",
- (unsigned long long)rx_ring->lbq.base_indirect_dma);
- pr_err("rx_ring->lbq = %p\n", rx_ring->lbq.queue);
- pr_err("rx_ring->lbq.prod_idx_db_reg = %p\n",
- rx_ring->lbq.prod_idx_db_reg);
- pr_err("rx_ring->lbq.next_to_use = %d\n", rx_ring->lbq.next_to_use);
- pr_err("rx_ring->lbq.next_to_clean = %d\n", rx_ring->lbq.next_to_clean);
-
- pr_err("rx_ring->sbq.base = %p\n", rx_ring->sbq.base);
- pr_err("rx_ring->sbq.base_dma = %llx\n",
- (unsigned long long)rx_ring->sbq.base_dma);
- pr_err("rx_ring->sbq.base_indirect = %p\n",
- rx_ring->sbq.base_indirect);
- pr_err("rx_ring->sbq.base_indirect_dma = %llx\n",
- (unsigned long long)rx_ring->sbq.base_indirect_dma);
- pr_err("rx_ring->sbq = %p\n", rx_ring->sbq.queue);
- pr_err("rx_ring->sbq.prod_idx_db_reg addr = %p\n",
- rx_ring->sbq.prod_idx_db_reg);
- pr_err("rx_ring->sbq.next_to_use = %d\n", rx_ring->sbq.next_to_use);
- pr_err("rx_ring->sbq.next_to_clean = %d\n", rx_ring->sbq.next_to_clean);
- pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
- pr_err("rx_ring->irq = %d\n", rx_ring->irq);
- pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
- pr_err("rx_ring->qdev = %p\n", rx_ring->qdev);
+ netdev_err(qdev->ndev,
+ "===================== Dumping rx_ring %d ===============\n",
+ rx_ring->cq_id);
+ netdev_err(qdev->ndev,
+ "Dumping rx_ring %d, type = %s\n", rx_ring->cq_id,
+ qlge_rx_ring_type_name(rx_ring));
+ netdev_err(qdev->ndev, "rx_ring->cqicb = %p\n", &rx_ring->cqicb);
+ netdev_err(qdev->ndev, "rx_ring->cq_base = %p\n", rx_ring->cq_base);
+ netdev_err(qdev->ndev, "rx_ring->cq_base_dma = %llx\n",
+ (unsigned long long)rx_ring->cq_base_dma);
+ netdev_err(qdev->ndev, "rx_ring->cq_size = %d\n", rx_ring->cq_size);
+ netdev_err(qdev->ndev, "rx_ring->cq_len = %d\n", rx_ring->cq_len);
+ netdev_err(qdev->ndev,
+ "rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",
+ rx_ring->prod_idx_sh_reg,
+ rx_ring->prod_idx_sh_reg ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
+ netdev_err(qdev->ndev, "rx_ring->prod_idx_sh_reg_dma = %llx\n",
+ (unsigned long long)rx_ring->prod_idx_sh_reg_dma);
+ netdev_err(qdev->ndev, "rx_ring->cnsmr_idx_db_reg = %p\n",
+ rx_ring->cnsmr_idx_db_reg);
+ netdev_err(qdev->ndev, "rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx);
+ netdev_err(qdev->ndev, "rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
+ netdev_err(qdev->ndev, "rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);
+
+ netdev_err(qdev->ndev, "rx_ring->lbq.base = %p\n", rx_ring->lbq.base);
+ netdev_err(qdev->ndev, "rx_ring->lbq.base_dma = %llx\n",
+ (unsigned long long)rx_ring->lbq.base_dma);
+ netdev_err(qdev->ndev, "rx_ring->lbq.base_indirect = %p\n",
+ rx_ring->lbq.base_indirect);
+ netdev_err(qdev->ndev, "rx_ring->lbq.base_indirect_dma = %llx\n",
+ (unsigned long long)rx_ring->lbq.base_indirect_dma);
+ netdev_err(qdev->ndev, "rx_ring->lbq = %p\n", rx_ring->lbq.queue);
+ netdev_err(qdev->ndev, "rx_ring->lbq.prod_idx_db_reg = %p\n",
+ rx_ring->lbq.prod_idx_db_reg);
+ netdev_err(qdev->ndev, "rx_ring->lbq.next_to_use = %d\n", rx_ring->lbq.next_to_use);
+ netdev_err(qdev->ndev, "rx_ring->lbq.next_to_clean = %d\n", rx_ring->lbq.next_to_clean);
+
+ netdev_err(qdev->ndev, "rx_ring->sbq.base = %p\n", rx_ring->sbq.base);
+ netdev_err(qdev->ndev, "rx_ring->sbq.base_dma = %llx\n",
+ (unsigned long long)rx_ring->sbq.base_dma);
+ netdev_err(qdev->ndev, "rx_ring->sbq.base_indirect = %p\n",
+ rx_ring->sbq.base_indirect);
+ netdev_err(qdev->ndev, "rx_ring->sbq.base_indirect_dma = %llx\n",
+ (unsigned long long)rx_ring->sbq.base_indirect_dma);
+ netdev_err(qdev->ndev, "rx_ring->sbq = %p\n", rx_ring->sbq.queue);
+ netdev_err(qdev->ndev, "rx_ring->sbq.prod_idx_db_reg addr = %p\n",
+ rx_ring->sbq.prod_idx_db_reg);
+ netdev_err(qdev->ndev, "rx_ring->sbq.next_to_use = %d\n", rx_ring->sbq.next_to_use);
+ netdev_err(qdev->ndev, "rx_ring->sbq.next_to_clean = %d\n", rx_ring->sbq.next_to_clean);
+ netdev_err(qdev->ndev, "rx_ring->cq_id = %d\n", rx_ring->cq_id);
+ netdev_err(qdev->ndev, "rx_ring->irq = %d\n", rx_ring->irq);
+ netdev_err(qdev->ndev, "rx_ring->cpu = %d\n", rx_ring->cpu);
+ netdev_err(qdev->ndev, "rx_ring->qdev = %p\n", rx_ring->qdev);
}
void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
{
void *ptr;
- pr_err("%s: Enter\n", __func__);
+ netdev_err(qdev->ndev, "%s: Enter\n", __func__);
ptr = kmalloc(size, GFP_ATOMIC);
if (!ptr)
return;
if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
- pr_err("%s: Failed to upload control block!\n", __func__);
+ netdev_err(qdev->ndev, "%s: Failed to upload control block!\n", __func__);
goto fail_it;
}
switch (bit) {
@@ -1801,7 +1807,7 @@ void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
ql_dump_ricb((struct ricb *)ptr);
break;
default:
- pr_err("%s: Invalid bit value = %x\n", __func__, bit);
+ netdev_err(qdev->ndev, "%s: Invalid bit value = %x\n", __func__, bit);
break;
}
fail_it:
@@ -1812,29 +1818,29 @@ fail_it:
#ifdef QL_OB_DUMP
void ql_dump_tx_desc(struct tx_buf_desc *tbd)
{
- pr_err("tbd->addr = 0x%llx\n",
- le64_to_cpu((u64) tbd->addr));
- pr_err("tbd->len = %d\n",
- le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
- pr_err("tbd->flags = %s %s\n",
- tbd->len & TX_DESC_C ? "C" : ".",
- tbd->len & TX_DESC_E ? "E" : ".");
+ netdev_err(qdev->ndev, "tbd->addr = 0x%llx\n",
+ le64_to_cpu((u64)tbd->addr));
+ netdev_err(qdev->ndev, "tbd->len = %d\n",
+ le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
+ netdev_err(qdev->ndev, "tbd->flags = %s %s\n",
+ tbd->len & TX_DESC_C ? "C" : ".",
+ tbd->len & TX_DESC_E ? "E" : ".");
tbd++;
- pr_err("tbd->addr = 0x%llx\n",
- le64_to_cpu((u64) tbd->addr));
- pr_err("tbd->len = %d\n",
- le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
- pr_err("tbd->flags = %s %s\n",
- tbd->len & TX_DESC_C ? "C" : ".",
- tbd->len & TX_DESC_E ? "E" : ".");
+ netdev_err(qdev->ndev, "tbd->addr = 0x%llx\n",
+ le64_to_cpu((u64)tbd->addr));
+ netdev_err(qdev->ndev, "tbd->len = %d\n",
+ le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
+ netdev_err(qdev->ndev, "tbd->flags = %s %s\n",
+ tbd->len & TX_DESC_C ? "C" : ".",
+ tbd->len & TX_DESC_E ? "E" : ".");
tbd++;
- pr_err("tbd->addr = 0x%llx\n",
- le64_to_cpu((u64) tbd->addr));
- pr_err("tbd->len = %d\n",
- le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
- pr_err("tbd->flags = %s %s\n",
- tbd->len & TX_DESC_C ? "C" : ".",
- tbd->len & TX_DESC_E ? "E" : ".");
+ netdev_err(qdev->ndev, "tbd->addr = 0x%llx\n",
+ le64_to_cpu((u64)tbd->addr));
+ netdev_err(qdev->ndev, "tbd->len = %d\n",
+ le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
+ netdev_err(qdev->ndev, "tbd->flags = %s %s\n",
+ tbd->len & TX_DESC_C ? "C" : ".",
+ tbd->len & TX_DESC_E ? "E" : ".");
}
void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
@@ -1844,39 +1850,39 @@ void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
struct tx_buf_desc *tbd;
u16 frame_len;
- pr_err("%s\n", __func__);
- pr_err("opcode = %s\n",
- (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
- pr_err("flags1 = %s %s %s %s %s\n",
- ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
- ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
- ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
- ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
- ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
- pr_err("flags2 = %s %s %s\n",
- ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
- ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
- ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
- pr_err("flags3 = %s %s %s\n",
- ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
- ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
- ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
- pr_err("tid = %x\n", ob_mac_iocb->tid);
- pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx);
- pr_err("vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci);
+ netdev_err(qdev->ndev, "%s\n", __func__);
+ netdev_err(qdev->ndev, "opcode = %s\n",
+ (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
+ netdev_err(qdev->ndev, "flags1 = %s %s %s %s %s\n",
+ ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
+ ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
+ ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
+ ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
+ ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
+ netdev_err(qdev->ndev, "flags2 = %s %s %s\n",
+ ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
+ ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
+ ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
+ netdev_err(qdev->ndev, "flags3 = %s %s %s\n",
+ ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
+ ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
+ ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
+ netdev_err(qdev->ndev, "tid = %x\n", ob_mac_iocb->tid);
+ netdev_err(qdev->ndev, "txq_idx = %d\n", ob_mac_iocb->txq_idx);
+ netdev_err(qdev->ndev, "vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci);
if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
- pr_err("frame_len = %d\n",
- le32_to_cpu(ob_mac_tso_iocb->frame_len));
- pr_err("mss = %d\n",
- le16_to_cpu(ob_mac_tso_iocb->mss));
- pr_err("prot_hdr_len = %d\n",
- le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
- pr_err("hdr_offset = 0x%.04x\n",
- le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
+ netdev_err(qdev->ndev, "frame_len = %d\n",
+ le32_to_cpu(ob_mac_tso_iocb->frame_len));
+ netdev_err(qdev->ndev, "mss = %d\n",
+ le16_to_cpu(ob_mac_tso_iocb->mss));
+ netdev_err(qdev->ndev, "prot_hdr_len = %d\n",
+ le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
+ netdev_err(qdev->ndev, "hdr_offset = 0x%.04x\n",
+ le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
} else {
- pr_err("frame_len = %d\n",
- le16_to_cpu(ob_mac_iocb->frame_len));
+ netdev_err(qdev->ndev, "frame_len = %d\n",
+ le16_to_cpu(ob_mac_iocb->frame_len));
frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
}
tbd = &ob_mac_iocb->tbd[0];
@@ -1885,98 +1891,98 @@ void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
{
- pr_err("%s\n", __func__);
- pr_err("opcode = %d\n", ob_mac_rsp->opcode);
- pr_err("flags = %s %s %s %s %s %s %s\n",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
- ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
- pr_err("tid = %x\n", ob_mac_rsp->tid);
+ netdev_err(qdev->ndev, "%s\n", __func__);
+ netdev_err(qdev->ndev, "opcode = %d\n", ob_mac_rsp->opcode);
+ netdev_err(qdev->ndev, "flags = %s %s %s %s %s %s %s\n",
+ ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ?
+ "OI" : ".", ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
+ ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
+ ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
+ ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
+ ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
+ ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
+ netdev_err(qdev->ndev, "tid = %x\n", ob_mac_rsp->tid);
}
#endif
#ifdef QL_IB_DUMP
void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
{
- pr_err("%s\n", __func__);
- pr_err("opcode = 0x%x\n", ib_mac_rsp->opcode);
- pr_err("flags1 = %s%s%s%s%s%s\n",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
+ netdev_err(qdev->ndev, "%s\n", __func__);
+ netdev_err(qdev->ndev, "opcode = 0x%x\n", ib_mac_rsp->opcode);
+ netdev_err(qdev->ndev, "flags1 = %s%s%s%s%s%s\n",
+ ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
+ ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
+ ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
+ ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
+ ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
+ ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
- pr_err("%s%s%s Multicast\n",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
-
- pr_err("flags2 = %s%s%s%s%s\n",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
+ netdev_err(qdev->ndev, "%s%s%s Multicast\n",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+
+ netdev_err(qdev->ndev, "flags2 = %s%s%s%s%s\n",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
- pr_err("%s%s%s%s%s error\n",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
- IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
- IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
- IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
- IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
- IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
-
- pr_err("flags3 = %s%s\n",
- ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
- ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
+ netdev_err(qdev->ndev, "%s%s%s%s%s error\n",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+ IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+ IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+ IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+ IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
+ IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
+
+ netdev_err(qdev->ndev, "flags3 = %s%s\n",
+ ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
+ ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
- pr_err("RSS flags = %s%s%s%s\n",
- ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
- IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
- ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
- IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
- ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
- IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
- ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
- IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
-
- pr_err("data_len = %d\n",
- le32_to_cpu(ib_mac_rsp->data_len));
- pr_err("data_addr = 0x%llx\n",
- (unsigned long long)le64_to_cpu(ib_mac_rsp->data_addr));
+ netdev_err(qdev->ndev, "RSS flags = %s%s%s%s\n",
+ ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+ IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
+ ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+ IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
+ ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+ IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
+ ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
+ IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
+
+ netdev_err(qdev->ndev, "data_len = %d\n",
+ le32_to_cpu(ib_mac_rsp->data_len));
+ netdev_err(qdev->ndev, "data_addr = 0x%llx\n",
+ (unsigned long long)le64_to_cpu(ib_mac_rsp->data_addr));
if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
- pr_err("rss = %x\n",
- le32_to_cpu(ib_mac_rsp->rss));
+ netdev_err(qdev->ndev, "rss = %x\n",
+ le32_to_cpu(ib_mac_rsp->rss));
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
- pr_err("vlan_id = %x\n",
- le16_to_cpu(ib_mac_rsp->vlan_id));
+ netdev_err(qdev->ndev, "vlan_id = %x\n",
+ le16_to_cpu(ib_mac_rsp->vlan_id));
- pr_err("flags4 = %s%s%s\n",
- ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
- ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
- ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
+ netdev_err(qdev->ndev, "flags4 = %s%s%s\n",
+ ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
+ ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
+ ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
- pr_err("hdr length = %d\n",
- le32_to_cpu(ib_mac_rsp->hdr_len));
- pr_err("hdr addr = 0x%llx\n",
- (unsigned long long)le64_to_cpu(ib_mac_rsp->hdr_addr));
+ netdev_err(qdev->ndev, "hdr length = %d\n",
+ le32_to_cpu(ib_mac_rsp->hdr_len));
+ netdev_err(qdev->ndev, "hdr addr = 0x%llx\n",
+ (unsigned long long)le64_to_cpu(ib_mac_rsp->hdr_addr));
}
}
#endif
diff --git a/drivers/staging/qlge/qlge_ethtool.c b/drivers/staging/qlge/qlge_ethtool.c
index 949abd53a7a9..d44b2dae9213 100644
--- a/drivers/staging/qlge/qlge_ethtool.c
+++ b/drivers/staging/qlge/qlge_ethtool.c
@@ -516,8 +516,8 @@ static void ql_create_lb_frame(struct sk_buff *skb,
memset(skb->data, 0xFF, frame_size);
frame_size &= ~1;
memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
- memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
- memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+ skb->data[frame_size / 2 + 10] = (unsigned char)0xBE;
+ skb->data[frame_size / 2 + 12] = (unsigned char)0xAF;
}
void ql_check_lb_frame(struct ql_adapter *qdev,
@@ -528,8 +528,8 @@ void ql_check_lb_frame(struct ql_adapter *qdev,
if ((*(skb->data + 3) == 0xFF) &&
(*(skb->data + frame_size / 2 + 10) == 0xBE) &&
(*(skb->data + frame_size / 2 + 12) == 0xAF)) {
- atomic_dec(&qdev->lb_count);
- return;
+ atomic_dec(&qdev->lb_count);
+ return;
}
}
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index 402edaeffe12..44ef00f1f8ee 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -687,7 +687,7 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
size = sizeof(struct flash_params_8000) / sizeof(u32);
for (i = 0; i < size; i++, p++) {
- status = ql_read_flash_word(qdev, i+offset, p);
+ status = ql_read_flash_word(qdev, i + offset, p);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Error reading flash.\n");
@@ -750,7 +750,7 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
return -ETIMEDOUT;
for (i = 0; i < size; i++, p++) {
- status = ql_read_flash_word(qdev, i+offset, p);
+ status = ql_read_flash_word(qdev, i + offset, p);
if (status) {
netif_err(qdev, ifup, qdev->ndev,
"Error reading flash.\n");
@@ -1528,7 +1528,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
struct iphdr *iph =
(struct iphdr *)((u8 *)addr + hlen);
if (!(iph->frag_off &
- htons(IP_MF|IP_OFFSET))) {
+ htons(IP_MF | IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG,
qdev->ndev,
@@ -1635,7 +1635,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
struct iphdr *iph = (struct iphdr *)skb->data;
if (!(iph->frag_off &
- htons(IP_MF|IP_OFFSET))) {
+ htons(IP_MF | IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG,
qdev->ndev,
@@ -1924,7 +1924,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
struct iphdr *iph = (struct iphdr *)skb->data;
if (!(iph->frag_off &
- htons(IP_MF|IP_OFFSET))) {
+ htons(IP_MF | IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"TCP checksum done!\n");
@@ -3181,7 +3181,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
msi:
qdev->intr_count = 1;
if (qlge_irq_type == MSI_IRQ) {
- if (!pci_enable_msi(qdev->pdev)) {
+ if (pci_alloc_irq_vectors(qdev->pdev, 1, 1, PCI_IRQ_MSI) >= 0) {
set_bit(QL_MSI_ENABLED, &qdev->flags);
netif_info(qdev, ifup, qdev->ndev,
"Running with MSI interrupts.\n");
@@ -3244,7 +3244,8 @@ static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
*/
ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
/* Add the TX ring(s) serviced by this vector
- * to the mask. */
+ * to the mask.
+ */
for (j = 0; j < tx_rings_per_vector; j++) {
ctx->irq_mask |=
(1 << qdev->rx_ring[qdev->rss_ring_count +
@@ -3777,10 +3778,10 @@ static int ql_wol(struct ql_adapter *qdev)
"Failed to set magic packet on %s.\n",
qdev->ndev->name);
return status;
- } else
- netif_info(qdev, drv, qdev->ndev,
- "Enabled magic packet successfully on %s.\n",
- qdev->ndev->name);
+ }
+ netif_info(qdev, drv, qdev->ndev,
+ "Enabled magic packet successfully on %s.\n",
+ qdev->ndev->name);
wol |= MB_WOL_MAGIC_PKT;
}
@@ -4547,7 +4548,7 @@ static void ql_timer(struct timer_list *t)
return;
}
- mod_timer(&qdev->timer, jiffies + (5*HZ));
+ mod_timer(&qdev->timer, jiffies + (5 * HZ));
}
static int qlge_probe(struct pci_dev *pdev,
@@ -4619,7 +4620,7 @@ static int qlge_probe(struct pci_dev *pdev,
* the bus goes dead
*/
timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE);
- mod_timer(&qdev->timer, jiffies + (5*HZ));
+ mod_timer(&qdev->timer, jiffies + (5 * HZ));
ql_link_off(qdev);
ql_display_dev_info(ndev);
atomic_set(&qdev->lb_count, 0);
@@ -4753,7 +4754,7 @@ static void qlge_io_resume(struct pci_dev *pdev)
netif_err(qdev, ifup, qdev->ndev,
"Device was not running prior to EEH.\n");
}
- mod_timer(&qdev->timer, jiffies + (5*HZ));
+ mod_timer(&qdev->timer, jiffies + (5 * HZ));
netif_device_attach(ndev);
}
@@ -4763,9 +4764,9 @@ static const struct pci_error_handlers qlge_err_handler = {
.resume = qlge_io_resume,
};
-static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused qlge_suspend(struct device *dev_d)
{
- struct net_device *ndev = pci_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev_d);
struct ql_adapter *qdev = netdev_priv(ndev);
int err;
@@ -4779,35 +4780,19 @@ static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
}
ql_wol(qdev);
- err = pci_save_state(pdev);
- if (err)
- return err;
-
- pci_disable_device(pdev);
-
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
-#ifdef CONFIG_PM
-static int qlge_resume(struct pci_dev *pdev)
+static int __maybe_unused qlge_resume(struct device *dev_d)
{
- struct net_device *ndev = pci_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev_d);
struct ql_adapter *qdev = netdev_priv(ndev);
int err;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- err = pci_enable_device(pdev);
- if (err) {
- netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
- return err;
- }
- pci_set_master(pdev);
+ pci_set_master(to_pci_dev(dev_d));
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
+ device_wakeup_disable(dev_d);
if (netif_running(ndev)) {
err = ql_adapter_up(qdev);
@@ -4815,27 +4800,25 @@ static int qlge_resume(struct pci_dev *pdev)
return err;
}
- mod_timer(&qdev->timer, jiffies + (5*HZ));
+ mod_timer(&qdev->timer, jiffies + (5 * HZ));
netif_device_attach(ndev);
return 0;
}
-#endif /* CONFIG_PM */
static void qlge_shutdown(struct pci_dev *pdev)
{
- qlge_suspend(pdev, PMSG_SUSPEND);
+ qlge_suspend(&pdev->dev);
}
+static SIMPLE_DEV_PM_OPS(qlge_pm_ops, qlge_suspend, qlge_resume);
+
static struct pci_driver qlge_driver = {
.name = DRV_NAME,
.id_table = qlge_pci_tbl,
.probe = qlge_probe,
.remove = qlge_remove,
-#ifdef CONFIG_PM
- .suspend = qlge_suspend,
- .resume = qlge_resume,
-#endif
+ .driver.pm = &qlge_pm_ops,
.shutdown = qlge_shutdown,
.err_handler = &qlge_err_handler
};
diff --git a/drivers/staging/qlge/qlge_mpi.c b/drivers/staging/qlge/qlge_mpi.c
index 60c08d9cc034..e85c6ab538df 100644
--- a/drivers/staging/qlge/qlge_mpi.c
+++ b/drivers/staging/qlge/qlge_mpi.c
@@ -17,36 +17,34 @@ int ql_unpause_mpi_risc(struct ql_adapter *qdev)
int ql_pause_mpi_risc(struct ql_adapter *qdev)
{
u32 tmp;
- int count = UDELAY_COUNT;
+ int count;
/* Pause the RISC */
ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
- do {
+ for (count = UDELAY_COUNT; count; count--) {
tmp = ql_read32(qdev, CSR);
if (tmp & CSR_RP)
break;
mdelay(UDELAY_DELAY);
- count--;
- } while (count);
+ }
return (count == 0) ? -ETIMEDOUT : 0;
}
int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
{
u32 tmp;
- int count = UDELAY_COUNT;
+ int count;
/* Reset the RISC */
ql_write32(qdev, CSR, CSR_CMD_SET_RST);
- do {
+ for (count = UDELAY_COUNT; count; count--) {
tmp = ql_read32(qdev, CSR);
if (tmp & CSR_RR) {
ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
break;
}
mdelay(UDELAY_DELAY);
- count--;
- } while (count);
+ }
return (count == 0) ? -ETIMEDOUT : 0;
}
@@ -147,15 +145,15 @@ static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
*/
static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev)
{
- int count = 100;
+ int count;
u32 value;
- do {
+ for (count = 100; count; count--) {
value = ql_read32(qdev, STS);
if (value & STS_PI)
return 0;
mdelay(UDELAY_DELAY); /* 100ms */
- } while (--count);
+ }
return -ETIMEDOUT;
}
@@ -276,8 +274,8 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
netif_err(qdev, ifup, qdev->ndev,
"Failed to init CAM/Routing tables.\n");
return;
- } else
- clear_bit(QL_CAM_RT_SET, &qdev->flags);
+ }
+ clear_bit(QL_CAM_RT_SET, &qdev->flags);
}
/* Queue up a worker to check the frame
@@ -389,7 +387,8 @@ static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
* This can get called iteratively from the mpi_work thread
* when events arrive via an interrupt.
* It also gets called when a mailbox command is polling for
- * it's completion. */
+ * its completion.
+ */
static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
int status;
@@ -520,7 +519,7 @@ end:
* changed when a mailbox command is waiting
* for a response and an AEN arrives and
* is handled.
- * */
+ */
mbcp->out_count = orig_count;
return status;
}
@@ -555,7 +554,8 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
* here because some AEN might arrive while
* we're waiting for the mailbox command to
* complete. If more than 5 seconds expire we can
- * assume something is wrong. */
+ * assume something is wrong.
+ */
count = jiffies + HZ * MAILBOX_TIMEOUT;
do {
/* Wait for the interrupt to come in. */
@@ -786,8 +786,9 @@ int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
char *my_buf;
dma_addr_t buf_dma;
- my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
- &buf_dma);
+ my_buf = dma_alloc_coherent(&qdev->pdev->dev,
+ word_count * sizeof(u32), &buf_dma,
+ GFP_ATOMIC);
if (!my_buf)
return -EIO;
@@ -795,8 +796,8 @@ int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
if (!status)
memcpy(buf, my_buf, word_count * sizeof(u32));
- pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
- buf_dma);
+ dma_free_coherent(&qdev->pdev->dev, word_count * sizeof(u32), my_buf,
+ buf_dma);
return status;
}
@@ -911,10 +912,10 @@ int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
static int ql_idc_wait(struct ql_adapter *qdev)
{
int status = -ETIMEDOUT;
- long wait_time = 1 * HZ;
struct mbox_params *mbcp = &qdev->idc_mbc;
+ long wait_time;
- do {
+ for (wait_time = 1 * HZ; wait_time;) {
/* Wait here for the command to complete
* via the IDC process.
*/
@@ -944,7 +945,7 @@ static int ql_idc_wait(struct ql_adapter *qdev)
status = -EIO;
break;
}
- } while (wait_time);
+ }
return status;
}
@@ -1077,18 +1078,18 @@ static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
int ql_wait_fifo_empty(struct ql_adapter *qdev)
{
- int count = 5;
+ int count;
u32 mgmnt_fifo_empty;
u32 nic_fifo_empty;
- do {
+ for (count = 6; count; count--) {
nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE;
ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty);
mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY;
if (nic_fifo_empty && mgmnt_fifo_empty)
return 0;
msleep(100);
- } while (count-- > 0);
+ }
return -ETIMEDOUT;
}
@@ -1173,12 +1174,12 @@ void ql_mpi_idc_work(struct work_struct *work)
case MB_CMD_PORT_RESET:
case MB_CMD_STOP_FW:
ql_link_off(qdev);
- /* Fall through */
+ fallthrough;
case MB_CMD_SET_PORT_CFG:
/* Signal the resulting link up AEN
* that the frame routing and mac addr
* needs to be set.
- * */
+ */
set_bit(QL_CAM_RT_SET, &qdev->flags);
/* Do ACK if required */
if (timeout) {
@@ -1206,7 +1207,7 @@ void ql_mpi_idc_work(struct work_struct *work)
*/
ql_link_off(qdev);
set_bit(QL_CAM_RT_SET, &qdev->flags);
- /* Fall through. */
+ fallthrough;
case MB_CMD_IOP_DVR_START:
case MB_CMD_IOP_FLASH_ACC:
case MB_CMD_IOP_CORE_DUMP_MPI: