author		Yixian Liu <liuyixian@huawei.com>	2019-11-18 10:34:53 +0800
committer	Jason Gunthorpe <jgg@mellanox.com>	2019-11-25 10:31:48 -0400
commit		f295e4cece5cb4c60715fed539abcd62468f9ef1 (patch)
tree		f3651f1bfa78959213d167bb8bc8792a86b79fdd /drivers
parent		707783ab5f48f054f8da3114ddcdf1685a313a63 (diff)
download	linux-f295e4cece5cb4c60715fed539abcd62468f9ef1.tar.bz2
RDMA/hns: Delete unnecessary callback functions for cq
Currently, when a CQ event occurs, the event processing function first calls
our own callback functions, which in turn call the ib callback functions.
Actually, we can call the ib callback functions directly.
Link: https://lore.kernel.org/r/1574044493-46984-5-git-send-email-liweihang@hisilicon.com
Signed-off-by: Yixian Liu <liuyixian@huawei.com>
Signed-off-by: Weihang Li <liweihang@hisilicon.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
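
For illustration only (not part of the patch), the sketch below mimics the simplified completion path in plain user-space C: when a completion event arrives for a CQ, the handler now invokes the ib_cq comp_handler directly instead of going through a per-driver hr_cq->comp() trampoline. The struct layouts, cq_completion() and dummy_comp_handler() here are simplified stand-ins, not the kernel definitions.

/*
 * Minimal sketch of the post-patch completion path.
 * The types below are illustrative stand-ins for the kernel structures.
 */
#include <stdio.h>

struct ib_cq {
	void (*comp_handler)(struct ib_cq *cq, void *cq_context);
	void *cq_context;
};

struct hns_roce_cq {
	struct ib_cq ib_cq;
	int arm_sn;
};

/* After the patch: the ib completion handler is called directly. */
static void cq_completion(struct hns_roce_cq *hr_cq)
{
	struct ib_cq *ibcq = &hr_cq->ib_cq;

	++hr_cq->arm_sn;
	if (ibcq->comp_handler)
		ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void dummy_comp_handler(struct ib_cq *cq, void *cq_context)
{
	printf("completion on cq %p, context %p\n", (void *)cq, cq_context);
}

int main(void)
{
	struct hns_roce_cq hr_cq = {
		.ib_cq = { .comp_handler = dummy_comp_handler },
	};

	cq_completion(&hr_cq);
	return 0;
}

Dropping the indirection removes two function pointers from struct hns_roce_cq and one extra call per event, as the actual diff below shows.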
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_cq.c		91
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_device.h	 3
2 files changed, 36 insertions, 58 deletions
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 9174d33bdfee..af1d8823b3f0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -39,40 +39,6 @@
 #include <rdma/hns-abi.h>
 #include "hns_roce_common.h"
 
-static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
-{
-	struct ib_cq *ibcq = &hr_cq->ib_cq;
-
-	ibcq->comp_handler(ibcq, ibcq->cq_context);
-}
-
-static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
-				 enum hns_roce_event event_type)
-{
-	struct hns_roce_dev *hr_dev;
-	struct ib_event event;
-	struct ib_cq *ibcq;
-
-	ibcq = &hr_cq->ib_cq;
-	hr_dev = to_hr_dev(ibcq->device);
-
-	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
-	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
-	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
-		dev_err(hr_dev->dev,
-			"hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
-			event_type, hr_cq->cqn);
-		return;
-	}
-
-	if (ibcq->event_handler) {
-		event.device = ibcq->device;
-		event.event = IB_EVENT_CQ_ERR;
-		event.element.cq = ibcq;
-		ibcq->event_handler(&event, ibcq->cq_context);
-	}
-}
-
 static int hns_roce_alloc_cqc(struct hns_roce_dev *hr_dev,
 			      struct hns_roce_cq *hr_cq)
 {
@@ -434,10 +400,6 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
 	if (!udata && hr_cq->tptr_addr)
 		*hr_cq->tptr_addr = 0;
 
-	/* Get created cq handler and carry out event */
-	hr_cq->comp = hns_roce_ib_cq_comp;
-	hr_cq->event = hns_roce_ib_cq_event;
-
 	if (udata) {
 		resp.cqn = hr_cq->cqn;
 		ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -491,38 +453,57 @@ void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 
 void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
 {
-	struct device *dev = hr_dev->dev;
-	struct hns_roce_cq *cq;
+	struct hns_roce_cq *hr_cq;
+	struct ib_cq *ibcq;
 
-	cq = xa_load(&hr_dev->cq_table.array, cqn & (hr_dev->caps.num_cqs - 1));
-	if (!cq) {
-		dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn);
+	hr_cq = xa_load(&hr_dev->cq_table.array,
+			cqn & (hr_dev->caps.num_cqs - 1));
+	if (!hr_cq) {
+		dev_warn(hr_dev->dev, "Completion event for bogus CQ 0x%06x\n",
+			 cqn);
 		return;
 	}
 
-	++cq->arm_sn;
-	cq->comp(cq);
+	++hr_cq->arm_sn;
+	ibcq = &hr_cq->ib_cq;
+	if (ibcq->comp_handler)
+		ibcq->comp_handler(ibcq, ibcq->cq_context);
 }
 
 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
 {
-	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
 	struct device *dev = hr_dev->dev;
-	struct hns_roce_cq *cq;
+	struct hns_roce_cq *hr_cq;
+	struct ib_event event;
+	struct ib_cq *ibcq;
 
-	cq = xa_load(&cq_table->array, cqn & (hr_dev->caps.num_cqs - 1));
-	if (cq)
-		atomic_inc(&cq->refcount);
+	hr_cq = xa_load(&hr_dev->cq_table.array,
+			cqn & (hr_dev->caps.num_cqs - 1));
+	if (!hr_cq) {
+		dev_warn(dev, "Async event for bogus CQ 0x%06x\n", cqn);
+		return;
+	}
 
-	if (!cq) {
-		dev_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
+	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
+	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
+		dev_err(dev, "Unexpected event type 0x%x on CQ 0x%06x\n",
+			event_type, cqn);
 		return;
 	}
 
-	cq->event(cq, (enum hns_roce_event)event_type);
+	atomic_inc(&hr_cq->refcount);
 
-	if (atomic_dec_and_test(&cq->refcount))
-		complete(&cq->free);
+	ibcq = &hr_cq->ib_cq;
+	if (ibcq->event_handler) {
+		event.device = ibcq->device;
+		event.element.cq = ibcq;
+		event.event = IB_EVENT_CQ_ERR;
+		ibcq->event_handler(&event, ibcq->cq_context);
+	}
+
+	if (atomic_dec_and_test(&hr_cq->refcount))
+		complete(&hr_cq->free);
 }
 
 int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 152701e4a07f..5617434cbfb4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -488,9 +488,6 @@ struct hns_roce_cq {
 	u8			db_en;
 	spinlock_t		lock;
 	struct ib_umem		*umem;
-	void (*comp)(struct hns_roce_cq *cq);
-	void (*event)(struct hns_roce_cq *cq, enum hns_roce_event event_type);
-
 	u32			cq_depth;
 	u32			cons_index;
 	u32			*set_ci_db;