| field | value | date |
|---|---|---|
| author | Geetha sowjanya <gakula@marvell.com> | 2018-10-16 16:57:14 +0530 |
| committer | David S. Miller <davem@davemloft.net> | 2018-10-17 21:33:43 -0700 |
| commit | 57856dde119590462315ddb5b4bf5fd2d981cb36 (patch) | |
| tree | f645e5ce03d9361950ca9589aac7088226f0eded /drivers | |
| parent | 4a3581cd5995d86721b3a38e1e2799b2137b4d77 (diff) | |
| download | linux-57856dde119590462315ddb5b4bf5fd2d981cb36.tar.bz2 | |
octeontx2-af: Support for disabling NPA Aura/Pool contexts
This patch adds support for an RVU PF/VF to disable all Aura/Pool
contexts of an NPA LF via mbox. This will be used by PF/VF drivers
upon teardown or while freeing up HW resources.

A HW context which has not been INIT'ed cannot be modified, and an
RVU PF/VF driver may or may not INIT all of the Aura/Pool contexts.
So a bitmap is introduced to keep track of the enabled NPA Aura/Pool
contexts, ensuring that only enabled HW contexts are disabled upon
LF teardown.
Signed-off-by: Geetha sowjanya <gakula@marvell.com>
Signed-off-by: Stanislaw Kardach <skardach@marvell.com>
Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
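
For context, the sketch below (not part of the patch) shows how an RVU PF/VF driver could use the new NPA_HWCTX_DISABLE message during LF teardown. Only struct hwctx_disable_req, the MBOX_MSG_NPA_HWCTX_DISABLE id and the NPA_AQ_CTYPE_* values come from this patch and the existing headers; alloc_npa_mbox_msg() and send_npa_mbox_msg() are hypothetical stand-ins for whatever mbox request/response helpers the calling driver provides.

```c
/*
 * Illustrative sketch only -- not part of the patch.  It shows a possible
 * teardown path in a PF/VF driver: disable all pool contexts first, then
 * all aura contexts, each with one NPA_HWCTX_DISABLE request.
 */
struct hwctx_disable_req *alloc_npa_mbox_msg(void *mbox, int msg_id,
					     int size);		/* hypothetical */
int send_npa_mbox_msg(void *mbox, struct mbox_msghdr *hdr);	/* hypothetical */

static int npa_lf_teardown_hw_contexts(void *mbox)
{
	struct hwctx_disable_req *req;
	int err;

	/* Ask the AF to disable every enabled pool context of this LF. */
	req = alloc_npa_mbox_msg(mbox, MBOX_MSG_NPA_HWCTX_DISABLE, sizeof(*req));
	if (!req)
		return -ENOMEM;
	req->ctype = NPA_AQ_CTYPE_POOL;
	err = send_npa_mbox_msg(mbox, &req->hdr);
	if (err)
		return err;

	/* Then disable the aura contexts that pointed at those pools. */
	req = alloc_npa_mbox_msg(mbox, MBOX_MSG_NPA_HWCTX_DISABLE, sizeof(*req));
	if (!req)
		return -ENOMEM;
	req->ctype = NPA_AQ_CTYPE_AURA;
	return send_npa_mbox_msg(mbox, &req->hdr);
}
```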
Diffstat (limited to 'drivers')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 7 |
| -rw-r--r-- | drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 5 |
| -rw-r--r-- | drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c | 98 |
3 files changed, 110 insertions, 0 deletions
```diff
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index bf11058ac30c..4e873147538d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -142,6 +142,7 @@ M(CGX_INTLBK_DISABLE,	0x20B, msg_req, msg_rsp)		\
 M(NPA_LF_ALLOC,		0x400, npa_lf_alloc_req, npa_lf_alloc_rsp)	\
 M(NPA_LF_FREE,		0x401, msg_req, msg_rsp)			\
 M(NPA_AQ_ENQ,		0x402, npa_aq_enq_req, npa_aq_enq_rsp)		\
+M(NPA_HWCTX_DISABLE,	0x403, hwctx_disable_req, msg_rsp)		\
 /* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */				\
 /* TIM mbox IDs (range 0x800 - 0x9FF) */				\
 /* CPT mbox IDs (range 0xA00 - 0xBFF) */				\
@@ -325,4 +326,10 @@ struct npa_aq_enq_rsp {
 	};
 };
 
+/* Disable all contexts of type 'ctype' */
+struct hwctx_disable_req {
+	struct mbox_msghdr hdr;
+	u8 ctype;
+};
+
 #endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index a70c26bfe746..bfc95c3ebe97 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -77,6 +77,8 @@ struct rvu_pfvf {
 	struct qmem	*aura_ctx;
 	struct qmem	*pool_ctx;
 	struct qmem	*npa_qints_ctx;
+	unsigned long	*aura_bmap;
+	unsigned long	*pool_bmap;
 };
 
 struct rvu_hwinfo {
@@ -216,6 +218,9 @@ void rvu_npa_freemem(struct rvu *rvu);
 int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
 				struct npa_aq_enq_req *req,
 				struct npa_aq_enq_rsp *rsp);
+int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
+				       struct hwctx_disable_req *req,
+				       struct msg_rsp *rsp);
 int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
 				  struct npa_lf_alloc_req *req,
 				  struct npa_lf_alloc_rsp *rsp);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index 4ff0e760eeb2..0e43a693d119 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -63,6 +63,7 @@ static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
 	struct admin_queue *aq;
 	struct rvu_pfvf *pfvf;
 	void *ctx, *mask;
+	bool ena;
 
 	pfvf = rvu_get_pfvf(rvu, pcifunc);
 	if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize)
@@ -149,6 +150,35 @@ static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
 			return rc;
 	}
 
+	/* Set aura bitmap if aura hw context is enabled */
+	if (req->ctype == NPA_AQ_CTYPE_AURA) {
+		if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena)
+			__set_bit(req->aura_id, pfvf->aura_bmap);
+		if (req->op == NPA_AQ_INSTOP_WRITE) {
+			ena = (req->aura.ena & req->aura_mask.ena) |
+				(test_bit(req->aura_id, pfvf->aura_bmap) &
+				~req->aura_mask.ena);
+			if (ena)
+				__set_bit(req->aura_id, pfvf->aura_bmap);
+			else
+				__clear_bit(req->aura_id, pfvf->aura_bmap);
+		}
+	}
+
+	/* Set pool bitmap if pool hw context is enabled */
+	if (req->ctype == NPA_AQ_CTYPE_POOL) {
+		if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena)
+			__set_bit(req->aura_id, pfvf->pool_bmap);
+		if (req->op == NPA_AQ_INSTOP_WRITE) {
+			ena = (req->pool.ena & req->pool_mask.ena) |
+				(test_bit(req->aura_id, pfvf->pool_bmap) &
+				~req->pool_mask.ena);
+			if (ena)
+				__set_bit(req->aura_id, pfvf->pool_bmap);
+			else
+				__clear_bit(req->aura_id, pfvf->pool_bmap);
+		}
+	}
 	spin_unlock(&aq->lock);
 
 	if (rsp) {
@@ -166,6 +196,51 @@ static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
 	return 0;
 }
 
+static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
+{
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+	struct npa_aq_enq_req aq_req;
+	unsigned long *bmap;
+	int id, cnt = 0;
+	int err = 0, rc;
+
+	if (!pfvf->pool_ctx || !pfvf->aura_ctx)
+		return NPA_AF_ERR_AQ_ENQUEUE;
+
+	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
+	aq_req.hdr.pcifunc = req->hdr.pcifunc;
+
+	if (req->ctype == NPA_AQ_CTYPE_POOL) {
+		aq_req.pool.ena = 0;
+		aq_req.pool_mask.ena = 1;
+		cnt = pfvf->pool_ctx->qsize;
+		bmap = pfvf->pool_bmap;
+	} else if (req->ctype == NPA_AQ_CTYPE_AURA) {
+		aq_req.aura.ena = 0;
+		aq_req.aura_mask.ena = 1;
+		cnt = pfvf->aura_ctx->qsize;
+		bmap = pfvf->aura_bmap;
+	}
+
+	aq_req.ctype = req->ctype;
+	aq_req.op = NPA_AQ_INSTOP_WRITE;
+
+	for (id = 0; id < cnt; id++) {
+		if (!test_bit(id, bmap))
+			continue;
+		aq_req.aura_id = id;
+		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, NULL);
+		if (rc) {
+			err = rc;
+			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
+				(req->ctype == NPA_AQ_CTYPE_AURA) ?
				"Aura" : "Pool", id);
+		}
+	}
+
+	return err;
+}
+
 int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
 				struct npa_aq_enq_req *req,
 				struct npa_aq_enq_rsp *rsp)
@@ -173,11 +248,24 @@ int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
 	return rvu_npa_aq_enq_inst(rvu, req, rsp);
 }
 
+int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
+				       struct hwctx_disable_req *req,
+				       struct msg_rsp *rsp)
+{
+	return npa_lf_hwctx_disable(rvu, req);
+}
+
 static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
 {
+	kfree(pfvf->aura_bmap);
+	pfvf->aura_bmap = NULL;
+
 	qmem_free(rvu->dev, pfvf->aura_ctx);
 	pfvf->aura_ctx = NULL;
 
+	kfree(pfvf->pool_bmap);
+	pfvf->pool_bmap = NULL;
+
 	qmem_free(rvu->dev, pfvf->pool_ctx);
 	pfvf->pool_ctx = NULL;
 
@@ -227,12 +315,22 @@ int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
 	if (err)
 		goto free_mem;
 
+	pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
+				  GFP_KERNEL);
+	if (!pfvf->aura_bmap)
+		goto free_mem;
+
 	/* Alloc memory for pool HW contexts */
 	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
 	err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size);
 	if (err)
 		goto free_mem;
 
+	pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
+				  GFP_KERNEL);
+	if (!pfvf->pool_bmap)
+		goto free_mem;
+
 	/* Get no of queue interrupts supported */
 	cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
 	qints = (cfg >> 28) & 0xFFF;
```
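
The masked-update rule behind the aura/pool bitmap bookkeeping in rvu_npa_aq_enq_inst() can be seen in isolation below. This is a standalone userspace sketch, not kernel code: a plain uint64_t stands in for aura_bmap and the kernel's __set_bit()/test_bit(), and track_aura_write() mirrors the patch's expression (req->aura.ena & req->aura_mask.ena) | (test_bit(...) & ~req->aura_mask.ena).

```c
/* Standalone illustration of the enable-bit tracking done on an AQ WRITE:
 * the new ENA value only takes effect when its mask bit is set, otherwise
 * the previously tracked state is kept.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t aura_bmap;	/* bit N set == aura context N is enabled */

static void track_aura_write(int aura_id, int req_ena, int mask_ena)
{
	int old = !!(aura_bmap & (1ULL << aura_id));
	/* Take req_ena when mask_ena is 1, keep the old state when it is 0. */
	int ena = (req_ena & mask_ena) | (old & ~mask_ena);

	if (ena)
		aura_bmap |= 1ULL << aura_id;
	else
		aura_bmap &= ~(1ULL << aura_id);
}

int main(void)
{
	/* INIT with ena=1: mark aura 3 as enabled. */
	aura_bmap |= 1ULL << 3;

	/* WRITE that does not touch ENA (mask=0): state is preserved. */
	track_aura_write(3, 0, 0);
	printf("after masked-off write: %s\n",
	       aura_bmap & (1ULL << 3) ? "enabled" : "disabled");

	/* WRITE with ena=0, mask=1: the context is now tracked as disabled,
	 * so a later NPA_HWCTX_DISABLE pass would skip it.
	 */
	track_aura_write(3, 0, 1);
	printf("after disable write:    %s\n",
	       aura_bmap & (1ULL << 3) ? "enabled" : "disabled");
	return 0;
}
```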