author		Geetha sowjanya <gakula@marvell.com>	2020-03-02 12:49:22 +0530
committer	David S. Miller <davem@davemloft.net>	2020-03-02 11:08:51 -0800
commit		27150bc4286cf8f39bdcba1b797971d09642871b (patch)
tree		073a74372470bd8d782c1f8dc4d6f79376d4c0be /drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
parent		48938b1e50270047566025bdc43700e71cc5e6c5 (diff)
download	linux-27150bc4286cf8f39bdcba1b797971d09642871b.tar.bz2
octeontx2-af: Interface backpressure configuration
Each of the interface receive channels can be backpressured by resources upon exhaustion or on reaching configured threshold levels. Resources here are receive buffer queues (Auras) and packet notification descriptor queues (CQs). Resources and interface channels are mapped using backpressure IDs (BPIDs). HW supports up to 512 BPIDs; this patch divides them statically across the CGX/LBK/SDP interfaces as follows:

  BPIDs 0 - 191 are mapped to LMAC channels, 16 per LMAC.
  BPIDs 192 - 255 are mapped to LBK channels.
  BPIDs 256 - 511 are mapped to SDP channels.

The needed basic configuration of the BPIDs is also done. Mbox handlers are added with which a PF device can request a BPID, which it then uses to configure its Auras and CQs.

Signed-off-by: Geetha sowjanya <gakula@marvell.com>
Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
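A minimal sketch of the static partitioning described above (all macro, enum, and function names here are illustrative assumptions, not the driver's actual identifiers):

/* Sketch of the static BPID split from the commit message.
 * Names and channel numbering are assumptions for illustration only.
 */
#define NPA_BPID_PER_LMAC	16
#define NPA_BPID_LMAC_BASE	0	/* BPIDs   0..191, 16 per LMAC  */
#define NPA_BPID_LBK_BASE	192	/* BPIDs 192..255, LBK channels */
#define NPA_BPID_SDP_BASE	256	/* BPIDs 256..511, SDP channels */

enum bp_intf { BP_INTF_CGX, BP_INTF_LBK, BP_INTF_SDP };

/* Translate an interface-local channel into its backpressure ID. */
static int bpid_for_channel(enum bp_intf intf, int lmac, int chan)
{
	switch (intf) {
	case BP_INTF_CGX:
		return NPA_BPID_LMAC_BASE + lmac * NPA_BPID_PER_LMAC + chan;
	case BP_INTF_LBK:
		return NPA_BPID_LBK_BASE + chan;
	case BP_INTF_SDP:
		return NPA_BPID_SDP_BASE + chan;
	}
	return -1;
}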
Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c')
-rw-r--r--	drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c	13
1 file changed, 10 insertions, 3 deletions
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index 6e7c7f459f74..67471cb2b129 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -94,6 +94,11 @@ int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
 	 */
 	inst.res_addr = (u64)aq->res->iova;
 
+	/* Hardware uses same aq->res->base for updating result of
+	 * previous instruction hence wait here till it is done.
+	 */
+	spin_lock(&aq->lock);
+
 	/* Clean result + context memory */
 	memset(aq->res->base, 0, aq->res->entry_sz);
 	/* Context needs to be written at RES_ADDR + 128 */
@@ -138,10 +143,10 @@ int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
 		break;
 	}
 
-	if (rc)
+	if (rc) {
+		spin_unlock(&aq->lock);
 		return rc;
-
-	spin_lock(&aq->lock);
+	}
 
 	/* Submit the instruction to AQ */
 	rc = npa_aq_enqueue_wait(rvu, block, &inst);
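The two hunks above move the lock acquisition: the lock is now taken before the shared result buffer is cleared (hardware writes the previous instruction's result into the same aq->res->base), and the early-error path gains the matching unlock. A condensed sketch of that pattern, with all types and helper names hypothetical:

struct admin_queue {
	spinlock_t lock;	/* serializes use of res_base */
	void *res_base;		/* result memory reused by HW */
	size_t entry_sz;
};

static int aq_submit(struct admin_queue *aq, const void *inst)
{
	int rc;

	/* Wait until the previous instruction's result has landed. */
	spin_lock(&aq->lock);
	memset(aq->res_base, 0, aq->entry_sz);

	rc = aq_build_context(aq, inst);	/* hypothetical helper */
	if (rc) {
		spin_unlock(&aq->lock);		/* error path must unlock */
		return rc;
	}

	rc = aq_enqueue_wait(aq, inst);		/* hypothetical helper */
	spin_unlock(&aq->lock);
	return rc;
}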
@@ -218,6 +223,8 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
 	} else if (req->ctype == NPA_AQ_CTYPE_AURA) {
 		aq_req.aura.ena = 0;
 		aq_req.aura_mask.ena = 1;
+		aq_req.aura.bp_ena = 0;
+		aq_req.aura_mask.bp_ena = 1;
 		cnt = pfvf->aura_ctx->qsize;
 		bmap = pfvf->aura_bmap;
 	}
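The aura/aura_mask pairing in the hunk above follows the admin queue's masked write idiom: a context field is updated only when the corresponding mask bit is set, so setting aura.bp_ena = 0 with aura_mask.bp_ena = 1 clears backpressure on the aura without disturbing unrelated fields. A small sketch of the idiom, using hypothetical structures:

/* Masked context update: only fields flagged in 'mask' are applied. */
struct aura_ctx {
	unsigned int ena    : 1;
	unsigned int bp_ena : 1;
};

static void apply_masked(struct aura_ctx *hw, const struct aura_ctx *val,
			 const struct aura_ctx *mask)
{
	if (mask->ena)
		hw->ena = val->ena;
	if (mask->bp_ena)
		hw->bp_ena = val->bp_ena;
}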