author		Roy Pledge <roy.pledge@nxp.com>	2017-04-17 16:55:19 -0400
committer	Scott Wood <oss@buserror.net>	2017-04-30 02:02:46 -0500
commit		e21c7316d8ddcf1fd679591d1427e937999a7cf5 (patch)
tree		a2d85a19264546a61c2a8f4334ae0aca76b5c363 /drivers/soc
parent		4ba251626ff1ac3447f9a05ecfcec7f85b6c8a91 (diff)
soc/fsl/qbman: Disable IRQs for deferred QBMan work
Work for Congestion State Change Notifications (CSCN) and Message Ring (MR) handling is deferred to the workqueue mechanism. This requires the driver to disable those IRQ sources before scheduling the work and to re-enable them once the work completes, so that the interrupt doesn't continually fire while the work is pending.

Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
Signed-off-by: Scott Wood <oss@buserror.net>
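The change below follows a common deferred-interrupt pattern: the slow-path handler masks the offending source (qman_p_irqsource_remove()) before queuing the work with queue_work_on(), and the work function unmasks it again (qman_p_irqsource_add()) only after the condition has been handled. The sketch that follows is a generic, hedged illustration of that pattern, not the qman driver's code; my_dev, MY_EVENT_SRC, MY_IER, my_irqsource_add()/my_irqsource_remove(), my_event_work() and my_isr() are hypothetical stand-ins.

/*
 * Minimal sketch of the "mask, defer, unmask" pattern; NOT the qman code.
 * The device, register offset and helpers are assumed for illustration.
 */
#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

#define MY_EVENT_SRC	BIT(0)	/* one bit in the IRQ-enable register */
#define MY_IER		0x04	/* assumed interrupt-enable register offset */

struct my_dev {
	void __iomem *regs;
	struct work_struct event_work;
};

/* Enable (unmask) one interrupt source. */
static void my_irqsource_add(struct my_dev *d, u32 src)
{
	writel(readl(d->regs + MY_IER) | src, d->regs + MY_IER);
}

/* Disable (mask) one interrupt source. */
static void my_irqsource_remove(struct my_dev *d, u32 src)
{
	writel(readl(d->regs + MY_IER) & ~src, d->regs + MY_IER);
}

static void my_event_work(struct work_struct *work)
{
	struct my_dev *d = container_of(work, struct my_dev, event_work);

	/* ... drain/handle the condition that raised the interrupt ... */

	/* Re-enable the source only once the deferred work is done. */
	my_irqsource_add(d, MY_EVENT_SRC);
}

static irqreturn_t my_isr(int irq, void *data)
{
	struct my_dev *d = data;

	/*
	 * Mask the source before deferring; otherwise a still-asserted
	 * source keeps re-raising the interrupt until the work runs.
	 */
	my_irqsource_remove(d, MY_EVENT_SRC);
	schedule_work(&d->event_work);	/* the patch uses queue_work_on() */
	return IRQ_HANDLED;
}

Note that the patch re-enables QM_PIRQ_CSCI even on the QUERYCONGESTION timeout error path, so a failed query never leaves the congestion interrupt permanently masked; the MR path likewise re-enables QM_PIRQ_MRI after qm_mr_cci_consume(). In a real driver the read-modify-write of the enable register in the sketch would also need to be serialized against the interrupt handler.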
Diffstat (limited to 'drivers/soc')
-rw-r--r--	drivers/soc/fsl/qbman/qman.c	5
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 6f509f68085e..2827a65aa25a 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1344,6 +1344,7 @@ static void qm_congestion_task(struct work_struct *work)
 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
 		spin_unlock(&p->cgr_lock);
 		dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
+		qman_p_irqsource_add(p, QM_PIRQ_CSCI);
 		return;
 	}
 	/* mask out the ones I'm not interested in */
@@ -1358,6 +1359,7 @@ static void qm_congestion_task(struct work_struct *work)
 		if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
 			cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
 	spin_unlock(&p->cgr_lock);
+	qman_p_irqsource_add(p, QM_PIRQ_CSCI);
 }
 
 static void qm_mr_process_task(struct work_struct *work)
@@ -1417,12 +1419,14 @@ static void qm_mr_process_task(struct work_struct *work)
 	}
 
 	qm_mr_cci_consume(&p->p, num);
+	qman_p_irqsource_add(p, QM_PIRQ_MRI);
 	preempt_enable();
 }
 
 static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
 {
 	if (is & QM_PIRQ_CSCI) {
+		qman_p_irqsource_remove(p, QM_PIRQ_CSCI);
 		queue_work_on(smp_processor_id(), qm_portal_wq,
 			      &p->congestion_work);
 	}
@@ -1434,6 +1438,7 @@ static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
 	}
 
 	if (is & QM_PIRQ_MRI) {
+		qman_p_irqsource_remove(p, QM_PIRQ_MRI);
 		queue_work_on(smp_processor_id(), qm_portal_wq,
 			      &p->mr_work);
 	}