author		Dou Liyang <douliyangs@gmail.com>	2018-12-04 23:51:21 +0800
committer	Thomas Gleixner <tglx@linutronix.de>	2018-12-19 11:32:08 +0100
commit		c410abbbacb9b378365ba17a30df08b4b9eec64f (patch)
tree		2cf584bc16b68ef0c4a673cb589f659c3c4284c4
parent		bec04037e4e484f41ee4d9409e40616874169d20 (diff)
download	linux-c410abbbacb9b378365ba17a30df08b4b9eec64f.tar.bz2
genirq/affinity: Add is_managed to struct irq_affinity_desc
Devices which use managed interrupts usually have two classes of interrupts:

  - Interrupts for multiple device queues
  - Interrupts for general device management

Currently both classes are treated the same way, i.e. as managed interrupts. The general interrupts get the default affinity mask assigned while the device queue interrupts are spread out over the possible CPUs.

Treating the general interrupts as managed is both a limitation and under certain circumstances a bug. Assume the following situation:

  default_irq_affinity = 4..7

So if CPUs 4-7 are offlined, then the core code will shut down the device management interrupts because the last CPU in their affinity mask went offline.

It's also a limitation because it's desired to allow manual placement of the general device interrupts for various reasons. If they are marked managed then the interrupt affinity setting from both user and kernel space is disabled. That limitation was reported by Kashyap and Sumit.

Expand struct irq_affinity_desc with a new bit 'is_managed' which is set for truly managed interrupts (queue interrupts) and cleared for the general device interrupts.

[ tglx: Simplify code and massage changelog ]

Reported-by: Kashyap Desai <kashyap.desai@broadcom.com>
Reported-by: Sumit Saxena <sumit.saxena@broadcom.com>
Signed-off-by: Dou Liyang <douliyangs@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-pci@vger.kernel.org
Cc: shivasharan.srikanteshwara@broadcom.com
Cc: ming.lei@redhat.com
Cc: hch@lst.de
Cc: bhelgaas@google.com
Cc: douliyang1@huawei.com
Link: https://lkml.kernel.org/r/20181204155122.6327-3-douliyangs@gmail.com
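For context, a minimal driver-side sketch of how the two interrupt classes are typically described to the core, assuming a device with one management interrupt followed by several queue interrupts. This is not part of the patch; foo_setup_irqs() and FOO_NR_QUEUES are made-up names for illustration only.

/*
 * Hypothetical driver-side sketch, not part of this patch: foo_setup_irqs()
 * and FOO_NR_QUEUES are made-up names.  The device reserves one vector for
 * general management via pre_vectors; the remaining queue vectors are
 * spread over the possible CPUs by irq_create_affinity_masks() and, with
 * this change, are the only ones marked is_managed.
 */
#include <linux/interrupt.h>
#include <linux/pci.h>

#define FOO_NR_QUEUES	8

static int foo_setup_irqs(struct pci_dev *pdev)
{
	struct irq_affinity affd = {
		.pre_vectors	= 1,	/* management interrupt, not managed */
		.post_vectors	= 0,
	};

	/* One management vector plus FOO_NR_QUEUES queue vectors */
	return pci_alloc_irq_vectors_affinity(pdev, 1, FOO_NR_QUEUES + 1,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					      &affd);
}

With such a setup, irq_create_affinity_masks() keeps vector 0 on the default affinity mask and, after this change, marks only the queue vectors as managed.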
-rw-r--r--	include/linux/interrupt.h	1
-rw-r--r--	kernel/irq/affinity.c	4
-rw-r--r--	kernel/irq/irqdesc.c	13
3 files changed, 13 insertions(+), 5 deletions(-)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index c44b7844dc83..c672f34235e7 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -263,6 +263,7 @@ struct irq_affinity {
*/
struct irq_affinity_desc {
struct cpumask mask;
+ unsigned int is_managed : 1;
};
#if defined(CONFIG_SMP)
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index c0fe591b0dc9..45b68b4ea48b 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -289,6 +289,10 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
for (; curvec < nvecs; curvec++)
cpumask_copy(&masks[curvec].mask, irq_default_affinity);
+ /* Mark the managed interrupts */
+ for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++)
+ masks[i].is_managed = 1;
+
outnodemsk:
free_node_to_cpumask(node_to_cpumask);
return masks;
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index cb401d6c5040..ee062b7939d3 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -453,27 +453,30 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
struct module *owner)
{
struct irq_desc *desc;
- unsigned int flags;
int i;
/* Validate affinity mask(s) */
if (affinity) {
for (i = 0; i < cnt; i++) {
if (cpumask_empty(&affinity[i].mask))
return -EINVAL;
}
}
- flags = affinity ? IRQD_AFFINITY_MANAGED | IRQD_MANAGED_SHUTDOWN : 0;
-
for (i = 0; i < cnt; i++) {
const struct cpumask *mask = NULL;
+ unsigned int flags = 0;
if (affinity) {
- node = cpu_to_node(cpumask_first(&affinity->mask));
+ if (affinity->is_managed) {
+ flags = IRQD_AFFINITY_MANAGED |
+ IRQD_MANAGED_SHUTDOWN;
+ }
mask = &affinity->mask;
+ node = cpu_to_node(cpumask_first(mask));
affinity++;
}
+
desc = alloc_desc(start + i, node, flags, mask, owner);
if (!desc)
goto err;
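As a follow-up illustration, a sketch of how the resulting state can be inspected once the vectors are allocated. This is not part of the patch; foo_report_managed() is a made-up helper that assumes the MSI-X setup sketched above.

/*
 * Illustrative sketch, not part of this patch: foo_report_managed() is a
 * made-up helper assuming the setup sketched above.  After this change
 * only the queue vectors are flagged as managed; the management vector
 * stays re-targetable from both user and kernel space.
 */
#include <linux/irq.h>
#include <linux/pci.h>

static void foo_report_managed(struct pci_dev *pdev, unsigned int nvecs)
{
	unsigned int i;

	for (i = 0; i < nvecs; i++) {
		int virq = pci_irq_vector(pdev, i);	/* Linux irq number */
		struct irq_data *d;

		if (virq < 0)
			continue;

		d = irq_get_irq_data(virq);
		pr_info("vector %u (irq %d): %smanaged\n", i, virq,
			d && irqd_affinity_is_managed(d) ? "" : "not ");
	}
}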