 arch/mips/cavium-octeon/octeon-irq.c |  2
 drivers/clocksource/exynos_mct.c     | 12
 drivers/irqchip/irq-gic.c            |  8
 include/linux/interrupt.h            | 35
 include/linux/irq.h                  |  3
 kernel/irq/manage.c                  | 17
 6 files changed, 52 insertions, 25 deletions
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index c2bb4f896ce7..3aa5b46b2d40 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -635,7 +635,7 @@ static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
 		cpumask_clear(&new_affinity);
 		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
 	}
-	__irq_set_affinity_locked(data, &new_affinity);
+	irq_set_affinity_locked(data, &new_affinity, false);
 }
 
 static int octeon_irq_ciu_set_affinity(struct irq_data *data,
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index a6ee6d7cd63f..acf5a329d538 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -416,8 +416,6 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
 	evt->set_mode = exynos4_tick_set_mode;
 	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
 	evt->rating = 450;
-	clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
-					0xf, 0x7fffffff);
 
 	exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
 
@@ -430,9 +428,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
 				evt->irq);
 			return -EIO;
 		}
+		irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu));
 	} else {
 		enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
 	}
+	clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
+					0xf, 0x7fffffff);
 
 	return 0;
 }
@@ -450,7 +451,6 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
 					   unsigned long action, void *hcpu)
 {
 	struct mct_clock_event_device *mevt;
-	unsigned int cpu;
 
 	/*
 	 * Grab cpu pointer in each case to avoid spurious
@@ -461,12 +461,6 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
 		mevt = this_cpu_ptr(&percpu_mct_tick);
 		exynos4_local_timer_setup(&mevt->evt);
 		break;
-	case CPU_ONLINE:
-		cpu = (unsigned long)hcpu;
-		if (mct_int_type == MCT_INT_SPI)
-			irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu],
-						cpumask_of(cpu));
-		break;
 	case CPU_DYING:
 		mevt = this_cpu_ptr(&percpu_mct_tick);
 		exynos4_local_timer_stop(&mevt->evt);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 4300b6606f5e..57d165e026f4 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -246,10 +246,14 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 			    bool force)
 {
 	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
-	unsigned int shift = (gic_irq(d) % 4) * 8;
-	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+	unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
 	u32 val, mask, bit;
 
+	if (!force)
+		cpu = cpumask_any_and(mask_val, cpu_online_mask);
+	else
+		cpu = cpumask_first(mask_val);
+
 	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
 		return -EINVAL;
 
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index c7bfac1c4a7b..8834a7e5b944 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -203,7 +203,40 @@ static inline int check_wakeup_irqs(void) { return 0; }
 
 extern cpumask_var_t irq_default_affinity;
 
-extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
+/* Internal implementation. Use the helpers below */
+extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
+			      bool force);
+
+/**
+ * irq_set_affinity - Set the irq affinity of a given irq
+ * @irq:	Interrupt to set affinity
+ * @mask:	cpumask
+ *
+ * Fails if cpumask does not contain an online CPU
+ */
+static inline int
+irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+	return __irq_set_affinity(irq, cpumask, false);
+}
+
+/**
+ * irq_force_affinity - Force the irq affinity of a given irq
+ * @irq:	Interrupt to set affinity
+ * @mask:	cpumask
+ *
+ * Same as irq_set_affinity, but without checking the mask against
+ * online cpus.
+ *
+ * Solely for low level cpu hotplug code, where we need to make per
+ * cpu interrupts affine before the cpu becomes online.
+ */
+static inline int
+irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+	return __irq_set_affinity(irq, cpumask, true);
+}
+
 extern int irq_can_set_affinity(unsigned int irq);
 extern int irq_select_affinity(unsigned int irq);
diff --git a/include/linux/irq.h b/include/linux/irq.h
index d278838908cb..10a0b1ac4ea0 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -394,7 +394,8 @@ extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
 extern void irq_cpu_online(void);
 extern void irq_cpu_offline(void);
 
-extern int __irq_set_affinity_locked(struct irq_data *data,  const struct cpumask *cpumask);
+extern int irq_set_affinity_locked(struct irq_data *data,
+				   const struct cpumask *cpumask, bool force);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
 void irq_move_irq(struct irq_data *data);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 2486a4c1a710..d34131ca372b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -180,7 +180,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
 	int ret;
 
-	ret = chip->irq_set_affinity(data, mask, false);
+	ret = chip->irq_set_affinity(data, mask, force);
 	switch (ret) {
 	case IRQ_SET_MASK_OK:
 		cpumask_copy(data->affinity, mask);
@@ -192,7 +192,8 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	return ret;
 }
 
-int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
+int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
+			    bool force)
 {
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
 	struct irq_desc *desc = irq_data_to_desc(data);
@@ -202,7 +203,7 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 		return -EINVAL;
 
 	if (irq_can_move_pcntxt(data)) {
-		ret = irq_do_set_affinity(data, mask, false);
+		ret = irq_do_set_affinity(data, mask, force);
 	} else {
 		irqd_set_move_pending(data);
 		irq_copy_pending(desc, mask);
@@ -217,13 +218,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 	return ret;
 }
 
-/**
- *	irq_set_affinity - Set the irq affinity of a given irq
- *	@irq:		Interrupt to set affinity
- *	@mask:		cpumask
- *
- */
-int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
+int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
@@ -233,7 +228,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 		return -EINVAL;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
-	ret =  __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
+	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
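
For context, the sketch below is not part of the patch; it illustrates how a driver for a per-CPU timer wired to regular SPIs might use the new irq_force_affinity() helper from a CPU_STARTING notifier, mirroring what the exynos_mct hunk above does inside exynos4_local_timer_setup(). The driver name, the notifier and the my_timer_irqs[] array are hypothetical; only irq_force_affinity(), cpumask_of() and the CPU notifier machinery are real kernel interfaces of this era.

/*
 * Hypothetical example, not taken from any in-tree driver: bind a
 * per-CPU timer SPI to the CPU that is coming up.  CPU_STARTING runs
 * on the incoming CPU before it is set in cpu_online_mask, so plain
 * irq_set_affinity() would fail the online check here;
 * irq_force_affinity() skips it, as documented in interrupt.h above.
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>

static unsigned int my_timer_irqs[NR_CPUS];	/* assumed per-CPU SPI numbers */

static int my_timer_cpu_notify(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		irq_force_affinity(my_timer_irqs[cpu], cpumask_of(cpu));
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_timer_cpu_nb = {
	.notifier_call = my_timer_cpu_notify,
};

The notifier block would be registered with register_cpu_notifier() from the driver's init path; a separate CPU_ONLINE case is no longer needed, which is exactly why the exynos_mct notifier above drops it.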