| author | Linus Torvalds <torvalds@linux-foundation.org> 2021-02-22 10:53:05 -0800 |
|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> 2021-02-22 10:53:05 -0800 |
| commit | ae42c3173ba5cbe12fab0dad330e997c4ff9f68a (patch) |
| tree | 3900a15300c74af76828a9c5fe44d7b6531932ec /block |
| parent | d652ea30ba32db12fe8365182fad5ba2e7c22822 (diff) |
| parent | f9ab49184af093f0bf6c0e6583f5b25da2c09ff5 (diff) |
| download | linux-ae42c3173ba5cbe12fab0dad330e997c4ff9f68a.tar.bz2 |
Merge tag 'for-5.12/block-ipi-2021-02-21' of git://git.kernel.dk/linux-block
Pull block IPI updates from Jens Axboe:
 "Avoid IRQ locking for the block IPI handling (Sebastian Andrzej
  Siewior)"
* tag 'for-5.12/block-ipi-2021-02-21' of git://git.kernel.dk/linux-block:
  blk-mq: Use llist_head for blk_cpu_done
  blk-mq: Always complete remote completions requests in softirq
  smp: Process pending softirqs in flush_smp_call_function_from_idle()
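The core idea of the series is that the per-CPU `blk_cpu_done` list becomes a lock-free `llist_head`: `llist_add()` reports whether the list was empty beforehand, so only the first completion queued on a CPU has to raise `BLOCK_SOFTIRQ` or send the IPI, and neither producer nor consumer needs to disable interrupts. Below is a minimal, user-space sketch of that pattern; it uses C11 atomics and made-up names (`done_list_add`, `done_list_del_all`) in place of the kernel's `llist_add()`/`llist_del_all()`/`llist_reverse_order()`, so it illustrates the idea rather than the kernel code itself.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int tag;			/* stand-in for a struct request */
};

/* One of these is per-CPU in the kernel (blk_cpu_done). */
static _Atomic(struct node *) done_list;

/* Lock-less push; returns true if the list was empty, as llist_add() does. */
static bool done_list_add(struct node *n)
{
	struct node *first = atomic_load(&done_list);

	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak(&done_list, &first, n));

	return first == NULL;
}

/* Grab the whole list in one shot, as llist_del_all() does. */
static struct node *done_list_del_all(void)
{
	return atomic_exchange(&done_list, NULL);
}

/* The list comes out LIFO; reverse it so completions run in queueing order. */
static struct node *list_reverse(struct node *n)
{
	struct node *prev = NULL;

	while (n) {
		struct node *next = n->next;

		n->next = prev;
		prev = n;
		n = next;
	}
	return prev;
}

int main(void)
{
	/* Producer side: queue four "completions" on this CPU. */
	for (int i = 0; i < 4; i++) {
		struct node *n = calloc(1, sizeof(*n));

		if (!n)
			return 1;
		n->tag = i;
		if (done_list_add(n))
			printf("request %d was first: raise softirq / send IPI\n", i);
	}

	/* Consumer side (the softirq handler): drain and complete in order. */
	for (struct node *n = list_reverse(done_list_del_all()); n;) {
		struct node *next = n->next;

		printf("complete request %d\n", n->tag);
		free(n);
		n = next;
	}
	return 0;
}
```

Only the first push reports an empty list, which is exactly the condition the new `blk_mq_complete_send_ipi()` and `blk_mq_raise_softirq()` helpers test in the diff below.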
Diffstat (limited to 'block')
| -rw-r--r-- | block/blk-mq.c | 109 |

1 file changed, 41 insertions(+), 68 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f21d922ecfaf..d4d7c1caa439 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -41,7 +41,7 @@
 #include "blk-mq-sched.h"
 #include "blk-rq-qos.h"
 
-static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 
 static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
@@ -567,80 +567,29 @@ void blk_mq_end_request(struct request *rq, blk_status_t error)
 }
 EXPORT_SYMBOL(blk_mq_end_request);
 
-/*
- * Softirq action handler - move entries to local list and loop over them
- * while passing them to the queue registered handler.
- */
-static __latent_entropy void blk_done_softirq(struct softirq_action *h)
+static void blk_complete_reqs(struct llist_head *list)
 {
-	struct list_head *cpu_list, local_list;
-
-	local_irq_disable();
-	cpu_list = this_cpu_ptr(&blk_cpu_done);
-	list_replace_init(cpu_list, &local_list);
-	local_irq_enable();
-
-	while (!list_empty(&local_list)) {
-		struct request *rq;
+	struct llist_node *entry = llist_reverse_order(llist_del_all(list));
+	struct request *rq, *next;
 
-		rq = list_entry(local_list.next, struct request, ipi_list);
-		list_del_init(&rq->ipi_list);
+	llist_for_each_entry_safe(rq, next, entry, ipi_list)
 		rq->q->mq_ops->complete(rq);
-	}
 }
 
-static void blk_mq_trigger_softirq(struct request *rq)
+static __latent_entropy void blk_done_softirq(struct softirq_action *h)
 {
-	struct list_head *list;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	list = this_cpu_ptr(&blk_cpu_done);
-	list_add_tail(&rq->ipi_list, list);
-
-	/*
-	 * If the list only contains our just added request, signal a raise of
-	 * the softirq.  If there are already entries there, someone already
-	 * raised the irq but it hasn't run yet.
-	 */
-	if (list->next == &rq->ipi_list)
-		raise_softirq_irqoff(BLOCK_SOFTIRQ);
-	local_irq_restore(flags);
+	blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
 }
 
 static int blk_softirq_cpu_dead(unsigned int cpu)
 {
-	/*
-	 * If a CPU goes away, splice its entries to the current CPU
-	 * and trigger a run of the softirq
-	 */
-	local_irq_disable();
-	list_splice_init(&per_cpu(blk_cpu_done, cpu),
-			 this_cpu_ptr(&blk_cpu_done));
-	raise_softirq_irqoff(BLOCK_SOFTIRQ);
-	local_irq_enable();
-
+	blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
 	return 0;
 }
 
-
 static void __blk_mq_complete_request_remote(void *data)
 {
-	struct request *rq = data;
-
-	/*
-	 * For most of single queue controllers, there is only one irq vector
-	 * for handling I/O completion, and the only irq's affinity is set
-	 * to all possible CPUs.  On most of ARCHs, this affinity means the irq
-	 * is handled on one specific CPU.
-	 *
-	 * So complete I/O requests in softirq context in case of single queue
-	 * devices to avoid degrading I/O performance due to irqsoff latency.
-	 */
-	if (rq->q->nr_hw_queues == 1)
-		blk_mq_trigger_softirq(rq);
-	else
-		rq->q->mq_ops->complete(rq);
+	__raise_softirq_irqoff(BLOCK_SOFTIRQ);
 }
 
 static inline bool blk_mq_complete_need_ipi(struct request *rq)
@@ -669,6 +618,30 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
 	return cpu_online(rq->mq_ctx->cpu);
 }
 
+static void blk_mq_complete_send_ipi(struct request *rq)
+{
+	struct llist_head *list;
+	unsigned int cpu;
+
+	cpu = rq->mq_ctx->cpu;
+	list = &per_cpu(blk_cpu_done, cpu);
+	if (llist_add(&rq->ipi_list, list)) {
+		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
+		smp_call_function_single_async(cpu, &rq->csd);
+	}
+}
+
+static void blk_mq_raise_softirq(struct request *rq)
+{
+	struct llist_head *list;
+
+	preempt_disable();
+	list = this_cpu_ptr(&blk_cpu_done);
+	if (llist_add(&rq->ipi_list, list))
+		raise_softirq(BLOCK_SOFTIRQ);
+	preempt_enable();
+}
+
 bool blk_mq_complete_request_remote(struct request *rq)
 {
 	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
@@ -681,15 +654,15 @@ bool blk_mq_complete_request_remote(struct request *rq)
 		return false;
 
 	if (blk_mq_complete_need_ipi(rq)) {
-		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
-		smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
-	} else {
-		if (rq->q->nr_hw_queues > 1)
-			return false;
-		blk_mq_trigger_softirq(rq);
+		blk_mq_complete_send_ipi(rq);
+		return true;
 	}
 
-	return true;
+	if (rq->q->nr_hw_queues == 1) {
+		blk_mq_raise_softirq(rq);
+		return true;
+	}
+	return false;
 }
 EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
 
@@ -3957,7 +3930,7 @@ static int __init blk_mq_init(void)
 	int i;
 
 	for_each_possible_cpu(i)
-		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+		init_llist_head(&per_cpu(blk_cpu_done, i));
 
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
 	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
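One behavioural detail from the last hunk of `blk_mq_complete_request_remote()`: it now returns true only when the completion has actually been handed off, either via IPI to the submitting CPU or via the local `BLOCK_SOFTIRQ` for single-hw-queue devices; it returns false when the caller should complete the request in the current context. For orientation, the caller-side contract looks roughly like the wrapper below (a sketch based on how `blk_mq_complete_request()` is defined in this kernel version; it is not part of this diff).

```c
/*
 * Sketch of the caller side (not part of this diff): if the completion
 * could not be punted to an IPI or to the local softirq, finish it here.
 */
void blk_mq_complete_request(struct request *rq)
{
	if (!blk_mq_complete_request_remote(rq))
		rq->q->mq_ops->complete(rq);
}
```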