From 27fae429acee1e9418059e7fa545438075af5256 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 29 Nov 2018 12:35:16 -0700 Subject: sbitmap: don't loop for find_next_zero_bit() for !round_robin If we aren't forced to do round robin tag allocation, just use the allocation hint to find the index for the tag word; don't use it for the offset inside the word. This avoids a potential extra round trip in the bit looping, and since we're fetching this cacheline anyway, we may as well check the whole word from the start. Reviewed-by: Omar Sandoval Signed-off-by: Jens Axboe --- lib/sbitmap.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) (limited to 'lib') diff --git a/lib/sbitmap.c b/lib/sbitmap.c index fdd1b8aa8ac6..45cab6bbc1c7 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -118,10 +118,19 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin) index = SB_NR_TO_INDEX(sb, alloc_hint); + /* + * Unless we're doing round robin tag allocation, just use the + * alloc_hint to find the right word index. No point in looping + * twice in find_next_zero_bit() for that case. + */ + if (round_robin) + alloc_hint = SB_NR_TO_BIT(sb, alloc_hint); + else + alloc_hint = 0; + for (i = 0; i < sb->map_nr; i++) { nr = __sbitmap_get_word(&sb->map[index].word, - sb->map[index].depth, - SB_NR_TO_BIT(sb, alloc_hint), + sb->map[index].depth, alloc_hint, !round_robin); if (nr != -1) { nr += index << sb->shift; @@ -129,13 +138,9 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin) } /* Jump to next index. */ - index++; - alloc_hint = index << sb->shift; - - if (index >= sb->map_nr) { + alloc_hint = 0; + if (++index >= sb->map_nr) index = 0; - alloc_hint = 0; - } } return nr; -- cgit v1.2.3 From ea86ea2cdced20057da4d2c32965c1219c238197 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 30 Nov 2018 13:18:06 -0700 Subject: sbitmap: amortize cost of clearing bits sbitmap maintains a set of words that we use to set and clear bits, with each bit representing a tag for blk-mq. Even though we spread the bits out and maintain a hint cache, a bit will always end up being cleared in the exact same spot where it was allocated. This introduces batched clearing of bits. Instead of clearing a given bit, the same bit is set in a cleared/free mask. If we fail allocating a bit from a given word, then we check the free mask and batch move those cleared bits at that time. This trades 64 atomic bitops for 2 cmpxchg(). In a threaded poll test case, half the overhead of getting and clearing tags is removed with this change. On another poll test case with a single thread, performance is unchanged. Reviewed-by: Omar Sandoval Signed-off-by: Jens Axboe --- include/linux/sbitmap.h | 33 ++++++++++++++++---- lib/sbitmap.c | 81 ++++++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 100 insertions(+), 14 deletions(-) (limited to 'lib') diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index 804a50983ec5..81359d45751e 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -30,14 +30,24 @@ struct seq_file; */ struct sbitmap_word { /** - * @word: The bitmap word itself. + * @depth: Number of bits being used in @word/@cleared */ - unsigned long word; + unsigned long depth; /** - * @depth: Number of bits being used in @word.
+ * @word: word holding free bits */ - unsigned long depth; + unsigned long word ____cacheline_aligned_in_smp; + + /** + * @cleared: word holding cleared bits + */ + unsigned long cleared ____cacheline_aligned_in_smp; + + /** + * @swap_lock: Held while swapping word <-> cleared + */ + spinlock_t swap_lock; } ____cacheline_aligned_in_smp; /** @@ -310,6 +320,19 @@ static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr) clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); } +/* + * This one is special, since it doesn't actually clear the bit, rather it + * sets the corresponding bit in the ->cleared mask instead. Paired with + * the caller doing sbitmap_batch_clear() if a given index is full, which + * will clear the previously freed entries in the corresponding ->word. + */ +static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr) +{ + unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared; + + set_bit(SB_NR_TO_BIT(sb, bitnr), addr); +} + static inline void sbitmap_clear_bit_unlock(struct sbitmap *sb, unsigned int bitnr) { @@ -321,8 +344,6 @@ static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr) return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); } -unsigned int sbitmap_weight(const struct sbitmap *sb); - /** * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file. * @sb: Bitmap to show. diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 45cab6bbc1c7..f99382e59314 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -59,6 +59,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, for (i = 0; i < sb->map_nr; i++) { sb->map[i].depth = min(depth, bits_per_word); depth -= sb->map[i].depth; + spin_lock_init(&sb->map[i].swap_lock); } return 0; } @@ -111,6 +112,57 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth, return nr; } +/* + * See if we have deferred clears that we can batch move + */ +static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index) +{ + unsigned long mask, val; + bool ret = false; + + spin_lock(&sb->map[index].swap_lock); + + if (!sb->map[index].cleared) + goto out_unlock; + + /* + * First get a stable cleared mask, setting the old mask to 0. 
+ */ + do { + mask = sb->map[index].cleared; + } while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask); + + /* + * Now clear the masked bits in our free word + */ + do { + val = sb->map[index].word; + } while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val); + + ret = true; +out_unlock: + spin_unlock(&sb->map[index].swap_lock); + return ret; +} + +static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index, + unsigned int alloc_hint, bool round_robin) +{ + int nr; + + do { + nr = __sbitmap_get_word(&sb->map[index].word, + sb->map[index].depth, alloc_hint, + !round_robin); + if (nr != -1) + break; + if (!sbitmap_deferred_clear(sb, index)) + break; + } while (1); + + return nr; +} + int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin) { unsigned int i, index; @@ -129,9 +181,8 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin) alloc_hint = 0; for (i = 0; i < sb->map_nr; i++) { - nr = __sbitmap_get_word(&sb->map[index].word, - sb->map[index].depth, alloc_hint, - !round_robin); + nr = sbitmap_find_bit_in_index(sb, index, alloc_hint, + round_robin); if (nr != -1) { nr += index << sb->shift; break; @@ -206,23 +257,36 @@ bool sbitmap_any_bit_clear(const struct sbitmap *sb) } EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear); -unsigned int sbitmap_weight(const struct sbitmap *sb) +static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set) { unsigned int i, weight = 0; for (i = 0; i < sb->map_nr; i++) { const struct sbitmap_word *word = &sb->map[i]; - weight += bitmap_weight(&word->word, word->depth); + if (set) + weight += bitmap_weight(&word->word, word->depth); + else + weight += bitmap_weight(&word->cleared, word->depth); } return weight; } -EXPORT_SYMBOL_GPL(sbitmap_weight); + +static unsigned int sbitmap_weight(const struct sbitmap *sb) +{ + return __sbitmap_weight(sb, true); +} + +static unsigned int sbitmap_cleared(const struct sbitmap *sb) +{ + return __sbitmap_weight(sb, false); +} void sbitmap_show(struct sbitmap *sb, struct seq_file *m) { seq_printf(m, "depth=%u\n", sb->depth); - seq_printf(m, "busy=%u\n", sbitmap_weight(sb)); + seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb)); + seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb)); seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift); seq_printf(m, "map_nr=%u\n", sb->map_nr); } @@ -514,7 +578,8 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up); void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, unsigned int cpu) { - sbitmap_clear_bit_unlock(&sbq->sb, nr); + sbitmap_deferred_clear_bit(&sbq->sb, nr); + /* * Pairs with the memory barrier in set_current_state() to ensure the * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker -- cgit v1.2.3 From 5d2ee7122c73be6a3b6bfe90d237e8aed737cfaa Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 29 Nov 2018 17:36:41 -0700 Subject: sbitmap: optimize wakeup check Even if we have no waiters on any of the sbitmap_queue wait states, we still have to loop every entry to check. We do this for every IO, so the cost adds up. Shift a bit of the cost to the slow path, when we actually have waiters. Wrap prepare_to_wait_exclusive() and finish_wait(), so we can maintain an internal count of how many are currently active. Then we can simply check this count in sbq_wake_ptr() and not have to loop if we don't have any sleepers. Convert the two users of sbitmap with waiting, blk-mq-tag and iSCSI. 
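For illustration, the consumer-side pattern after this conversion looks roughly like the sketch below (example_wait_for_tag() is a hypothetical helper modeled on the iSCSI change in this patch, with signal handling elided): the prepare/finish pair keeps sbq->ws_active accurate, which is what lets sbq_wake_ptr() return early when nobody is sleeping.

static int example_wait_for_tag(struct sbitmap_queue *sbq, unsigned int *cpu)
{
	struct sbq_wait_state *ws = &sbq->ws[0];
	DEFINE_SBQ_WAIT(wait);
	int tag;

	for (;;) {
		/* first use accounts this waiter in sbq->ws_active */
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
		tag = sbitmap_queue_get(sbq, cpu);
		if (tag >= 0)
			break;
		schedule();
	}
	/* drops the ws_active accounting again */
	sbitmap_finish_wait(sbq, ws, &wait);
	return tag;
}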
Reviewed-by: Omar Sandoval Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 11 +++++------ drivers/target/iscsi/iscsi_target_util.c | 12 ++++++----- include/linux/sbitmap.h | 34 ++++++++++++++++++++++++++++++++ lib/sbitmap.c | 28 ++++++++++++++++++++++++++ 4 files changed, 74 insertions(+), 11 deletions(-) (limited to 'lib') diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 87bc5df72d48..2089c6c62f44 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -110,7 +110,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) struct blk_mq_tags *tags = blk_mq_tags_from_data(data); struct sbitmap_queue *bt; struct sbq_wait_state *ws; - DEFINE_WAIT(wait); + DEFINE_SBQ_WAIT(wait); unsigned int tag_offset; bool drop_ctx; int tag; @@ -154,8 +154,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) if (tag != -1) break; - prepare_to_wait_exclusive(&ws->wait, &wait, - TASK_UNINTERRUPTIBLE); + sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE); tag = __blk_mq_get_tag(data, bt); if (tag != -1) @@ -167,6 +166,8 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) bt_prev = bt; io_schedule(); + sbitmap_finish_wait(bt, ws, &wait); + data->ctx = blk_mq_get_ctx(data->q); data->hctx = blk_mq_map_queue(data->q, data->cmd_flags, data->ctx->cpu); @@ -176,8 +177,6 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) else bt = &tags->bitmap_tags; - finish_wait(&ws->wait, &wait); - /* * If destination hw queue is changed, fake wake up on * previous queue for compensating the wake up miss, so @@ -192,7 +191,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) if (drop_ctx && data->ctx) blk_mq_put_ctx(data->ctx); - finish_wait(&ws->wait, &wait); + sbitmap_finish_wait(bt, ws, &wait); found_tag: return tag + tag_offset; diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 36b742932c72..86987da86dd6 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -150,24 +150,26 @@ void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd) static int iscsit_wait_for_tag(struct se_session *se_sess, int state, int *cpup) { int tag = -1; - DEFINE_WAIT(wait); + DEFINE_SBQ_WAIT(wait); struct sbq_wait_state *ws; + struct sbitmap_queue *sbq; if (state == TASK_RUNNING) return tag; - ws = &se_sess->sess_tag_pool.ws[0]; + sbq = &se_sess->sess_tag_pool; + ws = &sbq->ws[0]; for (;;) { - prepare_to_wait_exclusive(&ws->wait, &wait, state); + sbitmap_prepare_to_wait(sbq, ws, &wait, state); if (signal_pending_state(state, current)) break; - tag = sbitmap_queue_get(&se_sess->sess_tag_pool, cpup); + tag = sbitmap_queue_get(sbq, cpup); if (tag >= 0) break; schedule(); } - finish_wait(&ws->wait, &wait); + sbitmap_finish_wait(sbq, ws, &wait); return tag; } diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index 81359d45751e..92806a2dbab7 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -135,6 +135,11 @@ struct sbitmap_queue { */ struct sbq_wait_state *ws; + /* + * @ws_active: count of currently active ws waitqueues + */ + atomic_t ws_active; + /** * @round_robin: Allocate bits in strict round-robin order. 
*/ @@ -552,4 +557,33 @@ void sbitmap_queue_wake_up(struct sbitmap_queue *sbq); */ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m); +struct sbq_wait { + int accounted; + struct wait_queue_entry wait; +}; + +#define DEFINE_SBQ_WAIT(name) \ + struct sbq_wait name = { \ + .accounted = 0, \ + .wait = { \ + .private = current, \ + .func = autoremove_wake_function, \ + .entry = LIST_HEAD_INIT((name).wait.entry), \ + } \ + } + +/* + * Wrapper around prepare_to_wait_exclusive(), which maintains some extra + * internal state. + */ +void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq, + struct sbq_wait_state *ws, + struct sbq_wait *sbq_wait, int state); + +/* + * Must be paired with sbitmap_prepare_to_wait(). + */ +void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws, + struct sbq_wait *sbq_wait); + #endif /* __LINUX_SCALE_BITMAP_H */ diff --git a/lib/sbitmap.c b/lib/sbitmap.c index f99382e59314..a89fbe7cf6ca 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -394,6 +394,7 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, sbq->min_shallow_depth = UINT_MAX; sbq->wake_batch = sbq_calc_wake_batch(sbq, depth); atomic_set(&sbq->wake_index, 0); + atomic_set(&sbq->ws_active, 0); sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node); if (!sbq->ws) { @@ -509,6 +510,9 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq) { int i, wake_index; + if (!atomic_read(&sbq->ws_active)) + return NULL; + wake_index = atomic_read(&sbq->wake_index); for (i = 0; i < SBQ_WAIT_QUEUES; i++) { struct sbq_wait_state *ws = &sbq->ws[wake_index]; @@ -634,6 +638,7 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m) seq_printf(m, "wake_batch=%u\n", sbq->wake_batch); seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index)); + seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active)); seq_puts(m, "ws={\n"); for (i = 0; i < SBQ_WAIT_QUEUES; i++) { @@ -649,3 +654,26 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m) seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth); } EXPORT_SYMBOL_GPL(sbitmap_queue_show); + +void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq, + struct sbq_wait_state *ws, + struct sbq_wait *sbq_wait, int state) +{ + if (!sbq_wait->accounted) { + atomic_inc(&sbq->ws_active); + sbq_wait->accounted = 1; + } + prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state); +} +EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait); + +void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws, + struct sbq_wait *sbq_wait) +{ + finish_wait(&ws->wait, &sbq_wait->wait); + if (sbq_wait->accounted) { + atomic_dec(&sbq->ws_active); + sbq_wait->accounted = 0; + } +} +EXPORT_SYMBOL_GPL(sbitmap_finish_wait); -- cgit v1.2.3 From 58ab5e32e6fd83e33943614e7257f2ac5823824a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sun, 9 Dec 2018 17:43:20 -0700 Subject: sbitmap: silence bogus lockdep IRQ warning Ming reports that lockdep spews the following trace. What this essentially says is that the sbitmap swap_lock was used inconsistently in IRQ enabled and disabled context, and that is usually indicative of a bug that will cause a deadlock. For this case, it's a false positive. The swap_lock is used from process context only, when we swap the bits in the word and cleared mask. We also end up doing that when we are getting a driver tag, from the blk_mq_mark_tag_wait(), and from there we hold the waitqueue lock with IRQs disabled. 
However, this isn't from an actual IRQ, it's still process context. In lieu of a better way to fix this, simply always disable interrupts when grabbing the swap_lock if lockdep is enabled. [ 100.967642] ================start test sanity/001================ [ 101.238280] null: module loaded [ 106.093735] [ 106.094012] ===================================================== [ 106.094854] WARNING: SOFTIRQ-safe -> SOFTIRQ-unsafe lock order detected [ 106.095759] 4.20.0-rc3_5d2ee7122c73_for-next+ #1 Not tainted [ 106.096551] ----------------------------------------------------- [ 106.097386] fio/1043 [HC0[0]:SC0[0]:HE0:SE1] is trying to acquire: [ 106.098231] 000000004c43fa71 (&(&sb->map[i].swap_lock)->rlock){+.+.}, at: sbitmap_get+0xd5/0x22c [ 106.099431] [ 106.099431] and this task is already holding: [ 106.100229] 000000007eec8b2f (&(&hctx->dispatch_wait_lock)->rlock){....}, at: blk_mq_dispatch_rq_list+0x4c1/0xd7c [ 106.101630] which would create a new lock dependency: [ 106.102326] (&(&hctx->dispatch_wait_lock)->rlock){....} -> (&(&sb->map[i].swap_lock)->rlock){+.+.} [ 106.103553] [ 106.103553] but this new dependency connects a SOFTIRQ-irq-safe lock: [ 106.104580] (&sbq->ws[i].wait){..-.} [ 106.104582] [ 106.104582] ... which became SOFTIRQ-irq-safe at: [ 106.105751] _raw_spin_lock_irqsave+0x4b/0x82 [ 106.106284] __wake_up_common_lock+0x119/0x1b9 [ 106.106825] sbitmap_queue_wake_up+0x33f/0x383 [ 106.107456] sbitmap_queue_clear+0x4c/0x9a [ 106.108046] __blk_mq_free_request+0x188/0x1d3 [ 106.108581] blk_mq_free_request+0x23b/0x26b [ 106.109102] scsi_end_request+0x345/0x5d7 [ 106.109587] scsi_io_completion+0x4b5/0x8f0 [ 106.110099] scsi_finish_command+0x412/0x456 [ 106.110615] scsi_softirq_done+0x23f/0x29b [ 106.111115] blk_done_softirq+0x2a7/0x2e6 [ 106.111608] __do_softirq+0x360/0x6ad [ 106.112062] run_ksoftirqd+0x2f/0x5b [ 106.112499] smpboot_thread_fn+0x3a5/0x3db [ 106.113000] kthread+0x1d4/0x1e4 [ 106.113457] ret_from_fork+0x3a/0x50 [ 106.113969] [ 106.113969] to a SOFTIRQ-irq-unsafe lock: [ 106.114672] (&(&sb->map[i].swap_lock)->rlock){+.+.} [ 106.114674] [ 106.114674] ... which became SOFTIRQ-irq-unsafe at: [ 106.116000] ... 
[ 106.116003] _raw_spin_lock+0x33/0x64 [ 106.116676] sbitmap_get+0xd5/0x22c [ 106.117134] __sbitmap_queue_get+0xe8/0x177 [ 106.117731] __blk_mq_get_tag+0x1e6/0x22d [ 106.118286] blk_mq_get_tag+0x1db/0x6e4 [ 106.118756] blk_mq_get_driver_tag+0x161/0x258 [ 106.119383] blk_mq_dispatch_rq_list+0x28e/0xd7c [ 106.120043] blk_mq_do_dispatch_sched+0x23a/0x287 [ 106.120607] blk_mq_sched_dispatch_requests+0x379/0x3fc [ 106.121234] __blk_mq_run_hw_queue+0x137/0x17e [ 106.121781] __blk_mq_delay_run_hw_queue+0x80/0x25f [ 106.122366] blk_mq_run_hw_queue+0x151/0x187 [ 106.122887] blk_mq_sched_insert_requests+0x13f/0x175 [ 106.123492] blk_mq_flush_plug_list+0x7d6/0x81b [ 106.124042] blk_flush_plug_list+0x392/0x3d7 [ 106.124557] blk_finish_plug+0x37/0x4f [ 106.125019] read_pages+0x3ef/0x430 [ 106.125446] __do_page_cache_readahead+0x18e/0x2fc [ 106.126027] force_page_cache_readahead+0x121/0x133 [ 106.126621] page_cache_sync_readahead+0x35f/0x3bb [ 106.127229] generic_file_buffered_read+0x410/0x1860 [ 106.127932] __vfs_read+0x319/0x38f [ 106.128415] vfs_read+0xd2/0x19a [ 106.128817] ksys_read+0xb9/0x135 [ 106.129225] do_syscall_64+0x140/0x385 [ 106.129684] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 106.130292] [ 106.130292] other info that might help us debug this: [ 106.130292] [ 106.131226] Chain exists of: [ 106.131226] &sbq->ws[i].wait --> &(&hctx->dispatch_wait_lock)->rlock --> &(&sb->map[i].swap_lock)->rlock [ 106.131226] [ 106.132865] Possible interrupt unsafe locking scenario: [ 106.132865] [ 106.133659] CPU0 CPU1 [ 106.134194] ---- ---- [ 106.134733] lock(&(&sb->map[i].swap_lock)->rlock); [ 106.135318] local_irq_disable(); [ 106.136014] lock(&sbq->ws[i].wait); [ 106.136747] lock(&(&hctx->dispatch_wait_lock)->rlock); [ 106.137742] [ 106.138110] lock(&sbq->ws[i].wait); [ 106.138625] [ 106.138625] *** DEADLOCK *** [ 106.138625] [ 106.139430] 3 locks held by fio/1043: [ 106.139947] #0: 0000000076ff0fd9 (rcu_read_lock){....}, at: hctx_lock+0x29/0xe8 [ 106.140813] #1: 000000002feb1016 (&sbq->ws[i].wait){..-.}, at: blk_mq_dispatch_rq_list+0x4ad/0xd7c [ 106.141877] #2: 000000007eec8b2f (&(&hctx->dispatch_wait_lock)->rlock){....}, at: blk_mq_dispatch_rq_list+0x4c1/0xd7c [ 106.143267] [ 106.143267] the dependencies between SOFTIRQ-irq-safe lock and the holding lock: [ 106.144351] -> (&sbq->ws[i].wait){..-.} ops: 82 { [ 106.144926] IN-SOFTIRQ-W at: [ 106.145314] _raw_spin_lock_irqsave+0x4b/0x82 [ 106.146042] __wake_up_common_lock+0x119/0x1b9 [ 106.146785] sbitmap_queue_wake_up+0x33f/0x383 [ 106.147567] sbitmap_queue_clear+0x4c/0x9a [ 106.148379] __blk_mq_free_request+0x188/0x1d3 [ 106.149148] blk_mq_free_request+0x23b/0x26b [ 106.149864] scsi_end_request+0x345/0x5d7 [ 106.150546] scsi_io_completion+0x4b5/0x8f0 [ 106.151367] scsi_finish_command+0x412/0x456 [ 106.152157] scsi_softirq_done+0x23f/0x29b [ 106.152855] blk_done_softirq+0x2a7/0x2e6 [ 106.153537] __do_softirq+0x360/0x6ad [ 106.154280] run_ksoftirqd+0x2f/0x5b [ 106.155020] smpboot_thread_fn+0x3a5/0x3db [ 106.155828] kthread+0x1d4/0x1e4 [ 106.156526] ret_from_fork+0x3a/0x50 [ 106.157267] INITIAL USE at: [ 106.157713] _raw_spin_lock_irqsave+0x4b/0x82 [ 106.158542] prepare_to_wait_exclusive+0xa8/0x215 [ 106.159421] blk_mq_get_tag+0x34f/0x6e4 [ 106.160186] blk_mq_get_request+0x48e/0xaef [ 106.160997] blk_mq_make_request+0x27e/0xbd2 [ 106.161828] generic_make_request+0x4d1/0x873 [ 106.162661] submit_bio+0x20c/0x253 [ 106.163379] mpage_bio_submit+0x44/0x4b [ 106.164142] mpage_readpages+0x3c2/0x407 [ 106.164919] read_pages+0x13a/0x430 [ 106.165633] 
__do_page_cache_readahead+0x18e/0x2fc [ 106.166530] force_page_cache_readahead+0x121/0x133 [ 106.167439] page_cache_sync_readahead+0x35f/0x3bb [ 106.168337] generic_file_buffered_read+0x410/0x1860 [ 106.169255] __vfs_read+0x319/0x38f [ 106.169977] vfs_read+0xd2/0x19a [ 106.170662] ksys_read+0xb9/0x135 [ 106.171356] do_syscall_64+0x140/0x385 [ 106.172120] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 106.173051] } [ 106.173308] ... key at: [] __key.26481+0x0/0x40 [ 106.174219] ... acquired at: [ 106.174646] _raw_spin_lock+0x33/0x64 [ 106.175183] blk_mq_dispatch_rq_list+0x4c1/0xd7c [ 106.175843] blk_mq_do_dispatch_sched+0x23a/0x287 [ 106.176518] blk_mq_sched_dispatch_requests+0x379/0x3fc [ 106.177262] __blk_mq_run_hw_queue+0x137/0x17e [ 106.177900] __blk_mq_delay_run_hw_queue+0x80/0x25f [ 106.178591] blk_mq_run_hw_queue+0x151/0x187 [ 106.179207] blk_mq_sched_insert_requests+0x13f/0x175 [ 106.179926] blk_mq_flush_plug_list+0x7d6/0x81b [ 106.180571] blk_flush_plug_list+0x392/0x3d7 [ 106.181187] blk_finish_plug+0x37/0x4f [ 106.181737] __se_sys_io_submit+0x171/0x304 [ 106.182346] do_syscall_64+0x140/0x385 [ 106.182895] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 106.183607] [ 106.183830] -> (&(&hctx->dispatch_wait_lock)->rlock){....} ops: 1 { [ 106.184691] INITIAL USE at: [ 106.185119] _raw_spin_lock+0x33/0x64 [ 106.185838] blk_mq_dispatch_rq_list+0x4c1/0xd7c [ 106.186697] blk_mq_do_dispatch_sched+0x23a/0x287 [ 106.187551] blk_mq_sched_dispatch_requests+0x379/0x3fc [ 106.188481] __blk_mq_run_hw_queue+0x137/0x17e [ 106.189307] __blk_mq_delay_run_hw_queue+0x80/0x25f [ 106.190189] blk_mq_run_hw_queue+0x151/0x187 [ 106.190989] blk_mq_sched_insert_requests+0x13f/0x175 [ 106.191902] blk_mq_flush_plug_list+0x7d6/0x81b [ 106.192739] blk_flush_plug_list+0x392/0x3d7 [ 106.193535] blk_finish_plug+0x37/0x4f [ 106.194269] __se_sys_io_submit+0x171/0x304 [ 106.195059] do_syscall_64+0x140/0x385 [ 106.195794] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 106.196705] } [ 106.196950] ... key at: [] __key.51231+0x0/0x40 [ 106.197853] ... 
acquired at: [ 106.198270] lock_acquire+0x280/0x2f3 [ 106.198806] _raw_spin_lock+0x33/0x64 [ 106.199337] sbitmap_get+0xd5/0x22c [ 106.199850] __sbitmap_queue_get+0xe8/0x177 [ 106.200450] __blk_mq_get_tag+0x1e6/0x22d [ 106.201035] blk_mq_get_tag+0x1db/0x6e4 [ 106.201589] blk_mq_get_driver_tag+0x161/0x258 [ 106.202237] blk_mq_dispatch_rq_list+0x5b9/0xd7c [ 106.202902] blk_mq_do_dispatch_sched+0x23a/0x287 [ 106.203572] blk_mq_sched_dispatch_requests+0x379/0x3fc [ 106.204316] __blk_mq_run_hw_queue+0x137/0x17e [ 106.204956] __blk_mq_delay_run_hw_queue+0x80/0x25f [ 106.205649] blk_mq_run_hw_queue+0x151/0x187 [ 106.206269] blk_mq_sched_insert_requests+0x13f/0x175 [ 106.206997] blk_mq_flush_plug_list+0x7d6/0x81b [ 106.207644] blk_flush_plug_list+0x392/0x3d7 [ 106.208264] blk_finish_plug+0x37/0x4f [ 106.208814] __se_sys_io_submit+0x171/0x304 [ 106.209415] do_syscall_64+0x140/0x385 [ 106.209965] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 106.210684] [ 106.210904] [ 106.210904] the dependencies between the lock to be acquired [ 106.210905] and SOFTIRQ-irq-unsafe lock: [ 106.212541] -> (&(&sb->map[i].swap_lock)->rlock){+.+.} ops: 1969 { [ 106.213393] HARDIRQ-ON-W at: [ 106.213840] _raw_spin_lock+0x33/0x64 [ 106.214570] sbitmap_get+0xd5/0x22c [ 106.215282] __sbitmap_queue_get+0xe8/0x177 [ 106.216086] __blk_mq_get_tag+0x1e6/0x22d [ 106.216876] blk_mq_get_tag+0x1db/0x6e4 [ 106.217627] blk_mq_get_driver_tag+0x161/0x258 [ 106.218465] blk_mq_dispatch_rq_list+0x28e/0xd7c [ 106.219326] blk_mq_do_dispatch_sched+0x23a/0x287 [ 106.220198] blk_mq_sched_dispatch_requests+0x379/0x3fc [ 106.221138] __blk_mq_run_hw_queue+0x137/0x17e [ 106.221975] __blk_mq_delay_run_hw_queue+0x80/0x25f [ 106.222874] blk_mq_run_hw_queue+0x151/0x187 [ 106.223686] blk_mq_sched_insert_requests+0x13f/0x175 [ 106.224597] blk_mq_flush_plug_list+0x7d6/0x81b [ 106.225444] blk_flush_plug_list+0x392/0x3d7 [ 106.226255] blk_finish_plug+0x37/0x4f [ 106.227006] read_pages+0x3ef/0x430 [ 106.227717] __do_page_cache_readahead+0x18e/0x2fc [ 106.228595] force_page_cache_readahead+0x121/0x133 [ 106.229491] page_cache_sync_readahead+0x35f/0x3bb [ 106.230373] generic_file_buffered_read+0x410/0x1860 [ 106.231277] __vfs_read+0x319/0x38f [ 106.231986] vfs_read+0xd2/0x19a [ 106.232666] ksys_read+0xb9/0x135 [ 106.233350] do_syscall_64+0x140/0x385 [ 106.234097] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 106.235012] SOFTIRQ-ON-W at: [ 106.235460] _raw_spin_lock+0x33/0x64 [ 106.236195] sbitmap_get+0xd5/0x22c [ 106.236913] __sbitmap_queue_get+0xe8/0x177 [ 106.237715] __blk_mq_get_tag+0x1e6/0x22d [ 106.238488] blk_mq_get_tag+0x1db/0x6e4 [ 106.239244] blk_mq_get_driver_tag+0x161/0x258 [ 106.240079] blk_mq_dispatch_rq_list+0x28e/0xd7c [ 106.240937] blk_mq_do_dispatch_sched+0x23a/0x287 [ 106.241806] blk_mq_sched_dispatch_requests+0x379/0x3fc [ 106.242751] __blk_mq_run_hw_queue+0x137/0x17e [ 106.243579] __blk_mq_delay_run_hw_queue+0x80/0x25f [ 106.244469] blk_mq_run_hw_queue+0x151/0x187 [ 106.245277] blk_mq_sched_insert_requests+0x13f/0x175 [ 106.246191] blk_mq_flush_plug_list+0x7d6/0x81b [ 106.247044] blk_flush_plug_list+0x392/0x3d7 [ 106.247859] blk_finish_plug+0x37/0x4f [ 106.248749] read_pages+0x3ef/0x430 [ 106.249463] __do_page_cache_readahead+0x18e/0x2fc [ 106.250357] force_page_cache_readahead+0x121/0x133 [ 106.251263] page_cache_sync_readahead+0x35f/0x3bb [ 106.252157] generic_file_buffered_read+0x410/0x1860 [ 106.253084] __vfs_read+0x319/0x38f [ 106.253808] vfs_read+0xd2/0x19a [ 106.254488] ksys_read+0xb9/0x135 [ 106.255186] do_syscall_64+0x140/0x385 [ 
106.255943] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 106.256867] INITIAL USE at: [ 106.257300] _raw_spin_lock+0x33/0x64 [ 106.258033] sbitmap_get+0xd5/0x22c [ 106.258747] __sbitmap_queue_get+0xe8/0x177 [ 106.259542] __blk_mq_get_tag+0x1e6/0x22d [ 106.260320] blk_mq_get_tag+0x1db/0x6e4 [ 106.261072] blk_mq_get_driver_tag+0x161/0x258 [ 106.261902] blk_mq_dispatch_rq_list+0x28e/0xd7c [ 106.262762] blk_mq_do_dispatch_sched+0x23a/0x287 [ 106.263626] blk_mq_sched_dispatch_requests+0x379/0x3fc [ 106.264571] __blk_mq_run_hw_queue+0x137/0x17e [ 106.265409] __blk_mq_delay_run_hw_queue+0x80/0x25f [ 106.266302] blk_mq_run_hw_queue+0x151/0x187 [ 106.267111] blk_mq_sched_insert_requests+0x13f/0x175 [ 106.268028] blk_mq_flush_plug_list+0x7d6/0x81b [ 106.268878] blk_flush_plug_list+0x392/0x3d7 [ 106.269694] blk_finish_plug+0x37/0x4f [ 106.270432] read_pages+0x3ef/0x430 [ 106.271139] __do_page_cache_readahead+0x18e/0x2fc [ 106.272040] force_page_cache_readahead+0x121/0x133 [ 106.272932] page_cache_sync_readahead+0x35f/0x3bb [ 106.273811] generic_file_buffered_read+0x410/0x1860 [ 106.274709] __vfs_read+0x319/0x38f [ 106.275407] vfs_read+0xd2/0x19a [ 106.276074] ksys_read+0xb9/0x135 [ 106.276764] do_syscall_64+0x140/0x385 [ 106.277500] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 106.278417] } [ 106.278676] ... key at: [] __key.26212+0x0/0x40 [ 106.279586] ... acquired at: [ 106.280026] lock_acquire+0x280/0x2f3 [ 106.280559] _raw_spin_lock+0x33/0x64 [ 106.281101] sbitmap_get+0xd5/0x22c [ 106.281610] __sbitmap_queue_get+0xe8/0x177 [ 106.282221] __blk_mq_get_tag+0x1e6/0x22d [ 106.282809] blk_mq_get_tag+0x1db/0x6e4 [ 106.283368] blk_mq_get_driver_tag+0x161/0x258 [ 106.284018] blk_mq_dispatch_rq_list+0x5b9/0xd7c [ 106.284685] blk_mq_do_dispatch_sched+0x23a/0x287 [ 106.285371] blk_mq_sched_dispatch_requests+0x379/0x3fc [ 106.286135] __blk_mq_run_hw_queue+0x137/0x17e [ 106.286806] __blk_mq_delay_run_hw_queue+0x80/0x25f [ 106.287515] blk_mq_run_hw_queue+0x151/0x187 [ 106.288149] blk_mq_sched_insert_requests+0x13f/0x175 [ 106.289041] blk_mq_flush_plug_list+0x7d6/0x81b [ 106.289912] blk_flush_plug_list+0x392/0x3d7 [ 106.290590] blk_finish_plug+0x37/0x4f [ 106.291238] __se_sys_io_submit+0x171/0x304 [ 106.291864] do_syscall_64+0x140/0x385 [ 106.292534] entry_SYSCALL_64_after_hwframe+0x49/0xbe Reported-by: Ming Lei Tested-by: Guenter Roeck Signed-off-by: Jens Axboe --- lib/sbitmap.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'lib') diff --git a/lib/sbitmap.c b/lib/sbitmap.c index a89fbe7cf6ca..2261136ae067 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -118,8 +118,13 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth, static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index) { unsigned long mask, val; + unsigned long __maybe_unused flags; bool ret = false; + /* Silence bogus lockdep warning */ +#if defined(CONFIG_LOCKDEP) + local_irq_save(flags); +#endif spin_lock(&sb->map[index].swap_lock); if (!sb->map[index].cleared) @@ -142,6 +147,9 @@ static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index) ret = true; out_unlock: spin_unlock(&sb->map[index].swap_lock); +#if defined(CONFIG_LOCKDEP) + local_irq_restore(flags); +#endif return ret; } -- cgit v1.2.3 From b2dbff1bb893d5dfdf501231ff5505ca10cdede3 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 11 Dec 2018 18:39:41 -0700 Subject: sbitmap: flush deferred clears for resize and shallow gets We're missing a deferred clear off the shallow get, which can cause a hang. 
Additionally, when we resize the sbitmap, we should also flush deferred clears for good measure. Ensure we have full coverage on batch clears, even for paths where we would not be doing deferred clear. This makes it less error prone for future additions. Reported-by: Bart Van Assche Tested-by: Ming Lei Signed-off-by: Jens Axboe --- lib/sbitmap.c | 94 ++++++++++++++++++++++++++++++++--------------------------- 1 file changed, 51 insertions(+), 43 deletions(-) (limited to 'lib') diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 2261136ae067..5b3e56d68dab 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -20,6 +20,47 @@ #include #include +/* + * See if we have deferred clears that we can batch move + */ +static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index) +{ + unsigned long mask, val; + unsigned long __maybe_unused flags; + bool ret = false; + + /* Silence bogus lockdep warning */ +#if defined(CONFIG_LOCKDEP) + local_irq_save(flags); +#endif + spin_lock(&sb->map[index].swap_lock); + + if (!sb->map[index].cleared) + goto out_unlock; + + /* + * First get a stable cleared mask, setting the old mask to 0. + */ + do { + mask = sb->map[index].cleared; + } while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask); + + /* + * Now clear the masked bits in our free word + */ + do { + val = sb->map[index].word; + } while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val); + + ret = true; +out_unlock: + spin_unlock(&sb->map[index].swap_lock); +#if defined(CONFIG_LOCKDEP) + local_irq_restore(flags); +#endif + return ret; +} + int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, gfp_t flags, int node) { @@ -70,6 +111,9 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth) unsigned int bits_per_word = 1U << sb->shift; unsigned int i; + for (i = 0; i < sb->map_nr; i++) + sbitmap_deferred_clear(sb, i); + sb->depth = depth; sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word); @@ -112,47 +156,6 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth, return nr; } -/* - * See if we have deferred clears that we can batch move - */ -static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index) -{ - unsigned long mask, val; - unsigned long __maybe_unused flags; - bool ret = false; - - /* Silence bogus lockdep warning */ -#if defined(CONFIG_LOCKDEP) - local_irq_save(flags); -#endif - spin_lock(&sb->map[index].swap_lock); - - if (!sb->map[index].cleared) - goto out_unlock; - - /* - * First get a stable cleared mask, setting the old mask to 0. 
- */ - do { - mask = sb->map[index].cleared; - } while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask); - - /* - * Now clear the masked bits in our free word - */ - do { - val = sb->map[index].word; - } while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val); - - ret = true; -out_unlock: - spin_unlock(&sb->map[index].swap_lock); -#if defined(CONFIG_LOCKDEP) - local_irq_restore(flags); -#endif - return ret; -} - static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index, unsigned int alloc_hint, bool round_robin) { @@ -215,6 +218,7 @@ int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint, index = SB_NR_TO_INDEX(sb, alloc_hint); for (i = 0; i < sb->map_nr; i++) { +again: nr = __sbitmap_get_word(&sb->map[index].word, min(sb->map[index].depth, shallow_depth), SB_NR_TO_BIT(sb, alloc_hint), true); @@ -223,6 +227,9 @@ int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint, break; } + if (sbitmap_deferred_clear(sb, index)) + goto again; + /* Jump to next index. */ index++; alloc_hint = index << sb->shift; @@ -242,7 +249,7 @@ bool sbitmap_any_bit_set(const struct sbitmap *sb) unsigned int i; for (i = 0; i < sb->map_nr; i++) { - if (sb->map[i].word) + if (sb->map[i].word & ~sb->map[i].cleared) return true; } return false; @@ -255,9 +262,10 @@ bool sbitmap_any_bit_clear(const struct sbitmap *sb) for (i = 0; i < sb->map_nr; i++) { const struct sbitmap_word *word = &sb->map[i]; + unsigned long mask = word->word & ~word->cleared; unsigned long ret; - ret = find_first_zero_bit(&word->word, word->depth); + ret = find_first_zero_bit(&mask, word->depth); if (ret < word->depth) return true; } -- cgit v1.2.3 From cb002d074dabfaa2248507fd9478d16a542e4f1e Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 3 Dec 2018 17:52:07 -0800 Subject: iov_iter: pass void csum pointer to csum_and_copy_to_iter The single caller to csum_and_copy_to_iter is skb_copy_and_csum_datagram and we are trying to unite its logic with skb_copy_datagram_iter by passing a callback to the copy function that we want to apply. Thus, we need to make the checksum pointer private to the function. Acked-by: David S. 
Miller Signed-off-by: Sagi Grimberg Signed-off-by: Christoph Hellwig --- include/linux/uio.h | 2 +- lib/iov_iter.c | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/include/linux/uio.h b/include/linux/uio.h index 55ce99ddb912..41d1f8d3313d 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -266,7 +266,7 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) { i->count = count; } -size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); +size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i); size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 54c248526b55..63a8999a234d 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -1464,10 +1464,11 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, } EXPORT_SYMBOL(csum_and_copy_from_iter_full); -size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, +size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i) { const char *from = addr; + __wsum *csum = csump; __wsum sum, next; size_t off = 0; -- cgit v1.2.3 From d05f443554b3c7dc6d46e3ba9c3c4de468875d4f Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 3 Dec 2018 17:52:09 -0800 Subject: iov_iter: introduce hash_and_copy_to_iter helper Allow consumers that use the iov iterator helpers to also update a predefined hash calculation online while copying data. This is useful when copying incoming network buffers to a local iterator and calculating a digest on the incoming stream. The nvme-tcp host driver that will be introduced in following patches is the first consumer, via skb_copy_and_hash_datagram_iter. Acked-by: David S.
Miller Signed-off-by: Sagi Grimberg Signed-off-by: Christoph Hellwig --- include/linux/uio.h | 3 +++ lib/iov_iter.c | 16 ++++++++++++++++ 2 files changed, 19 insertions(+) (limited to 'lib') diff --git a/include/linux/uio.h b/include/linux/uio.h index 41d1f8d3313d..ecf584f6b82d 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -11,6 +11,7 @@ #include #include +#include #include struct page; @@ -269,6 +270,8 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i); size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); +size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, + struct iov_iter *i); int import_iovec(int type, const struct iovec __user * uvector, unsigned nr_segs, unsigned fast_segs, diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 63a8999a234d..1928009f506e 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -6,6 +6,7 @@ #include #include #include +#include #define PIPE_PARANOIA /* for now */ @@ -1511,6 +1512,21 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, } EXPORT_SYMBOL(csum_and_copy_to_iter); +size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, + struct iov_iter *i) +{ + struct ahash_request *hash = hashp; + struct scatterlist sg; + size_t copied; + + copied = copy_to_iter(addr, bytes, i); + sg_init_one(&sg, addr, copied); + ahash_request_set_crypt(hash, &sg, NULL, copied); + crypto_ahash_update(hash); + return copied; +} +EXPORT_SYMBOL(hash_and_copy_to_iter); + int iov_iter_npages(const struct iov_iter *i, int maxpages) { size_t size = i->count; -- cgit v1.2.3 From 9f6b7ef6c3ebe35be77b0ae3cf12e4d25ae80420 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 20 Dec 2018 08:49:00 -0700 Subject: sbitmap: add helpers for add/del wait queue handling After commit 5d2ee7122c73, users of sbitmap that need wait queue handling must use the provided helpers. But we only added prepare_to_wait()/finish_wait() style helpers; add the equivalent add_wait_queue/list_del wrappers as well. This is needed to ensure kyber plays by the sbitmap waitqueue rules.
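For illustration, the kind of caller these wrappers cover looks roughly like this (a sketch with hypothetical names, not the actual kyber conversion): the waiter is armed asynchronously with a custom wake function and removed from inside that callback, so the prepare_to_wait()/finish_wait() style helpers don't fit, and the ws_active accounting has to happen in the add/del pair instead.

static int example_wake_fn(struct wait_queue_entry *wqe, unsigned mode,
			   int flags, void *key)
{
	struct sbq_wait *wait = container_of(wqe, struct sbq_wait, wait);

	/* removes the entry and drops the ws_active accounting */
	sbitmap_del_wait_queue(wait);
	/* ... kick whatever was starved waiting for a free bit ... */
	return 1;
}

static void example_arm_waiter(struct sbitmap_queue *sbq, struct sbq_wait *wait)
{
	struct sbq_wait_state *ws = &sbq->ws[0];

	init_waitqueue_func_entry(&wait->wait, example_wake_fn);
	/* bumps sbq->ws_active so sbq_wake_ptr() knows someone is waiting */
	sbitmap_add_wait_queue(sbq, ws, wait);
}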
Tested-by: Ming Lei Reviewed-by: Omar Sandoval Signed-off-by: Jens Axboe --- include/linux/sbitmap.h | 16 ++++++++++++++-- lib/sbitmap.c | 30 ++++++++++++++++++++++++++---- 2 files changed, 40 insertions(+), 6 deletions(-) (limited to 'lib') diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index 03f50fcedc79..14d558146aea 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -560,13 +560,13 @@ void sbitmap_queue_wake_up(struct sbitmap_queue *sbq); void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m); struct sbq_wait { - int accounted; + struct sbitmap_queue *sbq; /* if set, sbq_wait is accounted */ struct wait_queue_entry wait; }; #define DEFINE_SBQ_WAIT(name) \ struct sbq_wait name = { \ - .accounted = 0, \ + .sbq = NULL, \ .wait = { \ .private = current, \ .func = autoremove_wake_function, \ @@ -588,4 +588,16 @@ void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq, void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws, struct sbq_wait *sbq_wait); +/* + * Wrapper around add_wait_queue(), which maintains some extra internal state + */ +void sbitmap_add_wait_queue(struct sbitmap_queue *sbq, + struct sbq_wait_state *ws, + struct sbq_wait *sbq_wait); + +/* + * Must be paired with sbitmap_add_wait_queue() + */ +void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait); + #endif /* __LINUX_SCALE_BITMAP_H */ diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 5b3e56d68dab..65c2d06250a6 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -671,13 +671,35 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m) } EXPORT_SYMBOL_GPL(sbitmap_queue_show); +void sbitmap_add_wait_queue(struct sbitmap_queue *sbq, + struct sbq_wait_state *ws, + struct sbq_wait *sbq_wait) +{ + if (!sbq_wait->sbq) { + sbq_wait->sbq = sbq; + atomic_inc(&sbq->ws_active); + } + add_wait_queue(&ws->wait, &sbq_wait->wait); +} +EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue); + +void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait) +{ + list_del_init(&sbq_wait->wait.entry); + if (sbq_wait->sbq) { + atomic_dec(&sbq_wait->sbq->ws_active); + sbq_wait->sbq = NULL; + } +} +EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue); + void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws, struct sbq_wait *sbq_wait, int state) { - if (!sbq_wait->accounted) { + if (!sbq_wait->sbq) { atomic_inc(&sbq->ws_active); - sbq_wait->accounted = 1; + sbq_wait->sbq = sbq; } prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state); } @@ -687,9 +709,9 @@ void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws, struct sbq_wait *sbq_wait) { finish_wait(&ws->wait, &sbq_wait->wait); - if (sbq_wait->accounted) { + if (sbq_wait->sbq) { atomic_dec(&sbq->ws_active); - sbq_wait->accounted = 0; + sbq_wait->sbq = NULL; } } EXPORT_SYMBOL_GPL(sbitmap_finish_wait); -- cgit v1.2.3
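As a closing illustration for the iov_iter additions above, a consumer of hash_and_copy_to_iter() would look roughly like the sketch below (a hypothetical function; nvme-tcp, the first real user, arrives in later patches). It assumes @hash was set up beforehand with crypto_alloc_ahash() and ahash_request_alloc(), with crypto_ahash_final() producing the digest once the stream ends.

static int example_copy_with_digest(struct ahash_request *hash,
				    const void *payload, size_t len,
				    struct iov_iter *iter)
{
	/* copies to the iterator and folds the copied bytes into @hash */
	size_t copied = hash_and_copy_to_iter(payload, len, hash, iter);

	if (copied != len)
		return -EFAULT;
	return 0;
}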