author     Linus Torvalds <torvalds@linux-foundation.org>   2021-09-06 10:06:26 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2021-09-06 10:06:26 -0700
commit     1dbe7e386f505bdae30f7436c41769149c7dcf32 (patch)
tree       a18108c84322f4453872823fadeaf6c00e6351cd
parent     03085b3d5a45a60061423ac4857f339c7cb260ff (diff)
parent     1c500ad706383f1a6609e63d0b5d1723fd84dab9 (diff)
download   linux-1dbe7e386f505bdae30f7436c41769149c7dcf32.tar.bz2
Merge tag 'block-5.15-2021-09-05' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
"Was going to send this one in later this week, but given that -Werror
is now enabled (or at least available), the mq-deadline fix really
should go in for the folks hitting that.
- Ensure dd_queued() is only there if needed (Geert)
- Fix a kerneldoc warning for bio_alloc_kiocb()
- BFQ fix for queue merging
- loop locking fix (Tetsuo)"
* tag 'block-5.15-2021-09-05' of git://git.kernel.dk/linux-block:
loop: reduce the loop_ctl_mutex scope
bio: fix kerneldoc documentation for bio_alloc_kiocb()
block, bfq: honor already-setup queue merges
block/mq-deadline: Move dd_queued() to fix defined but not used warning
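[Editorial note on the mq-deadline item: dd_queued() is a static helper whose only caller is debugfs code, so with CONFIG_BLK_DEBUG_FS disabled the helper has no users and gcc's -Wunused-function ("defined but not used"), fatal once the kernel's new -Werror option is enabled, trips on it; the patch below simply moves the definition next to its only user. The stand-alone program that follows is a minimal illustration of that failure mode using invented names (queued_count, SHOW_STATS, demo.c); it is not kernel code.]

#include <stdio.h>

/* Static helper whose only caller sits behind an #ifdef, mirroring
 * dd_queued() being used only by the debugfs code in mq-deadline. */
static int queued_count(int inserted, int completed)
{
	return inserted - completed;
}

#ifdef SHOW_STATS
static void show_stats(void)
{
	printf("queued: %d\n", queued_count(5, 3));
}
#endif

int main(void)
{
#ifdef SHOW_STATS
	show_stats();
#endif
	return 0;
}

[Building with "gcc -Wall -Werror demo.c" and SHOW_STATS undefined fails with a "defined but not used" diagnostic for queued_count(); moving queued_count() inside the #ifdef block, as the patch below does with dd_queued() and CONFIG_BLK_DEBUG_FS, makes both configurations build cleanly.]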
-rw-r--r--   block/bfq-iosched.c    | 16
-rw-r--r--   block/bio.c            |  2
-rw-r--r--   block/mq-deadline.c    | 12
-rw-r--r--   drivers/block/loop.c   | 75
-rw-r--r--   drivers/block/loop.h   |  1
5 files changed, 70 insertions, 36 deletions
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 480e1a134859..dd13c2bbc29c 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -2662,6 +2662,15 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
 	 * are likely to increase the throughput.
 	 */
 	bfqq->new_bfqq = new_bfqq;
+	/*
+	 * The above assignment schedules the following redirections:
+	 * each time some I/O for bfqq arrives, the process that
+	 * generated that I/O is disassociated from bfqq and
+	 * associated with new_bfqq. Here we increases new_bfqq->ref
+	 * in advance, adding the number of processes that are
+	 * expected to be associated with new_bfqq as they happen to
+	 * issue I/O.
+	 */
 	new_bfqq->ref += process_refs;
 	return new_bfqq;
 }
@@ -2724,6 +2733,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 {
 	struct bfq_queue *in_service_bfqq, *new_bfqq;
 
+	/* if a merge has already been setup, then proceed with that first */
+	if (bfqq->new_bfqq)
+		return bfqq->new_bfqq;
+
 	/*
 	 * Check delayed stable merge for rotational or non-queueing
 	 * devs. For this branch to be executed, bfqq must not be
@@ -2825,9 +2838,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 	if (bfq_too_late_for_merging(bfqq))
 		return NULL;
 
-	if (bfqq->new_bfqq)
-		return bfqq->new_bfqq;
-
 	if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
 		return NULL;
 
diff --git a/block/bio.c b/block/bio.c
index e16849f46b0e..5df3dd282e40 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1688,7 +1688,7 @@ EXPORT_SYMBOL(bioset_init_from_src);
 /**
  * bio_alloc_kiocb - Allocate a bio from bio_set based on kiocb
  * @kiocb: kiocb describing the IO
- * @nr_iovecs: number of iovecs to pre-allocate
+ * @nr_vecs: number of iovecs to pre-allocate
  * @bs: bio_set to allocate from
  *
  * Description:
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 3c3693c34f06..7f3c3932b723 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -270,12 +270,6 @@ deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
 	deadline_remove_request(rq->q, per_prio, rq);
 }
 
-/* Number of requests queued for a given priority level. */
-static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
-{
-	return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
-}
-
 /*
  * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
  * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
@@ -953,6 +947,12 @@ static int dd_async_depth_show(void *data, struct seq_file *m)
 	return 0;
 }
 
+/* Number of requests queued for a given priority level. */
+static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
+{
+	return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
+}
+
 static int dd_queued_show(void *data, struct seq_file *m)
 {
 	struct request_queue *q = data;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index fa1c298a8cfb..7bf4686af774 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -2111,18 +2111,6 @@ int loop_register_transfer(struct loop_func_table *funcs)
 	return 0;
 }
 
-static int unregister_transfer_cb(int id, void *ptr, void *data)
-{
-	struct loop_device *lo = ptr;
-	struct loop_func_table *xfer = data;
-
-	mutex_lock(&lo->lo_mutex);
-	if (lo->lo_encryption == xfer)
-		loop_release_xfer(lo);
-	mutex_unlock(&lo->lo_mutex);
-	return 0;
-}
-
 int loop_unregister_transfer(int number)
 {
 	unsigned int n = number;
@@ -2130,9 +2118,20 @@ int loop_unregister_transfer(int number)
 
 	if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
 		return -EINVAL;
+	/*
+	 * This function is called from only cleanup_cryptoloop().
+	 * Given that each loop device that has a transfer enabled holds a
+	 * reference to the module implementing it we should never get here
+	 * with a transfer that is set (unless forced module unloading is
+	 * requested). Thus, check module's refcount and warn if this is
+	 * not a clean unloading.
+	 */
+#ifdef CONFIG_MODULE_UNLOAD
+	if (xfer->owner && module_refcount(xfer->owner) != -1)
+		pr_err("Danger! Unregistering an in use transfer function.\n");
+#endif
 
 	xfer_funcs[n] = NULL;
-	idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
 	return 0;
 }
 
@@ -2323,8 +2322,9 @@ static int loop_add(int i)
 	} else {
 		err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
 	}
+	mutex_unlock(&loop_ctl_mutex);
 	if (err < 0)
-		goto out_unlock;
+		goto out_free_dev;
 	i = err;
 
 	err = -ENOMEM;
@@ -2393,15 +2393,19 @@
 	disk->events = DISK_EVENT_MEDIA_CHANGE;
 	disk->event_flags = DISK_EVENT_FLAG_UEVENT;
 	sprintf(disk->disk_name, "loop%d", i);
+	/* Make this loop device reachable from pathname. */
 	add_disk(disk);
+	/* Show this loop device. */
+	mutex_lock(&loop_ctl_mutex);
+	lo->idr_visible = true;
 	mutex_unlock(&loop_ctl_mutex);
 	return i;
 
 out_cleanup_tags:
 	blk_mq_free_tag_set(&lo->tag_set);
 out_free_idr:
+	mutex_lock(&loop_ctl_mutex);
 	idr_remove(&loop_index_idr, i);
-out_unlock:
 	mutex_unlock(&loop_ctl_mutex);
 out_free_dev:
 	kfree(lo);
@@ -2411,9 +2415,14 @@ out:
 
 static void loop_remove(struct loop_device *lo)
 {
+	/* Make this loop device unreachable from pathname. */
 	del_gendisk(lo->lo_disk);
 	blk_cleanup_disk(lo->lo_disk);
 	blk_mq_free_tag_set(&lo->tag_set);
+	mutex_lock(&loop_ctl_mutex);
+	idr_remove(&loop_index_idr, lo->lo_number);
+	mutex_unlock(&loop_ctl_mutex);
+	/* There is no route which can find this loop device. */
 	mutex_destroy(&lo->lo_mutex);
 	kfree(lo);
 }
@@ -2437,31 +2446,40 @@ static int loop_control_remove(int idx)
 		return -EINVAL;
 	}
 
+	/* Hide this loop device for serialization. */
 	ret = mutex_lock_killable(&loop_ctl_mutex);
 	if (ret)
 		return ret;
-
 	lo = idr_find(&loop_index_idr, idx);
-	if (!lo) {
+	if (!lo || !lo->idr_visible)
 		ret = -ENODEV;
-		goto out_unlock_ctrl;
-	}
+	else
+		lo->idr_visible = false;
+	mutex_unlock(&loop_ctl_mutex);
+	if (ret)
+		return ret;
 
+	/* Check whether this loop device can be removed. */
 	ret = mutex_lock_killable(&lo->lo_mutex);
 	if (ret)
-		goto out_unlock_ctrl;
+		goto mark_visible;
 	if (lo->lo_state != Lo_unbound ||
 	    atomic_read(&lo->lo_refcnt) > 0) {
 		mutex_unlock(&lo->lo_mutex);
 		ret = -EBUSY;
-		goto out_unlock_ctrl;
+		goto mark_visible;
 	}
+	/* Mark this loop device no longer open()-able. */
 	lo->lo_state = Lo_deleting;
 	mutex_unlock(&lo->lo_mutex);
 
-	idr_remove(&loop_index_idr, lo->lo_number);
 	loop_remove(lo);
-out_unlock_ctrl:
+	return 0;
+
+mark_visible:
+	/* Show this loop device again. */
+	mutex_lock(&loop_ctl_mutex);
+	lo->idr_visible = true;
 	mutex_unlock(&loop_ctl_mutex);
 	return ret;
 }
@@ -2475,7 +2493,8 @@ static int loop_control_get_free(int idx)
 	if (ret)
 		return ret;
 	idr_for_each_entry(&loop_index_idr, lo, id) {
-		if (lo->lo_state == Lo_unbound)
+		/* Hitting a race results in creating a new loop device which is harmless. */
+		if (lo->idr_visible && data_race(lo->lo_state) == Lo_unbound)
 			goto found;
 	}
 	mutex_unlock(&loop_ctl_mutex);
@@ -2591,10 +2610,14 @@ static void __exit loop_exit(void)
 	unregister_blkdev(LOOP_MAJOR, "loop");
 	misc_deregister(&loop_misc);
 
-	mutex_lock(&loop_ctl_mutex);
+	/*
+	 * There is no need to use loop_ctl_mutex here, for nobody else can
+	 * access loop_index_idr when this module is unloading (unless forced
+	 * module unloading is requested). If this is not a clean unloading,
+	 * we have no means to avoid kernel crash.
+	 */
 	idr_for_each_entry(&loop_index_idr, lo, id)
 		loop_remove(lo);
-	mutex_unlock(&loop_ctl_mutex);
 
 	idr_destroy(&loop_index_idr);
 }
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 1988899db63a..04c88dd6eabd 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -68,6 +68,7 @@ struct loop_device {
 	struct blk_mq_tag_set	tag_set;
 	struct gendisk		*lo_disk;
 	struct mutex		lo_mutex;
+	bool			idr_visible;
 };
 
 struct loop_cmd {
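[Editorial note on the loop change: the patch shrinks the loop_ctl_mutex critical sections to the idr lookups/updates themselves and introduces an idr_visible flag, so a device being removed is hidden from concurrent lookups while the slow, possibly blocking teardown runs without the global lock held; on failure the device is made visible again. The user-space sketch below mirrors that shape under invented names (registry_lock, struct device, remove_device); it illustrates the locking pattern only and is not the driver code.]

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct device {
	pthread_mutex_t lock;   /* per-object lock, playing the role of lo->lo_mutex */
	bool visible;           /* plays the role of lo->idr_visible */
	bool busy;              /* stands in for the Lo_unbound/lo_refcnt checks */
};

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER; /* role of loop_ctl_mutex */
static struct device *registry[8];                                /* role of loop_index_idr */

static int remove_device(int idx)
{
	struct device *dev;

	/* Hide the object under a short global critical section. */
	pthread_mutex_lock(&registry_lock);
	dev = registry[idx];
	if (!dev || !dev->visible) {
		pthread_mutex_unlock(&registry_lock);
		return -1;                      /* -ENODEV in the driver */
	}
	dev->visible = false;
	pthread_mutex_unlock(&registry_lock);

	/* Do the slow checks with only the per-object lock held. */
	pthread_mutex_lock(&dev->lock);
	if (dev->busy) {
		pthread_mutex_unlock(&dev->lock);
		/* Failure path: make the object visible again, like the
		 * mark_visible label in loop_control_remove(). */
		pthread_mutex_lock(&registry_lock);
		dev->visible = true;
		pthread_mutex_unlock(&registry_lock);
		return -2;                      /* -EBUSY */
	}
	pthread_mutex_unlock(&dev->lock);

	/* Unpublish and free; no lookup can reach the object anymore. */
	pthread_mutex_lock(&registry_lock);
	registry[idx] = NULL;
	pthread_mutex_unlock(&registry_lock);
	pthread_mutex_destroy(&dev->lock);
	free(dev);
	return 0;
}

int main(void)
{
	struct device *dev = calloc(1, sizeof(*dev));

	pthread_mutex_init(&dev->lock, NULL);
	dev->visible = true;
	registry[0] = dev;
	printf("remove_device(0) -> %d\n", remove_device(0));  /* 0: removed */
	printf("remove_device(0) -> %d\n", remove_device(0));  /* -1: already gone */
	return 0;
}

[Built with "cc -pthread", the sketch prints 0 then -1. The point is that registry_lock is never held across the per-object, potentially blocking work; hiding the entry first is what makes dropping the global lock safe, which is what idr_visible provides in the driver.]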