From de1b0ee490eafdf65fac9eef9925391a8369f2dc Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Mon, 31 Aug 2020 11:20:02 -0600
Subject: block: ensure bdi->io_pages is always initialized

If a driver leaves the limit settings as the defaults, then we don't
initialize bdi->io_pages. This means that file systems may need to
work around bdi->io_pages == 0, which is somewhat messy. Initialize
the default value just like we do for ->ra_pages.

Cc: stable@vger.kernel.org
Fixes: 9491ae4aade6 ("mm: don't cap request size based on read-ahead setting")
Reported-by: OGAWA Hirofumi
Reviewed-by: Christoph Hellwig
Signed-off-by: Jens Axboe
---
 block/blk-core.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/block/blk-core.c b/block/blk-core.c
index d9d632639bd1..10c08ac50697 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -539,6 +539,7 @@ struct request_queue *blk_alloc_queue(int node_id)
         goto fail_stats;
 
     q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES;
+    q->backing_dev_info->io_pages = VM_READAHEAD_PAGES;
     q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
     q->node = node_id;
 
-- 
cgit v1.2.3


From cafe01ef8fcb248583038e1be071383530fe355a Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Tue, 1 Sep 2020 18:07:38 +0800
Subject: block: release disk reference in hd_struct_free_work

Commit e8c7d14ac6c3 ("block: revert back to synchronous request_queue
removal") stopped releasing the request queue from wq context because
that commit assumed every blk_put_queue() call happens in a context
that is allowed to sleep. However, that assumption isn't true: the
disk's reference is released from the partition's percpu_ref
->release() callback, which must not sleep because ->release() runs
via call_rcu().

Fix this by moving the disk reference put into hd_struct_free_work().

Fixes: e8c7d14ac6c3 ("block: revert back to synchronous request_queue removal")
Reported-by: Ilya Dryomov
Signed-off-by: Ming Lei
Tested-by: Ilya Dryomov
Reviewed-by: Christoph Hellwig
Cc: Luis Chamberlain
Cc: Christoph Hellwig
Cc: Bart Van Assche
Signed-off-by: Jens Axboe
---
 block/partitions/core.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/block/partitions/core.c b/block/partitions/core.c
index e62a98a8eeb7..5a18c8edabbc 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -278,6 +278,15 @@ static void hd_struct_free_work(struct work_struct *work)
 {
     struct hd_struct *part =
         container_of(to_rcu_work(work), struct hd_struct, rcu_work);
+    struct gendisk *disk = part_to_disk(part);
+
+    /*
+     * Release the disk reference acquired in delete_partition here.
+     * We can't release it in hd_struct_free because the final put_device
+     * needs process context and thus can't be run directly from a
+     * percpu_ref ->release handler.
+     */
+    put_device(disk_to_dev(disk));
 
     part->start_sect = 0;
     part->nr_sects = 0;
@@ -293,7 +302,6 @@ static void hd_struct_free(struct percpu_ref *ref)
         rcu_dereference_protected(disk->part_tbl, 1);
 
     rcu_assign_pointer(ptbl->last_lookup, NULL);
-    put_device(disk_to_dev(disk));
 
     INIT_RCU_WORK(&part->rcu_work, hd_struct_free_work);
     queue_rcu_work(system_wq, &part->rcu_work);
-- 
cgit v1.2.3
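[Editor's note: the commit above splits partition teardown in two: the percpu_ref ->release() callback, which runs in atomic context via call_rcu(), only queues an rcu_work item, and the sleeping part of the cleanup (the final put_device()) runs later from the workqueue. The fragment below is a minimal, self-contained sketch of that generic pattern, not code from this series; struct demo_obj, demo_release(), demo_free_work() and demo_obj_create() are invented names.]

/* Hedged sketch: defer sleeping cleanup out of a percpu_ref ->release(). */
#include <linux/module.h>
#include <linux/percpu-refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_obj {
    struct percpu_ref ref;
    struct rcu_work rwork;
};

static void demo_free_work(struct work_struct *work)
{
    struct demo_obj *obj =
        container_of(to_rcu_work(work), struct demo_obj, rwork);

    /* Process context: sleeping calls (a final put_device(), etc.) are fine. */
    percpu_ref_exit(&obj->ref);
    kfree(obj);
}

static void demo_release(struct percpu_ref *ref)
{
    struct demo_obj *obj = container_of(ref, struct demo_obj, ref);

    /* Runs via call_rcu(): no sleeping here, only queue the real teardown. */
    INIT_RCU_WORK(&obj->rwork, demo_free_work);
    queue_rcu_work(system_wq, &obj->rwork);
}

static struct demo_obj *demo_obj_create(void)
{
    struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

    if (!obj)
        return NULL;
    if (percpu_ref_init(&obj->ref, demo_release, 0, GFP_KERNEL)) {
        kfree(obj);
        return NULL;
    }
    return obj;
}

In this sketch, once the last reference is dropped (for example after percpu_ref_kill()), demo_release() runs in atomic context and merely queues demo_free_work(); the sleeping cleanup then happens in process context, which is the same split the patch applies to the disk reference.]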
From 08fc1ab6d748ab1a690fd483f41e2938984ce353 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 1 Sep 2020 11:59:41 +0200
Subject: block: fix locking in bdev_del_partition

We need to hold the whole-device bd_mutex to protect against another
thread concurrently deleting our partition before we get to it, which
would otherwise cause a use after free.

Fixes: cddae808aeb7 ("block: pass a hd_struct to delete_partition")
Reported-by: syzbot+6448f3c229bc52b82f69@syzkaller.appspotmail.com
Signed-off-by: Christoph Hellwig
Signed-off-by: Jens Axboe
---
 block/partitions/core.c | 27 +++++++++++++--------------
 1 file changed, 13 insertions(+), 14 deletions(-)

diff --git a/block/partitions/core.c b/block/partitions/core.c
index 5a18c8edabbc..5b4869c08fb3 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -532,19 +532,20 @@ int bdev_add_partition(struct block_device *bdev, int partno,
 int bdev_del_partition(struct block_device *bdev, int partno)
 {
     struct block_device *bdevp;
-    struct hd_struct *part;
-    int ret = 0;
-
-    part = disk_get_part(bdev->bd_disk, partno);
-    if (!part)
-        return -ENXIO;
+    struct hd_struct *part = NULL;
+    int ret;
 
-    ret = -ENOMEM;
-    bdevp = bdget(part_devt(part));
+    bdevp = bdget_disk(bdev->bd_disk, partno);
     if (!bdevp)
-        goto out_put_part;
+        return -ENOMEM;
 
     mutex_lock(&bdevp->bd_mutex);
+    mutex_lock_nested(&bdev->bd_mutex, 1);
+
+    ret = -ENXIO;
+    part = disk_get_part(bdev->bd_disk, partno);
+    if (!part)
+        goto out_unlock;
 
     ret = -EBUSY;
     if (bdevp->bd_openers)
@@ -553,16 +554,14 @@ int bdev_del_partition(struct block_device *bdev, int partno)
     sync_blockdev(bdevp);
     invalidate_bdev(bdevp);
 
-    mutex_lock_nested(&bdev->bd_mutex, 1);
     delete_partition(bdev->bd_disk, part);
-    mutex_unlock(&bdev->bd_mutex);
-
     ret = 0;
 out_unlock:
+    mutex_unlock(&bdev->bd_mutex);
     mutex_unlock(&bdevp->bd_mutex);
     bdput(bdevp);
-out_put_part:
-    disk_put_part(part);
+    if (part)
+        disk_put_part(part);
     return ret;
 }
-- 
cgit v1.2.3


From 5aeac7c4b16069aae49005f0a8d4526baa83341b Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Tue, 1 Sep 2020 14:52:31 -0400
Subject: blk-iocost: ioc_pd_free() shouldn't assume irq disabled

ioc_pd_free() grabs the irq-safe ioc->lock without ensuring that irqs
are disabled, even though it can be called with irqs either disabled
or enabled. This has a small chance of causing A-A deadlocks and
triggers lockdep splats. Use irqsave operations instead.

Signed-off-by: Tejun Heo
Fixes: 7caa47151ab2 ("blkcg: implement blk-iocost")
Cc: stable@vger.kernel.org # v5.4+
Signed-off-by: Jens Axboe
---
 block/blk-iocost.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 413e0b5c8e6b..d37b55db2409 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -2092,14 +2092,15 @@ static void ioc_pd_free(struct blkg_policy_data *pd)
 {
     struct ioc_gq *iocg = pd_to_iocg(pd);
     struct ioc *ioc = iocg->ioc;
+    unsigned long flags;
 
     if (ioc) {
-        spin_lock(&ioc->lock);
+        spin_lock_irqsave(&ioc->lock, flags);
         if (!list_empty(&iocg->active_list)) {
             propagate_active_weight(iocg, 0, 0);
             list_del_init(&iocg->active_list);
         }
-        spin_unlock(&ioc->lock);
+        spin_unlock_irqrestore(&ioc->lock, flags);
 
         hrtimer_cancel(&iocg->waitq_timer);
         hrtimer_cancel(&iocg->delay_timer);
-- 
cgit v1.2.3
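[Editor's note: the blk-iocost fix above and the blk-stat fix that follows are two instances of the same locking rule: a lock that may be taken from hard-irq context, or while another irq-safe lock is held, must be acquired with interrupts disabled, and a function that can be entered with irqs in either state has to use the irqsave/irqrestore variants so it restores whatever state it found. The fragment below is an illustrative sketch only, not code from either patch; demo_lock, demo_count and the demo_* functions are invented names.]

/*
 * Hedged sketch: choosing between the plain, _irq and _irqsave spinlock
 * variants for a lock that can also be taken from interrupt context.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned long demo_count;

/* Called from the (hypothetical) irq handler: irqs are already off here. */
static void demo_update_from_irq(void)
{
    spin_lock(&demo_lock);          /* plain lock is sufficient */
    demo_count++;
    spin_unlock(&demo_lock);
}

/* Only ever called from process context with irqs enabled. */
static void demo_update_from_task(void)
{
    spin_lock_irq(&demo_lock);      /* unlock re-enables irqs unconditionally */
    demo_count++;
    spin_unlock_irq(&demo_lock);
}

/*
 * Can be reached with irqs either enabled or disabled, so save and
 * restore the caller's irq state instead of assuming it.
 */
static void demo_update_from_anywhere(void)
{
    unsigned long flags;

    spin_lock_irqsave(&demo_lock, flags);
    demo_count++;
    spin_unlock_irqrestore(&demo_lock, flags);
}

Per its commit message, ioc_pd_free() corresponds to the third case: it can be called with irqs either disabled or enabled, so it must save and restore the caller's irq state. Taking the irq-safe ioc->lock without disabling irqs leaves a window in which an interrupt on the same CPU could try to take ioc->lock again, which is the A-A deadlock the message refers to.]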
From e11d80a849e010f78243bb6f6af7dccef3a71a90 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Tue, 1 Sep 2020 14:52:32 -0400
Subject: blk-stat: make q->stats->lock irqsafe

blk-iocost calls blk_stat_enable_accounting() while holding an
irq-safe lock, which triggers a lockdep splat because q->stats->lock
isn't irq-safe. Let's make it irq-safe.

Signed-off-by: Tejun Heo
Fixes: cd006509b0a9 ("blk-iocost: account for IO size when testing latencies")
Cc: stable@vger.kernel.org # v5.8+
Signed-off-by: Jens Axboe
---
 block/blk-stat.c | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/block/blk-stat.c b/block/blk-stat.c
index 7da302ff88d0..ae3dd1fb8e61 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -137,6 +137,7 @@ void blk_stat_add_callback(struct request_queue *q,
                struct blk_stat_callback *cb)
 {
     unsigned int bucket;
+    unsigned long flags;
     int cpu;
 
     for_each_possible_cpu(cpu) {
@@ -147,20 +148,22 @@ void blk_stat_add_callback(struct request_queue *q,
         blk_rq_stat_init(&cpu_stat[bucket]);
     }
 
-    spin_lock(&q->stats->lock);
+    spin_lock_irqsave(&q->stats->lock, flags);
     list_add_tail_rcu(&cb->list, &q->stats->callbacks);
     blk_queue_flag_set(QUEUE_FLAG_STATS, q);
-    spin_unlock(&q->stats->lock);
+    spin_unlock_irqrestore(&q->stats->lock, flags);
 }
 
 void blk_stat_remove_callback(struct request_queue *q,
                   struct blk_stat_callback *cb)
 {
-    spin_lock(&q->stats->lock);
+    unsigned long flags;
+
+    spin_lock_irqsave(&q->stats->lock, flags);
     list_del_rcu(&cb->list);
     if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
         blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
-    spin_unlock(&q->stats->lock);
+    spin_unlock_irqrestore(&q->stats->lock, flags);
 
     del_timer_sync(&cb->timer);
 }
@@ -183,10 +186,12 @@ void blk_stat_free_callback(struct blk_stat_callback *cb)
 
 void blk_stat_enable_accounting(struct request_queue *q)
 {
-    spin_lock(&q->stats->lock);
+    unsigned long flags;
+
+    spin_lock_irqsave(&q->stats->lock, flags);
     q->stats->enable_accounting = true;
     blk_queue_flag_set(QUEUE_FLAG_STATS, q);
-    spin_unlock(&q->stats->lock);
+    spin_unlock_irqrestore(&q->stats->lock, flags);
 }
 EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);
 
-- 
cgit v1.2.3
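[Editor's note: the blk-stat fix above illustrates the complementary rule: a lock that can be acquired while an irq-safe lock is held (q->stats->lock is taken under the irq-safe ioc->lock via blk_stat_enable_accounting()) must itself disable interrupts on every acquisition, otherwise an interrupt arriving while it is held can deadlock against the outer lock, and lockdep warns about exactly that. The fragment below is an illustrative sketch with invented names (outer_lock, inner_lock, demo_*); it is not code from this series.]

/*
 * Hedged sketch: why a lock nested inside an irq-safe lock must itself
 * be irq-safe everywhere it is taken.
 *
 * Broken variant: if inner_lock were taken with plain spin_lock() while
 * irqs are enabled, this interleaving becomes possible:
 *   CPU0: spin_lock(&inner_lock) -> hard irq -> handler spins on outer_lock
 *   CPU1: holds outer_lock (irqs off) and spins on inner_lock
 * and neither CPU can make progress.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer_lock);    /* irq-safe: also taken from irq context */
static DEFINE_SPINLOCK(inner_lock);
static unsigned long outer_events, inner_events;

/* Nested path: inner_lock is taken while outer_lock is held, irqs off. */
static void demo_outer_then_inner(void)
{
    unsigned long flags;

    spin_lock_irqsave(&outer_lock, flags);
    outer_events++;

    spin_lock(&inner_lock);             /* irqs are already disabled here */
    inner_events++;
    spin_unlock(&inner_lock);

    spin_unlock_irqrestore(&outer_lock, flags);
}

/* Standalone path: must also disable irqs to rule out the deadlock above. */
static void demo_inner_alone(void)
{
    unsigned long flags;

    spin_lock_irqsave(&inner_lock, flags);
    inner_events++;
    spin_unlock_irqrestore(&inner_lock, flags);
}

Making every acquisition of the inner lock irq-safe, as the patch does for q->stats->lock, closes the window in which an interrupt can arrive while the inner lock is held, so the inversion above can no longer occur.]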