Diffstat (limited to 'block')
-rw-r--r--  block/badblocks.c        |  2
-rw-r--r--  block/bfq-cgroup.c       |  2
-rw-r--r--  block/bfq-iosched.c      |  4
-rw-r--r--  block/bfq-iosched.h      |  1
-rw-r--r--  block/bfq-wf2q.c         | 12
-rw-r--r--  block/bio.c              | 10
-rw-r--r--  block/blk-cgroup.c       |  8
-rw-r--r--  block/blk-core.c         |  1
-rw-r--r--  block/blk-iocost.c       |  5
-rw-r--r--  block/blk-merge.c        | 13
-rw-r--r--  block/blk-mq-sched.c     |  9
-rw-r--r--  block/blk-mq.c           | 13
-rw-r--r--  block/blk-stat.c         | 17
-rw-r--r--  block/blk-wbt.c          |  2
-rw-r--r--  block/bsg-lib.c          |  2
-rw-r--r--  block/ioprio.c           |  2
-rw-r--r--  block/partitions/core.c  | 37
17 files changed, 87 insertions, 53 deletions
diff --git a/block/badblocks.c b/block/badblocks.c
index 2e5f5697db35..d39056630d9c 100644
--- a/block/badblocks.c
+++ b/block/badblocks.c
@@ -525,7 +525,7 @@ ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len,
 	case 3:
 		if (newline != '\n')
 			return -EINVAL;
-		/* fall through */
+		fallthrough;
 	case 2:
 		if (length <= 0)
 			return -EINVAL;
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 68882b9b8f11..b791e2041e49 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -332,7 +332,7 @@ static void bfqg_put(struct bfq_group *bfqg)
 		kfree(bfqg);
 }
 
-void bfqg_and_blkg_get(struct bfq_group *bfqg)
+static void bfqg_and_blkg_get(struct bfq_group *bfqg)
 {
 	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
 	bfqg_get(bfqg);
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index a4c0bec920cb..c34b090178a9 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -4980,7 +4980,7 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
 		pr_err("bdi %s: bfq: bad prio class %d\n",
 			bdi_dev_name(bfqq->bfqd->queue->backing_dev_info),
 			ioprio_class);
-		/* fall through */
+		fallthrough;
 	case IOPRIO_CLASS_NONE:
 		/*
 		 * No prio set, inherit CPU scheduling settings.
@@ -5112,7 +5112,7 @@ static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
 		return &bfqg->async_bfqq[0][ioprio];
 	case IOPRIO_CLASS_NONE:
 		ioprio = IOPRIO_NORM;
-		/* fall through */
+		fallthrough;
 	case IOPRIO_CLASS_BE:
 		return &bfqg->async_bfqq[1][ioprio];
 	case IOPRIO_CLASS_IDLE:
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index cd224aaf9f52..703895224562 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -986,7 +986,6 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
 struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
 struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
-void bfqg_and_blkg_get(struct bfq_group *bfqg);
 void bfqg_and_blkg_put(struct bfq_group *bfqg);
 
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index eb0e2a6daabe..26776bdbdf36 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -533,9 +533,7 @@ static void bfq_get_entity(struct bfq_entity *entity)
 		bfqq->ref++;
 		bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
 			     bfqq, bfqq->ref);
-	} else
-		bfqg_and_blkg_get(container_of(entity, struct bfq_group,
-					       entity));
+	}
 }
 
 /**
@@ -649,14 +647,8 @@ static void bfq_forget_entity(struct bfq_service_tree *st,
 
 	entity->on_st_or_in_serv = false;
 	st->wsum -= entity->weight;
-	if (is_in_service)
-		return;
-
-	if (bfqq)
+	if (bfqq && !is_in_service)
 		bfq_put_queue(bfqq);
-	else
-		bfqg_and_blkg_put(container_of(entity, struct bfq_group,
-					       entity));
 }
 
 /**
diff --git a/block/bio.c b/block/bio.c
index c63ba04bd629..a9931f23d933 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -740,8 +740,8 @@ static inline bool page_is_mergeable(const struct bio_vec *bv,
 		struct page *page, unsigned int len, unsigned int off,
 		bool *same_page)
 {
-	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
-		bv->bv_offset + bv->bv_len - 1;
+	size_t bv_end = bv->bv_offset + bv->bv_len;
+	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
 	phys_addr_t page_addr = page_to_phys(page);
 
 	if (vec_end_addr + 1 != page_addr + off)
@@ -750,9 +750,9 @@ static inline bool page_is_mergeable(const struct bio_vec *bv,
 		return false;
 
 	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
-	if (!*same_page && pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page)
-		return false;
-	return true;
+	if (*same_page)
+		return true;
+	return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
 }
 
 /*
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 619a79b51068..c195365c9817 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1152,13 +1152,15 @@ int blkcg_init_queue(struct request_queue *q)
 	if (preloaded)
 		radix_tree_preload_end();
 
-	ret = blk_iolatency_init(q);
+	ret = blk_throtl_init(q);
 	if (ret)
 		goto err_destroy_all;
 
-	ret = blk_throtl_init(q);
-	if (ret)
+	ret = blk_iolatency_init(q);
+	if (ret) {
+		blk_throtl_exit(q);
 		goto err_destroy_all;
+	}
 	return 0;
 
 err_destroy_all:
diff --git a/block/blk-core.c b/block/blk-core.c
index d9d632639bd1..10c08ac50697 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -539,6 +539,7 @@ struct request_queue *blk_alloc_queue(int node_id)
 		goto fail_stats;
 
 	q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES;
+	q->backing_dev_info->io_pages = VM_READAHEAD_PAGES;
 	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
 	q->node = node_id;
 
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 413e0b5c8e6b..d37b55db2409 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -2092,14 +2092,15 @@ static void ioc_pd_free(struct blkg_policy_data *pd)
 {
 	struct ioc_gq *iocg = pd_to_iocg(pd);
 	struct ioc *ioc = iocg->ioc;
+	unsigned long flags;
 
 	if (ioc) {
-		spin_lock(&ioc->lock);
+		spin_lock_irqsave(&ioc->lock, flags);
 		if (!list_empty(&iocg->active_list)) {
 			propagate_active_weight(iocg, 0, 0);
 			list_del_init(&iocg->active_list);
 		}
-		spin_unlock(&ioc->lock);
+		spin_unlock_irqrestore(&ioc->lock, flags);
 
 		hrtimer_cancel(&iocg->waitq_timer);
 		hrtimer_cancel(&iocg->delay_timer);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 6529e3aab001..f685d633bcc9 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -154,7 +154,7 @@ static inline unsigned get_max_io_size(struct request_queue *q,
 	if (max_sectors > start_offset)
 		return max_sectors - start_offset;
 
-	return sectors & (lbs - 1);
+	return sectors & ~(lbs - 1);
 }
 
 static inline unsigned get_max_segment_size(const struct request_queue *q,
@@ -533,10 +533,17 @@ int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(__blk_rq_map_sg);
 
+static inline unsigned int blk_rq_get_max_segments(struct request *rq)
+{
+	if (req_op(rq) == REQ_OP_DISCARD)
+		return queue_max_discard_segments(rq->q);
+	return queue_max_segments(rq->q);
+}
+
 static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
 		unsigned int nr_phys_segs)
 {
-	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(req->q))
+	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
 		goto no_merge;
 
 	if (blk_integrity_merge_bio(req->q, req, bio) == false)
@@ -624,7 +631,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 		return 0;
 
 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
-	if (total_phys_segments > queue_max_segments(q))
+	if (total_phys_segments > blk_rq_get_max_segments(req))
 		return 0;
 
 	if (blk_integrity_merge_rq(q, req, next) == false)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index a19cdf159b75..d2790e5b06d1 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -78,6 +78,15 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
 		return;
 	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
 
+	/*
+	 * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch)
+	 * in blk_mq_run_hw_queue(). Its pair is the barrier in
+	 * blk_mq_dispatch_rq_list(). So dispatch code won't see SCHED_RESTART,
+	 * meantime new request added to hctx->dispatch is missed to check in
+	 * blk_mq_run_hw_queue().
+	 */
+	smp_mb();
+
 	blk_mq_run_hw_queue(hctx, true);
 }
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0015a1892153..b3d2785eefe9 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1438,6 +1438,15 @@ out:
 	spin_unlock(&hctx->lock);
 
 	/*
+	 * Order adding requests to hctx->dispatch and checking
+	 * SCHED_RESTART flag. The pair of this smp_mb() is the one
+	 * in blk_mq_sched_restart(). Avoid restart code path to
+	 * miss the new added requests to hctx->dispatch, meantime
+	 * SCHED_RESTART is observed here.
+	 */
+	smp_mb();
+
+	/*
 	 * If SCHED_RESTART was set by the caller of this function and
 	 * it is no longer set that means that it was cleared by another
 	 * thread and hence that a queue rerun is needed.
@@ -1834,6 +1843,7 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 /**
  * blk_mq_request_bypass_insert - Insert a request at dispatch list.
  * @rq: Pointer to request to be inserted.
+ * @at_head: true if the request should be inserted at the head of the list.
  * @run_queue: If we should run the hardware queue after inserting the request.
  *
  * Should only be used carefully, when the caller knows we want to
@@ -2016,7 +2026,8 @@ insert:
 	if (bypass_insert)
 		return BLK_STS_RESOURCE;
 
-	blk_mq_request_bypass_insert(rq, false, run_queue);
+	blk_mq_sched_insert_request(rq, false, run_queue, false);
+
 	return BLK_STS_OK;
 }
 
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 7da302ff88d0..ae3dd1fb8e61 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -137,6 +137,7 @@ void blk_stat_add_callback(struct request_queue *q,
 			   struct blk_stat_callback *cb)
 {
 	unsigned int bucket;
+	unsigned long flags;
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
@@ -147,20 +148,22 @@ void blk_stat_add_callback(struct request_queue *q,
 			blk_rq_stat_init(&cpu_stat[bucket]);
 	}
 
-	spin_lock(&q->stats->lock);
+	spin_lock_irqsave(&q->stats->lock, flags);
 	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
 	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
-	spin_unlock(&q->stats->lock);
+	spin_unlock_irqrestore(&q->stats->lock, flags);
 }
 
 void blk_stat_remove_callback(struct request_queue *q,
 			      struct blk_stat_callback *cb)
 {
-	spin_lock(&q->stats->lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->stats->lock, flags);
 	list_del_rcu(&cb->list);
 	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
 		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
-	spin_unlock(&q->stats->lock);
+	spin_unlock_irqrestore(&q->stats->lock, flags);
 
 	del_timer_sync(&cb->timer);
 }
@@ -183,10 +186,12 @@ void blk_stat_free_callback(struct blk_stat_callback *cb)
 
 void blk_stat_enable_accounting(struct request_queue *q)
 {
-	spin_lock(&q->stats->lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->stats->lock, flags);
 	q->stats->enable_accounting = true;
 	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
-	spin_unlock(&q->stats->lock);
+	spin_unlock_irqrestore(&q->stats->lock, flags);
 }
 EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);
 
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 0fa615eefd52..fd410086fe1d 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -528,7 +528,7 @@ static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
 		if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
 		    (REQ_SYNC | REQ_IDLE))
 			return false;
-		/* fallthrough */
+		fallthrough;
 	case REQ_OP_DISCARD:
 		return true;
 	default:
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index fb7b347f8010..d185396d88bb 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -378,7 +378,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
 	bset->timeout_fn = timeout;
 
 	set = &bset->tag_set;
-	set->ops = &bsg_mq_ops,
+	set->ops = &bsg_mq_ops;
 	set->nr_hw_queues = 1;
 	set->queue_depth = 128;
 	set->numa_node = NUMA_NO_NODE;
diff --git a/block/ioprio.c b/block/ioprio.c
index 77bcab11dce5..04ebd37966f1 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -71,7 +71,7 @@ int ioprio_check_cap(int ioprio)
 		case IOPRIO_CLASS_RT:
 			if (!capable(CAP_SYS_ADMIN))
 				return -EPERM;
-			/* fall through */
+			fallthrough;
 			/* rt has prio field too */
 		case IOPRIO_CLASS_BE:
 			if (data >= IOPRIO_BE_NR || data < 0)
diff --git a/block/partitions/core.c b/block/partitions/core.c
index e62a98a8eeb7..5b4869c08fb3 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -278,6 +278,15 @@ static void hd_struct_free_work(struct work_struct *work)
 {
 	struct hd_struct *part =
 		container_of(to_rcu_work(work), struct hd_struct, rcu_work);
+	struct gendisk *disk = part_to_disk(part);
+
+	/*
+	 * Release the disk reference acquired in delete_partition here.
+	 * We can't release it in hd_struct_free because the final put_device
+	 * needs process context and thus can't be run directly from a
+	 * percpu_ref ->release handler.
+	 */
+	put_device(disk_to_dev(disk));
 
 	part->start_sect = 0;
 	part->nr_sects = 0;
@@ -293,7 +302,6 @@ static void hd_struct_free(struct percpu_ref *ref)
 		rcu_dereference_protected(disk->part_tbl, 1);
 
 	rcu_assign_pointer(ptbl->last_lookup, NULL);
-	put_device(disk_to_dev(disk));
 
 	INIT_RCU_WORK(&part->rcu_work, hd_struct_free_work);
 	queue_rcu_work(system_wq, &part->rcu_work);
@@ -524,19 +532,20 @@ int bdev_add_partition(struct block_device *bdev, int partno,
 int bdev_del_partition(struct block_device *bdev, int partno)
 {
 	struct block_device *bdevp;
-	struct hd_struct *part;
-	int ret = 0;
+	struct hd_struct *part = NULL;
+	int ret;
 
-	part = disk_get_part(bdev->bd_disk, partno);
-	if (!part)
-		return -ENXIO;
-
-	ret = -ENOMEM;
-	bdevp = bdget(part_devt(part));
+	bdevp = bdget_disk(bdev->bd_disk, partno);
 	if (!bdevp)
-		goto out_put_part;
+		return -ENOMEM;
 
 	mutex_lock(&bdevp->bd_mutex);
+	mutex_lock_nested(&bdev->bd_mutex, 1);
+
+	ret = -ENXIO;
+	part = disk_get_part(bdev->bd_disk, partno);
+	if (!part)
+		goto out_unlock;
 
 	ret = -EBUSY;
 	if (bdevp->bd_openers)
@@ -545,16 +554,14 @@ int bdev_del_partition(struct block_device *bdev, int partno)
 	sync_blockdev(bdevp);
 	invalidate_bdev(bdevp);
 
-	mutex_lock_nested(&bdev->bd_mutex, 1);
 	delete_partition(bdev->bd_disk, part);
-	mutex_unlock(&bdev->bd_mutex);
-
 	ret = 0;
 out_unlock:
+	mutex_unlock(&bdev->bd_mutex);
 	mutex_unlock(&bdevp->bd_mutex);
 	bdput(bdevp);
-out_put_part:
-	disk_put_part(part);
+	if (part)
+		disk_put_part(part);
 	return ret;
 }