author     Linus Torvalds <torvalds@linux-foundation.org>    2018-10-26 12:43:13 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2018-10-26 12:43:13 -0700
commit     6080ad3a9941e4707bb929445b813fadca9a27ff
tree       1a67692494cbbeee8ddc988c81903fde8876be99
parent     b27186abb37b7bd19e0ca434f4f425c807dbd708
parent     a435ab4f80f983c53b4ca4f8c12b3ddd3ca17670
download   linux-6080ad3a9941e4707bb929445b813fadca9a27ff.tar.bz2
Merge tag 'for-linus-20181026' of git://git.kernel.dk/linux-block
Pull more block layer updates from Jens Axboe:
- Set of patches improving support for zoned devices. This was ready
before the merge window, but I was late in picking it up and hence it
missed the original pull request (Damien, Christoph)
- libata no link power management quirk addition for a Samsung drive
(Diego Viola)
- Fix for a performance regression in BFQ that went into this merge
window (Federico Motta)
- Fix for a missing dma mask setting return value check (Gustavo)
- Typo in the gdrom queue failure case (me)
- NULL pointer deref fix for xen-blkfront (Vasilis Liaskovitis)
- Fixing the get_rq trace point placement in blk-mq (Xiaoguang Wang)
- Removal of a set-but-not-read variable in cdrom (zhong jiang)
* tag 'for-linus-20181026' of git://git.kernel.dk/linux-block:
libata: Apply NOLPM quirk for SAMSUNG MZ7TD256HAFV-000L9
block, bfq: fix asymmetric scenarios detection
gdrom: fix mistake in assignment of error
blk-mq: place trace_block_getrq() in correct place
block: Introduce blk_revalidate_disk_zones()
block: add a report_zones method
block: Expose queue nr_zones in sysfs
block: Improve zone reset execution
block: Introduce BLKGETNRZONES ioctl
block: Introduce BLKGETZONESZ ioctl
block: Limit allocation of zone descriptors for report zones
block: Introduce blkdev_nr_zones() helper
scsi: sd_zbc: Fix sd_zbc_check_zones() error checks
scsi: sd_zbc: Reduce boot device scan and revalidate time
scsi: sd_zbc: Rearrange code
cdrom: remove set but not used variable 'tocuse'
skd: fix unchecked return values
xen/blkfront: avoid NULL blkfront_info dereference on device removal
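
The BLKGETZONESZ and BLKGETNRZONES entries above add two read-only ioctls that each return a single unsigned integer: the zone size in 512B sectors and the number of zones, both reported as 0 for non-zoned devices. As a rough userspace sketch only — assuming headers rebuilt against this series so that <linux/blkzoned.h> defines the two commands — querying them could look like this:

```c
/* Minimal sketch: query zone geometry via the two new ioctls. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(int argc, char **argv)
{
	unsigned int zone_sectors = 0, nr_zones = 0;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <blockdev>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Both ioctls fill in an unsigned int; 0 means the device is not zoned. */
	if (ioctl(fd, BLKGETZONESZ, &zone_sectors) < 0 ||
	    ioctl(fd, BLKGETNRZONES, &nr_zones) < 0) {
		perror("ioctl");
		close(fd);
		return 1;
	}

	printf("%s: %u zones of %u sectors\n", argv[1], nr_zones, zone_sectors);
	close(fd);
	return 0;
}
```

Run against a zoned device, this prints the same geometry that is exposed in sysfs through queue/chunk_sectors and the new queue/nr_zones attribute added by this series.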
 block/bfq-wf2q.c               |  18
 block/blk-core.c               |   1
 block/blk-lib.c                |  13
 block/blk-mq-debugfs.c         |   1
 block/blk-mq.c                 |   4
 block/blk-sysfs.c              |  13
 block/blk-zoned.c              | 359
 block/blk.h                    |   8
 block/ioctl.c                  |   4
 drivers/ata/libata-core.c      |   1
 drivers/block/null_blk.h       |  11
 drivers/block/null_blk_main.c  |  30
 drivers/block/null_blk_zoned.c |  57
 drivers/block/skd_main.c       |   4
 drivers/block/xen-blkfront.c   |   3
 drivers/cdrom/gdrom.c          |   8
 drivers/md/dm-flakey.c         |  30
 drivers/md/dm-linear.c         |  35
 drivers/md/dm-table.c          |  10
 drivers/md/dm-zoned-target.c   |   3
 drivers/md/dm.c                | 169
 drivers/scsi/sd.c              |  15
 drivers/scsi/sd.h              |  15
 drivers/scsi/sd_zbc.c          | 501
 include/linux/blk_types.h      |   2
 include/linux/blkdev.h         |  30
 include/linux/device-mapper.h  |  12
 include/trace/events/f2fs.h    |   1
 include/uapi/linux/blkzoned.h  |   3
 29 files changed, 620 insertions(+), 741 deletions(-)
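
Before the diff itself, a condensed view of the new driver-side contract: REQ_OP_ZONE_REPORT BIOs are removed, and zoned drivers instead implement a report_zones method in their block_device_operations and call blk_revalidate_disk_zones() so the block layer can rebuild the queue zone bitmaps. The sketch below is loosely modeled on the null_blk changes further down; foo_dev, its fields, and foo_register_disk() are hypothetical stand-ins for illustration, not code from the patch.

```c
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/string.h>

/* Hypothetical driver state; only the fields used here are shown. */
struct foo_dev {
	struct blk_zone *zones;		/* cached descriptors, one per zone */
	unsigned int nr_zones;
	unsigned int zone_shift;	/* ilog2(zone size in 512B sectors) */
};

/* New ->report_zones hook: copy out up to *nr_zones descriptors from @sector on. */
static int foo_report_zones(struct gendisk *disk, sector_t sector,
			    struct blk_zone *zones, unsigned int *nr_zones,
			    gfp_t gfp_mask)
{
	struct foo_dev *dev = disk->private_data;
	unsigned int zno = sector >> dev->zone_shift;
	unsigned int nrz = 0;

	if (zno < dev->nr_zones) {
		nrz = min_t(unsigned int, *nr_zones, dev->nr_zones - zno);
		memcpy(zones, &dev->zones[zno], nrz * sizeof(struct blk_zone));
	}
	*nr_zones = nrz;
	return 0;
}

static const struct block_device_operations foo_fops = {
	.owner		= THIS_MODULE,
	.report_zones	= foo_report_zones,
};

/* At disk registration, after the zoned queue limits (chunk_sectors, ...) are set. */
static int foo_register_disk(struct gendisk *disk)
{
	int ret = blk_revalidate_disk_zones(disk);

	if (ret)
		return ret;
	add_disk(disk);
	return 0;
}
```

For BIO-based queues, blk_revalidate_disk_zones() only refreshes q->nr_zones (as the comment in the new block/blk-zoned.c code below notes), so the bitmap allocation cost is limited to request-based drivers.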
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c index 476b5a90a5a4..4b0d5fb69160 100644 --- a/block/bfq-wf2q.c +++ b/block/bfq-wf2q.c @@ -792,24 +792,18 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, * queue, remove the entity from its old weight counter (if * there is a counter associated with the entity). */ - if (prev_weight != new_weight) { - if (bfqq) { - root = &bfqd->queue_weights_tree; - __bfq_weights_tree_remove(bfqd, bfqq, root); - } else - bfqd->num_active_groups--; + if (prev_weight != new_weight && bfqq) { + root = &bfqd->queue_weights_tree; + __bfq_weights_tree_remove(bfqd, bfqq, root); } entity->weight = new_weight; /* * Add the entity, if it is not a weight-raised queue, * to the counter associated with its new weight. */ - if (prev_weight != new_weight) { - if (bfqq && bfqq->wr_coeff == 1) { - /* If we get here, root has been initialized. */ - bfq_weights_tree_add(bfqd, bfqq, root); - } else - bfqd->num_active_groups++; + if (prev_weight != new_weight && bfqq && bfqq->wr_coeff == 1) { + /* If we get here, root has been initialized. */ + bfq_weights_tree_add(bfqd, bfqq, root); } new_st->wsum += entity->weight; diff --git a/block/blk-core.c b/block/blk-core.c index 3ed60723e242..bc6ea87d10e0 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2300,7 +2300,6 @@ generic_make_request_checks(struct bio *bio) if (!q->limits.max_write_same_sectors) goto not_supported; break; - case REQ_OP_ZONE_REPORT: case REQ_OP_ZONE_RESET: if (!blk_queue_is_zoned(q)) goto not_supported; diff --git a/block/blk-lib.c b/block/blk-lib.c index bbd44666f2b5..76f867ea9a9b 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c @@ -10,8 +10,7 @@ #include "blk.h" -static struct bio *next_bio(struct bio *bio, unsigned int nr_pages, - gfp_t gfp) +struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp) { struct bio *new = bio_alloc(gfp, nr_pages); @@ -63,7 +62,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, end_sect = sector + req_sects; - bio = next_bio(bio, 0, gfp_mask); + bio = blk_next_bio(bio, 0, gfp_mask); bio->bi_iter.bi_sector = sector; bio_set_dev(bio, bdev); bio_set_op_attrs(bio, op, 0); @@ -165,7 +164,7 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector, max_write_same_sectors = UINT_MAX >> 9; while (nr_sects) { - bio = next_bio(bio, 1, gfp_mask); + bio = blk_next_bio(bio, 1, gfp_mask); bio->bi_iter.bi_sector = sector; bio_set_dev(bio, bdev); bio->bi_vcnt = 1; @@ -241,7 +240,7 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev, return -EOPNOTSUPP; while (nr_sects) { - bio = next_bio(bio, 0, gfp_mask); + bio = blk_next_bio(bio, 0, gfp_mask); bio->bi_iter.bi_sector = sector; bio_set_dev(bio, bdev); bio->bi_opf = REQ_OP_WRITE_ZEROES; @@ -292,8 +291,8 @@ static int __blkdev_issue_zero_pages(struct block_device *bdev, return -EPERM; while (nr_sects != 0) { - bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects), - gfp_mask); + bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects), + gfp_mask); bio->bi_iter.bi_sector = sector; bio_set_dev(bio, bdev); bio_set_op_attrs(bio, REQ_OP_WRITE, 0); diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 41b86f50d126..10b284a1f18d 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -283,7 +283,6 @@ static const char *const op_name[] = { REQ_OP_NAME(WRITE), REQ_OP_NAME(FLUSH), REQ_OP_NAME(DISCARD), - REQ_OP_NAME(ZONE_REPORT), REQ_OP_NAME(SECURE_ERASE), REQ_OP_NAME(ZONE_RESET), REQ_OP_NAME(WRITE_SAME), 
diff --git a/block/blk-mq.c b/block/blk-mq.c index dcf10e39995a..3f91c6e5b17a 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1850,8 +1850,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) rq_qos_throttle(q, bio, NULL); - trace_block_getrq(q, bio, bio->bi_opf); - rq = blk_mq_get_request(q, bio, bio->bi_opf, &data); if (unlikely(!rq)) { rq_qos_cleanup(q, bio); @@ -1860,6 +1858,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) return BLK_QC_T_NONE; } + trace_block_getrq(q, bio, bio->bi_opf); + rq_qos_track(q, rq, bio); cookie = request_to_qc_t(data.hctx, rq); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 3772671cf2bc..0641533597f1 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -300,6 +300,11 @@ static ssize_t queue_zoned_show(struct request_queue *q, char *page) } } +static ssize_t queue_nr_zones_show(struct request_queue *q, char *page) +{ + return queue_var_show(blk_queue_nr_zones(q), page); +} + static ssize_t queue_nomerges_show(struct request_queue *q, char *page) { return queue_var_show((blk_queue_nomerges(q) << 1) | @@ -637,6 +642,11 @@ static struct queue_sysfs_entry queue_zoned_entry = { .show = queue_zoned_show, }; +static struct queue_sysfs_entry queue_nr_zones_entry = { + .attr = {.name = "nr_zones", .mode = 0444 }, + .show = queue_nr_zones_show, +}; + static struct queue_sysfs_entry queue_nomerges_entry = { .attr = {.name = "nomerges", .mode = 0644 }, .show = queue_nomerges_show, @@ -727,6 +737,7 @@ static struct attribute *default_attrs[] = { &queue_write_zeroes_max_entry.attr, &queue_nonrot_entry.attr, &queue_zoned_entry.attr, + &queue_nr_zones_entry.attr, &queue_nomerges_entry.attr, &queue_rq_affinity_entry.attr, &queue_iostats_entry.attr, @@ -841,6 +852,8 @@ static void __blk_release_queue(struct work_struct *work) if (q->queue_tags) __blk_queue_free_tags(q); + blk_queue_free_zone_bitmaps(q); + if (!q->mq_ops) { if (q->exit_rq_fn) q->exit_rq_fn(q, q->fq->flush_rq); diff --git a/block/blk-zoned.c b/block/blk-zoned.c index c461cf63f1f4..13ba2011a306 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -12,6 +12,9 @@ #include <linux/module.h> #include <linux/rbtree.h> #include <linux/blkdev.h> +#include <linux/blk-mq.h> + +#include "blk.h" static inline sector_t blk_zone_start(struct request_queue *q, sector_t sector) @@ -63,14 +66,38 @@ void __blk_req_zone_write_unlock(struct request *rq) } EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock); +static inline unsigned int __blkdev_nr_zones(struct request_queue *q, + sector_t nr_sectors) +{ + unsigned long zone_sectors = blk_queue_zone_sectors(q); + + return (nr_sectors + zone_sectors - 1) >> ilog2(zone_sectors); +} + +/** + * blkdev_nr_zones - Get number of zones + * @bdev: Target block device + * + * Description: + * Return the total number of zones of a zoned block device. + * For a regular block device, the number of zones is always 0. + */ +unsigned int blkdev_nr_zones(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (!blk_queue_is_zoned(q)) + return 0; + + return __blkdev_nr_zones(q, bdev->bd_part->nr_sects); +} +EXPORT_SYMBOL_GPL(blkdev_nr_zones); + /* - * Check that a zone report belongs to the partition. - * If yes, fix its start sector and write pointer, copy it in the - * zone information array and return true. Return false otherwise. + * Check that a zone report belongs to this partition, and if yes, fix its start + * sector and write pointer and return true. Return false otherwise. 
*/ -static bool blkdev_report_zone(struct block_device *bdev, - struct blk_zone *rep, - struct blk_zone *zone) +static bool blkdev_report_zone(struct block_device *bdev, struct blk_zone *rep) { sector_t offset = get_start_sect(bdev); @@ -85,11 +112,36 @@ static bool blkdev_report_zone(struct block_device *bdev, rep->wp = rep->start + rep->len; else rep->wp -= offset; - memcpy(zone, rep, sizeof(struct blk_zone)); - return true; } +static int blk_report_zones(struct gendisk *disk, sector_t sector, + struct blk_zone *zones, unsigned int *nr_zones, + gfp_t gfp_mask) +{ + struct request_queue *q = disk->queue; + unsigned int z = 0, n, nrz = *nr_zones; + sector_t capacity = get_capacity(disk); + int ret; + + while (z < nrz && sector < capacity) { + n = nrz - z; + ret = disk->fops->report_zones(disk, sector, &zones[z], &n, + gfp_mask); + if (ret) + return ret; + if (!n) + break; + sector += blk_queue_zone_sectors(q) * n; + z += n; + } + + WARN_ON(z > *nr_zones); + *nr_zones = z; + + return 0; +} + /** * blkdev_report_zones - Get zones information * @bdev: Target block device @@ -104,130 +156,46 @@ static bool blkdev_report_zone(struct block_device *bdev, * requested by @nr_zones. The number of zones actually reported is * returned in @nr_zones. */ -int blkdev_report_zones(struct block_device *bdev, - sector_t sector, - struct blk_zone *zones, - unsigned int *nr_zones, +int blkdev_report_zones(struct block_device *bdev, sector_t sector, + struct blk_zone *zones, unsigned int *nr_zones, gfp_t gfp_mask) { struct request_queue *q = bdev_get_queue(bdev); - struct blk_zone_report_hdr *hdr; - unsigned int nrz = *nr_zones; - struct page *page; - unsigned int nr_rep; - size_t rep_bytes; - unsigned int nr_pages; - struct bio *bio; - struct bio_vec *bv; - unsigned int i, n, nz; - unsigned int ofst; - void *addr; + unsigned int i, nrz; int ret; - if (!q) - return -ENXIO; - if (!blk_queue_is_zoned(q)) return -EOPNOTSUPP; - if (!nrz) - return 0; - - if (sector > bdev->bd_part->nr_sects) { - *nr_zones = 0; - return 0; - } - /* - * The zone report has a header. So make room for it in the - * payload. Also make sure that the report fits in a single BIO - * that will not be split down the stack. + * A block device that advertized itself as zoned must have a + * report_zones method. If it does not have one defined, the device + * driver has a bug. So warn about that. 
*/ - rep_bytes = sizeof(struct blk_zone_report_hdr) + - sizeof(struct blk_zone) * nrz; - rep_bytes = (rep_bytes + PAGE_SIZE - 1) & PAGE_MASK; - if (rep_bytes > (queue_max_sectors(q) << 9)) - rep_bytes = queue_max_sectors(q) << 9; - - nr_pages = min_t(unsigned int, BIO_MAX_PAGES, - rep_bytes >> PAGE_SHIFT); - nr_pages = min_t(unsigned int, nr_pages, - queue_max_segments(q)); - - bio = bio_alloc(gfp_mask, nr_pages); - if (!bio) - return -ENOMEM; - - bio_set_dev(bio, bdev); - bio->bi_iter.bi_sector = blk_zone_start(q, sector); - bio_set_op_attrs(bio, REQ_OP_ZONE_REPORT, 0); + if (WARN_ON_ONCE(!bdev->bd_disk->fops->report_zones)) + return -EOPNOTSUPP; - for (i = 0; i < nr_pages; i++) { - page = alloc_page(gfp_mask); - if (!page) { - ret = -ENOMEM; - goto out; - } - if (!bio_add_page(bio, page, PAGE_SIZE, 0)) { - __free_page(page); - break; - } + if (!*nr_zones || sector >= bdev->bd_part->nr_sects) { + *nr_zones = 0; + return 0; } - if (i == 0) - ret = -ENOMEM; - else - ret = submit_bio_wait(bio); + nrz = min(*nr_zones, + __blkdev_nr_zones(q, bdev->bd_part->nr_sects - sector)); + ret = blk_report_zones(bdev->bd_disk, get_start_sect(bdev) + sector, + zones, &nrz, gfp_mask); if (ret) - goto out; - - /* - * Process the report result: skip the header and go through the - * reported zones to fixup and fixup the zone information for - * partitions. At the same time, return the zone information into - * the zone array. - */ - n = 0; - nz = 0; - nr_rep = 0; - bio_for_each_segment_all(bv, bio, i) { - - if (!bv->bv_page) - break; - - addr = kmap_atomic(bv->bv_page); - - /* Get header in the first page */ - ofst = 0; - if (!nr_rep) { - hdr = addr; - nr_rep = hdr->nr_zones; - ofst = sizeof(struct blk_zone_report_hdr); - } - - /* Fixup and report zones */ - while (ofst < bv->bv_len && - n < nr_rep && nz < nrz) { - if (blkdev_report_zone(bdev, addr + ofst, &zones[nz])) - nz++; - ofst += sizeof(struct blk_zone); - n++; - } - - kunmap_atomic(addr); + return ret; - if (n >= nr_rep || nz >= nrz) + for (i = 0; i < nrz; i++) { + if (!blkdev_report_zone(bdev, zones)) break; - + zones++; } - *nr_zones = nz; -out: - bio_for_each_segment_all(bv, bio, i) - __free_page(bv->bv_page); - bio_put(bio); + *nr_zones = i; - return ret; + return 0; } EXPORT_SYMBOL_GPL(blkdev_report_zones); @@ -250,16 +218,17 @@ int blkdev_reset_zones(struct block_device *bdev, struct request_queue *q = bdev_get_queue(bdev); sector_t zone_sectors; sector_t end_sector = sector + nr_sectors; - struct bio *bio; + struct bio *bio = NULL; + struct blk_plug plug; int ret; - if (!q) - return -ENXIO; - if (!blk_queue_is_zoned(q)) return -EOPNOTSUPP; - if (end_sector > bdev->bd_part->nr_sects) + if (bdev_read_only(bdev)) + return -EPERM; + + if (!nr_sectors || end_sector > bdev->bd_part->nr_sects) /* Out of range */ return -EINVAL; @@ -272,19 +241,14 @@ int blkdev_reset_zones(struct block_device *bdev, end_sector != bdev->bd_part->nr_sects) return -EINVAL; + blk_start_plug(&plug); while (sector < end_sector) { - bio = bio_alloc(gfp_mask, 0); + bio = blk_next_bio(bio, 0, gfp_mask); bio->bi_iter.bi_sector = sector; bio_set_dev(bio, bdev); bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0); - ret = submit_bio_wait(bio); - bio_put(bio); - - if (ret) - return ret; - sector += zone_sectors; /* This may take a while, so be nice to others */ @@ -292,7 +256,12 @@ int blkdev_reset_zones(struct block_device *bdev, } - return 0; + ret = submit_bio_wait(bio); + bio_put(bio); + + blk_finish_plug(&plug); + + return ret; } EXPORT_SYMBOL_GPL(blkdev_reset_zones); @@ -328,8 
+297,7 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, if (!rep.nr_zones) return -EINVAL; - if (rep.nr_zones > INT_MAX / sizeof(struct blk_zone)) - return -ERANGE; + rep.nr_zones = min(blkdev_nr_zones(bdev), rep.nr_zones); zones = kvmalloc_array(rep.nr_zones, sizeof(struct blk_zone), GFP_KERNEL | __GFP_ZERO); @@ -392,3 +360,138 @@ int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode, return blkdev_reset_zones(bdev, zrange.sector, zrange.nr_sectors, GFP_KERNEL); } + +static inline unsigned long *blk_alloc_zone_bitmap(int node, + unsigned int nr_zones) +{ + return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long), + GFP_NOIO, node); +} + +/* + * Allocate an array of struct blk_zone to get nr_zones zone information. + * The allocated array may be smaller than nr_zones. + */ +static struct blk_zone *blk_alloc_zones(int node, unsigned int *nr_zones) +{ + size_t size = *nr_zones * sizeof(struct blk_zone); + struct page *page; + int order; + + for (order = get_order(size); order > 0; order--) { + page = alloc_pages_node(node, GFP_NOIO | __GFP_ZERO, order); + if (page) { + *nr_zones = min_t(unsigned int, *nr_zones, + (PAGE_SIZE << order) / sizeof(struct blk_zone)); + return page_address(page); + } + } + + return NULL; +} + +void blk_queue_free_zone_bitmaps(struct request_queue *q) +{ + kfree(q->seq_zones_bitmap); + q->seq_zones_bitmap = NULL; + kfree(q->seq_zones_wlock); + q->seq_zones_wlock = NULL; +} + +/** + * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps + * @disk: Target disk + * + * Helper function for low-level device drivers to (re) allocate and initialize + * a disk request queue zone bitmaps. This functions should normally be called + * within the disk ->revalidate method. For BIO based queues, no zone bitmap + * is allocated. + */ +int blk_revalidate_disk_zones(struct gendisk *disk) +{ + struct request_queue *q = disk->queue; + unsigned int nr_zones = __blkdev_nr_zones(q, get_capacity(disk)); + unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL; + unsigned int i, rep_nr_zones = 0, z = 0, nrz; + struct blk_zone *zones = NULL; + sector_t sector = 0; + int ret = 0; + + /* + * BIO based queues do not use a scheduler so only q->nr_zones + * needs to be updated so that the sysfs exposed value is correct. + */ + if (!queue_is_rq_based(q)) { + q->nr_zones = nr_zones; + return 0; + } + + if (!blk_queue_is_zoned(q) || !nr_zones) { + nr_zones = 0; + goto update; + } + + /* Allocate bitmaps */ + ret = -ENOMEM; + seq_zones_wlock = blk_alloc_zone_bitmap(q->node, nr_zones); + if (!seq_zones_wlock) + goto out; + seq_zones_bitmap = blk_alloc_zone_bitmap(q->node, nr_zones); + if (!seq_zones_bitmap) + goto out; + + /* Get zone information and initialize seq_zones_bitmap */ + rep_nr_zones = nr_zones; + zones = blk_alloc_zones(q->node, &rep_nr_zones); + if (!zones) + goto out; + + while (z < nr_zones) { + nrz = min(nr_zones - z, rep_nr_zones); + ret = blk_report_zones(disk, sector, zones, &nrz, GFP_NOIO); + if (ret) + goto out; + if (!nrz) + break; + for (i = 0; i < nrz; i++) { + if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL) + set_bit(z, seq_zones_bitmap); + z++; + } + sector += nrz * blk_queue_zone_sectors(q); + } + + if (WARN_ON(z != nr_zones)) { + ret = -EIO; + goto out; + } + +update: + /* + * Install the new bitmaps, making sure the queue is stopped and + * all I/Os are completed (i.e. a scheduler is not referencing the + * bitmaps). 
+ */ + blk_mq_freeze_queue(q); + q->nr_zones = nr_zones; + swap(q->seq_zones_wlock, seq_zones_wlock); + swap(q->seq_zones_bitmap, seq_zones_bitmap); + blk_mq_unfreeze_queue(q); + +out: + free_pages((unsigned long)zones, + get_order(rep_nr_zones * sizeof(struct blk_zone))); + kfree(seq_zones_wlock); + kfree(seq_zones_bitmap); + + if (ret) { + pr_warn("%s: failed to revalidate zones\n", disk->disk_name); + blk_mq_freeze_queue(q); + blk_queue_free_zone_bitmaps(q); + blk_mq_unfreeze_queue(q); + } + + return ret; +} +EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones); + diff --git a/block/blk.h b/block/blk.h index 3d2aecba96a4..a1841b8ff129 100644 --- a/block/blk.h +++ b/block/blk.h @@ -488,4 +488,12 @@ extern int blk_iolatency_init(struct request_queue *q); static inline int blk_iolatency_init(struct request_queue *q) { return 0; } #endif +struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp); + +#ifdef CONFIG_BLK_DEV_ZONED +void blk_queue_free_zone_bitmaps(struct request_queue *q); +#else +static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {} +#endif + #endif /* BLK_INTERNAL_H */ diff --git a/block/ioctl.c b/block/ioctl.c index 3884d810efd2..4825c78a6baa 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -532,6 +532,10 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, return blkdev_report_zones_ioctl(bdev, mode, cmd, arg); case BLKRESETZONE: return blkdev_reset_zones_ioctl(bdev, mode, cmd, arg); + case BLKGETZONESZ: + return put_uint(arg, bdev_zone_sectors(bdev)); + case BLKGETNRZONES: + return put_uint(arg, blkdev_nr_zones(bdev)); case HDIO_GETGEO: return blkdev_getgeo(bdev, argp); case BLKRAGET: diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index a9dd4ea7467d..6e594644cb1d 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4553,6 +4553,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* These specific Samsung models/firmware-revs do not handle LPM well */ { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, }, { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, }, + { "SAMSUNG MZ7TD256HAFV-000L9", "DXT02L5Q", ATA_HORKAGE_NOLPM, }, /* devices that don't properly handle queued TRIM commands */ { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h index 34e0030f0592..7685df43f1ef 100644 --- a/drivers/block/null_blk.h +++ b/drivers/block/null_blk.h @@ -87,7 +87,9 @@ struct nullb { #ifdef CONFIG_BLK_DEV_ZONED int null_zone_init(struct nullb_device *dev); void null_zone_exit(struct nullb_device *dev); -blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio); +int null_zone_report(struct gendisk *disk, sector_t sector, + struct blk_zone *zones, unsigned int *nr_zones, + gfp_t gfp_mask); void null_zone_write(struct nullb_cmd *cmd, sector_t sector, unsigned int nr_sectors); void null_zone_reset(struct nullb_cmd *cmd, sector_t sector); @@ -97,10 +99,11 @@ static inline int null_zone_init(struct nullb_device *dev) return -EINVAL; } static inline void null_zone_exit(struct nullb_device *dev) {} -static inline blk_status_t null_zone_report(struct nullb *nullb, - struct bio *bio) +static inline int null_zone_report(struct gendisk *disk, sector_t sector, + struct blk_zone *zones, + unsigned int *nr_zones, gfp_t gfp_mask) { - return BLK_STS_NOTSUPP; + return -EOPNOTSUPP; } static inline void null_zone_write(struct nullb_cmd *cmd, sector_t sector, unsigned int nr_sectors) diff --git 
a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index e94591021682..09339203dfba 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -1129,34 +1129,12 @@ static void null_restart_queue_async(struct nullb *nullb) blk_mq_start_stopped_hw_queues(q, true); } -static bool cmd_report_zone(struct nullb *nullb, struct nullb_cmd *cmd) -{ - struct nullb_device *dev = cmd->nq->dev; - - if (dev->queue_mode == NULL_Q_BIO) { - if (bio_op(cmd->bio) == REQ_OP_ZONE_REPORT) { - cmd->error = null_zone_report(nullb, cmd->bio); - return true; - } - } else { - if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) { - cmd->error = null_zone_report(nullb, cmd->rq->bio); - return true; - } - } - - return false; -} - static blk_status_t null_handle_cmd(struct nullb_cmd *cmd) { struct nullb_device *dev = cmd->nq->dev; struct nullb *nullb = dev->nullb; int err = 0; - if (cmd_report_zone(nullb, cmd)) - goto out; - if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) { struct request *rq = cmd->rq; @@ -1443,6 +1421,7 @@ static const struct block_device_operations null_fops = { .owner = THIS_MODULE, .open = null_open, .release = null_release, + .report_zones = null_zone_report, }; static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq) @@ -1549,6 +1528,13 @@ static int null_gendisk_register(struct nullb *nullb) disk->queue = nullb->q; strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); + if (nullb->dev->zoned) { + int ret = blk_revalidate_disk_zones(disk); + + if (ret != 0) + return ret; + } + add_disk(disk); return 0; } diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c index 7c6b86d98700..c0b0e4a3fa8f 100644 --- a/drivers/block/null_blk_zoned.c +++ b/drivers/block/null_blk_zoned.c @@ -48,54 +48,27 @@ void null_zone_exit(struct nullb_device *dev) kvfree(dev->zones); } -static void null_zone_fill_bio(struct nullb_device *dev, struct bio *bio, - unsigned int zno, unsigned int nr_zones) +int null_zone_report(struct gendisk *disk, sector_t sector, + struct blk_zone *zones, unsigned int *nr_zones, + gfp_t gfp_mask) { - struct blk_zone_report_hdr *hdr = NULL; - struct bio_vec bvec; - struct bvec_iter iter; - void *addr; - unsigned int zones_to_cpy; - - bio_for_each_segment(bvec, bio, iter) { - addr = kmap_atomic(bvec.bv_page); - - zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone); - - if (!hdr) { - hdr = (struct blk_zone_report_hdr *)addr; - hdr->nr_zones = nr_zones; - zones_to_cpy--; - addr += sizeof(struct blk_zone_report_hdr); - } - - zones_to_cpy = min_t(unsigned int, zones_to_cpy, nr_zones); - - memcpy(addr, &dev->zones[zno], - zones_to_cpy * sizeof(struct blk_zone)); - - kunmap_atomic(addr); + struct nullb *nullb = disk->private_data; + struct nullb_device *dev = nullb->dev; + unsigned int zno, nrz = 0; - nr_zones -= zones_to_cpy; - zno += zones_to_cpy; + if (!dev->zoned) + /* Not a zoned null device */ + return -EOPNOTSUPP; - if (!nr_zones) - break; + zno = null_zone_no(dev, sector); + if (zno < dev->nr_zones) { + nrz = min_t(unsigned int, *nr_zones, dev->nr_zones - zno); + memcpy(zones, &dev->zones[zno], nrz * sizeof(struct blk_zone)); } -} -blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio) -{ - struct nullb_device *dev = nullb->dev; - unsigned int zno = null_zone_no(dev, bio->bi_iter.bi_sector); - unsigned int nr_zones = dev->nr_zones - zno; - unsigned int max_zones; + *nr_zones = nrz; - max_zones = (bio->bi_iter.bi_size / sizeof(struct blk_zone)) - 1; - nr_zones = min_t(unsigned int, nr_zones, max_zones); - 
null_zone_fill_bio(nullb->dev, bio, zno, nr_zones); - - return BLK_STS_OK; + return 0; } void null_zone_write(struct nullb_cmd *cmd, sector_t sector, diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index 7c5fc6942f32..2459dcc04b1c 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -3175,7 +3175,7 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out; rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (rc) - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (rc) { dev_err(&pdev->dev, "DMA mask error %d\n", rc); goto err_out_regions; @@ -3364,7 +3364,7 @@ static int skd_pci_resume(struct pci_dev *pdev) goto err_out; rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (rc) - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (rc) { dev_err(&pdev->dev, "DMA mask error %d\n", rc); goto err_out_regions; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 9eea83ae01c6..56452cabce5b 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -2493,6 +2493,9 @@ static int blkfront_remove(struct xenbus_device *xbdev) dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename); + if (!info) + return 0; + blkif_free(info, 0); mutex_lock(&info->mutex); diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index 757e85b81879..a5b8afe3609c 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -327,15 +327,15 @@ static int get_entry_track(int track) static int gdrom_get_last_session(struct cdrom_device_info *cd_info, struct cdrom_multisession *ms_info) { - int fentry, lentry, track, data, tocuse, err; + int fentry, lentry, track, data, err; + if (!gd.toc) return -ENOMEM; - tocuse = 1; + /* Check if GD-ROM */ err = gdrom_readtoc_cmd(gd.toc, 1); /* Not a GD-ROM so check if standard CD-ROM */ if (err) { - tocuse = 0; err = gdrom_readtoc_cmd(gd.toc, 0); if (err) { pr_info("Could not get CD table of contents\n"); @@ -794,7 +794,7 @@ static int probe_gdrom(struct platform_device *devptr) gd.gdrom_rq = blk_mq_init_sq_queue(&gd.tag_set, &gdrom_mq_ops, 1, BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING); if (IS_ERR(gd.gdrom_rq)) { - rc = PTR_ERR(gd.gdrom_rq); + err = PTR_ERR(gd.gdrom_rq); gd.gdrom_rq = NULL; goto probe_fail_requestq; } diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 32aabe27b37c..3cb97fa4c11d 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -315,10 +315,6 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) if (bio_op(bio) == REQ_OP_ZONE_RESET) goto map_bio; - /* We need to remap reported zones, so remember the BIO iter */ - if (bio_op(bio) == REQ_OP_ZONE_REPORT) - goto map_bio; - /* Are we alive ? 
*/ elapsed = (jiffies - fc->start_time) / HZ; if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) { @@ -380,11 +376,6 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, if (bio_op(bio) == REQ_OP_ZONE_RESET) return DM_ENDIO_DONE; - if (bio_op(bio) == REQ_OP_ZONE_REPORT) { - dm_remap_zone_report(ti, bio, fc->start); - return DM_ENDIO_DONE; - } - if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) { if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) && all_corrupt_bio_flags_match(bio, fc)) { @@ -457,6 +448,26 @@ static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev return 0; } +#ifdef CONFIG_BLK_DEV_ZONED +static int flakey_report_zones(struct dm_target *ti, sector_t sector, + struct blk_zone *zones, unsigned int *nr_zones, + gfp_t gfp_mask) +{ + struct flakey_c *fc = ti->private; + int ret; + + /* Do report and remap it */ + ret = blkdev_report_zones(fc->dev->bdev, flakey_map_sector(ti, sector), + zones, nr_zones, gfp_mask); + if (ret != 0) + return ret; + + if (*nr_zones) + dm_remap_zone_report(ti, fc->start, zones, nr_zones); + return 0; +} +#endif + static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct flakey_c *fc = ti->private; @@ -469,6 +480,7 @@ static struct target_type flakey_target = { .version = {1, 5, 0}, #ifdef CONFIG_BLK_DEV_ZONED .features = DM_TARGET_ZONED_HM, + .report_zones = flakey_report_zones, #endif .module = THIS_MODULE, .ctr = flakey_ctr, diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 2f7c44a006c4..8d7ddee6ac4d 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -102,19 +102,6 @@ static int linear_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_REMAPPED; } -#ifdef CONFIG_BLK_DEV_ZONED -static int linear_end_io(struct dm_target *ti, struct bio *bio, - blk_status_t *error) -{ - struct linear_c *lc = ti->private; - - if (!*error && bio_op(bio) == REQ_OP_ZONE_REPORT) - dm_remap_zone_report(ti, bio, lc->start); - - return DM_ENDIO_DONE; -} -#endif - static void linear_status(struct dm_target *ti, status_type_t type, unsigned status_flags, char *result, unsigned maxlen) { @@ -148,6 +135,26 @@ static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev return 0; } +#ifdef CONFIG_BLK_DEV_ZONED +static int linear_report_zones(struct dm_target *ti, sector_t sector, + struct blk_zone *zones, unsigned int *nr_zones, + gfp_t gfp_mask) +{ + struct linear_c *lc = (struct linear_c *) ti->private; + int ret; + + /* Do report and remap it */ + ret = blkdev_report_zones(lc->dev->bdev, linear_map_sector(ti, sector), + zones, nr_zones, gfp_mask); + if (ret != 0) + return ret; + + if (*nr_zones) + dm_remap_zone_report(ti, lc->start, zones, nr_zones); + return 0; +} +#endif + static int linear_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { @@ -211,8 +218,8 @@ static struct target_type linear_target = { .name = "linear", .version = {1, 4, 0}, #ifdef CONFIG_BLK_DEV_ZONED - .end_io = linear_end_io, .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM, + .report_zones = linear_report_zones, #else .features = DM_TARGET_PASSES_INTEGRITY, #endif diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 3d0e2c198f06..fb4bea20657b 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1937,6 +1937,16 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, */ if (blk_queue_add_random(q) && 
dm_table_all_devices_attribute(t, device_is_not_random)) blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); + + /* + * For a zoned target, the number of zones should be updated for the + * correct value to be exposed in sysfs queue/nr_zones. For a BIO based + * target, this is all that is needed. For a request based target, the + * queue zone bitmaps must also be updated. + * Use blk_revalidate_disk_zones() to handle this. + */ + if (blk_queue_is_zoned(q)) + blk_revalidate_disk_zones(t->md->disk); } unsigned int dm_table_get_num_targets(struct dm_table *t) diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index a44183ff4be0..12d96a263623 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -702,8 +702,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path) dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors); dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks); - dev->nr_zones = (dev->capacity + dev->zone_nr_sectors - 1) - >> dev->zone_nr_sectors_shift; + dev->nr_zones = blkdev_nr_zones(dev->bdev); dmz->dev = dev; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 45abb54037fc..6be21dc210a1 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -458,6 +458,57 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) return dm_get_geometry(md, geo); } +static int dm_blk_report_zones(struct gendisk *disk, sector_t sector, + struct blk_zone *zones, unsigned int *nr_zones, + gfp_t gfp_mask) +{ +#ifdef CONFIG_BLK_DEV_ZONED + struct mapped_device *md = disk->private_data; + struct dm_target *tgt; + struct dm_table *map; + int srcu_idx, ret; + + if (dm_suspended_md(md)) + return -EAGAIN; + + map = dm_get_live_table(md, &srcu_idx); + if (!map) + return -EIO; + + tgt = dm_table_find_target(map, sector); + if (!dm_target_is_valid(tgt)) { + ret = -EIO; + goto out; + } + + /* + * If we are executing this, we already know that the block device + * is a zoned device and so each target should have support for that + * type of drive. A missing report_zones method means that the target + * driver has a problem. + */ + if (WARN_ON(!tgt->type->report_zones)) { + ret = -EIO; + goto out; + } + + /* + * blkdev_report_zones() will loop and call this again to cover all the + * zones of the target, eventually moving on to the next target. + * So there is no need to loop here trying to fill the entire array + * of zones. + */ + ret = tgt->type->report_zones(tgt, sector, zones, + nr_zones, gfp_mask); + +out: + dm_put_live_table(md, srcu_idx); + return ret; +#else + return -ENOTSUPP; +#endif +} + static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, struct block_device **bdev) __acquires(md->io_barrier) @@ -1155,93 +1206,49 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) EXPORT_SYMBOL_GPL(dm_accept_partial_bio); /* - * The zone descriptors obtained with a zone report indicate zone positions - * within the target backing device, regardless of that device is a partition - * and regardless of the target mapping start sector on the device or partition. - * The zone descriptors start sector and write pointer position must be adjusted - * to match their relative position within the dm device. - * A target may call dm_remap_zone_report() after completion of a - * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained from the - * backing device. + * The zone descriptors obtained with a zone report indicate + * zone positions within the underlying device of the target. 
The zone + * descriptors must be remapped to match their position within the dm device. + * The caller target should obtain the zones information using + * blkdev_report_zones() to ensure that remapping for partition offset is + * already handled. */ -void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start) +void dm_remap_zone_report(struct dm_target *ti, sector_t start, + struct blk_zone *zones, unsigned int *nr_zones) { #ifdef CONFIG_BLK_DEV_ZONED - struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); - struct bio *report_bio = tio->io->orig_bio; - struct blk_zone_report_hdr *hdr = NULL; struct blk_zone *zone; - unsigned int nr_rep = 0; - unsigned int ofst; - sector_t part_offset; - struct bio_vec bvec; - struct bvec_iter iter; - void *addr; - - if (bio->bi_status) - return; - - /* - * bio sector was incremented by the request size on completion. Taking - * into account the original request sector, the target start offset on - * the backing device and the target mapping offset (ti->begin), the - * start sector of the backing device. The partition offset is always 0 - * if the target uses a whole device. - */ - part_offset = bio->bi_iter.bi_sector + ti->begin - (start + bio_end_sector(report_bio)); + unsigned int nrz = *nr_zones; + int i; /* - * Remap the start sector of the reported zones. For sequential zones, - * also remap the write pointer position. + * Remap the start sector and write pointer position of the zones in + * the array. Since we may have obtained from the target underlying + * device more zones that the target size, also adjust the number + * of zones. */ - bio_for_each_segment(bvec, report_bio, iter) { - addr = kmap_atomic(bvec.bv_page); - - /* Remember the report header in the first page */ - if (!hdr) { - hdr = addr; - ofst = sizeof(struct blk_zone_report_hdr); - } else - ofst = 0; - - /* Set zones start sector */ - while (hdr->nr_zones && ofst < bvec.bv_len) { - zone = addr + ofst; - zone->start -= part_offset; - if (zone->start >= start + ti->len) { - hdr->nr_zones = 0; - break; - } - zone->start = zone->start + ti->begin - start; - if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) { - if (zone->cond == BLK_ZONE_COND_FULL) - zone->wp = zone->start + zone->len; - else if (zone->cond == BLK_ZONE_COND_EMPTY) - zone->wp = zone->start; - else - zone->wp = zone->wp + ti->begin - start - part_offset; - } - ofst += sizeof(struct blk_zone); - hdr->nr_zones--; - nr_rep++; + for (i = 0; i < nrz; i++) { + zone = zones + i; + if (zone->start >= start + ti->len) { + memset(zone, 0, sizeof(struct blk_zone) * (nrz - i)); + break; } - if (addr != hdr) - kunmap_atomic(addr); + zone->start = zone->start + ti->begin - start; + if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) + continue; - if (!hdr->nr_zones) - break; - } - - if (hdr) { - hdr->nr_zones = nr_rep; - kunmap_atomic(hdr); + if (zone->cond == BLK_ZONE_COND_FULL) + zone->wp = zone->start + zone->len; + else if (zone->cond == BLK_ZONE_COND_EMPTY) + zone->wp = zone->start; + else + zone->wp = zone->wp + ti->begin - start; } - bio_advance(report_bio, report_bio->bi_iter.bi_size); - + *nr_zones = i; #else /* !CONFIG_BLK_DEV_ZONED */ - bio->bi_status = BLK_STS_NOTSUPP; + *nr_zones = 0; #endif } EXPORT_SYMBOL_GPL(dm_remap_zone_report); @@ -1327,8 +1334,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio, return r; } - if (bio_op(bio) != REQ_OP_ZONE_REPORT) - bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); + bio_advance(clone, to_bytes(sector - 
clone->bi_iter.bi_sector)); clone->bi_iter.bi_size = to_bytes(len); if (unlikely(bio_integrity(bio) != NULL)) @@ -1541,7 +1547,6 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti, */ static int __split_and_process_non_flush(struct clone_info *ci) { - struct bio *bio = ci->bio; struct dm_target *ti; unsigned len; int r; @@ -1553,11 +1558,7 @@ static int __split_and_process_non_flush(struct clone_info *ci) if (unlikely(__process_abnormal_io(ci, ti, &r))) return r; - if (bio_op(bio) == REQ_OP_ZONE_REPORT) - len = ci->sector_count; - else - len = min_t(sector_t, max_io_len(ci->sector, ti), - ci->sector_count); + len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); if (r < 0) @@ -1616,9 +1617,6 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, * We take a clone of the original to store in * ci.io->orig_bio to be used by end_io_acct() and * for dec_pending to use for completion handling. - * As this path is not used for REQ_OP_ZONE_REPORT, - * the usage of io->orig_bio in dm_remap_zone_report() - * won't be affected by this reassignment. */ struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count, GFP_NOIO, &md->queue->bio_split); @@ -3167,6 +3165,7 @@ static const struct block_device_operations dm_blk_dops = { .release = dm_blk_close, .ioctl = dm_blk_ioctl, .getgeo = dm_blk_getgeo, + .report_zones = dm_blk_report_zones, .pr_ops = &dm_pr_ops, .owner = THIS_MODULE }; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index b762d0fd773c..3bb2b3351e35 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1272,8 +1272,6 @@ static int sd_init_command(struct scsi_cmnd *cmd) case REQ_OP_READ: case REQ_OP_WRITE: return sd_setup_read_write_cmnd(cmd); - case REQ_OP_ZONE_REPORT: - return sd_zbc_setup_report_cmnd(cmd); case REQ_OP_ZONE_RESET: return sd_zbc_setup_reset_cmnd(cmd); default: @@ -1802,6 +1800,7 @@ static const struct block_device_operations sd_fops = { .check_events = sd_check_events, .revalidate_disk = sd_revalidate_disk, .unlock_native_capacity = sd_unlock_native_capacity, + .report_zones = sd_zbc_report_zones, .pr_ops = &sd_pr_ops, }; @@ -1953,16 +1952,6 @@ static int sd_done(struct scsi_cmnd *SCpnt) scsi_set_resid(SCpnt, blk_rq_bytes(req)); } break; - case REQ_OP_ZONE_REPORT: - if (!result) { - good_bytes = scsi_bufflen(SCpnt) - - scsi_get_resid(SCpnt); - scsi_set_resid(SCpnt, 0); - } else { - good_bytes = 0; - scsi_set_resid(SCpnt, blk_rq_bytes(req)); - } - break; default: /* * In case of bogus fw or device, we could end up having @@ -3425,8 +3414,6 @@ static int sd_remove(struct device *dev) del_gendisk(sdkp->disk); sd_shutdown(dev); - sd_zbc_remove(sdkp); - free_opal_dev(sdkp->opal_dev); blk_register_region(devt, SD_MINORS, NULL, diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index a7d4f50b67d4..1d63f3a23ffb 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -76,7 +76,6 @@ struct scsi_disk { #ifdef CONFIG_BLK_DEV_ZONED u32 nr_zones; u32 zone_blocks; - u32 zone_shift; u32 zones_optimal_open; u32 zones_optimal_nonseq; u32 zones_max_open; @@ -271,12 +270,13 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp) #ifdef CONFIG_BLK_DEV_ZONED extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer); -extern void sd_zbc_remove(struct scsi_disk *sdkp); extern void sd_zbc_print_zones(struct scsi_disk *sdkp); -extern int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd); extern int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd); extern 
void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes, struct scsi_sense_hdr *sshdr); +extern int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, + struct blk_zone *zones, unsigned int *nr_zones, + gfp_t gfp_mask); #else /* CONFIG_BLK_DEV_ZONED */ @@ -286,15 +286,8 @@ static inline int sd_zbc_read_zones(struct scsi_disk *sdkp, return 0; } -static inline void sd_zbc_remove(struct scsi_disk *sdkp) {} - static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {} -static inline int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd) -{ - return BLKPREP_INVALID; -} - static inline int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd) { return BLKPREP_INVALID; @@ -304,6 +297,8 @@ static inline void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes, struct scsi_sense_hdr *sshdr) {} +#define sd_zbc_report_zones NULL + #endif /* CONFIG_BLK_DEV_ZONED */ #endif /* _SCSI_DISK_H */ diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 412c1787dcd9..e06c48c866e4 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -62,16 +62,22 @@ static void sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf, } /** - * sd_zbc_report_zones - Issue a REPORT ZONES scsi command. + * sd_zbc_do_report_zones - Issue a REPORT ZONES scsi command. * @sdkp: The target disk * @buf: Buffer to use for the reply * @buflen: the buffer size * @lba: Start LBA of the report + * @partial: Do partial report * * For internal use during device validation. + * Using partial=true can significantly speed up execution of a report zones + * command because the disk does not have to count all possible report matching + * zones and will only report the count of zones fitting in the command reply + * buffer. */ -static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf, - unsigned int buflen, sector_t lba) +static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf, + unsigned int buflen, sector_t lba, + bool partial) { struct scsi_device *sdp = sdkp->device; const int timeout = sdp->request_queue->rq_timeout; @@ -85,6 +91,8 @@ static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf, cmd[1] = ZI_REPORT_ZONES; put_unaligned_be64(lba, &cmd[2]); put_unaligned_be32(buflen, &cmd[10]); + if (partial) + cmd[14] = ZBC_REPORT_ZONE_PARTIAL; memset(buf, 0, buflen); result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE, @@ -110,108 +118,56 @@ static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf, } /** - * sd_zbc_setup_report_cmnd - Prepare a REPORT ZONES scsi command - * @cmd: The command to setup + * sd_zbc_report_zones - Disk report zones operation. + * @disk: The target disk + * @sector: Start 512B sector of the report + * @zones: Array of zone descriptors + * @nr_zones: Number of descriptors in the array + * @gfp_mask: Memory allocation mask * - * Call in sd_init_command() for a REQ_OP_ZONE_REPORT request. + * Execute a report zones command on the target disk. 
*/ -int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd) +int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, + struct blk_zone *zones, unsigned int *nr_zones, + gfp_t gfp_mask) { - struct request *rq = cmd->request; - struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); - sector_t lba, sector = blk_rq_pos(rq); - unsigned int nr_bytes = blk_rq_bytes(rq); - int ret; - - WARN_ON(nr_bytes == 0); + struct scsi_disk *sdkp = scsi_disk(disk); + unsigned int i, buflen, nrz = *nr_zones; + unsigned char *buf; + size_t offset = 0; + int ret = 0; if (!sd_is_zoned(sdkp)) /* Not a zoned device */ - return BLKPREP_KILL; - - ret = scsi_init_io(cmd); - if (ret != BLKPREP_OK) - return ret; - - cmd->cmd_len = 16; - memset(cmd->cmnd, 0, cmd->cmd_len); - cmd->cmnd[0] = ZBC_IN; - cmd->cmnd[1] = ZI_REPORT_ZONES; - lba = sectors_to_logical(sdkp->device, sector); - put_unaligned_be64(lba, &cmd->cmnd[2]); - put_unaligned_be32(nr_bytes, &cmd->cmnd[10]); - /* Do partial report for speeding things up */ - cmd->cmnd[14] = ZBC_REPORT_ZONE_PARTIAL; - - cmd->sc_data_direction = DMA_FROM_DEVICE; - cmd->sdb.length = nr_bytes; - cmd->transfersize = sdkp->device->sector_size; - cmd->allowed = 0; - - return BLKPREP_OK; -} - -/** - * sd_zbc_report_zones_complete - Process a REPORT ZONES scsi command reply. - * @scmd: The completed report zones command - * @good_bytes: reply size in bytes - * - * Convert all reported zone descriptors to struct blk_zone. The conversion - * is done in-place, directly in the request specified sg buffer. - */ -static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd, - unsigned int good_bytes) -{ - struct request *rq = scmd->request; - struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); - struct sg_mapping_iter miter; - struct blk_zone_report_hdr hdr; - struct blk_zone zone; - unsigned int offset, bytes = 0; - unsigned long flags; - u8 *buf; - - if (good_bytes < 64) - return; + return -EOPNOTSUPP; - memset(&hdr, 0, sizeof(struct blk_zone_report_hdr)); - - sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd), - SG_MITER_TO_SG | SG_MITER_ATOMIC); - - local_irq_save(flags); - while (sg_miter_next(&miter) && bytes < good_bytes) { + /* + * Get a reply buffer for the number of requested zones plus a header. + * For ATA, buffers must be aligned to 512B. 
+ */ + buflen = roundup((nrz + 1) * 64, 512); + buf = kmalloc(buflen, gfp_mask); + if (!buf) + return -ENOMEM; - buf = miter.addr; - offset = 0; + ret = sd_zbc_do_report_zones(sdkp, buf, buflen, + sectors_to_logical(sdkp->device, sector), true); + if (ret) + goto out_free_buf; - if (bytes == 0) { - /* Set the report header */ - hdr.nr_zones = min_t(unsigned int, - (good_bytes - 64) / 64, - get_unaligned_be32(&buf[0]) / 64); - memcpy(buf, &hdr, sizeof(struct blk_zone_report_hdr)); - offset += 64; - bytes += 64; - } + nrz = min(nrz, get_unaligned_be32(&buf[0]) / 64); + for (i = 0; i < nrz; i++) { + offset += 64; + sd_zbc_parse_report(sdkp, buf + offset, zones); + zones++; + } - /* Parse zone descriptors */ - while (offset < miter.length && hdr.nr_zones) { - WARN_ON(offset > miter.length); - buf = miter.addr + offset; - sd_zbc_parse_report(sdkp, buf, &zone); - memcpy(buf, &zone, sizeof(struct blk_zone)); - offset += 64; - bytes += 64; - hdr.nr_zones--; - } + *nr_zones = nrz; - if (!hdr.nr_zones) - break; +out_free_buf: + kfree(buf); - } - sg_miter_stop(&miter); - local_irq_restore(flags); + return ret; } /** @@ -294,30 +250,23 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes, case REQ_OP_WRITE_ZEROES: case REQ_OP_WRITE_SAME: break; - - case REQ_OP_ZONE_REPORT: - - if (!result) - sd_zbc_report_zones_complete(cmd, good_bytes); - break; - } } /** - * sd_zbc_read_zoned_characteristics - Read zoned block device characteristics + * sd_zbc_check_zoned_characteristics - Check zoned block device characteristics * @sdkp: Target disk * @buf: Buffer where to store the VPD page data * - * Read VPD page B6. + * Read VPD page B6, get information and check that reads are unconstrained. */ -static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp, - unsigned char *buf) +static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp, + unsigned char *buf) { if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) { sd_printk(KERN_NOTICE, sdkp, - "Unconstrained-read check failed\n"); + "Read zoned characteristics VPD page failed\n"); return -ENODEV; } @@ -335,43 +284,17 @@ static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp, sdkp->zones_max_open = get_unaligned_be32(&buf[16]); } - return 0; -} - -/** - * sd_zbc_check_capacity - Check reported capacity. - * @sdkp: Target disk - * @buf: Buffer to use for commands - * - * ZBC drive may report only the capacity of the first conventional zones at - * LBA 0. This is indicated by the RC_BASIS field of the read capacity reply. - * Check this here. If the disk reported only its conventional zones capacity, - * get the total capacity by doing a report zones. - */ -static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf) -{ - sector_t lba; - int ret; - - if (sdkp->rc_basis != 0) - return 0; - - /* Do a report zone to get the maximum LBA to check capacity */ - ret = sd_zbc_report_zones(sdkp, buf, SD_BUF_SIZE, 0); - if (ret) - return ret; - - /* The max_lba field is the capacity of this device */ - lba = get_unaligned_be64(&buf[8]); - if (lba + 1 == sdkp->capacity) - return 0; - - if (sdkp->first_scan) - sd_printk(KERN_WARNING, sdkp, - "Changing capacity from %llu to max LBA+1 %llu\n", - (unsigned long long)sdkp->capacity, - (unsigned long long)lba + 1); - sdkp->capacity = lba + 1; + /* + * Check for unconstrained reads: host-managed devices with + * constrained reads (drives failing read after write pointer) + * are not supported. 
+ */ + if (!sdkp->urswrz) { + if (sdkp->first_scan) + sd_printk(KERN_NOTICE, sdkp, + "constrained reads devices are not supported\n"); + return -ENODEV; + } return 0; } @@ -379,24 +302,27 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf) #define SD_ZBC_BUF_SIZE 131072U /** - * sd_zbc_check_zone_size - Check the device zone sizes + * sd_zbc_check_zones - Check the device capacity and zone sizes * @sdkp: Target disk * - * Check that all zones of the device are equal. The last zone can however - * be smaller. The zone size must also be a power of two number of LBAs. + * Check that the device capacity as reported by READ CAPACITY matches the + * max_lba value (plus one)of the report zones command reply. Also check that + * all zones of the device have an equal size, only allowing the last zone of + * the disk to have a smaller size (runt zone). The zone size must also be a + * power of two. * * Returns the zone size in number of blocks upon success or an error code * upon failure. */ -static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp) +static int sd_zbc_check_zones(struct scsi_disk *sdkp, u32 *zblocks) { u64 zone_blocks = 0; - sector_t block = 0; + sector_t max_lba, block = 0; unsigned char *buf; unsigned char *rec; unsigned int buf_len; unsigned int list_length; - s64 ret; + int ret; u8 same; /* Get a buffer */ @@ -404,11 +330,28 @@ static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp) if (!buf) return -ENOMEM; - /* Do a report zone to get the same field */ - ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0); + /* Do a report zone to get max_lba and the same field */ + ret = sd_zbc_do_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0, false); if (ret) goto out_free; + if (sdkp->rc_basis == 0) { + /* The max_lba field is the capacity of this device */ + max_lba = get_unaligned_be64(&buf[8]); + if (sdkp->capacity != max_lba + 1) { + if (sdkp->first_scan) + sd_printk(KERN_WARNING, sdkp, + "Changing capacity from %llu to max LBA+1 %llu\n", + (unsigned long long)sdkp->capacity, + (unsigned long long)max_lba + 1); + sdkp->capacity = max_lba + 1; + } + } + + /* + * Check same field: for any value other than 0, we know that all zones + * have the same size. + */ same = buf[4] & 0x0f; if (same > 0) { rec = &buf[64]; @@ -445,8 +388,8 @@ static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp) } if (block < sdkp->capacity) { - ret = sd_zbc_report_zones(sdkp, buf, - SD_ZBC_BUF_SIZE, block); + ret = sd_zbc_do_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, + block, true); if (ret) goto out_free; } @@ -470,9 +413,10 @@ out: if (sdkp->first_scan) sd_printk(KERN_NOTICE, sdkp, "Zone size too large\n"); - ret = -ENODEV; + ret = -EFBIG; } else { - ret = zone_blocks; + *zblocks = zone_blocks; + ret = 0; } out_free: @@ -481,191 +425,11 @@ out_free: return ret; } -/** - * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone). - * @nr_zones: Number of zones to allocate space for. - * @numa_node: NUMA node to allocate the memory from. 
- */ -static inline unsigned long * -sd_zbc_alloc_zone_bitmap(u32 nr_zones, int numa_node) -{ - return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long), - GFP_KERNEL, numa_node); -} - -/** - * sd_zbc_get_seq_zones - Parse report zones reply to identify sequential zones - * @sdkp: disk used - * @buf: report reply buffer - * @buflen: length of @buf - * @zone_shift: logarithm base 2 of the number of blocks in a zone - * @seq_zones_bitmap: bitmap of sequential zones to set - * - * Parse reported zone descriptors in @buf to identify sequential zones and - * set the reported zone bit in @seq_zones_bitmap accordingly. - * Since read-only and offline zones cannot be written, do not - * mark them as sequential in the bitmap. - * Return the LBA after the last zone reported. - */ -static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf, - unsigned int buflen, u32 zone_shift, - unsigned long *seq_zones_bitmap) -{ - sector_t lba, next_lba = sdkp->capacity; - unsigned int buf_len, list_length; - unsigned char *rec; - u8 type, cond; - - list_length = get_unaligned_be32(&buf[0]) + 64; - buf_len = min(list_length, buflen); - rec = buf + 64; - - while (rec < buf + buf_len) { - type = rec[0] & 0x0f; - cond = (rec[1] >> 4) & 0xf; - lba = get_unaligned_be64(&rec[16]); - if (type != ZBC_ZONE_TYPE_CONV && - cond != ZBC_ZONE_COND_READONLY && - cond != ZBC_ZONE_COND_OFFLINE) - set_bit(lba >> zone_shift, seq_zones_bitmap); - next_lba = lba + get_unaligned_be64(&rec[8]); - rec += 64; - } - - return next_lba; -} - -/** - * sd_zbc_setup_seq_zones_bitmap - Initialize a seq zone bitmap. - * @sdkp: target disk - * @zone_shift: logarithm base 2 of the number of blocks in a zone - * @nr_zones: number of zones to set up a seq zone bitmap for - * - * Allocate a zone bitmap and initialize it by identifying sequential zones. - */ -static unsigned long * -sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp, u32 zone_shift, - u32 nr_zones) -{ - struct request_queue *q = sdkp->disk->queue; - unsigned long *seq_zones_bitmap; - sector_t lba = 0; - unsigned char *buf; - int ret = -ENOMEM; - - seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(nr_zones, q->node); - if (!seq_zones_bitmap) - return ERR_PTR(-ENOMEM); - - buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL); - if (!buf) - goto out; - - while (lba < sdkp->capacity) { - ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, lba); - if (ret) - goto out; - lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE, - zone_shift, seq_zones_bitmap); - } - - if (lba != sdkp->capacity) { - /* Something went wrong */ - ret = -EIO; - } - -out: - kfree(buf); - if (ret) { - kfree(seq_zones_bitmap); - return ERR_PTR(ret); - } - return seq_zones_bitmap; -} - -static void sd_zbc_cleanup(struct scsi_disk *sdkp) -{ - struct request_queue *q = sdkp->disk->queue; - - kfree(q->seq_zones_bitmap); - q->seq_zones_bitmap = NULL; - - kfree(q->seq_zones_wlock); - q->seq_zones_wlock = NULL; - - q->nr_zones = 0; -} - -static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks) -{ - struct request_queue *q = sdkp->disk->queue; - u32 zone_shift = ilog2(zone_blocks); - u32 nr_zones; - int ret; - - /* chunk_sectors indicates the zone size */ - blk_queue_chunk_sectors(q, - logical_to_sectors(sdkp->device, zone_blocks)); - nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift; - - /* - * Initialize the device request queue information if the number - * of zones changed. 
- */ - if (nr_zones != sdkp->nr_zones || nr_zones != q->nr_zones) { - unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL; - size_t zone_bitmap_size; - - if (nr_zones) { - seq_zones_wlock = sd_zbc_alloc_zone_bitmap(nr_zones, - q->node); - if (!seq_zones_wlock) { - ret = -ENOMEM; - goto err; - } - - seq_zones_bitmap = sd_zbc_setup_seq_zones_bitmap(sdkp, - zone_shift, nr_zones); - if (IS_ERR(seq_zones_bitmap)) { - ret = PTR_ERR(seq_zones_bitmap); - kfree(seq_zones_wlock); - goto err; - } - } - zone_bitmap_size = BITS_TO_LONGS(nr_zones) * - sizeof(unsigned long); - blk_mq_freeze_queue(q); - if (q->nr_zones != nr_zones) { - /* READ16/WRITE16 is mandatory for ZBC disks */ - sdkp->device->use_16_for_rw = 1; - sdkp->device->use_10_for_rw = 0; - - sdkp->zone_blocks = zone_blocks; - sdkp->zone_shift = zone_shift; - sdkp->nr_zones = nr_zones; - q->nr_zones = nr_zones; - swap(q->seq_zones_wlock, seq_zones_wlock); - swap(q->seq_zones_bitmap, seq_zones_bitmap); - } else if (memcmp(q->seq_zones_bitmap, seq_zones_bitmap, - zone_bitmap_size) != 0) { - memcpy(q->seq_zones_bitmap, seq_zones_bitmap, - zone_bitmap_size); - } - blk_mq_unfreeze_queue(q); - kfree(seq_zones_wlock); - kfree(seq_zones_bitmap); - } - - return 0; - -err: - sd_zbc_cleanup(sdkp); - return ret; -} - int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) { - int64_t zone_blocks; + struct gendisk *disk = sdkp->disk; + unsigned int nr_zones; + u32 zone_blocks; int ret; if (!sd_is_zoned(sdkp)) @@ -675,26 +439,8 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) */ return 0; - /* Get zoned block device characteristics */ - ret = sd_zbc_read_zoned_characteristics(sdkp, buf); - if (ret) - goto err; - - /* - * Check for unconstrained reads: host-managed devices with - * constrained reads (drives failing read after write pointer) - * are not supported. - */ - if (!sdkp->urswrz) { - if (sdkp->first_scan) - sd_printk(KERN_NOTICE, sdkp, - "constrained reads devices are not supported\n"); - ret = -ENODEV; - goto err; - } - - /* Check capacity */ - ret = sd_zbc_check_capacity(sdkp, buf); + /* Check zoned block device characteristics (unconstrained reads) */ + ret = sd_zbc_check_zoned_characteristics(sdkp, buf); if (ret) goto err; @@ -702,33 +448,44 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) * Check zone size: only devices with a constant zone size (except * an eventual last runt zone) that is a power of 2 are supported. */ - zone_blocks = sd_zbc_check_zone_size(sdkp); - ret = -EFBIG; - if (zone_blocks != (u32)zone_blocks) - goto err; - ret = zone_blocks; - if (ret < 0) + ret = sd_zbc_check_zones(sdkp, &zone_blocks); + if (ret != 0) goto err; /* The drive satisfies the kernel restrictions: set it up */ - ret = sd_zbc_setup(sdkp, zone_blocks); - if (ret) - goto err; + blk_queue_chunk_sectors(sdkp->disk->queue, + logical_to_sectors(sdkp->device, zone_blocks)); + nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks); + + /* READ16/WRITE16 is mandatory for ZBC disks */ + sdkp->device->use_16_for_rw = 1; + sdkp->device->use_10_for_rw = 0; + + /* + * If something changed, revalidate the disk zone bitmaps once we have + * the capacity, that is on the second revalidate execution during disk + * scan and always during normal revalidate. 
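After this rework, the only zone accounting left inline in sd_zbc_read_zones() is the arithmetic below; the zone bitmaps are rebuilt by blk_revalidate_disk_zones(), and only when the zone size, the zone count or the queue's nr_zones actually changed, never on the first scan, where the capacity is not yet settled. A stand-alone sketch of that arithmetic (nr_zones() and the example figures are made up for illustration; a smaller runt zone at the end of the disk still counts as a full zone):

```c
#include <stdint.h>
#include <stdio.h>

static unsigned int nr_zones(uint64_t capacity_blocks, uint64_t zone_blocks)
{
	unsigned int zone_shift = 0;

	while ((1ULL << zone_shift) < zone_blocks)
		zone_shift++;	/* ilog2() for a power-of-two zone size */

	/* round_up(capacity, zone_blocks) >> zone_shift, as in the driver */
	return (unsigned int)((capacity_blocks + zone_blocks - 1) >> zone_shift);
}

int main(void)
{
	/* e.g. 15,000,000 logical blocks with 65536-block zones -> 229 zones */
	printf("%u zones\n", nr_zones(15000000ULL, 65536ULL));
	return 0;
}
```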
+ */ + if (sdkp->first_scan) + return 0; + if (sdkp->zone_blocks != zone_blocks || + sdkp->nr_zones != nr_zones || + disk->queue->nr_zones != nr_zones) { + ret = blk_revalidate_disk_zones(disk); + if (ret != 0) + goto err; + sdkp->zone_blocks = zone_blocks; + sdkp->nr_zones = nr_zones; + } return 0; err: sdkp->capacity = 0; - sd_zbc_cleanup(sdkp); return ret; } -void sd_zbc_remove(struct scsi_disk *sdkp) -{ - sd_zbc_cleanup(sdkp); -} - void sd_zbc_print_zones(struct scsi_disk *sdkp) { if (!sd_is_zoned(sdkp) || !sdkp->capacity) diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 9578c7ab1eb6..093a818c5b68 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -283,8 +283,6 @@ enum req_opf { REQ_OP_FLUSH = 2, /* discard sectors */ REQ_OP_DISCARD = 3, - /* get zone information */ - REQ_OP_ZONE_REPORT = 4, /* securely erase sectors */ REQ_OP_SECURE_ERASE = 5, /* seset a zone write pointer */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 7d423721b327..4293dc1cd160 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -396,16 +396,13 @@ struct queue_limits { #ifdef CONFIG_BLK_DEV_ZONED -struct blk_zone_report_hdr { - unsigned int nr_zones; - u8 padding[60]; -}; - +extern unsigned int blkdev_nr_zones(struct block_device *bdev); extern int blkdev_report_zones(struct block_device *bdev, sector_t sector, struct blk_zone *zones, unsigned int *nr_zones, gfp_t gfp_mask); extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors, sector_t nr_sectors, gfp_t gfp_mask); +extern int blk_revalidate_disk_zones(struct gendisk *disk); extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); @@ -414,6 +411,16 @@ extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode, #else /* CONFIG_BLK_DEV_ZONED */ +static inline unsigned int blkdev_nr_zones(struct block_device *bdev) +{ + return 0; +} + +static inline int blk_revalidate_disk_zones(struct gendisk *disk) +{ + return 0; +} + static inline int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) @@ -806,6 +813,11 @@ static inline unsigned int blk_queue_zone_sectors(struct request_queue *q) } #ifdef CONFIG_BLK_DEV_ZONED +static inline unsigned int blk_queue_nr_zones(struct request_queue *q) +{ + return blk_queue_is_zoned(q) ? 
q->nr_zones : 0; +} + static inline unsigned int blk_queue_zone_no(struct request_queue *q, sector_t sector) { @@ -821,6 +833,11 @@ static inline bool blk_queue_zone_is_seq(struct request_queue *q, return false; return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap); } +#else /* CONFIG_BLK_DEV_ZONED */ +static inline unsigned int blk_queue_nr_zones(struct request_queue *q) +{ + return 0; +} #endif /* CONFIG_BLK_DEV_ZONED */ static inline bool rq_is_sync(struct request *rq) @@ -1852,6 +1869,9 @@ struct block_device_operations { int (*getgeo)(struct block_device *, struct hd_geometry *); /* this callback is with swap_lock and sometimes page table lock held */ void (*swap_slot_free_notify) (struct block_device *, unsigned long); + int (*report_zones)(struct gendisk *, sector_t sector, + struct blk_zone *zones, unsigned int *nr_zones, + gfp_t gfp_mask); struct module *owner; const struct pr_ops *pr_ops; }; diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 6fb0808e87c8..a23b396a8edc 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -92,6 +92,11 @@ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv, typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev); +typedef int (*dm_report_zones_fn) (struct dm_target *ti, sector_t sector, + struct blk_zone *zones, + unsigned int *nr_zones, + gfp_t gfp_mask); + /* * These iteration functions are typically used to check (and combine) * properties of underlying devices. @@ -180,6 +185,9 @@ struct target_type { dm_status_fn status; dm_message_fn message; dm_prepare_ioctl_fn prepare_ioctl; +#ifdef CONFIG_BLK_DEV_ZONED + dm_report_zones_fn report_zones; +#endif dm_busy_fn busy; dm_iterate_devices_fn iterate_devices; dm_io_hints_fn io_hints; @@ -420,8 +428,8 @@ struct gendisk *dm_disk(struct mapped_device *md); int dm_suspended(struct dm_target *ti); int dm_noflush_suspending(struct dm_target *ti); void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors); -void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, - sector_t start); +void dm_remap_zone_report(struct dm_target *ti, sector_t start, + struct blk_zone *zones, unsigned int *nr_zones); union map_info *dm_get_rq_mapinfo(struct request *rq); struct queue_limits *dm_get_queue_limits(struct mapped_device *md); diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 795698925d20..3ec73f17ee2a 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -82,7 +82,6 @@ TRACE_DEFINE_ENUM(CP_TRIMMED); { REQ_OP_WRITE, "WRITE" }, \ { REQ_OP_FLUSH, "FLUSH" }, \ { REQ_OP_DISCARD, "DISCARD" }, \ - { REQ_OP_ZONE_REPORT, "ZONE_REPORT" }, \ { REQ_OP_SECURE_ERASE, "SECURE_ERASE" }, \ { REQ_OP_ZONE_RESET, "ZONE_RESET" }, \ { REQ_OP_WRITE_SAME, "WRITE_SAME" }, \ diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h index ff5a5db8906a..8f08ff9bdea0 100644 --- a/include/uapi/linux/blkzoned.h +++ b/include/uapi/linux/blkzoned.h @@ -137,8 +137,11 @@ struct blk_zone_range { * sector specified in the report request structure. * @BLKRESETZONE: Reset the write pointer of the zones in the specified * sector range. The sector range must be zone aligned. + * @BLKGETZONESZ: Get the device zone size in number of 512 B sectors. 
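With REQ_OP_ZONE_REPORT gone, in-kernel zone reports are produced through the driver's new report_zones() method in block_device_operations (and forwarded across device-mapper targets via dm_report_zones_fn), while the user-facing BLKREPORTZONE ioctl keeps its existing interface. A minimal user-space consumer, shown for illustration only ("/dev/sdX" is a placeholder and error handling is trimmed):

```c
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(void)
{
	const unsigned int nr = 16;		/* report the first 16 zones */
	struct blk_zone_report *rep;
	int fd = open("/dev/sdX", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	rep = calloc(1, sizeof(*rep) + nr * sizeof(struct blk_zone));
	if (!rep)
		return 1;
	rep->sector = 0;	/* start reporting from the first zone  */
	rep->nr_zones = nr;	/* capacity of the zones[] array        */

	if (ioctl(fd, BLKREPORTZONE, rep) < 0) {
		perror("BLKREPORTZONE");
		return 1;
	}

	for (unsigned int i = 0; i < rep->nr_zones; i++)
		printf("zone %u: start %llu len %llu wp %llu type %u cond %u\n",
		       i, (unsigned long long)rep->zones[i].start,
		       (unsigned long long)rep->zones[i].len,
		       (unsigned long long)rep->zones[i].wp,
		       rep->zones[i].type, rep->zones[i].cond);

	free(rep);
	close(fd);
	return 0;
}
```

On return the kernel shrinks nr_zones to the number of zones actually filled in, so the loop bound above stays valid even near the end of the device.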
 */ #define BLKREPORTZONE _IOWR(0x12, 130, struct blk_zone_report) #define BLKRESETZONE _IOW(0x12, 131, struct blk_zone_range) +#define BLKGETZONESZ _IOW(0x12, 132, __u32) +#define BLKGETNRZONES _IOW(0x12, 133, __u32) #endif /* _UAPI_BLKZONED_H */
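The two new ioctls expose the zone geometry without issuing a full report. A short sketch of their use (again "/dev/sdX" is a placeholder; both take a pointer to a __u32, a zone size of 0 identifies a non-zoned device, and kernels without this series reject the requests):

```c
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(void)
{
	unsigned int zone_sectors = 0, nr_zones = 0;
	int fd = open("/dev/sdX", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BLKGETZONESZ, &zone_sectors) < 0 ||
	    ioctl(fd, BLKGETNRZONES, &nr_zones) < 0) {
		perror("ioctl");	/* e.g. ENOTTY on an older kernel */
		return 1;
	}
	/* zone size is reported in 512 B sectors, matching the uapi comment */
	printf("zone size: %u sectors, zones: %u\n", zone_sectors, nr_zones);
	close(fd);
	return 0;
}
```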