author	Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>	2021-03-11 16:25:46 +0900
committer	Jens Axboe <axboe@kernel.dk>	2021-03-11 11:49:25 -0700
commit	e5113505904ea1c1c0e1f92c1cfa91fbf4da1694 (patch)
tree	e20c2e385bedcf60b2f8ac24f7038f87ed934d26 /block
parent	9ec491447b90ad6a4056a9656b13f0b3a1e83043 (diff)
download	linux-e5113505904ea1c1c0e1f92c1cfa91fbf4da1694.tar.bz2
block: Discard page cache of zone reset target range
When the zone reset ioctl and a data read race for the same zone on a zoned
block device, the read leaves stale page cache behind even though the zone
reset zero-clears all of the zone data on the device. To avoid reading
non-zero data from the stale page cache after a zone reset, discard the page
cache of the reset target zones in blkdev_zone_mgmt_ioctl(). Introduce the
helper function blkdev_truncate_zone_range() to discard the page cache.

Ensure the page cache is discarded by calling the helper function both before
and after the zone reset, in the same manner as fallocate does.

This patch can be applied back to the stable kernel version v5.10.y. Rework
is needed for older stable kernels.

Signed-off-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Fixes: 3ed05a987e0f ("blk-zoned: implement ioctls")
Cc: <stable@vger.kernel.org> # 5.10+
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20210311072546.678999-1-shinichiro.kawasaki@wdc.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
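The race the message describes can be observed from userspace: a buffered read
fills the page cache for a zone, the zone is reset, and a second buffered read
may then be served from the stale cache. Below is a minimal reproducer sketch,
not part of the patch; the device path /dev/nullb0 and the 256 MiB zone size
(524288 sectors of 512 bytes) are assumptions chosen for illustration.

/*
 * Hypothetical reproducer for the race fixed by this patch. Assumes a
 * zoned block device at /dev/nullb0 whose first zone starts at sector 0;
 * adjust the path and zone geometry for a real setup.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(void)
{
	char buf[4096];
	struct blk_zone_range zrange = {
		.sector = 0,		/* first zone */
		.nr_sectors = 524288,	/* assumed 256 MiB zone, 512 B sectors */
	};
	int fd = open("/dev/nullb0", O_RDWR);

	if (fd < 0)
		return 1;

	/* Buffered read: populates the page cache for the zone. */
	pread(fd, buf, sizeof(buf), 0);

	/* Reset the zone; the device itself now reads back as zeroes. */
	if (ioctl(fd, BLKRESETZONE, &zrange) < 0) {
		close(fd);
		return 1;
	}

	/*
	 * Without this patch, this read may be served from the stale page
	 * cache and return the pre-reset data instead of zeroes.
	 */
	pread(fd, buf, sizeof(buf), 0);

	close(fd);
	return 0;
}

With the patch applied, the second pread() returns zeroes, because
blkdev_zone_mgmt_ioctl() invalidates the cached pages before and after the
reset.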
Diffstat (limited to 'block')
-rw-r--r--	block/blk-zoned.c	38
1 file changed, 36 insertions(+), 2 deletions(-)
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 8b9f3fc5a690..c0276b42d9fb 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -318,6 +318,22 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
 	return 0;
 }
 
+static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode,
+				      const struct blk_zone_range *zrange)
+{
+	loff_t start, end;
+
+	if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
+	    zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
+		/* Out of range */
+		return -EINVAL;
+
+	start = zrange->sector << SECTOR_SHIFT;
+	end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;
+
+	return truncate_bdev_range(bdev, mode, start, end);
+}
+
 /*
  * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
  * Called from blkdev_ioctl.
@@ -329,6 +345,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
 	struct request_queue *q;
 	struct blk_zone_range zrange;
 	enum req_opf op;
+	int ret;
 
 	if (!argp)
 		return -EINVAL;
@@ -352,6 +369,11 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
 	switch (cmd) {
 	case BLKRESETZONE:
 		op = REQ_OP_ZONE_RESET;
+
+		/* Invalidate the page cache, including dirty pages. */
+		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
+		if (ret)
+			return ret;
 		break;
 	case BLKOPENZONE:
 		op = REQ_OP_ZONE_OPEN;
@@ -366,8 +388,20 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
 		return -ENOTTY;
 	}
 
-	return blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
-				GFP_KERNEL);
+	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
+			       GFP_KERNEL);
+
+	/*
+	 * Invalidate the page cache again for zone reset: writes can only be
+	 * direct for zoned devices so concurrent writes would not add any page
+	 * to the page cache after/during reset. The page cache may be filled
+	 * again due to concurrent reads though and dropping the pages for
+	 * these is fine.
+	 */
+	if (!ret && cmd == BLKRESETZONE)
+		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
+
+	return ret;
 }
 
 static inline unsigned long *blk_alloc_zone_bitmap(int node,
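Note that truncate_bdev_range() takes an inclusive byte range, which is why
the new helper subtracts one from the end offset. A standalone sketch of the
same arithmetic follows; the zone geometry is an assumed example, and
SECTOR_SHIFT is 9 (512-byte sectors) as in the kernel.

/*
 * Standalone sketch of the byte-range computation performed by the new
 * blkdev_truncate_zone_range() helper, with an assumed zone geometry.
 */
#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9

int main(void)
{
	uint64_t sector = 524288;	/* assumed zone start, in sectors */
	uint64_t nr_sectors = 524288;	/* assumed zone length: 256 MiB */

	/* Overflow / zero-length check, mirroring the helper. */
	if (sector + nr_sectors <= sector) {
		fprintf(stderr, "invalid range\n");
		return 1;
	}

	uint64_t start = sector << SECTOR_SHIFT;
	uint64_t end = ((sector + nr_sectors) << SECTOR_SHIFT) - 1;

	/* Prints start=268435456 end=536870911: an inclusive byte range. */
	printf("start=%llu end=%llu\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}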