author		Linus Torvalds <torvalds@linux-foundation.org>	2018-04-05 14:27:02 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-05 14:27:02 -0700
commit		3526dd0c7832f1011a0477cc6d903662bae05ea8 (patch)
tree		22fbac64eb40a0b29bfa4c029695f39b2f591e62 /drivers/block
parent		dd972f924df6bdbc0ab185a38d5d2361dbc26311 (diff)
parent		bc6d65e6dc89c3b7ff78e4ad797117c122ffde8e (diff)
download	linux-3526dd0c7832f1011a0477cc6d903662bae05ea8.tar.bz2
Merge tag 'for-4.17/block-20180402' of git://git.kernel.dk/linux-block
Pull block layer updates from Jens Axboe:
"It's a pretty quiet round this time, which is nice. This contains:
- series from Bart, cleaning up the way we set/test/clear atomic
queue flags (a usage sketch of the new helpers follows this message).
- series from Bart, fixing races between gendisk and queue
registration and removal (a teardown-ordering sketch follows the shortlog).
- set of bcache fixes and improvements from various folks, by way of
Michael Lyle.
- set of lightnvm updates from Matias, most of it being the 1.2 to
2.0 transition.
- removal of unused DIO flags from Nikolay.
- blk-mq/sbitmap memory ordering fixes from Omar.
- divide-by-zero fix for BFQ from Paolo.
- minor documentation patches from Randy.
- timeout fix from Tejun.
- Alpha "can't write a char atomically" fix from Mikulas.
- set of NVMe fixes by way of Keith.
- bsg and bsg-lib improvements from Christoph.
- a few sed-opal fixes from Jonas.
- cdrom check-disk-change deadlock fix from Maurizio.
- various little fixes, comment fixes, etc from various folks"
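The queue-flag series converts open-coded set_bit()/clear_bit() calls and the
old queue_flag_set_unlocked()/queue_flag_clear_unlocked() macros to the new
atomic helpers, as seen throughout the diff below. A minimal sketch of a
driver init path using the new helpers — the blk_queue_flag_* calls are taken
from the conversions below, while the surrounding function and its name are
hypothetical:

#include <linux/blkdev.h>

/* Hypothetical init path; only the blk_queue_flag_* calls are taken
 * from the conversions in this merge. */
static void mydrv_init_queue_flags(struct request_queue *q)
{
	/* SSD-like device: non-rotational, contributes no entropy */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);

	/* advertise discard support */
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
}

The helpers operate atomically on q->queue_flags, so callers no longer need
to hold the queue lock or pick between locked and _unlocked variants.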
* tag 'for-4.17/block-20180402' of git://git.kernel.dk/linux-block: (139 commits)
blk-mq: Directly schedule q->timeout_work when aborting a request
blktrace: fix comment in blktrace_api.h
lightnvm: remove function name in strings
lightnvm: pblk: remove some unnecessary NULL checks
lightnvm: pblk: don't recover unwritten lines
lightnvm: pblk: implement 2.0 support
lightnvm: pblk: implement get log report chunk
lightnvm: pblk: rename ppaf* to addrf*
lightnvm: pblk: check for supported version
lightnvm: implement get log report chunk helpers
lightnvm: make address conversions depend on generic device
lightnvm: add support for 2.0 address format
lightnvm: normalize geometry nomenclature
lightnvm: complete geo structure with maxoc*
lightnvm: add shorten OCSSD version in geo
lightnvm: add minor version to generic geometry
lightnvm: simplify geometry structure
lightnvm: pblk: refactor init/exit sequences
lightnvm: Avoid validation of default op value
lightnvm: centralize permission check for lightnvm ioctl
...
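The gendisk/queue registration series surfaces in the diff below as a
two-line reordering in loop_remove() and zram_remove(): the disk is now
unregistered before its request queue is cleaned up, closing the window in
which new I/O could be issued against a dying queue. A condensed sketch of
the fixed ordering — the mydev structure and function are placeholders, but
the call sequence mirrors the reordered code below:

#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>

struct mydev {				/* placeholder driver state */
	struct gendisk *disk;
	struct request_queue *queue;
};

static void mydev_remove(struct mydev *dev)
{
	del_gendisk(dev->disk);		/* unregister the disk first... */
	blk_cleanup_queue(dev->queue);	/* ...then drain and free its queue */
	put_disk(dev->disk);
	kfree(dev);
}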
Diffstat (limited to 'drivers/block')
-rw-r--r--	drivers/block/brd.c			1
-rw-r--r--	drivers/block/drbd/drbd_main.c		3
-rw-r--r--	drivers/block/drbd/drbd_nl.c		4
-rw-r--r--	drivers/block/loop.c			77
-rw-r--r--	drivers/block/mtip32xx/mtip32xx.c	8
-rw-r--r--	drivers/block/nbd.c			8
-rw-r--r--	drivers/block/null_blk.c		124
-rw-r--r--	drivers/block/paride/pcd.c		2
-rw-r--r--	drivers/block/rbd.c			13
-rw-r--r--	drivers/block/rsxx/dev.c		6
-rw-r--r--	drivers/block/skd_main.c		4
-rw-r--r--	drivers/block/umem.c			7
-rw-r--r--	drivers/block/xen-blkfront.c		10
-rw-r--r--	drivers/block/zram/zram_drv.c		8
-rw-r--r--	drivers/block/zram/zram_drv.h		1
15 files changed, 165 insertions(+), 111 deletions(-)
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index deea78e485da..66cb0f857f64 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -24,7 +24,6 @@
 #include <linux/uaccess.h>
 
-#define SECTOR_SHIFT		9
 #define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
 #define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
 
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 0a0394aa1b9c..185f1ef00a7c 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2816,7 +2816,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 	drbd_init_set_defaults(device);
 
-	q = blk_alloc_queue(GFP_KERNEL);
+	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, &resource->req_lock);
 	if (!q)
 		goto out_no_q;
 	device->rq_queue = q;
@@ -2848,7 +2848,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 	/* Setting the max_hw_sectors to an odd value of 8kibyte here
 	   This triggers a max_bio_size message upon first attach or connect */
 	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
-	q->queue_lock = &resource->req_lock;
 
 	device->md_io.page = alloc_page(GFP_KERNEL);
 	if (!device->md_io.page)
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index a12f77e6891e..b4f02768ba47 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1212,10 +1212,10 @@ static void decide_on_discard_support(struct drbd_device *device,
 		 * topology on all peers. */
 		blk_queue_discard_granularity(q, 512);
 		q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 		q->limits.max_write_zeroes_sectors = drbd_max_discard_sectors(connection);
 	} else {
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 		blk_queue_discard_granularity(q, 0);
 		q->limits.max_discard_sectors = 0;
 		q->limits.max_write_zeroes_sectors = 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ee62d2d517bf..264abaaff662 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -214,10 +214,10 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
 	blk_mq_freeze_queue(lo->lo_queue);
 	lo->use_dio = use_dio;
 	if (use_dio) {
-		queue_flag_clear_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+		blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 		lo->lo_flags |= LO_FLAGS_DIRECT_IO;
 	} else {
-		queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 		lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
 	}
 	blk_mq_unfreeze_queue(lo->lo_queue);
@@ -817,7 +817,7 @@ static void loop_config_discard(struct loop_device *lo)
 		q->limits.discard_alignment = 0;
 		blk_queue_max_discard_sectors(q, 0);
 		blk_queue_max_write_zeroes_sectors(q, 0);
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 		return;
 	}
@@ -826,7 +826,7 @@ static void loop_config_discard(struct loop_device *lo)
 
 	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
 	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 }
 
 static void loop_unprepare_queue(struct loop_device *lo)
@@ -1167,21 +1167,17 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 static int
 loop_get_status(struct loop_device *lo, struct loop_info64 *info)
 {
-	struct file *file = lo->lo_backing_file;
+	struct file *file;
 	struct kstat stat;
-	int error;
+	int ret;
 
-	if (lo->lo_state != Lo_bound)
+	if (lo->lo_state != Lo_bound) {
+		mutex_unlock(&lo->lo_ctl_mutex);
 		return -ENXIO;
-	error = vfs_getattr(&file->f_path, &stat,
-			    STATX_INO, AT_STATX_SYNC_AS_STAT);
-	if (error)
-		return error;
+	}
+
 	memset(info, 0, sizeof(*info));
 	info->lo_number = lo->lo_number;
-	info->lo_device = huge_encode_dev(stat.dev);
-	info->lo_inode = stat.ino;
-	info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev);
 	info->lo_offset = lo->lo_offset;
 	info->lo_sizelimit = lo->lo_sizelimit;
 	info->lo_flags = lo->lo_flags;
@@ -1194,7 +1190,19 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
 		memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
 		       lo->lo_encrypt_key_size);
 	}
-	return 0;
+
+	/* Drop lo_ctl_mutex while we call into the filesystem. */
+	file = get_file(lo->lo_backing_file);
+	mutex_unlock(&lo->lo_ctl_mutex);
+	ret = vfs_getattr(&file->f_path, &stat, STATX_INO,
+			  AT_STATX_SYNC_AS_STAT);
+	if (!ret) {
+		info->lo_device = huge_encode_dev(stat.dev);
+		info->lo_inode = stat.ino;
+		info->lo_rdevice = huge_encode_dev(stat.rdev);
+	}
+	fput(file);
+	return ret;
 }
 
 static void
@@ -1352,7 +1360,10 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
 	struct loop_device *lo = bdev->bd_disk->private_data;
 	int err;
 
-	mutex_lock_nested(&lo->lo_ctl_mutex, 1);
+	err = mutex_lock_killable_nested(&lo->lo_ctl_mutex, 1);
+	if (err)
+		goto out_unlocked;
+
 	switch (cmd) {
 	case LOOP_SET_FD:
 		err = loop_set_fd(lo, mode, bdev, arg);
@@ -1374,7 +1385,8 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
 		break;
 	case LOOP_GET_STATUS:
 		err = loop_get_status_old(lo, (struct loop_info __user *) arg);
-		break;
+		/* loop_get_status() unlocks lo_ctl_mutex */
+		goto out_unlocked;
 	case LOOP_SET_STATUS64:
 		err = -EPERM;
 		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
@@ -1383,7 +1395,8 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
 		break;
 	case LOOP_GET_STATUS64:
 		err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
-		break;
+		/* loop_get_status() unlocks lo_ctl_mutex */
+		goto out_unlocked;
 	case LOOP_SET_CAPACITY:
 		err = -EPERM;
 		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
@@ -1535,16 +1548,20 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
 
 	switch(cmd) {
 	case LOOP_SET_STATUS:
-		mutex_lock(&lo->lo_ctl_mutex);
-		err = loop_set_status_compat(
-			lo, (const struct compat_loop_info __user *) arg);
-		mutex_unlock(&lo->lo_ctl_mutex);
+		err = mutex_lock_killable(&lo->lo_ctl_mutex);
+		if (!err) {
+			err = loop_set_status_compat(lo,
+				(const struct compat_loop_info __user *)arg);
+			mutex_unlock(&lo->lo_ctl_mutex);
+		}
 		break;
 	case LOOP_GET_STATUS:
-		mutex_lock(&lo->lo_ctl_mutex);
-		err = loop_get_status_compat(
-			lo, (struct compat_loop_info __user *) arg);
-		mutex_unlock(&lo->lo_ctl_mutex);
+		err = mutex_lock_killable(&lo->lo_ctl_mutex);
+		if (!err) {
+			err = loop_get_status_compat(lo,
+				(struct compat_loop_info __user *)arg);
+			/* loop_get_status() unlocks lo_ctl_mutex */
+		}
 		break;
 	case LOOP_SET_CAPACITY:
 	case LOOP_CLR_FD:
@@ -1808,7 +1825,7 @@ static int loop_add(struct loop_device **l, int i)
 	 * page. For directio mode, merge does help to dispatch bigger request
 	 * to underlayer disk. We will enable merge once directio is enabled.
 	 */
-	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 
 	err = -ENOMEM;
 	disk = lo->lo_disk = alloc_disk(1 << part_shift);
@@ -1864,8 +1881,8 @@ out:
 
 static void loop_remove(struct loop_device *lo)
 {
-	blk_cleanup_queue(lo->lo_queue);
 	del_gendisk(lo->lo_disk);
+	blk_cleanup_queue(lo->lo_queue);
 	blk_mq_free_tag_set(&lo->tag_set);
 	put_disk(lo->lo_disk);
 	kfree(lo);
@@ -1949,7 +1966,9 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
 		ret = loop_lookup(&lo, parm);
 		if (ret < 0)
 			break;
-		mutex_lock(&lo->lo_ctl_mutex);
+		ret = mutex_lock_killable(&lo->lo_ctl_mutex);
+		if (ret)
+			break;
 		if (lo->lo_state != Lo_unbound) {
 			ret = -EBUSY;
 			mutex_unlock(&lo->lo_ctl_mutex);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index b8af7352a18f..769c551e3d71 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -159,7 +159,7 @@ static bool mtip_check_surprise_removal(struct pci_dev *pdev)
 	if (vendor_id == 0xFFFF) {
 		dd->sr = true;
 		if (dd->queue)
-			set_bit(QUEUE_FLAG_DEAD, &dd->queue->queue_flags);
+			blk_queue_flag_set(QUEUE_FLAG_DEAD, dd->queue);
 		else
 			dev_warn(&dd->pdev->dev,
 				"%s: dd->queue is NULL\n", __func__);
@@ -3855,8 +3855,8 @@ skip_create_disk:
 		goto start_service_thread;
 
 	/* Set device limits. */
-	set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
-	clear_bit(QUEUE_FLAG_ADD_RANDOM, &dd->queue->queue_flags);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, dd->queue);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, dd->queue);
 	blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
 	blk_queue_physical_block_size(dd->queue, 4096);
 	blk_queue_max_hw_sectors(dd->queue, 0xffff);
@@ -3866,7 +3866,7 @@ skip_create_disk:
 
 	/* Signal trim support */
 	if (dd->trim_supp == true) {
-		set_bit(QUEUE_FLAG_DISCARD, &dd->queue->queue_flags);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, dd->queue);
 		dd->queue->limits.discard_granularity = 4096;
 		blk_queue_max_discard_sectors(dd->queue,
 			MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 86258b00a1d4..afbc202ca6fd 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -964,7 +964,7 @@ static void nbd_parse_flags(struct nbd_device *nbd)
 	else
 		set_disk_ro(nbd->disk, false);
 	if (config->flags & NBD_FLAG_SEND_TRIM)
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 	if (config->flags & NBD_FLAG_SEND_FLUSH) {
 		if (config->flags & NBD_FLAG_SEND_FUA)
 			blk_queue_write_cache(nbd->disk->queue, true, true);
@@ -1040,7 +1040,7 @@ static void nbd_config_put(struct nbd_device *nbd)
 		nbd->config = NULL;
 
 		nbd->tag_set.timeout = 0;
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 
 		mutex_unlock(&nbd->config_lock);
 		nbd_put(nbd);
@@ -1488,8 +1488,8 @@ static int nbd_dev_add(int index)
 	/*
 	 * Tell the block layer that we are not a rotational device
 	 */
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
 	disk->queue->limits.discard_granularity = 512;
 	blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
 	blk_queue_max_segment_size(disk->queue, UINT_MAX);
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 287a09611c0f..a76553293a31 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -16,10 +16,8 @@
 #include <linux/badblocks.h>
 #include <linux/fault-inject.h>
 
-#define SECTOR_SHIFT		9
 #define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
 #define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
-#define SECTOR_SIZE		(1 << SECTOR_SHIFT)
 #define SECTOR_MASK		(PAGE_SECTORS - 1)
 
 #define FREE_BATCH		16
@@ -29,6 +27,7 @@
 
 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
 static DECLARE_FAULT_ATTR(null_timeout_attr);
+static DECLARE_FAULT_ATTR(null_requeue_attr);
 #endif
 
 static inline u64 mb_per_tick(int mbps)
@@ -53,6 +52,7 @@ struct nullb_queue {
 	wait_queue_head_t wait;
 	unsigned int queue_depth;
 	struct nullb_device *dev;
+	unsigned int requeue_selection;
 
 	struct nullb_cmd *cmds;
 };
@@ -72,6 +72,7 @@ enum nullb_device_flags {
 	NULLB_DEV_FL_CACHE	= 3,
 };
 
+#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)
 /*
  * nullb_page is a page in memory for nullb devices.
 *
@@ -86,10 +87,10 @@ enum nullb_device_flags {
 */
 struct nullb_page {
 	struct page *page;
-	unsigned long bitmap;
+	DECLARE_BITMAP(bitmap, MAP_SZ);
 };
-#define NULLB_PAGE_LOCK (sizeof(unsigned long) * 8 - 1)
-#define NULLB_PAGE_FREE (sizeof(unsigned long) * 8 - 2)
+#define NULLB_PAGE_LOCK (MAP_SZ - 1)
+#define NULLB_PAGE_FREE (MAP_SZ - 2)
 
 struct nullb_device {
 	struct nullb *nullb;
@@ -170,6 +171,9 @@ MODULE_PARM_DESC(home_node, "Home node for the device");
 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
 static char g_timeout_str[80];
 module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), S_IRUGO);
+
+static char g_requeue_str[80];
+module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), S_IRUGO);
 #endif
 
 static int g_queue_mode = NULL_Q_MQ;
@@ -728,7 +732,7 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
 	if (!t_page->page)
 		goto out_freepage;
 
-	t_page->bitmap = 0;
+	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
 	return t_page;
 out_freepage:
 	kfree(t_page);
@@ -738,13 +742,20 @@ out:
 
 static void null_free_page(struct nullb_page *t_page)
 {
-	__set_bit(NULLB_PAGE_FREE, &t_page->bitmap);
-	if (test_bit(NULLB_PAGE_LOCK, &t_page->bitmap))
+	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
+	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
 		return;
 	__free_page(t_page->page);
 	kfree(t_page);
 }
 
+static bool null_page_empty(struct nullb_page *page)
+{
+	int size = MAP_SZ - 2;
+
+	return find_first_bit(page->bitmap, size) == size;
+}
+
 static void null_free_sector(struct nullb *nullb, sector_t sector,
 	bool is_cache)
 {
@@ -759,9 +770,9 @@ static void null_free_sector(struct nullb *nullb, sector_t sector,
 	t_page = radix_tree_lookup(root, idx);
 	if (t_page) {
-		__clear_bit(sector_bit, &t_page->bitmap);
+		__clear_bit(sector_bit, t_page->bitmap);
 
-		if (!t_page->bitmap) {
+		if (null_page_empty(t_page)) {
 			ret = radix_tree_delete_item(root, idx, t_page);
 			WARN_ON(ret != t_page);
 			null_free_page(ret);
@@ -832,7 +843,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb,
 	t_page = radix_tree_lookup(root, idx);
 	WARN_ON(t_page && t_page->page->index != idx);
 
-	if (t_page && (for_write || test_bit(sector_bit, &t_page->bitmap)))
+	if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
 		return t_page;
 
 	return NULL;
@@ -895,10 +906,10 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
 
 	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
 
-	__clear_bit(NULLB_PAGE_LOCK, &c_page->bitmap);
-	if (test_bit(NULLB_PAGE_FREE, &c_page->bitmap)) {
+	__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
+	if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
 		null_free_page(c_page);
-		if (t_page && t_page->bitmap == 0) {
+		if (t_page && null_page_empty(t_page)) {
 			ret = radix_tree_delete_item(&nullb->dev->data,
 				idx, t_page);
 			null_free_page(t_page);
@@ -914,11 +925,11 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
 
 	for (i = 0; i < PAGE_SECTORS;
 			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
-		if (test_bit(i, &c_page->bitmap)) {
+		if (test_bit(i, c_page->bitmap)) {
 			offset = (i << SECTOR_SHIFT);
 			memcpy(dst + offset, src + offset,
 				nullb->dev->blocksize);
-			__set_bit(i, &t_page->bitmap);
+			__set_bit(i, t_page->bitmap);
 		}
 	}
 
@@ -955,10 +966,10 @@ again:
 		 * We found the page which is being flushed to disk by other
 		 * threads
 		 */
-		if (test_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap))
+		if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
 			c_pages[i] = NULL;
 		else
-			__set_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap);
+			__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
 	}
 
 	one_round = 0;
@@ -1011,7 +1022,7 @@ static int copy_to_nullb(struct nullb *nullb, struct page *source,
 		kunmap_atomic(dst);
 		kunmap_atomic(src);
 
-		__set_bit(sector & SECTOR_MASK, &t_page->bitmap);
+		__set_bit(sector & SECTOR_MASK, t_page->bitmap);
 
 		if (is_fua)
 			null_free_sector(nullb, sector, true);
@@ -1380,7 +1391,15 @@ static bool should_timeout_request(struct request *rq)
 	if (g_timeout_str[0])
 		return should_fail(&null_timeout_attr, 1);
 #endif
+	return false;
+}
 
+static bool should_requeue_request(struct request *rq)
+{
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+	if (g_requeue_str[0])
+		return should_fail(&null_requeue_attr, 1);
+#endif
 	return false;
 }
 
@@ -1391,11 +1410,17 @@ static void null_request_fn(struct request_queue *q)
 	while ((rq = blk_fetch_request(q)) != NULL) {
 		struct nullb_cmd *cmd = rq->special;
 
-		if (!should_timeout_request(rq)) {
-			spin_unlock_irq(q->queue_lock);
-			null_handle_cmd(cmd);
-			spin_lock_irq(q->queue_lock);
+		/* just ignore the request */
+		if (should_timeout_request(rq))
+			continue;
+		if (should_requeue_request(rq)) {
+			blk_requeue_request(q, rq);
+			continue;
 		}
+
+		spin_unlock_irq(q->queue_lock);
+		null_handle_cmd(cmd);
+		spin_lock_irq(q->queue_lock);
 	}
 }
 
@@ -1422,10 +1447,23 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	blk_mq_start_request(bd->rq);
 
-	if (!should_timeout_request(bd->rq))
-		return null_handle_cmd(cmd);
+	if (should_requeue_request(bd->rq)) {
+		/*
+		 * Alternate between hitting the core BUSY path, and the
+		 * driver driven requeue path
+		 */
+		nq->requeue_selection++;
+		if (nq->requeue_selection & 1)
+			return BLK_STS_RESOURCE;
+		else {
+			blk_mq_requeue_request(bd->rq, true);
+			return BLK_STS_OK;
+		}
+	}
+	if (should_timeout_request(bd->rq))
+		return BLK_STS_OK;
 
-	return BLK_STS_OK;
+	return null_handle_cmd(cmd);
 }
 
 static const struct blk_mq_ops null_mq_ops = {
@@ -1485,7 +1523,7 @@ static void null_config_discard(struct nullb *nullb)
 	nullb->q->limits.discard_granularity = nullb->dev->blocksize;
 	nullb->q->limits.discard_alignment = nullb->dev->blocksize;
 	blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nullb->q);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
 }
 
 static int null_open(struct block_device *bdev, fmode_t mode)
@@ -1659,16 +1697,27 @@ static void null_validate_conf(struct nullb_device *dev)
 		dev->mbps = 0;
 }
 
-static bool null_setup_fault(void)
-{
 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
-	if (!g_timeout_str[0])
+static bool __null_setup_fault(struct fault_attr *attr, char *str)
+{
+	if (!str[0])
 		return true;
 
-	if (!setup_fault_attr(&null_timeout_attr, g_timeout_str))
+	if (!setup_fault_attr(attr, str))
 		return false;
 
-	null_timeout_attr.verbose = 0;
+	attr->verbose = 0;
+	return true;
+}
+#endif
+
+static bool null_setup_fault(void)
+{
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+	if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
+		return false;
+	if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
+		return false;
 #endif
 	return true;
 }
@@ -1717,7 +1766,8 @@ static int null_add_dev(struct nullb_device *dev)
 		}
 		null_init_queues(nullb);
 	} else if (dev->queue_mode == NULL_Q_BIO) {
-		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
+		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node,
+						NULL);
 		if (!nullb->q) {
 			rv = -ENOMEM;
 			goto out_cleanup_queues;
@@ -1758,8 +1808,8 @@ static int null_add_dev(struct nullb_device *dev)
 	}
 
 	nullb->q->queuedata = nullb;
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
 
 	mutex_lock(&lock);
 	nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
@@ -1802,10 +1852,6 @@ static int __init null_init(void)
 	struct nullb *nullb;
 	struct nullb_device *dev;
 
-	/* check for nullb_page.bitmap */
-	if (sizeof(unsigned long) * 8 - 2 < (PAGE_SIZE >> SECTOR_SHIFT))
-		return -EINVAL;
-
 	if (g_bs > PAGE_SIZE) {
 		pr_warn("null_blk: invalid block size\n");
 		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 7b8c6368beb7..a026211afb51 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -230,6 +230,8 @@ static int pcd_block_open(struct block_device *bdev, fmode_t mode)
 	struct pcd_unit *cd = bdev->bd_disk->private_data;
 	int ret;
 
+	check_disk_change(bdev);
+
 	mutex_lock(&pcd_mutex);
 	ret = cdrom_open(&cd->info, bdev, mode);
 	mutex_unlock(&pcd_mutex);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 8e40da093766..1e03b04819c8 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -51,15 +51,6 @@
 #define RBD_DEBUG	/* Activate rbd_assert() calls */
 
 /*
- * The basic unit of block I/O is a sector. It is interpreted in a
- * number of contexts in Linux (blk, bio, genhd), but the default is
- * universally 512 bytes. These symbols are just slightly more
- * meaningful than the bare numbers they represent.
- */
-#define SECTOR_SHIFT	9
-#define SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
-
-/*
  * Increment the given counter and return its updated value.
  * If the counter is already 0 it will not be incremented.
  * If the counter is already at its maximum value returns
@@ -4370,7 +4361,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 		goto out_tag_set;
 	}
 
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
 
 	/* set io sizes to object size */
@@ -4383,7 +4374,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	blk_queue_io_opt(q, segment_size);
 
 	/* enable the discard support */
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 	q->limits.discard_granularity = segment_size;
 	blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
 	blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index e397d3ee7308..dddb3f2490b6 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -287,10 +287,10 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
 	blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
 	blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
 
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, card->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, card->queue);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, card->queue);
 	if (rsxx_discard_supported(card)) {
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, card->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, card->queue);
 		blk_queue_max_discard_sectors(card->queue,
 						RSXX_HW_BLK_SIZE >> 9);
 		card->queue->limits.discard_granularity = RSXX_HW_BLK_SIZE;
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index e41935ab41ef..bc7aea6d7b7c 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -2858,8 +2858,8 @@ static int skd_cons_disk(struct skd_device *skdev)
 	/* set optimal I/O size to 8KB */
 	blk_queue_io_opt(q, 8192);
 
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 
 	blk_queue_rq_timeout(q, 8 * HZ);
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 8077123678ad..5c7fb8cc4149 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -888,13 +888,14 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	card->Active = -1;	/* no page is active */
 	card->bio = NULL;
 	card->biotail = &card->bio;
+	spin_lock_init(&card->lock);
 
-	card->queue = blk_alloc_queue(GFP_KERNEL);
+	card->queue = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE,
+					   &card->lock);
 	if (!card->queue)
 		goto failed_alloc;
 
 	blk_queue_make_request(card->queue, mm_make_request);
-	card->queue->queue_lock = &card->lock;
 	card->queue->queuedata = card;
 
 	tasklet_init(&card->tasklet, process_page, (unsigned long)card);
@@ -968,8 +969,6 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	dev_printk(KERN_INFO, &card->dev->dev,
 		"Window size %d bytes, IRQ %d\n", data, dev->irq);
 
-	spin_lock_init(&card->lock);
-
 	pci_set_drvdata(dev, card);
 
 	if (pci_write_cmd != 0x0F)	/* If not Memory Write & Invalidate */
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 92ec1bbece51..2a8e7813bd1a 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -932,15 +932,15 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
 	unsigned int segments = info->max_indirect_segments ? :
 				BLKIF_MAX_SEGMENTS_PER_REQUEST;
 
-	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
+	blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);
 
 	if (info->feature_discard) {
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
 		blk_queue_max_discard_sectors(rq, get_capacity(gd));
 		rq->limits.discard_granularity = info->discard_granularity;
 		rq->limits.discard_alignment = info->discard_alignment;
 		if (info->feature_secdiscard)
-			queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
+			blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
 	}
 
 	/* Hard sector size and max sectors impersonate the equiv. hardware. */
@@ -1611,8 +1611,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 				blkif_req(req)->error = BLK_STS_NOTSUPP;
 				info->feature_discard = 0;
 				info->feature_secdiscard = 0;
-				queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
-				queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
+				blk_queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
+				blk_queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
 			}
 			break;
 		case BLKIF_OP_FLUSH_DISKCACHE:
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 0afa6c8c3857..71b449613cfa 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1530,8 +1530,8 @@ static int zram_add(void)
 	/* Actual capacity set using syfs (/sys/block/zram<id>/disksize */
 	set_capacity(zram->disk, 0);
 	/* zram devices sort of resembles non-rotational disks */
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
 
 	/*
 	 * To ensure that we always get PAGE_SIZE aligned
@@ -1544,7 +1544,7 @@ static int zram_add(void)
 	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
 	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
 	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, zram->disk->queue);
 
 	/*
 	 * zram_bio_discard() will clear all logical blocks if logical block
@@ -1620,8 +1620,8 @@ static int zram_remove(struct zram *zram)
 
 	pr_info("Removed device: %s\n", zram->disk->disk_name);
 
-	blk_cleanup_queue(zram->disk->queue);
 	del_gendisk(zram->disk);
+	blk_cleanup_queue(zram->disk->queue);
 	put_disk(zram->disk);
 	kfree(zram);
 	return 0;
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 31762db861e3..1e9bf65c0bfb 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -37,7 +37,6 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
 
 /*-- End of configurable params */
 
-#define SECTOR_SHIFT		9
 #define SECTORS_PER_PAGE_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
 #define SECTORS_PER_PAGE	(1 << SECTORS_PER_PAGE_SHIFT)
 #define ZRAM_LOGICAL_BLOCK_SHIFT 12
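One pattern in the loop.c hunks above is worth calling out: loop_get_status()
now returns with lo_ctl_mutex released, pinning the backing file with
get_file() so that vfs_getattr() can run without the mutex held. A
stripped-down sketch of that pattern, under the assumption that the caller
acquired the mutex — the function and parameter names are reduced from the
driver code above, not part of it:

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/stat.h>

/* Reduced from loop_get_status() above: drop a control mutex around a
 * potentially slow filesystem call, keeping the file pinned meanwhile.
 * Returns with ctl_mutex unlocked, like the loop.c code. */
static int stat_backing_file(struct mutex *ctl_mutex, struct file *backing,
			     u64 *ino)
{
	struct file *file = get_file(backing);	/* pin before unlocking */
	struct kstat stat;
	int ret;

	mutex_unlock(ctl_mutex);		/* acquired by the caller */
	ret = vfs_getattr(&file->f_path, &stat, STATX_INO,
			  AT_STATX_SYNC_AS_STAT);
	if (!ret)
		*ino = stat.ino;
	fput(file);				/* drop the pin */
	return ret;
}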