author     Linus Torvalds <torvalds@linux-foundation.org>   2021-07-09 12:05:33 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2021-07-09 12:05:33 -0700
commit     a022f7d575bb68c35be0a9ea68860411dec652fe
tree       1860cd383d6ee291849c652ced784dd3d9acf357 /drivers
parent     3de62951a5bee5dce5f4ffab8b7323ca9d3c7e1c
parent     a731763fc479a9c64456e0643d0ccf64203100c9
Merge tag 'block-5.14-2021-07-08' of git://git.kernel.dk/linux-block
Pull more block updates from Jens Axboe:
"A combination of changes that ended up depending on both the driver
and core branch (and/or the IDE removal), and a few late arriving
fixes. In detail:
- Fix io ticks wrap-around issue (Chunguang)
- nvme-tcp sock locking fix (Maurizio)
- s390-dasd fixes (Kees, Christoph)
- blk_execute_rq polling support (Keith)
- blk-cgroup RCU iteration fix (Yu)
- nbd backend ID addition (Prasanna)
- Partition deletion fix (Yufen)
- Use blk_mq_alloc_disk for mmc, mtip32xx, ubd (Christoph)
- Removal of now dead block request types due to IDE removal
(Christoph)
- Loop probing and control device cleanups (Christoph)
- Device uevent fix (Christoph)
- Misc cleanups/fixes (Tetsuo, Christoph)"
* tag 'block-5.14-2021-07-08' of git://git.kernel.dk/linux-block: (34 commits)
blk-cgroup: prevent rcu_sched detected stalls warnings while iterating blkgs
block: fix the problem of io_ticks becoming smaller
nvme-tcp: can't set sk_user_data without write_lock
loop: remove unused variable in loop_set_status()
block: remove the bdgrab in blk_drop_partitions
block: grab a device refcount in disk_uevent
s390/dasd: Avoid field over-reading memcpy()
dasd: unexport dasd_set_target_state
block: check disk exist before trying to add partition
ubd: remove dead code in ubd_setup_common
nvme: use return value from blk_execute_rq()
block: return errors from blk_execute_rq()
nvme: use blk_execute_rq() for passthrough commands
block: support polling through blk_execute_rq
block: remove REQ_OP_SCSI_{IN,OUT}
block: mark blk_mq_init_queue_data static
loop: rewrite loop_exit using idr_for_each_entry
loop: split loop_lookup
loop: don't allow deleting an unspecified loop device
loop: move loop_ctl_mutex locking into loop_add
...
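Several of the loop commits above (loop_add() taking loop_ctl_mutex itself, the split of loop_lookup() into loop_control_remove()/loop_control_get_free(), the simplified loop_control_ioctl()) rework the kernel side of the /dev/loop-control interface. As background only, a minimal userspace sketch of those ioctls follows; it is not part of the patch, and error handling is abbreviated:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/loop.h>		/* LOOP_CTL_ADD, LOOP_CTL_REMOVE, LOOP_CTL_GET_FREE */

int main(void)
{
	int ctl = open("/dev/loop-control", O_RDWR);
	if (ctl < 0)
		return 1;

	/* LOOP_CTL_GET_FREE: return an unbound device, creating one if
	 * necessary; served by the new loop_control_get_free() above. */
	int idx = ioctl(ctl, LOOP_CTL_GET_FREE);
	if (idx >= 0)
		printf("free device: /dev/loop%d\n", idx);

	/* LOOP_CTL_ADD: request a specific index; now handled directly by
	 * loop_add(), which takes loop_ctl_mutex itself. */
	ioctl(ctl, LOOP_CTL_ADD, 8);

	/* LOOP_CTL_REMOVE: delete an unbound device; after this series an
	 * unspecified (negative) index is rejected with -EINVAL. */
	ioctl(ctl, LOOP_CTL_REMOVE, 8);

	close(ctl);
	return 0;
}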
Diffstat (limited to 'drivers')
  drivers/block/loop.c
  drivers/block/mtip32xx/mtip32xx.c
  drivers/block/nbd.c
  drivers/block/null_blk/main.c
  drivers/block/pktcdvd.c
  drivers/cdrom/cdrom.c
  drivers/mmc/core/block.c
  drivers/mmc/core/queue.c
  drivers/mmc/core/queue.h
  drivers/nvme/host/core.c
  drivers/nvme/host/fabrics.c
  drivers/nvme/host/fabrics.h
  drivers/nvme/host/fc.c
  drivers/nvme/host/ioctl.c
  drivers/nvme/host/nvme.h
  drivers/nvme/host/rdma.c
  drivers/nvme/host/tcp.c
  drivers/nvme/target/loop.c
  drivers/nvme/target/passthru.c
  drivers/nvme/target/tcp.c
  drivers/s390/block/dasd.c
  drivers/s390/block/dasd_eckd.c
  drivers/s390/block/dasd_eckd.h
  drivers/scsi/scsi_error.c
  drivers/scsi/scsi_lib.c
  drivers/scsi/sg.c
  drivers/scsi/st.c
  drivers/target/target_core_pscsi.c
28 files changed, 249 insertions, 322 deletions
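For reference, the nvme changes in drivers/nvme/host/core.c in this merge drop the polled-completion helper and make blk_execute_rq()/nvme_execute_passthru_rq() return status directly: 0 on success, a positive controller CQE status, or a negative kernel errno. A caller-side fragment illustrating that convention follows; the helper name and log messages are illustrative, only the return semantics come from the patch:

/* Illustrative only: how a caller distinguishes the three outcomes of the
 * reworked passthrough path.  example_submit_passthru() and the messages
 * are made up; the return convention (0 / >0 CQE status / <0 errno) is the
 * one documented above nvme_execute_rq(). */
static void example_submit_passthru(struct request *rq)
{
	int ret = nvme_execute_passthru_rq(rq);

	if (ret < 0)		/* kernel-level failure (e.g. -EINTR on cancel) */
		pr_err("passthru submission failed: %d\n", ret);
	else if (ret > 0)	/* NVMe completion status from the controller */
		pr_err("passthru NVMe status: 0x%x\n", ret);
	/* ret == 0: success; nvme_req(rq)->result holds the result */
}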
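Similarly, the nbd change in drivers/block/nbd.c exports the NBD_ATTR_BACKEND_IDENTIFIER string as a read-only "backend" attribute on the disk device. A small userspace sketch of consuming it; the /sys/block/nbd0/backend path is inferred from the device_create_file(disk_to_dev(...)) call and is an assumption, not spelled out in the patch:

#include <stdio.h>

int main(void)
{
	/* Path inferred from the "backend" device attribute registered on
	 * disk_to_dev(nbd->disk); adjust the nbd index as needed. */
	char backend[256] = "";
	FILE *f = fopen("/sys/block/nbd0/backend", "r");

	if (!f) {
		perror("backend attribute");
		return 1;
	}
	if (fgets(backend, sizeof(backend), f))
		printf("nbd0 backend: %s", backend);
	fclose(f);
	return 0;
}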