author     Linus Torvalds <torvalds@linux-foundation.org>  2018-06-10 13:01:12 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-06-10 13:01:12 -0700
commit     5f85942c2ea2ed59d8f19c954bbb0f5c1a2ebdd1 (patch)
tree       ffd0c606829178dd0be28c557685203f760438d8  /drivers/target
parent     0c14e43a42e4e44f70963f8ccf89461290c4e4da (diff)
parent     1b5c2cb196684f1418fe82257a1b0a8cb0aabc9d (diff)
download   linux-5f85942c2ea2ed59d8f19c954bbb0f5c1a2ebdd1.tar.bz2
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This is mostly updates to the usual drivers: ufs, qedf, mpt3sas, lpfc,
  xfcp, hisi_sas, cxlflash, qla2xxx.

  In the absence of Nic, we're also taking target updates which are
  mostly minor except for the tcmu refactor.

  The only real core change to worry about is the removal of high page
  bouncing (in sas, storvsc and iscsi). This has been well tested and no
  problems have shown up so far"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (268 commits)
  scsi: lpfc: update driver version to 12.0.0.4
  scsi: lpfc: Fix port initialization failure.
  scsi: lpfc: Fix 16gb hbas failing cq create.
  scsi: lpfc: Fix crash in blk_mq layer when executing modprobe -r lpfc
  scsi: lpfc: correct oversubscription of nvme io requests for an adapter
  scsi: lpfc: Fix MDS diagnostics failure (Rx < Tx)
  scsi: hisi_sas: Mark PHY as in reset for nexus reset
  scsi: hisi_sas: Fix return value when get_free_slot() failed
  scsi: hisi_sas: Terminate STP reject quickly for v2 hw
  scsi: hisi_sas: Add v2 hw force PHY function for internal ATA command
  scsi: hisi_sas: Include TMF elements in struct hisi_sas_slot
  scsi: hisi_sas: Try wait commands before before controller reset
  scsi: hisi_sas: Init disks after controller reset
  scsi: hisi_sas: Create a scsi_host_template per HW module
  scsi: hisi_sas: Reset disks when discovered
  scsi: hisi_sas: Add LED feature for v3 hw
  scsi: hisi_sas: Change common allocation mode of device id
  scsi: hisi_sas: change slot index allocation mode
  scsi: hisi_sas: Introduce hisi_sas_phy_set_linkrate()
  scsi: hisi_sas: fix a typo in hisi_sas_task_prep()
  ...
Diffstat (limited to 'drivers/target')
-rw-r--r--  drivers/target/target_core_configfs.c    25
-rw-r--r--  drivers/target/target_core_file.c        137
-rw-r--r--  drivers/target/target_core_file.h          1
-rw-r--r--  drivers/target/target_core_internal.h      1
-rw-r--r--  drivers/target/target_core_pscsi.c        26
-rw-r--r--  drivers/target/target_core_transport.c    64
-rw-r--r--  drivers/target/target_core_user.c         160
7 files changed, 344 insertions, 70 deletions
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 3f4bf126eed0..5ccef7d597fa 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -155,6 +155,8 @@ static ssize_t target_core_item_dbroot_store(struct config_item *item,
mutex_unlock(&g_tf_lock);
+ pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
+
return read_bytes;
}
@@ -3213,6 +3215,27 @@ void target_setup_backend_cits(struct target_backend *tb)
target_core_setup_dev_stat_cit(tb);
}
+static void target_init_dbroot(void)
+{
+ struct file *fp;
+
+ snprintf(db_root_stage, DB_ROOT_LEN, DB_ROOT_PREFERRED);
+ fp = filp_open(db_root_stage, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ pr_err("db_root: cannot open: %s\n", db_root_stage);
+ return;
+ }
+ if (!S_ISDIR(file_inode(fp)->i_mode)) {
+ filp_close(fp, NULL);
+ pr_err("db_root: not a valid directory: %s\n", db_root_stage);
+ return;
+ }
+ filp_close(fp, NULL);
+
+ strncpy(db_root, db_root_stage, DB_ROOT_LEN);
+ pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
+}
+
static int __init target_core_init_configfs(void)
{
struct configfs_subsystem *subsys = &target_core_fabrics;
@@ -3293,6 +3316,8 @@ static int __init target_core_init_configfs(void)
if (ret < 0)
goto out;
+ target_init_dbroot();
+
return 0;
out:
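
The target_init_dbroot() addition above makes /etc/target the preferred database root when the target_core_mod configfs tree is initialized, keeping the built-in /var/target default when that path is missing or is not a directory. A minimal userspace sketch of the equivalent check, assuming only the two paths named in the patch (illustrative, not kernel code):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
        struct stat st;
        const char *preferred = "/etc/target";   /* DB_ROOT_PREFERRED */
        const char *db_root   = "/var/target";   /* DB_ROOT_DEFAULT */

        /* prefer /etc/target only if it exists and is a directory,
         * mirroring the S_ISDIR() test in target_init_dbroot() */
        if (stat(preferred, &st) == 0 && S_ISDIR(st.st_mode))
                db_root = preferred;

        printf("db_root: %s\n", db_root);
        return 0;
}
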
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 9b2c0c773022..16751ae55d7b 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -250,6 +250,84 @@ static void fd_destroy_device(struct se_device *dev)
}
}
+struct target_core_file_cmd {
+ unsigned long len;
+ struct se_cmd *cmd;
+ struct kiocb iocb;
+};
+
+static void cmd_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
+{
+ struct target_core_file_cmd *cmd;
+
+ cmd = container_of(iocb, struct target_core_file_cmd, iocb);
+
+ if (ret != cmd->len)
+ target_complete_cmd(cmd->cmd, SAM_STAT_CHECK_CONDITION);
+ else
+ target_complete_cmd(cmd->cmd, SAM_STAT_GOOD);
+
+ kfree(cmd);
+}
+
+static sense_reason_t
+fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ enum dma_data_direction data_direction)
+{
+ int is_write = !(data_direction == DMA_FROM_DEVICE);
+ struct se_device *dev = cmd->se_dev;
+ struct fd_dev *fd_dev = FD_DEV(dev);
+ struct file *file = fd_dev->fd_file;
+ struct target_core_file_cmd *aio_cmd;
+ struct iov_iter iter = {};
+ struct scatterlist *sg;
+ struct bio_vec *bvec;
+ ssize_t len = 0;
+ int ret = 0, i;
+
+ aio_cmd = kmalloc(sizeof(struct target_core_file_cmd), GFP_KERNEL);
+ if (!aio_cmd)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
+ if (!bvec) {
+ kfree(aio_cmd);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+ for_each_sg(sgl, sg, sgl_nents, i) {
+ bvec[i].bv_page = sg_page(sg);
+ bvec[i].bv_len = sg->length;
+ bvec[i].bv_offset = sg->offset;
+
+ len += sg->length;
+ }
+
+ iov_iter_bvec(&iter, ITER_BVEC | is_write, bvec, sgl_nents, len);
+
+ aio_cmd->cmd = cmd;
+ aio_cmd->len = len;
+ aio_cmd->iocb.ki_pos = cmd->t_task_lba * dev->dev_attrib.block_size;
+ aio_cmd->iocb.ki_filp = file;
+ aio_cmd->iocb.ki_complete = cmd_rw_aio_complete;
+ aio_cmd->iocb.ki_flags = IOCB_DIRECT;
+
+ if (is_write && (cmd->se_cmd_flags & SCF_FUA))
+ aio_cmd->iocb.ki_flags |= IOCB_DSYNC;
+
+ if (is_write)
+ ret = call_write_iter(file, &aio_cmd->iocb, &iter);
+ else
+ ret = call_read_iter(file, &aio_cmd->iocb, &iter);
+
+ kfree(bvec);
+
+ if (ret != -EIOCBQUEUED)
+ cmd_rw_aio_complete(&aio_cmd->iocb, ret, 0);
+
+ return 0;
+}
+
static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
u32 block_size, struct scatterlist *sgl,
u32 sgl_nents, u32 data_length, int is_write)
@@ -527,7 +605,7 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
}
static sense_reason_t
-fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+fd_execute_rw_buffered(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
enum dma_data_direction data_direction)
{
struct se_device *dev = cmd->se_dev;
@@ -537,16 +615,6 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
sense_reason_t rc;
int ret = 0;
/*
- * We are currently limited by the number of iovecs (2048) per
- * single vfs_[writev,readv] call.
- */
- if (cmd->data_length > FD_MAX_BYTES) {
- pr_err("FILEIO: Not able to process I/O of %u bytes due to"
- "FD_MAX_BYTES: %u iovec count limitation\n",
- cmd->data_length, FD_MAX_BYTES);
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- }
- /*
* Call vectorized fileio functions to map struct scatterlist
* physical memory addresses to struct iovec virtual memory.
*/
@@ -620,14 +688,39 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
return 0;
}
+static sense_reason_t
+fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ enum dma_data_direction data_direction)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct fd_dev *fd_dev = FD_DEV(dev);
+
+ /*
+ * We are currently limited by the number of iovecs (2048) per
+ * single vfs_[writev,readv] call.
+ */
+ if (cmd->data_length > FD_MAX_BYTES) {
+ pr_err("FILEIO: Not able to process I/O of %u bytes due to"
+ "FD_MAX_BYTES: %u iovec count limitation\n",
+ cmd->data_length, FD_MAX_BYTES);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+ if (fd_dev->fbd_flags & FDBD_HAS_ASYNC_IO)
+ return fd_execute_rw_aio(cmd, sgl, sgl_nents, data_direction);
+ return fd_execute_rw_buffered(cmd, sgl, sgl_nents, data_direction);
+}
+
enum {
- Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
+ Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io,
+ Opt_fd_async_io, Opt_err
};
static match_table_t tokens = {
{Opt_fd_dev_name, "fd_dev_name=%s"},
{Opt_fd_dev_size, "fd_dev_size=%s"},
{Opt_fd_buffered_io, "fd_buffered_io=%d"},
+ {Opt_fd_async_io, "fd_async_io=%d"},
{Opt_err, NULL}
};
@@ -693,6 +786,21 @@ static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
break;
+ case Opt_fd_async_io:
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
+ if (arg != 1) {
+ pr_err("bogus fd_async_io=%d value\n", arg);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ pr_debug("FILEIO: Using async I/O"
+ " operations for struct fd_dev\n");
+
+ fd_dev->fbd_flags |= FDBD_HAS_ASYNC_IO;
+ break;
default:
break;
}
@@ -709,10 +817,11 @@ static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
ssize_t bl = 0;
bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
- bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
+ bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s Async: %d\n",
fd_dev->fd_dev_name, fd_dev->fd_dev_size,
(fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
- "Buffered-WCE" : "O_DSYNC");
+ "Buffered-WCE" : "O_DSYNC",
+ !!(fd_dev->fbd_flags & FDBD_HAS_ASYNC_IO));
return bl;
}
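
For context on the fd_async_io token parsed above: FILEIO backstore options are written as a comma-separated string to the device's configfs "control" attribute before the device is enabled. A minimal sketch of switching the new flag on from userspace; the HBA/device names and backing file path are illustrative, not part of this patch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* illustrative configfs path: fileio HBA 0, device "disk0" */
        const char *ctrl =
                "/sys/kernel/config/target/core/fileio_0/disk0/control";
        const char *params =
                "fd_dev_name=/var/fileio/disk0.img,"
                "fd_dev_size=1073741824,fd_async_io=1";
        int fd = open(ctrl, O_WRONLY);

        if (fd < 0) {
                perror("open control");
                return 1;
        }
        /* hand the option string to fd_set_configfs_dev_params() */
        if (write(fd, params, strlen(params)) < 0)
                perror("write control");
        close(fd);
        return 0;
}

Once FDBD_HAS_ASYNC_IO is set, fd_execute_rw() dispatches to fd_execute_rw_aio(), which maps the scatterlist into a bio_vec array and submits it as a single IOCB_DIRECT kiocb instead of going through the synchronous buffered path.
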
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 53be5ffd3261..929b1ecd544e 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -22,6 +22,7 @@
#define FBDF_HAS_PATH 0x01
#define FBDF_HAS_SIZE 0x02
#define FDBD_HAS_BUFFERED_IO_WCE 0x04
+#define FDBD_HAS_ASYNC_IO 0x08
#define FDBD_FORMAT_UNIT_SIZE 2048
struct fd_dev {
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 1d5afc3ae017..dead30b1d32c 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -166,6 +166,7 @@ extern struct se_portal_group xcopy_pt_tpg;
/* target_core_configfs.c */
#define DB_ROOT_LEN 4096
#define DB_ROOT_DEFAULT "/var/target"
+#define DB_ROOT_PREFERRED "/etc/target"
extern char db_root[];
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 668934ea74cb..47d76c862014 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -689,8 +689,29 @@ after_mode_sense:
}
after_mode_select:
- if (scsi_status == SAM_STAT_CHECK_CONDITION)
+ if (scsi_status == SAM_STAT_CHECK_CONDITION) {
transport_copy_sense_to_cmd(cmd, req_sense);
+
+ /*
+ * check for TAPE device reads with
+ * FM/EOM/ILI set, so that we can get data
+ * back despite framework assumption that a
+ * check condition means there is no data
+ */
+ if (sd->type == TYPE_TAPE &&
+ cmd->data_direction == DMA_FROM_DEVICE) {
+ /*
+ * is sense data valid, fixed format,
+ * and have FM, EOM, or ILI set?
+ */
+ if (req_sense[0] == 0xf0 && /* valid, fixed format */
+ req_sense[2] & 0xe0 && /* FM, EOM, or ILI */
+ (req_sense[2] & 0xf) == 0) { /* key==NO_SENSE */
+ pr_debug("Tape FM/EOM/ILI status detected. Treat as normal read.\n");
+ cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
+ }
+ }
+ }
}
enum {
@@ -1062,7 +1083,8 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
switch (host_byte(result)) {
case DID_OK:
- target_complete_cmd(cmd, scsi_status);
+ target_complete_cmd_with_length(cmd, scsi_status,
+ cmd->data_length - scsi_req(req)->resid_len);
break;
default:
pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 4558f2e1fe1b..f0e8f0f4ccb4 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -779,7 +779,9 @@ EXPORT_SYMBOL(target_complete_cmd);
void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
- if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
+ if ((scsi_status == SAM_STAT_GOOD ||
+ cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
+ length < cmd->data_length) {
if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
cmd->residual_count += cmd->data_length - length;
} else {
@@ -1431,7 +1433,7 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
return 0;
}
-/*
+/**
* target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
* se_cmd + use pre-allocated SGL memory.
*
@@ -1441,7 +1443,7 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
* @sense: pointer to SCSI sense buffer
* @unpacked_lun: unpacked LUN to reference for struct se_lun
* @data_length: fabric expected data transfer length
- * @task_addr: SAM task attribute
+ * @task_attr: SAM task attribute
* @data_dir: DMA data direction
* @flags: flags for command submission from target_sc_flags_tables
* @sgl: struct scatterlist memory for unidirectional mapping
@@ -1578,7 +1580,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
}
EXPORT_SYMBOL(target_submit_cmd_map_sgls);
-/*
+/**
* target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
*
* @se_cmd: command descriptor to submit
@@ -1587,7 +1589,7 @@ EXPORT_SYMBOL(target_submit_cmd_map_sgls);
* @sense: pointer to SCSI sense buffer
* @unpacked_lun: unpacked LUN to reference for struct se_lun
* @data_length: fabric expected data transfer length
- * @task_addr: SAM task attribute
+ * @task_attr: SAM task attribute
* @data_dir: DMA data direction
* @flags: flags for command submission from target_sc_flags_tables
*
@@ -1654,7 +1656,7 @@ static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag,
* @se_sess: associated se_sess for endpoint
* @sense: pointer to SCSI sense buffer
* @unpacked_lun: unpacked LUN to reference for struct se_lun
- * @fabric_context: fabric context for TMR req
+ * @fabric_tmr_ptr: fabric context for TMR req
* @tm_type: Type of TM request
* @gfp: gfp type for caller
* @tag: referenced task tag for TMR_ABORT_TASK
@@ -2084,12 +2086,24 @@ static void transport_complete_qf(struct se_cmd *cmd)
goto queue_status;
}
- if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
+ /*
+ * Check if we need to send a sense buffer from
+ * the struct se_cmd in question. We do NOT want
+ * to take this path if the IO has been marked as
+ * needing to be treated like a "normal read". This
+ * is the case if it's a tape read, and either the
+ * FM, EOM, or ILI bits are set, but there is no
+ * sense data.
+ */
+ if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
+ cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
goto queue_status;
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
- if (cmd->scsi_status)
+ /* queue status if not treating this as a normal read */
+ if (cmd->scsi_status &&
+ !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
goto queue_status;
trace_target_cmd_complete(cmd);
@@ -2194,9 +2208,15 @@ static void target_complete_ok_work(struct work_struct *work)
/*
* Check if we need to send a sense buffer from
- * the struct se_cmd in question.
+ * the struct se_cmd in question. We do NOT want
+ * to take this path if the IO has been marked as
+ * needing to be treated like a "normal read". This
+ * is the case if it's a tape read, and either the
+ * FM, EOM, or ILI bits are set, but there is no
+ * sense data.
*/
- if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
+ if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
+ cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
WARN_ON(!cmd->scsi_status);
ret = transport_send_check_condition_and_sense(
cmd, 0, 1);
@@ -2238,7 +2258,18 @@ static void target_complete_ok_work(struct work_struct *work)
queue_rsp:
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
- if (cmd->scsi_status)
+ /*
+ * if this is a READ-type IO, but SCSI status
+ * is set, then skip returning data and just
+ * return the status -- unless this IO is marked
+ * as needing to be treated as a normal read,
+ * in which case we want to go ahead and return
+ * the data. This happens, for example, for tape
+ * reads with the FM, EOM, or ILI bits set, with
+ * no sense data.
+ */
+ if (cmd->scsi_status &&
+ !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
goto queue_status;
atomic_long_add(cmd->data_length,
@@ -2606,7 +2637,8 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
}
EXPORT_SYMBOL(transport_generic_free_cmd);
-/* target_get_sess_cmd - Add command to active ->sess_cmd_list
+/**
+ * target_get_sess_cmd - Add command to active ->sess_cmd_list
* @se_cmd: command descriptor to add
* @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
*/
@@ -2800,7 +2832,8 @@ void target_show_cmd(const char *pfx, struct se_cmd *cmd)
}
EXPORT_SYMBOL(target_show_cmd);
-/* target_sess_cmd_list_set_waiting - Flag all commands in
+/**
+ * target_sess_cmd_list_set_waiting - Flag all commands in
* sess_cmd_list to complete cmd_wait_comp. Set
* sess_tearing_down so no more commands are queued.
* @se_sess: session to flag
@@ -2835,7 +2868,8 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
}
EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
-/* target_wait_for_sess_cmds - Wait for outstanding descriptors
+/**
+ * target_wait_for_sess_cmds - Wait for outstanding descriptors
* @se_sess: session to wait for active I/O
*/
void target_wait_for_sess_cmds(struct se_session *se_sess)
@@ -3332,7 +3366,7 @@ static void target_tmr_work(struct work_struct *work)
tmr->response = TMR_FUNCTION_REJECTED;
break;
default:
- pr_err("Uknown TMR function: 0x%02x.\n",
+ pr_err("Unknown TMR function: 0x%02x.\n",
tmr->function);
tmr->response = TMR_FUNCTION_REJECTED;
break;
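
With the pscsi change above, a DID_OK completion now passes length = data_length - resid_len into target_complete_cmd_with_length(), and the transport hunk earlier in this diff lets the underflow accounting run not only for GOOD status but also for tape reads flagged SCF_TREAT_READ_AS_NORMAL. A standalone sketch of that residual accounting (illustrative, not kernel code):

#include <stdint.h>
#include <stdio.h>

struct cmd {
        uint32_t data_length;    /* bytes the initiator asked for */
        uint32_t residual_count; /* bytes not transferred */
        int underflow;
};

/* Mirrors the underflow branch of target_complete_cmd_with_length() */
static void complete_with_length(struct cmd *c, uint32_t length)
{
        if (length < c->data_length) {
                if (c->underflow) {
                        c->residual_count += c->data_length - length;
                } else {
                        c->underflow = 1;
                        c->residual_count = c->data_length - length;
                }
                c->data_length = length;
        }
}

int main(void)
{
        struct cmd c = { .data_length = 4096 };

        /* e.g. a tape read that stopped at a filemark: resid_len = 3584 */
        complete_with_length(&c, 512);
        printf("residual=%u underflow=%d\n",
               (unsigned)c.residual_count, c.underflow);
        return 0;
}
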
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 4f26bdc3d1dc..94b183efd236 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -42,7 +42,11 @@
#include <linux/target_core_user.h>
-/*
+/**
+ * DOC: Userspace I/O
+ * Userspace I/O
+ * -------------
+ *
* Define a shared-memory interface for LIO to pass SCSI commands and
* data to userspace for processing. This is to allow backends that
* are too complex for in-kernel support to be possible.
@@ -53,7 +57,7 @@
* See the .h file for how the ring is laid out. Note that while the
* command ring is defined, the particulars of the data area are
* not. Offset values in the command entry point to other locations
- * internal to the mmap()ed area. There is separate space outside the
+ * internal to the mmap-ed area. There is separate space outside the
* command ring for data buffers. This leaves maximum flexibility for
* moving buffer allocations, or even page flipping or other
* allocation techniques, without altering the command ring layout.
@@ -1382,7 +1386,7 @@ static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
return page;
}
-static int tcmu_vma_fault(struct vm_fault *vmf)
+static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
{
struct tcmu_dev *udev = vmf->vma->vm_private_data;
struct uio_info *info = &udev->uio_info;
@@ -1586,8 +1590,9 @@ static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
return ret;
}
-static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd,
- int reconfig_attr, const void *reconfig_data)
+static int tcmu_netlink_event_init(struct tcmu_dev *udev,
+ enum tcmu_genl_cmd cmd,
+ struct sk_buff **buf, void **hdr)
{
struct sk_buff *skb;
void *msg_header;
@@ -1613,46 +1618,66 @@ static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd,
if (ret < 0)
goto free_skb;
- if (cmd == TCMU_CMD_RECONFIG_DEVICE) {
- switch (reconfig_attr) {
- case TCMU_ATTR_DEV_CFG:
- ret = nla_put_string(skb, reconfig_attr, reconfig_data);
- break;
- case TCMU_ATTR_DEV_SIZE:
- ret = nla_put_u64_64bit(skb, reconfig_attr,
- *((u64 *)reconfig_data),
- TCMU_ATTR_PAD);
- break;
- case TCMU_ATTR_WRITECACHE:
- ret = nla_put_u8(skb, reconfig_attr,
- *((u8 *)reconfig_data));
- break;
- default:
- BUG();
- }
+ *buf = skb;
+ *hdr = msg_header;
+ return ret;
- if (ret < 0)
- goto free_skb;
- }
+free_skb:
+ nlmsg_free(skb);
+ return ret;
+}
+
+static int tcmu_netlink_event_send(struct tcmu_dev *udev,
+ enum tcmu_genl_cmd cmd,
+ struct sk_buff **buf, void **hdr)
+{
+ int ret = 0;
+ struct sk_buff *skb = *buf;
+ void *msg_header = *hdr;
genlmsg_end(skb, msg_header);
tcmu_init_genl_cmd_reply(udev, cmd);
ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
- TCMU_MCGRP_CONFIG, GFP_KERNEL);
- /* We don't care if no one is listening */
+ TCMU_MCGRP_CONFIG, GFP_KERNEL);
+ /* We don't care if no one is listening */
if (ret == -ESRCH)
ret = 0;
if (!ret)
ret = tcmu_wait_genl_cmd_reply(udev);
-
- return ret;
-free_skb:
- nlmsg_free(skb);
return ret;
}
+static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
+{
+ struct sk_buff *skb = NULL;
+ void *msg_header = NULL;
+ int ret = 0;
+
+ ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb,
+ &msg_header);
+ if (ret < 0)
+ return ret;
+ return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, &skb,
+ &msg_header);
+
+}
+
+static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
+{
+ struct sk_buff *skb = NULL;
+ void *msg_header = NULL;
+ int ret = 0;
+
+ ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE,
+ &skb, &msg_header);
+ if (ret < 0)
+ return ret;
+ return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
+ &skb, &msg_header);
+}
+
static int tcmu_update_uio_info(struct tcmu_dev *udev)
{
struct tcmu_hba *hba = udev->hba->hba_ptr;
@@ -1762,7 +1787,7 @@ static int tcmu_configure_device(struct se_device *dev)
*/
kref_get(&udev->kref);
- ret = tcmu_netlink_event(udev, TCMU_CMD_ADDED_DEVICE, 0, NULL);
+ ret = tcmu_send_dev_add_event(udev);
if (ret)
goto err_netlink;
@@ -1812,7 +1837,7 @@ static void tcmu_destroy_device(struct se_device *dev)
list_del(&udev->node);
mutex_unlock(&root_udev_mutex);
- tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);
+ tcmu_send_dev_remove_event(udev);
uio_unregister_device(&udev->uio_info);
@@ -2151,6 +2176,27 @@ static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
}
+static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
+ const char *reconfig_data)
+{
+ struct sk_buff *skb = NULL;
+ void *msg_header = NULL;
+ int ret = 0;
+
+ ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
+ &skb, &msg_header);
+ if (ret < 0)
+ return ret;
+ ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data);
+ if (ret < 0) {
+ nlmsg_free(skb);
+ return ret;
+ }
+ return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
+ &skb, &msg_header);
+}
+
+
static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
size_t count)
{
@@ -2165,8 +2211,7 @@ static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
/* Check if device has been configured before */
if (tcmu_dev_configured(udev)) {
- ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
- TCMU_ATTR_DEV_CFG, page);
+ ret = tcmu_send_dev_config_event(udev, page);
if (ret) {
pr_err("Unable to reconfigure device\n");
return ret;
@@ -2193,6 +2238,26 @@ static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size);
}
+static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
+{
+ struct sk_buff *skb = NULL;
+ void *msg_header = NULL;
+ int ret = 0;
+
+ ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
+ &skb, &msg_header);
+ if (ret < 0)
+ return ret;
+ ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE,
+ size, TCMU_ATTR_PAD);
+ if (ret < 0) {
+ nlmsg_free(skb);
+ return ret;
+ }
+ return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
+ &skb, &msg_header);
+}
+
static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
size_t count)
{
@@ -2208,8 +2273,7 @@ static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
/* Check if device has been configured before */
if (tcmu_dev_configured(udev)) {
- ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
- TCMU_ATTR_DEV_SIZE, &val);
+ ret = tcmu_send_dev_size_event(udev, val);
if (ret) {
pr_err("Unable to reconfigure device\n");
return ret;
@@ -2257,6 +2321,25 @@ static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
}
+static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
+{
+ struct sk_buff *skb = NULL;
+ void *msg_header = NULL;
+ int ret = 0;
+
+ ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
+ &skb, &msg_header);
+ if (ret < 0)
+ return ret;
+ ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val);
+ if (ret < 0) {
+ nlmsg_free(skb);
+ return ret;
+ }
+ return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
+ &skb, &msg_header);
+}
+
static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
const char *page, size_t count)
{
@@ -2272,8 +2355,7 @@ static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
/* Check if device has been configured before */
if (tcmu_dev_configured(udev)) {
- ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
- TCMU_ATTR_WRITECACHE, &val);
+ ret = tcmu_send_emulate_write_cache(udev, val);
if (ret) {
pr_err("Unable to reconfigure device\n");
return ret;
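
The tcmu changes split the old monolithic tcmu_netlink_event() into an init/send pair so that each reconfigurable attribute gets its own small sender and the old switch/BUG() disappears. A sketch of how a further attribute would follow the same pattern; this is kernel-style illustration only, and TCMU_ATTR_FOO is a made-up attribute rather than one defined by this patch:

static int tcmu_send_foo_event(struct tcmu_dev *udev, u32 val)
{
        struct sk_buff *skb = NULL;
        void *msg_header = NULL;
        int ret;

        /* allocate the skb and write the genl + device-id headers */
        ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
                                      &skb, &msg_header);
        if (ret < 0)
                return ret;
        /* attach only the attribute being reconfigured (hypothetical) */
        ret = nla_put_u32(skb, TCMU_ATTR_FOO, val);
        if (ret < 0) {
                nlmsg_free(skb);
                return ret;
        }
        /* finalize, multicast to TCMU_MCGRP_CONFIG and wait for the reply */
        return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
                                       &skb, &msg_header);
}
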