author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-12-13 16:07:55 -0800
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-12-13 16:07:55 -0800
commit | aa3ecf388adc90bde90776bba71a7f2d278fc4e3 |
tree | eab9a54822048f3cb4a280ab70d3eaea3c1c4e14 /drivers/scsi |
parent | b5cab0da75c292ffa0fbd68dd2c820066b2842de |
parent | 709613ad2b3c9eaeb2a3e24284b7c8feffc17326 |
Merge tag 'for-linus-4.10-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen updates from Juergen Gross:
"Xen features and fixes for 4.10
These are some fixes, a move of some ARM-related headers to share them
between arm and arm64, and a series introducing a helper to make code
more readable.
The most notable change is David stepping down as maintainer of the
Xen hypervisor interface. This results in me sending you the pull
requests for Xen-related code from now on"
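The readability helper referred to above is xenbus_read_unsigned(), which several commits in the shortlog below adopt; the final hunk of the scsifront diff at the bottom of this page shows one such conversion. A minimal sketch of the pattern, assuming dev is the usual struct xenbus_device pointer; "feature-foo" and val are placeholder names, not code taken from any particular driver:

	/* Old pattern: read an optional unsigned XenStore node with xenbus_scanf()
	 * and apply the default by hand when the node is absent or unparsable. */
	unsigned int val;

	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-foo", "%u", &val) != 1)
		val = 0;

	/* New pattern: xenbus_read_unsigned() folds the read and the default
	 * value into a single call. */
	val = xenbus_read_unsigned(dev->otherend, "feature-foo", 0);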
* tag 'for-linus-4.10-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip: (29 commits)
xen/balloon: Only mark a page as managed when it is released
xenbus: fix deadlock on writes to /proc/xen/xenbus
xen/scsifront: don't request a slot on the ring until request is ready
xen/x86: Increase xen_e820_map to E820_X_MAX possible entries
x86: Make E820_X_MAX unconditionally larger than E820MAX
xen/pci: Bubble up error and fix description.
xen: xenbus: set error code on failure
xen: set error code on failures
arm/xen: Use alloc_percpu rather than __alloc_percpu
arm/arm64: xen: Move shared architecture headers to include/xen/arm
xen/events: use xen_vcpu_id mapping for EVTCHNOP_status
xen/gntdev: Use VM_MIXEDMAP instead of VM_IO to avoid NUMA balancing
xen-scsifront: Add a missing call to kfree
MAINTAINERS: update XEN HYPERVISOR INTERFACE
xenfs: Use proc_create_mount_point() to create /proc/xen
xen-platform: use builtin_pci_driver
xen-netback: fix error handling output
xen: make use of xenbus_read_unsigned() in xenbus
xen: make use of xenbus_read_unsigned() in xen-pciback
xen: make use of xenbus_read_unsigned() in xen-fbfront
...
Diffstat (limited to 'drivers/scsi')
-rw-r--r-- | drivers/scsi/xen-scsifront.c | 193
1 file changed, 84 insertions, 109 deletions
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 9dc8687bf048..9aa1fe1fc939 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -79,10 +79,13 @@ struct vscsifrnt_shadow {
 	/* command between backend and frontend */
 	unsigned char act;
+	uint8_t nr_segments;
 	uint16_t rqid;
+	uint16_t ref_rqid;
 
 	unsigned int nr_grants;		/* number of grants in gref[] */
 	struct scsiif_request_segment *sg;	/* scatter/gather elements */
+	struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];
 
 	/* Do reset or abort function. */
 	wait_queue_head_t wq_reset;	/* reset work queue */
@@ -172,68 +175,90 @@ static void scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
 		scsifront_wake_up(info);
 }
 
-static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info)
+static int scsifront_do_request(struct vscsifrnt_info *info,
+				struct vscsifrnt_shadow *shadow)
 {
 	struct vscsiif_front_ring *ring = &(info->ring);
 	struct vscsiif_request *ring_req;
+	struct scsi_cmnd *sc = shadow->sc;
 	uint32_t id;
+	int i, notify;
+
+	if (RING_FULL(&info->ring))
+		return -EBUSY;
 
 	id = scsifront_get_rqid(info);	/* use id in response */
 	if (id >= VSCSIIF_MAX_REQS)
-		return NULL;
+		return -EBUSY;
 
-	ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
+	info->shadow[id] = shadow;
+	shadow->rqid = id;
 
+	ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
 	ring->req_prod_pvt++;
 
-	ring_req->rqid = (uint16_t)id;
+	ring_req->rqid = id;
+	ring_req->act = shadow->act;
+	ring_req->ref_rqid = shadow->ref_rqid;
+	ring_req->nr_segments = shadow->nr_segments;
 
-	return ring_req;
-}
+	ring_req->id = sc->device->id;
+	ring_req->lun = sc->device->lun;
+	ring_req->channel = sc->device->channel;
+	ring_req->cmd_len = sc->cmd_len;
 
-static void scsifront_do_request(struct vscsifrnt_info *info)
-{
-	struct vscsiif_front_ring *ring = &(info->ring);
-	int notify;
+	BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
+
+	memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
+
+	ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
+	ring_req->timeout_per_command = sc->request->timeout / HZ;
+
+	for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
+		ring_req->seg[i] = shadow->seg[i];
 
 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
 	if (notify)
 		notify_remote_via_irq(info->irq);
+
+	return 0;
 }
 
-static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id)
+static void scsifront_gnttab_done(struct vscsifrnt_info *info,
+				  struct vscsifrnt_shadow *shadow)
 {
-	struct vscsifrnt_shadow *s = info->shadow[id];
 	int i;
 
-	if (s->sc->sc_data_direction == DMA_NONE)
+	if (shadow->sc->sc_data_direction == DMA_NONE)
 		return;
 
-	for (i = 0; i < s->nr_grants; i++) {
-		if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) {
+	for (i = 0; i < shadow->nr_grants; i++) {
+		if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) {
 			shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
 				     "grant still in use by backend\n");
 			BUG();
 		}
-		gnttab_end_foreign_access(s->gref[i], 0, 0UL);
+		gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
 	}
 
-	kfree(s->sg);
+	kfree(shadow->sg);
 }
 
 static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
 				   struct vscsiif_response *ring_rsp)
 {
+	struct vscsifrnt_shadow *shadow;
 	struct scsi_cmnd *sc;
 	uint32_t id;
 	uint8_t sense_len;
 
 	id = ring_rsp->rqid;
-	sc = info->shadow[id]->sc;
+	shadow = info->shadow[id];
+	sc = shadow->sc;
 
 	BUG_ON(sc == NULL);
 
-	scsifront_gnttab_done(info, id);
+	scsifront_gnttab_done(info, shadow);
 	scsifront_put_rqid(info, id);
 
 	sc->result = ring_rsp->rslt;
@@ -366,7 +391,6 @@ static void scsifront_finish_all(struct vscsifrnt_info *info)
 
 static int map_data_for_request(struct vscsifrnt_info *info,
 				struct scsi_cmnd *sc,
-				struct vscsiif_request *ring_req,
 				struct vscsifrnt_shadow *shadow)
 {
 	grant_ref_t gref_head;
@@ -379,7 +403,6 @@ static int map_data_for_request(struct vscsifrnt_info *info,
 	struct scatterlist *sg;
 	struct scsiif_request_segment *seg;
 
-	ring_req->nr_segments = 0;
 	if (sc->sc_data_direction == DMA_NONE || !data_len)
 		return 0;
 
@@ -398,7 +421,7 @@ static int map_data_for_request(struct vscsifrnt_info *info,
 		if (!shadow->sg)
 			return -ENOMEM;
 	}
-	seg = shadow->sg ? : ring_req->seg;
+	seg = shadow->sg ? : shadow->seg;
 
 	err = gnttab_alloc_grant_references(seg_grants + data_grants,
 					    &gref_head);
@@ -423,9 +446,9 @@ static int map_data_for_request(struct vscsifrnt_info *info,
 				info->dev->otherend_id,
 				xen_page_to_gfn(page), 1);
 			shadow->gref[ref_cnt] = ref;
-			ring_req->seg[ref_cnt].gref = ref;
-			ring_req->seg[ref_cnt].offset = (uint16_t)off;
-			ring_req->seg[ref_cnt].length = (uint16_t)bytes;
+			shadow->seg[ref_cnt].gref = ref;
+			shadow->seg[ref_cnt].offset = (uint16_t)off;
+			shadow->seg[ref_cnt].length = (uint16_t)bytes;
 
 			page++;
 			len -= bytes;
@@ -473,44 +496,14 @@ static int map_data_for_request(struct vscsifrnt_info *info,
 	}
 
 	if (seg_grants)
-		ring_req->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
+		shadow->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
 	else
-		ring_req->nr_segments = (uint8_t)ref_cnt;
+		shadow->nr_segments = (uint8_t)ref_cnt;
 	shadow->nr_grants = ref_cnt;
 
 	return 0;
 }
 
-static struct vscsiif_request *scsifront_command2ring(
-	struct vscsifrnt_info *info, struct scsi_cmnd *sc,
-	struct vscsifrnt_shadow *shadow)
-{
-	struct vscsiif_request *ring_req;
-
-	memset(shadow, 0, sizeof(*shadow));
-
-	ring_req = scsifront_pre_req(info);
-	if (!ring_req)
-		return NULL;
-
-	info->shadow[ring_req->rqid] = shadow;
-	shadow->rqid = ring_req->rqid;
-
-	ring_req->id = sc->device->id;
-	ring_req->lun = sc->device->lun;
-	ring_req->channel = sc->device->channel;
-	ring_req->cmd_len = sc->cmd_len;
-
-	BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
-
-	memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
-
-	ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
-	ring_req->timeout_per_command = sc->request->timeout / HZ;
-
-	return ring_req;
-}
-
 static int scsifront_enter(struct vscsifrnt_info *info)
 {
 	if (info->pause)
@@ -536,36 +529,25 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
 				  struct scsi_cmnd *sc)
 {
 	struct vscsifrnt_info *info = shost_priv(shost);
-	struct vscsiif_request *ring_req;
 	struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc);
 	unsigned long flags;
 	int err;
-	uint16_t rqid;
+
+	sc->result = 0;
+	memset(shadow, 0, sizeof(*shadow));
+
+	shadow->sc = sc;
+	shadow->act = VSCSIIF_ACT_SCSI_CDB;
 
 	spin_lock_irqsave(shost->host_lock, flags);
 	if (scsifront_enter(info)) {
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
-	if (RING_FULL(&info->ring))
-		goto busy;
-
-	ring_req = scsifront_command2ring(info, sc, shadow);
-	if (!ring_req)
-		goto busy;
-
-	sc->result = 0;
-
-	rqid = ring_req->rqid;
-	ring_req->act = VSCSIIF_ACT_SCSI_CDB;
-
-	shadow->sc = sc;
-	shadow->act = VSCSIIF_ACT_SCSI_CDB;
-
-	err = map_data_for_request(info, sc, ring_req, shadow);
+	err = map_data_for_request(info, sc, shadow);
 	if (err < 0) {
 		pr_debug("%s: err %d\n", __func__, err);
-		scsifront_put_rqid(info, rqid);
 		scsifront_return(info);
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		if (err == -ENOMEM)
@@ -575,7 +557,11 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
 		return 0;
 	}
 
-	scsifront_do_request(info);
+	if (scsifront_do_request(info, shadow)) {
+		scsifront_gnttab_done(info, shadow);
+		goto busy;
+	}
+
 	scsifront_return(info);
 	spin_unlock_irqrestore(shost->host_lock, flags);
@@ -598,26 +584,30 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
 	struct Scsi_Host *host = sc->device->host;
 	struct vscsifrnt_info *info = shost_priv(host);
 	struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc);
-	struct vscsiif_request *ring_req;
 	int err = 0;
 
-	shadow = kmalloc(sizeof(*shadow), GFP_NOIO);
+	shadow = kzalloc(sizeof(*shadow), GFP_NOIO);
 	if (!shadow)
 		return FAILED;
 
+	shadow->act = act;
+	shadow->rslt_reset = RSLT_RESET_WAITING;
+	shadow->sc = sc;
+	shadow->ref_rqid = s->rqid;
+	init_waitqueue_head(&shadow->wq_reset);
+
 	spin_lock_irq(host->host_lock);
 
 	for (;;) {
-		if (!RING_FULL(&info->ring)) {
-			ring_req = scsifront_command2ring(info, sc, shadow);
-			if (ring_req)
-				break;
-		}
-		if (err || info->pause) {
-			spin_unlock_irq(host->host_lock);
-			kfree(shadow);
-			return FAILED;
-		}
+		if (scsifront_enter(info))
+			goto fail;
+
+		if (!scsifront_do_request(info, shadow))
+			break;
+
+		scsifront_return(info);
+		if (err)
+			goto fail;
 		info->wait_ring_available = 1;
 		spin_unlock_irq(host->host_lock);
 		err = wait_event_interruptible(info->wq_sync,
@@ -625,22 +615,6 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
 		spin_lock_irq(host->host_lock);
 	}
 
-	if (scsifront_enter(info)) {
-		spin_unlock_irq(host->host_lock);
-		return FAILED;
-	}
-
-	ring_req->act = act;
-	ring_req->ref_rqid = s->rqid;
-
-	shadow->act = act;
-	shadow->rslt_reset = RSLT_RESET_WAITING;
-	init_waitqueue_head(&shadow->wq_reset);
-
-	ring_req->nr_segments = 0;
-
-	scsifront_do_request(info);
-
 	spin_unlock_irq(host->host_lock);
 	err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset);
 	spin_lock_irq(host->host_lock);
@@ -659,6 +633,11 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
 	scsifront_return(info);
 	spin_unlock_irq(host->host_lock);
 	return err;
+
+fail:
+	spin_unlock_irq(host->host_lock);
+	kfree(shadow);
+	return FAILED;
 }
 
 static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
@@ -1060,13 +1039,9 @@ static void scsifront_read_backend_params(struct xenbus_device *dev,
 				  struct vscsifrnt_info *info)
 {
 	unsigned int sg_grant, nr_segs;
-	int ret;
 	struct Scsi_Host *host = info->host;
 
-	ret = xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg-grant", "%u",
-			   &sg_grant);
-	if (ret != 1)
-		sg_grant = 0;
+	sg_grant = xenbus_read_unsigned(dev->otherend, "feature-sg-grant", 0);
 	nr_segs = min_t(unsigned int, sg_grant, SG_ALL);
 	nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE);
 	nr_segs = min_t(unsigned int, nr_segs,
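The reworked scsifront_do_request() above follows the standard Xen shared-ring submission sequence from include/xen/interface/io/ring.h. A condensed sketch of that sequence, with declarations, locking, and per-request field setup trimmed to show just the macro flow (an illustration, not the literal driver code):

	/* Reserve a slot, fill it, then push the producer index and notify. */
	if (RING_FULL(&info->ring))		/* no free slot: caller retries later */
		return -EBUSY;

	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	info->ring.req_prod_pvt++;		/* claim the slot just fetched */

	/* ... copy the prepared shadow state into *ring_req ... */

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);
	if (notify)				/* only interrupt the backend when it needs a kick */
		notify_remote_via_irq(info->irq);
	return 0;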