author    Linus Torvalds <torvalds@linux-foundation.org>    2022-05-23 20:49:45 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2022-05-23 20:49:45 -0700
commit    d61306047533eb6f63a7bd51dfa7f868503bf0ba
tree      1f1e905249b81474fcb465b4d461ccb0beea8134 /drivers
parent    8443516da676be839b54ee11350baa2605f0a445
parent    5b3353949e89d48b4faf54a9cc241ee5d70df615
Merge tag 'for-linus-5.19-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen updates from Juergen Gross:

 - decouple the PV interface from kernel internals in the Xen
   scsifront/scsiback pv drivers

 - harden the Xen scsifront PV driver against a malicious backend driver

 - simplify Xen PV frontend driver ring page setup

 - support Xen setups with multiple domains created at boot time to
   tolerate Xenstore coming up late

 - two small cleanup patches

* tag 'for-linus-5.19-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip: (29 commits)
  xen: add support for initializing xenstore later as HVM domain
  xen: sync xs_wire.h header with upstream xen
  x86: xen: remove STACK_FRAME_NON_STANDARD from xen_cpuid
  xen-blk{back,front}: Update contact points for buffer_squeeze_duration_ms and feature_persistent
  xen/xenbus: eliminate xenbus_grant_ring()
  xen/sndfront: use xenbus_setup_ring() and xenbus_teardown_ring()
  xen/usbfront: use xenbus_setup_ring() and xenbus_teardown_ring()
  xen/scsifront: use xenbus_setup_ring() and xenbus_teardown_ring()
  xen/pcifront: use xenbus_setup_ring() and xenbus_teardown_ring()
  xen/drmfront: use xenbus_setup_ring() and xenbus_teardown_ring()
  xen/tpmfront: use xenbus_setup_ring() and xenbus_teardown_ring()
  xen/netfront: use xenbus_setup_ring() and xenbus_teardown_ring()
  xen/blkfront: use xenbus_setup_ring() and xenbus_teardown_ring()
  xen/xenbus: add xenbus_setup_ring() service function
  xen: update ring.h
  xen/shbuf: switch xen-front-pgdir-shbuf to use INVALID_GRANT_REF
  xen/dmabuf: switch gntdev-dmabuf to use INVALID_GRANT_REF
  xen/sound: switch xen_snd_front to use INVALID_GRANT_REF
  xen/drm: switch xen_drm_front to use INVALID_GRANT_REF
  xen/usb: switch xen-hcd to use INVALID_GRANT_REF
  ...
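The central API change in this series is the xenbus_setup_ring()/xenbus_teardown_ring()
pair, which replaces the open-coded alloc_pages_exact() + SHARED_RING_INIT() +
FRONT_RING_INIT() + xenbus_grant_ring() sequences in the PV frontends (see the
blkfront, tpmfront, netfront and scsifront hunks below). A minimal sketch of the
resulting frontend pattern; the "example" ring types and the examplefront_* names
are hypothetical and not part of this series:

#include <linux/types.h>
#include <linux/gfp.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/interface/io/ring.h>

/* Hypothetical request/response layout, only to give DEFINE_RING_TYPES
 * something to generate example_sring/example_front_ring from. */
struct example_request  { uint32_t id; };
struct example_response { uint32_t id; int32_t status; };
DEFINE_RING_TYPES(example, struct example_request, struct example_response);

struct examplefront_info {
	struct example_front_ring ring;
	grant_ref_t ring_ref;
};

static int examplefront_setup_ring(struct xenbus_device *dev,
				   struct examplefront_info *info)
{
	struct example_sring *sring;
	int err;

	/* Allocates zeroed, physically contiguous ring pages and grants them
	 * to the backend; on failure ring_ref is left as INVALID_GRANT_REF. */
	err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&sring,
				1, &info->ring_ref);
	if (err)
		return err;

	/* Replaces the SHARED_RING_INIT() + FRONT_RING_INIT() pair. */
	XEN_FRONT_RING_INIT(&info->ring, sring, XEN_PAGE_SIZE);

	return 0;
}

static void examplefront_free_ring(struct examplefront_info *info)
{
	/* Ends foreign access, frees the page and resets ring_ref to
	 * INVALID_GRANT_REF; also safe on a ring that was never set up. */
	xenbus_teardown_ring((void **)&info->ring.sring, 1, &info->ring_ref);
}

Because xenbus_teardown_ring() tolerates INVALID_GRANT_REF and a NULL ring
pointer, the converted drivers can use it both in their regular disconnect
paths and in setup error paths, which is what removes most of the per-driver
cleanup boilerplate in the diff below.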
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/xen-blkfront.c                 |  57
-rw-r--r--  drivers/char/tpm/xen-tpmfront.c              |  18
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front.h          |   9
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_evtchnl.c  |  43
-rw-r--r--  drivers/net/xen-netfront.c                   |  85
-rw-r--r--  drivers/pci/xen-pcifront.c                   |  19
-rw-r--r--  drivers/scsi/xen-scsifront.c                 | 199
-rw-r--r--  drivers/usb/host/xen-hcd.c                   |  65
-rw-r--r--  drivers/xen/gntdev-dmabuf.c                  |  13
-rw-r--r--  drivers/xen/grant-table.c                    |  12
-rw-r--r--  drivers/xen/xen-front-pgdir-shbuf.c          |  18
-rw-r--r--  drivers/xen/xen-scsiback.c                   |  82
-rw-r--r--  drivers/xen/xenbus/xenbus_client.c           |  82
-rw-r--r--  drivers/xen/xenbus/xenbus_probe.c            |  91
14 files changed, 442 insertions(+), 351 deletions(-)
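The scsifront hardening in drivers/scsi/xen-scsifront.c below follows the usual
pattern for not trusting a PV backend: bound the producer index, copy each
response out of the shared ring before looking at it, and only EOI a lateeoi
event channel when real work was found. A condensed sketch of that pattern,
reusing the hypothetical examplefront types from the sketch above (the locking
and per-request bookkeeping that scsifront adds are omitted):

#include <linux/interrupt.h>
#include <xen/events.h>

static void examplefront_handle_response(struct examplefront_info *info,
					 struct example_response *rsp)
{
	/* Placeholder: validate rsp->id against the locally tracked set of
	 * in-flight requests before acting on it, as scsifront now does. */
}

static irqreturn_t examplefront_irq(int irq, void *dev_id)
{
	struct examplefront_info *info = dev_id;
	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
	struct example_response rsp;
	RING_IDX i, rp;

	/* Read the producer index once and order against backend writes. */
	rp = READ_ONCE(info->ring.sring->rsp_prod);
	virt_rmb();

	/* A misbehaving backend could advance rsp_prod beyond the ring. */
	if (!RING_RESPONSE_PROD_OVERFLOW(&info->ring, rp)) {
		for (i = info->ring.rsp_cons; i != rp; i++) {
			/* Copy the slot so the backend cannot change it
			 * while it is being processed. */
			RING_COPY_RESPONSE(&info->ring, i, &rsp);
			examplefront_handle_response(info, &rsp);
			eoiflag &= ~XEN_EOI_FLAG_SPURIOUS;
		}
		info->ring.rsp_cons = i;
	}

	/* Assumes the event channel was bound with
	 * bind_evtchn_to_irq_lateeoi(); spurious events are then throttled. */
	xen_irq_lateeoi(irq, eoiflag);
	return IRQ_HANDLED;
}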
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 0f3f5238f7bc..55e004d03ced 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -229,8 +229,6 @@ static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);
-#define GRANT_INVALID_REF 0
-
#define PARTS_PER_DISK 16
#define PARTS_PER_EXT_DISK 256
@@ -321,7 +319,7 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
gnt_list_entry->page = granted_page;
}
- gnt_list_entry->gref = GRANT_INVALID_REF;
+ gnt_list_entry->gref = INVALID_GRANT_REF;
list_add(&gnt_list_entry->node, &rinfo->grants);
i++;
}
@@ -350,7 +348,7 @@ static struct grant *get_free_grant(struct blkfront_ring_info *rinfo)
node);
list_del(&gnt_list_entry->node);
- if (gnt_list_entry->gref != GRANT_INVALID_REF)
+ if (gnt_list_entry->gref != INVALID_GRANT_REF)
rinfo->persistent_gnts_c--;
return gnt_list_entry;
@@ -372,7 +370,7 @@ static struct grant *get_grant(grant_ref_t *gref_head,
struct grant *gnt_list_entry = get_free_grant(rinfo);
struct blkfront_info *info = rinfo->dev_info;
- if (gnt_list_entry->gref != GRANT_INVALID_REF)
+ if (gnt_list_entry->gref != INVALID_GRANT_REF)
return gnt_list_entry;
/* Assign a gref to this page */
@@ -396,7 +394,7 @@ static struct grant *get_indirect_grant(grant_ref_t *gref_head,
struct grant *gnt_list_entry = get_free_grant(rinfo);
struct blkfront_info *info = rinfo->dev_info;
- if (gnt_list_entry->gref != GRANT_INVALID_REF)
+ if (gnt_list_entry->gref != INVALID_GRANT_REF)
return gnt_list_entry;
/* Assign a gref to this page */
@@ -1221,7 +1219,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
list_for_each_entry_safe(persistent_gnt, n,
&rinfo->grants, node) {
list_del(&persistent_gnt->node);
- if (persistent_gnt->gref != GRANT_INVALID_REF) {
+ if (persistent_gnt->gref != INVALID_GRANT_REF) {
gnttab_end_foreign_access(persistent_gnt->gref,
0UL);
rinfo->persistent_gnts_c--;
@@ -1282,15 +1280,8 @@ free_shadow:
flush_work(&rinfo->work);
/* Free resources associated with old device channel. */
- for (i = 0; i < info->nr_ring_pages; i++) {
- if (rinfo->ring_ref[i] != GRANT_INVALID_REF) {
- gnttab_end_foreign_access(rinfo->ring_ref[i], 0);
- rinfo->ring_ref[i] = GRANT_INVALID_REF;
- }
- }
- free_pages_exact(rinfo->ring.sring,
- info->nr_ring_pages * XEN_PAGE_SIZE);
- rinfo->ring.sring = NULL;
+ xenbus_teardown_ring((void **)&rinfo->ring.sring, info->nr_ring_pages,
+ rinfo->ring_ref);
if (rinfo->irq)
unbind_from_irqhandler(rinfo->irq, rinfo);
@@ -1475,7 +1466,7 @@ static int blkif_completion(unsigned long *id,
* to the tail of the list, so it will not be picked
* again unless we run out of persistent grants.
*/
- s->grants_used[i]->gref = GRANT_INVALID_REF;
+ s->grants_used[i]->gref = INVALID_GRANT_REF;
list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
}
}
@@ -1500,7 +1491,7 @@ static int blkif_completion(unsigned long *id,
indirect_page = s->indirect_grants[i]->page;
list_add(&indirect_page->lru, &rinfo->indirect_pages);
}
- s->indirect_grants[i]->gref = GRANT_INVALID_REF;
+ s->indirect_grants[i]->gref = INVALID_GRANT_REF;
list_add_tail(&s->indirect_grants[i]->node, &rinfo->grants);
}
}
@@ -1681,30 +1672,16 @@ static int setup_blkring(struct xenbus_device *dev,
struct blkfront_ring_info *rinfo)
{
struct blkif_sring *sring;
- int err, i;
+ int err;
struct blkfront_info *info = rinfo->dev_info;
unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE;
- grant_ref_t gref[XENBUS_MAX_RING_GRANTS];
-
- for (i = 0; i < info->nr_ring_pages; i++)
- rinfo->ring_ref[i] = GRANT_INVALID_REF;
- sring = alloc_pages_exact(ring_size, GFP_NOIO);
- if (!sring) {
- xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
- return -ENOMEM;
- }
- SHARED_RING_INIT(sring);
- FRONT_RING_INIT(&rinfo->ring, sring, ring_size);
-
- err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
- if (err < 0) {
- free_pages_exact(sring, ring_size);
- rinfo->ring.sring = NULL;
+ err = xenbus_setup_ring(dev, GFP_NOIO, (void **)&sring,
+ info->nr_ring_pages, rinfo->ring_ref);
+ if (err)
goto fail;
- }
- for (i = 0; i < info->nr_ring_pages; i++)
- rinfo->ring_ref[i] = gref[i];
+
+ XEN_FRONT_RING_INIT(&rinfo->ring, sring, ring_size);
err = xenbus_alloc_evtchn(dev, &rinfo->evtchn);
if (err)
@@ -2544,13 +2521,13 @@ static void purge_persistent_grants(struct blkfront_info *info)
list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
node) {
- if (gnt_list_entry->gref == GRANT_INVALID_REF ||
+ if (gnt_list_entry->gref == INVALID_GRANT_REF ||
!gnttab_try_end_foreign_access(gnt_list_entry->gref))
continue;
list_del(&gnt_list_entry->node);
rinfo->persistent_gnts_c--;
- gnt_list_entry->gref = GRANT_INVALID_REF;
+ gnt_list_entry->gref = INVALID_GRANT_REF;
list_add_tail(&gnt_list_entry->node, &grants);
}
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 69df04ae2401..379291826261 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -253,20 +253,12 @@ static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
struct xenbus_transaction xbt;
const char *message = NULL;
int rv;
- grant_ref_t gref;
- priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
- if (!priv->shr) {
- xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
- return -ENOMEM;
- }
-
- rv = xenbus_grant_ring(dev, priv->shr, 1, &gref);
+ rv = xenbus_setup_ring(dev, GFP_KERNEL, (void **)&priv->shr, 1,
+ &priv->ring_ref);
if (rv < 0)
return rv;
- priv->ring_ref = gref;
-
rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
if (rv)
return rv;
@@ -331,11 +323,7 @@ static void ring_free(struct tpm_private *priv)
if (!priv)
return;
- if (priv->ring_ref)
- gnttab_end_foreign_access(priv->ring_ref,
- (unsigned long)priv->shr);
- else
- free_page((unsigned long)priv->shr);
+ xenbus_teardown_ring((void **)&priv->shr, 1, &priv->ring_ref);
if (priv->irq)
unbind_from_irqhandler(priv->irq, priv);
diff --git a/drivers/gpu/drm/xen/xen_drm_front.h b/drivers/gpu/drm/xen/xen_drm_front.h
index cefafe859aba..a987c78abe41 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.h
+++ b/drivers/gpu/drm/xen/xen_drm_front.h
@@ -80,15 +80,6 @@ struct drm_pending_vblank_event;
/* timeout in ms to wait for backend to respond */
#define XEN_DRM_FRONT_WAIT_BACK_MS 3000
-#ifndef GRANT_INVALID_REF
-/*
- * Note on usage of grant reference 0 as invalid grant reference:
- * grant reference 0 is valid, but never exposed to a PV driver,
- * because of the fact it is already in use/reserved by the PV console.
- */
-#define GRANT_INVALID_REF 0
-#endif
-
struct xen_drm_front_info {
struct xenbus_device *xb_dev;
struct xen_drm_front_drm_info *drm_info;
diff --git a/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
index 08b526eeec16..e52afd792346 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
@@ -123,12 +123,12 @@ out:
static void evtchnl_free(struct xen_drm_front_info *front_info,
struct xen_drm_front_evtchnl *evtchnl)
{
- unsigned long page = 0;
+ void *page = NULL;
if (evtchnl->type == EVTCHNL_TYPE_REQ)
- page = (unsigned long)evtchnl->u.req.ring.sring;
+ page = evtchnl->u.req.ring.sring;
else if (evtchnl->type == EVTCHNL_TYPE_EVT)
- page = (unsigned long)evtchnl->u.evt.page;
+ page = evtchnl->u.evt.page;
if (!page)
return;
@@ -147,8 +147,7 @@ static void evtchnl_free(struct xen_drm_front_info *front_info,
xenbus_free_evtchn(front_info->xb_dev, evtchnl->port);
/* end access and free the page */
- if (evtchnl->gref != GRANT_INVALID_REF)
- gnttab_end_foreign_access(evtchnl->gref, page);
+ xenbus_teardown_ring(&page, 1, &evtchnl->gref);
memset(evtchnl, 0, sizeof(*evtchnl));
}
@@ -158,8 +157,7 @@ static int evtchnl_alloc(struct xen_drm_front_info *front_info, int index,
enum xen_drm_front_evtchnl_type type)
{
struct xenbus_device *xb_dev = front_info->xb_dev;
- unsigned long page;
- grant_ref_t gref;
+ void *page;
irq_handler_t handler;
int ret;
@@ -168,44 +166,25 @@ static int evtchnl_alloc(struct xen_drm_front_info *front_info, int index,
evtchnl->index = index;
evtchnl->front_info = front_info;
evtchnl->state = EVTCHNL_STATE_DISCONNECTED;
- evtchnl->gref = GRANT_INVALID_REF;
- page = get_zeroed_page(GFP_NOIO | __GFP_HIGH);
- if (!page) {
- ret = -ENOMEM;
+ ret = xenbus_setup_ring(xb_dev, GFP_NOIO | __GFP_HIGH, &page,
+ 1, &evtchnl->gref);
+ if (ret)
goto fail;
- }
if (type == EVTCHNL_TYPE_REQ) {
struct xen_displif_sring *sring;
init_completion(&evtchnl->u.req.completion);
mutex_init(&evtchnl->u.req.req_io_lock);
- sring = (struct xen_displif_sring *)page;
- SHARED_RING_INIT(sring);
- FRONT_RING_INIT(&evtchnl->u.req.ring, sring, XEN_PAGE_SIZE);
-
- ret = xenbus_grant_ring(xb_dev, sring, 1, &gref);
- if (ret < 0) {
- evtchnl->u.req.ring.sring = NULL;
- free_page(page);
- goto fail;
- }
+ sring = page;
+ XEN_FRONT_RING_INIT(&evtchnl->u.req.ring, sring, XEN_PAGE_SIZE);
handler = evtchnl_interrupt_ctrl;
} else {
- ret = gnttab_grant_foreign_access(xb_dev->otherend_id,
- virt_to_gfn((void *)page), 0);
- if (ret < 0) {
- free_page(page);
- goto fail;
- }
-
- evtchnl->u.evt.page = (struct xendispl_event_page *)page;
- gref = ret;
+ evtchnl->u.evt.page = page;
handler = evtchnl_interrupt_evt;
}
- evtchnl->gref = gref;
ret = xenbus_alloc_evtchn(xb_dev, &evtchnl->port);
if (ret < 0)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e2b4a1893a13..65ab907aca5a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -78,8 +78,6 @@ struct netfront_cb {
#define RX_COPY_THRESHOLD 256
-#define GRANT_INVALID_REF 0
-
#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
@@ -224,7 +222,7 @@ static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
{
int i = xennet_rxidx(ri);
grant_ref_t ref = queue->grant_rx_ref[i];
- queue->grant_rx_ref[i] = GRANT_INVALID_REF;
+ queue->grant_rx_ref[i] = INVALID_GRANT_REF;
return ref;
}
@@ -432,7 +430,7 @@ static bool xennet_tx_buf_gc(struct netfront_queue *queue)
}
gnttab_release_grant_reference(
&queue->gref_tx_head, queue->grant_tx_ref[id]);
- queue->grant_tx_ref[id] = GRANT_INVALID_REF;
+ queue->grant_tx_ref[id] = INVALID_GRANT_REF;
queue->grant_tx_page[id] = NULL;
add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
dev_kfree_skb_irq(skb);
@@ -868,7 +866,7 @@ static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
spin_lock_irqsave(&queue->rx_cons_lock, flags);
queue->rx.rsp_cons = val;
- queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
+ queue->rx_rsp_unconsumed = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
}
@@ -1021,7 +1019,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
* the backend driver. In future this should flag the bad
* situation to the system controller to reboot the backend.
*/
- if (ref == GRANT_INVALID_REF) {
+ if (ref == INVALID_GRANT_REF) {
if (net_ratelimit())
dev_warn(dev, "Bad rx response id %d.\n",
rx->id);
@@ -1390,7 +1388,7 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
gnttab_end_foreign_access(queue->grant_tx_ref[i],
(unsigned long)page_address(queue->grant_tx_page[i]));
queue->grant_tx_page[i] = NULL;
- queue->grant_tx_ref[i] = GRANT_INVALID_REF;
+ queue->grant_tx_ref[i] = INVALID_GRANT_REF;
add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
dev_kfree_skb_irq(skb);
}
@@ -1411,7 +1409,7 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue)
continue;
ref = queue->grant_rx_ref[id];
- if (ref == GRANT_INVALID_REF)
+ if (ref == INVALID_GRANT_REF)
continue;
page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
@@ -1422,7 +1420,7 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue)
get_page(page);
gnttab_end_foreign_access(ref,
(unsigned long)page_address(page));
- queue->grant_rx_ref[id] = GRANT_INVALID_REF;
+ queue->grant_rx_ref[id] = INVALID_GRANT_REF;
kfree_skb(skb);
}
@@ -1500,7 +1498,7 @@ static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
return false;
spin_lock_irqsave(&queue->rx_cons_lock, flags);
- work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
+ work_queued = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
if (work_queued > queue->rx_rsp_unconsumed) {
queue->rx_rsp_unconsumed = work_queued;
*eoi = 0;
@@ -1761,7 +1759,7 @@ static int netfront_probe(struct xenbus_device *dev,
static void xennet_end_access(int ref, void *page)
{
/* This frees the page as a side-effect */
- if (ref != GRANT_INVALID_REF)
+ if (ref != INVALID_GRANT_REF)
gnttab_end_foreign_access(ref, (unsigned long)page);
}
@@ -1798,8 +1796,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
- queue->tx_ring_ref = GRANT_INVALID_REF;
- queue->rx_ring_ref = GRANT_INVALID_REF;
+ queue->tx_ring_ref = INVALID_GRANT_REF;
+ queue->rx_ring_ref = INVALID_GRANT_REF;
queue->tx.sring = NULL;
queue->rx.sring = NULL;
@@ -1923,42 +1921,27 @@ static int setup_netfront(struct xenbus_device *dev,
struct netfront_queue *queue, unsigned int feature_split_evtchn)
{
struct xen_netif_tx_sring *txs;
- struct xen_netif_rx_sring *rxs = NULL;
- grant_ref_t gref;
+ struct xen_netif_rx_sring *rxs;
int err;
- queue->tx_ring_ref = GRANT_INVALID_REF;
- queue->rx_ring_ref = GRANT_INVALID_REF;
+ queue->tx_ring_ref = INVALID_GRANT_REF;
+ queue->rx_ring_ref = INVALID_GRANT_REF;
queue->rx.sring = NULL;
queue->tx.sring = NULL;
- txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
- if (!txs) {
- err = -ENOMEM;
- xenbus_dev_fatal(dev, err, "allocating tx ring page");
+ err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&txs,
+ 1, &queue->tx_ring_ref);
+ if (err)
goto fail;
- }
- SHARED_RING_INIT(txs);
- FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
- err = xenbus_grant_ring(dev, txs, 1, &gref);
- if (err < 0)
- goto fail;
- queue->tx_ring_ref = gref;
+ XEN_FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
- rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
- if (!rxs) {
- err = -ENOMEM;
- xenbus_dev_fatal(dev, err, "allocating rx ring page");
+ err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&rxs,
+ 1, &queue->rx_ring_ref);
+ if (err)
goto fail;
- }
- SHARED_RING_INIT(rxs);
- FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
- err = xenbus_grant_ring(dev, rxs, 1, &gref);
- if (err < 0)
- goto fail;
- queue->rx_ring_ref = gref;
+ XEN_FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
if (feature_split_evtchn)
err = setup_netfront_split(queue);
@@ -1974,24 +1957,10 @@ static int setup_netfront(struct xenbus_device *dev,
return 0;
- /* If we fail to setup netfront, it is safe to just revoke access to
- * granted pages because backend is not accessing it at this point.
- */
fail:
- if (queue->rx_ring_ref != GRANT_INVALID_REF) {
- gnttab_end_foreign_access(queue->rx_ring_ref,
- (unsigned long)rxs);
- queue->rx_ring_ref = GRANT_INVALID_REF;
- } else {
- free_page((unsigned long)rxs);
- }
- if (queue->tx_ring_ref != GRANT_INVALID_REF) {
- gnttab_end_foreign_access(queue->tx_ring_ref,
- (unsigned long)txs);
- queue->tx_ring_ref = GRANT_INVALID_REF;
- } else {
- free_page((unsigned long)txs);
- }
+ xenbus_teardown_ring((void **)&queue->rx.sring, 1, &queue->rx_ring_ref);
+ xenbus_teardown_ring((void **)&queue->tx.sring, 1, &queue->tx_ring_ref);
+
return err;
}
@@ -2020,7 +1989,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
queue->tx_pend_queue = TX_LINK_NONE;
for (i = 0; i < NET_TX_RING_SIZE; i++) {
queue->tx_link[i] = i + 1;
- queue->grant_tx_ref[i] = GRANT_INVALID_REF;
+ queue->grant_tx_ref[i] = INVALID_GRANT_REF;
queue->grant_tx_page[i] = NULL;
}
queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
@@ -2028,7 +1997,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
/* Clear out rx_skbs */
for (i = 0; i < NET_RX_RING_SIZE; i++) {
queue->rx_skbs[i] = NULL;
- queue->grant_rx_ref[i] = GRANT_INVALID_REF;
+ queue->grant_rx_ref[i] = INVALID_GRANT_REF;
}
/* A grant for every tx ring slot */
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 3edc1565a27c..689271c4245c 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -709,9 +709,8 @@ static struct pcifront_device *alloc_pdev(struct xenbus_device *xdev)
if (pdev == NULL)
goto out;
- pdev->sh_info =
- (struct xen_pci_sharedinfo *)__get_free_page(GFP_KERNEL);
- if (pdev->sh_info == NULL) {
+ if (xenbus_setup_ring(xdev, GFP_KERNEL, (void **)&pdev->sh_info, 1,
+ &pdev->gnt_ref)) {
kfree(pdev);
pdev = NULL;
goto out;
@@ -729,7 +728,6 @@ static struct pcifront_device *alloc_pdev(struct xenbus_device *xdev)
spin_lock_init(&pdev->sh_info_lock);
pdev->evtchn = INVALID_EVTCHN;
- pdev->gnt_ref = INVALID_GRANT_REF;
pdev->irq = -1;
INIT_WORK(&pdev->op_work, pcifront_do_aer);
@@ -754,11 +752,7 @@ static void free_pdev(struct pcifront_device *pdev)
if (pdev->evtchn != INVALID_EVTCHN)
xenbus_free_evtchn(pdev->xdev, pdev->evtchn);
- if (pdev->gnt_ref != INVALID_GRANT_REF)
- gnttab_end_foreign_access(pdev->gnt_ref,
- (unsigned long)pdev->sh_info);
- else
- free_page((unsigned long)pdev->sh_info);
+ xenbus_teardown_ring((void **)&pdev->sh_info, 1, &pdev->gnt_ref);
dev_set_drvdata(&pdev->xdev->dev, NULL);
@@ -769,13 +763,6 @@ static int pcifront_publish_info(struct pcifront_device *pdev)
{
int err = 0;
struct xenbus_transaction trans;
- grant_ref_t gref;
-
- err = xenbus_grant_ring(pdev->xdev, pdev->sh_info, 1, &gref);
- if (err < 0)
- goto out;
-
- pdev->gnt_ref = gref;
err = xenbus_alloc_evtchn(pdev->xdev, &pdev->evtchn);
if (err)
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 12109e4c73d4..51afc66e839d 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -58,9 +58,6 @@
#include <asm/xen/hypervisor.h>
-
-#define GRANT_INVALID_REF 0
-
#define VSCSIFRONT_OP_ADD_LUN 1
#define VSCSIFRONT_OP_DEL_LUN 2
#define VSCSIFRONT_OP_READD_LUN 3
@@ -83,6 +80,8 @@ struct vscsifrnt_shadow {
uint16_t rqid;
uint16_t ref_rqid;
+ bool inflight;
+
unsigned int nr_grants; /* number of grants in gref[] */
struct scsiif_request_segment *sg; /* scatter/gather elements */
struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];
@@ -104,7 +103,11 @@ struct vscsifrnt_info {
struct xenbus_device *dev;
struct Scsi_Host *host;
- int host_active;
+ enum {
+ STATE_INACTIVE,
+ STATE_ACTIVE,
+ STATE_ERROR
+ } host_active;
unsigned int evtchn;
unsigned int irq;
@@ -217,6 +220,8 @@ static int scsifront_do_request(struct vscsifrnt_info *info,
for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
ring_req->seg[i] = shadow->seg[i];
+ shadow->inflight = true;
+
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
if (notify)
notify_remote_via_irq(info->irq);
@@ -224,6 +229,13 @@ static int scsifront_do_request(struct vscsifrnt_info *info,
return 0;
}
+static void scsifront_set_error(struct vscsifrnt_info *info, const char *msg)
+{
+ shost_printk(KERN_ERR, info->host, KBUILD_MODNAME "%s\n"
+ "Disabling device for further use\n", msg);
+ info->host_active = STATE_ERROR;
+}
+
static void scsifront_gnttab_done(struct vscsifrnt_info *info,
struct vscsifrnt_shadow *shadow)
{
@@ -234,15 +246,64 @@ static void scsifront_gnttab_done(struct vscsifrnt_info *info,
for (i = 0; i < shadow->nr_grants; i++) {
if (unlikely(!gnttab_try_end_foreign_access(shadow->gref[i]))) {
- shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
- "grant still in use by backend\n");
- BUG();
+ scsifront_set_error(info, "grant still in use by backend");
+ return;
}
}
kfree(shadow->sg);
}
+static unsigned int scsifront_host_byte(int32_t rslt)
+{
+ switch (XEN_VSCSIIF_RSLT_HOST(rslt)) {
+ case XEN_VSCSIIF_RSLT_HOST_OK:
+ return DID_OK;
+ case XEN_VSCSIIF_RSLT_HOST_NO_CONNECT:
+ return DID_NO_CONNECT;
+ case XEN_VSCSIIF_RSLT_HOST_BUS_BUSY:
+ return DID_BUS_BUSY;
+ case XEN_VSCSIIF_RSLT_HOST_TIME_OUT:
+ return DID_TIME_OUT;
+ case XEN_VSCSIIF_RSLT_HOST_BAD_TARGET:
+ return DID_BAD_TARGET;
+ case XEN_VSCSIIF_RSLT_HOST_ABORT:
+ return DID_ABORT;
+ case XEN_VSCSIIF_RSLT_HOST_PARITY:
+ return DID_PARITY;
+ case XEN_VSCSIIF_RSLT_HOST_ERROR:
+ return DID_ERROR;
+ case XEN_VSCSIIF_RSLT_HOST_RESET:
+ return DID_RESET;
+ case XEN_VSCSIIF_RSLT_HOST_BAD_INTR:
+ return DID_BAD_INTR;
+ case XEN_VSCSIIF_RSLT_HOST_PASSTHROUGH:
+ return DID_PASSTHROUGH;
+ case XEN_VSCSIIF_RSLT_HOST_SOFT_ERROR:
+ return DID_SOFT_ERROR;
+ case XEN_VSCSIIF_RSLT_HOST_IMM_RETRY:
+ return DID_IMM_RETRY;
+ case XEN_VSCSIIF_RSLT_HOST_REQUEUE:
+ return DID_REQUEUE;
+ case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_DISRUPTED:
+ return DID_TRANSPORT_DISRUPTED;
+ case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_FAILFAST:
+ return DID_TRANSPORT_FAILFAST;
+ case XEN_VSCSIIF_RSLT_HOST_TARGET_FAILURE:
+ return DID_TARGET_FAILURE;
+ case XEN_VSCSIIF_RSLT_HOST_NEXUS_FAILURE:
+ return DID_NEXUS_FAILURE;
+ case XEN_VSCSIIF_RSLT_HOST_ALLOC_FAILURE:
+ return DID_ALLOC_FAILURE;
+ case XEN_VSCSIIF_RSLT_HOST_MEDIUM_ERROR:
+ return DID_MEDIUM_ERROR;
+ case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_MARGINAL:
+ return DID_TRANSPORT_MARGINAL;
+ default:
+ return DID_ERROR;
+ }
+}
+
static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
struct vscsiif_response *ring_rsp)
{
@@ -250,7 +311,6 @@ static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
struct scsi_cmnd *sc;
uint32_t id;
uint8_t sense_len;
- int result;
id = ring_rsp->rqid;
shadow = info->shadow[id];
@@ -259,14 +319,12 @@ static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
BUG_ON(sc == NULL);
scsifront_gnttab_done(info, shadow);
+ if (info->host_active == STATE_ERROR)
+ return;
scsifront_put_rqid(info, id);
- result = ring_rsp->rslt;
- if (result >> 24)
- set_host_byte(sc, DID_ERROR);
- else
- set_host_byte(sc, host_byte(result));
- set_status_byte(sc, result & 0xff);
+ set_host_byte(sc, scsifront_host_byte(ring_rsp->rslt));
+ set_status_byte(sc, XEN_VSCSIIF_RSLT_STATUS(ring_rsp->rslt));
scsi_set_resid(sc, ring_rsp->residual_len);
sense_len = min_t(uint8_t, VSCSIIF_SENSE_BUFFERSIZE,
@@ -290,7 +348,10 @@ static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
shadow->wait_reset = 1;
switch (shadow->rslt_reset) {
case RSLT_RESET_WAITING:
- shadow->rslt_reset = ring_rsp->rslt;
+ if (ring_rsp->rslt == XEN_VSCSIIF_RSLT_RESET_SUCCESS)
+ shadow->rslt_reset = SUCCESS;
+ else
+ shadow->rslt_reset = FAILED;
break;
case RSLT_RESET_ERR:
kick = _scsifront_put_rqid(info, id);
@@ -300,9 +361,7 @@ static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
scsifront_wake_up(info);
return;
default:
- shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
- "bad reset state %d, possibly leaking %u\n",
- shadow->rslt_reset, id);
+ scsifront_set_error(info, "bad reset state");
break;
}
spin_unlock_irqrestore(&info->shadow_lock, flags);
@@ -313,28 +372,41 @@ static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
static void scsifront_do_response(struct vscsifrnt_info *info,
struct vscsiif_response *ring_rsp)
{
- if (WARN(ring_rsp->rqid >= VSCSIIF_MAX_REQS ||
- test_bit(ring_rsp->rqid, info->shadow_free_bitmap),
- "illegal rqid %u returned by backend!\n", ring_rsp->rqid))
+ struct vscsifrnt_shadow *shadow;
+
+ if (ring_rsp->rqid >= VSCSIIF_MAX_REQS ||
+ !info->shadow[ring_rsp->rqid]->inflight) {
+ scsifront_set_error(info, "illegal rqid returned by backend!");
return;
+ }
+ shadow = info->shadow[ring_rsp->rqid];
+ shadow->inflight = false;
- if (info->shadow[ring_rsp->rqid]->act == VSCSIIF_ACT_SCSI_CDB)
+ if (shadow->act == VSCSIIF_ACT_SCSI_CDB)
scsifront_cdb_cmd_done(info, ring_rsp);
else
scsifront_sync_cmd_done(info, ring_rsp);
}
-static int scsifront_ring_drain(struct vscsifrnt_info *info)
+static int scsifront_ring_drain(struct vscsifrnt_info *info,
+ unsigned int *eoiflag)
{
- struct vscsiif_response *ring_rsp;
+ struct vscsiif_response ring_rsp;
RING_IDX i, rp;
int more_to_do = 0;
- rp = info->ring.sring->rsp_prod;
- rmb(); /* ordering required respective to dom0 */
+ rp = READ_ONCE(info->ring.sring->rsp_prod);
+ virt_rmb(); /* ordering required respective to backend */
+ if (RING_RESPONSE_PROD_OVERFLOW(&info->ring, rp)) {
+ scsifront_set_error(info, "illegal number of responses");
+ return 0;
+ }
for (i = info->ring.rsp_cons; i != rp; i++) {
- ring_rsp = RING_GET_RESPONSE(&info->ring, i);
- scsifront_do_response(info, ring_rsp);
+ RING_COPY_RESPONSE(&info->ring, i, &ring_rsp);
+ scsifront_do_response(info, &ring_rsp);
+ if (info->host_active == STATE_ERROR)
+ return 0;
+ *eoiflag &= ~XEN_EOI_FLAG_SPURIOUS;
}
info->ring.rsp_cons = i;
@@ -347,14 +419,15 @@ static int scsifront_ring_drain(struct vscsifrnt_info *info)
return more_to_do;
}
-static int scsifront_cmd_done(struct vscsifrnt_info *info)
+static int scsifront_cmd_done(struct vscsifrnt_info *info,
+ unsigned int *eoiflag)
{
int more_to_do;
unsigned long flags;
spin_lock_irqsave(info->host->host_lock, flags);
- more_to_do = scsifront_ring_drain(info);
+ more_to_do = scsifront_ring_drain(info, eoiflag);
info->wait_ring_available = 0;
@@ -368,20 +441,28 @@ static int scsifront_cmd_done(struct vscsifrnt_info *info)
static irqreturn_t scsifront_irq_fn(int irq, void *dev_id)
{
struct vscsifrnt_info *info = dev_id;
+ unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
- while (scsifront_cmd_done(info))
+ if (info->host_active == STATE_ERROR) {
+ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+ return IRQ_HANDLED;
+ }
+
+ while (scsifront_cmd_done(info, &eoiflag))
/* Yield point for this unbounded loop. */
cond_resched();
+ xen_irq_lateeoi(irq, eoiflag);
+
return IRQ_HANDLED;
}
static void scsifront_finish_all(struct vscsifrnt_info *info)
{
- unsigned i;
+ unsigned int i, dummy;
struct vscsiif_response resp;
- scsifront_ring_drain(info);
+ scsifront_ring_drain(info, &dummy);
for (i = 0; i < VSCSIIF_MAX_REQS; i++) {
if (test_bit(i, info->shadow_free_bitmap))
@@ -538,6 +619,9 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
unsigned long flags;
int err;
+ if (info->host_active == STATE_ERROR)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
sc->result = 0;
shadow->sc = sc;
@@ -590,6 +674,9 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc);
int err = 0;
+ if (info->host_active == STATE_ERROR)
+ return FAILED;
+
shadow = kzalloc(sizeof(*shadow), GFP_NOIO);
if (!shadow)
return FAILED;
@@ -661,6 +748,9 @@ static int scsifront_sdev_configure(struct scsi_device *sdev)
struct vscsifrnt_info *info = shost_priv(sdev->host);
int err;
+ if (info->host_active == STATE_ERROR)
+ return -EIO;
+
if (info && current == info->curr) {
err = xenbus_printf(XBT_NIL, info->dev->nodename,
info->dev_state_path, "%d", XenbusStateConnected);
@@ -708,27 +798,15 @@ static int scsifront_alloc_ring(struct vscsifrnt_info *info)
{
struct xenbus_device *dev = info->dev;
struct vscsiif_sring *sring;
- grant_ref_t gref;
- int err = -ENOMEM;
+ int err;
/***** Frontend to Backend ring start *****/
- sring = (struct vscsiif_sring *)__get_free_page(GFP_KERNEL);
- if (!sring) {
- xenbus_dev_fatal(dev, err,
- "fail to allocate shared ring (Front to Back)");
+ err = xenbus_setup_ring(dev, GFP_KERNEL, (void **)&sring, 1,
+ &info->ring_ref);
+ if (err)
return err;
- }
- SHARED_RING_INIT(sring);
- FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
- err = xenbus_grant_ring(dev, sring, 1, &gref);
- if (err < 0) {
- free_page((unsigned long)sring);
- xenbus_dev_fatal(dev, err,
- "fail to grant shared ring (Front to Back)");
- return err;
- }
- info->ring_ref = gref;
+ XEN_FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
err = xenbus_alloc_evtchn(dev, &info->evtchn);
if (err) {
@@ -736,7 +814,7 @@ static int scsifront_alloc_ring(struct vscsifrnt_info *info)
goto free_gnttab;
}
- err = bind_evtchn_to_irq(info->evtchn);
+ err = bind_evtchn_to_irq_lateeoi(info->evtchn);
if (err <= 0) {
xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq");
goto free_gnttab;
@@ -757,8 +835,7 @@ static int scsifront_alloc_ring(struct vscsifrnt_info *info)
free_irq:
unbind_from_irqhandler(info->irq, info);
free_gnttab:
- gnttab_end_foreign_access(info->ring_ref,
- (unsigned long)info->ring.sring);
+ xenbus_teardown_ring((void **)&sring, 1, &info->ring_ref);
return err;
}
@@ -766,8 +843,7 @@ free_gnttab:
static void scsifront_free_ring(struct vscsifrnt_info *info)
{
unbind_from_irqhandler(info->irq, info);
- gnttab_end_foreign_access(info->ring_ref,
- (unsigned long)info->ring.sring);
+ xenbus_teardown_ring((void **)&info->ring.sring, 1, &info->ring_ref);
}
static int scsifront_init_ring(struct vscsifrnt_info *info)
@@ -866,7 +942,7 @@ static int scsifront_probe(struct xenbus_device *dev,
goto free_sring;
}
info->host = host;
- info->host_active = 1;
+ info->host_active = STATE_ACTIVE;
xenbus_switch_state(dev, XenbusStateInitialised);
@@ -934,10 +1010,10 @@ static int scsifront_remove(struct xenbus_device *dev)
pr_debug("%s: %s removed\n", __func__, dev->nodename);
mutex_lock(&scsifront_mutex);
- if (info->host_active) {
+ if (info->host_active != STATE_INACTIVE) {
/* Scsi_host not yet removed */
scsi_remove_host(info->host);
- info->host_active = 0;
+ info->host_active = STATE_INACTIVE;
}
mutex_unlock(&scsifront_mutex);
@@ -961,9 +1037,9 @@ static void scsifront_disconnect(struct vscsifrnt_info *info)
*/
mutex_lock(&scsifront_mutex);
- if (info->host_active) {
+ if (info->host_active != STATE_INACTIVE) {
scsi_remove_host(host);
- info->host_active = 0;
+ info->host_active = STATE_INACTIVE;
}
mutex_unlock(&scsifront_mutex);
@@ -981,6 +1057,9 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
unsigned int hst, chn, tgt, lun;
struct scsi_device *sdev;
+ if (info->host_active == STATE_ERROR)
+ return;
+
dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n);
if (IS_ERR(dir))
return;
diff --git a/drivers/usb/host/xen-hcd.c b/drivers/usb/host/xen-hcd.c
index 3e487baf8422..de1b09158318 100644
--- a/drivers/usb/host/xen-hcd.c
+++ b/drivers/usb/host/xen-hcd.c
@@ -87,8 +87,6 @@ struct xenhcd_info {
bool error;
};
-#define GRANT_INVALID_REF 0
-
#define XENHCD_RING_JIFFIES (HZ/200)
#define XENHCD_SCAN_JIFFIES 1
@@ -1100,19 +1098,10 @@ static void xenhcd_destroy_rings(struct xenhcd_info *info)
unbind_from_irqhandler(info->irq, info);
info->irq = 0;
- if (info->urb_ring_ref != GRANT_INVALID_REF) {
- gnttab_end_foreign_access(info->urb_ring_ref,
- (unsigned long)info->urb_ring.sring);
- info->urb_ring_ref = GRANT_INVALID_REF;
- }
- info->urb_ring.sring = NULL;
-
- if (info->conn_ring_ref != GRANT_INVALID_REF) {
- gnttab_end_foreign_access(info->conn_ring_ref,
- (unsigned long)info->conn_ring.sring);
- info->conn_ring_ref = GRANT_INVALID_REF;
- }
- info->conn_ring.sring = NULL;
+ xenbus_teardown_ring((void **)&info->urb_ring.sring, 1,
+ &info->urb_ring_ref);
+ xenbus_teardown_ring((void **)&info->conn_ring.sring, 1,
+ &info->conn_ring_ref);
}
static int xenhcd_setup_rings(struct xenbus_device *dev,
@@ -1120,46 +1109,24 @@ static int xenhcd_setup_rings(struct xenbus_device *dev,
{
struct xenusb_urb_sring *urb_sring;
struct xenusb_conn_sring *conn_sring;
- grant_ref_t gref;
int err;
- info->urb_ring_ref = GRANT_INVALID_REF;
- info->conn_ring_ref = GRANT_INVALID_REF;
-
- urb_sring = (struct xenusb_urb_sring *)get_zeroed_page(
- GFP_NOIO | __GFP_HIGH);
- if (!urb_sring) {
- xenbus_dev_fatal(dev, -ENOMEM, "allocating urb ring");
- return -ENOMEM;
- }
- SHARED_RING_INIT(urb_sring);
- FRONT_RING_INIT(&info->urb_ring, urb_sring, PAGE_SIZE);
-
- err = xenbus_grant_ring(dev, urb_sring, 1, &gref);
- if (err < 0) {
- free_page((unsigned long)urb_sring);
- info->urb_ring.sring = NULL;
- goto fail;
- }
- info->urb_ring_ref = gref;
-
- conn_sring = (struct xenusb_conn_sring *)get_zeroed_page(
- GFP_NOIO | __GFP_HIGH);
- if (!conn_sring) {
- xenbus_dev_fatal(dev, -ENOMEM, "allocating conn ring");
- err = -ENOMEM;
- goto fail;
+ info->conn_ring_ref = INVALID_GRANT_REF;
+ err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH,
+ (void **)&urb_sring, 1, &info->urb_ring_ref);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "allocating urb ring");
+ return err;
}
- SHARED_RING_INIT(conn_sring);
- FRONT_RING_INIT(&info->conn_ring, conn_sring, PAGE_SIZE);
+ XEN_FRONT_RING_INIT(&info->urb_ring, urb_sring, PAGE_SIZE);
- err = xenbus_grant_ring(dev, conn_sring, 1, &gref);
- if (err < 0) {
- free_page((unsigned long)conn_sring);
- info->conn_ring.sring = NULL;
+ err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH,
+ (void **)&conn_sring, 1, &info->conn_ring_ref);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "allocating conn ring");
goto fail;
}
- info->conn_ring_ref = gref;
+ XEN_FRONT_RING_INIT(&info->conn_ring, conn_sring, PAGE_SIZE);
err = xenbus_alloc_evtchn(dev, &info->evtchn);
if (err) {
diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
index d5bfd7b867fc..91073b4e4a20 100644
--- a/drivers/xen/gntdev-dmabuf.c
+++ b/drivers/xen/gntdev-dmabuf.c
@@ -24,15 +24,6 @@
MODULE_IMPORT_NS(DMA_BUF);
-#ifndef GRANT_INVALID_REF
-/*
- * Note on usage of grant reference 0 as invalid grant reference:
- * grant reference 0 is valid, but never exposed to a driver,
- * because of the fact it is already in use/reserved by the PV console.
- */
-#define GRANT_INVALID_REF 0
-#endif
-
struct gntdev_dmabuf {
struct gntdev_dmabuf_priv *priv;
struct dma_buf *dmabuf;
@@ -532,7 +523,7 @@ static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
int i;
for (i = 0; i < count; i++)
- if (refs[i] != GRANT_INVALID_REF)
+ if (refs[i] != INVALID_GRANT_REF)
gnttab_end_foreign_access(refs[i], 0UL);
}
@@ -567,7 +558,7 @@ static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
gntdev_dmabuf->nr_pages = count;
for (i = 0; i < count; i++)
- gntdev_dmabuf->u.imp.refs[i] = GRANT_INVALID_REF;
+ gntdev_dmabuf->u.imp.refs[i] = INVALID_GRANT_REF;
return gntdev_dmabuf;
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 8ccccace2a4f..1a1aec0a88a1 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -66,8 +66,6 @@
#include <asm/sync_bitops.h>
-/* External tools reserve first few grant table entries. */
-#define NR_RESERVED_ENTRIES 8
#define GNTTAB_LIST_END 0xffffffff
static grant_ref_t **gnttab_list;
@@ -209,6 +207,10 @@ static inline void check_free_callbacks(void)
static void put_free_entry(grant_ref_t ref)
{
unsigned long flags;
+
+ if (unlikely(ref < GNTTAB_NR_RESERVED_ENTRIES))
+ return;
+
spin_lock_irqsave(&gnttab_list_lock, flags);
gnttab_entry(ref) = gnttab_free_head;
gnttab_free_head = ref;
@@ -1465,12 +1467,12 @@ int gnttab_init(void)
nr_init_grefs = nr_grant_frames *
gnttab_interface->grefs_per_grant_frame;
- for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
+ for (i = GNTTAB_NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
gnttab_entry(i) = i + 1;
gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
- gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
- gnttab_free_head = NR_RESERVED_ENTRIES;
+ gnttab_free_count = nr_init_grefs - GNTTAB_NR_RESERVED_ENTRIES;
+ gnttab_free_head = GNTTAB_NR_RESERVED_ENTRIES;
printk("Grant table initialized\n");
return 0;
diff --git a/drivers/xen/xen-front-pgdir-shbuf.c b/drivers/xen/xen-front-pgdir-shbuf.c
index a959dee21134..b6433761d42c 100644
--- a/drivers/xen/xen-front-pgdir-shbuf.c
+++ b/drivers/xen/xen-front-pgdir-shbuf.c
@@ -21,15 +21,6 @@
#include <xen/xen-front-pgdir-shbuf.h>
-#ifndef GRANT_INVALID_REF
-/*
- * FIXME: usage of grant reference 0 as invalid grant reference:
- * grant reference 0 is valid, but never exposed to a PV driver,
- * because of the fact it is already in use/reserved by the PV console.
- */
-#define GRANT_INVALID_REF 0
-#endif
-
/**
* This structure represents the structure of a shared page
* that contains grant references to the pages of the shared
@@ -38,6 +29,7 @@
*/
struct xen_page_directory {
grant_ref_t gref_dir_next_page;
+#define XEN_GREF_LIST_END 0
grant_ref_t gref[1]; /* Variable length */
};
@@ -83,7 +75,7 @@ grant_ref_t
xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf)
{
if (!buf->grefs)
- return GRANT_INVALID_REF;
+ return INVALID_GRANT_REF;
return buf->grefs[0];
}
@@ -142,7 +134,7 @@ void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf)
int i;
for (i = 0; i < buf->num_grefs; i++)
- if (buf->grefs[i] != GRANT_INVALID_REF)
+ if (buf->grefs[i] != INVALID_GRANT_REF)
gnttab_end_foreign_access(buf->grefs[i], 0UL);
}
kfree(buf->grefs);
@@ -355,7 +347,7 @@ static void backend_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
}
/* Last page must say there is no more pages. */
page_dir = (struct xen_page_directory *)ptr;
- page_dir->gref_dir_next_page = GRANT_INVALID_REF;
+ page_dir->gref_dir_next_page = XEN_GREF_LIST_END;
}
/**
@@ -384,7 +376,7 @@ static void guest_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
if (grefs_left <= XEN_NUM_GREFS_PER_PAGE) {
to_copy = grefs_left;
- page_dir->gref_dir_next_page = GRANT_INVALID_REF;
+ page_dir->gref_dir_next_page = XEN_GREF_LIST_END;
} else {
to_copy = XEN_NUM_GREFS_PER_PAGE;
page_dir->gref_dir_next_page = buf->grefs[i + 1];
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 0c5e565aa8cf..7a0c93acc2c5 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -280,6 +280,82 @@ static void scsiback_free_translation_entry(struct kref *kref)
kfree(entry);
}
+static int32_t scsiback_result(int32_t result)
+{
+ int32_t host_status;
+
+ switch (XEN_VSCSIIF_RSLT_HOST(result)) {
+ case DID_OK:
+ host_status = XEN_VSCSIIF_RSLT_HOST_OK;
+ break;
+ case DID_NO_CONNECT:
+ host_status = XEN_VSCSIIF_RSLT_HOST_NO_CONNECT;
+ break;
+ case DID_BUS_BUSY:
+ host_status = XEN_VSCSIIF_RSLT_HOST_BUS_BUSY;
+ break;
+ case DID_TIME_OUT:
+ host_status = XEN_VSCSIIF_RSLT_HOST_TIME_OUT;
+ break;
+ case DID_BAD_TARGET:
+ host_status = XEN_VSCSIIF_RSLT_HOST_BAD_TARGET;
+ break;
+ case DID_ABORT:
+ host_status = XEN_VSCSIIF_RSLT_HOST_ABORT;
+ break;
+ case DID_PARITY:
+ host_status = XEN_VSCSIIF_RSLT_HOST_PARITY;
+ break;
+ case DID_ERROR:
+ host_status = XEN_VSCSIIF_RSLT_HOST_ERROR;
+ break;
+ case DID_RESET:
+ host_status = XEN_VSCSIIF_RSLT_HOST_RESET;
+ break;
+ case DID_BAD_INTR:
+ host_status = XEN_VSCSIIF_RSLT_HOST_BAD_INTR;
+ break;
+ case DID_PASSTHROUGH:
+ host_status = XEN_VSCSIIF_RSLT_HOST_PASSTHROUGH;
+ break;
+ case DID_SOFT_ERROR:
+ host_status = XEN_VSCSIIF_RSLT_HOST_SOFT_ERROR;
+ break;
+ case DID_IMM_RETRY:
+ host_status = XEN_VSCSIIF_RSLT_HOST_IMM_RETRY;
+ break;
+ case DID_REQUEUE:
+ host_status = XEN_VSCSIIF_RSLT_HOST_REQUEUE;
+ break;
+ case DID_TRANSPORT_DISRUPTED:
+ host_status = XEN_VSCSIIF_RSLT_HOST_TRANSPORT_DISRUPTED;
+ break;
+ case DID_TRANSPORT_FAILFAST:
+ host_status = XEN_VSCSIIF_RSLT_HOST_TRANSPORT_FAILFAST;
+ break;
+ case DID_TARGET_FAILURE:
+ host_status = XEN_VSCSIIF_RSLT_HOST_TARGET_FAILURE;
+ break;
+ case DID_NEXUS_FAILURE:
+ host_status = XEN_VSCSIIF_RSLT_HOST_NEXUS_FAILURE;
+ break;
+ case DID_ALLOC_FAILURE:
+ host_status = XEN_VSCSIIF_RSLT_HOST_ALLOC_FAILURE;
+ break;
+ case DID_MEDIUM_ERROR:
+ host_status = XEN_VSCSIIF_RSLT_HOST_MEDIUM_ERROR;
+ break;
+ case DID_TRANSPORT_MARGINAL:
+ host_status = XEN_VSCSIIF_RSLT_HOST_TRANSPORT_MARGINAL;
+ break;
+ default:
+ host_status = XEN_VSCSIIF_RSLT_HOST_ERROR;
+ break;
+ }
+
+ return (host_status << 16) | (result & 0x00ffff);
+}
+
static void scsiback_send_response(struct vscsibk_info *info,
char *sense_buffer, int32_t result, uint32_t resid,
uint16_t rqid)
@@ -295,7 +371,7 @@ static void scsiback_send_response(struct vscsibk_info *info,
ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);
info->ring.rsp_prod_pvt++;
- ring_res->rslt = result;
+ ring_res->rslt = scsiback_result(result);
ring_res->rqid = rqid;
if (sense_buffer != NULL &&
@@ -555,7 +631,7 @@ static void scsiback_device_action(struct vscsibk_pend *pending_req,
struct scsiback_nexus *nexus = tpg->tpg_nexus;
struct se_cmd *se_cmd = &pending_req->se_cmd;
u64 unpacked_lun = pending_req->v2p->lun;
- int rc, err = FAILED;
+ int rc, err = XEN_VSCSIIF_RSLT_RESET_FAILED;
init_completion(&pending_req->tmr_done);
@@ -569,7 +645,7 @@ static void scsiback_device_action(struct vscsibk_pend *pending_req,
wait_for_completion(&pending_req->tmr_done);
err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
- SUCCESS : FAILED;
+ XEN_VSCSIIF_RSLT_RESET_SUCCESS : XEN_VSCSIIF_RSLT_RESET_FAILED;
scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
transport_generic_free_cmd(&pending_req->se_cmd, 0);
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index df6890681231..d6fdd2d209d3 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -363,50 +363,92 @@ static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
__xenbus_switch_state(dev, XenbusStateClosing, 1);
}
-/**
- * xenbus_grant_ring
+/*
+ * xenbus_setup_ring
* @dev: xenbus device
- * @vaddr: starting virtual address of the ring
+ * @vaddr: pointer to starting virtual address of the ring
* @nr_pages: number of pages to be granted
* @grefs: grant reference array to be filled in
*
- * Grant access to the given @vaddr to the peer of the given device.
- * Then fill in @grefs with grant references. Return 0 on success, or
- * -errno on error. On error, the device will switch to
- * XenbusStateClosing, and the error will be saved in the store.
+ * Allocate physically contiguous pages for a shared ring buffer and grant it
+ * to the peer of the given device. The ring buffer is initially filled with
+ * zeroes. The virtual address of the ring is stored at @vaddr and the
+ * grant references are stored in the @grefs array. In case of error @vaddr
+ * will be set to NULL and @grefs will be filled with INVALID_GRANT_REF.
*/
-int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
+int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
unsigned int nr_pages, grant_ref_t *grefs)
{
- int err;
- unsigned int i;
+ unsigned long ring_size = nr_pages * XEN_PAGE_SIZE;
grant_ref_t gref_head;
+ unsigned int i;
+ int ret;
- err = gnttab_alloc_grant_references(nr_pages, &gref_head);
- if (err) {
- xenbus_dev_fatal(dev, err, "granting access to ring page");
- return err;
+ *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
+ if (!*vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = gnttab_alloc_grant_references(nr_pages, &gref_head);
+ if (ret) {
+ xenbus_dev_fatal(dev, ret, "granting access to %u ring pages",
+ nr_pages);
+ goto err;
}
for (i = 0; i < nr_pages; i++) {
unsigned long gfn;
- if (is_vmalloc_addr(vaddr))
- gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr));
+ if (is_vmalloc_addr(*vaddr))
+ gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr[i]));
else
- gfn = virt_to_gfn(vaddr);
+ gfn = virt_to_gfn(vaddr[i]);
grefs[i] = gnttab_claim_grant_reference(&gref_head);
gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
gfn, 0);
-
- vaddr = vaddr + XEN_PAGE_SIZE;
}
return 0;
+
+ err:
+ if (*vaddr)
+ free_pages_exact(*vaddr, ring_size);
+ for (i = 0; i < nr_pages; i++)
+ grefs[i] = INVALID_GRANT_REF;
+ *vaddr = NULL;
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(xenbus_grant_ring);
+EXPORT_SYMBOL_GPL(xenbus_setup_ring);
+/*
+ * xenbus_teardown_ring
+ * @vaddr: starting virtual address of the ring
+ * @nr_pages: number of pages
+ * @grefs: grant reference array
+ *
+ * Remove grants for the shared ring buffer and free the associated memory.
+ * On return the grant reference array is filled with INVALID_GRANT_REF.
+ */
+void xenbus_teardown_ring(void **vaddr, unsigned int nr_pages,
+ grant_ref_t *grefs)
+{
+ unsigned int i;
+
+ for (i = 0; i < nr_pages; i++) {
+ if (grefs[i] != INVALID_GRANT_REF) {
+ gnttab_end_foreign_access(grefs[i], 0);
+ grefs[i] = INVALID_GRANT_REF;
+ }
+ }
+
+ if (*vaddr)
+ free_pages_exact(*vaddr, nr_pages * XEN_PAGE_SIZE);
+ *vaddr = NULL;
+}
+EXPORT_SYMBOL_GPL(xenbus_teardown_ring);
/**
* Allocate an event channel for the given xenbus_device, assigning the newly
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index fe360c33ce71..d367f2bd2b93 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -65,6 +65,7 @@
#include "xenbus.h"
+static int xs_init_irq;
int xen_store_evtchn;
EXPORT_SYMBOL_GPL(xen_store_evtchn);
@@ -750,6 +751,20 @@ static void xenbus_probe(void)
{
xenstored_ready = 1;
+ if (!xen_store_interface) {
+ xen_store_interface = xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
+ XEN_PAGE_SIZE);
+ /*
+ * Now it is safe to free the IRQ used for xenstore late
+ * initialization. No need to unbind: it is about to be
+ * bound again from xb_init_comms. Note that calling
+ * unbind_from_irqhandler now would result in xen_evtchn_close()
+ * being called and the event channel not being enabled again
+ * afterwards, resulting in missed event notifications.
+ */
+ free_irq(xs_init_irq, &xb_waitq);
+ }
+
/*
* In the HVM case, xenbus_init() deferred its call to
* xs_init() in case callbacks were not operational yet.
@@ -798,20 +813,22 @@ static int __init xenbus_probe_initcall(void)
{
/*
* Probe XenBus here in the XS_PV case, and also XS_HVM unless we
- * need to wait for the platform PCI device to come up.
+ * need to wait for the platform PCI device to come up or
+ * xen_store_interface is not ready.
*/
if (xen_store_domain_type == XS_PV ||
(xen_store_domain_type == XS_HVM &&
- !xs_hvm_defer_init_for_callback()))
+ !xs_hvm_defer_init_for_callback() &&
+ xen_store_interface != NULL))
xenbus_probe();
/*
- * For XS_LOCAL, spawn a thread which will wait for xenstored
- * or a xenstore-stubdom to be started, then probe. It will be
- * triggered when communication starts happening, by waiting
- * on xb_waitq.
+ * For XS_LOCAL or when xen_store_interface is not ready, spawn a
+ * thread which will wait for xenstored or a xenstore-stubdom to be
+ * started, then probe. It will be triggered when communication
+ * starts happening, by waiting on xb_waitq.
*/
- if (xen_store_domain_type == XS_LOCAL) {
+ if (xen_store_domain_type == XS_LOCAL || xen_store_interface == NULL) {
struct task_struct *probe_task;
probe_task = kthread_run(xenbus_probe_thread, NULL,
@@ -907,10 +924,25 @@ static struct notifier_block xenbus_resume_nb = {
.notifier_call = xenbus_resume_cb,
};
+static irqreturn_t xenbus_late_init(int irq, void *unused)
+{
+ int err;
+ uint64_t v = 0;
+
+ err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
+ if (err || !v || !~v)
+ return IRQ_HANDLED;
+ xen_store_gfn = (unsigned long)v;
+
+ wake_up(&xb_waitq);
+ return IRQ_HANDLED;
+}
+
static int __init xenbus_init(void)
{
int err;
uint64_t v = 0;
+ bool wait = false;
xen_store_domain_type = XS_UNKNOWN;
if (!xen_domain())
@@ -957,25 +989,44 @@ static int __init xenbus_init(void)
* been properly initialized. Instead of attempting to map a
* wrong guest physical address return error.
*
- * Also recognize all bits set as an invalid value.
+ * Also recognize all bits set as an invalid/uninitialized value.
*/
- if (!v || !~v) {
+ if (!v) {
err = -ENOENT;
goto out_error;
}
- /* Avoid truncation on 32-bit. */
+ if (v == ~0ULL) {
+ wait = true;
+ } else {
+ /* Avoid truncation on 32-bit. */
#if BITS_PER_LONG == 32
- if (v > ULONG_MAX) {
- pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
- __func__, v);
- err = -EINVAL;
- goto out_error;
- }
+ if (v > ULONG_MAX) {
+ pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
+ __func__, v);
+ err = -EINVAL;
+ goto out_error;
+ }
#endif
- xen_store_gfn = (unsigned long)v;
- xen_store_interface =
- xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
- XEN_PAGE_SIZE);
+ xen_store_gfn = (unsigned long)v;
+ xen_store_interface =
+ xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
+ XEN_PAGE_SIZE);
+ if (xen_store_interface->connection != XENSTORE_CONNECTED)
+ wait = true;
+ }
+ if (wait) {
+ err = bind_evtchn_to_irqhandler(xen_store_evtchn,
+ xenbus_late_init,
+ 0, "xenstore_late_init",
+ &xb_waitq);
+ if (err < 0) {
+ pr_err("xenstore_late_init couldn't bind irq err=%d\n",
+ err);
+ return err;
+ }
+
+ xs_init_irq = err;
+ }
break;
default:
pr_warn("Xenstore state unknown\n");