author		Linus Torvalds <torvalds@linux-foundation.org>	2015-02-18 09:24:01 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-18 09:24:01 -0800
commit		53861af9a17022898619a2ae4ead0dfc601b7c13 (patch)
tree		dc11088d9e86fa1d8d8479974864153a8f976897 /drivers/virtio
parent		5c2770079fb9b8c5bfb7113d9e76de66e77a0e24 (diff)
parent		5b40a7daf51812b35cf05d1601a779a7043f8414 (diff)
Merge tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux
Pull virtio updates from Rusty Russell:
"OK, this has the big virtio 1.0 implementation, as specified by OASIS.
On top of that is the major rework of lguest, to use PCI and virtio
1.0, to double-check the implementation.
Then come the inevitable fixes and cleanups from that work"
* tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux: (80 commits)
virtio: don't set VIRTIO_CONFIG_S_DRIVER_OK twice.
virtio_net: unconditionally define struct virtio_net_hdr_v1.
tools/lguest: don't use legacy definitions for net device in example launcher.
virtio: Don't expose legacy net features when VIRTIO_NET_NO_LEGACY defined.
tools/lguest: use common error macros in the example launcher.
tools/lguest: give virtqueues names for better error messages
tools/lguest: more documentation and checking of virtio 1.0 compliance.
lguest: don't look in console features to find emerg_wr.
tools/lguest: don't start devices until DRIVER_OK status set.
tools/lguest: handle indirect partway through chain.
tools/lguest: insert driver references from the 1.0 spec (4.1 Virtio Over PCI)
tools/lguest: insert device references from the 1.0 spec (4.1 Virtio Over PCI)
tools/lguest: rename virtio_pci_cfg_cap field to match spec.
tools/lguest: fix features_accepted logic in example launcher.
tools/lguest: handle device reset correctly in example launcher.
virtual: Documentation: simplify and generalize paravirt_ops.txt
lguest: remove NOTIFY call and eventfd facility.
lguest: remove NOTIFY facility from demonstration launcher.
lguest: use the PCI console device's emerg_wr for early boot messages.
lguest: always put console in PCI slot #1.
...
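The first commit in the list above ("don't set VIRTIO_CONFIG_S_DRIVER_OK twice") corresponds to the drivers/virtio/virtio.c hunk further down: core probe now only marks the device DRIVER_OK if the driver's own probe routine has not already done so. A minimal userspace sketch of that guard — struct fake_vdev and its helpers here are hypothetical stand-ins for struct virtio_device and its config ops, not kernel code:

	/* Illustration only: the DRIVER_OK guard added to virtio_dev_probe()
	 * in the virtio.c hunk below. */
	#include <stdint.h>
	#include <stdio.h>

	#define VIRTIO_CONFIG_S_DRIVER_OK 4	/* same value as the kernel's status bit */

	struct fake_vdev {
		uint8_t status;
	};

	static uint8_t get_status(struct fake_vdev *d)
	{
		return d->status;
	}

	static void virtio_device_ready(struct fake_vdev *d)
	{
		d->status |= VIRTIO_CONFIG_S_DRIVER_OK;
	}

	static void finish_probe(struct fake_vdev *d)
	{
		/* A driver's probe may already have set DRIVER_OK itself
		 * (e.g. to use its virtqueues during probe); only set it
		 * here if it did not. */
		if (!(get_status(d) & VIRTIO_CONFIG_S_DRIVER_OK))
			virtio_device_ready(d);
	}

	int main(void)
	{
		struct fake_vdev d = { .status = 0 };

		finish_probe(&d);
		printf("status = %u\n", d.status);	/* prints 4: DRIVER_OK set once */
		return 0;
	}

The point of the guard, per the comment the hunk adds ("If probe didn't do it, mark device DRIVER_OK ourselves"), is that a probe routine may legitimately call virtio_device_ready() itself, so the core must not repeat the status write.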
Diffstat (limited to 'drivers/virtio')
-rw-r--r--	drivers/virtio/Kconfig	24
-rw-r--r--	drivers/virtio/Makefile	3
-rw-r--r--	drivers/virtio/virtio.c	5
-rw-r--r--	drivers/virtio/virtio_balloon.c	9
-rw-r--r--	drivers/virtio/virtio_mmio.c	131
-rw-r--r--	drivers/virtio/virtio_pci_common.c	94
-rw-r--r--	drivers/virtio/virtio_pci_common.h	43
-rw-r--r--	drivers/virtio/virtio_pci_legacy.c	76
-rw-r--r--	drivers/virtio/virtio_pci_modern.c	695
-rw-r--r--	drivers/virtio/virtio_ring.c	9
10 files changed, 950 insertions, 139 deletions
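A pattern that recurs throughout the diff below is 64-bit feature negotiation through a 32-bit select/window register pair: vm_get_features() (virtio-mmio, DEVICE_FEATURES_SEL/DEVICE_FEATURES) and vp_get_features() (virtio-pci modern, device_feature_select/device_feature) both write a selector (1 for the high word, 0 for the low word) and read the 32-bit window twice. A minimal userspace model of that access pattern — struct fake_dev and its accessors are hypothetical stand-ins for readl()/writel() on real registers:

	/* Illustration only: the select/window access used for 64-bit
	 * features in the virtio-mmio v2 and virtio-pci modern code below. */
	#include <stdint.h>
	#include <stdio.h>

	struct fake_dev {
		uint64_t features;	/* the device's full 64-bit feature set */
		uint32_t feat_sel;	/* which 32-bit half the window exposes */
	};

	static void write_feat_sel(struct fake_dev *d, uint32_t sel)
	{
		d->feat_sel = sel;	/* models writel(sel, base + ..._FEATURES_SEL) */
	}

	static uint32_t read_feat_window(const struct fake_dev *d)
	{
		/* models readl(base + ..._FEATURES): the selected half */
		return (uint32_t)(d->features >> (32 * d->feat_sel));
	}

	/* Mirrors vm_get_features()/vp_get_features(): high word, then low. */
	static uint64_t get_features(struct fake_dev *d)
	{
		uint64_t features;

		write_feat_sel(d, 1);
		features = (uint64_t)read_feat_window(d) << 32;
		write_feat_sel(d, 0);
		features |= read_feat_window(d);
		return features;
	}

	int main(void)
	{
		/* VIRTIO_F_VERSION_1 is feature bit 32, which is exactly why
		 * the old 32-bit-only path had to grow this two-step access. */
		struct fake_dev d = { .features = (1ULL << 32) | 0x3, .feat_sel = 0 };

		printf("features = 0x%llx\n", (unsigned long long)get_features(&d));
		return 0;
	}

The write side (vm_finalize_features()/vp_finalize_features()) mirrors the same two-step dance through the DRIVER_FEATURES and guest_feature windows.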
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index 00b228638274..b546da5d8ea3 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -12,16 +12,32 @@ config VIRTIO_PCI depends on PCI select VIRTIO ---help--- - This drivers provides support for virtio based paravirtual device + This driver provides support for virtio based paravirtual device drivers over PCI. This requires that your VMM has appropriate PCI virtio backends. Most QEMU based VMMs should support these devices (like KVM or Xen). - Currently, the ABI is not considered stable so there is no guarantee - that this version of the driver will work with your VMM. - If unsure, say M. +config VIRTIO_PCI_LEGACY + bool "Support for legacy virtio draft 0.9.X and older devices" + default y + depends on VIRTIO_PCI + ---help--- + Virtio PCI Card 0.9.X Draft (circa 2014) and older device support. + + This option enables building a transitional driver, supporting + both devices conforming to Virtio 1 specification, and legacy devices. + If disabled, you get a slightly smaller, non-transitional driver, + with no legacy compatibility. + + So look out into your driveway. Do you have a flying car? If + so, you can happily disable this option and virtio will not + break. Otherwise, leave it set. Unless you're testing what + life will be like in The Future. + + If unsure, say Y. + config VIRTIO_BALLOON tristate "Virtio balloon driver" depends on VIRTIO diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile index bf5104b56894..d85565b8ea46 100644 --- a/drivers/virtio/Makefile +++ b/drivers/virtio/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o -virtio_pci-y := virtio_pci_legacy.o virtio_pci_common.o +virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o +virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index b9f70dfc4751..5ce2aa48fc6e 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -236,7 +236,10 @@ static int virtio_dev_probe(struct device *_d) if (err) goto err; - add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); + /* If probe didn't do it, mark device DRIVER_OK ourselves. */ + if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK)) + virtio_device_ready(dev); + if (drv->scan) drv->scan(dev); diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 50c5f42d7a9f..0413157f3b49 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -44,8 +44,7 @@ static int oom_pages = OOM_VBALLOON_DEFAULT_PAGES; module_param(oom_pages, int, S_IRUSR | S_IWUSR); MODULE_PARM_DESC(oom_pages, "pages to free on OOM"); -struct virtio_balloon -{ +struct virtio_balloon { struct virtio_device *vdev; struct virtqueue *inflate_vq, *deflate_vq, *stats_vq; @@ -466,6 +465,12 @@ static int virtballoon_probe(struct virtio_device *vdev) struct virtio_balloon *vb; int err; + if (!vdev->config->get) { + dev_err(&vdev->dev, "%s failure: config access disabled\n", + __func__); + return -EINVAL; + } + vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL); if (!vb) { err = -ENOMEM; diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 00d115b22bd8..cad569890908 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -1,7 +1,7 @@ /* * Virtio memory mapped device driver * - * Copyright 2011, ARM Ltd. 
+ * Copyright 2011-2014, ARM Ltd. * * This module allows virtio devices to be used over a virtual, memory mapped * platform device. @@ -50,36 +50,6 @@ * * * - * Registers layout (all 32-bit wide): - * - * offset d. name description - * ------ -- ---------------- ----------------- - * - * 0x000 R MagicValue Magic value "virt" - * 0x004 R Version Device version (current max. 1) - * 0x008 R DeviceID Virtio device ID - * 0x00c R VendorID Virtio vendor ID - * - * 0x010 R HostFeatures Features supported by the host - * 0x014 W HostFeaturesSel Set of host features to access via HostFeatures - * - * 0x020 W GuestFeatures Features activated by the guest - * 0x024 W GuestFeaturesSel Set of activated features to set via GuestFeatures - * 0x028 W GuestPageSize Size of guest's memory page in bytes - * - * 0x030 W QueueSel Queue selector - * 0x034 R QueueNumMax Maximum size of the currently selected queue - * 0x038 W QueueNum Queue size for the currently selected queue - * 0x03c W QueueAlign Used Ring alignment for the current queue - * 0x040 RW QueuePFN PFN for the currently selected queue - * - * 0x050 W QueueNotify Queue notifier - * 0x060 R InterruptStatus Interrupt status register - * 0x064 W InterruptACK Interrupt acknowledge register - * 0x070 RW Status Device status register - * - * 0x100+ RW Device-specific configuration space - * * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007 * * This work is licensed under the terms of the GNU GPL, version 2 or later. @@ -145,11 +115,16 @@ struct virtio_mmio_vq_info { static u64 vm_get_features(struct virtio_device *vdev) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + u64 features; + + writel(1, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL); + features = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES); + features <<= 32; - /* TODO: Features > 32 bits */ - writel(0, vm_dev->base + VIRTIO_MMIO_HOST_FEATURES_SEL); + writel(0, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL); + features |= readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES); - return readl(vm_dev->base + VIRTIO_MMIO_HOST_FEATURES); + return features; } static int vm_finalize_features(struct virtio_device *vdev) @@ -159,11 +134,20 @@ static int vm_finalize_features(struct virtio_device *vdev) /* Give virtio_ring a chance to accept features. */ vring_transport_features(vdev); - /* Make sure we don't have any features > 32 bits! 
*/ - BUG_ON((u32)vdev->features != vdev->features); + /* Make sure there is are no mixed devices */ + if (vm_dev->version == 2 && + !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) { + dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n"); + return -EINVAL; + } + + writel(1, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL); + writel((u32)(vdev->features >> 32), + vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES); - writel(0, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL); - writel(vdev->features, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES); + writel(0, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL); + writel((u32)vdev->features, + vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES); return 0; } @@ -275,7 +259,12 @@ static void vm_del_vq(struct virtqueue *vq) /* Select and deactivate the queue */ writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL); - writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); + if (vm_dev->version == 1) { + writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); + } else { + writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY); + WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY)); + } size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN)); free_pages_exact(info->queue, size); @@ -312,7 +301,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL); /* Queue shouldn't already be set up. */ - if (readl(vm_dev->base + VIRTIO_MMIO_QUEUE_PFN)) { + if (readl(vm_dev->base + (vm_dev->version == 1 ? + VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) { err = -ENOENT; goto error_available; } @@ -356,13 +346,6 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, info->num /= 2; } - /* Activate the queue */ - writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM); - writel(VIRTIO_MMIO_VRING_ALIGN, - vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN); - writel(virt_to_phys(info->queue) >> PAGE_SHIFT, - vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); - /* Create the vring */ vq = vring_new_virtqueue(index, info->num, VIRTIO_MMIO_VRING_ALIGN, vdev, true, info->queue, vm_notify, callback, name); @@ -371,6 +354,33 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, goto error_new_virtqueue; } + /* Activate the queue */ + writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM); + if (vm_dev->version == 1) { + writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN); + writel(virt_to_phys(info->queue) >> PAGE_SHIFT, + vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); + } else { + u64 addr; + + addr = virt_to_phys(info->queue); + writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW); + writel((u32)(addr >> 32), + vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH); + + addr = virt_to_phys(virtqueue_get_avail(vq)); + writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW); + writel((u32)(addr >> 32), + vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH); + + addr = virt_to_phys(virtqueue_get_used(vq)); + writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW); + writel((u32)(addr >> 32), + vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH); + + writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY); + } + vq->priv = info; info->vq = vq; @@ -381,7 +391,12 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, return vq; error_new_virtqueue: - writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); + if (vm_dev->version == 1) { + writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); + } else { + writel(0, vm_dev->base + 
VIRTIO_MMIO_QUEUE_READY); + WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY)); + } free_pages_exact(info->queue, size); error_alloc_pages: kfree(info); @@ -476,16 +491,32 @@ static int virtio_mmio_probe(struct platform_device *pdev) /* Check device version */ vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION); - if (vm_dev->version != 1) { + if (vm_dev->version < 1 || vm_dev->version > 2) { dev_err(&pdev->dev, "Version %ld not supported!\n", vm_dev->version); return -ENXIO; } vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID); + if (vm_dev->vdev.id.device == 0) { + /* + * virtio-mmio device with an ID 0 is a (dummy) placeholder + * with no function. End probing now with no error reported. + */ + return -ENODEV; + } vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID); - writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE); + /* Reject legacy-only IDs for version 2 devices */ + if (vm_dev->version == 2 && + virtio_device_is_legacy_only(vm_dev->vdev.id)) { + dev_err(&pdev->dev, "Version 2 not supported for devices %u!\n", + vm_dev->vdev.id.device); + return -ENODEV; + } + + if (vm_dev->version == 1) + writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE); platform_set_drvdata(pdev, vm_dev); diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 9756f21b809e..e894eb278d83 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -19,6 +19,14 @@ #include "virtio_pci_common.h" +static bool force_legacy = false; + +#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY) +module_param(force_legacy, bool, 0444); +MODULE_PARM_DESC(force_legacy, + "Force legacy mode for transitional virtio 1 devices"); +#endif + /* wait for pending irq handlers */ void vp_synchronize_vectors(struct virtio_device *vdev) { @@ -464,15 +472,97 @@ static const struct pci_device_id virtio_pci_id_table[] = { MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); +static void virtio_pci_release_dev(struct device *_d) +{ + struct virtio_device *vdev = dev_to_virtio(_d); + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + /* As struct device is a kobject, it's not safe to + * free the memory (including the reference counter itself) + * until it's release callback. */ + kfree(vp_dev); +} + static int virtio_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) { - return virtio_pci_legacy_probe(pci_dev, id); + struct virtio_pci_device *vp_dev; + int rc; + + /* allocate our structure and fill it out */ + vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL); + if (!vp_dev) + return -ENOMEM; + + pci_set_drvdata(pci_dev, vp_dev); + vp_dev->vdev.dev.parent = &pci_dev->dev; + vp_dev->vdev.dev.release = virtio_pci_release_dev; + vp_dev->pci_dev = pci_dev; + INIT_LIST_HEAD(&vp_dev->virtqueues); + spin_lock_init(&vp_dev->lock); + + /* Disable MSI/MSIX to bring device to a known good state. */ + pci_msi_off(pci_dev); + + /* enable the device */ + rc = pci_enable_device(pci_dev); + if (rc) + goto err_enable_device; + + rc = pci_request_regions(pci_dev, "virtio-pci"); + if (rc) + goto err_request_regions; + + if (force_legacy) { + rc = virtio_pci_legacy_probe(vp_dev); + /* Also try modern mode if we can't map BAR0 (no IO space). 
*/ + if (rc == -ENODEV || rc == -ENOMEM) + rc = virtio_pci_modern_probe(vp_dev); + if (rc) + goto err_probe; + } else { + rc = virtio_pci_modern_probe(vp_dev); + if (rc == -ENODEV) + rc = virtio_pci_legacy_probe(vp_dev); + if (rc) + goto err_probe; + } + + pci_set_master(pci_dev); + + rc = register_virtio_device(&vp_dev->vdev); + if (rc) + goto err_register; + + return 0; + +err_register: + if (vp_dev->ioaddr) + virtio_pci_legacy_remove(vp_dev); + else + virtio_pci_modern_remove(vp_dev); +err_probe: + pci_release_regions(pci_dev); +err_request_regions: + pci_disable_device(pci_dev); +err_enable_device: + kfree(vp_dev); + return rc; } static void virtio_pci_remove(struct pci_dev *pci_dev) { - virtio_pci_legacy_remove(pci_dev); + struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); + + unregister_virtio_device(&vp_dev->vdev); + + if (vp_dev->ioaddr) + virtio_pci_legacy_remove(vp_dev); + else + virtio_pci_modern_remove(vp_dev); + + pci_release_regions(pci_dev); + pci_disable_device(pci_dev); } static struct pci_driver virtio_pci_driver = { diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index 5a497289b7e9..28ee4e56badf 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ -53,12 +53,32 @@ struct virtio_pci_device { struct virtio_device vdev; struct pci_dev *pci_dev; + /* In legacy mode, these two point to within ->legacy. */ + /* Where to read and clear interrupt */ + u8 __iomem *isr; + + /* Modern only fields */ + /* The IO mapping for the PCI config space (non-legacy mode) */ + struct virtio_pci_common_cfg __iomem *common; + /* Device-specific data (non-legacy mode) */ + void __iomem *device; + /* Base of vq notifications (non-legacy mode). */ + void __iomem *notify_base; + + /* So we can sanity-check accesses. */ + size_t notify_len; + size_t device_len; + + /* Capability for when we need to map notifications per-vq. */ + int notify_map_cap; + + /* Multiply queue_notify_off by this value. (non-legacy mode). 
*/ + u32 notify_offset_multiplier; + + /* Legacy only field */ /* the IO mapping for the PCI config space */ void __iomem *ioaddr; - /* the IO mapping for ISR operation */ - void __iomem *isr; - /* a list of queues so we can dispatch IRQs */ spinlock_t lock; struct list_head virtqueues; @@ -127,8 +147,19 @@ const char *vp_bus_name(struct virtio_device *vdev); */ int vp_set_vq_affinity(struct virtqueue *vq, int cpu); -int virtio_pci_legacy_probe(struct pci_dev *pci_dev, - const struct pci_device_id *id); -void virtio_pci_legacy_remove(struct pci_dev *pci_dev); +#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY) +int virtio_pci_legacy_probe(struct virtio_pci_device *); +void virtio_pci_legacy_remove(struct virtio_pci_device *); +#else +static inline int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev) +{ + return -ENODEV; +} +static inline void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev) +{ +} +#endif +int virtio_pci_modern_probe(struct virtio_pci_device *); +void virtio_pci_modern_remove(struct virtio_pci_device *); #endif diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c index a5486e65e04b..256a5278a515 100644 --- a/drivers/virtio/virtio_pci_legacy.c +++ b/drivers/virtio/virtio_pci_legacy.c @@ -211,23 +211,10 @@ static const struct virtio_config_ops virtio_pci_config_ops = { .set_vq_affinity = vp_set_vq_affinity, }; -static void virtio_pci_release_dev(struct device *_d) -{ - struct virtio_device *vdev = dev_to_virtio(_d); - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - - /* As struct device is a kobject, it's not safe to - * free the memory (including the reference counter itself) - * until it's release callback. */ - kfree(vp_dev); -} - /* the PCI probing function */ -int virtio_pci_legacy_probe(struct pci_dev *pci_dev, - const struct pci_device_id *id) +int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev) { - struct virtio_pci_device *vp_dev; - int err; + struct pci_dev *pci_dev = vp_dev->pci_dev; /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */ if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f) @@ -239,41 +226,12 @@ int virtio_pci_legacy_probe(struct pci_dev *pci_dev, return -ENODEV; } - /* allocate our structure and fill it out */ - vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL); - if (vp_dev == NULL) - return -ENOMEM; - - vp_dev->vdev.dev.parent = &pci_dev->dev; - vp_dev->vdev.dev.release = virtio_pci_release_dev; - vp_dev->vdev.config = &virtio_pci_config_ops; - vp_dev->pci_dev = pci_dev; - INIT_LIST_HEAD(&vp_dev->virtqueues); - spin_lock_init(&vp_dev->lock); - - /* Disable MSI/MSIX to bring device to a known good state. */ - pci_msi_off(pci_dev); - - /* enable the device */ - err = pci_enable_device(pci_dev); - if (err) - goto out; - - err = pci_request_regions(pci_dev, "virtio-pci"); - if (err) - goto out_enable_device; - vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0); - if (vp_dev->ioaddr == NULL) { - err = -ENOMEM; - goto out_req_regions; - } + if (!vp_dev->ioaddr) + return -ENOMEM; vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR; - pci_set_drvdata(pci_dev, vp_dev); - pci_set_master(pci_dev); - /* we use the subsystem vendor/device id as the virtio vendor/device * id. 
this allows us to use the same PCI vendor/device id for all * virtio devices and to identify the particular virtio driver by @@ -281,36 +239,18 @@ int virtio_pci_legacy_probe(struct pci_dev *pci_dev, vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; vp_dev->vdev.id.device = pci_dev->subsystem_device; + vp_dev->vdev.config = &virtio_pci_config_ops; + vp_dev->config_vector = vp_config_vector; vp_dev->setup_vq = setup_vq; vp_dev->del_vq = del_vq; - /* finally register the virtio device */ - err = register_virtio_device(&vp_dev->vdev); - if (err) - goto out_set_drvdata; - return 0; - -out_set_drvdata: - pci_iounmap(pci_dev, vp_dev->ioaddr); -out_req_regions: - pci_release_regions(pci_dev); -out_enable_device: - pci_disable_device(pci_dev); -out: - kfree(vp_dev); - return err; } -void virtio_pci_legacy_remove(struct pci_dev *pci_dev) +void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev) { - struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); - - unregister_virtio_device(&vp_dev->vdev); + struct pci_dev *pci_dev = vp_dev->pci_dev; - vp_del_vqs(&vp_dev->vdev); pci_iounmap(pci_dev, vp_dev->ioaddr); - pci_release_regions(pci_dev); - pci_disable_device(pci_dev); } diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c new file mode 100644 index 000000000000..2aa38e59db2e --- /dev/null +++ b/drivers/virtio/virtio_pci_modern.c @@ -0,0 +1,695 @@ +/* + * Virtio PCI driver - modern (virtio 1.0) device support + * + * This module allows virtio devices to be used over a virtual PCI device. + * This can be used with QEMU based VMMs like KVM or Xen. + * + * Copyright IBM Corp. 2007 + * Copyright Red Hat, Inc. 2014 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * Rusty Russell <rusty@rustcorp.com.au> + * Michael S. Tsirkin <mst@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#define VIRTIO_PCI_NO_LEGACY +#include "virtio_pci_common.h" + +static void __iomem *map_capability(struct pci_dev *dev, int off, + size_t minlen, + u32 align, + u32 start, u32 size, + size_t *len) +{ + u8 bar; + u32 offset, length; + void __iomem *p; + + pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap, + bar), + &bar); + pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset), + &offset); + pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length), + &length); + + if (length <= start) { + dev_err(&dev->dev, + "virtio_pci: bad capability len %u (>%u expected)\n", + length, start); + return NULL; + } + + if (length - start < minlen) { + dev_err(&dev->dev, + "virtio_pci: bad capability len %u (>=%zu expected)\n", + length, minlen); + return NULL; + } + + length -= start; + + if (start + offset < offset) { + dev_err(&dev->dev, + "virtio_pci: map wrap-around %u+%u\n", + start, offset); + return NULL; + } + + offset += start; + + if (offset & (align - 1)) { + dev_err(&dev->dev, + "virtio_pci: offset %u not aligned to %u\n", + offset, align); + return NULL; + } + + if (length > size) + length = size; + + if (len) + *len = length; + + if (minlen + offset < minlen || + minlen + offset > pci_resource_len(dev, bar)) { + dev_err(&dev->dev, + "virtio_pci: map virtio %zu@%u " + "out of range on bar %i length %lu\n", + minlen, offset, + bar, (unsigned long)pci_resource_len(dev, bar)); + return NULL; + } + + p = pci_iomap_range(dev, bar, offset, length); + if (!p) + dev_err(&dev->dev, + "virtio_pci: unable to map virtio %u@%u on bar %i\n", + length, offset, bar); + return p; +} + +static void iowrite64_twopart(u64 val, __le32 __iomem *lo, __le32 __iomem *hi) +{ + iowrite32((u32)val, lo); + iowrite32(val >> 32, hi); +} + +/* virtio config->get_features() implementation */ +static u64 vp_get_features(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + u64 features; + + iowrite32(0, &vp_dev->common->device_feature_select); + features = ioread32(&vp_dev->common->device_feature); + iowrite32(1, &vp_dev->common->device_feature_select); + features |= ((u64)ioread32(&vp_dev->common->device_feature) << 32); + + return features; +} + +/* virtio config->finalize_features() implementation */ +static int vp_finalize_features(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + /* Give virtio_ring a chance to accept features. 
*/ + vring_transport_features(vdev); + + if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) { + dev_err(&vdev->dev, "virtio: device uses modern interface " + "but does not have VIRTIO_F_VERSION_1\n"); + return -EINVAL; + } + + iowrite32(0, &vp_dev->common->guest_feature_select); + iowrite32((u32)vdev->features, &vp_dev->common->guest_feature); + iowrite32(1, &vp_dev->common->guest_feature_select); + iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature); + + return 0; +} + +/* virtio config->get() implementation */ +static void vp_get(struct virtio_device *vdev, unsigned offset, + void *buf, unsigned len) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + u8 b; + __le16 w; + __le32 l; + + BUG_ON(offset + len > vp_dev->device_len); + + switch (len) { + case 1: + b = ioread8(vp_dev->device + offset); + memcpy(buf, &b, sizeof b); + break; + case 2: + w = cpu_to_le16(ioread16(vp_dev->device + offset)); + memcpy(buf, &w, sizeof w); + break; + case 4: + l = cpu_to_le32(ioread32(vp_dev->device + offset)); + memcpy(buf, &l, sizeof l); + break; + case 8: + l = cpu_to_le32(ioread32(vp_dev->device + offset)); + memcpy(buf, &l, sizeof l); + l = cpu_to_le32(ioread32(vp_dev->device + offset + sizeof l)); + memcpy(buf + sizeof l, &l, sizeof l); + break; + default: + BUG(); + } +} + +/* the config->set() implementation. it's symmetric to the config->get() + * implementation */ +static void vp_set(struct virtio_device *vdev, unsigned offset, + const void *buf, unsigned len) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + u8 b; + __le16 w; + __le32 l; + + BUG_ON(offset + len > vp_dev->device_len); + + switch (len) { + case 1: + memcpy(&b, buf, sizeof b); + iowrite8(b, vp_dev->device + offset); + break; + case 2: + memcpy(&w, buf, sizeof w); + iowrite16(le16_to_cpu(w), vp_dev->device + offset); + break; + case 4: + memcpy(&l, buf, sizeof l); + iowrite32(le32_to_cpu(l), vp_dev->device + offset); + break; + case 8: + memcpy(&l, buf, sizeof l); + iowrite32(le32_to_cpu(l), vp_dev->device + offset); + memcpy(&l, buf + sizeof l, sizeof l); + iowrite32(le32_to_cpu(l), vp_dev->device + offset + sizeof l); + break; + default: + BUG(); + } +} + +static u32 vp_generation(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + return ioread8(&vp_dev->common->config_generation); +} + +/* config->{get,set}_status() implementations */ +static u8 vp_get_status(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + return ioread8(&vp_dev->common->device_status); +} + +static void vp_set_status(struct virtio_device *vdev, u8 status) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + /* We should never be setting status to 0. */ + BUG_ON(status == 0); + iowrite8(status, &vp_dev->common->device_status); +} + +static void vp_reset(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + /* 0 status means a reset. */ + iowrite8(0, &vp_dev->common->device_status); + /* Flush out the status write, and flush in device writes, + * including MSI-X interrupts, if any. */ + ioread8(&vp_dev->common->device_status); + /* Flush pending VQ/configuration callbacks. 
*/ + vp_synchronize_vectors(vdev); +} + +static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector) +{ + /* Setup the vector used for configuration events */ + iowrite16(vector, &vp_dev->common->msix_config); + /* Verify we had enough resources to assign the vector */ + /* Will also flush the write out to device */ + return ioread16(&vp_dev->common->msix_config); +} + +static size_t vring_pci_size(u16 num) +{ + /* We only need a cacheline separation. */ + return PAGE_ALIGN(vring_size(num, SMP_CACHE_BYTES)); +} + +static void *alloc_virtqueue_pages(int *num) +{ + void *pages; + + /* TODO: allocate each queue chunk individually */ + for (; *num && vring_pci_size(*num) > PAGE_SIZE; *num /= 2) { + pages = alloc_pages_exact(vring_pci_size(*num), + GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN); + if (pages) + return pages; + } + + if (!*num) + return NULL; + + /* Try to get a single page. You are my only hope! */ + return alloc_pages_exact(vring_pci_size(*num), GFP_KERNEL|__GFP_ZERO); +} + +static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, + struct virtio_pci_vq_info *info, + unsigned index, + void (*callback)(struct virtqueue *vq), + const char *name, + u16 msix_vec) +{ + struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common; + struct virtqueue *vq; + u16 num, off; + int err; + + if (index >= ioread16(&cfg->num_queues)) + return ERR_PTR(-ENOENT); + + /* Select the queue we're interested in */ + iowrite16(index, &cfg->queue_select); + + /* Check if queue is either not available or already active. */ + num = ioread16(&cfg->queue_size); + if (!num || ioread16(&cfg->queue_enable)) + return ERR_PTR(-ENOENT); + + if (num & (num - 1)) { + dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num); + return ERR_PTR(-EINVAL); + } + + /* get offset of notification word for this vq */ + off = ioread16(&cfg->queue_notify_off); + + info->num = num; + info->msix_vector = msix_vec; + + info->queue = alloc_virtqueue_pages(&info->num); + if (info->queue == NULL) + return ERR_PTR(-ENOMEM); + + /* create the vring */ + vq = vring_new_virtqueue(index, info->num, + SMP_CACHE_BYTES, &vp_dev->vdev, + true, info->queue, vp_notify, callback, name); + if (!vq) { + err = -ENOMEM; + goto err_new_queue; + } + + /* activate the queue */ + iowrite16(num, &cfg->queue_size); + iowrite64_twopart(virt_to_phys(info->queue), + &cfg->queue_desc_lo, &cfg->queue_desc_hi); + iowrite64_twopart(virt_to_phys(virtqueue_get_avail(vq)), + &cfg->queue_avail_lo, &cfg->queue_avail_hi); + iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)), + &cfg->queue_used_lo, &cfg->queue_used_hi); + + if (vp_dev->notify_base) { + /* offset should not wrap */ + if ((u64)off * vp_dev->notify_offset_multiplier + 2 + > vp_dev->notify_len) { + dev_warn(&vp_dev->pci_dev->dev, + "bad notification offset %u (x %u) " + "for queue %u > %zd", + off, vp_dev->notify_offset_multiplier, + index, vp_dev->notify_len); + err = -EINVAL; + goto err_map_notify; + } + vq->priv = (void __force *)vp_dev->notify_base + + off * vp_dev->notify_offset_multiplier; + } else { + vq->priv = (void __force *)map_capability(vp_dev->pci_dev, + vp_dev->notify_map_cap, 2, 2, + off * vp_dev->notify_offset_multiplier, 2, + NULL); + } + + if (!vq->priv) { + err = -ENOMEM; + goto err_map_notify; + } + + if (msix_vec != VIRTIO_MSI_NO_VECTOR) { + iowrite16(msix_vec, &cfg->queue_msix_vector); + msix_vec = ioread16(&cfg->queue_msix_vector); + if (msix_vec == VIRTIO_MSI_NO_VECTOR) { + err = -EBUSY; + goto err_assign_vector; + } + } + + return vq; + +err_assign_vector: + if 
(!vp_dev->notify_base) + pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv); +err_map_notify: + vring_del_virtqueue(vq); +err_new_queue: + free_pages_exact(info->queue, vring_pci_size(info->num)); + return ERR_PTR(err); +} + +static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtqueue *vq; + int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names); + + if (rc) + return rc; + + /* Select and activate all queues. Has to be done last: once we do + * this, there's no way to go back except reset. + */ + list_for_each_entry(vq, &vdev->vqs, list) { + iowrite16(vq->index, &vp_dev->common->queue_select); + iowrite16(1, &vp_dev->common->queue_enable); + } + + return 0; +} + +static void del_vq(struct virtio_pci_vq_info *info) +{ + struct virtqueue *vq = info->vq; + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); + + iowrite16(vq->index, &vp_dev->common->queue_select); + + if (vp_dev->msix_enabled) { + iowrite16(VIRTIO_MSI_NO_VECTOR, + &vp_dev->common->queue_msix_vector); + /* Flush the write out to device */ + ioread16(&vp_dev->common->queue_msix_vector); + } + + if (!vp_dev->notify_base) + pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv); + + vring_del_virtqueue(vq); + + free_pages_exact(info->queue, vring_pci_size(info->num)); +} + +static const struct virtio_config_ops virtio_pci_config_nodev_ops = { + .get = NULL, + .set = NULL, + .generation = vp_generation, + .get_status = vp_get_status, + .set_status = vp_set_status, + .reset = vp_reset, + .find_vqs = vp_modern_find_vqs, + .del_vqs = vp_del_vqs, + .get_features = vp_get_features, + .finalize_features = vp_finalize_features, + .bus_name = vp_bus_name, + .set_vq_affinity = vp_set_vq_affinity, +}; + +static const struct virtio_config_ops virtio_pci_config_ops = { + .get = vp_get, + .set = vp_set, + .generation = vp_generation, + .get_status = vp_get_status, + .set_status = vp_set_status, + .reset = vp_reset, + .find_vqs = vp_modern_find_vqs, + .del_vqs = vp_del_vqs, + .get_features = vp_get_features, + .finalize_features = vp_finalize_features, + .bus_name = vp_bus_name, + .set_vq_affinity = vp_set_vq_affinity, +}; + +/** + * virtio_pci_find_capability - walk capabilities to find device info. + * @dev: the pci device + * @cfg_type: the VIRTIO_PCI_CAP_* value we seek + * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO. + * + * Returns offset of the capability, or 0. + */ +static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type, + u32 ioresource_types) +{ + int pos; + + for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); + pos > 0; + pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) { + u8 type, bar; + pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, + cfg_type), + &type); + pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, + bar), + &bar); + + /* Ignore structures with reserved BAR values */ + if (bar > 0x5) + continue; + + if (type == cfg_type) { + if (pci_resource_len(dev, bar) && + pci_resource_flags(dev, bar) & ioresource_types) + return pos; + } + } + return 0; +} + +/* This is part of the ABI. Don't screw with it. */ +static inline void check_offsets(void) +{ + /* Note: disk space was harmed in compilation of this function. 
*/ + BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR != + offsetof(struct virtio_pci_cap, cap_vndr)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT != + offsetof(struct virtio_pci_cap, cap_next)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN != + offsetof(struct virtio_pci_cap, cap_len)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE != + offsetof(struct virtio_pci_cap, cfg_type)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR != + offsetof(struct virtio_pci_cap, bar)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET != + offsetof(struct virtio_pci_cap, offset)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH != + offsetof(struct virtio_pci_cap, length)); + BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT != + offsetof(struct virtio_pci_notify_cap, + notify_off_multiplier)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT != + offsetof(struct virtio_pci_common_cfg, + device_feature_select)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF != + offsetof(struct virtio_pci_common_cfg, device_feature)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT != + offsetof(struct virtio_pci_common_cfg, + guest_feature_select)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF != + offsetof(struct virtio_pci_common_cfg, guest_feature)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX != + offsetof(struct virtio_pci_common_cfg, msix_config)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ != + offsetof(struct virtio_pci_common_cfg, num_queues)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS != + offsetof(struct virtio_pci_common_cfg, device_status)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION != + offsetof(struct virtio_pci_common_cfg, config_generation)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT != + offsetof(struct virtio_pci_common_cfg, queue_select)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE != + offsetof(struct virtio_pci_common_cfg, queue_size)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX != + offsetof(struct virtio_pci_common_cfg, queue_msix_vector)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE != + offsetof(struct virtio_pci_common_cfg, queue_enable)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF != + offsetof(struct virtio_pci_common_cfg, queue_notify_off)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO != + offsetof(struct virtio_pci_common_cfg, queue_desc_lo)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI != + offsetof(struct virtio_pci_common_cfg, queue_desc_hi)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO != + offsetof(struct virtio_pci_common_cfg, queue_avail_lo)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI != + offsetof(struct virtio_pci_common_cfg, queue_avail_hi)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO != + offsetof(struct virtio_pci_common_cfg, queue_used_lo)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI != + offsetof(struct virtio_pci_common_cfg, queue_used_hi)); +} + +/* the PCI probing function */ +int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev) +{ + struct pci_dev *pci_dev = vp_dev->pci_dev; + int err, common, isr, notify, device; + u32 notify_length; + u32 notify_offset; + + check_offsets(); + + /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */ + if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f) + return -ENODEV; + + if (pci_dev->device < 0x1040) { + /* Transitional devices: use the PCI subsystem device id as + * virtio device id, same as legacy driver always did. + */ + vp_dev->vdev.id.device = pci_dev->subsystem_device; + } else { + /* Modern devices: simply use PCI device id, but start from 0x1040. 
*/ + vp_dev->vdev.id.device = pci_dev->device - 0x1040; + } + vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; + + if (virtio_device_is_legacy_only(vp_dev->vdev.id)) + return -ENODEV; + + /* check for a common config: if not, use legacy mode (bar 0). */ + common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG, + IORESOURCE_IO | IORESOURCE_MEM); + if (!common) { + dev_info(&pci_dev->dev, + "virtio_pci: leaving for legacy driver\n"); + return -ENODEV; + } + + /* If common is there, these should be too... */ + isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG, + IORESOURCE_IO | IORESOURCE_MEM); + notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG, + IORESOURCE_IO | IORESOURCE_MEM); + if (!isr || !notify) { + dev_err(&pci_dev->dev, + "virtio_pci: missing capabilities %i/%i/%i\n", + common, isr, notify); + return -EINVAL; + } + + /* Device capability is only mandatory for devices that have + * device-specific configuration. + */ + device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG, + IORESOURCE_IO | IORESOURCE_MEM); + + err = -EINVAL; + vp_dev->common = map_capability(pci_dev, common, + sizeof(struct virtio_pci_common_cfg), 4, + 0, sizeof(struct virtio_pci_common_cfg), + NULL); + if (!vp_dev->common) + goto err_map_common; + vp_dev->isr = map_capability(pci_dev, isr, sizeof(u8), 1, + 0, 1, + NULL); + if (!vp_dev->isr) + goto err_map_isr; + + /* Read notify_off_multiplier from config space. */ + pci_read_config_dword(pci_dev, + notify + offsetof(struct virtio_pci_notify_cap, + notify_off_multiplier), + &vp_dev->notify_offset_multiplier); + /* Read notify length and offset from config space. */ + pci_read_config_dword(pci_dev, + notify + offsetof(struct virtio_pci_notify_cap, + cap.length), + ¬ify_length); + + pci_read_config_dword(pci_dev, + notify + offsetof(struct virtio_pci_notify_cap, + cap.length), + ¬ify_offset); + + /* We don't know how many VQs we'll map, ahead of the time. + * If notify length is small, map it all now. + * Otherwise, map each VQ individually later. + */ + if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) { + vp_dev->notify_base = map_capability(pci_dev, notify, 2, 2, + 0, notify_length, + &vp_dev->notify_len); + if (!vp_dev->notify_base) + goto err_map_notify; + } else { + vp_dev->notify_map_cap = notify; + } + + /* Again, we don't know how much we should map, but PAGE_SIZE + * is more than enough for all existing devices. 
+ */ + if (device) { + vp_dev->device = map_capability(pci_dev, device, 0, 4, + 0, PAGE_SIZE, + &vp_dev->device_len); + if (!vp_dev->device) + goto err_map_device; + + vp_dev->vdev.config = &virtio_pci_config_ops; + } else { + vp_dev->vdev.config = &virtio_pci_config_nodev_ops; + } + + vp_dev->config_vector = vp_config_vector; + vp_dev->setup_vq = setup_vq; + vp_dev->del_vq = del_vq; + + return 0; + +err_map_device: + if (vp_dev->notify_base) + pci_iounmap(pci_dev, vp_dev->notify_base); +err_map_notify: + pci_iounmap(pci_dev, vp_dev->isr); +err_map_isr: + pci_iounmap(pci_dev, vp_dev->common); +err_map_common: + return err; +} + +void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev) +{ + struct pci_dev *pci_dev = vp_dev->pci_dev; + + if (vp_dev->device) + pci_iounmap(pci_dev, vp_dev->device); + if (vp_dev->notify_base) + pci_iounmap(pci_dev, vp_dev->notify_base); + pci_iounmap(pci_dev, vp_dev->isr); + pci_iounmap(pci_dev, vp_dev->common); +} diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 00ec6b3f96b2..096b857e7b75 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -54,8 +54,7 @@ #define END_USE(vq) #endif -struct vring_virtqueue -{ +struct vring_virtqueue { struct virtqueue vq; /* Actual memory layout for this queue */ @@ -245,14 +244,14 @@ static inline int virtqueue_add(struct virtqueue *_vq, vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1); vq->num_added++; + pr_debug("Added buffer head %i to %p\n", head, vq); + END_USE(vq); + /* This is very unlikely, but theoretically possible. Kick * just in case. */ if (unlikely(vq->num_added == (1 << 16) - 1)) virtqueue_kick(_vq); - pr_debug("Added buffer head %i to %p\n", head, vq); - END_USE(vq); - return 0; } |
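Finally, the transitional probe logic in virtio_pci_common.c above is worth calling out: the driver tries the modern (virtio 1.0) backend first and falls back to the legacy one on -ENODEV, with the force_legacy module parameter reversing that order. A reduced userspace sketch, under the assumption that try_modern()/try_legacy() stand in for virtio_pci_modern_probe()/virtio_pci_legacy_probe() and return 0 or a negative errno:

	/* Illustration only: the modern-first/legacy-fallback probe order
	 * from virtio_pci_probe() above, with -ENODEV meaning "not a
	 * device this backend can drive". */
	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	static int try_modern(void)
	{
		return -ENODEV;		/* pretend: no common-cfg vendor capability */
	}

	static int try_legacy(void)
	{
		return 0;		/* pretend: BAR0 mapped fine */
	}

	static int probe(bool force_legacy)
	{
		int rc;

		if (force_legacy) {
			rc = try_legacy();
			/* Also try modern mode if legacy can't bind
			 * (e.g. no I/O space). */
			if (rc == -ENODEV || rc == -ENOMEM)
				rc = try_modern();
		} else {
			rc = try_modern();
			/* A transitional device with no modern interface
			 * falls back to the legacy backend. */
			if (rc == -ENODEV)
				rc = try_legacy();
		}
		return rc;
	}

	int main(void)
	{
		printf("probe() = %d\n", probe(false));	/* modern refuses, legacy binds: 0 */
		return 0;
	}

The removal path reuses the discriminator the probe leaves behind: a non-NULL vp_dev->ioaddr means the legacy backend bound, otherwise the modern one did.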