Diffstat (limited to 'drivers/virtio')
-rw-r--r--	drivers/virtio/Makefile            |   1
-rw-r--r--	drivers/virtio/virtio.c            | 102
-rw-r--r--	drivers/virtio/virtio_mmio.c       |  17
-rw-r--r--	drivers/virtio/virtio_pci.c        | 802
-rw-r--r--	drivers/virtio/virtio_pci_common.c | 464
-rw-r--r--	drivers/virtio/virtio_pci_common.h | 136
-rw-r--r--	drivers/virtio/virtio_pci_legacy.c | 326
-rw-r--r--	drivers/virtio/virtio_ring.c       | 109
8 files changed, 1078 insertions(+), 879 deletions(-)
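The heart of the series is visible in the virtio.c hunk that follows: dev->features widens from a bitmap to a plain u64, and virtio_dev_probe() now intersects the device's feature bits with either the driver's modern feature table or its legacy one, keyed on whether the device offers VIRTIO_F_VERSION_1 (bit 32). A minimal standalone sketch of that selection, simplified from the hunk (the transport-feature pass-through and the FEATURES_OK handshake are left out):

	#include <stdint.h>

	#define VIRTIO_F_VERSION_1 32	/* device complies with virtio 1.0 */

	/* Mirrors the negotiation in virtio_dev_probe(): a v1.0 device is
	 * matched against the modern table; a legacy device falls back to
	 * the legacy table, which defaults to the modern one when the
	 * driver does not supply a separate feature_table_legacy. */
	static uint64_t negotiate_features(uint64_t device_features,
					   uint64_t driver_features,
					   uint64_t driver_features_legacy)
	{
		if (device_features & (1ULL << VIRTIO_F_VERSION_1))
			return driver_features & device_features;
		return driver_features_legacy & device_features;
	}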
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile index 9076635697bb..bf5104b56894 100644 --- a/drivers/virtio/Makefile +++ b/drivers/virtio/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o +virtio_pci-y := virtio_pci_legacy.o virtio_pci_common.o obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index df598dd8c5c8..f22665868781 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -3,6 +3,7 @@ #include <linux/virtio_config.h> #include <linux/module.h> #include <linux/idr.h> +#include <uapi/linux/virtio_ids.h> /* Unique numbering for virtio devices. */ static DEFINE_IDA(virtio_index_ida); @@ -49,9 +50,9 @@ static ssize_t features_show(struct device *_d, /* We actually represent this as a bitstring, as it could be * arbitrary length in future. */ - for (i = 0; i < ARRAY_SIZE(dev->features)*BITS_PER_LONG; i++) + for (i = 0; i < sizeof(dev->features)*8; i++) len += sprintf(buf+len, "%c", - test_bit(i, dev->features) ? '1' : '0'); + __virtio_test_bit(dev, i) ? '1' : '0'); len += sprintf(buf+len, "\n"); return len; } @@ -113,6 +114,13 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev, for (i = 0; i < drv->feature_table_size; i++) if (drv->feature_table[i] == fbit) return; + + if (drv->feature_table_legacy) { + for (i = 0; i < drv->feature_table_size_legacy; i++) + if (drv->feature_table_legacy[i] == fbit) + return; + } + BUG(); } EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature); @@ -159,7 +167,10 @@ static int virtio_dev_probe(struct device *_d) int err, i; struct virtio_device *dev = dev_to_virtio(_d); struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); - u32 device_features; + u64 device_features; + u64 driver_features; + u64 driver_features_legacy; + unsigned status; /* We have a driver! */ add_status(dev, VIRTIO_CONFIG_S_DRIVER); @@ -167,34 +178,66 @@ static int virtio_dev_probe(struct device *_d) /* Figure out what features the device supports. */ device_features = dev->config->get_features(dev); - /* Features supported by both device and driver into dev->features. */ - memset(dev->features, 0, sizeof(dev->features)); + /* Figure out what features the driver supports. */ + driver_features = 0; for (i = 0; i < drv->feature_table_size; i++) { unsigned int f = drv->feature_table[i]; - BUG_ON(f >= 32); - if (device_features & (1 << f)) - set_bit(f, dev->features); + BUG_ON(f >= 64); + driver_features |= (1ULL << f); + } + + /* Some drivers have a separate feature table for virtio v1.0 */ + if (drv->feature_table_legacy) { + driver_features_legacy = 0; + for (i = 0; i < drv->feature_table_size_legacy; i++) { + unsigned int f = drv->feature_table_legacy[i]; + BUG_ON(f >= 64); + driver_features_legacy |= (1ULL << f); + } + } else { + driver_features_legacy = driver_features; } + if (device_features & (1ULL << VIRTIO_F_VERSION_1)) + dev->features = driver_features & device_features; + else + dev->features = driver_features_legacy & device_features; + /* Transport features always preserved to pass to finalize_features. 
*/ for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) - if (device_features & (1 << i)) - set_bit(i, dev->features); + if (device_features & (1ULL << i)) + __virtio_set_bit(dev, i); - dev->config->finalize_features(dev); + err = dev->config->finalize_features(dev); + if (err) + goto err; + + if (virtio_has_feature(dev, VIRTIO_F_VERSION_1)) { + add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK); + status = dev->config->get_status(dev); + if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) { + dev_err(_d, "virtio: device refuses features: %x\n", + status); + err = -ENODEV; + goto err; + } + } err = drv->probe(dev); if (err) - add_status(dev, VIRTIO_CONFIG_S_FAILED); - else { - add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); - if (drv->scan) - drv->scan(dev); + goto err; - virtio_config_enable(dev); - } + add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); + if (drv->scan) + drv->scan(dev); + + virtio_config_enable(dev); + return 0; +err: + add_status(dev, VIRTIO_CONFIG_S_FAILED); return err; + } static int virtio_dev_remove(struct device *_d) @@ -223,6 +266,12 @@ static struct bus_type virtio_bus = { .remove = virtio_dev_remove, }; +bool virtio_device_is_legacy_only(struct virtio_device_id id) +{ + return id.device == VIRTIO_ID_BALLOON; +} +EXPORT_SYMBOL_GPL(virtio_device_is_legacy_only); + int register_virtio_driver(struct virtio_driver *driver) { /* Catch this early. */ @@ -303,6 +352,7 @@ EXPORT_SYMBOL_GPL(virtio_device_freeze); int virtio_device_restore(struct virtio_device *dev) { struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); + int ret; /* We always start by resetting the device, in case a previous * driver messed it up. */ @@ -322,14 +372,14 @@ int virtio_device_restore(struct virtio_device *dev) /* We have a driver! */ add_status(dev, VIRTIO_CONFIG_S_DRIVER); - dev->config->finalize_features(dev); + ret = dev->config->finalize_features(dev); + if (ret) + goto err; if (drv->restore) { - int ret = drv->restore(dev); - if (ret) { - add_status(dev, VIRTIO_CONFIG_S_FAILED); - return ret; - } + ret = drv->restore(dev); + if (ret) + goto err; } /* Finally, tell the device we're all set */ @@ -338,6 +388,10 @@ int virtio_device_restore(struct virtio_device *dev) virtio_config_enable(dev); return 0; + +err: + add_status(dev, VIRTIO_CONFIG_S_FAILED); + return ret; } EXPORT_SYMBOL_GPL(virtio_device_restore); #endif diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index ef9a1650bb80..5219210d31ce 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -142,7 +142,7 @@ struct virtio_mmio_vq_info { /* Configuration interface */ -static u32 vm_get_features(struct virtio_device *vdev) +static u64 vm_get_features(struct virtio_device *vdev) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); @@ -152,19 +152,20 @@ static u32 vm_get_features(struct virtio_device *vdev) return readl(vm_dev->base + VIRTIO_MMIO_HOST_FEATURES); } -static void vm_finalize_features(struct virtio_device *vdev) +static int vm_finalize_features(struct virtio_device *vdev) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); - int i; /* Give virtio_ring a chance to accept features. */ vring_transport_features(vdev); - for (i = 0; i < ARRAY_SIZE(vdev->features); i++) { - writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL); - writel(vdev->features[i], - vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES); - } + /* Make sure we don't have any features > 32 bits! 
*/ + BUG_ON((u32)vdev->features != vdev->features); + + writel(0, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL); + writel(vdev->features, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES); + + return 0; } static void vm_get(struct virtio_device *vdev, unsigned offset, diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c deleted file mode 100644 index d34ebfa604f3..000000000000 --- a/drivers/virtio/virtio_pci.c +++ /dev/null @@ -1,802 +0,0 @@ -/* - * Virtio PCI driver - * - * This module allows virtio devices to be used over a virtual PCI device. - * This can be used with QEMU based VMMs like KVM or Xen. - * - * Copyright IBM Corp. 2007 - * - * Authors: - * Anthony Liguori <aliguori@us.ibm.com> - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * - */ - -#include <linux/module.h> -#include <linux/list.h> -#include <linux/pci.h> -#include <linux/slab.h> -#include <linux/interrupt.h> -#include <linux/virtio.h> -#include <linux/virtio_config.h> -#include <linux/virtio_ring.h> -#include <linux/virtio_pci.h> -#include <linux/highmem.h> -#include <linux/spinlock.h> - -MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>"); -MODULE_DESCRIPTION("virtio-pci"); -MODULE_LICENSE("GPL"); -MODULE_VERSION("1"); - -/* Our device structure */ -struct virtio_pci_device -{ - struct virtio_device vdev; - struct pci_dev *pci_dev; - - /* the IO mapping for the PCI config space */ - void __iomem *ioaddr; - - /* a list of queues so we can dispatch IRQs */ - spinlock_t lock; - struct list_head virtqueues; - - /* MSI-X support */ - int msix_enabled; - int intx_enabled; - struct msix_entry *msix_entries; - cpumask_var_t *msix_affinity_masks; - /* Name strings for interrupts. This size should be enough, - * and I'm too lazy to allocate each name separately. */ - char (*msix_names)[256]; - /* Number of available vectors */ - unsigned msix_vectors; - /* Vectors allocated, excluding per-vq vectors if any */ - unsigned msix_used_vectors; - - /* Whether we have vector per vq */ - bool per_vq_vectors; -}; - -/* Constants for MSI-X */ -/* Use first vector for configuration changes, second and the rest for - * virtqueues Thus, we need at least 2 vectors for MSI. */ -enum { - VP_MSIX_CONFIG_VECTOR = 0, - VP_MSIX_VQ_VECTOR = 1, -}; - -struct virtio_pci_vq_info -{ - /* the actual virtqueue */ - struct virtqueue *vq; - - /* the number of entries in the queue */ - int num; - - /* the virtual address of the ring queue */ - void *queue; - - /* the list node for the virtqueues list */ - struct list_head node; - - /* MSI-X vector (or none) */ - unsigned msix_vector; -}; - -/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */ -static const struct pci_device_id virtio_pci_id_table[] = { - { PCI_DEVICE(0x1af4, PCI_ANY_ID) }, - { 0 } -}; - -MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); - -/* Convert a generic virtio device to our structure */ -static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) -{ - return container_of(vdev, struct virtio_pci_device, vdev); -} - -/* virtio config->get_features() implementation */ -static u32 vp_get_features(struct virtio_device *vdev) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - - /* When someone needs more than 32 feature bits, we'll need to - * steal a bit to indicate that the rest are somewhere else. 
*/ - return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES); -} - -/* virtio config->finalize_features() implementation */ -static void vp_finalize_features(struct virtio_device *vdev) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - - /* Give virtio_ring a chance to accept features. */ - vring_transport_features(vdev); - - /* We only support 32 feature bits. */ - BUILD_BUG_ON(ARRAY_SIZE(vdev->features) != 1); - iowrite32(vdev->features[0], vp_dev->ioaddr+VIRTIO_PCI_GUEST_FEATURES); -} - -/* virtio config->get() implementation */ -static void vp_get(struct virtio_device *vdev, unsigned offset, - void *buf, unsigned len) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - void __iomem *ioaddr = vp_dev->ioaddr + - VIRTIO_PCI_CONFIG(vp_dev) + offset; - u8 *ptr = buf; - int i; - - for (i = 0; i < len; i++) - ptr[i] = ioread8(ioaddr + i); -} - -/* the config->set() implementation. it's symmetric to the config->get() - * implementation */ -static void vp_set(struct virtio_device *vdev, unsigned offset, - const void *buf, unsigned len) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - void __iomem *ioaddr = vp_dev->ioaddr + - VIRTIO_PCI_CONFIG(vp_dev) + offset; - const u8 *ptr = buf; - int i; - - for (i = 0; i < len; i++) - iowrite8(ptr[i], ioaddr + i); -} - -/* config->{get,set}_status() implementations */ -static u8 vp_get_status(struct virtio_device *vdev) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); -} - -static void vp_set_status(struct virtio_device *vdev, u8 status) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - /* We should never be setting status to 0. */ - BUG_ON(status == 0); - iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS); -} - -/* wait for pending irq handlers */ -static void vp_synchronize_vectors(struct virtio_device *vdev) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - int i; - - if (vp_dev->intx_enabled) - synchronize_irq(vp_dev->pci_dev->irq); - - for (i = 0; i < vp_dev->msix_vectors; ++i) - synchronize_irq(vp_dev->msix_entries[i].vector); -} - -static void vp_reset(struct virtio_device *vdev) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - /* 0 status means a reset. */ - iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS); - /* Flush out the status write, and flush in device writes, - * including MSi-X interrupts, if any. */ - ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); - /* Flush pending VQ/configuration callbacks. */ - vp_synchronize_vectors(vdev); -} - -/* the notify function used when creating a virt queue */ -static bool vp_notify(struct virtqueue *vq) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); - - /* we write the queue's selector into the notification register to - * signal the other end */ - iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY); - return true; -} - -/* Handle a configuration change: Tell driver if it wants to know. */ -static irqreturn_t vp_config_changed(int irq, void *opaque) -{ - struct virtio_pci_device *vp_dev = opaque; - - virtio_config_changed(&vp_dev->vdev); - return IRQ_HANDLED; -} - -/* Notify all virtqueues on an interrupt. 
*/ -static irqreturn_t vp_vring_interrupt(int irq, void *opaque) -{ - struct virtio_pci_device *vp_dev = opaque; - struct virtio_pci_vq_info *info; - irqreturn_t ret = IRQ_NONE; - unsigned long flags; - - spin_lock_irqsave(&vp_dev->lock, flags); - list_for_each_entry(info, &vp_dev->virtqueues, node) { - if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) - ret = IRQ_HANDLED; - } - spin_unlock_irqrestore(&vp_dev->lock, flags); - - return ret; -} - -/* A small wrapper to also acknowledge the interrupt when it's handled. - * I really need an EIO hook for the vring so I can ack the interrupt once we - * know that we'll be handling the IRQ but before we invoke the callback since - * the callback may notify the host which results in the host attempting to - * raise an interrupt that we would then mask once we acknowledged the - * interrupt. */ -static irqreturn_t vp_interrupt(int irq, void *opaque) -{ - struct virtio_pci_device *vp_dev = opaque; - u8 isr; - - /* reading the ISR has the effect of also clearing it so it's very - * important to save off the value. */ - isr = ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); - - /* It's definitely not us if the ISR was not high */ - if (!isr) - return IRQ_NONE; - - /* Configuration change? Tell driver if it wants to know. */ - if (isr & VIRTIO_PCI_ISR_CONFIG) - vp_config_changed(irq, opaque); - - return vp_vring_interrupt(irq, opaque); -} - -static void vp_free_vectors(struct virtio_device *vdev) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - int i; - - if (vp_dev->intx_enabled) { - free_irq(vp_dev->pci_dev->irq, vp_dev); - vp_dev->intx_enabled = 0; - } - - for (i = 0; i < vp_dev->msix_used_vectors; ++i) - free_irq(vp_dev->msix_entries[i].vector, vp_dev); - - for (i = 0; i < vp_dev->msix_vectors; i++) - if (vp_dev->msix_affinity_masks[i]) - free_cpumask_var(vp_dev->msix_affinity_masks[i]); - - if (vp_dev->msix_enabled) { - /* Disable the vector used for configuration */ - iowrite16(VIRTIO_MSI_NO_VECTOR, - vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); - /* Flush the write out to device */ - ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); - - pci_disable_msix(vp_dev->pci_dev); - vp_dev->msix_enabled = 0; - } - - vp_dev->msix_vectors = 0; - vp_dev->msix_used_vectors = 0; - kfree(vp_dev->msix_names); - vp_dev->msix_names = NULL; - kfree(vp_dev->msix_entries); - vp_dev->msix_entries = NULL; - kfree(vp_dev->msix_affinity_masks); - vp_dev->msix_affinity_masks = NULL; -} - -static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, - bool per_vq_vectors) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - const char *name = dev_name(&vp_dev->vdev.dev); - unsigned i, v; - int err = -ENOMEM; - - vp_dev->msix_vectors = nvectors; - - vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, - GFP_KERNEL); - if (!vp_dev->msix_entries) - goto error; - vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, - GFP_KERNEL); - if (!vp_dev->msix_names) - goto error; - vp_dev->msix_affinity_masks - = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks, - GFP_KERNEL); - if (!vp_dev->msix_affinity_masks) - goto error; - for (i = 0; i < nvectors; ++i) - if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i], - GFP_KERNEL)) - goto error; - - for (i = 0; i < nvectors; ++i) - vp_dev->msix_entries[i].entry = i; - - err = pci_enable_msix_exact(vp_dev->pci_dev, - vp_dev->msix_entries, nvectors); - if (err) - goto error; - vp_dev->msix_enabled = 1; - - /* Set the vector used for configuration */ - v = 
vp_dev->msix_used_vectors; - snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, - "%s-config", name); - err = request_irq(vp_dev->msix_entries[v].vector, - vp_config_changed, 0, vp_dev->msix_names[v], - vp_dev); - if (err) - goto error; - ++vp_dev->msix_used_vectors; - - iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); - /* Verify we had enough resources to assign the vector */ - v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); - if (v == VIRTIO_MSI_NO_VECTOR) { - err = -EBUSY; - goto error; - } - - if (!per_vq_vectors) { - /* Shared vector for all VQs */ - v = vp_dev->msix_used_vectors; - snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, - "%s-virtqueues", name); - err = request_irq(vp_dev->msix_entries[v].vector, - vp_vring_interrupt, 0, vp_dev->msix_names[v], - vp_dev); - if (err) - goto error; - ++vp_dev->msix_used_vectors; - } - return 0; -error: - vp_free_vectors(vdev); - return err; -} - -static int vp_request_intx(struct virtio_device *vdev) -{ - int err; - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - - err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, - IRQF_SHARED, dev_name(&vdev->dev), vp_dev); - if (!err) - vp_dev->intx_enabled = 1; - return err; -} - -static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index, - void (*callback)(struct virtqueue *vq), - const char *name, - u16 msix_vec) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - struct virtio_pci_vq_info *info; - struct virtqueue *vq; - unsigned long flags, size; - u16 num; - int err; - - /* Select the queue we're interested in */ - iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); - - /* Check if queue is either not available or already active. */ - num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM); - if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN)) - return ERR_PTR(-ENOENT); - - /* allocate and fill out our structure the represents an active - * queue */ - info = kmalloc(sizeof(struct virtio_pci_vq_info), GFP_KERNEL); - if (!info) - return ERR_PTR(-ENOMEM); - - info->num = num; - info->msix_vector = msix_vec; - - size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); - info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); - if (info->queue == NULL) { - err = -ENOMEM; - goto out_info; - } - - /* activate the queue */ - iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT, - vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); - - /* create the vring */ - vq = vring_new_virtqueue(index, info->num, VIRTIO_PCI_VRING_ALIGN, vdev, - true, info->queue, vp_notify, callback, name); - if (!vq) { - err = -ENOMEM; - goto out_activate_queue; - } - - vq->priv = info; - info->vq = vq; - - if (msix_vec != VIRTIO_MSI_NO_VECTOR) { - iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); - msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); - if (msix_vec == VIRTIO_MSI_NO_VECTOR) { - err = -EBUSY; - goto out_assign; - } - } - - if (callback) { - spin_lock_irqsave(&vp_dev->lock, flags); - list_add(&info->node, &vp_dev->virtqueues); - spin_unlock_irqrestore(&vp_dev->lock, flags); - } else { - INIT_LIST_HEAD(&info->node); - } - - return vq; - -out_assign: - vring_del_virtqueue(vq); -out_activate_queue: - iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); - free_pages_exact(info->queue, size); -out_info: - kfree(info); - return ERR_PTR(err); -} - -static void vp_del_vq(struct virtqueue *vq) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); - struct virtio_pci_vq_info *info = vq->priv; - 
unsigned long flags, size; - - spin_lock_irqsave(&vp_dev->lock, flags); - list_del(&info->node); - spin_unlock_irqrestore(&vp_dev->lock, flags); - - iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); - - if (vp_dev->msix_enabled) { - iowrite16(VIRTIO_MSI_NO_VECTOR, - vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); - /* Flush the write out to device */ - ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); - } - - vring_del_virtqueue(vq); - - /* Select and deactivate the queue */ - iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); - - size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN)); - free_pages_exact(info->queue, size); - kfree(info); -} - -/* the config->del_vqs() implementation */ -static void vp_del_vqs(struct virtio_device *vdev) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - struct virtqueue *vq, *n; - struct virtio_pci_vq_info *info; - - list_for_each_entry_safe(vq, n, &vdev->vqs, list) { - info = vq->priv; - if (vp_dev->per_vq_vectors && - info->msix_vector != VIRTIO_MSI_NO_VECTOR) - free_irq(vp_dev->msix_entries[info->msix_vector].vector, - vq); - vp_del_vq(vq); - } - vp_dev->per_vq_vectors = false; - - vp_free_vectors(vdev); -} - -static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char *names[], - bool use_msix, - bool per_vq_vectors) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - u16 msix_vec; - int i, err, nvectors, allocated_vectors; - - if (!use_msix) { - /* Old style: one normal interrupt for change and all vqs. */ - err = vp_request_intx(vdev); - if (err) - goto error_request; - } else { - if (per_vq_vectors) { - /* Best option: one for change interrupt, one per vq. */ - nvectors = 1; - for (i = 0; i < nvqs; ++i) - if (callbacks[i]) - ++nvectors; - } else { - /* Second best: one for change, shared for all vqs. */ - nvectors = 2; - } - - err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors); - if (err) - goto error_request; - } - - vp_dev->per_vq_vectors = per_vq_vectors; - allocated_vectors = vp_dev->msix_used_vectors; - for (i = 0; i < nvqs; ++i) { - if (!names[i]) { - vqs[i] = NULL; - continue; - } else if (!callbacks[i] || !vp_dev->msix_enabled) - msix_vec = VIRTIO_MSI_NO_VECTOR; - else if (vp_dev->per_vq_vectors) - msix_vec = allocated_vectors++; - else - msix_vec = VP_MSIX_VQ_VECTOR; - vqs[i] = setup_vq(vdev, i, callbacks[i], names[i], msix_vec); - if (IS_ERR(vqs[i])) { - err = PTR_ERR(vqs[i]); - goto error_find; - } - - if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR) - continue; - - /* allocate per-vq irq if available and necessary */ - snprintf(vp_dev->msix_names[msix_vec], - sizeof *vp_dev->msix_names, - "%s-%s", - dev_name(&vp_dev->vdev.dev), names[i]); - err = request_irq(vp_dev->msix_entries[msix_vec].vector, - vring_interrupt, 0, - vp_dev->msix_names[msix_vec], - vqs[i]); - if (err) { - vp_del_vq(vqs[i]); - goto error_find; - } - } - return 0; - -error_find: - vp_del_vqs(vdev); - -error_request: - return err; -} - -/* the config->find_vqs() implementation */ -static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char *names[]) -{ - int err; - - /* Try MSI-X with one vector per queue. */ - err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true); - if (!err) - return 0; - /* Fallback: MSI-X with one vector for config, one shared for queues. 
*/ - err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, - true, false); - if (!err) - return 0; - /* Finally fall back to regular interrupts. */ - return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, - false, false); -} - -static const char *vp_bus_name(struct virtio_device *vdev) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - - return pci_name(vp_dev->pci_dev); -} - -/* Setup the affinity for a virtqueue: - * - force the affinity for per vq vector - * - OR over all affinities for shared MSI - * - ignore the affinity request if we're using INTX - */ -static int vp_set_vq_affinity(struct virtqueue *vq, int cpu) -{ - struct virtio_device *vdev = vq->vdev; - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - struct virtio_pci_vq_info *info = vq->priv; - struct cpumask *mask; - unsigned int irq; - - if (!vq->callback) - return -EINVAL; - - if (vp_dev->msix_enabled) { - mask = vp_dev->msix_affinity_masks[info->msix_vector]; - irq = vp_dev->msix_entries[info->msix_vector].vector; - if (cpu == -1) - irq_set_affinity_hint(irq, NULL); - else { - cpumask_set_cpu(cpu, mask); - irq_set_affinity_hint(irq, mask); - } - } - return 0; -} - -static const struct virtio_config_ops virtio_pci_config_ops = { - .get = vp_get, - .set = vp_set, - .get_status = vp_get_status, - .set_status = vp_set_status, - .reset = vp_reset, - .find_vqs = vp_find_vqs, - .del_vqs = vp_del_vqs, - .get_features = vp_get_features, - .finalize_features = vp_finalize_features, - .bus_name = vp_bus_name, - .set_vq_affinity = vp_set_vq_affinity, -}; - -static void virtio_pci_release_dev(struct device *_d) -{ - /* - * No need for a release method as we allocate/free - * all devices together with the pci devices. - * Provide an empty one to avoid getting a warning from core. - */ -} - -/* the PCI probing function */ -static int virtio_pci_probe(struct pci_dev *pci_dev, - const struct pci_device_id *id) -{ - struct virtio_pci_device *vp_dev; - int err; - - /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */ - if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f) - return -ENODEV; - - if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) { - printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n", - VIRTIO_PCI_ABI_VERSION, pci_dev->revision); - return -ENODEV; - } - - /* allocate our structure and fill it out */ - vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL); - if (vp_dev == NULL) - return -ENOMEM; - - vp_dev->vdev.dev.parent = &pci_dev->dev; - vp_dev->vdev.dev.release = virtio_pci_release_dev; - vp_dev->vdev.config = &virtio_pci_config_ops; - vp_dev->pci_dev = pci_dev; - INIT_LIST_HEAD(&vp_dev->virtqueues); - spin_lock_init(&vp_dev->lock); - - /* Disable MSI/MSIX to bring device to a known good state. */ - pci_msi_off(pci_dev); - - /* enable the device */ - err = pci_enable_device(pci_dev); - if (err) - goto out; - - err = pci_request_regions(pci_dev, "virtio-pci"); - if (err) - goto out_enable_device; - - vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0); - if (vp_dev->ioaddr == NULL) { - err = -ENOMEM; - goto out_req_regions; - } - - pci_set_drvdata(pci_dev, vp_dev); - pci_set_master(pci_dev); - - /* we use the subsystem vendor/device id as the virtio vendor/device - * id. 
this allows us to use the same PCI vendor/device id for all - * virtio devices and to identify the particular virtio driver by - * the subsystem ids */ - vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; - vp_dev->vdev.id.device = pci_dev->subsystem_device; - - /* finally register the virtio device */ - err = register_virtio_device(&vp_dev->vdev); - if (err) - goto out_set_drvdata; - - return 0; - -out_set_drvdata: - pci_iounmap(pci_dev, vp_dev->ioaddr); -out_req_regions: - pci_release_regions(pci_dev); -out_enable_device: - pci_disable_device(pci_dev); -out: - kfree(vp_dev); - return err; -} - -static void virtio_pci_remove(struct pci_dev *pci_dev) -{ - struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); - - unregister_virtio_device(&vp_dev->vdev); - - vp_del_vqs(&vp_dev->vdev); - pci_iounmap(pci_dev, vp_dev->ioaddr); - pci_release_regions(pci_dev); - pci_disable_device(pci_dev); - kfree(vp_dev); -} - -#ifdef CONFIG_PM_SLEEP -static int virtio_pci_freeze(struct device *dev) -{ - struct pci_dev *pci_dev = to_pci_dev(dev); - struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); - int ret; - - ret = virtio_device_freeze(&vp_dev->vdev); - - if (!ret) - pci_disable_device(pci_dev); - return ret; -} - -static int virtio_pci_restore(struct device *dev) -{ - struct pci_dev *pci_dev = to_pci_dev(dev); - struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); - int ret; - - ret = pci_enable_device(pci_dev); - if (ret) - return ret; - - pci_set_master(pci_dev); - return virtio_device_restore(&vp_dev->vdev); -} - -static const struct dev_pm_ops virtio_pci_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore) -}; -#endif - -static struct pci_driver virtio_pci_driver = { - .name = "virtio-pci", - .id_table = virtio_pci_id_table, - .probe = virtio_pci_probe, - .remove = virtio_pci_remove, -#ifdef CONFIG_PM_SLEEP - .driver.pm = &virtio_pci_pm_ops, -#endif -}; - -module_pci_driver(virtio_pci_driver); diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c new file mode 100644 index 000000000000..953057d84185 --- /dev/null +++ b/drivers/virtio/virtio_pci_common.c @@ -0,0 +1,464 @@ +/* + * Virtio PCI driver - common functionality for all device versions + * + * This module allows virtio devices to be used over a virtual PCI device. + * This can be used with QEMU based VMMs like KVM or Xen. + * + * Copyright IBM Corp. 2007 + * Copyright Red Hat, Inc. 2014 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * Rusty Russell <rusty@rustcorp.com.au> + * Michael S. Tsirkin <mst@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "virtio_pci_common.h" + +/* wait for pending irq handlers */ +void vp_synchronize_vectors(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + int i; + + if (vp_dev->intx_enabled) + synchronize_irq(vp_dev->pci_dev->irq); + + for (i = 0; i < vp_dev->msix_vectors; ++i) + synchronize_irq(vp_dev->msix_entries[i].vector); +} + +/* the notify function used when creating a virt queue */ +bool vp_notify(struct virtqueue *vq) +{ + /* we write the queue's selector into the notification register to + * signal the other end */ + iowrite16(vq->index, (void __iomem *)vq->priv); + return true; +} + +/* Handle a configuration change: Tell driver if it wants to know. 
*/ +static irqreturn_t vp_config_changed(int irq, void *opaque) +{ + struct virtio_pci_device *vp_dev = opaque; + + virtio_config_changed(&vp_dev->vdev); + return IRQ_HANDLED; +} + +/* Notify all virtqueues on an interrupt. */ +static irqreturn_t vp_vring_interrupt(int irq, void *opaque) +{ + struct virtio_pci_device *vp_dev = opaque; + struct virtio_pci_vq_info *info; + irqreturn_t ret = IRQ_NONE; + unsigned long flags; + + spin_lock_irqsave(&vp_dev->lock, flags); + list_for_each_entry(info, &vp_dev->virtqueues, node) { + if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + spin_unlock_irqrestore(&vp_dev->lock, flags); + + return ret; +} + +/* A small wrapper to also acknowledge the interrupt when it's handled. + * I really need an EIO hook for the vring so I can ack the interrupt once we + * know that we'll be handling the IRQ but before we invoke the callback since + * the callback may notify the host which results in the host attempting to + * raise an interrupt that we would then mask once we acknowledged the + * interrupt. */ +static irqreturn_t vp_interrupt(int irq, void *opaque) +{ + struct virtio_pci_device *vp_dev = opaque; + u8 isr; + + /* reading the ISR has the effect of also clearing it so it's very + * important to save off the value. */ + isr = ioread8(vp_dev->isr); + + /* It's definitely not us if the ISR was not high */ + if (!isr) + return IRQ_NONE; + + /* Configuration change? Tell driver if it wants to know. */ + if (isr & VIRTIO_PCI_ISR_CONFIG) + vp_config_changed(irq, opaque); + + return vp_vring_interrupt(irq, opaque); +} + +static void vp_free_vectors(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + int i; + + if (vp_dev->intx_enabled) { + free_irq(vp_dev->pci_dev->irq, vp_dev); + vp_dev->intx_enabled = 0; + } + + for (i = 0; i < vp_dev->msix_used_vectors; ++i) + free_irq(vp_dev->msix_entries[i].vector, vp_dev); + + for (i = 0; i < vp_dev->msix_vectors; i++) + if (vp_dev->msix_affinity_masks[i]) + free_cpumask_var(vp_dev->msix_affinity_masks[i]); + + if (vp_dev->msix_enabled) { + /* Disable the vector used for configuration */ + vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR); + + pci_disable_msix(vp_dev->pci_dev); + vp_dev->msix_enabled = 0; + } + + vp_dev->msix_vectors = 0; + vp_dev->msix_used_vectors = 0; + kfree(vp_dev->msix_names); + vp_dev->msix_names = NULL; + kfree(vp_dev->msix_entries); + vp_dev->msix_entries = NULL; + kfree(vp_dev->msix_affinity_masks); + vp_dev->msix_affinity_masks = NULL; +} + +static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, + bool per_vq_vectors) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + const char *name = dev_name(&vp_dev->vdev.dev); + unsigned i, v; + int err = -ENOMEM; + + vp_dev->msix_vectors = nvectors; + + vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, + GFP_KERNEL); + if (!vp_dev->msix_entries) + goto error; + vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, + GFP_KERNEL); + if (!vp_dev->msix_names) + goto error; + vp_dev->msix_affinity_masks + = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks, + GFP_KERNEL); + if (!vp_dev->msix_affinity_masks) + goto error; + for (i = 0; i < nvectors; ++i) + if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i], + GFP_KERNEL)) + goto error; + + for (i = 0; i < nvectors; ++i) + vp_dev->msix_entries[i].entry = i; + + err = pci_enable_msix_exact(vp_dev->pci_dev, + vp_dev->msix_entries, nvectors); + if (err) + goto error; + 
vp_dev->msix_enabled = 1; + + /* Set the vector used for configuration */ + v = vp_dev->msix_used_vectors; + snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, + "%s-config", name); + err = request_irq(vp_dev->msix_entries[v].vector, + vp_config_changed, 0, vp_dev->msix_names[v], + vp_dev); + if (err) + goto error; + ++vp_dev->msix_used_vectors; + + v = vp_dev->config_vector(vp_dev, v); + /* Verify we had enough resources to assign the vector */ + if (v == VIRTIO_MSI_NO_VECTOR) { + err = -EBUSY; + goto error; + } + + if (!per_vq_vectors) { + /* Shared vector for all VQs */ + v = vp_dev->msix_used_vectors; + snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, + "%s-virtqueues", name); + err = request_irq(vp_dev->msix_entries[v].vector, + vp_vring_interrupt, 0, vp_dev->msix_names[v], + vp_dev); + if (err) + goto error; + ++vp_dev->msix_used_vectors; + } + return 0; +error: + vp_free_vectors(vdev); + return err; +} + +static int vp_request_intx(struct virtio_device *vdev) +{ + int err; + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, + IRQF_SHARED, dev_name(&vdev->dev), vp_dev); + if (!err) + vp_dev->intx_enabled = 1; + return err; +} + +static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index, + void (*callback)(struct virtqueue *vq), + const char *name, + u16 msix_vec) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL); + struct virtqueue *vq; + unsigned long flags; + + /* fill out our structure that represents an active queue */ + if (!info) + return ERR_PTR(-ENOMEM); + + vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, msix_vec); + if (IS_ERR(vq)) + goto out_info; + + info->vq = vq; + if (callback) { + spin_lock_irqsave(&vp_dev->lock, flags); + list_add(&info->node, &vp_dev->virtqueues); + spin_unlock_irqrestore(&vp_dev->lock, flags); + } else { + INIT_LIST_HEAD(&info->node); + } + + vp_dev->vqs[index] = info; + return vq; + +out_info: + kfree(info); + return vq; +} + +static void vp_del_vq(struct virtqueue *vq) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); + struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; + unsigned long flags; + + spin_lock_irqsave(&vp_dev->lock, flags); + list_del(&info->node); + spin_unlock_irqrestore(&vp_dev->lock, flags); + + vp_dev->del_vq(info); + kfree(info); +} + +/* the config->del_vqs() implementation */ +void vp_del_vqs(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtqueue *vq, *n; + struct virtio_pci_vq_info *info; + + list_for_each_entry_safe(vq, n, &vdev->vqs, list) { + info = vp_dev->vqs[vq->index]; + if (vp_dev->per_vq_vectors && + info->msix_vector != VIRTIO_MSI_NO_VECTOR) + free_irq(vp_dev->msix_entries[info->msix_vector].vector, + vq); + vp_del_vq(vq); + } + vp_dev->per_vq_vectors = false; + + vp_free_vectors(vdev); + kfree(vp_dev->vqs); +} + +static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[], + bool use_msix, + bool per_vq_vectors) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + u16 msix_vec; + int i, err, nvectors, allocated_vectors; + + vp_dev->vqs = kmalloc(nvqs * sizeof *vp_dev->vqs, GFP_KERNEL); + if (!vp_dev->vqs) + return -ENOMEM; + + if (!use_msix) { + /* Old style: one normal interrupt for change and all vqs. 
*/ + err = vp_request_intx(vdev); + if (err) + goto error_find; + } else { + if (per_vq_vectors) { + /* Best option: one for change interrupt, one per vq. */ + nvectors = 1; + for (i = 0; i < nvqs; ++i) + if (callbacks[i]) + ++nvectors; + } else { + /* Second best: one for change, shared for all vqs. */ + nvectors = 2; + } + + err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors); + if (err) + goto error_find; + } + + vp_dev->per_vq_vectors = per_vq_vectors; + allocated_vectors = vp_dev->msix_used_vectors; + for (i = 0; i < nvqs; ++i) { + if (!names[i]) { + vqs[i] = NULL; + continue; + } else if (!callbacks[i] || !vp_dev->msix_enabled) + msix_vec = VIRTIO_MSI_NO_VECTOR; + else if (vp_dev->per_vq_vectors) + msix_vec = allocated_vectors++; + else + msix_vec = VP_MSIX_VQ_VECTOR; + vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], msix_vec); + if (IS_ERR(vqs[i])) { + err = PTR_ERR(vqs[i]); + goto error_find; + } + + if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR) + continue; + + /* allocate per-vq irq if available and necessary */ + snprintf(vp_dev->msix_names[msix_vec], + sizeof *vp_dev->msix_names, + "%s-%s", + dev_name(&vp_dev->vdev.dev), names[i]); + err = request_irq(vp_dev->msix_entries[msix_vec].vector, + vring_interrupt, 0, + vp_dev->msix_names[msix_vec], + vqs[i]); + if (err) { + vp_del_vq(vqs[i]); + goto error_find; + } + } + return 0; + +error_find: + vp_del_vqs(vdev); + return err; +} + +/* the config->find_vqs() implementation */ +int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]) +{ + int err; + + /* Try MSI-X with one vector per queue. */ + err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true); + if (!err) + return 0; + /* Fallback: MSI-X with one vector for config, one shared for queues. */ + err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, + true, false); + if (!err) + return 0; + /* Finally fall back to regular interrupts. */ + return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, + false, false); +} + +const char *vp_bus_name(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + return pci_name(vp_dev->pci_dev); +} + +/* Setup the affinity for a virtqueue: + * - force the affinity for per vq vector + * - OR over all affinities for shared MSI + * - ignore the affinity request if we're using INTX + */ +int vp_set_vq_affinity(struct virtqueue *vq, int cpu) +{ + struct virtio_device *vdev = vq->vdev; + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; + struct cpumask *mask; + unsigned int irq; + + if (!vq->callback) + return -EINVAL; + + if (vp_dev->msix_enabled) { + mask = vp_dev->msix_affinity_masks[info->msix_vector]; + irq = vp_dev->msix_entries[info->msix_vector].vector; + if (cpu == -1) + irq_set_affinity_hint(irq, NULL); + else { + cpumask_set_cpu(cpu, mask); + irq_set_affinity_hint(irq, mask); + } + } + return 0; +} + +void virtio_pci_release_dev(struct device *_d) +{ + /* + * No need for a release method as we allocate/free + * all devices together with the pci devices. + * Provide an empty one to avoid getting a warning from core. 
+ */ +} + +#ifdef CONFIG_PM_SLEEP +static int virtio_pci_freeze(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); + int ret; + + ret = virtio_device_freeze(&vp_dev->vdev); + + if (!ret) + pci_disable_device(pci_dev); + return ret; +} + +static int virtio_pci_restore(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); + int ret; + + ret = pci_enable_device(pci_dev); + if (ret) + return ret; + + pci_set_master(pci_dev); + return virtio_device_restore(&vp_dev->vdev); +} + +const struct dev_pm_ops virtio_pci_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore) +}; +#endif diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h new file mode 100644 index 000000000000..d840dad4149d --- /dev/null +++ b/drivers/virtio/virtio_pci_common.h @@ -0,0 +1,136 @@ +#ifndef _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H +#define _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H +/* + * Virtio PCI driver - APIs for common functionality for all device versions + * + * This module allows virtio devices to be used over a virtual PCI device. + * This can be used with QEMU based VMMs like KVM or Xen. + * + * Copyright IBM Corp. 2007 + * Copyright Red Hat, Inc. 2014 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * Rusty Russell <rusty@rustcorp.com.au> + * Michael S. Tsirkin <mst@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include <linux/module.h> +#include <linux/list.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/virtio.h> +#include <linux/virtio_config.h> +#include <linux/virtio_ring.h> +#define VIRTIO_PCI_NO_LEGACY +#include <linux/virtio_pci.h> +#include <linux/highmem.h> +#include <linux/spinlock.h> + +struct virtio_pci_vq_info { + /* the actual virtqueue */ + struct virtqueue *vq; + + /* the number of entries in the queue */ + int num; + + /* the virtual address of the ring queue */ + void *queue; + + /* the list node for the virtqueues list */ + struct list_head node; + + /* MSI-X vector (or none) */ + unsigned msix_vector; +}; + +/* Our device structure */ +struct virtio_pci_device { + struct virtio_device vdev; + struct pci_dev *pci_dev; + + /* the IO mapping for the PCI config space */ + void __iomem *ioaddr; + + /* the IO mapping for ISR operation */ + void __iomem *isr; + + /* a list of queues so we can dispatch IRQs */ + spinlock_t lock; + struct list_head virtqueues; + + /* array of all queues for house-keeping */ + struct virtio_pci_vq_info **vqs; + + /* MSI-X support */ + int msix_enabled; + int intx_enabled; + struct msix_entry *msix_entries; + cpumask_var_t *msix_affinity_masks; + /* Name strings for interrupts. This size should be enough, + * and I'm too lazy to allocate each name separately. 
*/ + char (*msix_names)[256]; + /* Number of available vectors */ + unsigned msix_vectors; + /* Vectors allocated, excluding per-vq vectors if any */ + unsigned msix_used_vectors; + + /* Whether we have vector per vq */ + bool per_vq_vectors; + + struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev, + struct virtio_pci_vq_info *info, + unsigned idx, + void (*callback)(struct virtqueue *vq), + const char *name, + u16 msix_vec); + void (*del_vq)(struct virtio_pci_vq_info *info); + + u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector); +}; + +/* Constants for MSI-X */ +/* Use first vector for configuration changes, second and the rest for + * virtqueues Thus, we need at least 2 vectors for MSI. */ +enum { + VP_MSIX_CONFIG_VECTOR = 0, + VP_MSIX_VQ_VECTOR = 1, +}; + +/* Convert a generic virtio device to our structure */ +static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) +{ + return container_of(vdev, struct virtio_pci_device, vdev); +} + +/* wait for pending irq handlers */ +void vp_synchronize_vectors(struct virtio_device *vdev); +/* the notify function used when creating a virt queue */ +bool vp_notify(struct virtqueue *vq); +/* the config->del_vqs() implementation */ +void vp_del_vqs(struct virtio_device *vdev); +/* the config->find_vqs() implementation */ +int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]); +const char *vp_bus_name(struct virtio_device *vdev); + +/* Setup the affinity for a virtqueue: + * - force the affinity for per vq vector + * - OR over all affinities for shared MSI + * - ignore the affinity request if we're using INTX + */ +int vp_set_vq_affinity(struct virtqueue *vq, int cpu); +void virtio_pci_release_dev(struct device *); + +#ifdef CONFIG_PM_SLEEP +extern const struct dev_pm_ops virtio_pci_pm_ops; +#endif + +#endif diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c new file mode 100644 index 000000000000..2588252e5c1c --- /dev/null +++ b/drivers/virtio/virtio_pci_legacy.c @@ -0,0 +1,326 @@ +/* + * Virtio PCI driver - legacy device support + * + * This module allows virtio devices to be used over a virtual PCI device. + * This can be used with QEMU based VMMs like KVM or Xen. + * + * Copyright IBM Corp. 2007 + * Copyright Red Hat, Inc. 2014 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * Rusty Russell <rusty@rustcorp.com.au> + * Michael S. Tsirkin <mst@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "virtio_pci_common.h" + +/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */ +static const struct pci_device_id virtio_pci_id_table[] = { + { PCI_DEVICE(0x1af4, PCI_ANY_ID) }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); + +/* virtio config->get_features() implementation */ +static u64 vp_get_features(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + /* When someone needs more than 32 feature bits, we'll need to + * steal a bit to indicate that the rest are somewhere else. */ + return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES); +} + +/* virtio config->finalize_features() implementation */ +static int vp_finalize_features(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + /* Give virtio_ring a chance to accept features. 
*/ + vring_transport_features(vdev); + + /* Make sure we don't have any features > 32 bits! */ + BUG_ON((u32)vdev->features != vdev->features); + + /* We only support 32 feature bits. */ + iowrite32(vdev->features, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES); + + return 0; +} + +/* virtio config->get() implementation */ +static void vp_get(struct virtio_device *vdev, unsigned offset, + void *buf, unsigned len) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + void __iomem *ioaddr = vp_dev->ioaddr + + VIRTIO_PCI_CONFIG(vp_dev) + offset; + u8 *ptr = buf; + int i; + + for (i = 0; i < len; i++) + ptr[i] = ioread8(ioaddr + i); +} + +/* the config->set() implementation. it's symmetric to the config->get() + * implementation */ +static void vp_set(struct virtio_device *vdev, unsigned offset, + const void *buf, unsigned len) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + void __iomem *ioaddr = vp_dev->ioaddr + + VIRTIO_PCI_CONFIG(vp_dev) + offset; + const u8 *ptr = buf; + int i; + + for (i = 0; i < len; i++) + iowrite8(ptr[i], ioaddr + i); +} + +/* config->{get,set}_status() implementations */ +static u8 vp_get_status(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); +} + +static void vp_set_status(struct virtio_device *vdev, u8 status) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + /* We should never be setting status to 0. */ + BUG_ON(status == 0); + iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS); +} + +static void vp_reset(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + /* 0 status means a reset. */ + iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS); + /* Flush out the status write, and flush in device writes, + * including MSi-X interrupts, if any. */ + ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); + /* Flush pending VQ/configuration callbacks. */ + vp_synchronize_vectors(vdev); +} + +static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector) +{ + /* Setup the vector used for configuration events */ + iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); + /* Verify we had enough resources to assign the vector */ + /* Will also flush the write out to device */ + return ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); +} + +static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, + struct virtio_pci_vq_info *info, + unsigned index, + void (*callback)(struct virtqueue *vq), + const char *name, + u16 msix_vec) +{ + struct virtqueue *vq; + unsigned long size; + u16 num; + int err; + + /* Select the queue we're interested in */ + iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); + + /* Check if queue is either not available or already active. 
*/ + num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM); + if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN)) + return ERR_PTR(-ENOENT); + + info->num = num; + info->msix_vector = msix_vec; + + size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); + info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); + if (info->queue == NULL) + return ERR_PTR(-ENOMEM); + + /* activate the queue */ + iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT, + vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); + + /* create the vring */ + vq = vring_new_virtqueue(index, info->num, + VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev, + true, info->queue, vp_notify, callback, name); + if (!vq) { + err = -ENOMEM; + goto out_activate_queue; + } + + vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY; + + if (msix_vec != VIRTIO_MSI_NO_VECTOR) { + iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); + msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); + if (msix_vec == VIRTIO_MSI_NO_VECTOR) { + err = -EBUSY; + goto out_assign; + } + } + + return vq; + +out_assign: + vring_del_virtqueue(vq); +out_activate_queue: + iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); + free_pages_exact(info->queue, size); + return ERR_PTR(err); +} + +static void del_vq(struct virtio_pci_vq_info *info) +{ + struct virtqueue *vq = info->vq; + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); + unsigned long size; + + iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); + + if (vp_dev->msix_enabled) { + iowrite16(VIRTIO_MSI_NO_VECTOR, + vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); + /* Flush the write out to device */ + ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); + } + + vring_del_virtqueue(vq); + + /* Select and deactivate the queue */ + iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); + + size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN)); + free_pages_exact(info->queue, size); +} + +static const struct virtio_config_ops virtio_pci_config_ops = { + .get = vp_get, + .set = vp_set, + .get_status = vp_get_status, + .set_status = vp_set_status, + .reset = vp_reset, + .find_vqs = vp_find_vqs, + .del_vqs = vp_del_vqs, + .get_features = vp_get_features, + .finalize_features = vp_finalize_features, + .bus_name = vp_bus_name, + .set_vq_affinity = vp_set_vq_affinity, +}; + +/* the PCI probing function */ +static int virtio_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *id) +{ + struct virtio_pci_device *vp_dev; + int err; + + /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */ + if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f) + return -ENODEV; + + if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) { + printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n", + VIRTIO_PCI_ABI_VERSION, pci_dev->revision); + return -ENODEV; + } + + /* allocate our structure and fill it out */ + vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL); + if (vp_dev == NULL) + return -ENOMEM; + + vp_dev->vdev.dev.parent = &pci_dev->dev; + vp_dev->vdev.dev.release = virtio_pci_release_dev; + vp_dev->vdev.config = &virtio_pci_config_ops; + vp_dev->pci_dev = pci_dev; + INIT_LIST_HEAD(&vp_dev->virtqueues); + spin_lock_init(&vp_dev->lock); + + /* Disable MSI/MSIX to bring device to a known good state. 
*/ + pci_msi_off(pci_dev); + + /* enable the device */ + err = pci_enable_device(pci_dev); + if (err) + goto out; + + err = pci_request_regions(pci_dev, "virtio-pci"); + if (err) + goto out_enable_device; + + vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0); + if (vp_dev->ioaddr == NULL) { + err = -ENOMEM; + goto out_req_regions; + } + + vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR; + + pci_set_drvdata(pci_dev, vp_dev); + pci_set_master(pci_dev); + + /* we use the subsystem vendor/device id as the virtio vendor/device + * id. this allows us to use the same PCI vendor/device id for all + * virtio devices and to identify the particular virtio driver by + * the subsystem ids */ + vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; + vp_dev->vdev.id.device = pci_dev->subsystem_device; + + vp_dev->config_vector = vp_config_vector; + vp_dev->setup_vq = setup_vq; + vp_dev->del_vq = del_vq; + + /* finally register the virtio device */ + err = register_virtio_device(&vp_dev->vdev); + if (err) + goto out_set_drvdata; + + return 0; + +out_set_drvdata: + pci_iounmap(pci_dev, vp_dev->ioaddr); +out_req_regions: + pci_release_regions(pci_dev); +out_enable_device: + pci_disable_device(pci_dev); +out: + kfree(vp_dev); + return err; +} + +static void virtio_pci_remove(struct pci_dev *pci_dev) +{ + struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); + + unregister_virtio_device(&vp_dev->vdev); + + vp_del_vqs(&vp_dev->vdev); + pci_iounmap(pci_dev, vp_dev->ioaddr); + pci_release_regions(pci_dev); + pci_disable_device(pci_dev); + kfree(vp_dev); +} + +static struct pci_driver virtio_pci_driver = { + .name = "virtio-pci", + .id_table = virtio_pci_id_table, + .probe = virtio_pci_probe, + .remove = virtio_pci_remove, +#ifdef CONFIG_PM_SLEEP + .driver.pm = &virtio_pci_pm_ops, +#endif +}; + +module_pci_driver(virtio_pci_driver); diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 3b1f89b6e743..00ec6b3f96b2 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -99,7 +99,8 @@ struct vring_virtqueue #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) -static struct vring_desc *alloc_indirect(unsigned int total_sg, gfp_t gfp) +static struct vring_desc *alloc_indirect(struct virtqueue *_vq, + unsigned int total_sg, gfp_t gfp) { struct vring_desc *desc; unsigned int i; @@ -116,7 +117,7 @@ static struct vring_desc *alloc_indirect(unsigned int total_sg, gfp_t gfp) return NULL; for (i = 0; i < total_sg; i++) - desc[i].next = i+1; + desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1); return desc; } @@ -165,17 +166,17 @@ static inline int virtqueue_add(struct virtqueue *_vq, /* If the host supports indirect descriptor tables, and we have multiple * buffers, then go indirect. FIXME: tune this threshold */ if (vq->indirect && total_sg > 1 && vq->vq.num_free) - desc = alloc_indirect(total_sg, gfp); + desc = alloc_indirect(_vq, total_sg, gfp); else desc = NULL; if (desc) { /* Use a single buffer which doesn't continue */ - vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT; - vq->vring.desc[head].addr = virt_to_phys(desc); + vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT); + vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, virt_to_phys(desc)); /* avoid kmemleak false positive (hidden by virt_to_phys) */ kmemleak_ignore(desc); - vq->vring.desc[head].len = total_sg * sizeof(struct vring_desc); + vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc)); /* Set up rest to use this indirect table. 
*/ i = 0; @@ -205,28 +206,28 @@ static inline int virtqueue_add(struct virtqueue *_vq, for (n = 0; n < out_sgs; n++) { for (sg = sgs[n]; sg; sg = sg_next(sg)) { - desc[i].flags = VRING_DESC_F_NEXT; - desc[i].addr = sg_phys(sg); - desc[i].len = sg->length; + desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT); + desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg)); + desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length); prev = i; - i = desc[i].next; + i = virtio16_to_cpu(_vq->vdev, desc[i].next); } } for (; n < (out_sgs + in_sgs); n++) { for (sg = sgs[n]; sg; sg = sg_next(sg)) { - desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; - desc[i].addr = sg_phys(sg); - desc[i].len = sg->length; + desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE); + desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg)); + desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length); prev = i; - i = desc[i].next; + i = virtio16_to_cpu(_vq->vdev, desc[i].next); } } /* Last one doesn't continue. */ - desc[prev].flags &= ~VRING_DESC_F_NEXT; + desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT); /* Update free pointer */ if (indirect) - vq->free_head = vq->vring.desc[head].next; + vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next); else vq->free_head = i; @@ -235,13 +236,13 @@ static inline int virtqueue_add(struct virtqueue *_vq, /* Put entry in available array (but don't update avail->idx until they * do sync). */ - avail = (vq->vring.avail->idx & (vq->vring.num-1)); - vq->vring.avail->ring[avail] = head; + avail = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) & (vq->vring.num - 1); + vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); /* Descriptors and available array need to be set before we expose the * new available array entries. */ virtio_wmb(vq->weak_barriers); - vq->vring.avail->idx++; + vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1); vq->num_added++; /* This is very unlikely, but theoretically possible. Kick @@ -354,8 +355,8 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq) * event. 
*/ virtio_mb(vq->weak_barriers); - old = vq->vring.avail->idx - vq->num_added; - new = vq->vring.avail->idx; + old = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->num_added; + new = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx); vq->num_added = 0; #ifdef DEBUG @@ -367,10 +368,10 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq) #endif if (vq->event) { - needs_kick = vring_need_event(vring_avail_event(&vq->vring), + needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)), new, old); } else { - needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY); + needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY)); } END_USE(vq); return needs_kick; @@ -432,15 +433,15 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head) i = head; /* Free the indirect table */ - if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT) - kfree(phys_to_virt(vq->vring.desc[i].addr)); + if (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)) + kfree(phys_to_virt(virtio64_to_cpu(vq->vq.vdev, vq->vring.desc[i].addr))); - while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) { - i = vq->vring.desc[i].next; + while (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT)) { + i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next); vq->vq.num_free++; } - vq->vring.desc[i].next = vq->free_head; + vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head); vq->free_head = head; /* Plus final descriptor */ vq->vq.num_free++; @@ -448,7 +449,7 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head) static inline bool more_used(const struct vring_virtqueue *vq) { - return vq->last_used_idx != vq->vring.used->idx; + return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx); } /** @@ -491,8 +492,8 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) virtio_rmb(vq->weak_barriers); last_used = (vq->last_used_idx & (vq->vring.num - 1)); - i = vq->vring.used->ring[last_used].id; - *len = vq->vring.used->ring[last_used].len; + i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id); + *len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len); if (unlikely(i >= vq->vring.num)) { BAD_RING(vq, "id %u out of range\n", i); @@ -510,8 +511,8 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) /* If we expect an interrupt for the next entry, tell host * by writing event index and flush out the write before * the read in the next get_buf call. */ - if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) { - vring_used_event(&vq->vring) = vq->last_used_idx; + if (!(vq->vring.avail->flags & cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT))) { + vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx); virtio_mb(vq->weak_barriers); } @@ -537,7 +538,7 @@ void virtqueue_disable_cb(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); - vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; + vq->vring.avail->flags |= cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT); } EXPORT_SYMBOL_GPL(virtqueue_disable_cb); @@ -565,8 +566,8 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq) /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to * either clear the flags bit or point the event index at the next * entry. Always do both to keep code simple. 
 */
-	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
-	vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
+	vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
+	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
 	END_USE(vq);
 	return last_used_idx;
 }
@@ -586,7 +587,7 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
 	struct vring_virtqueue *vq = to_vvq(_vq);

 	virtio_mb(vq->weak_barriers);
-	return (u16)last_used_idx != vq->vring.used->idx;
+	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
 }
 EXPORT_SYMBOL_GPL(virtqueue_poll);

@@ -633,12 +634,12 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
 	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
 	 * either clear the flags bit or point the event index at the next
 	 * entry. Always do both to keep code simple. */
-	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+	vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
 	/* TODO: tune this threshold */
-	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
-	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
+	bufs = (u16)(virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->last_used_idx) * 3 / 4;
+	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs);
 	virtio_mb(vq->weak_barriers);
-	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
+	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
 		END_USE(vq);
 		return false;
 	}
@@ -670,7 +671,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
 		/* detach_buf clears data, so grab it now. */
 		buf = vq->data[i];
 		detach_buf(vq, i);
-		vq->vring.avail->idx--;
+		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - 1);
 		END_USE(vq);
 		return buf;
 	}
@@ -747,12 +748,12 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,

 	/* No callback?  Tell other side not to bother us. */
 	if (!callback)
-		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+		vq->vring.avail->flags |= cpu_to_virtio16(vdev, VRING_AVAIL_F_NO_INTERRUPT);

 	/* Put everything in free lists. */
 	vq->free_head = 0;
 	for (i = 0; i < num-1; i++) {
-		vq->vring.desc[i].next = i+1;
+		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
 		vq->data[i] = NULL;
 	}
 	vq->data[i] = NULL;
@@ -779,9 +780,11 @@ void vring_transport_features(struct virtio_device *vdev)
 			break;
 		case VIRTIO_RING_F_EVENT_IDX:
 			break;
+		case VIRTIO_F_VERSION_1:
+			break;
 		default:
 			/* We don't understand this bit. */
-			clear_bit(i, vdev->features);
+			__virtio_clear_bit(vdev, i);
 		}
 	}
 }
@@ -826,4 +829,20 @@ void virtio_break_device(struct virtio_device *dev)
 }
 EXPORT_SYMBOL_GPL(virtio_break_device);

+void *virtqueue_get_avail(struct virtqueue *_vq)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+
+	return vq->vring.avail;
+}
+EXPORT_SYMBOL_GPL(virtqueue_get_avail);
+
+void *virtqueue_get_used(struct virtqueue *_vq)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+
+	return vq->vring.used;
+}
+EXPORT_SYMBOL_GPL(virtqueue_get_used);
+
 MODULE_LICENSE("GPL");
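
(The sketches below are editorial illustrations; nothing in them is part of the patch, and every helper name, size and address in them is invented.)

The legacy setup_vq() above sizes the ring with vring_size() and then programs VIRTIO_PCI_QUEUE_PFN with a page frame number, not a byte address. A rough userspace sketch of that arithmetic, assuming the split-ring layout of the legacy spec (legacy_vring_size() stands in for the kernel's vring_size(); num and queue_pa are arbitrary example values):

#include <stdint.h>
#include <stdio.h>

#define VIRTIO_PCI_VRING_ALIGN      4096
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12

/* Layout per the legacy spec: descriptor table, then the available
 * ring, then, padded out to 'align', the used ring. */
static size_t legacy_vring_size(unsigned int num, size_t align)
{
	size_t desc  = 16u * num;        /* struct vring_desc entries      */
	size_t avail = 2u * (3u + num);  /* flags, idx, ring[], used_event */
	size_t used  = 6u + 8u * num;    /* flags, idx, elems, avail_event */

	return ((desc + avail + align - 1) & ~(align - 1)) + used;
}

int main(void)
{
	unsigned int num = 256;          /* as read from VIRTIO_PCI_QUEUE_NUM  */
	uint64_t queue_pa = 0x7f000000;  /* hypothetical ring physical address */

	printf("ring bytes: %zu\n", legacy_vring_size(num, VIRTIO_PCI_VRING_ALIGN));
	printf("QUEUE_PFN:  0x%llx\n",
	       (unsigned long long)(queue_pa >> VIRTIO_PCI_QUEUE_ADDR_SHIFT));
	return 0;
}

Because QUEUE_PFN is a 32-bit register, a legacy device can only place its rings below 2^44 bytes of physical memory; the modern interface this series prepares for carries full 64-bit queue addresses instead.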
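
Most of the virtio_ring.c churn is mechanical: every ring field access gains a cpu_to_virtio{16,32,64}()/virtio{16,32,64}_to_cpu() wrapper. The rule these helpers implement is that a device that negotiated VIRTIO_F_VERSION_1 always uses little-endian ring fields, while a legacy device uses the guest's native byte order. A self-contained sketch of the 16-bit case (sketch_cpu_to_virtio16() is an invented stand-in, and the device_is_v1 flag replaces the kernel's per-device feature test):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static inline uint16_t swab16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

static inline bool cpu_is_big_endian(void)
{
	const uint16_t probe = 1;
	return *(const uint8_t *)&probe == 0;  /* first byte zero => big endian */
}

static inline uint16_t sketch_cpu_to_virtio16(bool device_is_v1, uint16_t v)
{
	if (device_is_v1)
		return cpu_is_big_endian() ? swab16(v) : v;  /* force LE */
	return v;  /* legacy: native order, nothing to convert */
}

int main(void)
{
	printf("v1 encoding of 0x1234:     0x%04x\n", sketch_cpu_to_virtio16(true, 0x1234));
	printf("legacy encoding of 0x1234: 0x%04x\n", sketch_cpu_to_virtio16(false, 0x1234));
	return 0;
}

On a little-endian guest both branches are the identity, so the conversions compile away and the patch costs nothing on x86; only big-endian guests talking to v1 devices actually swap.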
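
The virtqueue_add() hunks keep the chaining logic intact while converting each store: buffers are linked through desc[i].next with VRING_DESC_F_NEXT set, and the flag is cleared on the final descriptor, as in the "desc[prev].flags &= ~VRING_DESC_F_NEXT" line above. A host-endian toy version of that loop (struct toy_desc and the addresses are invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define VRING_DESC_F_NEXT 1

/* Host-endian toy descriptor; the real struct stores these fields in
 * the device's byte order via the conversion helpers. */
struct toy_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
	uint16_t next;
};

int main(void)
{
	struct toy_desc d[3];
	const uint64_t addr[3] = { 0x1000, 0x2000, 0x3000 };  /* made up */
	const uint32_t len[3]  = { 512, 512, 128 };

	for (int i = 0; i < 3; i++) {
		d[i].addr  = addr[i];
		d[i].len   = len[i];
		d[i].flags = VRING_DESC_F_NEXT;
		d[i].next  = (uint16_t)(i + 1);
	}
	/* Last one doesn't continue: the same fix-up virtqueue_add() does. */
	d[2].flags &= (uint16_t)~VRING_DESC_F_NEXT;

	for (int i = 0; i < 3; i++)
		printf("desc[%d]: addr=0x%llx len=%u flags=%u next=%u\n",
		       i, (unsigned long long)d[i].addr, d[i].len,
		       d[i].flags, d[i].next);
	return 0;
}

The final next field is left dangling on purpose: it is never read once NEXT is clear, which is also why the kernel can leave it pointing into the free list.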
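
The avail-ring publish step in virtqueue_add() is ordering-sensitive: the slot write must be visible before the index bump, which is what the virtio_wmb() between them guarantees. A host-endian sketch (struct avail_sketch is invented, and the C11 release fence is only an approximate stand-in for the kernel's virtio_wmb()):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct avail_sketch {
	uint16_t idx;        /* free-running; only the low bits index ring[] */
	uint16_t ring[256];  /* power-of-two size, so masking is enough */
};

static void publish(struct avail_sketch *a, uint16_t head)
{
	a->ring[a->idx & 255u] = head;
	/* Stands in for virtio_wmb(): slot before index. */
	atomic_thread_fence(memory_order_release);
	a->idx++;  /* the patched kernel does this via a virtio16 round-trip */
}

int main(void)
{
	struct avail_sketch a = { 0 };

	publish(&a, 42);
	publish(&a, 7);
	printf("idx=%u ring[0]=%u ring[1]=%u\n", a.idx, a.ring[0], a.ring[1]);
	return 0;
}

The awkward-looking "idx = cpu_to_virtio16(..., virtio16_to_cpu(..., idx) + 1)" in the patch is exactly this increment, done on a field that now lives in device byte order.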
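
virtqueue_kick_prepare() now byte-swaps the avail index and the device's event index before comparing them, but the comparison itself is the unchanged test from the virtio spec, which leans on deliberate u16 wraparound:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* The standard event-index test from the virtio spec, written out as a
 * function; all three values are free-running 16-bit counters. */
static bool need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old_idx)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old_idx);
}

int main(void)
{
	/* Device asked to be kicked at index 5; avail moved from 3 to 7. */
	printf("%d\n", need_event(5, 7, 3));   /* 1: 5 lies in [3, 7) */
	/* Same batch, but the device's mark is already ahead of us. */
	printf("%d\n", need_event(9, 7, 3));   /* 0: no kick needed */
	return 0;
}

It evaluates true exactly when event_idx lies in [old_idx, new_idx) modulo 2^16, i.e. when the device's requested wakeup point was crossed by the buffers just added.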