author		Linus Torvalds <torvalds@linux-foundation.org>	2013-09-03 11:36:27 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-03 11:36:27 -0700
commit		1d1fdd95df681f0c065d90ffaafa215a0e8825e2 (patch)
tree		19016e131bb5c7eb280a4cc8dff864ba36e53be4 /drivers/misc
parent		b3b49114c80e799af8b08c0c6d1ff886ea843f03 (diff)
parent		3cc1f95283a125cf54ccf1e25065321d4385133b (diff)
download	linux-1d1fdd95df681f0c065d90ffaafa215a0e8825e2.tar.bz2
Merge tag 'char-misc-3.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc patches from Greg KH:
"Here is the big char/misc driver pull request for 3.12-rc1
Lots of driver updates all over the char/misc tree, full details in
the shortlog"
* tag 'char-misc-3.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (62 commits)
drivers: uio: Kconfig: add MMU dependancy for UIO
drivers: uio: Add driver for Humusoft MF624 DAQ PCI card
drivers: uio_pdrv_genirq: use dev_get_platdata()
drivers: uio_pruss: use dev_get_platdata()
drivers: uio_dmem_genirq: use dev_get_platdata()
drivers: parport: Kconfig: exclude h8300 for PARPORT_PC
drivers: misc: ti-st: fix potential race if st_kim_start fails
Drivers: hv: vmbus: Do not attempt to negoatiate a new version prematurely
misc: vmw_balloon: Remove braces to fix build for clang.
Drivers: hv: vmbus: Fix a bug in the handling of channel offers
vme: vme_ca91cx42.c: fix to pass correct device identity to free_irq()
VMCI: Add support for virtual IOMMU
VMCI: Remove non-blocking/pinned queuepair support
uio: uio_pruss: remove unnecessary platform_set_drvdata()
parport: amiga: remove unnecessary platform_set_drvdata()
vme: vme_vmivme7805.c: add missing __iomem annotation
vme: vme_ca91cx42.c: add missing __iomem annotation
vme: vme_tsi148.c: add missing __iomem annotation
drivers/misc/hpilo: Correct panic when an AUX iLO is detected
uio: drop unused vma_count member in uio_device struct
...
Diffstat (limited to 'drivers/misc')
-rw-r--r--	drivers/misc/atmel-ssc.c			8
-rw-r--r--	drivers/misc/hpilo.c				4
-rw-r--r--	drivers/misc/lkdtm.c				63
-rw-r--r--	drivers/misc/mei/amthif.c			14
-rw-r--r--	drivers/misc/mei/bus.c				4
-rw-r--r--	drivers/misc/mei/client.c			15
-rw-r--r--	drivers/misc/mei/client.h			9
-rw-r--r--	drivers/misc/mei/hw-me.c			9
-rw-r--r--	drivers/misc/mei/init.c				11
-rw-r--r--	drivers/misc/mei/main.c				22
-rw-r--r--	drivers/misc/sram.c				3
-rw-r--r--	drivers/misc/ti-st/st_core.c			2
-rw-r--r--	drivers/misc/vmw_balloon.c			2
-rw-r--r--	drivers/misc/vmw_vmci/vmci_driver.c		2
-rw-r--r--	drivers/misc/vmw_vmci/vmci_driver.h		7
-rw-r--r--	drivers/misc/vmw_vmci/vmci_guest.c		22
-rw-r--r--	drivers/misc/vmw_vmci/vmci_queue_pair.c		315
-rw-r--r--	drivers/misc/vmw_vmci/vmci_queue_pair.h		18
18 files changed, 263 insertions(+), 267 deletions(-)
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index e068a76a5f6f..5be808406edc 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -19,7 +19,6 @@
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/pinctrl/consumer.h>
 
 /* Serialize access to ssc_list and user count */
 static DEFINE_SPINLOCK(user_lock);
 
@@ -137,13 +136,6 @@ static int ssc_probe(struct platform_device *pdev)
 	struct resource *regs;
 	struct ssc_device *ssc;
 	const struct atmel_ssc_platform_data *plat_dat;
-	struct pinctrl *pinctrl;
-
-	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
-	if (IS_ERR(pinctrl)) {
-		dev_err(&pdev->dev, "Failed to request pinctrl\n");
-		return PTR_ERR(pinctrl);
-	}
 
 	ssc = devm_kzalloc(&pdev->dev, sizeof(struct ssc_device), GFP_KERNEL);
 	if (!ssc) {
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index 621c7a373390..b83e3ca12a41 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -759,7 +759,7 @@ static int ilo_probe(struct pci_dev *pdev,
 
 	/* Ignore subsystem_device = 0x1979 (set by BIOS) */
 	if (pdev->subsystem_device == 0x1979)
-		goto out;
+		return 0;
 
 	if (max_ccb > MAX_CCB)
 		max_ccb = MAX_CCB;
@@ -899,7 +899,7 @@ static void __exit ilo_exit(void)
 	class_destroy(ilo_class);
 }
 
-MODULE_VERSION("1.4");
+MODULE_VERSION("1.4.1");
 MODULE_ALIAS(ILO_NAME);
 MODULE_DESCRIPTION(ILO_NAME);
 MODULE_AUTHOR("David Altobelli <david.altobelli@hp.com>");
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 08aad69c8da4..2fc0586ce3bb 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -43,6 +43,7 @@
 #include <linux/slab.h>
 #include <scsi/scsi_cmnd.h>
 #include <linux/debugfs.h>
+#include <linux/vmalloc.h>
 
 #ifdef CONFIG_IDE
 #include <linux/ide.h>
@@ -50,6 +51,7 @@
 
 #define DEFAULT_COUNT 10
 #define REC_NUM_DEFAULT 10
+#define EXEC_SIZE 64
 
 enum cname {
 	CN_INVALID,
@@ -68,6 +70,7 @@ enum ctype {
 	CT_NONE,
 	CT_PANIC,
 	CT_BUG,
+	CT_WARNING,
 	CT_EXCEPTION,
 	CT_LOOP,
 	CT_OVERFLOW,
@@ -77,7 +80,12 @@ enum ctype {
 	CT_WRITE_AFTER_FREE,
 	CT_SOFTLOCKUP,
 	CT_HARDLOCKUP,
+	CT_SPINLOCKUP,
 	CT_HUNG_TASK,
+	CT_EXEC_DATA,
+	CT_EXEC_STACK,
+	CT_EXEC_KMALLOC,
+	CT_EXEC_VMALLOC,
 };
 
 static char* cp_name[] = {
@@ -95,6 +103,7 @@ static char* cp_name[] = {
 static char* cp_type[] = {
 	"PANIC",
 	"BUG",
+	"WARNING",
 	"EXCEPTION",
 	"LOOP",
 	"OVERFLOW",
@@ -104,7 +113,12 @@ static char* cp_type[] = {
 	"WRITE_AFTER_FREE",
 	"SOFTLOCKUP",
 	"HARDLOCKUP",
+	"SPINLOCKUP",
 	"HUNG_TASK",
+	"EXEC_DATA",
+	"EXEC_STACK",
+	"EXEC_KMALLOC",
+	"EXEC_VMALLOC",
 };
 
 static struct jprobe lkdtm;
@@ -121,6 +135,9 @@ static enum cname cpoint = CN_INVALID;
 static enum ctype cptype = CT_NONE;
 static int count = DEFAULT_COUNT;
 static DEFINE_SPINLOCK(count_lock);
+static DEFINE_SPINLOCK(lock_me_up);
+
+static u8 data_area[EXEC_SIZE];
 
 module_param(recur_count, int, 0644);
 MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\
@@ -275,6 +292,19 @@ static int recursive_loop(int a)
 		return recursive_loop(a);
 }
 
+static void do_nothing(void)
+{
+	return;
+}
+
+static void execute_location(void *dst)
+{
+	void (*func)(void) = dst;
+
+	memcpy(dst, do_nothing, EXEC_SIZE);
+	func();
+}
+
 static void lkdtm_do_action(enum ctype which)
 {
 	switch (which) {
@@ -284,6 +314,9 @@ static void lkdtm_do_action(enum ctype which)
 	case CT_BUG:
 		BUG();
 		break;
+	case CT_WARNING:
+		WARN_ON(1);
+		break;
 	case CT_EXCEPTION:
 		*((int *) 0) = 0;
 		break;
@@ -295,10 +328,10 @@ static void lkdtm_do_action(enum ctype which)
 		(void) recursive_loop(0);
 		break;
 	case CT_CORRUPT_STACK: {
-		volatile u32 data[8];
-		volatile u32 *p = data;
+		/* Make sure the compiler creates and uses an 8 char array. */
+		volatile char data[8];
 
-		p[12] = 0x12345678;
+		memset((void *)data, 0, 64);
 		break;
 	}
 	case CT_UNALIGNED_LOAD_STORE_WRITE: {
@@ -340,10 +373,34 @@ static void lkdtm_do_action(enum ctype which)
 		for (;;)
 			cpu_relax();
 		break;
+	case CT_SPINLOCKUP:
+		/* Must be called twice to trigger. */
+		spin_lock(&lock_me_up);
+		break;
 	case CT_HUNG_TASK:
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		schedule();
 		break;
+	case CT_EXEC_DATA:
+		execute_location(data_area);
+		break;
+	case CT_EXEC_STACK: {
+		u8 stack_area[EXEC_SIZE];
+		execute_location(stack_area);
+		break;
+	}
+	case CT_EXEC_KMALLOC: {
+		u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
+		execute_location(kmalloc_area);
+		kfree(kmalloc_area);
+		break;
+	}
+	case CT_EXEC_VMALLOC: {
+		u32 *vmalloc_area = vmalloc(EXEC_SIZE);
+		execute_location(vmalloc_area);
+		vfree(vmalloc_area);
+		break;
+	}
 	case CT_NONE:
 	default:
 		break;
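The lkdtm hunks above add WARNING and SPINLOCKUP triggers plus four EXEC_* tests. The interesting piece is execute_location(): it copies the body of do_nothing() into the target region and jumps there, so the test only faults where the kernel actually enforces non-executable memory. A rough user-space sketch of driving one of the new tests through lkdtm's debugfs interface follows; the path assumes debugfs is mounted at /sys/kernel/debug and CONFIG_LKDTM is enabled, so treat it as illustrative rather than guaranteed.

/*
 * Illustrative trigger for an lkdtm crash type. Assumes the DIRECT
 * debugfs interface at the path below; expect an oops if the kernel's
 * NX protections catch the jump into the data region.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *cmd = "EXEC_DATA";
	int fd = open("/sys/kernel/debug/provoke-crash/DIRECT", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* lkdtm parses the written string as a crash type name. */
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}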
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index 749452f8e2f6..d0fdc134068a 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -418,15 +418,23 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
 		struct file *file, poll_table *wait)
 {
 	unsigned int mask = 0;
-	mutex_unlock(&dev->device_lock);
+
 	poll_wait(file, &dev->iamthif_cl.wait, wait);
+
 	mutex_lock(&dev->device_lock);
-	if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
-	    dev->iamthif_file_object == file) {
+	if (!mei_cl_is_connected(&dev->iamthif_cl)) {
+
+		mask = POLLERR;
+
+	} else if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
+		   dev->iamthif_file_object == file) {
+
 		mask |= (POLLIN | POLLRDNORM);
 		dev_dbg(&dev->pdev->dev, "run next amthif cb\n");
 		mei_amthif_run_next_cmd(dev);
 	}
+	mutex_unlock(&dev->device_lock);
+
 	return mask;
 }
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 9ecd49a7be1b..a150a42ed4af 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -47,7 +47,7 @@ static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
 	id = driver->id_table;
 
 	while (id->name[0]) {
-		if (!strcmp(dev_name(dev), id->name))
+		if (!strncmp(dev_name(dev), id->name, sizeof(id->name)))
 			return 1;
 
 		id++;
@@ -71,7 +71,7 @@ static int mei_cl_device_probe(struct device *dev)
 
 	dev_dbg(dev, "Device probe\n");
 
-	strncpy(id.name, dev_name(dev), MEI_CL_NAME_SIZE);
+	strncpy(id.name, dev_name(dev), sizeof(id.name));
 
 	return driver->probe(device, &id);
 }
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 21d3f5aa8353..e0684b4d9a08 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -635,10 +635,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
 
 	dev = cl->dev;
 
-	if (cl->state != MEI_FILE_CONNECTED)
-		return -ENODEV;
-
-	if (dev->dev_state != MEI_DEV_ENABLED)
+	if (!mei_cl_is_connected(cl))
 		return -ENODEV;
 
 	if (cl->read_cb) {
@@ -892,18 +889,22 @@ void mei_cl_all_disconnect(struct mei_device *dev)
 
 
 /**
- * mei_cl_all_read_wakeup  - wake up all readings so they can be interrupted
+ * mei_cl_all_wakeup  - wake up all readers and writers they can be interrupted
 *
 * @dev  - mei device
 */
-void mei_cl_all_read_wakeup(struct mei_device *dev)
+void mei_cl_all_wakeup(struct mei_device *dev)
 {
 	struct mei_cl *cl, *next;
 	list_for_each_entry_safe(cl, next, &dev->file_list, link) {
 		if (waitqueue_active(&cl->rx_wait)) {
-			dev_dbg(&dev->pdev->dev, "Waking up client!\n");
+			dev_dbg(&dev->pdev->dev, "Waking up reading client!\n");
 			wake_up_interruptible(&cl->rx_wait);
 		}
+		if (waitqueue_active(&cl->tx_wait)) {
+			dev_dbg(&dev->pdev->dev, "Waking up writing client!\n");
+			wake_up_interruptible(&cl->tx_wait);
+		}
 	}
 }
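Both mei poll fixes in this pull follow the discipline established by the amthif hunk above: never register with poll_wait() while holding the device lock (the old code even unlocked a mutex it did not own), and re-check the connection after every relock so a reset wakes pollers with POLLERR instead of leaving them blocked. A condensed sketch of that shape, with demo_* names standing in for the mei types (they are not mei API):

#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct demo_dev {
	struct mutex lock;
	wait_queue_head_t wait;
	bool connected;
	bool data_ready;
};

static unsigned int demo_poll(struct file *file, poll_table *wait)
{
	struct demo_dev *dev = file->private_data;
	unsigned int mask = 0;

	/* Register on the waitqueue without holding the device lock. */
	poll_wait(file, &dev->wait, wait);

	mutex_lock(&dev->lock);
	if (!dev->connected)
		mask = POLLERR;		/* a reset/disconnect wakes us */
	else if (dev->data_ready)
		mask = POLLIN | POLLRDNORM;
	mutex_unlock(&dev->lock);

	return mask;
}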
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 26b157d8bad5..9eb031e92070 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -84,6 +84,13 @@ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
 /*
  *  MEI input output function prototype
  */
+static inline bool mei_cl_is_connected(struct mei_cl *cl)
+{
+	return (cl->dev &&
+		cl->dev->dev_state == MEI_DEV_ENABLED &&
+		cl->state == MEI_FILE_CONNECTED);
+}
+
 bool mei_cl_is_other_connecting(struct mei_cl *cl);
 int mei_cl_disconnect(struct mei_cl *cl);
 int mei_cl_connect(struct mei_cl *cl, struct file *file);
@@ -99,7 +106,7 @@ void mei_host_client_init(struct work_struct *work);
 
 void mei_cl_all_disconnect(struct mei_device *dev);
-void mei_cl_all_read_wakeup(struct mei_device *dev);
+void mei_cl_all_wakeup(struct mei_device *dev);
 void mei_cl_all_write_clear(struct mei_device *dev);
 
 #endif /* _MEI_CLIENT_H_ */
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index b22c7e247225..3412adcdaeb0 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -176,21 +176,18 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
 	struct mei_me_hw *hw = to_me_hw(dev);
 	u32 hcsr = mei_hcsr_read(hw);
 
-	dev_dbg(&dev->pdev->dev, "before reset HCSR = 0x%08x.\n", hcsr);
-
-	hcsr |= (H_RST | H_IG);
+	hcsr |= H_RST | H_IG | H_IS;
 
 	if (intr_enable)
 		hcsr |= H_IE;
 	else
-		hcsr |= ~H_IE;
+		hcsr &= ~H_IE;
 
-	mei_hcsr_set(hw, hcsr);
+	mei_me_reg_write(hw, H_CSR, hcsr);
 
 	if (dev->dev_state == MEI_DEV_POWER_DOWN)
 		mei_me_hw_reset_release(dev);
 
-	dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", mei_hcsr_read(hw));
 	return 0;
 }
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index e6f16f83ecde..92c73118b13c 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -154,8 +154,14 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
 	    dev->dev_state != MEI_DEV_POWER_DOWN)
 		dev->dev_state = MEI_DEV_RESETTING;
 
+	/* remove all waiting requests */
+	mei_cl_all_write_clear(dev);
+
 	mei_cl_all_disconnect(dev);
 
+	/* wake up all readings so they can be interrupted */
+	mei_cl_all_wakeup(dev);
+
 	/* remove entry if already in list */
 	dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n");
 	mei_cl_unlink(&dev->wd_cl);
@@ -196,11 +202,6 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
 
 	mei_hbm_start_req(dev);
 
-	/* wake up all readings so they can be interrupted */
-	mei_cl_all_read_wakeup(dev);
-
-	/* remove all waiting requests */
-	mei_cl_all_write_clear(dev);
 }
 EXPORT_SYMBOL_GPL(mei_reset);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 5e11b5b9b65d..173ff095be0d 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -625,24 +625,32 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
 	unsigned int mask = 0;
 
 	if (WARN_ON(!cl || !cl->dev))
-		return mask;
+		return POLLERR;
 
 	dev = cl->dev;
 
 	mutex_lock(&dev->device_lock);
 
-	if (dev->dev_state != MEI_DEV_ENABLED)
-		goto out;
-
-
-	if (cl == &dev->iamthif_cl) {
-		mask = mei_amthif_poll(dev, file, wait);
+	if (!mei_cl_is_connected(cl)) {
+		mask = POLLERR;
 		goto out;
 	}
 
 	mutex_unlock(&dev->device_lock);
+
+
+	if (cl == &dev->iamthif_cl)
+		return mei_amthif_poll(dev, file, wait);
+
 	poll_wait(file, &cl->tx_wait, wait);
+
 	mutex_lock(&dev->device_lock);
+
+	if (!mei_cl_is_connected(cl)) {
+		mask = POLLERR;
+		goto out;
+	}
+
 	if (MEI_WRITE_COMPLETE == cl->writing_state)
 		mask |= (POLLIN | POLLRDNORM);
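Of the hunks above, the hw-me.c one fixes a classic bit-manipulation slip: "hcsr |= ~H_IE" ORs in the complement of the mask, which sets every bit except H_IE, when the intent was to clear the interrupt-enable bit; clearing needs AND with the complement. A stand-alone illustration (the H_IE value here is invented for the demo):

#include <stdio.h>

#define H_IE (1u << 1)	/* interrupt-enable bit, value made up for the demo */

int main(void)
{
	unsigned int hcsr = 0x0000000eu;

	/* OR with the complement sets almost every bit: 0xffffffff here. */
	printf("wrong:   0x%08x\n", hcsr | ~H_IE);
	/* AND with the complement clears exactly the H_IE bit: 0x0000000c. */
	printf("correct: 0x%08x\n", hcsr & ~H_IE);
	return 0;
}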
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index d87cc91bc016..afe66571ce0b 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -68,7 +68,8 @@ static int sram_probe(struct platform_device *pdev)
 	ret = gen_pool_add_virt(sram->pool, (unsigned long)virt_base,
 				res->start, size, -1);
 	if (ret < 0) {
-		gen_pool_destroy(sram->pool);
+		if (sram->clk)
+			clk_disable_unprepare(sram->clk);
 		return ret;
 	}
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 0a1428016350..8d64b681dd93 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -562,7 +562,9 @@ long st_register(struct st_proto_s *new_proto)
 		if ((st_gdata->protos_registered != ST_EMPTY) &&
 		    (test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
 			pr_err(" KIM failure complete callback ");
+			spin_lock_irqsave(&st_gdata->lock, flags);
 			st_reg_complete(st_gdata, err);
+			spin_unlock_irqrestore(&st_gdata->lock, flags);
 			clear_bit(ST_REG_PENDING, &st_gdata->st_state);
 		}
 		return -EINVAL;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index cb56e270da11..2421835d5daf 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -133,7 +133,7 @@ MODULE_LICENSE("GPL");
 #define VMWARE_BALLOON_CMD(cmd, data, result)		\
 ({							\
 	unsigned long __stat, __dummy1, __dummy2;	\
-	__asm__ __volatile__ ("inl (%%dx)" :		\
+	__asm__ __volatile__ ("inl %%dx" :		\
 		"=a"(__stat),				\
 		"=c"(__dummy1),				\
 		"=d"(__dummy2),				\
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index 7b3fce2da6c3..3dee7ae123e7 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
-MODULE_VERSION("1.0.0.0-k");
+MODULE_VERSION("1.1.0.0-k");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_vmci/vmci_driver.h b/drivers/misc/vmw_vmci/vmci_driver.h
index f69156a1f30c..cee9e977d318 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.h
+++ b/drivers/misc/vmw_vmci/vmci_driver.h
@@ -35,6 +35,13 @@ struct vmci_obj {
 	enum vmci_obj_type type;
 };
 
+/*
+ * Needed by other components of this module.  It's okay to have one global
+ * instance of this because there can only ever be one VMCI device.  Our
+ * virtual hardware enforces this.
+ */
+extern struct pci_dev *vmci_pdev;
+
 u32 vmci_get_context_id(void);
 int vmci_send_datagram(struct vmci_datagram *dg);
 
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 60c01999f489..b3a2b763ecf2 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -65,9 +65,11 @@ struct vmci_guest_device {
 
 	void *data_buffer;
 	void *notification_bitmap;
+	dma_addr_t notification_base;
 };
 
 /* vmci_dev singleton device and supporting data*/
+struct pci_dev *vmci_pdev;
 static struct vmci_guest_device *vmci_dev_g;
 static DEFINE_SPINLOCK(vmci_dev_spinlock);
 
@@ -528,7 +530,9 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
 	 * well.
 	 */
 	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
-		vmci_dev->notification_bitmap = vmalloc(PAGE_SIZE);
+		vmci_dev->notification_bitmap = dma_alloc_coherent(
+			&pdev->dev, PAGE_SIZE, &vmci_dev->notification_base,
+			GFP_KERNEL);
 		if (!vmci_dev->notification_bitmap) {
 			dev_warn(&pdev->dev,
 				 "Unable to allocate notification bitmap\n");
@@ -546,6 +550,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
 	/* Set up global device so that we can start sending datagrams */
 	spin_lock_irq(&vmci_dev_spinlock);
 	vmci_dev_g = vmci_dev;
+	vmci_pdev = pdev;
 	spin_unlock_irq(&vmci_dev_spinlock);
 
 	/*
@@ -553,9 +558,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
 	 * used.
 	 */
 	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
-		struct page *page =
-		    vmalloc_to_page(vmci_dev->notification_bitmap);
-		unsigned long bitmap_ppn = page_to_pfn(page);
+		unsigned long bitmap_ppn =
+			vmci_dev->notification_base >> PAGE_SHIFT;
 		if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
 			dev_warn(&pdev->dev,
 				 "VMCI device unable to register notification bitmap with PPN 0x%x\n",
@@ -665,11 +669,14 @@ err_remove_bitmap:
 	if (vmci_dev->notification_bitmap) {
 		iowrite32(VMCI_CONTROL_RESET,
 			  vmci_dev->iobase + VMCI_CONTROL_ADDR);
-		vfree(vmci_dev->notification_bitmap);
+		dma_free_coherent(&pdev->dev, PAGE_SIZE,
+				  vmci_dev->notification_bitmap,
+				  vmci_dev->notification_base);
 	}
 
 err_remove_vmci_dev_g:
 	spin_lock_irq(&vmci_dev_spinlock);
+	vmci_pdev = NULL;
 	vmci_dev_g = NULL;
 	spin_unlock_irq(&vmci_dev_spinlock);
 
@@ -699,6 +706,7 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
 
 	spin_lock_irq(&vmci_dev_spinlock);
 	vmci_dev_g = NULL;
+	vmci_pdev = NULL;
 	spin_unlock_irq(&vmci_dev_spinlock);
 
 	dev_dbg(&pdev->dev, "Resetting vmci device\n");
@@ -727,7 +735,9 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
 		 * device, so we can safely free it here.
 		 */
 
-		vfree(vmci_dev->notification_bitmap);
+		dma_free_coherent(&pdev->dev, PAGE_SIZE,
+				  vmci_dev->notification_bitmap,
+				  vmci_dev->notification_base);
 	}
 
 	vfree(vmci_dev->data_buffer);
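The vmci_guest.c hunks above trade the vmalloc'ed notification bitmap for a DMA-coherent page so that the address programmed into the device is a real IOVA when a virtual IOMMU sits in between; vmci_pdev is exported so the queue-pair code can make its own DMA allocations against the same device. A minimal sketch of the alloc/free pairing (demo_* names are illustrative, not VMCI API):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

struct demo_dev {
	void *bitmap;		/* CPU virtual address */
	dma_addr_t bitmap_base;	/* bus/IOVA address seen by the device */
};

static int demo_probe(struct pci_dev *pdev, struct demo_dev *d)
{
	d->bitmap = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
				       &d->bitmap_base, GFP_KERNEL);
	if (!d->bitmap)
		return -ENOMEM;

	/* The device is told the page frame of the IOVA, not of the VA:
	 * register_with_device(d->bitmap_base >> PAGE_SHIFT);
	 */
	return 0;
}

static void demo_remove(struct pci_dev *pdev, struct demo_dev *d)
{
	/* Must pass back both the VA and the matching bus address. */
	dma_free_coherent(&pdev->dev, PAGE_SIZE, d->bitmap, d->bitmap_base);
}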
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 8ff2e5ee8fb8..a0515a6d6ebd 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/pagemap.h>
+#include <linux/pci.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/uio.h>
@@ -146,14 +147,20 @@ typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
 
 /* The Kernel specific component of the struct vmci_queue structure. */
 struct vmci_queue_kern_if {
-	struct page **page;
-	struct page **header_page;
-	void *va;
 	struct mutex __mutex;	/* Protects the queue. */
 	struct mutex *mutex;	/* Shared by producer and consumer queues. */
-	bool host;
-	size_t num_pages;
-	bool mapped;
+	size_t num_pages;	/* Number of pages incl. header. */
+	bool host;		/* Host or guest? */
+	union {
+		struct {
+			dma_addr_t *pas;
+			void **vas;
+		} g;		/* Used by the guest. */
+		struct {
+			struct page **page;
+			struct page **header_page;
+		} h;		/* Used by the host. */
+	} u;
 };
 
@@ -265,76 +272,65 @@ static void qp_free_queue(void *q, u64 size)
 	struct vmci_queue *queue = q;
 
 	if (queue) {
-		u64 i = DIV_ROUND_UP(size, PAGE_SIZE);
+		u64 i;
 
-		if (queue->kernel_if->mapped) {
-			vunmap(queue->kernel_if->va);
-			queue->kernel_if->va = NULL;
+		/* Given size does not include header, so add in a page here. */
+		for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
+			dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
+					  queue->kernel_if->u.g.vas[i],
+					  queue->kernel_if->u.g.pas[i]);
 		}
 
-		while (i)
-			__free_page(queue->kernel_if->page[--i]);
-
-		vfree(queue->q_header);
+		vfree(queue);
 	}
 }
 
 /*
- * Allocates kernel VA space of specified size, plus space for the
- * queue structure/kernel interface and the queue header.  Allocates
- * physical pages for the queue data pages.
- *
- * PAGE m:      struct vmci_queue_header (struct vmci_queue->q_header)
- * PAGE m+1:    struct vmci_queue
- * PAGE m+1+q:  struct vmci_queue_kern_if (struct vmci_queue->kernel_if)
- * PAGE n-size: Data pages (struct vmci_queue->kernel_if->page[])
+ * Allocates kernel queue pages of specified size with IOMMU mappings,
+ * plus space for the queue structure/kernel interface and the queue
+ * header.
  */
 static void *qp_alloc_queue(u64 size, u32 flags)
 {
 	u64 i;
 	struct vmci_queue *queue;
-	struct vmci_queue_header *q_header;
-	const u64 num_data_pages = DIV_ROUND_UP(size, PAGE_SIZE);
-	const uint queue_size =
-	    PAGE_SIZE +
-	    sizeof(*queue) + sizeof(*(queue->kernel_if)) +
-	    num_data_pages * sizeof(*(queue->kernel_if->page));
-
-	q_header = vmalloc(queue_size);
-	if (!q_header)
+	const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+	const size_t pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
+	const size_t vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
+	const size_t queue_size =
+		sizeof(*queue) + sizeof(*queue->kernel_if) +
+		pas_size + vas_size;
+
+	queue = vmalloc(queue_size);
+	if (!queue)
 		return NULL;
 
-	queue = (void *)q_header + PAGE_SIZE;
-	queue->q_header = q_header;
+	queue->q_header = NULL;
 	queue->saved_header = NULL;
 	queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
-	queue->kernel_if->header_page = NULL;	/* Unused in guest. */
-	queue->kernel_if->page = (struct page **)(queue->kernel_if + 1);
+	queue->kernel_if->mutex = NULL;
+	queue->kernel_if->num_pages = num_pages;
+	queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
+	queue->kernel_if->u.g.vas =
+		(void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
 	queue->kernel_if->host = false;
-	queue->kernel_if->va = NULL;
-	queue->kernel_if->mapped = false;
-
-	for (i = 0; i < num_data_pages; i++) {
-		queue->kernel_if->page[i] = alloc_pages(GFP_KERNEL, 0);
-		if (!queue->kernel_if->page[i])
-			goto fail;
-	}
 
-	if (vmci_qp_pinned(flags)) {
-		queue->kernel_if->va =
-		    vmap(queue->kernel_if->page, num_data_pages, VM_MAP,
-			 PAGE_KERNEL);
-		if (!queue->kernel_if->va)
-			goto fail;
-
-		queue->kernel_if->mapped = true;
+	for (i = 0; i < num_pages; i++) {
+		queue->kernel_if->u.g.vas[i] =
+			dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
+					   &queue->kernel_if->u.g.pas[i],
+					   GFP_KERNEL);
+		if (!queue->kernel_if->u.g.vas[i]) {
+			/* Size excl. the header. */
+			qp_free_queue(queue, i * PAGE_SIZE);
+			return NULL;
+		}
 	}
 
-	return (void *)queue;
+	/* Queue header is the first page. */
+	queue->q_header = queue->kernel_if->u.g.vas[0];
 
- fail:
-	qp_free_queue(queue, i * PAGE_SIZE);
-	return NULL;
+	return queue;
 }
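The qp_alloc_queue() rewrite above allocates every queue page individually with dma_alloc_coherent() and, on a partial failure, unwinds by passing the free path the number of pages obtained so far. That rollback idiom in isolation (a sketch; the demo_* helpers are not part of the driver):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

static void demo_free_pages(struct device *dev, void **vas,
			    dma_addr_t *pas, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		dma_free_coherent(dev, PAGE_SIZE, vas[i], pas[i]);
}

static int demo_alloc_pages(struct device *dev, void **vas,
			    dma_addr_t *pas, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		vas[i] = dma_alloc_coherent(dev, PAGE_SIZE, &pas[i],
					    GFP_KERNEL);
		if (!vas[i]) {
			/* Unwind exactly the pages allocated so far. */
			demo_free_pages(dev, vas, pas, i);
			return -ENOMEM;
		}
	}
	return 0;
}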
@@ -353,17 +349,18 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
 	size_t bytes_copied = 0;
 
 	while (bytes_copied < size) {
-		u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE;
-		size_t page_offset =
+		const u64 page_index =
+			(queue_offset + bytes_copied) / PAGE_SIZE;
+		const size_t page_offset =
 		    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
 		void *va;
 		size_t to_copy;
 
-		if (!kernel_if->mapped)
-			va = kmap(kernel_if->page[page_index]);
+		if (kernel_if->host)
+			va = kmap(kernel_if->u.h.page[page_index]);
 		else
-			va = (void *)((u8 *)kernel_if->va +
-				      (page_index * PAGE_SIZE));
+			va = kernel_if->u.g.vas[page_index + 1];
+			/* Skip header. */
 
 		if (size - bytes_copied > PAGE_SIZE - page_offset)
 			/* Enough payload to fill up from this page. */
@@ -379,7 +376,8 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
 			err = memcpy_fromiovec((u8 *)va + page_offset, iov,
 					       to_copy);
 			if (err != 0) {
-				kunmap(kernel_if->page[page_index]);
+				if (kernel_if->host)
+					kunmap(kernel_if->u.h.page[page_index]);
 				return VMCI_ERROR_INVALID_ARGS;
 			}
 		} else {
@@ -388,8 +386,8 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
 		}
 
 		bytes_copied += to_copy;
-		if (!kernel_if->mapped)
-			kunmap(kernel_if->page[page_index]);
+		if (kernel_if->host)
+			kunmap(kernel_if->u.h.page[page_index]);
 	}
 
 	return VMCI_SUCCESS;
@@ -411,17 +409,18 @@ static int __qp_memcpy_from_queue(void *dest,
 	size_t bytes_copied = 0;
 
 	while (bytes_copied < size) {
-		u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE;
-		size_t page_offset =
+		const u64 page_index =
+			(queue_offset + bytes_copied) / PAGE_SIZE;
+		const size_t page_offset =
 		    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
 		void *va;
 		size_t to_copy;
 
-		if (!kernel_if->mapped)
-			va = kmap(kernel_if->page[page_index]);
+		if (kernel_if->host)
+			va = kmap(kernel_if->u.h.page[page_index]);
 		else
-			va = (void *)((u8 *)kernel_if->va +
-				      (page_index * PAGE_SIZE));
+			va = kernel_if->u.g.vas[page_index + 1];
+			/* Skip header. */
 
 		if (size - bytes_copied > PAGE_SIZE - page_offset)
 			/* Enough payload to fill up this page. */
@@ -437,7 +436,8 @@ static int __qp_memcpy_from_queue(void *dest,
 			err = memcpy_toiovec(iov, (u8 *)va + page_offset,
 					     to_copy);
 			if (err != 0) {
-				kunmap(kernel_if->page[page_index]);
+				if (kernel_if->host)
+					kunmap(kernel_if->u.h.page[page_index]);
 				return VMCI_ERROR_INVALID_ARGS;
 			}
 		} else {
@@ -446,8 +446,8 @@ static int __qp_memcpy_from_queue(void *dest,
 		}
 
 		bytes_copied += to_copy;
-		if (!kernel_if->mapped)
-			kunmap(kernel_if->page[page_index]);
+		if (kernel_if->host)
+			kunmap(kernel_if->u.h.page[page_index]);
 	}
 
 	return VMCI_SUCCESS;
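Both copy helpers above reduce a byte offset into the queue to a (page index, offset-in-page) pair and clamp each chunk at the next page boundary; on the guest side the data pages start at vas[1] because vas[0] holds the queue header. The arithmetic on its own, as a user-space sketch with a fixed 4 KiB page size:

#include <string.h>

#define DEMO_PAGE_SIZE 4096u

/* Copy size bytes into a paged queue starting at queue_offset. */
static void demo_copy_to_queue(void **vas, unsigned long long queue_offset,
			       const char *src, size_t size)
{
	size_t copied = 0;

	while (copied < size) {
		unsigned long long page_index =
			(queue_offset + copied) / DEMO_PAGE_SIZE;
		size_t page_offset =
			(queue_offset + copied) & (DEMO_PAGE_SIZE - 1);
		size_t chunk = DEMO_PAGE_SIZE - page_offset;

		if (chunk > size - copied)
			chunk = size - copied;

		/* +1 skips the queue header page, as in the guest path. */
		memcpy((char *)vas[page_index + 1] + page_offset,
		       src + copied, chunk);
		copied += chunk;
	}
}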
@@ -489,12 +489,11 @@ static int qp_alloc_ppn_set(void *prod_q,
 		return VMCI_ERROR_NO_MEM;
 	}
 
-	produce_ppns[0] = page_to_pfn(vmalloc_to_page(produce_q->q_header));
-	for (i = 1; i < num_produce_pages; i++) {
+	for (i = 0; i < num_produce_pages; i++) {
 		unsigned long pfn;
 
 		produce_ppns[i] =
-		    page_to_pfn(produce_q->kernel_if->page[i - 1]);
+			produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
 		pfn = produce_ppns[i];
 
 		/* Fail allocation if PFN isn't supported by hypervisor. */
@@ -503,12 +502,11 @@ static int qp_alloc_ppn_set(void *prod_q,
 			goto ppn_error;
 	}
 
-	consume_ppns[0] = page_to_pfn(vmalloc_to_page(consume_q->q_header));
-	for (i = 1; i < num_consume_pages; i++) {
+	for (i = 0; i < num_consume_pages; i++) {
 		unsigned long pfn;
 
 		consume_ppns[i] =
-		    page_to_pfn(consume_q->kernel_if->page[i - 1]);
+			consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
 		pfn = consume_ppns[i];
 
 		/* Fail allocation if PFN isn't supported by hypervisor. */
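With coherent pages, qp_alloc_ppn_set() above derives each PPN straight from the DMA address. The pfn round-trip retained from the old code is a truncation guard: PPNs are handed to the hypervisor as 32-bit values, so a page frame number that does not survive conversion to u32 has to fail the allocation. A sketch of that guard (demo_ppn_ok is an illustrative name):

#include <linux/mm.h>
#include <linux/types.h>

static bool demo_ppn_ok(dma_addr_t pa, u32 *ppn_out)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	u32 ppn = (u32)pfn;

	if ((unsigned long)ppn != pfn)
		return false;	/* frame number needs more than 32 bits */

	*ppn_out = ppn;
	return true;
}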
@@ -619,23 +617,20 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
 	const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
 	const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
 	const size_t queue_page_size =
-	    num_pages * sizeof(*queue->kernel_if->page);
+		num_pages * sizeof(*queue->kernel_if->u.h.page);
 
 	queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
 	if (queue) {
 		queue->q_header = NULL;
 		queue->saved_header = NULL;
-		queue->kernel_if =
-		    (struct vmci_queue_kern_if *)((u8 *)queue +
-						  sizeof(*queue));
+		queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
 		queue->kernel_if->host = true;
 		queue->kernel_if->mutex = NULL;
 		queue->kernel_if->num_pages = num_pages;
-		queue->kernel_if->header_page =
+		queue->kernel_if->u.h.header_page =
 		    (struct page **)((u8 *)queue + queue_size);
-		queue->kernel_if->page = &queue->kernel_if->header_page[1];
-		queue->kernel_if->va = NULL;
-		queue->kernel_if->mapped = false;
+		queue->kernel_if->u.h.page =
+			&queue->kernel_if->u.h.header_page[1];
 	}
 
 	return queue;
@@ -742,11 +737,12 @@ static int qp_host_get_user_memory(u64 produce_uva,
 				current->mm,
 				(uintptr_t) produce_uva,
 				produce_q->kernel_if->num_pages,
-				1, 0, produce_q->kernel_if->header_page, NULL);
+				1, 0,
+				produce_q->kernel_if->u.h.header_page, NULL);
 	if (retval < produce_q->kernel_if->num_pages) {
 		pr_warn("get_user_pages(produce) failed (retval=%d)", retval);
-		qp_release_pages(produce_q->kernel_if->header_page, retval,
-				 false);
+		qp_release_pages(produce_q->kernel_if->u.h.header_page,
+				 retval, false);
 		err = VMCI_ERROR_NO_MEM;
 		goto out;
 	}
@@ -755,12 +751,13 @@ static int qp_host_get_user_memory(u64 produce_uva,
 				current->mm,
 				(uintptr_t) consume_uva,
 				consume_q->kernel_if->num_pages,
-				1, 0, consume_q->kernel_if->header_page, NULL);
+				1, 0,
+				consume_q->kernel_if->u.h.header_page, NULL);
 	if (retval < consume_q->kernel_if->num_pages) {
 		pr_warn("get_user_pages(consume) failed (retval=%d)", retval);
-		qp_release_pages(consume_q->kernel_if->header_page, retval,
-				 false);
-		qp_release_pages(produce_q->kernel_if->header_page,
+		qp_release_pages(consume_q->kernel_if->u.h.header_page,
+				 retval, false);
+		qp_release_pages(produce_q->kernel_if->u.h.header_page,
 				 produce_q->kernel_if->num_pages, false);
 		err = VMCI_ERROR_NO_MEM;
 	}
@@ -803,15 +800,15 @@ static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
 static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
 					   struct vmci_queue *consume_q)
 {
-	qp_release_pages(produce_q->kernel_if->header_page,
+	qp_release_pages(produce_q->kernel_if->u.h.header_page,
 			 produce_q->kernel_if->num_pages, true);
-	memset(produce_q->kernel_if->header_page, 0,
-	       sizeof(*produce_q->kernel_if->header_page) *
+	memset(produce_q->kernel_if->u.h.header_page, 0,
+	       sizeof(*produce_q->kernel_if->u.h.header_page) *
 	       produce_q->kernel_if->num_pages);
-	qp_release_pages(consume_q->kernel_if->header_page,
+	qp_release_pages(consume_q->kernel_if->u.h.header_page,
 			 consume_q->kernel_if->num_pages, true);
-	memset(consume_q->kernel_if->header_page, 0,
-	       sizeof(*consume_q->kernel_if->header_page) *
+	memset(consume_q->kernel_if->u.h.header_page, 0,
+	       sizeof(*consume_q->kernel_if->u.h.header_page) *
 	       consume_q->kernel_if->num_pages);
 }
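The host-side hunks above pin both rings with get_user_pages() and, when either ring pins short, release exactly the pages that were obtained before bailing out. A reduced sketch of that error discipline, written against the eight-argument get_user_pages() of this kernel generation; later kernels changed the signature, so this is period code, not current API:

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/sched.h>

static int demo_pin_ring(unsigned long uva, struct page **pages, int npages)
{
	int got, i;

	/* This era's get_user_pages() is called under mmap_sem. */
	down_read(&current->mm->mmap_sem);
	got = get_user_pages(current, current->mm, uva, npages,
			     1 /* write */, 0 /* force */, pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (got < npages) {
		/* Release the partial pin before failing. */
		for (i = 0; i < got; i++)
			put_page(pages[i]);
		return -ENOMEM;
	}
	return 0;
}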
@@ -834,12 +831,12 @@ static int qp_host_map_queues(struct vmci_queue *produce_q,
 		if (produce_q->q_header != consume_q->q_header)
 			return VMCI_ERROR_QUEUEPAIR_MISMATCH;
 
-		if (produce_q->kernel_if->header_page == NULL ||
-		    *produce_q->kernel_if->header_page == NULL)
+		if (produce_q->kernel_if->u.h.header_page == NULL ||
+		    *produce_q->kernel_if->u.h.header_page == NULL)
 			return VMCI_ERROR_UNAVAILABLE;
 
-		headers[0] = *produce_q->kernel_if->header_page;
-		headers[1] = *consume_q->kernel_if->header_page;
+		headers[0] = *produce_q->kernel_if->u.h.header_page;
+		headers[1] = *consume_q->kernel_if->u.h.header_page;
 
 		produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
 		if (produce_q->q_header != NULL) {
@@ -1720,21 +1717,6 @@ static int qp_broker_attach(struct qp_broker_entry *entry,
 			if (result < VMCI_SUCCESS)
 				return result;
 
-			/*
-			 * Preemptively load in the headers if non-blocking to
-			 * prevent blocking later.
-			 */
-			if (entry->qp.flags & VMCI_QPFLAG_NONBLOCK) {
-				result = qp_host_map_queues(entry->produce_q,
-							    entry->consume_q);
-				if (result < VMCI_SUCCESS) {
-					qp_host_unregister_user_memory(
-						entry->produce_q,
-						entry->consume_q);
-					return result;
-				}
-			}
-
 			entry->state = VMCIQPB_ATTACHED_MEM;
 		} else {
 			entry->state = VMCIQPB_ATTACHED_NO_MEM;
@@ -1749,24 +1731,6 @@ static int qp_broker_attach(struct qp_broker_entry *entry,
 
 			return VMCI_ERROR_UNAVAILABLE;
 		} else {
-			/*
-			 * For non-blocking queue pairs, we cannot rely on
-			 * enqueue/dequeue to map in the pages on the
-			 * host-side, since it may block, so we make an
-			 * attempt here.
-			 */
-
-			if (flags & VMCI_QPFLAG_NONBLOCK) {
-				result =
-				    qp_host_map_queues(entry->produce_q,
-						       entry->consume_q);
-				if (result < VMCI_SUCCESS)
-					return result;
-
-				entry->qp.flags |= flags &
-				    (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED);
-			}
-
 			/* The host side has successfully attached to a queue pair. */
 			entry->state = VMCIQPB_ATTACHED_MEM;
 		}
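qp_host_map_queues() above needs only the two header pages mapped, so it vmaps exactly those, producing one virtually contiguous window in which the consume header sits PAGE_SIZE past the produce header. The same trick in isolation:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Map two arbitrary physical pages back to back; NULL on failure. */
static void *demo_map_headers(struct page *produce_hdr,
			      struct page *consume_hdr)
{
	struct page *headers[2] = { produce_hdr, consume_hdr };

	/* One virtually contiguous view over two unrelated pages. */
	return vmap(headers, 2, VM_MAP, PAGE_KERNEL);
}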
@@ -2543,24 +2507,19 @@ void vmci_qp_guest_endpoints_exit(void)
  * Since non-blocking isn't yet implemented on the host personality we
  * have no reason to acquire a spin lock.  So to avoid the use of an
  * unnecessary lock only acquire the mutex if we can block.
- * Note: It is assumed that QPFLAG_PINNED implies QPFLAG_NONBLOCK.  Therefore
- *       we can use the same locking function for access to both the queue
- *       and the queue headers as it is the same logic.  Assert this behvior.
  */
 static void qp_lock(const struct vmci_qp *qpair)
 {
-	if (vmci_can_block(qpair->flags))
-		qp_acquire_queue_mutex(qpair->produce_q);
+	qp_acquire_queue_mutex(qpair->produce_q);
 }
 
 /*
  * Helper routine that unlocks the queue pair after calling
- * qp_lock.  Respects non-blocking and pinning flags.
+ * qp_lock.
  */
 static void qp_unlock(const struct vmci_qp *qpair)
 {
-	if (vmci_can_block(qpair->flags))
-		qp_release_queue_mutex(qpair->produce_q);
+	qp_release_queue_mutex(qpair->produce_q);
 }
 
@@ -2568,17 +2527,12 @@ static void qp_unlock(const struct vmci_qp *qpair)
  * currently not mapped, it will be attempted to do so.
  */
 static int qp_map_queue_headers(struct vmci_queue *produce_q,
-				struct vmci_queue *consume_q,
-				bool can_block)
+				struct vmci_queue *consume_q)
 {
 	int result;
 
 	if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
-		if (can_block)
-			result = qp_host_map_queues(produce_q, consume_q);
-		else
-			result = VMCI_ERROR_QUEUEPAIR_NOT_READY;
-
+		result = qp_host_map_queues(produce_q, consume_q);
 		if (result < VMCI_SUCCESS)
 			return (produce_q->saved_header &&
 				consume_q->saved_header) ?
@@ -2601,8 +2555,7 @@ static int qp_get_queue_headers(const struct vmci_qp *qpair,
 {
 	int result;
 
-	result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q,
-				      vmci_can_block(qpair->flags));
+	result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
 	if (result == VMCI_SUCCESS) {
 		*produce_q_header = qpair->produce_q->q_header;
 		*consume_q_header = qpair->consume_q->q_header;
@@ -2645,9 +2598,6 @@ static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
 {
 	unsigned int generation;
 
-	if (qpair->flags & VMCI_QPFLAG_NONBLOCK)
-		return false;
-
 	qpair->blocked++;
 	generation = qpair->generation;
 	qp_unlock(qpair);
@@ -2674,15 +2624,14 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
 				 const u64 produce_q_size,
 				 const void *buf,
 				 size_t buf_size,
-				 vmci_memcpy_to_queue_func memcpy_to_queue,
-				 bool can_block)
+				 vmci_memcpy_to_queue_func memcpy_to_queue)
 {
 	s64 free_space;
 	u64 tail;
 	size_t written;
 	ssize_t result;
 
-	result = qp_map_queue_headers(produce_q, consume_q, can_block);
+	result = qp_map_queue_headers(produce_q, consume_q);
 	if (unlikely(result != VMCI_SUCCESS))
 		return result;
 
@@ -2737,15 +2686,14 @@ static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
 				 void *buf,
 				 size_t buf_size,
 				 vmci_memcpy_from_queue_func memcpy_from_queue,
-				 bool update_consumer,
-				 bool can_block)
+				 bool update_consumer)
 {
 	s64 buf_ready;
 	u64 head;
 	size_t read;
 	ssize_t result;
 
-	result = qp_map_queue_headers(produce_q, consume_q, can_block);
+	result = qp_map_queue_headers(produce_q, consume_q);
 	if (unlikely(result != VMCI_SUCCESS))
 		return result;
 
@@ -2842,32 +2790,11 @@ int vmci_qpair_alloc(struct vmci_qp **qpair,
 	route = vmci_guest_code_active() ?
 	    VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
 
-	/* If NONBLOCK or PINNED is set, we better be the guest personality. */
-	if ((!vmci_can_block(flags) || vmci_qp_pinned(flags)) &&
-	    VMCI_ROUTE_AS_GUEST != route) {
-		pr_devel("Not guest personality w/ NONBLOCK OR PINNED set");
+	if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
+		pr_devel("NONBLOCK OR PINNED set");
 		return VMCI_ERROR_INVALID_ARGS;
 	}
 
-	/*
-	 * Limit the size of pinned QPs and check sanity.
-	 *
-	 * Pinned pages implies non-blocking mode.  Mutexes aren't acquired
-	 * when the NONBLOCK flag is set in qpair code; and also should not be
-	 * acquired when the PINNED flagged is set.  Since pinning pages
-	 * implies we want speed, it makes no sense not to have NONBLOCK
-	 * set if PINNED is set.  Hence enforce this implication.
-	 */
-	if (vmci_qp_pinned(flags)) {
-		if (vmci_can_block(flags)) {
-			pr_err("Attempted to enable pinning w/o non-blocking");
-			return VMCI_ERROR_INVALID_ARGS;
-		}
-
-		if (produce_qsize + consume_qsize > VMCI_MAX_PINNED_QP_MEMORY)
-			return VMCI_ERROR_NO_RESOURCES;
-	}
-
 	my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
 	if (!my_qpair)
 		return VMCI_ERROR_NO_MEM;
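With the non-blocking paths gone, qp_enqueue_locked() above always maps headers on demand and then sizes the write against the ring's free space. For a ring of q_size bytes with a producer tail and a consumer head, one byte is conventionally kept empty so that full and empty remain distinguishable. A generic sketch of that computation; the real driver delegates to its queue-header helpers, so this shows the idea rather than VMCI's exact code:

/*
 * Free bytes available to the producer in a circular buffer where one
 * slot stays empty: head == tail means empty, so at most q_size - 1
 * bytes are ever free. Generic sketch, not the VMCI helper.
 */
static unsigned long long demo_free_space(unsigned long long tail,
					  unsigned long long head,
					  unsigned long long q_size)
{
	return (head + q_size - tail - 1) % q_size;
}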
@@ -3195,8 +3122,7 @@ ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
 					   qpair->consume_q,
 					   qpair->produce_q_size,
 					   buf, buf_size,
-					   qp_memcpy_to_queue,
-					   vmci_can_block(qpair->flags));
+					   qp_memcpy_to_queue);
 
 		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
 		    !qp_wait_for_ready_queue(qpair))
@@ -3237,8 +3163,7 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
 					   qpair->consume_q,
 					   qpair->consume_q_size,
 					   buf, buf_size,
-					   qp_memcpy_from_queue, true,
-					   vmci_can_block(qpair->flags));
+					   qp_memcpy_from_queue, true);
 
 		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
 		    !qp_wait_for_ready_queue(qpair))
@@ -3280,8 +3205,7 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
 					   qpair->consume_q,
 					   qpair->consume_q_size,
 					   buf, buf_size,
-					   qp_memcpy_from_queue, false,
-					   vmci_can_block(qpair->flags));
+					   qp_memcpy_from_queue, false);
 
 		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
 		    !qp_wait_for_ready_queue(qpair))
@@ -3323,8 +3247,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
 					   qpair->consume_q,
 					   qpair->produce_q_size,
 					   iov, iov_size,
-					   qp_memcpy_to_queue_iov,
-					   vmci_can_block(qpair->flags));
+					   qp_memcpy_to_queue_iov);
 
 		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
 		    !qp_wait_for_ready_queue(qpair))
@@ -3367,7 +3290,7 @@ ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
 					   qpair->consume_q_size,
 					   iov, iov_size,
 					   qp_memcpy_from_queue_iov,
-					   true, vmci_can_block(qpair->flags));
+					   true);
 
 		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
 		    !qp_wait_for_ready_queue(qpair))
@@ -3411,7 +3334,7 @@ ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
 					   qpair->consume_q_size,
 					   iov, iov_size,
 					   qp_memcpy_from_queue_iov,
-					   false, vmci_can_block(qpair->flags));
+					   false);
 
 		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
 		    !qp_wait_for_ready_queue(qpair))
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.h b/drivers/misc/vmw_vmci/vmci_queue_pair.h
index 58c6959f6b6d..ed177f04ef24 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.h
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.h
@@ -146,24 +146,6 @@ VMCI_QP_PAGESTORE_IS_WELLFORMED(struct vmci_qp_page_store *page_store)
 	return page_store->len >= 2;
 }
 
-/*
- * Helper function to check if the non-blocking flag
- * is set for a given queue pair.
- */
-static inline bool vmci_can_block(u32 flags)
-{
-	return !(flags & VMCI_QPFLAG_NONBLOCK);
-}
-
-/*
- * Helper function to check if the queue pair is pinned
- * into memory.
- */
-static inline bool vmci_qp_pinned(u32 flags)
-{
-	return flags & VMCI_QPFLAG_PINNED;
-}
-
 void vmci_qp_broker_exit(void);
 int vmci_qp_broker_alloc(struct vmci_handle handle, u32 peer,
 			 u32 flags, u32 priv_flags,
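Taken together, these changes leave vmci_qpair_alloc() with plain blocking semantics and DMA-backed queue pages on the guest side. For orientation, a hedged sketch of a guest client driving the retained API; the sizes, flags, and peer id are illustrative, and the signatures follow include/linux/vmw_vmci_api.h as of this series:

#include <linux/vmw_vmci_api.h>
#include <linux/vmw_vmci_defs.h>

static int demo_qpair_roundtrip(u32 peer_context_id)
{
	struct vmci_qp *qpair;
	struct vmci_handle handle = VMCI_INVALID_HANDLE;
	char msg[] = "ping";
	int rc;

	/* 4 KiB each way; flags 0 means blocking, unpinned operation. */
	rc = vmci_qpair_alloc(&qpair, &handle, 4096, 4096,
			      peer_context_id, 0,
			      VMCI_NO_PRIVILEGE_FLAGS);
	if (rc < VMCI_SUCCESS)
		return rc;

	/* May block while the page-backed headers are mapped in. */
	rc = vmci_qpair_enqueue(qpair, msg, sizeof(msg), 0);

	vmci_qpair_detach(&qpair);
	return rc;
}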