author:    Linus Torvalds <torvalds@linux-foundation.org>  2019-07-08 10:06:12 -0700
committer: Linus Torvalds <torvalds@linux-foundation.org>  2019-07-08 10:06:12 -0700
commit:    1758feddb0f9751debdc865fefde94b45907c948 (patch)
tree:      3b677285709b730e572724dcdd925e43333f2d45 /drivers/s390
parent:    278ecbf027c3c559deb225f0cf53a23b7672dacf (diff)
parent:    499723d12063aab97dfe6b41c822e9c1c74eff3e (diff)
download:  linux-1758feddb0f9751debdc865fefde94b45907c948.tar.bz2
Merge tag 's390-5.3-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Vasily Gorbik:
- Improve stop_machine wait logic: replace the cpu_relax_yield call in
  the generic stop_machine code with a weak stop_machine_yield
  function. This is overridden on s390, which yields the current cpu to
  the neighbouring cpu after a couple of retries, instead of blindly
  giving up the cpu to the hypervisor. This significantly improves
  stop_machine performance on s390 in overcommitted scenarios.
  This includes common code changes which have been Acked by Peter
  Zijlstra and Thomas Gleixner. (A sketch of the weak-function
  pattern follows this list.)
- Improve jump label transformation speed: transform jump labels
without using stop_machine.
- Refactor the vfio-ccw cp handling, simplifying the code and
  avoiding unneeded allocations and copies.
- Various vfio-ccw fixes (ccw translation, state machine).
- Add support for vfio-ap queue interrupt control in the guest. This
includes s390 kvm changes which have been Acked by Christian
Borntraeger.
- Add protected virtualization support for virtio-ccw.
- Enforce both CONFIG_SMP and CONFIG_HOTPLUG_CPU, which allows removal
  of some code that most likely was never working; besides that, s390
  did not even compile for !CONFIG_SMP.
- Support for special flagged EP11 CPRBs for zcrypt.
- Handle PCI devices with no support for new MIO instructions.
- Avoid KASAN false positives in reworked stack unwinder.
- Couple of fixes for the QDIO layer.
- Convert s390 specific documentation to ReST format.
- Let s390 crypto modules return -ENODEV instead of -EOPNOTSUPP if the
  hardware is missing. This way our modules behave like most other
  modules, which is also what systemd's systemd-modules-load.service
  expects. (A minimal sketch of the pattern follows the commit list
  below.)
- Replace defconfig with performance_defconfig, so there is one less
  config file to maintain.
- Remove the SCLP call home device driver, which was never useful.
- Cleanups all over the place.
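As an aside on the stop_machine item above: the generic code keeps a weak
default that an architecture can override at link time. Below is a minimal
sketch of that pattern, assuming s390-style helpers (spin_retry and
smp_yield_cpu() are arch internals here); this is a simplified illustration,
not the verbatim kernel code:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/smp.h>

/* kernel/stop_machine.c: weak default, used when no arch override exists */
void __weak stop_machine_yield(const struct cpumask *cpumask)
{
	cpu_relax();
}

/*
 * Arch override (s390-flavoured sketch, lives in a separate compilation
 * unit): spin a few times, then yield the cpu directly to the next cpu
 * in the mask instead of handing it back to the hypervisor blindly.
 */
static DEFINE_PER_CPU(int, relax_retries);

void stop_machine_yield(const struct cpumask *cpumask)
{
	int cpu, this_cpu = smp_processor_id();

	if (__this_cpu_inc_return(relax_retries) >= spin_retry) {
		__this_cpu_write(relax_retries, 0);
		cpu = cpumask_next_wrap(this_cpu, cpumask, this_cpu, false);
		if (cpu < nr_cpu_ids)
			smp_yield_cpu(cpu); /* s390-specific yield helper */
	} else {
		cpu_relax();
	}
}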
* tag 's390-5.3-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (83 commits)
docs: s390: s390dbf: typos and formatting, update crash command
docs: s390: unify and update s390dbf kdocs at debug.c
docs: s390: restore important non-kdoc parts of s390dbf.rst
vfio-ccw: Fix the conversion of Format-0 CCWs to Format-1
s390/pci: correctly handle MIO opt-out
s390/pci: deal with devices that have no support for MIO instructions
s390: ap: kvm: Enable PQAP/AQIC facility for the guest
s390: ap: implement PAPQ AQIC interception in kernel
vfio: ap: register IOMMU VFIO notifier
s390: ap: kvm: add PQAP interception for AQIC
s390/unwind: cleanup unused READ_ONCE_TASK_STACK
s390/kasan: avoid false positives during stack unwind
s390/qdio: don't touch the dsci in tiqdio_add_input_queues()
s390/qdio: (re-)initialize tiqdio list entries
s390/dasd: Fix a precision vs width bug in dasd_feature_list()
s390/cio: introduce driver_override on the css bus
vfio-ccw: make convert_ccw0_to_ccw1 static
vfio-ccw: Remove copy_ccw_from_iova()
vfio-ccw: Factor out the ccw0-to-ccw1 transition
vfio-ccw: Copy CCW data outside length calculation
...
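On the -ENODEV point from the message above, the shape of the fix is simply
a different errno from the module init routine when probing finds no
hardware. A minimal, hypothetical sketch (my_crypto_hw_present() stands in
for whatever CPACF/facility query the real s390 crypto modules perform):

#include <linux/module.h>
#include <linux/errno.h>

static bool my_crypto_hw_present(void)
{
	return false; /* placeholder for a real facility query */
}

static int __init my_crypto_init(void)
{
	/*
	 * -ENODEV, not -EOPNOTSUPP: "no such device" is the error that
	 * systemd-modules-load.service (like udev autoloading) treats
	 * as "hardware absent" rather than as a module failure.
	 */
	if (!my_crypto_hw_present())
		return -ENODEV;
	return 0;
}

static void __exit my_crypto_exit(void)
{
}

module_init(my_crypto_init);
module_exit(my_crypto_exit);
MODULE_LICENSE("GPL");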
Diffstat (limited to 'drivers/s390')
31 files changed, 1192 insertions, 764 deletions
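Much of the cio churn below moves per-device control blocks (irb, senseid,
pgid, iccws) into a 31-bit addressable DMA area backed by a gen_pool, and
exports ccw_device_dma_zalloc()/ccw_device_dma_free() for CCW drivers. A
hedged sketch of how a driver might use the new helpers to build a channel
program whose memory the hypervisor can access under protected
virtualization (the function name is illustrative, driver boilerplate
omitted; this is not code from the diff):

#include <asm/ccwdev.h>
#include <asm/cio.h>

static struct ccw1 *build_sense_cp(struct ccw_device *cdev, size_t buf_len)
{
	struct ccw1 *ccw;
	void *buf;

	/* both allocations come from the device's 31-bit DMA pool */
	ccw = ccw_device_dma_zalloc(cdev, sizeof(*ccw));
	if (!ccw)
		return NULL;
	buf = ccw_device_dma_zalloc(cdev, buf_len);
	if (!buf) {
		ccw_device_dma_free(cdev, ccw, sizeof(*ccw));
		return NULL;
	}
	ccw->cmd_code = CCW_CMD_BASIC_SENSE; /* any command; sense as example */
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = buf_len;
	ccw->cda = (__u32)(unsigned long)buf; /* fits: pool is below 2G */
	return ccw;
}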
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 9ac7574e3cfb..a8682f69effc 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -38,7 +38,7 @@ config DASD_PROFILE
 	depends on DASD
 	help
 	  Enable this option if you want to see profiling information
-	  in /proc/dasd/statistics. 
+	  in /proc/dasd/statistics.
 
 config DASD_ECKD
 	def_tristate y
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index fab35c6170cc..245f33c2f71e 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -203,7 +203,7 @@ static int __init dasd_feature_list(char *str)
 	else if (len == 8 && !strncmp(str, "failfast", 8))
 		features |= DASD_FEATURE_FAILFAST;
 	else {
-		pr_warn("%*s is not a supported device option\n",
+		pr_warn("%.*s is not a supported device option\n",
 			len, str);
 		rc = -EINVAL;
 	}
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index ab0b243a947d..6cc4b19acf85 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -79,27 +79,6 @@ config SCLP_VT220_CONSOLE
 	  Include support for using an IBM SCLP VT220-compatible terminal as a
 	  Linux system console.
 
-config SCLP_ASYNC
-	def_tristate m
-	prompt "Support for Call Home via Asynchronous SCLP Records"
-	depends on S390
-	help
-	  This option enables the call home function, which is able to inform
-	  the service element and connected organisations about a kernel panic.
-	  You should only select this option if you know what you are doing,
-	  want for inform other people about your kernel panics,
-	  need this feature and intend to run your kernel in LPAR.
-
-config SCLP_ASYNC_ID
-	string "Component ID for Call Home"
-	depends on SCLP_ASYNC
-	default "000000000"
-	help
-	  The Component ID for Call Home is used to identify the correct
-	  problem reporting queue the call home records should be sent to.
-
-	  If your are unsure, please use the default value "000000000".
-
 config HMC_DRV
 	def_tristate m
 	prompt "Support for file transfers from HMC drive CD/DVD-ROM"
@@ -205,4 +184,3 @@ config S390_VMUR
 	depends on S390
 	help
 	  Character device driver for z/VM reader, puncher and printer.
-
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 3072b89785dd..b8a8816d94e7 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -31,7 +31,6 @@ obj-$(CONFIG_TN3215) += con3215.o
 obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
 obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
 obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
-obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
 
 obj-$(CONFIG_PCI) += sclp_pci.o
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
deleted file mode 100644
index e69b12a40636..000000000000
--- a/drivers/s390/char/sclp_async.c
+++ /dev/null
@@ -1,189 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Enable Asynchronous Notification via SCLP.
- *
- * Copyright IBM Corp. 2009
- * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/stat.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
-#include <linux/kmod.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/proc_fs.h>
-#include <linux/sysctl.h>
-#include <linux/utsname.h>
-#include "sclp.h"
-
-static int callhome_enabled;
-static struct sclp_req *request;
-static struct sclp_async_sccb *sccb;
-static int sclp_async_send_wait(char *message);
-static struct ctl_table_header *callhome_sysctl_header;
-static DEFINE_SPINLOCK(sclp_async_lock);
-#define SCLP_NORMAL_WRITE	0x00
-
-struct async_evbuf {
-	struct evbuf_header header;
-	u64 reserved;
-	u8 rflags;
-	u8 empty;
-	u8 rtype;
-	u8 otype;
-	char comp_id[12];
-	char data[3000]; /* there is still some space left */
-} __attribute__((packed));
-
-struct sclp_async_sccb {
-	struct sccb_header header;
-	struct async_evbuf evbuf;
-} __attribute__((packed));
-
-static struct sclp_register sclp_async_register = {
-	.send_mask = EVTYP_ASYNC_MASK,
-};
-
-static int call_home_on_panic(struct notifier_block *self,
-			      unsigned long event, void *data)
-{
-	strncat(data, init_utsname()->nodename,
-		sizeof(init_utsname()->nodename));
-	sclp_async_send_wait(data);
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block call_home_panic_nb = {
-	.notifier_call = call_home_on_panic,
-	.priority = INT_MAX,
-};
-
-static int zero;
-static int one = 1;
-
-static struct ctl_table callhome_table[] = {
-	{
-		.procname	= "callhome",
-		.data		= &callhome_enabled,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &zero,
-		.extra2		= &one,
-	},
-	{}
-};
-
-static struct ctl_table kern_dir_table[] = {
-	{
-		.procname	= "kernel",
-		.maxlen		= 0,
-		.mode		= 0555,
-		.child		= callhome_table,
-	},
-	{}
-};
-
-/*
- * Function used to transfer asynchronous notification
- * records which waits for send completion
- */
-static int sclp_async_send_wait(char *message)
-{
-	struct async_evbuf *evb;
-	int rc;
-	unsigned long flags;
-
-	if (!callhome_enabled)
-		return 0;
-	sccb->evbuf.header.type = EVTYP_ASYNC;
-	sccb->evbuf.rtype = 0xA5;
-	sccb->evbuf.otype = 0x00;
-	evb = &sccb->evbuf;
-	request->command = SCLP_CMDW_WRITE_EVENT_DATA;
-	request->sccb = sccb;
-	request->status = SCLP_REQ_FILLED;
-	strncpy(sccb->evbuf.data, message, sizeof(sccb->evbuf.data));
-	/*
-	 * Retain Queue
-	 * e.g. 5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS)
-	 */
-	strncpy(sccb->evbuf.comp_id, CONFIG_SCLP_ASYNC_ID,
-		sizeof(sccb->evbuf.comp_id));
-	sccb->evbuf.header.length = sizeof(sccb->evbuf);
-	sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header);
-	sccb->header.function_code = SCLP_NORMAL_WRITE;
-	rc = sclp_add_request(request);
-	if (rc)
-		return rc;
-	spin_lock_irqsave(&sclp_async_lock, flags);
-	while (request->status != SCLP_REQ_DONE &&
-		request->status != SCLP_REQ_FAILED) {
-		 sclp_sync_wait();
-	}
-	spin_unlock_irqrestore(&sclp_async_lock, flags);
-	if (request->status != SCLP_REQ_DONE)
-		return -EIO;
-	rc = ((struct sclp_async_sccb *)
-	       request->sccb)->header.response_code;
-	if (rc != 0x0020)
-		return -EIO;
-	if (evb->header.flags != 0x80)
-		return -EIO;
-	return rc;
-}
-
-static int __init sclp_async_init(void)
-{
-	int rc;
-
-	rc = sclp_register(&sclp_async_register);
-	if (rc)
-		return rc;
-	rc = -EOPNOTSUPP;
-	if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK))
-		goto out_sclp;
-	rc = -ENOMEM;
-	callhome_sysctl_header = register_sysctl_table(kern_dir_table);
-	if (!callhome_sysctl_header)
-		goto out_sclp;
-	request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
-	sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
-	if (!request || !sccb)
-		goto out_mem;
-	rc = atomic_notifier_chain_register(&panic_notifier_list,
-					    &call_home_panic_nb);
-	if (!rc)
-		goto out;
-out_mem:
-	kfree(request);
-	free_page((unsigned long) sccb);
-	unregister_sysctl_table(callhome_sysctl_header);
-out_sclp:
-	sclp_unregister(&sclp_async_register);
-out:
-	return rc;
-}
-module_init(sclp_async_init);
-
-static void __exit sclp_async_exit(void)
-{
-	atomic_notifier_chain_unregister(&panic_notifier_list,
-					 &call_home_panic_nb);
-	unregister_sysctl_table(callhome_sysctl_header);
-	sclp_unregister(&sclp_async_register);
-	free_page((unsigned long) sccb);
-	kfree(request);
-}
-module_exit(sclp_async_exit);
-
-MODULE_AUTHOR("Copyright IBM Corp. 2009");
-MODULE_AUTHOR("Hans-Joachim Picht <hans@linux.vnet.ibm.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SCLP Asynchronous Notification Records");
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 405a60538630..08f812475f5e 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -4,7 +4,7 @@
  * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
  * dump format as s390 standalone dumps.
  *
- * For more information please refer to Documentation/s390/zfcpdump.txt
+ * For more information please refer to Documentation/s390/zfcpdump.rst
  *
 * Copyright IBM Corp. 2003, 2008
 * Author(s): Michael Holzheu
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index 4534afc63591..427b2e24a8ce 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -16,9 +16,11 @@
 #include <linux/mutex.h>
 #include <linux/rculist.h>
 #include <linux/slab.h>
+#include <linux/dmapool.h>
 
 #include <asm/airq.h>
 #include <asm/isc.h>
+#include <asm/cio.h>
 
 #include "cio.h"
 #include "cio_debug.h"
@@ -27,7 +29,7 @@ static DEFINE_SPINLOCK(airq_lists_lock);
 static struct hlist_head airq_lists[MAX_ISC+1];
 
-static struct kmem_cache *airq_iv_cache;
+static struct dma_pool *airq_iv_cache;
 
 /**
  * register_adapter_interrupt() - register adapter interrupt handler
@@ -115,6 +117,11 @@ void __init init_airq_interrupts(void)
 	setup_irq(THIN_INTERRUPT, &airq_interrupt);
 }
 
+static inline unsigned long iv_size(unsigned long bits)
+{
+	return BITS_TO_LONGS(bits) * sizeof(unsigned long);
+}
+
 /**
  * airq_iv_create - create an interrupt vector
  * @bits: number of bits in the interrupt vector
@@ -132,17 +139,19 @@ struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags)
 		goto out;
 	iv->bits = bits;
 	iv->flags = flags;
-	size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
+	size = iv_size(bits);
 
 	if (flags & AIRQ_IV_CACHELINE) {
-		if ((cache_line_size() * BITS_PER_BYTE) < bits)
+		if ((cache_line_size() * BITS_PER_BYTE) < bits
+				|| !airq_iv_cache)
 			goto out_free;
 
-		iv->vector = kmem_cache_zalloc(airq_iv_cache, GFP_KERNEL);
+		iv->vector = dma_pool_zalloc(airq_iv_cache, GFP_KERNEL,
+					     &iv->vector_dma);
 		if (!iv->vector)
 			goto out_free;
 	} else {
-		iv->vector = kzalloc(size, GFP_KERNEL);
+		iv->vector = cio_dma_zalloc(size);
 		if (!iv->vector)
 			goto out_free;
 	}
@@ -178,10 +187,10 @@ out_free:
 	kfree(iv->ptr);
 	kfree(iv->bitlock);
 	kfree(iv->avail);
-	if (iv->flags & AIRQ_IV_CACHELINE)
-		kmem_cache_free(airq_iv_cache, iv->vector);
+	if (iv->flags & AIRQ_IV_CACHELINE && iv->vector)
+		dma_pool_free(airq_iv_cache, iv->vector, iv->vector_dma);
 	else
-		kfree(iv->vector);
+		cio_dma_free(iv->vector, size);
 	kfree(iv);
 out:
 	return NULL;
@@ -198,9 +207,9 @@ void airq_iv_release(struct airq_iv *iv)
 	kfree(iv->ptr);
 	kfree(iv->bitlock);
 	if (iv->flags & AIRQ_IV_CACHELINE)
-		kmem_cache_free(airq_iv_cache, iv->vector);
+		dma_pool_free(airq_iv_cache, iv->vector, iv->vector_dma);
 	else
-		kfree(iv->vector);
+		cio_dma_free(iv->vector, iv_size(iv->bits));
 	kfree(iv->avail);
 	kfree(iv);
 }
@@ -295,12 +304,12 @@ unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
 }
 EXPORT_SYMBOL(airq_iv_scan);
 
-static int __init airq_init(void)
+int __init airq_init(void)
 {
-	airq_iv_cache = kmem_cache_create("airq_iv_cache", cache_line_size(),
-					  cache_line_size(), 0, NULL);
+	airq_iv_cache = dma_pool_create("airq_iv_cache", cio_get_dma_css_dev(),
+					cache_line_size(),
+					cache_line_size(), PAGE_SIZE);
 	if (!airq_iv_cache)
 		return -ENOMEM;
 	return 0;
 }
-subsys_initcall(airq_init);
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 603268a33ea1..73582a0a2622 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -63,7 +63,7 @@ static void ccwreq_stop(struct ccw_device *cdev, int rc)
 		return;
 	req->done = 1;
 	ccw_device_set_timeout(cdev, 0);
-	memset(&cdev->private->irb, 0, sizeof(struct irb));
+	memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
 	if (rc && rc != -ENODEV && req->drc)
 		rc = req->drc;
 	req->callback(cdev, req->data, rc);
@@ -86,7 +86,7 @@ static void ccwreq_do(struct ccw_device *cdev)
 			continue;
 		}
 		/* Perform start function. */
-		memset(&cdev->private->irb, 0, sizeof(struct irb));
+		memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
 		rc = cio_start(sch, cp, (u8) req->mask);
 		if (rc == 0) {
 			/* I/O started successfully. */
@@ -169,7 +169,7 @@ int ccw_request_cancel(struct ccw_device *cdev)
  */
 static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
 {
-	struct irb *irb = &cdev->private->irb;
+	struct irb *irb = &cdev->private->dma_area->irb;
 	struct cmd_scsw *scsw = &irb->scsw.cmd;
 	enum uc_todo todo;
 
@@ -187,7 +187,8 @@ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
 		CIO_TRACE_EVENT(2, "sensedata");
 		CIO_HEX_EVENT(2, &cdev->private->dev_id,
 			      sizeof(struct ccw_dev_id));
-		CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
+		CIO_HEX_EVENT(2, &cdev->private->dma_area->irb.ecw,
+			      SENSE_MAX_COUNT);
 		/* Check for command reject. */
 		if (irb->ecw[0] & SNS0_CMD_REJECT)
 			return IO_REJECTED;
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index a835b31aad99..6392a1b95b02 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -323,36 +323,6 @@ struct chsc_sei {
 } __packed __aligned(PAGE_SIZE);
 
 /*
- * Node Descriptor as defined in SA22-7204, "Common I/O-Device Commands"
- */
-
-#define ND_VALIDITY_VALID	0
-#define ND_VALIDITY_OUTDATED	1
-#define ND_VALIDITY_INVALID	2
-
-struct node_descriptor {
-	/* Flags. */
-	union {
-		struct {
-			u32 validity:3;
-			u32 reserved:5;
-		} __packed;
-		u8 byte0;
-	} __packed;
-
-	/* Node parameters. */
-	u32 params:24;
-
-	/* Node ID. */
-	char type[6];
-	char model[3];
-	char manufacturer[3];
-	char plant[2];
-	char seq[12];
-	u16 tag;
-} __packed;
-
-/*
  * Link Incident Record as defined in SA22-7202, "ESCON I/O Interface"
  */
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 06a91743335a..ba7d2480613b 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -113,6 +113,7 @@ struct subchannel {
 	enum sch_todo todo;
 	struct work_struct todo_work;
 	struct schib_config config;
+	char *driver_override; /* Driver name to force a match */
 } __attribute__ ((aligned(8)));
 
 DECLARE_PER_CPU_ALIGNED(struct irb, cio_irb);
@@ -135,6 +136,8 @@ extern int cio_commit_config(struct subchannel *sch);
 int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
 int cio_tm_intrg(struct subchannel *sch);
 
+extern int __init airq_init(void);
+
 /* Use with care. */
#ifdef CONFIG_CCW_CONSOLE
 extern struct subchannel *cio_probe_console(void);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index aea502922646..e1f2d0eed544 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -20,6 +20,8 @@
 #include <linux/reboot.h>
 #include <linux/suspend.h>
 #include <linux/proc_fs.h>
+#include <linux/genalloc.h>
+#include <linux/dma-mapping.h>
 #include <asm/isc.h>
 #include <asm/crw.h>
 
@@ -165,6 +167,7 @@ static void css_subchannel_release(struct device *dev)
 
 	sch->config.intparm = 0;
 	cio_commit_config(sch);
+	kfree(sch->driver_override);
 	kfree(sch->lock);
 	kfree(sch);
 }
@@ -224,6 +227,12 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
 	INIT_WORK(&sch->todo_work, css_sch_todo);
 	sch->dev.release = &css_subchannel_release;
 	device_initialize(&sch->dev);
+	/*
+	 * The physical addresses of some the dma structures that can
+	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
+	 */
+	sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
+	sch->dev.dma_mask = &sch->dev.coherent_dma_mask;
 	return sch;
 
 err:
@@ -315,9 +324,57 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RO(modalias);
 
+static ssize_t driver_override_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	char *driver_override, *old, *cp;
+
+	/* We need to keep extra room for a newline */
+	if (count >= (PAGE_SIZE - 1))
+		return -EINVAL;
+
+	driver_override = kstrndup(buf, count, GFP_KERNEL);
+	if (!driver_override)
+		return -ENOMEM;
+
+	cp = strchr(driver_override, '\n');
+	if (cp)
+		*cp = '\0';
+
+	device_lock(dev);
+	old = sch->driver_override;
+	if (strlen(driver_override)) {
+		sch->driver_override = driver_override;
+	} else {
+		kfree(driver_override);
+		sch->driver_override = NULL;
+	}
+	device_unlock(dev);
+
+	kfree(old);
+
+	return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct subchannel *sch = to_subchannel(dev);
+	ssize_t len;
+
+	device_lock(dev);
+	len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
+	device_unlock(dev);
+	return len;
+}
+static DEVICE_ATTR_RW(driver_override);
+
 static struct attribute *subch_attrs[] = {
 	&dev_attr_type.attr,
 	&dev_attr_modalias.attr,
+	&dev_attr_driver_override.attr,
 	NULL,
 };
 
@@ -899,6 +956,13 @@ static int __init setup_css(int nr)
 	dev_set_name(&css->device, "css%x", nr);
 	css->device.groups = cssdev_attr_groups;
 	css->device.release = channel_subsystem_release;
+	/*
+	 * We currently allocate notifier bits with this (using
+	 * css->device as the device argument with the DMA API)
+	 * and are fine with 64 bit addresses.
+	 */
+	css->device.coherent_dma_mask = DMA_BIT_MASK(64);
+	css->device.dma_mask = &css->device.coherent_dma_mask;
 
 	mutex_init(&css->mutex);
 	css->cssid = chsc_get_cssid(nr);
@@ -1018,6 +1082,111 @@ static struct notifier_block css_power_notifier = {
 	.notifier_call = css_power_event,
 };
 
+#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
+static struct gen_pool *cio_dma_pool;
+
+/* Currently cio supports only a single css */
+struct device *cio_get_dma_css_dev(void)
+{
+	return &channel_subsystems[0]->device;
+}
+
+struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
+{
+	struct gen_pool *gp_dma;
+	void *cpu_addr;
+	dma_addr_t dma_addr;
+	int i;
+
+	gp_dma = gen_pool_create(3, -1);
+	if (!gp_dma)
+		return NULL;
+	for (i = 0; i < nr_pages; ++i) {
+		cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
+					      CIO_DMA_GFP);
+		if (!cpu_addr)
+			return gp_dma;
+		gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
+				  dma_addr, PAGE_SIZE, -1);
+	}
+	return gp_dma;
+}
+
+static void __gp_dma_free_dma(struct gen_pool *pool,
+			      struct gen_pool_chunk *chunk, void *data)
+{
+	size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;
+
+	dma_free_coherent((struct device *) data, chunk_size,
+			 (void *) chunk->start_addr,
+			 (dma_addr_t) chunk->phys_addr);
+}
+
+void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
+{
+	if (!gp_dma)
+		return;
+	/* this is quite ugly but no better idea */
+	gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
+	gen_pool_destroy(gp_dma);
+}
+
+static int cio_dma_pool_init(void)
+{
+	/* No need to free up the resources: compiled in */
+	cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
+	if (!cio_dma_pool)
+		return -ENOMEM;
+	return 0;
+}
+
+void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
+			size_t size)
+{
+	dma_addr_t dma_addr;
+	unsigned long addr;
+	size_t chunk_size;
+
+	if (!gp_dma)
+		return NULL;
+	addr = gen_pool_alloc(gp_dma, size);
+	while (!addr) {
+		chunk_size = round_up(size, PAGE_SIZE);
+		addr = (unsigned long) dma_alloc_coherent(dma_dev,
+					 chunk_size, &dma_addr, CIO_DMA_GFP);
+		if (!addr)
+			return NULL;
+		gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
+		addr = gen_pool_alloc(gp_dma, size);
+	}
+	return (void *) addr;
+}
+
+void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
+{
+	if (!cpu_addr)
+		return;
+	memset(cpu_addr, 0, size);
+	gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
+}
+
+/*
+ * Allocate dma memory from the css global pool. Intended for memory not
+ * specific to any single device within the css. The allocated memory
+ * is not guaranteed to be 31-bit addressable.
+ *
+ * Caution: Not suitable for early stuff like console.
+ */
+void *cio_dma_zalloc(size_t size)
+{
+	return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
+}
+
+void cio_dma_free(void *cpu_addr, size_t size)
+{
+	cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
+}
+
 /*
  * Now that the driver core is running, we can setup our channel subsystem.
  * The struct subchannel's are created during probing.
@@ -1059,16 +1228,22 @@ static int __init css_bus_init(void)
 	if (ret)
 		goto out_unregister;
 	ret = register_pm_notifier(&css_power_notifier);
-	if (ret) {
-		unregister_reboot_notifier(&css_reboot_notifier);
-		goto out_unregister;
-	}
+	if (ret)
+		goto out_unregister_rn;
+	ret = cio_dma_pool_init();
+	if (ret)
+		goto out_unregister_pmn;
+	airq_init();
 	css_init_done = 1;
 
 	/* Enable default isc for I/O subchannels. */
 	isc_register(IO_SCH_ISC);
 
 	return 0;
+out_unregister_pmn:
+	unregister_pm_notifier(&css_power_notifier);
+out_unregister_rn:
+	unregister_reboot_notifier(&css_reboot_notifier);
 out_unregister:
 	while (i-- > 0) {
 		struct channel_subsystem *css = channel_subsystems[i];
@@ -1222,6 +1397,10 @@ static int css_bus_match(struct device *dev, struct device_driver *drv)
 	struct css_driver *driver = to_cssdriver(drv);
 	struct css_device_id *id;
 
+	/* When driver_override is set, only bind to the matching driver */
+	if (sch->driver_override && strcmp(sch->driver_override, drv->name))
+		return 0;
+
 	for (id = driver->subchannel_type; id->match_flags; id++) {
 		if (sch->st == id->type)
 			return 1;
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 1540229a37bb..9985b7484a6b 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -24,6 +24,7 @@
 #include <linux/timer.h>
 #include <linux/kernel_stat.h>
 #include <linux/sched/signal.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/ccwdev.h>
 #include <asm/cio.h>
@@ -687,6 +688,9 @@ ccw_device_release(struct device *dev)
 	struct ccw_device *cdev;
 
 	cdev = to_ccwdev(dev);
+	cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area,
+			sizeof(*cdev->private->dma_area));
+	cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev);
 	/* Release reference of parent subchannel. */
 	put_device(cdev->dev.parent);
 	kfree(cdev->private);
@@ -696,15 +700,33 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
 {
 	struct ccw_device *cdev;
+	struct gen_pool *dma_pool;
 
 	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
-	if (cdev) {
-		cdev->private = kzalloc(sizeof(struct ccw_device_private),
-					GFP_KERNEL | GFP_DMA);
-		if (cdev->private)
-			return cdev;
-	}
+	if (!cdev)
+		goto err_cdev;
+	cdev->private = kzalloc(sizeof(struct ccw_device_private),
+				GFP_KERNEL | GFP_DMA);
+	if (!cdev->private)
+		goto err_priv;
+	cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
+	cdev->dev.dma_mask = &cdev->dev.coherent_dma_mask;
+	dma_pool = cio_gp_dma_create(&cdev->dev, 1);
+	if (!dma_pool)
+		goto err_dma_pool;
+	cdev->private->dma_pool = dma_pool;
+	cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
+					sizeof(*cdev->private->dma_area));
+	if (!cdev->private->dma_area)
+		goto err_dma_area;
+	return cdev;
+err_dma_area:
+	cio_gp_dma_destroy(dma_pool, &cdev->dev);
+err_dma_pool:
+	kfree(cdev->private);
+err_priv:
 	kfree(cdev);
+err_cdev:
 	return ERR_PTR(-ENOMEM);
 }
 
@@ -884,7 +906,7 @@ io_subchannel_recog_done(struct ccw_device *cdev)
 		wake_up(&ccw_device_init_wq);
 		break;
 	case DEV_STATE_OFFLINE:
-		/* 
+		/*
 		 * We can't register the device in interrupt context so
 		 * we schedule a work item.
 		 */
@@ -1062,6 +1084,14 @@ static int io_subchannel_probe(struct subchannel *sch)
 	if (!io_priv)
 		goto out_schedule;
 
+	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
+				sizeof(*io_priv->dma_area),
+				&io_priv->dma_area_dma, GFP_KERNEL);
+	if (!io_priv->dma_area) {
+		kfree(io_priv);
+		goto out_schedule;
+	}
+
 	set_io_private(sch, io_priv);
 	css_schedule_eval(sch->schid);
 	return 0;
@@ -1088,6 +1118,8 @@ static int io_subchannel_remove(struct subchannel *sch)
 	set_io_private(sch, NULL);
 	spin_unlock_irq(sch->lock);
 out_free:
+	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
+			  io_priv->dma_area, io_priv->dma_area_dma);
 	kfree(io_priv);
 	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
 	return 0;
@@ -1593,13 +1625,19 @@ struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
 		return ERR_CAST(sch);
 
 	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
-	if (!io_priv) {
-		put_device(&sch->dev);
-		return ERR_PTR(-ENOMEM);
-	}
+	if (!io_priv)
+		goto err_priv;
+	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
+				sizeof(*io_priv->dma_area),
+				&io_priv->dma_area_dma, GFP_KERNEL);
+	if (!io_priv->dma_area)
+		goto err_dma_area;
 	set_io_private(sch, io_priv);
 	cdev = io_subchannel_create_ccwdev(sch);
 	if (IS_ERR(cdev)) {
+		dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
+				  io_priv->dma_area, io_priv->dma_area_dma);
+		set_io_private(sch, NULL);
 		put_device(&sch->dev);
 		kfree(io_priv);
 		return cdev;
@@ -1607,6 +1645,12 @@ struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
 	cdev->drv = drv;
 	ccw_device_set_int_class(cdev);
 	return cdev;
+
+err_dma_area:
+	kfree(io_priv);
+err_priv:
+	put_device(&sch->dev);
+	return ERR_PTR(-ENOMEM);
 }
 
 void __init ccw_device_destroy_console(struct ccw_device *cdev)
@@ -1617,6 +1661,8 @@ void __init ccw_device_destroy_console(struct ccw_device *cdev)
 	set_io_private(sch, NULL);
 	put_device(&sch->dev);
 	put_device(&cdev->dev);
+	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
+			  io_priv->dma_area, io_priv->dma_area_dma);
 	kfree(io_priv);
 }
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 9169af7dbb43..8fc267324ebb 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -67,8 +67,10 @@ static void ccw_timeout_log(struct ccw_device *cdev)
 				       sizeof(struct tcw), 0);
 	} else {
 		printk(KERN_WARNING "cio: orb indicates command mode\n");
-		if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
-		    (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
+		if ((void *)(addr_t)orb->cmd.cpa ==
+		    &private->dma_area->sense_ccw ||
+		    (void *)(addr_t)orb->cmd.cpa ==
+		    cdev->private->dma_area->iccws)
 			printk(KERN_WARNING "cio: last channel program "
 			       "(intern):\n");
 		else
@@ -143,18 +145,22 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
 void ccw_device_update_sense_data(struct ccw_device *cdev)
 {
 	memset(&cdev->id, 0, sizeof(cdev->id));
-	cdev->id.cu_type = cdev->private->senseid.cu_type;
-	cdev->id.cu_model = cdev->private->senseid.cu_model;
-	cdev->id.dev_type = cdev->private->senseid.dev_type;
-	cdev->id.dev_model = cdev->private->senseid.dev_model;
+	cdev->id.cu_type = cdev->private->dma_area->senseid.cu_type;
+	cdev->id.cu_model = cdev->private->dma_area->senseid.cu_model;
+	cdev->id.dev_type = cdev->private->dma_area->senseid.dev_type;
+	cdev->id.dev_model = cdev->private->dma_area->senseid.dev_model;
 }
 
 int ccw_device_test_sense_data(struct ccw_device *cdev)
 {
-	return cdev->id.cu_type == cdev->private->senseid.cu_type &&
-		cdev->id.cu_model == cdev->private->senseid.cu_model &&
-		cdev->id.dev_type == cdev->private->senseid.dev_type &&
-		cdev->id.dev_model == cdev->private->senseid.dev_model;
+	return cdev->id.cu_type ==
+		cdev->private->dma_area->senseid.cu_type &&
+		cdev->id.cu_model ==
+		cdev->private->dma_area->senseid.cu_model &&
+		cdev->id.dev_type ==
+		cdev->private->dma_area->senseid.dev_type &&
+		cdev->id.dev_model ==
+		cdev->private->dma_area->senseid.dev_model;
 }
 
 /*
@@ -342,7 +348,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
 		cio_disable_subchannel(sch);
 
 	/* Reset device status. */
-	memset(&cdev->private->irb, 0, sizeof(struct irb));
+	memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
 
 	cdev->private->state = state;
 
@@ -509,13 +515,14 @@ callback:
 		ccw_device_done(cdev, DEV_STATE_ONLINE);
 		/* Deliver fake irb to device driver, if needed. */
 		if (cdev->private->flags.fake_irb) {
-			create_fake_irb(&cdev->private->irb,
+			create_fake_irb(&cdev->private->dma_area->irb,
 					cdev->private->flags.fake_irb);
 			cdev->private->flags.fake_irb = 0;
 			if (cdev->handler)
 				cdev->handler(cdev, cdev->private->intparm,
-					      &cdev->private->irb);
-			memset(&cdev->private->irb, 0, sizeof(struct irb));
+					      &cdev->private->dma_area->irb);
+			memset(&cdev->private->dma_area->irb, 0,
+			       sizeof(struct irb));
 		}
 		ccw_device_report_path_events(cdev);
 		ccw_device_handle_broken_paths(cdev);
@@ -672,7 +679,8 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
 
 	if (scsw_actl(&sch->schib.scsw) != 0 ||
 	    (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
-	    (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
+	    (scsw_stctl(&cdev->private->dma_area->irb.scsw) &
+	     SCSW_STCTL_STATUS_PEND)) {
 		/*
 		 * No final status yet or final status not yet delivered
 		 * to the device driver. Can't do path verification now,
@@ -719,7 +727,7 @@ static int ccw_device_call_handler(struct ccw_device *cdev)
 	 *  - fast notification was requested (primary status)
 	 *  - unsolicited interrupts
 	 */
-	stctl = scsw_stctl(&cdev->private->irb.scsw);
+	stctl = scsw_stctl(&cdev->private->dma_area->irb.scsw);
 	ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
 		(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
 		(stctl == SCSW_STCTL_STATUS_PEND);
@@ -735,9 +743,9 @@ static int ccw_device_call_handler(struct ccw_device *cdev)
 
 	if (cdev->handler)
 		cdev->handler(cdev, cdev->private->intparm,
-			      &cdev->private->irb);
+			      &cdev->private->dma_area->irb);
 
-	memset(&cdev->private->irb, 0, sizeof(struct irb));
+	memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
 	return 1;
 }
 
@@ -759,7 +767,8 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
 			/* Unit check but no sense data. Need basic sense. */
 			if (ccw_device_do_sense(cdev, irb) != 0)
 				goto call_handler_unsol;
-			memcpy(&cdev->private->irb, irb, sizeof(struct irb));
+			memcpy(&cdev->private->dma_area->irb, irb,
+			       sizeof(struct irb));
 			cdev->private->state = DEV_STATE_W4SENSE;
 			cdev->private->intparm = 0;
 			return;
@@ -842,7 +851,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
 	if (scsw_fctl(&irb->scsw) &
 	    (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
 		cdev->private->flags.dosense = 0;
-		memset(&cdev->private->irb, 0, sizeof(struct irb));
+		memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
 		ccw_device_accumulate_irb(cdev, irb);
 		goto call_handler;
 	}
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index f6df83a9dfbb..740996d0dc8c 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -99,7 +99,7 @@ static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
 static int diag210_get_dev_info(struct ccw_device *cdev)
 {
 	struct ccw_dev_id *dev_id = &cdev->private->dev_id;
-	struct senseid *senseid = &cdev->private->senseid;
+	struct senseid *senseid = &cdev->private->dma_area->senseid;
 	struct diag210 diag_data;
 	int rc;
 
@@ -134,8 +134,10 @@ err_failed:
 static void snsid_init(struct ccw_device *cdev)
 {
 	cdev->private->flags.esid = 0;
-	memset(&cdev->private->senseid, 0, sizeof(cdev->private->senseid));
-	cdev->private->senseid.cu_type = 0xffff;
+
+	memset(&cdev->private->dma_area->senseid, 0,
+	       sizeof(cdev->private->dma_area->senseid));
+	cdev->private->dma_area->senseid.cu_type = 0xffff;
 }
 
 /*
@@ -143,16 +145,16 @@ static void snsid_init(struct ccw_device *cdev)
  */
 static int snsid_check(struct ccw_device *cdev, void *data)
 {
-	struct cmd_scsw *scsw = &cdev->private->irb.scsw.cmd;
+	struct cmd_scsw *scsw = &cdev->private->dma_area->irb.scsw.cmd;
 	int len = sizeof(struct senseid) - scsw->count;
 
 	/* Check for incomplete SENSE ID data. */
 	if (len < SENSE_ID_MIN_LEN)
 		goto out_restart;
-	if (cdev->private->senseid.cu_type == 0xffff)
+	if (cdev->private->dma_area->senseid.cu_type == 0xffff)
 		goto out_restart;
 	/* Check for incompatible SENSE ID data. */
-	if (cdev->private->senseid.reserved != 0xff)
+	if (cdev->private->dma_area->senseid.reserved != 0xff)
 		return -EOPNOTSUPP;
 	/* Check for extended-identification information. */
 	if (len > SENSE_ID_BASIC_LEN)
@@ -170,7 +172,7 @@ out_restart:
 static void snsid_callback(struct ccw_device *cdev, void *data, int rc)
 {
 	struct ccw_dev_id *id = &cdev->private->dev_id;
-	struct senseid *senseid = &cdev->private->senseid;
+	struct senseid *senseid = &cdev->private->dma_area->senseid;
 	int vm = 0;
 
 	if (rc && MACHINE_IS_VM) {
@@ -200,7 +202,7 @@ void ccw_device_sense_id_start(struct ccw_device *cdev)
 {
 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 	struct ccw_request *req = &cdev->private->req;
-	struct ccw1 *cp = cdev->private->iccws;
+	struct ccw1 *cp = cdev->private->dma_area->iccws;
 
 	CIO_TRACE_EVENT(4, "snsid");
 	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
@@ -208,7 +210,7 @@ void ccw_device_sense_id_start(struct ccw_device *cdev)
 	snsid_init(cdev);
 	/* Channel program setup. */
 	cp->cmd_code = CCW_CMD_SENSE_ID;
-	cp->cda = (u32) (addr_t) &cdev->private->senseid;
+	cp->cda = (u32) (addr_t) &cdev->private->dma_area->senseid;
 	cp->count = sizeof(struct senseid);
 	cp->flags = CCW_FLAG_SLI;
 	/* Request setup. */
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 4435ae0b3027..d722458c5928 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -429,8 +429,8 @@ struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
 	if (cdev->private->flags.esid == 0)
 		return NULL;
 	for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
-		if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
-			return cdev->private->senseid.ciw + ciw_cnt;
+		if (cdev->private->dma_area->senseid.ciw[ciw_cnt].ct == ct)
+			return cdev->private->dma_area->senseid.ciw + ciw_cnt;
 	return NULL;
 }
 
@@ -699,6 +699,23 @@ void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
 }
 EXPORT_SYMBOL_GPL(ccw_device_get_schid);
 
+/*
+ * Allocate zeroed dma coherent 31 bit addressable memory using
+ * the subchannels dma pool. Maximal size of allocation supported
+ * is PAGE_SIZE.
+ */
+void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size)
+{
+	return cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size);
+}
+EXPORT_SYMBOL(ccw_device_dma_zalloc);
+
+void ccw_device_dma_free(struct ccw_device *cdev, void *cpu_addr, size_t size)
+{
+	cio_gp_dma_free(cdev->private->dma_pool, cpu_addr, size);
+}
+EXPORT_SYMBOL(ccw_device_dma_free);
+
 EXPORT_SYMBOL(ccw_device_set_options_mask);
 EXPORT_SYMBOL(ccw_device_set_options);
 EXPORT_SYMBOL(ccw_device_clear_options);
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index d30a3babf176..767a85635a0f 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -57,7 +57,7 @@ out:
 static void nop_build_cp(struct ccw_device *cdev)
 {
 	struct ccw_request *req = &cdev->private->req;
-	struct ccw1 *cp = cdev->private->iccws;
+	struct ccw1 *cp = cdev->private->dma_area->iccws;
 
 	cp->cmd_code = CCW_CMD_NOOP;
 	cp->cda = 0;
@@ -134,9 +134,9 @@ err:
 static void spid_build_cp(struct ccw_device *cdev, u8 fn)
 {
 	struct ccw_request *req = &cdev->private->req;
-	struct ccw1 *cp = cdev->private->iccws;
+	struct ccw1 *cp = cdev->private->dma_area->iccws;
 	int i = pathmask_to_pos(req->lpm);
-	struct pgid *pgid = &cdev->private->pgid[i];
+	struct pgid *pgid = &cdev->private->dma_area->pgid[i];
 
 	pgid->inf.fc = fn;
 	cp->cmd_code = CCW_CMD_SET_PGID;
@@ -300,7 +300,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2)
 static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
 			 int *mismatch, u8 *reserved, u8 *reset)
 {
-	struct pgid *pgid = &cdev->private->pgid[0];
+	struct pgid *pgid = &cdev->private->dma_area->pgid[0];
 	struct pgid *first = NULL;
 	int lpm;
 	int i;
@@ -342,7 +342,7 @@ static u8 pgid_to_donepm(struct ccw_device *cdev)
 		lpm = 0x80 >> i;
 		if ((cdev->private->pgid_valid_mask & lpm) == 0)
 			continue;
-		pgid = &cdev->private->pgid[i];
+		pgid = &cdev->private->dma_area->pgid[i];
 		if (sch->opm & lpm) {
 			if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
 				continue;
@@ -368,7 +368,8 @@ static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
 	int i;
 
 	for (i = 0; i < 8; i++)
-		memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid));
+		memcpy(&cdev->private->dma_area->pgid[i], pgid,
+		       sizeof(struct pgid));
 }
 
 /*
@@ -435,12 +436,12 @@ out:
 static void snid_build_cp(struct ccw_device *cdev)
 {
 	struct ccw_request *req = &cdev->private->req;
-	struct ccw1 *cp = cdev->private->iccws;
+	struct ccw1 *cp = cdev->private->dma_area->iccws;
 	int i = pathmask_to_pos(req->lpm);
 
 	/* Channel program setup. */
 	cp->cmd_code = CCW_CMD_SENSE_PGID;
-	cp->cda = (u32) (addr_t) &cdev->private->pgid[i];
+	cp->cda = (u32) (addr_t) &cdev->private->dma_area->pgid[i];
 	cp->count = sizeof(struct pgid);
 	cp->flags = CCW_FLAG_SLI;
 	req->cp = cp;
@@ -516,7 +517,8 @@ static void verify_start(struct ccw_device *cdev)
 	sch->lpm = sch->schib.pmcw.pam;
 
 	/* Initialize PGID data. */
-	memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
+	memset(cdev->private->dma_area->pgid, 0,
+	       sizeof(cdev->private->dma_area->pgid));
 	cdev->private->pgid_valid_mask = 0;
 	cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
 	cdev->private->path_notoper_mask = 0;
@@ -626,7 +628,7 @@ struct stlck_data {
 static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
 {
 	struct ccw_request *req = &cdev->private->req;
-	struct ccw1 *cp = cdev->private->iccws;
+	struct ccw1 *cp = cdev->private->dma_area->iccws;
 
 	cp[0].cmd_code = CCW_CMD_STLCK;
 	cp[0].cda = (u32) (addr_t) buf1;
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 7d5c7892b2c4..0bd8f2642732 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -79,15 +79,15 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
 	 * are condition that have to be met for the extended control
 	 * bit to have meaning. Sick.
 	 */
-	cdev->private->irb.scsw.cmd.ectl = 0;
+	cdev->private->dma_area->irb.scsw.cmd.ectl = 0;
 	if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
 	    !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
-		cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
+		cdev->private->dma_area->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
 	/* Check if extended control word is valid. */
-	if (!cdev->private->irb.scsw.cmd.ectl)
+	if (!cdev->private->dma_area->irb.scsw.cmd.ectl)
 		return;
 	/* Copy concurrent sense / model dependent information. */
-	memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw));
+	memcpy(&cdev->private->dma_area->irb.ecw, irb->ecw, sizeof(irb->ecw));
 }
 
 /*
@@ -118,7 +118,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
 	if (!ccw_device_accumulate_esw_valid(irb))
 		return;
 
-	cdev_irb = &cdev->private->irb;
+	cdev_irb = &cdev->private->dma_area->irb;
 
 	/* Copy last path used mask. */
 	cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
@@ -210,7 +210,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
 		ccw_device_path_notoper(cdev);
 	/* No irb accumulation for transport mode irbs. */
 	if (scsw_is_tm(&irb->scsw)) {
-		memcpy(&cdev->private->irb, irb, sizeof(struct irb));
+		memcpy(&cdev->private->dma_area->irb, irb, sizeof(struct irb));
 		return;
 	}
 	/*
@@ -219,7 +219,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
 	if (!scsw_is_solicited(&irb->scsw))
 		return;
 
-	cdev_irb = &cdev->private->irb;
+	cdev_irb = &cdev->private->dma_area->irb;
 
 	/*
 	 * If the clear function had been performed, all formerly pending
@@ -227,7 +227,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
 	 * intermediate accumulated status to the device driver.
 	 */
 	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
-		memset(&cdev->private->irb, 0, sizeof(struct irb));
+		memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
 
 	/* Copy bits which are valid only for the start function. */
 	if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
@@ -329,9 +329,9 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
 	/*
 	 * We have ending status but no sense information. Do a basic sense.
 	 */
-	sense_ccw = &to_io_private(sch)->sense_ccw;
+	sense_ccw = &to_io_private(sch)->dma_area->sense_ccw;
 	sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
-	sense_ccw->cda = (__u32) __pa(cdev->private->irb.ecw);
+	sense_ccw->cda = (__u32) __pa(cdev->private->dma_area->irb.ecw);
 	sense_ccw->count = SENSE_MAX_COUNT;
 	sense_ccw->flags = CCW_FLAG_SLI;
 
@@ -364,7 +364,7 @@ ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
 
 	if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
 	    (irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
-		cdev->private->irb.esw.esw0.erw.cons = 1;
+		cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
 		cdev->private->flags.dosense = 0;
 	}
 	/* Check if path verification is required. */
@@ -386,7 +386,7 @@ ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
 	/* Check for basic sense. */
 	if (cdev->private->flags.dosense &&
 	    !(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
-		cdev->private->irb.esw.esw0.erw.cons = 1;
+		cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
 		cdev->private->flags.dosense = 0;
 		return 0;
 	}
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 90e4e3a7841b..c03b4a19974e 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -9,15 +9,20 @@
 #include "css.h"
 #include "orb.h"
 
+struct io_subchannel_dma_area {
+	struct ccw1 sense_ccw;	/* static ccw for sense command */
+};
+
 struct io_subchannel_private {
 	union orb orb;		/* operation request block */
-	struct ccw1 sense_ccw;	/* static ccw for sense command */
 	struct ccw_device *cdev;/* pointer to the child ccw device */
 	struct {
 		unsigned int suspend:1;	/* allow suspend */
 		unsigned int prefetch:1;/* deny prefetch */
 		unsigned int inter:1;	/* suppress intermediate interrupts */
 	} __packed options;
+	struct io_subchannel_dma_area *dma_area;
+	dma_addr_t dma_area_dma;
 } __aligned(8);
 
 #define to_io_private(n) ((struct io_subchannel_private *) \
@@ -115,6 +120,13 @@ enum cdev_todo {
 #define FAKE_CMD_IRB	1
 #define FAKE_TM_IRB	2
 
+struct ccw_device_dma_area {
+	struct senseid senseid;	/* SenseID info */
+	struct ccw1 iccws[2];	/* ccws for SNID/SID/SPGID commands */
+	struct irb irb;		/* device status */
+	struct pgid pgid[8];	/* path group IDs per chpid*/
+};
+
 struct ccw_device_private {
 	struct ccw_device *cdev;
 	struct subchannel *sch;
@@ -156,11 +168,7 @@ struct ccw_device_private {
 	} __attribute__((packed)) flags;
 	unsigned long intparm;	/* user interruption parameter */
 	struct qdio_irq *qdio_data;
-	struct irb irb;		/* device status */
 	int async_kill_io_rc;
-	struct senseid senseid;	/* SenseID info */
-	struct pgid pgid[8];	/* path group IDs per chpid*/
-	struct ccw1 iccws[2];	/* ccws for SNID/SID/SPGID commands */
 	struct work_struct todo_work;
 	enum cdev_todo todo;
 	wait_queue_head_t wait_q;
@@ -169,6 +177,8 @@ struct ccw_device_private {
 	struct list_head cmb_list;	/* list of measured devices */
 	u64 cmb_start_time;		/* clock value of cmb reset */
 	void *cmb_wait;			/* deferred cmb enable/disable */
+	struct gen_pool *dma_pool;
+	struct ccw_device_dma_area *dma_area;
 	enum interruption_class int_class;
 };
 
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 7b7620de2acd..730c4e68094b 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -736,6 +736,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
 
 	switch (state) {
 	case SLSB_P_OUTPUT_EMPTY:
+	case SLSB_P_OUTPUT_PENDING:
 		/* the adapter got it */
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
 			"out empty:%1d %02x", q->nr, count);
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 99d7d2566a3a..d4101cecdc8d 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -150,6 +150,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
 			return -ENOMEM;
 		}
 		irq_ptr_qs[i] = q;
+		INIT_LIST_HEAD(&q->entry);
 	}
 	return 0;
 }
@@ -178,6 +179,7 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
 	q->mask = 1 << (31 - i);
 	q->nr = i;
 	q->handler = handler;
+	INIT_LIST_HEAD(&q->entry);
 }
 
 static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 28d59ac2204c..93ee067c10ca 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -79,7 +79,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
 	mutex_lock(&tiq_list_lock);
 	list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
 	mutex_unlock(&tiq_list_lock);
-	xchg(irq_ptr->dsci, 1 << 7);
 }
 
 void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
@@ -87,14 +86,14 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
 	struct qdio_q *q;
 
 	q = irq_ptr->input_qs[0];
-	/* if establish triggered an error */
-	if (!q || !q->entry.prev || !q->entry.next)
+	if (!q)
 		return;
 
 	mutex_lock(&tiq_list_lock);
 	list_del_rcu(&q->entry);
 	mutex_unlock(&tiq_list_lock);
 	synchronize_rcu();
+	INIT_LIST_HEAD(&q->entry);
 }
 
 static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
@@ -178,6 +177,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
 /**
  * tiqdio_thinint_handler - thin interrupt handler for qdio
  * @airq: pointer to adapter interrupt descriptor
+ * @floating: flag to recognize floating vs. directed interrupts (unused)
  */
 static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
 {
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 0e79799e9a71..1d4c893ead23 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -16,12 +16,6 @@
 
 #include "vfio_ccw_cp.h"
 
-/*
- * Max length for ccw chain.
- * XXX: Limit to 256, need to check more?
- */
-#define CCWCHAIN_LEN_MAX	256
-
 struct pfn_array {
 	/* Starting guest physical I/O address. */
 	unsigned long		pa_iova;
@@ -33,11 +27,6 @@ struct pfn_array {
 	int			pa_nr;
 };
 
-struct pfn_array_table {
-	struct pfn_array	*pat_pa;
-	int			pat_nr;
-};
-
 struct ccwchain {
 	struct list_head	next;
 	struct ccw1		*ch_ccw;
@@ -46,35 +35,29 @@ struct ccwchain {
 	/* Count of the valid ccws in chain. */
 	int			ch_len;
 	/* Pinned PAGEs for the original data. */
-	struct pfn_array_table	*ch_pat;
+	struct pfn_array	*ch_pa;
 };
 
 /*
- * pfn_array_alloc_pin() - alloc memory for PFNs, then pin user pages in memory
+ * pfn_array_alloc() - alloc memory for PFNs
  * @pa: pfn_array on which to perform the operation
- * @mdev: the mediated device to perform pin/unpin operations
 * @iova: target guest physical address
 * @len: number of bytes that should be pinned from @iova
 *
- * Attempt to allocate memory for PFNs, and pin user pages in memory.
+ * Attempt to allocate memory for PFNs.
 *
 * Usage of pfn_array:
 * We expect (pa_nr == 0) and (pa_iova_pfn == NULL), any field in
 * this structure will be filled in by this function.
 *
 * Returns:
- *   Number of pages pinned on success.
- *   If @pa->pa_nr is not 0, or @pa->pa_iova_pfn is not NULL initially,
- *   returns -EINVAL.
- *   If no pages were pinned, returns -errno.
+ *   0 if PFNs are allocated
+ *   -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova_pfn is not NULL
+ *   -ENOMEM if alloc failed
 */
-static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
-			       u64 iova, unsigned int len)
+static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
 {
-	int i, ret = 0;
-
-	if (!len)
-		return 0;
+	int i;
 
 	if (pa->pa_nr || pa->pa_iova_pfn)
 		return -EINVAL;
@@ -94,8 +77,27 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
 	pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
 
 	pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
-	for (i = 1; i < pa->pa_nr; i++)
+	pa->pa_pfn[0] = -1ULL;
+	for (i = 1; i < pa->pa_nr; i++) {
 		pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
+		pa->pa_pfn[i] = -1ULL;
+	}
+
+	return 0;
+}
+
+/*
+ * pfn_array_pin() - Pin user pages in memory
+ * @pa: pfn_array on which to perform the operation
+ * @mdev: the mediated device to perform pin operations
+ *
+ * Returns number of pages pinned upon success.
+ * If the pin request partially succeeds, or fails completely,
+ * all pages are left unpinned and a negative error value is returned.
+ */
+static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
+{
+	int ret = 0;
 
 	ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
 			     IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
@@ -112,8 +114,6 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
 
 err_out:
 	pa->pa_nr = 0;
-	kfree(pa->pa_iova_pfn);
-	pa->pa_iova_pfn = NULL;
 
 	return ret;
 }
@@ -121,60 +121,30 @@ err_out:
 /* Unpin the pages before releasing the memory. */
 static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
 {
-	vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
+	/* Only unpin if any pages were pinned to begin with */
+	if (pa->pa_nr)
+		vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
 	pa->pa_nr = 0;
 	kfree(pa->pa_iova_pfn);
 }
 
-static int pfn_array_table_init(struct pfn_array_table *pat, int nr)
-{
-	pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL);
-	if (unlikely(ZERO_OR_NULL_PTR(pat->pat_pa))) {
-		pat->pat_nr = 0;
-		return -ENOMEM;
-	}
-
-	pat->pat_nr = nr;
-
-	return 0;
-}
-
-static void pfn_array_table_unpin_free(struct pfn_array_table *pat,
-				       struct device *mdev)
+static bool pfn_array_iova_pinned(struct pfn_array *pa, unsigned long iova)
 {
-	int i;
-
-	for (i = 0; i < pat->pat_nr; i++)
-		pfn_array_unpin_free(pat->pat_pa + i, mdev);
-
-	if (pat->pat_nr) {
-		kfree(pat->pat_pa);
-		pat->pat_pa = NULL;
-		pat->pat_nr = 0;
-	}
-}
-
-static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat,
-					unsigned long iova)
-{
-	struct pfn_array *pa = pat->pat_pa;
 	unsigned long iova_pfn = iova >> PAGE_SHIFT;
-	int i, j;
+	int i;
 
-	for (i = 0; i < pat->pat_nr; i++, pa++)
-		for (j = 0; j < pa->pa_nr; j++)
-			if (pa->pa_iova_pfn[j] == iova_pfn)
-				return true;
+	for (i = 0; i < pa->pa_nr; i++)
+		if (pa->pa_iova_pfn[i] == iova_pfn)
+			return true;
 
 	return false;
 }
 
-/* Create the list idal words for a pfn_array_table. */
-static inline void pfn_array_table_idal_create_words(
-	struct pfn_array_table *pat,
+/* Create the list of IDAL words for a pfn_array. */
+static inline void pfn_array_idal_create_words(
+	struct pfn_array *pa,
 	unsigned long *idaws)
 {
-	struct pfn_array *pa;
-	int i, j, k;
+	int i;
 
 	/*
	 * Idal words (execept the first one) rely on the memory being 4k
	 * aligned. Should a user-provided data buffer fall short of this,
	 * there will be no problem here to simply use the phys to create an
	 * idaw.
 	 */
-	k = 0;
-	for (i = 0; i < pat->pat_nr; i++) {
-		pa = pat->pat_pa + i;
-		for (j = 0; j < pa->pa_nr; j++) {
-			idaws[k] = pa->pa_pfn[j] << PAGE_SHIFT;
-			if (k == 0)
-				idaws[k] += pa->pa_iova & (PAGE_SIZE - 1);
-			k++;
+
+	for (i = 0; i < pa->pa_nr; i++)
+		idaws[i] = pa->pa_pfn[i] << PAGE_SHIFT;
+
+	/* Adjust the first IDAW, since it may not start on a page boundary */
+	idaws[0] += pa->pa_iova & (PAGE_SIZE - 1);
+}
+
+static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
+{
+	struct ccw0 ccw0;
+	struct ccw1 *pccw1 = source;
+	int i;
+
+	for (i = 0; i < len; i++) {
+		ccw0 = *(struct ccw0 *)pccw1;
+		if ((pccw1->cmd_code & 0x0f) == CCW_CMD_TIC) {
+			pccw1->cmd_code = CCW_CMD_TIC;
+			pccw1->flags = 0;
+			pccw1->count = 0;
+		} else {
+			pccw1->cmd_code = ccw0.cmd_code;
+			pccw1->flags = ccw0.flags;
+			pccw1->count = ccw0.count;
 		}
+		pccw1->cda = ccw0.cda;
+		pccw1++;
 	}
 }
 
-
 /*
 * Within the domain (@mdev), copy @n bytes from a guest physical
 * address (@iova) to a host physical address (@to).
@@ -209,9 +196,15 @@ static long copy_from_iova(struct device *mdev,
 	int i, ret;
 	unsigned long l, m;
 
-	ret = pfn_array_alloc_pin(&pa, mdev, iova, n);
-	if (ret <= 0)
+	ret = pfn_array_alloc(&pa, iova, n);
+	if (ret < 0)
+		return ret;
+
+	ret = pfn_array_pin(&pa, mdev);
+	if (ret < 0) {
+		pfn_array_unpin_free(&pa, mdev);
 		return ret;
+	}
 
 	l = n;
 	for (i = 0; i < pa.pa_nr; i++) {
@@ -235,55 +228,60 @@ static long copy_from_iova(struct device *mdev,
 	return l;
 }
 
-static long copy_ccw_from_iova(struct channel_program *cp,
-			       struct ccw1 *to, u64 iova,
-			       unsigned long len)
-{
-	struct ccw0 ccw0;
-	struct ccw1 *pccw1;
-	int ret;
-	int i;
-
-	ret = copy_from_iova(cp->mdev, to, iova, len * sizeof(struct ccw1));
-	if (ret)
-		return ret;
-
-	if (!cp->orb.cmd.fmt) {
-		pccw1 = to;
-		for (i = 0; i < len; i++) {
-			ccw0 = *(struct ccw0 *)pccw1;
-			if ((pccw1->cmd_code & 0x0f) == CCW_CMD_TIC) {
-				pccw1->cmd_code = CCW_CMD_TIC;
-				pccw1->flags = 0;
-				pccw1->count = 0;
-			} else {
-				pccw1->cmd_code = ccw0.cmd_code;
-				pccw1->flags = ccw0.flags;
-				pccw1->count = ccw0.count;
-			}
-			pccw1->cda = ccw0.cda;
-			pccw1++;
-		}
-	}
-
-	return ret;
-}
-
 /*
 * Helpers to operate ccwchain.
 */
-#define ccw_is_test(_ccw) (((_ccw)->cmd_code & 0x0F) == 0)
+#define ccw_is_read(_ccw) (((_ccw)->cmd_code & 0x03) == 0x02)
+#define ccw_is_read_backward(_ccw) (((_ccw)->cmd_code & 0x0F) == 0x0C)
+#define ccw_is_sense(_ccw) (((_ccw)->cmd_code & 0x0F) == CCW_CMD_BASIC_SENSE)
 
 #define ccw_is_noop(_ccw) ((_ccw)->cmd_code == CCW_CMD_NOOP)
 
 #define ccw_is_tic(_ccw) ((_ccw)->cmd_code == CCW_CMD_TIC)
 
 #define ccw_is_idal(_ccw) ((_ccw)->flags & CCW_FLAG_IDA)
-
+#define ccw_is_skip(_ccw) ((_ccw)->flags & CCW_FLAG_SKIP)
 
 #define ccw_is_chain(_ccw) ((_ccw)->flags & (CCW_FLAG_CC | CCW_FLAG_DC))
 
 /*
+ * ccw_does_data_transfer()
+ *
+ * Determine whether a CCW will move any data, such that the guest pages
+ * would need to be pinned before performing the I/O.
+ *
+ * Returns 1 if yes, 0 if no.
+ */
+static inline int ccw_does_data_transfer(struct ccw1 *ccw)
+{
+	/* If the count field is zero, then no data will be transferred */
+	if (ccw->count == 0)
+		return 0;
+
+	/* If the command is a NOP, then no data will be transferred */
+	if (ccw_is_noop(ccw))
+		return 0;
+
+	/* If the skip flag is off, then data will be transferred */
+	if (!ccw_is_skip(ccw))
+		return 1;
+
+	/*
+	 * If the skip flag is on, it is only meaningful if the command
+	 * code is a read, read backward, sense, or sense ID. In those
+	 * cases, no data will be transferred.
+ */ + if (ccw_is_read(ccw) || ccw_is_read_backward(ccw)) + return 0; + + if (ccw_is_sense(ccw)) + return 0; + + /* The skip flag is on, but it is ignored for this command code. */ + return 1; +} + +/* * is_cpa_within_range() * * @cpa: channel program address being questioned @@ -319,7 +317,7 @@ static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len) /* Make ccw address aligned to 8. */ size = ((sizeof(*chain) + 7L) & -8L) + sizeof(*chain->ch_ccw) * len + - sizeof(*chain->ch_pat) * len; + sizeof(*chain->ch_pa) * len; chain = kzalloc(size, GFP_DMA | GFP_KERNEL); if (!chain) return NULL; @@ -328,7 +326,7 @@ static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len) chain->ch_ccw = (struct ccw1 *)data; data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len; - chain->ch_pat = (struct pfn_array_table *)data; + chain->ch_pa = (struct pfn_array *)data; chain->ch_len = len; @@ -348,31 +346,12 @@ static void ccwchain_cda_free(struct ccwchain *chain, int idx) { struct ccw1 *ccw = chain->ch_ccw + idx; - if (ccw_is_test(ccw) || ccw_is_noop(ccw) || ccw_is_tic(ccw)) - return; - if (!ccw->count) + if (ccw_is_tic(ccw)) return; kfree((void *)(u64)ccw->cda); } -/* Unpin the pages then free the memory resources. */ -static void cp_unpin_free(struct channel_program *cp) -{ - struct ccwchain *chain, *temp; - int i; - - cp->initialized = false; - list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) { - for (i = 0; i < chain->ch_len; i++) { - pfn_array_table_unpin_free(chain->ch_pat + i, - cp->mdev); - ccwchain_cda_free(chain, i); - } - ccwchain_free(chain); - } -} - /** * ccwchain_calc_length - calculate the length of the ccw chain. * @iova: guest physical address of the target ccw chain @@ -388,25 +367,9 @@ static void cp_unpin_free(struct channel_program *cp) */ static int ccwchain_calc_length(u64 iova, struct channel_program *cp) { - struct ccw1 *ccw, *p; - int cnt; - - /* - * Copy current chain from guest to host kernel. - * Currently the chain length is limited to CCWCHAIN_LEN_MAX (256). - * So copying 2K is enough (safe). - */ - p = ccw = kcalloc(CCWCHAIN_LEN_MAX, sizeof(*ccw), GFP_KERNEL); - if (!ccw) - return -ENOMEM; - - cnt = copy_ccw_from_iova(cp, ccw, iova, CCWCHAIN_LEN_MAX); - if (cnt) { - kfree(ccw); - return cnt; - } + struct ccw1 *ccw = cp->guest_cp; + int cnt = 0; - cnt = 0; do { cnt++; @@ -415,10 +378,8 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp) * orb specified one of the unsupported formats, we defer * checking for IDAWs in unsupported formats to here. */ - if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) { - kfree(p); + if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) return -EOPNOTSUPP; - } /* * We want to keep counting if the current CCW has the @@ -437,7 +398,6 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp) if (cnt == CCWCHAIN_LEN_MAX + 1) cnt = -EINVAL; - kfree(p); return cnt; } @@ -458,17 +418,23 @@ static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp) static int ccwchain_loop_tic(struct ccwchain *chain, struct channel_program *cp); -static int ccwchain_handle_tic(struct ccw1 *tic, struct channel_program *cp) +static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp) { struct ccwchain *chain; - int len, ret; + int len; - /* May transfer to an existing chain. 
*/ - if (tic_target_chain_exists(tic, cp)) - return 0; + /* Copy 2K (the most we support today) of possible CCWs */ + len = copy_from_iova(cp->mdev, cp->guest_cp, cda, + CCWCHAIN_LEN_MAX * sizeof(struct ccw1)); + if (len) + return len; - /* Get chain length. */ - len = ccwchain_calc_length(tic->cda, cp); + /* Convert any Format-0 CCWs to Format-1 */ + if (!cp->orb.cmd.fmt) + convert_ccw0_to_ccw1(cp->guest_cp, CCWCHAIN_LEN_MAX); + + /* Count the CCWs in the current chain */ + len = ccwchain_calc_length(cda, cp); if (len < 0) return len; @@ -476,14 +442,10 @@ static int ccwchain_handle_tic(struct ccw1 *tic, struct channel_program *cp) chain = ccwchain_alloc(cp, len); if (!chain) return -ENOMEM; - chain->ch_iova = tic->cda; + chain->ch_iova = cda; - /* Copy the new chain from user. */ - ret = copy_ccw_from_iova(cp, chain->ch_ccw, tic->cda, len); - if (ret) { - ccwchain_free(chain); - return ret; - } + /* Copy the actual CCWs into the new chain */ + memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1)); /* Loop for tics on this new chain. */ return ccwchain_loop_tic(chain, cp); @@ -501,7 +463,12 @@ static int ccwchain_loop_tic(struct ccwchain *chain, struct channel_program *cp) if (!ccw_is_tic(tic)) continue; - ret = ccwchain_handle_tic(tic, cp); + /* May transfer to an existing chain. */ + if (tic_target_chain_exists(tic, cp)) + continue; + + /* Build a ccwchain for the next segment */ + ret = ccwchain_handle_ccw(tic->cda, cp); if (ret) return ret; } @@ -534,115 +501,90 @@ static int ccwchain_fetch_direct(struct ccwchain *chain, struct channel_program *cp) { struct ccw1 *ccw; - struct pfn_array_table *pat; + struct pfn_array *pa; + u64 iova; unsigned long *idaws; int ret; + int bytes = 1; + int idaw_nr, idal_len; + int i; ccw = chain->ch_ccw + idx; - if (!ccw->count) { - /* - * We just want the translation result of any direct ccw - * to be an IDA ccw, so let's add the IDA flag for it. - * Although the flag will be ignored by firmware. - */ - ccw->flags |= CCW_FLAG_IDA; - return 0; - } - - /* - * Pin data page(s) in memory. - * The number of pages actually is the count of the idaws which will be - * needed when translating a direct ccw to a idal ccw. - */ - pat = chain->ch_pat + idx; - ret = pfn_array_table_init(pat, 1); - if (ret) - goto out_init; - - ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count); - if (ret < 0) - goto out_unpin; + if (ccw->count) + bytes = ccw->count; - /* Translate this direct ccw to a idal ccw. */ - idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL); - if (!idaws) { - ret = -ENOMEM; - goto out_unpin; + /* Calculate size of IDAL */ + if (ccw_is_idal(ccw)) { + /* Read first IDAW to see if it's 4K-aligned or not. */ + /* All subsequent IDAws will be 4K-aligned. */ + ret = copy_from_iova(cp->mdev, &iova, ccw->cda, sizeof(iova)); + if (ret) + return ret; + } else { + iova = ccw->cda; } - ccw->cda = (__u32) virt_to_phys(idaws); - ccw->flags |= CCW_FLAG_IDA; - - pfn_array_table_idal_create_words(pat, idaws); - - return 0; - -out_unpin: - pfn_array_table_unpin_free(pat, cp->mdev); -out_init: - ccw->cda = 0; - return ret; -} - -static int ccwchain_fetch_idal(struct ccwchain *chain, - int idx, - struct channel_program *cp) -{ - struct ccw1 *ccw; - struct pfn_array_table *pat; - unsigned long *idaws; - u64 idaw_iova; - unsigned int idaw_nr, idaw_len; - int i, ret; - - ccw = chain->ch_ccw + idx; - - if (!ccw->count) - return 0; - - /* Calculate size of idaws. 
 */
-	ret = copy_from_iova(cp->mdev, &idaw_iova, ccw->cda, sizeof(idaw_iova));
-	if (ret)
-		return ret;
-
-	idaw_nr = idal_nr_words((void *)(idaw_iova), ccw->count);
-	idaw_len = idaw_nr * sizeof(*idaws);
-
-	/* Pin data page(s) in memory. */
-	pat = chain->ch_pat + idx;
-	ret = pfn_array_table_init(pat, idaw_nr);
-	if (ret)
-		goto out_init;
+	idaw_nr = idal_nr_words((void *)iova, bytes);
+	idal_len = idaw_nr * sizeof(*idaws);
 
-	/* Translate idal ccw to use new allocated idaws. */
-	idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL);
+	/* Allocate an IDAL from host storage */
+	idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
 	if (!idaws) {
 		ret = -ENOMEM;
-		goto out_unpin;
+		goto out_init;
 	}
 
-	ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idaw_len);
-	if (ret)
+	/*
+	 * Allocate an array of pfn's for pages to pin/translate.
+	 * The number of pages is actually the count of the idaws
+	 * required for the data transfer, since we only support
+	 * 4K IDAWs today.
+	 */
+	pa = chain->ch_pa + idx;
+	ret = pfn_array_alloc(pa, iova, bytes);
+	if (ret < 0)
 		goto out_free_idaws;
 
-	ccw->cda = virt_to_phys(idaws);
+	if (ccw_is_idal(ccw)) {
+		/* Copy guest IDAL into host IDAL */
+		ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idal_len);
+		if (ret)
+			goto out_unpin;
 
-	for (i = 0; i < idaw_nr; i++) {
-		idaw_iova = *(idaws + i);
+		/*
+		 * Copy guest IDAWs into pfn_array, in case the memory they
+		 * occupy is not contiguous.
+		 */
+		for (i = 0; i < idaw_nr; i++)
+			pa->pa_iova_pfn[i] = idaws[i] >> PAGE_SHIFT;
+	} else {
+		/*
+		 * No action is required here; the iova addresses in pfn_array
+		 * were initialized sequentially in pfn_array_alloc() beginning
+		 * with the contents of ccw->cda.
+		 */
+	}
 
-		ret = pfn_array_alloc_pin(pat->pat_pa + i, cp->mdev,
-					  idaw_iova, 1);
+	if (ccw_does_data_transfer(ccw)) {
+		ret = pfn_array_pin(pa, cp->mdev);
 		if (ret < 0)
-			goto out_free_idaws;
+			goto out_unpin;
+	} else {
+		pa->pa_nr = 0;
 	}
 
-	pfn_array_table_idal_create_words(pat, idaws);
+	ccw->cda = (__u32) virt_to_phys(idaws);
+	ccw->flags |= CCW_FLAG_IDA;
+
+	/* Populate the IDAL with pinned/translated addresses from pfn */
+	pfn_array_idal_create_words(pa, idaws);
 
 	return 0;
 
+out_unpin:
+	pfn_array_unpin_free(pa, cp->mdev);
 out_free_idaws:
 	kfree(idaws);
-out_unpin:
-	pfn_array_table_unpin_free(pat, cp->mdev);
 out_init:
 	ccw->cda = 0;
 	return ret;
@@ -660,15 +602,9 @@ static int ccwchain_fetch_one(struct ccwchain *chain,
 {
 	struct ccw1 *ccw = chain->ch_ccw + idx;
 
-	if (ccw_is_test(ccw) || ccw_is_noop(ccw))
-		return 0;
-
 	if (ccw_is_tic(ccw))
 		return ccwchain_fetch_tic(chain, idx, cp);
 
-	if (ccw_is_idal(ccw))
-		return ccwchain_fetch_idal(chain, idx, cp);
-
 	return ccwchain_fetch_direct(chain, idx, cp);
 }
 
@@ -691,9 +627,7 @@ static int ccwchain_fetch_one(struct ccwchain *chain,
  */
 int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
 {
-	u64 iova = orb->cmd.cpa;
-	struct ccwchain *chain;
-	int len, ret;
+	int ret;
 
 	/*
 	 * XXX:
@@ -706,28 +640,11 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
 	memcpy(&cp->orb, orb, sizeof(*orb));
 	cp->mdev = mdev;
 
-	/* Get chain length. */
-	len = ccwchain_calc_length(iova, cp);
-	if (len < 0)
-		return len;
-
-	/* Alloc mem for the head chain. */
-	chain = ccwchain_alloc(cp, len);
-	if (!chain)
-		return -ENOMEM;
-	chain->ch_iova = iova;
-
-	/* Copy the head chain from guest. */
-	ret = copy_ccw_from_iova(cp, chain->ch_ccw, iova, len);
-	if (ret) {
-		ccwchain_free(chain);
-		return ret;
-	}
-
-	/* Now loop for its TICs.
*/ - ret = ccwchain_loop_tic(chain, cp); + /* Build a ccwchain for the first CCW segment */ + ret = ccwchain_handle_ccw(orb->cmd.cpa, cp); if (ret) - cp_unpin_free(cp); + cp_free(cp); + /* It is safe to force: if not set but idals used * ccwchain_calc_length returns an error. */ @@ -750,8 +667,20 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb) */ void cp_free(struct channel_program *cp) { - if (cp->initialized) - cp_unpin_free(cp); + struct ccwchain *chain, *temp; + int i; + + if (!cp->initialized) + return; + + cp->initialized = false; + list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) { + for (i = 0; i < chain->ch_len; i++) { + pfn_array_unpin_free(chain->ch_pa + i, cp->mdev); + ccwchain_cda_free(chain, i); + } + ccwchain_free(chain); + } } /** @@ -886,7 +815,11 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw) */ list_for_each_entry(chain, &cp->ccwchain_list, next) { ccw_head = (u32)(u64)chain->ch_ccw; - if (is_cpa_within_range(cpa, ccw_head, chain->ch_len)) { + /* + * On successful execution, cpa points just beyond the end + * of the chain. + */ + if (is_cpa_within_range(cpa, ccw_head, chain->ch_len + 1)) { /* * (cpa - ccw_head) is the offset value of the host * physical ccw to its chain head. @@ -919,8 +852,7 @@ bool cp_iova_pinned(struct channel_program *cp, u64 iova) list_for_each_entry(chain, &cp->ccwchain_list, next) { for (i = 0; i < chain->ch_len; i++) - if (pfn_array_table_iova_pinned(chain->ch_pat + i, - iova)) + if (pfn_array_iova_pinned(chain->ch_pa + i, iova)) return true; } diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h index 3c20cd208da5..7cdc38049033 100644 --- a/drivers/s390/cio/vfio_ccw_cp.h +++ b/drivers/s390/cio/vfio_ccw_cp.h @@ -16,6 +16,12 @@ #include "orb.h" +/* + * Max length for ccw chain. + * XXX: Limit to 256, need to check more? 
+ */ +#define CCWCHAIN_LEN_MAX 256 + /** * struct channel_program - manage information for channel program * @ccwchain_list: list head of ccwchains @@ -32,6 +38,7 @@ struct channel_program { union orb orb; struct device *mdev; bool initialized; + struct ccw1 *guest_cp; }; extern int cp_init(struct channel_program *cp, struct device *mdev, diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index 9125f7f4e64c..2b90a5ecaeb9 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -95,11 +95,11 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work) memcpy(private->io_region->irb_area, irb, sizeof(*irb)); mutex_unlock(&private->io_mutex); - if (private->io_trigger) - eventfd_signal(private->io_trigger, 1); - if (private->mdev && is_final) private->state = VFIO_CCW_STATE_IDLE; + + if (private->io_trigger) + eventfd_signal(private->io_trigger, 1); } /* @@ -129,6 +129,11 @@ static int vfio_ccw_sch_probe(struct subchannel *sch) if (!private) return -ENOMEM; + private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1), + GFP_KERNEL); + if (!private->cp.guest_cp) + goto out_free; + private->io_region = kmem_cache_zalloc(vfio_ccw_io_region, GFP_KERNEL | GFP_DMA); if (!private->io_region) @@ -169,6 +174,7 @@ out_free: kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region); if (private->io_region) kmem_cache_free(vfio_ccw_io_region, private->io_region); + kfree(private->cp.guest_cp); kfree(private); return ret; } @@ -185,6 +191,7 @@ static int vfio_ccw_sch_remove(struct subchannel *sch) kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region); kmem_cache_free(vfio_ccw_io_region, private->io_region); + kfree(private->cp.guest_cp); kfree(private); return 0; diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 45eb0c14b880..7f418d2d8cdf 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -690,7 +690,7 @@ int pkey_clr2protkey(u32 keytype, */ if (!cpacf_test_func(&pckmo_functions, fc)) { DEBUG_ERR("%s pckmo functions not available\n", __func__); - return -EOPNOTSUPP; + return -ENODEV; } /* prepare param block */ @@ -1695,15 +1695,15 @@ static int __init pkey_init(void) * are able to work with protected keys. */ if (!cpacf_query(CPACF_PCKMO, &pckmo_functions)) - return -EOPNOTSUPP; + return -ENODEV; /* check for kmc instructions available */ if (!cpacf_query(CPACF_KMC, &kmc_functions)) - return -EOPNOTSUPP; + return -ENODEV; if (!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) || !cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) || !cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) - return -EOPNOTSUPP; + return -ENODEV; pkey_debug_init(); diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c index e9824c35c34f..003662aa8060 100644 --- a/drivers/s390/crypto/vfio_ap_drv.c +++ b/drivers/s390/crypto/vfio_ap_drv.c @@ -5,6 +5,7 @@ * Copyright IBM Corp. 2018 * * Author(s): Tony Krowiak <akrowiak@linux.ibm.com> + * Pierre Morel <pmorel@linux.ibm.com> */ #include <linux/module.h> @@ -40,14 +41,45 @@ static struct ap_device_id ap_queue_ids[] = { MODULE_DEVICE_TABLE(vfio_ap, ap_queue_ids); +/** + * vfio_ap_queue_dev_probe: + * + * Allocate a vfio_ap_queue structure and associate it + * with the device as driver_data. 
+ */ static int vfio_ap_queue_dev_probe(struct ap_device *apdev) { + struct vfio_ap_queue *q; + + q = kzalloc(sizeof(*q), GFP_KERNEL); + if (!q) + return -ENOMEM; + dev_set_drvdata(&apdev->device, q); + q->apqn = to_ap_queue(&apdev->device)->qid; + q->saved_isc = VFIO_AP_ISC_INVALID; return 0; } +/** + * vfio_ap_queue_dev_remove: + * + * Takes the matrix lock to avoid actions on this device while removing + * Free the associated vfio_ap_queue structure + */ static void vfio_ap_queue_dev_remove(struct ap_device *apdev) { - /* Nothing to do yet */ + struct vfio_ap_queue *q; + int apid, apqi; + + mutex_lock(&matrix_dev->lock); + q = dev_get_drvdata(&apdev->device); + dev_set_drvdata(&apdev->device, NULL); + apid = AP_QID_CARD(q->apqn); + apqi = AP_QID_QUEUE(q->apqn); + vfio_ap_mdev_reset_queue(apid, apqi, 1); + vfio_ap_irq_disable(q); + kfree(q); + mutex_unlock(&matrix_dev->lock); } static void vfio_ap_matrix_dev_release(struct device *dev) diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 900b9cf20ca5..2c9fb1423a39 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -24,6 +24,296 @@ #define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough" #define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device" +static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev); + +static int match_apqn(struct device *dev, void *data) +{ + struct vfio_ap_queue *q = dev_get_drvdata(dev); + + return (q->apqn == *(int *)(data)) ? 1 : 0; +} + +/** + * vfio_ap_get_queue: Retrieve a queue with a specific APQN from a list + * @matrix_mdev: the associated mediated matrix + * @apqn: The queue APQN + * + * Retrieve a queue with a specific APQN from the list of the + * devices of the vfio_ap_drv. + * Verify that the APID and the APQI are set in the matrix. + * + * Returns the pointer to the associated vfio_ap_queue + */ +static struct vfio_ap_queue *vfio_ap_get_queue( + struct ap_matrix_mdev *matrix_mdev, + int apqn) +{ + struct vfio_ap_queue *q; + struct device *dev; + + if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm)) + return NULL; + if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) + return NULL; + + dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL, + &apqn, match_apqn); + if (!dev) + return NULL; + q = dev_get_drvdata(dev); + q->matrix_mdev = matrix_mdev; + put_device(dev); + + return q; +} + +/** + * vfio_ap_wait_for_irqclear + * @apqn: The AP Queue number + * + * Checks the IRQ bit for the status of this APQN using ap_tapq. + * Returns if the ap_tapq function succeeded and the bit is clear. + * Returns if ap_tapq function failed with invalid, deconfigured or + * checkstopped AP. + * Otherwise retries up to 5 times after waiting 20ms. 
+ *
+ */
+static void vfio_ap_wait_for_irqclear(int apqn)
+{
+	struct ap_queue_status status;
+	int retry = 5;
+
+	do {
+		status = ap_tapq(apqn, NULL);
+		switch (status.response_code) {
+		case AP_RESPONSE_NORMAL:
+		case AP_RESPONSE_RESET_IN_PROGRESS:
+			if (!status.irq_enabled)
+				return;
+			/* Fall through */
+		case AP_RESPONSE_BUSY:
+			msleep(20);
+			break;
+		case AP_RESPONSE_Q_NOT_AVAIL:
+		case AP_RESPONSE_DECONFIGURED:
+		case AP_RESPONSE_CHECKSTOPPED:
+		default:
+			WARN_ONCE(1, "%s: tapq rc %02x: %04x\n", __func__,
+				  status.response_code, apqn);
+			return;
+		}
+	} while (--retry);
+
+	WARN_ONCE(1, "%s: tapq rc %02x: %04x could not clear IR bit\n",
+		  __func__, status.response_code, apqn);
+}
+
+/**
+ * vfio_ap_free_aqic_resources
+ * @q: The vfio_ap_queue
+ *
+ * Unregisters the ISC in the GIB when the saved ISC is not invalid.
+ * Unpin the guest's page holding the NIB when it exists.
+ * Reset the saved_pfn and saved_isc to invalid values.
+ * Clear the pointer to the matrix mediated device.
+ *
+ */
+static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
+{
+	if (q->saved_isc != VFIO_AP_ISC_INVALID && q->matrix_mdev)
+		kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
+	if (q->saved_pfn && q->matrix_mdev)
+		vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev),
+				 &q->saved_pfn, 1);
+	q->saved_pfn = 0;
+	q->saved_isc = VFIO_AP_ISC_INVALID;
+	q->matrix_mdev = NULL;
+}
+
+/**
+ * vfio_ap_irq_disable
+ * @q: The vfio_ap_queue
+ *
+ * Uses ap_aqic to disable the interruption and in case of success, reset
+ * in progress or IRQ disable command already proceeded: calls
+ * vfio_ap_wait_for_irqclear() to check for the IRQ bit to be clear
+ * and calls vfio_ap_free_aqic_resources() to free the resources associated
+ * with the AP interrupt handling.
+ *
+ * In the case the AP is busy, or a reset is in progress,
+ * retries after 20ms, up to 5 times.
+ *
+ * Returns if ap_aqic function failed with invalid, deconfigured or
+ * checkstopped AP.
+ */
+struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
+{
+	struct ap_qirq_ctrl aqic_gisa = {};
+	struct ap_queue_status status;
+	int retries = 5;
+
+	do {
+		status = ap_aqic(q->apqn, aqic_gisa, NULL);
+		switch (status.response_code) {
+		case AP_RESPONSE_OTHERWISE_CHANGED:
+		case AP_RESPONSE_NORMAL:
+			vfio_ap_wait_for_irqclear(q->apqn);
+			goto end_free;
+		case AP_RESPONSE_RESET_IN_PROGRESS:
+		case AP_RESPONSE_BUSY:
+			msleep(20);
+			break;
+		case AP_RESPONSE_Q_NOT_AVAIL:
+		case AP_RESPONSE_DECONFIGURED:
+		case AP_RESPONSE_CHECKSTOPPED:
+		case AP_RESPONSE_INVALID_ADDRESS:
+		default:
+			/* All cases in default means AP not operational */
+			WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
+				  status.response_code);
+			goto end_free;
+		}
+	} while (retries--);
+
+	WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
+		  status.response_code);
+end_free:
+	vfio_ap_free_aqic_resources(q);
+	return status;
+}
+
+/**
+ * vfio_ap_setirq: Enable Interruption for an APQN
+ *
+ * @dev: the device associated with the ap_queue
+ * @q: the vfio_ap_queue holding AQIC parameters
+ *
+ * Pin the NIB saved in *q
+ * Register the guest ISC to GIB interface and retrieve the
+ * host ISC to issue the host side PQAP/AQIC
+ *
+ * Response.status may be set to AP_RESPONSE_INVALID_ADDRESS in case the
+ * vfio_pin_pages failed.
+ *
+ * Otherwise return the ap_queue_status returned by the ap_aqic(),
+ * all retry handling will be done by the guest.
+ */
+static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
+						 int isc,
+						 unsigned long nib)
+{
+	struct ap_qirq_ctrl aqic_gisa = {};
+	struct ap_queue_status status = {};
+	struct kvm_s390_gisa *gisa;
+	struct kvm *kvm;
+	unsigned long h_nib, g_pfn, h_pfn;
+	int ret;
+
+	g_pfn = nib >> PAGE_SHIFT;
+	ret = vfio_pin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1,
+			     IOMMU_READ | IOMMU_WRITE, &h_pfn);
+	switch (ret) {
+	case 1:
+		break;
+	default:
+		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
+		return status;
+	}
+
+	kvm = q->matrix_mdev->kvm;
+	gisa = kvm->arch.gisa_int.origin;
+
+	h_nib = (h_pfn << PAGE_SHIFT) | (nib & ~PAGE_MASK);
+	aqic_gisa.gisc = isc;
+	aqic_gisa.isc = kvm_s390_gisc_register(kvm, isc);
+	aqic_gisa.ir = 1;
+	aqic_gisa.gisa = (uint64_t)gisa >> 4;
+
+	status = ap_aqic(q->apqn, aqic_gisa, (void *)h_nib);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+		/* See if we did clear older IRQ configuration */
+		vfio_ap_free_aqic_resources(q);
+		q->saved_pfn = g_pfn;
+		q->saved_isc = isc;
+		break;
+	case AP_RESPONSE_OTHERWISE_CHANGED:
+		/* We could not modify IRQ settings: clear new configuration */
+		vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1);
+		kvm_s390_gisc_unregister(kvm, isc);
+		break;
+	default:
+		pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn,
+			status.response_code);
+		vfio_ap_irq_disable(q);
+		break;
+	}
+
+	return status;
+}
+
+/**
+ * handle_pqap: PQAP instruction callback
+ *
+ * @vcpu: The vcpu on which we received the PQAP instruction
+ *
+ * Get the general register contents to initialize internal variables.
+ * REG[0]: APQN
+ * REG[1]: IR and ISC
+ * REG[2]: NIB
+ *
+ * Response.status may be set to one of the following response codes:
+ * - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available
+ * - AP_RESPONSE_DECONFIGURED: if the queue is not configured
+ * - AP_RESPONSE_NORMAL (0) : in case of success
+ *   Check vfio_ap_setirq() and vfio_ap_clrirq() for other possible RC.
+ * We take the matrix_dev lock to ensure serialization on queues and
+ * mediated device access.
+ *
+ * Return 0 if we could handle the request inside KVM.
+ * Otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
+ */ +static int handle_pqap(struct kvm_vcpu *vcpu) +{ + uint64_t status; + uint16_t apqn; + struct vfio_ap_queue *q; + struct ap_queue_status qstatus = { + .response_code = AP_RESPONSE_Q_NOT_AVAIL, }; + struct ap_matrix_mdev *matrix_mdev; + + /* If we do not use the AIV facility just go to userland */ + if (!(vcpu->arch.sie_block->eca & ECA_AIV)) + return -EOPNOTSUPP; + + apqn = vcpu->run->s.regs.gprs[0] & 0xffff; + mutex_lock(&matrix_dev->lock); + + if (!vcpu->kvm->arch.crypto.pqap_hook) + goto out_unlock; + matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook, + struct ap_matrix_mdev, pqap_hook); + + q = vfio_ap_get_queue(matrix_mdev, apqn); + if (!q) + goto out_unlock; + + status = vcpu->run->s.regs.gprs[1]; + + /* If IR bit(16) is set we enable the interrupt */ + if ((status >> (63 - 16)) & 0x01) + qstatus = vfio_ap_irq_enable(q, status & 0x07, + vcpu->run->s.regs.gprs[2]); + else + qstatus = vfio_ap_irq_disable(q); + +out_unlock: + memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus)); + vcpu->run->s.regs.gprs[1] >>= 32; + mutex_unlock(&matrix_dev->lock); + return 0; +} + static void vfio_ap_matrix_init(struct ap_config_info *info, struct ap_matrix *matrix) { @@ -45,8 +335,11 @@ static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev) return -ENOMEM; } + matrix_mdev->mdev = mdev; vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix); mdev_set_drvdata(mdev, matrix_mdev); + matrix_mdev->pqap_hook.hook = handle_pqap; + matrix_mdev->pqap_hook.owner = THIS_MODULE; mutex_lock(&matrix_dev->lock); list_add(&matrix_mdev->node, &matrix_dev->mdev_list); mutex_unlock(&matrix_dev->lock); @@ -62,6 +355,7 @@ static int vfio_ap_mdev_remove(struct mdev_device *mdev) return -EBUSY; mutex_lock(&matrix_dev->lock); + vfio_ap_mdev_reset_queues(mdev); list_del(&matrix_mdev->node); mutex_unlock(&matrix_dev->lock); @@ -754,11 +1048,42 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev, } matrix_mdev->kvm = kvm; + kvm_get_kvm(kvm); + kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook; mutex_unlock(&matrix_dev->lock); return 0; } +/* + * vfio_ap_mdev_iommu_notifier: IOMMU notifier callback + * + * @nb: The notifier block + * @action: Action to be taken + * @data: data associated with the request + * + * For an UNMAP request, unpin the guest IOVA (the NIB guest address we + * pinned before). Other requests are ignored. 
+ * + */ +static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct ap_matrix_mdev *matrix_mdev; + + matrix_mdev = container_of(nb, struct ap_matrix_mdev, iommu_notifier); + + if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) { + struct vfio_iommu_type1_dma_unmap *unmap = data; + unsigned long g_pfn = unmap->iova >> PAGE_SHIFT; + + vfio_unpin_pages(mdev_dev(matrix_mdev->mdev), &g_pfn, 1); + return NOTIFY_OK; + } + + return NOTIFY_DONE; +} + static int vfio_ap_mdev_group_notifier(struct notifier_block *nb, unsigned long action, void *data) { @@ -790,15 +1115,36 @@ static int vfio_ap_mdev_group_notifier(struct notifier_block *nb, return NOTIFY_OK; } -static int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi, - unsigned int retry) +static void vfio_ap_irq_disable_apqn(int apqn) +{ + struct device *dev; + struct vfio_ap_queue *q; + + dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL, + &apqn, match_apqn); + if (dev) { + q = dev_get_drvdata(dev); + vfio_ap_irq_disable(q); + put_device(dev); + } +} + +int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi, + unsigned int retry) { struct ap_queue_status status; + int retry2 = 2; + int apqn = AP_MKQID(apid, apqi); do { - status = ap_zapq(AP_MKQID(apid, apqi)); + status = ap_zapq(apqn); switch (status.response_code) { case AP_RESPONSE_NORMAL: + while (!status.queue_empty && retry2--) { + msleep(20); + status = ap_tapq(apqn, NULL); + } + WARN_ON_ONCE(retry <= 0); return 0; case AP_RESPONSE_RESET_IN_PROGRESS: case AP_RESPONSE_BUSY: @@ -832,6 +1178,7 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev) */ if (ret) rc = ret; + vfio_ap_irq_disable_apqn(AP_MKQID(apid, apqi)); } } @@ -858,20 +1205,37 @@ static int vfio_ap_mdev_open(struct mdev_device *mdev) return ret; } - return 0; + matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier; + events = VFIO_IOMMU_NOTIFY_DMA_UNMAP; + ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, + &events, &matrix_mdev->iommu_notifier); + if (!ret) + return ret; + + vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, + &matrix_mdev->group_notifier); + module_put(THIS_MODULE); + return ret; } static void vfio_ap_mdev_release(struct mdev_device *mdev) { struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); - if (matrix_mdev->kvm) + mutex_lock(&matrix_dev->lock); + if (matrix_mdev->kvm) { kvm_arch_crypto_clear_masks(matrix_mdev->kvm); + matrix_mdev->kvm->arch.crypto.pqap_hook = NULL; + vfio_ap_mdev_reset_queues(mdev); + kvm_put_kvm(matrix_mdev->kvm); + matrix_mdev->kvm = NULL; + } + mutex_unlock(&matrix_dev->lock); - vfio_ap_mdev_reset_queues(mdev); + vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, + &matrix_mdev->iommu_notifier); vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &matrix_mdev->group_notifier); - matrix_mdev->kvm = NULL; module_put(THIS_MODULE); } @@ -900,6 +1264,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev, { int ret; + mutex_lock(&matrix_dev->lock); switch (cmd) { case VFIO_DEVICE_GET_INFO: ret = vfio_ap_mdev_get_device_info(arg); @@ -911,6 +1276,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev, ret = -EOPNOTSUPP; break; } + mutex_unlock(&matrix_dev->lock); return ret; } diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h index 76b7f98e47e9..f46dde56b464 100644 --- a/drivers/s390/crypto/vfio_ap_private.h +++ b/drivers/s390/crypto/vfio_ap_private.h @@ -4,6 +4,7 @@ * * 
Author(s): Tony Krowiak <akrowiak@linux.ibm.com> * Halil Pasic <pasic@linux.ibm.com> + * Pierre Morel <pmorel@linux.ibm.com> * * Copyright IBM Corp. 2018 */ @@ -16,6 +17,7 @@ #include <linux/mdev.h> #include <linux/delay.h> #include <linux/mutex.h> +#include <linux/kvm_host.h> #include "ap_bus.h" @@ -80,10 +82,23 @@ struct ap_matrix_mdev { struct list_head node; struct ap_matrix matrix; struct notifier_block group_notifier; + struct notifier_block iommu_notifier; struct kvm *kvm; + struct kvm_s390_module_hook pqap_hook; + struct mdev_device *mdev; }; extern int vfio_ap_mdev_register(void); extern void vfio_ap_mdev_unregister(void); +int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi, + unsigned int retry); +struct vfio_ap_queue { + struct ap_matrix_mdev *matrix_mdev; + unsigned long saved_pfn; + int apqn; +#define VFIO_AP_ISC_INVALID 0xff + unsigned char saved_isc; +}; +struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q); #endif /* _VFIO_AP_PRIVATE_H_ */ diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index 0cbcc238ef98..12fe9deb265e 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -567,6 +567,10 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg, payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt); *fcode = payload_hdr->func_val & 0xFFFF; + /* enable special processing based on the cprbs flags special bit */ + if (msg->cprbx.flags & 0x20) + ap_msg->special = 1; + return 0; } diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index 7c5a25ddf832..ced896d1534a 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig @@ -7,10 +7,10 @@ config LCS prompt "Lan Channel Station Interface" depends on CCW && NETDEVICES && (ETHERNET || FDDI) help - Select this option if you want to use LCS networking on IBM System z. - This device driver supports FDDI (IEEE 802.7) and Ethernet. - To compile as a module, choose M. The module name is lcs. - If you do not know what it is, it's safe to choose Y. + Select this option if you want to use LCS networking on IBM System z. + This device driver supports FDDI (IEEE 802.7) and Ethernet. + To compile as a module, choose M. The module name is lcs. + If you do not know what it is, it's safe to choose Y. 
config CTCM def_tristate m diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 6a3076881321..1a55e5942d36 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -46,9 +46,15 @@ struct vq_config_block { #define VIRTIO_CCW_CONFIG_SIZE 0x100 /* same as PCI config space size, should be enough for all drivers */ +struct vcdev_dma_area { + unsigned long indicators; + unsigned long indicators2; + struct vq_config_block config_block; + __u8 status; +}; + struct virtio_ccw_device { struct virtio_device vdev; - __u8 *status; __u8 config[VIRTIO_CCW_CONFIG_SIZE]; struct ccw_device *cdev; __u32 curr_io; @@ -58,17 +64,24 @@ struct virtio_ccw_device { spinlock_t lock; struct mutex io_lock; /* Serializes I/O requests */ struct list_head virtqueues; - unsigned long indicators; - unsigned long indicators2; - struct vq_config_block *config_block; bool is_thinint; bool going_away; bool device_lost; unsigned int config_ready; void *airq_info; - u64 dma_mask; + struct vcdev_dma_area *dma_area; }; +static inline unsigned long *indicators(struct virtio_ccw_device *vcdev) +{ + return &vcdev->dma_area->indicators; +} + +static inline unsigned long *indicators2(struct virtio_ccw_device *vcdev) +{ + return &vcdev->dma_area->indicators2; +} + struct vq_info_block_legacy { __u64 queue; __u32 align; @@ -127,11 +140,17 @@ static int virtio_ccw_use_airq = 1; struct airq_info { rwlock_t lock; - u8 summary_indicator; + u8 summary_indicator_idx; struct airq_struct airq; struct airq_iv *aiv; }; static struct airq_info *airq_areas[MAX_AIRQ_AREAS]; +static u8 *summary_indicators; + +static inline u8 *get_summary_indicator(struct airq_info *info) +{ + return summary_indicators + info->summary_indicator_idx; +} #define CCW_CMD_SET_VQ 0x13 #define CCW_CMD_VDEV_RESET 0x33 @@ -196,7 +215,7 @@ static void virtio_airq_handler(struct airq_struct *airq, bool floating) break; vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai)); } - info->summary_indicator = 0; + *(get_summary_indicator(info)) = 0; smp_wmb(); /* Walk through indicators field, summary indicator not active. 
*/ for (ai = 0;;) { @@ -208,7 +227,7 @@ static void virtio_airq_handler(struct airq_struct *airq, bool floating) read_unlock(&info->lock); } -static struct airq_info *new_airq_info(void) +static struct airq_info *new_airq_info(int index) { struct airq_info *info; int rc; @@ -217,13 +236,15 @@ static struct airq_info *new_airq_info(void) if (!info) return NULL; rwlock_init(&info->lock); - info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR); + info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR + | AIRQ_IV_CACHELINE); if (!info->aiv) { kfree(info); return NULL; } info->airq.handler = virtio_airq_handler; - info->airq.lsi_ptr = &info->summary_indicator; + info->summary_indicator_idx = index; + info->airq.lsi_ptr = get_summary_indicator(info); info->airq.lsi_mask = 0xff; info->airq.isc = VIRTIO_AIRQ_ISC; rc = register_adapter_interrupt(&info->airq); @@ -245,7 +266,7 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs, for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) { if (!airq_areas[i]) - airq_areas[i] = new_airq_info(); + airq_areas[i] = new_airq_info(i); info = airq_areas[i]; if (!info) return 0; @@ -326,29 +347,29 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev, struct airq_info *airq_info = vcdev->airq_info; if (vcdev->is_thinint) { - thinint_area = kzalloc(sizeof(*thinint_area), - GFP_DMA | GFP_KERNEL); + thinint_area = ccw_device_dma_zalloc(vcdev->cdev, + sizeof(*thinint_area)); if (!thinint_area) return; thinint_area->summary_indicator = - (unsigned long) &airq_info->summary_indicator; + (unsigned long) get_summary_indicator(airq_info); thinint_area->isc = VIRTIO_AIRQ_ISC; ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER; ccw->count = sizeof(*thinint_area); ccw->cda = (__u32)(unsigned long) thinint_area; } else { /* payload is the address of the indicators */ - indicatorp = kmalloc(sizeof(&vcdev->indicators), - GFP_DMA | GFP_KERNEL); + indicatorp = ccw_device_dma_zalloc(vcdev->cdev, + sizeof(indicators(vcdev))); if (!indicatorp) return; *indicatorp = 0; ccw->cmd_code = CCW_CMD_SET_IND; - ccw->count = sizeof(&vcdev->indicators); + ccw->count = sizeof(indicators(vcdev)); ccw->cda = (__u32)(unsigned long) indicatorp; } /* Deregister indicators from host. */ - vcdev->indicators = 0; + *indicators(vcdev) = 0; ccw->flags = 0; ret = ccw_io_helper(vcdev, ccw, vcdev->is_thinint ? 
@@ -359,8 +380,8 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev, "Failed to deregister indicators (%d)\n", ret); else if (vcdev->is_thinint) virtio_ccw_drop_indicators(vcdev); - kfree(indicatorp); - kfree(thinint_area); + ccw_device_dma_free(vcdev->cdev, indicatorp, sizeof(indicators(vcdev))); + ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area)); } static inline long __do_kvm_notify(struct subchannel_id schid, @@ -407,15 +428,15 @@ static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev, { int ret; - vcdev->config_block->index = index; + vcdev->dma_area->config_block.index = index; ccw->cmd_code = CCW_CMD_READ_VQ_CONF; ccw->flags = 0; ccw->count = sizeof(struct vq_config_block); - ccw->cda = (__u32)(unsigned long)(vcdev->config_block); + ccw->cda = (__u32)(unsigned long)(&vcdev->dma_area->config_block); ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF); if (ret) return ret; - return vcdev->config_block->num ?: -ENOENT; + return vcdev->dma_area->config_block.num ?: -ENOENT; } static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw) @@ -460,7 +481,8 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw) ret, index); vring_del_virtqueue(vq); - kfree(info->info_block); + ccw_device_dma_free(vcdev->cdev, info->info_block, + sizeof(*info->info_block)); kfree(info); } @@ -470,7 +492,7 @@ static void virtio_ccw_del_vqs(struct virtio_device *vdev) struct ccw1 *ccw; struct virtio_ccw_device *vcdev = to_vc_device(vdev); - ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw)); if (!ccw) return; @@ -479,7 +501,7 @@ static void virtio_ccw_del_vqs(struct virtio_device *vdev) list_for_each_entry_safe(vq, n, &vdev->vqs, list) virtio_ccw_del_vq(vq, ccw); - kfree(ccw); + ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw)); } static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, @@ -502,8 +524,8 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, err = -ENOMEM; goto out_err; } - info->info_block = kzalloc(sizeof(*info->info_block), - GFP_DMA | GFP_KERNEL); + info->info_block = ccw_device_dma_zalloc(vcdev->cdev, + sizeof(*info->info_block)); if (!info->info_block) { dev_warn(&vcdev->cdev->dev, "no info block\n"); err = -ENOMEM; @@ -567,7 +589,8 @@ out_err: if (vq) vring_del_virtqueue(vq); if (info) { - kfree(info->info_block); + ccw_device_dma_free(vcdev->cdev, info->info_block, + sizeof(*info->info_block)); } kfree(info); return ERR_PTR(err); @@ -581,7 +604,8 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev, struct virtio_thinint_area *thinint_area = NULL; struct airq_info *info; - thinint_area = kzalloc(sizeof(*thinint_area), GFP_DMA | GFP_KERNEL); + thinint_area = ccw_device_dma_zalloc(vcdev->cdev, + sizeof(*thinint_area)); if (!thinint_area) { ret = -ENOMEM; goto out; @@ -596,7 +620,7 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev, } info = vcdev->airq_info; thinint_area->summary_indicator = - (unsigned long) &info->summary_indicator; + (unsigned long) get_summary_indicator(info); thinint_area->isc = VIRTIO_AIRQ_ISC; ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER; ccw->flags = CCW_FLAG_SLI; @@ -617,7 +641,7 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev, virtio_ccw_drop_indicators(vcdev); } out: - kfree(thinint_area); + ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area)); return ret; } @@ -633,7 +657,7 @@ static 
int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, int ret, i, queue_idx = 0; struct ccw1 *ccw; - ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw)); if (!ccw) return -ENOMEM; @@ -657,10 +681,11 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, * We need a data area under 2G to communicate. Our payload is * the address of the indicators. */ - indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL); + indicatorp = ccw_device_dma_zalloc(vcdev->cdev, + sizeof(indicators(vcdev))); if (!indicatorp) goto out; - *indicatorp = (unsigned long) &vcdev->indicators; + *indicatorp = (unsigned long) indicators(vcdev); if (vcdev->is_thinint) { ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw); if (ret) @@ -669,32 +694,36 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, } if (!vcdev->is_thinint) { /* Register queue indicators with host. */ - vcdev->indicators = 0; + *indicators(vcdev) = 0; ccw->cmd_code = CCW_CMD_SET_IND; ccw->flags = 0; - ccw->count = sizeof(&vcdev->indicators); + ccw->count = sizeof(indicators(vcdev)); ccw->cda = (__u32)(unsigned long) indicatorp; ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND); if (ret) goto out; } /* Register indicators2 with host for config changes */ - *indicatorp = (unsigned long) &vcdev->indicators2; - vcdev->indicators2 = 0; + *indicatorp = (unsigned long) indicators2(vcdev); + *indicators2(vcdev) = 0; ccw->cmd_code = CCW_CMD_SET_CONF_IND; ccw->flags = 0; - ccw->count = sizeof(&vcdev->indicators2); + ccw->count = sizeof(indicators2(vcdev)); ccw->cda = (__u32)(unsigned long) indicatorp; ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND); if (ret) goto out; - kfree(indicatorp); - kfree(ccw); + if (indicatorp) + ccw_device_dma_free(vcdev->cdev, indicatorp, + sizeof(indicators(vcdev))); + ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw)); return 0; out: - kfree(indicatorp); - kfree(ccw); + if (indicatorp) + ccw_device_dma_free(vcdev->cdev, indicatorp, + sizeof(indicators(vcdev))); + ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw)); virtio_ccw_del_vqs(vdev); return ret; } @@ -704,12 +733,12 @@ static void virtio_ccw_reset(struct virtio_device *vdev) struct virtio_ccw_device *vcdev = to_vc_device(vdev); struct ccw1 *ccw; - ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw)); if (!ccw) return; /* Zero status bits. */ - *vcdev->status = 0; + vcdev->dma_area->status = 0; /* Send a reset ccw on device. 
*/ ccw->cmd_code = CCW_CMD_VDEV_RESET; @@ -717,7 +746,7 @@ static void virtio_ccw_reset(struct virtio_device *vdev) ccw->count = 0; ccw->cda = 0; ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET); - kfree(ccw); + ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw)); } static u64 virtio_ccw_get_features(struct virtio_device *vdev) @@ -728,11 +757,11 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev) u64 rc; struct ccw1 *ccw; - ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw)); if (!ccw) return 0; - features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL); + features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features)); if (!features) { rc = 0; goto out_free; @@ -765,8 +794,8 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev) rc |= (u64)le32_to_cpu(features->features) << 32; out_free: - kfree(features); - kfree(ccw); + ccw_device_dma_free(vcdev->cdev, features, sizeof(*features)); + ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw)); return rc; } @@ -791,11 +820,11 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev) return -EINVAL; } - ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw)); if (!ccw) return -ENOMEM; - features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL); + features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features)); if (!features) { ret = -ENOMEM; goto out_free; @@ -830,8 +859,8 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev) ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT); out_free: - kfree(features); - kfree(ccw); + ccw_device_dma_free(vcdev->cdev, features, sizeof(*features)); + ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw)); return ret; } @@ -845,11 +874,12 @@ static void virtio_ccw_get_config(struct virtio_device *vdev, void *config_area; unsigned long flags; - ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw)); if (!ccw) return; - config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL); + config_area = ccw_device_dma_zalloc(vcdev->cdev, + VIRTIO_CCW_CONFIG_SIZE); if (!config_area) goto out_free; @@ -871,8 +901,8 @@ static void virtio_ccw_get_config(struct virtio_device *vdev, memcpy(buf, config_area + offset, len); out_free: - kfree(config_area); - kfree(ccw); + ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE); + ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw)); } static void virtio_ccw_set_config(struct virtio_device *vdev, @@ -884,11 +914,12 @@ static void virtio_ccw_set_config(struct virtio_device *vdev, void *config_area; unsigned long flags; - ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw)); if (!ccw) return; - config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL); + config_area = ccw_device_dma_zalloc(vcdev->cdev, + VIRTIO_CCW_CONFIG_SIZE); if (!config_area) goto out_free; @@ -907,61 +938,61 @@ static void virtio_ccw_set_config(struct virtio_device *vdev, ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG); out_free: - kfree(config_area); - kfree(ccw); + ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE); + ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw)); } static u8 virtio_ccw_get_status(struct virtio_device *vdev) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); - u8 old_status = *vcdev->status; + u8 old_status = vcdev->dma_area->status; struct 
ccw1 *ccw; if (vcdev->revision < 1) - return *vcdev->status; + return vcdev->dma_area->status; - ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw)); if (!ccw) return old_status; ccw->cmd_code = CCW_CMD_READ_STATUS; ccw->flags = 0; - ccw->count = sizeof(*vcdev->status); - ccw->cda = (__u32)(unsigned long)vcdev->status; + ccw->count = sizeof(vcdev->dma_area->status); + ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status; ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS); /* * If the channel program failed (should only happen if the device * was hotunplugged, and then we clean up via the machine check - * handler anyway), vcdev->status was not overwritten and we just + * handler anyway), vcdev->dma_area->status was not overwritten and we just * return the old status, which is fine. */ - kfree(ccw); + ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw)); - return *vcdev->status; + return vcdev->dma_area->status; } static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); - u8 old_status = *vcdev->status; + u8 old_status = vcdev->dma_area->status; struct ccw1 *ccw; int ret; - ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw)); if (!ccw) return; /* Write the status to the host. */ - *vcdev->status = status; + vcdev->dma_area->status = status; ccw->cmd_code = CCW_CMD_WRITE_STATUS; ccw->flags = 0; ccw->count = sizeof(status); - ccw->cda = (__u32)(unsigned long)vcdev->status; + ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status; ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS); /* Write failed? We assume status is unchanged. */ if (ret) - *vcdev->status = old_status; - kfree(ccw); + vcdev->dma_area->status = old_status; + ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw)); } static const char *virtio_ccw_bus_name(struct virtio_device *vdev) @@ -994,8 +1025,8 @@ static void virtio_ccw_release_dev(struct device *_d) struct virtio_device *dev = dev_to_virtio(_d); struct virtio_ccw_device *vcdev = to_vc_device(dev); - kfree(vcdev->status); - kfree(vcdev->config_block); + ccw_device_dma_free(vcdev->cdev, vcdev->dma_area, + sizeof(*vcdev->dma_area)); kfree(vcdev); } @@ -1093,17 +1124,17 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev, vcdev->err = -EIO; } virtio_ccw_check_activity(vcdev, activity); - for_each_set_bit(i, &vcdev->indicators, - sizeof(vcdev->indicators) * BITS_PER_BYTE) { + for_each_set_bit(i, indicators(vcdev), + sizeof(*indicators(vcdev)) * BITS_PER_BYTE) { /* The bit clear must happen before the vring kick. 
 */
-		clear_bit(i, &vcdev->indicators);
+		clear_bit(i, indicators(vcdev));
 		barrier();
 		vq = virtio_ccw_vq_by_ind(vcdev, i);
 		vring_interrupt(0, vq);
 	}
-	if (test_bit(0, &vcdev->indicators2)) {
+	if (test_bit(0, indicators2(vcdev))) {
 		virtio_config_changed(&vcdev->vdev);
-		clear_bit(0, &vcdev->indicators2);
+		clear_bit(0, indicators2(vcdev));
 	}
 }
 
@@ -1203,12 +1234,12 @@ static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
 	struct ccw1 *ccw;
 	int ret;
 
-	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
 	if (!ccw)
 		return -ENOMEM;
-	rev = kzalloc(sizeof(*rev), GFP_DMA | GFP_KERNEL);
+	rev = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*rev));
 	if (!rev) {
-		kfree(ccw);
+		ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
 		return -ENOMEM;
 	}
 
@@ -1238,8 +1269,8 @@ static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
 		}
 	} while (ret == -EOPNOTSUPP);
 
-	kfree(ccw);
-	kfree(rev);
+	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
+	ccw_device_dma_free(vcdev->cdev, rev, sizeof(*rev));
 	return ret;
 }
 
@@ -1255,24 +1286,11 @@ static int virtio_ccw_online(struct ccw_device *cdev)
 		ret = -ENOMEM;
 		goto out_free;
 	}
 	vcdev->vdev.dev.parent = &cdev->dev;
-	cdev->dev.dma_mask = &vcdev->dma_mask;
-	/* we are fine with common virtio infrastructure using 64 bit DMA */
-	ret = dma_set_mask_and_coherent(&cdev->dev, DMA_BIT_MASK(64));
-	if (ret) {
-		dev_warn(&cdev->dev, "Failed to enable 64-bit DMA.\n");
-		goto out_free;
-	}
-
-	vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
-				   GFP_DMA | GFP_KERNEL);
-	if (!vcdev->config_block) {
-		ret = -ENOMEM;
-		goto out_free;
-	}
-	vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
-	if (!vcdev->status) {
+	vcdev->cdev = cdev;
+	vcdev->dma_area = ccw_device_dma_zalloc(vcdev->cdev,
+						sizeof(*vcdev->dma_area));
+	if (!vcdev->dma_area) {
 		ret = -ENOMEM;
 		goto out_free;
 	}
@@ -1281,7 +1299,6 @@ static int virtio_ccw_online(struct ccw_device *cdev)
 
 	vcdev->vdev.dev.release = virtio_ccw_release_dev;
 	vcdev->vdev.config = &virtio_ccw_config_ops;
-	vcdev->cdev = cdev;
 	init_waitqueue_head(&vcdev->wait_q);
 	INIT_LIST_HEAD(&vcdev->virtqueues);
 	spin_lock_init(&vcdev->lock);
@@ -1312,8 +1329,8 @@ out_put:
 	return ret;
 out_free:
 	if (vcdev) {
-		kfree(vcdev->status);
-		kfree(vcdev->config_block);
+		ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
+				    sizeof(*vcdev->dma_area));
 	}
 	kfree(vcdev);
 	return ret;
@@ -1483,8 +1500,17 @@ static void __init no_auto_parse(void)
 
 static int __init virtio_ccw_init(void)
 {
+	int rc;
+
 	/* parse no_auto string before we do anything further */
 	no_auto_parse();
-	return ccw_driver_register(&virtio_ccw_driver);
+
+	summary_indicators = cio_dma_zalloc(MAX_AIRQ_AREAS);
+	if (!summary_indicators)
+		return -ENOMEM;
+	rc = ccw_driver_register(&virtio_ccw_driver);
+	if (rc)
+		cio_dma_free(summary_indicators, MAX_AIRQ_AREAS);
+	return rc;
 }
 device_initcall(virtio_ccw_init);
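
Two small user-space sketches follow, illustrating the guest-visible formats handled by the patches above. Both are approximations under stated assumptions: struct layouts are reduced to shift arithmetic on big-endian doublewords (bit 0 = most significant, as in the Principles of Operation), the helper names ccw0_to_ccw1 and the test values are invented for illustration, and they are not the kernel's definitions.

First, the vfio-ccw rework now converts any Format-0 channel program to Format-1 once, immediately after copying it from the guest (convert_ccw0_to_ccw1 above). A minimal sketch of that conversion:

#include <stdint.h>
#include <stdio.h>

#define CCW_CMD_TIC 0x08

/* Assumed layouts (bit 0 = MSB):
 * Format-0: cmd(0-7) cda(8-31)   flags(32-39) unused(40-47) count(48-63)
 * Format-1: cmd(0-7) flags(8-15) count(16-31) cda(33-63)               */
static uint64_t ccw0_to_ccw1(uint64_t w)
{
	uint8_t  cmd   = w >> 56;
	uint32_t cda   = (w >> 32) & 0x00ffffff;	/* 24-bit address */
	uint8_t  flags = (w >> 24) & 0xff;
	uint16_t count = w & 0xffff;

	if ((cmd & 0x0f) == CCW_CMD_TIC) {
		/* As in the patch: a TIC carries no flags or count. */
		cmd   = CCW_CMD_TIC;
		flags = 0;
		count = 0;
	}
	return ((uint64_t)cmd << 56) | ((uint64_t)flags << 48) |
	       ((uint64_t)count << 32) | cda;
}

int main(void)
{
	/* Hypothetical format-0 WRITE: cmd 0x01, cda 0x001000,
	 * flags 0x40 (command chaining), count 0x0200.         */
	printf("%016llx\n",
	       (unsigned long long)ccw0_to_ccw1(0x0100100040000200ULL));
	return 0;	/* prints 0140020000001000 */
}

Second, the PQAP/AQIC intercept (handle_pqap above) decodes its operands from guest general registers: the APQN from bits 48-63 of GR0, the IR bit from bit 16 of GR1, and the guest ISC from bits 61-63 of GR1. A sketch of that decoding, with made-up register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t gpr0 = 0x0000000000000103ULL;	/* APQN 0x0103: card 01, queue 03 */
	uint64_t gpr1 = 0x0000800000000005ULL;	/* IR bit set, ISC 5 */

	uint16_t apqn = gpr0 & 0xffff;
	int ir  = (gpr1 >> (63 - 16)) & 0x01;	/* bit 16, MSB-first numbering */
	int isc = gpr1 & 0x07;

	printf("card %02x queue %02x: %s, isc %d\n",
	       apqn >> 8, apqn & 0xff,
	       ir ? "enable irq" : "disable irq", isc);
	return 0;
}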