author    Linus Torvalds <torvalds@linux-foundation.org>    2016-12-16 09:26:42 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-12-16 09:26:42 -0800
commit    de399813b521ea7e38bbfb5e5b620b5e202e5783 (patch)
tree      ceb8302f9d6a7a4f2e25b64c5dc42c1fb80b435b /drivers
parent    57ca04ab440168e101da746ef9edd1ec583b7214 (diff)
parent    c6f6634721c871bfab4235e1cbcad208d3063798 (diff)
download  linux-de399813b521ea7e38bbfb5e5b620b5e202e5783.tar.bz2
Merge tag 'powerpc-4.10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
 "Highlights include:

  - Support for the kexec_file_load() syscall, which is a prereq for
    secure and trusted boot.

  - Prevent kernel execution of userspace on P9 Radix (similar to
    SMEP/PXN).

  - Sort the exception tables at build time, to save time at boot, and
    store them as relative offsets to save space in the kernel image &
    memory.

  - Allow building the kernel with thin archives, which should allow us
    to build an allyesconfig once some other fixes land.

  - Build fixes to allow us to correctly rebuild when changing the
    kernel endian from big to little or vice versa.

  - Plumbing so that we can avoid doing a full mm TLB flush on P9
    Radix.

  - Initial stack protector support (-fstack-protector).

  - Support for dumping the radix (aka. Linux) and hash page tables via
    debugfs.

  - Fix an oops in cxl coredump generation when cxl_get_fd() is used.

  - Freescale updates from Scott: "Highlights include 8xx hugepage
    support, qbman fixes/cleanup, device tree updates, and some misc
    cleanup."

  - Many and varied fixes and minor enhancements as always.

  Thanks to:
    Alexey Kardashevskiy, Andrew Donnellan, Aneesh Kumar K.V, Anshuman
    Khandual, Anton Blanchard, Balbir Singh, Bartlomiej Zolnierkiewicz,
    Christophe Jaillet, Christophe Leroy, Denis Kirjanov, Elimar
    Riesebieter, Frederic Barrat, Gautham R. Shenoy, Geliang Tang,
    Geoff Levand, Jack Miller, Johan Hovold, Lars-Peter Clausen, Libin,
    Madhavan Srinivasan, Michael Neuling, Nathan Fontenot, Naveen N.
    Rao, Nicholas Piggin, Pan Xinhui, Peter Senna Tschudin, Rashmica
    Gupta, Rui Teng, Russell Currey, Scott Wood, Simon Guo, Suraj
    Jitindar Singh, Thiago Jung Bauermann, Tobias Klauser, Vaibhav
    Jain"

[ And thanks to Michael, who took time off from a new baby to get this
  pull request done.  - Linus ]

* tag 'powerpc-4.10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (174 commits)
  powerpc/fsl/dts: add FMan node for t1042d4rdb
  powerpc/fsl/dts: add sg_2500_aqr105_phy4 alias on t1024rdb
  powerpc/fsl/dts: add QMan and BMan nodes on t1024
  powerpc/fsl/dts: add QMan and BMan nodes on t1023
  soc/fsl/qman: test: use DEFINE_SPINLOCK()
  powerpc/fsl-lbc: use DEFINE_SPINLOCK()
  powerpc/8xx: Implement support of hugepages
  powerpc: get hugetlbpage handling more generic
  powerpc: port 64 bits pgtable_cache to 32 bits
  powerpc/boot: Request no dynamic linker for boot wrapper
  soc/fsl/bman: Use resource_size instead of computation
  soc/fsl/qe: use builtin_platform_driver
  powerpc/fsl_pmc: use builtin_platform_driver
  powerpc/83xx/suspend: use builtin_platform_driver
  powerpc/ftrace: Fix the comments for ftrace_modify_code
  powerpc/perf: macros for power9 format encoding
  powerpc/perf: power9 raw event format encoding
  powerpc/perf: update attribute_group data structure
  powerpc/perf: factor out the event format field
  powerpc/mm/iommu, vfio/spapr: Put pages on VFIO container shutdown
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/macintosh/Kconfig                |   1
-rw-r--r--  drivers/misc/cxl/api.c                   | 147
-rw-r--r--  drivers/misc/cxl/context.c               |  17
-rw-r--r--  drivers/misc/cxl/cxl.h                   |   6
-rw-r--r--  drivers/misc/cxl/debugfs.c               |   6
-rw-r--r--  drivers/misc/cxl/file.c                  |   5
-rw-r--r--  drivers/misc/cxl/guest.c                 |   2
-rw-r--r--  drivers/misc/cxl/irq.c                   |   2
-rw-r--r--  drivers/misc/cxl/native.c                |  20
-rw-r--r--  drivers/misc/cxl/pci.c                   |   2
-rw-r--r--  drivers/misc/cxl/phb.c                   |   2
-rw-r--r--  drivers/pci/hotplug/rpadlpar_core.c      |  10
-rw-r--r--  drivers/soc/fsl/qbman/bman.c             |   8
-rw-r--r--  drivers/soc/fsl/qbman/bman_ccsr.c        |   3
-rw-r--r--  drivers/soc/fsl/qbman/bman_portal.c      |  17
-rw-r--r--  drivers/soc/fsl/qbman/dpaa_sys.h         |   1
-rw-r--r--  drivers/soc/fsl/qbman/qman.c             | 233
-rw-r--r--  drivers/soc/fsl/qbman/qman_ccsr.c        |   3
-rw-r--r--  drivers/soc/fsl/qbman/qman_portal.c      |  41
-rw-r--r--  drivers/soc/fsl/qbman/qman_priv.h        |  17
-rw-r--r--  drivers/soc/fsl/qbman/qman_test_api.c    |  27
-rw-r--r--  drivers/soc/fsl/qbman/qman_test_stash.c  |  38
-rw-r--r--  drivers/soc/fsl/qe/qe.c                  |   6
-rw-r--r--  drivers/vfio/vfio_iommu_spapr_tce.c      | 328
24 files changed, 588 insertions, 354 deletions
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index d28690f6e262..5d80810934df 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -102,7 +102,6 @@ config ADB_PMU_LED_DISK
bool "Use front LED as DISK LED by default"
depends on ADB_PMU_LED
depends on LEDS_CLASS
- depends on IDE_GD_ATA
select LEDS_TRIGGERS
select LEDS_TRIGGER_DISK
help
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 2e5233b60971..1b35e33d2434 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -9,18 +9,119 @@
#include <linux/pci.h>
#include <linux/slab.h>
-#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <misc/cxl.h>
-#include <linux/fs.h>
#include <asm/pnv-pci.h>
#include <linux/msi.h>
+#include <linux/module.h>
+#include <linux/mount.h>
#include "cxl.h"
+/*
+ * Since we want to track memory mappings to be able to force-unmap
+ * when the AFU is no longer reachable, we need an inode. For devices
+ * opened through the cxl user API, this is not a problem, but a
+ * userland process can also get a cxl fd through the cxl_get_fd()
+ * API, which is used by the cxlflash driver.
+ *
+ * Therefore we implement our own simple pseudo-filesystem and inode
+ * allocator. We don't use the anonymous inode, as we need the
+ * meta-data associated with it (address_space) and it is shared by
+ * other drivers/processes, so it could lead to cxl unmapping VMAs
+ * from random processes.
+ */
+
+#define CXL_PSEUDO_FS_MAGIC 0x1697697f
+
+static int cxl_fs_cnt;
+static struct vfsmount *cxl_vfs_mount;
+
+static const struct dentry_operations cxl_fs_dops = {
+ .d_dname = simple_dname,
+};
+
+static struct dentry *cxl_fs_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ return mount_pseudo(fs_type, "cxl:", NULL, &cxl_fs_dops,
+ CXL_PSEUDO_FS_MAGIC);
+}
+
+static struct file_system_type cxl_fs_type = {
+ .name = "cxl",
+ .owner = THIS_MODULE,
+ .mount = cxl_fs_mount,
+ .kill_sb = kill_anon_super,
+};
+
+
+void cxl_release_mapping(struct cxl_context *ctx)
+{
+ if (ctx->kernelapi && ctx->mapping)
+ simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt);
+}
+
+static struct file *cxl_getfile(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags)
+{
+ struct qstr this;
+ struct path path;
+ struct file *file;
+ struct inode *inode = NULL;
+ int rc;
+
+ /* strongly inspired by anon_inode_getfile() */
+
+ if (fops->owner && !try_module_get(fops->owner))
+ return ERR_PTR(-ENOENT);
+
+ rc = simple_pin_fs(&cxl_fs_type, &cxl_vfs_mount, &cxl_fs_cnt);
+ if (rc < 0) {
+ pr_err("Cannot mount cxl pseudo filesystem: %d\n", rc);
+ file = ERR_PTR(rc);
+ goto err_module;
+ }
+
+ inode = alloc_anon_inode(cxl_vfs_mount->mnt_sb);
+ if (IS_ERR(inode)) {
+ file = ERR_CAST(inode);
+ goto err_fs;
+ }
+
+ file = ERR_PTR(-ENOMEM);
+ this.name = name;
+ this.len = strlen(name);
+ this.hash = 0;
+ path.dentry = d_alloc_pseudo(cxl_vfs_mount->mnt_sb, &this);
+ if (!path.dentry)
+ goto err_inode;
+
+ path.mnt = mntget(cxl_vfs_mount);
+ d_instantiate(path.dentry, inode);
+
+ file = alloc_file(&path, OPEN_FMODE(flags), fops);
+ if (IS_ERR(file))
+ goto err_dput;
+ file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
+ file->private_data = priv;
+
+ return file;
+
+err_dput:
+ path_put(&path);
+err_inode:
+ iput(inode);
+err_fs:
+ simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt);
+err_module:
+ module_put(fops->owner);
+ return file;
+}
+
struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
{
- struct address_space *mapping;
struct cxl_afu *afu;
struct cxl_context *ctx;
int rc;
@@ -30,38 +131,20 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
return ERR_CAST(afu);
ctx = cxl_context_alloc();
- if (IS_ERR(ctx)) {
- rc = PTR_ERR(ctx);
- goto err_dev;
- }
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
ctx->kernelapi = true;
- /*
- * Make our own address space since we won't have one from the
- * filesystem like the user api has, and even if we do associate a file
- * with this context we don't want to use the global anonymous inode's
- * address space as that can invalidate unrelated users:
- */
- mapping = kmalloc(sizeof(struct address_space), GFP_KERNEL);
- if (!mapping) {
- rc = -ENOMEM;
- goto err_ctx;
- }
- address_space_init_once(mapping);
-
/* Make it a slave context. We can promote it later? */
- rc = cxl_context_init(ctx, afu, false, mapping);
+ rc = cxl_context_init(ctx, afu, false);
if (rc)
- goto err_mapping;
+ goto err_ctx;
return ctx;
-err_mapping:
- kfree(mapping);
err_ctx:
kfree(ctx);
-err_dev:
return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_dev_context_init);
@@ -340,6 +423,11 @@ struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
{
struct file *file;
int rc, flags, fdtmp;
+ char *name = NULL;
+
+ /* only allow one per context */
+ if (ctx->mapping)
+ return ERR_PTR(-EEXIST);
flags = O_RDWR | O_CLOEXEC;
@@ -363,12 +451,13 @@ struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
} else /* use default ops */
fops = (struct file_operations *)&afu_fops;
- file = anon_inode_getfile("cxl", fops, ctx, flags);
+ name = kasprintf(GFP_KERNEL, "cxl:%d", ctx->pe);
+ file = cxl_getfile(name, fops, ctx, flags);
+ kfree(name);
if (IS_ERR(file))
goto err_fd;
- file->f_mapping = ctx->mapping;
-
+ cxl_context_set_mapping(ctx, file->f_mapping);
*fd = fdtmp;
return file;
@@ -541,7 +630,7 @@ int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
if (remaining > 0) {
new_ctx = cxl_dev_context_init(pdev);
- if (!new_ctx) {
+ if (IS_ERR(new_ctx)) {
pr_warn("%s: Failed to allocate enough contexts for MSIs\n", pci_name(pdev));
return -ENOSPC;
}
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 5d36dcc7f47e..3907387b6d15 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -34,8 +34,7 @@ struct cxl_context *cxl_context_alloc(void)
/*
* Initialises a CXL context.
*/
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
- struct address_space *mapping)
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
{
int i;
@@ -44,7 +43,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
ctx->master = master;
ctx->pid = ctx->glpid = NULL; /* Set in start work ioctl */
mutex_init(&ctx->mapping_lock);
- ctx->mapping = mapping;
+ ctx->mapping = NULL;
/*
* Allocate the segment table before we put it in the IDR so that we
@@ -114,6 +113,14 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
return 0;
}
+void cxl_context_set_mapping(struct cxl_context *ctx,
+ struct address_space *mapping)
+{
+ mutex_lock(&ctx->mapping_lock);
+ ctx->mapping = mapping;
+ mutex_unlock(&ctx->mapping_lock);
+}
+
static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct cxl_context *ctx = vma->vm_file->private_data;
@@ -299,8 +306,6 @@ static void reclaim_ctx(struct rcu_head *rcu)
if (ctx->ff_page)
__free_page(ctx->ff_page);
ctx->sstp = NULL;
- if (ctx->kernelapi)
- kfree(ctx->mapping);
kfree(ctx->irq_bitmap);
@@ -312,6 +317,8 @@ static void reclaim_ctx(struct rcu_head *rcu)
void cxl_context_free(struct cxl_context *ctx)
{
+ if (ctx->kernelapi && ctx->mapping)
+ cxl_release_mapping(ctx);
mutex_lock(&ctx->afu->contexts_lock);
idr_remove(&ctx->afu->contexts_idr, ctx->pe);
mutex_unlock(&ctx->afu->contexts_lock);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index a144073593fa..b24d76723fb0 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -817,8 +817,9 @@ void cxl_dump_debug_buffer(void *addr, size_t size);
void init_cxl_native(void);
struct cxl_context *cxl_context_alloc(void);
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
- struct address_space *mapping);
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master);
+void cxl_context_set_mapping(struct cxl_context *ctx,
+ struct address_space *mapping);
void cxl_context_free(struct cxl_context *ctx);
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma);
unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
@@ -877,6 +878,7 @@ void cxl_native_err_irq_dump_regs(struct cxl *adapter);
void cxl_stop_trace(struct cxl *cxl);
int cxl_pci_vphb_add(struct cxl_afu *afu);
void cxl_pci_vphb_remove(struct cxl_afu *afu);
+void cxl_release_mapping(struct cxl_context *ctx);
extern struct pci_driver cxl_pci_driver;
extern struct platform_driver cxl_of_driver;
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c
index ec7b8a017439..9c06ac8fa5ac 100644
--- a/drivers/misc/cxl/debugfs.c
+++ b/drivers/misc/cxl/debugfs.c
@@ -43,12 +43,14 @@ static int debugfs_io_u64_set(void *data, u64 val)
out_be64((u64 __iomem *)data, val);
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_io_x64, debugfs_io_u64_get, debugfs_io_u64_set, "0x%016llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_io_x64, debugfs_io_u64_get, debugfs_io_u64_set,
+ "0x%016llx\n");
static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode,
struct dentry *parent, u64 __iomem *value)
{
- return debugfs_create_file(name, mode, parent, (void __force *)value, &fops_io_x64);
+ return debugfs_create_file_unsafe(name, mode, parent,
+ (void __force *)value, &fops_io_x64);
}
void cxl_debugfs_add_adapter_psl_regs(struct cxl *adapter, struct dentry *dir)
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 77080cc5fa0a..859959f19f10 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -86,9 +86,12 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
goto err_put_afu;
}
- if ((rc = cxl_context_init(ctx, afu, master, inode->i_mapping)))
+ rc = cxl_context_init(ctx, afu, master);
+ if (rc)
goto err_put_afu;
+ cxl_context_set_mapping(ctx, inode->i_mapping);
+
pr_devel("afu_open pe: %i\n", ctx->pe);
file->private_data = ctx;
cxl_ctx_get();
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 3e102cd6ed91..e04bc4ddfd74 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -887,7 +887,7 @@ static void afu_handle_errstate(struct work_struct *work)
afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE)
return;
- if (afu_guest->handle_err == true)
+ if (afu_guest->handle_err)
schedule_delayed_work(&afu_guest->work_err,
msecs_to_jiffies(3000));
}
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index dec60f58a767..1a402bbed687 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -104,7 +104,7 @@ irqreturn_t cxl_irq(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_i
} else {
spin_lock(&ctx->lock);
ctx->afu_err = irq_info->afu_err;
- ctx->pending_afu_err = 1;
+ ctx->pending_afu_err = true;
spin_unlock(&ctx->lock);
wake_up_all(&ctx->wq);
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index a217a74ccc98..09505f432eda 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -10,7 +10,6 @@
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
@@ -54,7 +53,7 @@ static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
AFU_Cntl | command);
cpu_relax();
AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
- };
+ }
if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
/*
@@ -167,7 +166,7 @@ int cxl_psl_purge(struct cxl_afu *afu)
cpu_relax();
}
PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
- };
+ }
end = local_clock();
pr_devel("PSL purged in %lld ns\n", end - start);
@@ -931,9 +930,18 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data)
struct cxl_afu *afu = data;
struct cxl_context *ctx;
struct cxl_irq_info irq_info;
- int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
- int ret;
-
+ u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An);
+ int ph, ret;
+
+ /* check if eeh kicked in while the interrupt was in flight */
+ if (unlikely(phreg == ~0ULL)) {
+ dev_warn(&afu->dev,
+ "Ignoring slice interrupt(%d) due to fenced card",
+ irq);
+ return IRQ_HANDLED;
+ }
+ /* Mask the pe-handle from register value */
+ ph = phreg & 0xffff;
if ((ret = native_get_irq_info(afu, &irq_info))) {
WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
return fail_psl_irq(afu, &irq_info);
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index e96be9ca4e60..80a87ab25b83 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1921,7 +1921,7 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
goto err;
ctx = cxl_dev_context_init(afu_dev);
- if (!ctx)
+ if (IS_ERR(ctx))
goto err;
afu_dev->dev.archdata.cxl_ctx = ctx;
diff --git a/drivers/misc/cxl/phb.c b/drivers/misc/cxl/phb.c
index 0935d44c1770..6ec69ada19f4 100644
--- a/drivers/misc/cxl/phb.c
+++ b/drivers/misc/cxl/phb.c
@@ -20,7 +20,7 @@ bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu
* in the virtual phb, we'll need a default context to attach them to.
*/
ctx = cxl_dev_context_init(dev);
- if (!ctx)
+ if (IS_ERR(ctx))
return false;
dev->dev.archdata.cxl_ctx = ctx;
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index dc67f39779ec..c614ff7c3bc3 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -257,8 +257,13 @@ static int dlpar_add_phb(char *drc_name, struct device_node *dn)
static int dlpar_add_vio_slot(char *drc_name, struct device_node *dn)
{
- if (vio_find_node(dn))
+ struct vio_dev *vio_dev;
+
+ vio_dev = vio_find_node(dn);
+ if (vio_dev) {
+ put_device(&vio_dev->dev);
return -EINVAL;
+ }
if (!vio_register_device_node(dn)) {
printk(KERN_ERR
@@ -334,6 +339,9 @@ static int dlpar_remove_vio_slot(char *drc_name, struct device_node *dn)
return -EINVAL;
vio_unregister_device(vio_dev);
+
+ put_device(&vio_dev->dev);
+
return 0;
}
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
index ffa48fdbb1a9..a3d6d7cfa929 100644
--- a/drivers/soc/fsl/qbman/bman.c
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -167,12 +167,12 @@ struct bm_portal {
/* Cache-inhibited register access. */
static inline u32 bm_in(struct bm_portal *p, u32 offset)
{
- return __raw_readl(p->addr.ci + offset);
+ return be32_to_cpu(__raw_readl(p->addr.ci + offset));
}
static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
{
- __raw_writel(val, p->addr.ci + offset);
+ __raw_writel(cpu_to_be32(val), p->addr.ci + offset);
}
/* Cache Enabled Portal Access */
@@ -188,7 +188,7 @@ static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
{
- return __raw_readl(p->addr.ce + offset);
+ return be32_to_cpu(__raw_readl(p->addr.ce + offset));
}
struct bman_portal {
@@ -391,7 +391,7 @@ static void bm_rcr_finish(struct bm_portal *portal)
i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
if (i != rcr_ptr2idx(rcr->cursor))
- pr_crit("losing uncommited RCR entries\n");
+ pr_crit("losing uncommitted RCR entries\n");
i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
if (i != rcr->ci)
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
index 9deb0524543f..a8e8389a6894 100644
--- a/drivers/soc/fsl/qbman/bman_ccsr.c
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -181,8 +181,7 @@ static int fsl_bman_probe(struct platform_device *pdev)
node->full_name);
return -ENXIO;
}
- bm_ccsr_start = devm_ioremap(dev, res->start,
- res->end - res->start + 1);
+ bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
if (!bm_ccsr_start)
return -ENXIO;
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
index 986f64690e6e..8354d4dabdad 100644
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -126,15 +126,19 @@ static int bman_portal_probe(struct platform_device *pdev)
pcfg->irq = irq;
va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
- if (!va)
+ if (!va) {
+ dev_err(dev, "ioremap::CE failed\n");
goto err_ioremap1;
+ }
pcfg->addr_virt[DPAA_PORTAL_CE] = va;
va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
_PAGE_GUARDED | _PAGE_NO_CACHE);
- if (!va)
+ if (!va) {
+ dev_err(dev, "ioremap::CI failed\n");
goto err_ioremap2;
+ }
pcfg->addr_virt[DPAA_PORTAL_CI] = va;
@@ -150,8 +154,10 @@ static int bman_portal_probe(struct platform_device *pdev)
spin_unlock(&bman_lock);
pcfg->cpu = cpu;
- if (!init_pcfg(pcfg))
- goto err_ioremap2;
+ if (!init_pcfg(pcfg)) {
+ dev_err(dev, "portal init failed\n");
+ goto err_portal_init;
+ }
/* clear irq affinity if assigned cpu is offline */
if (!cpu_online(cpu))
@@ -159,10 +165,11 @@ static int bman_portal_probe(struct platform_device *pdev)
return 0;
+err_portal_init:
+ iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
err_ioremap2:
iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
err_ioremap1:
- dev_err(dev, "ioremap failed\n");
return -ENXIO;
}
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
index b63fd72295c6..2eaf3184f61d 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.h
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -38,6 +38,7 @@
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/prefetch.h>
#include <linux/genalloc.h>
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 119054bc922b..6f509f68085e 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -140,10 +140,10 @@ enum qm_mr_cmode { /* matches QCSP_CFG::MM */
struct qm_eqcr_entry {
u8 _ncw_verb; /* writes to this are non-coherent */
u8 dca;
- u16 seqnum;
- u32 orp; /* 24-bit */
- u32 fqid; /* 24-bit */
- u32 tag;
+ __be16 seqnum;
+ u8 __reserved[4];
+ __be32 fqid; /* 24-bit */
+ __be32 tag;
struct qm_fd fd;
u8 __reserved3[32];
} __packed;
@@ -183,41 +183,22 @@ struct qm_mr {
};
/* MC (Management Command) command */
-/* "Query FQ" */
-struct qm_mcc_queryfq {
+/* "FQ" command layout */
+struct qm_mcc_fq {
u8 _ncw_verb;
u8 __reserved1[3];
- u32 fqid; /* 24-bit */
+ __be32 fqid; /* 24-bit */
u8 __reserved2[56];
} __packed;
-/* "Alter FQ State Commands " */
-struct qm_mcc_alterfq {
- u8 _ncw_verb;
- u8 __reserved1[3];
- u32 fqid; /* 24-bit */
- u8 __reserved2;
- u8 count; /* number of consecutive FQID */
- u8 __reserved3[10];
- u32 context_b; /* frame queue context b */
- u8 __reserved4[40];
-} __packed;
-/* "Query CGR" */
-struct qm_mcc_querycgr {
+/* "CGR" command layout */
+struct qm_mcc_cgr {
u8 _ncw_verb;
u8 __reserved1[30];
u8 cgid;
u8 __reserved2[32];
};
-struct qm_mcc_querywq {
- u8 _ncw_verb;
- u8 __reserved;
- /* select channel if verb != QUERYWQ_DEDICATED */
- u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
- u8 __reserved2[60];
-} __packed;
-
#define QM_MCC_VERB_VBIT 0x80
#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
#define QM_MCC_VERB_INITFQ_PARKED 0x40
@@ -243,12 +224,9 @@ union qm_mc_command {
u8 __reserved[63];
};
struct qm_mcc_initfq initfq;
- struct qm_mcc_queryfq queryfq;
- struct qm_mcc_alterfq alterfq;
struct qm_mcc_initcgr initcgr;
- struct qm_mcc_querycgr querycgr;
- struct qm_mcc_querywq querywq;
- struct qm_mcc_queryfq_np queryfq_np;
+ struct qm_mcc_fq fq;
+ struct qm_mcc_cgr cgr;
};
/* MC (Management Command) result */
@@ -343,12 +321,12 @@ struct qm_portal {
/* Cache-inhibited register access. */
static inline u32 qm_in(struct qm_portal *p, u32 offset)
{
- return __raw_readl(p->addr.ci + offset);
+ return be32_to_cpu(__raw_readl(p->addr.ci + offset));
}
static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
{
- __raw_writel(val, p->addr.ci + offset);
+ __raw_writel(cpu_to_be32(val), p->addr.ci + offset);
}
/* Cache Enabled Portal Access */
@@ -364,7 +342,7 @@ static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
{
- return __raw_readl(p->addr.ce + offset);
+ return be32_to_cpu(__raw_readl(p->addr.ce + offset));
}
/* --- EQCR API --- */
@@ -443,7 +421,7 @@ static inline void qm_eqcr_finish(struct qm_portal *portal)
DPAA_ASSERT(!eqcr->busy);
if (pi != eqcr_ptr2idx(eqcr->cursor))
- pr_crit("losing uncommited EQCR entries\n");
+ pr_crit("losing uncommitted EQCR entries\n");
if (ci != eqcr->ci)
pr_crit("missing existing EQCR completions\n");
if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
@@ -492,8 +470,7 @@ static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
{
DPAA_ASSERT(eqcr->busy);
- DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff));
- DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff));
+ DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
DPAA_ASSERT(eqcr->available >= 1);
}
@@ -962,8 +939,6 @@ struct qman_portal {
u32 sdqcr;
/* probing time config params for cpu-affine portals */
const struct qm_portal_config *config;
- /* needed for providing a non-NULL device to dma_map_***() */
- struct platform_device *pdev;
/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
struct qman_cgrs *cgrs;
/* linked-list of CSCN handlers. */
@@ -1133,7 +1108,6 @@ static int qman_create_portal(struct qman_portal *portal,
const struct qman_cgrs *cgrs)
{
struct qm_portal *p;
- char buf[16];
int ret;
u32 isdr;
@@ -1196,15 +1170,6 @@ static int qman_create_portal(struct qman_portal *portal,
portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
- sprintf(buf, "qportal-%d", c->channel);
- portal->pdev = platform_device_alloc(buf, -1);
- if (!portal->pdev)
- goto fail_devalloc;
- if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40)))
- goto fail_devadd;
- ret = platform_device_add(portal->pdev);
- if (ret)
- goto fail_devadd;
isdr = 0xffffffff;
qm_out(p, QM_REG_ISDR, isdr);
portal->irq_sources = 0;
@@ -1239,8 +1204,8 @@ static int qman_create_portal(struct qman_portal *portal,
/* special handling, drain just in case it's a few FQRNIs */
const union qm_mr_entry *e = qm_mr_current(p);
- dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x\n, addr 0x%x",
- e->verb, e->ern.rc, e->ern.fd.addr_lo);
+ dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
+ e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
goto fail_dqrr_mr_empty;
}
/* Success */
@@ -1256,10 +1221,6 @@ fail_eqcr_empty:
fail_affinity:
free_irq(c->irq, portal);
fail_irq:
- platform_device_del(portal->pdev);
-fail_devadd:
- platform_device_put(portal->pdev);
-fail_devalloc:
kfree(portal->cgrs);
fail_cgrs:
qm_mc_finish(p);
@@ -1321,9 +1282,6 @@ static void qman_destroy_portal(struct qman_portal *qm)
qm_dqrr_finish(&qm->p);
qm_eqcr_finish(&qm->p);
- platform_device_del(qm->pdev);
- platform_device_put(qm->pdev);
-
qm->config = NULL;
}
@@ -1428,7 +1386,7 @@ static void qm_mr_process_task(struct work_struct *work)
case QM_MR_VERB_FQRN:
case QM_MR_VERB_FQRL:
/* Lookup in the retirement table */
- fq = fqid_to_fq(msg->fq.fqid);
+ fq = fqid_to_fq(qm_fqid_get(&msg->fq));
if (WARN_ON(!fq))
break;
fq_state_change(p, fq, msg, verb);
@@ -1437,7 +1395,7 @@ static void qm_mr_process_task(struct work_struct *work)
break;
case QM_MR_VERB_FQPN:
/* Parked */
- fq = tag_to_fq(msg->fq.contextB);
+ fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
fq_state_change(p, fq, msg, verb);
if (fq->cb.fqs)
fq->cb.fqs(p, fq, msg);
@@ -1451,7 +1409,7 @@ static void qm_mr_process_task(struct work_struct *work)
}
} else {
/* Its a software ERN */
- fq = tag_to_fq(msg->ern.tag);
+ fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
fq->cb.ern(p, fq, msg);
}
num++;
@@ -1536,7 +1494,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
/*
- * VDQCR: don't trust contextB as the FQ may have
+ * VDQCR: don't trust context_b as the FQ may have
* been configured for h/w consumption and we're
* draining it post-retirement.
*/
@@ -1562,8 +1520,8 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
clear_vdqcr(p, fq);
} else {
- /* SDQCR: contextB points to the FQ */
- fq = tag_to_fq(dq->contextB);
+ /* SDQCR: context_b points to the FQ */
+ fq = tag_to_fq(be32_to_cpu(dq->context_b));
/* Now let the callback do its stuff */
res = fq->cb.dqrr(p, fq, dq);
/*
@@ -1780,9 +1738,9 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
return -EINVAL;
#endif
- if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
+ if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
/* And can't be set at the same time as TDTHRESH */
- if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
return -EINVAL;
}
/* Issue an INITFQ_[PARKED|SCHED] management command */
@@ -1796,37 +1754,49 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
mcc = qm_mc_start(&p->p);
if (opts)
mcc->initfq = *opts;
- mcc->initfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
mcc->initfq.count = 0;
/*
- * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
+ * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
* demux pointer. Otherwise, the caller-provided value is allowed to
* stand, don't overwrite it.
*/
if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
dma_addr_t phys_fq;
- mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
- mcc->initfq.fqd.context_b = fq_to_tag(fq);
+ mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
+ mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
/*
* and the physical address - NB, if the user wasn't trying to
* set CONTEXTA, clear the stashing settings.
*/
- if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
- mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ if (!(be16_to_cpu(mcc->initfq.we_mask) &
+ QM_INITFQ_WE_CONTEXTA)) {
+ mcc->initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
memset(&mcc->initfq.fqd.context_a, 0,
sizeof(mcc->initfq.fqd.context_a));
} else {
- phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
- DMA_TO_DEVICE);
+ struct qman_portal *p = qman_dma_portal;
+
+ phys_fq = dma_map_single(p->config->dev, fq,
+ sizeof(*fq), DMA_TO_DEVICE);
+ if (dma_mapping_error(p->config->dev, phys_fq)) {
+ dev_err(p->config->dev, "dma_mapping failed\n");
+ ret = -EIO;
+ goto out;
+ }
+
qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
}
}
if (flags & QMAN_INITFQ_FLAG_LOCAL) {
int wq = 0;
- if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
- mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+ if (!(be16_to_cpu(mcc->initfq.we_mask) &
+ QM_INITFQ_WE_DESTWQ)) {
+ mcc->initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_DESTWQ);
wq = 4;
}
qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
@@ -1845,13 +1815,13 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
goto out;
}
if (opts) {
- if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
- if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
+ if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
fq_set(fq, QMAN_FQ_STATE_CGR_EN);
else
fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
}
- if (opts->we_mask & QM_INITFQ_WE_CGID)
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
fq->cgr_groupid = opts->fqd.cgid;
}
fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
@@ -1884,7 +1854,7 @@ int qman_schedule_fq(struct qman_fq *fq)
goto out;
}
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_err(p->config->dev, "ALTER_SCHED timeout\n");
@@ -1927,7 +1897,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags)
goto out;
}
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
@@ -1970,8 +1940,8 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags)
msg.verb = QM_MR_VERB_FQRNI;
msg.fq.fqs = mcr->alterfq.fqs;
- msg.fq.fqid = fq->fqid;
- msg.fq.contextB = fq_to_tag(fq);
+ qm_fqid_set(&msg.fq, fq->fqid);
+ msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
fq->cb.fqs(p, fq, &msg);
}
} else if (res == QM_MCR_RESULT_PENDING) {
@@ -2006,7 +1976,7 @@ int qman_oos_fq(struct qman_fq *fq)
goto out;
}
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2032,7 +2002,7 @@ int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
int ret = 0;
mcc = qm_mc_start(&p->p);
- mcc->queryfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2058,7 +2028,7 @@ static int qman_query_fq_np(struct qman_fq *fq,
int ret = 0;
mcc = qm_mc_start(&p->p);
- mcc->queryfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2086,7 +2056,7 @@ static int qman_query_cgr(struct qman_cgr *cgr,
int ret = 0;
mcc = qm_mc_start(&p->p);
- mcc->querycgr.cgid = cgr->cgrid;
+ mcc->cgr.cgid = cgr->cgrid;
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2239,8 +2209,8 @@ int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
if (unlikely(!eq))
goto out;
- eq->fqid = fq->fqid;
- eq->tag = fq_to_tag(fq);
+ qm_fqid_set(eq, fq->fqid);
+ eq->tag = cpu_to_be32(fq_to_tag(fq));
eq->fd = *fd;
qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
@@ -2282,7 +2252,24 @@ out:
}
#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
-#define TARG_MASK(n) (BIT(31) >> PORTAL_IDX(n))
+
+/* congestion state change notification target update control */
+static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
+{
+ if (qman_ip_rev >= QMAN_REV30)
+ cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
+ else
+ cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
+}
+
+static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
+{
+ if (qman_ip_rev >= QMAN_REV30)
+ cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
+ else
+ cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
+}
static u8 qman_cgr_cpus[CGR_NUM];
@@ -2305,7 +2292,6 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
struct qm_mcc_initcgr *opts)
{
struct qm_mcr_querycgr cgr_state;
- struct qm_mcc_initcgr local_opts = {};
int ret;
struct qman_portal *p;
@@ -2327,22 +2313,18 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
spin_lock(&p->cgr_lock);
if (opts) {
+ struct qm_mcc_initcgr local_opts = *opts;
+
ret = qman_query_cgr(cgr, &cgr_state);
if (ret)
goto out;
- if (opts)
- local_opts = *opts;
- if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
- local_opts.cgr.cscn_targ_upd_ctrl =
- QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
- else
- /* Overwrite TARG */
- local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
- TARG_MASK(p);
- local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+ qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
+ be32_to_cpu(cgr_state.cgr.cscn_targ));
+ local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);
/* send init if flags indicate so */
- if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
&local_opts);
else
@@ -2405,13 +2387,11 @@ int qman_delete_cgr(struct qman_cgr *cgr)
list_add(&cgr->node, &p->cgr_cbs);
goto release_lock;
}
- /* Overwrite TARG */
- local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
- if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
- local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
- else
- local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
- ~(TARG_MASK(p));
+
+ local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
+ qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
+ be32_to_cpu(cgr_state.cgr.cscn_targ));
+
ret = qm_modify_cgr(cgr, 0, &local_opts);
if (ret)
/* add back to the list */
@@ -2501,7 +2481,7 @@ static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
} while (wait && !dqrr);
while (dqrr) {
- if (dqrr->fqid == fqid && (dqrr->stat & s))
+ if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
found = 1;
qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
qm_dqrr_pvb_update(p);
@@ -2537,7 +2517,7 @@ static int qman_shutdown_fq(u32 fqid)
dev = p->config->dev;
/* Determine the state of the FQID */
mcc = qm_mc_start(&p->p);
- mcc->queryfq_np.fqid = fqid;
+ qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_err(dev, "QUERYFQ_NP timeout\n");
@@ -2552,7 +2532,7 @@ static int qman_shutdown_fq(u32 fqid)
/* Query which channel the FQ is using */
mcc = qm_mc_start(&p->p);
- mcc->queryfq.fqid = fqid;
+ qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_err(dev, "QUERYFQ timeout\n");
@@ -2572,7 +2552,7 @@ static int qman_shutdown_fq(u32 fqid)
case QM_MCR_NP_STATE_PARKED:
orl_empty = 0;
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fqid;
+ qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_err(dev, "QUERYFQ_NP timeout\n");
@@ -2667,7 +2647,7 @@ static int qman_shutdown_fq(u32 fqid)
cpu_relax();
}
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fqid;
+ qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2687,7 +2667,7 @@ static int qman_shutdown_fq(u32 fqid)
case QM_MCR_NP_STATE_RETIRED:
/* Send OOS Command */
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fqid;
+ qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2722,6 +2702,7 @@ const struct qm_portal_config *qman_get_qm_portal_config(
{
return portal->config;
}
+EXPORT_SYMBOL(qman_get_qm_portal_config);
struct gen_pool *qm_fqalloc; /* FQID allocator */
struct gen_pool *qm_qpalloc; /* pool-channel allocator */
@@ -2789,15 +2770,18 @@ static int qpool_cleanup(u32 qp)
struct qm_mcr_queryfq_np np;
err = qman_query_fq_np(&fq, &np);
- if (err)
+ if (err == -ERANGE)
/* FQID range exceeded, found no problems */
return 0;
+ else if (WARN_ON(err))
+ return err;
+
if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
struct qm_fqd fqd;
err = qman_query_fq(&fq, &fqd);
if (WARN_ON(err))
- return 0;
+ return err;
if (qm_fqd_get_chan(&fqd) == qp) {
/* The channel is the FQ's target, clean it */
err = qman_shutdown_fq(fq.fqid);
@@ -2836,7 +2820,7 @@ static int cgr_cleanup(u32 cgrid)
* error, looking for non-OOS FQDs whose CGR is the CGR being released
*/
struct qman_fq fq = {
- .fqid = 1
+ .fqid = QM_FQID_RANGE_START
};
int err;
@@ -2844,16 +2828,19 @@ static int cgr_cleanup(u32 cgrid)
struct qm_mcr_queryfq_np np;
err = qman_query_fq_np(&fq, &np);
- if (err)
+ if (err == -ERANGE)
/* FQID range exceeded, found no problems */
return 0;
+ else if (WARN_ON(err))
+ return err;
+
if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
struct qm_fqd fqd;
err = qman_query_fq(&fq, &fqd);
if (WARN_ON(err))
- return 0;
- if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
+ return err;
+ if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
fqd.cgid == cgrid) {
pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
cgrid, fq.fqid);
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 0cace9e0077e..f4e6e70de259 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -444,6 +444,9 @@ static int zero_priv_mem(struct device *dev, struct device_node *node,
/* map as cacheable, non-guarded */
void __iomem *tmpp = ioremap_prot(addr, sz, 0);
+ if (!tmpp)
+ return -ENOMEM;
+
memset_io(tmpp, 0, sz);
flush_dcache_range((unsigned long)tmpp,
(unsigned long)tmpp + sz);
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index d068e4820f49..adbaa30d3c5a 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -30,6 +30,9 @@
#include "qman_priv.h"
+struct qman_portal *qman_dma_portal;
+EXPORT_SYMBOL(qman_dma_portal);
+
/* Enable portal interupts (as opposed to polling mode) */
#define CONFIG_FSL_DPA_PIRQ_SLOW 1
#define CONFIG_FSL_DPA_PIRQ_FAST 1
@@ -150,6 +153,10 @@ static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
/* all assigned portals are initialized now */
qman_init_cgr_all();
}
+
+ if (!qman_dma_portal)
+ qman_dma_portal = p;
+
spin_unlock(&qman_lock);
dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
@@ -217,9 +224,9 @@ static int qman_portal_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
struct qm_portal_config *pcfg;
struct resource *addr_phys[2];
- const u32 *channel;
void __iomem *va;
- int irq, len, cpu;
+ int irq, cpu, err;
+ u32 val;
pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
if (!pcfg)
@@ -243,13 +250,13 @@ static int qman_portal_probe(struct platform_device *pdev)
return -ENXIO;
}
- channel = of_get_property(node, "cell-index", &len);
- if (!channel || (len != 4)) {
+ err = of_property_read_u32(node, "cell-index", &val);
+ if (err) {
dev_err(dev, "Can't get %s property 'cell-index'\n",
node->full_name);
- return -ENXIO;
+ return err;
}
- pcfg->channel = *channel;
+ pcfg->channel = val;
pcfg->cpu = -1;
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
@@ -259,15 +266,19 @@ static int qman_portal_probe(struct platform_device *pdev)
pcfg->irq = irq;
va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
- if (!va)
+ if (!va) {
+ dev_err(dev, "ioremap::CE failed\n");
goto err_ioremap1;
+ }
pcfg->addr_virt[DPAA_PORTAL_CE] = va;
va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
_PAGE_GUARDED | _PAGE_NO_CACHE);
- if (!va)
+ if (!va) {
+ dev_err(dev, "ioremap::CI failed\n");
goto err_ioremap2;
+ }
pcfg->addr_virt[DPAA_PORTAL_CI] = va;
@@ -285,8 +296,15 @@ static int qman_portal_probe(struct platform_device *pdev)
spin_unlock(&qman_lock);
pcfg->cpu = cpu;
- if (!init_pcfg(pcfg))
- goto err_ioremap2;
+ if (dma_set_mask(dev, DMA_BIT_MASK(40))) {
+ dev_err(dev, "dma_set_mask() failed\n");
+ goto err_portal_init;
+ }
+
+ if (!init_pcfg(pcfg)) {
+ dev_err(dev, "portal init failed\n");
+ goto err_portal_init;
+ }
/* clear irq affinity if assigned cpu is offline */
if (!cpu_online(cpu))
@@ -294,10 +312,11 @@ static int qman_portal_probe(struct platform_device *pdev)
return 0;
+err_portal_init:
+ iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
err_ioremap2:
iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
err_ioremap1:
- dev_err(dev, "ioremap failed\n");
return -ENXIO;
}
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
index 5cf821e623a9..53685b59718e 100644
--- a/drivers/soc/fsl/qbman/qman_priv.h
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -73,29 +73,23 @@ struct qm_mcr_querycgr {
struct __qm_mc_cgr cgr; /* CGR fields */
u8 __reserved2[6];
u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */
- u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ __be32 i_bcnt_lo; /* low 32-bits of 40-bit */
u8 __reserved3[3];
u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */
- u32 a_bcnt_lo; /* low 32-bits of 40-bit */
- u32 cscn_targ_swp[4];
+ __be32 a_bcnt_lo; /* low 32-bits of 40-bit */
+ __be32 cscn_targ_swp[4];
} __packed;
static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
{
- return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo;
+ return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo);
}
static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
{
- return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo;
+ return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo);
}
/* "Query FQ Non-Programmable Fields" */
-struct qm_mcc_queryfq_np {
- u8 _ncw_verb;
- u8 __reserved1[3];
- u32 fqid; /* 24-bit */
- u8 __reserved2[56];
-} __packed;
struct qm_mcr_queryfq_np {
u8 verb;
@@ -367,5 +361,6 @@ int qman_alloc_fq_table(u32 num_fqids);
#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
extern struct qman_portal *affine_portals[NR_CPUS];
+extern struct qman_portal *qman_dma_portal;
const struct qm_portal_config *qman_get_qm_portal_config(
struct qman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c
index 6880ff17f45e..2895d062cf51 100644
--- a/drivers/soc/fsl/qbman/qman_test_api.c
+++ b/drivers/soc/fsl/qbman/qman_test_api.c
@@ -65,7 +65,7 @@ static void fd_init(struct qm_fd *fd)
{
qm_fd_addr_set64(fd, 0xabdeadbeefLLU);
qm_fd_set_contig_big(fd, 0x0000ffff);
- fd->cmd = 0xfeedf00d;
+ fd->cmd = cpu_to_be32(0xfeedf00d);
}
static void fd_inc(struct qm_fd *fd)
@@ -86,26 +86,19 @@ static void fd_inc(struct qm_fd *fd)
len--;
qm_fd_set_param(fd, fmt, off, len);
- fd->cmd++;
+ fd->cmd = cpu_to_be32(be32_to_cpu(fd->cmd) + 1);
}
/* The only part of the 'fd' we can't memcmp() is the ppid */
-static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
+static bool fd_neq(const struct qm_fd *a, const struct qm_fd *b)
{
- int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
+ bool neq = qm_fd_addr_get64(a) != qm_fd_addr_get64(b);
- if (!r) {
- enum qm_fd_format fmt_a, fmt_b;
+ neq |= qm_fd_get_format(a) != qm_fd_get_format(b);
+ neq |= a->cfg != b->cfg;
+ neq |= a->cmd != b->cmd;
- fmt_a = qm_fd_get_format(a);
- fmt_b = qm_fd_get_format(b);
- r = fmt_a - fmt_b;
- }
- if (!r)
- r = a->cfg - b->cfg;
- if (!r)
- r = a->cmd - b->cmd;
- return r;
+ return neq;
}
/* test */
@@ -217,12 +210,12 @@ static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
struct qman_fq *fq,
const struct qm_dqrr_entry *dq)
{
- if (WARN_ON(fd_cmp(&fd_dq, &dq->fd))) {
+ if (WARN_ON(fd_neq(&fd_dq, &dq->fd))) {
pr_err("BADNESS: dequeued frame doesn't match;\n");
return qman_cb_dqrr_consume;
}
fd_inc(&fd_dq);
- if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
+ if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_neq(&fd_dq, &fd)) {
sdqcr_complete = 1;
wake_up(&waitqueue);
}
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
index 43cf66ba42f5..e87b65403b67 100644
--- a/drivers/soc/fsl/qbman/qman_test_stash.c
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -175,7 +175,7 @@ static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
/* links together the hp_cpu structs, in first-come first-serve order. */
static LIST_HEAD(hp_cpu_list);
-static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
+static DEFINE_SPINLOCK(hp_lock);
static unsigned int hp_cpu_list_length;
@@ -191,6 +191,9 @@ static void *__frame_ptr;
static u32 *frame_ptr;
static dma_addr_t frame_dma;
+/* needed for dma_map*() */
+static const struct qm_portal_config *pcfg;
+
/* the main function waits on this */
static DECLARE_WAIT_QUEUE_HEAD(queue);
@@ -210,16 +213,14 @@ static int allocate_frame_data(void)
{
u32 lfsr = HP_FIRST_WORD;
int loop;
- struct platform_device *pdev = platform_device_alloc("foobar", -1);
- if (!pdev) {
- pr_crit("platform_device_alloc() failed");
- return -EIO;
- }
- if (platform_device_add(pdev)) {
- pr_crit("platform_device_add() failed");
+ if (!qman_dma_portal) {
+ pr_crit("portal not available\n");
return -EIO;
}
+
+ pcfg = qman_get_qm_portal_config(qman_dma_portal);
+
__frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
if (!__frame_ptr)
return -ENOMEM;
@@ -229,15 +230,22 @@ static int allocate_frame_data(void)
frame_ptr[loop] = lfsr;
lfsr = do_lfsr(lfsr);
}
- frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
+
+ frame_dma = dma_map_single(pcfg->dev, frame_ptr, 4 * HP_NUM_WORDS,
DMA_BIDIRECTIONAL);
- platform_device_del(pdev);
- platform_device_put(pdev);
+ if (dma_mapping_error(pcfg->dev, frame_dma)) {
+ pr_crit("dma mapping failure\n");
+ kfree(__frame_ptr);
+ return -EIO;
+ }
+
return 0;
}
static void deallocate_frame_data(void)
{
+ dma_unmap_single(pcfg->dev, frame_dma, 4 * HP_NUM_WORDS,
+ DMA_BIDIRECTIONAL);
kfree(__frame_ptr);
}
@@ -249,7 +257,8 @@ static inline int process_frame_data(struct hp_handler *handler,
int loop;
if (qm_fd_addr_get64(fd) != handler->addr) {
- pr_crit("bad frame address");
+ pr_crit("bad frame address, [%llX != %llX]\n",
+ qm_fd_addr_get64(fd), handler->addr);
return -EIO;
}
for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
@@ -397,8 +406,9 @@ static int init_handler(void *h)
goto failed;
}
memset(&opts, 0, sizeof(opts));
- opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
- opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
+ opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL |
+ QM_INITFQ_WE_CONTEXTA);
+ opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING);
qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
QMAN_INITFQ_FLAG_LOCAL, &opts);
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 2707a827261b..ade168f5328e 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -717,9 +717,5 @@ static struct platform_driver qe_driver = {
.resume = qe_resume,
};
-static int __init qe_drv_init(void)
-{
- return platform_driver_register(&qe_driver);
-}
-device_initcall(qe_drv_init);
+builtin_platform_driver(qe_driver);
#endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 80378ddadc5c..c8823578a1b2 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -31,49 +31,49 @@
static void tce_iommu_detach_group(void *iommu_data,
struct iommu_group *iommu_group);
-static long try_increment_locked_vm(long npages)
+static long try_increment_locked_vm(struct mm_struct *mm, long npages)
{
long ret = 0, locked, lock_limit;
- if (!current || !current->mm)
- return -ESRCH; /* process exited */
+ if (WARN_ON_ONCE(!mm))
+ return -EPERM;
if (!npages)
return 0;
- down_write(&current->mm->mmap_sem);
- locked = current->mm->locked_vm + npages;
+ down_write(&mm->mmap_sem);
+ locked = mm->locked_vm + npages;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
ret = -ENOMEM;
else
- current->mm->locked_vm += npages;
+ mm->locked_vm += npages;
pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
npages << PAGE_SHIFT,
- current->mm->locked_vm << PAGE_SHIFT,
+ mm->locked_vm << PAGE_SHIFT,
rlimit(RLIMIT_MEMLOCK),
ret ? " - exceeded" : "");
- up_write(&current->mm->mmap_sem);
+ up_write(&mm->mmap_sem);
return ret;
}
-static void decrement_locked_vm(long npages)
+static void decrement_locked_vm(struct mm_struct *mm, long npages)
{
- if (!current || !current->mm || !npages)
- return; /* process exited */
+ if (!mm || !npages)
+ return;
- down_write(&current->mm->mmap_sem);
- if (WARN_ON_ONCE(npages > current->mm->locked_vm))
- npages = current->mm->locked_vm;
- current->mm->locked_vm -= npages;
+ down_write(&mm->mmap_sem);
+ if (WARN_ON_ONCE(npages > mm->locked_vm))
+ npages = mm->locked_vm;
+ mm->locked_vm -= npages;
pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
npages << PAGE_SHIFT,
- current->mm->locked_vm << PAGE_SHIFT,
+ mm->locked_vm << PAGE_SHIFT,
rlimit(RLIMIT_MEMLOCK));
- up_write(&current->mm->mmap_sem);
+ up_write(&mm->mmap_sem);
}
/*
@@ -89,6 +89,15 @@ struct tce_iommu_group {
};
/*
+ * A container needs to remember which preregistered region it has
+ * referenced to do proper cleanup at the userspace process exit.
+ */
+struct tce_iommu_prereg {
+ struct list_head next;
+ struct mm_iommu_table_group_mem_t *mem;
+};
+
+/*
* The container descriptor supports only a single group per container.
* Required by the API as the container is not supplied with the IOMMU group
* at the moment of initialization.
@@ -97,24 +106,68 @@ struct tce_container {
struct mutex lock;
bool enabled;
bool v2;
+ bool def_window_pending;
unsigned long locked_pages;
+ struct mm_struct *mm;
struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
struct list_head group_list;
+ struct list_head prereg_list;
};
+static long tce_iommu_mm_set(struct tce_container *container)
+{
+ if (container->mm) {
+ if (container->mm == current->mm)
+ return 0;
+ return -EPERM;
+ }
+ BUG_ON(!current->mm);
+ container->mm = current->mm;
+ atomic_inc(&container->mm->mm_count);
+
+ return 0;
+}
+
+static long tce_iommu_prereg_free(struct tce_container *container,
+ struct tce_iommu_prereg *tcemem)
+{
+ long ret;
+
+ ret = mm_iommu_put(container->mm, tcemem->mem);
+ if (ret)
+ return ret;
+
+ list_del(&tcemem->next);
+ kfree(tcemem);
+
+ return 0;
+}
+
static long tce_iommu_unregister_pages(struct tce_container *container,
__u64 vaddr, __u64 size)
{
struct mm_iommu_table_group_mem_t *mem;
+ struct tce_iommu_prereg *tcemem;
+ bool found = false;
if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
return -EINVAL;
- mem = mm_iommu_find(vaddr, size >> PAGE_SHIFT);
+ mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT);
if (!mem)
return -ENOENT;
- return mm_iommu_put(mem);
+ list_for_each_entry(tcemem, &container->prereg_list, next) {
+ if (tcemem->mem == mem) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -ENOENT;
+
+ return tce_iommu_prereg_free(container, tcemem);
}
static long tce_iommu_register_pages(struct tce_container *container,
@@ -122,22 +175,36 @@ static long tce_iommu_register_pages(struct tce_container *container,
{
long ret = 0;
struct mm_iommu_table_group_mem_t *mem = NULL;
+ struct tce_iommu_prereg *tcemem;
unsigned long entries = size >> PAGE_SHIFT;
if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
((vaddr + size) < vaddr))
return -EINVAL;
- ret = mm_iommu_get(vaddr, entries, &mem);
+ mem = mm_iommu_find(container->mm, vaddr, entries);
+ if (mem) {
+ list_for_each_entry(tcemem, &container->prereg_list, next) {
+ if (tcemem->mem == mem)
+ return -EBUSY;
+ }
+ }
+
+ ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
if (ret)
return ret;
+ tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
+ tcemem->mem = mem;
+ list_add(&tcemem->next, &container->prereg_list);
+
container->enabled = true;
return 0;
}
-static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
+static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
+ struct mm_struct *mm)
{
unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
tbl->it_size, PAGE_SIZE);
@@ -146,13 +213,13 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
BUG_ON(tbl->it_userspace);
- ret = try_increment_locked_vm(cb >> PAGE_SHIFT);
+ ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT);
if (ret)
return ret;
uas = vzalloc(cb);
if (!uas) {
- decrement_locked_vm(cb >> PAGE_SHIFT);
+ decrement_locked_vm(mm, cb >> PAGE_SHIFT);
return -ENOMEM;
}
tbl->it_userspace = uas;
@@ -160,7 +227,8 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
return 0;
}
-static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
+static void tce_iommu_userspace_view_free(struct iommu_table *tbl,
+ struct mm_struct *mm)
{
unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
tbl->it_size, PAGE_SIZE);
@@ -170,7 +238,7 @@ static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
vfree(tbl->it_userspace);
tbl->it_userspace = NULL;
- decrement_locked_vm(cb >> PAGE_SHIFT);
+ decrement_locked_vm(mm, cb >> PAGE_SHIFT);
}
static bool tce_page_is_contained(struct page *page, unsigned page_shift)
@@ -230,9 +298,6 @@ static int tce_iommu_enable(struct tce_container *container)
struct iommu_table_group *table_group;
struct tce_iommu_group *tcegrp;
- if (!current->mm)
- return -ESRCH; /* process exited */
-
if (container->enabled)
return -EBUSY;
@@ -277,8 +342,12 @@ static int tce_iommu_enable(struct tce_container *container)
if (!table_group->tce32_size)
return -EPERM;
+ ret = tce_iommu_mm_set(container);
+ if (ret)
+ return ret;
+
locked = table_group->tce32_size >> PAGE_SHIFT;
- ret = try_increment_locked_vm(locked);
+ ret = try_increment_locked_vm(container->mm, locked);
if (ret)
return ret;
@@ -296,10 +365,8 @@ static void tce_iommu_disable(struct tce_container *container)
container->enabled = false;
- if (!current->mm)
- return;
-
- decrement_locked_vm(container->locked_pages);
+ BUG_ON(!container->mm);
+ decrement_locked_vm(container->mm, container->locked_pages);
}
static void *tce_iommu_open(unsigned long arg)
@@ -317,6 +384,7 @@ static void *tce_iommu_open(unsigned long arg)
mutex_init(&container->lock);
INIT_LIST_HEAD_RCU(&container->group_list);
+ INIT_LIST_HEAD_RCU(&container->prereg_list);
container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;
@@ -326,7 +394,8 @@ static void *tce_iommu_open(unsigned long arg)
static int tce_iommu_clear(struct tce_container *container,
struct iommu_table *tbl,
unsigned long entry, unsigned long pages);
-static void tce_iommu_free_table(struct iommu_table *tbl);
+static void tce_iommu_free_table(struct tce_container *container,
+ struct iommu_table *tbl);
static void tce_iommu_release(void *iommu_data)
{
@@ -351,10 +420,20 @@ static void tce_iommu_release(void *iommu_data)
continue;
tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
- tce_iommu_free_table(tbl);
+ tce_iommu_free_table(container, tbl);
+ }
+
+ while (!list_empty(&container->prereg_list)) {
+ struct tce_iommu_prereg *tcemem;
+
+ tcemem = list_first_entry(&container->prereg_list,
+ struct tce_iommu_prereg, next);
+ WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem));
}
tce_iommu_disable(container);
+ if (container->mm)
+ mmdrop(container->mm);
mutex_destroy(&container->lock);
kfree(container);
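/*
 * Container release now cleans up what userspace may have left behind: every
 * remaining table is cleared and freed, every preregistered region is dropped
 * through tce_iommu_prereg_free(), and the mm reference taken when the
 * container was first used is returned with mmdrop().  The matching reference
 * is presumably taken in tce_iommu_mm_set(), defined in an earlier hunk.
 */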
@@ -369,13 +448,14 @@ static void tce_iommu_unuse_page(struct tce_container *container,
put_page(page);
}
-static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
+static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
+ unsigned long tce, unsigned long size,
unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
long ret = 0;
struct mm_iommu_table_group_mem_t *mem;
- mem = mm_iommu_lookup(tce, size);
+ mem = mm_iommu_lookup(container->mm, tce, size);
if (!mem)
return -EINVAL;
@@ -388,18 +468,18 @@ static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
return 0;
}
-static void tce_iommu_unuse_page_v2(struct iommu_table *tbl,
- unsigned long entry)
+static void tce_iommu_unuse_page_v2(struct tce_container *container,
+ struct iommu_table *tbl, unsigned long entry)
{
struct mm_iommu_table_group_mem_t *mem = NULL;
int ret;
unsigned long hpa = 0;
unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
- if (!pua || !current || !current->mm)
+ if (!pua)
return;
- ret = tce_iommu_prereg_ua_to_hpa(*pua, IOMMU_PAGE_SIZE(tbl),
+ ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
&hpa, &mem);
if (ret)
pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
@@ -429,7 +509,7 @@ static int tce_iommu_clear(struct tce_container *container,
continue;
if (container->v2) {
- tce_iommu_unuse_page_v2(tbl, entry);
+ tce_iommu_unuse_page_v2(container, tbl, entry);
continue;
}
@@ -509,13 +589,19 @@ static long tce_iommu_build_v2(struct tce_container *container,
unsigned long hpa;
enum dma_data_direction dirtmp;
+ if (!tbl->it_userspace) {
+ ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
+ if (ret)
+ return ret;
+ }
+
for (i = 0; i < pages; ++i) {
struct mm_iommu_table_group_mem_t *mem = NULL;
unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
entry + i);
- ret = tce_iommu_prereg_ua_to_hpa(tce, IOMMU_PAGE_SIZE(tbl),
- &hpa, &mem);
+ ret = tce_iommu_prereg_ua_to_hpa(container,
+ tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
if (ret)
break;
@@ -536,7 +622,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
if (ret) {
/* dirtmp cannot be DMA_NONE here */
- tce_iommu_unuse_page_v2(tbl, entry + i);
+ tce_iommu_unuse_page_v2(container, tbl, entry + i);
pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
__func__, entry << tbl->it_page_shift,
tce, ret);
@@ -544,7 +630,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
}
if (dirtmp != DMA_NONE)
- tce_iommu_unuse_page_v2(tbl, entry + i);
+ tce_iommu_unuse_page_v2(container, tbl, entry + i);
*pua = tce;
@@ -572,7 +658,7 @@ static long tce_iommu_create_table(struct tce_container *container,
if (!table_size)
return -EINVAL;
- ret = try_increment_locked_vm(table_size >> PAGE_SHIFT);
+ ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
if (ret)
return ret;
@@ -582,25 +668,17 @@ static long tce_iommu_create_table(struct tce_container *container,
WARN_ON(!ret && !(*ptbl)->it_ops->free);
WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));
- if (!ret && container->v2) {
- ret = tce_iommu_userspace_view_alloc(*ptbl);
- if (ret)
- (*ptbl)->it_ops->free(*ptbl);
- }
-
- if (ret)
- decrement_locked_vm(table_size >> PAGE_SHIFT);
-
return ret;
}
-static void tce_iommu_free_table(struct iommu_table *tbl)
+static void tce_iommu_free_table(struct tce_container *container,
+ struct iommu_table *tbl)
{
unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
- tce_iommu_userspace_view_free(tbl);
+ tce_iommu_userspace_view_free(tbl, container->mm);
tbl->it_ops->free(tbl);
- decrement_locked_vm(pages);
+ decrement_locked_vm(container->mm, pages);
}
static long tce_iommu_create_window(struct tce_container *container,
@@ -663,7 +741,7 @@ unset_exit:
table_group = iommu_group_get_iommudata(tcegrp->grp);
table_group->ops->unset_window(table_group, num);
}
- tce_iommu_free_table(tbl);
+ tce_iommu_free_table(container, tbl);
return ret;
}
@@ -701,12 +779,41 @@ static long tce_iommu_remove_window(struct tce_container *container,
/* Free table */
tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
- tce_iommu_free_table(tbl);
+ tce_iommu_free_table(container, tbl);
container->tables[num] = NULL;
return 0;
}
+static long tce_iommu_create_default_window(struct tce_container *container)
+{
+ long ret;
+ __u64 start_addr = 0;
+ struct tce_iommu_group *tcegrp;
+ struct iommu_table_group *table_group;
+
+ if (!container->def_window_pending)
+ return 0;
+
+ if (!tce_groups_attached(container))
+ return -ENODEV;
+
+ tcegrp = list_first_entry(&container->group_list,
+ struct tce_iommu_group, next);
+ table_group = iommu_group_get_iommudata(tcegrp->grp);
+ if (!table_group)
+ return -ENODEV;
+
+ ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
+ table_group->tce32_size, 1, &start_addr);
+ WARN_ON_ONCE(!ret && start_addr);
+
+ if (!ret)
+ container->def_window_pending = false;
+
+ return ret;
+}
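/*
 * The default 32-bit DMA window used to be created eagerly when the first
 * group was attached (see the block removed from
 * tce_iommu_take_ownership_ddw() further down).  Attach now only sets
 * def_window_pending; the window is materialised here, on the first DMA
 * map/unmap or explicit window-create ioctl, by which point container->mm is
 * known and can be charged for the table.
 */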
+
static long tce_iommu_ioctl(void *iommu_data,
unsigned int cmd, unsigned long arg)
{
@@ -727,7 +834,17 @@ static long tce_iommu_ioctl(void *iommu_data,
}
return (ret < 0) ? 0 : ret;
+ }
+
+ /*
+ * Sanity check to prevent one userspace from manipulating
+ * another userspace mm.
+ */
+ BUG_ON(!container);
+ if (container->mm && container->mm != current->mm)
+ return -EPERM;
+ switch (cmd) {
case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
struct vfio_iommu_spapr_tce_info info;
struct tce_iommu_group *tcegrp;
@@ -797,6 +914,10 @@ static long tce_iommu_ioctl(void *iommu_data,
VFIO_DMA_MAP_FLAG_WRITE))
return -EINVAL;
+ ret = tce_iommu_create_default_window(container);
+ if (ret)
+ return ret;
+
num = tce_iommu_find_table(container, param.iova, &tbl);
if (num < 0)
return -ENXIO;
@@ -860,6 +981,10 @@ static long tce_iommu_ioctl(void *iommu_data,
if (param.flags)
return -EINVAL;
+ ret = tce_iommu_create_default_window(container);
+ if (ret)
+ return ret;
+
num = tce_iommu_find_table(container, param.iova, &tbl);
if (num < 0)
return -ENXIO;
@@ -888,6 +1013,10 @@ static long tce_iommu_ioctl(void *iommu_data,
minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
size);
+ ret = tce_iommu_mm_set(container);
+ if (ret)
+ return ret;
+
if (copy_from_user(&param, (void __user *)arg, minsz))
return -EFAULT;
@@ -911,6 +1040,9 @@ static long tce_iommu_ioctl(void *iommu_data,
if (!container->v2)
break;
+ if (!container->mm)
+ return -EPERM;
+
minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
size);
@@ -969,6 +1101,10 @@ static long tce_iommu_ioctl(void *iommu_data,
if (!container->v2)
break;
+ ret = tce_iommu_mm_set(container);
+ if (ret)
+ return ret;
+
if (!tce_groups_attached(container))
return -ENXIO;
@@ -986,6 +1122,10 @@ static long tce_iommu_ioctl(void *iommu_data,
mutex_lock(&container->lock);
+ ret = tce_iommu_create_default_window(container);
+ if (ret) {
+ mutex_unlock(&container->lock);
+ return ret;
+ }
+
ret = tce_iommu_create_window(container, create.page_shift,
create.window_size, create.levels,
&create.start_addr);
@@ -1003,6 +1143,10 @@ static long tce_iommu_ioctl(void *iommu_data,
if (!container->v2)
break;
+ ret = tce_iommu_mm_set(container);
+ if (ret)
+ return ret;
+
if (!tce_groups_attached(container))
return -ENXIO;
@@ -1018,6 +1162,11 @@ static long tce_iommu_ioctl(void *iommu_data,
if (remove.flags)
return -EINVAL;
+ if (container->def_window_pending && !remove.start_addr) {
+ container->def_window_pending = false;
+ return 0;
+ }
+
mutex_lock(&container->lock);
ret = tce_iommu_remove_window(container, remove.start_addr);
@@ -1043,7 +1192,7 @@ static void tce_iommu_release_ownership(struct tce_container *container,
continue;
tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
- tce_iommu_userspace_view_free(tbl);
+ tce_iommu_userspace_view_free(tbl, container->mm);
if (tbl->it_map)
iommu_release_ownership(tbl);
@@ -1062,10 +1211,7 @@ static int tce_iommu_take_ownership(struct tce_container *container,
if (!tbl || !tbl->it_map)
continue;
- rc = tce_iommu_userspace_view_alloc(tbl);
- if (!rc)
- rc = iommu_take_ownership(tbl);
-
+ rc = iommu_take_ownership(tbl);
if (rc) {
for (j = 0; j < i; ++j)
iommu_release_ownership(
@@ -1100,9 +1246,6 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container,
static long tce_iommu_take_ownership_ddw(struct tce_container *container,
struct iommu_table_group *table_group)
{
- long i, ret = 0;
- struct iommu_table *tbl = NULL;
-
if (!table_group->ops->create_table || !table_group->ops->set_window ||
!table_group->ops->release_ownership) {
WARN_ON_ONCE(1);
@@ -1111,47 +1254,7 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container,
table_group->ops->take_ownership(table_group);
- /*
- * If it the first group attached, check if there is
- * a default DMA window and create one if none as
- * the userspace expects it to exist.
- */
- if (!tce_groups_attached(container) && !container->tables[0]) {
- ret = tce_iommu_create_table(container,
- table_group,
- 0, /* window number */
- IOMMU_PAGE_SHIFT_4K,
- table_group->tce32_size,
- 1, /* default levels */
- &tbl);
- if (ret)
- goto release_exit;
- else
- container->tables[0] = tbl;
- }
-
- /* Set all windows to the new group */
- for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
- tbl = container->tables[i];
-
- if (!tbl)
- continue;
-
- /* Set the default window to a new group */
- ret = table_group->ops->set_window(table_group, i, tbl);
- if (ret)
- goto release_exit;
- }
-
return 0;
-
-release_exit:
- for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
- table_group->ops->unset_window(table_group, i);
-
- table_group->ops->release_ownership(table_group);
-
- return ret;
}
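/*
 * With the eager default-window code removed, taking DDW ownership is reduced
 * to the capability checks plus take_ownership(); actual window creation is
 * deferred through the def_window_pending flag set in tce_iommu_attach_group()
 * below and consumed by tce_iommu_create_default_window() above.
 */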
static int tce_iommu_attach_group(void *iommu_data,
@@ -1203,10 +1306,13 @@ static int tce_iommu_attach_group(void *iommu_data,
}
if (!table_group->ops || !table_group->ops->take_ownership ||
- !table_group->ops->release_ownership)
+ !table_group->ops->release_ownership) {
ret = tce_iommu_take_ownership(container, table_group);
- else
+ } else {
ret = tce_iommu_take_ownership_ddw(container, table_group);
+ if (!tce_groups_attached(container) && !container->tables[0])
+ container->def_window_pending = true;
+ }
if (!ret) {
tcegrp->grp = iommu_group;