author    Anton Ivanov <anton.ivanov@cambridgegreys.com>  2021-03-12 15:16:09 +0000
committer Richard Weinberger <richard@nod.at>             2021-06-17 22:01:45 +0200
commit    dd3035a21ba7ccaa883d7107d357ad06320d78fc (patch)
tree      70f67dc7aa7933b22aa2a38eea3bd5dddd006fb4 /arch/um
parent    c0ecca6604b80e438b032578634c6e133c7028f6 (diff)
download  linux-dd3035a21ba7ccaa883d7107d357ad06320d78fc.tar.bz2
um: add a UML specific futex implementation
The generic asm futex implementation emulates atomic access to memory by doing a get_user followed by a put_user. On UML these translate into two separate mapping operations, with paging re-enabled in between; this, in turn, may end up changing interrupts, invoking the signal loop, etc.

Replace the generic implementation with a single mapping followed by the operation performed directly on the mapped segment.

Signed-off-by: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
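
For context, here is a minimal sketch of the generic path being replaced (illustrative only; the helper name is made up and the real code lives in include/asm-generic/futex.h). Each futex op is a get_user of the old value, the arithmetic in kernel space, and a put_user of the result, so every operation costs two user accesses and therefore two mapping operations on UML:

#include <linux/futex.h>
#include <linux/uaccess.h>

/*
 * Illustrative sketch, not the asm-generic/futex.h source: FUTEX_OP_ADD
 * done the generic way. Each user access below is a separate mapping
 * operation under UML.
 */
static int generic_style_futex_add(u32 oparg, int *oval, u32 __user *uaddr)
{
	u32 oldval, newval;

	if (get_user(oldval, uaddr))	/* first mapping */
		return -EFAULT;

	newval = oldval + oparg;

	if (put_user(newval, uaddr))	/* second mapping */
		return -EFAULT;

	*oval = oldval;
	return 0;
}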
Diffstat (limited to 'arch/um')
-rw-r--r--	arch/um/include/asm/Kbuild	1
-rw-r--r--	arch/um/include/asm/futex.h	14
-rw-r--r--	arch/um/kernel/skas/uaccess.c	136
3 files changed, 150 insertions(+), 1 deletion(-)
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 0c31d19a7a9c..e5a7b552bb38 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -9,7 +9,6 @@ generic-y += exec.h
generic-y += extable.h
generic-y += fb.h
generic-y += ftrace.h
-generic-y += futex.h
generic-y += hw_irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
diff --git a/arch/um/include/asm/futex.h b/arch/um/include/asm/futex.h
new file mode 100644
index 000000000000..780aa6bfc050
--- /dev/null
+++ b/arch/um/include/asm/futex.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_UM_FUTEX_H
+#define _ASM_UM_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+
+int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr);
+int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval);
+
+#endif
diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c
index 2dec915abe6f..6c76df96e858 100644
--- a/arch/um/kernel/skas/uaccess.c
+++ b/arch/um/kernel/skas/uaccess.c
@@ -11,6 +11,7 @@
#include <asm/current.h>
#include <asm/page.h>
#include <kern_util.h>
+#include <asm/futex.h>
#include <os.h>

pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
@@ -248,3 +249,138 @@ long __strnlen_user(const void __user *str, long len)
return 0;
}
EXPORT_SYMBOL(__strnlen_user);
+
+/**
+ * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
+ * argument and comparison of the previous
+ * futex value with another constant.
+ *
+ * @op: operation to execute, with @oparg as its argument
+ * @uaddr: pointer to user space address; the old value is returned via @oval
+ *
+ * Return:
+ * 0 - On success
+ * -EFAULT - User access resulted in a page fault
+ * -EAGAIN - Atomic operation was unable to complete due to contention
+ * -ENOSYS - Operation not supported
+ */
+
+int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
+{
+ int oldval, ret;
+ struct page *page;
+ unsigned long addr = (unsigned long) uaddr;
+ pte_t *pte;
+
+ ret = -EFAULT;
+ if (!access_ok(uaddr, sizeof(*uaddr)))
+ return -EFAULT;
+ preempt_disable();
+ pte = maybe_map(addr, 1);
+ if (pte == NULL)
+ goto out_inuser;
+
+ page = pte_page(*pte);
+#ifdef CONFIG_64BIT
+ pagefault_disable();
+ addr = (unsigned long) page_address(page) +
+ (((unsigned long) addr) & ~PAGE_MASK);
+#else
+ addr = (unsigned long) kmap_atomic(page) +
+ ((unsigned long) addr & ~PAGE_MASK);
+#endif
+ uaddr = (u32 *) addr;
+ oldval = *uaddr;
+
+ ret = 0;
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ *uaddr = oparg;
+ break;
+ case FUTEX_OP_ADD:
+ *uaddr += oparg;
+ break;
+ case FUTEX_OP_OR:
+ *uaddr |= oparg;
+ break;
+ case FUTEX_OP_ANDN:
+ *uaddr &= ~oparg;
+ break;
+ case FUTEX_OP_XOR:
+ *uaddr ^= oparg;
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+#ifdef CONFIG_64BIT
+ pagefault_enable();
+#else
+ kunmap_atomic((void *)addr);
+#endif
+
+out_inuser:
+ preempt_enable();
+
+ if (ret == 0)
+ *oval = oldval;
+
+ return ret;
+}
+EXPORT_SYMBOL(arch_futex_atomic_op_inuser);
+
+/**
+ * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the
+ * uaddr with newval if the current value is
+ * oldval.
+ * @uval: pointer to store content of @uaddr
+ * @uaddr: pointer to user space address
+ * @oldval: old value
+ * @newval: new value to store to @uaddr
+ *
+ * Return:
+ * 0 - On success
+ * -EFAULT - User access resulted in a page fault
+ * -EAGAIN - Atomic operation was unable to complete due to contention
+ * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
+ */
+
+int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ struct page *page;
+ pte_t *pte;
+ int ret = -EFAULT;
+
+ if (!access_ok(uaddr, sizeof(*uaddr)))
+ return -EFAULT;
+
+ preempt_disable();
+ pte = maybe_map((unsigned long) uaddr, 1);
+ if (pte == NULL)
+ goto out_inatomic;
+
+ page = pte_page(*pte);
+#ifdef CONFIG_64BIT
+ pagefault_disable();
+ uaddr = page_address(page) + (((unsigned long) uaddr) & ~PAGE_MASK);
+#else
+ uaddr = kmap_atomic(page) + ((unsigned long) uaddr & ~PAGE_MASK);
+#endif
+
+ /* cmpxchg() returns the value read from *uaddr; pass it back via *uval */
+ *uval = cmpxchg(uaddr, oldval, newval);
+
+#ifdef CONFIG_64BIT
+ pagefault_enable();
+#else
+ kunmap_atomic(uaddr);
+#endif
+
+ ret = 0;
+
+out_inatomic:
+ preempt_enable();
+ return ret;
+}
+EXPORT_SYMBOL(futex_atomic_cmpxchg_inatomic);
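
For reference, the new arch hook is driven by the core futex code, which decodes the op word supplied by userspace and then compares the returned old value against the encoded comparison argument. A rough sketch of that caller side (illustrative, with a made-up name; see futex_atomic_op_inuser() in kernel/futex.c for the real decoding, including the FUTEX_OP_OPARG_SHIFT handling omitted here):

#include <linux/futex.h>
#include <linux/bitops.h>
#include <asm/futex.h>

/* Illustrative sketch of the caller; not the real kernel/futex.c code. */
static int sketch_futex_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
{
	unsigned int op  = (encoded_op & 0x70000000) >> 28;
	unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
	int oparg  = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
	int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
	int oldval, ret;

	/* This is where the UML implementation added above gets called. */
	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (ret)
		return ret;

	switch (cmp) {
	case FUTEX_OP_CMP_EQ:
		return oldval == cmparg;
	case FUTEX_OP_CMP_NE:
		return oldval != cmparg;
	case FUTEX_OP_CMP_LT:
		return oldval < cmparg;
	default:
		return -ENOSYS;
	}
}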