author | Dave Anglin <dave.anglin@bell.net> | 2021-11-03 12:49:32 +0100
committer | Helge Deller <deller@gmx.de> | 2021-11-04 11:21:20 +0100
commit | 7e992711dddbdb1c27d077432d8440fefd44819f (patch)
tree | 01208b85e053dda7deeb9eac8e3924f62b9ba144 /arch/parisc/include
parent | 014966dcf76bce5717f7d974d0410d3402a651c2 (diff)
download | linux-7e992711dddbdb1c27d077432d8440fefd44819f.tar.bz2
parisc: Don't disable interrupts in cmpxchg and futex operations
I no longer think interrupts can be disabled in the futex and cmpxchg
operations because of COW breaks. This is not ideal, but I suspect it's the
best we can do.
For the cmpxchg operations in syscall.S, we rely on the code to not
schedule off the gateway page. For the futex, I added code to disable
preemption.
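
To make the resulting shape of the futex path easier to see, here is a rough userspace analogue of the pattern the patch arrives at: hash the futex address to one of a small set of locks, take that lock around the load/compare/store of the user word, then release it. Everything in this sketch (futex_lock_for(), the modulo hash, NLOCKS) is invented for illustration; it is not the kernel code and has no counterpart to preempt_disable().

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NLOCKS 16			/* mirrors the idea of a small lock table */

static pthread_mutex_t futex_locks[NLOCKS];

/* Pick a lock from the address, loosely like the kernel's index computation. */
static pthread_mutex_t *futex_lock_for(uint32_t *uaddr)
{
	return &futex_locks[((uintptr_t)uaddr >> 2) % NLOCKS];
}

/* Compare-and-exchange the word at uaddr under its per-address lock. */
static int futex_cmpxchg(uint32_t *uval, uint32_t *uaddr,
			 uint32_t oldval, uint32_t newval)
{
	pthread_mutex_t *lock = futex_lock_for(uaddr);
	uint32_t val;

	pthread_mutex_lock(lock);	/* _futex_spin_lock() in the patch */
	val = *uaddr;			/* get_user() in the kernel version */
	if (val == oldval)
		*uaddr = newval;	/* put_user() in the kernel version */
	pthread_mutex_unlock(lock);	/* _futex_spin_unlock() in the patch */

	*uval = val;
	return 0;
}

int main(void)
{
	uint32_t futex_word = 0, seen;

	for (int i = 0; i < NLOCKS; i++)
		pthread_mutex_init(&futex_locks[i], NULL);

	futex_cmpxchg(&seen, &futex_word, 0, 1);
	printf("saw %u, futex word is now %u\n", seen, futex_word);
	return 0;
}

Because unrelated futex words can hash to the same lock, operations on them serialize against each other, which is consistent with the comment in the patched code that this "should scale to a couple of CPUs" rather than to large machines.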
So far, I haven't seen the warnings with the attached change, but the
change is only lightly tested.
Signed-off-by: Dave Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>
Diffstat (limited to 'arch/parisc/include')
-rw-r--r-- | arch/parisc/include/asm/futex.h | 24
1 file changed, 11 insertions(+), 13 deletions(-)
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index e38a118cf65d..70cf8f0a7617 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -11,35 +11,34 @@
    sixteen four-word locks. */
 
 static inline void
-_futex_spin_lock_irqsave(u32 __user *uaddr, unsigned long int *flags)
+_futex_spin_lock(u32 __user *uaddr)
 {
 	extern u32 lws_lock_start[];
 	long index = ((long)uaddr & 0x3f8) >> 1;
 	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
-	local_irq_save(*flags);
+	preempt_disable();
 	arch_spin_lock(s);
 }
 
 static inline void
-_futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags)
+_futex_spin_unlock(u32 __user *uaddr)
 {
 	extern u32 lws_lock_start[];
 	long index = ((long)uaddr & 0x3f8) >> 1;
 	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
 	arch_spin_unlock(s);
-	local_irq_restore(*flags);
+	preempt_enable();
 }
 
 static inline int
 arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 {
-	unsigned long int flags;
 	int oldval, ret;
 	u32 tmp;
 
-	_futex_spin_lock_irqsave(uaddr, &flags);
-
 	ret = -EFAULT;
+
+	_futex_spin_lock(uaddr);
 	if (unlikely(get_user(oldval, uaddr) != 0))
 		goto out_pagefault_enable;
 
@@ -70,7 +69,7 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 		ret = -EFAULT;
 
 out_pagefault_enable:
-	_futex_spin_unlock_irqrestore(uaddr, &flags);
+	_futex_spin_unlock(uaddr);
 
 	if (!ret)
 		*oval = oldval;
@@ -83,7 +82,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
 	u32 val;
-	unsigned long flags;
 
 	/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
 	 * our gateway page, and causes no end of trouble...
@@ -100,19 +98,19 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	 * address. This should scale to a couple of CPUs.
 	 */
 
-	_futex_spin_lock_irqsave(uaddr, &flags);
+	_futex_spin_lock(uaddr);
 	if (unlikely(get_user(val, uaddr) != 0)) {
-		_futex_spin_unlock_irqrestore(uaddr, &flags);
+		_futex_spin_unlock(uaddr);
 		return -EFAULT;
 	}
 
 	if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
-		_futex_spin_unlock_irqrestore(uaddr, &flags);
+		_futex_spin_unlock(uaddr);
 		return -EFAULT;
 	}
 
 	*uval = val;
-	_futex_spin_unlock_irqrestore(uaddr, &flags);
+	_futex_spin_unlock(uaddr);
 	return 0;
 }
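
For reference, the lock-index expression shared by both helpers above can be evaluated on its own. This small host-side sketch (the sample addresses are made up, purely for illustration) prints which lws_lock_start[] slot a given futex address would select:

#include <stdio.h>

/* Same index expression as _futex_spin_lock()/_futex_spin_unlock() above:
 * bits 3..9 of the user address, shifted right by one. */
static long lws_lock_index(unsigned long uaddr)
{
	return ((long)uaddr & 0x3f8) >> 1;
}

int main(void)
{
	unsigned long addrs[] = {
		0x40001000, 0x40001004, 0x40001008, 0x40001010, 0x40001100
	};

	for (unsigned int i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
		printf("uaddr 0x%08lx -> lws_lock_start[%ld]\n",
		       addrs[i], lws_lock_index(addrs[i]));
	return 0;
}

Running it shows that two words in the same 8-byte region map to the same slot, while words further apart spread across different slots.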