-rw-r--r--  fs/btrfs/inode.c            6
-rw-r--r--  fs/jffs2/acl.c              3
-rw-r--r--  fs/jfs/acl.c               13
-rw-r--r--  include/linux/posix_acl.h  10
-rw-r--r--  kernel/futex.c             45
-rw-r--r--  mm/page_alloc.c             4
-rw-r--r--  mm/shmem.c                  5
-rw-r--r--  mm/slub.c                  10
8 files changed, 55 insertions(+), 41 deletions(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 78ad38ddd01f..dbe1aabf96cd 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2122,10 +2122,8 @@ static void btrfs_read_locked_inode(struct inode *inode)
* any xattrs or acls
*/
maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
- if (!maybe_acls) {
- inode->i_acl = NULL;
- inode->i_default_acl = NULL;
- }
+ if (!maybe_acls)
+ cache_no_acl(inode);
BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
alloc_group_block, 0);
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index edd2ad6416d8..8fcb6239218e 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -284,8 +284,7 @@ int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, int *i_mode)
struct posix_acl *acl, *clone;
int rc;
- inode->i_default_acl = NULL;
- inode->i_acl = NULL;
+ cache_no_acl(inode);
if (S_ISLNK(*i_mode))
return 0; /* Symlink always has no-ACL */
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index f272bf032e1e..91fa3ad6e8c2 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -118,15 +118,16 @@ out:
static int jfs_check_acl(struct inode *inode, int mask)
{
- if (inode->i_acl == ACL_NOT_CACHED) {
- struct posix_acl *acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);
- if (IS_ERR(acl))
- return PTR_ERR(acl);
+ struct posix_acl *acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);
+
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ if (acl) {
+ int error = posix_acl_permission(inode, acl, mask);
posix_acl_release(acl);
+ return error;
}
- if (inode->i_acl)
- return posix_acl_permission(inode, inode->i_acl, mask);
return -EAGAIN;
}
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 0cdba01b7756..065a3652a3ea 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -83,6 +83,7 @@ extern int posix_acl_chmod_masq(struct posix_acl *, mode_t);
extern struct posix_acl *get_posix_acl(struct inode *, int);
extern int set_posix_acl(struct inode *, int, struct posix_acl *);
+#ifdef CONFIG_FS_POSIX_ACL
static inline struct posix_acl *get_cached_acl(struct inode *inode, int type)
{
struct posix_acl **p, *acl;
@@ -146,5 +147,14 @@ static inline void forget_cached_acl(struct inode *inode, int type)
if (old != ACL_NOT_CACHED)
posix_acl_release(old);
}
+#endif
+
+static inline void cache_no_acl(struct inode *inode)
+{
+#ifdef CONFIG_FS_POSIX_ACL
+ inode->i_acl = NULL;
+ inode->i_default_acl = NULL;
+#endif
+}
#endif /* __LINUX_POSIX_ACL_H */
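The new cache_no_acl() helper gives inode->i_acl three distinguishable states, which is what lets jfs_check_acl() above drop its open-coded cache peek. A minimal illustrative sketch of how a reader interprets those states (not part of the patch; ACL_NOT_CACHED and posix_acl_dup() are the existing kernel definitions):

/* Illustrative only: the three states of inode->i_acl after this patch. */
struct posix_acl *acl = inode->i_acl;

if (acl == ACL_NOT_CACHED) {
	/* nothing cached yet: the filesystem must read the ACL from disk */
} else if (acl == NULL) {
	/* cache_no_acl() ran: the inode is known to have no ACL */
} else {
	/* a live ACL is cached: take a reference before using it */
	acl = posix_acl_dup(acl);
}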
diff --git a/kernel/futex.c b/kernel/futex.c
index 80b5ce716596..1c337112335c 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -284,6 +284,25 @@ void put_futex_key(int fshared, union futex_key *key)
drop_futex_key_refs(key);
}
+/*
+ * fault_in_user_writeable - fault in user address and verify RW access
+ * @uaddr: pointer to faulting user space address
+ *
+ * Slow path to fix up the fault we just took in the atomic write
+ * access to @uaddr.
+ *
+ * We have no generic implementation of a non-destructive write to the
+ * user address. We know that we faulted inside the pagefault-disabled
+ * atomic section, so we might as well avoid the #PF overhead by
+ * calling get_user_pages() right away.
+ */
+static int fault_in_user_writeable(u32 __user *uaddr)
+{
+ int ret = get_user_pages(current, current->mm, (unsigned long)uaddr,
+ sizeof(*uaddr), 1, 0, NULL, NULL);
+ return ret < 0 ? ret : 0;
+}
+
/**
* futex_top_waiter() - Return the highest priority waiter on a futex
* @hb: the hash bucket the futex_q's reside in
@@ -896,7 +915,6 @@ retry:
retry_private:
op_ret = futex_atomic_op_inuser(op, uaddr2);
if (unlikely(op_ret < 0)) {
- u32 dummy;
double_unlock_hb(hb1, hb2);
@@ -914,7 +932,7 @@ retry_private:
goto out_put_keys;
}
- ret = get_user(dummy, uaddr2);
+ ret = fault_in_user_writeable(uaddr2);
if (ret)
goto out_put_keys;
@@ -1204,7 +1222,7 @@ retry_private:
double_unlock_hb(hb1, hb2);
put_futex_key(fshared, &key2);
put_futex_key(fshared, &key1);
- ret = get_user(curval2, uaddr2);
+ ret = fault_in_user_writeable(uaddr2);
if (!ret)
goto retry;
goto out;
@@ -1482,7 +1500,7 @@ retry:
handle_fault:
spin_unlock(q->lock_ptr);
- ret = get_user(uval, uaddr);
+ ret = fault_in_user_writeable(uaddr);
spin_lock(q->lock_ptr);
@@ -1807,7 +1825,6 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
{
struct hrtimer_sleeper timeout, *to = NULL;
struct futex_hash_bucket *hb;
- u32 uval;
struct futex_q q;
int res, ret;
@@ -1909,16 +1926,9 @@ out:
return ret != -EINTR ? ret : -ERESTARTNOINTR;
uaddr_faulted:
- /*
- * We have to r/w *(int __user *)uaddr, and we have to modify it
- * atomically. Therefore, if we continue to fault after get_user()
- * below, we need to handle the fault ourselves, while still holding
- * the mmap_sem. This can occur if the uaddr is under contention as
- * we have to drop the mmap_sem in order to call get_user().
- */
queue_unlock(&q, hb);
- ret = get_user(uval, uaddr);
+ ret = fault_in_user_writeable(uaddr);
if (ret)
goto out_put_key;
@@ -2013,17 +2023,10 @@ out:
return ret;
pi_faulted:
- /*
- * We have to r/w *(int __user *)uaddr, and we have to modify it
- * atomically. Therefore, if we continue to fault after get_user()
- * below, we need to handle the fault ourselves, while still holding
- * the mmap_sem. This can occur if the uaddr is under contention as
- * we have to drop the mmap_sem in order to call get_user().
- */
spin_unlock(&hb->lock);
put_futex_key(fshared, &key);
- ret = get_user(uval, uaddr);
+ ret = fault_in_user_writeable(uaddr);
if (!ret)
goto retry;
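Every fault site in futex.c now funnels through fault_in_user_writeable() instead of the old read-only get_user() probe, which could fault a page in readably yet never resolve a write-protect fault on the subsequent atomic write. A hedged sketch of the common retry shape these call sites share; do_futex_op() and unlock_state() are hypothetical stand-ins for the per-site code:

/* Hypothetical sketch of the retry pattern, not actual kernel code. */
for (;;) {
	int ret = do_futex_op(uaddr);		/* atomic op, pagefaults disabled */
	if (ret != -EFAULT)
		return ret;			/* success, or a non-fault error */
	unlock_state();				/* drop hb locks before faulting */
	if (fault_in_user_writeable(uaddr))	/* gup with write=1 maps it r/w */
		return -EFAULT;			/* fault cannot be resolved */
	/* the page is now present and writeable: retry the atomic op */
}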
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index aecc9cdfdfce..5d714f8fb303 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1153,10 +1153,10 @@ again:
* properly detect and handle allocation failures.
*
* We most definitely don't want callers attempting to
- * allocate greater than single-page units with
+ * allocate greater than order-1 page units with
* __GFP_NOFAIL.
*/
- WARN_ON_ONCE(order > 0);
+ WARN_ON_ONCE(order > 1);
}
spin_lock_irqsave(&zone->lock, flags);
page = __rmqueue(zone, order, migratetype);
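For scale on the relaxed warning: an order-n request asks for 2^n physically contiguous pages, so order 1 is only two pages (8 KiB with 4 KiB pages), and the check now tolerates those under __GFP_NOFAIL while still warning above order 1. Illustrative arithmetic:

/* Illustrative: bytes requested by an order-n allocation. */
unsigned long bytes = PAGE_SIZE << order;	/* order 0: 4 KiB, order 1: 8 KiB */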
diff --git a/mm/shmem.c b/mm/shmem.c
index 5f2019fc7895..d713239ce2ce 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1558,6 +1558,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, int mode,
spin_lock_init(&info->lock);
info->flags = flags & VM_NORESERVE;
INIT_LIST_HEAD(&info->swaplist);
+ cache_no_acl(inode);
switch (mode & S_IFMT) {
default:
@@ -2379,10 +2380,6 @@ static struct inode *shmem_alloc_inode(struct super_block *sb)
p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
if (!p)
return NULL;
-#ifdef CONFIG_TMPFS_POSIX_ACL
- p->vfs_inode.i_acl = NULL;
- p->vfs_inode.i_default_acl = NULL;
-#endif
return &p->vfs_inode;
}
diff --git a/mm/slub.c b/mm/slub.c
index ce62b770e2fc..819f056b39c6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1085,11 +1085,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
struct page *page;
struct kmem_cache_order_objects oo = s->oo;
+ gfp_t alloc_gfp;
flags |= s->allocflags;
- page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
- oo);
+ /*
+ * Let the initial higher-order allocation fail under memory pressure
+ * so we fall back to the minimum order allocation.
+ */
+ alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
+
+ page = alloc_slab_page(alloc_gfp, node, oo);
if (unlikely(!page)) {
oo = s->min;
/*
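The slub path above strips __GFP_NOFAIL and adds __GFP_NOWARN | __GFP_NORETRY so the optimistic higher-order attempt may fail fast under memory pressure; the fall-back then retries at s->min with the caller's original flags. A hedged sketch of the two-step pattern, with alloc_pages() and the hypothetical preferred_order/min_order standing in for the slub specifics:

/* Hypothetical sketch of the two-step allocation, not the slub code. */
gfp_t try_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;

struct page *page = alloc_pages(try_gfp, preferred_order);	/* may fail fast */
if (!page) {
	/* fall back to the smallest workable order, with the caller's
	 * full flags so __GFP_NOFAIL (if set) applies to this attempt */
	page = alloc_pages(flags, min_order);
}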