Diffstat (limited to 'lib')
-rw-r--r--  lib/ioremap.c     39
-rw-r--r--  lib/iov_iter.c     4
-rw-r--r--  lib/radix-tree.c   4
-rw-r--r--  lib/refcount.c    14
4 files changed, 47 insertions, 14 deletions
diff --git a/lib/ioremap.c b/lib/ioremap.c
index a3e14ce92a56..4bb30206b942 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -14,6 +14,7 @@
 #include <asm/pgtable.h>
 
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+static int __read_mostly ioremap_p4d_capable;
 static int __read_mostly ioremap_pud_capable;
 static int __read_mostly ioremap_pmd_capable;
 static int __read_mostly ioremap_huge_disabled;
@@ -35,6 +36,11 @@ void __init ioremap_huge_init(void)
 	}
 }
 
+static inline int ioremap_p4d_enabled(void)
+{
+	return ioremap_p4d_capable;
+}
+
 static inline int ioremap_pud_enabled(void)
 {
 	return ioremap_pud_capable;
@@ -46,6 +52,7 @@ static inline int ioremap_pmd_enabled(void)
 }
 
 #else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+static inline int ioremap_p4d_enabled(void) { return 0; }
 static inline int ioremap_pud_enabled(void) { return 0; }
 static inline int ioremap_pmd_enabled(void) { return 0; }
 #endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
@@ -94,14 +101,14 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
 	return 0;
 }
 
-static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
+static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
 		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
 {
 	pud_t *pud;
 	unsigned long next;
 
 	phys_addr -= addr;
-	pud = pud_alloc(&init_mm, pgd, addr);
+	pud = pud_alloc(&init_mm, p4d, addr);
 	if (!pud)
 		return -ENOMEM;
 	do {
@@ -120,6 +127,32 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
 	return 0;
 }
 
+static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
+		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
+{
+	p4d_t *p4d;
+	unsigned long next;
+
+	phys_addr -= addr;
+	p4d = p4d_alloc(&init_mm, pgd, addr);
+	if (!p4d)
+		return -ENOMEM;
+	do {
+		next = p4d_addr_end(addr, end);
+
+		if (ioremap_p4d_enabled() &&
+		    ((next - addr) == P4D_SIZE) &&
+		    IS_ALIGNED(phys_addr + addr, P4D_SIZE)) {
+			if (p4d_set_huge(p4d, phys_addr + addr, prot))
+				continue;
+		}
+
+		if (ioremap_pud_range(p4d, addr, next, phys_addr + addr, prot))
+			return -ENOMEM;
+	} while (p4d++, addr = next, addr != end);
+	return 0;
+}
+
 int ioremap_page_range(unsigned long addr,
 		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
 {
@@ -135,7 +168,7 @@ int ioremap_page_range(unsigned long addr,
 	pgd = pgd_offset_k(addr);
 	do {
 		next = pgd_addr_end(addr, end);
-		err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, prot);
+		err = ioremap_p4d_range(pgd, addr, next, phys_addr+addr, prot);
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
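A note on the lib/ioremap.c change above: with the 5-level page-table rework, ioremap_page_range() now walks pgd -> p4d -> pud -> pmd -> pte, and each level installs a single huge leaf entry when the chunk being mapped exactly covers that level's size and the physical address is aligned to it; on configurations with fewer than five levels the p4d folds back into the pgd, so behaviour is unchanged. The sketch below is a minimal userspace model of that size/alignment test only, not kernel code; the EX_*_SIZE constants and the can_use_leaf() helper are assumptions made for the illustration.

/*
 * Illustrative sketch only -- not kernel code.  Models the check
 * "((next - addr) == LEVEL_SIZE) && IS_ALIGNED(phys_addr + addr, LEVEL_SIZE)"
 * that lets ioremap_p4d_range()/ioremap_pud_range()/ioremap_pmd_range()
 * place one large leaf entry instead of descending a level.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_PMD_SIZE (1ULL << 21)   /* 2 MiB, assumed for the example   */
#define EX_PUD_SIZE (1ULL << 30)   /* 1 GiB, assumed for the example   */
#define EX_P4D_SIZE (1ULL << 39)   /* 512 GiB, assumed for the example */

static bool can_use_leaf(uint64_t addr, uint64_t next,
                         uint64_t phys, uint64_t level_size)
{
        /* [addr, next) covers exactly one entry at this level and the
         * physical address is aligned to that entry size */
        return (next - addr) == level_size &&
               (phys & (level_size - 1)) == 0;
}

int main(void)
{
        /* a 1 GiB chunk whose physical address is 1 GiB aligned */
        uint64_t addr = 0, next = EX_PUD_SIZE, phys = 3 * EX_PUD_SIZE;

        printf("p4d leaf: %d\n", can_use_leaf(addr, next, phys, EX_P4D_SIZE));
        printf("pud leaf: %d\n", can_use_leaf(addr, next, phys, EX_PUD_SIZE));
        printf("pmd leaf: %d\n", can_use_leaf(addr, next, phys, EX_PMD_SIZE));
        return 0;
}

For this chunk only the pud-level test succeeds, so the range is mapped with one 1 GiB entry rather than 512 pmd entries or 262144 pte entries.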
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 97db876c6862..672c32f9f960 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -604,7 +604,7 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 		return 0;
 	}
 	iterate_and_advance(i, bytes, v,
-		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
 		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
@@ -625,7 +625,7 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 	if (unlikely(i->count < bytes))
 		return false;
 	iterate_all_kinds(i, bytes, v, ({
-		if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					     v.iov_base, v.iov_len))
 			return false;
 		0;}),
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 5ed506d648c4..691a9ad48497 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -2129,8 +2129,8 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
 		struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
 		if (!bitmap)
 			return 0;
-		bitmap = this_cpu_cmpxchg(ida_bitmap, NULL, bitmap);
-		kfree(bitmap);
+		if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap))
+			kfree(bitmap);
 	}
 
 	return 1;
diff --git a/lib/refcount.c b/lib/refcount.c
index 1d33366189d1..aa09ad3c30b0 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -58,7 +58,7 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 		val = old;
 	}
 
-	WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
 	return true;
 }
@@ -66,7 +66,7 @@ EXPORT_SYMBOL_GPL(refcount_add_not_zero);
 
 void refcount_add(unsigned int i, refcount_t *r)
 {
-	WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+	WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
 }
 EXPORT_SYMBOL_GPL(refcount_add);
 
@@ -97,7 +97,7 @@ bool refcount_inc_not_zero(refcount_t *r)
 		val = old;
 	}
 
-	WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
 	return true;
 }
@@ -111,7 +111,7 @@ EXPORT_SYMBOL_GPL(refcount_inc_not_zero);
  */
 void refcount_inc(refcount_t *r)
 {
-	WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+	WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
 }
 EXPORT_SYMBOL_GPL(refcount_inc);
 
@@ -125,7 +125,7 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 
 		new = val - i;
 		if (new > val) {
-			WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
 			return false;
 		}
 
@@ -164,7 +164,7 @@ EXPORT_SYMBOL_GPL(refcount_dec_and_test);
  */
 void refcount_dec(refcount_t *r)
 {
-	WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
+	WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
 }
 EXPORT_SYMBOL_GPL(refcount_dec);
 
@@ -204,7 +204,7 @@ bool refcount_dec_not_one(refcount_t *r)
 
 		new = val - 1;
 		if (new > val) {
-			WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
 			return true;
 		}
 
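A note on the lib/radix-tree.c hunk above: this_cpu_cmpxchg(ida_bitmap, NULL, bitmap) returns the previous contents of the per-CPU slot. The old code freed that return value, so when the slot was already populated it freed the bitmap still cached there and leaked the fresh allocation; the fixed code frees the fresh allocation only when the install did not happen. The lib/refcount.c hunks simply switch WARN() to WARN_ONCE(), so a saturated or misused refcount_t is reported once rather than on every subsequent operation. Below is a small userspace model of the fixed preload pattern using C11 atomics; the single global slot and the preload() name are assumptions made for the illustration, not the kernel's per-CPU machinery.

/*
 * Userspace model of the fixed ida_pre_get() preload logic -- illustrative
 * only.  One slot caches a preallocated bitmap; the compare-and-swap
 * installs the new allocation only if the slot is empty, and the new
 * allocation is freed only when the install failed.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct ida_bitmap { unsigned long bits[16]; };

static _Atomic(struct ida_bitmap *) slot;   /* stand-in for the per-CPU var */

static int preload(void)
{
        struct ida_bitmap *fresh = calloc(1, sizeof(*fresh));
        struct ida_bitmap *expected = NULL;

        if (!fresh)
                return 0;
        /* install fresh only if the slot is still empty */
        if (!atomic_compare_exchange_strong(&slot, &expected, fresh))
                free(fresh);   /* slot already held a bitmap: drop only ours */
        return 1;
}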