author    Linus Torvalds <torvalds@linux-foundation.org>  2019-09-26 10:29:42 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-09-26 10:29:42 -0700
commit    cbafe18c71028d5e0ee1626b4776fea5d5824a78 (patch)
tree      2bb7db7db4ed8df2801f7c16553c69fb27379f7f
parent    f41def397161053eb0d3ed6861ef65985efbf293 (diff)
parent    a22fea94992a2bc5328005e62f368413ede49c14 (diff)
download  linux-cbafe18c71028d5e0ee1626b4776fea5d5824a78.tar.bz2
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
 - almost all of the rest of -mm
 - various other subsystems

Subsystems affected by this patch series: memcg, misc, core-kernel,
lib, checkpatch, reiserfs, fat, fork, cpumask, kexec, uaccess, kconfig,
kgdb, bug, ipc, lzo, kasan, madvise, cleanups, pagemap

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (77 commits)
  arch/sparc/include/asm/pgtable_64.h: fix build
  mm: treewide: clarify pgtable_page_{ctor,dtor}() naming
  ntfs: remove (un)?likely() from IS_ERR() conditions
  IB/hfi1: remove unlikely() from IS_ERR*() condition
  xfs: remove unlikely() from WARN_ON() condition
  wimax/i2400m: remove unlikely() from WARN*() condition
  fs: remove unlikely() from WARN_ON() condition
  xen/events: remove unlikely() from WARN() condition
  checkpatch: check for nested (un)?likely() calls
  hexagon: drop empty and unused free_initrd_mem
  mm: factor out common parts between MADV_COLD and MADV_PAGEOUT
  mm: introduce MADV_PAGEOUT
  mm: change PAGEREF_RECLAIM_CLEAN with PAGE_REFRECLAIM
  mm: introduce MADV_COLD
  mm: untag user pointers in mmap/munmap/mremap/brk
  vfio/type1: untag user pointers in vaddr_get_pfn
  tee/shm: untag user pointers in tee_shm_register
  media/v4l2-core: untag user pointers in videobuf_dma_contig_user_get
  drm/radeon: untag user pointers in radeon_gem_userptr_ioctl
  drm/amdgpu: untag user pointers
  ...
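The user-visible piece of this series is the pair of new madvise()
hints, MADV_COLD (20) and MADV_PAGEOUT (21), added to the uapi mman.h
headers below. A minimal userspace sketch of how they would be used,
assuming a kernel with this series applied (the fallback #defines
mirror the values in the hunks below, for libcs that do not yet expose
them):

    #include <sys/mman.h>
    #include <stdio.h>

    #ifndef MADV_COLD
    #define MADV_COLD    20   /* deactivate these pages */
    #endif
    #ifndef MADV_PAGEOUT
    #define MADV_PAGEOUT 21   /* reclaim these pages */
    #endif

    int main(void)
    {
        size_t len = 1 << 20;
        char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
            return 1;

        buf[0] = 1;  /* touch the range so there is something to age */

        /* Gentle hint: move the range to the inactive list. */
        if (madvise(buf, len, MADV_COLD))
            perror("madvise(MADV_COLD)");

        /* Stronger hint: reclaim (page out) the range right away. */
        if (madvise(buf, len, MADV_PAGEOUT))
            perror("madvise(MADV_PAGEOUT)");

        munmap(buf, len);
        return 0;
    }

Unlike MADV_DONTNEED, both hints preserve the contents of the range;
they only change its position in the reclaim LRUs.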
-rw-r--r--  Documentation/core-api/kernel-api.rst | 3
-rw-r--r--  Documentation/vm/split_page_table_lock.rst | 10
-rw-r--r--  arch/alpha/include/uapi/asm/mman.h | 3
-rw-r--r--  arch/arc/include/asm/pgalloc.h | 4
-rw-r--r--  arch/arm/include/asm/tlb.h | 2
-rw-r--r--  arch/arm/mm/mmu.c | 2
-rw-r--r--  arch/arm64/include/asm/tlb.h | 2
-rw-r--r--  arch/arm64/mm/mmu.c | 2
-rw-r--r--  arch/csky/include/asm/pgalloc.h | 2
-rw-r--r--  arch/hexagon/include/asm/pgalloc.h | 2
-rw-r--r--  arch/hexagon/mm/init.c | 13
-rw-r--r--  arch/m68k/include/asm/mcf_pgalloc.h | 6
-rw-r--r--  arch/m68k/include/asm/motorola_pgalloc.h | 6
-rw-r--r--  arch/m68k/include/asm/sun3_pgalloc.h | 2
-rw-r--r--  arch/mips/include/asm/pgalloc.h | 2
-rw-r--r--  arch/mips/include/uapi/asm/mman.h | 3
-rw-r--r--  arch/nios2/include/asm/pgalloc.h | 2
-rw-r--r--  arch/openrisc/include/asm/pgalloc.h | 6
-rw-r--r--  arch/parisc/include/uapi/asm/mman.h | 3
-rw-r--r--  arch/powerpc/mm/pgtable-frag.c | 6
-rw-r--r--  arch/riscv/include/asm/pgalloc.h | 2
-rw-r--r--  arch/s390/mm/pgalloc.c | 6
-rw-r--r--  arch/sh/include/asm/pgalloc.h | 2
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h | 5
-rw-r--r--  arch/sparc/mm/init_64.c | 4
-rw-r--r--  arch/sparc/mm/srmmu.c | 4
-rw-r--r--  arch/um/include/asm/pgalloc.h | 2
-rw-r--r--  arch/unicore32/include/asm/tlb.h | 2
-rw-r--r--  arch/x86/mm/pat_rbtree.c | 19
-rw-r--r--  arch/x86/mm/pgtable.c | 2
-rw-r--r--  arch/xtensa/include/asm/pgalloc.h | 4
-rw-r--r--  arch/xtensa/include/uapi/asm/mman.h | 3
-rw-r--r--  drivers/block/drbd/drbd_interval.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c | 2
-rw-r--r--  drivers/media/v4l2-core/videobuf-dma-contig.c | 9
-rw-r--r--  drivers/net/wimax/i2400m/tx.c | 3
-rw-r--r--  drivers/tee/tee_shm.c | 1
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c | 2
-rw-r--r--  drivers/xen/events/events_base.c | 2
-rw-r--r--  fs/fat/dir.c | 4
-rw-r--r--  fs/namespace.c | 2
-rw-r--r--  fs/ntfs/mft.c | 12
-rw-r--r--  fs/ntfs/namei.c | 2
-rw-r--r--  fs/ntfs/runlist.c | 2
-rw-r--r--  fs/ntfs/super.c | 2
-rw-r--r--  fs/open.c | 2
-rw-r--r--  fs/reiserfs/do_balan.c | 15
-rw-r--r--  fs/reiserfs/fix_node.c | 6
-rw-r--r--  fs/reiserfs/journal.c | 22
-rw-r--r--  fs/reiserfs/lbalance.c | 3
-rw-r--r--  fs/reiserfs/objectid.c | 3
-rw-r--r--  fs/reiserfs/prints.c | 3
-rw-r--r--  fs/reiserfs/stree.c | 4
-rw-r--r--  fs/userfaultfd.c | 22
-rw-r--r--  fs/xfs/xfs_buf.c | 4
-rw-r--r--  include/asm-generic/bug.h | 53
-rw-r--r--  include/asm-generic/pgalloc.h | 8
-rw-r--r--  include/linux/cpumask.h | 14
-rw-r--r--  include/linux/interval_tree_generic.h | 22
-rw-r--r--  include/linux/kexec.h | 2
-rw-r--r--  include/linux/kgdb.h | 2
-rw-r--r--  include/linux/mm.h | 4
-rw-r--r--  include/linux/mm_types_task.h | 4
-rw-r--r--  include/linux/printk.h | 22
-rw-r--r--  include/linux/rbtree_augmented.h | 88
-rw-r--r--  include/linux/string.h | 5
-rw-r--r--  include/linux/swap.h | 2
-rw-r--r--  include/linux/thread_info.h | 2
-rw-r--r--  include/linux/uaccess.h | 21
-rw-r--r--  include/trace/events/writeback.h | 38
-rw-r--r--  include/uapi/asm-generic/mman-common.h | 3
-rw-r--r--  include/uapi/linux/coff.h | 5
-rw-r--r--  ipc/mqueue.c | 22
-rw-r--r--  ipc/sem.c | 3
-rw-r--r--  kernel/debug/debug_core.c | 31
-rw-r--r--  kernel/elfcore.c | 1
-rw-r--r--  kernel/fork.c | 16
-rw-r--r--  kernel/kexec_core.c | 2
-rw-r--r--  kernel/panic.c | 42
-rw-r--r--  lib/Kconfig.debug | 4
-rw-r--r--  lib/bug.c | 11
-rw-r--r--  lib/extable.c | 1
-rw-r--r--  lib/generic-radix-tree.c | 4
-rw-r--r--  lib/hexdump.c | 21
-rw-r--r--  lib/lzo/lzo1x_compress.c | 14
-rw-r--r--  lib/rbtree_test.c | 37
-rw-r--r--  lib/string.c | 12
-rw-r--r--  lib/strncpy_from_user.c | 3
-rw-r--r--  lib/strnlen_user.c | 3
-rw-r--r--  mm/frame_vector.c | 2
-rw-r--r--  mm/gup.c | 4
-rw-r--r--  mm/internal.h | 2
-rw-r--r--  mm/madvise.c | 268
-rw-r--r--  mm/memcontrol.c | 10
-rw-r--r--  mm/mempolicy.c | 3
-rw-r--r--  mm/migrate.c | 2
-rw-r--r--  mm/mincore.c | 2
-rw-r--r--  mm/mlock.c | 4
-rw-r--r--  mm/mmap.c | 34
-rw-r--r--  mm/mprotect.c | 2
-rw-r--r--  mm/mremap.c | 3
-rw-r--r--  mm/msync.c | 2
-rw-r--r--  mm/oom_kill.c | 2
-rw-r--r--  mm/swap.c | 42
-rw-r--r--  mm/vmalloc.c | 5
-rw-r--r--  mm/vmscan.c | 62
-rwxr-xr-x  scripts/checkpatch.pl | 69
-rw-r--r--  scripts/gdb/linux/symbols.py | 4
-rw-r--r--  tools/include/linux/rbtree.h | 71
-rw-r--r--  tools/include/linux/rbtree_augmented.h | 119
-rw-r--r--  tools/lib/rbtree.c | 37
114 files changed, 1005 insertions, 564 deletions
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 08af5caf036d..f77de49b1d51 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -42,6 +42,9 @@ String Manipulation
.. kernel-doc:: lib/string.c
:export:
+.. kernel-doc:: include/linux/string.h
+ :internal:
+
.. kernel-doc:: mm/util.c
:functions: kstrdup kstrdup_const kstrndup kmemdup kmemdup_nul memdup_user
vmemdup_user strndup_user memdup_user_nul
diff --git a/Documentation/vm/split_page_table_lock.rst b/Documentation/vm/split_page_table_lock.rst
index 889b00be469f..ff51f4a5494d 100644
--- a/Documentation/vm/split_page_table_lock.rst
+++ b/Documentation/vm/split_page_table_lock.rst
@@ -54,9 +54,9 @@ Hugetlb-specific helpers:
Support of split page table lock by an architecture
===================================================
-There's no need in special enabling of PTE split page table lock:
-everything required is done by pgtable_page_ctor() and pgtable_page_dtor(),
-which must be called on PTE table allocation / freeing.
+There's no need in special enabling of PTE split page table lock: everything
+required is done by pgtable_pte_page_ctor() and pgtable_pte_page_dtor(), which
+must be called on PTE table allocation / freeing.
Make sure the architecture doesn't use slab allocator for page table
allocation: slab uses page->slab_cache for its pages.
@@ -74,7 +74,7 @@ paths: i.e X86_PAE preallocate few PMDs on pgd_alloc().
With everything in place you can set CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK.
-NOTE: pgtable_page_ctor() and pgtable_pmd_page_ctor() can fail -- it must
+NOTE: pgtable_pte_page_ctor() and pgtable_pmd_page_ctor() can fail -- it must
be handled properly.
page->ptl
@@ -94,7 +94,7 @@ trick:
split lock with enabled DEBUG_SPINLOCK or DEBUG_LOCK_ALLOC, but costs
one more cache line for indirect access;
-The spinlock_t allocated in pgtable_page_ctor() for PTE table and in
+The spinlock_t allocated in pgtable_pte_page_ctor() for PTE table and in
pgtable_pmd_page_ctor() for PMD table.
Please, never access page->ptl directly -- use appropriate helper.
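The documentation change above tracks the treewide rename of
pgtable_page_{ctor,dtor}() to pgtable_pte_page_{ctor,dtor}(); the arch
hunks that follow are the mechanical conversions. A condensed sketch of
the per-architecture pattern being renamed (not a drop-in for any one
architecture; compare the arc/m68k/sparc hunks below):

    /* Allocate a PTE page and run the now PTE-specific constructor. */
    static inline pgtable_t pte_alloc_one_sketch(struct mm_struct *mm)
    {
        struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

        if (!page)
            return NULL;
        if (!pgtable_pte_page_ctor(page)) {   /* can fail: ptlock alloc */
            __free_page(page);
            return NULL;
        }
        return page;
    }

    /* Every ctor call must be paired with the dtor on the free path. */
    static inline void pte_free_sketch(struct mm_struct *mm, pgtable_t page)
    {
        pgtable_pte_page_dtor(page);
        __free_page(page);
    }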
diff --git a/arch/alpha/include/uapi/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h
index ac23379b7a87..a18ec7f63888 100644
--- a/arch/alpha/include/uapi/asm/mman.h
+++ b/arch/alpha/include/uapi/asm/mman.h
@@ -68,6 +68,9 @@
#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+#define MADV_COLD 20 /* deactivate these pages */
+#define MADV_PAGEOUT 21 /* reclaim these pages */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 4751f2251cd9..b747f2ec2928 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -108,7 +108,7 @@ pte_alloc_one(struct mm_struct *mm)
return 0;
memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
page = virt_to_page(pte_pg);
- if (!pgtable_page_ctor(page)) {
+ if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
return 0;
}
@@ -123,7 +123,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
{
- pgtable_page_dtor(virt_to_page(ptep));
+ pgtable_pte_page_dtor(virt_to_page(ptep));
free_pages((unsigned long)ptep, __get_order_pte());
}
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index b75ea15b85c0..669474add486 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -44,7 +44,7 @@ static inline void __tlb_remove_table(void *_table)
static inline void
__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
{
- pgtable_page_dtor(pte);
+ pgtable_pte_page_dtor(pte);
#ifndef CONFIG_ARM_LPAE
/*
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 25da9b2d9610..48c2888297dd 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -731,7 +731,7 @@ static void *__init late_alloc(unsigned long sz)
{
void *ptr = (void *)__get_free_pages(GFP_PGTABLE_KERNEL, get_order(sz));
- if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
+ if (!ptr || !pgtable_pte_page_ctor(virt_to_page(ptr)))
BUG();
return ptr;
}
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index a95d1fcb7e21..b76df828e6b7 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -44,7 +44,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long addr)
{
- pgtable_page_dtor(pte);
+ pgtable_pte_page_dtor(pte);
tlb_remove_table(tlb, pte);
}
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 53dc6f24cfb7..60c929f3683b 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -384,7 +384,7 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
* folded, and if so pgtable_pmd_page_ctor() becomes nop.
*/
if (shift == PAGE_SHIFT)
- BUG_ON(!pgtable_page_ctor(phys_to_page(pa)));
+ BUG_ON(!pgtable_pte_page_ctor(phys_to_page(pa)));
else if (shift == PMD_SHIFT)
BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa)));
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index d089113fe41f..c7c1ed27e348 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -71,7 +71,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
#define __pte_free_tlb(tlb, pte, address) \
do { \
- pgtable_page_dtor(pte); \
+ pgtable_pte_page_dtor(pte); \
tlb_remove_page(tlb, pte); \
} while (0)
diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h
index 5a6e79e7926d..cc9be514a676 100644
--- a/arch/hexagon/include/asm/pgalloc.h
+++ b/arch/hexagon/include/asm/pgalloc.h
@@ -94,7 +94,7 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
#define __pte_free_tlb(tlb, pte, addr) \
do { \
- pgtable_page_dtor((pte)); \
+ pgtable_pte_page_dtor((pte)); \
tlb_remove_page((tlb), (pte)); \
} while (0)
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
index f1f6ebd537b7..c961773a6fff 100644
--- a/arch/hexagon/mm/init.c
+++ b/arch/hexagon/mm/init.c
@@ -71,19 +71,6 @@ void __init mem_init(void)
init_mm.context.ptbase = __pa(init_mm.pgd);
}
-/*
- * free_initrd_mem - frees... initrd memory.
- * @start - start of init memory
- * @end - end of init memory
- *
- * Apparently has to be passed the address of the initrd memory.
- *
- * Wrapped by #ifdef CONFIG_BLKDEV_INITRD
- */
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-}
-
void sync_icache_dcache(pte_t pte)
{
unsigned long addr;
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index 4399d712f6db..b34d44d666a4 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -41,7 +41,7 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
unsigned long address)
{
- pgtable_page_dtor(page);
+ pgtable_pte_page_dtor(page);
__free_page(page);
}
@@ -54,7 +54,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm)
if (!page)
return NULL;
- if (!pgtable_page_ctor(page)) {
+ if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
return NULL;
}
@@ -73,7 +73,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm)
static inline void pte_free(struct mm_struct *mm, struct page *page)
{
- pgtable_page_dtor(page);
+ pgtable_pte_page_dtor(page);
__free_page(page);
}
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index d04d9ba9b976..acab315c851f 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -36,7 +36,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
if(!page)
return NULL;
- if (!pgtable_page_ctor(page)) {
+ if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
return NULL;
}
@@ -51,7 +51,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
static inline void pte_free(struct mm_struct *mm, pgtable_t page)
{
- pgtable_page_dtor(page);
+ pgtable_pte_page_dtor(page);
cache_page(kmap(page));
kunmap(page);
__free_page(page);
@@ -60,7 +60,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t page)
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
unsigned long address)
{
- pgtable_page_dtor(page);
+ pgtable_pte_page_dtor(page);
cache_page(kmap(page));
kunmap(page);
__free_page(page);
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index 1a8ddbd0d23c..856121122b91 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -21,7 +21,7 @@ extern const char bad_pmd_string[];
#define __pte_free_tlb(tlb,pte,addr) \
do { \
- pgtable_page_dtor(pte); \
+ pgtable_pte_page_dtor(pte); \
tlb_remove_page((tlb), pte); \
} while (0)
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index aa73cb187a07..166842337eb2 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -54,7 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
#define __pte_free_tlb(tlb,pte,address) \
do { \
- pgtable_page_dtor(pte); \
+ pgtable_pte_page_dtor(pte); \
tlb_remove_page((tlb), pte); \
} while (0)
diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h
index c2b40969eb1f..57dc2ac4f8bd 100644
--- a/arch/mips/include/uapi/asm/mman.h
+++ b/arch/mips/include/uapi/asm/mman.h
@@ -95,6 +95,9 @@
#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+#define MADV_COLD 20 /* deactivate these pages */
+#define MADV_PAGEOUT 21 /* reclaim these pages */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/arch/nios2/include/asm/pgalloc.h b/arch/nios2/include/asm/pgalloc.h
index 750d18d5980b..0b146d773c85 100644
--- a/arch/nios2/include/asm/pgalloc.h
+++ b/arch/nios2/include/asm/pgalloc.h
@@ -41,7 +41,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
#define __pte_free_tlb(tlb, pte, addr) \
do { \
- pgtable_page_dtor(pte); \
+ pgtable_pte_page_dtor(pte); \
tlb_remove_page((tlb), (pte)); \
} while (0)
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
index 787c1b9d2f6d..da12a4c38c4b 100644
--- a/arch/openrisc/include/asm/pgalloc.h
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -75,7 +75,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm)
if (!pte)
return NULL;
clear_page(page_address(pte));
- if (!pgtable_page_ctor(pte)) {
+ if (!pgtable_pte_page_ctor(pte)) {
__free_page(pte);
return NULL;
}
@@ -89,13 +89,13 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
- pgtable_page_dtor(pte);
+ pgtable_pte_page_dtor(pte);
__free_page(pte);
}
#define __pte_free_tlb(tlb, pte, addr) \
do { \
- pgtable_page_dtor(pte); \
+ pgtable_pte_page_dtor(pte); \
tlb_remove_page((tlb), (pte)); \
} while (0)
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index c98162f494db..6fd8871e4081 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -48,6 +48,9 @@
#define MADV_DONTFORK 10 /* don't inherit across fork */
#define MADV_DOFORK 11 /* do inherit across fork */
+#define MADV_COLD 20 /* deactivate these pages */
+#define MADV_PAGEOUT 21 /* reclaim these pages */
+
#define MADV_MERGEABLE 65 /* KSM may merge identical pages */
#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */
diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c
index a7b05214760c..ee4bd6d38602 100644
--- a/arch/powerpc/mm/pgtable-frag.c
+++ b/arch/powerpc/mm/pgtable-frag.c
@@ -25,7 +25,7 @@ void pte_frag_destroy(void *pte_frag)
count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
/* We allow PTE_FRAG_NR fragments from a PTE page */
if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
- pgtable_page_dtor(page);
+ pgtable_pte_page_dtor(page);
__free_page(page);
}
}
@@ -61,7 +61,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
if (!page)
return NULL;
- if (!pgtable_page_ctor(page)) {
+ if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
return NULL;
}
@@ -113,7 +113,7 @@ void pte_fragment_free(unsigned long *table, int kernel)
BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
if (atomic_dec_and_test(&page->pt_frag_refcount)) {
if (!kernel)
- pgtable_page_dtor(page);
+ pgtable_pte_page_dtor(page);
__free_page(page);
}
}
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index f66a00d8cb19..d59ea92285ec 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -78,7 +78,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
#define __pte_free_tlb(tlb, pte, buf) \
do { \
- pgtable_page_dtor(pte); \
+ pgtable_pte_page_dtor(pte); \
tlb_remove_page((tlb), pte); \
} while (0)
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 54fcdf66ae96..3dd253f81a77 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -210,7 +210,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
page = alloc_page(GFP_KERNEL);
if (!page)
return NULL;
- if (!pgtable_page_ctor(page)) {
+ if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
return NULL;
}
@@ -256,7 +256,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
atomic_xor_bits(&page->_refcount, 3U << 24);
}
- pgtable_page_dtor(page);
+ pgtable_pte_page_dtor(page);
__free_page(page);
}
@@ -308,7 +308,7 @@ void __tlb_remove_table(void *_table)
case 3: /* 4K page table with pgstes */
if (mask & 3)
atomic_xor_bits(&page->_refcount, 3 << 24);
- pgtable_page_dtor(page);
+ pgtable_pte_page_dtor(page);
__free_page(page);
break;
}
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 8c6341a4d807..22d968bfe9bb 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -29,7 +29,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
#define __pte_free_tlb(tlb,pte,addr) \
do { \
- pgtable_page_dtor(pte); \
+ pgtable_pte_page_dtor(pte); \
tlb_remove_page((tlb), (pte)); \
} while (0)
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index b57f9c631eca..6ae8016ef4ec 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -1078,7 +1078,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
}
#define io_remap_pfn_range io_remap_pfn_range
-static inline unsigned long untagged_addr(unsigned long start)
+static inline unsigned long __untagged_addr(unsigned long start)
{
if (adi_capable()) {
long addr = start;
@@ -1098,7 +1098,8 @@ static inline unsigned long untagged_addr(unsigned long start)
return start;
}
-#define untagged_addr untagged_addr
+#define untagged_addr(addr) \
+ ((__typeof__(addr))(__untagged_addr((unsigned long)(addr))))
static inline bool pte_access_permitted(pte_t pte, bool write)
{
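The sparc change above turns untagged_addr() into a type-preserving
macro (via the __typeof__ cast), so callers can hand it a user pointer
of any type rather than an unsigned long; this is the build fix at the
head of the series, and it is what lets the driver hunks below call it
directly on ioctl arguments. A hedged sketch of a caller, modeled on
the amdgpu/radeon hunks (function and parameter names hypothetical):

    static int demo_check_user_range(void __user *uptr, unsigned long size)
    {
        /* Strip ADI/TBI tag bits before any address arithmetic;
         * the macro returns the same pointer type it was given. */
        uptr = untagged_addr(uptr);

        if (offset_in_page((unsigned long)uptr | size))
            return -EINVAL;
        return 0;
    }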
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 4b099dd7a767..e6d91819da92 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2903,7 +2903,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm)
struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
return NULL;
- if (!pgtable_page_ctor(page)) {
+ if (!pgtable_pte_page_ctor(page)) {
free_unref_page(page);
return NULL;
}
@@ -2919,7 +2919,7 @@ static void __pte_free(pgtable_t pte)
{
struct page *page = virt_to_page(pte);
- pgtable_page_dtor(page);
+ pgtable_pte_page_dtor(page);
__free_page(page);
}
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index aaebbc00d262..cc3ad64479ac 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -378,7 +378,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm)
if ((pte = (unsigned long)pte_alloc_one_kernel(mm)) == 0)
return NULL;
page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
- if (!pgtable_page_ctor(page)) {
+ if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
return NULL;
}
@@ -389,7 +389,7 @@ void pte_free(struct mm_struct *mm, pgtable_t pte)
{
unsigned long p;
- pgtable_page_dtor(pte);
+ pgtable_pte_page_dtor(pte);
p = (unsigned long)page_address(pte); /* Cached address (for test) */
if (p == 0)
BUG();
diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h
index 446e0c0f4018..881e76da1938 100644
--- a/arch/um/include/asm/pgalloc.h
+++ b/arch/um/include/asm/pgalloc.h
@@ -29,7 +29,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
#define __pte_free_tlb(tlb,pte, address) \
do { \
- pgtable_page_dtor(pte); \
+ pgtable_pte_page_dtor(pte); \
tlb_remove_page((tlb),(pte)); \
} while (0)
diff --git a/arch/unicore32/include/asm/tlb.h b/arch/unicore32/include/asm/tlb.h
index 10d2356bfddd..4663d8cc80ef 100644
--- a/arch/unicore32/include/asm/tlb.h
+++ b/arch/unicore32/include/asm/tlb.h
@@ -15,7 +15,7 @@
#define __pte_free_tlb(tlb, pte, addr) \
do { \
- pgtable_page_dtor(pte); \
+ pgtable_pte_page_dtor(pte); \
tlb_remove_page((tlb), (pte)); \
} while (0)
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
index fa16036fa592..65ebe4b88f7c 100644
--- a/arch/x86/mm/pat_rbtree.c
+++ b/arch/x86/mm/pat_rbtree.c
@@ -54,23 +54,10 @@ static u64 get_subtree_max_end(struct rb_node *node)
return ret;
}
-static u64 compute_subtree_max_end(struct memtype *data)
-{
- u64 max_end = data->end, child_max_end;
-
- child_max_end = get_subtree_max_end(data->rb.rb_right);
- if (child_max_end > max_end)
- max_end = child_max_end;
-
- child_max_end = get_subtree_max_end(data->rb.rb_left);
- if (child_max_end > max_end)
- max_end = child_max_end;
-
- return max_end;
-}
+#define NODE_END(node) ((node)->end)
-RB_DECLARE_CALLBACKS(static, memtype_rb_augment_cb, struct memtype, rb,
- u64, subtree_max_end, compute_subtree_max_end)
+RB_DECLARE_CALLBACKS_MAX(static, memtype_rb_augment_cb,
+ struct memtype, rb, u64, subtree_max_end, NODE_END)
/* Find the first (lowest start addr) overlapping range from rb tree */
static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
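The pat_rbtree conversion above (and the drbd and interval-tree ones
below) replaces a hand-written "max of my end and my children's maxima"
callback with the new RB_DECLARE_CALLBACKS_MAX helper from
rbtree_augmented.h, which only needs an expression for a node's own
"last" value. A minimal sketch of the pattern with assumed field names:

    #include <linux/rbtree_augmented.h>

    struct demo_node {
        struct rb_node rb;
        u64 start, end;
        u64 subtree_max_end;   /* cached max of ->end over the subtree */
    };

    #define DEMO_NODE_END(node) ((node)->end)

    /* Generates the propagate/copy/rotate callbacks automatically. */
    RB_DECLARE_CALLBACKS_MAX(static, demo_augment, struct demo_node, rb,
                             u64, subtree_max_end, DEMO_NODE_END)

    /* Insert keyed on ->start, keeping subtree_max_end up to date:
     *     rb_insert_augmented(&node->rb, root, &demo_augment);
     */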
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 463940faf52f..3e4b9035bb9a 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -45,7 +45,7 @@ early_param("userpte", setup_userpte);
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
- pgtable_page_dtor(pte);
+ pgtable_pte_page_dtor(pte);
paravirt_release_pte(page_to_pfn(pte));
paravirt_tlb_remove_table(tlb, pte);
}
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index dd744aa450fa..1d38f0e755ba 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -55,7 +55,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
if (!pte)
return NULL;
page = virt_to_page(pte);
- if (!pgtable_page_ctor(page)) {
+ if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
return NULL;
}
@@ -69,7 +69,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
- pgtable_page_dtor(pte);
+ pgtable_pte_page_dtor(pte);
__free_page(pte);
}
#define pmd_pgtable(pmd) pmd_page(pmd)
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h
index ebbb48842190..e5e643752947 100644
--- a/arch/xtensa/include/uapi/asm/mman.h
+++ b/arch/xtensa/include/uapi/asm/mman.h
@@ -103,6 +103,9 @@
#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+#define MADV_COLD 20 /* deactivate these pages */
+#define MADV_PAGEOUT 21 /* reclaim these pages */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
index c58986556161..651bd0236a99 100644
--- a/drivers/block/drbd/drbd_interval.c
+++ b/drivers/block/drbd/drbd_interval.c
@@ -13,33 +13,10 @@ sector_t interval_end(struct rb_node *node)
return this->end;
}
-/**
- * compute_subtree_last - compute end of @node
- *
- * The end of an interval is the highest (start + (size >> 9)) value of this
- * node and of its children. Called for @node and its parents whenever the end
- * may have changed.
- */
-static inline sector_t
-compute_subtree_last(struct drbd_interval *node)
-{
- sector_t max = node->sector + (node->size >> 9);
-
- if (node->rb.rb_left) {
- sector_t left = interval_end(node->rb.rb_left);
- if (left > max)
- max = left;
- }
- if (node->rb.rb_right) {
- sector_t right = interval_end(node->rb.rb_right);
- if (right > max)
- max = right;
- }
- return max;
-}
+#define NODE_END(node) ((node)->sector + ((node)->size >> 9))
-RB_DECLARE_CALLBACKS(static, augment_callbacks, struct drbd_interval, rb,
- sector_t, end, compute_subtree_last);
+RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
+ struct drbd_interval, rb, sector_t, end, NODE_END);
/**
* drbd_insert_interval - insert a new interval into a tree
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 42b936b6bbf1..6d021ecc8d59 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1103,7 +1103,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
alloc_flags = 0;
if (!offset || !*offset)
return -EINVAL;
- user_addr = *offset;
+ user_addr = untagged_addr(*offset);
} else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
ALLOC_MEM_FLAGS_MMIO_REMAP)) {
domain = AMDGPU_GEM_DOMAIN_GTT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index b174bd5eb38e..8ceb44925947 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -291,6 +291,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
uint32_t handle;
int r;
+ args->addr = untagged_addr(args->addr);
+
if (offset_in_page(args->addr | args->size))
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 4cf58dbbe439..b2b076606f54 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -296,6 +296,8 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
uint32_t handle;
int r;
+ args->addr = untagged_addr(args->addr);
+
if (offset_in_page(args->addr | args->size))
return -EINVAL;
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 9f53f63b1453..7bff0a1e713d 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1041,7 +1041,7 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
if (cb)
iowait_pio_inc(&priv->s_iowait);
pbuf = sc_buffer_alloc(sc, plen, cb, qp);
- if (unlikely(IS_ERR_OR_NULL(pbuf))) {
+ if (IS_ERR_OR_NULL(pbuf)) {
if (cb)
verbs_pio_complete(qp, 0);
if (IS_ERR(pbuf)) {
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
index 76b4ac7b1678..aeb2f497c683 100644
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -157,6 +157,7 @@ static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
struct videobuf_buffer *vb)
{
+ unsigned long untagged_baddr = untagged_addr(vb->baddr);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long prev_pfn, this_pfn;
@@ -164,22 +165,22 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
unsigned int offset;
int ret;
- offset = vb->baddr & ~PAGE_MASK;
+ offset = untagged_baddr & ~PAGE_MASK;
mem->size = PAGE_ALIGN(vb->size + offset);
ret = -EINVAL;
down_read(&mm->mmap_sem);
- vma = find_vma(mm, vb->baddr);
+ vma = find_vma(mm, untagged_baddr);
if (!vma)
goto out_up;
- if ((vb->baddr + mem->size) > vma->vm_end)
+ if ((untagged_baddr + mem->size) > vma->vm_end)
goto out_up;
pages_done = 0;
prev_pfn = 0; /* kill warning */
- user_address = vb->baddr;
+ user_address = untagged_baddr;
while (pages_done < (mem->size >> PAGE_SHIFT)) {
ret = follow_pfn(vma, user_address, &this_pfn);
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index ebd64e083726..1255302e251e 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -654,8 +654,7 @@ void i2400m_tx_close(struct i2400m *i2400m)
padding = aligned_size - tx_msg_moved->size;
if (padding > 0) {
pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0, 0);
- if (unlikely(WARN_ON(pad_buf == NULL
- || pad_buf == TAIL_FULL))) {
+ if (WARN_ON(pad_buf == NULL || pad_buf == TAIL_FULL)) {
/* This should not happen -- append should verify
* there is always space left at least to append
* tx_block_size */
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 2da026fd12c9..09ddcd06c715 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -254,6 +254,7 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
shm->teedev = teedev;
shm->ctx = ctx;
shm->id = -1;
+ addr = untagged_addr(addr);
start = rounddown(addr, PAGE_SIZE);
shm->offset = addr - start;
shm->size = length;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 9a50b0558fa9..96fddc1dafc3 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -375,6 +375,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
down_read(&mm->mmap_sem);
+ vaddr = untagged_addr(vaddr);
+
vma = find_vma_intersection(mm, vaddr, vaddr + 1);
if (vma && vma->vm_flags & VM_PFNMAP) {
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 2e8570c09789..6c8843968a52 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -247,7 +247,7 @@ static void xen_irq_info_cleanup(struct irq_info *info)
*/
unsigned int evtchn_from_irq(unsigned irq)
{
- if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)))
+ if (WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq))
return 0;
return info_for_irq(irq)->evtchn;
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 814ad2c2ba80..054acd9fd033 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -88,9 +88,7 @@ static int fat__get_entry(struct inode *dir, loff_t *pos,
int err, offset;
next:
- if (*bh)
- brelse(*bh);
-
+ brelse(*bh);
*bh = NULL;
iblock = *pos >> sb->s_blocksize_bits;
err = fat_bmap(dir, iblock, &phys, &mapped_blocks, 0, false);
diff --git a/fs/namespace.c b/fs/namespace.c
index b75d458f817d..fe0e9e1410fe 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -3026,7 +3026,7 @@ void *copy_mount_options(const void __user * data)
* the remainder of the page.
*/
/* copy_from_user cannot cross TASK_SIZE ! */
- size = TASK_SIZE - (unsigned long)data;
+ size = TASK_SIZE - (unsigned long)untagged_addr(data);
if (size > PAGE_SIZE)
size = PAGE_SIZE;
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 20c841a906f2..3aac5c917afe 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -71,7 +71,7 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni)
}
/* Read, map, and pin the page. */
page = ntfs_map_page(mft_vi->i_mapping, index);
- if (likely(!IS_ERR(page))) {
+ if (!IS_ERR(page)) {
/* Catch multi sector transfer fixup errors. */
if (likely(ntfs_is_mft_recordp((le32*)(page_address(page) +
ofs)))) {
@@ -154,7 +154,7 @@ MFT_RECORD *map_mft_record(ntfs_inode *ni)
mutex_lock(&ni->mrec_lock);
m = map_mft_record_page(ni);
- if (likely(!IS_ERR(m)))
+ if (!IS_ERR(m))
return m;
mutex_unlock(&ni->mrec_lock);
@@ -271,7 +271,7 @@ MFT_RECORD *map_extent_mft_record(ntfs_inode *base_ni, MFT_REF mref,
m = map_mft_record(ni);
/* map_mft_record() has incremented this on success. */
atomic_dec(&ni->count);
- if (likely(!IS_ERR(m))) {
+ if (!IS_ERR(m)) {
/* Verify the sequence number. */
if (likely(le16_to_cpu(m->sequence_number) == seq_no)) {
ntfs_debug("Done 1.");
@@ -1303,7 +1303,7 @@ static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol)
read_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
rl = ntfs_attr_find_vcn_nolock(mftbmp_ni,
(ll - 1) >> vol->cluster_size_bits, NULL);
- if (unlikely(IS_ERR(rl) || !rl->length || rl->lcn < 0)) {
+ if (IS_ERR(rl) || unlikely(!rl->length || rl->lcn < 0)) {
up_write(&mftbmp_ni->runlist.lock);
ntfs_error(vol->sb, "Failed to determine last allocated "
"cluster of mft bitmap attribute.");
@@ -1734,7 +1734,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
read_unlock_irqrestore(&mft_ni->size_lock, flags);
rl = ntfs_attr_find_vcn_nolock(mft_ni,
(ll - 1) >> vol->cluster_size_bits, NULL);
- if (unlikely(IS_ERR(rl) || !rl->length || rl->lcn < 0)) {
+ if (IS_ERR(rl) || unlikely(!rl->length || rl->lcn < 0)) {
up_write(&mft_ni->runlist.lock);
ntfs_error(vol->sb, "Failed to determine last allocated "
"cluster of mft data attribute.");
@@ -1776,7 +1776,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
do {
rl2 = ntfs_cluster_alloc(vol, old_last_vcn, nr, lcn, MFT_ZONE,
true);
- if (likely(!IS_ERR(rl2)))
+ if (!IS_ERR(rl2))
break;
if (PTR_ERR(rl2) != -ENOSPC || nr == min_nr) {
ntfs_error(vol->sb, "Failed to allocate the minimal "
diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c
index 2d3cc9e3395d..4e6a44bc654c 100644
--- a/fs/ntfs/namei.c
+++ b/fs/ntfs/namei.c
@@ -115,7 +115,7 @@ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent,
dent_ino = MREF(mref);
ntfs_debug("Found inode 0x%lx. Calling ntfs_iget.", dent_ino);
dent_inode = ntfs_iget(vol->sb, dent_ino);
- if (likely(!IS_ERR(dent_inode))) {
+ if (!IS_ERR(dent_inode)) {
/* Consistency check. */
if (is_bad_inode(dent_inode) || MSEQNO(mref) ==
NTFS_I(dent_inode)->seq_no ||
diff --git a/fs/ntfs/runlist.c b/fs/ntfs/runlist.c
index 508744a93180..97932fb5179c 100644
--- a/fs/ntfs/runlist.c
+++ b/fs/ntfs/runlist.c
@@ -951,7 +951,7 @@ mpa_err:
}
/* Now combine the new and old runlists checking for overlaps. */
old_rl = ntfs_runlists_merge(old_rl, rl);
- if (likely(!IS_ERR(old_rl)))
+ if (!IS_ERR(old_rl))
return old_rl;
ntfs_free(rl);
ntfs_error(vol->sb, "Failed to merge runlists.");
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 29621d40f448..7dc3bc604f78 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -1475,7 +1475,7 @@ not_enabled:
kfree(name);
/* Get the inode. */
tmp_ino = ntfs_iget(vol->sb, MREF(mref));
- if (unlikely(IS_ERR(tmp_ino) || is_bad_inode(tmp_ino))) {
+ if (IS_ERR(tmp_ino) || unlikely(is_bad_inode(tmp_ino))) {
if (!IS_ERR(tmp_ino))
iput(tmp_ino);
ntfs_error(vol->sb, "Failed to load $UsnJrnl.");
diff --git a/fs/open.c b/fs/open.c
index c60cd22cc052..b62f5c0923a8 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -776,7 +776,7 @@ static int do_dentry_open(struct file *f,
f->f_mode |= FMODE_ATOMIC_POS;
f->f_op = fops_get(inode->i_fop);
- if (unlikely(WARN_ON(!f->f_op))) {
+ if (WARN_ON(!f->f_op)) {
error = -ENODEV;
goto cleanup_all;
}
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 9c02d96d3a42..4075e41408b4 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -239,10 +239,8 @@ static int balance_leaf_when_delete_left(struct tree_balance *tb)
static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
{
struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
- int item_pos = PATH_LAST_POSITION(tb->tb_path);
struct buffer_info bi;
int n;
- struct item_head *ih;
RFALSE(tb->FR[0] && B_LEVEL(tb->FR[0]) != DISK_LEAF_NODE_LEVEL + 1,
"vs- 12000: level: wrong FR %z", tb->FR[0]);
@@ -251,7 +249,6 @@ static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
RFALSE(!tb->blknum[0] && !PATH_H_PPARENT(tb->tb_path, 0),
"PAP-12010: tree can not be empty");
- ih = item_head(tbS0, item_pos);
buffer_info_init_tbS0(tb, &bi);
/* Delete or truncate the item */
@@ -298,7 +295,6 @@ static unsigned int balance_leaf_insert_left(struct tree_balance *tb,
if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
/* part of new item falls into L[0] */
int new_item_len, shift;
- int version;
ret = leaf_shift_left(tb, tb->lnum[0] - 1, -1);
@@ -317,8 +313,6 @@ static unsigned int balance_leaf_insert_left(struct tree_balance *tb,
leaf_insert_into_buf(&bi, n + tb->item_pos - ret, ih, body,
min_t(int, tb->zeroes_num, ih_item_len(ih)));
- version = ih_version(ih);
-
/*
* Calculate key component, item length and body to
* insert into S[0]
@@ -632,7 +626,6 @@ static void balance_leaf_insert_right(struct tree_balance *tb,
struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
int n = B_NR_ITEMS(tbS0);
struct buffer_info bi;
- int ret;
/* new item or part of it doesn't fall into R[0] */
if (n - tb->rnum[0] >= tb->item_pos) {
@@ -646,13 +639,11 @@ static void balance_leaf_insert_right(struct tree_balance *tb,
if (tb->item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) {
loff_t old_key_comp, old_len, r_zeroes_number;
const char *r_body;
- int version, shift;
+ int shift;
loff_t offset;
leaf_shift_right(tb, tb->rnum[0] - 1, -1);
- version = ih_version(ih);
-
/* Remember key component and item length */
old_key_comp = le_ih_k_offset(ih);
old_len = ih_item_len(ih);
@@ -698,7 +689,7 @@ static void balance_leaf_insert_right(struct tree_balance *tb,
/* whole new item falls into R[0] */
/* Shift rnum[0]-1 items to R[0] */
- ret = leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes);
+ leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes);
/* Insert new item into R[0] */
buffer_info_init_right(tb, &bi);
@@ -950,14 +941,12 @@ static void balance_leaf_new_nodes_insert(struct tree_balance *tb,
if (tb->item_pos == n - tb->snum[i] + 1 && tb->sbytes[i] != -1) {
int old_key_comp, old_len, r_zeroes_number;
const char *r_body;
- int version;
/* Move snum[i]-1 items from S[0] to S_new[i] */
leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i] - 1, -1,
tb->S_new[i]);
/* Remember key component and item length */
- version = ih_version(ih);
old_key_comp = le_ih_k_offset(ih);
old_len = ih_item_len(ih);
diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c
index 6b0ddb2a9091..117092224111 100644
--- a/fs/reiserfs/fix_node.c
+++ b/fs/reiserfs/fix_node.c
@@ -376,7 +376,6 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
int to, int to_bytes, short *snum012, int flow)
{
int i;
- int cur_free;
int units;
struct virtual_node *vn = tb->tb_vn;
int total_node_size, max_node_size, current_item_size;
@@ -438,7 +437,6 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
/* leaf level */
needed_nodes = 1;
total_node_size = 0;
- cur_free = max_node_size;
/* start from 'from'-th item */
start_item = from;
@@ -1734,14 +1732,12 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h)
* and Fh is its father.
*/
struct buffer_head *Sh, *Fh;
- int maxsize, ret;
+ int ret;
int lfree, rfree /* free space in L and R */ ;
Sh = PATH_H_PBUFFER(tb->tb_path, h);
Fh = PATH_H_PPARENT(tb->tb_path, h);
- maxsize = MAX_CHILD_SIZE(Sh);
-
/*
* using tb->insert_size[h], which is negative in this case,
* create_virtual_node calculates:
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 4517a1394c6f..4b3e3e73b512 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -891,7 +891,6 @@ static int flush_older_commits(struct super_block *s,
struct list_head *entry;
unsigned int trans_id = jl->j_trans_id;
unsigned int other_trans_id;
- unsigned int first_trans_id;
find_first:
/*
@@ -914,8 +913,6 @@ find_first:
return 0;
}
- first_trans_id = first_jl->j_trans_id;
-
entry = &first_jl->j_list;
while (1) {
other_jl = JOURNAL_LIST_ENTRY(entry);
@@ -1351,7 +1348,7 @@ static int flush_journal_list(struct super_block *s,
struct reiserfs_journal_list *jl, int flushall)
{
struct reiserfs_journal_list *pjl;
- struct reiserfs_journal_cnode *cn, *last;
+ struct reiserfs_journal_cnode *cn;
int count;
int was_jwait = 0;
int was_dirty = 0;
@@ -1509,7 +1506,6 @@ static int flush_journal_list(struct super_block *s,
b_blocknr, __func__);
}
free_cnode:
- last = cn;
cn = cn->next;
if (saved_bh) {
/*
@@ -1792,7 +1788,6 @@ static int flush_used_journal_lists(struct super_block *s,
{
unsigned long len = 0;
unsigned long cur_len;
- int ret;
int i;
int limit = 256;
struct reiserfs_journal_list *tjl;
@@ -1829,9 +1824,9 @@ static int flush_used_journal_lists(struct super_block *s,
* transactions, but only bother if we've actually spanned
* across multiple lists
*/
- if (flush_jl != jl) {
- ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
- }
+ if (flush_jl != jl)
+ kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
+
flush_journal_list(s, flush_jl, 1);
put_journal_list(s, flush_jl);
put_journal_list(s, jl);
@@ -1911,7 +1906,6 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
struct super_block *sb, int error)
{
struct reiserfs_transaction_handle myth;
- int flushed = 0;
struct reiserfs_journal *journal = SB_JOURNAL(sb);
/*
@@ -1933,7 +1927,6 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
1);
journal_mark_dirty(&myth, SB_BUFFER_WITH_SB(sb));
do_journal_end(&myth, FLUSH_ALL);
- flushed = 1;
}
}
@@ -3444,9 +3437,8 @@ static int remove_from_transaction(struct super_block *sb,
if (cn == journal->j_last) {
journal->j_last = cn->prev;
}
- if (bh)
- remove_journal_hash(sb, journal->j_hash_table, NULL,
- bh->b_blocknr, 0);
+ remove_journal_hash(sb, journal->j_hash_table, NULL,
+ bh->b_blocknr, 0);
clear_buffer_journaled(bh); /* don't log this one */
if (!already_cleaned) {
@@ -3988,7 +3980,6 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, int flags)
struct buffer_head *c_bh; /* commit bh */
struct buffer_head *d_bh; /* desc bh */
int cur_write_start = 0; /* start index of current log write */
- int old_start;
int i;
int flush;
int wait_on_commit;
@@ -4245,7 +4236,6 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, int flags)
journal->j_num_work_lists++;
/* reset journal values for the next transaction */
- old_start = journal->j_start;
journal->j_start =
(journal->j_start + journal->j_len +
2) % SB_ONDISK_JOURNAL_SIZE(sb);
diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c
index f5cebd70d903..7f868569d4d0 100644
--- a/fs/reiserfs/lbalance.c
+++ b/fs/reiserfs/lbalance.c
@@ -1322,7 +1322,7 @@ void leaf_paste_entries(struct buffer_info *bi,
char *item;
struct reiserfs_de_head *deh;
char *insert_point;
- int i, old_entry_num;
+ int i;
struct buffer_head *bh = bi->bi_bh;
if (new_entry_count == 0)
@@ -1362,7 +1362,6 @@ void leaf_paste_entries(struct buffer_info *bi,
put_deh_location(&deh[i],
deh_location(&deh[i]) + paste_size);
- old_entry_num = ih_entry_count(ih);
put_ih_entry_count(ih, ih_entry_count(ih) + new_entry_count);
/* prepare space for pasted records */
diff --git a/fs/reiserfs/objectid.c b/fs/reiserfs/objectid.c
index 415d66ca87d1..34baf5c0f265 100644
--- a/fs/reiserfs/objectid.c
+++ b/fs/reiserfs/objectid.c
@@ -183,13 +183,12 @@ int reiserfs_convert_objectid_map_v1(struct super_block *s)
int new_size = (s->s_blocksize - SB_SIZE) / sizeof(__u32) / 2 * 2;
int old_max = sb_oid_maxsize(disk_sb);
struct reiserfs_super_block_v1 *disk_sb_v1;
- __le32 *objectid_map, *new_objectid_map;
+ __le32 *objectid_map;
int i;
disk_sb_v1 =
(struct reiserfs_super_block_v1 *)(SB_BUFFER_WITH_SB(s)->b_data);
objectid_map = (__le32 *) (disk_sb_v1 + 1);
- new_objectid_map = (__le32 *) (disk_sb + 1);
if (cur_size > new_size) {
/*
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index 9fed1c05f1f4..500f2000eb41 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -746,9 +746,6 @@ static void check_leaf_block_head(struct buffer_head *bh)
static void check_internal_block_head(struct buffer_head *bh)
{
- struct block_head *blkh;
-
- blkh = B_BLK_HEAD(bh);
if (!(B_LEVEL(bh) > DISK_LEAF_NODE_LEVEL && B_LEVEL(bh) <= MAX_HEIGHT))
reiserfs_panic(NULL, "vs-6025", "invalid level %z", bh);
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 0037aea97d39..da9ebe33882b 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -593,7 +593,6 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key,
struct buffer_head *bh;
struct path_element *last_element;
int node_level, retval;
- int right_neighbor_of_leaf_node;
int fs_gen;
struct buffer_head *reada_bh[SEARCH_BY_KEY_READA];
b_blocknr_t reada_blocks[SEARCH_BY_KEY_READA];
@@ -614,8 +613,6 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key,
pathrelse(search_path);
- right_neighbor_of_leaf_node = 0;
-
/*
* With each iteration of this loop we search through the items in the
* current node, and calculate the next current node(next path element)
@@ -701,7 +698,6 @@ io_error:
*/
block_number = SB_ROOT_BLOCK(sb);
expected_level = -1;
- right_neighbor_of_leaf_node = 0;
/* repeat search from the root */
continue;
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index fe6d804a38dc..f9fd18670e22 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1272,21 +1272,23 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
}
static __always_inline int validate_range(struct mm_struct *mm,
- __u64 start, __u64 len)
+ __u64 *start, __u64 len)
{
__u64 task_size = mm->task_size;
- if (start & ~PAGE_MASK)
+ *start = untagged_addr(*start);
+
+ if (*start & ~PAGE_MASK)
return -EINVAL;
if (len & ~PAGE_MASK)
return -EINVAL;
if (!len)
return -EINVAL;
- if (start < mmap_min_addr)
+ if (*start < mmap_min_addr)
return -EINVAL;
- if (start >= task_size)
+ if (*start >= task_size)
return -EINVAL;
- if (len > task_size - start)
+ if (len > task_size - *start)
return -EINVAL;
return 0;
}
@@ -1336,7 +1338,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
goto out;
}
- ret = validate_range(mm, uffdio_register.range.start,
+ ret = validate_range(mm, &uffdio_register.range.start,
uffdio_register.range.len);
if (ret)
goto out;
@@ -1525,7 +1527,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
goto out;
- ret = validate_range(mm, uffdio_unregister.start,
+ ret = validate_range(mm, &uffdio_unregister.start,
uffdio_unregister.len);
if (ret)
goto out;
@@ -1676,7 +1678,7 @@ static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
goto out;
- ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
+ ret = validate_range(ctx->mm, &uffdio_wake.start, uffdio_wake.len);
if (ret)
goto out;
@@ -1716,7 +1718,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
sizeof(uffdio_copy)-sizeof(__s64)))
goto out;
- ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
+ ret = validate_range(ctx->mm, &uffdio_copy.dst, uffdio_copy.len);
if (ret)
goto out;
/*
@@ -1772,7 +1774,7 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
sizeof(uffdio_zeropage)-sizeof(__s64)))
goto out;
- ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
+ ret = validate_range(ctx->mm, &uffdio_zeropage.range.start,
uffdio_zeropage.range.len);
if (ret)
goto out;
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 120ef99d09e8..21c243622a79 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -2097,7 +2097,7 @@ xfs_verify_magic(
int idx;
idx = xfs_sb_version_hascrc(&mp->m_sb);
- if (unlikely(WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx])))
+ if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx]))
return false;
return dmagic == bp->b_ops->magic[idx];
}
@@ -2115,7 +2115,7 @@ xfs_verify_magic16(
int idx;
idx = xfs_sb_version_hascrc(&mp->m_sb);
- if (unlikely(WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx])))
+ if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx]))
return false;
return dmagic == bp->b_ops->magic16[idx];
}
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 7357a3c942a0..384b5c835ced 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -10,6 +10,7 @@
#define BUGFLAG_WARNING (1 << 0)
#define BUGFLAG_ONCE (1 << 1)
#define BUGFLAG_DONE (1 << 2)
+#define BUGFLAG_NO_CUT_HERE (1 << 3) /* CUT_HERE already sent */
#define BUGFLAG_TAINT(taint) ((taint) << 8)
#define BUG_GET_TAINT(bug) ((bug)->flags >> 8)
#endif
@@ -61,18 +62,6 @@ struct bug_entry {
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
#endif
-#ifdef __WARN_FLAGS
-#define __WARN_TAINT(taint) __WARN_FLAGS(BUGFLAG_TAINT(taint))
-#define __WARN_ONCE_TAINT(taint) __WARN_FLAGS(BUGFLAG_ONCE|BUGFLAG_TAINT(taint))
-
-#define WARN_ON_ONCE(condition) ({ \
- int __ret_warn_on = !!(condition); \
- if (unlikely(__ret_warn_on)) \
- __WARN_ONCE_TAINT(TAINT_WARN); \
- unlikely(__ret_warn_on); \
-})
-#endif
-
/*
* WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report
* significant kernel issues that need prompt attention if they should ever
@@ -89,27 +78,27 @@ struct bug_entry {
*
* Use the versions with printk format strings to provide better diagnostics.
*/
-#ifndef __WARN_TAINT
-extern __printf(3, 4)
-void warn_slowpath_fmt(const char *file, const int line,
- const char *fmt, ...);
+#ifndef __WARN_FLAGS
extern __printf(4, 5)
-void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint,
- const char *fmt, ...);
-extern void warn_slowpath_null(const char *file, const int line);
-#define WANT_WARN_ON_SLOWPATH
-#define __WARN() warn_slowpath_null(__FILE__, __LINE__)
-#define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg)
-#define __WARN_printf_taint(taint, arg...) \
- warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg)
+void warn_slowpath_fmt(const char *file, const int line, unsigned taint,
+ const char *fmt, ...);
+#define __WARN() __WARN_printf(TAINT_WARN, NULL)
+#define __WARN_printf(taint, arg...) \
+ warn_slowpath_fmt(__FILE__, __LINE__, taint, arg)
#else
extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
-#define __WARN() do { \
- printk(KERN_WARNING CUT_HERE); __WARN_TAINT(TAINT_WARN); \
-} while (0)
-#define __WARN_printf(arg...) __WARN_printf_taint(TAINT_WARN, arg)
-#define __WARN_printf_taint(taint, arg...) \
- do { __warn_printk(arg); __WARN_TAINT(taint); } while (0)
+#define __WARN() __WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN))
+#define __WARN_printf(taint, arg...) do { \
+ __warn_printk(arg); \
+ __WARN_FLAGS(BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\
+ } while (0)
+#define WARN_ON_ONCE(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_FLAGS(BUGFLAG_ONCE | \
+ BUGFLAG_TAINT(TAINT_WARN)); \
+ unlikely(__ret_warn_on); \
+})
#endif
/* used internally by panic.c */
@@ -132,7 +121,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
#define WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
- __WARN_printf(format); \
+ __WARN_printf(TAINT_WARN, format); \
unlikely(__ret_warn_on); \
})
#endif
@@ -140,7 +129,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
#define WARN_TAINT(condition, taint, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
- __WARN_printf_taint(taint, format); \
+ __WARN_printf(taint, format); \
unlikely(__ret_warn_on); \
})
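The bug.h rework above folds warn_slowpath_null() and
warn_slowpath_fmt_taint() into a single warn_slowpath_fmt() and routes
every variant through __WARN_printf(taint, ...); callers are
unaffected. Hypothetical call sites, just to show the unchanged
semantics:

    static int demo(struct device *dev, int ret)
    {
        if (WARN(ret < 0, "frobnicate failed: %d\n", ret))
            return ret;        /* WARN() still returns the condition */

        WARN_ON_ONCE(!dev);    /* taints with TAINT_WARN, fires once */
        WARN_TAINT(!dev->parent, TAINT_FIRMWARE_WORKAROUND,
                   "missing parent device\n");
        return 0;
    }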
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
index 6f8cc06ee44e..73f7421413cb 100644
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -49,7 +49,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
* @mm: the mm_struct of the current context
* @gfp: GFP flags to use for the allocation
*
- * Allocates a page and runs the pgtable_page_ctor().
+ * Allocates a page and runs the pgtable_pte_page_ctor().
*
* This function is intended for architectures that need
* anything beyond simple page allocation or must have custom GFP flags.
@@ -63,7 +63,7 @@ static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
pte = alloc_page(gfp);
if (!pte)
return NULL;
- if (!pgtable_page_ctor(pte)) {
+ if (!pgtable_pte_page_ctor(pte)) {
__free_page(pte);
return NULL;
}
@@ -76,7 +76,7 @@ static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
* pte_alloc_one - allocate a page for PTE-level user page table
* @mm: the mm_struct of the current context
*
- * Allocates a page and runs the pgtable_page_ctor().
+ * Allocates a page and runs the pgtable_pte_page_ctor().
*
* Return: `struct page` initialized as page table or %NULL on error
*/
@@ -98,7 +98,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
*/
static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
- pgtable_page_dtor(pte_page);
+ pgtable_pte_page_dtor(pte_page);
__free_page(pte_page);
}
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index b5a5a1ed9efd..78a73eba64dd 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -200,8 +200,8 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node)
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#define for_each_cpu_wrap(cpu, mask, start) \
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
-#define for_each_cpu_and(cpu, mask, and) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
+#define for_each_cpu_and(cpu, mask1, mask2) \
+ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
#else
/**
* cpumask_first - get the first cpu in a cpumask
@@ -290,20 +290,20 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
/**
* for_each_cpu_and - iterate over every cpu in both masks
* @cpu: the (optionally unsigned) integer iterator
- * @mask: the first cpumask pointer
- * @and: the second cpumask pointer
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
*
* This saves a temporary CPU mask in many places. It is equivalent to:
* struct cpumask tmp;
- * cpumask_and(&tmp, &mask, &and);
+ * cpumask_and(&tmp, &mask1, &mask2);
* for_each_cpu(cpu, &tmp)
* ...
*
* After the loop, cpu is >= nr_cpu_ids.
*/
-#define for_each_cpu_and(cpu, mask, and) \
+#define for_each_cpu_and(cpu, mask1, mask2) \
for ((cpu) = -1; \
- (cpu) = cpumask_next_and((cpu), (mask), (and)), \
+ (cpu) = cpumask_next_and((cpu), (mask1), (mask2)), \
(cpu) < nr_cpu_ids;)
#endif /* SMP */
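The cpumask change above only renames the @mask/@and parameters to
@mask1/@mask2 (avoiding the operator-like "and"); usage is unchanged.
For example, per the kernel-doc just updated, iterating over the CPUs
present in both masks without a temporary (hypothetical helper):

    static void demo_iterate(const struct cpumask *affinity)
    {
        unsigned int cpu;

        /* Equivalent to cpumask_and() into a temp, then for_each_cpu(). */
        for_each_cpu_and(cpu, cpu_online_mask, affinity)
            pr_info("may run on cpu %u\n", cpu);
    }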
diff --git a/include/linux/interval_tree_generic.h b/include/linux/interval_tree_generic.h
index 855476145fe1..aaa8a0767aa3 100644
--- a/include/linux/interval_tree_generic.h
+++ b/include/linux/interval_tree_generic.h
@@ -30,26 +30,8 @@
\
/* Callbacks for augmented rbtree insert and remove */ \
\
-static inline ITTYPE ITPREFIX ## _compute_subtree_last(ITSTRUCT *node) \
-{ \
- ITTYPE max = ITLAST(node), subtree_last; \
- if (node->ITRB.rb_left) { \
- subtree_last = rb_entry(node->ITRB.rb_left, \
- ITSTRUCT, ITRB)->ITSUBTREE; \
- if (max < subtree_last) \
- max = subtree_last; \
- } \
- if (node->ITRB.rb_right) { \
- subtree_last = rb_entry(node->ITRB.rb_right, \
- ITSTRUCT, ITRB)->ITSUBTREE; \
- if (max < subtree_last) \
- max = subtree_last; \
- } \
- return max; \
-} \
- \
-RB_DECLARE_CALLBACKS(static, ITPREFIX ## _augment, ITSTRUCT, ITRB, \
- ITTYPE, ITSUBTREE, ITPREFIX ## _compute_subtree_last) \
+RB_DECLARE_CALLBACKS_MAX(static, ITPREFIX ## _augment, \
+ ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, ITLAST) \
\
/* Insert / remove interval nodes from the tree */ \
\
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index f0b809258ed3..cc162f3e6461 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -183,6 +183,8 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
bool get_value);
void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
+int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
+ unsigned long buf_len);
void * __weak arch_kexec_kernel_image_load(struct kimage *image);
int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi,
Elf_Shdr *section,
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index fbf144aaa749..b072aeb1fd78 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -326,8 +326,10 @@ extern atomic_t kgdb_active;
(raw_smp_processor_id() == atomic_read(&kgdb_active))
extern bool dbg_is_early;
extern void __init dbg_late_init(void);
+extern void kgdb_panic(const char *msg);
#else /* ! CONFIG_KGDB */
#define in_dbg_master() (0)
#define dbg_late_init()
+static inline void kgdb_panic(const char *msg) {}
#endif /* ! CONFIG_KGDB */
#endif /* _KGDB_H_ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 294a67b94147..cc292273e6ba 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1949,7 +1949,7 @@ static inline void pgtable_init(void)
pgtable_cache_init();
}
-static inline bool pgtable_page_ctor(struct page *page)
+static inline bool pgtable_pte_page_ctor(struct page *page)
{
if (!ptlock_init(page))
return false;
@@ -1958,7 +1958,7 @@ static inline bool pgtable_page_ctor(struct page *page)
return true;
}
-static inline void pgtable_page_dtor(struct page *page)
+static inline void pgtable_pte_page_dtor(struct page *page)
{
ptlock_free(page);
__ClearPageTable(page);
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index d7016dcb245e..c1bc6731125c 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -36,6 +36,10 @@ struct vmacache {
struct vm_area_struct *vmas[VMACACHE_SIZE];
};
+/*
+ * When updating this, please also update resident_page_types[] in
+ * kernel/fork.c
+ */
enum {
MM_FILEPAGES, /* Resident file mapping pages */
MM_ANONPAGES, /* Resident anonymous pages */
diff --git a/include/linux/printk.h b/include/linux/printk.h
index cefd374c47b1..c09d67edda3a 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -488,13 +488,6 @@ extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
extern void print_hex_dump(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
const void *buf, size_t len, bool ascii);
-#if defined(CONFIG_DYNAMIC_DEBUG)
-#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \
- dynamic_hex_dump(prefix_str, prefix_type, 16, 1, buf, len, true)
-#else
-extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
- const void *buf, size_t len);
-#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
#else
static inline void print_hex_dump(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
@@ -526,4 +519,19 @@ static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type,
}
#endif
+/**
+ * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params
+ * @prefix_str: string to prefix each line with;
+ * caller supplies trailing spaces for alignment if desired
+ * @prefix_type: controls whether prefix of an offset, address, or none
+ * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
+ * @buf: data blob to dump
+ * @len: number of bytes in the @buf
+ *
+ * Calls print_hex_dump(), with log level of KERN_DEBUG,
+ * rowsize of 16, groupsize of 1, and ASCII output included.
+ */
+#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \
+ print_hex_dump_debug(prefix_str, prefix_type, 16, 1, buf, len, true)
+
#endif
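
With the move above, print_hex_dump_bytes() is a one-line wrapper in both
configurations. A user-space sketch of the output format it requests
(rowsize 16, groupsize 1, ASCII column; hex_dump_bytes() here is a hypothetical
stand-in, not the kernel function):

#include <ctype.h>
#include <stddef.h>
#include <stdio.h>

/* Dump 16 bytes per row, one-byte groups, offset prefix, ASCII column. */
static void hex_dump_bytes(const char *prefix, const void *buf, size_t len)
{
	const unsigned char *p = buf;
	size_t i, j;

	for (i = 0; i < len; i += 16) {
		printf("%s%08zx: ", prefix, i);	/* DUMP_PREFIX_OFFSET style */
		for (j = 0; j < 16; j++) {
			if (i + j < len)
				printf("%02x ", p[i + j]);
			else
				printf("   ");
		}
		printf(" ");
		for (j = 0; j < 16 && i + j < len; j++)
			putchar(isprint(p[i + j]) ? p[i + j] : '.');
		putchar('\n');
	}
}

int main(void)
{
	hex_dump_bytes("buf ", "Hello, hexdump!\n", 16);
	return 0;
}
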
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index 179faab29f52..fdd421b8d9ae 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -60,41 +60,87 @@ rb_insert_augmented_cached(struct rb_node *node,
rb_insert_augmented(node, &root->rb_root, augment);
}
-#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \
- rbtype, rbaugmented, rbcompute) \
+/*
+ * Template for declaring augmented rbtree callbacks (generic case)
+ *
+ * RBSTATIC: 'static' or empty
+ * RBNAME: name of the rb_augment_callbacks structure
+ * RBSTRUCT: struct type of the tree nodes
+ * RBFIELD: name of struct rb_node field within RBSTRUCT
+ * RBAUGMENTED: name of field within RBSTRUCT holding data for subtree
+ * RBCOMPUTE: name of function that recomputes the RBAUGMENTED data
+ */
+
+#define RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \
+ RBSTRUCT, RBFIELD, RBAUGMENTED, RBCOMPUTE) \
static inline void \
-rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \
+RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop) \
{ \
while (rb != stop) { \
- rbstruct *node = rb_entry(rb, rbstruct, rbfield); \
- rbtype augmented = rbcompute(node); \
- if (node->rbaugmented == augmented) \
+ RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \
+ if (RBCOMPUTE(node, true)) \
break; \
- node->rbaugmented = augmented; \
- rb = rb_parent(&node->rbfield); \
+ rb = rb_parent(&node->RBFIELD); \
} \
} \
static inline void \
-rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \
+RBNAME ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \
{ \
- rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
- rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
- new->rbaugmented = old->rbaugmented; \
+ RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
+ RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
+ new->RBAUGMENTED = old->RBAUGMENTED; \
} \
static void \
-rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
+RBNAME ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
{ \
- rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
- rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
- new->rbaugmented = old->rbaugmented; \
- old->rbaugmented = rbcompute(old); \
+ RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
+ RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
+ new->RBAUGMENTED = old->RBAUGMENTED; \
+ RBCOMPUTE(old, false); \
} \
-rbstatic const struct rb_augment_callbacks rbname = { \
- .propagate = rbname ## _propagate, \
- .copy = rbname ## _copy, \
- .rotate = rbname ## _rotate \
+RBSTATIC const struct rb_augment_callbacks RBNAME = { \
+ .propagate = RBNAME ## _propagate, \
+ .copy = RBNAME ## _copy, \
+ .rotate = RBNAME ## _rotate \
};
+/*
+ * Template for declaring augmented rbtree callbacks,
+ * computing the RBAUGMENTED scalar as max(RBCOMPUTE(node)) over all subtree nodes.
+ *
+ * RBSTATIC: 'static' or empty
+ * RBNAME: name of the rb_augment_callbacks structure
+ * RBSTRUCT: struct type of the tree nodes
+ * RBFIELD: name of struct rb_node field within RBSTRUCT
+ * RBTYPE: type of the RBAUGMENTED field
+ * RBAUGMENTED: name of RBTYPE field within RBSTRUCT holding data for subtree
+ * RBCOMPUTE: name of function that returns the per-node RBTYPE scalar
+ */
+
+#define RB_DECLARE_CALLBACKS_MAX(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, \
+ RBTYPE, RBAUGMENTED, RBCOMPUTE) \
+static inline bool RBNAME ## _compute_max(RBSTRUCT *node, bool exit) \
+{ \
+ RBSTRUCT *child; \
+ RBTYPE max = RBCOMPUTE(node); \
+ if (node->RBFIELD.rb_left) { \
+ child = rb_entry(node->RBFIELD.rb_left, RBSTRUCT, RBFIELD); \
+ if (child->RBAUGMENTED > max) \
+ max = child->RBAUGMENTED; \
+ } \
+ if (node->RBFIELD.rb_right) { \
+ child = rb_entry(node->RBFIELD.rb_right, RBSTRUCT, RBFIELD); \
+ if (child->RBAUGMENTED > max) \
+ max = child->RBAUGMENTED; \
+ } \
+ if (exit && node->RBAUGMENTED == max) \
+ return true; \
+ node->RBAUGMENTED = max; \
+ return false; \
+} \
+RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \
+ RBSTRUCT, RBFIELD, RBAUGMENTED, RBNAME ## _compute_max)
+
#define RB_RED 0
#define RB_BLACK 1
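
The _compute_max helper that RB_DECLARE_CALLBACKS_MAX() generates is the heart
of the change: it folds a node's own scalar together with the children's cached
subtree maxima, and its boolean result lets _propagate stop early once nothing
changes. A standalone sketch of that logic on a plain binary tree (toy
struct node, not the kernel rbtree):

#include <stdbool.h>
#include <stdio.h>

struct node {
	unsigned long val;		/* per-node scalar (RBCOMPUTE)  */
	unsigned long subtree_max;	/* cached max (RBAUGMENTED)     */
	struct node *left, *right;
};

static bool compute_max(struct node *n, bool exit)
{
	unsigned long max = n->val;

	if (n->left && n->left->subtree_max > max)
		max = n->left->subtree_max;
	if (n->right && n->right->subtree_max > max)
		max = n->right->subtree_max;
	if (exit && n->subtree_max == max)
		return true;	/* already up to date: stop propagating */
	n->subtree_max = max;
	return false;
}

int main(void)
{
	struct node l = { .val = 7, .subtree_max = 7 };
	struct node r = { .val = 3, .subtree_max = 3 };
	struct node root = { .val = 5, .left = &l, .right = &r };

	compute_max(&root, false);
	printf("subtree max = %lu\n", root.subtree_max);	/* 7 */
	return 0;
}
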
diff --git a/include/linux/string.h b/include/linux/string.h
index 4deb11f7976b..b2f9df7f0761 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -474,8 +474,9 @@ static inline void memcpy_and_pad(void *dest, size_t dest_len,
* But this can lead to bugs due to typos, or if prefix is a pointer
* and not a constant. Instead use str_has_prefix().
*
- * Returns: 0 if @str does not start with @prefix
- strlen(@prefix) if @str does start with @prefix
+ * Returns:
+ * * strlen(@prefix) if @str starts with @prefix
+ * * 0 if @str does not start with @prefix
*/
static __always_inline size_t str_has_prefix(const char *str, const char *prefix)
{
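
The reworked kernel-doc above spells out why the return value is
strlen(@prefix) rather than a boolean: callers can use it to skip past the
prefix. A user-space reimplementation with the same contract (a sketch, not
the kernel inline):

#include <stdio.h>
#include <string.h>

/* Returns strlen(prefix) if str starts with prefix, 0 otherwise. */
static size_t str_has_prefix(const char *str, const char *prefix)
{
	size_t len = strlen(prefix);

	return strncmp(str, prefix, len) == 0 ? len : 0;
}

int main(void)
{
	const char *arg = "mode=fast";
	size_t len = str_has_prefix(arg, "mode=");

	if (len)
		printf("value: %s\n", arg + len);	/* prints "fast" */
	return 0;
}
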
diff --git a/include/linux/swap.h b/include/linux/swap.h
index de2c67a33b7e..063c0c1e112b 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -340,6 +340,7 @@ extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
+extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);
@@ -364,6 +365,7 @@ extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;
+extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 8d8821b3689a..659a4400517b 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -134,7 +134,7 @@ static inline void copy_overflow(int size, unsigned long count)
WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
-static __always_inline bool
+static __always_inline __must_check bool
check_copy_size(const void *addr, size_t bytes, bool is_source)
{
int sz = __compiletime_object_size(addr);
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 34a038563d97..70bbdc38dc37 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -55,7 +55,7 @@
* as usual) and both source and destination can trigger faults.
*/
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
kasan_check_write(to, n);
@@ -63,7 +63,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
return raw_copy_from_user(to, from, n);
}
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
@@ -85,7 +85,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
* The caller should also make sure to pin the user space address
* so that we don't fault and sleep.
*/
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
kasan_check_read(from, n);
@@ -93,7 +93,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
return raw_copy_to_user(to, from, n);
}
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
@@ -103,7 +103,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
}
#ifdef INLINE_COPY_FROM_USER
-static inline unsigned long
+static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
@@ -117,12 +117,12 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
return res;
}
#else
-extern unsigned long
+extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif
#ifdef INLINE_COPY_TO_USER
-static inline unsigned long
+static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
@@ -133,7 +133,7 @@ _copy_to_user(void __user *to, const void *from, unsigned long n)
return n;
}
#else
-extern unsigned long
+extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif
@@ -222,8 +222,9 @@ static inline bool pagefault_disabled(void)
#ifndef ARCH_HAS_NOCACHE_UACCESS
-static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
- const void __user *from, unsigned long n)
+static inline __must_check unsigned long
+__copy_from_user_inatomic_nocache(void *to, const void __user *from,
+ unsigned long n)
{
return __copy_from_user_inatomic(to, from, n);
}
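
All of the annotations above rely on __must_check expanding to
__attribute__((warn_unused_result)) on GCC/Clang, so a caller that drops the
"bytes not copied" return value now gets a compile-time warning. A minimal
user-space demonstration (copy_buf() is a hypothetical stand-in for
copy_from_user()):

#include <stdio.h>
#include <string.h>

#define __must_check __attribute__((warn_unused_result))

static __must_check unsigned long
copy_buf(void *to, const void *from, unsigned long n)
{
	memcpy(to, from, n);
	return 0;	/* 0 bytes left uncopied */
}

int main(void)
{
	char dst[8];

	if (copy_buf(dst, "abc", 4))	/* return value checked: OK */
		return 1;
	/* copy_buf(dst, "abc", 4); alone would warn: result ignored */
	printf("%s\n", dst);
	return 0;
}
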
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 3a27335fce2c..c2ce6480b4b1 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -66,8 +66,9 @@ DECLARE_EVENT_CLASS(writeback_page_template,
),
TP_fast_assign(
- strncpy(__entry->name,
- mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
+ strscpy_pad(__entry->name,
+ mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)",
+ 32);
__entry->ino = mapping ? mapping->host->i_ino : 0;
__entry->index = page->index;
),
@@ -110,8 +111,8 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
struct backing_dev_info *bdi = inode_to_bdi(inode);
/* may be called for files on pseudo FSes w/ unregistered bdi */
- strncpy(__entry->name,
- bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
+ strscpy_pad(__entry->name,
+ bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->flags = flags;
@@ -316,8 +317,8 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
),
TP_fast_assign(
- strncpy(__entry->name,
- dev_name(inode_to_bdi(inode)->dev), 32);
+ strscpy_pad(__entry->name,
+ dev_name(inode_to_bdi(inode)->dev), 32);
__entry->ino = inode->i_ino;
__entry->sync_mode = wbc->sync_mode;
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
@@ -360,8 +361,9 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__field(unsigned int, cgroup_ino)
),
TP_fast_assign(
- strncpy(__entry->name,
- wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32);
+ strscpy_pad(__entry->name,
+ wb->bdi->dev ? dev_name(wb->bdi->dev) :
+ "(unknown)", 32);
__entry->nr_pages = work->nr_pages;
__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
__entry->sync_mode = work->sync_mode;
@@ -414,7 +416,7 @@ DECLARE_EVENT_CLASS(writeback_class,
__field(unsigned int, cgroup_ino)
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
TP_printk("bdi %s: cgroup_ino=%u",
@@ -436,7 +438,7 @@ TRACE_EVENT(writeback_bdi_register,
__array(char, name, 32)
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
),
TP_printk("bdi %s",
__entry->name
@@ -461,7 +463,7 @@ DECLARE_EVENT_CLASS(wbc_class,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
__entry->nr_to_write = wbc->nr_to_write;
__entry->pages_skipped = wbc->pages_skipped;
__entry->sync_mode = wbc->sync_mode;
@@ -512,7 +514,7 @@ TRACE_EVENT(writeback_queue_io,
),
TP_fast_assign(
unsigned long *older_than_this = work->older_than_this;
- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
__entry->older = older_than_this ? *older_than_this : 0;
__entry->age = older_than_this ?
(jiffies - *older_than_this) * 1000 / HZ : -1;
@@ -598,7 +600,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
),
TP_fast_assign(
- strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
__entry->write_bw = KBps(wb->write_bandwidth);
__entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
__entry->dirty_rate = KBps(dirty_rate);
@@ -663,7 +665,7 @@ TRACE_EVENT(balance_dirty_pages,
TP_fast_assign(
unsigned long freerun = (thresh + bg_thresh) / 2;
- strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
__entry->limit = global_wb_domain.dirty_limit;
__entry->setpoint = (global_wb_domain.dirty_limit +
@@ -723,8 +725,8 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
),
TP_fast_assign(
- strncpy(__entry->name,
- dev_name(inode_to_bdi(inode)->dev), 32);
+ strscpy_pad(__entry->name,
+ dev_name(inode_to_bdi(inode)->dev), 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->dirtied_when = inode->dirtied_when;
@@ -797,8 +799,8 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
),
TP_fast_assign(
- strncpy(__entry->name,
- dev_name(inode_to_bdi(inode)->dev), 32);
+ strscpy_pad(__entry->name,
+ dev_name(inode_to_bdi(inode)->dev), 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->dirtied_when = inode->dirtied_when;
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index 63b1f506ea67..c160a5354eb6 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -67,6 +67,9 @@
#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+#define MADV_COLD 20 /* deactivate these pages */
+#define MADV_PAGEOUT 21 /* reclaim these pages */
+
/* compatibility flags */
#define MAP_FILE 0
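
From user space, the two new hints are ordinary madvise() calls. A minimal
sketch, assuming a kernel and headers with MADV_COLD/MADV_PAGEOUT support
(the fallback defines mirror the values added above):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_COLD
#define MADV_COLD	20
#endif
#ifndef MADV_PAGEOUT
#define MADV_PAGEOUT	21
#endif

int main(void)
{
	size_t len = 1 << 20;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	memset(buf, 0xaa, len);		/* fault the pages in */

	/* Hint: this range is cold, reclaim it before other memory. */
	if (madvise(buf, len, MADV_COLD))
		perror("MADV_COLD");
	/* Stronger: reclaim (swap out / drop) the range right away. */
	if (madvise(buf, len, MADV_PAGEOUT))
		perror("MADV_PAGEOUT");
	return 0;
}
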
diff --git a/include/uapi/linux/coff.h b/include/uapi/linux/coff.h
index e4a79f80b9a0..ab5c7e847eed 100644
--- a/include/uapi/linux/coff.h
+++ b/include/uapi/linux/coff.h
@@ -11,6 +11,9 @@
more information about COFF, then O'Reilly has a very excellent book.
*/
+#ifndef _UAPI_LINUX_COFF_H
+#define _UAPI_LINUX_COFF_H
+
#define E_SYMNMLEN 8 /* Number of characters in a symbol name */
#define E_FILNMLEN 14 /* Number of characters in a file name */
#define E_DIMNUM 4 /* Number of array dimensions in auxiliary entry */
@@ -350,3 +353,5 @@ struct COFF_reloc {
/* For new sections we haven't heard of before */
#define COFF_DEF_SECTION_ALIGNMENT 4
+
+#endif /* _UAPI_LINUX_COFF_H */
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 7c15729d9d25..3d920ff15c80 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -1240,15 +1240,14 @@ static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
/* create the notify skb */
nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
- if (!nc) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!nc)
+ return -ENOMEM;
+
if (copy_from_user(nc->data,
notification->sigev_value.sival_ptr,
NOTIFY_COOKIE_LEN)) {
ret = -EFAULT;
- goto out;
+ goto free_skb;
}
/* TODO: add a header? */
@@ -1264,8 +1263,7 @@ retry:
fdput(f);
if (IS_ERR(sock)) {
ret = PTR_ERR(sock);
- sock = NULL;
- goto out;
+ goto free_skb;
}
timeo = MAX_SCHEDULE_TIMEOUT;
@@ -1274,11 +1272,8 @@ retry:
sock = NULL;
goto retry;
}
- if (ret) {
- sock = NULL;
- nc = NULL;
- goto out;
- }
+ if (ret)
+ return ret;
}
}
@@ -1333,7 +1328,8 @@ out_fput:
out:
if (sock)
netlink_detachskb(sock, nc);
- else if (nc)
+ else
+free_skb:
dev_kfree_skb(nc);
return ret;
diff --git a/ipc/sem.c b/ipc/sem.c
index 7da4504bcc7c..ec97a7072413 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1852,7 +1852,8 @@ static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
struct sem_undo *un;
- list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
+ list_for_each_entry_rcu(un, &ulp->list_proc, list_proc,
+ spin_is_locked(&ulp->lock)) {
if (un->semid == semid)
return un;
}
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 10f1187b3907..f76d6f77dd5e 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -893,30 +893,25 @@ static struct sysrq_key_op sysrq_dbg_op = {
};
#endif
-static int kgdb_panic_event(struct notifier_block *self,
- unsigned long val,
- void *data)
+void kgdb_panic(const char *msg)
{
+ if (!kgdb_io_module_registered)
+ return;
+
/*
- * Avoid entering the debugger if we were triggered due to a panic
- * We don't want to get stuck waiting for input from user in such case.
- * panic_timeout indicates the system should automatically
+ * We don't want to get stuck waiting for input from the user if
+ * "panic_timeout" indicates the system should automatically
* reboot on panic.
*/
if (panic_timeout)
- return NOTIFY_DONE;
+ return;
if (dbg_kdb_mode)
- kdb_printf("PANIC: %s\n", (char *)data);
+ kdb_printf("PANIC: %s\n", msg);
+
kgdb_breakpoint();
- return NOTIFY_DONE;
}
-static struct notifier_block kgdb_panic_event_nb = {
- .notifier_call = kgdb_panic_event,
- .priority = INT_MAX,
-};
-
void __weak kgdb_arch_late(void)
{
}
@@ -965,8 +960,6 @@ static void kgdb_register_callbacks(void)
kgdb_arch_late();
register_module_notifier(&dbg_module_load_nb);
register_reboot_notifier(&dbg_reboot_notifier);
- atomic_notifier_chain_register(&panic_notifier_list,
- &kgdb_panic_event_nb);
#ifdef CONFIG_MAGIC_SYSRQ
register_sysrq_key('g', &sysrq_dbg_op);
#endif
@@ -980,16 +973,14 @@ static void kgdb_register_callbacks(void)
static void kgdb_unregister_callbacks(void)
{
/*
- * When this routine is called KGDB should unregister from the
- * panic handler and clean up, making sure it is not handling any
+ * When this routine is called KGDB should unregister from
+ * handlers and clean up, making sure it is not handling any
* break exceptions at the time.
*/
if (kgdb_io_module_registered) {
kgdb_io_module_registered = 0;
unregister_reboot_notifier(&dbg_reboot_notifier);
unregister_module_notifier(&dbg_module_load_nb);
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &kgdb_panic_event_nb);
kgdb_arch_exit();
#ifdef CONFIG_MAGIC_SYSRQ
unregister_sysrq_key('g', &sysrq_dbg_op);
diff --git a/kernel/elfcore.c b/kernel/elfcore.c
index fc482c8e0bd8..57fb4dcff434 100644
--- a/kernel/elfcore.c
+++ b/kernel/elfcore.c
@@ -3,6 +3,7 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/binfmts.h>
+#include <linux/elfcore.h>
Elf_Half __weak elf_core_extra_phdrs(void)
{
diff --git a/kernel/fork.c b/kernel/fork.c
index 5a0fd518e04e..60763c043aa3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -125,6 +125,15 @@ int nr_threads; /* The idle threads do not count.. */
static int max_threads; /* tunable limit on nr_threads */
+#define NAMED_ARRAY_INDEX(x) [x] = __stringify(x)
+
+static const char * const resident_page_types[] = {
+ NAMED_ARRAY_INDEX(MM_FILEPAGES),
+ NAMED_ARRAY_INDEX(MM_ANONPAGES),
+ NAMED_ARRAY_INDEX(MM_SWAPENTS),
+ NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
+};
+
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
@@ -645,12 +654,15 @@ static void check_mm(struct mm_struct *mm)
{
int i;
+ BUILD_BUG_ON_MSG(ARRAY_SIZE(resident_page_types) != NR_MM_COUNTERS,
+ "Please make sure 'struct resident_page_types[]' is updated as well");
+
for (i = 0; i < NR_MM_COUNTERS; i++) {
long x = atomic_long_read(&mm->rss_stat.count[i]);
if (unlikely(x))
- printk(KERN_ALERT "BUG: Bad rss-counter state "
- "mm:%p idx:%d val:%ld\n", mm, i, x);
+ pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
+ mm, resident_page_types[i], x);
}
if (mm_pgtables_bytes(mm))
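
The NAMED_ARRAY_INDEX() trick pairs each enum value with its own name via the
preprocessor, and the BUILD_BUG_ON_MSG() keeps the array and the enum in
lockstep. A user-space sketch of the same pattern, with _Static_assert
standing in for BUILD_BUG_ON_MSG():

#include <stdio.h>

#define __stringify(x)		#x
#define NAMED_ARRAY_INDEX(x)	[x] = __stringify(x)

enum { MM_FILEPAGES, MM_ANONPAGES, MM_SWAPENTS, MM_SHMEMPAGES,
       NR_MM_COUNTERS };

static const char * const resident_page_types[] = {
	NAMED_ARRAY_INDEX(MM_FILEPAGES),
	NAMED_ARRAY_INDEX(MM_ANONPAGES),
	NAMED_ARRAY_INDEX(MM_SWAPENTS),
	NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
};

/* fails to compile if the array and the enum drift apart */
_Static_assert(sizeof(resident_page_types) / sizeof(resident_page_types[0])
	       == NR_MM_COUNTERS, "resident_page_types[] out of date");

int main(void)
{
	printf("%s\n", resident_page_types[MM_SWAPENTS]); /* "MM_SWAPENTS" */
	return 0;
}
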
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index d5870723b8ad..15d70a90b50d 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -300,6 +300,8 @@ static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
struct page *pages;
+ if (fatal_signal_pending(current))
+ return NULL;
pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
if (pages) {
unsigned int count, i;
diff --git a/kernel/panic.c b/kernel/panic.c
index 057540b6eee9..47e8ebccc22b 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -12,6 +12,7 @@
#include <linux/debug_locks.h>
#include <linux/sched/debug.h>
#include <linux/interrupt.h>
+#include <linux/kgdb.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
@@ -220,6 +221,13 @@ void panic(const char *fmt, ...)
#endif
/*
+ * If kgdb is enabled, give it a chance to run before we stop all
+ * the other CPUs or else we won't be able to debug processes left
+ * running on them.
+ */
+ kgdb_panic(buf);
+
+ /*
* If we have crashed and we have a crash kernel loaded let it handle
* everything else.
* If we want to run this after calling panic_notifiers, pass
@@ -551,9 +559,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
{
disable_trace_on_warning();
- if (args)
- pr_warn(CUT_HERE);
-
if (file)
pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
raw_smp_processor_id(), current->pid, file, line,
@@ -591,37 +596,26 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
add_taint(taint, LOCKDEP_STILL_OK);
}
-#ifdef WANT_WARN_ON_SLOWPATH
-void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
+#ifndef __WARN_FLAGS
+void warn_slowpath_fmt(const char *file, int line, unsigned taint,
+ const char *fmt, ...)
{
struct warn_args args;
- args.fmt = fmt;
- va_start(args.args, fmt);
- __warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL,
- &args);
- va_end(args.args);
-}
-EXPORT_SYMBOL(warn_slowpath_fmt);
+ pr_warn(CUT_HERE);
-void warn_slowpath_fmt_taint(const char *file, int line,
- unsigned taint, const char *fmt, ...)
-{
- struct warn_args args;
+ if (!fmt) {
+ __warn(file, line, __builtin_return_address(0), taint,
+ NULL, NULL);
+ return;
+ }
args.fmt = fmt;
va_start(args.args, fmt);
__warn(file, line, __builtin_return_address(0), taint, NULL, &args);
va_end(args.args);
}
-EXPORT_SYMBOL(warn_slowpath_fmt_taint);
-
-void warn_slowpath_null(const char *file, int line)
-{
- pr_warn(CUT_HERE);
- __warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL, NULL);
-}
-EXPORT_SYMBOL(warn_slowpath_null);
+EXPORT_SYMBOL(warn_slowpath_fmt);
#else
void __warn_printk(const char *fmt, ...)
{
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6b1b1703a646..93d97f9b0157 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -311,7 +311,7 @@ config HEADERS_CHECK
relevant for userspace, say 'Y'.
config OPTIMIZE_INLINING
- bool "Allow compiler to uninline functions marked 'inline'"
+ def_bool y
help
This option determines if the kernel forces gcc to inline the functions
developers have marked 'inline'. Doing so takes away freedom from gcc to
@@ -322,8 +322,6 @@ config OPTIMIZE_INLINING
decision will become the default in the future. Until then this option
is there to test gcc for this.
- If unsure, say N.
-
config DEBUG_SECTION_MISMATCH
bool "Enable full Section mismatch analysis"
help
diff --git a/lib/bug.c b/lib/bug.c
index 1077366f496b..8c98af0bf585 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -181,6 +181,15 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
}
}
+ /*
+ * BUG() and WARN_ON() families don't print a custom debug message
+ * before triggering the exception handler, so we must add the
+ * "cut here" line now. WARN() issues its own "cut here" before the
+ * extra debugging message it writes before triggering the handler.
+ */
+ if ((bug->flags & BUGFLAG_NO_CUT_HERE) == 0)
+ printk(KERN_DEFAULT CUT_HERE);
+
if (warning) {
/* this is a WARN_ON rather than BUG/BUG_ON */
__warn(file, line, (void *)bugaddr, BUG_GET_TAINT(bug), regs,
@@ -188,8 +197,6 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
return BUG_TRAP_TYPE_WARN;
}
- printk(KERN_DEFAULT CUT_HERE);
-
if (file)
pr_crit("kernel BUG at %s:%u!\n", file, line);
else
diff --git a/lib/extable.c b/lib/extable.c
index 25da4071122a..c3e59caf7ffa 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/sort.h>
#include <linux/uaccess.h>
+#include <linux/extable.h>
#ifndef ARCH_HAS_RELATIVE_EXTABLE
#define ex_to_insn(x) ((x)->insn)
diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
index a7bafc413730..ae25e2fa2187 100644
--- a/lib/generic-radix-tree.c
+++ b/lib/generic-radix-tree.c
@@ -36,12 +36,12 @@ static inline size_t genradix_depth_size(unsigned depth)
#define GENRADIX_DEPTH_MASK \
((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
-unsigned genradix_root_to_depth(struct genradix_root *r)
+static inline unsigned genradix_root_to_depth(struct genradix_root *r)
{
return (unsigned long) r & GENRADIX_DEPTH_MASK;
}
-struct genradix_node *genradix_root_to_node(struct genradix_root *r)
+static inline struct genradix_node *genradix_root_to_node(struct genradix_root *r)
{
return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK);
}
diff --git a/lib/hexdump.c b/lib/hexdump.c
index b1d55b669ae2..147133f8eb2f 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -270,25 +270,4 @@ void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
}
EXPORT_SYMBOL(print_hex_dump);
-#if !defined(CONFIG_DYNAMIC_DEBUG)
-/**
- * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params
- * @prefix_str: string to prefix each line with;
- * caller supplies trailing spaces for alignment if desired
- * @prefix_type: controls whether prefix of an offset, address, or none
- * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
- * @buf: data blob to dump
- * @len: number of bytes in the @buf
- *
- * Calls print_hex_dump(), with log level of KERN_DEBUG,
- * rowsize of 16, groupsize of 1, and ASCII output included.
- */
-void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
- const void *buf, size_t len)
-{
- print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1,
- buf, len, true);
-}
-EXPORT_SYMBOL(print_hex_dump_bytes);
-#endif /* !defined(CONFIG_DYNAMIC_DEBUG) */
#endif /* defined(CONFIG_PRINTK) */
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
index ba16c08e8cb9..717c940112f9 100644
--- a/lib/lzo/lzo1x_compress.c
+++ b/lib/lzo/lzo1x_compress.c
@@ -83,17 +83,19 @@ next:
ALIGN((uintptr_t)ir, 4)) &&
(ir < limit) && (*ir == 0))
ir++;
- for (; (ir + 4) <= limit; ir += 4) {
- dv = *((u32 *)ir);
- if (dv) {
+ if (IS_ALIGNED((uintptr_t)ir, 4)) {
+ for (; (ir + 4) <= limit; ir += 4) {
+ dv = *((u32 *)ir);
+ if (dv) {
# if defined(__LITTLE_ENDIAN)
- ir += __builtin_ctz(dv) >> 3;
+ ir += __builtin_ctz(dv) >> 3;
# elif defined(__BIG_ENDIAN)
- ir += __builtin_clz(dv) >> 3;
+ ir += __builtin_clz(dv) >> 3;
# else
# error "missing endian definition"
# endif
- break;
+ break;
+ }
}
}
#endif
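
The fix above only permits the u32-at-a-time zero scan once the cursor is
4-byte aligned; a misaligned u32 load is undefined behaviour and faults on
some architectures. A user-space sketch of the guarded scan (little-endian
assumed; note the kernel builds with -fno-strict-aliasing, which the cast
below leans on):

#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)	(((uintptr_t)(x) & ((a) - 1)) == 0)

static const unsigned char *skip_zero_run(const unsigned char *p,
					  const unsigned char *limit)
{
	/* byte-at-a-time until aligned (or a non-zero byte appears) */
	while (!IS_ALIGNED(p, 4) && p < limit && *p == 0)
		p++;
	if (IS_ALIGNED(p, 4)) {
		while (p + 4 <= limit) {
			uint32_t dv = *(const uint32_t *)p;

			if (dv) {
				/* index of first non-zero byte (LE) */
				p += __builtin_ctz(dv) >> 3;
				break;
			}
			p += 4;
		}
	}
	while (p < limit && *p == 0)	/* tail, byte-at-a-time */
		p++;
	return p;
}

int main(void)
{
	unsigned char buf[16] = { [11] = 0x42 };

	printf("offset %td\n", skip_zero_run(buf, buf + 16) - buf); /* 11 */
	return 0;
}
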
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 62b8ee92643d..41ae3c7570d3 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -77,26 +77,10 @@ static inline void erase_cached(struct test_node *node, struct rb_root_cached *r
}
-static inline u32 augment_recompute(struct test_node *node)
-{
- u32 max = node->val, child_augmented;
- if (node->rb.rb_left) {
- child_augmented = rb_entry(node->rb.rb_left, struct test_node,
- rb)->augmented;
- if (max < child_augmented)
- max = child_augmented;
- }
- if (node->rb.rb_right) {
- child_augmented = rb_entry(node->rb.rb_right, struct test_node,
- rb)->augmented;
- if (max < child_augmented)
- max = child_augmented;
- }
- return max;
-}
+#define NODE_VAL(node) ((node)->val)
-RB_DECLARE_CALLBACKS(static, augment_callbacks, struct test_node, rb,
- u32, augmented, augment_recompute)
+RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
+ struct test_node, rb, u32, augmented, NODE_VAL)
static void insert_augmented(struct test_node *node,
struct rb_root_cached *root)
@@ -238,7 +222,20 @@ static void check_augmented(int nr_nodes)
check(nr_nodes);
for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
struct test_node *node = rb_entry(rb, struct test_node, rb);
- WARN_ON_ONCE(node->augmented != augment_recompute(node));
+ u32 subtree, max = node->val;
+ if (node->rb.rb_left) {
+ subtree = rb_entry(node->rb.rb_left, struct test_node,
+ rb)->augmented;
+ if (max < subtree)
+ max = subtree;
+ }
+ if (node->rb.rb_right) {
+ subtree = rb_entry(node->rb.rb_right, struct test_node,
+ rb)->augmented;
+ if (max < subtree)
+ max = subtree;
+ }
+ WARN_ON_ONCE(node->augmented != max);
}
}
diff --git a/lib/string.c b/lib/string.c
index 461fb620f85f..cd7a10c19210 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -173,8 +173,9 @@ EXPORT_SYMBOL(strlcpy);
* doesn't unnecessarily force the tail of the destination buffer to be
* zeroed. If zeroing is desired please use strscpy_pad().
*
- * Return: The number of characters copied (not including the trailing
- * %NUL) or -E2BIG if the destination buffer wasn't big enough.
+ * Returns:
+ * * The number of characters copied (not including the trailing %NUL)
+ * * -E2BIG if count is 0 or @src was truncated.
*/
ssize_t strscpy(char *dest, const char *src, size_t count)
{
@@ -182,7 +183,7 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
size_t max = count;
long res = 0;
- if (count == 0)
+ if (count == 0 || WARN_ON_ONCE(count > INT_MAX))
return -E2BIG;
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -253,8 +254,9 @@ EXPORT_SYMBOL(strscpy);
* For full explanation of why you may want to consider using the
* 'strscpy' functions please see the function docstring for strscpy().
*
- * Return: The number of characters copied (not including the trailing
- * %NUL) or -E2BIG if the destination buffer wasn't big enough.
+ * Returns:
+ * * The number of characters copied (not including the trailing %NUL)
+ * * -E2BIG if count is 0 or @src was truncated.
*/
ssize_t strscpy_pad(char *dest, const char *src, size_t count)
{
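
The clarified return contract is easiest to see in a simplified model:
characters copied on success, -E2BIG when count is 0 or the source had to be
truncated. A byte-at-a-time user-space model (my_strscpy() is a sketch; the
kernel version also copies word-at-a-time):

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

static ssize_t my_strscpy(char *dest, const char *src, size_t count)
{
	size_t i;

	if (count == 0)
		return -E2BIG;
	for (i = 0; i < count; i++) {
		dest[i] = src[i];
		if (src[i] == '\0')
			return i;	/* chars copied, NUL excluded */
	}
	dest[count - 1] = '\0';		/* always NUL-terminate */
	return -E2BIG;			/* truncated */
}

int main(void)
{
	char buf[4];

	printf("%zd\n", my_strscpy(buf, "hi", sizeof(buf)));	   /* 2 */
	printf("%zd\n", my_strscpy(buf, "too long", sizeof(buf))); /* -E2BIG */
	return 0;
}
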
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 023ba9f3b99f..dccb95af6003 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -6,6 +6,7 @@
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/errno.h>
+#include <linux/mm.h>
#include <asm/byteorder.h>
#include <asm/word-at-a-time.h>
@@ -108,7 +109,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
return 0;
max_addr = user_addr_max();
- src_addr = (unsigned long)src;
+ src_addr = (unsigned long)untagged_addr(src);
if (likely(src_addr < max_addr)) {
unsigned long max = max_addr - src_addr;
long retval;
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 7f2db3fe311f..28ff554a1be8 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -2,6 +2,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/uaccess.h>
+#include <linux/mm.h>
#include <asm/word-at-a-time.h>
@@ -109,7 +110,7 @@ long strnlen_user(const char __user *str, long count)
return 0;
max_addr = user_addr_max();
- src_addr = (unsigned long)str;
+ src_addr = (unsigned long)untagged_addr(str);
if (likely(src_addr < max_addr)) {
unsigned long max = max_addr - src_addr;
long retval;
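
The untagged_addr() calls here and in the mm/ hunks below all follow one
pattern: strip the arm64 top-byte tag from a user pointer before using it as
an address. Conceptually (simplified: the real helper only untags user-space
addresses and is a no-op on architectures without pointer tagging):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t untagged_addr(uint64_t addr)
{
	return addr & ~(0xffULL << 56);	/* clear the top-byte tag */
}

int main(void)
{
	uint64_t tagged = 0x2a00ffffd000ULL | (0x5fULL << 56);

	printf("tagged   0x%016" PRIx64 "\n", tagged);
	printf("untagged 0x%016" PRIx64 "\n", untagged_addr(tagged));
	return 0;
}
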
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
index c64dca6e27c2..c431ca81dad5 100644
--- a/mm/frame_vector.c
+++ b/mm/frame_vector.c
@@ -46,6 +46,8 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
if (WARN_ON_ONCE(nr_frames > vec->nr_allocated))
nr_frames = vec->nr_allocated;
+ start = untagged_addr(start);
+
down_read(&mm->mmap_sem);
locked = 1;
vma = find_vma_intersection(mm, start, start + 1);
diff --git a/mm/gup.c b/mm/gup.c
index 60c3915c8ee6..23a9f9c9d377 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -788,6 +788,8 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (!nr_pages)
return 0;
+ start = untagged_addr(start);
+
VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
/*
@@ -950,6 +952,8 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
struct vm_area_struct *vma;
vm_fault_t ret, major = 0;
+ address = untagged_addr(address);
+
if (unlocked)
fault_flags |= FAULT_FLAG_ALLOW_RETRY;
diff --git a/mm/internal.h b/mm/internal.h
index e32390802fd3..0d5f720c75ab 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -39,7 +39,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf);
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
unsigned long floor, unsigned long ceiling);
-static inline bool can_madv_dontneed_vma(struct vm_area_struct *vma)
+static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
}
diff --git a/mm/madvise.c b/mm/madvise.c
index 68ab988ad433..2be9f3fdb05e 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -11,6 +11,7 @@
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
+#include <linux/page_idle.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
@@ -31,6 +32,11 @@
#include "internal.h"
+struct madvise_walk_private {
+ struct mmu_gather *tlb;
+ bool pageout;
+};
+
/*
* Any behaviour which results in changes to the vma->vm_flags needs to
* take mmap_sem for writing. Others, which simply traverse vmas, need
@@ -42,6 +48,8 @@ static int madvise_need_mmap_write(int behavior)
case MADV_REMOVE:
case MADV_WILLNEED:
case MADV_DONTNEED:
+ case MADV_COLD:
+ case MADV_PAGEOUT:
case MADV_FREE:
return 0;
default:
@@ -289,6 +297,254 @@ static long madvise_willneed(struct vm_area_struct *vma,
return 0;
}
+static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
+ unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+{
+ struct madvise_walk_private *private = walk->private;
+ struct mmu_gather *tlb = private->tlb;
+ bool pageout = private->pageout;
+ struct mm_struct *mm = tlb->mm;
+ struct vm_area_struct *vma = walk->vma;
+ pte_t *orig_pte, *pte, ptent;
+ spinlock_t *ptl;
+ struct page *page = NULL;
+ LIST_HEAD(page_list);
+
+ if (fatal_signal_pending(current))
+ return -EINTR;
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (pmd_trans_huge(*pmd)) {
+ pmd_t orig_pmd;
+ unsigned long next = pmd_addr_end(addr, end);
+
+ tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
+ ptl = pmd_trans_huge_lock(pmd, vma);
+ if (!ptl)
+ return 0;
+
+ orig_pmd = *pmd;
+ if (is_huge_zero_pmd(orig_pmd))
+ goto huge_unlock;
+
+ if (unlikely(!pmd_present(orig_pmd))) {
+ VM_BUG_ON(thp_migration_supported() &&
+ !is_pmd_migration_entry(orig_pmd));
+ goto huge_unlock;
+ }
+
+ page = pmd_page(orig_pmd);
+ if (next - addr != HPAGE_PMD_SIZE) {
+ int err;
+
+ if (page_mapcount(page) != 1)
+ goto huge_unlock;
+
+ get_page(page);
+ spin_unlock(ptl);
+ lock_page(page);
+ err = split_huge_page(page);
+ unlock_page(page);
+ put_page(page);
+ if (!err)
+ goto regular_page;
+ return 0;
+ }
+
+ if (pmd_young(orig_pmd)) {
+ pmdp_invalidate(vma, addr, pmd);
+ orig_pmd = pmd_mkold(orig_pmd);
+
+ set_pmd_at(mm, addr, pmd, orig_pmd);
+ tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
+ }
+
+ ClearPageReferenced(page);
+ test_and_clear_page_young(page);
+ if (pageout) {
+ if (!isolate_lru_page(page))
+ list_add(&page->lru, &page_list);
+ } else
+ deactivate_page(page);
+huge_unlock:
+ spin_unlock(ptl);
+ if (pageout)
+ reclaim_pages(&page_list);
+ return 0;
+ }
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
+regular_page:
+#endif
+ tlb_change_page_size(tlb, PAGE_SIZE);
+ orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+ flush_tlb_batched_pending(mm);
+ arch_enter_lazy_mmu_mode();
+ for (; addr < end; pte++, addr += PAGE_SIZE) {
+ ptent = *pte;
+
+ if (pte_none(ptent))
+ continue;
+
+ if (!pte_present(ptent))
+ continue;
+
+ page = vm_normal_page(vma, addr, ptent);
+ if (!page)
+ continue;
+
+ /*
+ * Creating a THP page is expensive, so split it only if we
+ * are sure it's worth it. Split it only if we are the sole owner.
+ */
+ if (PageTransCompound(page)) {
+ if (page_mapcount(page) != 1)
+ break;
+ get_page(page);
+ if (!trylock_page(page)) {
+ put_page(page);
+ break;
+ }
+ pte_unmap_unlock(orig_pte, ptl);
+ if (split_huge_page(page)) {
+ unlock_page(page);
+ put_page(page);
+ pte_offset_map_lock(mm, pmd, addr, &ptl);
+ break;
+ }
+ unlock_page(page);
+ put_page(page);
+ pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ pte--;
+ addr -= PAGE_SIZE;
+ continue;
+ }
+
+ VM_BUG_ON_PAGE(PageTransCompound(page), page);
+
+ if (pte_young(ptent)) {
+ ptent = ptep_get_and_clear_full(mm, addr, pte,
+ tlb->fullmm);
+ ptent = pte_mkold(ptent);
+ set_pte_at(mm, addr, pte, ptent);
+ tlb_remove_tlb_entry(tlb, pte, addr);
+ }
+
+ /*
+ * We are deactivating a page to accelerate its reclaim.
+ * The VM cannot reclaim the page unless we clear PG_young.
+ * As a side effect, this confuses idle-page tracking,
+ * because it will miss the recent reference history.
+ */
+ ClearPageReferenced(page);
+ test_and_clear_page_young(page);
+ if (pageout) {
+ if (!isolate_lru_page(page))
+ list_add(&page->lru, &page_list);
+ } else
+ deactivate_page(page);
+ }
+
+ arch_leave_lazy_mmu_mode();
+ pte_unmap_unlock(orig_pte, ptl);
+ if (pageout)
+ reclaim_pages(&page_list);
+ cond_resched();
+
+ return 0;
+}
+
+static const struct mm_walk_ops cold_walk_ops = {
+ .pmd_entry = madvise_cold_or_pageout_pte_range,
+};
+
+static void madvise_cold_page_range(struct mmu_gather *tlb,
+ struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end)
+{
+ struct madvise_walk_private walk_private = {
+ .pageout = false,
+ .tlb = tlb,
+ };
+
+ tlb_start_vma(tlb, vma);
+ walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
+ tlb_end_vma(tlb, vma);
+}
+
+static long madvise_cold(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start_addr, unsigned long end_addr)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ struct mmu_gather tlb;
+
+ *prev = vma;
+ if (!can_madv_lru_vma(vma))
+ return -EINVAL;
+
+ lru_add_drain();
+ tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
+ madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
+ tlb_finish_mmu(&tlb, start_addr, end_addr);
+
+ return 0;
+}
+
+static void madvise_pageout_page_range(struct mmu_gather *tlb,
+ struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end)
+{
+ struct madvise_walk_private walk_private = {
+ .pageout = true,
+ .tlb = tlb,
+ };
+
+ tlb_start_vma(tlb, vma);
+ walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
+ tlb_end_vma(tlb, vma);
+}
+
+static inline bool can_do_pageout(struct vm_area_struct *vma)
+{
+ if (vma_is_anonymous(vma))
+ return true;
+ if (!vma->vm_file)
+ return false;
+ /*
+ * paging out pagecache only for non-anonymous mappings that correspond
+ * to the files the calling process could (if it tried) open for writing;
+ * otherwise we'd be including shared non-exclusive mappings, which
+ * opens a side channel.
+ */
+ return inode_owner_or_capable(file_inode(vma->vm_file)) ||
+ inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0;
+}
+
+static long madvise_pageout(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start_addr, unsigned long end_addr)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ struct mmu_gather tlb;
+
+ *prev = vma;
+ if (!can_madv_lru_vma(vma))
+ return -EINVAL;
+
+ if (!can_do_pageout(vma))
+ return 0;
+
+ lru_add_drain();
+ tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
+ madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
+ tlb_finish_mmu(&tlb, start_addr, end_addr);
+
+ return 0;
+}
+
static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
@@ -493,7 +749,7 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
int behavior)
{
*prev = vma;
- if (!can_madv_dontneed_vma(vma))
+ if (!can_madv_lru_vma(vma))
return -EINVAL;
if (!userfaultfd_remove(vma, start, end)) {
@@ -515,7 +771,7 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
*/
return -ENOMEM;
}
- if (!can_madv_dontneed_vma(vma))
+ if (!can_madv_lru_vma(vma))
return -EINVAL;
if (end > vma->vm_end) {
/*
@@ -669,6 +925,10 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
return madvise_remove(vma, prev, start, end);
case MADV_WILLNEED:
return madvise_willneed(vma, prev, start, end);
+ case MADV_COLD:
+ return madvise_cold(vma, prev, start, end);
+ case MADV_PAGEOUT:
+ return madvise_pageout(vma, prev, start, end);
case MADV_FREE:
case MADV_DONTNEED:
return madvise_dontneed_free(vma, prev, start, end, behavior);
@@ -690,6 +950,8 @@ madvise_behavior_valid(int behavior)
case MADV_WILLNEED:
case MADV_DONTNEED:
case MADV_FREE:
+ case MADV_COLD:
+ case MADV_PAGEOUT:
#ifdef CONFIG_KSM
case MADV_MERGEABLE:
case MADV_UNMERGEABLE:
@@ -784,6 +1046,8 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
size_t len;
struct blk_plug plug;
+ start = untagged_addr(start);
+
if (!madvise_behavior_valid(behavior))
return error;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2156ef775d04..c313c49074ca 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2943,6 +2943,16 @@ int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
!page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
+
+ /*
+ * Enforce __GFP_NOFAIL allocation because callers are not
+ * prepared to see failures and likely do not have any failure
+ * handling code.
+ */
+ if (gfp & __GFP_NOFAIL) {
+ page_counter_charge(&memcg->kmem, nr_pages);
+ return 0;
+ }
cancel_charge(memcg, nr_pages);
return -ENOMEM;
}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 464406e8da91..de27d08b1ff8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1405,6 +1405,7 @@ static long kernel_mbind(unsigned long start, unsigned long len,
int err;
unsigned short mode_flags;
+ start = untagged_addr(start);
mode_flags = mode & MPOL_MODE_FLAGS;
mode &= ~MPOL_MODE_FLAGS;
if (mode >= MPOL_MAX)
@@ -1558,6 +1559,8 @@ static int kernel_get_mempolicy(int __user *policy,
int uninitialized_var(pval);
nodemask_t nodes;
+ addr = untagged_addr(addr);
+
if (nmask != NULL && maxnode < nr_node_ids)
return -EINVAL;
diff --git a/mm/migrate.c b/mm/migrate.c
index 73d476d690b1..4fe45d1428c8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1612,7 +1612,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
goto out_flush;
if (get_user(node, nodes + i))
goto out_flush;
- addr = (unsigned long)p;
+ addr = (unsigned long)untagged_addr(p);
err = -ENODEV;
if (node < 0 || node >= MAX_NUMNODES)
diff --git a/mm/mincore.c b/mm/mincore.c
index f9a9dbe8cd33..49b6fa2f6aa1 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -256,6 +256,8 @@ SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
unsigned long pages;
unsigned char *tmp;
+ start = untagged_addr(start);
+
/* Check the start address: needs to be page-aligned.. */
if (start & ~PAGE_MASK)
return -EINVAL;
diff --git a/mm/mlock.c b/mm/mlock.c
index a90099da4fb4..a72c1eeded77 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -674,6 +674,8 @@ static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t fla
unsigned long lock_limit;
int error = -ENOMEM;
+ start = untagged_addr(start);
+
if (!can_do_mlock())
return -EPERM;
@@ -735,6 +737,8 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
int ret;
+ start = untagged_addr(start);
+
len = PAGE_ALIGN(len + (offset_in_page(start)));
start &= PAGE_MASK;
diff --git a/mm/mmap.c b/mm/mmap.c
index f1e8c7f93e04..a7d8c84d19b7 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -201,6 +201,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
bool downgraded = false;
LIST_HEAD(uf);
+ brk = untagged_addr(brk);
+
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
@@ -289,9 +291,9 @@ out:
return retval;
}
-static long vma_compute_subtree_gap(struct vm_area_struct *vma)
+static inline unsigned long vma_compute_gap(struct vm_area_struct *vma)
{
- unsigned long max, prev_end, subtree_gap;
+ unsigned long gap, prev_end;
/*
* Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
@@ -299,14 +301,21 @@ static long vma_compute_subtree_gap(struct vm_area_struct *vma)
* an unmapped area; whereas when expanding we only require one.
* That's a little inconsistent, but keeps the code here simpler.
*/
- max = vm_start_gap(vma);
+ gap = vm_start_gap(vma);
if (vma->vm_prev) {
prev_end = vm_end_gap(vma->vm_prev);
- if (max > prev_end)
- max -= prev_end;
+ if (gap > prev_end)
+ gap -= prev_end;
else
- max = 0;
+ gap = 0;
}
+ return gap;
+}
+
+#ifdef CONFIG_DEBUG_VM_RB
+static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma)
+{
+ unsigned long max = vma_compute_gap(vma), subtree_gap;
if (vma->vm_rb.rb_left) {
subtree_gap = rb_entry(vma->vm_rb.rb_left,
struct vm_area_struct, vm_rb)->rb_subtree_gap;
@@ -322,7 +331,6 @@ static long vma_compute_subtree_gap(struct vm_area_struct *vma)
return max;
}
-#ifdef CONFIG_DEBUG_VM_RB
static int browse_rb(struct mm_struct *mm)
{
struct rb_root *root = &mm->mm_rb;
@@ -428,8 +436,9 @@ static void validate_mm(struct mm_struct *mm)
#define validate_mm(mm) do { } while (0)
#endif
-RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb,
- unsigned long, rb_subtree_gap, vma_compute_subtree_gap)
+RB_DECLARE_CALLBACKS_MAX(static, vma_gap_callbacks,
+ struct vm_area_struct, vm_rb,
+ unsigned long, rb_subtree_gap, vma_compute_gap)
/*
* Update augmented rbtree rb_subtree_gap values after vma->vm_start or
@@ -439,8 +448,8 @@ RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb,
static void vma_gap_update(struct vm_area_struct *vma)
{
/*
- * As it turns out, RB_DECLARE_CALLBACKS() already created a callback
- * function that does exactly what we want.
+ * As it turns out, RB_DECLARE_CALLBACKS_MAX() already created
+ * a callback function that does exactly what we want.
*/
vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
}
@@ -1580,6 +1589,8 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
struct file *file = NULL;
unsigned long retval;
+ addr = untagged_addr(addr);
+
if (!(flags & MAP_ANONYMOUS)) {
audit_mmap_fd(fd, flags);
file = fget(fd);
@@ -2878,6 +2889,7 @@ EXPORT_SYMBOL(vm_munmap);
SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
+ addr = untagged_addr(addr);
profile_munmap(addr);
return __vm_munmap(addr, len, true);
}
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 675e5d34a507..7967825f6d33 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -459,6 +459,8 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
(prot & PROT_READ);
+ start = untagged_addr(start);
+
prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
return -EINVAL;
diff --git a/mm/mremap.c b/mm/mremap.c
index fc241d23cd97..1fc8a29fbe3f 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -606,6 +606,9 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
LIST_HEAD(uf_unmap_early);
LIST_HEAD(uf_unmap);
+ addr = untagged_addr(addr);
+ new_addr = untagged_addr(new_addr);
+
if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
return ret;
diff --git a/mm/msync.c b/mm/msync.c
index ef30a429623a..c3bd3e75f687 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -37,6 +37,8 @@ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
int unmapped_error = 0;
int error = -EINVAL;
+ start = untagged_addr(start);
+
if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
goto out;
if (offset_in_page(start))
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index c1d9496b4c43..71e3acea7817 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -523,7 +523,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm)
set_bit(MMF_UNSTABLE, &mm->flags);
for (vma = mm->mmap ; vma; vma = vma->vm_next) {
- if (!can_madv_dontneed_vma(vma))
+ if (!can_madv_lru_vma(vma))
continue;
/*
diff --git a/mm/swap.c b/mm/swap.c
index 784dc1620620..38c3fa4308e2 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -47,6 +47,7 @@ int page_cluster;
static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
+static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
@@ -538,6 +539,22 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
update_page_reclaim_stat(lruvec, file, 0);
}
+static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
+ void *arg)
+{
+ if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
+ int file = page_is_file_cache(page);
+ int lru = page_lru_base_type(page);
+
+ del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
+ ClearPageActive(page);
+ ClearPageReferenced(page);
+ add_page_to_lru_list(page, lruvec, lru);
+
+ __count_vm_events(PGDEACTIVATE, hpage_nr_pages(page));
+ update_page_reclaim_stat(lruvec, file, 0);
+ }
+}
static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
void *arg)
@@ -590,6 +607,10 @@ void lru_add_drain_cpu(int cpu)
if (pagevec_count(pvec))
pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
+ pvec = &per_cpu(lru_deactivate_pvecs, cpu);
+ if (pagevec_count(pvec))
+ pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+
pvec = &per_cpu(lru_lazyfree_pvecs, cpu);
if (pagevec_count(pvec))
pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
@@ -623,6 +644,26 @@ void deactivate_file_page(struct page *page)
}
}
+/*
+ * deactivate_page - deactivate a page
+ * @page: page to deactivate
+ *
+ * deactivate_page() moves @page to the inactive list if @page was on the active
+ * list and was not an unevictable page. This is done to accelerate the reclaim
+ * of @page.
+ */
+void deactivate_page(struct page *page)
+{
+ if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
+ struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+
+ get_page(page);
+ if (!pagevec_add(pvec, page) || PageCompound(page))
+ pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+ put_cpu_var(lru_deactivate_pvecs);
+ }
+}
+
/**
* mark_page_lazyfree - make an anon page lazyfree
* @page: page to deactivate
@@ -687,6 +728,7 @@ void lru_add_drain_all(void)
if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
+ pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
need_activate_page_drain(cpu)) {
INIT_WORK(work, lru_add_drain_per_cpu);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index fcadd3e25c0c..a3c70e275f4e 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -396,9 +396,8 @@ compute_subtree_max_size(struct vmap_area *va)
get_subtree_max_size(va->rb_node.rb_right));
}
-RB_DECLARE_CALLBACKS(static, free_vmap_area_rb_augment_cb,
- struct vmap_area, rb_node, unsigned long, subtree_max_size,
- compute_subtree_max_size)
+RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
+ struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4911754c93b7..e5d52d6a24af 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1123,7 +1123,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
struct scan_control *sc,
enum ttu_flags ttu_flags,
struct reclaim_stat *stat,
- bool force_reclaim)
+ bool ignore_references)
{
LIST_HEAD(ret_pages);
LIST_HEAD(free_pages);
@@ -1137,7 +1137,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
struct address_space *mapping;
struct page *page;
int may_enter_fs;
- enum page_references references = PAGEREF_RECLAIM_CLEAN;
+ enum page_references references = PAGEREF_RECLAIM;
bool dirty, writeback;
unsigned int nr_pages;
@@ -1268,7 +1268,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
}
}
- if (!force_reclaim)
+ if (!ignore_references)
references = page_check_references(page, sc);
switch (references) {
@@ -2145,6 +2145,62 @@ static void shrink_active_list(unsigned long nr_to_scan,
nr_deactivate, nr_rotated, sc->priority, file);
}
+unsigned long reclaim_pages(struct list_head *page_list)
+{
+ int nid = -1;
+ unsigned long nr_reclaimed = 0;
+ LIST_HEAD(node_page_list);
+ struct reclaim_stat dummy_stat;
+ struct page *page;
+ struct scan_control sc = {
+ .gfp_mask = GFP_KERNEL,
+ .priority = DEF_PRIORITY,
+ .may_writepage = 1,
+ .may_unmap = 1,
+ .may_swap = 1,
+ };
+
+ while (!list_empty(page_list)) {
+ page = lru_to_page(page_list);
+ if (nid == -1) {
+ nid = page_to_nid(page);
+ INIT_LIST_HEAD(&node_page_list);
+ }
+
+ if (nid == page_to_nid(page)) {
+ ClearPageActive(page);
+ list_move(&page->lru, &node_page_list);
+ continue;
+ }
+
+ nr_reclaimed += shrink_page_list(&node_page_list,
+ NODE_DATA(nid),
+ &sc, 0,
+ &dummy_stat, false);
+ while (!list_empty(&node_page_list)) {
+ page = lru_to_page(&node_page_list);
+ list_del(&page->lru);
+ putback_lru_page(page);
+ }
+
+ nid = -1;
+ }
+
+ if (!list_empty(&node_page_list)) {
+ nr_reclaimed += shrink_page_list(&node_page_list,
+ NODE_DATA(nid),
+ &sc, 0,
+ &dummy_stat, false);
+ while (!list_empty(&node_page_list)) {
+ page = lru_to_page(&node_page_list);
+ list_del(&page->lru);
+ putback_lru_page(page);
+ }
+ }
+
+ return nr_reclaimed;
+}
+
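A hedged sketch of the intended calling pattern (the MADV_PAGEOUT walker does something of this shape; the function and its parameters here are illustrative): candidates are isolated onto a private list, then the whole batch is handed to reclaim_pages(), which puts back anything it could not free.

/* illustrative only */
static unsigned long pageout_batch(struct page **pages, int nr)
{
	LIST_HEAD(page_list);
	int i;

	for (i = 0; i < nr; i++)
		if (!isolate_lru_page(pages[i]))	/* 0 == isolated, ref held */
			list_add(&pages[i]->lru, &page_list);

	return reclaim_pages(&page_list);
}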
/*
* The inactive anon list should be small enough that the VM never has
* to do too much work.
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 93a7edfe0f05..6fcc66afb088 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -62,6 +62,8 @@ my $conststructsfile = "$D/const_structs.checkpatch";
my $typedefsfile = "";
my $color = "auto";
my $allow_c99_comments = 1; # Can be overridden by --ignore C99_COMMENT_TOLERANCE
+# git output parsing needs US English output, so set the backtick child process LANGUAGE first
+my $git_command = 'export LANGUAGE=en_US.UTF-8; git';
sub help {
my ($exitcode) = @_;
@@ -904,7 +906,7 @@ sub seed_camelcase_includes {
$camelcase_seeded = 1;
if (-e ".git") {
- my $git_last_include_commit = `git log --no-merges --pretty=format:"%h%n" -1 -- include`;
+ my $git_last_include_commit = `${git_command} log --no-merges --pretty=format:"%h%n" -1 -- include`;
chomp $git_last_include_commit;
$camelcase_cache = ".checkpatch-camelcase.git.$git_last_include_commit";
} else {
@@ -932,7 +934,7 @@ sub seed_camelcase_includes {
}
if (-e ".git") {
- $files = `git ls-files "include/*.h"`;
+ $files = `${git_command} ls-files "include/*.h"`;
@include_files = split('\n', $files);
}
@@ -956,13 +958,13 @@ sub git_commit_info {
return ($id, $desc) if ((which("git") eq "") || !(-e ".git"));
- my $output = `git log --no-color --format='%H %s' -1 $commit 2>&1`;
+ my $output = `${git_command} log --no-color --format='%H %s' -1 $commit 2>&1`;
$output =~ s/^\s*//gm;
my @lines = split("\n", $output);
return ($id, $desc) if ($#lines < 0);
- if ($lines[0] =~ /^error: short SHA1 $commit is ambiguous\./) {
+ if ($lines[0] =~ /^error: short SHA1 $commit is ambiguous/) {
# Maybe one day convert this block of bash into something that returns
# all matching commit ids, but it's very slow...
#
@@ -1006,7 +1008,7 @@ if ($git) {
} else {
$git_range = "-1 $commit_expr";
}
- my $lines = `git log --no-color --no-merges --pretty=format:'%H %s' $git_range`;
+ my $lines = `${git_command} log --no-color --no-merges --pretty=format:'%H %s' $git_range`;
foreach my $line (split(/\n/, $lines)) {
$line =~ /^([0-9a-fA-F]{40,40}) (.*)$/;
next if (!defined($1) || !defined($2));
@@ -2725,8 +2727,10 @@ sub process {
($line =~ /^\s*(?:WARNING:|BUG:)/ ||
$line =~ /^\s*\[\s*\d+\.\d{6,6}\s*\]/ ||
# timestamp
- $line =~ /^\s*\[\<[0-9a-fA-F]{8,}\>\]/)) {
- # stack dump address
+ $line =~ /^\s*\[\<[0-9a-fA-F]{8,}\>\]/) ||
+ $line =~ /^(?:\s+\w+:\s+[0-9a-fA-F]+){3,3}/ ||
+ $line =~ /^\s*\#\d+\s*\[[0-9a-fA-F]+\]\s*\w+ at [0-9a-fA-F]+/) {
+ # stack dump address styles
$commit_log_possible_stack_dump = 1;
}
@@ -2898,6 +2902,17 @@ sub process {
}
}
+# check for invalid commit id
+ if ($in_commit_log && $line =~ /(^fixes:|\bcommit)\s+([0-9a-f]{6,40})\b/i) {
+ my $id;
+ my $description;
+ ($id, $description) = git_commit_info($2, undef, undef);
+ if (!defined($id)) {
+ WARN("UNKNOWN_COMMIT_ID",
+ "Unknown commit id '$2', maybe rebased or not pulled?\n" . $herecurr);
+ }
+ }
+
# ignore non-hunk lines and lines being removed
next if (!$hunk_line || $line =~ /^-/);
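For example, with these made-up commit-log lines, the new check warns only when git_commit_info() cannot resolve the id in the local tree:

    Fixes: 1234567890ab ("mm: some change")     # resolves locally: no warning
    Fixes: deadbeefcafe ("mm: rebased change")  # unresolvable: UNKNOWN_COMMIT_ID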
@@ -3069,21 +3084,21 @@ sub process {
# check SPDX comment style for .[chsS] files
if ($realfile =~ /\.[chsS]$/ &&
$rawline =~ /SPDX-License-Identifier:/ &&
- $rawline !~ /^\+\s*\Q$comment\E\s*/) {
+ $rawline !~ m@^\+\s*\Q$comment\E\s*@) {
WARN("SPDX_LICENSE_TAG",
"Improper SPDX comment style for '$realfile', please use '$comment' instead\n" . $herecurr);
}
if ($comment !~ /^$/ &&
- $rawline !~ /^\+\Q$comment\E SPDX-License-Identifier: /) {
- WARN("SPDX_LICENSE_TAG",
- "Missing or malformed SPDX-License-Identifier tag in line $checklicenseline\n" . $herecurr);
+ $rawline !~ m@^\+\Q$comment\E SPDX-License-Identifier: @) {
+ WARN("SPDX_LICENSE_TAG",
+ "Missing or malformed SPDX-License-Identifier tag in line $checklicenseline\n" . $herecurr);
} elsif ($rawline =~ /(SPDX-License-Identifier: .*)/) {
- my $spdx_license = $1;
- if (!is_SPDX_License_valid($spdx_license)) {
- WARN("SPDX_LICENSE_TAG",
- "'$spdx_license' is not supported in LICENSES/...\n" . $herecurr);
- }
+ my $spdx_license = $1;
+ if (!is_SPDX_License_valid($spdx_license)) {
+ WARN("SPDX_LICENSE_TAG",
+ "'$spdx_license' is not supported in LICENSES/...\n" . $herecurr);
+ }
}
}
}
@@ -4660,7 +4675,7 @@ sub process {
# closing brace should have a space following it when it has anything
# on the line
- if ($line =~ /}(?!(?:,|;|\)))\S/) {
+ if ($line =~ /}(?!(?:,|;|\)|\}))\S/) {
if (ERROR("SPACING",
"space required after that close brace '}'\n" . $herecurr) &&
$fix) {
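The added \} in the negative lookahead stops false positives on back-to-back closing braces; a contrived C sketch of code that previously drew a spurious SPACING error:

static const struct of_device_id foo_ids[] = {
	{ .compatible = "acme,foo" },
	{}};	/* '}' followed by '}' is now accepted */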
@@ -5191,7 +5206,7 @@ sub process {
next if ($arg =~ /\.\.\./);
next if ($arg =~ /^type$/i);
my $tmp_stmt = $define_stmt;
- $tmp_stmt =~ s/\b(typeof|__typeof__|__builtin\w+|typecheck\s*\(\s*$Type\s*,|\#+)\s*\(*\s*$arg\s*\)*\b//g;
+ $tmp_stmt =~ s/\b(sizeof|typeof|__typeof__|__builtin\w+|typecheck\s*\(\s*$Type\s*,|\#+)\s*\(*\s*$arg\s*\)*\b//g;
$tmp_stmt =~ s/\#+\s*$arg\b//g;
$tmp_stmt =~ s/\b$arg\s*\#\#//g;
my $use_cnt = () = $tmp_stmt =~ /\b$arg\b/g;
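Adding sizeof to the stripped wrappers means a macro argument referenced only inside sizeof() no longer counts toward the reuse count; a contrived sketch of a macro this stops flagging:

/* previously two counted uses of 'x' (possible side-effect warning);
 * now the sizeof(x) occurrence is ignored */
#define COPY_IN(dst, x)	memcpy((dst), &(x), sizeof(x))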
@@ -5873,6 +5888,18 @@ sub process {
"__aligned(size) is preferred over __attribute__((aligned(size)))\n" . $herecurr);
}
+# Check for __attribute__ section, prefer __section
+ if ($realfile !~ m@\binclude/uapi/@ &&
+ $line =~ /\b__attribute__\s*\(\s*\(.*_*section_*\s*\(\s*("[^"]*")/) {
+ my $old = substr($rawline, $-[1], $+[1] - $-[1]);
+ my $new = substr($old, 1, -1);
+ if (WARN("PREFER_SECTION",
+ "__section($new) is preferred over __attribute__((section($old)))\n" . $herecurr) &&
+ $fix) {
+ $fixed[$fixlinenr] =~ s/\b__attribute__\s*\(\s*\(\s*_*section_*\s*\(\s*\Q$old\E\s*\)\s*\)\s*\)/__section($new)/;
+ }
+ }
+
# Check for __attribute__ format(printf, prefer __printf
if ($realfile !~ m@\binclude/uapi/@ &&
$line =~ /\b__attribute__\s*\(\s*\(\s*format\s*\(\s*printf/) {
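A sketch of the rewrite the new PREFER_SECTION rule performs with --fix; the quotes are dropped because, as of this series, the __section() wrapper stringifies its argument:

static int boot_flag __attribute__((section(".data.boot")));
/* becomes */
static int boot_flag __section(.data.boot);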
@@ -6480,6 +6507,12 @@ sub process {
"Using $1 should generally have parentheses around the comparison\n" . $herecurr);
}
+# nested likely/unlikely calls
+ if ($line =~ /\b(?:(?:un)?likely)\s*\(\s*!?\s*(IS_ERR(?:_OR_NULL|_VALUE)?|WARN)/) {
+ WARN("LIKELY_MISUSE",
+ "nested (un)?likely() calls, $1 already uses unlikely() internally\n" . $herecurr);
+ }
+
# whine mightily about in_atomic
if ($line =~ /\bin_atomic\s*\(/) {
if ($realfile =~ m@^drivers/@) {
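A contrived example of the pattern the new LIKELY_MISUSE check flags; IS_ERR() and WARN() already contain unlikely() internally, so wrapping them again is redundant:

struct clk *clk = clk_get(dev, NULL);	/* illustrative only */

if (unlikely(IS_ERR(clk)))	/* warned: nested (un)?likely() call */
	return PTR_ERR(clk);

if (IS_ERR(clk))		/* preferred form */
	return PTR_ERR(clk);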
diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
index 2f5b95f09fa0..34e40e96dee2 100644
--- a/scripts/gdb/linux/symbols.py
+++ b/scripts/gdb/linux/symbols.py
@@ -77,12 +77,12 @@ lx-symbols command."""
gdb.write("scanning for modules in {0}\n".format(path))
for root, dirs, files in os.walk(path):
for name in files:
- if name.endswith(".ko"):
+ if name.endswith(".ko") or name.endswith(".ko.debug"):
self.module_files.append(root + "/" + name)
self.module_files_updated = True
def _get_module_file(self, module_name):
- module_pattern = ".*/{0}\.ko$".format(
+ module_pattern = ".*/{0}\.ko(?:\.debug)?$".format(
module_name.replace("_", r"[_\-]"))
for name in self.module_files:
if re.match(module_pattern, name) and os.path.exists(name):
diff --git a/tools/include/linux/rbtree.h b/tools/include/linux/rbtree.h
index d83763a5327c..e03b1ea23e0e 100644
--- a/tools/include/linux/rbtree.h
+++ b/tools/include/linux/rbtree.h
@@ -31,25 +31,9 @@ struct rb_root {
struct rb_node *rb_node;
};
-/*
- * Leftmost-cached rbtrees.
- *
- * We do not cache the rightmost node based on footprint
- * size vs number of potential users that could benefit
- * from O(1) rb_last(). Just not worth it, users that want
- * this feature can always implement the logic explicitly.
- * Furthermore, users that want to cache both pointers may
- * find it a bit asymmetric, but that's ok.
- */
-struct rb_root_cached {
- struct rb_root rb_root;
- struct rb_node *rb_leftmost;
-};
-
#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
#define RB_ROOT (struct rb_root) { NULL, }
-#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
#define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL)
@@ -71,12 +55,6 @@ extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
-extern void rb_insert_color_cached(struct rb_node *,
- struct rb_root_cached *, bool);
-extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *);
-/* Same as rb_first(), but O(1) */
-#define rb_first_cached(root) (root)->rb_leftmost
-
/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);
@@ -84,8 +62,6 @@ extern struct rb_node *rb_next_postorder(const struct rb_node *);
/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
-extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
- struct rb_root_cached *root);
static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link)
@@ -129,4 +105,51 @@ static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
rb_erase(n, root);
RB_CLEAR_NODE(n);
}
+
+/*
+ * Leftmost-cached rbtrees.
+ *
+ * We do not cache the rightmost node based on footprint
+ * size vs number of potential users that could benefit
+ * from O(1) rb_last(). Just not worth it, users that want
+ * this feature can always implement the logic explicitly.
+ * Furthermore, users that want to cache both pointers may
+ * find it a bit asymmetric, but that's ok.
+ */
+struct rb_root_cached {
+ struct rb_root rb_root;
+ struct rb_node *rb_leftmost;
+};
+
+#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
+
+/* Same as rb_first(), but O(1) */
+#define rb_first_cached(root) (root)->rb_leftmost
+
+static inline void rb_insert_color_cached(struct rb_node *node,
+ struct rb_root_cached *root,
+ bool leftmost)
+{
+ if (leftmost)
+ root->rb_leftmost = node;
+ rb_insert_color(node, &root->rb_root);
+}
+
+static inline void rb_erase_cached(struct rb_node *node,
+ struct rb_root_cached *root)
+{
+ if (root->rb_leftmost == node)
+ root->rb_leftmost = rb_next(node);
+ rb_erase(node, &root->rb_root);
+}
+
+static inline void rb_replace_node_cached(struct rb_node *victim,
+ struct rb_node *new,
+ struct rb_root_cached *root)
+{
+ if (root->rb_leftmost == victim)
+ root->rb_leftmost = new;
+ rb_replace_node(victim, new, &root->rb_root);
+}
+
#endif /* __TOOLS_LINUX_PERF_RBTREE_H */
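A hedged usage sketch of the cached flavour now available in the tools copy (types and field names here are illustrative, not from this patch): the tree is keyed as usual, and the caller tracks whether the new node stayed leftmost the whole way down.

struct range {
	struct rb_node node;
	unsigned long start;
};

static struct rb_root_cached ranges = RB_ROOT_CACHED;

static void range_insert(struct range *r)
{
	struct rb_node **link = &ranges.rb_root.rb_node, *parent = NULL;
	bool leftmost = true;

	while (*link) {
		struct range *e = rb_entry(*link, struct range, node);

		parent = *link;
		if (r->start < e->start) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;	/* went right at least once */
		}
	}
	rb_link_node(&r->node, parent, link);
	rb_insert_color_cached(&r->node, &ranges, leftmost);
}

/* O(1) smallest element, versus O(log n) rb_first() */
static struct range *range_first(void)
{
	struct rb_node *n = rb_first_cached(&ranges);

	return n ? rb_entry(n, struct range, node) : NULL;
}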
diff --git a/tools/include/linux/rbtree_augmented.h b/tools/include/linux/rbtree_augmented.h
index ddd01006ece5..381aa948610d 100644
--- a/tools/include/linux/rbtree_augmented.h
+++ b/tools/include/linux/rbtree_augmented.h
@@ -32,17 +32,16 @@ struct rb_augment_callbacks {
void (*rotate)(struct rb_node *old, struct rb_node *new);
};
-extern void __rb_insert_augmented(struct rb_node *node,
- struct rb_root *root,
- bool newleft, struct rb_node **leftmost,
+extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
+
/*
* Fixup the rbtree and update the augmented information when rebalancing.
*
* On insertion, the user must update the augmented information on the path
* leading to the inserted node, then call rb_link_node() as usual and
- * rb_augment_inserted() instead of the usual rb_insert_color() call.
- * If rb_augment_inserted() rebalances the rbtree, it will callback into
+ * rb_insert_augmented() instead of the usual rb_insert_color() call.
+ * If rb_insert_augmented() rebalances the rbtree, it will callback into
* a user provided function to update the augmented information on the
* affected subtrees.
*/
@@ -50,7 +49,7 @@ static inline void
rb_insert_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
- __rb_insert_augmented(node, root, false, NULL, augment->rotate);
+ __rb_insert_augmented(node, root, augment->rotate);
}
static inline void
@@ -58,45 +57,92 @@ rb_insert_augmented_cached(struct rb_node *node,
struct rb_root_cached *root, bool newleft,
const struct rb_augment_callbacks *augment)
{
- __rb_insert_augmented(node, &root->rb_root,
- newleft, &root->rb_leftmost, augment->rotate);
+ if (newleft)
+ root->rb_leftmost = node;
+ rb_insert_augmented(node, &root->rb_root, augment);
}
-#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \
- rbtype, rbaugmented, rbcompute) \
+/*
+ * Template for declaring augmented rbtree callbacks (generic case)
+ *
+ * RBSTATIC: 'static' or empty
+ * RBNAME: name of the rb_augment_callbacks structure
+ * RBSTRUCT: struct type of the tree nodes
+ * RBFIELD: name of struct rb_node field within RBSTRUCT
+ * RBAUGMENTED: name of field within RBSTRUCT holding data for subtree
+ * RBCOMPUTE: name of function that recomputes the RBAUGMENTED data
+ */
+
+#define RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \
+ RBSTRUCT, RBFIELD, RBAUGMENTED, RBCOMPUTE) \
static inline void \
-rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \
+RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop) \
{ \
while (rb != stop) { \
- rbstruct *node = rb_entry(rb, rbstruct, rbfield); \
- rbtype augmented = rbcompute(node); \
- if (node->rbaugmented == augmented) \
+ RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \
+ if (RBCOMPUTE(node, true)) \
break; \
- node->rbaugmented = augmented; \
- rb = rb_parent(&node->rbfield); \
+ rb = rb_parent(&node->RBFIELD); \
} \
} \
static inline void \
-rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \
+RBNAME ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \
{ \
- rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
- rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
- new->rbaugmented = old->rbaugmented; \
+ RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
+ RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
+ new->RBAUGMENTED = old->RBAUGMENTED; \
} \
static void \
-rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
+RBNAME ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
{ \
- rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
- rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
- new->rbaugmented = old->rbaugmented; \
- old->rbaugmented = rbcompute(old); \
+ RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
+ RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
+ new->RBAUGMENTED = old->RBAUGMENTED; \
+ RBCOMPUTE(old, false); \
} \
-rbstatic const struct rb_augment_callbacks rbname = { \
- .propagate = rbname ## _propagate, \
- .copy = rbname ## _copy, \
- .rotate = rbname ## _rotate \
+RBSTATIC const struct rb_augment_callbacks RBNAME = { \
+ .propagate = RBNAME ## _propagate, \
+ .copy = RBNAME ## _copy, \
+ .rotate = RBNAME ## _rotate \
};
+/*
+ * Template for declaring augmented rbtree callbacks,
+ * computing RBAUGMENTED scalar as max(RBCOMPUTE(node)) for all subtree nodes.
+ *
+ * RBSTATIC: 'static' or empty
+ * RBNAME: name of the rb_augment_callbacks structure
+ * RBSTRUCT: struct type of the tree nodes
+ * RBFIELD: name of struct rb_node field within RBSTRUCT
+ * RBTYPE: type of the RBAUGMENTED field
+ * RBAUGMENTED: name of RBTYPE field within RBSTRUCT holding data for subtree
+ * RBCOMPUTE: name of function that returns the per-node RBTYPE scalar
+ */
+
+#define RB_DECLARE_CALLBACKS_MAX(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, \
+ RBTYPE, RBAUGMENTED, RBCOMPUTE) \
+static inline bool RBNAME ## _compute_max(RBSTRUCT *node, bool exit) \
+{ \
+ RBSTRUCT *child; \
+ RBTYPE max = RBCOMPUTE(node); \
+ if (node->RBFIELD.rb_left) { \
+ child = rb_entry(node->RBFIELD.rb_left, RBSTRUCT, RBFIELD); \
+ if (child->RBAUGMENTED > max) \
+ max = child->RBAUGMENTED; \
+ } \
+ if (node->RBFIELD.rb_right) { \
+ child = rb_entry(node->RBFIELD.rb_right, RBSTRUCT, RBFIELD); \
+ if (child->RBAUGMENTED > max) \
+ max = child->RBAUGMENTED; \
+ } \
+ if (exit && node->RBAUGMENTED == max) \
+ return true; \
+ node->RBAUGMENTED = max; \
+ return false; \
+} \
+RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \
+ RBSTRUCT, RBFIELD, RBAUGMENTED, RBNAME ## _compute_max)
+
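A hedged sketch of the new template in use, modelled on the vmalloc conversion earlier in this series ('struct item' and item_size() are illustrative names):

struct item {
	struct rb_node rb;
	unsigned long size;
	unsigned long subtree_max_size;		/* RBAUGMENTED */
};

static inline unsigned long item_size(struct item *i)	/* RBCOMPUTE */
{
	return i->size;
}

RB_DECLARE_CALLBACKS_MAX(static, item_cb,
			 struct item, rb, unsigned long,
			 subtree_max_size, item_size)

/* insert side: update subtree_max_size on the path down, then
 * rb_link_node(&i->rb, parent, link);
 * rb_insert_augmented(&i->rb, &root, &item_cb);
 */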
#define RB_RED 0
#define RB_BLACK 1
@@ -139,7 +185,6 @@ extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
static __always_inline struct rb_node *
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
- struct rb_node **leftmost,
const struct rb_augment_callbacks *augment)
{
struct rb_node *child = node->rb_right;
@@ -147,9 +192,6 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
struct rb_node *parent, *rebalance;
unsigned long pc;
- if (leftmost && node == *leftmost)
- *leftmost = rb_next(node);
-
if (!tmp) {
/*
* Case 1: node to erase has no more than 1 child (easy!)
@@ -249,8 +291,7 @@ static __always_inline void
rb_erase_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
- struct rb_node *rebalance = __rb_erase_augmented(node, root,
- NULL, augment);
+ struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
if (rebalance)
__rb_erase_color(rebalance, root, augment->rotate);
}
@@ -259,11 +300,9 @@ static __always_inline void
rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root,
const struct rb_augment_callbacks *augment)
{
- struct rb_node *rebalance = __rb_erase_augmented(node, &root->rb_root,
- &root->rb_leftmost,
- augment);
- if (rebalance)
- __rb_erase_color(rebalance, &root->rb_root, augment->rotate);
+ if (root->rb_leftmost == node)
+ root->rb_leftmost = rb_next(node);
+ rb_erase_augmented(node, &root->rb_root, augment);
}
#endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */
diff --git a/tools/lib/rbtree.c b/tools/lib/rbtree.c
index 804f145e3113..2548ff8c4d9c 100644
--- a/tools/lib/rbtree.c
+++ b/tools/lib/rbtree.c
@@ -83,14 +83,10 @@ __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
static __always_inline void
__rb_insert(struct rb_node *node, struct rb_root *root,
- bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
- if (newleft)
- *leftmost = node;
-
while (true) {
/*
* Loop invariant: node is red.
@@ -436,34 +432,17 @@ static const struct rb_augment_callbacks dummy_callbacks = {
void rb_insert_color(struct rb_node *node, struct rb_root *root)
{
- __rb_insert(node, root, false, NULL, dummy_rotate);
+ __rb_insert(node, root, dummy_rotate);
}
void rb_erase(struct rb_node *node, struct rb_root *root)
{
struct rb_node *rebalance;
- rebalance = __rb_erase_augmented(node, root,
- NULL, &dummy_callbacks);
+ rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
if (rebalance)
____rb_erase_color(rebalance, root, dummy_rotate);
}
-void rb_insert_color_cached(struct rb_node *node,
- struct rb_root_cached *root, bool leftmost)
-{
- __rb_insert(node, &root->rb_root, leftmost,
- &root->rb_leftmost, dummy_rotate);
-}
-
-void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
-{
- struct rb_node *rebalance;
- rebalance = __rb_erase_augmented(node, &root->rb_root,
- &root->rb_leftmost, &dummy_callbacks);
- if (rebalance)
- ____rb_erase_color(rebalance, &root->rb_root, dummy_rotate);
-}
-
/*
* Augmented rbtree manipulation functions.
*
@@ -472,10 +451,9 @@ void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
*/
void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
- bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
- __rb_insert(node, root, newleft, leftmost, augment_rotate);
+ __rb_insert(node, root, augment_rotate);
}
/*
@@ -580,15 +558,6 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
__rb_change_child(victim, new, parent, root);
}
-void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
- struct rb_root_cached *root)
-{
- rb_replace_node(victim, new, &root->rb_root);
-
- if (root->rb_leftmost == victim)
- root->rb_leftmost = new;
-}
-
static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
{
for (;;) {