author     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>  2016-12-12 16:42:43 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>      2016-12-12 18:55:07 -0800
commit     692a68c1544d6be4ba7c6e929e9c7b2ba0447b91 (patch)
tree       a8d1ee7cb797cb64a260c229b8ce67926657166a /arch/ia64
parent     07e326610e5634e5038fce32fff370949eb42101 (diff)
download   linux-692a68c1544d6be4ba7c6e929e9c7b2ba0447b91.tar.bz2
mm: remove the page size change check in tlb_remove_page
Now that we check for page size change early in the loop, we can partially revert e9d55e157034a ("mm: change the interface for __tlb_remove_page"). This simplifies the code considerably by removing the need to track the last address with which we adjusted the range. We also go back to the older way of filling the mmu_gather array, i.e. we add an entry and then check whether the gather batch is full.

Link: http://lkml.kernel.org/r/20161026084839.27299-6-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
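To illustrate the batch-filling order the message describes, here is a minimal, self-contained C sketch of the add-then-check pattern; the struct, field, and function names below are invented for illustration and are not the kernel's mmu_gather API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct page;				/* opaque here; only pointers are stored */

/* Toy gather batch, loosely modelled on the idea, not on struct mmu_gather. */
struct gather {
	struct page *pages[4];		/* tiny capacity so flushes are easy to see */
	size_t nr;			/* entries currently queued */
	size_t max;			/* capacity of pages[], 4 here */
};

/* Stand-in for the real "flush the TLB, then free the queued pages" step. */
static void gather_flush(struct gather *g)
{
	printf("flushing %zu pages\n", g->nr);
	g->nr = 0;
}

/*
 * Add-then-check: queue the page first, then report whether the batch just
 * became full.  The pre-patch interface instead checked for fullness before
 * adding, which forced callers to retry the add after flushing.
 */
static bool gather_add(struct gather *g, struct page *page)
{
	g->pages[g->nr++] = page;
	return g->nr == g->max;		/* true: caller should flush now */
}

/* The caller flushes only when gather_add() reports the batch filled up. */
static void gather_remove_page(struct gather *g, struct page *page)
{
	if (gather_add(g, page))
		gather_flush(g);
}

With this ordering the caller never has to re-issue the add after a flush, which is why the second __tlb_remove_page() call disappears from tlb_remove_page() in the diff below.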
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/include/asm/tlb.h | 16
1 file changed, 4 insertions, 12 deletions
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index bfe6295aa746..fced197b9626 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -207,15 +207,15 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
  */
 static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-	if (tlb->nr == tlb->max)
-		return true;
-
 	tlb->need_flush = 1;
 	if (!tlb->nr && tlb->pages == tlb->local)
 		__tlb_alloc_page(tlb);
 	tlb->pages[tlb->nr++] = page;
+	VM_WARN_ON(tlb->nr > tlb->max);
+	if (tlb->nr == tlb->max)
+		return true;
 	return false;
 }
@@ -236,10 +236,8 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-	if (__tlb_remove_page(tlb, page)) {
+	if (__tlb_remove_page(tlb, page))
 		tlb_flush_mmu(tlb);
-		__tlb_remove_page(tlb, page);
-	}
 }
 
 static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
@@ -248,12 +246,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
 	return __tlb_remove_page(tlb, page);
 }
 
-static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
-					 struct page *page)
-{
-	return __tlb_remove_page(tlb, page);
-}
-
 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 					struct page *page, int page_size)
 {