Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig            4
-rw-r--r--  mm/filemap.c          4
-rw-r--r--  mm/hugetlb.c          8
-rw-r--r--  mm/nommu.c            2
-rw-r--r--  mm/page-writeback.c   2
-rw-r--r--  mm/page_alloc.c      53
-rw-r--r--  mm/slab.c            13
-rw-r--r--  mm/util.c             6
-rw-r--r--  mm/vmalloc.c          6
9 files changed, 54 insertions, 44 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 5d88489ef2de..db7c55de92cd 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -92,7 +92,7 @@ config HAVE_MEMORY_PRESENT
#
# SPARSEMEM_EXTREME (which is the default) does some bootmem
-# allocations when memory_present() is called. If this can not
+# allocations when memory_present() is called. If this cannot
# be done on your architecture, select this option. However,
# statically allocating the mem_section[] array can potentially
# consume vast quantities of .bss, so be careful.
@@ -104,7 +104,7 @@ config SPARSEMEM_STATIC
def_bool n
#
-# Architectecture platforms which require a two level mem_section in SPARSEMEM
+# Architecture platforms which require a two level mem_section in SPARSEMEM
# must select this option. This is usually for architecture platforms with
# an extremely sparse physical address space.
#
diff --git a/mm/filemap.c b/mm/filemap.c
index fef7d879ddf5..3464b681f844 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1139,11 +1139,11 @@ success:
}
/**
- * __generic_file_aio_read - generic filesystem read routine
+ * generic_file_aio_read - generic filesystem read routine
* @iocb: kernel I/O control block
* @iov: io vector request
* @nr_segs: number of segments in the iovec
- * @ppos: current file position
+ * @pos: current file position
*
* This is the "read()" routine for all filesystems
* that can use the page cache directly.
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7c7d03dbf73d..1d709ff528e1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -364,6 +364,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
pte_t *ptep;
pte_t pte;
struct page *page;
+ struct page *tmp;
+ LIST_HEAD(page_list);
WARN_ON(!is_vm_hugetlb_page(vma));
BUG_ON(start & ~HPAGE_MASK);
@@ -384,12 +386,16 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
continue;
page = pte_page(pte);
- put_page(page);
+ list_add(&page->lru, &page_list);
add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
}
spin_unlock(&mm->page_table_lock);
flush_tlb_range(vma, start, end);
+ list_for_each_entry_safe(page, tmp, &page_list, lru) {
+ list_del(&page->lru);
+ put_page(page);
+ }
}
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
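As an aside, not part of the patch itself: a minimal sketch of the defer-and-free idiom the hugetlb hunk above adopts. Unmapped pages are collected on a local list while mm->page_table_lock is held, and put_page() only runs after flush_tlb_range(), so a huge page is never returned to the allocator while another CPU could still hold a live TLB entry mapping it.

	LIST_HEAD(page_list);
	struct page *page, *tmp;

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		/* ... clear the PTE, then queue the page instead of dropping it ... */
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);

	flush_tlb_range(vma, start, end);	/* stale translations are gone now */
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);			/* safe: no CPU can still reach the page */
	}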
diff --git a/mm/nommu.c b/mm/nommu.c
index 365019599df8..8bdde9508f3b 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -221,7 +221,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
* Allocate enough pages to cover @size from the page level
* allocator and map them into continguos kernel virtual space.
*
- * For tight cotrol over page level allocator and protection flags
+ * For tight control over page level allocator and protection flags
* use __vmalloc() instead.
*/
void *vmalloc(unsigned long size)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index c0d4ce144dec..a0f339057449 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1,5 +1,5 @@
/*
- * mm/page-writeback.c.
+ * mm/page-writeback.c
*
* Copyright (C) 2002, Linus Torvalds.
*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4f59d90b81e6..a8c003e7b3d5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -900,7 +900,8 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
int classzone_idx, int alloc_flags)
{
/* free_pages my go negative - that's OK */
- long min = mark, free_pages = z->free_pages - (1 << order) + 1;
+ unsigned long min = mark;
+ long free_pages = z->free_pages - (1 << order) + 1;
int o;
if (alloc_flags & ALLOC_HIGH)
@@ -2050,8 +2051,8 @@ int __init early_pfn_to_nid(unsigned long pfn)
/**
* free_bootmem_with_active_regions - Call free_bootmem_node for each active range
- * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed
- * @max_low_pfn: The highest PFN that till be passed to free_bootmem_node
+ * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
+ * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
*
* If an architecture guarantees that all ranges registered with
* add_active_ranges() contain no holes and may be freed, this
@@ -2081,11 +2082,11 @@ void __init free_bootmem_with_active_regions(int nid,
/**
* sparse_memory_present_with_active_regions - Call memory_present for each active range
- * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used
+ * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
*
* If an architecture guarantees that all ranges registered with
* add_active_ranges() contain no holes and may be freed, this
- * this function may be used instead of calling memory_present() manually.
+ * function may be used instead of calling memory_present() manually.
*/
void __init sparse_memory_present_with_active_regions(int nid)
{
@@ -2155,14 +2156,14 @@ static void __init account_node_boundary(unsigned int nid,
/**
* get_pfn_range_for_nid - Return the start and end page frames for a node
- * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned
- * @start_pfn: Passed by reference. On return, it will have the node start_pfn
- * @end_pfn: Passed by reference. On return, it will have the node end_pfn
+ * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
+ * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
+ * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
*
* It returns the start and end page frame of a node based on information
* provided by an arch calling add_active_range(). If called for a node
* with no available memory, a warning is printed and the start and end
- * PFNs will be 0
+ * PFNs will be 0.
*/
void __init get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn)
@@ -2215,7 +2216,7 @@ unsigned long __init zone_spanned_pages_in_node(int nid,
/*
* Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
- * then all holes in the requested range will be accounted for
+ * then all holes in the requested range will be accounted for.
*/
unsigned long __init __absent_pages_in_range(int nid,
unsigned long range_start_pfn,
@@ -2268,7 +2269,7 @@ unsigned long __init __absent_pages_in_range(int nid,
* @start_pfn: The start PFN to start searching for holes
* @end_pfn: The end PFN to stop searching for holes
*
- * It returns the number of pages frames in memory holes within a range
+ * It returns the number of pages frames in memory holes within a range.
*/
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn)
@@ -2582,11 +2583,12 @@ void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
/**
* remove_all_active_ranges - Remove all currently registered regions
+ *
* During discovery, it may be found that a table like SRAT is invalid
* and an alternative discovery method must be used. This function removes
* all currently registered regions.
*/
-void __init remove_all_active_ranges()
+void __init remove_all_active_ranges(void)
{
memset(early_node_map, 0, sizeof(early_node_map));
nr_nodemap_entries = 0;
@@ -2636,7 +2638,7 @@ unsigned long __init find_min_pfn_for_node(unsigned long nid)
* find_min_pfn_with_active_regions - Find the minimum PFN registered
*
* It returns the minimum PFN based on information provided via
- * add_active_range()
+ * add_active_range().
*/
unsigned long __init find_min_pfn_with_active_regions(void)
{
@@ -2647,7 +2649,7 @@ unsigned long __init find_min_pfn_with_active_regions(void)
* find_max_pfn_with_active_regions - Find the maximum PFN registered
*
* It returns the maximum PFN based on information provided via
- * add_active_range()
+ * add_active_range().
*/
unsigned long __init find_max_pfn_with_active_regions(void)
{
@@ -2662,10 +2664,7 @@ unsigned long __init find_max_pfn_with_active_regions(void)
/**
* free_area_init_nodes - Initialise all pg_data_t and zone data
- * @arch_max_dma_pfn: The maximum PFN usable for ZONE_DMA
- * @arch_max_dma32_pfn: The maximum PFN usable for ZONE_DMA32
- * @arch_max_low_pfn: The maximum PFN usable for ZONE_NORMAL
- * @arch_max_high_pfn: The maximum PFN usable for ZONE_HIGHMEM
+ * @max_zone_pfn: an array of max PFNs for each zone
*
* This will call free_area_init_node() for each active node in the system.
* Using the page ranges provided by add_active_range(), the size of each
@@ -2723,14 +2722,15 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
/**
- * set_dma_reserve - Account the specified number of pages reserved in ZONE_DMA
- * @new_dma_reserve - The number of pages to mark reserved
+ * set_dma_reserve - set the specified number of pages reserved in the first zone
+ * @new_dma_reserve: The number of pages to mark reserved
*
* The per-cpu batchsize and zone watermarks are determined by present_pages.
* In the DMA zone, a significant percentage may be consumed by kernel image
* and other unfreeable allocations which can skew the watermarks badly. This
- * function may optionally be used to account for unfreeable pages in
- * ZONE_DMA. The effect will be lower watermarks and smaller per-cpu batchsize
+ * function may optionally be used to account for unfreeable pages in the
+ * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
+ * smaller per-cpu batchsize.
*/
void __init set_dma_reserve(unsigned long new_dma_reserve)
{
@@ -2843,10 +2843,11 @@ static void setup_per_zone_lowmem_reserve(void)
calculate_totalreserve_pages();
}
-/*
- * setup_per_zone_pages_min - called when min_free_kbytes changes. Ensures
- * that the pages_{min,low,high} values for each zone are set correctly
- * with respect to min_free_kbytes.
+/**
+ * setup_per_zone_pages_min - called when min_free_kbytes changes.
+ *
+ * Ensures that the pages_{min,low,high} values for each zone are set correctly
+ * with respect to min_free_kbytes.
*/
void setup_per_zone_pages_min(void)
{
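As an aside, not part of the patch itself: the kernel-doc comments touched above describe the add_active_range()/free_area_init_nodes() interface for architectures. A rough sketch of how arch setup code is expected to use it; the PFN limits (MAX_DMA_PFN, max_low_pfn) are placeholders standing in for whatever the architecture actually computes.

	unsigned long max_zone_pfns[MAX_NR_ZONES];

	/* Register each node's usable PFN range during early boot. */
	add_active_range(0, start_pfn, end_pfn);

	/* Then hand the per-zone upper bounds to the generic initialiser. */
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA]    = MAX_DMA_PFN;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);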
diff --git a/mm/slab.c b/mm/slab.c
index 3dbd6f4e7477..c23b99250df2 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3488,22 +3488,25 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
}
+#ifdef CONFIG_DEBUG_SLAB
void *__kmalloc(size_t size, gfp_t flags)
{
-#ifndef CONFIG_DEBUG_SLAB
- return __do_kmalloc(size, flags, NULL);
-#else
return __do_kmalloc(size, flags, __builtin_return_address(0));
-#endif
}
EXPORT_SYMBOL(__kmalloc);
-#ifdef CONFIG_DEBUG_SLAB
void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
{
return __do_kmalloc(size, flags, caller);
}
EXPORT_SYMBOL(__kmalloc_track_caller);
+
+#else
+void *__kmalloc(size_t size, gfp_t flags)
+{
+ return __do_kmalloc(size, flags, NULL);
+}
+EXPORT_SYMBOL(__kmalloc);
#endif
/**
diff --git a/mm/util.c b/mm/util.c
index e14fa84ef39a..ace2aea69f1a 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -11,7 +11,7 @@
*/
void *__kzalloc(size_t size, gfp_t flags)
{
- void *ret = ____kmalloc(size, flags);
+ void *ret = kmalloc_track_caller(size, flags);
if (ret)
memset(ret, 0, size);
return ret;
@@ -33,7 +33,7 @@ char *kstrdup(const char *s, gfp_t gfp)
return NULL;
len = strlen(s) + 1;
- buf = ____kmalloc(len, gfp);
+ buf = kmalloc_track_caller(len, gfp);
if (buf)
memcpy(buf, s, len);
return buf;
@@ -51,7 +51,7 @@ void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
void *p;
- p = ____kmalloc(len, gfp);
+ p = kmalloc_track_caller(len, gfp);
if (p)
memcpy(p, src, len);
return p;
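As an aside, not part of the patch itself: the switch from ____kmalloc() to kmalloc_track_caller() above pairs with the slab.c hunk that exports __kmalloc_track_caller(). With CONFIG_DEBUG_SLAB, allocations made through kstrdup()/kmemdup()/__kzalloc() are then attributed to their caller rather than to mm/util.c. Roughly, as a paraphrase of the slab.h wiring of this era rather than a verbatim quote:

	#ifdef CONFIG_DEBUG_SLAB
	#define kmalloc_track_caller(size, flags) \
		__kmalloc_track_caller(size, flags, __builtin_return_address(0))
	#else
	#define kmalloc_track_caller(size, flags) \
		__kmalloc(size, flags)
	#endif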
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1ac191ce5641..750ab6ed13fc 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -503,7 +503,7 @@ EXPORT_SYMBOL(__vmalloc);
* Allocate enough pages to cover @size from the page level
* allocator and map them into contiguous kernel virtual space.
*
- * For tight cotrol over page level allocator and protection flags
+ * For tight control over page level allocator and protection flags
* use __vmalloc() instead.
*/
void *vmalloc(unsigned long size)
@@ -542,7 +542,7 @@ EXPORT_SYMBOL(vmalloc_user);
* Allocate enough pages to cover @size from the page level
* allocator and map them into contiguous kernel virtual space.
*
- * For tight cotrol over page level allocator and protection flags
+ * For tight control over page level allocator and protection flags
* use __vmalloc() instead.
*/
void *vmalloc_node(unsigned long size, int node)
@@ -563,7 +563,7 @@ EXPORT_SYMBOL(vmalloc_node);
* the page level allocator and map them into contiguous and
* executable kernel virtual space.
*
- * For tight cotrol over page level allocator and protection flags
+ * For tight control over page level allocator and protection flags
* use __vmalloc() instead.
*/