Diffstat (limited to 'mm')
-rw-r--r-- | mm/debug_vm_pgtable.c | 2
-rw-r--r-- | mm/frontswap.c        | 2
-rw-r--r-- | mm/ksm.c              | 2
-rw-r--r-- | mm/memcontrol.c       | 2
-rw-r--r-- | mm/memory.c           | 2
-rw-r--r-- | mm/mempolicy.c        | 4
-rw-r--r-- | mm/page_alloc.c       | 2
-rw-r--r-- | mm/percpu.c           | 2
-rw-r--r-- | mm/slub.c             | 4
-rw-r--r-- | mm/swap.c             | 4
10 files changed, 13 insertions, 13 deletions
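
Every hunk below removes a use of the old uninitialized_var() annotation. For reference only (not part of this patch), the GCC-side definition was roughly the following self-assignment, which silences -Wmaybe-uninitialized without actually giving the variable a defined value and can therefore hide real use-before-initialization bugs:

    /* Historical helper, shown here for context only (not part of this diff). */
    #define uninitialized_var(x) x = x

So a declaration such as spinlock_t *uninitialized_var(ptl); expanded to spinlock_t *ptl = ptl;. The replacements either drop the annotation entirely or, where the initialization flow is harder to follow, add an explicit initializer as in debug_vm_pgtable().
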
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index 61ab16fb2e36..d315ff544f05 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -307,7 +307,7 @@ static int __init debug_vm_pgtable(void)
 	phys_addr_t paddr;
 	unsigned long vaddr, pte_aligned, pmd_aligned;
 	unsigned long pud_aligned, p4d_aligned, pgd_aligned;
-	spinlock_t *uninitialized_var(ptl);
+	spinlock_t *ptl = NULL;
 
 	pr_info("Validating architecture page table helpers\n");
 	prot = vm_get_page_prot(VMFLAGS);
diff --git a/mm/frontswap.c b/mm/frontswap.c
index bfa3a339253e..9d977b1fc016 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -446,7 +446,7 @@ static int __frontswap_shrink(unsigned long target_pages,
 void frontswap_shrink(unsigned long target_pages)
 {
 	unsigned long pages_to_unuse = 0;
-	int uninitialized_var(type), ret;
+	int type, ret;
 
 	/*
 	 * we don't want to hold swap_lock while doing a very
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2387,7 +2387,7 @@ next_mm:
 static void ksm_do_scan(unsigned int scan_npages)
 {
 	struct rmap_item *rmap_item;
-	struct page *uninitialized_var(page);
+	struct page *page;
 
 	while (scan_npages-- && likely(!freezing(current))) {
 		cond_resched();
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 13f559af1ab6..8cc617ede7e2 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1004,7 +1004,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 				   struct mem_cgroup *prev,
 				   struct mem_cgroup_reclaim_cookie *reclaim)
 {
-	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
+	struct mem_cgroup_reclaim_iter *iter;
 	struct cgroup_subsys_state *css = NULL;
 	struct mem_cgroup *memcg = NULL;
 	struct mem_cgroup *pos = NULL;
diff --git a/mm/memory.c b/mm/memory.c
index 6e9903d3f096..0da48f6586f8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2205,7 +2205,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 {
 	pte_t *pte;
 	int err = 0;
-	spinlock_t *uninitialized_var(ptl);
+	spinlock_t *ptl;
 
 	if (create) {
 		pte = (mm == &init_mm) ?
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 381320671677..b9e85d467352 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1234,7 +1234,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 static struct page *new_page(struct page *page, unsigned long start)
 {
 	struct vm_area_struct *vma;
-	unsigned long uninitialized_var(address);
+	unsigned long address;
 
 	vma = find_vma(current->mm, start);
 	while (vma) {
@@ -1629,7 +1629,7 @@ static int kernel_get_mempolicy(int __user *policy,
 			unsigned long flags)
 {
 	int err;
-	int uninitialized_var(pval);
+	int pval;
 	nodemask_t nodes;
 
 	addr = untagged_addr(addr);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e028b87ce294..901a21f61d68 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -961,7 +961,7 @@ static inline void __free_one_page(struct page *page,
 		int migratetype, bool report)
 {
 	struct capture_control *capc = task_capc(zone);
-	unsigned long uninitialized_var(buddy_pfn);
+	unsigned long buddy_pfn;
 	unsigned long combined_pfn;
 	unsigned int max_order;
 	struct page *buddy;
diff --git a/mm/percpu.c b/mm/percpu.c
index 696367b18222..b626766160ce 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2513,7 +2513,7 @@ static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
 	const size_t static_size = __per_cpu_end - __per_cpu_start;
 	int nr_groups = 1, nr_units = 0;
 	size_t size_sum, min_unit_size, alloc_size;
-	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
+	int upa, max_upa, best_upa;	/* units_per_alloc */
 	int last_allocs, group, unit;
 	unsigned int cpu, tcpu;
 	struct pcpu_alloc_info *ai;
diff --git a/mm/slub.c b/mm/slub.c
index ef303070d175..f226d66408ee 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1218,7 +1218,7 @@ static noinline int free_debug_processing(
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 	void *object = head;
 	int cnt = 0;
-	unsigned long uninitialized_var(flags);
+	unsigned long flags;
 	int ret = 0;
 
 	spin_lock_irqsave(&n->list_lock, flags);
@@ -2901,7 +2901,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	struct page new;
 	unsigned long counters;
 	struct kmem_cache_node *n = NULL;
-	unsigned long uninitialized_var(flags);
+	unsigned long flags;
 
 	stat(s, FREE_SLOWPATH);
diff --git a/mm/swap.c b/mm/swap.c
index a82efc33411f..de257c0a89b1 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -830,8 +830,8 @@ void release_pages(struct page **pages, int nr)
 	LIST_HEAD(pages_to_free);
 	struct pglist_data *locked_pgdat = NULL;
 	struct lruvec *lruvec;
-	unsigned long uninitialized_var(flags);
-	unsigned int uninitialized_var(lock_batch);
+	unsigned long flags;
+	unsigned int lock_batch;
 
 	for (i = 0; i < nr; i++) {
 		struct page *page = pages[i];
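
The replacement pattern is the same across all ten files: delete the wrapper and rely on the compiler (and reviewers) to check that every read of the variable is preceded by a write, or initialize explicitly when that is hard to prove. A minimal standalone sketch of the two shapes, using a hypothetical lookup() helper rather than code from the files above:

    #include <stdio.h>

    /* Hypothetical helper: writes *out only on success. */
    static int lookup(int key, int *out)
    {
    	if (key < 0)
    		return -1;	/* failure: *out is left untouched */
    	*out = key * 2;
    	return 0;
    }

    int main(void)
    {
    	/* Shape 1: plain declaration; val is only read after the
    	 * success check, so no annotation is needed to keep the
    	 * compiler quiet. */
    	int val;

    	if (lookup(3, &val) == 0)
    		printf("val = %d\n", val);

    	/* Shape 2: explicit initialization when the assignment path
    	 * is less obvious, mirroring "spinlock_t *ptl = NULL;" in
    	 * debug_vm_pgtable() above. */
    	int *entry = NULL;

    	if (entry)
    		printf("entry = %d\n", *entry);
    	return 0;
    }
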