| author | Ingo Molnar <mingo@kernel.org> | 2013-08-12 19:51:43 +0200 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2013-08-12 19:51:43 +0200 |
| commit | 6356bb0ad6525dae93c06478a098ed3848e9ab01 (patch) | |
| tree | 8fdbda28405fb0b1c213f2a87c532a760510adf0 /mm | |
| parent | 7781544e7c367d0cae87feb0f0675fd333bfc9d8 (diff) | |
| parent | 0ca06c0857aee11911f91621db14498496f2c2cd (diff) | |
Merge tag 'please-pull-mce-f-bit' of git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras into x86/ras
Pull MCE-uncorrected-error fix from Tony Luck:
 "Bit 12 may or may not be set in MCi_STATUS.MCACOD when
  an uncorrected error is reported. Ignore it when checking
  error signatures."
Signed-off-by: Ingo Molnar <mingo@kernel.org>
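
For context, the pulled fix boils down to masking bit 12 out of the MCACOD field before comparing an error against the severity-table signatures. A minimal sketch of that masking follows; the names (MCACOD_MASK, mcacod_matches) are illustrative, not the kernel's actual identifiers:

```c
#include <stdbool.h>
#include <stdint.h>

/*
 * MCACOD lives in the low 16 bits of MCi_STATUS. Per the pull request
 * above, bit 12 may or may not be set when an uncorrected error is
 * reported, so signature checks must ignore it. MCACOD_MASK and
 * mcacod_matches() are illustrative names, not kernel API.
 */
#define MCACOD_MASK 0xefffu	/* low 16 bits with bit 12 cleared */

static bool mcacod_matches(uint64_t mci_status, uint16_t signature)
{
	/* Compare only the bits that are stable across reports. */
	return (mci_status & MCACOD_MASK) == (signature & MCACOD_MASK);
}
```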
Diffstat (limited to 'mm')
| -rw-r--r-- | mm/huge_memory.c | 4 |
| -rw-r--r-- | mm/memcontrol.c | 1 |
| -rw-r--r-- | mm/mempolicy.c | 6 |
| -rw-r--r-- | mm/mmap.c | 2 |
| -rw-r--r-- | mm/shmem.c | 3 |
| -rw-r--r-- | mm/swap.c | 29 |
| -rw-r--r-- | mm/vmpressure.c | 28 |
| -rw-r--r-- | mm/zbud.c | 2 |
8 files changed, 39 insertions, 36 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 243e710c6039..a92012a71702 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1620,7 +1620,9 @@ static void __split_huge_page_refcount(struct page *page,
 				     ((1L << PG_referenced) |
 				      (1L << PG_swapbacked) |
 				      (1L << PG_mlocked) |
-				      (1L << PG_uptodate)));
+				      (1L << PG_uptodate) |
+				      (1L << PG_active) |
+				      (1L << PG_unevictable)));
 		page_tail->flags |= (1L << PG_dirty);
 
 		/* clear PageTail before overwriting first_page */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 00a7a664b9c1..c290a1cf3862 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6335,6 +6335,7 @@ static void mem_cgroup_css_offline(struct cgroup *cont)
 	mem_cgroup_invalidate_reclaim_iterators(memcg);
 	mem_cgroup_reparent_charges(memcg);
 	mem_cgroup_destroy_all_caches(memcg);
+	vmpressure_cleanup(&memcg->vmpressure);
 }
 
 static void mem_cgroup_css_free(struct cgroup *cont)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 74310017296e..4baf12e534d1 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -732,7 +732,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
 		if (prev) {
 			vma = prev;
 			next = vma->vm_next;
-			continue;
+			if (mpol_equal(vma_policy(vma), new_pol))
+				continue;
+			/* vma_merge() joined vma && vma->next, case 8 */
+			goto replace;
 		}
 		if (vma->vm_start != vmstart) {
 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
@@ -744,6 +747,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
 			if (err)
 				goto out;
 		}
+ replace:
 		err = vma_replace_policy(vma, new_pol);
 		if (err)
 			goto out;
diff --git a/mm/mmap.c b/mm/mmap.c
index fbad7b091090..1edbaa3136c3 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -865,7 +865,7 @@ again:			remove_next = 1 + (end > next->vm_end);
 		if (next->anon_vma)
 			anon_vma_merge(vma, next);
 		mm->map_count--;
-		vma_set_policy(vma, vma_policy(next));
+		mpol_put(vma_policy(next));
 		kmem_cache_free(vm_area_cachep, next);
 		/*
 		 * In mprotect's case 6 (see comments on vma_merge),
diff --git a/mm/shmem.c b/mm/shmem.c
index a87990cf9f94..8335dbd3fc35 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1798,7 +1798,8 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
 		}
 	}
 
-	offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
+	if (offset >= 0)
+		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
 	mutex_unlock(&inode->i_mutex);
 	return offset;
 }
diff --git a/mm/swap.c b/mm/swap.c
index 4a1d0d2c52fa..62b78a6e224f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -512,12 +512,7 @@ EXPORT_SYMBOL(__lru_cache_add);
  */
 void lru_cache_add(struct page *page)
 {
-	if (PageActive(page)) {
-		VM_BUG_ON(PageUnevictable(page));
-	} else if (PageUnevictable(page)) {
-		VM_BUG_ON(PageActive(page));
-	}
-
+	VM_BUG_ON(PageActive(page) && PageUnevictable(page));
 	VM_BUG_ON(PageLRU(page));
 	__lru_cache_add(page);
 }
@@ -539,6 +534,7 @@ void add_page_to_unevictable_list(struct page *page)
 
 	spin_lock_irq(&zone->lru_lock);
 	lruvec = mem_cgroup_page_lruvec(page, zone);
+	ClearPageActive(page);
 	SetPageUnevictable(page);
 	SetPageLRU(page);
 	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
@@ -774,8 +770,6 @@ EXPORT_SYMBOL(__pagevec_release);
 void lru_add_page_tail(struct page *page, struct page *page_tail,
 		       struct lruvec *lruvec, struct list_head *list)
 {
-	int uninitialized_var(active);
-	enum lru_list lru;
 	const int file = 0;
 
 	VM_BUG_ON(!PageHead(page));
@@ -787,20 +781,6 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 	if (!list)
 		SetPageLRU(page_tail);
 
-	if (page_evictable(page_tail)) {
-		if (PageActive(page)) {
-			SetPageActive(page_tail);
-			active = 1;
-			lru = LRU_ACTIVE_ANON;
-		} else {
-			active = 0;
-			lru = LRU_INACTIVE_ANON;
-		}
-	} else {
-		SetPageUnevictable(page_tail);
-		lru = LRU_UNEVICTABLE;
-	}
-
 	if (likely(PageLRU(page)))
 		list_add_tail(&page_tail->lru, &page->lru);
 	else if (list) {
@@ -816,13 +796,13 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 		 * Use the standard add function to put page_tail on the list,
 		 * but then correct its position so they all end up in order.
 		 */
-		add_page_to_lru_list(page_tail, lruvec, lru);
+		add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
 		list_head = page_tail->lru.prev;
 		list_move_tail(&page_tail->lru, list_head);
 	}
 
 	if (!PageUnevictable(page))
-		update_page_reclaim_stat(lruvec, file, active);
+		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
 }
 
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -833,7 +813,6 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 	int active = PageActive(page);
 	enum lru_list lru = page_lru(page);
 
-	VM_BUG_ON(PageUnevictable(page));
 	VM_BUG_ON(PageLRU(page));
 
 	SetPageLRU(page);
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 736a6011c2c8..0c1e37d829fa 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -180,12 +180,12 @@ static void vmpressure_work_fn(struct work_struct *work)
 	if (!vmpr->scanned)
 		return;
 
-	mutex_lock(&vmpr->sr_lock);
+	spin_lock(&vmpr->sr_lock);
 	scanned = vmpr->scanned;
 	reclaimed = vmpr->reclaimed;
 	vmpr->scanned = 0;
 	vmpr->reclaimed = 0;
-	mutex_unlock(&vmpr->sr_lock);
+	spin_unlock(&vmpr->sr_lock);
 
 	do {
 		if (vmpressure_event(vmpr, scanned, reclaimed))
@@ -240,13 +240,13 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
 	if (!scanned)
 		return;
 
-	mutex_lock(&vmpr->sr_lock);
+	spin_lock(&vmpr->sr_lock);
 	vmpr->scanned += scanned;
 	vmpr->reclaimed += reclaimed;
 	scanned = vmpr->scanned;
-	mutex_unlock(&vmpr->sr_lock);
+	spin_unlock(&vmpr->sr_lock);
 
-	if (scanned < vmpressure_win || work_pending(&vmpr->work))
+	if (scanned < vmpressure_win)
 		return;
 	schedule_work(&vmpr->work);
 }
@@ -367,8 +367,24 @@ void vmpressure_unregister_event(struct cgroup *cg, struct cftype *cft,
  */
 void vmpressure_init(struct vmpressure *vmpr)
 {
-	mutex_init(&vmpr->sr_lock);
+	spin_lock_init(&vmpr->sr_lock);
 	mutex_init(&vmpr->events_lock);
 	INIT_LIST_HEAD(&vmpr->events);
 	INIT_WORK(&vmpr->work, vmpressure_work_fn);
 }
+
+/**
+ * vmpressure_cleanup() - shuts down vmpressure control structure
+ * @vmpr:	Structure to be cleaned up
+ *
+ * This function should be called before the structure in which it is
+ * embedded is cleaned up.
+ */
+void vmpressure_cleanup(struct vmpressure *vmpr)
+{
+	/*
+	 * Make sure there is no pending work before eventfd infrastructure
+	 * goes away.
+	 */
+	flush_work(&vmpr->work);
+}
diff --git a/mm/zbud.c b/mm/zbud.c
index 9bb4710e3589..ad1e781284fd 100644
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -257,7 +257,7 @@ int zbud_alloc(struct zbud_pool *pool, int size, gfp_t gfp,
 
 	if (size <= 0 || gfp & __GFP_HIGHMEM)
 		return -EINVAL;
-	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED)
+	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
 		return -ENOSPC;
 	chunks = size_to_chunks(size);
 	spin_lock(&pool->lock);
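
The vmpressure hunks above convert the statistics lock to a spinlock (the critical sections are tiny and nothing in them sleeps), drop the work_pending() test (schedule_work() already refuses to double-queue), and add a flush_work()-based cleanup hooked into mem_cgroup_css_offline() so no report can run after the memcg's eventfd infrastructure is gone. A standalone sketch of that accumulate-then-defer pattern follows; all names here (pressure_stats, stats_*, STATS_WINDOW) are hypothetical, not kernel API:

```c
/*
 * Sketch of the pattern the vmpressure diff settles on: counters
 * accumulated under a spinlock, reporting deferred to a workqueue,
 * flush_work() in the teardown path. Hypothetical names throughout.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define STATS_WINDOW 512	/* hypothetical reporting threshold */

struct pressure_stats {
	spinlock_t lock;		/* protects the two counters */
	unsigned long scanned;
	unsigned long reclaimed;
	struct work_struct work;	/* deferred reporting */
};

static void stats_work_fn(struct work_struct *work)
{
	struct pressure_stats *ps =
		container_of(work, struct pressure_stats, work);
	unsigned long scanned, reclaimed;

	/* Snapshot and reset under the lock; report outside it. */
	spin_lock(&ps->lock);
	scanned = ps->scanned;
	reclaimed = ps->reclaimed;
	ps->scanned = 0;
	ps->reclaimed = 0;
	spin_unlock(&ps->lock);

	/* ... deliver scanned/reclaimed to listeners here ... */
}

static void stats_init(struct pressure_stats *ps)
{
	spin_lock_init(&ps->lock);
	ps->scanned = 0;
	ps->reclaimed = 0;
	INIT_WORK(&ps->work, stats_work_fn);
}

static void stats_account(struct pressure_stats *ps,
			  unsigned long scanned, unsigned long reclaimed)
{
	spin_lock(&ps->lock);
	ps->scanned += scanned;
	ps->reclaimed += reclaimed;
	scanned = ps->scanned;
	spin_unlock(&ps->lock);

	/*
	 * No work_pending() check: schedule_work() is a no-op when the
	 * work is already queued, so the extra test only added a race.
	 */
	if (scanned >= STATS_WINDOW)
		schedule_work(&ps->work);
}

static void stats_cleanup(struct pressure_stats *ps)
{
	/* Wait out any in-flight report before the struct goes away. */
	flush_work(&ps->work);
}
```

The memcontrol.c hunk plays the role of stats_cleanup() here: callers must have stopped queueing new work by the time it runs, and the flush guarantees the last report finishes before the embedding structure is freed.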