author     David S. Miller <davem@davemloft.net>   2018-04-01 19:49:34 -0400
committer  David S. Miller <davem@davemloft.net>   2018-04-01 19:49:34 -0400
commit     c0b458a9463bd6be165374a8e9e3235800ee132e (patch)
tree       a96c6393749ab231c6dda8c62683493bd1c66070 /mm
parent     859a59352e926315b6384c5fd895b00a30659a12 (diff)
parent     b5dbc28762fd3fd40ba76303be0c7f707826f982 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Minor conflicts in drivers/net/ethernet/mellanox/mlx5/core/en_rep.c,
where we had some overlapping changes (a hedged sketch of the combined
resolution follows the sign-off below):

1) In 'net', MLX5E_PARAMS_LOG_{SQ,RQ}_SIZE -->
   MLX5E_REP_PARAMS_LOG_{SQ,RQ}_SIZE

2) In 'net-next', params->log_rq_size is renamed to
   params->log_rq_mtu_frames.

3) In 'net-next', params->hard_mtu is added.
Signed-off-by: David S. Miller <davem@davemloft.net>
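
For illustration only, a hedged sketch of how those three overlapping
changes combine. The statements below are hypothetical in shape (they do
not reproduce the actual en_rep.c lines), and MLX5E_ETH_HARD_MTU stands
in for whatever value 'net-next' assigns to the new field:

	/* 'net' side: the LOG_{SQ,RQ}_SIZE constants gained a REP_ infix */
	params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
	params->log_rq_size = MLX5E_REP_PARAMS_LOG_RQ_SIZE;

	/* 'net-next' side: log_rq_size was renamed, hard_mtu was added */
	params->log_rq_mtu_frames = MLX5E_PARAMS_LOG_RQ_SIZE;
	params->hard_mtu = MLX5E_ETH_HARD_MTU;

	/* merge resolution keeps both: the renamed constants from 'net'
	 * feed the renamed/new fields from 'net-next' */
	params->log_sq_size       = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
	params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE;
	params->hard_mtu          = MLX5E_ETH_HARD_MTU;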
Diffstat (limited to 'mm')
-rw-r--r--   mm/kmemleak.c   | 12 +++++++-----
-rw-r--r--   mm/memcontrol.c |  6 +++---
-rw-r--r--   mm/page_owner.c |  6 +++---
-rw-r--r--   mm/slab.c       |  1 +
-rw-r--r--   mm/vmstat.c     |  2 ++
5 files changed, 16 insertions(+), 11 deletions(-)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index e83987c55a08..46c2290a08f1 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1657,8 +1657,7 @@ static void start_scan_thread(void)
 }
 
 /*
- * Stop the automatic memory scanning thread. This function must be called
- * with the scan_mutex held.
+ * Stop the automatic memory scanning thread.
  */
 static void stop_scan_thread(void)
 {
@@ -1921,12 +1920,15 @@ static void kmemleak_do_cleanup(struct work_struct *work)
 {
 	stop_scan_thread();
 
+	mutex_lock(&scan_mutex);
 	/*
-	 * Once the scan thread has stopped, it is safe to no longer track
-	 * object freeing. Ordering of the scan thread stopping and the memory
-	 * accesses below is guaranteed by the kthread_stop() function.
+	 * Once it is made sure that kmemleak_scan has stopped, it is safe to no
+	 * longer track object freeing. Ordering of the scan thread stopping and
+	 * the memory accesses below is guaranteed by the kthread_stop()
+	 * function.
 	 */
 	kmemleak_free_enabled = 0;
+	mutex_unlock(&scan_mutex);
 
 	if (!kmemleak_found_leaks)
 		__kmemleak_do_cleanup();
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 670e99b68aa6..9ec024b862ac 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -714,9 +714,9 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
  * invocations for reference counting, or use mem_cgroup_iter_break()
  * to cancel a hierarchy walk before the round-trip is complete.
  *
- * Reclaimers can specify a zone and a priority level in @reclaim to
+ * Reclaimers can specify a node and a priority level in @reclaim to
  * divide up the memcgs in the hierarchy among all concurrent
- * reclaimers operating on the same zone and priority.
+ * reclaimers operating on the same node and priority.
  */
 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 				   struct mem_cgroup *prev,
@@ -2299,7 +2299,7 @@ void memcg_kmem_put_cache(struct kmem_cache *cachep)
 }
 
 /**
- * memcg_kmem_charge: charge a kmem page
+ * memcg_kmem_charge_memcg: charge a kmem page
  * @page: page to charge
  * @gfp: reclaim mode
  * @order: allocation order
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 9886c6073828..7172e0a80e13 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -123,13 +123,13 @@ void __reset_page_owner(struct page *page, unsigned int order)
 static inline bool check_recursive_alloc(struct stack_trace *trace,
 					unsigned long ip)
 {
-	int i, count;
+	int i;
 
 	if (!trace->nr_entries)
 		return false;
 
-	for (i = 0, count = 0; i < trace->nr_entries; i++) {
-		if (trace->entries[i] == ip && ++count == 2)
+	for (i = 0; i < trace->nr_entries; i++) {
+		if (trace->entries[i] == ip)
 			return true;
 	}
 
diff --git a/mm/slab.c b/mm/slab.c
index 324446621b3e..9095c3945425 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1283,6 +1283,7 @@ void __init kmem_cache_init(void)
 				  nr_node_ids * sizeof(struct kmem_cache_node *),
 				  SLAB_HWCACHE_ALIGN, 0, 0);
 	list_add(&kmem_cache->list, &slab_caches);
+	memcg_link_cache(kmem_cache);
 	slab_state = PARTIAL;
 
 	/*
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 40b2db6db6b1..33581be705f0 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1839,9 +1839,11 @@ static void vmstat_update(struct work_struct *w)
 		 * to occur in the future. Keep on running the
 		 * update worker thread.
 		 */
+		preempt_disable();
 		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
 			this_cpu_ptr(&vmstat_work),
 			round_jiffies_relative(sysctl_stat_interval));
+		preempt_enable();
 	}
 }
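
The mm/vmstat.c hunk is an instance of a standard kernel pattern:
smp_processor_id() is only stable while the task cannot migrate, so the
CPU lookup and the enqueue on that CPU's workqueue must sit inside one
preempt-disabled region. A schematic fragment of the pattern (kernel
context assumed; 'wq', 'dwork', and 'delay' are placeholder names, not
the vmstat variables):

	/* Pin the task so the CPU id we read is still the CPU the work
	 * ends up queued on. With CONFIG_DEBUG_PREEMPT, calling
	 * smp_processor_id() from preemptible context prints
	 * "BUG: using smp_processor_id() in preemptible", which is
	 * presumably the warning the hunk above addresses. */
	preempt_disable();
	cpu = smp_processor_id();	/* cannot migrate until preempt_enable() */
	queue_delayed_work_on(cpu, wq, dwork, delay);
	preempt_enable();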