author    Oleg Nesterov <oleg@redhat.com>  2014-10-09 15:25:41 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-10-09 22:25:49 -0400
commit    b8c20a9b85b057c850f63ee4c63531a356d8596a
tree      474512d1a811633ea3150dd2bd884d11ed47b5d2  /fs/proc/task_mmu.c
parent    ad2a00e4b7e20ab03700b0bb13270b6cee45c6e0
fs/proc/task_mmu.c: reintroduce m->version logic
Add the "last_addr" optimization back. Like before, every ->show() method checks !seq_overflow() and sets m->version = vma->vm_start. However, it also checks that m_next_vma(vma) != NULL, otherwise it sets m->version = -1 for the lockless "EOF" fast-path in m_start(). m_start() can simply do find_vma() + m_next_vma() if last_addr is not zero, the code looks clear and simple and this case is clearly separated from "scan vmas" path. Signed-off-by: Oleg Nesterov <oleg@redhat.com> Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Cyrill Gorcunov <gorcunov@openvz.org> Cc: "Eric W. Biederman" <ebiederm@xmission.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/proc/task_mmu.c')
-rw-r--r--  fs/proc/task_mmu.c  21
1 file changed, 21 insertions, 0 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index fef398948462..c7228c2326d1 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -146,13 +146,24 @@ m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
 	return vma->vm_next ?: priv->tail_vma;
 }
 
+static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
+{
+	if (m->count < m->size) /* vma is copied successfully */
+		m->version = m_next_vma(m->private, vma) ? vma->vm_start : -1UL;
+}
+
 static void *m_start(struct seq_file *m, loff_t *ppos)
 {
 	struct proc_maps_private *priv = m->private;
+	unsigned long last_addr = m->version;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
 	unsigned int pos = *ppos;
 
+	/* See m_cache_vma(). Zero at the start or after lseek. */
+	if (last_addr == -1UL)
+		return NULL;
+
 	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
 	if (!priv->task)
 		return ERR_PTR(-ESRCH);
@@ -165,6 +176,13 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
 	hold_task_mempolicy(priv);
 	priv->tail_vma = get_gate_vma(mm);
 
+	if (last_addr) {
+		vma = find_vma(mm, last_addr);
+		if (vma && (vma = m_next_vma(priv, vma)))
+			return vma;
+	}
+
+	m->version = 0;
 	if (pos < mm->map_count) {
 		for (vma = mm->mmap; pos; pos--)
 			vma = vma->vm_next;
@@ -340,6 +358,7 @@ done:
 static int show_map(struct seq_file *m, void *v, int is_pid)
 {
 	show_map_vma(m, v, is_pid);
+	m_cache_vma(m, v);
 	return 0;
 }
@@ -617,6 +636,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
 			   mss.nonlinear >> 10);
 
 	show_smap_vma_flags(m, vma);
+	m_cache_vma(m, vma);
 	return 0;
 }
@@ -1471,6 +1491,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
seq_printf(m, " N%d=%lu", nid, md->node[nid]);
out:
seq_putc(m, '\n');
+ m_cache_vma(m, vma);
return 0;
}