Diffstat (limited to 'fs/proc')
-rw-r--r--  fs/proc/internal.h   |  1
-rw-r--r--  fs/proc/kcore.c      |  6
-rw-r--r--  fs/proc/nommu.c      | 20
-rw-r--r--  fs/proc/proc_misc.c  | 11
-rw-r--r--  fs/proc/task_mmu.c   |  5
-rw-r--r--  fs/proc/task_nommu.c | 74
6 files changed, 87 insertions, 30 deletions
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 146a434ba944..987c773dbb20 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -28,6 +28,7 @@ do { \
         (vmi)->largest_chunk = 0; \
 } while(0)
 
+extern int nommu_vma_show(struct seq_file *, struct vm_area_struct *);
 #endif
 
 extern void create_seq_entry(char *name, mode_t mode, const struct file_operations *f);
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 6a984f64edd7..3ceff3857272 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -279,12 +279,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
                 tsz = elf_buflen - *fpos;
                 if (buflen < tsz)
                         tsz = buflen;
-                elf_buf = kmalloc(elf_buflen, GFP_ATOMIC);
+                elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
                 if (!elf_buf) {
                         read_unlock(&kclist_lock);
                         return -ENOMEM;
                 }
-                memset(elf_buf, 0, elf_buflen);
                 elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
                 read_unlock(&kclist_lock);
                 if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
@@ -330,10 +329,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
                         unsigned long curstart = start;
                         unsigned long cursize = tsz;
 
-                        elf_buf = kmalloc(tsz, GFP_KERNEL);
+                        elf_buf = kzalloc(tsz, GFP_KERNEL);
                         if (!elf_buf)
                                 return -ENOMEM;
-                        memset(elf_buf, 0, tsz);
                         read_lock(&vmlist_lock);
                         for (m=vmlist; m && cursize; m=m->next) {
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
index cff10ab1af63..d7dbdf9e0f49 100644
--- a/fs/proc/nommu.c
+++ b/fs/proc/nommu.c
@@ -33,19 +33,15 @@
 #include "internal.h"
 
 /*
- * display a list of all the VMAs the kernel knows about
- * - nommu kernals have a single flat list
+ * display a single VMA to a sequenced file
  */
-static int nommu_vma_list_show(struct seq_file *m, void *v)
+int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
 {
-        struct vm_area_struct *vma;
         unsigned long ino = 0;
         struct file *file;
         dev_t dev = 0;
         int flags, len;
 
-        vma = rb_entry((struct rb_node *) v, struct vm_area_struct, vm_rb);
-
         flags = vma->vm_flags;
         file = vma->vm_file;
 
@@ -78,6 +74,18 @@ static int nommu_vma_list_show(struct seq_file *m, void *v)
         return 0;
 }
 
+/*
+ * display a list of all the VMAs the kernel knows about
+ * - nommu kernals have a single flat list
+ */
+static int nommu_vma_list_show(struct seq_file *m, void *v)
+{
+        struct vm_area_struct *vma;
+
+        vma = rb_entry((struct rb_node *) v, struct vm_area_struct, vm_rb);
+        return nommu_vma_show(m, vma);
+}
+
 static void *nommu_vma_list_start(struct seq_file *m, loff_t *_pos)
 {
         struct rb_node *_rb;
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 942156225447..5bbd60896050 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -157,10 +157,12 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
                 "SwapCached:    %8lu kB\n"
                 "Active:        %8lu kB\n"
                 "Inactive:      %8lu kB\n"
+#ifdef CONFIG_HIGHMEM
                 "HighTotal:     %8lu kB\n"
                 "HighFree:      %8lu kB\n"
                 "LowTotal:      %8lu kB\n"
                 "LowFree:       %8lu kB\n"
+#endif
                 "SwapTotal:     %8lu kB\n"
                 "SwapFree:      %8lu kB\n"
                 "Dirty:         %8lu kB\n"
@@ -168,6 +170,8 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
                 "AnonPages:     %8lu kB\n"
                 "Mapped:        %8lu kB\n"
                 "Slab:          %8lu kB\n"
+                "SReclaimable:  %8lu kB\n"
+                "SUnreclaim:    %8lu kB\n"
                 "PageTables:    %8lu kB\n"
                 "NFS_Unstable:  %8lu kB\n"
                 "Bounce:        %8lu kB\n"
@@ -183,17 +187,22 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
                 K(total_swapcache_pages),
                 K(active),
                 K(inactive),
+#ifdef CONFIG_HIGHMEM
                 K(i.totalhigh),
                 K(i.freehigh),
                 K(i.totalram-i.totalhigh),
                 K(i.freeram-i.freehigh),
+#endif
                 K(i.totalswap),
                 K(i.freeswap),
                 K(global_page_state(NR_FILE_DIRTY)),
                 K(global_page_state(NR_WRITEBACK)),
                 K(global_page_state(NR_ANON_PAGES)),
                 K(global_page_state(NR_FILE_MAPPED)),
-                K(global_page_state(NR_SLAB)),
+                K(global_page_state(NR_SLAB_RECLAIMABLE) +
+                                global_page_state(NR_SLAB_UNRECLAIMABLE)),
+                K(global_page_state(NR_SLAB_RECLAIMABLE)),
+                K(global_page_state(NR_SLAB_UNRECLAIMABLE)),
                 K(global_page_state(NR_PAGETABLE)),
                 K(global_page_state(NR_UNSTABLE_NFS)),
                 K(global_page_state(NR_BOUNCE)),
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 0a163a4f7764..6b769afac55a 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -122,11 +122,6 @@ struct mem_size_stats
         unsigned long private_dirty;
 };
 
-__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
-{
-        return NULL;
-}
-
 static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
 {
         struct proc_maps_private *priv = m->private;
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 4616ed50ffcd..091aa8e48e02 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -138,25 +138,63 @@ out:
 }
 
 /*
- * Albert D. Cahalan suggested to fake entries for the traditional
- * sections here. This might be worth investigating.
+ * display mapping lines for a particular process's /proc/pid/maps
  */
-static int show_map(struct seq_file *m, void *v)
+static int show_map(struct seq_file *m, void *_vml)
 {
-        return 0;
+        struct vm_list_struct *vml = _vml;
+        return nommu_vma_show(m, vml->vma);
 }
+
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
+        struct proc_maps_private *priv = m->private;
+        struct vm_list_struct *vml;
+        struct mm_struct *mm;
+        loff_t n = *pos;
+
+        /* pin the task and mm whilst we play with them */
+        priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
+        if (!priv->task)
+                return NULL;
+
+        mm = get_task_mm(priv->task);
+        if (!mm) {
+                put_task_struct(priv->task);
+                priv->task = NULL;
+                return NULL;
+        }
+
+        down_read(&mm->mmap_sem);
+
+        /* start from the Nth VMA */
+        for (vml = mm->context.vmlist; vml; vml = vml->next)
+                if (n-- == 0)
+                        return vml;
         return NULL;
 }
-static void m_stop(struct seq_file *m, void *v)
+
+static void m_stop(struct seq_file *m, void *_vml)
 {
+        struct proc_maps_private *priv = m->private;
+
+        if (priv->task) {
+                struct mm_struct *mm = priv->task->mm;
+                up_read(&mm->mmap_sem);
+                mmput(mm);
+                put_task_struct(priv->task);
+        }
 }
-static void *m_next(struct seq_file *m, void *v, loff_t *pos)
+
+static void *m_next(struct seq_file *m, void *_vml, loff_t *pos)
 {
-        return NULL;
+        struct vm_list_struct *vml = _vml;
+
+        (*pos)++;
+        return vml ? vml->next : NULL;
 }
-static struct seq_operations proc_pid_maps_op = {
+
+static struct seq_operations proc_pid_maps_ops = {
         .start          = m_start,
         .next           = m_next,
         .stop           = m_stop,
@@ -165,11 +203,19 @@
 
 static int maps_open(struct inode *inode, struct file *file)
 {
-        int ret;
-        ret = seq_open(file, &proc_pid_maps_op);
-        if (!ret) {
-                struct seq_file *m = file->private_data;
-                m->private = NULL;
+        struct proc_maps_private *priv;
+        int ret = -ENOMEM;
+
+        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+        if (priv) {
+                priv->pid = proc_pid(inode);
+                ret = seq_open(file, &proc_pid_maps_ops);
+                if (!ret) {
+                        struct seq_file *m = file->private_data;
+                        m->private = priv;
+                } else {
+                        kfree(priv);
+                }
         }
         return ret;
 }
@@ -178,6 +224,6 @@ struct file_operations proc_maps_operations = {
         .open           = maps_open,
         .read           = seq_read,
         .llseek         = seq_lseek,
-        .release        = seq_release,
+        .release        = seq_release_private,
 };
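
One way to exercise the proc_misc.c part of this diff from userspace: the program below is an illustrative sketch (not part of the commit) that reads /proc/meminfo and checks that the new SReclaimable and SUnreclaim fields sum to the Slab figure. It assumes a kernel that carries this change; older kernels simply do not emit the two new fields.

/*
 * Illustrative sketch only: parse the Slab, SReclaimable and SUnreclaim
 * fields touched by the proc_misc.c hunks above and verify that
 * Slab == SReclaimable + SUnreclaim (all values are reported in kB).
 * On kernels without this change the two new fields are absent and
 * stay at zero here.
 */
#include <stdio.h>

int main(void)
{
        char line[128];
        unsigned long slab = 0, sreclaim = 0, sunreclaim = 0;
        FILE *f = fopen("/proc/meminfo", "r");

        if (!f) {
                perror("/proc/meminfo");
                return 1;
        }

        while (fgets(line, sizeof(line), f)) {
                if (sscanf(line, "Slab: %lu kB", &slab) == 1)
                        continue;
                if (sscanf(line, "SReclaimable: %lu kB", &sreclaim) == 1)
                        continue;
                sscanf(line, "SUnreclaim: %lu kB", &sunreclaim);
        }
        fclose(f);

        printf("Slab %lu kB, SReclaimable %lu kB, SUnreclaim %lu kB (sum %lu kB)\n",
               slab, sreclaim, sunreclaim, sreclaim + sunreclaim);
        return 0;
}

Since the counters keep changing while the file is read, the sum may differ from the Slab line by a few kB; an exact match is not guaranteed on a busy system.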