author     Anshuman Khandual <khandual@linux.vnet.ibm.com>  2017-04-06 19:44:50 +0530
committer  Michael Ellerman <mpe@ellerman.id.au>            2017-06-28 13:08:17 +1000
commit     39e46751839dfe4c34eb354eee1e278082fc9d07 (patch)
tree       3f4088a2828a88ba1511c68b220e7e5ca07029aa  /arch/powerpc/mm/init_64.c
parent     b0f36c10de8d9bd271ae261604d4e4f47f663255 (diff)
download   linux-39e46751839dfe4c34eb354eee1e278082fc9d07.tar.bz2
powerpc/mm: Add comments on vmemmap physical mapping
Adds some explanation of how the vmemmap-based struct page layout's physical mapping is allocated and tracked through a linked list. It also notes a possible race condition.

Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Diffstat (limited to 'arch/powerpc/mm/init_64.c')
-rw-r--r--  arch/powerpc/mm/init_64.c  21
1 file changed, 21 insertions, 0 deletions
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index ec84b31c6c86..f1a4964f9317 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -110,8 +110,29 @@ static int __meminit vmemmap_populated(unsigned long start, int page_size)
return 0;
}
+/*
+ * vmemmap virtual address space management does not have a traditional page
+ * table to track which virtual struct pages are backed by a physical mapping.
+ * The virtual to physical mappings are instead tracked in a simple linked
+ * list format. 'vmemmap_list' maintains the entire vmemmap physical mapping
+ * at all times, whereas the 'next' pointer maintains the available
+ * 'vmemmap_backing' structures which have been deleted from the
+ * 'vmemmap_list' during system runtime (memory hotplug remove operation).
+ * The freed 'vmemmap_backing' structures are reused later when new requests
+ * come in, without allocating fresh memory. The same pointer also tracks the
+ * allocated 'vmemmap_backing' structures, as we allocate one full page of
+ * memory at a time whenever we don't have any left.
+ */
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;
+
+/*
+ * The same pointer 'next' tracks the individual chunks inside the allocated
+ * full page during boot, and then tracks the freed nodes during runtime.
+ * This sharing is racy in principle, but the race never happens because the
+ * two uses are separated by the boot process. It would become a problem if
+ * we somehow had a memory hotplug operation during boot!
+ */
static int num_left;
static int num_freed;
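
To make the bookkeeping that these comments describe more concrete, here is a small, self-contained userspace sketch of the same scheme. It is an illustration, not the kernel code: the helper names only mirror the helpers in init_64.c, the struct fields are reduced to what the comments mention, and the kernel's vmemmap_alloc_block() is stood in for by a plain malloc() of one page-sized block.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

struct vmemmap_backing {
	struct vmemmap_backing *list;	/* next entry on vmemmap_list or on the free pool */
	unsigned long phys;		/* physical address backing this vmemmap chunk */
	unsigned long virt_addr;	/* vmemmap virtual address the chunk covers */
};

static struct vmemmap_backing *vmemmap_list;	/* all live virt->phys mappings */
static struct vmemmap_backing *next;		/* boot: next unused chunk; runtime: free pool head */
static int num_left;				/* chunks left in the current page */
static int num_freed;				/* entries sitting on the free pool */

/* Hand out one vmemmap_backing, reusing freed entries before carving a new page. */
static struct vmemmap_backing *vmemmap_list_alloc(void)
{
	struct vmemmap_backing *vmem_back;

	if (num_freed) {		/* runtime: pop an entry off the free pool */
		num_freed--;
		vmem_back = next;
		next = next->list;
		return vmem_back;
	}

	if (!num_left) {		/* boot-style path: carve chunks out of a fresh page */
		next = malloc(PAGE_SIZE);
		if (!next)
			return NULL;
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;
	return next++;
}

/* Record a new virt->phys mapping at the head of vmemmap_list. */
static int vmemmap_list_populate(unsigned long phys, unsigned long start)
{
	struct vmemmap_backing *vmem_back = vmemmap_list_alloc();

	if (!vmem_back)
		return -1;

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;
	vmemmap_list = vmem_back;
	return 0;
}

/* Unlink the mapping for 'start' from vmemmap_list and push its entry onto
 * the free pool headed by 'next' (what a memory hotplug remove would do). */
static void vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing **pp = &vmemmap_list;

	while (*pp && (*pp)->virt_addr != start)
		pp = &(*pp)->list;

	if (*pp) {
		struct vmemmap_backing *vmem_back = *pp;

		*pp = vmem_back->list;	/* drop it from the live list */
		vmem_back->list = next;	/* ... and recycle it */
		next = vmem_back;
		num_freed++;
	}
}

int main(void)
{
	struct vmemmap_backing *vb;

	vmemmap_list_populate(0x01000000UL, 0xf000000000000000UL);
	vmemmap_list_populate(0x02000000UL, 0xf000000000010000UL);
	vmemmap_list_free(0xf000000000000000UL);		/* entry goes onto the free pool */
	vmemmap_list_populate(0x03000000UL, 0xf000000000020000UL);	/* reuses that entry */

	for (vb = vmemmap_list; vb; vb = vb->list)
		printf("virt %#lx -> phys %#lx\n", vb->virt_addr, vb->phys);
	return 0;
}

In this single-threaded sketch the dual role of 'next' is harmless, because a free simply stacks the returned entry in front of the not-yet-used chunks. The situation the second comment warns about is the kernel one, where a memory hotplug operation during boot would let the two uses of 'next' run into each other.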