path: root/mm/memblock.c
author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-07-12 14:24:57 +1000
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-08-05 12:56:18 +1000
commit		7f219c736f9439acb1c50d264fbee93c353773ca (patch)
tree		24e4aa4f3135f639954243fcd015b44786a9a11d /mm/memblock.c
parent		7590abe891c85fbc65dc906516d0bf89e070c19a (diff)
download	linux-7f219c736f9439acb1c50d264fbee93c353773ca.tar.bz2
memblock: split memblock_find_base() out of __memblock_alloc_base()
This will be used by the array resize code and might prove useful to some
arch code as well, at which point it can be made non-static.

Also add a comment as to why aligning the size is important.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
---
v2. Fix loss of size alignment
v3. Fix result code
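For orientation before the diff, here is a minimal, self-contained sketch (plain user-space C, not the kernel source) of the layering the patch creates: a search-only helper corresponding to memblock_find_base(), an allocator that aligns the size once and then reserves what was found, corresponding to __memblock_alloc_base(), and a failure-is-fatal wrapper corresponding to memblock_alloc_base(). The type, the error sentinel and the two stub helpers (find_base(), reserve_region()) are hypothetical stand-ins, not the real memblock API.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins: the real kernel type, sentinel and helpers differ. */
typedef unsigned long long phys_addr_t;
#define FIND_ERROR ((phys_addr_t)~0ULL)	/* "nothing found" sentinel */

/* Search only: locate a suitable base below max_addr, reserve nothing.
 * Trivial stub so the sketch compiles: pretend the highest aligned slot
 * below max_addr is always free. */
static phys_addr_t find_base(phys_addr_t size, phys_addr_t align,
			     phys_addr_t max_addr)
{
	return (max_addr - size) & ~(align - 1);
}

/* Record [base, base + size) as reserved; a negative return would mean failure. */
static int reserve_region(phys_addr_t base, phys_addr_t size)
{
	(void)base;
	(void)size;
	return 0;
}

/* Align the size once, search, then reserve the result; 0 means failure.
 * This mirrors the shape of __memblock_alloc_base() after the patch. */
static phys_addr_t alloc_base(phys_addr_t size, phys_addr_t align,
			      phys_addr_t max_addr)
{
	phys_addr_t found;

	/* Rounding the size up keeps adjacent reservations mergeable. */
	size = (size + align - 1) & ~(align - 1);

	found = find_base(size, align, max_addr);
	if (found != FIND_ERROR && reserve_region(found, size) >= 0)
		return found;
	return 0;
}

/* Failure-is-fatal wrapper, mirroring memblock_alloc_base() in the patch. */
static phys_addr_t alloc_base_or_die(phys_addr_t size, phys_addr_t align,
				     phys_addr_t max_addr)
{
	phys_addr_t alloc = alloc_base(size, align, max_addr);

	if (alloc == 0) {
		fprintf(stderr, "alloc of %llu bytes below %#llx failed\n",
			size, max_addr);
		abort();	/* the kernel panics here instead */
	}
	return alloc;
}

int main(void)
{
	phys_addr_t base = alloc_base_or_die(24, 16, 0x10000);

	printf("allocated 24 bytes (rounded to 32) at %#llx\n", base);
	return 0;
}

Running the sketch places a 24-byte request, rounded up to 32, at the highest aligned address below the limit; the point is only the layering, which matches the shape of the functions in the hunks below.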
Diffstat (limited to 'mm/memblock.c')
-rw-r--r--  mm/memblock.c  58
1 file changed, 38 insertions(+), 20 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index ae856d4e25a3..b775fca4fba5 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -345,12 +345,15 @@ phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int n
BUG_ON(0 == size);
+ /* We align the size to limit fragmentation. Without this, a lot of
+ * small allocs quickly eat up the whole reserve array on sparc
+ */
+ size = memblock_align_up(size, align);
+
/* We do a bottom-up search for a region with the right
* nid since that's easier considering how memblock_nid_range()
* works
*/
- size = memblock_align_up(size, align);
-
for (i = 0; i < mem->cnt; i++) {
phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
size, align, nid);
@@ -366,20 +369,7 @@ phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
-phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
-{
- phys_addr_t alloc;
-
- alloc = __memblock_alloc_base(size, align, max_addr);
-
- if (alloc == 0)
- panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
- (unsigned long long) size, (unsigned long long) max_addr);
-
- return alloc;
-}
-
-phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
long i;
phys_addr_t base = 0;
@@ -387,8 +377,6 @@ phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, ph
BUG_ON(0 == size);
- size = memblock_align_up(size, align);
-
/* Pump up max_addr */
if (max_addr == MEMBLOCK_ALLOC_ACCESSIBLE)
max_addr = memblock.current_limit;
@@ -405,13 +393,43 @@ phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, ph
continue;
base = min(memblockbase + memblocksize, max_addr);
res_base = memblock_find_region(memblockbase, base, size, align);
- if (res_base != MEMBLOCK_ERROR &&
- memblock_add_region(&memblock.reserved, res_base, size) >= 0)
+ if (res_base != MEMBLOCK_ERROR)
return res_base;
}
+ return MEMBLOCK_ERROR;
+}
+
+phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+{
+ phys_addr_t found;
+
+ /* We align the size to limit fragmentation. Without this, a lot of
+ * small allocs quickly eat up the whole reserve array on sparc
+ */
+ size = memblock_align_up(size, align);
+
+ found = memblock_find_base(size, align, max_addr);
+ if (found != MEMBLOCK_ERROR &&
+ memblock_add_region(&memblock.reserved, found, size) >= 0)
+ return found;
+
return 0;
}
+phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+{
+ phys_addr_t alloc;
+
+ alloc = __memblock_alloc_base(size, align, max_addr);
+
+ if (alloc == 0)
+ panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
+ (unsigned long long) size, (unsigned long long) max_addr);
+
+ return alloc;
+}
+
+
/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
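A note on the "align the size to limit fragmentation" comment this patch adds in two places above: memblock can fold a new reservation into an existing entry of the reserved array only when the two ranges are exactly adjacent, so an unaligned size leaves a small gap below each previous reservation and every small allocation ends up costing a fresh array entry (the sparc case mentioned in the comment). The toy user-space model below (assumed top-down, single-region allocation; hypothetical helper entries_needed(); not kernel code) shows the effect of rounding the size up to the alignment:

#include <stdio.h>

/* Count how many reserved-array entries a run of identical top-down
 * allocations would need, assuming two regions share one entry only
 * when they are exactly adjacent. */
static int entries_needed(unsigned long long top, unsigned long long size,
			  unsigned long long align, int count)
{
	unsigned long long base, prev_base = top;
	int entries = 0, i;

	for (i = 0; i < count; i++) {
		/* highest 'align'-aligned base that fits 'size' below prev_base */
		base = (prev_base - size) & ~(align - 1);
		if (i == 0 || base + size != prev_base)
			entries++;	/* gap below the previous region: new entry */
		prev_base = base;
	}
	return entries;
}

int main(void)
{
	/* Unaligned size: each allocation leaves an 8-byte gap below the last. */
	printf("size 24, align 16: %d entries\n",
	       entries_needed(0x1000, 24, 16, 8));
	/* Size rounded up to the alignment: the regions abut and merge. */
	printf("size 32, align 16: %d entries\n",
	       entries_needed(0x1000, 32, 16, 8));
	return 0;
}

With the unaligned 24-byte size every one of the eight allocations needs its own entry; rounded up to 32 they collapse into a single reserved range.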