path: root/mm/sparse-vmemmap.c
author    Christoph Hellwig <hch@lst.de>          2017-12-29 08:53:59 +0100
committer Dan Williams <dan.j.williams@intel.com> 2018-01-08 11:46:23 -0800
commit    eb8045335c70ef8b272d2888a225b81344423139 (patch)
tree      dfcf7d1f4453dfa4ffe31027390d64728052c475 /mm/sparse-vmemmap.c
parent    a8fc357b2875da8732c91eb085862a0648d82767 (diff)
mm: merge vmem_altmap_alloc into altmap_alloc_block_buf
There is no clear separation between the two, so merge them.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'mm/sparse-vmemmap.c')
-rw-r--r--  mm/sparse-vmemmap.c | 45
1 file changed, 16 insertions(+), 29 deletions(-)
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index d012c9e2811b..bd0276d5f66b 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -107,33 +107,16 @@ static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
 }
 
 /**
- * vmem_altmap_alloc - allocate pages from the vmem_altmap reservation
- * @altmap - reserved page pool for the allocation
- * @nr_pfns - size (in pages) of the allocation
+ * altmap_alloc_block_buf - allocate pages from the device page map
+ * @altmap: device page map
+ * @size: size (in bytes) of the allocation
  *
- * Allocations are aligned to the size of the request
+ * Allocations are aligned to the size of the request.
  */
-static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap,
-		unsigned long nr_pfns)
-{
-	unsigned long pfn = vmem_altmap_next_pfn(altmap);
-	unsigned long nr_align;
-
-	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
-	nr_align = ALIGN(pfn, nr_align) - pfn;
-
-	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
-		return ULONG_MAX;
-	altmap->alloc += nr_pfns;
-	altmap->align += nr_align;
-	return pfn + nr_align;
-}
-
 void * __meminit altmap_alloc_block_buf(unsigned long size,
 		struct vmem_altmap *altmap)
 {
-	unsigned long pfn, nr_pfns;
-	void *ptr;
+	unsigned long pfn, nr_pfns, nr_align;
 
 	if (size & ~PAGE_MASK) {
 		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
@@ -141,16 +124,20 @@ void * __meminit altmap_alloc_block_buf(unsigned long size,
 		return NULL;
 	}
 
+	pfn = vmem_altmap_next_pfn(altmap);
 	nr_pfns = size >> PAGE_SHIFT;
-	pfn = vmem_altmap_alloc(altmap, nr_pfns);
-	if (pfn < ULONG_MAX)
-		ptr = __va(__pfn_to_phys(pfn));
-	else
-		ptr = NULL;
+	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
+	nr_align = ALIGN(pfn, nr_align) - pfn;
+	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
+		return NULL;
+
+	altmap->alloc += nr_pfns;
+	altmap->align += nr_align;
+	pfn += nr_align;
+
 	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
 			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
-
-	return ptr;
+	return __va(__pfn_to_phys(pfn));
 }
 
 void __meminit vmemmap_verify(pte_t *pte, int node,
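For reference, the padding logic kept by this merge aligns each allocation to its natural alignment: the largest power of two that divides the request size in pages, with nr_align pages skipped so the returned pfn lands on that boundary. The standalone userspace sketch below only illustrates that arithmetic and is not kernel code: ALIGN() is redefined locally, the GCC/Clang builtin __builtin_ctzl() stands in for find_first_bit(), and the pfn/nr_pfns values are made-up examples.

#include <stdio.h>

/* Local stand-in for the kernel's ALIGN(): round x up to a multiple of a
 * (a must be a power of two). */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long pfn = 0x1003;	/* hypothetical next free pfn in the altmap */
	unsigned long nr_pfns = 8;	/* hypothetical request: 8 pages */

	/* Largest power of two dividing nr_pfns == natural alignment of the request. */
	unsigned long align = 1UL << __builtin_ctzl(nr_pfns);

	/* Pages skipped so the allocation starts on that alignment. */
	unsigned long nr_align = ALIGN(pfn, align) - pfn;

	printf("request %lu pages, align %lu, pad %lu, start pfn %#lx\n",
	       nr_pfns, align, nr_align, pfn + nr_align);
	return 0;
}

With these example values it prints "request 8 pages, align 8, pad 5, start pfn 0x1008": the 8-page request is pushed forward 5 pages so it starts on an 8-page boundary, and the merged kernel function charges those 5 pages to altmap->align and the 8 pages to altmap->alloc.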