author     Linus Torvalds <torvalds@linux-foundation.org>  2020-12-16 14:44:53 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-12-16 14:44:53 -0800
commit     fff875a18382f1983b4a27be9282e697dbccb3db
tree       3fd1b13d0a633d8502ffddb89348ef7619b3c229 /include
parent     870d16757ba8918c3f8cac162b9ca7669556dbab
parent     5bdba520c1b318578caffd325515b35d187f8a0e
Merge tag 'memblock-v5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock

Pull memblock updates from Mike Rapoport:
 "memblock debug enhancements.

  Improve tracking of early memory allocations when memblock debug is
  enabled:

   - Add memblock_dbg() to memblock_phys_alloc_range() to get details
     about its usage

   - Make memblock allocator wrappers actually inline to track their
     callers in memblock debug messages"

* tag 'memblock-v5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock:
  mm: memblock: drop __init from memblock functions to make it inline
  mm: memblock: add more debug logs
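The inlining change matters because the core allocators (memblock_alloc_try_nid() and friends) record the caller's return address in their debug output; if a wrapper such as memblock_alloc() is emitted as a real out-of-line function, that address points at the wrapper itself rather than at the code that actually asked for memory. Below is a minimal userspace sketch of that effect, assuming GCC/Clang builtins; the function names (core_alloc, wrapper_outofline, wrapper_inline) are made up for illustration and are not the kernel's.

/*
 * Sketch only, not kernel code.
 * Build: gcc -O0 demo.c   (or add -fno-optimize-sibling-calls at -O2 so the
 * wrapper's call is not turned into a tail call, which would hide the effect)
 */
#include <stdio.h>

/* Stand-in for the core allocator's debug print of its caller (_RET_IP_). */
__attribute__((noinline)) static void *core_alloc(const char *via)
{
	printf("%s: request came from %p\n", via, __builtin_return_address(0));
	return NULL;
}

/* Out-of-line wrapper: core_alloc() records an address inside this wrapper. */
__attribute__((noinline)) static void *wrapper_outofline(void)
{
	return core_alloc("out-of-line wrapper");
}

/*
 * Forced-inline wrapper (the kernel spells this __always_inline): the body
 * collapses into the caller, so core_alloc() records the real call site.
 */
static inline __attribute__((always_inline)) void *wrapper_inline(void)
{
	return core_alloc("always_inline wrapper");
}

int main(void)
{
	wrapper_outofline();	/* reports a call site inside wrapper_outofline() */
	wrapper_inline();	/* reports a call site inside main() */
	return 0;
}

Run as suggested above, the first call reports an address inside wrapper_outofline() while the second reports one inside main(), which mirrors why the memblock wrappers below need to be genuinely inline (and free of __init, so the compiler may inline them everywhere) for the debug messages to identify the real caller.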
Diffstat (limited to 'include')
 include/linux/memblock.h | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index ef131255cedc..b93c44b9121e 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -404,13 +404,13 @@ void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
 			     phys_addr_t min_addr, phys_addr_t max_addr,
 			     int nid);
 
-static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align)
+static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
 {
 	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
 				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
 }
 
-static inline void * __init memblock_alloc_raw(phys_addr_t size,
+static inline void *memblock_alloc_raw(phys_addr_t size,
 					       phys_addr_t align)
 {
 	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
@@ -418,7 +418,7 @@ static inline void * __init memblock_alloc_raw(phys_addr_t size,
 					  NUMA_NO_NODE);
 }
 
-static inline void * __init memblock_alloc_from(phys_addr_t size,
+static inline void *memblock_alloc_from(phys_addr_t size,
 						phys_addr_t align,
 						phys_addr_t min_addr)
 {
@@ -426,33 +426,33 @@ static inline void * __init memblock_alloc_from(phys_addr_t size,
 				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
 }
 
-static inline void * __init memblock_alloc_low(phys_addr_t size,
+static inline void *memblock_alloc_low(phys_addr_t size,
 					       phys_addr_t align)
 {
 	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
 				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
 }
 
-static inline void * __init memblock_alloc_node(phys_addr_t size,
+static inline void *memblock_alloc_node(phys_addr_t size,
 						phys_addr_t align, int nid)
 {
 	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
 				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 }
 
-static inline void __init memblock_free_early(phys_addr_t base,
+static inline void memblock_free_early(phys_addr_t base,
 					      phys_addr_t size)
 {
 	memblock_free(base, size);
 }
 
-static inline void __init memblock_free_early_nid(phys_addr_t base,
+static inline void memblock_free_early_nid(phys_addr_t base,
 						  phys_addr_t size, int nid)
 {
 	memblock_free(base, size);
 }
 
-static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
+static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
 {
 	__memblock_free_late(base, size);
 }
@@ -460,7 +460,7 @@ static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
 /*
  * Set the allocation direction to bottom-up or top-down.
  */
-static inline void __init memblock_set_bottom_up(bool enable)
+static inline void memblock_set_bottom_up(bool enable)
 {
 	memblock.bottom_up = enable;
 }