From 5a7d596a05dddd09c44ae462f881491cf87ed120 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Mon, 24 Oct 2022 16:14:28 +0800 Subject: mm/percpu: remove unused pcpu_map_extend_chunks Since commit 40064aeca35c ("percpu: replace area map allocator with bitmap"), it is unneeded. Signed-off-by: Baoquan He Signed-off-by: Dennis Zhou --- mm/percpu.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'mm') diff --git a/mm/percpu.c b/mm/percpu.c index 27697b2429c2..26d8cd2ca323 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -174,9 +174,6 @@ static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */ -/* chunks which need their map areas extended, protected by pcpu_lock */ -static LIST_HEAD(pcpu_map_extend_chunks); - /* * The number of empty populated pages, protected by pcpu_lock. * The reserved chunk doesn't contribute to the count. -- cgit v1.2.3 From c1f6688d35d47ca11200789b000b3b20f5ecdbd9 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Tue, 25 Oct 2022 11:11:45 +0800 Subject: mm/percpu: use list_first_entry_or_null in pcpu_reclaim_populated() To replace list_empty()/list_first_entry() pair to simplify code. Signed-off-by: Baoquan He Acked-by: Dennis Zhou Signed-off-by: Dennis Zhou --- mm/percpu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/percpu.c b/mm/percpu.c index 26d8cd2ca323..841bb93aaae6 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -2143,9 +2143,9 @@ static void pcpu_reclaim_populated(void) * other accessor is the free path which only returns area back to the * allocator not touching the populated bitmap. 
*/ - while (!list_empty(&pcpu_chunk_lists[pcpu_to_depopulate_slot])) { - chunk = list_first_entry(&pcpu_chunk_lists[pcpu_to_depopulate_slot], - struct pcpu_chunk, list); + while ((chunk = list_first_entry_or_null( + &pcpu_chunk_lists[pcpu_to_depopulate_slot], + struct pcpu_chunk, list))) { WARN_ON(chunk->immutable); /* -- cgit v1.2.3 From e04cb6976340d5ebf2b28ad91bf6a13a285aa566 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Mon, 24 Oct 2022 16:14:30 +0800 Subject: mm/percpu: Update the code comment when creating new chunk The lock pcpu_alloc_mutex taking code has been moved to the beginning of pcpu_alloc() if it's a non-atomic allocation. So the code comment above the pcpu_create_chunk() callsite needs to be updated. Signed-off-by: Baoquan He Signed-off-by: Dennis Zhou --- mm/percpu.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/percpu.c b/mm/percpu.c index 841bb93aaae6..68d5ba61c935 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1817,16 +1817,12 @@ restart: spin_unlock_irqrestore(&pcpu_lock, flags); - /* - * No space left. Create a new chunk. We don't want multiple - * tasks to create chunks simultaneously. Serialize and create iff - * there's still no empty chunk after grabbing the mutex. - */ if (is_atomic) { err = "atomic alloc failed, no space left"; goto fail; } + /* No space left. Create a new chunk. */ if (list_empty(&pcpu_chunk_lists[pcpu_free_slot])) { chunk = pcpu_create_chunk(pcpu_gfp); if (!chunk) { -- cgit v1.2.3 From 73046f8d31701c379f6db899cb09ba70a3285143 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Tue, 25 Oct 2022 11:45:16 +0800 Subject: mm/percpu: add comment to state the empty populated pages accounting When allocating an area from a chunk, pcpu_block_update_hint_alloc() is called to update chunk metadata, including chunk's and global nr_empty_pop_pages. However, if the allocation is not atomic, some blocks may not be populated with pages yet, while we still subtract the number here. 
The number of pages will be added back with pcpu_chunk_populated() when populating pages. Adding code comment to make that more understandable. Signed-off-by: Baoquan He Signed-off-by: Dennis Zhou --- mm/percpu.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/percpu.c b/mm/percpu.c index 68d5ba61c935..2a7313b56254 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -831,13 +831,15 @@ static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off, /* * Update s_block. - * block->first_free must be updated if the allocation takes its place. - * If the allocation breaks the contig_hint, a scan is required to - * restore this hint. */ if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS) nr_empty_pages++; + /* + * block->first_free must be updated if the allocation takes its place. + * If the allocation breaks the contig_hint, a scan is required to + * restore this hint. + */ if (s_off == s_block->first_free) s_block->first_free = find_next_zero_bit( pcpu_index_alloc_map(chunk, s_index), @@ -912,6 +914,12 @@ static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off, } } + /* + * If the allocation is not atomic, some blocks may not be + * populated with pages, while we account it here. The number + * of pages will be added back with pcpu_chunk_populated() + * when populating pages. + */ if (nr_empty_pages) pcpu_update_empty_pages(chunk, -nr_empty_pages); -- cgit v1.2.3 From 83d261fc9e5fb03e8c32e365ca4ee53952611a2b Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Mon, 24 Oct 2022 16:14:32 +0800 Subject: mm/percpu: replace the goto with break In function pcpu_reclaim_populated(), the line of goto jumping is unnecessary since the label 'end_chunk' is near the end of the for loop, use break instead. 
Signed-off-by: Baoquan He Signed-off-by: Dennis Zhou --- mm/percpu.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/percpu.c b/mm/percpu.c index 2a7313b56254..a223b3120d33 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -2167,7 +2167,7 @@ static void pcpu_reclaim_populated(void) /* reintegrate chunk to prevent atomic alloc failures */ if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_HIGH) { reintegrate = true; - goto end_chunk; + break; } /* @@ -2203,7 +2203,6 @@ static void pcpu_reclaim_populated(void) end = -1; } -end_chunk: /* batch tlb flush per chunk to amortize cost */ if (freed_page_start < freed_page_end) { spin_unlock_irq(&pcpu_lock); -- cgit v1.2.3 From 3289e0533e70aafa9fb6d128fd4452db1b8befe8 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Mon, 24 Oct 2022 16:14:33 +0800 Subject: mm/percpu.c: remove the lcm code since block size is fixed at page size Since commit b239f7daf553 ("percpu: set PCPU_BITMAP_BLOCK_SIZE to PAGE_SIZE"), the PCPU_BITMAP_BLOCK_SIZE has been set to page size fixedly. So the lcm code in pcpu_alloc_first_chunk() doesn't make sense any more, clean it up. 
Signed-off-by: Baoquan He Signed-off-by: Dennis Zhou --- mm/percpu.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/percpu.c b/mm/percpu.c index a223b3120d33..acd78da0493b 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -72,7 +72,6 @@ #include #include #include -#include #include #include #include @@ -1347,7 +1346,7 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr, int map_size) { struct pcpu_chunk *chunk; - unsigned long aligned_addr, lcm_align; + unsigned long aligned_addr; int start_offset, offset_bits, region_size, region_bits; size_t alloc_size; @@ -1355,14 +1354,7 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr, aligned_addr = tmp_addr & PAGE_MASK; start_offset = tmp_addr - aligned_addr; - - /* - * Align the end of the region with the LCM of PAGE_SIZE and - * PCPU_BITMAP_BLOCK_SIZE. One of these constants is a multiple of - * the other. - */ - lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE); - region_size = ALIGN(start_offset + map_size, lcm_align); + region_size = ALIGN(start_offset + map_size, PAGE_SIZE); /* allocate chunk */ alloc_size = struct_size(chunk, populated, -- cgit v1.2.3