author      Dennis Zhou (Facebook) <dennisszhou@gmail.com>   2017-07-24 19:02:16 -0400
committer   Tejun Heo <tj@kernel.org>                        2017-07-26 17:41:06 -0400
commit      fc3043345a648a49978c6fb0bf8c188b7cfe0ab3 (patch)
tree        3384725859de4b8f449e393d5cef89c00a26dae5 /mm
parent      268625a6f9df6a7c9b0ae7707a8a1cd5a9993bd2 (diff)
download    linux-fc3043345a648a49978c6fb0bf8c188b7cfe0ab3.tar.bz2
percpu: update alloc path to only scan if contig hints are broken
Metadata is kept per block to keep track of where the contig hints are. Scanning can be avoided when the contig hints are not broken. In that case, left and right contigs have to be managed manually.

This patch changes the allocation path hint updating to only scan when contig hints are broken.

Signed-off-by: Dennis Zhou <dennisszhou@gmail.com>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
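To make the idea easier to follow before reading the diff, here is a minimal standalone sketch of the rule the patch applies. The names and types (region_hint, hint_broken, update_on_alloc, min_int) are hypothetical illustrations, not the kernel's struct pcpu_block_md or struct pcpu_chunk: a full rescan of a region's bitmap is only needed when the newly allocated range starts inside the recorded contiguous-free run; otherwise only the cheap left/right edge counters are adjusted in place.

/* Hypothetical sketch only - simplified types, not the mm/percpu.c code. */
#include <stdbool.h>

static int min_int(int a, int b) { return a < b ? a : b; }

struct region_hint {
	int contig_start;	/* start of the largest known free run */
	int contig_len;		/* length of that run */
	int left_free;		/* free space at the region's left edge */
	int right_free;		/* free space at the region's right edge */
};

/*
 * Mirrors the checks added in the patch: the hint only has to be
 * rebuilt by scanning when the allocation starts inside the run
 * currently recorded as the contig hint.
 */
static bool hint_broken(const struct region_hint *h, int off)
{
	return off >= h->contig_start && off < h->contig_start + h->contig_len;
}

static void update_on_alloc(struct region_hint *h, int region_bits,
			    int off, int bits)
{
	if (hint_broken(h, off)) {
		/* expensive path: rescan the bitmap to rebuild the hint
		 * (the rescan itself is omitted in this sketch) */
	} else {
		/* cheap path: only the edge counters can get smaller */
		h->left_free = min_int(h->left_free, off);
		h->right_free = min_int(h->right_free,
					region_bits - (off + bits));
	}
}

The patch applies this same rule at two levels: per block, where pcpu_block_refresh_hint() is only called when the block's contig hint is hit, and for the whole chunk, where pcpu_chunk_refresh_hint() is only called when the chunk's contig hint is hit.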
Diffstat (limited to 'mm')
-rw-r--r--   mm/percpu.c   59
1 file changed, 56 insertions, 3 deletions
diff --git a/mm/percpu.c b/mm/percpu.c
index d0d3fa872a8c..f38f47a65642 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -521,6 +521,10 @@ static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
* @chunk: chunk of interest
* @bit_off: chunk offset
* @bits: size of request
+ *
+ * Updates metadata for the allocation path. The metadata only has to be
+ * refreshed by a full scan iff the chunk's contig hint is broken. Block level
+ * scans are required if the block's contig hint is broken.
*/
static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
int bits)
@@ -545,14 +549,56 @@ static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
/*
* Update s_block.
+ * block->first_free must be updated if the allocation takes its place.
+ * If the allocation breaks the contig_hint, a scan is required to
+ * restore this hint.
*/
- pcpu_block_refresh_hint(chunk, s_index);
+ if (s_off == s_block->first_free)
+ s_block->first_free = find_next_zero_bit(
+ pcpu_index_alloc_map(chunk, s_index),
+ PCPU_BITMAP_BLOCK_BITS,
+ s_off + bits);
+
+ if (s_off >= s_block->contig_hint_start &&
+ s_off < s_block->contig_hint_start + s_block->contig_hint) {
+ /* block contig hint is broken - scan to fix it */
+ pcpu_block_refresh_hint(chunk, s_index);
+ } else {
+ /* update left and right contig manually */
+ s_block->left_free = min(s_block->left_free, s_off);
+ if (s_index == e_index)
+ s_block->right_free = min_t(int, s_block->right_free,
+ PCPU_BITMAP_BLOCK_BITS - e_off);
+ else
+ s_block->right_free = 0;
+ }
/*
* Update e_block.
*/
if (s_index != e_index) {
- pcpu_block_refresh_hint(chunk, e_index);
+ /*
+ * When the allocation is across blocks, the end is along
+ * the left part of the e_block.
+ */
+ e_block->first_free = find_next_zero_bit(
+ pcpu_index_alloc_map(chunk, e_index),
+ PCPU_BITMAP_BLOCK_BITS, e_off);
+
+ if (e_off == PCPU_BITMAP_BLOCK_BITS) {
+ /* reset the block */
+ e_block++;
+ } else {
+ if (e_off > e_block->contig_hint_start) {
+ /* contig hint is broken - scan to fix it */
+ pcpu_block_refresh_hint(chunk, e_index);
+ } else {
+ e_block->left_free = 0;
+ e_block->right_free =
+ min_t(int, e_block->right_free,
+ PCPU_BITMAP_BLOCK_BITS - e_off);
+ }
+ }
/* update in-between md_blocks */
for (block = s_block + 1; block < e_block; block++) {
@@ -562,7 +608,14 @@ static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
}
}
- pcpu_chunk_refresh_hint(chunk);
+ /*
+ * The only time a full chunk scan is required is if the chunk
+ * contig hint is broken. Otherwise, it means a smaller space
+ * was used and therefore the chunk contig hint is still correct.
+ */
+ if (bit_off >= chunk->contig_bits_start &&
+ bit_off < chunk->contig_bits_start + chunk->contig_bits)
+ pcpu_chunk_refresh_hint(chunk);
}
/**
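As a concrete (hypothetical) illustration of the final chunk-level check added above: with chunk->contig_bits_start = 128 and chunk->contig_bits = 64, the recorded free run covers bits [128, 192), so an allocation starting at bit 100 leaves the chunk hint alone, while one starting at bit 150 forces a full pcpu_chunk_refresh_hint() scan. A self-contained sketch of just that test, with illustrative names and numbers:

#include <stdbool.h>
#include <stdio.h>

/* Same shape as: bit_off >= chunk->contig_bits_start &&
 *                bit_off <  chunk->contig_bits_start + chunk->contig_bits */
static bool chunk_hint_broken(int contig_start, int contig_len, int bit_off)
{
	return bit_off >= contig_start && bit_off < contig_start + contig_len;
}

int main(void)
{
	/* recorded largest free run: bits [128, 192) */
	printf("%d\n", chunk_hint_broken(128, 64, 100));	/* 0: hint intact, skip rescan */
	printf("%d\n", chunk_hint_broken(128, 64, 150));	/* 1: hint broken, full rescan */
	return 0;
}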