author	Davidlohr Bueso <dave@stgolabs.net>	2017-09-08 16:15:08 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-08 18:26:49 -0700
commit	f808c13fd3738948e10196496959871130612b61
tree	0f9b1bf3ccc9c4d051bf4fed87b493dced56d032 /mm
parent	09663c86e24953556ff8696efa023557901f2b66
lib/interval_tree: fast overlap detection
Allow interval trees to quickly check for overlaps to avoid unnecessary
tree lookups in interval_tree_iter_first().

As of this patch, all interval tree flavors will require using a
'rb_root_cached' such that we can have the leftmost node easily
available.  While most users will make use of this feature, those with
special functions (in addition to the generic insert, delete, search
calls) will avoid using the cached option as they can do funky things
with insertions -- for example, vma_interval_tree_insert_after().

[jglisse@redhat.com: fix deadlock from typo vm_lock_anon_vma()]
  Link: http://lkml.kernel.org/r/20170808225719.20723-1-jglisse@redhat.com
Link: http://lkml.kernel.org/r/20170719014603.19029-12-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Doug Ledford <dledford@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Christian Benvenuti <benve@cisco.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
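The gist of the fast path, as a hedged illustration: 'rb_root_cached'
pairs the tree root with a pointer to the leftmost (smallest-start)
node, so interval_tree_iter_first() can reject a query [first, last] in
O(1) when even the leftmost interval starts past 'last'.  The userspace
sketch below mimics that check with a plain unbalanced BST standing in
for the kernel's augmented rbtree; the names (itnode, itroot_cached,
it_iter_first) are invented for illustration and are not kernel APIs.

#include <stdio.h>

/* Closed interval [start, last], kept in a BST ordered by 'start'. */
struct itnode {
	unsigned long start, last;
	struct itnode *left, *right;
};

/* Analogous to rb_root_cached: the root plus a cached leftmost node. */
struct itroot_cached {
	struct itnode *root;
	struct itnode *leftmost;	/* node with the smallest 'start' */
};

static void it_insert(struct itroot_cached *tree, struct itnode *node)
{
	struct itnode **link = &tree->root;
	int leftmost = 1;		/* did we only ever descend left? */

	node->left = node->right = NULL;
	while (*link) {
		if (node->start < (*link)->start) {
			link = &(*link)->left;
		} else {
			link = &(*link)->right;
			leftmost = 0;
		}
	}
	*link = node;
	if (leftmost)			/* maintain the cache on insert */
		tree->leftmost = node;
}

/* Naive full scan; the kernel prunes subtrees via rb_subtree_last. */
static struct itnode *it_scan(struct itnode *node,
			      unsigned long first, unsigned long last)
{
	struct itnode *hit;

	if (!node)
		return NULL;
	if (node->start <= last && first <= node->last)
		return node;	/* [first, last] overlaps [start, last] */
	hit = it_scan(node->left, first, last);
	return hit ? hit : it_scan(node->right, first, last);
}

static struct itnode *it_iter_first(struct itroot_cached *tree,
				    unsigned long first, unsigned long last)
{
	if (!tree->root)
		return NULL;
	/*
	 * Fast overlap check: every node starts at or after the cached
	 * leftmost node, so if even that one begins past 'last', no
	 * node in the tree can overlap the query -- rejected in O(1).
	 */
	if (tree->leftmost->start > last)
		return NULL;
	return it_scan(tree->root, first, last);
}

int main(void)
{
	struct itroot_cached tree = { NULL, NULL };
	struct itnode a = { 10, 20, NULL, NULL };
	struct itnode b = { 30, 40, NULL, NULL };

	it_insert(&tree, &a);
	it_insert(&tree, &b);
	/* [2,5] ends before the leftmost interval starts: O(1) reject. */
	printf("overlap [2,5]:   %s\n",
	       it_iter_first(&tree, 2, 5) ? "yes" : "no");
	printf("overlap [15,35]: %s\n",
	       it_iter_first(&tree, 15, 35) ? "yes" : "no");
	return 0;
}

Note that the subtree-based pruning via the rb_subtree_last
augmentation predates this patch; the leftmost-based early-out sketched
above is what the cached root makes possible here.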
Diffstat (limited to 'mm')
-rw-r--r--	mm/interval_tree.c	10
-rw-r--r--	mm/memory.c		4
-rw-r--r--	mm/mmap.c		10
-rw-r--r--	mm/rmap.c		4
4 files changed, 14 insertions, 14 deletions
diff --git a/mm/interval_tree.c b/mm/interval_tree.c
index f2c2492681bf..b47664358796 100644
--- a/mm/interval_tree.c
+++ b/mm/interval_tree.c
@@ -28,7 +28,7 @@ INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
/* Insert node immediately after prev in the interval tree */
void vma_interval_tree_insert_after(struct vm_area_struct *node,
struct vm_area_struct *prev,
- struct rb_root *root)
+ struct rb_root_cached *root)
{
struct rb_node **link;
struct vm_area_struct *parent;
@@ -55,7 +55,7 @@ void vma_interval_tree_insert_after(struct vm_area_struct *node,
node->shared.rb_subtree_last = last;
rb_link_node(&node->shared.rb, &parent->shared.rb, link);
- rb_insert_augmented(&node->shared.rb, root,
+ rb_insert_augmented(&node->shared.rb, &root->rb_root,
&vma_interval_tree_augment);
}
@@ -74,7 +74,7 @@ INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
static inline, __anon_vma_interval_tree)
void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
- struct rb_root *root)
+ struct rb_root_cached *root)
{
#ifdef CONFIG_DEBUG_VM_RB
node->cached_vma_start = avc_start_pgoff(node);
@@ -84,13 +84,13 @@ void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
}
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
- struct rb_root *root)
+ struct rb_root_cached *root)
{
__anon_vma_interval_tree_remove(node, root);
}
struct anon_vma_chain *
-anon_vma_interval_tree_iter_first(struct rb_root *root,
+anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
unsigned long first, unsigned long last)
{
return __anon_vma_interval_tree_iter_first(root, first, last);
diff --git a/mm/memory.c b/mm/memory.c
index 0bbc1d612a63..ec4e15494901 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2761,7 +2761,7 @@ static void unmap_mapping_range_vma(struct vm_area_struct *vma,
zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
}
-static inline void unmap_mapping_range_tree(struct rb_root *root,
+static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
struct zap_details *details)
{
struct vm_area_struct *vma;
@@ -2825,7 +2825,7 @@ void unmap_mapping_range(struct address_space *mapping,
details.last_index = ULONG_MAX;
i_mmap_lock_write(mapping);
- if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
+ if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
unmap_mapping_range_tree(&mapping->i_mmap, &details);
i_mmap_unlock_write(mapping);
}
diff --git a/mm/mmap.c b/mm/mmap.c
index 4c5981651407..680506faceae 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -685,7 +685,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *next = vma->vm_next, *orig_vma = vma;
struct address_space *mapping = NULL;
- struct rb_root *root = NULL;
+ struct rb_root_cached *root = NULL;
struct anon_vma *anon_vma = NULL;
struct file *file = vma->vm_file;
bool start_changed = false, end_changed = false;
@@ -3340,7 +3340,7 @@ static DEFINE_MUTEX(mm_all_locks_mutex);
static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
{
- if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
+ if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
/*
* The LSB of head.next can't change from under us
* because we hold the mm_all_locks_mutex.
@@ -3356,7 +3356,7 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
* anon_vma->root->rwsem.
*/
if (__test_and_set_bit(0, (unsigned long *)
- &anon_vma->root->rb_root.rb_node))
+ &anon_vma->root->rb_root.rb_root.rb_node))
BUG();
}
}
@@ -3458,7 +3458,7 @@ out_unlock:
static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
{
- if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
+ if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
/*
* The LSB of head.next can't change to 0 from under
* us because we hold the mm_all_locks_mutex.
@@ -3472,7 +3472,7 @@ static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
* anon_vma->root->rwsem.
*/
if (!__test_and_clear_bit(0, (unsigned long *)
- &anon_vma->root->rb_root.rb_node))
+ &anon_vma->root->rb_root.rb_root.rb_node))
BUG();
anon_vma_unlock_write(anon_vma);
}
diff --git a/mm/rmap.c b/mm/rmap.c
index 0618cd85b862..b874c4761e84 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -391,7 +391,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
* Leave empty anon_vmas on the list - we'll need
* to free them outside the lock.
*/
- if (RB_EMPTY_ROOT(&anon_vma->rb_root)) {
+ if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
anon_vma->parent->degree--;
continue;
}
@@ -425,7 +425,7 @@ static void anon_vma_ctor(void *data)
init_rwsem(&anon_vma->rwsem);
atomic_set(&anon_vma->refcount, 0);
- anon_vma->rb_root = RB_ROOT;
+ anon_vma->rb_root = RB_ROOT_CACHED;
}
void __init anon_vma_init(void)