author		Mauro Carvalho Chehab <mchehab+huawei@kernel.org>	2020-04-21 19:04:07 +0200
committer	Paul E. McKenney <paulmck@kernel.org>			2020-06-29 11:58:11 -0700
commit		90c73cb2c65f9e78eb09a8cbcd4bcd4add2d3f4d (patch)
tree		3daa33a4ec460b51c35f2a7aee8f6a9c62a08e7e /Documentation/RCU
parent		43cb5451dffe0bc5d59688d4898c9a1f7c40d3b4 (diff)
download	linux-90c73cb2c65f9e78eb09a8cbcd4bcd4add2d3f4d.tar.bz2
docs: RCU: Convert rcuref.txt to ReST
- Add a SPDX header;
- Adjust document title;
- Some whitespace fixes and new line breaks;
- Mark literal blocks as such;
- Add it to RCU/index.rst.
Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Diffstat (limited to 'Documentation/RCU')
-rw-r--r--	Documentation/RCU/index.rst								  1
-rw-r--r--	Documentation/RCU/rcuref.rst (renamed from Documentation/RCU/rcuref.txt)		199
2 files changed, 104 insertions, 96 deletions
diff --git a/Documentation/RCU/index.rst b/Documentation/RCU/index.rst
index 5d5f9a1ab8f9..9a1d51f394dc 100644
--- a/Documentation/RCU/index.rst
+++ b/Documentation/RCU/index.rst
@@ -18,6 +18,7 @@ RCU concepts
    whatisRCU
    rcu
    rculist_nulls
+   rcuref
    torture
    listRCU
    NMI-RCU
diff --git a/Documentation/RCU/rcuref.txt b/Documentation/RCU/rcuref.rst
index 5e6429d66c24..b33aeb14fde3 100644
--- a/Documentation/RCU/rcuref.txt
+++ b/Documentation/RCU/rcuref.rst
@@ -1,4 +1,8 @@
-Reference-count design for elements of lists/arrays protected by RCU.
+.. SPDX-License-Identifier: GPL-2.0
+
+====================================================================
+Reference-count design for elements of lists/arrays protected by RCU
+====================================================================
 
 
 Please note that the percpu-ref feature is likely your first
@@ -12,32 +16,33 @@ please read on.
 
 Reference counting on elements of lists which are protected by traditional
 reader/writer spinlocks or semaphores are straightforward:
 
-CODE LISTING A:
-1.                                      2.
-add()                                   search_and_reference()
-{                                       {
-    alloc_object                            read_lock(&list_lock);
-    ...                                     search_for_element
-    atomic_set(&el->rc, 1);                 atomic_inc(&el->rc);
-    write_lock(&list_lock);                 ...
-    add_element                             read_unlock(&list_lock);
-    ...                                     ...
-    write_unlock(&list_lock);           }
-}
-
-3.                                      4.
-release_referenced()                    delete()
-{                                       {
-    ...                                     write_lock(&list_lock);
-    if(atomic_dec_and_test(&el->rc))        ...
-        kfree(el);
-    ...                                     remove_element
-}                                           write_unlock(&list_lock);
-                                            ...
-                                            if (atomic_dec_and_test(&el->rc))
-                                                kfree(el);
-                                            ...
-                                        }
+CODE LISTING A::
+
+ 1.                                      2.
+ add()                                   search_and_reference()
+ {                                       {
+     alloc_object                            read_lock(&list_lock);
+     ...                                     search_for_element
+     atomic_set(&el->rc, 1);                 atomic_inc(&el->rc);
+     write_lock(&list_lock);                 ...
+     add_element                             read_unlock(&list_lock);
+     ...                                     ...
+     write_unlock(&list_lock);           }
+ }
+
+ 3.                                      4.
+ release_referenced()                    delete()
+ {                                       {
+     ...                                     write_lock(&list_lock);
+     if(atomic_dec_and_test(&el->rc))        ...
+         kfree(el);
+     ...                                     remove_element
+ }                                           write_unlock(&list_lock);
+                                             ...
+                                             if (atomic_dec_and_test(&el->rc))
+                                                 kfree(el);
+                                             ...
+                                         }
 
 If this list/array is made lock free using RCU as in changing the
 write_lock() in add() and delete() to spin_lock() and changing read_lock()
@@ -46,34 +51,35 @@ search_and_reference() could potentially hold reference to an element
 which has already been deleted from the list/array.  Use
 atomic_inc_not_zero() in this scenario as follows:
 
-CODE LISTING B:
-1.                                      2.
-add()                                   search_and_reference()
-{                                       {
-    alloc_object                            rcu_read_lock();
-    ...                                     search_for_element
-    atomic_set(&el->rc, 1);                 if (!atomic_inc_not_zero(&el->rc)) {
-    spin_lock(&list_lock);                      rcu_read_unlock();
-                                                return FAIL;
-    add_element                             }
-    ...                                     ...
-    spin_unlock(&list_lock);                rcu_read_unlock();
-}                                       }
-3.                                      4.
-release_referenced()                    delete()
-{                                       {
-    ...                                     spin_lock(&list_lock);
-    if (atomic_dec_and_test(&el->rc))       ...
-        call_rcu(&el->head, el_free);       remove_element
-    ...                                     spin_unlock(&list_lock);
-}                                           ...
-                                            if (atomic_dec_and_test(&el->rc))
-                                                call_rcu(&el->head, el_free);
-                                            ...
-                                        }
+CODE LISTING B::
+
+ 1.                                      2.
+ add()                                   search_and_reference()
+ {                                       {
+     alloc_object                            rcu_read_lock();
+     ...                                     search_for_element
+     atomic_set(&el->rc, 1);                 if (!atomic_inc_not_zero(&el->rc)) {
+     spin_lock(&list_lock);                      rcu_read_unlock();
+                                                 return FAIL;
+     add_element                             }
+     ...                                     ...
+     spin_unlock(&list_lock);                rcu_read_unlock();
+ }                                       }
+ 3.                                      4.
+ release_referenced()                    delete()
+ {                                       {
+     ...                                     spin_lock(&list_lock);
+     if (atomic_dec_and_test(&el->rc))       ...
+         call_rcu(&el->head, el_free);       remove_element
+     ...                                     spin_unlock(&list_lock);
+ }                                           ...
+                                             if (atomic_dec_and_test(&el->rc))
+                                                 call_rcu(&el->head, el_free);
+                                             ...
+                                         }
 
 Sometimes, a reference to the element needs to be obtained in the
-update (write) stream. In such cases, atomic_inc_not_zero() might be
+update (write) stream.  In such cases, atomic_inc_not_zero() might be
 overkill, since we hold the update-side spinlock.  One might instead
 use atomic_inc() in such cases.
@@ -82,39 +88,40 @@ search_and_reference() code path.  In such cases, the
 atomic_dec_and_test() may be moved from delete() to el_free()
 as follows:
 
-CODE LISTING C:
-1.                                      2.
-add()                                   search_and_reference()
-{                                       {
-    alloc_object                            rcu_read_lock();
-    ...                                     search_for_element
-    atomic_set(&el->rc, 1);                 atomic_inc(&el->rc);
-    spin_lock(&list_lock);                  ...
-
-    add_element                             rcu_read_unlock();
-    ...                                 }
-    spin_unlock(&list_lock);            4.
-}                                       delete()
-3.                                      {
-release_referenced()                        spin_lock(&list_lock);
-{                                           ...
-    ...                                     remove_element
-    if (atomic_dec_and_test(&el->rc))       spin_unlock(&list_lock);
-        kfree(el);                          ...
-    ...                                     call_rcu(&el->head, el_free);
-}                                           ...
-5.                                      }
-void el_free(struct rcu_head *rhp)
-{
-    release_referenced();
-}
+CODE LISTING C::
+
+ 1.                                      2.
+ add()                                   search_and_reference()
+ {                                       {
+     alloc_object                            rcu_read_lock();
+     ...                                     search_for_element
+     atomic_set(&el->rc, 1);                 atomic_inc(&el->rc);
+     spin_lock(&list_lock);                  ...
+
+     add_element                             rcu_read_unlock();
+     ...                                 }
+     spin_unlock(&list_lock);            4.
+ }                                       delete()
+ 3.                                      {
+ release_referenced()                        spin_lock(&list_lock);
+ {                                           ...
+     ...                                     remove_element
+     if (atomic_dec_and_test(&el->rc))       spin_unlock(&list_lock);
+         kfree(el);                          ...
+     ...                                     call_rcu(&el->head, el_free);
+ }                                           ...
+ 5.                                      }
+ void el_free(struct rcu_head *rhp)
+ {
+     release_referenced();
+ }
 
 The key point is that the initial reference added by add() is not removed
 until after a grace period has elapsed following removal.  This means that
 search_and_reference() cannot find this element, which means that the value
 of el->rc cannot increase.  Thus, once it reaches zero, there are no
-readers that can or ever will be able to reference the element. The
-element can therefore safely be freed. This in turn guarantees that if
+readers that can or ever will be able to reference the element.  The
+element can therefore safely be freed.  This in turn guarantees that if
 any reader finds the element, that reader may safely acquire a reference
 without checking the value of the reference counter.
@@ -130,21 +137,21 @@ the eventual invocation of kfree(), which is usually not a problem on
 modern computer systems, even the small ones.
 
 In cases where delete() can sleep, synchronize_rcu() can be called from
-delete(), so that el_free() can be subsumed into delete as follows:
-
-4.
-delete()
-{
-    spin_lock(&list_lock);
-    ...
-    remove_element
-    spin_unlock(&list_lock);
-    ...
-    synchronize_rcu();
-    if (atomic_dec_and_test(&el->rc))
-        kfree(el);
-    ...
-}
+delete(), so that el_free() can be subsumed into delete as follows::
+
+ 4.
+ delete()
+ {
+     spin_lock(&list_lock);
+     ...
+     remove_element
+     spin_unlock(&list_lock);
+     ...
+     synchronize_rcu();
+     if (atomic_dec_and_test(&el->rc))
+         kfree(el);
+     ...
+ }
 
 As additional examples in the kernel, the pattern in listing C is used by
 reference counting of struct pid, while the pattern in listing B is used by
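
As an aside for readers of this commit, the pattern in CODE LISTING B can be
spelled out as ordinary kernel C rather than two-column pseudocode. The sketch
below is not part of the patch: "struct foo", "foo_list", "foo_lock", the
integer key, and the foo_*() names are hypothetical stand-ins, and only the
reference-counting logic mirrors the listing.

/*
 * Minimal sketch of CODE LISTING B; not part of this commit.
 * struct foo, foo_list, foo_lock, and the integer key are hypothetical.
 */
#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	struct list_head list;
	struct rcu_head rcu;
	atomic_t rc;
	int key;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);

/* el_free(): runs only after a grace period, when no reader can hold el. */
static void foo_free_rcu(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct foo, rcu));
}

/* 1. add(): publish a new element while holding the initial reference. */
static int foo_add(int key)
{
	struct foo *el = kzalloc(sizeof(*el), GFP_KERNEL);

	if (!el)
		return -ENOMEM;
	el->key = key;
	atomic_set(&el->rc, 1);
	spin_lock(&foo_lock);
	list_add_rcu(&el->list, &foo_list);
	spin_unlock(&foo_lock);
	return 0;
}

/* 2. search_and_reference(): NULL plays the role of FAIL in the listing. */
static struct foo *foo_search_and_reference(int key)
{
	struct foo *el;

	rcu_read_lock();
	list_for_each_entry_rcu(el, &foo_list, list) {
		if (el->key != key)
			continue;
		/*
		 * Refuse a reference if delete() already dropped the
		 * initial one: el is on its way to foo_free_rcu().
		 */
		if (!atomic_inc_not_zero(&el->rc))
			break;
		rcu_read_unlock();
		return el;
	}
	rcu_read_unlock();
	return NULL;
}

/* 3. release_referenced() */
static void foo_release_referenced(struct foo *el)
{
	if (atomic_dec_and_test(&el->rc))
		call_rcu(&el->rcu, foo_free_rcu);
}

/* 4. delete(): unpublish the element, then drop the initial reference. */
static void foo_delete(struct foo *el)
{
	spin_lock(&foo_lock);
	list_del_rcu(&el->list);
	spin_unlock(&foo_lock);
	if (atomic_dec_and_test(&el->rc))
		call_rcu(&el->rcu, foo_free_rcu);
}

Under listing C's contract, where readers are guaranteed a reference, the
atomic_inc_not_zero() above would become a plain atomic_inc(), and the initial
reference would only ever be dropped from the RCU callback.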
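Likewise, the document's final variant, where delete() may sleep, folds the
deferred free into delete() via synchronize_rcu(). A sketch using the same
hypothetical struct foo:

/* Sleeping variant of delete(): wait out pre-existing readers, then free. */
static void foo_delete_sync(struct foo *el)
{
	spin_lock(&foo_lock);
	list_del_rcu(&el->list);
	spin_unlock(&foo_lock);
	synchronize_rcu();	/* all readers that could see el have finished */
	if (atomic_dec_and_test(&el->rc))
		kfree(el);
}

In this variant release_referenced() may call kfree() directly, since the
grace period has already elapsed by the time the initial reference is dropped.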