author     Tejun Heo <tj@kernel.org>   2013-06-13 19:23:53 -0700
committer  Tejun Heo <tj@kernel.org>   2013-06-13 19:23:53 -0700
commit     dbece3a0f1ef0b19aff1cc6ed0942fec9ab98de1 (patch)
tree       3f594245c15b2f3ae8de6ec0c339e024988d22c4 /lib
parent     bc497bd33b2d6a6f07bc8574b4764edbd7fdffa8 (diff)
percpu-refcount: implement percpu_ref_tryget() along with percpu_ref_kill_and_confirm()
Implement percpu_ref_tryget(), which stops giving out references once the
percpu_ref is visible as killed. Because the refcount is per-CPU, different
CPUs will start to see the refcount as killed at different points in time,
and tryget() may continue to succeed on a subset of CPUs for a while after
percpu_ref_kill() returns.

For use cases where it is necessary to know when all CPUs have started to
see the refcount as dead, percpu_ref_kill_and_confirm() is added. The new
function takes an extra argument @confirm_kill, which is invoked once the
refcount is guaranteed to be viewed as killed on all CPUs. While this isn't
the prettiest interface, it doesn't force a synchronous wait and is much
safer than requiring the caller to do its own call_rcu().

v2: Patch description rephrased to emphasize that tryget() may continue to
succeed on some CPUs after kill() returns, as suggested by Kent.

v3: Function comment on percpu_ref_kill_and_confirm() updated to warn
people not to depend on the implied RCU grace period from the confirm
callback, as it's an implementation detail.

Signed-off-by: Tejun Heo <tj@kernel.org>
Slightly-Grumpily-Acked-by: Kent Overstreet <koverstreet@google.com>
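As an illustration of the resulting API (not part of this patch): the sketch
below shows a typical shutdown flow. struct my_obj, my_obj_release(),
my_obj_confirm_kill(), my_obj_get() and my_obj_shutdown() are hypothetical
names; only percpu_ref_init(), percpu_ref_tryget() and
percpu_ref_kill_and_confirm() are the interfaces this series provides.

#include <linux/kernel.h>
#include <linux/percpu-refcount.h>
#include <linux/slab.h>

/* Hypothetical object whose lifetime is managed by a percpu_ref. */
struct my_obj {
	struct percpu_ref ref;
	/* ... payload ... */
};

/* Invoked when the last reference is dropped. */
static void my_obj_release(struct percpu_ref *ref)
{
	kfree(container_of(ref, struct my_obj, ref));
}

/*
 * Invoked once @ref is seen as dead on all CPUs, i.e. once no further
 * percpu_ref_tryget() can succeed anywhere.  Must not block.
 */
static void my_obj_confirm_kill(struct percpu_ref *ref)
{
	/* e.g. unhash the object from lookup structures, wake up waiters */
}

/* Fast path: hand out a new reference only while the object is alive. */
static bool my_obj_get(struct my_obj *obj)
{
	return percpu_ref_tryget(&obj->ref);
}

static void my_obj_shutdown(struct my_obj *obj)
{
	/*
	 * Drop the initial ref taken by percpu_ref_init(&obj->ref,
	 * my_obj_release).  tryget() may keep succeeding on some CPUs
	 * for a while; my_obj_confirm_kill() runs once it no longer can.
	 */
	percpu_ref_kill_and_confirm(&obj->ref, my_obj_confirm_kill);
}

The value of the confirm callback is that shutdown learns when no new
references can appear without having to block in synchronize_rcu().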
Diffstat (limited to 'lib')
-rw-r--r--  lib/percpu-refcount.c | 23
1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index ebeaac274cb9..8bf9e719cca0 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -118,6 +118,10 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 
 	atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count);
 
+	/* @ref is viewed as dead on all CPUs, send out kill confirmation */
+	if (ref->confirm_kill)
+		ref->confirm_kill(ref);
+
 	/*
 	 * Now we're in single atomic_t mode with a consistent refcount, so it's
 	 * safe to drop our initial ref:
@@ -126,22 +130,29 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 }
 
 /**
- * percpu_ref_kill - safely drop initial ref
+ * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
  * @ref: percpu_ref to kill
+ * @confirm_kill: optional confirmation callback
  *
- * Must be used to drop the initial ref on a percpu refcount; must be called
- * precisely once before shutdown.
+ * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
+ * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
+ * called after @ref is seen as dead from all CPUs - all further
+ * invocations of percpu_ref_tryget() will fail. See percpu_ref_tryget()
+ * for more details.
 *
- * Puts @ref in non percpu mode, then does a call_rcu() before gathering up the
- * percpu counters and dropping the initial ref.
+ * Due to the way percpu_ref is implemented, @confirm_kill will be called
+ * after at least one full RCU grace period has passed but this is an
+ * implementation detail and callers must not depend on it.
  */
-void percpu_ref_kill(struct percpu_ref *ref)
+void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
+				 percpu_ref_func_t *confirm_kill)
 {
 	WARN_ONCE(REF_STATUS(ref->pcpu_count) == PCPU_REF_DEAD,
 		  "percpu_ref_kill() called more than once!\n");
 
 	ref->pcpu_count = (unsigned __percpu *)
 		(((unsigned long) ref->pcpu_count)|PCPU_REF_DEAD);
 
+	ref->confirm_kill = confirm_kill;
 	call_rcu(&ref->rcu, percpu_ref_kill_rcu);
 }
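The header-side counterparts live in include/linux/percpu-refcount.h and are
not in the lib/-limited diff above. The following is an approximate sketch of
them, not a verbatim quote from the series: percpu_ref_kill() becomes the
@confirm_kill == NULL case, and percpu_ref_tryget() succeeds only while the
ref is still in percpu mode, inside an RCU read-side critical section so it
races safely against percpu_ref_kill_rcu().

/* The pre-existing kill interface becomes the no-confirmation case. */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill_and_confirm(ref, NULL);
}

/*
 * Take a reference only if @ref is still in percpu mode.  The RCU
 * read-side critical section pairs with the call_rcu() in
 * percpu_ref_kill_and_confirm(): by the time percpu_ref_kill_rcu()
 * folds the percpu counts into the atomic_t, no CPU can still be in
 * this section having observed the live percpu pointer.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count;
	bool ret = false;

	rcu_read_lock();

	pcpu_count = ACCESS_ONCE(ref->pcpu_count);

	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
		__this_cpu_inc(*pcpu_count);
		ret = true;
	}

	rcu_read_unlock();

	return ret;
}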