author	NeilBrown <neilb@suse.com>	2017-03-15 14:05:14 +1100
committer	Shaohua Li <shli@fb.com>	2017-03-22 19:18:43 -0700
commit	210f7cdcf088c304ee0533ffd33d6f71a8821862 (patch)
tree	b039d920dc1bef971d419a06cee4c7544fec4afd
parent	55cc39f345256af241deb6152ff5c06bedd10f11 (diff)
download	linux-210f7cdcf088c304ee0533ffd33d6f71a8821862.tar.bz2
percpu-refcount: support synchronous switch to atomic mode.
percpu_ref_switch_to_atomic_sync() schedules the switch to atomic mode, then
waits for it to complete.

Also export percpu_ref_switch_to_* so they can be used from modules.

This will be used in md/raid to count the number of pending write
requests to an array.  We occasionally need to check if the count is
zero, but most often we don't care.  We always want updates to the
counter to be fast, as in some cases we count every 4K page.

Signed-off-by: NeilBrown <neilb@suse.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Shaohua Li <shli@fb.com>
-rw-r--r--	include/linux/percpu-refcount.h	1
-rw-r--r--	lib/percpu-refcount.c	17
2 files changed, 18 insertions, 0 deletions
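
The following is a hedged sketch, not part of the patch: roughly how a
module-level "pending writes" counter of the kind the commit message
describes might use the new helper.  Every name below (wp_ref, wp_release,
wp_setup, wp_submit_page, wp_all_done) is illustrative, not taken from
md/raid.

#include <linux/gfp.h>
#include <linux/percpu-refcount.h>

static struct percpu_ref wp_ref;

/* Invoked if the count reaches zero while in atomic mode; nothing to do. */
static void wp_release(struct percpu_ref *ref)
{
}

static int wp_setup(void)
{
	int ret = percpu_ref_init(&wp_ref, wp_release, 0, GFP_KERNEL);

	if (ret)
		return ret;
	/* Drop the initial reference so the count tracks only writers. */
	percpu_ref_put(&wp_ref);
	return 0;
}

static void wp_submit_page(void)
{
	percpu_ref_get(&wp_ref);	/* fast per-cpu increment, once per page */
	/* ... issue the write; the completion path calls percpu_ref_put() ... */
}

/* Callers are assumed to serialize these checks against each other. */
static bool wp_all_done(void)
{
	bool idle;

	/*
	 * The occasional "is it zero?" check: force atomic mode and wait
	 * for the switch to finish so the summed count can be trusted,
	 * then return to percpu mode for cheap updates.
	 */
	percpu_ref_switch_to_atomic_sync(&wp_ref);
	idle = percpu_ref_is_zero(&wp_ref);
	percpu_ref_switch_to_percpu(&wp_ref);
	return idle;
}

The _sync variant matters in wp_all_done(): the plain
percpu_ref_switch_to_atomic() only schedules the switch, so reading the
count before the per-cpu counters have been folded in could miss
in-flight writers.
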
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 3a481a49546e..c13dceb87b60 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -99,6 +99,7 @@ int __must_check percpu_ref_init(struct percpu_ref *ref,
 void percpu_ref_exit(struct percpu_ref *ref);
 void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch);
+void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
 void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				  percpu_ref_func_t *confirm_kill);
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 9ac959ef4cae..fe03c6d52761 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -260,6 +260,22 @@ void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
 
 	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
 }
+EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);
+
+/**
+ * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
+ * @ref: percpu_ref to switch to atomic mode
+ *
+ * Schedule switching the ref to atomic mode, and wait for the
+ * switch to complete.  Caller must ensure that no other thread
+ * will switch back to percpu mode.
+ */
+void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
+{
+	percpu_ref_switch_to_atomic(ref, NULL);
+	wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);
 
 /**
  * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
@@ -290,6 +306,7 @@ void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
 
 	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
 }
+EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);
 
 /**
  * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation