author     Dan Williams <dan.j.williams@intel.com>   2009-01-06 11:38:14 -0700
committer  Dan Williams <dan.j.williams@intel.com>   2009-01-06 11:38:14 -0700
commit     6f49a57aa5a0c6d4e4e27c85f7af6c83325a12d1 (patch)
tree       afba24357d1f4ff69ccb2b39a19542546590a50b /include
parent     07f2211e4fbce6990722d78c4f04225da9c0e9cf (diff)
download   linux-6f49a57aa5a0c6d4e4e27c85f7af6c83325a12d1.tar.bz2
dmaengine: up-level reference counting to the module level
Simply, if a client wants any dmaengine channel then prevent all dmaengine
modules from being removed.  Once the clients are done re-enable module
removal.

Why?, beyond reducing complication:
1/ Tracking reference counts per-transaction in an efficient manner, as is
   currently done, requires a complicated scheme to avoid cache-line bouncing
   effects.
2/ Per-transaction ref-counting gives the false impression that a dma-driver
   can be gracefully removed ahead of its user (net, md, or dma-slave)
3/ None of the in-tree dma-drivers talk to hot pluggable hardware, but if
   such an engine were built one day we still would not need to notify
   clients of remove events.  The driver can simply return NULL to a
   ->prep() request, something that is much easier for a client to handle.

Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
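To make the module-level model concrete, here is a minimal client-side sketch.
It is not part of this diff (which is limited to 'include'); it assumes the
dmaengine_get()/dmaengine_put() pin/unpin pair that this series converges on,
and the client module and memcpy helper shown are hypothetical.

/*
 * Illustrative sketch only -- not taken from this patch.  dmaengine_get()
 * and dmaengine_put() are the module-level pin/unpin pair this series
 * settles on; the client module itself is hypothetical.
 */
#include <linux/module.h>
#include <linux/dmaengine.h>

static int __init example_client_init(void)
{
	/* Hold a reference on every registered dmaengine driver for as long
	 * as this client may issue transactions. */
	dmaengine_get();
	return 0;
}

static void __exit example_client_exit(void)
{
	/* All transactions done: allow dmaengine modules to be removed. */
	dmaengine_put();
}

/*
 * No remove notification is needed: a driver that cannot service a request
 * simply returns NULL from its ->prep() hook and the client falls back to
 * a CPU copy.
 */
static bool example_try_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
				   dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
	if (!tx)
		return false;	/* caller falls back to memcpy() */

	tx->tx_submit(tx);
	return true;
}

module_init(example_client_init);
module_exit(example_client_exit);
MODULE_LICENSE("GPL");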
Diffstat (limited to 'include')
-rw-r--r--  include/linux/dmaengine.h | 21
-rw-r--r--  include/net/netdma.h      |  4
2 files changed, 2 insertions(+), 23 deletions(-)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index e4ec7e7b8056..d18d37d1015d 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -165,7 +165,6 @@ struct dma_slave {
  */
 struct dma_chan_percpu {
-	local_t refcount;
 	/* stats */
 	unsigned long memcpy_count;
 	unsigned long bytes_transferred;
 };
@@ -205,26 +204,6 @@ struct dma_chan {
 void dma_chan_cleanup(struct kref *kref);
 
-static inline void dma_chan_get(struct dma_chan *chan)
-{
-	if (unlikely(chan->slow_ref))
-		kref_get(&chan->refcount);
-	else {
-		local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
-		put_cpu();
-	}
-}
-
-static inline void dma_chan_put(struct dma_chan *chan)
-{
-	if (unlikely(chan->slow_ref))
-		kref_put(&chan->refcount, dma_chan_cleanup);
-	else {
-		local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
-		put_cpu();
-	}
-}
-
 /*
  * typedef dma_event_callback - function pointer to a DMA event callback
  * For each channel added to the system this routine is called for each client.
diff --git a/include/net/netdma.h b/include/net/netdma.h
index f28c6e064e8f..cbe2737f4a61 100644
--- a/include/net/netdma.h
+++ b/include/net/netdma.h
@@ -27,11 +27,11 @@
 static inline struct dma_chan *get_softnet_dma(void)
 {
 	struct dma_chan *chan;
+
 	rcu_read_lock();
 	chan = rcu_dereference(__get_cpu_var(softnet_data).net_dma);
-	if (chan)
-		dma_chan_get(chan);
 	rcu_read_unlock();
+
 	return chan;
 }
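For context, a brief sketch of what this buys a caller of get_softnet_dma():
the returned channel is used directly and there is no per-use reference to
drop, so no dma_chan_put() pairs with the lookup.  The context structure and
function below are hypothetical, loosely modeled on how the net receive path
caches a channel.

/*
 * Hypothetical caller -- not taken from the kernel.  After this patch the
 * lookup needs no matching dma_chan_put(); a NULL channel simply means the
 * caller sticks with ordinary CPU copies.
 */
#include <net/netdma.h>

struct example_rx_state {
	struct dma_chan *dma_chan;
};

static void example_rx_maybe_offload(struct example_rx_state *st)
{
	if (!st->dma_chan)
		st->dma_chan = get_softnet_dma();

	if (!st->dma_chan)
		return;		/* no channel available: plain memcpy path */

	/* ... queue async memcpy operations on st->dma_chan ... */
}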