author		Tejun Heo <htejun@gmail.com>	2008-02-19 11:36:53 +0100
committer	Jens Axboe <jens.axboe@oracle.com>	2008-02-19 11:36:53 +0100
commit		2fb98e8414c42cb14698833aac640b143b9ade4f (patch)
tree		ab241305b5ce3ea5ef3e76df7abe04b831cd9e34 /block
parent		6b00769fe1502b4ad97bb327ef7ac971b208bfb5 (diff)
download	linux-2fb98e8414c42cb14698833aac640b143b9ade4f.tar.bz2
block: implement request_queue->dma_drain_needed
Draining shouldn't be done for commands where overflow may indicate data
integrity issues.  Add a dma_drain_needed callback to request_queue.  The
drain buffer is appended iff this function returns non-zero.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r--	block/blk-merge.c	2
-rw-r--r--	block/blk-settings.c	7
2 files changed, 6 insertions(+), 3 deletions(-)
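The diffstat is limited to block/, so the include/linux/blkdev.h half of the
change is not part of this page. Reconstructed from the identifiers the patch
uses (dma_drain_needed_fn, q->dma_drain_needed), the header side would look
roughly like the sketch below; treat it as an illustration, not the verbatim
header hunk.

/* Sketch of the include/linux/blkdev.h side (not shown in this diffstat). */
struct request;

/* Callback type: return non-zero if this request needs the drain buffer. */
typedef int (dma_drain_needed_fn)(struct request *);

struct request_queue {
	/* ... existing members ... */
	dma_drain_needed_fn	*dma_drain_needed;	/* new: per-request decision */
	void			*dma_drain_buffer;	/* pre-existing drain buffer */
	unsigned int		dma_drain_size;		/* pre-existing drain size */
	/* ... */
};

/* Updated setup helper, now taking the callback. */
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);

The dma_drain_buffer and dma_drain_size members are what blk_rq_map_sg()
already used; this patch only adds the callback that decides, per request,
whether they get appended.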
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 39f2e077a014..bef1b4d0fc02 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -220,7 +220,7 @@ new_segment:
 		bvprv = bvec;
 	} /* segments in rq */
 
-	if (q->dma_drain_size) {
+	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
 		sg->page_link &= ~0x02;
 		sg = sg_next(sg);
 		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
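With the hunk above, the drain segment is appended only when the queue's
callback approves it for the specific request. What that callback checks is
entirely up to the low-level driver; the sketch below is a hypothetical
example (the function name and the policy are made up here), showing only the
non-zero-means-drain contract:

/*
 * Hypothetical callback (illustrative only): request the drain segment
 * for non-filesystem requests, where the device may return more data
 * than the command asked for.
 */
static int example_dma_drain_needed(struct request *rq)
{
	/* blk_fs_request() was the 2.6.25-era check for regular fs I/O. */
	return !blk_fs_request(rq);	/* non-zero => append drain buffer */
}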
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 13536a388d27..9a8ffdd0ce3d 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -296,6 +296,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * blk_queue_dma_drain - Set up a drain buffer for excess dma.
  *
  * @q: the request queue for the device
+ * @dma_drain_needed: fn which returns non-zero if drain is necessary
  * @buf: physically contiguous buffer
  * @size: size of the buffer in bytes
  *
@@ -315,14 +316,16 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * device can support otherwise there won't be room for the drain
  * buffer.
  */
-int blk_queue_dma_drain(struct request_queue *q, void *buf,
-			unsigned int size)
+extern int blk_queue_dma_drain(struct request_queue *q,
+			       dma_drain_needed_fn *dma_drain_needed,
+			       void *buf, unsigned int size)
 {
 	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
 		return -EINVAL;
 	/* make room for appending the drain */
 	--q->max_hw_segments;
 	--q->max_phys_segments;
+	q->dma_drain_needed = dma_drain_needed;
 	q->dma_drain_buffer = buf;
 	q->dma_drain_size = size;
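Putting the two halves together, a driver that allocates a drain buffer would
now pass its callback when registering it. The sketch below is hypothetical
(the setup function, the buffer size, and the example_dma_drain_needed
callback from the sketch above are illustrative); blk_queue_dma_drain()
itself still returns -EINVAL unless there are at least two segments of
headroom, as the check above shows.

/* Hypothetical queue setup in a low-level driver. */
static int example_init_queue(struct request_queue *q)
{
	void *drain_buf;

	/* One page of physically contiguous drain space (size is arbitrary). */
	drain_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!drain_buf)
		return -ENOMEM;

	/* Register the buffer together with the per-request callback. */
	return blk_queue_dma_drain(q, example_dma_drain_needed,
				   drain_buf, PAGE_SIZE);
}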