author	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-15 12:24:45 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-15 12:24:45 -0800
commit	b3c9dd182ed3bdcdaf0e42625a35924b0497afdc (patch)
tree	ad48ad4d923fee147c736318d0fad35b3755f4f5 /block/blk-settings.c
parent	83c2f912b43c3a7babbb6cb7ae2a5276c1ed2a3e (diff)
parent	5d381efb3d1f1ef10535a31ca0dd9b22fe1e1922 (diff)
download	linux-b3c9dd182ed3bdcdaf0e42625a35924b0497afdc.tar.bz2
Merge branch 'for-3.3/core' of git://git.kernel.dk/linux-block
* 'for-3.3/core' of git://git.kernel.dk/linux-block: (37 commits)
  Revert "block: recursive merge requests"
  block: Stop using macro stubs for the bio data integrity calls
  blockdev: convert some macros to static inlines
  fs: remove unneeded plug in mpage_readpages()
  block: Add BLKROTATIONAL ioctl
  block: Introduce blk_set_stacking_limits function
  block: remove WARN_ON_ONCE() in exit_io_context()
  block: an exiting task should be allowed to create io_context
  block: ioc_cgroup_changed() needs to be exported
  block: recursive merge requests
  block, cfq: fix empty queue crash caused by request merge
  block, cfq: move icq creation and rq->elv.icq association to block core
  block, cfq: restructure io_cq creation path for io_context interface cleanup
  block, cfq: move io_cq exit/release to blk-ioc.c
  block, cfq: move icq cache management to block core
  block, cfq: move io_cq lookup to blk-ioc.c
  block, cfq: move cfqd->icq_list to request_queue and add request->elv.icq
  block, cfq: reorganize cfq_io_context into generic and cfq specific parts
  block: remove elevator_queue->ops
  block: reorder elevator switch sequence
  ...

Fix up conflicts in:

 - block/blk-cgroup.c
	Switch from can_attach_task to can_attach

 - block/cfq-iosched.c
	conflict with now removed cic index changes
	(we now use q->id instead)
Diffstat (limited to 'block/blk-settings.c')
-rw-r--r--	block/blk-settings.c	32
1 file changed, 24 insertions, 8 deletions
diff --git a/block/blk-settings.c b/block/blk-settings.c
index fa1eb0449a05..d3234fc494ad 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -104,9 +104,7 @@ EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
* @lim: the queue_limits structure to reset
*
* Description:
- * Returns a queue_limit struct to its default state. Can be used by
- * stacking drivers like DM that stage table swaps and reuse an
- * existing device queue.
+ * Returns a queue_limit struct to its default state.
*/
void blk_set_default_limits(struct queue_limits *lim)
{
@@ -114,13 +112,12 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->max_integrity_segments = 0;
lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
- lim->max_sectors = BLK_DEF_MAX_SECTORS;
- lim->max_hw_sectors = INT_MAX;
+ lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
lim->max_discard_sectors = 0;
lim->discard_granularity = 0;
lim->discard_alignment = 0;
lim->discard_misaligned = 0;
- lim->discard_zeroes_data = 1;
+ lim->discard_zeroes_data = 0;
lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
lim->alignment_offset = 0;
@@ -131,6 +128,27 @@ void blk_set_default_limits(struct queue_limits *lim)
EXPORT_SYMBOL(blk_set_default_limits);
/**
+ * blk_set_stacking_limits - set default limits for stacking devices
+ * @lim: the queue_limits structure to reset
+ *
+ * Description:
+ * Returns a queue_limit struct to its default state. Should be used
+ * by stacking drivers like DM that have no internal limits.
+ */
+void blk_set_stacking_limits(struct queue_limits *lim)
+{
+ blk_set_default_limits(lim);
+
+ /* Inherit limits from component devices */
+ lim->discard_zeroes_data = 1;
+ lim->max_segments = USHRT_MAX;
+ lim->max_hw_sectors = UINT_MAX;
+
+ lim->max_sectors = BLK_DEF_MAX_SECTORS;
+}
+EXPORT_SYMBOL(blk_set_stacking_limits);
+
+/**
* blk_queue_make_request - define an alternate make_request function for a device
* @q: the request queue for the device to be affected
* @mfn: the alternate make_request function
@@ -165,8 +183,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
q->nr_batching = BLK_BATCH_REQ;
blk_set_default_limits(&q->limits);
- blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
- q->limits.discard_zeroes_data = 0;
/*
* by default assume old behaviour and bounce for any highmem page
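
For context (this is not part of the commit itself): a minimal sketch of how a stacking driver might combine the new blk_set_stacking_limits() helper with the existing bdev_stack_limits()/blk_stack_limits() helpers in this file. The function and parameter names below (example_recalc_limits, parts, nr) are hypothetical, and a zero partition offset is assumed for simplicity.

/*
 * Illustrative sketch only -- not from the patch. A DM-like stacking
 * driver starts from the permissive stacking defaults and then lets
 * every component device narrow them.
 */
#include <linux/blkdev.h>

static void example_recalc_limits(struct request_queue *q,
				  struct block_device **parts, int nr)
{
	struct queue_limits lim;
	int i;

	blk_set_stacking_limits(&lim);	/* permissive starting point */

	for (i = 0; i < nr; i++)
		/* intersect with each component's limits (offset 0 assumed) */
		bdev_stack_limits(&lim, parts[i], 0);

	q->limits = lim;		/* publish the combined limits */
}

The split between the two helpers is the point of the patch: blk_set_default_limits() now seeds conservative values suitable for real hardware (BLK_SAFE_MAX_SECTORS, discard_zeroes_data = 0), so blk_queue_make_request() no longer needs to override them, while blk_set_stacking_limits() deliberately starts wide (USHRT_MAX segments, UINT_MAX hw sectors, discard_zeroes_data = 1) so that the min-style stacking done per component device determines what the stacked queue actually advertises.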