author	Jonathan Brassow <jbrassow@redhat.com>	2012-07-31 10:03:53 +1000
committer	NeilBrown <neilb@suse.de>	2012-07-31 10:03:53 +1000
commit	cc4d1efdd017083bbcbaf23feb4cdc717fa7dab8 (patch)
tree	a6a7775a4715ab009c00ec4ef7655eafc307b694 /drivers/md/raid10.c
parent	473e87ce485ffcac041f7911b33f0b4cd4d6cf2b (diff)
download	linux-cc4d1efdd017083bbcbaf23feb4cdc717fa7dab8.tar.bz2
MD RAID10: Export md_raid10_congested
md/raid10: Export is_congested test.

In similar fashion to commits
	11d8a6e3719519fbc0e2c9d61b6fa931b84bf813
	1ed7242e591af7e233234d483f12d33818b189d9
we export the RAID10 congestion checking function so that dm-raid.c can
make use of it and make use of the personality.

The 'queue' and 'gendisk' structures will not be available to the MD code
when device-mapper sets up the device, so we conditionalize access to
these fields also.

Signed-off-by: Jonathan Brassow <jbrassow@redhat.com>
Signed-off-by: NeilBrown <neilb@suse.de>
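For context, here is a minimal sketch of how a device-mapper target such as dm-raid could route congestion queries to the newly exported helper. The raid_set wrapper, its field names, and raid_is_congested() are illustrative assumptions rather than the actual dm-raid.c code; only md_raid10_congested() and the dm_target_callbacks mechanism come from the kernel tree.

/*
 * Illustrative sketch only (not the actual dm-raid.c code): a dm target
 * driving an MD raid10 personality has no gendisk/queue for the array,
 * so it answers congestion queries by calling md_raid10_congested()
 * directly.  struct raid_set and raid_is_congested() are hypothetical.
 */
#include <linux/kernel.h>		/* container_of() */
#include <linux/device-mapper.h>	/* struct dm_target_callbacks */
#include "md.h"				/* struct mddev */
#include "raid10.h"			/* md_raid10_congested() */

struct raid_set {
	struct dm_target_callbacks callbacks;	/* embedded for container_of() */
	struct mddev md;			/* MD array run by this target */
};

static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);

	/* No bdi congested_fn hook exists here, so ask the personality. */
	return md_raid10_congested(&rs->md, bits);
}

In dm, such a callback would typically be set as callbacks.congested_fn and registered from the target constructor via dm_table_add_target_callbacks(); the exact dm-raid wiring is not part of this diff.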
Diffstat (limited to 'drivers/md/raid10.c')
-rw-r--r--	drivers/md/raid10.c	56
1 file changed, 34 insertions(+), 22 deletions(-)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e77acf024055..e2549deab7c3 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -853,9 +853,8 @@ retry:
 	return rdev;
 }
 
-static int raid10_congested(void *data, int bits)
+int md_raid10_congested(struct mddev *mddev, int bits)
 {
-	struct mddev *mddev = data;
 	struct r10conf *conf = mddev->private;
 	int i, ret = 0;
@@ -863,8 +862,6 @@ static int raid10_congested(void *data, int bits)
 	if ((bits & (1 << BDI_async_congested)) &&
 	    conf->pending_count >= max_queued_requests)
 		return 1;
-	if (mddev_congested(mddev, bits))
-		return 1;
 	rcu_read_lock();
 	for (i = 0;
 	     (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
@@ -880,6 +877,15 @@ static int raid10_congested(void *data, int bits)
 	rcu_read_unlock();
 	return ret;
 }
+EXPORT_SYMBOL_GPL(md_raid10_congested);
+
+static int raid10_congested(void *data, int bits)
+{
+	struct mddev *mddev = data;
+
+	return mddev_congested(mddev, bits) ||
+		md_raid10_congested(mddev, bits);
+}
 
 static void flush_pending_writes(struct r10conf *conf)
 {
@@ -3486,12 +3492,14 @@ static int run(struct mddev *mddev)
 	conf->thread = NULL;
 
 	chunk_size = mddev->chunk_sectors << 9;
-	blk_queue_io_min(mddev->queue, chunk_size);
-	if (conf->geo.raid_disks % conf->geo.near_copies)
-		blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
-	else
-		blk_queue_io_opt(mddev->queue, chunk_size *
-				 (conf->geo.raid_disks / conf->geo.near_copies));
+	if (mddev->queue) {
+		blk_queue_io_min(mddev->queue, chunk_size);
+		if (conf->geo.raid_disks % conf->geo.near_copies)
+			blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
+		else
+			blk_queue_io_opt(mddev->queue, chunk_size *
+					 (conf->geo.raid_disks / conf->geo.near_copies));
+	}
 
 	rdev_for_each(rdev, mddev) {
 		long long diff;
@@ -3525,8 +3533,9 @@ static int run(struct mddev *mddev)
 		if (first || diff < min_offset_diff)
 			min_offset_diff = diff;
 
-		disk_stack_limits(mddev->gendisk, rdev->bdev,
-				  rdev->data_offset << 9);
+		if (mddev->gendisk)
+			disk_stack_limits(mddev->gendisk, rdev->bdev,
+					  rdev->data_offset << 9);
 
 		disk->head_position = 0;
 	}
@@ -3589,22 +3598,22 @@ static int run(struct mddev *mddev)
 	md_set_array_sectors(mddev, size);
 	mddev->resync_max_sectors = size;
-	mddev->queue->backing_dev_info.congested_fn = raid10_congested;
-	mddev->queue->backing_dev_info.congested_data = mddev;
-
-	/* Calculate max read-ahead size.
-	 * We need to readahead at least twice a whole stripe....
-	 * maybe...
-	 */
-	{
+	if (mddev->queue) {
 		int stripe = conf->geo.raid_disks *
 			((mddev->chunk_sectors << 9) / PAGE_SIZE);
+		mddev->queue->backing_dev_info.congested_fn = raid10_congested;
+		mddev->queue->backing_dev_info.congested_data = mddev;
+
+		/* Calculate max read-ahead size.
+		 * We need to readahead at least twice a whole stripe....
+		 * maybe...
+		 */
 		stripe /= conf->geo.near_copies;
 		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
 			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+		blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
 	}
-	blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
 	if (md_integrity_register(mddev))
 		goto out_free_conf;
@@ -3655,7 +3664,10 @@ static int stop(struct mddev *mddev)
 	lower_barrier(conf);
 
 	md_unregister_thread(&mddev->thread);
-	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
+	if (mddev->queue)
+		/* the unplug fn references 'conf'*/
+		blk_sync_queue(mddev->queue);
+
 	if (conf->r10bio_pool)
 		mempool_destroy(conf->r10bio_pool);
 	kfree(conf->mirrors);
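A closing note on the export itself: EXPORT_SYMBOL_GPL() only makes the symbol linkable from other modules; callers also need a prototype. Because the diffstat above is limited to drivers/md/raid10.c, the companion declaration is not part of the diff shown here; presumably it lives in drivers/md/raid10.h along these lines (an assumption, not a line from this diff):

extern int md_raid10_congested(struct mddev *mddev, int bits);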