author     Xiao Ni <xni@redhat.com>    2017-04-27 16:28:49 +0800
committer  Shaohua Li <shli@fb.com>    2017-04-27 14:01:16 -0700
commit     43ac9b84a399bc10210a2d9f4e0778b7c6059c07 (patch)
tree       fed7746123467e4c61fe7ce792aa888a0252f1d3 /drivers/md/raid1.c
parent     e5bc9c3c5432f5531a58e6fdd9f6c6587f2137b3 (diff)
download   linux-43ac9b84a399bc10210a2d9f4e0778b7c6059c07.tar.bz2
md/raid1: Use a new variable to count in-flight sync requests
In the new barrier code, raise_barrier() waits until conf->nr_pending[idx] is zero. Once all of its conditions are satisfied the resync request can proceed, but it then increments conf->nr_pending[idx] itself, so the next resync request that hits the same bucket idx has to wait for the previously submitted resync request, and resync/recovery performance is degraded. Use a new variable instead to count the sync requests that are in flight.

A simple test:

1. Without the patch, create a RAID1 array with two disks. The resync speed:

Device:   rrqm/s   wrqm/s      r/s      w/s    rMB/s    wMB/s  avgrq-sz  avgqu-sz   await  r_await  w_await   svctm   %util
sdb         0.00     0.00   166.00     0.00    10.38     0.00    128.00      0.03    0.20     0.20     0.00    0.19    3.20
sdc         0.00     0.00     0.00   166.00     0.00    10.38    128.00      0.96    5.77     0.00     5.77    5.75   95.50

2. With the patch, the result is:

sdb      2214.00     0.00   766.00     0.00   185.69     0.00    496.46      2.80    3.66     3.66     0.00    1.03   79.10
sdc         0.00  2205.00     0.00   769.00     0.00   186.44    496.52      5.25    6.84     0.00     6.84    1.30  100.10

Suggested-by: Shaohua Li <shli@kernel.org>
Signed-off-by: Xiao Ni <xni@redhat.com>
Acked-by: Coly Li <colyli@suse.de>
Signed-off-by: Shaohua Li <shli@fb.com>
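The counting scheme the patch switches to can be illustrated with a small standalone sketch. It uses C11 stdatomic instead of the kernel's atomic_t, spinlock and wait-queue machinery, the bucket count is an arbitrary illustrative value, and the helper barrier_can_be_raised() is invented for readability; this is not the raid1.c code itself, only a simplified model of the idea:

#include <stdatomic.h>
#include <stdbool.h>

#define RESYNC_DEPTH		32
#define BARRIER_BUCKETS_NR	(1 << 10)	/* illustrative value */

struct conf {
	atomic_int nr_pending[BARRIER_BUCKETS_NR];	/* regular I/O per bucket */
	atomic_int nr_queued[BARRIER_BUCKETS_NR];	/* I/O parked on retry lists */
	atomic_int barrier[BARRIER_BUCKETS_NR];		/* raised resync barriers per bucket */
	atomic_int nr_sync_pending;			/* in-flight sync I/O (the new counter) */
};

/* Condition a resync request must wait for before entering bucket idx. */
static bool barrier_can_be_raised(struct conf *conf, int idx)
{
	return atomic_load(&conf->nr_pending[idx]) == 0 &&
	       atomic_load(&conf->barrier[idx]) < RESYNC_DEPTH;
}

static void raise_barrier(struct conf *conf, int idx)
{
	/* Announce a resync barrier for this bucket... */
	atomic_fetch_add(&conf->barrier[idx], 1);

	/*
	 * ...then block until there is no regular I/O in the bucket and
	 * fewer than RESYNC_DEPTH barriers are raised:
	 *	while (!barrier_can_be_raised(conf, idx)) wait();
	 */

	/*
	 * Before the patch the request then did
	 *	atomic_inc(&conf->nr_pending[idx]);
	 * so the next resync request hashing to the same bucket saw
	 * nr_pending[idx] != 0 and stalled behind it.  Counting sync
	 * requests in a dedicated variable keeps nr_pending[idx] for
	 * regular I/O only.
	 */
	atomic_fetch_add(&conf->nr_sync_pending, 1);
}

static void lower_barrier(struct conf *conf, int idx)
{
	atomic_fetch_sub(&conf->barrier[idx], 1);
	atomic_fetch_sub(&conf->nr_sync_pending, 1);
	/* ...wake up waiters... */
}

With the per-bucket counter reserved for regular I/O, up to RESYNC_DEPTH resync requests can be in flight against the same bucket instead of one at a time, which accounts for the higher resync throughput in the iostat numbers above.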
Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r--  drivers/md/raid1.c | 7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 70a596c10306..5d19c1a7d90a 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -885,7 +885,7 @@ static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
 			    atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH,
 			    conf->resync_lock);
-	atomic_inc(&conf->nr_pending[idx]);
+	atomic_inc(&conf->nr_sync_pending);
 	spin_unlock_irq(&conf->resync_lock);
 }
@@ -896,7 +896,7 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
 	BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);
 	atomic_dec(&conf->barrier[idx]);
-	atomic_dec(&conf->nr_pending[idx]);
+	atomic_dec(&conf->nr_sync_pending);
 	wake_up(&conf->wait_barrier);
 }
@@ -1033,7 +1033,8 @@ static int get_unqueued_pending(struct r1conf *conf)
 {
 	int idx, ret;
-	for (ret = 0, idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
+	ret = atomic_read(&conf->nr_sync_pending);
+	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
 		ret += atomic_read(&conf->nr_pending[idx]) -
 			atomic_read(&conf->nr_queued[idx]);
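For completeness, a hedged sketch of the accounting get_unqueued_pending() performs after this patch, in the same simplified C and reusing the illustrative struct conf from the sketch above (in raid1.c this total is what freeze_array() waits on before treating all outstanding I/O as drained):

/* Count all pending I/O that has not been queued for retry. */
static int get_unqueued_pending(struct conf *conf)
{
	int idx, ret;

	/*
	 * Sync requests no longer show up in nr_pending[], so the
	 * dedicated in-flight sync counter is added explicitly...
	 */
	ret = atomic_load(&conf->nr_sync_pending);

	/*
	 * ...plus, per bucket, regular I/O that is pending but not
	 * parked on the retry lists.
	 */
	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
		ret += atomic_load(&conf->nr_pending[idx]) -
		       atomic_load(&conf->nr_queued[idx]);

	return ret;
}

Since sync requests no longer appear in nr_pending[], an in-flight sync request would otherwise be invisible to this count; that is why the hunk above seeds ret from nr_sync_pending instead of zero.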