From 669cc7ba77864e7b1ac39c9f2b2afb8730f341f4 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Thu, 4 Sep 2014 16:30:38 +1000
Subject: md/raid1: clean up request counts properly in close_sync()

If there are outstanding writes when close_sync is called,
the change to ->start_next_window might cause them to
decrement the wrong counter when they complete.  Fix this
by merging the two counters into the one that will be
decremented.

Having an incorrect value in a counter can cause raise_barrier()
to hang, so this is suitable for -stable.

Fixes: 79ef3a8aa1cb1523cc231c9a90a278333c21f761
cc: stable@vger.kernel.org (v3.13+)
Signed-off-by: NeilBrown
---
 drivers/md/raid1.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index ad0468c42d23..a31c92bbcfc9 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1545,8 +1545,13 @@ static void close_sync(struct r1conf *conf)
 	mempool_destroy(conf->r1buf_pool);
 	conf->r1buf_pool = NULL;
 
+	spin_lock_irq(&conf->resync_lock);
 	conf->next_resync = 0;
 	conf->start_next_window = MaxSector;
+	conf->current_window_requests +=
+		conf->next_window_requests;
+	conf->next_window_requests = 0;
+	spin_unlock_irq(&conf->resync_lock);
 }
 
 static int raid1_spare_active(struct mddev *mddev)
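
Why merging the counters fixes the hang: a write that started at or beyond
->start_next_window was counted in next_window_requests, but on completion
the sector is tested against ->start_next_window again to pick the counter
to decrement.  Once close_sync() moves the window to MaxSector, every
completion lands on current_window_requests, so any count still held in
next_window_requests must be folded into it first.  Below is a minimal
user-space sketch of that accounting; it is illustration only, not the
kernel code: the names mirror the struct r1conf fields, but the locking,
bio handling, and types are reduced to bare counters.

/*
 * Simplified user-space model of the raid1 window accounting
 * (illustration only -- not the kernel code).
 */
#include <stdio.h>

typedef unsigned long long sector_t;
#define MaxSector (~(sector_t)0)

static sector_t start_next_window;
static int current_window_requests;
static int next_window_requests;

/* Mirrors the start-of-write accounting: pick a counter by window. */
static void start_write(sector_t sector)
{
	if (sector >= start_next_window)
		next_window_requests++;
	else
		current_window_requests++;
}

/*
 * Completion repeats the same test, so it is only correct if
 * start_next_window has not moved while the write was in flight.
 */
static void end_write(sector_t sector)
{
	if (sector >= start_next_window)
		next_window_requests--;
	else
		current_window_requests--;
}

int main(void)
{
	start_next_window = 1000;
	start_write(1500);		/* counted in next_window_requests */

	/* close_sync() moves the window while the write is in flight. */
	start_next_window = MaxSector;

	/*
	 * The fix: fold the pending next-window count into the counter
	 * that completions will now decrement.  Without this, end_write()
	 * would leave current_window_requests at -1 and
	 * next_window_requests stuck at 1.
	 */
	current_window_requests += next_window_requests;
	next_window_requests = 0;

	end_write(1500);		/* decrements current_window_requests */
	printf("current=%d next=%d\n",
	       current_window_requests, next_window_requests);
	return 0;
}

With the merge in place both counters drain back to zero, so
raise_barrier(), which waits on these counts, can make progress.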