author     Sergey Senozhatsky <sergey.senozhatsky@gmail.com>   2015-02-12 15:00:36 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>      2015-02-12 18:54:11 -0800
commit     ba6b17d68c8e3aa8d55d0474299cb931965c5ea5 (patch)
tree       3130712f638e8158e7b5f985cec46189564fc935 /drivers
parent     1fec117281d9f5349c35279c9521f4096fa33357 (diff)
download   linux-ba6b17d68c8e3aa8d55d0474299cb931965c5ea5.tar.bz2
zram: fix umount-reset_store-mount race condition
Ganesh Mahendran was the first one who proposed to use bdev->bd_mutex
to avoid the ->bd_holders race condition:

        CPU0                                    CPU1
umount  /* zram->init_done is true */
reset_store()
bdev->bd_holders == 0                           mount
...                                             zram_make_request()
zram_reset_device()

However, his solution required a considerable amount of code movement,
which we can avoid.

Apart from using bdev->bd_mutex in reset_store(), this patch also
simplifies zram_reset_device().

zram_reset_device() has a bool parameter reset_capacity which tells it
whether the disk capacity, and the disk itself, should be reset. There
are two zram_reset_device() callers:
-- zram_exit() passes reset_capacity=false
-- reset_store() passes reset_capacity=true

So we can move the reset_capacity-sensitive work out of zram_reset_device()
and perform it unconditionally in reset_store(). This also lets us drop
the reset_capacity parameter from zram_reset_device() and pass only the
zram pointer.

Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Reported-by: Ganesh Mahendran <opensource.ganesh@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
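The locking pattern the commit message describes can be modeled outside the kernel. The sketch below is a minimal user-space approximation, not kernel code: a pthread mutex stands in for bdev->bd_mutex, plain ints stand in for bdev->bd_holders and the zram init state, and all names in it are invented for illustration. Because the holders check and the whole teardown run under the same lock that the "mount" side takes, only two orderings remain: reset wins and the mount sees an already-reset device, or the mount wins and the reset returns -EBUSY. I/O can no longer overlap the teardown. (A consolidated view of the patched reset_store() itself follows the diff at the end.)

/* Build with: cc -pthread race_model.c */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-ins for the kernel objects (names invented for this sketch). */
static pthread_mutex_t bd_mutex = PTHREAD_MUTEX_INITIALIZER; /* bdev->bd_mutex   */
static int bd_holders;                                       /* bdev->bd_holders */
static int device_ready = 1;                                 /* "zram is initialized" */

/* Models reset_store() after the patch: the holders check and the whole
 * teardown sit under bd_mutex, so no new holder can sneak in between them. */
static void *reset_store_model(void *arg)
{
	pthread_mutex_lock(&bd_mutex);
	if (bd_holders) {
		pthread_mutex_unlock(&bd_mutex);
		printf("reset: -EBUSY, device is held\n");
		return NULL;
	}
	device_ready = 0;	/* zram_reset_device() + set_capacity(disk, 0) */
	usleep(1000);		/* pretend the teardown takes a while */
	pthread_mutex_unlock(&bd_mutex);
	printf("reset: done\n");
	return NULL;
}

/* Models a concurrent mount followed by I/O (zram_make_request()). */
static void *mount_model(void *arg)
{
	int saw_ready;

	pthread_mutex_lock(&bd_mutex);
	bd_holders++;			/* mount takes a holder under bd_mutex */
	saw_ready = device_ready;
	pthread_mutex_unlock(&bd_mutex);

	if (!saw_ready) {
		printf("mount: device already reset, failing cleanly\n");
	} else {
		usleep(500);		/* I/O in flight while we hold the device */
		/* With the fix, reset refuses to run while bd_holders != 0,
		 * so the device must still be intact here. */
		printf("io: device_ready=%d (never 0 with the fix)\n", device_ready);
	}

	pthread_mutex_lock(&bd_mutex);
	bd_holders--;			/* umount */
	pthread_mutex_unlock(&bd_mutex);
	return NULL;
}

int main(void)
{
	pthread_t reset, mount;

	pthread_create(&reset, NULL, reset_store_model, NULL);
	pthread_create(&mount, NULL, mount_model, NULL);
	pthread_join(reset, NULL);
	pthread_join(mount, NULL);
	return 0;
}

Whichever thread takes the lock first, neither interleaving reproduces the original bug, where the holders check and the teardown were not atomic with respect to a new holder appearing.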
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/block/zram/zram_drv.c   23
1 file changed, 9 insertions(+), 14 deletions(-)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 0e07652cf7c1..2607bd9f4955 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -715,7 +715,7 @@ static void zram_bio_discard(struct zram *zram, u32 index,
 	}
 }
-static void zram_reset_device(struct zram *zram, bool reset_capacity)
+static void zram_reset_device(struct zram *zram)
 {
 	down_write(&zram->init_lock);
@@ -734,18 +734,7 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
 	memset(&zram->stats, 0, sizeof(zram->stats));
 	zram->disksize = 0;
-	if (reset_capacity)
-		set_capacity(zram->disk, 0);
-
 	up_write(&zram->init_lock);
-
-	/*
-	 * Revalidate disk out of the init_lock to avoid lockdep splat.
-	 * It's okay because disk's capacity is protected by init_lock
-	 * so that revalidate_disk always sees up-to-date capacity.
-	 */
-	if (reset_capacity)
-		revalidate_disk(zram->disk);
 }
 static ssize_t disksize_store(struct device *dev,
@@ -818,6 +807,7 @@ static ssize_t reset_store(struct device *dev,
 	if (!bdev)
 		return -ENOMEM;
+	mutex_lock(&bdev->bd_mutex);
 	/* Do not reset an active device! */
 	if (bdev->bd_holders) {
 		ret = -EBUSY;
@@ -835,12 +825,17 @@ static ssize_t reset_store(struct device *dev,
 	/* Make sure all pending I/O is finished */
 	fsync_bdev(bdev);
+	zram_reset_device(zram);
+	set_capacity(zram->disk, 0);
+
+	mutex_unlock(&bdev->bd_mutex);
+	revalidate_disk(zram->disk);
 	bdput(bdev);
-	zram_reset_device(zram, true);
 	return len;
 out:
+	mutex_unlock(&bdev->bd_mutex);
 	bdput(bdev);
 	return ret;
 }
@@ -1186,7 +1181,7 @@ static void __exit zram_exit(void)
 		 * Shouldn't access zram->disk after destroy_device
 		 * because destroy_device already released zram->disk.
 		 */
-		zram_reset_device(zram, false);
+		zram_reset_device(zram);
 	}
 	unregister_blkdev(zram_major, "zram");
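Read together, the reset_store() hunks give roughly the following shape after this patch. This is a sketch assembled from the hunks above, not the verbatim file: the lookup of zram/bdev from the sysfs device and the validation of the value written to the attribute fall between the hunks and are only indicated by comments here.

static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	struct zram *zram;
	struct block_device *bdev;

	/* zram and bdev are looked up from "dev" here in the real file;
	 * that code sits between the hunks above and is elided. */

	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	/* ... validation of the value written to the attribute, jumping
	 * to "out" on error (also elided between the hunks) ... */

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);

	/* Teardown and capacity reset run with bd_mutex held, so no new
	 * holder can appear between the bd_holders check and this point. */
	zram_reset_device(zram);
	set_capacity(zram->disk, 0);

	mutex_unlock(&bdev->bd_mutex);
	/* revalidate_disk() stays outside init_lock (and bd_mutex), which
	 * is why the old in-function comment about the lockdep splat could
	 * be dropped along with the reset_capacity parameter. */
	revalidate_disk(zram->disk);
	bdput(bdev);

	return len;

out:
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	return ret;
}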