author    Lukas Czerner <lczerner@redhat.com>  2012-11-30 11:42:41 +0100
committer Jens Axboe <axboe@kernel.dk>         2012-11-30 11:48:05 +0100
commit    7b5a35225b0d4fd779cf79d7624e63d1957f6c4d (patch)
tree      5e2324e3b657b8ead9c0cdd58c6ed5d79f8e76f8 /drivers/block/loop.c
parent    eed8c02e680c04cd737e0a9cef74e68d8eb0cefa (diff)
download  linux-7b5a35225b0d4fd779cf79d7624e63d1957f6c4d.tar.bz2
loop: Limit the number of requests in the bio list
Currently there is no limit on the number of requests on the loop bio list. This can lead to some nasty situations when the caller spawns tons of bio requests taking up a huge amount of memory. This is even more obvious with discard, where blkdev_issue_discard() will submit all bios for the range and wait for them to finish afterwards. On really big loop devices with a slow backing file system this can lead to an OOM situation, as reported by Dave Chinner.

With this patch we will wait in loop_make_request() if the number of bios in the loop bio list would exceed 'nr_congestion_on'. We'll wake up the process as we process the bios from the list. Some threshold hysteresis is in place to avoid high frequency oscillation.

Signed-off-by: Lukas Czerner <lczerner@redhat.com>
Reported-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
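The counter and wait queue referenced above live in struct loop_device. The corresponding header change (drivers/block/loop.h) is outside this diffstat, so the following is only a rough sketch of the fields the patch depends on; the field names are taken from the hunks below, their exact types and placement in the struct are illustrative:

struct loop_device {
	/* ... existing fields ... */
	struct bio_list		lo_bio_list;	/* queued bios, drained by loop_thread() */
	unsigned int		lo_bio_count;	/* number of bios currently on lo_bio_list */
	wait_queue_head_t	lo_req_wait;	/* throttled submitters sleep here */
	/* ... existing fields ... */
};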
Diffstat (limited to 'drivers/block/loop.c')
-rw-r--r--  drivers/block/loop.c  10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index e9d594fd12cb..800aec7927de 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -463,6 +463,7 @@ out:
*/
static void loop_add_bio(struct loop_device *lo, struct bio *bio)
{
+ lo->lo_bio_count++;
bio_list_add(&lo->lo_bio_list, bio);
}
@@ -471,6 +472,7 @@ static void loop_add_bio(struct loop_device *lo, struct bio *bio)
*/
static struct bio *loop_get_bio(struct loop_device *lo)
{
+ lo->lo_bio_count--;
return bio_list_pop(&lo->lo_bio_list);
}
@@ -489,6 +491,10 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
goto out;
if (unlikely(rw == WRITE && (lo->lo_flags & LO_FLAGS_READ_ONLY)))
goto out;
+ if (lo->lo_bio_count >= q->nr_congestion_on)
+ wait_event_lock_irq(lo->lo_req_wait,
+ lo->lo_bio_count < q->nr_congestion_off,
+ lo->lo_lock);
loop_add_bio(lo, old_bio);
wake_up(&lo->lo_event);
spin_unlock_irq(&lo->lo_lock);
@@ -546,6 +552,8 @@ static int loop_thread(void *data)
continue;
spin_lock_irq(&lo->lo_lock);
bio = loop_get_bio(lo);
+ if (lo->lo_bio_count < lo->lo_queue->nr_congestion_off)
+ wake_up(&lo->lo_req_wait);
spin_unlock_irq(&lo->lo_lock);
BUG_ON(!bio);
@@ -873,6 +881,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
lo->transfer = transfer_none;
lo->ioctl = NULL;
lo->lo_sizelimit = 0;
+ lo->lo_bio_count = 0;
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
@@ -1660,6 +1669,7 @@ static int loop_add(struct loop_device **l, int i)
lo->lo_number = i;
lo->lo_thread = NULL;
init_waitqueue_head(&lo->lo_event);
+ init_waitqueue_head(&lo->lo_req_wait);
spin_lock_init(&lo->lo_lock);
disk->major = LOOP_MAJOR;
disk->first_minor = i << part_shift;
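A note on the hysteresis mentioned in the commit message: 'nr_congestion_on' and 'nr_congestion_off' are per-queue thresholds derived from q->nr_requests. The sketch below approximates blk_queue_congestion_threshold() from block/blk-core.c of this era; the exact arithmetic may differ slightly between kernel versions:

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	/* Start signalling congestion once roughly 7/8 of nr_requests are queued. */
	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	/*
	 * Clear congestion a bit lower, leaving a gap so submitters are not
	 * woken and re-blocked at high frequency.
	 */
	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}

With the default nr_requests of 128 this gives on/off thresholds of roughly 113 and 103, so a submitter blocked in loop_make_request() is only woken once loop_thread() has drained about ten bios from the list.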