-rw-r--r--	block/blk-core.c	74
1 file changed, 35 insertions(+), 39 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 64f9a8668253..080204a10fcf 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -837,7 +837,7 @@ static struct io_context *rq_ioc(struct bio *bio)
}
/**
- * get_request - get a free request
+ * __get_request - get a free request
* @q: request_queue to allocate request from
* @rw_flags: RW and SYNC flags
* @bio: bio to allocate request for (can be %NULL)
@@ -850,8 +850,8 @@ static struct io_context *rq_ioc(struct bio *bio)
* Returns %NULL on failure, with @q->queue_lock held.
* Returns !%NULL on success, with @q->queue_lock *not held*.
*/
-static struct request *get_request(struct request_queue *q, int rw_flags,
- struct bio *bio, gfp_t gfp_mask)
+static struct request *__get_request(struct request_queue *q, int rw_flags,
+ struct bio *bio, gfp_t gfp_mask)
{
struct request *rq;
struct request_list *rl = &q->rq;
@@ -1029,56 +1029,55 @@ rq_starved:
}
/**
- * get_request_wait - get a free request with retry
+ * get_request - get a free request
* @q: request_queue to allocate request from
* @rw_flags: RW and SYNC flags
* @bio: bio to allocate request for (can be %NULL)
+ * @gfp_mask: allocation mask
*
- * Get a free request from @q. This function keeps retrying under memory
- * pressure and fails iff @q is dead.
+ * Get a free request from @q. If %__GFP_WAIT is set in @gfp_mask, this
+ * function keeps retrying under memory pressure and fails iff @q is dead.
*
 * Must be called with @q->queue_lock held and,
* Returns %NULL on failure, with @q->queue_lock held.
* Returns !%NULL on success, with @q->queue_lock *not held*.
*/
-static struct request *get_request_wait(struct request_queue *q, int rw_flags,
- struct bio *bio)
+static struct request *get_request(struct request_queue *q, int rw_flags,
+ struct bio *bio, gfp_t gfp_mask)
{
const bool is_sync = rw_is_sync(rw_flags) != 0;
+ DEFINE_WAIT(wait);
+ struct request_list *rl = &q->rq;
struct request *rq;
+retry:
+ rq = __get_request(q, rw_flags, bio, gfp_mask);
+ if (rq)
+ return rq;
- rq = get_request(q, rw_flags, bio, GFP_NOIO);
- while (!rq) {
- DEFINE_WAIT(wait);
- struct request_list *rl = &q->rq;
-
- if (unlikely(blk_queue_dead(q)))
- return NULL;
-
- prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
- TASK_UNINTERRUPTIBLE);
+ if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q)))
+ return NULL;
- trace_block_sleeprq(q, bio, rw_flags & 1);
+ /* wait on @rl and retry */
+ prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
+ TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(q->queue_lock);
- io_schedule();
+ trace_block_sleeprq(q, bio, rw_flags & 1);
- /*
- * After sleeping, we become a "batching" process and
- * will be able to allocate at least one request, and
- * up to a big batch of them for a small period time.
- * See ioc_batching, ioc_set_batching
- */
- create_io_context(GFP_NOIO, q->node);
- ioc_set_batching(q, current->io_context);
+ spin_unlock_irq(q->queue_lock);
+ io_schedule();
- spin_lock_irq(q->queue_lock);
- finish_wait(&rl->wait[is_sync], &wait);
+ /*
+ * After sleeping, we become a "batching" process and will be able
+ * to allocate at least one request, and up to a big batch of them
+ * for a small period of time. See ioc_batching, ioc_set_batching
+ */
+ create_io_context(GFP_NOIO, q->node);
+ ioc_set_batching(q, current->io_context);
- rq = get_request(q, rw_flags, bio, GFP_NOIO);
- };
+ spin_lock_irq(q->queue_lock);
+ finish_wait(&rl->wait[is_sync], &wait);
- return rq;
+ goto retry;
}
struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
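
With get_request_wait() folded in, the retry behavior is selected purely by @gfp_mask: a mask containing %__GFP_WAIT (e.g. GFP_NOIO or GFP_KERNEL) sleeps and retries until the queue dies, while a mask without it fails fast. A minimal caller-side sketch, assuming a live request_queue 'q' owned by the caller; the helper name is hypothetical, not part of this patch:

/* Hypothetical helper contrasting the two allocation modes. */
static struct request *alloc_rq_sketch(struct request_queue *q, bool can_sleep)
{
	if (can_sleep)
		/* GFP_KERNEL includes __GFP_WAIT: may block on the
		 * request_list waitqueue, failing only if @q is dead. */
		return blk_get_request(q, WRITE, GFP_KERNEL);

	/* GFP_ATOMIC lacks __GFP_WAIT: returns NULL immediately when
	 * no request is available instead of sleeping. */
	return blk_get_request(q, WRITE, GFP_ATOMIC);
}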
@@ -1088,10 +1087,7 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
BUG_ON(rw != READ && rw != WRITE);
spin_lock_irq(q->queue_lock);
- if (gfp_mask & __GFP_WAIT)
- rq = get_request_wait(q, rw, NULL);
- else
- rq = get_request(q, rw, NULL, gfp_mask);
+ rq = get_request(q, rw, NULL, gfp_mask);
if (!rq)
spin_unlock_irq(q->queue_lock);
/* q->queue_lock is unlocked at this point */
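
Note the asymmetric locking contract carried over from get_request(): a %NULL return leaves @q->queue_lock held (so the caller drops it, as above), while a successful return comes back with the lock already released. A sketch of the same dance for a hypothetical internal caller (the function name is illustrative):

/* Mirrors the locking pattern in blk_get_request() above. */
static struct request *get_one_rq(struct request_queue *q)
{
	struct request *rq;

	spin_lock_irq(q->queue_lock);
	rq = get_request(q, READ, NULL, GFP_NOIO);
	if (!rq) {
		/* failure path: @q->queue_lock is still held */
		spin_unlock_irq(q->queue_lock);
		return NULL;
	}
	/* success path: get_request() already dropped the lock */
	return rq;
}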
@@ -1481,7 +1477,7 @@ get_rq:
 * Grab a free request. This might sleep but cannot fail.
* Returns with the queue unlocked.
*/
- req = get_request_wait(q, rw_flags, bio);
+ req = get_request(q, rw_flags, bio, GFP_NOIO);
if (unlikely(!req)) {
bio_endio(bio, -ENODEV); /* @q is dead */
goto out_unlock;
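
The loop folded into get_request() is the standard exclusive wait-and-retry idiom: register as a waiter while still holding the lock (so a concurrent free cannot slip a wakeup past us), drop the lock, sleep, then reacquire and retry from the top. Below is a generic, self-contained sketch of that idiom; 'pool_lock', 'pool_wait', 'try_alloc()' and 'alloc_from_pool()' are hypothetical stand-ins, not symbols from this patch:

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(pool_lock);
static DECLARE_WAIT_QUEUE_HEAD(pool_wait);

extern void *try_alloc(void);		/* hypothetical fast-path allocator */

static void *alloc_from_pool(void)
{
	DEFINE_WAIT(wait);
	void *obj;

retry:
	spin_lock_irq(&pool_lock);
	obj = try_alloc();
	if (obj) {
		spin_unlock_irq(&pool_lock);
		return obj;
	}

	/* Queue as an exclusive waiter while the lock is still held, so
	 * one wakeup releases exactly one waiter and none are lost. */
	prepare_to_wait_exclusive(&pool_wait, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&pool_lock);
	io_schedule();			/* sleep until a free wakes us */
	finish_wait(&pool_wait, &wait);
	goto retry;
}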