author    Linus Torvalds <torvalds@linux-foundation.org>  2019-02-02 10:16:28 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-02-02 10:16:28 -0800
commit    c8864cb70fd13beb57211982bf3b78b6629f1a68 (patch)
tree      b2f94166c842a3e8c32a34a8b141816084fae3b7
parent    3cde55ee7921609331178c84cca485491c97df2a (diff)
parent    9a6d5488002fdca7134a0e59b0ae252f61042810 (diff)
Merge tag 'for-linus-20190202' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "A few fixes that should go into this release. This contains:

   - MD pull request from Song, fixing a recovery OOM issue (Alexei)

   - Fix for a sync related stall (Jianchao)

   - Dummy callback for timeouts (Tetsuo)

   - IDE atapi sense ordering fix (me)"

* tag 'for-linus-20190202' of git://git.kernel.dk/linux-block:
  ide: ensure atapi sense request aren't preempted
  blk-mq: fix a hung issue when fsync
  block: pass no-op callback to INIT_WORK().
  md/raid5: fix 'out of memory' during raid cache recovery
 block/blk-core.c         |  6
 block/blk-flush.c        |  2
 drivers/ide/ide-atapi.c  |  9
 drivers/ide/ide-io.c     | 61
 drivers/ide/ide-park.c   |  2
 drivers/ide/ide-probe.c  | 23
 drivers/md/raid5-cache.c | 33
 drivers/md/raid5.c       |  8
 include/linux/ide.h      |  2
 9 files changed, 93 insertions(+), 53 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 3c5f61ceeb67..6b78ec56a4f2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -462,6 +462,10 @@ static void blk_rq_timed_out_timer(struct timer_list *t)
kblockd_schedule_work(&q->timeout_work);
}
+static void blk_timeout_work(struct work_struct *work)
+{
+}
+
/**
* blk_alloc_queue_node - allocate a request queue
* @gfp_mask: memory allocation flags
@@ -505,7 +509,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
laptop_mode_timer_fn, 0);
timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
- INIT_WORK(&q->timeout_work, NULL);
+ INIT_WORK(&q->timeout_work, blk_timeout_work);
INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
INIT_LIST_HEAD(&q->blkg_list);
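
This is the "Dummy callback for timeouts (Tetsuo)" fix: the workqueue core calls work->func unconditionally once a queued item runs, so initializing q->timeout_work with a NULL callback oopses as soon as anything schedules it. A minimal userspace sketch of the idiom, with hypothetical names (plain function pointers, not the kernel workqueue API):

#include <stdio.h>

/* Userspace analogue of a work item: a bare callback slot. */
struct work_item {
	void (*func)(struct work_item *w);
};

/* A no-op stub, mirroring blk_timeout_work() in the patch above. */
static void noop_work(struct work_item *w)
{
}

/* The "worker" calls through the slot unconditionally, just as the
 * kernel's worker thread calls work->func. */
static void run_work(struct work_item *w)
{
	w->func(w);	/* func == NULL here would be a NULL dereference */
}

int main(void)
{
	struct work_item w = { .func = noop_work };

	run_work(&w);	/* safe: the stub simply returns */
	printf("no-op callback ran\n");
	return 0;
}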
diff --git a/block/blk-flush.c b/block/blk-flush.c
index a3fc7191c694..6e0f2d97fc6d 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -335,7 +335,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
- blk_mq_run_hw_queue(hctx, true);
+ blk_mq_sched_restart(hctx);
}
/**
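
The blk-flush.c change is the "Fix for a sync related stall (Jianchao)" item: the dispatch path can park a request and set a restart mark that only blk_mq_sched_restart() acts on, so kicking the queue with blk_mq_run_hw_queue() at flush-data completion could leave that mark set and the fsync stalled. A hedged userspace sketch of the mark-handoff idea (hypothetical names, a simplification of blk-mq's SCHED_RESTART logic):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool need_restart;	/* analogue of the queue's restart mark */

/* Dispatch side: out of resources, park and ask to be rerun later. */
static void dispatch_failed(void)
{
	atomic_store(&need_restart, true);
}

static void rerun_queue(void)
{
	printf("queue rerun\n");
}

/* Completion side: rerun only if the mark was set, clearing it in the
 * same atomic step so exactly one rerun happens per parked dispatcher. */
static void complete_request(void)
{
	if (atomic_exchange(&need_restart, false))
		rerun_queue();
}

int main(void)
{
	dispatch_failed();
	complete_request();	/* prints "queue rerun" */
	return 0;
}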
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index da58020a144e..33a28cde126c 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -235,21 +235,28 @@ EXPORT_SYMBOL_GPL(ide_prep_sense);
int ide_queue_sense_rq(ide_drive_t *drive, void *special)
{
- struct request *sense_rq = drive->sense_rq;
+ ide_hwif_t *hwif = drive->hwif;
+ struct request *sense_rq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwif->lock, flags);
/* deferred failure from ide_prep_sense() */
if (!drive->sense_rq_armed) {
printk(KERN_WARNING PFX "%s: error queuing a sense request\n",
drive->name);
+ spin_unlock_irqrestore(&hwif->lock, flags);
return -ENOMEM;
}
+ sense_rq = drive->sense_rq;
ide_req(sense_rq)->special = special;
drive->sense_rq_armed = false;
drive->hwif->rq = NULL;
ide_insert_request_head(drive, sense_rq);
+ spin_unlock_irqrestore(&hwif->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
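
The ide-atapi.c hunk is part of the "IDE atapi sense ordering fix (me)" item: the sense_rq_armed check and the consumption of drive->sense_rq previously ran outside hwif->lock, so a concurrent context could disarm or steal the sense request between the check and the use. The fix makes check-and-consume a single critical section. A small pthread sketch of that idiom (hypothetical names, not the IDE driver's types):

#include <errno.h>
#include <pthread.h>

/* A one-slot "armed" resource that must be checked and consumed under
 * one lock, as the patch does for sense_rq_armed / sense_rq. */
struct slot {
	pthread_mutex_t lock;
	int armed;		/* analogue of drive->sense_rq_armed */
	void *resource;		/* analogue of drive->sense_rq */
};

static int consume_slot(struct slot *s, void **out)
{
	pthread_mutex_lock(&s->lock);
	if (!s->armed) {		/* check ... */
		pthread_mutex_unlock(&s->lock);
		return -ENOMEM;
	}
	*out = s->resource;		/* ... and consume atomically */
	s->armed = 0;
	pthread_mutex_unlock(&s->lock);
	return 0;
}

int main(void)
{
	struct slot s = { PTHREAD_MUTEX_INITIALIZER, 1, &s };
	void *r;

	return consume_slot(&s, &r);	/* 0 on success */
}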
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 8445b484ae69..b137f27a34d5 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -68,8 +68,10 @@ int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
}
if (!blk_update_request(rq, error, nr_bytes)) {
- if (rq == drive->sense_rq)
+ if (rq == drive->sense_rq) {
drive->sense_rq = NULL;
+ drive->sense_rq_active = false;
+ }
__blk_mq_end_request(rq, error);
return 0;
@@ -451,16 +453,11 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
}
-/*
- * Issue a new request to a device.
- */
-blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
+blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
+ bool local_requeue)
{
- ide_drive_t *drive = hctx->queue->queuedata;
- ide_hwif_t *hwif = drive->hwif;
+ ide_hwif_t *hwif = drive->hwif;
struct ide_host *host = hwif->host;
- struct request *rq = bd->rq;
ide_startstop_t startstop;
if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
@@ -474,8 +471,6 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ide_lock_host(host, hwif))
return BLK_STS_DEV_RESOURCE;
- blk_mq_start_request(rq);
-
spin_lock_irq(&hwif->lock);
if (!ide_lock_port(hwif)) {
@@ -511,18 +506,6 @@ repeat:
drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
/*
- * we know that the queue isn't empty, but this can happen
- * if ->prep_rq() decides to kill a request
- */
- if (!rq) {
- rq = bd->rq;
- if (!rq) {
- ide_unlock_port(hwif);
- goto out;
- }
- }
-
- /*
* Sanity: don't accept a request that isn't a PM request
* if we are currently power managed. This is very important as
* blk_stop_queue() doesn't prevent the blk_fetch_request()
@@ -560,9 +543,12 @@ repeat:
}
} else {
plug_device:
+ if (local_requeue)
+ list_add(&rq->queuelist, &drive->rq_list);
spin_unlock_irq(&hwif->lock);
ide_unlock_host(host);
- ide_requeue_and_plug(drive, rq);
+ if (!local_requeue)
+ ide_requeue_and_plug(drive, rq);
return BLK_STS_OK;
}
@@ -573,6 +559,26 @@ out:
return BLK_STS_OK;
}
+/*
+ * Issue a new request to a device.
+ */
+blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ ide_drive_t *drive = hctx->queue->queuedata;
+ ide_hwif_t *hwif = drive->hwif;
+
+ spin_lock_irq(&hwif->lock);
+ if (drive->sense_rq_active) {
+ spin_unlock_irq(&hwif->lock);
+ return BLK_STS_DEV_RESOURCE;
+ }
+ spin_unlock_irq(&hwif->lock);
+
+ blk_mq_start_request(bd->rq);
+ return ide_issue_rq(drive, bd->rq, false);
+}
+
static int drive_is_ready(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
@@ -893,13 +899,8 @@ EXPORT_SYMBOL_GPL(ide_pad_transfer);
void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
{
- ide_hwif_t *hwif = drive->hwif;
- unsigned long flags;
-
- spin_lock_irqsave(&hwif->lock, flags);
+ drive->sense_rq_active = true;
list_add_tail(&rq->queuelist, &drive->rq_list);
- spin_unlock_irqrestore(&hwif->lock, flags);
-
kblockd_schedule_work(&drive->rq_work);
}
EXPORT_SYMBOL_GPL(ide_insert_request_head);
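
The ide-io.c changes split ide_queue_rq() into a thin blk-mq entry point plus ide_issue_rq(), and gate normal dispatch on the new sense_rq_active flag: while a sense request is queued at the head, fresh I/O is bounced with BLK_STS_DEV_RESOURCE so blk-mq re-dispatches it later, ensuring the sense request cannot be preempted. A userspace sketch of that gate (hypothetical names):

#include <pthread.h>
#include <stdbool.h>

enum status { STS_OK, STS_RESOURCE };	/* analogue of blk_status_t */

struct device {
	pthread_mutex_t lock;
	bool internal_active;	/* analogue of drive->sense_rq_active */
};

/* Refuse new work while a high-priority internal request is pending;
 * STS_RESOURCE tells the caller to retry, like BLK_STS_DEV_RESOURCE. */
static enum status queue_request(struct device *dev)
{
	pthread_mutex_lock(&dev->lock);
	if (dev->internal_active) {
		pthread_mutex_unlock(&dev->lock);
		return STS_RESOURCE;
	}
	pthread_mutex_unlock(&dev->lock);

	/* ... start and issue the request here ... */
	return STS_OK;
}

int main(void)
{
	struct device d = { PTHREAD_MUTEX_INITIALIZER, false };

	return queue_request(&d) == STS_OK ? 0 : 1;
}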
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 102aa3bc3e7f..8af7af6001eb 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -54,7 +54,9 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
scsi_req(rq)->cmd_len = 1;
ide_req(rq)->type = ATA_PRIV_MISC;
+ spin_lock_irq(&hwif->lock);
ide_insert_request_head(drive, rq);
+ spin_unlock_irq(&hwif->lock);
out:
return;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 63627be0811a..5aeaca24a28f 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1159,18 +1159,27 @@ static void drive_rq_insert_work(struct work_struct *work)
ide_drive_t *drive = container_of(work, ide_drive_t, rq_work);
ide_hwif_t *hwif = drive->hwif;
struct request *rq;
+ blk_status_t ret;
LIST_HEAD(list);
- spin_lock_irq(&hwif->lock);
- if (!list_empty(&drive->rq_list))
- list_splice_init(&drive->rq_list, &list);
- spin_unlock_irq(&hwif->lock);
+ blk_mq_quiesce_queue(drive->queue);
- while (!list_empty(&list)) {
- rq = list_first_entry(&list, struct request, queuelist);
+ ret = BLK_STS_OK;
+ spin_lock_irq(&hwif->lock);
+ while (!list_empty(&drive->rq_list)) {
+ rq = list_first_entry(&drive->rq_list, struct request, queuelist);
list_del_init(&rq->queuelist);
- blk_execute_rq_nowait(drive->queue, rq->rq_disk, rq, true, NULL);
+
+ spin_unlock_irq(&hwif->lock);
+ ret = ide_issue_rq(drive, rq, true);
+ spin_lock_irq(&hwif->lock);
}
+ spin_unlock_irq(&hwif->lock);
+
+ blk_mq_unquiesce_queue(drive->queue);
+
+ if (ret != BLK_STS_OK)
+ kblockd_schedule_work(&drive->rq_work);
}
static const u8 ide_hwif_to_major[] =
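
In ide-probe.c, drive_rq_insert_work() now quiesces the queue and walks drive->rq_list under hwif->lock, dropping the lock around each ide_issue_rq() call (which may itself take the lock or sleep) and re-acquiring it to fetch the next entry. A userspace sketch of that drain idiom (hypothetical names):

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static void handle(struct node *n)	/* analogue of ide_issue_rq() */
{
	printf("handled %d\n", n->id);
}

/* Pop one entry under the lock, handle it with the lock dropped,
 * then re-lock to look at the next entry. */
static void drain(void)
{
	pthread_mutex_lock(&lock);
	while (head) {
		struct node *n = head;

		head = n->next;		/* analogue of list_del_init() */
		pthread_mutex_unlock(&lock);
		handle(n);		/* must not run under the lock */
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct node b = { NULL, 2 }, a = { &b, 1 };

	head = &a;
	drain();
	return 0;
}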
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index ec3a5ef7fee0..cbbe6b6535be 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1935,12 +1935,14 @@ out:
}
static struct stripe_head *
-r5c_recovery_alloc_stripe(struct r5conf *conf,
- sector_t stripe_sect)
+r5c_recovery_alloc_stripe(
+ struct r5conf *conf,
+ sector_t stripe_sect,
+ int noblock)
{
struct stripe_head *sh;
- sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
+ sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
if (!sh)
return NULL; /* no more stripe available */
@@ -2150,7 +2152,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
stripe_sect);
if (!sh) {
- sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
+ sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
/*
* cannot get stripe from raid5_get_active_stripe
* try replay some stripes
@@ -2159,20 +2161,29 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
r5c_recovery_replay_stripes(
cached_stripe_list, ctx);
sh = r5c_recovery_alloc_stripe(
- conf, stripe_sect);
+ conf, stripe_sect, 1);
}
if (!sh) {
+ int new_size = conf->min_nr_stripes * 2;
pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
mdname(mddev),
- conf->min_nr_stripes * 2);
- raid5_set_cache_size(mddev,
- conf->min_nr_stripes * 2);
- sh = r5c_recovery_alloc_stripe(conf,
- stripe_sect);
+ new_size);
+ ret = raid5_set_cache_size(mddev, new_size);
+ if (conf->min_nr_stripes <= new_size / 2) {
+ pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
+ mdname(mddev),
+ ret,
+ new_size,
+ conf->min_nr_stripes,
+ conf->max_nr_stripes);
+ return -ENOMEM;
+ }
+ sh = r5c_recovery_alloc_stripe(
+ conf, stripe_sect, 0);
}
if (!sh) {
pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
- mdname(mddev));
+ mdname(mddev));
return -ENOMEM;
}
list_add_tail(&sh->lru, cached_stripe_list);
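
The raid5-cache.c changes are the "recovery OOM issue (Alexei)" fix: recovery now allocates stripes non-blockingly first, replays cached stripes to free some, doubles the stripe cache, and verifies the grow actually took effect before falling back to a blocking allocation; previously a failed grow followed by a blocking raid5_get_active_stripe() could wedge recovery under memory pressure. A userspace sketch of the escalation (hypothetical names, malloc standing in for stripe allocation):

#include <stdbool.h>
#include <stdlib.h>

struct pool { int avail; int max; };	/* toy stand-in for the stripe cache */

static void *pool_alloc(struct pool *p, bool noblock)
{
	if (p->avail > 0) {
		p->avail--;
		return malloc(64);
	}
	return noblock ? NULL : malloc(64);	/* "blocking" path always succeeds here */
}

static int pool_grow(struct pool *p, int new_size)	/* may partially fail */
{
	if (new_size > p->max)
		new_size = p->max;
	p->avail = new_size;
	return new_size;
}

static void *alloc_for_recovery(struct pool *p, int min_size)
{
	int new_size;
	void *s = pool_alloc(p, true);		/* 1) non-blocking attempt */

	if (s)
		return s;

	new_size = min_size * 2;		/* 2) try to double the pool */
	if (pool_grow(p, new_size) <= new_size / 2)
		return NULL;			/* grow failed: bail rather than block */

	return pool_alloc(p, false);		/* 3) only now is blocking safe */
}

int main(void)
{
	struct pool p = { 0, 8 };

	return alloc_for_recovery(&p, 2) ? 0 : 1;
}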
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4990f0319f6c..cecea901ab8c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6369,6 +6369,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
int
raid5_set_cache_size(struct mddev *mddev, int size)
{
+ int result = 0;
struct r5conf *conf = mddev->private;
if (size <= 16 || size > 32768)
@@ -6385,11 +6386,14 @@ raid5_set_cache_size(struct mddev *mddev, int size)
mutex_lock(&conf->cache_size_mutex);
while (size > conf->max_nr_stripes)
- if (!grow_one_stripe(conf, GFP_KERNEL))
+ if (!grow_one_stripe(conf, GFP_KERNEL)) {
+ conf->min_nr_stripes = conf->max_nr_stripes;
+ result = -ENOMEM;
break;
+ }
mutex_unlock(&conf->cache_size_mutex);
- return 0;
+ return result;
}
EXPORT_SYMBOL(raid5_set_cache_size);
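
The raid5.c half of the fix makes raid5_set_cache_size() report failure: it used to return 0 even when grow_one_stripe() failed midway, leaving callers to assume the cache had grown. Now it clamps min_nr_stripes to what was actually allocated and returns -ENOMEM. A short sketch of that grow-loop error propagation (hypothetical names):

#include <errno.h>
#include <stdlib.h>

struct cache { int nr; int min_nr; };

static int grow_one(struct cache *c)	/* analogue of grow_one_stripe() */
{
	void *obj = malloc(64);

	if (!obj)
		return 0;
	free(obj);			/* a real cache would keep the object */
	c->nr++;
	return 1;
}

static int set_cache_size(struct cache *c, int size)
{
	int result = 0;

	while (size > c->nr) {
		if (!grow_one(c)) {
			c->min_nr = c->nr;	/* record what we really achieved */
			result = -ENOMEM;	/* ... and tell the caller */
			break;
		}
	}
	return result;
}

int main(void)
{
	struct cache c = { 0, 0 };

	return set_cache_size(&c, 16) ? 1 : 0;
}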
diff --git a/include/linux/ide.h b/include/linux/ide.h
index e7d29ae633cd..971cf76a78a0 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -615,6 +615,7 @@ struct ide_drive_s {
/* current sense rq and buffer */
bool sense_rq_armed;
+ bool sense_rq_active;
struct request *sense_rq;
struct request_sense sense_data;
@@ -1219,6 +1220,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
extern void ide_timer_expiry(struct timer_list *t);
extern irqreturn_t ide_intr(int irq, void *dev_id);
extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
+extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool);
extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
void ide_init_disk(struct gendisk *, ide_drive_t *);