author		Jens Axboe <axboe@kernel.dk>	2018-05-07 10:03:23 -0600
committer	Jens Axboe <axboe@kernel.dk>	2018-05-08 15:11:02 -0600
commit		782f569774d7000e54ae9d680b0e4cd29b1c7ca3 (patch)
tree		137bb8ebd2903af1d82fb9159ccc5a06b9005335
parent		8bea60901974ad44b06b08d52e1dd421ea8c6e9c (diff)
download	linux-782f569774d7000e54ae9d680b0e4cd29b1c7ca3.tar.bz2
blk-wbt: throttle discards like background writes
Throttle discards like we would any background write. Discards should be
background activity, so if they are impacting foreground IO, then we will
throttle them down.

Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--	block/blk-stat.h	6
-rw-r--r--	block/blk-wbt.c		43
-rw-r--r--	block/blk-wbt.h		4
3 files changed, 32 insertions(+), 21 deletions(-)
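
For readers skimming the diff below, here is a tiny standalone C sketch of the policy the patch implements. The enum, struct and helper names are illustrative stand-ins, not the kernel's API; the logic mirrors the shape of wbt_should_throttle() and get_limit() after the change: discards are throttled and always capped at the background depth, buffered writes use the normal depth, and sync+idle (O_DIRECT-style) writes are left alone.

#include <stdbool.h>
#include <stdio.h>

enum op { OP_READ, OP_WRITE, OP_DISCARD };	/* illustrative, not REQ_OP_* */

struct depths {
	unsigned int background;	/* shallow limit for background work */
	unsigned int normal;		/* deeper limit for foreground writes */
};

/* Same shape as wbt_should_throttle() after the patch. */
static bool should_throttle(enum op op, bool sync_idle)
{
	switch (op) {
	case OP_WRITE:
		/* don't throttle O_DIRECT-style (sync + idle) writes */
		if (sync_idle)
			return false;
		/* fallthrough */
	case OP_DISCARD:
		return true;
	default:
		return false;
	}
}

/* Same shape as get_limit(): discards are always capped at the background depth. */
static unsigned int depth_limit(const struct depths *d, enum op op)
{
	if (op == OP_DISCARD)
		return d->background;
	return d->normal;
}

int main(void)
{
	struct depths d = { .background = 2, .normal = 8 };

	printf("discard:        throttle=%d depth=%u\n",
	       should_throttle(OP_DISCARD, false), depth_limit(&d, OP_DISCARD));
	printf("buffered write: throttle=%d depth=%u\n",
	       should_throttle(OP_WRITE, false), depth_limit(&d, OP_WRITE));
	printf("O_DIRECT write: throttle=%d\n",
	       should_throttle(OP_WRITE, true));
	return 0;
}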
diff --git a/block/blk-stat.h b/block/blk-stat.h
index 2dd36347252a..c22049a8125e 100644
--- a/block/blk-stat.h
+++ b/block/blk-stat.h
@@ -10,11 +10,11 @@
/*
* from upper:
- * 3 bits: reserved for other usage
+ * 4 bits: reserved for other usage
* 12 bits: size
- * 49 bits: time
+ * 48 bits: time
*/
-#define BLK_STAT_RES_BITS 3
+#define BLK_STAT_RES_BITS 4
#define BLK_STAT_SIZE_BITS 12
#define BLK_STAT_RES_SHIFT (64 - BLK_STAT_RES_BITS)
#define BLK_STAT_SIZE_SHIFT (BLK_STAT_RES_SHIFT - BLK_STAT_SIZE_BITS)
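
A quick sanity sketch of the 64-bit layout the comment above describes, in plain userspace C (the macro and helper names here are illustrative, not the kernel's): counted from the top of the word, 4 reserved bits, then 12 size bits, then 48 time bits, so RES_SHIFT is 60 and SIZE_SHIFT is 48.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RES_BITS	4
#define SIZE_BITS	12
#define RES_SHIFT	(64 - RES_BITS)			/* 60 */
#define SIZE_SHIFT	(RES_SHIFT - SIZE_BITS)		/* 48 */
#define TIME_MASK	((1ULL << SIZE_SHIFT) - 1)
#define SIZE_MASK	(((1ULL << SIZE_BITS) - 1) << SIZE_SHIFT)

/* Pack the three fields into one 64-bit value, top-down as in the comment. */
static uint64_t pack(uint64_t res, uint64_t size, uint64_t time_ns)
{
	return (res << RES_SHIFT) |
	       ((size & 0xfffULL) << SIZE_SHIFT) |
	       (time_ns & TIME_MASK);
}

int main(void)
{
	uint64_t v = pack(0x9, 2048, 123456789ULL);

	assert(v >> RES_SHIFT == 0x9);
	assert(((v & SIZE_MASK) >> SIZE_SHIFT) == 2048);
	assert((v & TIME_MASK) == 123456789ULL);
	printf("packed: %#llx\n", (unsigned long long)v);
	return 0;
}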
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 25d202345965..a7a724580033 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -106,6 +106,8 @@ static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
{
if (wb_acct & WBT_KSWAPD)
return &rwb->rq_wait[WBT_RWQ_KSWAPD];
+ else if (wb_acct & WBT_DISCARD)
+ return &rwb->rq_wait[WBT_RWQ_DISCARD];
return &rwb->rq_wait[WBT_RWQ_BG];
}
@@ -143,10 +145,13 @@ void __wbt_done(struct rq_wb *rwb, enum wbt_flags wb_acct)
}
/*
- * If the device does write back caching, drop further down
- * before we wake people up.
+ * For discards, our limit is always the background. For writes, if
+ * the device does write back caching, drop further down before we
+ * wake people up.
*/
- if (rwb->wc && !wb_recent_wait(rwb))
+ if (wb_acct & WBT_DISCARD)
+ limit = rwb->wb_background;
+ else if (rwb->wc && !wb_recent_wait(rwb))
limit = 0;
else
limit = rwb->wb_normal;
@@ -483,6 +488,9 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
{
unsigned int limit;
+ if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
+ return rwb->wb_background;
+
/*
* At this point we know it's a buffered write. If this is
* kswapd trying to free memory, or REQ_SYNC is set, then
@@ -564,21 +572,20 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
{
- const int op = bio_op(bio);
-
- /*
- * If not a WRITE, do nothing
- */
- if (op != REQ_OP_WRITE)
- return false;
-
- /*
- * Don't throttle WRITE_ODIRECT
- */
- if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) == (REQ_SYNC | REQ_IDLE))
+ switch (bio_op(bio)) {
+ case REQ_OP_WRITE:
+ /*
+ * Don't throttle WRITE_ODIRECT
+ */
+ if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
+ (REQ_SYNC | REQ_IDLE))
+ return false;
+ /* fallthrough */
+ case REQ_OP_DISCARD:
+ return true;
+ default:
return false;
-
- return true;
+ }
}
/*
@@ -605,6 +612,8 @@ enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
if (current_is_kswapd())
ret |= WBT_KSWAPD;
+ if (bio_op(bio) == REQ_OP_DISCARD)
+ ret |= WBT_DISCARD;
__wbt_wait(rwb, ret, bio->bi_opf, lock);
diff --git a/block/blk-wbt.h b/block/blk-wbt.h
index 8038b4a0d4ef..d6a125e49db5 100644
--- a/block/blk-wbt.h
+++ b/block/blk-wbt.h
@@ -14,13 +14,15 @@ enum wbt_flags {
WBT_TRACKED = 1, /* write, tracked for throttling */
WBT_READ = 2, /* read */
WBT_KSWAPD = 4, /* write, from kswapd */
+ WBT_DISCARD = 8, /* discard */
- WBT_NR_BITS = 3, /* number of bits */
+ WBT_NR_BITS = 4, /* number of bits */
};
enum {
WBT_RWQ_BG = 0,
WBT_RWQ_KSWAPD,
+ WBT_RWQ_DISCARD,
WBT_NUM_RWQ,
};