author     Adrian Hunter <adrian.hunter@intel.com>    2017-11-29 15:41:03 +0200
committer  Ulf Hansson <ulf.hansson@linaro.org>       2017-12-11 12:44:33 +0100
commit     81196976ed946cbf36bb41ddda402853c7df7cfa
tree       e0ac4bcf73f0870f6a75d75ebc683745c7458fcd
parent     c3d53d0da69d127f488dc85638e9440220b268e8
mmc: block: Add blk-mq support
Define and use a blk-mq queue. Discards and flushes are processed
synchronously, but reads and writes asynchronously. In order to support
slow DMA unmapping, DMA unmapping is not done until after the next
request is started. That means the request is not completed until then.
If there is no next request then the completion is done by queued work.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
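For illustration, here is a minimal C sketch of how an issue path with the
mmc_blk_mq_issue_rq() signature declared below could branch on request type.
Only that signature comes from this patch's header change; the enum values
and the mmc_blk_issue_sync_rq()/mmc_blk_issue_async_rq() helpers are
hypothetical stand-ins, not the code the patch adds:

#include <linux/blkdev.h>

struct mmc_queue;

/* Hypothetical issue results; illustration only. */
enum mmc_issued {
        MMC_REQ_STARTED,                /* issued async, completes later */
        MMC_REQ_FINISHED,               /* handled sync, already complete */
        MMC_REQ_FAILED_TO_START,
};

/* Hypothetical helpers standing in for the synchronous/asynchronous paths. */
void mmc_blk_issue_sync_rq(struct mmc_queue *mq, struct request *req);
enum mmc_issued mmc_blk_issue_async_rq(struct mmc_queue *mq, struct request *req);

enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
{
        switch (req_op(req)) {
        case REQ_OP_DISCARD:
        case REQ_OP_FLUSH:
                /* Discards and flushes are processed synchronously. */
                mmc_blk_issue_sync_rq(mq, req);
                return MMC_REQ_FINISHED;
        case REQ_OP_READ:
        case REQ_OP_WRITE:
                /*
                 * Reads and writes go out asynchronously: DMA unmapping,
                 * and therefore completion, is deferred until the next
                 * request starts, or to queued work if none follows.
                 */
                return mmc_blk_issue_async_rq(mq, req);
        default:
                return MMC_REQ_FAILED_TO_START;
        }
}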
Diffstat (limited to 'drivers/mmc/core/block.h')
-rw-r--r--  drivers/mmc/core/block.h | 9 +++++++++
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/drivers/mmc/core/block.h b/drivers/mmc/core/block.h
index 5946636101ef..6d34e87b18f6 100644
--- a/drivers/mmc/core/block.h
+++ b/drivers/mmc/core/block.h
@@ -7,4 +7,13 @@ struct request;
 
 void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req);
 
+enum mmc_issued;
+
+enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req);
+void mmc_blk_mq_complete(struct request *req);
+
+struct work_struct;
+
+void mmc_blk_mq_complete_work(struct work_struct *work);
+
 #endif
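The header uses forward declarations (enum mmc_issued, struct work_struct) so
block.h does not have to pull in the queue or workqueue headers. A minimal
sketch of how queue-side code might consume the new declarations, assuming a
complete_work field in struct mmc_queue and a hypothetical
mmc_queue_setup_sketch() helper (neither is part of this diff):

#include <linux/blk-mq.h>
#include <linux/workqueue.h>
#include "block.h"

/* Assumed queue layout: only the work item used for deferred completion. */
struct mmc_queue {
        struct work_struct complete_work;
        /* ... */
};

/*
 * blk-mq invokes .complete to finish a request; block.c supplies the
 * handler declared above.  .queue_rq (not shown) would call
 * mmc_blk_mq_issue_rq() and translate its result into a blk_status_t.
 */
static const struct blk_mq_ops mmc_mq_ops = {
        .complete       = mmc_blk_mq_complete,
};

static void mmc_queue_setup_sketch(struct mmc_queue *mq)
{
        /*
         * If no further request arrives to trigger the deferred DMA unmap,
         * this work item performs the completion instead.
         */
        INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
}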