author		Javier González <javier@cnexlabs.com>	2016-02-20 08:52:40 +0100
committer	Jens Axboe <axboe@fb.com>	2016-03-03 14:46:35 -0700
commit		6adb03de406e8c92579c2e4b11640841fa908277 (patch)
tree		154b268390df5fd4d2a4da747ebedb272b4f4f8b /drivers/lightnvm
parent		fa3184b898717d696242241541b8cbcb65c5d497 (diff)
download	linux-6adb03de406e8c92579c2e4b11640841fa908277.tar.bz2
lightnvm: update closed list outside of intr context
When an I/O finishes, full blocks are moved from the open to the closed list; a lock is taken to protect the list. At the moment this happens in interrupt context, which is not correct. This patch moves the logic to the block workqueue instead, avoiding holding a spinlock that is not interrupt-safe while in interrupt context.

Signed-off-by: Javier González <javier@cnexlabs.com>
Fixes: ff0e498bfa18 ("lightnvm: manage open and closed blocks sepa...")
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
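The change follows the common kernel pattern of deferring lock-protected list maintenance from the completion (interrupt) path to a work item that runs in process context; in this patch the existing rrpc_gc_queue() work handler takes over the list update. Below is a minimal, hypothetical sketch of that pattern, not the rrpc code itself; close_work, close_block_work and on_block_full are illustrative names.

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Hypothetical container for one deferred "close this block" request. */
struct close_work {
	struct work_struct ws;
	struct list_head *blk_entry;	/* block's list node to move */
	struct list_head *closed_list;	/* destination (closed) list */
	spinlock_t *lun_lock;		/* lock protecting both lists */
};

/* Work handler: runs from a workqueue, i.e. in process context,
 * so taking a plain (non-irqsave) spinlock here is safe. */
static void close_block_work(struct work_struct *work)
{
	struct close_work *cw = container_of(work, struct close_work, ws);

	spin_lock(cw->lun_lock);
	list_move_tail(cw->blk_entry, cw->closed_list);
	spin_unlock(cw->lun_lock);

	kfree(cw);
}

/* Completion path (may run in interrupt context): do not take
 * lun_lock here; just hand the update off to the workqueue. */
static void on_block_full(struct close_work *cw)
{
	INIT_WORK(&cw->ws, close_block_work);
	schedule_work(&cw->ws);
}

In the patch itself, rrpc_gc_queue() plays the role of close_block_work(): the same handler that queues the block for GC now also adjusts nr_open_blocks/nr_closed_blocks, flips the block state to NVM_BLK_ST_CLOSED and moves the block to the closed list, all under lun->lock but in process context.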
Diffstat (limited to 'drivers/lightnvm')
-rw-r--r--	drivers/lightnvm/rrpc.c	23
1 file changed, 10 insertions(+), 13 deletions(-)
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index d8c75958ced3..e2710da5cd78 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -497,12 +497,21 @@ static void rrpc_gc_queue(struct work_struct *work)
 	struct rrpc *rrpc = gcb->rrpc;
 	struct rrpc_block *rblk = gcb->rblk;
 	struct nvm_lun *lun = rblk->parent->lun;
+	struct nvm_block *blk = rblk->parent;
 	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
 
 	spin_lock(&rlun->lock);
 	list_add_tail(&rblk->prio, &rlun->prio_list);
 	spin_unlock(&rlun->lock);
 
+	spin_lock(&lun->lock);
+	lun->nr_open_blocks--;
+	lun->nr_closed_blocks++;
+	blk->state &= ~NVM_BLK_ST_OPEN;
+	blk->state |= NVM_BLK_ST_CLOSED;
+	list_move_tail(&rblk->list, &rlun->closed_list);
+	spin_unlock(&lun->lock);
+
 	mempool_free(gcb, rrpc->gcb_pool);
 	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
 					rblk->parent->id);
@@ -666,20 +675,8 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
 		lun = rblk->parent->lun;
 
 		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
-		if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
-			struct nvm_block *blk = rblk->parent;
-			struct rrpc_lun *rlun = rblk->rlun;
-
-			spin_lock(&lun->lock);
-			lun->nr_open_blocks--;
-			lun->nr_closed_blocks++;
-			blk->state &= ~NVM_BLK_ST_OPEN;
-			blk->state |= NVM_BLK_ST_CLOSED;
-			list_move_tail(&rblk->list, &rlun->closed_list);
-			spin_unlock(&lun->lock);
-
+		if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk))
 			rrpc_run_gc(rrpc, rblk);
-		}
 	}
 }