From 3291b52f9ff0acc80a8ee3f92a960db937dccecb Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Fri, 16 Sep 2016 16:59:26 +0200 Subject: UBI: introduce the VID buffer concept Currently, all VID headers are allocated and freed using the ubi_zalloc_vid_hdr() and ubi_free_vid_hdr() functions. These functions make sure to align allocation on ubi->vid_hdr_alsize and adjust the vid_hdr pointer to match the ubi->vid_hdr_shift requirements. This works fine, but is a bit convoluted. Moreover, the future introduction of LEB consolidation (needed to support MLC/TLC NANDs) will allow a VID buffer to contain more than one VID header. Hence the creation of a ubi_vid_io_buf struct to attach extra information to the VID header. We currently only store the actual pointer of the underlying buffer, but will soon add the number of VID headers contained in the buffer. Signed-off-by: Boris Brezillon Signed-off-by: Richard Weinberger --- drivers/mtd/ubi/wl.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) (limited to 'drivers/mtd/ubi/wl.c') diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index f4533266d7b2..6351e8ae91aa 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -649,6 +649,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, int anchor = wrk->anchor; #endif struct ubi_wl_entry *e1, *e2; + struct ubi_vid_io_buf *vidb; struct ubi_vid_hdr *vid_hdr; int dst_leb_clean = 0; @@ -656,10 +657,12 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, if (shutdown) return 0; - vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); - if (!vid_hdr) + vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS); + if (!vidb) return -ENOMEM; + vid_hdr = ubi_get_vid_hdr(vidb); + mutex_lock(&ubi->move_mutex); spin_lock(&ubi->wl_lock); ubi_assert(!ubi->move_from && !ubi->move_to); @@ -753,7 +756,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, * which is being moved was unmapped. 
*/ - err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0); + err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0); if (err && err != UBI_IO_BITFLIPS) { dst_leb_clean = 1; if (err == UBI_IO_FF) { @@ -790,7 +793,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, vol_id = be32_to_cpu(vid_hdr->vol_id); lnum = be32_to_cpu(vid_hdr->lnum); - err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); + err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb); if (err) { if (err == MOVE_CANCEL_RACE) { /* @@ -847,7 +850,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, if (scrubbing) ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d", e1->pnum, vol_id, lnum, e2->pnum); - ubi_free_vid_hdr(ubi, vid_hdr); + ubi_free_vid_buf(vidb); spin_lock(&ubi->wl_lock); if (!ubi->move_to_put) { @@ -913,7 +916,7 @@ out_not_moved: ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); - ubi_free_vid_hdr(ubi, vid_hdr); + ubi_free_vid_buf(vidb); if (dst_leb_clean) { ensure_wear_leveling(ubi, 1); } else { @@ -937,7 +940,7 @@ out_error: ubi->move_to_put = ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); - ubi_free_vid_hdr(ubi, vid_hdr); + ubi_free_vid_buf(vidb); wl_entry_destroy(ubi, e1); wl_entry_destroy(ubi, e2); @@ -951,7 +954,7 @@ out_cancel: ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); mutex_unlock(&ubi->move_mutex); - ubi_free_vid_hdr(ubi, vid_hdr); + ubi_free_vid_buf(vidb); return 0; } -- cgit v1.2.3