author     Chao Yu <chao2.yu@samsung.com>          2013-11-22 09:09:59 +0800
committer  Jaegeuk Kim <jaegeuk.kim@samsung.com>   2013-12-23 10:18:02 +0900
commit     74de593af77b109f202c47e090c9e134c8882869 (patch)
tree       f7afcd5d6ac031268442cb0b5d21a003ebcd10ee /fs/f2fs
parent     d4d288bc72c020d335868ce217695c4d5dfd74d0 (diff)
download   linux-74de593af77b109f202c47e090c9e134c8882869.tar.bz2
f2fs: read contiguous sit entry pages by merging for mount performance
Previously we read SIT entry pages one by one, which loses the chance to read contiguous pages together. Instead, read as many contiguous pages as possible in one pass for better mount performance.

change log:
 o merge judgements / use 'continue' or 'break' instead of 'goto', as Gu Zheng suggested.
 o add mark_page_accessed() before releasing the page to delay VM reclaiming.
 o remove '*order' to simplify the function, as Jaegeuk Kim suggested.

Signed-off-by: Chao Yu <chao2.yu@samsung.com>
[Jaegeuk Kim: fix a bug in the block address calculation]
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
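The heart of the change is the contiguity check in ra_sit_pages() below: a batch of up to nrpages SIT blocks is walked, and the loop stops as soon as the next block address is no longer prev_blk_addr + 1, so each pass submits one physically contiguous read. A minimal userspace sketch of that merging rule (the helper name, types, and sample addresses here are illustrative, not part of the patch):

#include <stdio.h>

typedef unsigned int block_t;

/*
 * Return how many leading entries of addr[0..nr-1] form one physically
 * contiguous run, mirroring the "prev_blk_addr + 1 != blk_addr" break
 * condition used by ra_sit_pages().
 */
static int contiguous_run(const block_t *addr, int nr)
{
	int i;

	for (i = 1; i < nr; i++)
		if (addr[i] != addr[i - 1] + 1)
			break;
	return i;
}

int main(void)
{
	/*
	 * Hypothetical SIT block addresses: the first three are adjacent,
	 * the fourth jumps, so only three pages merge into one read.
	 */
	block_t addr[] = { 1000, 1001, 1002, 2048, 2049 };
	int merged = contiguous_run(addr, 5);

	printf("%d of 5 pages can be read in one contiguous request\n", merged);
	return 0;
}

In the patch itself, the caller (build_sit_entries) advances start_blk by the number of pages actually read ahead, so non-contiguous regions are simply picked up on the next pass of the do/while loop.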
Diffstat (limited to 'fs/f2fs')
-rw-r--r--   fs/f2fs/segment.c   101
-rw-r--r--   fs/f2fs/segment.h     2
2 files changed, 77 insertions, 26 deletions
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index a1acaa025bde..6dd1dc16a9d5 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -14,6 +14,7 @@
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
+#include <linux/swap.h>
#include "f2fs.h"
#include "segment.h"
@@ -1706,41 +1707,89 @@ static int build_curseg(struct f2fs_sb_info *sbi)
return restore_curseg_summaries(sbi);
}
+static int ra_sit_pages(struct f2fs_sb_info *sbi, int start, int nrpages)
+{
+ struct address_space *mapping = sbi->meta_inode->i_mapping;
+ struct page *page;
+ block_t blk_addr, prev_blk_addr = 0;
+ int sit_blk_cnt = SIT_BLK_CNT(sbi);
+ int blkno = start;
+
+ for (; blkno < start + nrpages && blkno < sit_blk_cnt; blkno++) {
+
+ blk_addr = current_sit_addr(sbi, blkno * SIT_ENTRY_PER_BLOCK);
+
+ if (blkno != start && prev_blk_addr + 1 != blk_addr)
+ break;
+ prev_blk_addr = blk_addr;
+repeat:
+ page = grab_cache_page(mapping, blk_addr);
+ if (!page) {
+ cond_resched();
+ goto repeat;
+ }
+ if (PageUptodate(page)) {
+ mark_page_accessed(page);
+ f2fs_put_page(page, 1);
+ continue;
+ }
+
+ submit_read_page(sbi, page, blk_addr, READ_SYNC);
+
+ mark_page_accessed(page);
+ f2fs_put_page(page, 0);
+ }
+
+ f2fs_submit_read_bio(sbi, READ_SYNC);
+ return blkno - start;
+}
+
static void build_sit_entries(struct f2fs_sb_info *sbi)
{
struct sit_info *sit_i = SIT_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
struct f2fs_summary_block *sum = curseg->sum_blk;
- unsigned int start;
-
- for (start = 0; start < TOTAL_SEGS(sbi); start++) {
- struct seg_entry *se = &sit_i->sentries[start];
- struct f2fs_sit_block *sit_blk;
- struct f2fs_sit_entry sit;
- struct page *page;
- int i;
+ int sit_blk_cnt = SIT_BLK_CNT(sbi);
+ unsigned int i, start, end;
+ unsigned int readed, start_blk = 0;
+ int nrpages = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
- mutex_lock(&curseg->curseg_mutex);
- for (i = 0; i < sits_in_cursum(sum); i++) {
- if (le32_to_cpu(segno_in_journal(sum, i)) == start) {
- sit = sit_in_journal(sum, i);
- mutex_unlock(&curseg->curseg_mutex);
- goto got_it;
+ do {
+ readed = ra_sit_pages(sbi, start_blk, nrpages);
+
+ start = start_blk * sit_i->sents_per_block;
+ end = (start_blk + readed) * sit_i->sents_per_block;
+
+ for (; start < end && start < TOTAL_SEGS(sbi); start++) {
+ struct seg_entry *se = &sit_i->sentries[start];
+ struct f2fs_sit_block *sit_blk;
+ struct f2fs_sit_entry sit;
+ struct page *page;
+
+ mutex_lock(&curseg->curseg_mutex);
+ for (i = 0; i < sits_in_cursum(sum); i++) {
+ if (le32_to_cpu(segno_in_journal(sum, i)) == start) {
+ sit = sit_in_journal(sum, i);
+ mutex_unlock(&curseg->curseg_mutex);
+ goto got_it;
+ }
}
- }
- mutex_unlock(&curseg->curseg_mutex);
- page = get_current_sit_page(sbi, start);
- sit_blk = (struct f2fs_sit_block *)page_address(page);
- sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
- f2fs_put_page(page, 1);
+ mutex_unlock(&curseg->curseg_mutex);
+
+ page = get_current_sit_page(sbi, start);
+ sit_blk = (struct f2fs_sit_block *)page_address(page);
+ sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
+ f2fs_put_page(page, 1);
got_it:
- check_block_count(sbi, start, &sit);
- seg_info_from_raw_sit(se, &sit);
- if (sbi->segs_per_sec > 1) {
- struct sec_entry *e = get_sec_entry(sbi, start);
- e->valid_blocks += se->valid_blocks;
+ check_block_count(sbi, start, &sit);
+ seg_info_from_raw_sit(se, &sit);
+ if (sbi->segs_per_sec > 1) {
+ struct sec_entry *e = get_sec_entry(sbi, start);
+ e->valid_blocks += se->valid_blocks;
+ }
}
- }
+ start_blk += readed;
+ } while (start_blk < sit_blk_cnt);
}
static void init_free_segmap(struct f2fs_sb_info *sbi)
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 38f6196493ff..b84dd2396665 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -78,6 +78,8 @@
(segno / SIT_ENTRY_PER_BLOCK)
#define START_SEGNO(sit_i, segno) \
(SIT_BLOCK_OFFSET(sit_i, segno) * SIT_ENTRY_PER_BLOCK)
+#define SIT_BLK_CNT(sbi) \
+ ((TOTAL_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK)
#define f2fs_bitmap_size(nr) \
(BITS_TO_LONGS(nr) * sizeof(unsigned long))
#define TOTAL_SEGS(sbi) (SM_I(sbi)->main_segments)
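The new SIT_BLK_CNT() macro is a plain ceiling division: the number of SIT blocks needed to cover all main-area segments, rounding up when the last block is only partially filled. A small worked example, assuming SIT_ENTRY_PER_BLOCK is 55 (a 4 KB block divided by the 74-byte on-disk f2fs_sit_entry) and a hypothetical 512-segment main area; both values are assumptions for illustration, not taken from the patch:

#include <stdio.h>

/* Illustrative stand-ins; real values come from the f2fs on-disk layout. */
#define SIT_ENTRY_PER_BLOCK	55	/* assumed: 4KB block / 74-byte entry */
#define TOTAL_SEGS		512	/* assumed main-area segment count */

/* Ceiling division, same shape as the new SIT_BLK_CNT() macro. */
#define SIT_BLK_CNT \
	((TOTAL_SEGS + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK)

int main(void)
{
	/* (512 + 54) / 55 = 10: nine full SIT blocks plus one partial one. */
	printf("SIT blocks needed: %d\n", SIT_BLK_CNT);
	return 0;
}

This count is what bounds both the readahead loop in ra_sit_pages() and the outer do/while in build_sit_entries(), which terminates once start_blk reaches SIT_BLK_CNT(sbi).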