author		Chao Yu <yuchao0@huawei.com>	2020-06-18 14:36:22 +0800
committer	Jaegeuk Kim <jaegeuk@kernel.org>	2020-07-07 21:51:43 -0700
commit		0ef818335f734bbb4bb26b0eaab2c27aa6e5d2c2 (patch)
tree		cc7810711c74a7135fda96df90d2bfd5d931e46f
parent		ba87a45c23d645b25e1265b477e83cc1b73086e4 (diff)
download	linux-0ef818335f734bbb4bb26b0eaab2c27aa6e5d2c2.tar.bz2
f2fs: add prefix for exported symbols

Add an "f2fs_" prefix to exported symbols to avoid polluting the global
symbol namespace.

Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
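For orientation, both renames in this patch follow the same pattern: the function's signature and behavior are unchanged, and only the exported name gains the "f2fs_" prefix. A minimal sketch of the declaration-level change for the map-lock helper, mirroring the fs/f2fs/f2fs.h hunk below:

/* Before: a generic name is exported into the kernel's global symbol namespace. */
void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);

/* After: the f2fs_ prefix makes the owning filesystem obvious at every call site. */
void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);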
-rw-r--r--	fs/f2fs/compress.c	 4
-rw-r--r--	fs/f2fs/data.c		14
-rw-r--r--	fs/f2fs/f2fs.h		 4
-rw-r--r--	fs/f2fs/file.c		 4
-rw-r--r--	fs/f2fs/gc.c		 2
-rw-r--r--	fs/f2fs/segment.c	 2
6 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 4dca9e456734..7dbd56abe936 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -949,7 +949,7 @@ retry:
}
if (prealloc) {
- __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
+ f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
@@ -964,7 +964,7 @@ retry:
break;
}
- __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
+ f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
}
if (likely(!ret)) {
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 326c63879ddc..c78ce08f6400 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1426,7 +1426,7 @@ map_blocks:
return err;
}
-void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
+void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
if (flag == F2FS_GET_BLOCK_PRE_AIO) {
if (lock)
@@ -1491,7 +1491,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
next_dnode:
if (map->m_may_create)
- __do_map_lock(sbi, flag, true);
+ f2fs_do_map_lock(sbi, flag, true);
/* When reading holes, we need its node page */
set_new_dnode(&dn, inode, NULL, NULL, 0);
@@ -1640,7 +1640,7 @@ skip:
f2fs_put_dnode(&dn);
if (map->m_may_create) {
- __do_map_lock(sbi, flag, false);
+ f2fs_do_map_lock(sbi, flag, false);
f2fs_balance_fs(sbi, dn.node_changed);
}
goto next_dnode;
@@ -1666,7 +1666,7 @@ sync_out:
f2fs_put_dnode(&dn);
unlock_out:
if (map->m_may_create) {
- __do_map_lock(sbi, flag, false);
+ f2fs_do_map_lock(sbi, flag, false);
f2fs_balance_fs(sbi, dn.node_changed);
}
out:
@@ -3217,7 +3217,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
if (f2fs_has_inline_data(inode) ||
(pos & PAGE_MASK) >= i_size_read(inode)) {
- __do_map_lock(sbi, flag, true);
+ f2fs_do_map_lock(sbi, flag, true);
locked = true;
}
@@ -3254,7 +3254,7 @@ restart:
err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
if (err || dn.data_blkaddr == NULL_ADDR) {
f2fs_put_dnode(&dn);
- __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
+ f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
true);
WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
locked = true;
@@ -3270,7 +3270,7 @@ out:
f2fs_put_dnode(&dn);
unlock_out:
if (locked)
- __do_map_lock(sbi, flag, false);
+ f2fs_do_map_lock(sbi, flag, false);
return err;
}
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index b35a50f4953c..9b6f7f72923a 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -3325,7 +3325,7 @@ block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
-void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
+void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
unsigned int start, unsigned int end);
void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi, int type);
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
@@ -3448,7 +3448,7 @@ struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
struct page *f2fs_get_new_data_page(struct inode *inode,
struct page *ipage, pgoff_t index, bool new_i_size);
int f2fs_do_write_data_page(struct f2fs_io_info *fio);
-void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
+void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
int create, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 98721f9bef25..e15d4ce4c08e 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -105,11 +105,11 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
if (need_alloc) {
/* block allocation */
- __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
+ f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = f2fs_get_block(&dn, page->index);
f2fs_put_dnode(&dn);
- __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
+ f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 9b6fc61ce649..cb18a99a7f87 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -1441,7 +1441,7 @@ static int free_segment_range(struct f2fs_sb_info *sbi,
/* Move out cursegs from the target range */
for (type = CURSEG_HOT_DATA; type < NR_CURSEG_TYPE; type++)
- allocate_segment_for_resize(sbi, type, start, end);
+ f2fs_allocate_segment_for_resize(sbi, type, start, end);
/* do GC to move out valid blocks in the range */
for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 66eeaba30e91..cf693a018bec 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -2700,7 +2700,7 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
stat_inc_seg_type(sbi, curseg);
}
-void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
+void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
unsigned int start, unsigned int end)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);