| author    | Linus Torvalds <torvalds@linux-foundation.org> | 2018-08-13 22:29:03 -0700 |
|-----------|------------------------------------------------|---------------------------|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-08-13 22:29:03 -0700 |
| commit    | 161fa27ff2e2fd4ea7ccb660184670eab2b6ea98 | |
| tree      | 31e3403f1398171d2ed32d534640dfad67ce832a /fs/buffer.c | |
| parent    | a1a4f841ec4585185c0e75bfae43a18b282dd316 | |
| parent    | 806a1477b10a153cd01ee7ccba8ca2492df3e0b2 | |
Merge branch 'iomap-4.19-merge' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux
Pull fs iomap refactoring from Darrick Wong:
"This is the first part of the XFS changes for 4.19.
Christoph and Andreas coordinated some refactoring work on the iomap
code in preparation for removing buffer heads from XFS and porting
gfs2 to iomap. I'm sending this small pull request ahead of the main
XFS merge to avoid holding up gfs2 unnecessarily"
* 'iomap-4.19-merge' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
iomap: add inline data support to iomap_readpage_actor
iomap: support direct I/O to inline data
iomap: refactor iomap_dio_actor
iomap: add initial support for writes without buffer heads
iomap: add an iomap-based readpage and readpages implementation
iomap: add private pointer to struct iomap
iomap: add a page_done callback
iomap: generic inline data handling
iomap: complete partial direct I/O writes synchronously
iomap: mark newly allocated buffer heads as new
fs: factor out a __generic_write_end helper
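
Several of the commits above ('iomap: generic inline data handling', 'iomap: add private pointer to struct iomap', 'iomap: add a page_done callback') extend struct iomap, which filesystems fill in from their ->iomap_begin methods. The sketch below is only an illustration of how a filesystem might use those additions; it is not code from this series, the example_* names are made up, and the exact 4.19 field names and page_done signature should be checked against include/linux/iomap.h.

```c
/*
 * Illustrative sketch only, not from this series.  An ->iomap_begin
 * implementation hands back inline data, stashes a private cookie, and
 * registers a page_done callback that runs once the write to the page
 * has completed.  The page_done signature is an assumption about the
 * 4.19-era interface; the example_* names are hypothetical.
 */
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/slab.h>

/* hypothetical per-write state carried via iomap->private */
struct example_write_state {
	loff_t start;
};

static void example_page_done(struct inode *inode, loff_t pos,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	/* called once the page is done; drop our per-write state */
	kfree(iomap->private);
}

static int example_iomap_begin(struct inode *inode, loff_t pos,
		loff_t length, unsigned flags, struct iomap *iomap)
{
	struct example_write_state *state;

	state = kzalloc(sizeof(*state), GFP_NOFS);
	if (!state)
		return -ENOMEM;
	state->start = pos;

	iomap->type = IOMAP_INLINE;		/* data is stored in the inode */
	iomap->offset = 0;
	iomap->length = i_size_read(inode);
	iomap->inline_data = inode->i_private;	/* where this fs keeps its inline bytes (example) */
	iomap->private = state;			/* new filesystem-private pointer */
	iomap->page_done = example_page_done;	/* new completion hook */
	return 0;
}
```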
Diffstat (limited to 'fs/buffer.c')
-rw-r--r-- | fs/buffer.c | 76 |
1 file changed, 40 insertions, 36 deletions
```diff
diff --git a/fs/buffer.c b/fs/buffer.c
index cabc045f483d..c8c2b7d8b8d6 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1900,15 +1900,16 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
 		break;
 	case IOMAP_UNWRITTEN:
 		/*
-		 * For unwritten regions, we always need to ensure that
-		 * sub-block writes cause the regions in the block we are not
-		 * writing to are zeroed. Set the buffer as new to ensure this.
+		 * For unwritten regions, we always need to ensure that regions
+		 * in the block we are not writing to are zeroed. Mark the
+		 * buffer as new to ensure this.
 		 */
 		set_buffer_new(bh);
 		set_buffer_unwritten(bh);
 		/* FALLTHRU */
 	case IOMAP_MAPPED:
-		if (offset >= i_size_read(inode))
+		if ((iomap->flags & IOMAP_F_NEW) ||
+		    offset >= i_size_read(inode))
 			set_buffer_new(bh);
 		bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
 				inode->i_blkbits;
@@ -2076,6 +2077,40 @@ int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
 }
 EXPORT_SYMBOL(block_write_begin);
 
+int __generic_write_end(struct inode *inode, loff_t pos, unsigned copied,
+		struct page *page)
+{
+	loff_t old_size = inode->i_size;
+	bool i_size_changed = false;
+
+	/*
+	 * No need to use i_size_read() here, the i_size cannot change under us
+	 * because we hold i_rwsem.
+	 *
+	 * But it's important to update i_size while still holding page lock:
+	 * page writeout could otherwise come in and zero beyond i_size.
+	 */
+	if (pos + copied > inode->i_size) {
+		i_size_write(inode, pos + copied);
+		i_size_changed = true;
+	}
+
+	unlock_page(page);
+	put_page(page);
+
+	if (old_size < pos)
+		pagecache_isize_extended(inode, old_size, pos);
+	/*
+	 * Don't mark the inode dirty under page lock. First, it unnecessarily
+	 * makes the holding time of page lock longer. Second, it forces lock
+	 * ordering of page lock and transaction start for journaling
+	 * filesystems.
+	 */
+	if (i_size_changed)
+		mark_inode_dirty(inode);
+	return copied;
+}
+
 int block_write_end(struct file *file, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned copied,
 			struct page *page, void *fsdata)
@@ -2116,39 +2151,8 @@ int generic_write_end(struct file *file, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned copied,
 			struct page *page, void *fsdata)
 {
-	struct inode *inode = mapping->host;
-	loff_t old_size = inode->i_size;
-	int i_size_changed = 0;
-
 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
-
-	/*
-	 * No need to use i_size_read() here, the i_size
-	 * cannot change under us because we hold i_mutex.
-	 *
-	 * But it's important to update i_size while still holding page lock:
-	 * page writeout could otherwise come in and zero beyond i_size.
-	 */
-	if (pos+copied > inode->i_size) {
-		i_size_write(inode, pos+copied);
-		i_size_changed = 1;
-	}
-
-	unlock_page(page);
-	put_page(page);
-
-	if (old_size < pos)
-		pagecache_isize_extended(inode, old_size, pos);
-	/*
-	 * Don't mark the inode dirty under page lock. First, it unnecessarily
-	 * makes the holding time of page lock longer. Second, it forces lock
-	 * ordering of page lock and transaction start for journaling
-	 * filesystems.
-	 */
-	if (i_size_changed)
-		mark_inode_dirty(inode);
-
-	return copied;
+	return __generic_write_end(mapping->host, pos, copied, page);
 }
 EXPORT_SYMBOL(generic_write_end);
```
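
The last hunk makes generic_write_end() a thin wrapper: block_write_end() followed by the new __generic_write_end() helper, which updates i_size while the page is still locked and defers mark_inode_dirty() until after the page lock is dropped. As a rough illustration of why the helper is factored out, the sketch below shows how a write path that does not use buffer heads (the kind of iomap buffered-write code this series prepares for) could reuse it; only __generic_write_end() comes from this diff, and example_write_end() is hypothetical.

```c
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

/*
 * Illustrative sketch only.  A buffered-write completion that copied data
 * into a locked page without buffer heads can reuse __generic_write_end()
 * for the bookkeeping generic_write_end() does after block_write_end():
 * update i_size under the page lock, unlock and release the page, extend
 * the pagecache past the old size, and dirty the inode outside the page
 * lock.  Only __generic_write_end() is from this patch; example_write_end()
 * is a hypothetical caller.
 */
static int example_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page)
{
	flush_dcache_page(page);	/* data was just copied into the page */
	set_page_dirty(page);		/* schedule it for writeback */

	/* i_size update, unlock_page(), put_page(), mark_inode_dirty() */
	return __generic_write_end(inode, pos, copied, page);
}
```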