Diffstat (limited to 'fs')
-rw-r--r-- | fs/btrfs/extent_io.c | 3
-rw-r--r-- | fs/cifs/file.c | 2
-rw-r--r-- | fs/dax.c | 73
-rw-r--r-- | fs/ext2/file.c | 4
-rw-r--r-- | fs/ext4/file.c | 4
-rw-r--r-- | fs/ext4/readpage.c | 2
-rw-r--r-- | fs/f2fs/data.c | 3
-rw-r--r-- | fs/fs-writeback.c | 111
-rw-r--r-- | fs/inode.c | 2
-rw-r--r-- | fs/mpage.c | 4
-rw-r--r-- | fs/ocfs2/cluster/tcp.c | 8
-rw-r--r-- | fs/ocfs2/dlm/dlmdebug.c | 26
-rw-r--r-- | fs/ocfs2/dlm/dlmdebug.h | 1
-rw-r--r-- | fs/ocfs2/dlmglue.c | 13
-rw-r--r-- | fs/ocfs2/inode.h | 7
-rw-r--r-- | fs/ocfs2/journal.c | 41
-rw-r--r-- | fs/ocfs2/stackglue.c | 2
-rw-r--r-- | fs/ocfs2/super.c | 1
-rw-r--r-- | fs/orangefs/inode.c | 2
-rw-r--r-- | fs/pipe.c | 32
-rw-r--r-- | fs/proc/meminfo.c | 7
-rw-r--r-- | fs/proc/task_mmu.c | 10
-rw-r--r-- | fs/super.c | 2
-rw-r--r-- | fs/userfaultfd.c | 22
-rw-r--r-- | fs/xfs/xfs_file.c | 6
25 files changed, 198 insertions, 190 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 27c214941004..cee4cb99b8ce 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4178,7 +4178,8 @@ int extent_readpages(struct extent_io_tree *tree, prefetchw(&page->flags); list_del(&page->lru); if (add_to_page_cache_lru(page, mapping, - page->index, GFP_NOFS)) { + page->index, + readahead_gfp_mask(mapping))) { put_page(page); continue; } diff --git a/fs/cifs/file.c b/fs/cifs/file.c index d4890b6dc22d..579e41b350a2 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -3366,7 +3366,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, struct page *page, *tpage; unsigned int expected_index; int rc; - gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL); + gfp_t gfp = readahead_gfp_mask(mapping); INIT_LIST_HEAD(tmplist); @@ -819,16 +819,16 @@ static int dax_insert_mapping(struct address_space *mapping, } /** - * __dax_fault - handle a page fault on a DAX file + * dax_fault - handle a page fault on a DAX file * @vma: The virtual memory area where the fault occurred * @vmf: The description of the fault * @get_block: The filesystem method used to translate file offsets to blocks * * When a page fault occurs, filesystems may call this helper in their - * fault handler for DAX files. __dax_fault() assumes the caller has done all + * fault handler for DAX files. dax_fault() assumes the caller has done all * the necessary locking for the page fault to proceed successfully. */ -int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, +int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, get_block_t get_block) { struct file *file = vma->vm_file; @@ -913,33 +913,6 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, return VM_FAULT_SIGBUS | major; return VM_FAULT_NOPAGE | major; } -EXPORT_SYMBOL(__dax_fault); - -/** - * dax_fault - handle a page fault on a DAX file - * @vma: The virtual memory area where the fault occurred - * @vmf: The description of the fault - * @get_block: The filesystem method used to translate file offsets to blocks - * - * When a page fault occurs, filesystems may call this helper in their - * fault handler for DAX files. - */ -int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, - get_block_t get_block) -{ - int result; - struct super_block *sb = file_inode(vma->vm_file)->i_sb; - - if (vmf->flags & FAULT_FLAG_WRITE) { - sb_start_pagefault(sb); - file_update_time(vma->vm_file); - } - result = __dax_fault(vma, vmf, get_block); - if (vmf->flags & FAULT_FLAG_WRITE) - sb_end_pagefault(sb); - - return result; -} EXPORT_SYMBOL_GPL(dax_fault); #if defined(CONFIG_TRANSPARENT_HUGEPAGE) @@ -967,7 +940,16 @@ static void __dax_dbg(struct buffer_head *bh, unsigned long address, #define dax_pmd_dbg(bh, address, reason) __dax_dbg(bh, address, reason, "dax_pmd") -int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, +/** + * dax_pmd_fault - handle a PMD fault on a DAX file + * @vma: The virtual memory area where the fault occurred + * @vmf: The description of the fault + * @get_block: The filesystem method used to translate file offsets to blocks + * + * When a page fault occurs, filesystems may call this helper in their + * pmd_fault handler for DAX files. 
+ */ +int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, unsigned int flags, get_block_t get_block) { struct file *file = vma->vm_file; @@ -1119,7 +1101,7 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, * * The PMD path doesn't have an equivalent to * dax_pfn_mkwrite(), though, so for a read followed by a - * write we traverse all the way through __dax_pmd_fault() + * write we traverse all the way through dax_pmd_fault() * twice. This means we can just skip inserting a radix tree * entry completely on the initial read and just wait until * the write to insert a dirty entry. @@ -1148,33 +1130,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, result = VM_FAULT_FALLBACK; goto out; } -EXPORT_SYMBOL_GPL(__dax_pmd_fault); - -/** - * dax_pmd_fault - handle a PMD fault on a DAX file - * @vma: The virtual memory area where the fault occurred - * @vmf: The description of the fault - * @get_block: The filesystem method used to translate file offsets to blocks - * - * When a page fault occurs, filesystems may call this helper in their - * pmd_fault handler for DAX files. - */ -int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, - pmd_t *pmd, unsigned int flags, get_block_t get_block) -{ - int result; - struct super_block *sb = file_inode(vma->vm_file)->i_sb; - - if (flags & FAULT_FLAG_WRITE) { - sb_start_pagefault(sb); - file_update_time(vma->vm_file); - } - result = __dax_pmd_fault(vma, address, pmd, flags, get_block); - if (flags & FAULT_FLAG_WRITE) - sb_end_pagefault(sb); - - return result; -} EXPORT_SYMBOL_GPL(dax_pmd_fault); #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ diff --git a/fs/ext2/file.c b/fs/ext2/file.c index 868c02317b05..5efeefe17abb 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c @@ -51,7 +51,7 @@ static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } down_read(&ei->dax_sem); - ret = __dax_fault(vma, vmf, ext2_get_block); + ret = dax_fault(vma, vmf, ext2_get_block); up_read(&ei->dax_sem); if (vmf->flags & FAULT_FLAG_WRITE) @@ -72,7 +72,7 @@ static int ext2_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, } down_read(&ei->dax_sem); - ret = __dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block); + ret = dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block); up_read(&ei->dax_sem); if (flags & FAULT_FLAG_WRITE) diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 4f615cdd22ca..261ac3734c58 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -202,7 +202,7 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (IS_ERR(handle)) result = VM_FAULT_SIGBUS; else - result = __dax_fault(vma, vmf, ext4_dax_get_block); + result = dax_fault(vma, vmf, ext4_dax_get_block); if (write) { if (!IS_ERR(handle)) @@ -237,7 +237,7 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, if (IS_ERR(handle)) result = VM_FAULT_SIGBUS; else - result = __dax_pmd_fault(vma, addr, pmd, flags, + result = dax_pmd_fault(vma, addr, pmd, flags, ext4_dax_get_block); if (write) { diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index bfc7f4d30643..a81b829d56de 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -130,7 +130,7 @@ int ext4_mpage_readpages(struct address_space *mapping, page = list_entry(pages->prev, struct page, lru); list_del(&page->lru); if (add_to_page_cache_lru(page, mapping, page->index, - mapping_gfp_constraint(mapping, GFP_KERNEL))) + readahead_gfp_mask(mapping))) goto next_page; } diff --git a/fs/f2fs/data.c 
b/fs/f2fs/data.c index 8769e8349dff..ded224518978 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -1002,7 +1002,8 @@ static int f2fs_mpage_readpages(struct address_space *mapping, page = list_entry(pages->prev, struct page, lru); list_del(&page->lru); if (add_to_page_cache_lru(page, mapping, - page->index, GFP_KERNEL)) + page->index, + readahead_gfp_mask(mapping))) goto next_page; } diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index fe7e83a45eff..6f9c9f6f5157 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -981,6 +981,42 @@ void inode_io_list_del(struct inode *inode) } /* + * mark an inode as under writeback on the sb + */ +void sb_mark_inode_writeback(struct inode *inode) +{ + struct super_block *sb = inode->i_sb; + unsigned long flags; + + if (list_empty(&inode->i_wb_list)) { + spin_lock_irqsave(&sb->s_inode_wblist_lock, flags); + if (list_empty(&inode->i_wb_list)) { + list_add_tail(&inode->i_wb_list, &sb->s_inodes_wb); + trace_sb_mark_inode_writeback(inode); + } + spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags); + } +} + +/* + * clear an inode as under writeback on the sb + */ +void sb_clear_inode_writeback(struct inode *inode) +{ + struct super_block *sb = inode->i_sb; + unsigned long flags; + + if (!list_empty(&inode->i_wb_list)) { + spin_lock_irqsave(&sb->s_inode_wblist_lock, flags); + if (!list_empty(&inode->i_wb_list)) { + list_del_init(&inode->i_wb_list); + trace_sb_clear_inode_writeback(inode); + } + spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags); + } +} + +/* * Redirty an inode: set its when-it-was dirtied timestamp and move it to the * furthest end of its superblock's dirty-inode list. * @@ -2154,7 +2190,7 @@ EXPORT_SYMBOL(__mark_inode_dirty); */ static void wait_sb_inodes(struct super_block *sb) { - struct inode *inode, *old_inode = NULL; + LIST_HEAD(sync_list); /* * We need to be protected against the filesystem going from @@ -2163,38 +2199,60 @@ static void wait_sb_inodes(struct super_block *sb) WARN_ON(!rwsem_is_locked(&sb->s_umount)); mutex_lock(&sb->s_sync_lock); - spin_lock(&sb->s_inode_list_lock); /* - * Data integrity sync. Must wait for all pages under writeback, - * because there may have been pages dirtied before our sync - * call, but which had writeout started before we write it out. - * In which case, the inode may not be on the dirty list, but - * we still have to wait for that writeout. + * Splice the writeback list onto a temporary list to avoid waiting on + * inodes that have started writeback after this point. + * + * Use rcu_read_lock() to keep the inodes around until we have a + * reference. s_inode_wblist_lock protects sb->s_inodes_wb as well as + * the local list because inodes can be dropped from either by writeback + * completion. + */ + rcu_read_lock(); + spin_lock_irq(&sb->s_inode_wblist_lock); + list_splice_init(&sb->s_inodes_wb, &sync_list); + + /* + * Data integrity sync. Must wait for all pages under writeback, because + * there may have been pages dirtied before our sync call, but which had + * writeout started before we write it out. In which case, the inode + * may not be on the dirty list, but we still have to wait for that + * writeout. 
*/ - list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { + while (!list_empty(&sync_list)) { + struct inode *inode = list_first_entry(&sync_list, struct inode, + i_wb_list); struct address_space *mapping = inode->i_mapping; + /* + * Move each inode back to the wb list before we drop the lock + * to preserve consistency between i_wb_list and the mapping + * writeback tag. Writeback completion is responsible to remove + * the inode from either list once the writeback tag is cleared. + */ + list_move_tail(&inode->i_wb_list, &sb->s_inodes_wb); + + /* + * The mapping can appear untagged while still on-list since we + * do not have the mapping lock. Skip it here, wb completion + * will remove it. + */ + if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) + continue; + + spin_unlock_irq(&sb->s_inode_wblist_lock); + spin_lock(&inode->i_lock); - if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) || - (mapping->nrpages == 0)) { + if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) { spin_unlock(&inode->i_lock); + + spin_lock_irq(&sb->s_inode_wblist_lock); continue; } __iget(inode); spin_unlock(&inode->i_lock); - spin_unlock(&sb->s_inode_list_lock); - - /* - * We hold a reference to 'inode' so it couldn't have been - * removed from s_inodes list while we dropped the - * s_inode_list_lock. We cannot iput the inode now as we can - * be holding the last reference and we cannot iput it under - * s_inode_list_lock. So we keep the reference and iput it - * later. - */ - iput(old_inode); - old_inode = inode; + rcu_read_unlock(); /* * We keep the error status of individual mapping so that @@ -2205,10 +2263,13 @@ static void wait_sb_inodes(struct super_block *sb) cond_resched(); - spin_lock(&sb->s_inode_list_lock); + iput(inode); + + rcu_read_lock(); + spin_lock_irq(&sb->s_inode_wblist_lock); } - spin_unlock(&sb->s_inode_list_lock); - iput(old_inode); + spin_unlock_irq(&sb->s_inode_wblist_lock); + rcu_read_unlock(); mutex_unlock(&sb->s_sync_lock); } diff --git a/fs/inode.c b/fs/inode.c index 4ccbc21b30ce..e171f7b5f9e4 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -365,6 +365,7 @@ void inode_init_once(struct inode *inode) INIT_HLIST_NODE(&inode->i_hash); INIT_LIST_HEAD(&inode->i_devices); INIT_LIST_HEAD(&inode->i_io_list); + INIT_LIST_HEAD(&inode->i_wb_list); INIT_LIST_HEAD(&inode->i_lru); address_space_init_once(&inode->i_data); i_size_ordered_init(inode); @@ -507,6 +508,7 @@ void clear_inode(struct inode *inode) BUG_ON(!list_empty(&inode->i_data.private_list)); BUG_ON(!(inode->i_state & I_FREEING)); BUG_ON(inode->i_state & I_CLEAR); + BUG_ON(!list_empty(&inode->i_wb_list)); /* don't need i_lock here, no concurrent mods to i_state */ inode->i_state = I_FREEING | I_CLEAR; } diff --git a/fs/mpage.c b/fs/mpage.c index 37b28280ad04..2ca1f39c8cba 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -72,6 +72,8 @@ mpage_alloc(struct block_device *bdev, { struct bio *bio; + /* Restrict the given (page cache) mask for slab allocations */ + gfp_flags &= GFP_KERNEL; bio = bio_alloc(gfp_flags, nr_vecs); if (bio == NULL && (current->flags & PF_MEMALLOC)) { @@ -363,7 +365,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages, sector_t last_block_in_bio = 0; struct buffer_head map_bh; unsigned long first_logical_block = 0; - gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL); + gfp_t gfp = readahead_gfp_mask(mapping); map_bh.b_state = 0; map_bh.b_size = 0; diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 4238eb28889f..1d67fcbf7160 100644 --- a/fs/ocfs2/cluster/tcp.c +++ 
b/fs/ocfs2/cluster/tcp.c @@ -1618,16 +1618,12 @@ static void o2net_start_connect(struct work_struct *work) /* watch for racing with tearing a node down */ node = o2nm_get_node_by_num(o2net_num_from_nn(nn)); - if (node == NULL) { - ret = 0; + if (node == NULL) goto out; - } mynode = o2nm_get_node_by_num(o2nm_this_node()); - if (mynode == NULL) { - ret = 0; + if (mynode == NULL) goto out; - } spin_lock(&nn->nn_lock); /* diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c index 825136070d2c..e7b760deefae 100644 --- a/fs/ocfs2/dlm/dlmdebug.c +++ b/fs/ocfs2/dlm/dlmdebug.c @@ -347,26 +347,6 @@ static struct dentry *dlm_debugfs_root; #define DLM_DEBUGFS_PURGE_LIST "purge_list" /* begin - utils funcs */ -static void dlm_debug_free(struct kref *kref) -{ - struct dlm_debug_ctxt *dc; - - dc = container_of(kref, struct dlm_debug_ctxt, debug_refcnt); - - kfree(dc); -} - -static void dlm_debug_put(struct dlm_debug_ctxt *dc) -{ - if (dc) - kref_put(&dc->debug_refcnt, dlm_debug_free); -} - -static void dlm_debug_get(struct dlm_debug_ctxt *dc) -{ - kref_get(&dc->debug_refcnt); -} - static int debug_release(struct inode *inode, struct file *file) { free_page((unsigned long)file->private_data); @@ -932,11 +912,9 @@ int dlm_debug_init(struct dlm_ctxt *dlm) goto bail; } - dlm_debug_get(dc); return 0; bail: - dlm_debug_shutdown(dlm); return -ENOMEM; } @@ -949,7 +927,8 @@ void dlm_debug_shutdown(struct dlm_ctxt *dlm) debugfs_remove(dc->debug_mle_dentry); debugfs_remove(dc->debug_lockres_dentry); debugfs_remove(dc->debug_state_dentry); - dlm_debug_put(dc); + kfree(dc); + dc = NULL; } } @@ -969,7 +948,6 @@ int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm) mlog_errno(-ENOMEM); goto bail; } - kref_init(&dlm->dlm_debug_ctxt->debug_refcnt); return 0; bail: diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h index 1f27c4812d1a..5ced5482e7d3 100644 --- a/fs/ocfs2/dlm/dlmdebug.h +++ b/fs/ocfs2/dlm/dlmdebug.h @@ -30,7 +30,6 @@ void dlm_print_one_mle(struct dlm_master_list_entry *mle); #ifdef CONFIG_DEBUG_FS struct dlm_debug_ctxt { - struct kref debug_refcnt; struct dentry *debug_state_dentry; struct dentry *debug_lockres_dentry; struct dentry *debug_mle_dentry; diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 1eaa9100c889..83d576f6a287 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -1635,7 +1635,6 @@ int ocfs2_create_new_inode_locks(struct inode *inode) int ret; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - BUG_ON(!inode); BUG_ON(!ocfs2_inode_is_new(inode)); mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); @@ -1665,10 +1664,8 @@ int ocfs2_create_new_inode_locks(struct inode *inode) } ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0); - if (ret) { + if (ret) mlog_errno(ret); - goto bail; - } bail: return ret; @@ -1680,8 +1677,6 @@ int ocfs2_rw_lock(struct inode *inode, int write) struct ocfs2_lock_res *lockres; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - BUG_ON(!inode); - mlog(0, "inode %llu take %s RW lock\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, write ? 
"EXMODE" : "PRMODE"); @@ -1724,8 +1719,6 @@ int ocfs2_open_lock(struct inode *inode) struct ocfs2_lock_res *lockres; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - BUG_ON(!inode); - mlog(0, "inode %llu take PRMODE open lock\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); @@ -1749,8 +1742,6 @@ int ocfs2_try_open_lock(struct inode *inode, int write) struct ocfs2_lock_res *lockres; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - BUG_ON(!inode); - mlog(0, "inode %llu try to take %s open lock\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, write ? "EXMODE" : "PRMODE"); @@ -2328,8 +2319,6 @@ int ocfs2_inode_lock_full_nested(struct inode *inode, struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct buffer_head *local_bh = NULL; - BUG_ON(!inode); - mlog(0, "inode %llu, take %s META lock\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, ex ? "EXMODE" : "PRMODE"); diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h index d8f3fc8d2551..50cc55047443 100644 --- a/fs/ocfs2/inode.h +++ b/fs/ocfs2/inode.h @@ -145,22 +145,15 @@ int ocfs2_drop_inode(struct inode *inode); struct inode *ocfs2_ilookup(struct super_block *sb, u64 feoff); struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 feoff, unsigned flags, int sysfile_type); -int ocfs2_inode_init_private(struct inode *inode); int ocfs2_inode_revalidate(struct dentry *dentry); void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, int create_ino); -void ocfs2_read_inode(struct inode *inode); -void ocfs2_read_inode2(struct inode *inode, void *opaque); -ssize_t ocfs2_rw_direct(int rw, struct file *filp, char *buf, - size_t size, loff_t *offp); void ocfs2_sync_blockdev(struct super_block *sb); void ocfs2_refresh_inode(struct inode *inode, struct ocfs2_dinode *fe); int ocfs2_mark_inode_dirty(handle_t *handle, struct inode *inode, struct buffer_head *bh); -struct buffer_head *ocfs2_bread(struct inode *inode, - int block, int *err, int reada); void ocfs2_set_inode_flags(struct inode *inode); void ocfs2_get_inode_flags(struct ocfs2_inode_info *oi); diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index e607419cdfa4..a244f14c6b87 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -1159,10 +1159,8 @@ static int ocfs2_force_read_journal(struct inode *inode) int status = 0; int i; u64 v_blkno, p_blkno, p_blocks, num_blocks; -#define CONCURRENT_JOURNAL_FILL 32ULL - struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL]; - - memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL); + struct buffer_head *bh = NULL; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode)); v_blkno = 0; @@ -1174,29 +1172,32 @@ static int ocfs2_force_read_journal(struct inode *inode) goto bail; } - if (p_blocks > CONCURRENT_JOURNAL_FILL) - p_blocks = CONCURRENT_JOURNAL_FILL; - - /* We are reading journal data which should not - * be put in the uptodate cache */ - status = ocfs2_read_blocks_sync(OCFS2_SB(inode->i_sb), - p_blkno, p_blocks, bhs); - if (status < 0) { - mlog_errno(status); - goto bail; - } + for (i = 0; i < p_blocks; i++, p_blkno++) { + bh = __find_get_block(osb->sb->s_bdev, p_blkno, + osb->sb->s_blocksize); + /* block not cached. */ + if (!bh) + continue; + + brelse(bh); + bh = NULL; + /* We are reading journal data which should not + * be put in the uptodate cache. 
+ */ + status = ocfs2_read_blocks_sync(osb, p_blkno, 1, &bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } - for(i = 0; i < p_blocks; i++) { - brelse(bhs[i]); - bhs[i] = NULL; + brelse(bh); + bh = NULL; } v_blkno += p_blocks; } bail: - for(i = 0; i < CONCURRENT_JOURNAL_FILL; i++) - brelse(bhs[i]); return status; } diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c index 13219ed73e1d..52c07346bea3 100644 --- a/fs/ocfs2/stackglue.c +++ b/fs/ocfs2/stackglue.c @@ -735,8 +735,6 @@ static void __exit ocfs2_stack_glue_exit(void) { memset(&locking_max_version, 0, sizeof(struct ocfs2_protocol_version)); - locking_max_version.pv_major = 0; - locking_max_version.pv_minor = 0; ocfs2_sysfs_exit(); if (ocfs2_table_header) unregister_sysctl_table(ocfs2_table_header); diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 3971146228d3..603b28d6f008 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -2072,7 +2072,6 @@ static int ocfs2_initialize_super(struct super_block *sb, osb->osb_dx_seed[3] = le32_to_cpu(di->id2.i_super.s_uuid_hash); osb->sb = sb; - /* Save off for ocfs2_rw_direct */ osb->s_sectsize_bits = blksize_bits(sector_size); BUG_ON(!osb->s_sectsize_bits); diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c index a44caabb0fc2..8f2fa94cc4f6 100644 --- a/fs/orangefs/inode.c +++ b/fs/orangefs/inode.c @@ -80,7 +80,7 @@ static int orangefs_readpages(struct file *file, if (!add_to_page_cache(page, mapping, page->index, - GFP_KERNEL)) { + readahead_gfp_mask(mapping))) { ret = read_one_page(page); gossip_debug(GOSSIP_INODE_DEBUG, "failure adding page to cache, read_one_page returned: %d\n", diff --git a/fs/pipe.c b/fs/pipe.c index 0d3f5165cb0b..4b32928f5426 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -21,6 +21,7 @@ #include <linux/audit.h> #include <linux/syscalls.h> #include <linux/fcntl.h> +#include <linux/memcontrol.h> #include <asm/uaccess.h> #include <asm/ioctls.h> @@ -137,6 +138,22 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe, put_page(page); } +static int anon_pipe_buf_steal(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +{ + struct page *page = buf->page; + + if (page_count(page) == 1) { + if (memcg_kmem_enabled()) { + memcg_kmem_uncharge(page, 0); + __ClearPageKmemcg(page); + } + __SetPageLocked(page); + return 0; + } + return 1; +} + /** * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer * @pipe: the pipe that the buffer belongs to @@ -219,7 +236,7 @@ static const struct pipe_buf_operations anon_pipe_buf_ops = { .can_merge = 1, .confirm = generic_pipe_buf_confirm, .release = anon_pipe_buf_release, - .steal = generic_pipe_buf_steal, + .steal = anon_pipe_buf_steal, .get = generic_pipe_buf_get, }; @@ -227,7 +244,7 @@ static const struct pipe_buf_operations packet_pipe_buf_ops = { .can_merge = 0, .confirm = generic_pipe_buf_confirm, .release = anon_pipe_buf_release, - .steal = generic_pipe_buf_steal, + .steal = anon_pipe_buf_steal, .get = generic_pipe_buf_get, }; @@ -405,7 +422,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from) int copied; if (!page) { - page = alloc_page(GFP_HIGHUSER); + page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT); if (unlikely(!page)) { ret = ret ? 
: -ENOMEM; break; @@ -611,7 +628,7 @@ struct pipe_inode_info *alloc_pipe_info(void) { struct pipe_inode_info *pipe; - pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL); + pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT); if (pipe) { unsigned long pipe_bufs = PIPE_DEF_BUFFERS; struct user_struct *user = get_current_user(); @@ -619,7 +636,9 @@ struct pipe_inode_info *alloc_pipe_info(void) if (!too_many_pipe_buffers_hard(user)) { if (too_many_pipe_buffers_soft(user)) pipe_bufs = 1; - pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL); + pipe->bufs = kcalloc(pipe_bufs, + sizeof(struct pipe_buffer), + GFP_KERNEL_ACCOUNT); } if (pipe->bufs) { @@ -1010,7 +1029,8 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages) if (nr_pages < pipe->nrbufs) return -EBUSY; - bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL | __GFP_NOWARN); + bufs = kcalloc(nr_pages, sizeof(*bufs), + GFP_KERNEL_ACCOUNT | __GFP_NOWARN); if (unlikely(!bufs)) return -ENOMEM; diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index 83720460c5bc..cf301a9ef512 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -105,6 +105,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v) #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE "AnonHugePages: %8lu kB\n" + "ShmemHugePages: %8lu kB\n" + "ShmemPmdMapped: %8lu kB\n" #endif #ifdef CONFIG_CMA "CmaTotal: %8lu kB\n" @@ -162,8 +164,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v) , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10) #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE - , K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) * - HPAGE_PMD_NR) + , K(global_page_state(NR_ANON_THPS) * HPAGE_PMD_NR) + , K(global_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR) + , K(global_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR) #endif #ifdef CONFIG_CMA , K(totalcma_pages) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 4648c7f63ae2..187d84ef9de9 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -448,6 +448,7 @@ struct mem_size_stats { unsigned long referenced; unsigned long anonymous; unsigned long anonymous_thp; + unsigned long shmem_thp; unsigned long swap; unsigned long shared_hugetlb; unsigned long private_hugetlb; @@ -576,7 +577,12 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP); if (IS_ERR_OR_NULL(page)) return; - mss->anonymous_thp += HPAGE_PMD_SIZE; + if (PageAnon(page)) + mss->anonymous_thp += HPAGE_PMD_SIZE; + else if (PageSwapBacked(page)) + mss->shmem_thp += HPAGE_PMD_SIZE; + else + VM_BUG_ON_PAGE(1, page); smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd)); } #else @@ -770,6 +776,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid) "Referenced: %8lu kB\n" "Anonymous: %8lu kB\n" "AnonHugePages: %8lu kB\n" + "ShmemPmdMapped: %8lu kB\n" "Shared_Hugetlb: %8lu kB\n" "Private_Hugetlb: %7lu kB\n" "Swap: %8lu kB\n" @@ -787,6 +794,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid) mss.referenced >> 10, mss.anonymous >> 10, mss.anonymous_thp >> 10, + mss.shmem_thp >> 10, mss.shared_hugetlb >> 10, mss.private_hugetlb >> 10, mss.swap >> 10, diff --git a/fs/super.c b/fs/super.c index d78b9847e6cb..5806ffd45563 100644 --- a/fs/super.c +++ b/fs/super.c @@ -206,6 +206,8 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags) mutex_init(&s->s_sync_lock); INIT_LIST_HEAD(&s->s_inodes); spin_lock_init(&s->s_inode_list_lock); + 
INIT_LIST_HEAD(&s->s_inodes_wb); + spin_lock_init(&s->s_inode_wblist_lock); if (list_lru_init_memcg(&s->s_dentry_lru)) goto fail; diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 2d97952e341a..85959d8324df 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -257,10 +257,9 @@ out: * fatal_signal_pending()s, and the mmap_sem must be released before * returning it. */ -int handle_userfault(struct vm_area_struct *vma, unsigned long address, - unsigned int flags, unsigned long reason) +int handle_userfault(struct fault_env *fe, unsigned long reason) { - struct mm_struct *mm = vma->vm_mm; + struct mm_struct *mm = fe->vma->vm_mm; struct userfaultfd_ctx *ctx; struct userfaultfd_wait_queue uwq; int ret; @@ -269,7 +268,7 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address, BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); ret = VM_FAULT_SIGBUS; - ctx = vma->vm_userfaultfd_ctx.ctx; + ctx = fe->vma->vm_userfaultfd_ctx.ctx; if (!ctx) goto out; @@ -302,17 +301,17 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address, * without first stopping userland access to the memory. For * VM_UFFD_MISSING userfaults this is enough for now. */ - if (unlikely(!(flags & FAULT_FLAG_ALLOW_RETRY))) { + if (unlikely(!(fe->flags & FAULT_FLAG_ALLOW_RETRY))) { /* * Validate the invariant that nowait must allow retry * to be sure not to return SIGBUS erroneously on * nowait invocations. */ - BUG_ON(flags & FAULT_FLAG_RETRY_NOWAIT); + BUG_ON(fe->flags & FAULT_FLAG_RETRY_NOWAIT); #ifdef CONFIG_DEBUG_VM if (printk_ratelimit()) { printk(KERN_WARNING - "FAULT_FLAG_ALLOW_RETRY missing %x\n", flags); + "FAULT_FLAG_ALLOW_RETRY missing %x\n", fe->flags); dump_stack(); } #endif @@ -324,7 +323,7 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address, * and wait. */ ret = VM_FAULT_RETRY; - if (flags & FAULT_FLAG_RETRY_NOWAIT) + if (fe->flags & FAULT_FLAG_RETRY_NOWAIT) goto out; /* take the reference before dropping the mmap_sem */ @@ -332,10 +331,11 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address, init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function); uwq.wq.private = current; - uwq.msg = userfault_msg(address, flags, reason); + uwq.msg = userfault_msg(fe->address, fe->flags, reason); uwq.ctx = ctx; - return_to_userland = (flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) == + return_to_userland = + (fe->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) == (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE); spin_lock(&ctx->fault_pending_wqh.lock); @@ -353,7 +353,7 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address, TASK_KILLABLE); spin_unlock(&ctx->fault_pending_wqh.lock); - must_wait = userfaultfd_must_wait(ctx, address, flags, reason); + must_wait = userfaultfd_must_wait(ctx, fe->address, fe->flags, reason); up_read(&mm->mmap_sem); if (likely(must_wait && !ACCESS_ONCE(ctx->released) && diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 47fc63295422..1b3dc9dd8861 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -1551,7 +1551,7 @@ xfs_filemap_page_mkwrite( xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED); if (IS_DAX(inode)) { - ret = __dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault); + ret = dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault); } else { ret = block_page_mkwrite(vma, vmf, xfs_get_blocks); ret = block_page_mkwrite_return(ret); @@ -1585,7 +1585,7 @@ xfs_filemap_fault( * changes to xfs_get_blocks_direct() to map unwritten extent * ioend for conversion on read-only mappings. 
*/ - ret = __dax_fault(vma, vmf, xfs_get_blocks_dax_fault); + ret = dax_fault(vma, vmf, xfs_get_blocks_dax_fault); } else ret = filemap_fault(vma, vmf); xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED); @@ -1622,7 +1622,7 @@ xfs_filemap_pmd_fault( } xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED); - ret = __dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault); + ret = dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault); xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED); if (flags & FAULT_FLAG_WRITE) |
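A number of the hunks above (btrfs, cifs, ext4, f2fs, mpage, orangefs) switch the readpages paths from ad-hoc GFP_KERNEL or mapping_gfp_constraint() masks to a common readahead_gfp_mask(mapping). The helper itself lives in include/linux/pagemap.h and is not part of this fs-only diff; the sketch below shows how a converted readpages implementation is expected to use it. example_readpages is a hypothetical function, not a kernel API, and the stated expansion of the helper is an assumption.

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * readahead_gfp_mask() is assumed to expand to roughly
 * mapping_gfp_mask(mapping) | __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN,
 * i.e. a best-effort mask that fails quietly under memory pressure.
 */
static int example_readpages(struct address_space *mapping,
                             struct list_head *pages)
{
        gfp_t gfp = readahead_gfp_mask(mapping);
        struct page *page;

        while (!list_empty(pages)) {
                page = list_entry(pages->prev, struct page, lru);
                list_del(&page->lru);
                if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
                        /* insertion/allocation failed: drop this page */
                        put_page(page);
                        continue;
                }
                /* ... queue read I/O for the page here ... */
        }
        return 0;
}

Using one shared mask keeps readahead allocations consistent across filesystems: speculative reads may simply fail rather than trigger reclaim stalls or allocation warnings.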
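The fs-writeback.c, inode.c and super.c hunks introduce a per-superblock list of inodes currently under writeback (s_inodes_wb under s_inode_wblist_lock) so that wait_sb_inodes() only visits inodes that actually have writeback in flight instead of walking every inode on the superblock. The code that feeds that list sits on the mm side of page writeback tagging, outside this fs-only diffstat, so the pairing below is an assumption for illustration; the example_* functions are not kernel APIs.

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/*
 * Assumed pairing: add the inode to the sb writeback list when its first
 * page goes under writeback, remove it once the mapping has no pages tagged
 * PAGECACHE_TAG_WRITEBACK left. The helpers added above re-check the list
 * state under s_inode_wblist_lock, so these unlocked checks only skip the
 * lock in the common case.
 */
static void example_start_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (mapping && !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
                sb_mark_inode_writeback(mapping->host);
        /* ... then tag this page's slot in the radix tree ... */
}

static void example_end_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        /* ... clear this page's writeback tag first ... */
        if (mapping && !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
                sb_clear_inode_writeback(mapping->host);
}

With that in place, wait_sb_inodes() can splice s_inodes_wb onto a private list and wait only on mappings still tagged for writeback, which is the rework visible in the fs-writeback.c hunk.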
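The fs/dax.c hunks rename __dax_fault()/__dax_pmd_fault() to dax_fault()/dax_pmd_fault() and remove the old wrappers that called sb_start_pagefault() and file_update_time() on the filesystem's behalf; ext2, ext4 and xfs are updated to call the renamed entry points directly. The sketch below shows the resulting shape of a filesystem ->fault handler, modelled on the removed wrapper and the ext2 hunk; example_dax_fault and example_get_block are placeholders, and the per-inode locking is filesystem-specific.

#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical get_block_t provided by the filesystem. */
static int example_get_block(struct inode *inode, sector_t iblock,
                             struct buffer_head *bh_result, int create)
{
        /* map iblock to a pmem-backed block; filesystem-specific */
        return -EIO;
}

static int example_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vma->vm_file);
        int ret;

        if (vmf->flags & FAULT_FLAG_WRITE) {
                sb_start_pagefault(inode->i_sb);
                file_update_time(vma->vm_file);
        }

        /* take the filesystem's per-inode fault lock here */
        ret = dax_fault(vma, vmf, example_get_block);
        /* drop the per-inode fault lock */

        if (vmf->flags & FAULT_FLAG_WRITE)
                sb_end_pagefault(inode->i_sb);

        return ret;
}

Folding the bookkeeping into the callers lets each filesystem order sb_start_pagefault(), journalling and its own locking as it needs, instead of the DAX core imposing one ordering.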