author     Linus Torvalds <torvalds@linux-foundation.org>  2018-06-05 13:24:20 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-06-05 13:24:20 -0700
commit     6567af78acd18a1f9dec99f27f1b7ba9d270074b
tree       72884b463f13443076609fa53e94b285570f911d /mm
parent     1434763ca5b300ad3b880954bd32dc339d16a833
parent     afd9d6a1df75807684fa40dab77c52e104e5c74b
download   linux-6567af78acd18a1f9dec99f27f1b7ba9d270074b.tar.bz2
Merge tag 'xfs-4.18-merge-3' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux
Pull xfs updates from Darrick Wong:
"New features this cycle include the ability to relabel mounted
filesystems, support for fallocated swapfiles, and using FUA for pure
data O_DSYNC directio writes. With this cycle we begin to integrate
online filesystem repair and refactor the growfs code in preparation
for eventual subvolume support, though the road ahead for both
features is quite long.
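The FUA change above targets a "pure data" O_DSYNC direct write: an aligned overwrite of blocks that are already allocated, where no metadata needs flushing, so the write can be issued as a single FUA request instead of a write followed by a cache flush. The program below is an illustrative userspace sketch of that kind of I/O only; the path, alignment, and sizes are made-up assumptions, not part of the series.

/* Illustrative only: a pure-data O_DSYNC direct write, i.e. an aligned
 * in-place overwrite of existing file blocks.  The file name and sizes
 * here are arbitrary examples. */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        const size_t len = 4096;        /* one already-allocated block */
        void *buf;
        int fd;

        fd = open("/mnt/scratch/datafile", O_WRONLY | O_DIRECT | O_DSYNC);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* O_DIRECT requires a suitably aligned buffer */
        if (posix_memalign(&buf, 4096, len)) {
                fprintf(stderr, "posix_memalign failed\n");
                close(fd);
                return 1;
        }
        memset(buf, 'x', len);

        /* Overwrite the first block in place: no allocation, no size
         * change, so only the data itself has to be made durable. */
        if (pwrite(fd, buf, len, 0) != (ssize_t)len)
                perror("pwrite");

        free(buf);
        close(fd);
        return 0;
}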
There are also numerous refactorings of the iomap code to remove
unnecessary log overhead, to disentangle some of the quota code, and
to prepare for buffer head removal in a future upstream kernel.
Metadata validation continues to improve, both in the hot path
verifiers and the online filesystem check code. I anticipate sending a
second pull request in a few days with more metadata validation
improvements.
This series has been run through a full xfstests run over the weekend
and through a quick xfstests run against this morning's master, with
no major failures reported.
Summary:
- Strengthen inode number and structure validation when allocating
inodes.
- Reduce pointless buffer allocations during cache miss
- Use FUA for pure data O_DSYNC directio writes
- Various iomap refactorings
- Strengthen quota metadata verification to avoid unfixable broken
quota
- Make AGFL block freeing a deferred operation to avoid blowing out
transaction reservations when running complex operations
- Get rid of the log item descriptors to reduce log overhead
- Fix various reflink bugs where inodes were double-joined to
transactions
- Don't issue discards when trimming unwritten extents
- Refactor incore dquot initialization and retrieval interfaces
- Fix some locking problems in the quota scrub code
- Strengthen btree structure checks in scrub code
- Rewrite swapfile activation to use iomap and support unwritten
extents
- Make scrub exit to userspace sooner when corruptions or
cross-referencing problems are found
- Make scrub invoke the data fork scrubber directly on metadata
inodes
- Don't do background reclamation of post-eof and cow blocks when the
fs is suspended
- Fix secondary superblock buffer lifespan hinting
- Refactor growfs to use table-dispatched functions instead of long
stringy functions
- Move growfs code to libxfs
- Implement online fs label getting and setting (see the userspace sketch after this list)
- Introduce online filesystem repair (in a very limited capacity)
- Fix unit conversion problems in the realtime freemap iteration
functions
- Various refactorings and cleanups in preparation to remove buffer
heads in a future release
- Reimplement the old bmap call with iomap
- Remove direct buffer head accesses from seek hole/data
- Various bug fixes"
* tag 'xfs-4.18-merge-3' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux: (121 commits)
fs: use ->is_partially_uptodate in page_cache_seek_hole_data
fs: remove the buffer_unwritten check in page_seek_hole_data
fs: move page_cache_seek_hole_data to iomap.c
xfs: use iomap_bmap
iomap: add an iomap-based bmap implementation
iomap: add a iomap_sector helper
iomap: use __bio_add_page in iomap_dio_zero
iomap: move IOMAP_F_BOUNDARY to gfs2
iomap: fix the comment describing IOMAP_NOWAIT
iomap: inline data should be an iomap type, not a flag
mm: split ->readpages calls to avoid non-contiguous pages lists
mm: return an unsigned int from __do_page_cache_readahead
mm: give the 'ret' variable a better name __do_page_cache_readahead
block: add a lower-level bio_add_page interface
xfs: fix error handling in xfs_refcount_insert()
xfs: fix xfs_rtalloc_rec units
xfs: strengthen rtalloc query range checks
xfs: xfs_rtbuf_get should check the bmapi_read results
xfs: xfs_rtword_t should be unsigned, not signed
dax: change bdev_dax_supported() to support boolean returns
...
Diffstat (limited to 'mm')
-rw-r--r--   mm/internal.h  |  2
-rw-r--r--   mm/readahead.c | 39
2 files changed, 23 insertions, 18 deletions
diff --git a/mm/internal.h b/mm/internal.h
index 502d14189794..9e3654d70289 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -53,7 +53,7 @@ void unmap_page_range(struct mmu_gather *tlb,
 			     unsigned long addr, unsigned long end,
 			     struct zap_details *details);
 
-extern int __do_page_cache_readahead(struct address_space *mapping,
+extern unsigned int __do_page_cache_readahead(struct address_space *mapping,
 		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
 		unsigned long lookahead_size);
 
diff --git a/mm/readahead.c b/mm/readahead.c
index 539bbb6c1fad..e273f0de3376 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -140,23 +140,23 @@ out:
 }
 
 /*
- * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates all
- * the pages first, then submits them all for I/O.  This avoids the very bad
+ * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
+ * the pages first, then submits them for I/O.  This avoids the very bad
  * behaviour which would occur if page allocations are causing VM writeback.
  * We really don't want to intermingle reads and writes like that.
  *
  * Returns the number of pages requested, or the maximum amount of I/O allowed.
  */
-int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-		pgoff_t offset, unsigned long nr_to_read,
-		unsigned long lookahead_size)
+unsigned int __do_page_cache_readahead(struct address_space *mapping,
+		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
+		unsigned long lookahead_size)
 {
 	struct inode *inode = mapping->host;
 	struct page *page;
 	unsigned long end_index;	/* The last page we want to read */
 	LIST_HEAD(page_pool);
 	int page_idx;
-	int ret = 0;
+	unsigned int nr_pages = 0;
 	loff_t isize = i_size_read(inode);
 	gfp_t gfp_mask = readahead_gfp_mask(mapping);
 
@@ -177,8 +177,18 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 		rcu_read_lock();
 		page = radix_tree_lookup(&mapping->i_pages, page_offset);
 		rcu_read_unlock();
-		if (page && !radix_tree_exceptional_entry(page))
+		if (page && !radix_tree_exceptional_entry(page)) {
+			/*
+			 * Page already present?  Kick off the current batch of
+			 * contiguous pages before continuing with the next
+			 * batch.
+			 */
+			if (nr_pages)
+				read_pages(mapping, filp, &page_pool, nr_pages,
+						gfp_mask);
+			nr_pages = 0;
 			continue;
+		}
 
 		page = __page_cache_alloc(gfp_mask);
 		if (!page)
@@ -187,7 +197,7 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 		list_add(&page->lru, &page_pool);
 		if (page_idx == nr_to_read - lookahead_size)
 			SetPageReadahead(page);
-		ret++;
+		nr_pages++;
 	}
 
 	/*
@@ -195,11 +205,11 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 	 * uptodate then the caller will launch readpage again, and
 	 * will then handle the error.
	 */
-	if (ret)
-		read_pages(mapping, filp, &page_pool, ret, gfp_mask);
+	if (nr_pages)
+		read_pages(mapping, filp, &page_pool, nr_pages, gfp_mask);
 	BUG_ON(!list_empty(&page_pool));
 out:
-	return ret;
+	return nr_pages;
 }
 
 /*
@@ -223,16 +233,11 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
 	nr_to_read = min(nr_to_read, max_pages);
 	while (nr_to_read) {
-		int err;
-
 		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;
 
 		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
-		err = __do_page_cache_readahead(mapping, filp,
-						offset, this_chunk, 0);
-		if (err < 0)
-			return err;
+		__do_page_cache_readahead(mapping, filp, offset, this_chunk, 0);
 
 		offset += this_chunk;
 		nr_to_read -= this_chunk;
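Taken together, the readahead hunks do two things: __do_page_cache_readahead() now returns an unsigned page count, with force_page_cache_readahead() no longer checking for a negative error, and the accumulated page pool is submitted as soon as an already-cached page is encountered, so ->readpages is only ever handed a contiguous run of pages. The standalone toy program below is a sketch of that batching rule, not kernel code; submit_batch() is a hypothetical stand-in for read_pages(), and the cache map is made up.

#include <stdio.h>

/* Hypothetical stand-in for read_pages(): submit one contiguous batch. */
static void submit_batch(unsigned long start, unsigned int nr)
{
        if (nr)
                printf("submit %u page(s) starting at index %lu\n", nr, start);
}

int main(void)
{
        /* 1 = page already in the page cache, 0 = page must be read */
        const int cached[] = { 0, 0, 1, 0, 0, 0, 1, 1, 0 };
        unsigned long i, start = 0;
        unsigned int nr = 0;

        for (i = 0; i < sizeof(cached) / sizeof(cached[0]); i++) {
                if (cached[i]) {
                        /* A cached page ends the contiguous run: flush the
                         * batch accumulated so far (if any). */
                        submit_batch(start, nr);
                        nr = 0;
                        continue;
                }
                if (!nr)
                        start = i;      /* first page of a new batch */
                nr++;
        }
        submit_batch(start, nr);        /* submit whatever is left over */
        return 0;
}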