Diffstat (limited to 'mm')
-rw-r--r-- | mm/internal.h  |  2 | +-
-rw-r--r-- | mm/readahead.c | 39 | ++++++++++++++++++++++-----------------
2 files changed, 23 insertions(+), 18 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 502d14189794..9e3654d70289 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -53,7 +53,7 @@ void unmap_page_range(struct mmu_gather *tlb,
                             unsigned long addr, unsigned long end,
                             struct zap_details *details);
 
-extern int __do_page_cache_readahead(struct address_space *mapping,
+extern unsigned int __do_page_cache_readahead(struct address_space *mapping,
                struct file *filp, pgoff_t offset, unsigned long nr_to_read,
                unsigned long lookahead_size);
 
diff --git a/mm/readahead.c b/mm/readahead.c
index 539bbb6c1fad..e273f0de3376 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -140,23 +140,23 @@ out:
 }
 
 /*
- * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates all
- * the pages first, then submits them all for I/O. This avoids the very bad
+ * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
+ * the pages first, then submits them for I/O. This avoids the very bad
  * behaviour which would occur if page allocations are causing VM writeback.
  * We really don't want to intermingle reads and writes like that.
  *
  * Returns the number of pages requested, or the maximum amount of I/O allowed.
  */
-int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-               pgoff_t offset, unsigned long nr_to_read,
-               unsigned long lookahead_size)
+unsigned int __do_page_cache_readahead(struct address_space *mapping,
+               struct file *filp, pgoff_t offset, unsigned long nr_to_read,
+               unsigned long lookahead_size)
 {
        struct inode *inode = mapping->host;
        struct page *page;
        unsigned long end_index;        /* The last page we want to read */
        LIST_HEAD(page_pool);
        int page_idx;
-       int ret = 0;
+       unsigned int nr_pages = 0;
       loff_t isize = i_size_read(inode);
        gfp_t gfp_mask = readahead_gfp_mask(mapping);
 
@@ -177,8 +177,18 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                rcu_read_lock();
                page = radix_tree_lookup(&mapping->i_pages, page_offset);
                rcu_read_unlock();
-               if (page && !radix_tree_exceptional_entry(page))
+               if (page && !radix_tree_exceptional_entry(page)) {
+                       /*
+                        * Page already present? Kick off the current batch of
+                        * contiguous pages before continuing with the next
+                        * batch.
+                        */
+                       if (nr_pages)
+                               read_pages(mapping, filp, &page_pool, nr_pages,
+                                               gfp_mask);
+                       nr_pages = 0;
                        continue;
+               }
 
                page = __page_cache_alloc(gfp_mask);
                if (!page)
@@ -187,7 +197,7 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                list_add(&page->lru, &page_pool);
                if (page_idx == nr_to_read - lookahead_size)
                        SetPageReadahead(page);
-               ret++;
+               nr_pages++;
        }
 
        /*
@@ -195,11 +205,11 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
         * uptodate then the caller will launch readpage again, and
         * will then handle the error.
         */
-       if (ret)
-               read_pages(mapping, filp, &page_pool, ret, gfp_mask);
+       if (nr_pages)
+               read_pages(mapping, filp, &page_pool, nr_pages, gfp_mask);
        BUG_ON(!list_empty(&page_pool));
 out:
-       return ret;
+       return nr_pages;
 }
 
 /*
@@ -223,16 +233,11 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
        max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
        nr_to_read = min(nr_to_read, max_pages);
        while (nr_to_read) {
-               int err;
-
                unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;
 
                if (this_chunk > nr_to_read)
                        this_chunk = nr_to_read;
-               err = __do_page_cache_readahead(mapping, filp,
-                                               offset, this_chunk, 0);
-               if (err < 0)
-                       return err;
+               __do_page_cache_readahead(mapping, filp, offset, this_chunk, 0);
 
                offset += this_chunk;
                nr_to_read -= this_chunk;
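The readahead.c hunks above change read_pages() from one submission per call into per-batch submission: pages are accumulated while they are contiguous, and the batch is kicked off as soon as the lookup finds a page that is already cached, so the ->readpages implementation never sees a non-contiguous page list. Below is a minimal userspace sketch of that flush-on-gap pattern; all names are hypothetical stand-ins (submit_batch() for read_pages(), page_already_cached() for the radix-tree lookup), not kernel API.

#include <stdio.h>

/* Hypothetical stand-in for read_pages(): consumes one contiguous run. */
static void submit_batch(long start, unsigned int nr)
{
	printf("submit %u page(s) starting at index %ld\n", nr, start);
}

/* Hypothetical stand-in for the page-cache lookup in the loop. */
static int page_already_cached(long index)
{
	return index == 3 || index == 4;	/* pretend indexes 3 and 4 are cached */
}

int main(void)
{
	long offset = 0, batch_start = 0;
	unsigned int nr_pages = 0;	/* same role as nr_pages in the patch */
	long idx;

	for (idx = offset; idx < offset + 8; idx++) {
		if (page_already_cached(idx)) {
			/* Gap found: kick off the current contiguous batch. */
			if (nr_pages)
				submit_batch(batch_start, nr_pages);
			nr_pages = 0;
			continue;
		}
		if (nr_pages == 0)
			batch_start = idx;
		nr_pages++;
	}
	/* Final batch, mirroring the read_pages() call after the loop. */
	if (nr_pages)
		submit_batch(batch_start, nr_pages);
	return 0;
}

Run against the 8-page window above, this submits one batch of 3 pages at index 0 and one batch of 3 pages at index 5, skipping the cached gap, which is exactly the behaviour the new "kick off the current batch" branch introduces.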
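The force_page_cache_readahead() hunk keeps the existing chunking, where a large request is split into 2 MiB units so that no single __do_page_cache_readahead() call pins too much memory, and simply drops the error check: with an unsigned return there is no negative value to propagate. A standalone sketch of the chunking arithmetic, assuming 4 KiB pages and a hypothetical do_readahead() in place of __do_page_cache_readahead():

#include <stdio.h>

#define PAGE_SIZE	4096UL	/* assumed page size for this sketch */

/* Hypothetical stand-in for __do_page_cache_readahead(). */
static void do_readahead(unsigned long offset, unsigned long nr)
{
	printf("readahead %lu page(s) at page index %lu\n", nr, offset);
}

int main(void)
{
	unsigned long offset = 0;
	unsigned long nr_to_read = 1500;	/* ~5.9 MiB with 4 KiB pages */

	while (nr_to_read) {
		/* 2 MiB worth of pages per chunk: 512 pages at 4 KiB. */
		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		/* Return value deliberately ignored, as in the patched loop. */
		do_readahead(offset, this_chunk);

		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return 0;
}

For a 1500-page request this issues chunks of 512, 512, and 476 pages, advancing offset as it goes, which matches the loop structure left in place by the patch.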