author     Jens Axboe <jens.axboe@oracle.com>  2009-09-23 19:37:09 +0200
committer  Jens Axboe <jens.axboe@oracle.com>  2009-09-25 18:08:25 +0200
commit     5b0830cb9085f4b69f9d57d7f3aaff322ffbec26 (patch)
tree       10040eb359269d4cd05487790b758144a69e8e39 /mm
parent     71fd05a887e0f3f6bfff76ff81b33776177d0606 (diff)
writeback: get rid of incorrect references to pdflush in comments
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'mm')
 mm/page-writeback.c | 8 ++++----
 mm/shmem.c          | 5 +++--
 mm/vmscan.c         | 8 ++++----
 3 files changed, 11 insertions(+), 10 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 3c78fc316202..8bef063125b1 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -58,7 +58,7 @@ static inline long sync_writeback_pages(unsigned long dirtied)
 /* The following parameters are exported via /proc/sys/vm */
 
 /*
- * Start background writeback (via pdflush) at this percentage
+ * Start background writeback (via writeback threads) at this percentage
  */
 int dirty_background_ratio = 10;
 
@@ -477,8 +477,8 @@ get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
  * balance_dirty_pages() must be called by processes which are generating dirty
  * data.  It looks at the number of dirty pages in the machine and will force
  * the caller to perform writeback if the system is over `vm_dirty_ratio'.
- * If we're over `background_thresh' then pdflush is woken to perform some
- * writeout.
+ * If we're over `background_thresh' then the writeback threads are woken to
+ * perform some writeout.
  */
 static void balance_dirty_pages(struct address_space *mapping,
 				unsigned long write_chunk)
@@ -582,7 +582,7 @@ static void balance_dirty_pages(struct address_space *mapping,
 		bdi->dirty_exceeded = 0;
 
 	if (writeback_in_progress(bdi))
-		return;		/* pdflush is already working this queue */
+		return;
 
 	/*
 	 * In laptop mode, we wait until hitting the higher threshold before
diff --git a/mm/shmem.c b/mm/shmem.c
index b206a7a32e2a..aa9481166aae 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1046,8 +1046,9 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	 * sync from ever calling shmem_writepage; but a stacking filesystem
 	 * may use the ->writepage of its underlying filesystem, in which case
 	 * tmpfs should write out to swap only in response to memory pressure,
-	 * and not for pdflush or sync.  However, in those cases, we do still
-	 * want to check if there's a redundant swappage to be discarded.
+	 * and not for the writeback threads or sync.  However, in those cases,
+	 * we do still want to check if there's a redundant swappage to be
+	 * discarded.
 	 */
 	if (wbc->for_reclaim)
 		swap = get_swap_page();
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 613e89f471d9..359c3c57ef85 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1709,10 +1709,10 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
  *
  * If the caller is !__GFP_FS then the probability of a failure is reasonably
  * high - the zone may be full of dirty or under-writeback pages, which this
- * caller can't do much about.  We kick pdflush and take explicit naps in the
- * hope that some of these pages can be written.  But if the allocating task
- * holds filesystem locks which prevent writeout this might not work, and the
- * allocation attempt will fail.
+ * caller can't do much about.  We kick the writeback threads and take explicit
+ * naps in the hope that some of these pages can be written.  But if the
+ * allocating task holds filesystem locks which prevent writeout this might not
+ * work, and the allocation attempt will fail.
  *
  * returns:	0, if no pages reclaimed
  *		else, the number of pages reclaimed
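
As the page-writeback.c comment notes, dirty_background_ratio is exported to
userspace through /proc/sys/vm, so the threshold at which the writeback
threads are woken can be inspected without touching kernel code. The small
program below is an illustrative sketch, not part of the commit: it only
assumes an ordinary Linux system with procfs mounted.

/*
 * Illustrative sketch: read the percentage of dirty memory at which
 * background writeback (the per-bdi writeback threads that replaced
 * pdflush) is started.  Assumes /proc/sys/vm/dirty_background_ratio
 * exists, which it does on standard Linux kernels.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/dirty_background_ratio", "r");
	int ratio;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &ratio) == 1)
		printf("background writeback starts at %d%% dirty memory\n",
		       ratio);
	fclose(f);
	return 0;
}

Writing a new percentage back to the same file (for example,
echo 5 > /proc/sys/vm/dirty_background_ratio, as root) moves the point at
which the writeback threads are woken, relative to the default of 10 set in
the hunk above.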