author     Dave Chinner <dchinner@redhat.com>    2012-04-23 15:58:56 +1000
committer  Ben Myers <bpm@sgi.com>               2012-05-14 16:20:52 -0500
commit     aa5c158ec97bd4014f47a2bc0150fb6b20e6c48b (patch)
tree       8c79791167a5d659949952ad11a08ce325eb025c /fs/xfs/xfs_buf.c
parent     7ca790a507a9288ebedab90a8e40b9afa8e4e949 (diff)
download   linux-aa5c158ec97bd4014f47a2bc0150fb6b20e6c48b.tar.bz2
xfs: kill XBF_DONTBLOCK
Just about all callers of xfs_buf_read() and xfs_buf_get() pass XBF_DONTBLOCK.
The flag makes memory allocation use GFP_NOFS rather than GFP_KERNEL so that
allocation cannot recurse through memory reclaim back into the filesystem and
deadlock on locks the allocating task already holds.
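
[Editor's note: for context, this is the pre-patch mapping from buffer flags
to allocation flags, copied from the removed lines of the diff below. Only
XBF_DONT_BLOCK selected the NOFS variants; everything else got GFP_KERNEL
and KM_SLEEP.]

	#define xb_to_gfp(flags) \
		((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
		  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)

	#define xb_to_km(flags) \
		(((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
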
All the blocking get calls in growfs occur inside a transaction, and even
though they are not part of the transaction proper, all their allocation will
already be GFP_NOFS because the task flag PF_FSTRANS is set. The blocking read
calls occur during log recovery, so they should be unaffected by converting to
GFP_NOFS allocations.
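
[Editor's note: the PF_FSTRANS effect comes from XFS's kmem wrapper, which
strips __GFP_FS from any sleeping allocation made while the task flag is set.
A sketch of the fs/xfs/kmem.h helper from this era -- reconstructed, not
verbatim, so details may differ:]

	static inline gfp_t
	kmem_flags_convert(unsigned int flags)
	{
		gfp_t	lflags;

		if (flags & KM_NOSLEEP) {
			lflags = GFP_ATOMIC | __GFP_NOWARN;
		} else {
			lflags = GFP_KERNEL | __GFP_NOWARN;
			/*
			 * Inside a transaction (PF_FSTRANS) or with KM_NOFS,
			 * mask out __GFP_FS so reclaim cannot re-enter XFS.
			 */
			if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS))
				lflags &= ~__GFP_FS;
		}
		return lflags;
	}
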
Hence make the XBF_DONTBLOCK behaviour unconditional for all buffers and kill
the flag.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
Diffstat (limited to 'fs/xfs/xfs_buf.c')
-rw-r--r--  fs/xfs/xfs_buf.c  18
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 8366348aa1f9..59e39160d379 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -56,11 +56,7 @@ static struct workqueue_struct *xfslogd_workqueue;
 #endif
 
 #define xb_to_gfp(flags) \
-	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
-	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
-
-#define xb_to_km(flags) \
-	(((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
+	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
 
 
 static inline int
@@ -178,14 +174,14 @@ xfs_buf_alloc(
 {
 	struct xfs_buf		*bp;
 
-	bp = kmem_zone_zalloc(xfs_buf_zone, xb_to_km(flags));
+	bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
 	if (unlikely(!bp))
 		return NULL;
 
 	/*
 	 * We don't want certain flags to appear in b_flags.
 	 */
-	flags &= ~(XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
+	flags &= ~(XBF_MAPPED|XBF_READ_AHEAD);
 
 	atomic_set(&bp->b_hold, 1);
 	atomic_set(&bp->b_lru_ref, 1);
@@ -239,7 +235,7 @@ _xfs_buf_get_pages(
 		bp->b_pages = bp->b_page_array;
 	} else {
 		bp->b_pages = kmem_alloc(sizeof(struct page *) *
-					 page_count, xb_to_km(flags));
+					 page_count, KM_NOFS);
 		if (bp->b_pages == NULL)
 			return -ENOMEM;
 	}
@@ -316,7 +312,7 @@ xfs_buf_allocate_memory(
 	 */
 	size = BBTOB(bp->b_length);
 	if (size < PAGE_SIZE) {
-		bp->b_addr = kmem_alloc(size, xb_to_km(flags));
+		bp->b_addr = kmem_alloc(size, KM_NOFS);
 		if (!bp->b_addr) {
 			/* low memory - use alloc_page loop instead */
 			goto use_alloc_page;
@@ -659,7 +655,7 @@ xfs_buf_readahead(
 		return;
 
 	xfs_buf_read(target, blkno, numblks,
-		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
+		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
 }
 
 /*
@@ -750,7 +746,7 @@ xfs_buf_associate_memory(
 	bp->b_pages = NULL;
 	bp->b_addr = mem;
 
-	rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
+	rval = _xfs_buf_get_pages(bp, page_count, 0);
 	if (rval)
 		return rval;
 
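
[Editor's note: with the patch applied, xb_to_gfp() distinguishes only
readahead from normal buffers. Expanding the merged macro by hand -- a worked
example, the variable names are illustrative, not code from the patch:]

	/* Normal buffer: xb_to_gfp(0) */
	gfp_t gfp_normal    = GFP_NOFS | __GFP_NOWARN;

	/* Readahead buffer: xb_to_gfp(XBF_READ_AHEAD) */
	gfp_t gfp_readahead = __GFP_NORETRY | __GFP_NOWARN;

[Note that the readahead case carries neither __GFP_WAIT nor __GFP_FS, so a
readahead allocation fails fast under memory pressure instead of blocking,
which is the right behaviour for speculative I/O.]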