| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-12-11 15:30:29 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-12-11 15:30:29 -0800 |
| commit | f4d544ee5720d336a8c64f9fd33efb888c302309 (patch) | |
| tree | 3b4674d46b04fbcfc38677df59c92320f65568dd | |
| parent | 0e2f7b837600979d5b6f174a6ff695b85942e985 (diff) | |
| parent | 44a743f68705c681439f264deb05f8f38e9048d3 (diff) | |
| download | linux-f4d544ee5720d336a8c64f9fd33efb888c302309.tar.bz2 | |
Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs
* 'for-linus' of git://oss.sgi.com/xfs/xfs:
  xfs: Fix error return for fallocate() on XFS
  xfs: cleanup dmapi macros in the umount path
  xfs: remove incorrect sparse annotation for xfs_iget_cache_miss
  xfs: kill the STATIC_INLINE macro
  xfs: uninline xfs_get_extsz_hint
  xfs: rename xfs_attr_fetch to xfs_attr_get_int
  xfs: simplify xfs_buf_get / xfs_buf_read interfaces
  xfs: remove IO_ISAIO
  xfs: Wrapped journal record corruption on read at recovery
  xfs: cleanup data end I/O handlers
  xfs: use WRITE_SYNC_PLUG for synchronous writeout
  xfs: reset the i_iolock lock class in the reclaim path
  xfs: I/O completion handlers must use NOFS allocations
  xfs: fix mmap_sem/iolock inversion in xfs_free_eofblocks
  xfs: simplify inode teardown
30 files changed, 254 insertions, 321 deletions
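The largest change in the diff below ("xfs: cleanup data end I/O handlers") collapses the four per-type end-of-write work functions in fs/xfs/linux-2.6/xfs_aops.c into a single xfs_end_io() that branches on ioend->io_type: unwritten extents get a conversion transaction, and anything other than a read may need an on-disk file-size update. A minimal userspace sketch of that dispatch shape follows; the struct layout and the helpers convert_unwritten() and update_ondisk_size() are simplified stand-ins for the kernel's xfs_iomap_write_unwritten() and xfs_setfilesize(), not the real definitions.

```c
/*
 * Illustrative only: a userspace model of the single-completion-handler
 * shape that the xfs_aops.c hunk introduces.  Types and helpers are
 * stand-ins, not the kernel's definitions.
 */
#include <stdio.h>

enum io_type { IOMAP_READ, IOMAP_NEW, IOMAP_DELAY, IOMAP_UNWRITTEN };

struct ioend {
	enum io_type	type;		/* what kind of I/O completed */
	int		error;		/* sticky error from the bios */
	long long	offset;		/* byte offset of the range */
	long long	size;		/* length of the range */
};

/* stand-in for xfs_iomap_write_unwritten(): convert the range to written */
static int convert_unwritten(struct ioend *io)
{
	printf("convert [%lld, %lld) to written extents\n",
	       io->offset, io->offset + io->size);
	return 0;
}

/* stand-in for xfs_setfilesize(): push out a new on-disk file size */
static void update_ondisk_size(struct ioend *io)
{
	printf("update on-disk size after write ending at %lld\n",
	       io->offset + io->size);
}

/*
 * One handler instead of four: unwritten extents need a conversion
 * transaction, every write may need a size update, reads need neither.
 */
static void end_io(struct ioend *io)
{
	if (io->type == IOMAP_UNWRITTEN && !io->error)
		io->error = convert_unwritten(io);

	if (io->type != IOMAP_READ)
		update_ondisk_size(io);
}

int main(void)
{
	struct ioend unwritten = { IOMAP_UNWRITTEN, 0, 4096, 8192 };
	struct ioend read_io   = { IOMAP_READ, 0, 0, 4096 };

	end_io(&unwritten);	/* converts the range, then sizes the file */
	end_io(&read_io);	/* no transaction, no size update */
	return 0;
}
```

Funneling everything through one handler is also what lets xfs_finish_ioend() pick its workqueue from io_type instead of comparing work-function pointers, as the same hunk shows.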
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 70f989895d15..87813e405cef 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -235,71 +235,36 @@ xfs_setfilesize( } /* - * Buffered IO write completion for delayed allocate extents. + * IO write completion. */ STATIC void -xfs_end_bio_delalloc( - struct work_struct *work) -{ - xfs_ioend_t *ioend = - container_of(work, xfs_ioend_t, io_work); - - xfs_setfilesize(ioend); - xfs_destroy_ioend(ioend); -} - -/* - * Buffered IO write completion for regular, written extents. - */ -STATIC void -xfs_end_bio_written( - struct work_struct *work) -{ - xfs_ioend_t *ioend = - container_of(work, xfs_ioend_t, io_work); - - xfs_setfilesize(ioend); - xfs_destroy_ioend(ioend); -} - -/* - * IO write completion for unwritten extents. - * - * Issue transactions to convert a buffer range from unwritten - * to written extents. - */ -STATIC void -xfs_end_bio_unwritten( +xfs_end_io( struct work_struct *work) { xfs_ioend_t *ioend = container_of(work, xfs_ioend_t, io_work); struct xfs_inode *ip = XFS_I(ioend->io_inode); - xfs_off_t offset = ioend->io_offset; - size_t size = ioend->io_size; - - if (likely(!ioend->io_error)) { - if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { - int error; - error = xfs_iomap_write_unwritten(ip, offset, size); - if (error) - ioend->io_error = error; - } - xfs_setfilesize(ioend); - } - xfs_destroy_ioend(ioend); -} -/* - * IO read completion for regular, written extents. - */ -STATIC void -xfs_end_bio_read( - struct work_struct *work) -{ - xfs_ioend_t *ioend = - container_of(work, xfs_ioend_t, io_work); + /* + * For unwritten extents we need to issue transactions to convert a + * range to normal written extens after the data I/O has finished. + */ + if (ioend->io_type == IOMAP_UNWRITTEN && + likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) { + int error; + + error = xfs_iomap_write_unwritten(ip, ioend->io_offset, + ioend->io_size); + if (error) + ioend->io_error = error; + } + /* + * We might have to update the on-disk file size after extending + * writes. + */ + if (ioend->io_type != IOMAP_READ) + xfs_setfilesize(ioend); xfs_destroy_ioend(ioend); } @@ -314,10 +279,10 @@ xfs_finish_ioend( int wait) { if (atomic_dec_and_test(&ioend->io_remaining)) { - struct workqueue_struct *wq = xfsdatad_workqueue; - if (ioend->io_work.func == xfs_end_bio_unwritten) - wq = xfsconvertd_workqueue; + struct workqueue_struct *wq; + wq = (ioend->io_type == IOMAP_UNWRITTEN) ? 
+ xfsconvertd_workqueue : xfsdatad_workqueue; queue_work(wq, &ioend->io_work); if (wait) flush_workqueue(wq); @@ -355,15 +320,7 @@ xfs_alloc_ioend( ioend->io_offset = 0; ioend->io_size = 0; - if (type == IOMAP_UNWRITTEN) - INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten); - else if (type == IOMAP_DELAY) - INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc); - else if (type == IOMAP_READ) - INIT_WORK(&ioend->io_work, xfs_end_bio_read); - else - INIT_WORK(&ioend->io_work, xfs_end_bio_written); - + INIT_WORK(&ioend->io_work, xfs_end_io); return ioend; } @@ -380,7 +337,7 @@ xfs_map_blocks( return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps); } -STATIC_INLINE int +STATIC int xfs_iomap_valid( xfs_iomap_t *iomapp, loff_t offset) @@ -412,8 +369,9 @@ xfs_end_bio( STATIC void xfs_submit_ioend_bio( - xfs_ioend_t *ioend, - struct bio *bio) + struct writeback_control *wbc, + xfs_ioend_t *ioend, + struct bio *bio) { atomic_inc(&ioend->io_remaining); bio->bi_private = ioend; @@ -426,7 +384,8 @@ xfs_submit_ioend_bio( if (xfs_ioend_new_eof(ioend)) xfs_mark_inode_dirty_sync(XFS_I(ioend->io_inode)); - submit_bio(WRITE, bio); + submit_bio(wbc->sync_mode == WB_SYNC_ALL ? + WRITE_SYNC_PLUG : WRITE, bio); ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP)); bio_put(bio); } @@ -505,6 +464,7 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh) */ STATIC void xfs_submit_ioend( + struct writeback_control *wbc, xfs_ioend_t *ioend) { xfs_ioend_t *head = ioend; @@ -533,19 +493,19 @@ xfs_submit_ioend( retry: bio = xfs_alloc_ioend_bio(bh); } else if (bh->b_blocknr != lastblock + 1) { - xfs_submit_ioend_bio(ioend, bio); + xfs_submit_ioend_bio(wbc, ioend, bio); goto retry; } if (bio_add_buffer(bio, bh) != bh->b_size) { - xfs_submit_ioend_bio(ioend, bio); + xfs_submit_ioend_bio(wbc, ioend, bio); goto retry; } lastblock = bh->b_blocknr; } if (bio) - xfs_submit_ioend_bio(ioend, bio); + xfs_submit_ioend_bio(wbc, ioend, bio); xfs_finish_ioend(ioend, 0); } while ((ioend = next) != NULL); } @@ -1191,7 +1151,7 @@ xfs_page_state_convert( } if (iohead) - xfs_submit_ioend(iohead); + xfs_submit_ioend(wbc, iohead); return page_dirty; @@ -1528,7 +1488,7 @@ xfs_end_io_direct( * didn't map an unwritten extent so switch it's completion * handler. */ - INIT_WORK(&ioend->io_work, xfs_end_bio_written); + ioend->io_type = IOMAP_NEW; xfs_finish_ioend(ioend, 0); } diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 965df1227d64..4ddc973aea7a 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -149,7 +149,7 @@ page_region_mask( return mask; } -STATIC_INLINE void +STATIC void set_page_region( struct page *page, size_t offset, @@ -161,7 +161,7 @@ set_page_region( SetPageUptodate(page); } -STATIC_INLINE int +STATIC int test_page_region( struct page *page, size_t offset, @@ -582,7 +582,7 @@ found: * although backing storage may not be. 
*/ xfs_buf_t * -xfs_buf_get_flags( +xfs_buf_get( xfs_buftarg_t *target,/* target for buffer */ xfs_off_t ioff, /* starting offset of range */ size_t isize, /* length of range */ @@ -661,7 +661,7 @@ _xfs_buf_read( } xfs_buf_t * -xfs_buf_read_flags( +xfs_buf_read( xfs_buftarg_t *target, xfs_off_t ioff, size_t isize, @@ -671,7 +671,7 @@ xfs_buf_read_flags( flags |= XBF_READ; - bp = xfs_buf_get_flags(target, ioff, isize, flags); + bp = xfs_buf_get(target, ioff, isize, flags); if (bp) { if (!XFS_BUF_ISDONE(bp)) { XB_TRACE(bp, "read", (unsigned long)flags); @@ -718,7 +718,7 @@ xfs_buf_readahead( return; flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD); - xfs_buf_read_flags(target, ioff, isize, flags); + xfs_buf_read(target, ioff, isize, flags); } xfs_buf_t * @@ -1113,7 +1113,7 @@ xfs_bdwrite( xfs_buf_delwri_queue(bp, 1); } -STATIC_INLINE void +STATIC void _xfs_buf_ioend( xfs_buf_t *bp, int schedule) diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 9b4d666ad31f..5f07dd91c5fa 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -186,15 +186,10 @@ extern xfs_buf_t *_xfs_buf_find(xfs_buftarg_t *, xfs_off_t, size_t, #define xfs_incore(buftarg,blkno,len,lockit) \ _xfs_buf_find(buftarg, blkno ,len, lockit, NULL) -extern xfs_buf_t *xfs_buf_get_flags(xfs_buftarg_t *, xfs_off_t, size_t, +extern xfs_buf_t *xfs_buf_get(xfs_buftarg_t *, xfs_off_t, size_t, xfs_buf_flags_t); -#define xfs_buf_get(target, blkno, len, flags) \ - xfs_buf_get_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED) - -extern xfs_buf_t *xfs_buf_read_flags(xfs_buftarg_t *, xfs_off_t, size_t, +extern xfs_buf_t *xfs_buf_read(xfs_buftarg_t *, xfs_off_t, size_t, xfs_buf_flags_t); -#define xfs_buf_read(target, blkno, len, flags) \ - xfs_buf_read_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED) extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *); extern xfs_buf_t *xfs_buf_get_noaddr(size_t, xfs_buftarg_t *); diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index eff61e2732af..e4caeb28ce2e 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -52,7 +52,7 @@ xfs_file_aio_read( loff_t pos) { struct file *file = iocb->ki_filp; - int ioflags = IO_ISAIO; + int ioflags = 0; BUG_ON(iocb->ki_pos != pos); if (unlikely(file->f_flags & O_DIRECT)) @@ -71,7 +71,7 @@ xfs_file_aio_write( loff_t pos) { struct file *file = iocb->ki_filp; - int ioflags = IO_ISAIO; + int ioflags = 0; BUG_ON(iocb->ki_pos != pos); if (unlikely(file->f_flags & O_DIRECT)) diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index cd42ef78f6b5..1f3b4b8f7dd4 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -573,8 +573,8 @@ xfs_vn_fallocate( bf.l_len = len; xfs_ilock(ip, XFS_IOLOCK_EXCL); - error = xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf, - 0, XFS_ATTR_NOLOCK); + error = -xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf, + 0, XFS_ATTR_NOLOCK); if (!error && !(mode & FALLOC_FL_KEEP_SIZE) && offset + len > i_size_read(inode)) new_size = offset + len; @@ -585,7 +585,7 @@ xfs_vn_fallocate( iattr.ia_valid = ATTR_SIZE; iattr.ia_size = new_size; - error = xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK); + error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK); } xfs_iunlock(ip, XFS_IOLOCK_EXCL); diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c index 072050f8d346..78dbfcd5eec2 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.c +++ b/fs/xfs/linux-2.6/xfs_lrw.c @@ -255,8 +255,6 @@ xfs_read( iocb->ki_pos = *offset; ret = 
generic_file_aio_read(iocb, iovp, segs, *offset); - if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO)) - ret = wait_on_sync_kiocb(iocb); if (ret > 0) XFS_STATS_ADD(xs_read_bytes, ret); @@ -774,9 +772,6 @@ write_retry: current->backing_dev_info = NULL; - if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO)) - ret = wait_on_sync_kiocb(iocb); - isize = i_size_read(inode); if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize)) *offset = isize; diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 18a4b8e11df2..1bfb0e980193 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -930,13 +930,39 @@ xfs_fs_alloc_inode( */ STATIC void xfs_fs_destroy_inode( - struct inode *inode) + struct inode *inode) { - xfs_inode_t *ip = XFS_I(inode); + struct xfs_inode *ip = XFS_I(inode); + + xfs_itrace_entry(ip); XFS_STATS_INC(vn_reclaim); - if (xfs_reclaim(ip)) - panic("%s: cannot reclaim 0x%p\n", __func__, inode); + + /* bad inode, get out here ASAP */ + if (is_bad_inode(inode)) + goto out_reclaim; + + xfs_ioend_wait(ip); + + ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0); + + /* + * We should never get here with one of the reclaim flags already set. + */ + ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE)); + ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM)); + + /* + * If we have nothing to flush with this inode then complete the + * teardown now, otherwise delay the flush operation. + */ + if (!xfs_inode_clean(ip)) { + xfs_inode_set_reclaim_tag(ip); + return; + } + +out_reclaim: + xfs_ireclaim(ip); } /* @@ -973,7 +999,6 @@ xfs_fs_inode_init_once( mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER, "xfsino", ip->i_ino); - mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); } /* @@ -1075,6 +1100,20 @@ xfs_fs_clear_inode( XFS_STATS_INC(vn_remove); XFS_STATS_DEC(vn_active); + /* + * The iolock is used by the file system to coordinate reads, + * writes, and block truncates. Up to this point the lock + * protected concurrent accesses by users of the inode. But + * from here forward we're doing some final processing of the + * inode because we're done with it, and although we reuse the + * iolock for protection it is really a distinct lock class + * (in the lockdep sense) from before. To keep lockdep happy + * (and basically indicate what we are doing), we explicitly + * re-init the iolock here. + */ + ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock)); + mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); + xfs_inactive(ip); } @@ -1092,8 +1131,6 @@ xfs_fs_put_super( struct super_block *sb) { struct xfs_mount *mp = XFS_M(sb); - struct xfs_inode *rip = mp->m_rootip; - int unmount_event_flags = 0; xfs_syncd_stop(mp); @@ -1109,20 +1146,7 @@ xfs_fs_put_super( xfs_sync_attr(mp, 0); } -#ifdef HAVE_DMAPI - if (mp->m_flags & XFS_MOUNT_DMAPI) { - unmount_event_flags = - (mp->m_dmevmask & (1 << DM_EVENT_UNMOUNT)) ? - 0 : DM_FLAGS_UNWANTED; - /* - * Ignore error from dmapi here, first unmount is not allowed - * to fail anyway, and second we wouldn't want to fail a - * unmount because of dmapi. - */ - XFS_SEND_PREUNMOUNT(mp, rip, DM_RIGHT_NULL, rip, DM_RIGHT_NULL, - NULL, NULL, 0, 0, unmount_event_flags); - } -#endif + XFS_SEND_PREUNMOUNT(mp); /* * Blow away any referenced inode in the filestreams cache. 
@@ -1133,10 +1157,7 @@ xfs_fs_put_super( XFS_bflush(mp->m_ddev_targp); - if (mp->m_flags & XFS_MOUNT_DMAPI) { - XFS_SEND_UNMOUNT(mp, rip, DM_RIGHT_NULL, 0, 0, - unmount_event_flags); - } + XFS_SEND_UNMOUNT(mp); xfs_unmountfs(mp); xfs_freesb(mp); diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 961df0a22c78..d895a3a960f5 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -663,10 +663,9 @@ xfs_syncd_stop( kthread_stop(mp->m_sync_task); } -int +STATIC int xfs_reclaim_inode( xfs_inode_t *ip, - int locked, int sync_mode) { xfs_perag_t *pag = xfs_get_perag(ip->i_mount, ip->i_ino); @@ -682,10 +681,6 @@ xfs_reclaim_inode( !__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) { spin_unlock(&ip->i_flags_lock); write_unlock(&pag->pag_ici_lock); - if (locked) { - xfs_ifunlock(ip); - xfs_iunlock(ip, XFS_ILOCK_EXCL); - } return -EAGAIN; } __xfs_iflags_set(ip, XFS_IRECLAIM); @@ -704,10 +699,8 @@ xfs_reclaim_inode( * We get the flush lock regardless, though, just to make sure * we don't free it while it is being flushed. */ - if (!locked) { - xfs_ilock(ip, XFS_ILOCK_EXCL); - xfs_iflock(ip); - } + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_iflock(ip); /* * In the case of a forced shutdown we rely on xfs_iflush() to @@ -778,7 +771,7 @@ xfs_reclaim_inode_now( } read_unlock(&pag->pag_ici_lock); - return xfs_reclaim_inode(ip, 0, flags); + return xfs_reclaim_inode(ip, flags); } int diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index 27920eb7a820..a500b4d91835 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h @@ -44,7 +44,6 @@ void xfs_quiesce_attr(struct xfs_mount *mp); void xfs_flush_inodes(struct xfs_inode *ip); -int xfs_reclaim_inode(struct xfs_inode *ip, int locked, int sync_mode); int xfs_reclaim_inodes(struct xfs_mount *mp, int mode); void xfs_inode_set_reclaim_tag(struct xfs_inode *ip); diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h index ad7fbead4c97..00cabf5354d2 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.h +++ b/fs/xfs/linux-2.6/xfs_vnode.h @@ -36,7 +36,6 @@ struct attrlist_cursor_kern; /* * Flags for read/write calls - same values as IRIX */ -#define IO_ISAIO 0x00001 /* don't wait for completion */ #define IO_ISDIRECT 0x00004 /* bypass page cache */ #define IO_INVIS 0x00020 /* don't update inode timestamps */ diff --git a/fs/xfs/support/debug.h b/fs/xfs/support/debug.h index 6f4fd37c67af..d2d20462fd4f 100644 --- a/fs/xfs/support/debug.h +++ b/fs/xfs/support/debug.h @@ -41,10 +41,6 @@ extern void assfail(char *expr, char *f, int l); # define STATIC static noinline #endif -#ifndef STATIC_INLINE -# define STATIC_INLINE static inline -#endif - #else /* DEBUG */ #define ASSERT(expr) \ @@ -54,19 +50,5 @@ extern void assfail(char *expr, char *f, int l); # define STATIC noinline #endif -/* - * We stop inlining of inline functions in debug mode. - * Unfortunately, this means static inline in header files - * get multiple definitions, so they need to remain static. - * This then gives tonnes of warnings about unused but defined - * functions, so we need to add the unused attribute to prevent - * these spurious warnings. - */ -#ifndef STATIC_INLINE -# define STATIC_INLINE static __attribute__ ((unused)) noinline -#endif - #endif /* DEBUG */ - - #endif /* __XFS_SUPPORT_DEBUG_H__ */ diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index 4ece1906bd41..8fe6f6b78a4a 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c @@ -123,9 +123,13 @@ xfs_inode_hasattr( * Overall external interface routines. 
*========================================================================*/ -int -xfs_attr_fetch(xfs_inode_t *ip, struct xfs_name *name, - char *value, int *valuelenp, int flags) +STATIC int +xfs_attr_get_int( + struct xfs_inode *ip, + struct xfs_name *name, + char *value, + int *valuelenp, + int flags) { xfs_da_args_t args; int error; @@ -188,7 +192,7 @@ xfs_attr_get( return error; xfs_ilock(ip, XFS_ILOCK_SHARED); - error = xfs_attr_fetch(ip, &xname, value, valuelenp, flags); + error = xfs_attr_get_int(ip, &xname, value, valuelenp, flags); xfs_iunlock(ip, XFS_ILOCK_SHARED); return(error); } @@ -2143,8 +2147,8 @@ xfs_attr_rmtval_set(xfs_da_args_t *args) dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock), blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount); - bp = xfs_buf_get_flags(mp->m_ddev_targp, dblkno, blkcnt, - XFS_BUF_LOCK | XBF_DONT_BLOCK); + bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt, + XFS_BUF_LOCK | XBF_DONT_BLOCK); ASSERT(bp); ASSERT(!XFS_BUF_GETERROR(bp)); diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h index fb3b2a68b9b9..12f0be3a73d4 100644 --- a/fs/xfs/xfs_attr.h +++ b/fs/xfs/xfs_attr.h @@ -131,7 +131,6 @@ typedef struct xfs_attr_list_context { */ int xfs_attr_calc_size(struct xfs_inode *, int, int, int *); int xfs_attr_inactive(struct xfs_inode *dp); -int xfs_attr_fetch(struct xfs_inode *, struct xfs_name *, char *, int *, int); int xfs_attr_rmtval_get(struct xfs_da_args *args); int xfs_attr_list_int(struct xfs_attr_list_context *); diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c index afdc8911637d..0b687351293f 100644 --- a/fs/xfs/xfs_attr_leaf.c +++ b/fs/xfs/xfs_attr_leaf.c @@ -98,7 +98,7 @@ STATIC int xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index); * If namespace bits don't match return 0. * If all match then return 1. */ -STATIC_INLINE int +STATIC int xfs_attr_namesp_match(int arg_flags, int ondisk_flags) { return XFS_ATTR_NSP_ONDISK(ondisk_flags) == XFS_ATTR_NSP_ARGS_TO_ONDISK(arg_flags); diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index eb7b702d0690..6f5ccede63f9 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -98,8 +98,7 @@ xfs_bmdr_to_bmbt( * This code must be in sync with the routines xfs_bmbt_get_startoff, * xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state. */ - -STATIC_INLINE void +STATIC void __xfs_bmbt_get_all( __uint64_t l0, __uint64_t l1, diff --git a/fs/xfs/xfs_filestream.h b/fs/xfs/xfs_filestream.h index f655f7dc334c..4aba67c5f64f 100644 --- a/fs/xfs/xfs_filestream.h +++ b/fs/xfs/xfs_filestream.h @@ -79,7 +79,7 @@ extern ktrace_t *xfs_filestreams_trace_buf; * the cache that reference per-ag array elements that have since been * reallocated. */ -STATIC_INLINE int +static inline int xfs_filestream_peek_ag( xfs_mount_t *mp, xfs_agnumber_t agno) @@ -87,7 +87,7 @@ xfs_filestream_peek_ag( return atomic_read(&mp->m_perag[agno].pagf_fstrms); } -STATIC_INLINE int +static inline int xfs_filestream_get_ag( xfs_mount_t *mp, xfs_agnumber_t agno) @@ -95,7 +95,7 @@ xfs_filestream_get_ag( return atomic_inc_return(&mp->m_perag[agno].pagf_fstrms); } -STATIC_INLINE int +static inline int xfs_filestream_put_ag( xfs_mount_t *mp, xfs_agnumber_t agno) @@ -122,7 +122,7 @@ int xfs_filestream_new_ag(struct xfs_bmalloca *ap, xfs_agnumber_t *agp); /* filestreams for the inode? 
*/ -STATIC_INLINE int +static inline int xfs_inode_is_filestream( struct xfs_inode *ip) { diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 2d0b3e1da9e6..36079aa91344 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -201,8 +201,8 @@ xfs_growfs_data_private( * AG freelist header block */ bp = xfs_buf_get(mp->m_ddev_targp, - XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)), - XFS_FSS_TO_BB(mp, 1), 0); + XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)), + XFS_FSS_TO_BB(mp, 1), XBF_LOCK | XBF_MAPPED); agf = XFS_BUF_TO_AGF(bp); memset(agf, 0, mp->m_sb.sb_sectsize); agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC); @@ -233,8 +233,8 @@ xfs_growfs_data_private( * AG inode header block */ bp = xfs_buf_get(mp->m_ddev_targp, - XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)), - XFS_FSS_TO_BB(mp, 1), 0); + XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)), + XFS_FSS_TO_BB(mp, 1), XBF_LOCK | XBF_MAPPED); agi = XFS_BUF_TO_AGI(bp); memset(agi, 0, mp->m_sb.sb_sectsize); agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC); @@ -257,8 +257,9 @@ xfs_growfs_data_private( * BNO btree root block */ bp = xfs_buf_get(mp->m_ddev_targp, - XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)), - BTOBB(mp->m_sb.sb_blocksize), 0); + XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)), + BTOBB(mp->m_sb.sb_blocksize), + XBF_LOCK | XBF_MAPPED); block = XFS_BUF_TO_BLOCK(bp); memset(block, 0, mp->m_sb.sb_blocksize); block->bb_magic = cpu_to_be32(XFS_ABTB_MAGIC); @@ -278,8 +279,9 @@ xfs_growfs_data_private( * CNT btree root block */ bp = xfs_buf_get(mp->m_ddev_targp, - XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)), - BTOBB(mp->m_sb.sb_blocksize), 0); + XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)), + BTOBB(mp->m_sb.sb_blocksize), + XBF_LOCK | XBF_MAPPED); block = XFS_BUF_TO_BLOCK(bp); memset(block, 0, mp->m_sb.sb_blocksize); block->bb_magic = cpu_to_be32(XFS_ABTC_MAGIC); @@ -300,8 +302,9 @@ xfs_growfs_data_private( * INO btree root block */ bp = xfs_buf_get(mp->m_ddev_targp, - XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)), - BTOBB(mp->m_sb.sb_blocksize), 0); + XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)), + BTOBB(mp->m_sb.sb_blocksize), + XBF_LOCK | XBF_MAPPED); block = XFS_BUF_TO_BLOCK(bp); memset(block, 0, mp->m_sb.sb_blocksize); block->bb_magic = cpu_to_be32(XFS_IBT_MAGIC); @@ -611,7 +614,7 @@ xfs_fs_log_dummy( xfs_inode_t *ip; int error; - tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1); + tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP); error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0); if (error) { xfs_trans_cancel(tp, 0); diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index 0785797db828..cb907ba69c4c 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -425,7 +425,7 @@ xfs_ialloc_ag_alloc( return 0; } -STATIC_INLINE xfs_agnumber_t +STATIC xfs_agnumber_t xfs_ialloc_next_ag( xfs_mount_t *mp) { diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 80e526489be5..073bb4a26b19 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -73,6 +73,9 @@ xfs_inode_alloc( ASSERT(atomic_read(&ip->i_pincount) == 0); ASSERT(!spin_is_locked(&ip->i_flags_lock)); ASSERT(completion_done(&ip->i_flush)); + ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock)); + + mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); /* initialise the xfs inode */ ip->i_ino = ino; @@ -290,7 +293,7 @@ xfs_iget_cache_miss( struct xfs_inode **ipp, xfs_daddr_t bno, int flags, - int lock_flags) __releases(pag->pag_ici_lock) + int lock_flags) { struct xfs_inode *ip; int error; diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 
67ae5555a30a..7294abce6ef2 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -860,8 +860,15 @@ xfs_iomap_write_unwritten( * set up a transaction to convert the range of extents * from unwritten to real. Do allocations in a loop until * we have covered the range passed in. + * + * Note that we open code the transaction allocation here + * to pass KM_NOFS--we can't risk to recursing back into + * the filesystem here as we might be asked to write out + * the same inode that we complete here and might deadlock + * on the iolock. */ - tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE); + xfs_wait_for_freeze(mp, SB_FREEZE_TRANS); + tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS); tp->t_flags |= XFS_TRANS_RESERVE; error = xfs_trans_reserve(tp, resblks, XFS_WRITE_LOG_RES(mp), 0, diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index fb17f8226b09..1ec98ed914d4 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -2206,6 +2206,7 @@ xlog_recover_do_buffer_trans( xfs_daddr_t blkno; int len; ushort flags; + uint buf_flags; buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr; @@ -2246,12 +2247,11 @@ xlog_recover_do_buffer_trans( } mp = log->l_mp; - if (flags & XFS_BLI_INODE_BUF) { - bp = xfs_buf_read_flags(mp->m_ddev_targp, blkno, len, - XFS_BUF_LOCK); - } else { - bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, 0); - } + buf_flags = XFS_BUF_LOCK; + if (!(flags & XFS_BLI_INODE_BUF)) + buf_flags |= XFS_BUF_MAPPED; + + bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, buf_flags); if (XFS_BUF_ISERROR(bp)) { xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp, bp, blkno); @@ -2350,8 +2350,8 @@ xlog_recover_do_inode_trans( goto error; } - bp = xfs_buf_read_flags(mp->m_ddev_targp, in_f->ilf_blkno, - in_f->ilf_len, XFS_BUF_LOCK); + bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, + XFS_BUF_LOCK); if (XFS_BUF_ISERROR(bp)) { xfs_ioerror_alert("xlog_recover_do..(read#2)", mp, bp, in_f->ilf_blkno); @@ -3517,7 +3517,7 @@ xlog_do_recovery_pass( { xlog_rec_header_t *rhead; xfs_daddr_t blk_no; - xfs_caddr_t bufaddr, offset; + xfs_caddr_t offset; xfs_buf_t *hbp, *dbp; int error = 0, h_size; int bblks, split_bblks; @@ -3610,7 +3610,7 @@ xlog_do_recovery_pass( /* * Check for header wrapping around physical end-of-log */ - offset = NULL; + offset = XFS_BUF_PTR(hbp); split_hblks = 0; wrapped_hblks = 0; if (blk_no + hblks <= log->l_logBBsize) { @@ -3646,9 +3646,8 @@ xlog_do_recovery_pass( * - order is important. */ wrapped_hblks = hblks - split_hblks; - bufaddr = XFS_BUF_PTR(hbp); error = XFS_BUF_SET_PTR(hbp, - bufaddr + BBTOB(split_hblks), + offset + BBTOB(split_hblks), BBTOB(hblks - split_hblks)); if (error) goto bread_err2; @@ -3658,14 +3657,10 @@ xlog_do_recovery_pass( if (error) goto bread_err2; - error = XFS_BUF_SET_PTR(hbp, bufaddr, + error = XFS_BUF_SET_PTR(hbp, offset, BBTOB(hblks)); if (error) goto bread_err2; - - if (!offset) - offset = xlog_align(log, 0, - wrapped_hblks, hbp); } rhead = (xlog_rec_header_t *)offset; error = xlog_valid_rec_header(log, rhead, @@ -3685,7 +3680,7 @@ xlog_do_recovery_pass( } else { /* This log record is split across the * physical end of log */ - offset = NULL; + offset = XFS_BUF_PTR(dbp); split_bblks = 0; if (blk_no != log->l_logBBsize) { /* some data is before the physical @@ -3714,9 +3709,8 @@ xlog_do_recovery_pass( * _first_, then the log start (LR header end) * - order is important. 
*/ - bufaddr = XFS_BUF_PTR(dbp); error = XFS_BUF_SET_PTR(dbp, - bufaddr + BBTOB(split_bblks), + offset + BBTOB(split_bblks), BBTOB(bblks - split_bblks)); if (error) goto bread_err2; @@ -3727,13 +3721,9 @@ xlog_do_recovery_pass( if (error) goto bread_err2; - error = XFS_BUF_SET_PTR(dbp, bufaddr, h_size); + error = XFS_BUF_SET_PTR(dbp, offset, h_size); if (error) goto bread_err2; - - if (!offset) - offset = xlog_align(log, wrapped_hblks, - bblks - split_bblks, dbp); } xlog_unpack_data(rhead, offset, log); if ((error = xlog_recover_process_data(log, rhash, diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 8b6c9e807efb..66a888a9ad6f 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -583,8 +583,8 @@ xfs_readsb(xfs_mount_t *mp, int flags) sector_size = xfs_getsize_buftarg(mp->m_ddev_targp); extra_flags = XFS_BUF_LOCK | XFS_BUF_MANAGE | XFS_BUF_MAPPED; - bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR, - BTOBB(sector_size), extra_flags); + bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR, BTOBB(sector_size), + extra_flags); if (!bp || XFS_BUF_ISERROR(bp)) { xfs_fs_mount_cmn_err(flags, "SB read failed"); error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM; @@ -624,8 +624,8 @@ xfs_readsb(xfs_mount_t *mp, int flags) XFS_BUF_UNMANAGE(bp); xfs_buf_relse(bp); sector_size = mp->m_sb.sb_sectsize; - bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR, - BTOBB(sector_size), extra_flags); + bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR, + BTOBB(sector_size), extra_flags); if (!bp || XFS_BUF_ISERROR(bp)) { xfs_fs_mount_cmn_err(flags, "SB re-read failed"); error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM; @@ -1471,7 +1471,7 @@ xfs_log_sbcount( if (!xfs_sb_version_haslazysbcount(&mp->m_sb)) return 0; - tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT); + tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP); error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0, XFS_DEFAULT_LOG_COUNT); if (error) { @@ -2123,7 +2123,7 @@ xfs_icsb_destroy_counters( mutex_destroy(&mp->m_icsb_mutex); } -STATIC_INLINE void +STATIC void xfs_icsb_lock_cntr( xfs_icsb_cnts_t *icsbp) { @@ -2132,7 +2132,7 @@ xfs_icsb_lock_cntr( } } -STATIC_INLINE void +STATIC void xfs_icsb_unlock_cntr( xfs_icsb_cnts_t *icsbp) { @@ -2140,7 +2140,7 @@ xfs_icsb_unlock_cntr( } -STATIC_INLINE void +STATIC void xfs_icsb_lock_all_counters( xfs_mount_t *mp) { @@ -2153,7 +2153,7 @@ xfs_icsb_lock_all_counters( } } -STATIC_INLINE void +STATIC void xfs_icsb_unlock_all_counters( xfs_mount_t *mp) { diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index a6c023bc0fb2..1df7e4502967 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -93,6 +93,9 @@ typedef struct xfs_dmops { xfs_send_unmount_t xfs_send_unmount; } xfs_dmops_t; +#define XFS_DMAPI_UNMOUNT_FLAGS(mp) \ + (((mp)->m_dmevmask & (1 << DM_EVENT_UNMOUNT)) ? 
0 : DM_FLAGS_UNWANTED) + #define XFS_SEND_DATA(mp, ev,ip,off,len,fl,lock) \ (*(mp)->m_dm_ops->xfs_send_data)(ev,ip,off,len,fl,lock) #define XFS_SEND_MMAP(mp, vma,fl) \ @@ -101,12 +104,24 @@ typedef struct xfs_dmops { (*(mp)->m_dm_ops->xfs_send_destroy)(ip,right) #define XFS_SEND_NAMESP(mp, ev,b1,r1,b2,r2,n1,n2,mode,rval,fl) \ (*(mp)->m_dm_ops->xfs_send_namesp)(ev,NULL,b1,r1,b2,r2,n1,n2,mode,rval,fl) -#define XFS_SEND_PREUNMOUNT(mp,b1,r1,b2,r2,n1,n2,mode,rval,fl) \ - (*(mp)->m_dm_ops->xfs_send_namesp)(DM_EVENT_PREUNMOUNT,mp,b1,r1,b2,r2,n1,n2,mode,rval,fl) #define XFS_SEND_MOUNT(mp,right,path,name) \ (*(mp)->m_dm_ops->xfs_send_mount)(mp,right,path,name) -#define XFS_SEND_UNMOUNT(mp, ip,right,mode,rval,fl) \ - (*(mp)->m_dm_ops->xfs_send_unmount)(mp,ip,right,mode,rval,fl) +#define XFS_SEND_PREUNMOUNT(mp) \ +do { \ + if (mp->m_flags & XFS_MOUNT_DMAPI) { \ + (*(mp)->m_dm_ops->xfs_send_namesp)(DM_EVENT_PREUNMOUNT, mp, \ + (mp)->m_rootip, DM_RIGHT_NULL, \ + (mp)->m_rootip, DM_RIGHT_NULL, \ + NULL, NULL, 0, 0, XFS_DMAPI_UNMOUNT_FLAGS(mp)); \ + } \ +} while (0) +#define XFS_SEND_UNMOUNT(mp) \ +do { \ + if (mp->m_flags & XFS_MOUNT_DMAPI) { \ + (*(mp)->m_dm_ops->xfs_send_unmount)(mp, (mp)->m_rootip, \ + DM_RIGHT_NULL, 0, 0, XFS_DMAPI_UNMOUNT_FLAGS(mp)); \ + } \ +} while (0) #ifdef HAVE_PERCPU_SB @@ -387,13 +402,13 @@ xfs_put_perag(struct xfs_mount *mp, xfs_perag_t *pag) * Per-cpu superblock locking functions */ #ifdef HAVE_PERCPU_SB -STATIC_INLINE void +static inline void xfs_icsb_lock(xfs_mount_t *mp) { mutex_lock(&mp->m_icsb_mutex); } -STATIC_INLINE void +static inline void xfs_icsb_unlock(xfs_mount_t *mp) { mutex_unlock(&mp->m_icsb_mutex); diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c index 3f816ad7ff19..4c199d18f850 100644 --- a/fs/xfs/xfs_rw.c +++ b/fs/xfs/xfs_rw.c @@ -277,10 +277,10 @@ xfs_read_buf( xfs_buf_t *bp; int error; - if (flags) - bp = xfs_buf_read_flags(target, blkno, len, flags); - else - bp = xfs_buf_read(target, blkno, len, flags); + if (!flags) + flags = XBF_LOCK | XBF_MAPPED; + + bp = xfs_buf_read(target, blkno, len, flags); if (!bp) return XFS_ERROR(EIO); error = XFS_BUF_GETERROR(bp); @@ -336,3 +336,25 @@ xfs_bwrite( } return (error); } + +/* + * helper function to extract extent size hint from inode + */ +xfs_extlen_t +xfs_get_extsz_hint( + struct xfs_inode *ip) +{ + xfs_extlen_t extsz; + + if (unlikely(XFS_IS_REALTIME_INODE(ip))) { + extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) + ? ip->i_d.di_extsize + : ip->i_mount->m_sb.sb_rextsize; + ASSERT(extsz); + } else { + extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) + ? ip->i_d.di_extsize : 0; + } + + return extsz; +} diff --git a/fs/xfs/xfs_rw.h b/fs/xfs/xfs_rw.h index f5e4874c37d8..571f2174435c 100644 --- a/fs/xfs/xfs_rw.h +++ b/fs/xfs/xfs_rw.h @@ -37,34 +37,6 @@ xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb) } /* - * Flags for xfs_free_eofblocks - */ -#define XFS_FREE_EOF_LOCK (1<<0) -#define XFS_FREE_EOF_NOLOCK (1<<1) - - -/* - * helper function to extract extent size hint from inode - */ -STATIC_INLINE xfs_extlen_t -xfs_get_extsz_hint( - xfs_inode_t *ip) -{ - xfs_extlen_t extsz; - - if (unlikely(XFS_IS_REALTIME_INODE(ip))) { - extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) - ? ip->i_d.di_extsize - : ip->i_mount->m_sb.sb_rextsize; - ASSERT(extsz); - } else { - extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) - ? ip->i_d.di_extsize : 0; - } - return extsz; -} - -/* * Prototypes for functions in xfs_rw.c. 
*/ extern int xfs_write_clear_setuid(struct xfs_inode *ip); @@ -76,5 +48,6 @@ extern int xfs_read_buf(struct xfs_mount *mp, xfs_buftarg_t *btp, struct xfs_buf **bpp); extern void xfs_ioerror_alert(char *func, struct xfs_mount *mp, xfs_buf_t *bp, xfs_daddr_t blkno); +extern xfs_extlen_t xfs_get_extsz_hint(struct xfs_inode *ip); #endif /* __XFS_RW_H__ */ diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 66b849358e62..237badcbac3b 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -236,19 +236,20 @@ xfs_trans_alloc( uint type) { xfs_wait_for_freeze(mp, SB_FREEZE_TRANS); - return _xfs_trans_alloc(mp, type); + return _xfs_trans_alloc(mp, type, KM_SLEEP); } xfs_trans_t * _xfs_trans_alloc( xfs_mount_t *mp, - uint type) + uint type, + uint memflags) { xfs_trans_t *tp; atomic_inc(&mp->m_active_trans); - tp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP); + tp = kmem_zone_zalloc(xfs_trans_zone, memflags); tp->t_magic = XFS_TRANS_MAGIC; tp->t_type = type; tp->t_mountp = mp; diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index ed47fc77759c..a0574f593f52 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h @@ -924,7 +924,7 @@ typedef struct xfs_trans { * XFS transaction mechanism exported interfaces. */ xfs_trans_t *xfs_trans_alloc(struct xfs_mount *, uint); -xfs_trans_t *_xfs_trans_alloc(struct xfs_mount *, uint); +xfs_trans_t *_xfs_trans_alloc(struct xfs_mount *, uint, uint); xfs_trans_t *xfs_trans_dup(xfs_trans_t *); int xfs_trans_reserve(xfs_trans_t *, uint, uint, uint, uint, uint); diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index 218829e6a152..03a1f701fea8 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c @@ -79,11 +79,8 @@ xfs_trans_get_buf(xfs_trans_t *tp, /* * Default to a normal get_buf() call if the tp is NULL. */ - if (tp == NULL) { - bp = xfs_buf_get_flags(target_dev, blkno, len, - flags | BUF_BUSY); - return(bp); - } + if (tp == NULL) + return xfs_buf_get(target_dev, blkno, len, flags | BUF_BUSY); /* * If we find the buffer in the cache with this transaction @@ -129,7 +126,7 @@ xfs_trans_get_buf(xfs_trans_t *tp, * easily deadlock with our current transaction as well as cause * us to run out of stack space. */ - bp = xfs_buf_get_flags(target_dev, blkno, len, flags | BUF_BUSY); + bp = xfs_buf_get(target_dev, blkno, len, flags | BUF_BUSY); if (bp == NULL) { return NULL; } @@ -302,7 +299,7 @@ xfs_trans_read_buf( * Default to a normal get_buf() call if the tp is NULL. */ if (tp == NULL) { - bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY); + bp = xfs_buf_read(target, blkno, len, flags | BUF_BUSY); if (!bp) return (flags & XFS_BUF_TRYLOCK) ? EAGAIN : XFS_ERROR(ENOMEM); @@ -398,7 +395,7 @@ xfs_trans_read_buf( * easily deadlock with our current transaction as well as cause * us to run out of stack space. 
*/ - bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY); + bp = xfs_buf_read(target, blkno, len, flags | BUF_BUSY); if (bp == NULL) { *bpp = NULL; return 0; diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index b572f7e840e0..578f3f59b789 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -538,9 +538,8 @@ xfs_readlink_bmap( d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock); byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount); - bp = xfs_buf_read_flags(mp->m_ddev_targp, d, BTOBB(byte_cnt), - XBF_LOCK | XBF_MAPPED | - XBF_DONT_BLOCK); + bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), + XBF_LOCK | XBF_MAPPED | XBF_DONT_BLOCK); error = XFS_BUF_GETERROR(bp); if (error) { xfs_ioerror_alert("xfs_readlink", @@ -709,6 +708,11 @@ xfs_fsync( } /* + * Flags for xfs_free_eofblocks + */ +#define XFS_FREE_EOF_TRYLOCK (1<<0) + +/* * This is called by xfs_inactive to free any blocks beyond eof * when the link count isn't zero and by xfs_dm_punch_hole() when * punching a hole to EOF. @@ -726,7 +730,6 @@ xfs_free_eofblocks( xfs_filblks_t map_len; int nimaps; xfs_bmbt_irec_t imap; - int use_iolock = (flags & XFS_FREE_EOF_LOCK); /* * Figure out if there are any blocks beyond the end @@ -768,14 +771,19 @@ xfs_free_eofblocks( * cache and we can't * do that within a transaction. */ - if (use_iolock) + if (flags & XFS_FREE_EOF_TRYLOCK) { + if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { + xfs_trans_cancel(tp, 0); + return 0; + } + } else { xfs_ilock(ip, XFS_IOLOCK_EXCL); + } error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, ip->i_size); if (error) { xfs_trans_cancel(tp, 0); - if (use_iolock) - xfs_iunlock(ip, XFS_IOLOCK_EXCL); + xfs_iunlock(ip, XFS_IOLOCK_EXCL); return error; } @@ -812,8 +820,7 @@ xfs_free_eofblocks( error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); } - xfs_iunlock(ip, (use_iolock ? (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL) - : XFS_ILOCK_EXCL)); + xfs_iunlock(ip, XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL); } return error; } @@ -1113,7 +1120,17 @@ xfs_release( (ip->i_df.if_flags & XFS_IFEXTENTS)) && (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) { - error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK); + + /* + * If we can't get the iolock just skip truncating + * the blocks past EOF because we could deadlock + * with the mmap_sem otherwise. We'll get another + * chance to drop them once the last reference to + * the inode is dropped, so we'll never leak blocks + * permanently. + */ + error = xfs_free_eofblocks(mp, ip, + XFS_FREE_EOF_TRYLOCK); if (error) return error; } @@ -1184,7 +1201,7 @@ xfs_inactive( (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) || (ip->i_delayed_blks != 0)))) { - error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK); + error = xfs_free_eofblocks(mp, ip, 0); if (error) return VN_INACTIVE_CACHE; } @@ -2456,46 +2473,6 @@ xfs_set_dmattrs( return error; } -int -xfs_reclaim( - xfs_inode_t *ip) -{ - - xfs_itrace_entry(ip); - - ASSERT(!VN_MAPPED(VFS_I(ip))); - - /* bad inode, get out here ASAP */ - if (is_bad_inode(VFS_I(ip))) { - xfs_ireclaim(ip); - return 0; - } - - xfs_ioend_wait(ip); - - ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0); - - /* - * If we have nothing to flush with this inode then complete the - * teardown now, otherwise break the link between the xfs inode and the - * linux inode and clean up the xfs inode later. This avoids flushing - * the inode to disk during the delete operation itself. 
- * - * When breaking the link, we need to set the XFS_IRECLAIMABLE flag - * first to ensure that xfs_iunpin() will never see an xfs inode - * that has a linux inode being reclaimed. Synchronisation is provided - * by the i_flags_lock. - */ - if (!ip->i_update_core && (ip->i_itemp == NULL)) { - xfs_ilock(ip, XFS_ILOCK_EXCL); - xfs_iflock(ip); - xfs_iflags_set(ip, XFS_IRECLAIMABLE); - return xfs_reclaim_inode(ip, 1, XFS_IFLUSH_DELWRI_ELSE_SYNC); - } - xfs_inode_set_reclaim_tag(ip); - return 0; -} - /* * xfs_alloc_file_space() * This routine allocates disk space for the given file. diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h index a9e102de71a1..167a467403a5 100644 --- a/fs/xfs/xfs_vnodeops.h +++ b/fs/xfs/xfs_vnodeops.h @@ -38,7 +38,6 @@ int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name, const char *target_path, mode_t mode, struct xfs_inode **ipp, cred_t *credp); int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state); -int xfs_reclaim(struct xfs_inode *ip); int xfs_change_file_space(struct xfs_inode *ip, int cmd, xfs_flock64_t *bf, xfs_off_t offset, int attr_flags); int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name, |
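The xfs_vnodeops.c hunks near the end of the diff ("xfs: fix mmap_sem/iolock inversion in xfs_free_eofblocks") replace the old XFS_FREE_EOF_LOCK/NOLOCK flags with XFS_FREE_EOF_TRYLOCK, so xfs_release() only trims blocks past EOF when it can take the iolock without blocking, leaving the work to the final teardown otherwise. A rough standalone sketch of that trylock-and-skip pattern, using a pthread mutex and hypothetical names (inode_stub, FREE_EOF_TRYLOCK, free_eofblocks()) in place of the XFS types:

```c
/*
 * Illustrative only: the trylock-and-skip pattern from the
 * xfs_free_eofblocks() change, modelled with a pthread mutex.
 * inode_stub, FREE_EOF_TRYLOCK and free_eofblocks() are hypothetical
 * names, not the kernel's.
 */
#include <pthread.h>
#include <stdio.h>

struct inode_stub {
	pthread_mutex_t	iolock;		/* stands in for the XFS iolock */
	long		blocks_past_eof;
};

#define FREE_EOF_TRYLOCK	(1 << 0)	/* mirrors XFS_FREE_EOF_TRYLOCK */

static int free_eofblocks(struct inode_stub *ip, int flags)
{
	if (flags & FREE_EOF_TRYLOCK) {
		/* lock is contended: skip the trim rather than risk deadlock */
		if (pthread_mutex_trylock(&ip->iolock) != 0)
			return 0;
	} else {
		pthread_mutex_lock(&ip->iolock);
	}

	if (ip->blocks_past_eof) {
		printf("trimming %ld blocks past EOF\n", ip->blocks_past_eof);
		ip->blocks_past_eof = 0;
	}

	pthread_mutex_unlock(&ip->iolock);
	return 0;
}

int main(void)
{
	struct inode_stub ip = {
		.iolock = PTHREAD_MUTEX_INITIALIZER,
		.blocks_past_eof = 16,
	};

	/* release path: opportunistic, must never block under mmap_sem */
	free_eofblocks(&ip, FREE_EOF_TRYLOCK);

	/* teardown path: may block, the trim has to happen eventually */
	free_eofblocks(&ip, 0);
	return 0;
}
```

The blocking call at teardown time mirrors xfs_inactive() passing flags of 0, where waiting for the iolock is safe because no mmap_sem can be held there, so no blocks past EOF are leaked permanently.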