author		Dan Williams <dan.j.williams@intel.com>	2016-01-28 20:13:39 -0800
committer	Dan Williams <dan.j.williams@intel.com>	2016-01-30 13:35:31 -0800
commit		9f4736fe7ca804aa79b5916221bb13dfc6221a0f (patch)
tree		1604ce0292634817892790f0eb8cdb0e1821de1e /fs/block_dev.c
parent		65f87ee71852a754f7981d0653e7136039b8798a (diff)
download	linux-9f4736fe7ca804aa79b5916221bb13dfc6221a0f.tar.bz2
block: revert runtime dax control of the raw block device
Dynamically enabling DAX requires that the page cache first be flushed and invalidated. This must occur atomically with the change of DAX mode, otherwise we confuse the fsync/msync tracking and violate data durability guarantees. Eliminate the possibility of DAX-disabled to DAX-enabled transitions for now and revisit this for the next cycle.

Cc: Jan Kara <jack@suse.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Matthew Wilcox <willy@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
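[Illustrative sketch, not part of this patch: what an atomic DAX-mode transition would roughly have to do. The blkdev_set_dax() helper name and its error handling are invented for illustration; only the page-cache flush/invalidate and locking pattern are the point.]

/*
 * Hypothetical sketch only: flipping a block device into or out of
 * DAX mode would need to write back and invalidate the page cache
 * under the same lock that changes the mode, so fault/fsync paths
 * never see a half-switched state. Helper name is illustrative.
 */
static int blkdev_set_dax(struct block_device *bdev, bool enable)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *mapping = bd_inode->i_mapping;
	int error;

	inode_lock(bd_inode);

	/* Flush dirty pages, then drop all cached pages. */
	error = filemap_write_and_wait(mapping);
	if (!error)
		error = invalidate_inode_pages2(mapping);

	if (!error) {
		if (enable)
			bd_inode->i_flags |= S_DAX;
		else
			bd_inode->i_flags &= ~S_DAX;
	}

	inode_unlock(bd_inode);
	return error;
}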
Diffstat (limited to 'fs/block_dev.c')
-rw-r--r--	fs/block_dev.c	28
1 file changed, 0 insertions(+), 28 deletions(-)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 7b9cd49622b1..afb437484362 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1736,37 +1736,13 @@ static int blkdev_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
return __dax_pmd_fault(vma, addr, pmd, flags, blkdev_get_block, NULL);
}
-static void blkdev_vm_open(struct vm_area_struct *vma)
-{
- struct inode *bd_inode = bdev_file_inode(vma->vm_file);
- struct block_device *bdev = I_BDEV(bd_inode);
-
- inode_lock(bd_inode);
- bdev->bd_map_count++;
- inode_unlock(bd_inode);
-}
-
-static void blkdev_vm_close(struct vm_area_struct *vma)
-{
- struct inode *bd_inode = bdev_file_inode(vma->vm_file);
- struct block_device *bdev = I_BDEV(bd_inode);
-
- inode_lock(bd_inode);
- bdev->bd_map_count--;
- inode_unlock(bd_inode);
-}
-
static const struct vm_operations_struct blkdev_dax_vm_ops = {
- .open = blkdev_vm_open,
- .close = blkdev_vm_close,
.fault = blkdev_dax_fault,
.pmd_fault = blkdev_dax_pmd_fault,
.pfn_mkwrite = blkdev_dax_fault,
};
static const struct vm_operations_struct blkdev_default_vm_ops = {
- .open = blkdev_vm_open,
- .close = blkdev_vm_close,
.fault = filemap_fault,
.map_pages = filemap_map_pages,
};
@@ -1774,18 +1750,14 @@ static const struct vm_operations_struct blkdev_default_vm_ops = {
static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
{
struct inode *bd_inode = bdev_file_inode(file);
- struct block_device *bdev = I_BDEV(bd_inode);
file_accessed(file);
- inode_lock(bd_inode);
- bdev->bd_map_count++;
if (IS_DAX(bd_inode)) {
vma->vm_ops = &blkdev_dax_vm_ops;
vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
} else {
vma->vm_ops = &blkdev_default_vm_ops;
}
- inode_unlock(bd_inode);
return 0;
}
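[For reference, reconstructed from the context lines of the hunk above: after this revert blkdev_mmap() no longer takes the inode lock or maintains bd_map_count, and reduces to:]

static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *bd_inode = bdev_file_inode(file);

	file_accessed(file);
	if (IS_DAX(bd_inode)) {
		/* DAX-capable path: mixed-map, huge-page-friendly faults */
		vma->vm_ops = &blkdev_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	} else {
		/* Ordinary page-cache-backed mmap */
		vma->vm_ops = &blkdev_default_vm_ops;
	}

	return 0;
}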