author    Ross Zwisler <ross.zwisler@linux.intel.com>  2015-08-18 13:55:41 -0600
committer Dan Williams <dan.j.williams@intel.com>      2015-08-20 14:07:24 -0400
commit    e2e05394e4a3420dab96f728df4531893494e15d (patch)
tree      ba7d067880cf626ca90856ad6ba703b8c99fc276 /fs
parent    2765cfbb342c727c3fd47b165196cb16da158022 (diff)
pmem, dax: have direct_access use __pmem annotation
Update the annotation for the kaddr pointer returned by direct_access()
so that it is a __pmem pointer.  This is consistent with the PMEM driver
and with how this direct_access() pointer is used in the DAX code.

Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
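For context, __pmem is a sparse address-space qualifier in the same family as
__user and __iomem: it only exists when the code is run through sparse
(__CHECKER__) and compiles away to nothing otherwise, while __force is the
escape hatch that tells the checker a pointer is deliberately crossing address
spaces.  A minimal sketch of how such annotations are typically defined (the
exact address-space number used for __pmem is an assumption here):

#ifdef __CHECKER__
# define __pmem		__attribute__((noderef, address_space(5)))	/* slot number assumed */
# define __force	__attribute__((force))
#else
# define __pmem		/* no effect on the real compile */
# define __force
#endif

With the annotation in place, sparse warns whenever a void __pmem * is
dereferenced directly or handed to a function expecting a plain void *, which
is exactly the property this patch wants for pointers returned by
direct_access().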
Diffstat (limited to 'fs')
-rw-r--r--   fs/block_dev.c |  2
-rw-r--r--   fs/dax.c       | 37
2 files changed, 21 insertions, 18 deletions
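To illustrate the new calling convention, here is a hypothetical (not-in-tree)
consumer of bdev_direct_access(): the kaddr it receives is now __pmem
annotated, so it can be passed to the pmem accessors as-is but needs a __force
cast before reaching generic helpers such as memcpy().

/*
 * Hypothetical caller, sketched for illustration only.
 * bdev_direct_access() fills *addr with a __pmem-annotated mapping and
 * *pfn with the page frame number, and returns the number of bytes
 * usable at that address (or a negative errno).
 */
static long example_copy_from_pmem(struct block_device *bdev, sector_t sector,
				   void *dst, long size)
{
	void __pmem *kaddr;
	unsigned long pfn;
	long avail;

	avail = bdev_direct_access(bdev, sector, &kaddr, &pfn, size);
	if (avail < 0)
		return avail;
	if (avail > size)
		avail = size;

	/* memcpy() takes a plain pointer, so strip the annotation explicitly */
	memcpy(dst, (void __force *)kaddr, avail);
	return avail;
}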
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 198243717da5..2345a9870e2c 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -441,7 +441,7 @@ EXPORT_SYMBOL_GPL(bdev_write_page);
* accessible at this address.
*/
long bdev_direct_access(struct block_device *bdev, sector_t sector,
- void **addr, unsigned long *pfn, long size)
+ void __pmem **addr, unsigned long *pfn, long size)
{
long avail;
const struct block_device_operations *ops = bdev->bd_disk->fops;
diff --git a/fs/dax.c b/fs/dax.c
index e07fecc93f80..7c634ac797b1 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -35,7 +35,7 @@ int dax_clear_blocks(struct inode *inode, sector_t block, long size)
might_sleep();
do {
- void *addr;
+ void __pmem *addr;
unsigned long pfn;
long count;
@@ -47,7 +47,7 @@ int dax_clear_blocks(struct inode *inode, sector_t block, long size)
unsigned pgsz = PAGE_SIZE - offset_in_page(addr);
if (pgsz > count)
pgsz = count;
- clear_pmem((void __pmem *)addr, pgsz);
+ clear_pmem(addr, pgsz);
addr += pgsz;
size -= pgsz;
count -= pgsz;
@@ -62,7 +62,8 @@ int dax_clear_blocks(struct inode *inode, sector_t block, long size)
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);
-static long dax_get_addr(struct buffer_head *bh, void **addr, unsigned blkbits)
+static long dax_get_addr(struct buffer_head *bh, void __pmem **addr,
+ unsigned blkbits)
{
unsigned long pfn;
sector_t sector = bh->b_blocknr << (blkbits - 9);
@@ -70,15 +71,15 @@ static long dax_get_addr(struct buffer_head *bh, void **addr, unsigned blkbits)
}
/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
-static void dax_new_buf(void *addr, unsigned size, unsigned first, loff_t pos,
- loff_t end)
+static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
+ loff_t pos, loff_t end)
{
loff_t final = end - pos + first; /* The final byte of the buffer */
if (first > 0)
- clear_pmem((void __pmem *)addr, first);
+ clear_pmem(addr, first);
if (final < size)
- clear_pmem((void __pmem *)addr + final, size - final);
+ clear_pmem(addr + final, size - final);
}
static bool buffer_written(struct buffer_head *bh)
@@ -106,7 +107,7 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
loff_t pos = start;
loff_t max = start;
loff_t bh_max = start;
- void *addr;
+ void __pmem *addr;
bool hole = false;
bool need_wmb = false;
@@ -158,11 +159,11 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
}
if (iov_iter_rw(iter) == WRITE) {
- len = copy_from_iter_pmem((void __pmem *)addr,
- max - pos, iter);
+ len = copy_from_iter_pmem(addr, max - pos, iter);
need_wmb = true;
} else if (!hole)
- len = copy_to_iter(addr, max - pos, iter);
+ len = copy_to_iter((void __force *)addr, max - pos,
+ iter);
else
len = iov_iter_zero(max - pos, iter);
@@ -268,11 +269,13 @@ static int dax_load_hole(struct address_space *mapping, struct page *page,
static int copy_user_bh(struct page *to, struct buffer_head *bh,
unsigned blkbits, unsigned long vaddr)
{
- void *vfrom, *vto;
+ void __pmem *vfrom;
+ void *vto;
+
if (dax_get_addr(bh, &vfrom, blkbits) < 0)
return -EIO;
vto = kmap_atomic(to);
- copy_user_page(vto, vfrom, vaddr, to);
+ copy_user_page(vto, (void __force *)vfrom, vaddr, to);
kunmap_atomic(vto);
return 0;
}
@@ -283,7 +286,7 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
struct address_space *mapping = inode->i_mapping;
sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
unsigned long vaddr = (unsigned long)vmf->virtual_address;
- void *addr;
+ void __pmem *addr;
unsigned long pfn;
pgoff_t size;
int error;
@@ -312,7 +315,7 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
}
if (buffer_unwritten(bh) || buffer_new(bh)) {
- clear_pmem((void __pmem *)addr, PAGE_SIZE);
+ clear_pmem(addr, PAGE_SIZE);
wmb_pmem();
}
@@ -548,11 +551,11 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
if (err < 0)
return err;
if (buffer_written(&bh)) {
- void *addr;
+ void __pmem *addr;
err = dax_get_addr(&bh, &addr, inode->i_blkbits);
if (err < 0)
return err;
- clear_pmem((void __pmem *)addr + offset, length);
+ clear_pmem(addr + offset, length);
wmb_pmem();
}
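Taken together, the call sites above follow one rule: helpers that are
pmem-aware (clear_pmem(), copy_from_iter_pmem()) take the annotated pointer
directly, so the old (void __pmem *) casts disappear, while generic helpers
(copy_to_iter(), copy_user_page()) still expect plain kernel pointers and gain
an explicit (void __force *) cast instead.  A compressed sketch of the two
cases, with kaddr, len and iter as stand-ins:

	void __pmem *kaddr;	/* e.g. returned via bdev_direct_access() */

	clear_pmem(kaddr, PAGE_SIZE);			/* pmem-aware: no cast needed */
	len = copy_to_iter((void __force *)kaddr, len, iter);	/* generic API: __force */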