author     Linus Torvalds <torvalds@linux-foundation.org>   2012-03-22 19:52:47 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-03-22 19:52:47 -0700
commit     aab008db8063364dc3c8ccf4981c21124866b395 (patch)
tree       72914203f4decb023efdaabd0301a62d742dfa8c /mm
parent     4f5b1affdda3e0c48cac674182f52004137b0ffc (diff)
parent     16c0cfa425b8e1488f7a1873bd112a7a099325f0 (diff)
download   linux-aab008db8063364dc3c8ccf4981c21124866b395.tar.bz2
Merge tag 'stable/for-linus-3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/mm
Pull cleancache changes from Konrad Rzeszutek Wilk:
 "This has some patches for the cleancache API that should have been
  submitted a _long_ time ago.  They are basically cleanups:

   - rename of flush to invalidate

   - moving reporting of statistics into debugfs

   - use __read_mostly as necessary.

  Oh, and also the MAINTAINERS file change.  The files (except the
  MAINTAINERS file) have been in #linux-next for months now.  The late
  addition of the MAINTAINERS file is a brain-fart on my side - didn't
  realize I needed that until I was typing this up - and I based that
  patch on v3.3 - so the tree is on top of v3.3."

* tag 'stable/for-linus-3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/mm:
  MAINTAINERS: Adding cleancache API to the list.
  mm: cleancache: Use __read_mostly as appropiate.
  mm: cleancache: report statistics via debugfs instead of sysfs.
  mm: zcache/tmem/cleancache: s/flush/invalidate/
  mm: cleancache: s/flush/invalidate/
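The rename is mechanical, but it helps to see what the callers touched below in mm/filemap.c and mm/truncate.c actually end up invoking. The following is a sketch, not the verbatim header: the names __cleancache_invalidate_page/__cleancache_invalidate_inode, the cleancache_enabled flag, and the cleancache_poolid test are taken from this diff, while the exact wrapper bodies in include/linux/cleancache.h are assumed to follow the pattern the comment at the top of mm/cleancache.c describes, i.e. test the global flag inline and only then pay for a function call.

static inline void cleancache_invalidate_page(struct address_space *mapping,
					      struct page *page)
{
	/* careful: page->mapping may be NULL here, hence the explicit mapping */
	if (cleancache_enabled && mapping->host->i_sb->cleancache_poolid >= 0)
		__cleancache_invalidate_page(mapping, page);
}

static inline void cleancache_invalidate_inode(struct address_space *mapping)
{
	if (cleancache_enabled && mapping->host->i_sb->cleancache_poolid >= 0)
		__cleancache_invalidate_inode(mapping);
}

Keeping that first test on a __read_mostly global is exactly what the comment in mm/cleancache.c defends: these wrappers run on every page-cache delete and truncate, so the common no-backend case should stay a single cache-friendly load.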
Diffstat (limited to 'mm')
-rw-r--r--  mm/cleancache.c | 98
-rw-r--r--  mm/filemap.c    |  2
-rw-r--r--  mm/truncate.c   | 10
3 files changed, 44 insertions(+), 66 deletions(-)
diff --git a/mm/cleancache.c b/mm/cleancache.c
index bcaae4c2a770..5646c740f613 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -15,29 +15,34 @@
#include <linux/fs.h>
#include <linux/exportfs.h>
#include <linux/mm.h>
+#include <linux/debugfs.h>
#include <linux/cleancache.h>
/*
* This global enablement flag may be read thousands of times per second
- * by cleancache_get/put/flush even on systems where cleancache_ops
+ * by cleancache_get/put/invalidate even on systems where cleancache_ops
* is not claimed (e.g. cleancache is config'ed on but remains
* disabled), so is preferred to the slower alternative: a function
* call that checks a non-global.
*/
-int cleancache_enabled;
+int cleancache_enabled __read_mostly;
EXPORT_SYMBOL(cleancache_enabled);
/*
* cleancache_ops is set by cleancache_ops_register to contain the pointers
* to the cleancache "backend" implementation functions.
*/
-static struct cleancache_ops cleancache_ops;
+static struct cleancache_ops cleancache_ops __read_mostly;
-/* useful stats available in /sys/kernel/mm/cleancache */
-static unsigned long cleancache_succ_gets;
-static unsigned long cleancache_failed_gets;
-static unsigned long cleancache_puts;
-static unsigned long cleancache_flushes;
+/*
+ * Counters available via /sys/kernel/debug/cleancache (if debugfs is
+ * properly configured).  These are for information only so are not protected
+ * against increment races.
+ */
+static u64 cleancache_succ_gets;
+static u64 cleancache_failed_gets;
+static u64 cleancache_puts;
+static u64 cleancache_invalidates;
/*
* register operations for cleancache, returning previous thus allowing
@@ -148,10 +153,11 @@ void __cleancache_put_page(struct page *page)
EXPORT_SYMBOL(__cleancache_put_page);
/*
- * Flush any data from cleancache associated with the poolid and the
+ * Invalidate any data from cleancache associated with the poolid and the
* page's inode and page index so that a subsequent "get" will fail.
*/
-void __cleancache_flush_page(struct address_space *mapping, struct page *page)
+void __cleancache_invalidate_page(struct address_space *mapping,
+ struct page *page)
{
/* careful... page->mapping is NULL sometimes when this is called */
int pool_id = mapping->host->i_sb->cleancache_poolid;
@@ -160,85 +166,57 @@ void __cleancache_flush_page(struct address_space *mapping, struct page *page)
if (pool_id >= 0) {
VM_BUG_ON(!PageLocked(page));
if (cleancache_get_key(mapping->host, &key) >= 0) {
- (*cleancache_ops.flush_page)(pool_id, key, page->index);
- cleancache_flushes++;
+ (*cleancache_ops.invalidate_page)(pool_id,
+ key, page->index);
+ cleancache_invalidates++;
}
}
}
-EXPORT_SYMBOL(__cleancache_flush_page);
+EXPORT_SYMBOL(__cleancache_invalidate_page);
/*
- * Flush all data from cleancache associated with the poolid and the
+ * Invalidate all data from cleancache associated with the poolid and the
 * mapping's inode so that all subsequent gets to this poolid/inode
* will fail.
*/
-void __cleancache_flush_inode(struct address_space *mapping)
+void __cleancache_invalidate_inode(struct address_space *mapping)
{
int pool_id = mapping->host->i_sb->cleancache_poolid;
struct cleancache_filekey key = { .u.key = { 0 } };
if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
- (*cleancache_ops.flush_inode)(pool_id, key);
+ (*cleancache_ops.invalidate_inode)(pool_id, key);
}
-EXPORT_SYMBOL(__cleancache_flush_inode);
+EXPORT_SYMBOL(__cleancache_invalidate_inode);
/*
* Called by any cleancache-enabled filesystem at time of unmount;
 * note that pool_id is surrendered and may be returned by a subsequent
* cleancache_init_fs or cleancache_init_shared_fs
*/
-void __cleancache_flush_fs(struct super_block *sb)
+void __cleancache_invalidate_fs(struct super_block *sb)
{
if (sb->cleancache_poolid >= 0) {
int old_poolid = sb->cleancache_poolid;
sb->cleancache_poolid = -1;
- (*cleancache_ops.flush_fs)(old_poolid);
+ (*cleancache_ops.invalidate_fs)(old_poolid);
}
}
-EXPORT_SYMBOL(__cleancache_flush_fs);
-
-#ifdef CONFIG_SYSFS
-
-/* see Documentation/ABI/xxx/sysfs-kernel-mm-cleancache */
-
-#define CLEANCACHE_SYSFS_RO(_name) \
- static ssize_t cleancache_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, char *buf) \
- { \
- return sprintf(buf, "%lu\n", cleancache_##_name); \
- } \
- static struct kobj_attribute cleancache_##_name##_attr = { \
- .attr = { .name = __stringify(_name), .mode = 0444 }, \
- .show = cleancache_##_name##_show, \
- }
-
-CLEANCACHE_SYSFS_RO(succ_gets);
-CLEANCACHE_SYSFS_RO(failed_gets);
-CLEANCACHE_SYSFS_RO(puts);
-CLEANCACHE_SYSFS_RO(flushes);
-
-static struct attribute *cleancache_attrs[] = {
- &cleancache_succ_gets_attr.attr,
- &cleancache_failed_gets_attr.attr,
- &cleancache_puts_attr.attr,
- &cleancache_flushes_attr.attr,
- NULL,
-};
-
-static struct attribute_group cleancache_attr_group = {
- .attrs = cleancache_attrs,
- .name = "cleancache",
-};
-
-#endif /* CONFIG_SYSFS */
+EXPORT_SYMBOL(__cleancache_invalidate_fs);
static int __init init_cleancache(void)
{
-#ifdef CONFIG_SYSFS
- int err;
-
- err = sysfs_create_group(mm_kobj, &cleancache_attr_group);
-#endif /* CONFIG_SYSFS */
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *root = debugfs_create_dir("cleancache", NULL);
+ if (root == NULL)
+ return -ENXIO;
+ debugfs_create_u64("succ_gets", S_IRUGO, root, &cleancache_succ_gets);
+ debugfs_create_u64("failed_gets", S_IRUGO,
+ root, &cleancache_failed_gets);
+ debugfs_create_u64("puts", S_IRUGO, root, &cleancache_puts);
+ debugfs_create_u64("invalidates", S_IRUGO,
+ root, &cleancache_invalidates);
+#endif
return 0;
}
module_init(init_cleancache)
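On the backend side (zcache, the Xen tmem driver), the same rename applies to the struct cleancache_ops members this file dereferences. The hook signatures can be read off the call sites above: invalidate_page(pool_id, key, index), invalidate_inode(pool_id, key) and invalidate_fs(pool_id). A minimal, hypothetical backend stub filling in only the renamed hooks might look like the sketch below; the example_* names are made up, and a real backend must also provide the init_fs/init_shared_fs/get_page/put_page hooks that do not appear in this hunk.

#include <linux/cleancache.h>

static void example_invalidate_page(int pool_id, struct cleancache_filekey key,
				    pgoff_t index)
{
	/* drop the one cached page identified by (pool_id, key, index) */
}

static void example_invalidate_inode(int pool_id, struct cleancache_filekey key)
{
	/* drop every page cached under this inode's key */
}

static void example_invalidate_fs(int pool_id)
{
	/* tear down the whole pool; the id may be handed out again later */
}

static struct cleancache_ops example_ops = {
	.invalidate_page  = example_invalidate_page,
	.invalidate_inode = example_invalidate_inode,
	.invalidate_fs    = example_invalidate_fs,
	/* .init_fs, .init_shared_fs, .get_page and .put_page omitted here */
};

The structure is handed to the registration function referenced in the comment near the top of the file, which returns the previously installed ops so a new backend can chain to an existing one.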
diff --git a/mm/filemap.c b/mm/filemap.c
index 843042045dc9..c3811bc6b9e3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -122,7 +122,7 @@ void __delete_from_page_cache(struct page *page)
if (PageUptodate(page) && PageMappedToDisk(page))
cleancache_put_page(page);
else
- cleancache_flush_page(mapping, page);
+ cleancache_invalidate_page(mapping, page);
radix_tree_delete(&mapping->page_tree, page->index);
page->mapping = NULL;
diff --git a/mm/truncate.c b/mm/truncate.c
index a188058582e0..18aded3a89fc 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -52,7 +52,7 @@ void do_invalidatepage(struct page *page, unsigned long offset)
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
zero_user_segment(page, partial, PAGE_CACHE_SIZE);
- cleancache_flush_page(page->mapping, page);
+ cleancache_invalidate_page(page->mapping, page);
if (page_has_private(page))
do_invalidatepage(page, partial);
}
@@ -213,7 +213,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
pgoff_t end;
int i;
- cleancache_flush_inode(mapping);
+ cleancache_invalidate_inode(mapping);
if (mapping->nrpages == 0)
return;
@@ -292,7 +292,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
mem_cgroup_uncharge_end();
index++;
}
- cleancache_flush_inode(mapping);
+ cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
@@ -444,7 +444,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
int ret2 = 0;
int did_range_unmap = 0;
- cleancache_flush_inode(mapping);
+ cleancache_invalidate_inode(mapping);
pagevec_init(&pvec, 0);
index = start;
while (index <= end && pagevec_lookup(&pvec, mapping, index,
@@ -500,7 +500,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
cond_resched();
index++;
}
- cleancache_flush_inode(mapping);
+ cleancache_invalidate_inode(mapping);
return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
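With the sysfs group gone, the counters created by init_cleancache() are read from debugfs instead. Assuming debugfs is mounted at the conventional /sys/kernel/debug, a tiny hypothetical userspace reader could look like this; the file names match the debugfs_create_u64() calls in the patch, while the program itself is only an illustration.

/* cleancache-stats.c (hypothetical): dump the four cleancache counters.
 * Build: cc -o cleancache-stats cleancache-stats.c */
#include <stdio.h>

int main(void)
{
	static const char *names[] = {
		"succ_gets", "failed_gets", "puts", "invalidates",
	};
	char path[128];
	char buf[64];

	for (unsigned i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		snprintf(path, sizeof(path),
			 "/sys/kernel/debug/cleancache/%s", names[i]);
		FILE *f = fopen(path, "r");
		if (!f) {
			perror(path);
			continue;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("%-12s %s", names[i], buf);
		fclose(f);
	}
	return 0;
}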