author	Linus Torvalds <torvalds@linux-foundation.org>	2021-11-01 08:47:59 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-11-01 08:47:59 -0700
commit	49f8275c7d9247cf1dd4440fc8162f784252c849 (patch)
tree	7caefaa8b68d3162f60ecef7bafacbed0e1056d8	/mm/memcontrol.c
parent	8bb7eca972ad531c9b149c0a51ab43a417385813 (diff)
parent	121703c1c817b3c77f61002466d0bfca7e39f25d (diff)
Merge tag 'folio-5.16' of git://git.infradead.org/users/willy/pagecache
Pull memory folios from Matthew Wilcox:
 "Add memory folios, a new type to represent either order-0 pages or
  the head page of a compound page. This should be enough infrastructure
  to support filesystems converting from pages to folios.

  The point of all this churn is to allow filesystems and the page cache
  to manage memory in larger chunks than PAGE_SIZE. The original plan
  was to use compound pages like THP does, but I ran into problems with
  some functions expecting only a head page while others expect the
  precise page containing a particular byte.

  The folio type allows a function to declare that it's expecting only a
  head page. Almost incidentally, this allows us to remove various calls
  to VM_BUG_ON(PageTail(page)) and compound_head().

  This converts just parts of the core MM and the page cache. For 5.17,
  we intend to convert various filesystems (XFS and AFS are ready; other
  filesystems may make it) and also convert more of the MM and page
  cache to folios. For 5.18, multi-page folios should be ready.

  The multi-page folios offer some improvement to some workloads. The
  80% win is real, but appears to be an artificial benchmark (postgres
  startup, which isn't a serious workload). Real workloads (eg building
  the kernel, running postgres in a steady state, etc) seem to benefit
  between 0-10%. I haven't heard of any performance losses as a result
  of this series.

  Nobody has done any serious performance tuning; I imagine that
  tweaking the readahead algorithm could provide some more interesting
  wins. There are also other places where we could choose to create
  large folios and currently do not, such as writes that are larger
  than PAGE_SIZE.

  I'd like to thank all my reviewers who've offered review/ack tags:
  Christoph Hellwig, David Howells, Jan Kara, Jeff Layton, Johannes
  Weiner, Kirill A. Shutemov, Michal Hocko, Mike Rapoport, Vlastimil
  Babka, William Kucharski, Yu Zhao and Zi Yan.

  I'd also like to thank those who gave feedback I incorporated but
  haven't offered up review tags for this part of the series: Nick
  Piggin, Mel Gorman, Ming Lei, Darrick Wong, Ted Ts'o, John Hubbard,
  Hugh Dickins, and probably a few others who I forget"

* tag 'folio-5.16' of git://git.infradead.org/users/willy/pagecache: (90 commits)
  mm/writeback: Add folio_write_one
  mm/filemap: Add FGP_STABLE
  mm/filemap: Add filemap_get_folio
  mm/filemap: Convert mapping_get_entry to return a folio
  mm/filemap: Add filemap_add_folio()
  mm/filemap: Add filemap_alloc_folio
  mm/page_alloc: Add folio allocation functions
  mm/lru: Add folio_add_lru()
  mm/lru: Convert __pagevec_lru_add_fn to take a folio
  mm: Add folio_evictable()
  mm/workingset: Convert workingset_refault() to take a folio
  mm/filemap: Add readahead_folio()
  mm/filemap: Add folio_mkwrite_check_truncate()
  mm/filemap: Add i_blocks_per_folio()
  mm/writeback: Add folio_redirty_for_writepage()
  mm/writeback: Add folio_account_redirty()
  mm/writeback: Add folio_clear_dirty_for_io()
  mm/writeback: Add folio_cancel_dirty()
  mm/writeback: Add folio_account_cleaned()
  mm/writeback: Add filemap_dirty_folio()
  ...
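To make the type distinction concrete, here is a minimal illustrative sketch; it is not code from this series. The example_*() helpers are invented names, while page_folio(), folio_test_dirty(), compound_head() and PageDirty() are the real interfaces used throughout the diff below.

    #include <linux/mm.h>
    #include <linux/page-flags.h>

    /* Page-based style: any page might be a tail page, so normalise it first. */
    static bool example_page_is_dirty(struct page *page)
    {
    	struct page *head = compound_head(page);	/* tolerate tail pages */

    	return PageDirty(head);
    }

    /* Folio-based style: the type guarantees a head (or order-0) page. */
    static bool example_folio_is_dirty(struct folio *folio)
    {
    	return folio_test_dirty(folio);		/* no compound_head() needed */
    }

    /* Bridging from an arbitrary page to its folio is a single call. */
    static bool example_any_page_is_dirty(struct page *page)
    {
    	return example_folio_is_dirty(page_folio(page));
    }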
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	356
1 file changed, 176 insertions(+), 180 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6da5020a8656..8dab23a71fc4 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -456,28 +456,6 @@ ino_t page_cgroup_ino(struct page *page)
return ino;
}
-static struct mem_cgroup_per_node *
-mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
-{
- int nid = page_to_nid(page);
-
- return memcg->nodeinfo[nid];
-}
-
-static struct mem_cgroup_tree_per_node *
-soft_limit_tree_node(int nid)
-{
- return soft_limit_tree.rb_tree_per_node[nid];
-}
-
-static struct mem_cgroup_tree_per_node *
-soft_limit_tree_from_page(struct page *page)
-{
- int nid = page_to_nid(page);
-
- return soft_limit_tree.rb_tree_per_node[nid];
-}
-
static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
struct mem_cgroup_tree_per_node *mctz,
unsigned long new_usage_in_excess)
@@ -548,13 +526,13 @@ static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
return excess;
}
-static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
+static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
{
unsigned long excess;
struct mem_cgroup_per_node *mz;
struct mem_cgroup_tree_per_node *mctz;
- mctz = soft_limit_tree_from_page(page);
+ mctz = soft_limit_tree.rb_tree_per_node[nid];
if (!mctz)
return;
/*
@@ -562,7 +540,7 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
* because their event counter is not touched.
*/
for (; memcg; memcg = parent_mem_cgroup(memcg)) {
- mz = mem_cgroup_page_nodeinfo(memcg, page);
+ mz = memcg->nodeinfo[nid];
excess = soft_limit_excess(memcg);
/*
* We have to update the tree if mz is on RB-tree or
@@ -593,7 +571,7 @@ static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
for_each_node(nid) {
mz = memcg->nodeinfo[nid];
- mctz = soft_limit_tree_node(nid);
+ mctz = soft_limit_tree.rb_tree_per_node[nid];
if (mctz)
mem_cgroup_remove_exceeded(mz, mctz);
}
@@ -799,7 +777,6 @@ static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
}
static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
- struct page *page,
int nr_pages)
{
/* pagein of a big page is an event. So, ignore page size */
@@ -842,7 +819,7 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
* Check events in order.
*
*/
-static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
+static void memcg_check_events(struct mem_cgroup *memcg, int nid)
{
/* threshold event is triggered in finer grain than soft limit */
if (unlikely(mem_cgroup_event_ratelimit(memcg,
@@ -853,7 +830,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
MEM_CGROUP_TARGET_SOFTLIMIT);
mem_cgroup_threshold(memcg);
if (unlikely(do_softlimit))
- mem_cgroup_update_tree(memcg, page);
+ mem_cgroup_update_tree(memcg, nid);
}
}
@@ -1149,64 +1126,88 @@ int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
}
#ifdef CONFIG_DEBUG_VM
-void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
struct mem_cgroup *memcg;
if (mem_cgroup_disabled())
return;
- memcg = page_memcg(page);
+ memcg = folio_memcg(folio);
if (!memcg)
- VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != root_mem_cgroup, page);
+ VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != root_mem_cgroup, folio);
else
- VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != memcg, page);
+ VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
}
#endif
/**
- * lock_page_lruvec - lock and return lruvec for a given page.
- * @page: the page
+ * folio_lruvec_lock - Lock the lruvec for a folio.
+ * @folio: Pointer to the folio.
*
* These functions are safe to use under any of the following conditions:
- * - page locked
- * - PageLRU cleared
- * - lock_page_memcg()
- * - page->_refcount is zero
+ * - folio locked
+ * - folio_test_lru false
+ * - folio_memcg_lock()
+ * - folio frozen (refcount of 0)
+ *
+ * Return: The lruvec this folio is on with its lock held.
*/
-struct lruvec *lock_page_lruvec(struct page *page)
+struct lruvec *folio_lruvec_lock(struct folio *folio)
{
- struct lruvec *lruvec;
+ struct lruvec *lruvec = folio_lruvec(folio);
- lruvec = mem_cgroup_page_lruvec(page);
spin_lock(&lruvec->lru_lock);
-
- lruvec_memcg_debug(lruvec, page);
+ lruvec_memcg_debug(lruvec, folio);
return lruvec;
}
-struct lruvec *lock_page_lruvec_irq(struct page *page)
+/**
+ * folio_lruvec_lock_irq - Lock the lruvec for a folio.
+ * @folio: Pointer to the folio.
+ *
+ * These functions are safe to use under any of the following conditions:
+ * - folio locked
+ * - folio_test_lru false
+ * - folio_memcg_lock()
+ * - folio frozen (refcount of 0)
+ *
+ * Return: The lruvec this folio is on with its lock held and interrupts
+ * disabled.
+ */
+struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
- struct lruvec *lruvec;
+ struct lruvec *lruvec = folio_lruvec(folio);
- lruvec = mem_cgroup_page_lruvec(page);
spin_lock_irq(&lruvec->lru_lock);
-
- lruvec_memcg_debug(lruvec, page);
+ lruvec_memcg_debug(lruvec, folio);
return lruvec;
}
-struct lruvec *lock_page_lruvec_irqsave(struct page *page, unsigned long *flags)
+/**
+ * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
+ * @folio: Pointer to the folio.
+ * @flags: Pointer to irqsave flags.
+ *
+ * These functions are safe to use under any of the following conditions:
+ * - folio locked
+ * - folio_test_lru false
+ * - folio_memcg_lock()
+ * - folio frozen (refcount of 0)
+ *
+ * Return: The lruvec this folio is on with its lock held and interrupts
+ * disabled.
+ */
+struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
+ unsigned long *flags)
{
- struct lruvec *lruvec;
+ struct lruvec *lruvec = folio_lruvec(folio);
- lruvec = mem_cgroup_page_lruvec(page);
spin_lock_irqsave(&lruvec->lru_lock, *flags);
-
- lruvec_memcg_debug(lruvec, page);
+ lruvec_memcg_debug(lruvec, folio);
return lruvec;
}
@@ -1956,18 +1957,17 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
}
/**
- * lock_page_memcg - lock a page and memcg binding
- * @page: the page
+ * folio_memcg_lock - Bind a folio to its memcg.
+ * @folio: The folio.
*
- * This function protects unlocked LRU pages from being moved to
+ * This function prevents unlocked LRU folios from being moved to
* another cgroup.
*
- * It ensures lifetime of the locked memcg. Caller is responsible
- * for the lifetime of the page.
+ * It ensures lifetime of the bound memcg. The caller is responsible
+ * for the lifetime of the folio.
*/
-void lock_page_memcg(struct page *page)
+void folio_memcg_lock(struct folio *folio)
{
- struct page *head = compound_head(page); /* rmap on tail pages */
struct mem_cgroup *memcg;
unsigned long flags;
@@ -1981,7 +1981,7 @@ void lock_page_memcg(struct page *page)
if (mem_cgroup_disabled())
return;
again:
- memcg = page_memcg(head);
+ memcg = folio_memcg(folio);
if (unlikely(!memcg))
return;
@@ -1995,7 +1995,7 @@ again:
return;
spin_lock_irqsave(&memcg->move_lock, flags);
- if (memcg != page_memcg(head)) {
+ if (memcg != folio_memcg(folio)) {
spin_unlock_irqrestore(&memcg->move_lock, flags);
goto again;
}
@@ -2009,9 +2009,15 @@ again:
memcg->move_lock_task = current;
memcg->move_lock_flags = flags;
}
+EXPORT_SYMBOL(folio_memcg_lock);
+
+void lock_page_memcg(struct page *page)
+{
+ folio_memcg_lock(page_folio(page));
+}
EXPORT_SYMBOL(lock_page_memcg);
-static void __unlock_page_memcg(struct mem_cgroup *memcg)
+static void __folio_memcg_unlock(struct mem_cgroup *memcg)
{
if (memcg && memcg->move_lock_task == current) {
unsigned long flags = memcg->move_lock_flags;
@@ -2026,14 +2032,22 @@ static void __unlock_page_memcg(struct mem_cgroup *memcg)
}
/**
- * unlock_page_memcg - unlock a page and memcg binding
- * @page: the page
+ * folio_memcg_unlock - Release the binding between a folio and its memcg.
+ * @folio: The folio.
+ *
+ * This releases the binding created by folio_memcg_lock(). This does
+ * not change the accounting of this folio to its memcg, but it does
+ * permit others to change it.
*/
-void unlock_page_memcg(struct page *page)
+void folio_memcg_unlock(struct folio *folio)
{
- struct page *head = compound_head(page);
+ __folio_memcg_unlock(folio_memcg(folio));
+}
+EXPORT_SYMBOL(folio_memcg_unlock);
- __unlock_page_memcg(page_memcg(head));
+void unlock_page_memcg(struct page *page)
+{
+ folio_memcg_unlock(page_folio(page));
}
EXPORT_SYMBOL(unlock_page_memcg);
@@ -2734,9 +2748,9 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
}
#endif
-static void commit_charge(struct page *page, struct mem_cgroup *memcg)
+static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
{
- VM_BUG_ON_PAGE(page_memcg(page), page);
+ VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
/*
* Any of the following ensures page's memcg stability:
*
@@ -2745,7 +2759,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg)
* - lock_page_memcg()
* - exclusive reference
*/
- page->memcg_data = (unsigned long)memcg;
+ folio->memcg_data = (unsigned long)memcg;
}
static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
@@ -3015,15 +3029,16 @@ int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
*/
void __memcg_kmem_uncharge_page(struct page *page, int order)
{
+ struct folio *folio = page_folio(page);
struct obj_cgroup *objcg;
unsigned int nr_pages = 1 << order;
- if (!PageMemcgKmem(page))
+ if (!folio_memcg_kmem(folio))
return;
- objcg = __page_objcg(page);
+ objcg = __folio_objcg(folio);
obj_cgroup_uncharge_pages(objcg, nr_pages);
- page->memcg_data = 0;
+ folio->memcg_data = 0;
obj_cgroup_put(objcg);
}
@@ -3257,17 +3272,18 @@ void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
*/
void split_page_memcg(struct page *head, unsigned int nr)
{
- struct mem_cgroup *memcg = page_memcg(head);
+ struct folio *folio = page_folio(head);
+ struct mem_cgroup *memcg = folio_memcg(folio);
int i;
if (mem_cgroup_disabled() || !memcg)
return;
for (i = 1; i < nr; i++)
- head[i].memcg_data = head->memcg_data;
+ folio_page(folio, i)->memcg_data = folio->memcg_data;
- if (PageMemcgKmem(head))
- obj_cgroup_get_many(__page_objcg(head), nr - 1);
+ if (folio_memcg_kmem(folio))
+ obj_cgroup_get_many(__folio_objcg(folio), nr - 1);
else
css_get_many(&memcg->css, nr - 1);
}
@@ -3381,7 +3397,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
if (order > 0)
return 0;
- mctz = soft_limit_tree_node(pgdat->node_id);
+ mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];
/*
* Do not even bother to check the largest node if the root
@@ -4537,17 +4553,17 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
* As being wrong occasionally doesn't matter, updates and accesses to the
* records are lockless and racy.
*/
-void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
+void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
struct bdi_writeback *wb)
{
- struct mem_cgroup *memcg = page_memcg(page);
+ struct mem_cgroup *memcg = folio_memcg(folio);
struct memcg_cgwb_frn *frn;
u64 now = get_jiffies_64();
u64 oldest_at = now;
int oldest = -1;
int i;
- trace_track_foreign_dirty(page, wb);
+ trace_track_foreign_dirty(folio, wb);
/*
* Pick the slot to use. If there is already a slot for @wb, keep
@@ -5575,38 +5591,39 @@ static int mem_cgroup_move_account(struct page *page,
struct mem_cgroup *from,
struct mem_cgroup *to)
{
+ struct folio *folio = page_folio(page);
struct lruvec *from_vec, *to_vec;
struct pglist_data *pgdat;
- unsigned int nr_pages = compound ? thp_nr_pages(page) : 1;
- int ret;
+ unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
+ int nid, ret;
VM_BUG_ON(from == to);
- VM_BUG_ON_PAGE(PageLRU(page), page);
- VM_BUG_ON(compound && !PageTransHuge(page));
+ VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
+ VM_BUG_ON(compound && !folio_test_multi(folio));
/*
* Prevent mem_cgroup_migrate() from looking at
* page's memory cgroup of its source page while we change it.
*/
ret = -EBUSY;
- if (!trylock_page(page))
+ if (!folio_trylock(folio))
goto out;
ret = -EINVAL;
- if (page_memcg(page) != from)
+ if (folio_memcg(folio) != from)
goto out_unlock;
- pgdat = page_pgdat(page);
+ pgdat = folio_pgdat(folio);
from_vec = mem_cgroup_lruvec(from, pgdat);
to_vec = mem_cgroup_lruvec(to, pgdat);
- lock_page_memcg(page);
+ folio_memcg_lock(folio);
- if (PageAnon(page)) {
- if (page_mapped(page)) {
+ if (folio_test_anon(folio)) {
+ if (folio_mapped(folio)) {
__mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
__mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
- if (PageTransHuge(page)) {
+ if (folio_test_transhuge(folio)) {
__mod_lruvec_state(from_vec, NR_ANON_THPS,
-nr_pages);
__mod_lruvec_state(to_vec, NR_ANON_THPS,
@@ -5617,18 +5634,18 @@ static int mem_cgroup_move_account(struct page *page,
__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
- if (PageSwapBacked(page)) {
+ if (folio_test_swapbacked(folio)) {
__mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
__mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
}
- if (page_mapped(page)) {
+ if (folio_mapped(folio)) {
__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
}
- if (PageDirty(page)) {
- struct address_space *mapping = page_mapping(page);
+ if (folio_test_dirty(folio)) {
+ struct address_space *mapping = folio_mapping(folio);
if (mapping_can_writeback(mapping)) {
__mod_lruvec_state(from_vec, NR_FILE_DIRTY,
@@ -5639,7 +5656,7 @@ static int mem_cgroup_move_account(struct page *page,
}
}
- if (PageWriteback(page)) {
+ if (folio_test_writeback(folio)) {
__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
}
@@ -5662,20 +5679,21 @@ static int mem_cgroup_move_account(struct page *page,
css_get(&to->css);
css_put(&from->css);
- page->memcg_data = (unsigned long)to;
+ folio->memcg_data = (unsigned long)to;
- __unlock_page_memcg(from);
+ __folio_memcg_unlock(from);
ret = 0;
+ nid = folio_nid(folio);
local_irq_disable();
- mem_cgroup_charge_statistics(to, page, nr_pages);
- memcg_check_events(to, page);
- mem_cgroup_charge_statistics(from, page, -nr_pages);
- memcg_check_events(from, page);
+ mem_cgroup_charge_statistics(to, nr_pages);
+ memcg_check_events(to, nid);
+ mem_cgroup_charge_statistics(from, -nr_pages);
+ memcg_check_events(from, nid);
local_irq_enable();
out_unlock:
- unlock_page(page);
+ folio_unlock(folio);
out:
return ret;
}
@@ -6680,9 +6698,10 @@ void mem_cgroup_calculate_protection(struct mem_cgroup *root,
atomic_long_read(&parent->memory.children_low_usage)));
}
-static int charge_memcg(struct page *page, struct mem_cgroup *memcg, gfp_t gfp)
+static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
+ gfp_t gfp)
{
- unsigned int nr_pages = thp_nr_pages(page);
+ long nr_pages = folio_nr_pages(folio);
int ret;
ret = try_charge(memcg, gfp, nr_pages);
@@ -6690,38 +6709,23 @@ static int charge_memcg(struct page *page, struct mem_cgroup *memcg, gfp_t gfp)
goto out;
css_get(&memcg->css);
- commit_charge(page, memcg);
+ commit_charge(folio, memcg);
local_irq_disable();
- mem_cgroup_charge_statistics(memcg, page, nr_pages);
- memcg_check_events(memcg, page);
+ mem_cgroup_charge_statistics(memcg, nr_pages);
+ memcg_check_events(memcg, folio_nid(folio));
local_irq_enable();
out:
return ret;
}
-/**
- * __mem_cgroup_charge - charge a newly allocated page to a cgroup
- * @page: page to charge
- * @mm: mm context of the victim
- * @gfp_mask: reclaim mode
- *
- * Try to charge @page to the memcg that @mm belongs to, reclaiming
- * pages according to @gfp_mask if necessary. if @mm is NULL, try to
- * charge to the active memcg.
- *
- * Do not use this for pages allocated for swapin.
- *
- * Returns 0 on success. Otherwise, an error code is returned.
- */
-int __mem_cgroup_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask)
+int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
{
struct mem_cgroup *memcg;
int ret;
memcg = get_mem_cgroup_from_mm(mm);
- ret = charge_memcg(page, memcg, gfp_mask);
+ ret = charge_memcg(folio, memcg, gfp);
css_put(&memcg->css);
return ret;
@@ -6742,6 +6746,7 @@ int __mem_cgroup_charge(struct page *page, struct mm_struct *mm,
int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
gfp_t gfp, swp_entry_t entry)
{
+ struct folio *folio = page_folio(page);
struct mem_cgroup *memcg;
unsigned short id;
int ret;
@@ -6756,7 +6761,7 @@ int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
memcg = get_mem_cgroup_from_mm(mm);
rcu_read_unlock();
- ret = charge_memcg(page, memcg, gfp);
+ ret = charge_memcg(folio, memcg, gfp);
css_put(&memcg->css);
return ret;
@@ -6800,7 +6805,7 @@ struct uncharge_gather {
unsigned long nr_memory;
unsigned long pgpgout;
unsigned long nr_kmem;
- struct page *dummy_page;
+ int nid;
};
static inline void uncharge_gather_clear(struct uncharge_gather *ug)
@@ -6824,36 +6829,36 @@ static void uncharge_batch(const struct uncharge_gather *ug)
local_irq_save(flags);
__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
- memcg_check_events(ug->memcg, ug->dummy_page);
+ memcg_check_events(ug->memcg, ug->nid);
local_irq_restore(flags);
- /* drop reference from uncharge_page */
+ /* drop reference from uncharge_folio */
css_put(&ug->memcg->css);
}
-static void uncharge_page(struct page *page, struct uncharge_gather *ug)
+static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
{
- unsigned long nr_pages;
+ long nr_pages;
struct mem_cgroup *memcg;
struct obj_cgroup *objcg;
- bool use_objcg = PageMemcgKmem(page);
+ bool use_objcg = folio_memcg_kmem(folio);
- VM_BUG_ON_PAGE(PageLRU(page), page);
+ VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
/*
* Nobody should be changing or seriously looking at
- * page memcg or objcg at this point, we have fully
- * exclusive access to the page.
+ * folio memcg or objcg at this point, we have fully
+ * exclusive access to the folio.
*/
if (use_objcg) {
- objcg = __page_objcg(page);
+ objcg = __folio_objcg(folio);
/*
* This get matches the put at the end of the function and
* kmem pages do not hold memcg references anymore.
*/
memcg = get_mem_cgroup_from_objcg(objcg);
} else {
- memcg = __page_memcg(page);
+ memcg = __folio_memcg(folio);
}
if (!memcg)
@@ -6865,19 +6870,19 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
uncharge_gather_clear(ug);
}
ug->memcg = memcg;
- ug->dummy_page = page;
+ ug->nid = folio_nid(folio);
/* pairs with css_put in uncharge_batch */
css_get(&memcg->css);
}
- nr_pages = compound_nr(page);
+ nr_pages = folio_nr_pages(folio);
if (use_objcg) {
ug->nr_memory += nr_pages;
ug->nr_kmem += nr_pages;
- page->memcg_data = 0;
+ folio->memcg_data = 0;
obj_cgroup_put(objcg);
} else {
/* LRU pages aren't accounted at the root level */
@@ -6885,28 +6890,22 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
ug->nr_memory += nr_pages;
ug->pgpgout++;
- page->memcg_data = 0;
+ folio->memcg_data = 0;
}
css_put(&memcg->css);
}
-/**
- * __mem_cgroup_uncharge - uncharge a page
- * @page: page to uncharge
- *
- * Uncharge a page previously charged with __mem_cgroup_charge().
- */
-void __mem_cgroup_uncharge(struct page *page)
+void __mem_cgroup_uncharge(struct folio *folio)
{
struct uncharge_gather ug;
- /* Don't touch page->lru of any random page, pre-check: */
- if (!page_memcg(page))
+ /* Don't touch folio->lru of any random page, pre-check: */
+ if (!folio_memcg(folio))
return;
uncharge_gather_clear(&ug);
- uncharge_page(page, &ug);
+ uncharge_folio(folio, &ug);
uncharge_batch(&ug);
}
@@ -6920,52 +6919,49 @@ void __mem_cgroup_uncharge(struct page *page)
void __mem_cgroup_uncharge_list(struct list_head *page_list)
{
struct uncharge_gather ug;
- struct page *page;
+ struct folio *folio;
uncharge_gather_clear(&ug);
- list_for_each_entry(page, page_list, lru)
- uncharge_page(page, &ug);
+ list_for_each_entry(folio, page_list, lru)
+ uncharge_folio(folio, &ug);
if (ug.memcg)
uncharge_batch(&ug);
}
/**
- * mem_cgroup_migrate - charge a page's replacement
- * @oldpage: currently circulating page
- * @newpage: replacement page
+ * mem_cgroup_migrate - Charge a folio's replacement.
+ * @old: Currently circulating folio.
+ * @new: Replacement folio.
*
- * Charge @newpage as a replacement page for @oldpage. @oldpage will
+ * Charge @new as a replacement folio for @old. @old will
* be uncharged upon free.
*
- * Both pages must be locked, @newpage->mapping must be set up.
+ * Both folios must be locked, @new->mapping must be set up.
*/
-void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
+void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
struct mem_cgroup *memcg;
- unsigned int nr_pages;
+ long nr_pages = folio_nr_pages(new);
unsigned long flags;
- VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
- VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
- VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
- VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
- newpage);
+ VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
+ VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
+ VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
+ VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
if (mem_cgroup_disabled())
return;
- /* Page cache replacement: new page already charged? */
- if (page_memcg(newpage))
+ /* Page cache replacement: new folio already charged? */
+ if (folio_memcg(new))
return;
- memcg = page_memcg(oldpage);
- VM_WARN_ON_ONCE_PAGE(!memcg, oldpage);
+ memcg = folio_memcg(old);
+ VM_WARN_ON_ONCE_FOLIO(!memcg, old);
if (!memcg)
return;
/* Force-charge the new page. The old one will be freed soon */
- nr_pages = thp_nr_pages(newpage);
-
if (!mem_cgroup_is_root(memcg)) {
page_counter_charge(&memcg->memory, nr_pages);
if (do_memsw_account())
@@ -6973,11 +6969,11 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
}
css_get(&memcg->css);
- commit_charge(newpage, memcg);
+ commit_charge(new, memcg);
local_irq_save(flags);
- mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
- memcg_check_events(memcg, newpage);
+ mem_cgroup_charge_statistics(memcg, nr_pages);
+ memcg_check_events(memcg, folio_nid(new));
local_irq_restore(flags);
}
@@ -7204,8 +7200,8 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
* only synchronisation we have for updating the per-CPU variables.
*/
VM_BUG_ON(!irqs_disabled());
- mem_cgroup_charge_statistics(memcg, page, -nr_entries);
- memcg_check_events(memcg, page);
+ mem_cgroup_charge_statistics(memcg, -nr_entries);
+ memcg_check_events(memcg, page_to_nid(page));
css_put(&memcg->css);
}
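As a usage note on the lruvec locking interface added above, here is a short sketch; it is not part of the commit. example_operate_on_lru() is an invented name, and the caller is assumed to satisfy one of the conditions listed in the folio_lruvec_lock_irqsave() kerneldoc; the unlock simply releases lruvec->lru_lock, mirroring the lock side shown in the diff.

    #include <linux/memcontrol.h>
    #include <linux/mm.h>

    static void example_operate_on_lru(struct folio *folio)
    {
    	unsigned long flags;
    	struct lruvec *lruvec;

    	/*
    	 * Safe only if the folio is locked, its LRU flag is clear,
    	 * folio_memcg_lock() is held, or the folio is frozen (refcount 0).
    	 */
    	lruvec = folio_lruvec_lock_irqsave(folio, &flags);

    	/* ... manipulate the folio's LRU state under the lock ... */

    	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
    }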