author    Jakub Kicinski <kuba@kernel.org>    2020-12-04 07:48:11 -0800
committer Jakub Kicinski <kuba@kernel.org>    2020-12-04 07:48:12 -0800
commit    a1dd1d86973182458da7798a95f26cfcbea599b4 (patch)
tree      1adda22ea30ccfac7651a7eed7b7c90356f8243a /mm/slab.h
parent    55fd59b003f6e8fd88cf16590e79823d7ccf3026 (diff)
parent    eceae70bdeaeb6b8ceb662983cf663ff352fbc96 (diff)
Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2020-12-03

The main changes are:

1) Support BTF in kernel modules, from Andrii.

2) Introduce preferred busy-polling, from Björn.

3) bpf_ima_inode_hash() and bpf_bprm_opts_set() helpers, from KP Singh.

4) Memcg-based memory accounting for bpf objects, from Roman.

5) Allow bpf_{s,g}etsockopt from cgroup bind{4,6} hooks, from Stanislav.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (118 commits)
  selftests/bpf: Fix invalid use of strncat in test_sockmap
  libbpf: Use memcpy instead of strncpy to please GCC
  selftests/bpf: Add fentry/fexit/fmod_ret selftest for kernel module
  selftests/bpf: Add tp_btf CO-RE reloc test for modules
  libbpf: Support attachment of BPF tracing programs to kernel modules
  libbpf: Factor out low-level BPF program loading helper
  bpf: Allow to specify kernel module BTFs when attaching BPF programs
  bpf: Remove hard-coded btf_vmlinux assumption from BPF verifier
  selftests/bpf: Add CO-RE relocs selftest relying on kernel module BTF
  selftests/bpf: Add support for marking sub-tests as skipped
  selftests/bpf: Add bpf_testmod kernel module for testing
  libbpf: Add kernel module BTF support for CO-RE relocations
  libbpf: Refactor CO-RE relocs to not assume a single BTF object
  libbpf: Add internal helper to load BTF data by FD
  bpf: Keep module's btf_data_size intact after load
  bpf: Fix bpf_put_raw_tracepoint()'s use of __module_address()
  selftests/bpf: Add Userspace tests for TCP_WINDOW_CLAMP
  bpf: Adds support for setting window clamp
  samples/bpf: Fix spelling mistake "recieving" -> "receiving"
  bpf: Fix cold build of test_progs-no_alu32
  ...
====================

Link: https://lore.kernel.org/r/20201204021936.85653-1-alexei.starovoitov@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'mm/slab.h')
-rw-r--r--  mm/slab.h | 38
1 file changed, 9 insertions(+), 29 deletions(-)
diff --git a/mm/slab.h b/mm/slab.h
index 6d7c6a5056ba..9a54a0cb5cca 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -239,30 +239,13 @@ static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
}
#ifdef CONFIG_MEMCG_KMEM
-static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
-{
- /*
- * page->mem_cgroup and page->obj_cgroups are sharing the same
- * space. To distinguish between them in case we don't know for sure
- * that the page is a slab page (e.g. page_cgroup_ino()), let's
- * always set the lowest bit of obj_cgroups.
- */
- return (struct obj_cgroup **)
- ((unsigned long)page->obj_cgroups & ~0x1UL);
-}
-
-static inline bool page_has_obj_cgroups(struct page *page)
-{
- return ((unsigned long)page->obj_cgroups & 0x1UL);
-}
-
int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
gfp_t gfp);
static inline void memcg_free_page_obj_cgroups(struct page *page)
{
- kfree(page_obj_cgroups(page));
- page->obj_cgroups = NULL;
+ kfree(page_objcgs(page));
+ page->memcg_data = 0;
}
static inline size_t obj_full_size(struct kmem_cache *s)
@@ -323,7 +306,7 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
if (likely(p[i])) {
page = virt_to_head_page(p[i]);
- if (!page_has_obj_cgroups(page) &&
+ if (!page_objcgs(page) &&
memcg_alloc_page_obj_cgroups(page, s, flags)) {
obj_cgroup_uncharge(objcg, obj_full_size(s));
continue;
@@ -331,7 +314,7 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
off = obj_to_index(s, page, p[i]);
obj_cgroup_get(objcg);
- page_obj_cgroups(page)[off] = objcg;
+ page_objcgs(page)[off] = objcg;
mod_objcg_state(objcg, page_pgdat(page),
cache_vmstat_idx(s), obj_full_size(s));
} else {
@@ -345,6 +328,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
void **p, int objects)
{
struct kmem_cache *s;
+ struct obj_cgroup **objcgs;
struct obj_cgroup *objcg;
struct page *page;
unsigned int off;
@@ -358,7 +342,8 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
continue;
page = virt_to_head_page(p[i]);
- if (!page_has_obj_cgroups(page))
+ objcgs = page_objcgs(page);
+ if (!objcgs)
continue;
if (!s_orig)
@@ -367,11 +352,11 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
s = s_orig;
off = obj_to_index(s, page, p[i]);
- objcg = page_obj_cgroups(page)[off];
+ objcg = objcgs[off];
if (!objcg)
continue;
- page_obj_cgroups(page)[off] = NULL;
+ objcgs[off] = NULL;
obj_cgroup_uncharge(objcg, obj_full_size(s));
mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
-obj_full_size(s));
@@ -380,11 +365,6 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
}
#else /* CONFIG_MEMCG_KMEM */
-static inline bool page_has_obj_cgroups(struct page *page)
-{
- return false;
-}
-
static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}
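
The comment deleted in the first hunk documented the trick this cleanup builds on: page->mem_cgroup and page->obj_cgroups shared the same word, with the lowest bit set to mark the obj_cgroups case. The series unifies that word as page->memcg_data and moves the untagging into a shared page_objcgs() helper, so mm/slab.h no longer open-codes the ~0x1UL mask. Below is a minimal, standalone C sketch of that low-bit pointer-tagging scheme; struct obj_cgroup, OBJCGS_TAG, and untag_objcgs() are illustrative stand-ins, not the kernel's actual definitions:

	#include <stdio.h>
	#include <stdlib.h>

	/* Stand-in for the kernel's struct obj_cgroup; illustrative only. */
	struct obj_cgroup { int id; };

	/*
	 * Lowest bit set => the word holds an obj_cgroup vector (slab page);
	 * clear => it holds a mem_cgroup pointer. Mirrors the idea behind the
	 * kernel's memcg_data flag bit; the name here is hypothetical.
	 */
	#define OBJCGS_TAG 0x1UL

	/* Untag: return the vector if the tag bit is set, else NULL. */
	static struct obj_cgroup **untag_objcgs(unsigned long memcg_data)
	{
		if (!(memcg_data & OBJCGS_TAG))
			return NULL;
		return (struct obj_cgroup **)(memcg_data & ~OBJCGS_TAG);
	}

	int main(void)
	{
		/* Allocate a small objcg vector, as a slab page would own. */
		struct obj_cgroup **vec = calloc(4, sizeof(*vec));

		/* Store it with the tag bit set, as the slab path does. */
		unsigned long memcg_data = (unsigned long)vec | OBJCGS_TAG;

		printf("tagged %#lx -> vector %p\n", memcg_data,
		       (void *)untag_objcgs(memcg_data));
		free(vec);
		return 0;
	}

The low bit is free for tagging because the vector comes from an allocator that returns at least word-aligned memory, so bit 0 of a valid pointer is always zero; that is also why the deleted page_obj_cgroups() could safely mask it off.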