memcg: allocate v1 event percpu only on v1 deployment

Currently memcg->events_percpu gets allocated even on v2 deployments,
where it is never used: the events it tracks only drive v1's threshold
notifications and softlimit tree updates.  Let's move the allocation
into the v1-only codebase, since it is not needed in v2.

Link: https://lkml.kernel.org/r/20240815050453.1298138-7-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: T.J. Mercier <tjmercier@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
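
For context: with this change, memcg1_alloc_events() is a real function
only under CONFIG_MEMCG_V1 and a static inline stub returning true
otherwise, so the common allocation path can call it unconditionally.
A minimal, hypothetical sketch of that call-site pattern follows;
mem_cgroup_alloc_sketch and its simplified error handling are
illustrative, not the actual mm/memcontrol.c code:

#include <linux/memcontrol.h>
#include <linux/slab.h>
#include "memcontrol-v1.h"	/* memcg1_alloc_events()/memcg1_free_events() */

/* Illustrative caller; the real allocator also sets up per-node info etc. */
static struct mem_cgroup *mem_cgroup_alloc_sketch(void)
{
	struct mem_cgroup *memcg;

	memcg = kzalloc(sizeof(*memcg), GFP_KERNEL);
	if (!memcg)
		return NULL;

	/* Compiles to "return true" when CONFIG_MEMCG_V1=n: no percpu alloc. */
	if (!memcg1_alloc_events(memcg)) {
		kfree(memcg);
		return NULL;
	}
	return memcg;
}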

include/linux/memcontrol.h

@@ -255,7 +255,6 @@ struct mem_cgroup {
 	struct list_head objcg_list;
 
 	struct memcg_vmstats_percpu __percpu *vmstats_percpu;
-	struct memcg1_events_percpu __percpu *events_percpu;
 
 #ifdef CONFIG_CGROUP_WRITEBACK
 	struct list_head cgwb_list;
@@ -277,6 +276,8 @@ struct mem_cgroup {
 	struct page_counter kmem;		/* v1 only */
 	struct page_counter tcpmem;		/* v1 only */
 
+	struct memcg1_events_percpu __percpu *events_percpu;
+
 	unsigned long soft_limit;
 
 	/* protected by memcg_oom_lock */

mm/memcontrol-v1.c

@@ -1442,6 +1442,12 @@ static void mem_cgroup_threshold(struct mem_cgroup *memcg)
 	}
 }
+
+/* Cgroup1: threshold notifications & softlimit tree updates */
+struct memcg1_events_percpu {
+	unsigned long nr_page_events;
+	unsigned long targets[MEM_CGROUP_NTARGETS];
+};
 
 static void memcg1_charge_statistics(struct mem_cgroup *memcg, int nr_pages)
 {
 	/* pagein of a big page is an event. So, ignore page size */
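
These two per-CPU fields drive v1's deferred event work: nr_page_events
counts charge/uncharge events on the local CPU, while targets[] holds,
per target, the event count at which the next threshold/softlimit work
should run.  A sketch of how such a pair is typically consumed, modeled
on v1's event ratelimit check but with hypothetical, simplified names
(event_target_hit and the "interval" parameter are illustrative):

/*
 * Returns true and advances the per-CPU target when enough events have
 * accumulated since the last time this target fired.
 */
static bool event_target_hit(struct memcg1_events_percpu __percpu *events,
			     enum mem_cgroup_events_target target,
			     unsigned long interval)
{
	unsigned long val, next;

	val = __this_cpu_read(events->nr_page_events);
	next = __this_cpu_read(events->targets[target]);

	/* Signed comparison is wraparound-safe, as in time_after(). */
	if ((long)(next - val) < 0) {
		__this_cpu_write(events->targets[target], val + interval);
		return true;	/* caller runs threshold/softlimit work */
	}
	return false;
}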
@@ -3033,6 +3039,19 @@ bool memcg1_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
 	return false;
 }
 
+bool memcg1_alloc_events(struct mem_cgroup *memcg)
+{
+	memcg->events_percpu = alloc_percpu_gfp(struct memcg1_events_percpu,
+						GFP_KERNEL_ACCOUNT);
+	return !!memcg->events_percpu;
+}
+
+void memcg1_free_events(struct mem_cgroup *memcg)
+{
+	if (memcg->events_percpu)
+		free_percpu(memcg->events_percpu);
+}
+
 static int __init memcg1_init(void)
 {
 	int node;
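
Two details in the definitions above are worth noting: alloc_percpu_gfp()
returns one instance of the struct per possible CPU, and GFP_KERNEL_ACCOUNT
charges that memory to the allocating cgroup.  A self-contained sketch of
the same allocate/update/free pattern, with hypothetical demo_* names:

#include <linux/gfp.h>
#include <linux/percpu.h>

/* Hypothetical stand-in for memcg1_events_percpu. */
struct demo_events {
	unsigned long nr_page_events;
};

static int demo_percpu_pattern(void)
{
	struct demo_events __percpu *ev;

	/* One copy per possible CPU; charged to the current cgroup. */
	ev = alloc_percpu_gfp(struct demo_events, GFP_KERNEL_ACCOUNT);
	if (!ev)
		return -ENOMEM;

	this_cpu_inc(ev->nr_page_events);	/* lockless, CPU-local update */

	free_percpu(ev);
	return 0;
}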

mm/memcontrol-v1.h

@@ -55,12 +55,6 @@ enum mem_cgroup_events_target {
 	MEM_CGROUP_NTARGETS,
 };
 
-/* Cgroup1: threshold notifications & softlimit tree updates */
-struct memcg1_events_percpu {
-	unsigned long nr_page_events;
-	unsigned long targets[MEM_CGROUP_NTARGETS];
-};
-
 unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap);
 
 void drain_all_stock(struct mem_cgroup *root_memcg);
@@ -72,21 +66,12 @@ unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item);
 unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item);
 int memory_stat_show(struct seq_file *m, void *v);
 
-static inline bool memcg1_alloc_events(struct mem_cgroup *memcg)
-{
-	memcg->events_percpu = alloc_percpu_gfp(struct memcg1_events_percpu,
-						GFP_KERNEL_ACCOUNT);
-	return !!memcg->events_percpu;
-}
-
-static inline void memcg1_free_events(struct mem_cgroup *memcg)
-{
-	if (memcg->events_percpu)
-		free_percpu(memcg->events_percpu);
-}
-
 /* Cgroup v1-specific declarations */
 #ifdef CONFIG_MEMCG_V1
 
+bool memcg1_alloc_events(struct mem_cgroup *memcg);
+void memcg1_free_events(struct mem_cgroup *memcg);
+
 void memcg1_memcg_init(struct mem_cgroup *memcg);
 void memcg1_remove_from_trees(struct mem_cgroup *memcg);
@@ -139,6 +124,9 @@ extern struct cftype mem_cgroup_legacy_files[];
 #else	/* CONFIG_MEMCG_V1 */
 
+static inline bool memcg1_alloc_events(struct mem_cgroup *memcg) { return true; }
+static inline void memcg1_free_events(struct mem_cgroup *memcg) {}
+
 static inline void memcg1_memcg_init(struct mem_cgroup *memcg) {}
 static inline void memcg1_remove_from_trees(struct mem_cgroup *memcg) {}
 static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg) {}
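
Taken together with the CONFIG_MEMCG_V1 declarations above, these stubs
complete the pattern: on v2-only builds memcg1_alloc_events() is a
static inline returning true and memcg1_free_events() an empty inline,
so shared code can call both unconditionally while the compiler elides
the calls along with the percpu allocation this patch moves.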