diff --git a/include/linux/mm.h b/include/linux/mm.h
index 13b4bd7355c1..b634617f85fb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2644,7 +2644,20 @@ static inline bool get_user_page_fast_only(unsigned long addr,
  */
 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
 {
-	return percpu_counter_read_positive(&mm->rss_stat[member]);
+	struct percpu_counter *fbc = &mm->rss_stat[member];
+	s64 val;
+
+	if (percpu_counter_initialized(fbc))
+		return percpu_counter_read_positive(fbc);
+
+	val = percpu_counter_atomic_read(fbc);
+	/*
+	 * The counter is updated asynchronously and may temporarily go
+	 * negative, which is never a value users expect to see.
+	 */
+	if (val < 0)
+		return 0;
+	return (unsigned long)val;
 }
 
 static inline unsigned long get_mm_counter_sum(struct mm_struct *mm, int member)
@@ -2656,7 +2669,12 @@ void mm_trace_rss_stat(struct mm_struct *mm, int member);
 
 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
 {
-	percpu_counter_add(&mm->rss_stat[member], value);
+	struct percpu_counter *fbc = &mm->rss_stat[member];
+
+	if (percpu_counter_initialized(fbc))
+		percpu_counter_add(fbc, value);
+	else
+		percpu_counter_atomic_add(fbc, value);
 	mm_trace_rss_stat(mm, member);
 }
 
@@ -2670,9 +2688,37 @@ static inline void inc_mm_counter(struct mm_struct *mm, int member)
 
 static inline void dec_mm_counter(struct mm_struct *mm, int member)
 {
-	percpu_counter_dec(&mm->rss_stat[member]);
+	add_mm_counter(mm, member, -1);
+}
 
-	mm_trace_rss_stat(mm, member);
+static inline s64 mm_counter_sum(struct mm_struct *mm, int member)
+{
+	struct percpu_counter *fbc = &mm->rss_stat[member];
+
+	if (percpu_counter_initialized(fbc))
+		return percpu_counter_sum(fbc);
+
+	return percpu_counter_atomic_read(fbc);
+}
+
+static inline s64 mm_counter_sum_positive(struct mm_struct *mm, int member)
+{
+	struct percpu_counter *fbc = &mm->rss_stat[member];
+
+	if (percpu_counter_initialized(fbc))
+		return percpu_counter_sum_positive(fbc);
+
+	return percpu_counter_atomic_read(fbc);
+}
+
+static inline int mm_counter_switch_to_pcpu(struct mm_struct *mm)
+{
+	return percpu_counter_switch_to_pcpu_many(mm->rss_stat, NR_MM_COUNTERS);
+}
+
+static inline void mm_counter_destroy(struct mm_struct *mm)
+{
+	percpu_counter_destroy_many(mm->rss_stat, NR_MM_COUNTERS);
 }
 
 /* Optimized variant when folio is already known not to be anon */
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 3a44dd1e33d2..3b1f7c1d93dc 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -21,7 +21,18 @@
 
 struct percpu_counter {
 	raw_spinlock_t lock;
-	s64 count;
+	/*
+	 * Depending on whether counters is NULL, two modes are supported:
+	 * atomic mode using count_atomic, and percpu mode using count.
+	 * Single-threaded processes should use atomic mode to reduce memory
+	 * consumption and the performance regression of percpu allocation.
+	 * Multi-threaded processes should use percpu mode to reduce the
+	 * error margin.
+	 */
+	union {
+		s64 count;
+		atomic64_t count_atomic;
+	};
 #ifdef CONFIG_HOTPLUG_CPU
 	struct list_head list;	/* All percpu_counters are on a list */
 #endif
@@ -32,14 +43,14 @@ extern int percpu_counter_batch;
 
 int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
			       gfp_t gfp, u32 nr_counters,
-			       struct lock_class_key *key);
+			       struct lock_class_key *key, bool switch_mode);
 
 #define percpu_counter_init_many(fbc, value, gfp, nr_counters)		\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init_many(fbc, value, gfp, nr_counters,\
-					   &__key);			\
+					   &__key, false);		\
	})
 
@@ -130,6 +141,20 @@ static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
 	return (fbc->counters != NULL);
 }
 
+static inline s64 percpu_counter_atomic_read(struct percpu_counter *fbc)
+{
+	return atomic64_read(&fbc->count_atomic);
+}
+
+static inline void percpu_counter_atomic_add(struct percpu_counter *fbc,
+					     s64 amount)
+{
+	atomic64_add(amount, &fbc->count_atomic);
+}
+
+int percpu_counter_switch_to_pcpu_many(struct percpu_counter *fbc,
+				       u32 nr_counters);
+
 #else /* !CONFIG_SMP */
 
 struct percpu_counter {
@@ -260,6 +285,23 @@ static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
 static inline void percpu_counter_sync(struct percpu_counter *fbc)
 {
 }
+
+static inline s64 percpu_counter_atomic_read(struct percpu_counter *fbc)
+{
+	return fbc->count;
+}
+
+static inline void percpu_counter_atomic_add(struct percpu_counter *fbc,
+					     s64 amount)
+{
+	percpu_counter_add(fbc, amount);
+}
+
+static inline int percpu_counter_switch_to_pcpu_many(struct percpu_counter *fbc,
+						     u32 nr_counters)
+{
+	return 0;
+}
 #endif	/* CONFIG_SMP */
 
 static inline void percpu_counter_inc(struct percpu_counter *fbc)
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index b37eb0a7060f..5b80987885f3 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -399,8 +399,8 @@ TRACE_EVENT(rss_stat,
 		__entry->mm_id = mm_ptr_to_hash(mm);
 		__entry->curr = !!(current->mm == mm);
 		__entry->member = member;
-		__entry->size = (percpu_counter_sum_positive(&mm->rss_stat[member])
-				<< PAGE_SHIFT);
+		__entry->size = (mm_counter_sum_positive(mm, member)
+				<< PAGE_SHIFT);
 	),
 
 	TP_printk("mm_id=%u curr=%d type=%s size=%ldB",
diff --git a/kernel/fork.c b/kernel/fork.c
index e5ec098a6f61..0b3493e66ff9 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -829,7 +829,7 @@ static void check_mm(struct mm_struct *mm)
			 "Please make sure 'struct resident_page_types[]' is updated as well");
 
 	for (i = 0; i < NR_MM_COUNTERS; i++) {
-		long x = percpu_counter_sum(&mm->rss_stat[i]);
+		long x = mm_counter_sum(mm, i);
 
 		if (unlikely(x))
			pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
@@ -930,7 +930,7 @@ void __mmdrop(struct mm_struct *mm)
 	put_user_ns(mm->user_ns);
 	mm_pasid_drop(mm);
 	mm_destroy_cid(mm);
-	percpu_counter_destroy_many(mm->rss_stat, NR_MM_COUNTERS);
+	mm_counter_destroy(mm);
 
 	free_mm(mm);
 }
@@ -1308,16 +1308,10 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	if (mm_alloc_cid(mm))
		goto fail_cid;
 
-	if (percpu_counter_init_many(mm->rss_stat, 0, GFP_KERNEL_ACCOUNT,
-				     NR_MM_COUNTERS))
-		goto fail_pcpu;
-
 	mm->user_ns = get_user_ns(user_ns);
 	lru_gen_init_mm(mm);
 	return mm;
 
-fail_pcpu:
-	mm_destroy_cid(mm);
 fail_cid:
 	destroy_context(mm);
 fail_nocontext:
@@ -1741,6 +1735,16 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
 	if (!oldmm)
		return 0;
 
+	/*
+	 * For single-threaded processes, rss_stat stays in atomic mode, which
+	 * avoids the memory consumption and performance regression caused by
+	 * percpu counters. For multi-threaded processes, rss_stat is switched
+	 * to percpu mode to reduce the error margin.
+	 */
+	if (clone_flags & CLONE_THREAD)
+		if (mm_counter_switch_to_pcpu(oldmm))
+			return -ENOMEM;
+
 	if (clone_flags & CLONE_VM) {
		mmget(oldmm);
		mm = oldmm;
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 2891f94a11c6..3077be23dd72 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -187,7 +187,7 @@ EXPORT_SYMBOL(__percpu_counter_sum);
 
 int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
			       gfp_t gfp, u32 nr_counters,
-			       struct lock_class_key *key)
+			       struct lock_class_key *key, bool switch_mode)
 {
 	unsigned long flags __maybe_unused;
 	size_t counter_size;
@@ -208,7 +208,8 @@ int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
 #ifdef CONFIG_HOTPLUG_CPU
		INIT_LIST_HEAD(&fbc[i].list);
 #endif
-		fbc[i].count = amount;
+		if (likely(!switch_mode))
+			fbc[i].count = amount;
		fbc[i].counters = (void __percpu *)counters + i * counter_size;
 
		debug_percpu_counter_activate(&fbc[i]);
@@ -312,6 +313,36 @@ int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
 }
 EXPORT_SYMBOL(__percpu_counter_compare);
 
+/*
+ * percpu_counter_switch_to_pcpu_many: convert struct percpu_counters from
+ * atomic mode to percpu mode.
+ *
+ * Return: 0 if the counters are already in percpu mode or were successfully
+ * switched to percpu mode; -ENOMEM if the percpu memory allocation fails,
+ * in which case the counters remain in atomic mode.
+ */
+int percpu_counter_switch_to_pcpu_many(struct percpu_counter *fbc,
+				       u32 nr_counters)
+{
+	static struct lock_class_key __key;
+	unsigned long flags;
+	int ret = 0;
+
+	if (percpu_counter_initialized(fbc))
+		return 0;
+
+	preempt_disable();
+	local_irq_save(flags);
+	if (likely(!percpu_counter_initialized(fbc)))
+		ret = __percpu_counter_init_many(fbc, 0,
+				GFP_ATOMIC|__GFP_NOWARN|__GFP_ZERO,
+				nr_counters, &__key, true);
+	local_irq_restore(flags);
+	preempt_enable();
+
+	return ret;
+}
+
 /*
  * Compare counter, and add amount if total is: less than or equal to limit if
  * amount is positive, or greater than or equal to limit if amount is negative.
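
A note for reviewers: below is a minimal userspace sketch of the two-mode
counter, intended only to make the design concrete. It is not kernel code;
struct counter, counter_add(), counter_sum(), and counter_switch_to_percpu()
are invented names, a flat per-cpu array stands in for real percpu storage,
and the batch folding and irq-safe switch of the real implementation are
deliberately omitted. What it does model is the property the union relies on:
the total accumulated in atomic mode survives the switch as the base count,
with percpu deltas added on top.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct counter {
	long *percpu;		/* NULL => atomic mode (fbc->counters analog) */
	_Atomic long base;	/* analog of the count/count_atomic union */
	int nr_cpus;
};

static void counter_add(struct counter *c, int cpu, long v)
{
	if (c->percpu)
		c->percpu[cpu] += v;		/* percpu mode: no contention */
	else
		atomic_fetch_add(&c->base, v);	/* atomic mode */
}

static long counter_sum(struct counter *c)
{
	/*
	 * Because count and count_atomic share storage in the patch, the
	 * total accumulated in atomic mode becomes the percpu-mode base
	 * count after the switch; percpu deltas are added on top of it.
	 */
	long sum = atomic_load(&c->base);

	for (int cpu = 0; c->percpu && cpu < c->nr_cpus; cpu++)
		sum += c->percpu[cpu];
	return sum;
}

/* Analog of percpu_counter_switch_to_pcpu_many(): allocate zeroed percpu
 * slots and leave the atomic total in place as the new base count. */
static int counter_switch_to_percpu(struct counter *c, int nr_cpus)
{
	if (c->percpu)
		return 0;
	c->percpu = calloc(nr_cpus, sizeof(long));
	if (!c->percpu)
		return -1;	/* stays usable in atomic mode, like -ENOMEM */
	c->nr_cpus = nr_cpus;
	return 0;
}

int main(void)
{
	struct counter c = { .percpu = NULL, .base = 0, .nr_cpus = 0 };

	/* single-threaded phase: atomic mode, the cpu id is ignored */
	counter_add(&c, 0, 3);
	counter_add(&c, 0, 2);

	/* first CLONE_THREAD clone: switch, as copy_mm() does above */
	if (counter_switch_to_percpu(&c, 4))
		return 1;

	/* multi-threaded phase: percpu mode */
	counter_add(&c, 1, 10);
	counter_add(&c, 3, -4);

	printf("sum = %ld\n", counter_sum(&c));	/* prints: sum = 11 */
	return 0;
}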
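Two design points in the patch are worth calling out explicitly. First, the
switch path allocates with GFP_ATOMIC|__GFP_NOWARN|__GFP_ZERO while interrupts
are disabled, so it must not sleep; if the allocation fails, the counters
simply remain usable in atomic mode and copy_mm() returns -ENOMEM. Second,
passing switch_mode=true to __percpu_counter_init_many() skips the
"fbc[i].count = amount" store precisely because count and count_atomic share
storage: writing count would wipe the value already accumulated in atomic
mode, while __GFP_ZERO guarantees the fresh percpu slots start from zero.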