Date: Fri, 5 Apr 2024 15:54:45 +0200
From: Klara Modin <klarasmodin@...il.com>
To: Suren Baghdasaryan <surenb@...gle.com>, akpm@...ux-foundation.org
Cc: kent.overstreet@...ux.dev, mhocko@...e.com, vbabka@...e.cz,
 hannes@...xchg.org, roman.gushchin@...ux.dev, mgorman@...e.de,
 dave@...olabs.net, willy@...radead.org, liam.howlett@...cle.com,
 penguin-kernel@...ove.sakura.ne.jp, corbet@....net, void@...ifault.com,
 "Peter Zijlstra (Intel)" <peterz@...radead.org>, juri.lelli@...hat.com,
 catalin.marinas@....com, will@...nel.org, arnd@...db.de, tglx@...utronix.de,
 mingo@...hat.com, dave.hansen@...ux.intel.com, x86@...nel.org,
 peterx@...hat.com, david@...hat.com, axboe@...nel.dk, mcgrof@...nel.org,
 masahiroy@...nel.org, Nathan Chancellor <nathan@...nel.org>,
 dennis@...nel.org, jhubbard@...dia.com, tj@...nel.org,
 muchun.song@...ux.dev, rppt@...nel.org, paulmck@...nel.org,
 pasha.tatashin@...een.com, yosryahmed@...gle.com, yuzhao@...gle.com,
 David Howells <dhowells@...hat.com>, hughd@...gle.com, andreyknvl@...il.com,
 keescook@...omium.org, ndesaulniers@...gle.com, vvvvvv@...gle.com,
 gregkh@...uxfoundation.org, ebiggers@...gle.com, ytcoode@...il.com,
 vincent.guittot@...aro.org, dietmar.eggemann@....com, rostedt@...dmis.org,
 bsegall@...gle.com, bristot@...hat.com, vschneid@...hat.com, cl@...ux.com,
 penberg@...nel.org, iamjoonsoo.kim@....com, 42.hyeyoo@...il.com,
 glider@...gle.com, elver@...gle.com, dvyukov@...gle.com,
 songmuchun@...edance.com, jbaron@...mai.com, aliceryhl@...gle.com,
 rientjes@...gle.com, minchan@...gle.com, kaleshsingh@...gle.com,
 kernel-team@...roid.com, linux-doc@...r.kernel.org,
 linux-kernel@...r.kernel.org, iommu@...ts.linux.dev,
 linux-arch@...r.kernel.org, linux-fsdevel@...r.kernel.org,
 linux-mm@...ck.org, linux-modules@...r.kernel.org,
 kasan-dev@...glegroups.com, cgroups@...r.kernel.org
Subject: Re: [PATCH v6 13/37] lib: add allocation tagging support for memory
 allocation profiling

Hi,

On 2024-03-21 17:36, Suren Baghdasaryan wrote:
> Introduce CONFIG_MEM_ALLOC_PROFILING, which provides definitions to easily
> instrument memory allocators. It registers an "alloc_tags" codetag type
> with a /proc/allocinfo interface to output allocation tag information when
> the feature is enabled.
> CONFIG_MEM_ALLOC_PROFILING_DEBUG is provided for debugging the memory
> allocation profiling instrumentation.
> Memory allocation profiling can be enabled or disabled at runtime using
> the /proc/sys/vm/mem_profiling sysctl when CONFIG_MEM_ALLOC_PROFILING_DEBUG=n.
> CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT enables memory allocation
> profiling by default.
>
> Signed-off-by: Suren Baghdasaryan <surenb@...gle.com>
> Co-developed-by: Kent Overstreet <kent.overstreet@...ux.dev>
> Signed-off-by: Kent Overstreet <kent.overstreet@...ux.dev>
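
As a side note for anyone reading this in the archive, the interfaces described
above can be inspected from userspace once the feature is built and enabled.
This is only a rough sketch of mine, not something from the patch; it dumps
whatever the two files contain and assumes nothing about their format:

#include <stdio.h>

/* Print up to max_lines lines of a /proc file. */
static void dump_file(const char *path, int max_lines)
{
	char line[512];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	printf("==> %s <==\n", path);
	while (max_lines-- > 0 && fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}

int main(void)
{
	/* Paths taken from the commit message above. */
	dump_file("/proc/sys/vm/mem_profiling", 1);	/* runtime on/off switch */
	dump_file("/proc/allocinfo", 20);		/* first few report entries */
	return 0;
}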

With this commit (9e2dcefa791e9d14006b360fba3455510fd3325d in
next-20240404), a randconfig build with KCONFIG_SEED=0xE6264236 fails,
producing the attached error. The following patch fixes the build for me,
but I don't know whether it is the correct fix.

Kind regards,
Klara Modin

diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index 100ddf66eb8e..1c765d80298b 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -12,6 +12,7 @@
  #include <asm/percpu.h>
  #include <linux/cpumask.h>
  #include <linux/static_key.h>
+#include <linux/irqflags.h>

  struct alloc_tag_counters {
         u64 bytes;

> diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
> new file mode 100644
> index 000000000000..b970ff1c80dc
> --- /dev/null
> +++ b/include/linux/alloc_tag.h
> @@ -0,0 +1,145 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * allocation tagging
> + */
> +#ifndef _LINUX_ALLOC_TAG_H
> +#define _LINUX_ALLOC_TAG_H
> +
> +#include <linux/bug.h>
> +#include <linux/codetag.h>
> +#include <linux/container_of.h>
> +#include <linux/preempt.h>
> +#include <asm/percpu.h>
> +#include <linux/cpumask.h>
> +#include <linux/static_key.h>
> +
> +struct alloc_tag_counters {
> +     u64 bytes;
> +     u64 calls;
> +};
> +
> +/*
> + * An instance of this structure is created in a special ELF section at every
> + * allocation callsite. At runtime, the special section is treated as
> + * an array of these. The embedded codetag uses the codetag framework.
> + */
> +struct alloc_tag {
> +     struct codetag                  ct;
> +     struct alloc_tag_counters __percpu      *counters;
> +} __aligned(8);
> +
> +#ifdef CONFIG_MEM_ALLOC_PROFILING
> +
> +static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
> +{
> +     return container_of(ct, struct alloc_tag, ct);
> +}
> +
> +#ifdef ARCH_NEEDS_WEAK_PER_CPU
> +/*
> + * When percpu variables are required to be defined as weak, static percpu
> + * variables can't be used inside a function (see comments for DECLARE_PER_CPU_SECTION).
> + */
> +#error "Memory allocation profiling is incompatible with ARCH_NEEDS_WEAK_PER_CPU"
> +#endif
> +
> +#define DEFINE_ALLOC_TAG(_alloc_tag)                                         \
> +     static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr);      \
> +     static struct alloc_tag _alloc_tag __used __aligned(8)                  \
> +     __section("alloc_tags") = {                                             \
> +             .ct = CODE_TAG_INIT,                                            \
> +             .counters = &_alloc_tag_cntr };
> +
> +DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
> +                     mem_alloc_profiling_key);
> +
> +static inline bool mem_alloc_profiling_enabled(void)
> +{
> +     return static_branch_maybe(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
> +                                &mem_alloc_profiling_key);
> +}
> +
> +static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag)
> +{
> +     struct alloc_tag_counters v = { 0, 0 };
> +     struct alloc_tag_counters *counter;
> +     int cpu;
> +
> +     for_each_possible_cpu(cpu) {
> +             counter = per_cpu_ptr(tag->counters, cpu);
> +             v.bytes += counter->bytes;
> +             v.calls += counter->calls;
> +     }
> +
> +     return v;
> +}
> +
> +#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
> +static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag)
> +{
> +     WARN_ONCE(ref && ref->ct,
> +               "alloc_tag was not cleared (got tag for %s:%u)\n",
> +               ref->ct->filename, ref->ct->lineno);
> +
> +     WARN_ONCE(!tag, "current->alloc_tag not set");
> +}
> +
> +static inline void alloc_tag_sub_check(union codetag_ref *ref)
> +{
> +     WARN_ONCE(ref && !ref->ct, "alloc_tag was not set\n");
> +}
> +#else
> +static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) {}
> +static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
> +#endif
> +
> +/* Caller should verify both ref and tag to be valid */
> +static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
> +{
> +     ref->ct = &tag->ct;
> +     /*
> +      * We need to increment the call counter every time we have a new
> +      * allocation or when we split a large allocation into smaller ones.
> +      * Each new reference for every sub-allocation needs to increment the
> +      * call counter, because the counter will be decremented when each part is freed.
> +      */
> +     this_cpu_inc(tag->counters->calls);
> +}
> +
> +static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
> +{
> +     alloc_tag_add_check(ref, tag);
> +     if (!ref || !tag)
> +             return;
> +
> +     __alloc_tag_ref_set(ref, tag);
> +     this_cpu_add(tag->counters->bytes, bytes);
> +}
> +
> +static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
> +{
> +     struct alloc_tag *tag;
> +
> +     alloc_tag_sub_check(ref);
> +     if (!ref || !ref->ct)
> +             return;
> +
> +     tag = ct_to_alloc_tag(ref->ct);
> +
> +     this_cpu_sub(tag->counters->bytes, bytes);
> +     this_cpu_dec(tag->counters->calls);
> +
> +     ref->ct = NULL;
> +}
> +
> +#else /* CONFIG_MEM_ALLOC_PROFILING */
> +
> +#define DEFINE_ALLOC_TAG(_alloc_tag)
> +static inline bool mem_alloc_profiling_enabled(void) { return false; }
> +static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
> +                              size_t bytes) {}
> +static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
> +
> +#endif /* CONFIG_MEM_ALLOC_PROFILING */
> +
> +#endif /* _LINUX_ALLOC_TAG_H */
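
To restate the counting scheme in the header above outside of the kernel:
every callsite gets its own per-CPU {bytes, calls} pair, the allocation and
free paths only touch the local pair, and the values are only folded together
when someone reads the report. Below is a small userspace model of that idea;
all names are made up and none of it is kernel API:

#include <stdio.h>
#include <stddef.h>

#define NR_FAKE_CPUS 4

struct fake_tag_counters {
	unsigned long long bytes;
	unsigned long long calls;
};

/* One of these per allocation callsite, like struct alloc_tag above. */
struct fake_tag {
	const char *site;				/* stands in for the codetag file:line */
	struct fake_tag_counters pcpu[NR_FAKE_CPUS];	/* stands in for the __percpu counters */
};

/* Roughly what alloc_tag_add() does: account one allocation on "this" CPU. */
static void fake_tag_add(struct fake_tag *tag, int cpu, size_t bytes)
{
	tag->pcpu[cpu].bytes += bytes;
	tag->pcpu[cpu].calls++;
}

/* Roughly what alloc_tag_sub() does: undo the accounting on free. */
static void fake_tag_sub(struct fake_tag *tag, int cpu, size_t bytes)
{
	tag->pcpu[cpu].bytes -= bytes;
	tag->pcpu[cpu].calls--;
}

/* Roughly what alloc_tag_read() does: fold the per-CPU values into a total. */
static struct fake_tag_counters fake_tag_read(const struct fake_tag *tag)
{
	struct fake_tag_counters v = { 0, 0 };
	int cpu;

	for (cpu = 0; cpu < NR_FAKE_CPUS; cpu++) {
		v.bytes += tag->pcpu[cpu].bytes;
		v.calls += tag->pcpu[cpu].calls;
	}
	return v;
}

int main(void)
{
	struct fake_tag tag = { .site = "example.c:42" };
	struct fake_tag_counters total;

	fake_tag_add(&tag, 0, 4096);	/* allocated on CPU 0 */
	fake_tag_add(&tag, 1, 4096);	/* allocated on CPU 1 */
	fake_tag_sub(&tag, 2, 4096);	/* freed on yet another CPU */
	total = fake_tag_read(&tag);

	/* Prints "example.c:42: 4096 bytes in 1 calls". */
	printf("%s: %llu bytes in %llu calls\n", tag.site, total.bytes, total.calls);
	return 0;
}

The deliberately wrong-CPU free in main() is why the individual per-CPU values
are allowed to wrap; only the folded sum is meaningful, which as far as I can
tell is exactly what alloc_tag_read() relies on.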

View attachment "error-in-alloc_tag" of type "text/plain" (3290 bytes)

Download attachment "randconfig.gz" of type "application/gzip" (43697 bytes)

View attachment "bisect-alloc_tag" of type "text/plain" (2535 bytes)
