lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <b4258ba5ace4c422cdbcde659bb5304408aaec14.camel@gmail.com>
Date: Mon, 27 Oct 2025 11:40:23 -0700
From: Eduard Zingerman <eddyz87@...il.com>
To: Donglin Peng <dolinux.peng@...il.com>, ast@...nel.org
Cc: linux-kernel@...r.kernel.org, bpf@...r.kernel.org, Andrii Nakryiko	
 <andrii.nakryiko@...il.com>, Alan Maguire <alan.maguire@...cle.com>, Song
 Liu	 <song@...nel.org>, pengdonglin <pengdonglin@...omi.com>
Subject: Re: [RFC PATCH v3 1/3] btf: implement BTF type sorting for
 accelerated lookups

On Mon, 2025-10-27 at 21:54 +0800, Donglin Peng wrote:
> This patch introduces a new libbpf interface btf__permute() to reorganize
> BTF types according to a provided mapping. The BTF lookup mechanism is
> enhanced with binary search capability, significantly improving lookup
> performance for large type sets.
> 
> The pahole tool can invoke this interface with a sorted type ID array,
> enabling binary search in both user space and kernel. To share core logic
> between kernel and libbpf, common sorting functionality is implemented
> in a new btf_sort.c source file.
> 
> Cc: Eduard Zingerman <eddyz87@...il.com>
> Cc: Alexei Starovoitov <ast@...nel.org>
> Cc: Andrii Nakryiko <andrii.nakryiko@...il.com>
> Cc: Alan Maguire <alan.maguire@...cle.com>
> Cc: Song Liu <song@...nel.org>
> Co-developed-by: Eduard Zingerman <eddyz87@...il.com>
> Signed-off-by: pengdonglin <pengdonglin@...omi.com>
> Signed-off-by: Donglin Peng <dolinux.peng@...il.com>
> ---
> v2->v3:
> - Remove sorting logic from libbpf and provide a generic btf__permute() interface
> - Remove the search direction patch since sorted lookup provides sufficient performance
>   and changing search order could cause conflicts between BTF and base BTF
> - Include btf_sort.c directly in btf.c to reduce function call overhead
> ---


Could you please split this into two commits:
- one handling BTF sortedness
- another handling btf__permute()
?

>  tools/lib/bpf/btf.c            | 262 ++++++++++++++++++++++++++++++---
>  tools/lib/bpf/btf.h            |  17 +++
>  tools/lib/bpf/btf_sort.c       | 174 ++++++++++++++++++++++
>  tools/lib/bpf/btf_sort.h       |  11 ++
>  tools/lib/bpf/libbpf.map       |   6 +
>  tools/lib/bpf/libbpf_version.h |   2 +-
>  6 files changed, 447 insertions(+), 25 deletions(-)
>  create mode 100644 tools/lib/bpf/btf_sort.c
>  create mode 100644 tools/lib/bpf/btf_sort.h
> 
> diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
> index 18907f0fcf9f..d20bf81a21ce 100644
> --- a/tools/lib/bpf/btf.c
> +++ b/tools/lib/bpf/btf.c
> @@ -23,6 +23,7 @@
>  #include "libbpf_internal.h"
>  #include "hashmap.h"
>  #include "strset.h"
> +#include "btf_sort.h"
>  
>  #define BTF_MAX_NR_TYPES 0x7fffffffU
>  #define BTF_MAX_STR_OFFSET 0x7fffffffU
> @@ -92,6 +93,12 @@ struct btf {
>  	 *   - for split BTF counts number of types added on top of base BTF.
>  	 */
>  	__u32 nr_types;
> +	/* number of sorted and named types in this BTF instance:
> +	 *   - doesn't include special [0] void type;
> +	 *   - for split BTF counts number of sorted and named types added on
> +	 *     top of base BTF.
> +	 */
> +	__u32 nr_sorted_types;
>  	/* if not NULL, points to the base BTF on top of which the current
>  	 * split BTF is based
>  	 */
> @@ -624,6 +631,11 @@ const struct btf *btf__base_btf(const struct btf *btf)
>  	return btf->base_btf;
>  }
>  
> +__u32 btf__start_id(const struct btf *btf)
> +{
> +	return btf->start_id;
> +}
> +
>  /* internal helper returning non-const pointer to a type */
>  struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id)
>  {
> @@ -915,38 +927,16 @@ __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
>  	return libbpf_err(-ENOENT);
>  }
>  
> -static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
> -				   const char *type_name, __u32 kind)
> -{
> -	__u32 i, nr_types = btf__type_cnt(btf);
> -
> -	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
> -		return 0;
> -
> -	for (i = start_id; i < nr_types; i++) {
> -		const struct btf_type *t = btf__type_by_id(btf, i);
> -		const char *name;
> -
> -		if (btf_kind(t) != kind)
> -			continue;
> -		name = btf__name_by_offset(btf, t->name_off);
> -		if (name && !strcmp(type_name, name))
> -			return i;
> -	}
> -
> -	return libbpf_err(-ENOENT);
> -}
> -
>  __s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
>  				 __u32 kind)
>  {
> -	return btf_find_by_name_kind(btf, btf->start_id, type_name, kind);
> +	return _btf_find_by_name_kind(btf, btf->start_id, type_name, kind);
>  }
>  
>  __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
>  			     __u32 kind)
>  {
> -	return btf_find_by_name_kind(btf, 1, type_name, kind);
> +	return _btf_find_by_name_kind(btf, 1, type_name, kind);
>  }
>  
>  static bool btf_is_modifiable(const struct btf *btf)
> @@ -1091,6 +1081,7 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf, b
>  	err = err ?: btf_sanity_check(btf);
>  	if (err)
>  		goto done;
> +	btf_check_sorted(btf, btf->start_id);
>  
>  done:
>  	if (err) {
> @@ -1715,6 +1706,8 @@ static void btf_invalidate_raw_data(struct btf *btf)
>  		free(btf->raw_data_swapped);
>  		btf->raw_data_swapped = NULL;
>  	}
> +	if (btf->nr_sorted_types)
> +		btf->nr_sorted_types = 0;
>  }
>  
>  /* Ensure BTF is ready to be modified (by splitting into a three memory
> @@ -5829,3 +5822,224 @@ int btf__relocate(struct btf *btf, const struct btf *base_btf)
>  		btf->owns_base = false;
>  	return libbpf_err(err);
>  }
> +
> +struct btf_permute;
> +
> +static struct btf_permute *btf_permute_new(struct btf *btf, const struct btf_permute_opts *opts);
> +static void btf_permute_free(struct btf_permute *p);
> +static int btf_permute_shuffle_types(struct btf_permute *p);
> +static int btf_permute_remap_types(struct btf_permute *p);
> +static int btf_permute_remap_type_id(__u32 *type_id, void *ctx);
> +
> +/*
> + * Permute BTF types in-place using the ID mapping from btf_permute_opts->ids.
> + * After permutation, all type ID references are updated to reflect the new
> + * ordering. If a struct btf_ext (representing '.BTF.ext' section) is provided,
> + * type ID references within the BTF extension data are also updated.
> + */
> +int btf__permute(struct btf *btf, const struct btf_permute_opts *opts)
> +{
> +	struct btf_permute *p;

Maybe allocate this on stack?
(Or just use btf_permute_opts directly and have 'map' as local variable?).

> +	int err = 0;
> +
> +	if (!OPTS_VALID(opts, btf_permute_opts))
> +		return libbpf_err(-EINVAL);
> +
> +	p = btf_permute_new(btf, opts);
> +	if (!p) {
> +		pr_debug("btf_permute_new failed: %ld\n", PTR_ERR(p));
> +		return libbpf_err(-EINVAL);
> +	}
> +
> +	if (btf_ensure_modifiable(btf)) {
> +		err = -ENOMEM;
> +		goto done;
> +	}
> +
> +	err = btf_permute_shuffle_types(p);
> +	if (err < 0) {
> +		pr_debug("btf_permute_shuffle_types failed: %s\n", errstr(err));
> +		goto done;
> +	}
> +	err = btf_permute_remap_types(p);
> +	if (err) {
> +		pr_debug("btf_permute_remap_types failed: %s\n", errstr(err));
> +		goto done;
> +	}
> +
> +done:
> +	btf_permute_free(p);
> +	return libbpf_err(err);
> +}
> +
> +struct btf_permute {
> +	/* .BTF section to be permuted in-place */
> +	struct btf *btf;
> +	struct btf_ext *btf_ext;
> +	/* Array of type IDs used for permutation. The array length must equal
> +	 * the number of types in the BTF being permuted, excluding the special
> +	 * void type at ID 0. For split BTF, the length corresponds to the
> +	 * number of types added on top of the base BTF.
> +	 */
> +	__u32 *ids;
> +	/* Array of type IDs used to map from original type ID to a new permuted
> +	 * type ID, its length equals to the above ids */
> +	__u32 *map;
> +};
> +
> +static struct btf_permute *btf_permute_new(struct btf *btf, const struct btf_permute_opts *opts)
> +{
> +	struct btf_permute *p = calloc(1, sizeof(struct btf_permute));
> +	__u32 *map;
> +	int err = 0;
> +
> +	if (!p)
> +		return ERR_PTR(-ENOMEM);
> +
> +	p->btf = btf;
> +	p->btf_ext = OPTS_GET(opts, btf_ext, NULL);
> +	p->ids = OPTS_GET(opts, ids, NULL);
> +	if (!p->ids) {
> +		err = -EINVAL;
> +		goto done;
> +	}
> +
> +	map = calloc(btf->nr_types, sizeof(*map));
> +	if (!map) {
> +		err = -ENOMEM;
> +		goto done;
> +	}
> +	p->map = map;
> +
> +done:
> +	if (err) {
> +		btf_permute_free(p);
> +		return ERR_PTR(err);
> +	}
> +
> +	return p;
> +}
> +
> +static void btf_permute_free(struct btf_permute *p)
> +{
> +	if (p->map) {
> +		free(p->map);
> +		p->map = NULL;
> +	}
> +	free(p);
> +}
> +
> +/*
> + * Shuffle BTF types.
> + *
> + * Rearranges types according to the permutation map in p->ids. The p->map
> + * array stores the mapping from original type IDs to new shuffled IDs,
> + * which is used in the next phase to update type references.
> + */
> +static int btf_permute_shuffle_types(struct btf_permute *p)
> +{
> +	struct btf *btf = p->btf;
> +	const struct btf_type *t;
> +	__u32 *new_offs = NULL;
> +	void *l, *new_types = NULL;
> +	int i, id, len, err;
> +
> +	new_offs = calloc(btf->nr_types, sizeof(*new_offs));
> +	new_types = calloc(btf->hdr->type_len, 1);
> +	if (!new_types || !new_offs) {
> +		err = -ENOMEM;
> +		goto out_err;
> +	}
> +
> +	l = new_types;
> +	for (i = 0; i < btf->nr_types; i++) {
> +		id = p->ids[i];
> +		t = btf__type_by_id(btf, id);
> +		len = btf_type_size(t);
> +		memcpy(l, t, len);
> +		new_offs[i] = l - new_types;
> +		p->map[id - btf->start_id] = btf->start_id + i;
> +		l += len;
> +	}
> +
> +	free(btf->types_data);
> +	free(btf->type_offs);
> +	btf->types_data = new_types;
> +	btf->type_offs = new_offs;
> +	return 0;
> +
> +out_err:
> +	return err;
> +}
> +
> +/*
> + * Remap referenced type IDs into permuted type IDs.
> + *
> + * After BTF types are permuted, their final type IDs may differ from original
> + * ones. The map from original to a corresponding permuted type ID is stored
> + * in btf_permute->map and is populated during shuffle phase. During remapping
> + * phase we are rewriting all type IDs  referenced from any BTF type (e.g.,
> + * struct fields, func proto args, etc) to their final deduped type IDs.
> + */
> +static int btf_permute_remap_types(struct btf_permute *p)
> +{
> +	struct btf *btf = p->btf;
> +	int i, r;
> +
> +	for (i = 0; i < btf->nr_types; i++) {
> +		struct btf_type *t = btf_type_by_id(btf, btf->start_id + i);
> +		struct btf_field_iter it;
> +		__u32 *type_id;
> +
> +		r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
> +		if (r)
> +			return r;
> +
> +		while ((type_id = btf_field_iter_next(&it))) {

The code below repeats the logic in btf_permute_remap_type_id(), please
use it here. Better yet, generalize btf_dedup_remap_types() and avoid
the copy-paste altogether.

> +			__u32 new_id = *type_id;
> +
> +			/* skip references that point into the base BTF */
> +			if (new_id < btf->start_id)
> +				continue;
> +
> +			new_id = p->map[new_id - btf->start_id];
> +			if (new_id > BTF_MAX_NR_TYPES)
> +				return -EINVAL;
> +
> +			*type_id = new_id;
> +		}
> +	}
> +
> +	if (!p->btf_ext)
> +		return 0;
> +
> +	r = btf_ext_visit_type_ids(p->btf_ext, btf_permute_remap_type_id, p);
> +	if (r)
> +		return r;
> +
> +	return 0;
> +}
> +
> +static int btf_permute_remap_type_id(__u32 *type_id, void *ctx)
> +{
> +	struct btf_permute *p = ctx;
> +	__u32 new_type_id = *type_id;
> +
> +	/* skip references that point into the base BTF */
> +	if (new_type_id < p->btf->start_id)
> +		return 0;
> +
> +	new_type_id = p->map[*type_id - p->btf->start_id];
> +	if (new_type_id > BTF_MAX_NR_TYPES)
> +		return -EINVAL;
> +
> +	*type_id = new_type_id;
> +	return 0;
> +}
> +
> +/*
> + * btf_sort.c is included directly to avoid function call overhead
> + * when accessing BTF private data, as this file is shared between
> + * libbpf and kernel and may be called frequently.
> + */
> +#include "./btf_sort.c"
> diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
> index ccfd905f03df..3aac0a729bd5 100644
> --- a/tools/lib/bpf/btf.h
> +++ b/tools/lib/bpf/btf.h
> @@ -149,6 +149,7 @@ LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf,
>  					const char *type_name, __u32 kind);
>  LIBBPF_API __u32 btf__type_cnt(const struct btf *btf);
>  LIBBPF_API const struct btf *btf__base_btf(const struct btf *btf);
> +LIBBPF_API __u32 btf__start_id(const struct btf *btf);
>  LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
>  						  __u32 id);
>  LIBBPF_API size_t btf__pointer_size(const struct btf *btf);
> @@ -273,6 +274,22 @@ LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts);
>   */
>  LIBBPF_API int btf__relocate(struct btf *btf, const struct btf *base_btf);
>  
> +struct btf_permute_opts {
> +	size_t sz;
> +	/* optional .BTF.ext info along the main BTF info */
> +	struct btf_ext *btf_ext;
> +	/* Array of type IDs used for permutation. The array length must equal
> +	 * the number of types in the BTF being permuted, excluding the special
> +	 * void type at ID 0. For split BTF, the length corresponds to the
> +	 * number of types added on top of the base BTF.
> +	 */
> +	__u32 *ids;
> +	size_t :0;
> +};
> +#define btf_permute_opts__last_field ids
> +
> +LIBBPF_API int btf__permute(struct btf *btf, const struct btf_permute_opts *opts);
> +
>  struct btf_dump;
>  
>  struct btf_dump_opts {
> diff --git a/tools/lib/bpf/btf_sort.c b/tools/lib/bpf/btf_sort.c
> new file mode 100644
> index 000000000000..553c5f5e61bd
> --- /dev/null
> +++ b/tools/lib/bpf/btf_sort.c
> @@ -0,0 +1,174 @@
> +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
> +/* Copyright (c) 2025 Xiaomi */
> +
> +#ifndef _GNU_SOURCE
> +#define _GNU_SOURCE
> +#endif
> +
> +#ifdef __KERNEL__
> +
> +#define btf_type_by_id				(struct btf_type *)btf_type_by_id
> +#define btf__str_by_offset			btf_str_by_offset
> +#define btf__type_cnt				btf_nr_types
> +#define btf__start_id				btf_start_id
> +#define libbpf_err(x)				x
> +
> +#else
> +
> +#define notrace
> +
> +#endif /* __KERNEL__ */
> +
> +/*
> + * Skip the sorted check if the number of BTF types is below this threshold.
> + * The value 4 is chosen based on the theoretical break-even point where
> + * linear search (N/2) and binary search (LOG2(N)) require approximately
> + * the same number of comparisons.
> + */
> +#define BTF_CHECK_SORT_THRESHOLD  4
> +
> +struct btf;
> +
> +static int cmp_btf_kind_name(int ka, const char *na, int kb, const char *nb)
> +{
> +	return (ka - kb) ?: strcmp(na, nb);
> +}
> +
> +/*
> + * Sort BTF types by kind and name in ascending order, placing named types
> + * before anonymous ones.
> + */
> +static int btf_compare_type_kinds_names(const void *a, const void *b, void *priv)
> +{
> +	struct btf *btf = (struct btf *)priv;
> +	struct btf_type *ta = btf_type_by_id(btf, *(__u32 *)a);
> +	struct btf_type *tb = btf_type_by_id(btf, *(__u32 *)b);
> +	const char *na, *nb;
> +	bool anon_a, anon_b;
> +	int ka, kb;
> +
> +	na = btf__str_by_offset(btf, ta->name_off);
> +	nb = btf__str_by_offset(btf, tb->name_off);
> +	anon_a = str_is_empty(na);
> +	anon_b = str_is_empty(nb);
> +
> +	/* ta w/o name is greater than tb */
> +	if (anon_a && !anon_b)
> +		return 1;
> +	/* tb w/o name is smaller than ta */
> +	if (!anon_a && anon_b)
> +		return -1;
> +
> +	ka = btf_kind(ta);
> +	kb = btf_kind(tb);
> +
> +	if (anon_a && anon_b)
> +		return ka - kb;
> +
> +	return cmp_btf_kind_name(ka, na, kb, nb);
> +}
> +
> +static __s32 notrace __btf_find_by_name_kind(const struct btf *btf, int start_id,
> +				   const char *type_name, __u32 kind)
> +{
> +	const struct btf_type *t;
> +	const char *tname;
> +	int err = -ENOENT;
> +
> +	if (!btf)
> +		goto out;
> +
> +	if (start_id < btf__start_id(btf)) {
> +		err = __btf_find_by_name_kind(btf->base_btf, start_id, type_name, kind);
> +		if (err == -ENOENT)
> +			start_id = btf__start_id(btf);
> +	}
> +
> +	if (err == -ENOENT) {
> +		if (btf->nr_sorted_types) {
> +			/* binary search */
> +			__s32 start, end, mid, found = -1;
> +			int ret;
> +
> +			start = start_id;
> +			end = start + btf->nr_sorted_types - 1;
> +			/* found the leftmost btf_type that matches */
> +			while(start <= end) {
> +				mid = start + (end - start) / 2;
> +				t = btf_type_by_id(btf, mid);
> +				tname = btf__str_by_offset(btf, t->name_off);
> +				ret = cmp_btf_kind_name(BTF_INFO_KIND(t->info), tname,
> +							kind, type_name);
> +				if (ret < 0)
> +					start = mid + 1;
> +				else {
> +					if (ret == 0)
> +						found = mid;
> +					end = mid - 1;
> +				}
> +			}
> +
> +			if (found != -1)
> +				return found;
> +		} else {
> +			/* linear search */
> +			__u32 i, total;
> +
> +			total = btf__type_cnt(btf);
> +			for (i = start_id; i < total; i++) {
> +				t = btf_type_by_id(btf, i);
> +				if (btf_kind(t) != kind)
> +					continue;
> +
> +				tname = btf__str_by_offset(btf, t->name_off);
> +				if (tname && !strcmp(tname, type_name))
> +					return i;
> +			}
> +		}
> +	}
> +
> +out:
> +	return err;
> +}
> +
> +/* start_id specifies the starting BTF to search */
> +static __s32 notrace _btf_find_by_name_kind(const struct btf *btf, int start_id,
> +				   const char *type_name, __u32 kind)
> +{
> +	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
> +		return 0;
> +
> +	return libbpf_err(__btf_find_by_name_kind(btf, start_id, type_name, kind));
> +}
> +
> +static void btf_check_sorted(struct btf *btf, int start_id)
> +{
> +	const struct btf_type *t;
> +	int i, n, nr_sorted_types;
> +
> +	n = btf__type_cnt(btf);
> +	if (btf->nr_types < BTF_CHECK_SORT_THRESHOLD)
> +		return;
> +
> +	n--;
> +	nr_sorted_types = 0;
> +	for (i = start_id; i < n; i++) {
> +		int k = i + 1;
> +
> +		if (btf_compare_type_kinds_names(&i, &k, btf) > 0)
> +			return;
> +
> +		t = btf_type_by_id(btf, k);
> +		if (!str_is_empty(btf__str_by_offset(btf, t->name_off)))
> +			nr_sorted_types++;
> +	}
> +
> +	t = btf_type_by_id(btf, start_id);
> +	if (!str_is_empty(btf__str_by_offset(btf, t->name_off)))
> +		nr_sorted_types++;
> +
> +	if (nr_sorted_types < BTF_CHECK_SORT_THRESHOLD)
> +		return;
> +
> +	btf->nr_sorted_types = nr_sorted_types;
> +}
> diff --git a/tools/lib/bpf/btf_sort.h b/tools/lib/bpf/btf_sort.h
> new file mode 100644
> index 000000000000..4dedc67286d9
> --- /dev/null
> +++ b/tools/lib/bpf/btf_sort.h
> @@ -0,0 +1,11 @@
> +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
> +/* Copyright (c) 2025 Xiaomi */
> +
> +#ifndef __BTF_SORT_H
> +#define __BTF_SORT_H
> +
> +static __s32 _btf_find_by_name_kind(const struct btf *btf, int start_id, const char *type_name, __u32 kind);
> +static int btf_compare_type_kinds_names(const void *a, const void *b, void *priv);
> +static void btf_check_sorted(struct btf *btf, int start_id);
> +
> +#endif
> diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
> index 8ed8749907d4..8ce7b1d08650 100644
> --- a/tools/lib/bpf/libbpf.map
> +++ b/tools/lib/bpf/libbpf.map
> @@ -452,3 +452,9 @@ LIBBPF_1.7.0 {
>  		bpf_map__set_exclusive_program;
>  		bpf_map__exclusive_program;
>  } LIBBPF_1.6.0;
> +
> +LIBBPF_1.8.0 {

I think we are still at 1.7.

> +	global:
> +		btf__start_id;
> +		btf__permute;
> +} LIBBPF_1.7.0;
> diff --git a/tools/lib/bpf/libbpf_version.h b/tools/lib/bpf/libbpf_version.h
> index 99331e317dee..c446c0cd8cf9 100644
> --- a/tools/lib/bpf/libbpf_version.h
> +++ b/tools/lib/bpf/libbpf_version.h
> @@ -4,6 +4,6 @@
>  #define __LIBBPF_VERSION_H
>  
>  #define LIBBPF_MAJOR_VERSION 1
> -#define LIBBPF_MINOR_VERSION 7
> +#define LIBBPF_MINOR_VERSION 8
>  
>  #endif /* __LIBBPF_VERSION_H */

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ