Message-ID: <20220718132938.1031864-3-pulehui@huawei.com>
Date: Mon, 18 Jul 2022 21:29:35 +0800
From: Pu Lehui <pulehui@...wei.com>
To: <bpf@...r.kernel.org>, <netdev@...r.kernel.org>,
<linux-kernel@...r.kernel.org>
CC: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Andrii Nakryiko <andrii@...nel.org>,
Quentin Monnet <quentin@...valent.com>,
Martin KaFai Lau <kafai@...com>,
Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
John Fastabend <john.fastabend@...il.com>,
KP Singh <kpsingh@...nel.org>,
"Jean-Philippe Brucker" <jean-philippe@...aro.org>,
Pu Lehui <pulehui@...wei.com>
Subject: [PATCH bpf-next v2 2/5] libbpf: Unify memory address casting operation style
Memory addresses are conceptually unsigned, so casting them through
(unsigned long) makes more sense than (long). Unify the casting style
across libbpf for conceptual uniformity. No functional change.
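For illustration only (not part of this patch), a minimal sketch of the
pattern being unified. Kernel-provided structs such as bpf_prog_info
carry user-space buffer addresses in __u64 fields, and libbpf recovers
the pointer by narrowing through a pointer-width integer. On the
supported targets (long) and (unsigned long) produce the same bits;
(unsigned long) simply matches the unsigned nature of an address. The
struct and helper below are hypothetical stand-ins, not libbpf code:

    #include <stdint.h>

    /* Hypothetical stand-in for a kernel info struct that reports
     * the address of a user-space buffer in a 64-bit field.
     */
    struct example_info {
            uint64_t line_info;
    };

    /* Recover the pointer by casting through a pointer-width
     * unsigned integer, mirroring the style this patch unifies.
     */
    static inline const void *example_addr(const struct example_info *info)
    {
            return (const void *)(unsigned long)info->line_info;
    }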
Signed-off-by: Pu Lehui <pulehui@...wei.com>
---
tools/lib/bpf/bpf_prog_linfo.c | 8 ++++----
tools/lib/bpf/btf.c | 7 ++++---
tools/lib/bpf/skel_internal.h | 4 ++--
tools/lib/bpf/usdt.c | 4 ++--
4 files changed, 12 insertions(+), 11 deletions(-)
diff --git a/tools/lib/bpf/bpf_prog_linfo.c b/tools/lib/bpf/bpf_prog_linfo.c
index 5c503096ef43..5cf41a563ef5 100644
--- a/tools/lib/bpf/bpf_prog_linfo.c
+++ b/tools/lib/bpf/bpf_prog_linfo.c
@@ -127,7 +127,7 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info)
prog_linfo->raw_linfo = malloc(data_sz);
if (!prog_linfo->raw_linfo)
goto err_free;
- memcpy(prog_linfo->raw_linfo, (void *)(long)info->line_info, data_sz);
+ memcpy(prog_linfo->raw_linfo, (void *)(unsigned long)info->line_info, data_sz);
nr_jited_func = info->nr_jited_ksyms;
if (!nr_jited_func ||
@@ -148,7 +148,7 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info)
if (!prog_linfo->raw_jited_linfo)
goto err_free;
memcpy(prog_linfo->raw_jited_linfo,
- (void *)(long)info->jited_line_info, data_sz);
+ (void *)(unsigned long)info->jited_line_info, data_sz);
/* Number of jited_line_info per jited func */
prog_linfo->nr_jited_linfo_per_func = malloc(nr_jited_func *
@@ -166,8 +166,8 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info)
goto err_free;
if (dissect_jited_func(prog_linfo,
- (__u64 *)(long)info->jited_ksyms,
- (__u32 *)(long)info->jited_func_lens))
+ (__u64 *)(unsigned long)info->jited_ksyms,
+ (__u32 *)(unsigned long)info->jited_func_lens))
goto err_free;
return prog_linfo;
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 2d14f1a52d7a..61e2ac2b6891 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1568,7 +1568,7 @@ static int btf_rewrite_str(__u32 *str_off, void *ctx)
return 0;
if (p->str_off_map &&
- hashmap__find(p->str_off_map, (void *)(long)*str_off, &mapped_off)) {
+ hashmap__find(p->str_off_map, (void *)(unsigned long)*str_off, &mapped_off)) {
*str_off = (__u32)(long)mapped_off;
return 0;
}
@@ -1581,7 +1581,8 @@ static int btf_rewrite_str(__u32 *str_off, void *ctx)
* performing expensive string comparisons.
*/
if (p->str_off_map) {
- err = hashmap__append(p->str_off_map, (void *)(long)*str_off, (void *)(long)off);
+ err = hashmap__append(p->str_off_map, (void *)(unsigned long)*str_off,
+ (void *)(unsigned long)off);
if (err)
return err;
}
@@ -3133,7 +3134,7 @@ static long hash_combine(long h, long value)
static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
{
return hashmap__append(d->dedup_table,
- (void *)hash, (void *)(long)type_id);
+ (void *)hash, (void *)(unsigned long)type_id);
}
static int btf_dedup_hypot_map_add(struct btf_dedup *d,
diff --git a/tools/lib/bpf/skel_internal.h b/tools/lib/bpf/skel_internal.h
index bd6f4505e7b1..e2803e7cd6d9 100644
--- a/tools/lib/bpf/skel_internal.h
+++ b/tools/lib/bpf/skel_internal.h
@@ -146,7 +146,7 @@ static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int
struct bpf_map *map;
void *addr = NULL;
- kvfree((void *) (long) *init_val);
+ kvfree((void *) (unsigned long) *init_val);
*init_val = ~0ULL;
/* At this point bpf_load_and_run() finished without error and
@@ -197,7 +197,7 @@ static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int
{
void *addr;
- addr = mmap((void *) (long) *init_val, mmap_sz, flags, MAP_SHARED | MAP_FIXED, fd, 0);
+ addr = mmap((void *) (unsigned long) *init_val, mmap_sz, flags, MAP_SHARED | MAP_FIXED, fd, 0);
if (addr == (void *) -1)
return NULL;
return addr;
diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c
index d18e37982344..3e54b47f9e1b 100644
--- a/tools/lib/bpf/usdt.c
+++ b/tools/lib/bpf/usdt.c
@@ -915,7 +915,7 @@ static int allocate_spec_id(struct usdt_manager *man, struct hashmap *specs_hash
*spec_id = man->free_spec_ids[man->free_spec_cnt - 1];
/* cache spec ID for current spec string for future lookups */
- err = hashmap__add(specs_hash, target->spec_str, (void *)(long)*spec_id);
+ err = hashmap__add(specs_hash, target->spec_str, (void *)(unsigned long)*spec_id);
if (err)
return err;
@@ -928,7 +928,7 @@ static int allocate_spec_id(struct usdt_manager *man, struct hashmap *specs_hash
*spec_id = man->next_free_spec_id;
/* cache spec ID for current spec string for future lookups */
- err = hashmap__add(specs_hash, target->spec_str, (void *)(long)*spec_id);
+ err = hashmap__add(specs_hash, target->spec_str, (void *)(unsigned long)*spec_id);
if (err)
return err;
--
2.25.1