[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <YMz80O2mkEWyl2Xx@yury-ThinkPad>
Date: Fri, 18 Jun 2021 13:06:40 -0700
From: Yury Norov <yury.norov@...il.com>
To: Andy Shevchenko <andriy.shevchenko@...ux.intel.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Rasmus Villemoes <linux@...musvillemoes.dk>,
Ian Rogers <irogers@...gle.com>,
Arnaldo Carvalho de Melo <acme@...hat.com>,
Leo Yan <leo.yan@...aro.org>, Jiri Olsa <jolsa@...hat.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Ben Gardon <bgardon@...gle.com>, Peter Xu <peterx@...hat.com>,
linux-kernel@...r.kernel.org, linux-perf-users@...r.kernel.org,
kvm@...r.kernel.org, linux-kselftest@...r.kernel.org,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Namhyung Kim <namhyung@...nel.org>,
Shuah Khan <shuah@...nel.org>,
Yury Norov <ynorov@...iumnetworks.com>
Subject: Re: [PATCH v1 1/1] tools: Rename bitmap_alloc() to bitmap_zalloc()
On Fri, Jun 18, 2021 at 05:38:54PM +0300, Andy Shevchenko wrote:
> Rename bitmap_alloc() to bitmap_zalloc() in tools to follow new coming
> bitmap API extension in kernel.
Can you please tell me more about the upcoming extensions?
Anyway,
Acked-by: Yury Norov <ynorov@...iumnetworks.com>
All bitmap patches together can be found here:
https://github.com/norov/linux/commits/bm-f1
> No functional changes intended.
>
> Suggested-by: Yury Norov <ynorov@...iumnetworks.com>
> Signed-off-by: Andy Shevchenko <andriy.shevchenko@...ux.intel.com>
> ---
> tools/include/linux/bitmap.h | 4 ++--
> tools/perf/bench/find-bit-bench.c | 2 +-
> tools/perf/builtin-c2c.c | 6 +++---
> tools/perf/builtin-record.c | 2 +-
> tools/perf/tests/bitmap.c | 2 +-
> tools/perf/tests/mem2node.c | 2 +-
> tools/perf/util/affinity.c | 4 ++--
> tools/perf/util/header.c | 4 ++--
> tools/perf/util/metricgroup.c | 2 +-
> tools/perf/util/mmap.c | 4 ++--
> tools/testing/selftests/kvm/dirty_log_perf_test.c | 2 +-
> tools/testing/selftests/kvm/dirty_log_test.c | 4 ++--
> tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c | 2 +-
> 13 files changed, 20 insertions(+), 20 deletions(-)
>
> diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
> index 330dbf7509cc..7eae64eb5c80 100644
> --- a/tools/include/linux/bitmap.h
> +++ b/tools/include/linux/bitmap.h
> @@ -109,10 +109,10 @@ static inline int test_and_clear_bit(int nr, unsigned long *addr)
> }
>
> /**
> - * bitmap_alloc - Allocate bitmap
> + * bitmap_zalloc - Allocate bitmap
> * @nbits: Number of bits
> */
> -static inline unsigned long *bitmap_alloc(int nbits)
> +static inline unsigned long *bitmap_zalloc(int nbits)
> {
> return calloc(1, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
> }
> diff --git a/tools/perf/bench/find-bit-bench.c b/tools/perf/bench/find-bit-bench.c
> index 73b5bcc5946a..22b5cfe97023 100644
> --- a/tools/perf/bench/find-bit-bench.c
> +++ b/tools/perf/bench/find-bit-bench.c
> @@ -54,7 +54,7 @@ static bool asm_test_bit(long nr, const unsigned long *addr)
>
> static int do_for_each_set_bit(unsigned int num_bits)
> {
> - unsigned long *to_test = bitmap_alloc(num_bits);
> + unsigned long *to_test = bitmap_zalloc(num_bits);
> struct timeval start, end, diff;
> u64 runtime_us;
> struct stats fb_time_stats, tb_time_stats;
> diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
> index e3b9d63077ef..a17726ff85a9 100644
> --- a/tools/perf/builtin-c2c.c
> +++ b/tools/perf/builtin-c2c.c
> @@ -137,11 +137,11 @@ static void *c2c_he_zalloc(size_t size)
> if (!c2c_he)
> return NULL;
>
> - c2c_he->cpuset = bitmap_alloc(c2c.cpus_cnt);
> + c2c_he->cpuset = bitmap_zalloc(c2c.cpus_cnt);
> if (!c2c_he->cpuset)
> return NULL;
>
> - c2c_he->nodeset = bitmap_alloc(c2c.nodes_cnt);
> + c2c_he->nodeset = bitmap_zalloc(c2c.nodes_cnt);
> if (!c2c_he->nodeset)
> return NULL;
>
> @@ -2045,7 +2045,7 @@ static int setup_nodes(struct perf_session *session)
> struct perf_cpu_map *map = n[node].map;
> unsigned long *set;
>
> - set = bitmap_alloc(c2c.cpus_cnt);
> + set = bitmap_zalloc(c2c.cpus_cnt);
> if (!set)
> return -ENOMEM;
>
> diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
> index 84803abeb942..978b6bbd06e4 100644
> --- a/tools/perf/builtin-record.c
> +++ b/tools/perf/builtin-record.c
> @@ -2766,7 +2766,7 @@ int cmd_record(int argc, const char **argv)
>
> if (rec->opts.affinity != PERF_AFFINITY_SYS) {
> rec->affinity_mask.nbits = cpu__max_cpu();
> - rec->affinity_mask.bits = bitmap_alloc(rec->affinity_mask.nbits);
> + rec->affinity_mask.bits = bitmap_zalloc(rec->affinity_mask.nbits);
> if (!rec->affinity_mask.bits) {
> pr_err("Failed to allocate thread mask for %zd cpus\n", rec->affinity_mask.nbits);
> err = -ENOMEM;
> diff --git a/tools/perf/tests/bitmap.c b/tools/perf/tests/bitmap.c
> index 96c137360918..12b805efdca0 100644
> --- a/tools/perf/tests/bitmap.c
> +++ b/tools/perf/tests/bitmap.c
> @@ -14,7 +14,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
> unsigned long *bm = NULL;
> int i;
>
> - bm = bitmap_alloc(nbits);
> + bm = bitmap_zalloc(nbits);
>
> if (map && bm) {
> for (i = 0; i < map->nr; i++)
> diff --git a/tools/perf/tests/mem2node.c b/tools/perf/tests/mem2node.c
> index a258bd51f1a4..e4d0d58b97f8 100644
> --- a/tools/perf/tests/mem2node.c
> +++ b/tools/perf/tests/mem2node.c
> @@ -27,7 +27,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
> unsigned long *bm = NULL;
> int i;
>
> - bm = bitmap_alloc(nbits);
> + bm = bitmap_zalloc(nbits);
>
> if (map && bm) {
> for (i = 0; i < map->nr; i++) {
> diff --git a/tools/perf/util/affinity.c b/tools/perf/util/affinity.c
> index a5e31f826828..7b12bd7a3080 100644
> --- a/tools/perf/util/affinity.c
> +++ b/tools/perf/util/affinity.c
> @@ -25,11 +25,11 @@ int affinity__setup(struct affinity *a)
> {
> int cpu_set_size = get_cpu_set_size();
>
> - a->orig_cpus = bitmap_alloc(cpu_set_size * 8);
> + a->orig_cpus = bitmap_zalloc(cpu_set_size * 8);
> if (!a->orig_cpus)
> return -1;
> sched_getaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
> - a->sched_cpus = bitmap_alloc(cpu_set_size * 8);
> + a->sched_cpus = bitmap_zalloc(cpu_set_size * 8);
> if (!a->sched_cpus) {
> zfree(&a->orig_cpus);
> return -1;
> diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
> index aa1e42518d37..c67c03dd3db2 100644
> --- a/tools/perf/util/header.c
> +++ b/tools/perf/util/header.c
> @@ -277,7 +277,7 @@ static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
> if (ret)
> return ret;
>
> - set = bitmap_alloc(size);
> + set = bitmap_zalloc(size);
> if (!set)
> return -ENOMEM;
>
> @@ -1259,7 +1259,7 @@ static int memory_node__read(struct memory_node *n, unsigned long idx)
>
> size++;
>
> - n->set = bitmap_alloc(size);
> + n->set = bitmap_zalloc(size);
> if (!n->set) {
> closedir(dir);
> return -ENOMEM;
> diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
> index 8336dd8e8098..f24c6998d26c 100644
> --- a/tools/perf/util/metricgroup.c
> +++ b/tools/perf/util/metricgroup.c
> @@ -313,7 +313,7 @@ static int metricgroup__setup_events(struct list_head *groups,
> struct evsel *evsel, *tmp;
> unsigned long *evlist_used;
>
> - evlist_used = bitmap_alloc(perf_evlist->core.nr_entries);
> + evlist_used = bitmap_zalloc(perf_evlist->core.nr_entries);
> if (!evlist_used)
> return -ENOMEM;
>
> diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
> index ab7108d22428..512dc8b9c168 100644
> --- a/tools/perf/util/mmap.c
> +++ b/tools/perf/util/mmap.c
> @@ -106,7 +106,7 @@ static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
> data = map->aio.data[idx];
> mmap_len = mmap__mmap_len(map);
> node_index = cpu__get_node(cpu);
> - node_mask = bitmap_alloc(node_index + 1);
> + node_mask = bitmap_zalloc(node_index + 1);
> if (!node_mask) {
> pr_err("Failed to allocate node mask for mbind: error %m\n");
> return -1;
> @@ -258,7 +258,7 @@ static void build_node_mask(int node, struct mmap_cpu_mask *mask)
> static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
> {
> map->affinity_mask.nbits = cpu__max_cpu();
> - map->affinity_mask.bits = bitmap_alloc(map->affinity_mask.nbits);
> + map->affinity_mask.bits = bitmap_zalloc(map->affinity_mask.nbits);
> if (!map->affinity_mask.bits)
> return -1;
>
> diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
> index 04a2641261be..fbf0c2c1fbc9 100644
> --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
> +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
> @@ -121,7 +121,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
> guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
> guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
> host_num_pages = vm_num_host_pages(mode, guest_num_pages);
> - bmap = bitmap_alloc(host_num_pages);
> + bmap = bitmap_zalloc(host_num_pages);
>
> if (dirty_log_manual_caps) {
> cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
> diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
> index 81edbd23d371..ef641b0ff125 100644
> --- a/tools/testing/selftests/kvm/dirty_log_test.c
> +++ b/tools/testing/selftests/kvm/dirty_log_test.c
> @@ -750,8 +750,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
>
> pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
>
> - bmap = bitmap_alloc(host_num_pages);
> - host_bmap_track = bitmap_alloc(host_num_pages);
> + bmap = bitmap_zalloc(host_num_pages);
> + host_bmap_track = bitmap_zalloc(host_num_pages);
>
> /* Add an extra memory slot for testing dirty logging */
> vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
> diff --git a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
> index 537de1068554..a2f1bab6c234 100644
> --- a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
> @@ -111,7 +111,7 @@ int main(int argc, char *argv[])
> nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096, 0);
> nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096, 0);
>
> - bmap = bitmap_alloc(TEST_MEM_PAGES);
> + bmap = bitmap_zalloc(TEST_MEM_PAGES);
> host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
>
> while (!done) {
> --
> 2.30.2
Powered by blists - more mailing lists