Message-ID: <20190629112453.2c46a114@cakuba.netronome.com>
Date: Sat, 29 Jun 2019 11:24:53 -0700
From: Jakub Kicinski <jakub.kicinski@...ronome.com>
To: Andrii Nakryiko <andriin@...com>
Cc: <andrii.nakryiko@...il.com>, <ast@...com>, <daniel@...earbox.net>,
<bpf@...r.kernel.org>, <netdev@...r.kernel.org>,
<kernel-team@...com>, <songliubraving@...com>
Subject: Re: [PATCH v3 bpf-next 4/4] tools/bpftool: switch map event_pipe to
libbpf's perf_buffer
On Fri, 28 Jun 2019 22:53:09 -0700, Andrii Nakryiko wrote:
> map_info_len = sizeof(map_info);
> map_fd = map_parse_fd_and_info(&argc, &argv, &map_info, &map_info_len);
> - if (map_fd < 0)
> + if (map_fd < 0) {
> + p_err("failed to get map info");
Can't do that here; map_parse_fd_and_info() prints an error already,
and we can't have multiple errors in the JSON output.
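IOW I'd just keep the bare check as it was, rough/untested sketch:

	map_fd = map_parse_fd_and_info(&argc, &argv, &map_info,
				       &map_info_len);
	if (map_fd < 0)
		return -1;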
> return -1;
> + }
>
> if (map_info.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
> p_err("map is not a perf event array");
> @@ -205,7 +157,7 @@ int do_event_pipe(int argc, char **argv)
> char *endptr;
>
> NEXT_ARG();
> - cpu = strtoul(*argv, &endptr, 0);
> + ctx.cpu = strtoul(*argv, &endptr, 0);
> if (*endptr) {
> p_err("can't parse %s as CPU ID", **argv);
> goto err_close_map;
> @@ -216,7 +168,7 @@ int do_event_pipe(int argc, char **argv)
> char *endptr;
>
> NEXT_ARG();
> - index = strtoul(*argv, &endptr, 0);
> + ctx.idx = strtoul(*argv, &endptr, 0);
> if (*endptr) {
> p_err("can't parse %s as index", **argv);
> goto err_close_map;
> @@ -228,45 +180,32 @@ int do_event_pipe(int argc, char **argv)
> goto err_close_map;
> }
>
> - do_all = false;
> + ctx.all_cpus = false;
> }
>
> - if (!do_all) {
> - if (index == -1 || cpu == -1) {
> + if (!ctx.all_cpus) {
> + if (ctx.idx == -1 || ctx.cpu == -1) {
> p_err("cpu and index must be specified together");
> goto err_close_map;
Now that I look at err, it seems we're missing an err = -1 assignment
here? But...
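Rough sketch of what I have in mind (assuming the err-based return at
the end stays):

	if (ctx.idx == -1 || ctx.cpu == -1) {
		p_err("cpu and index must be specified together");
		err = -1;
		goto err_close_map;
	}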
> }
> -
> - nfds = 1;
> } else {
> - nfds = min(get_possible_cpus(), map_info.max_entries);
> - cpu = 0;
> - index = 0;
> + ctx.cpu = 0;
> + ctx.idx = 0;
> }
>
> - rings = calloc(nfds, sizeof(rings[0]));
> - if (!rings)
> + opts.attr = &perf_attr;
> + opts.event_cb = print_bpf_output;
> + opts.ctx = &ctx;
> + opts.cpu_cnt = ctx.all_cpus ? 0 : 1;
> + opts.cpus = &ctx.cpu;
> + opts.map_keys = &ctx.idx;
> +
> + pb = perf_buffer__new_raw(map_fd, MMAP_PAGE_CNT, &opts);
> + err = libbpf_get_error(pb);
> + if (err) {
> + p_err("failed to create perf buffer: %s (%d)",
> + strerror(err), err);
> goto err_close_map;
> -
> - pfds = calloc(nfds, sizeof(pfds[0]));
> - if (!pfds)
> - goto err_free_rings;
> -
> - for (i = 0; i < nfds; i++) {
> - rings[i].cpu = cpu + i;
> - rings[i].key = index + i;
> -
> - rings[i].fd = bpf_perf_event_open(map_fd, rings[i].key,
> - rings[i].cpu);
> - if (rings[i].fd < 0)
> - goto err_close_fds_prev;
> -
> - rings[i].mem = perf_event_mmap(rings[i].fd);
> - if (!rings[i].mem)
> - goto err_close_fds_current;
> -
> - pfds[i].fd = rings[i].fd;
> - pfds[i].events = POLLIN;
> }
>
> signal(SIGINT, int_exit);
> @@ -277,35 +216,25 @@ int do_event_pipe(int argc, char **argv)
> jsonw_start_array(json_wtr);
>
> while (!stop) {
> - poll(pfds, nfds, 200);
> - for (i = 0; i < nfds; i++)
> - perf_event_read(&rings[i], &tmp_buf, &tmp_buf_sz);
> + err = perf_buffer__poll(pb, 200);
> + if (err < 0 && err != -EINTR) {
> + p_err("perf buffer polling failed: %s (%d)",
> + strerror(err), err);
> + goto err_close_pb;
> + }
> }
> - free(tmp_buf);
>
> if (json_output)
> jsonw_end_array(json_wtr);
>
> - for (i = 0; i < nfds; i++) {
> - perf_event_unmap(rings[i].mem);
> - close(rings[i].fd);
> - }
> - free(pfds);
> - free(rings);
> + perf_buffer__free(pb);
> close(map_fd);
>
> return 0;
>
> -err_close_fds_prev:
> - while (i--) {
> - perf_event_unmap(rings[i].mem);
> -err_close_fds_current:
> - close(rings[i].fd);
> - }
> - free(pfds);
> -err_free_rings:
> - free(rings);
> +err_close_pb:
> + perf_buffer__free(pb);
> err_close_map:
> close(map_fd);
> - return -1;
> + return err ? -1 : 0;
... how can we return 0 on the error path? 😕
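I'd rather the error path stayed unconditional, rough sketch (or
alternatively make sure err is set to a negative value before every
goto that lands here):

err_close_pb:
	perf_buffer__free(pb);
err_close_map:
	close(map_fd);
	return -1;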
> }