Message-ID: <c3fa2001-ef77-46c4-c0de-3335e7934db9@fb.com>
Date: Mon, 11 May 2020 17:35:28 -0700
From: Yonghong Song <yhs@...com>
To: Lorenzo Bianconi <lorenzo@...nel.org>, <bpf@...r.kernel.org>,
<netdev@...r.kernel.org>
CC: <ast@...nel.org>, <davem@...emloft.net>, <brouer@...hat.com>,
<daniel@...earbox.net>, <lorenzo.bianconi@...hat.com>
Subject: Re: [PATCH bpf-next] samples/bpf: xdp_redirect_cpu: set MAX_CPUS
according to NR_CPUS
On 5/11/20 1:24 PM, Lorenzo Bianconi wrote:
> xdp_redirect_cpu is currently failing in bpf_prog_load_xattr()
> allocating cpu_map map if CONFIG_NR_CPUS is less than 64 since
> cpu_map_alloc() requires max_entries to be less than NR_CPUS.
> Set cpu_map max_entries according to NR_CPUS in xdp_redirect_cpu_kern.c
> and get currently running cpus in xdp_redirect_cpu_user.c
>
> Signed-off-by: Lorenzo Bianconi <lorenzo@...nel.org>
> ---
> samples/bpf/xdp_redirect_cpu_kern.c | 2 +-
> samples/bpf/xdp_redirect_cpu_user.c | 29 ++++++++++++++++-------------
> 2 files changed, 17 insertions(+), 14 deletions(-)
>
> diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c
> index 313a8fe6d125..2baf8db1f7e7 100644
> --- a/samples/bpf/xdp_redirect_cpu_kern.c
> +++ b/samples/bpf/xdp_redirect_cpu_kern.c
> @@ -15,7 +15,7 @@
> #include <bpf/bpf_helpers.h>
> #include "hash_func01.h"
>
> -#define MAX_CPUS 64 /* WARNING - sync with _user.c */
> +#define MAX_CPUS NR_CPUS
>
> /* Special map type that can XDP_REDIRECT frames to another CPU */
> struct {
> diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
> index 15bdf047a222..100e72cb4cf5 100644
> --- a/samples/bpf/xdp_redirect_cpu_user.c
> +++ b/samples/bpf/xdp_redirect_cpu_user.c
> @@ -13,6 +13,7 @@ static const char *__doc__ =
> #include <unistd.h>
> #include <locale.h>
> #include <sys/resource.h>
> +#include <sys/sysinfo.h>
> #include <getopt.h>
> #include <net/if.h>
> #include <time.h>
> @@ -24,8 +25,6 @@ static const char *__doc__ =
> #include <arpa/inet.h>
> #include <linux/if_link.h>
>
> -#define MAX_CPUS 64 /* WARNING - sync with _kern.c */
> -
> /* How many xdp_progs are defined in _kern.c */
> #define MAX_PROG 6
>
> @@ -40,6 +39,7 @@ static char *ifname;
> static __u32 prog_id;
>
> static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
> +static int n_cpus;
> static int cpu_map_fd;
> static int rx_cnt_map_fd;
> static int redirect_err_cnt_map_fd;
> @@ -170,7 +170,7 @@ struct stats_record {
> struct record redir_err;
> struct record kthread;
> struct record exception;
> - struct record enq[MAX_CPUS];
> + struct record enq[];
> };
>
> static bool map_collect_percpu(int fd, __u32 key, struct record *rec)
> @@ -225,10 +225,11 @@ static struct datarec *alloc_record_per_cpu(void)
> static struct stats_record *alloc_stats_record(void)
> {
> struct stats_record *rec;
> - int i;
> + int i, size;
>
> - rec = malloc(sizeof(*rec));
> - memset(rec, 0, sizeof(*rec));
> + size = sizeof(*rec) + n_cpus * sizeof(struct record);
> + rec = malloc(size);
> + memset(rec, 0, size);
> if (!rec) {
> fprintf(stderr, "Mem alloc error\n");
> exit(EXIT_FAIL_MEM);
> @@ -237,7 +238,7 @@ static struct stats_record *alloc_stats_record(void)
> rec->redir_err.cpu = alloc_record_per_cpu();
> rec->kthread.cpu = alloc_record_per_cpu();
> rec->exception.cpu = alloc_record_per_cpu();
> - for (i = 0; i < MAX_CPUS; i++)
> + for (i = 0; i < n_cpus; i++)
> rec->enq[i].cpu = alloc_record_per_cpu();
>
> return rec;
> @@ -247,7 +248,7 @@ static void free_stats_record(struct stats_record *r)
> {
> int i;
>
> - for (i = 0; i < MAX_CPUS; i++)
> + for (i = 0; i < n_cpus; i++)
> free(r->enq[i].cpu);
> free(r->exception.cpu);
> free(r->kthread.cpu);
> @@ -350,7 +351,7 @@ static void stats_print(struct stats_record *stats_rec,
> }
>
> /* cpumap enqueue stats */
> - for (to_cpu = 0; to_cpu < MAX_CPUS; to_cpu++) {
> + for (to_cpu = 0; to_cpu < n_cpus; to_cpu++) {
> char *fmt = "%-15s %3d:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n";
> char *fm2 = "%-15s %3s:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n";
> char *errstr = "";
> @@ -475,7 +476,7 @@ static void stats_collect(struct stats_record *rec)
> map_collect_percpu(fd, 1, &rec->redir_err);
>
> fd = cpumap_enqueue_cnt_map_fd;
> - for (i = 0; i < MAX_CPUS; i++)
> + for (i = 0; i < n_cpus; i++)
> map_collect_percpu(fd, i, &rec->enq[i]);
>
> fd = cpumap_kthread_cnt_map_fd;
> @@ -549,10 +550,10 @@ static int create_cpu_entry(__u32 cpu, __u32 queue_size,
> */
> static void mark_cpus_unavailable(void)
> {
> - __u32 invalid_cpu = MAX_CPUS;
> + __u32 invalid_cpu = n_cpus;
> int ret, i;
>
> - for (i = 0; i < MAX_CPUS; i++) {
> + for (i = 0; i < n_cpus; i++) {
> ret = bpf_map_update_elem(cpus_available_map_fd, &i,
> &invalid_cpu, 0);
> if (ret) {
> @@ -688,6 +689,8 @@ int main(int argc, char **argv)
> int prog_fd;
> __u32 qsize;
>
> + n_cpus = get_nprocs();
get_nprocs() returns the number of available CPUs, not including offline
CPUs. But gaps may exist in the CPU numbering, e.g., get_nprocs() returns 4
while the online CPUs are 0-1,4-5. The map updates, which loop over keys
0..n_cpus-1, will then miss CPUs 4-5, and map_update will fail for the
offline CPUs 2-3.
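
(An illustrative standalone snippet, not part of the patch: with CPUs 2-3
offlined, the online mask shows the gap that the bare get_nprocs() count
hides.)

	#include <stdio.h>
	#include <sys/sysinfo.h>

	int main(void)
	{
		char buf[64] = { 0 };
		FILE *f = fopen("/sys/devices/system/cpu/online", "r");

		/* prints e.g. "online mask: 0-1,4-5" */
		if (f && fgets(buf, sizeof(buf), f))
			printf("online mask: %s", buf);
		if (f)
			fclose(f);

		/* e.g. get_nprocs() = 4, get_nprocs_conf() = 6 */
		printf("get_nprocs() = %d, get_nprocs_conf() = %d\n",
		       get_nprocs(), get_nprocs_conf());
		return 0;
	}
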
This sample test does not need to deal with the complication of
CPU offlining, I think. Maybe we can do:

	n_cpus = get_nprocs();
	n_cpus_conf = get_nprocs_conf();
	if (n_cpus != n_cpus_conf) {
		/* message that some cpus are offline and not supported. */
		return error;
	}
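
Spelled out in main(), right where the patch does n_cpus = get_nprocs(), the
check could look roughly like this (the message text is illustrative and
EXIT_FAIL is assumed to be the sample's generic failure exit code):

	n_cpus = get_nprocs();
	if (n_cpus != get_nprocs_conf()) {
		fprintf(stderr,
			"only %d of %d configured CPUs are online, "
			"offline CPUs are not supported by this sample\n",
			n_cpus, get_nprocs_conf());
		return EXIT_FAIL;
	}
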
> +
> /* Notice: choosing he queue size is very important with the
> * ixgbe driver, because it's driver page recycling trick is
> * dependend on pages being returned quickly. The number of
> @@ -757,7 +760,7 @@ int main(int argc, char **argv)
> case 'c':
> /* Add multiple CPUs */
> add_cpu = strtoul(optarg, NULL, 0);
> - if (add_cpu >= MAX_CPUS) {
> + if (add_cpu >= n_cpus) {
> fprintf(stderr,
> "--cpu nr too large for cpumap err(%d):%s\n",
> errno, strerror(errno));
>