[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <1272548980.2222.87.camel@edumazet-laptop>
Date: Thu, 29 Apr 2010 15:49:40 +0200
From: Eric Dumazet <eric.dumazet@...il.com>
To: hadi@...erus.ca
Cc: Changli Gao <xiaosuo@...il.com>,
David Miller <davem@...emloft.net>, therbert@...gle.com,
shemminger@...tta.com, netdev@...r.kernel.org,
Eilon Greenstein <eilong@...adcom.com>,
Brian Bloniarz <bmb@...enacr.com>
Subject: Re: [PATCH net-next-2.6] net: speedup udp receive path
Le jeudi 29 avril 2010 à 09:37 -0400, jamal a écrit :
> On Thu, 2010-04-29 at 15:21 +0200, Eric Dumazet wrote:
>
>
> >
> > You could try following program :
> >
>
> Will do later today (test machine is not on the network and is about 20
> minutes from here; so worst case i will get you results by end of day)
> I guess this program is good enough since it tells me the system wide
> ipi count - what my patch did was also to break it down by which cpu got
> how many IPIs (served to check if there was uneven distribution)
>
> >
> > Is your application mono threaded and receiving data to 8 sockets ?
> >
>
> I fork one instance per detected cpu and bind to different ports each
> time. Example bind to port 8200 on cpu0, 8201 on cpu1, etc.
>
I guess this is the problem ;)
With RPS, you should not bind your threads to cpu.
It is the RPS hash that will decide this for you.
I am using the following program:
/*
* Usage: udpsink [ -p baseport] nbports
*
*/
#include <arpa/inet.h>
#include <errno.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
/*
 * Per-worker statistics slot, one per receive thread.
 *
 * The trailing pad brings the struct up to 16 unsigned longs
 * (128 bytes on LP64) so that counters belonging to different
 * threads land in separate cache lines — presumably to avoid
 * false sharing between workers; confirm against the target
 * CPU's cache-line size.
 */
struct worker_data {
	int fd;                      /* UDP socket this worker reads from */
	unsigned long pack_count;    /* datagrams received so far */
	unsigned long bytes_count;   /* payload bytes received so far */
	unsigned long _padd[16 - 3]; /* alignment */
};
/*
 * Print the command-line synopsis on stderr and terminate the
 * process with the given exit code. Never returns.
 */
void usage(int code)
{
	static const char synopsis[] =
		"Usage: udpsink [-p baseport] nbports\n";

	fputs(synopsis, stderr);
	exit(code);
}
void *worker_func(void *arg)
{
struct worker_data *wdata = (struct worker_data *)arg;
char buffer[4096];
struct sockaddr_in addr;
int lu;
while (1) {
socklen_t len = sizeof(addr);
lu = recvfrom(wdata->fd, buffer, sizeof(buffer), 0, (struct sockaddr *)&addr, &len);
if (lu > 0) {
wdata->pack_count++;
wdata->bytes_count += lu;
}
}
}
/*
 * udpsink: spawn one receive thread per UDP port (or several threads
 * sharing one socket with -c) and print aggregate packets-per-second
 * once a second.
 *
 * Usage: udpsink [-c] [-v] [-p baseport] nbports
 *   -c : all workers share the first socket (concurrent receive)
 *   -v : also print per-thread packet counts
 *   -p : first UDP port to bind (default 4000)
 *
 * Runs until killed; only returns on setup failure (exit code 1).
 */
int main(int argc, char *argv[])
{
	int c;
	int baseport = 4000;
	int nbthreads;
	struct worker_data *wdata;
	unsigned long ototal = 0;
	int concurrent = 0;
	int verbose = 0;
	int i;

	while ((c = getopt(argc, argv, "cvp:")) != -1) {
		if (c == 'p')
			baseport = atoi(optarg);
		else if (c == 'c')
			concurrent = 1;
		else if (c == 'v')
			verbose++;
		else
			usage(1);
	}

	if (optind == argc)
		usage(1);
	nbthreads = atoi(argv[optind]);
	if (nbthreads <= 0) /* 0 workers would loop printing nothing */
		usage(1);

	/* calloc(nmemb, size): element count first, element size second */
	wdata = calloc(nbthreads, sizeof(struct worker_data));
	if (!wdata) {
		perror("calloc");
		return 1;
	}

	for (i = 0; i < nbthreads; i++) {
		struct sockaddr_in addr;
		pthread_t tid;
		int rc;

		if (i && concurrent) {
			/* -c: every worker reads from the first socket */
			wdata[i].fd = wdata[0].fd;
		} else {
			wdata[i].fd = socket(PF_INET, SOCK_DGRAM, 0);
			if (wdata[i].fd == -1) {
				perror("socket");
				return 1;
			}
			memset(&addr, 0, sizeof(addr));
			addr.sin_family = AF_INET;
			/* INADDR_ANY (zeroed s_addr): accept on all interfaces */
			addr.sin_port = htons(baseport + i);
			if (bind(wdata[i].fd, (struct sockaddr *)&addr,
				 sizeof(addr)) < 0) {
				perror("bind");
				return 1;
			}
		}
		/* pthread_create() returns an error NUMBER, not errno */
		rc = pthread_create(&tid, NULL, worker_func, wdata + i);
		if (rc != 0) {
			fprintf(stderr, "pthread_create: %s\n", strerror(rc));
			return 1;
		}
	}

	/* Reporter loop: once a second, sum the per-worker counters and
	 * print the packets/s delta since the previous tick. */
	for (;;) {
		unsigned long total;
		long delta;

		sleep(1);
		total = 0;
		for (i = 0; i < nbthreads; i++)
			total += wdata[i].pack_count;
		delta = total - ototal;
		if (delta) {
			printf("%lu pps (%lu", delta, total);
			if (verbose) {
				for (i = 0; i < nbthreads; i++) {
					if (wdata[i].pack_count)
						printf(" %d:%lu", i,
						       wdata[i].pack_count);
				}
			}
			printf(")\n");
		}
		ototal = total;
	}
}
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Powered by blists - more mailing lists