lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Wed, 20 Feb 2019 17:58:08 +0100
From:   Stefano Brivio <sbrivio@...hat.com>
To:     Stephen Hemminger <stephen@...workplumber.org>
Cc:     Eric Dumazet <eric.dumazet@...il.com>, Phil Sutter <phil@....cc>,
        David Ahern <dsahern@...il.com>,
        Sabrina Dubroca <sd@...asysnail.net>, netdev@...r.kernel.org
Subject: Re: [PATCH iproute2] ss: Render buffer to output every time a
 number of chunks are allocated

Hi Stephen,

I think this patch is reasonably tested now. Eric, who reported the
original issue, is also satisfied with it. Is there anything blocking its acceptance?

-- 
Stefano

On Thu, 14 Feb 2019 01:58:32 +0100
Stefano Brivio <sbrivio@...hat.com> wrote:

> Eric reported that, with 10 million sockets, ss -emoi (about 1000 bytes
> output per socket) can easily lead to OOM (buffer would grow to 10GB of
> memory).
> 
> Limit the maximum size of the buffer to five chunks, 1M each. Render and
> flush buffers whenever we reach that.
> 
> This might make the resulting blocks slightly unaligned between them, with
> occasional loss of readability on lines occurring every 5k to 50k sockets
> approximately. Something like (from ss -tu):
> 
> [...]
> CLOSE-WAIT   32       0           192.168.1.50:35232           10.0.0.1:https
> ESTAB        0        0           192.168.1.50:53820           10.0.0.1:https
> ESTAB       0        0           192.168.1.50:46924            10.0.0.1:https
> CLOSE-WAIT  32       0           192.168.1.50:35228            10.0.0.1:https
> [...]
> 
> However, I don't actually expect any human user to scroll through that
> amount of sockets, so readability should be preserved when it matters.
> 
> The bulk of the diffstat comes from moving field_next() around, as we now
> call render() from it. Functionally, this is implemented by six lines of
> code, most of them in field_next().
> 
> Reported-by: Eric Dumazet <eric.dumazet@...il.com>
> Fixes: 691bd854bf4a ("ss: Buffer raw fields first, then render them as a table")
> Signed-off-by: Stefano Brivio <sbrivio@...hat.com>
> ---
> Eric, it would be nice if you could test this with your bazillion sockets,
> I checked this with -emoi and "only" 500,000 sockets.
> 
>  misc/ss.c | 68 ++++++++++++++++++++++++++++++++-----------------------
>  1 file changed, 40 insertions(+), 28 deletions(-)
> 
> diff --git a/misc/ss.c b/misc/ss.c
> index 9e821faf0d31..28bdcba72d73 100644
> --- a/misc/ss.c
> +++ b/misc/ss.c
> @@ -52,7 +52,8 @@
>  #include <linux/tipc_sockets_diag.h>
>  
>  #define MAGIC_SEQ 123456
> -#define BUF_CHUNK (1024 * 1024)
> +#define BUF_CHUNK (1024 * 1024)	/* Buffer chunk allocation size */
> +#define BUF_CHUNKS_MAX 5	/* Maximum number of allocated buffer chunks */
>  #define LEN_ALIGN(x) (((x) + 1) & ~1)
>  
>  #define DIAG_REQUEST(_req, _r)						    \
> @@ -176,6 +177,7 @@ static struct {
>  	struct buf_token *cur;	/* Position of current token in chunk */
>  	struct buf_chunk *head;	/* First chunk */
>  	struct buf_chunk *tail;	/* Current chunk */
> +	int chunks;		/* Number of allocated chunks */
>  } buffer;
>  
>  static const char *TCP_PROTO = "tcp";
> @@ -936,6 +938,8 @@ static struct buf_chunk *buf_chunk_new(void)
>  
>  	new->end = buffer.cur->data;
>  
> +	buffer.chunks++;
> +
>  	return new;
>  }
>  
> @@ -1080,33 +1084,6 @@ static int field_is_last(struct column *f)
>  	return f - columns == COL_MAX - 1;
>  }
>  
> -static void field_next(void)
> -{
> -	field_flush(current_field);
> -
> -	if (field_is_last(current_field))
> -		current_field = columns;
> -	else
> -		current_field++;
> -}
> -
> -/* Walk through fields and flush them until we reach the desired one */
> -static void field_set(enum col_id id)
> -{
> -	while (id != current_field - columns)
> -		field_next();
> -}
> -
> -/* Print header for all non-empty columns */
> -static void print_header(void)
> -{
> -	while (!field_is_last(current_field)) {
> -		if (!current_field->disabled)
> -			out("%s", current_field->header);
> -		field_next();
> -	}
> -}
> -
>  /* Get the next available token in the buffer starting from the current token */
>  static struct buf_token *buf_token_next(struct buf_token *cur)
>  {
> @@ -1132,6 +1109,7 @@ static void buf_free_all(void)
>  		free(tmp);
>  	}
>  	buffer.head = NULL;
> +	buffer.chunks = 0;
>  }
>  
>  /* Get current screen width, default to 80 columns if TIOCGWINSZ fails */
> @@ -1294,6 +1272,40 @@ static void render(void)
>  	current_field = columns;
>  }
>  
> +/* Move to next field, and render buffer if we reached the maximum number of
> + * chunks, at the last field in a line.
> + */
> +static void field_next(void)
> +{
> +	if (field_is_last(current_field) && buffer.chunks >= BUF_CHUNKS_MAX) {
> +		render();
> +		return;
> +	}
> +
> +	field_flush(current_field);
> +	if (field_is_last(current_field))
> +		current_field = columns;
> +	else
> +		current_field++;
> +}
> +
> +/* Walk through fields and flush them until we reach the desired one */
> +static void field_set(enum col_id id)
> +{
> +	while (id != current_field - columns)
> +		field_next();
> +}
> +
> +/* Print header for all non-empty columns */
> +static void print_header(void)
> +{
> +	while (!field_is_last(current_field)) {
> +		if (!current_field->disabled)
> +			out("%s", current_field->header);
> +		field_next();
> +	}
> +}
> +
>  static void sock_state_print(struct sockstat *s)
>  {
>  	const char *sock_name;

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ