Message-ID: <87ec59f7-2d76-4c7a-a2b0-57bc4e801d1d@gmail.com>
Date: Fri, 23 Jan 2026 00:14:36 -0800
From: JP Kobryn <inwardvessel@...il.com>
To: Jianyue Wu <wujianyue000@...il.com>, akpm@...ux-foundation.org
Cc: shakeel.butt@...ux.dev, hannes@...xchg.org, mhocko@...nel.org,
 roman.gushchin@...ux.dev, muchun.song@...ux.dev, linux-mm@...ck.org,
 cgroups@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v3] mm: optimize stat output to reduce sys time by 11%

On 1/22/26 3:42 AM, Jianyue Wu wrote:
> Replace seq_printf/seq_buf_printf with lightweight helpers to avoid
> printf parsing in memcg stats output.
> 
> Key changes:
> - Add memcg_seq_put_name_val() for seq_file "name value\n" formatting
> - Add memcg_seq_buf_put_name_val() for seq_buf "name value\n" formatting
> - Update __memory_events_show(), swap_events_show(),
>    memory_stat_format(), memory_numa_stat_show(), and related helpers
> - Introduce local variables to improve readability and reduce line length
> 
> Performance:
> - 1M reads of memory.stat+memory.numa_stat
> - Before: real 0m9.663s, user 0m4.840s, sys 0m4.823s
> - After:  real 0m9.051s, user 0m4.775s, sys 0m4.275s (~11.4% sys drop)
> 
> Tests:
> - Script:
>    for ((i=1; i<=1000000; i++)); do
>        : > /dev/null < /sys/fs/cgroup/memory.stat
>        : > /dev/null < /sys/fs/cgroup/memory.numa_stat
>    done
> 
> Acked-by: Shakeel Butt <shakeel.butt@...ux.dev>
> Signed-off-by: Jianyue Wu <wujianyue000@...il.com>
> ---

Hi Jianyue,
I gave this patch a run and can confirm the perf gain. I left comments
on reducing the number of added lines so that the result better
resembles the existing code.

Tested-by: JP Kobryn <inwardvessel@...il.com>

> 
> Hi Shakeel,
> 
> Thanks for the review! I've addressed your comments in v3 by moving the
> helper functions to memcontrol.c and adding kernel-doc documentation.
> 
> Thanks,
> Jianyue
> 
> Changes in v3:
> - Move memcg_seq_put_name_val() and memcg_seq_buf_put_name_val() from
>    header (inline) to memcontrol.c and add kernel-doc documentation
>    (Suggested by Shakeel Butt)
> 
> Changes in v2:
> - Initial version with performance optimization
> 
>   mm/memcontrol-v1.c | 120 +++++++++++++++++++++------------
>   mm/memcontrol-v1.h |   6 ++
>   mm/memcontrol.c    | 162 ++++++++++++++++++++++++++++++++++-----------
>   3 files changed, 205 insertions(+), 83 deletions(-)
> 
> diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c
> index 6eed14bff742..482475333876 100644
> --- a/mm/memcontrol-v1.c
> +++ b/mm/memcontrol-v1.c
> @@ -10,7 +10,7 @@
>   #include <linux/poll.h>
>   #include <linux/sort.h>
>   #include <linux/file.h>
> -#include <linux/seq_buf.h>
> +#include <linux/string.h>
>   
>   #include "internal.h"
>   #include "swap.h"
> @@ -1795,25 +1795,36 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v)
>   	mem_cgroup_flush_stats(memcg);
>   
>   	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
> -		seq_printf(m, "%s=%lu", stat->name,
> -			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
> -						   false));
> -		for_each_node_state(nid, N_MEMORY)
> -			seq_printf(m, " N%d=%lu", nid,
> -				   mem_cgroup_node_nr_lru_pages(memcg, nid,
> -							stat->lru_mask, false));
> +		u64 nr_pages;
> +
> +		seq_puts(m, stat->name);
> +		nr_pages = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
> +						   false);
> +		seq_put_decimal_ull(m, "=", nr_pages);
> +		for_each_node_state(nid, N_MEMORY) {
> +			nr_pages = mem_cgroup_node_nr_lru_pages(memcg, nid,
> +								stat->lru_mask,
> +								false);
> +			seq_put_decimal_ull(m, " N", nid);
> +			seq_put_decimal_ull(m, "=", nr_pages);
> +		}
>   		seq_putc(m, '\n');

There's a recurring pattern of 1) put the name, 2) put the separator,
3) put the value. Instead of adding so many new lines, I wonder if you
could use a function or macro that accepts: char *name, char sep, u64
val. You could then use it as a drop-in replacement for seq_printf()
and avoid the extra added lines here and throughout this patch.
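
Untested, and the name is just a placeholder, but something like:

	static void seq_put_name_val(struct seq_file *m, const char *name,
				     char sep, u64 val)
	{
		seq_puts(m, name);
		seq_putc(m, sep);
		seq_put_decimal_ull(m, "", val);
	}

would keep each call site to a single line:

	seq_put_name_val(m, stat->name, '=', nr_pages);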

>   	}
>   
>   	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
> -
> -		seq_printf(m, "hierarchical_%s=%lu", stat->name,
> -			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
> -						   true));
> -		for_each_node_state(nid, N_MEMORY)
> -			seq_printf(m, " N%d=%lu", nid,
> -				   mem_cgroup_node_nr_lru_pages(memcg, nid,
> -							stat->lru_mask, true));
> +		u64 nr_pages;
> +
> +		seq_puts(m, "hierarchical_");
> +		seq_puts(m, stat->name);
> +		nr_pages = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, true);
> +		seq_put_decimal_ull(m, "=", nr_pages);
> +		for_each_node_state(nid, N_MEMORY) {
> +			nr_pages = mem_cgroup_node_nr_lru_pages(memcg, nid,
> +								stat->lru_mask,
> +								true);
> +			seq_put_decimal_ull(m, " N", nid);
> +			seq_put_decimal_ull(m, "=", nr_pages);
> +		}
>   		seq_putc(m, '\n');
>   	}
>   
> @@ -1870,6 +1881,7 @@ void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
>   	unsigned long memory, memsw;
>   	struct mem_cgroup *mi;
>   	unsigned int i;
> +	u64 memory_limit, memsw_limit;
>   
>   	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
>   
> @@ -1879,17 +1891,24 @@ void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
>   		unsigned long nr;
>   
>   		nr = memcg_page_state_local_output(memcg, memcg1_stats[i]);
> -		seq_buf_printf(s, "%s %lu\n", memcg1_stat_names[i], nr);
> +		memcg_seq_buf_put_name_val(s, memcg1_stat_names[i], (u64)nr);
>   	}
>   
> -	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
> -		seq_buf_printf(s, "%s %lu\n", vm_event_name(memcg1_events[i]),
> -			       memcg_events_local(memcg, memcg1_events[i]));
> +	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) {
> +		u64 events;
>   
> -	for (i = 0; i < NR_LRU_LISTS; i++)
> -		seq_buf_printf(s, "%s %lu\n", lru_list_name(i),
> -			       memcg_page_state_local(memcg, NR_LRU_BASE + i) *
> -			       PAGE_SIZE);
> +		events = memcg_events_local(memcg, memcg1_events[i]);
> +		memcg_seq_buf_put_name_val(s, vm_event_name(memcg1_events[i]),
> +					   events);
> +	}
> +
> +	for (i = 0; i < NR_LRU_LISTS; i++) {
> +		u64 nr_pages;
> +
> +		nr_pages = memcg_page_state_local(memcg, NR_LRU_BASE + i) *
> +			   PAGE_SIZE;
> +		memcg_seq_buf_put_name_val(s, lru_list_name(i), nr_pages);
> +	}
>   
>   	/* Hierarchical information */
>   	memory = memsw = PAGE_COUNTER_MAX;
> @@ -1897,28 +1916,38 @@ void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
>   		memory = min(memory, READ_ONCE(mi->memory.max));
>   		memsw = min(memsw, READ_ONCE(mi->memsw.max));
>   	}
> -	seq_buf_printf(s, "hierarchical_memory_limit %llu\n",
> -		       (u64)memory * PAGE_SIZE);
> -	seq_buf_printf(s, "hierarchical_memsw_limit %llu\n",
> -		       (u64)memsw * PAGE_SIZE);
> +	memory_limit = (u64)memory * PAGE_SIZE;
> +	memsw_limit = (u64)memsw * PAGE_SIZE;

I don't think these new local variables improve readability in this
case.
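The limits could just be passed directly, e.g.:

	memcg_seq_buf_put_name_val(s, "hierarchical_memory_limit",
				   (u64)memory * PAGE_SIZE);
	memcg_seq_buf_put_name_val(s, "hierarchical_memsw_limit",
				   (u64)memsw * PAGE_SIZE);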

> +
> +	memcg_seq_buf_put_name_val(s, "hierarchical_memory_limit",
> +				   memory_limit);
> +	memcg_seq_buf_put_name_val(s, "hierarchical_memsw_limit",
> +				   memsw_limit);
>   
>   	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
>   		unsigned long nr;
>   
>   		nr = memcg_page_state_output(memcg, memcg1_stats[i]);
> -		seq_buf_printf(s, "total_%s %llu\n", memcg1_stat_names[i],
> -			       (u64)nr);
> +		seq_buf_puts(s, "total_");
> +		memcg_seq_buf_put_name_val(s, memcg1_stat_names[i], (u64)nr);

I would try to combine these two calls into one if possible. If the
diff has close to a -1:+1 line change in the places where
seq_buf_printf() is replaced with some helper, it would reduce the
noisiness. This applies to the other areas where a prefix is put
before calling a new helper.
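
Untested, but one option is a prefix parameter (NULL or "" where no
prefix is needed), so the call site stays one line:

	static void memcg_seq_buf_put_name_val(struct seq_buf *s,
					       const char *prefix,
					       const char *name, u64 val)
	{
		if (prefix)
			seq_buf_puts(s, prefix);
		seq_buf_puts(s, name);
		seq_buf_putc(s, ' ');
		/* ... value and newline as in your current helper ... */
	}

	memcg_seq_buf_put_name_val(s, "total_", memcg1_stat_names[i], (u64)nr);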

> +	}
> +
> +	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) {
> +		u64 events;
> +
> +		events = memcg_events(memcg, memcg1_events[i]);
> +		seq_buf_puts(s, "total_");
> +		memcg_seq_buf_put_name_val(s, vm_event_name(memcg1_events[i]),
> +					   events);
>   	}
>   
> -	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
> -		seq_buf_printf(s, "total_%s %llu\n",
> -			       vm_event_name(memcg1_events[i]),
> -			       (u64)memcg_events(memcg, memcg1_events[i]));
> +	for (i = 0; i < NR_LRU_LISTS; i++) {
> +		u64 nr_pages;
>   
> -	for (i = 0; i < NR_LRU_LISTS; i++)
> -		seq_buf_printf(s, "total_%s %llu\n", lru_list_name(i),
> -			       (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
> -			       PAGE_SIZE);
> +		nr_pages = memcg_page_state(memcg, NR_LRU_BASE + i) * PAGE_SIZE;
> +		seq_buf_puts(s, "total_");
> +		memcg_seq_buf_put_name_val(s, lru_list_name(i), nr_pages);
> +	}
>   
>   #ifdef CONFIG_DEBUG_VM
>   	{
> @@ -1933,8 +1962,8 @@ void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
>   			anon_cost += mz->lruvec.anon_cost;
>   			file_cost += mz->lruvec.file_cost;
>   		}
> -		seq_buf_printf(s, "anon_cost %lu\n", anon_cost);
> -		seq_buf_printf(s, "file_cost %lu\n", file_cost);
> +		memcg_seq_buf_put_name_val(s, "anon_cost", (u64)anon_cost);
> +		memcg_seq_buf_put_name_val(s, "file_cost", (u64)file_cost);
>   	}
>   #endif
>   }
> @@ -1968,11 +1997,14 @@ static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
>   static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
>   {
>   	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
> +	u64 oom_kill;
> +
> +	memcg_seq_put_name_val(sf, "oom_kill_disable",
> +			       READ_ONCE(memcg->oom_kill_disable));
> +	memcg_seq_put_name_val(sf, "under_oom", (bool)memcg->under_oom);
>   
> -	seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable));
> -	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
> -	seq_printf(sf, "oom_kill %lu\n",
> -		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
> +	oom_kill = atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]);
> +	memcg_seq_put_name_val(sf, "oom_kill", oom_kill);

This new local variable just adds extra lines.
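The read can stay inline:

	memcg_seq_put_name_val(sf, "oom_kill",
			       atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));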

>   	return 0;
>   }
>   
> diff --git a/mm/memcontrol-v1.h b/mm/memcontrol-v1.h
> index 6358464bb416..46f198a81761 100644
> --- a/mm/memcontrol-v1.h
> +++ b/mm/memcontrol-v1.h
> @@ -4,6 +4,9 @@
>   #define __MM_MEMCONTROL_V1_H
>   
>   #include <linux/cgroup-defs.h>
> +#include <linux/seq_buf.h>
> +#include <linux/seq_file.h>
> +#include <linux/sprintf.h>
>   
>   /* Cgroup v1 and v2 common declarations */
>   
> @@ -33,6 +36,9 @@ int memory_stat_show(struct seq_file *m, void *v);
>   void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n);
>   struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg);
>   
> +void memcg_seq_put_name_val(struct seq_file *m, const char *name, u64 val);
> +void memcg_seq_buf_put_name_val(struct seq_buf *s, const char *name, u64 val);
> +
>   /* Cgroup v1-specific declarations */
>   #ifdef CONFIG_MEMCG_V1
>   
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 86f43b7e5f71..0bc244c5a570 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -42,6 +42,7 @@
>   #include <linux/bit_spinlock.h>
>   #include <linux/rcupdate.h>
>   #include <linux/limits.h>
> +#include <linux/sprintf.h>
>   #include <linux/export.h>
>   #include <linux/list.h>
>   #include <linux/mutex.h>
> @@ -1460,9 +1461,70 @@ static bool memcg_accounts_hugetlb(void)
>   }
>   #endif /* CONFIG_HUGETLB_PAGE */
>   
> +/* Max 2^64 - 1 = 18446744073709551615 (20 digits) */
> +#define MEMCG_DEC_U64_MAX_LEN 20
> +
> +/**
> + * memcg_seq_put_name_val - Write a name-value pair to a seq_file
> + * @m: The seq_file to write to
> + * @name: The name string (must be null-terminated; written with seq_puts)
> + * @val: The u64 value to write
> + *
> + * This helper formats and writes a "name value\n" line to a seq_file,
> + * commonly used for cgroup statistics output. The value is efficiently
> + * converted to decimal using seq_put_decimal_ull.
> + *
> + * Output format: "<name> <value>\n"
> + * Example: "anon 1048576\n"
> + */
> +void memcg_seq_put_name_val(struct seq_file *m, const char *name, u64 val)
> +{
> +	seq_puts(m, name);
> +	/* need a space between name and value */
> +	seq_put_decimal_ull(m, " ", val);
> +	seq_putc(m, '\n');

I think seq_put* calls normally don't imply a trailing newline. Maybe
change the name to reflect that, like something with "print"? Also,
it's not really memcg specific.

This function uses a space as the separator, while earlier in the diff
you were using '='. A separator parameter could allow this function to
be used elsewhere, but you'd have to manage the newline somehow. Maybe
a newline wrapper?
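
Together with the sep parameter I suggested earlier, the wrapper could
be as simple as (untested, names are placeholders):

	static void seq_println_name_val(struct seq_file *m, const char *name,
					 char sep, u64 val)
	{
		seq_put_name_val(m, name, sep, val);
		seq_putc(m, '\n');
	}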

> +}
> +
> +/**
> + * memcg_seq_buf_put_name_val - Write a name-value pair to a seq_buf
> + * @s: The seq_buf to write to
> + * @name: The name string to write
> + * @val: The u64 value to write
> + *
> + * This helper formats and writes a "name value\n" line to a seq_buf.
> + * Unlike memcg_seq_put_name_val which uses seq_file's built-in formatting,
> + * this function manually converts the value to a string using num_to_str
> + * and writes it using seq_buf primitives for better performance when
> + * batching multiple writes to a seq_buf.
> + *
> + * The function checks for overflow at each step and returns early if
> + * any operation would cause the buffer to overflow.
> + *
> + * Output format: "<name> <value>\n"
> + * Example: "file 2097152\n"
> + */
> +void memcg_seq_buf_put_name_val(struct seq_buf *s, const char *name, u64 val)
> +{
> +	char num_buf[MEMCG_DEC_U64_MAX_LEN];
> +	int num_len;
> +
> +	num_len = num_to_str(num_buf, sizeof(num_buf), val, 0);
> +	if (num_len <= 0)
> +		return;
> +
> +	if (seq_buf_puts(s, name))
> +		return;
> +	if (seq_buf_putc(s, ' '))
> +		return;

Could num_buf[0] just be ' '? The buffer length would have to be
extended, though. I'm not sure whether saving a few seq_buf_putc()
calls makes a difference.

> +	if (seq_buf_putmem(s, num_buf, num_len))
> +		return;
> +	seq_buf_putc(s, '\n');

Similarly, though I'm not sure it even performs better, this call
could be removed by doing num_buf[num_len + 1] = '\n' (extending the
buffer again).

If you make the two changes above, you can call seq_buf_putmem() last.
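
Rough sketch of the combined version (untested):

	char num_buf[MEMCG_DEC_U64_MAX_LEN + 2];	/* ' ' + digits + '\n' */
	int num_len;

	num_buf[0] = ' ';
	num_len = num_to_str(num_buf + 1, MEMCG_DEC_U64_MAX_LEN, val, 0);
	if (num_len <= 0)
		return;
	num_buf[num_len + 1] = '\n';

	if (seq_buf_puts(s, name))
		return;
	seq_buf_putmem(s, num_buf, num_len + 2);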

> +}
> +
>   static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
>   {
>   	int i;
> +	u64 pgscan, pgsteal;
>   
>   	/*
>   	 * Provide statistics on the state of the memory subsystem as
> @@ -1485,36 +1547,40 @@ static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
>   			continue;
>   #endif
>   		size = memcg_page_state_output(memcg, memory_stats[i].idx);
> -		seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);
> +		memcg_seq_buf_put_name_val(s, memory_stats[i].name, size);
>   
>   		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
>   			size += memcg_page_state_output(memcg,
>   							NR_SLAB_RECLAIMABLE_B);
> -			seq_buf_printf(s, "slab %llu\n", size);
> +			memcg_seq_buf_put_name_val(s, "slab", size);
>   		}
>   	}
>   
>   	/* Accumulated memory events */
> -	seq_buf_printf(s, "pgscan %lu\n",
> -		       memcg_events(memcg, PGSCAN_KSWAPD) +
> -		       memcg_events(memcg, PGSCAN_DIRECT) +
> -		       memcg_events(memcg, PGSCAN_PROACTIVE) +
> -		       memcg_events(memcg, PGSCAN_KHUGEPAGED));
> -	seq_buf_printf(s, "pgsteal %lu\n",
> -		       memcg_events(memcg, PGSTEAL_KSWAPD) +
> -		       memcg_events(memcg, PGSTEAL_DIRECT) +
> -		       memcg_events(memcg, PGSTEAL_PROACTIVE) +
> -		       memcg_events(memcg, PGSTEAL_KHUGEPAGED));
> +	pgscan = memcg_events(memcg, PGSCAN_KSWAPD) +
> +		 memcg_events(memcg, PGSCAN_DIRECT) +
> +		 memcg_events(memcg, PGSCAN_PROACTIVE) +
> +		 memcg_events(memcg, PGSCAN_KHUGEPAGED);
> +	pgsteal = memcg_events(memcg, PGSTEAL_KSWAPD) +
> +		  memcg_events(memcg, PGSTEAL_DIRECT) +
> +		  memcg_events(memcg, PGSTEAL_PROACTIVE) +
> +		  memcg_events(memcg, PGSTEAL_KHUGEPAGED);

More extra local variables. You could save the lines by passing the
sums directly instead.
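E.g., using your helper directly:

	memcg_seq_buf_put_name_val(s, "pgscan",
				   memcg_events(memcg, PGSCAN_KSWAPD) +
				   memcg_events(memcg, PGSCAN_DIRECT) +
				   memcg_events(memcg, PGSCAN_PROACTIVE) +
				   memcg_events(memcg, PGSCAN_KHUGEPAGED));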

> +
> +	memcg_seq_buf_put_name_val(s, "pgscan", pgscan);
> +	memcg_seq_buf_put_name_val(s, "pgsteal", pgsteal);
>   
>   	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
> +		u64 events;
> +
>   #ifdef CONFIG_MEMCG_V1
>   		if (memcg_vm_event_stat[i] == PGPGIN ||
>   		    memcg_vm_event_stat[i] == PGPGOUT)
>   			continue;
>   #endif
> -		seq_buf_printf(s, "%s %lu\n",
> -			       vm_event_name(memcg_vm_event_stat[i]),
> -			       memcg_events(memcg, memcg_vm_event_stat[i]));
> +		events = memcg_events(memcg, memcg_vm_event_stat[i]);
> +		memcg_seq_buf_put_name_val(s,
> +					   vm_event_name(memcg_vm_event_stat[i]),
> +					   events);
>   	}
>   }
>   
> @@ -4218,10 +4284,12 @@ static void mem_cgroup_attach(struct cgroup_taskset *tset)
>   
>   static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
>   {
> -	if (value == PAGE_COUNTER_MAX)
> +	if (value == PAGE_COUNTER_MAX) {
>   		seq_puts(m, "max\n");
> -	else
> -		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
> +	} else {
> +		seq_put_decimal_ull(m, "", (u64)value * PAGE_SIZE);
> +		seq_putc(m, '\n');
> +	}
>   
>   	return 0;
>   }
> @@ -4247,7 +4315,8 @@ static int peak_show(struct seq_file *sf, void *v, struct page_counter *pc)
>   	else
>   		peak = max(fd_peak, READ_ONCE(pc->local_watermark));
>   
> -	seq_printf(sf, "%llu\n", peak * PAGE_SIZE);
> +	seq_put_decimal_ull(sf, "", peak * PAGE_SIZE);
> +	seq_putc(sf, '\n');

Your benchmark reads the memory.stat and memory.numa_stat files, but
this function is not reached in those cases. Is this a hot path for
you? If not, maybe just leave this, and any others like it, alone.

>   	return 0;
>   }
>   
> @@ -4480,16 +4549,24 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
>    */
>   static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
>   {
> -	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
> -	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
> -	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
> -	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
> -	seq_printf(m, "oom_kill %lu\n",
> -		   atomic_long_read(&events[MEMCG_OOM_KILL]));
> -	seq_printf(m, "oom_group_kill %lu\n",
> -		   atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
> -	seq_printf(m, "sock_throttled %lu\n",
> -		   atomic_long_read(&events[MEMCG_SOCK_THROTTLED]));
> +	u64 low, high, max, oom, oom_kill;
> +	u64 oom_group_kill, sock_throttled;
> +
> +	low = atomic_long_read(&events[MEMCG_LOW]);
> +	high = atomic_long_read(&events[MEMCG_HIGH]);
> +	max = atomic_long_read(&events[MEMCG_MAX]);
> +	oom = atomic_long_read(&events[MEMCG_OOM]);
> +	oom_kill = atomic_long_read(&events[MEMCG_OOM_KILL]);
> +	oom_group_kill = atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]);
> +	sock_throttled = atomic_long_read(&events[MEMCG_SOCK_THROTTLED]);

Same, more new locals.

> +
> +	memcg_seq_put_name_val(m, "low", low);
> +	memcg_seq_put_name_val(m, "high", high);
> +	memcg_seq_put_name_val(m, "max", max);
> +	memcg_seq_put_name_val(m, "oom", oom);
> +	memcg_seq_put_name_val(m, "oom_kill", oom_kill);
> +	memcg_seq_put_name_val(m, "oom_group_kill", oom_group_kill);
> +	memcg_seq_put_name_val(m, "sock_throttled", sock_throttled);
>   }
>   
>   static int memory_events_show(struct seq_file *m, void *v)
> @@ -4544,7 +4621,7 @@ static int memory_numa_stat_show(struct seq_file *m, void *v)
>   		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
>   			continue;
>   
> -		seq_printf(m, "%s", memory_stats[i].name);
> +		seq_puts(m, memory_stats[i].name);
>   		for_each_node_state(nid, N_MEMORY) {
>   			u64 size;
>   			struct lruvec *lruvec;
> @@ -4552,7 +4629,10 @@ static int memory_numa_stat_show(struct seq_file *m, void *v)
>   			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
>   			size = lruvec_page_state_output(lruvec,
>   							memory_stats[i].idx);
> -			seq_printf(m, " N%d=%llu", nid, size);
> +
> +			seq_put_decimal_ull(m, " N", nid);
> +			seq_putc(m, '=');
> +			seq_put_decimal_ull(m, "", size);
>   		}
>   		seq_putc(m, '\n');
>   	}
> @@ -4565,7 +4645,8 @@ static int memory_oom_group_show(struct seq_file *m, void *v)
>   {
>   	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
>   
> -	seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
> +	seq_put_decimal_ll(m, "", READ_ONCE(memcg->oom_group));
> +	seq_putc(m, '\n');
>   
>   	return 0;
>   }
> @@ -5372,13 +5453,15 @@ static ssize_t swap_max_write(struct kernfs_open_file *of,
>   static int swap_events_show(struct seq_file *m, void *v)
>   {
>   	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
> +	u64 swap_high, swap_max, swap_fail;
> +
> +	swap_high = atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]);
> +	swap_max = atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]);
> +	swap_fail = atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]);

Same, new local variables.

>   
> -	seq_printf(m, "high %lu\n",
> -		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
> -	seq_printf(m, "max %lu\n",
> -		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
> -	seq_printf(m, "fail %lu\n",
> -		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
> +	memcg_seq_put_name_val(m, "high", swap_high);
> +	memcg_seq_put_name_val(m, "max", swap_max);
> +	memcg_seq_put_name_val(m, "fail", swap_fail);
>   
>   	return 0;
>   }
> @@ -5564,7 +5647,8 @@ static int zswap_writeback_show(struct seq_file *m, void *v)
>   {
>   	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
>   
> -	seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
> +	seq_put_decimal_ll(m, "", READ_ONCE(memcg->zswap_writeback));
> +	seq_putc(m, '\n');
>   	return 0;
>   }
>   

