Message-ID: <56E5AE88.5030205@linaro.org>
Date: Sun, 13 Mar 2016 18:16:40 +0000
From: Kieran Bingham <kieran.bingham@...aro.org>
To: Jan Kiszka <jan.kiszka@...mens.com>, linux-kernel@...r.kernel.org
Cc: lee.jones@...aro.org, peter.griffin@...aro.org,
maxime.coquelin@...com
Subject: Re: [PATCHv3 09/13] scripts/gdb: Add meminfo command
On 13/03/16 16:34, Jan Kiszka wrote:
> On 2016-03-03 12:41, Kieran Bingham wrote:
>> Provide an equivalent of /proc/meminfo which should be available from
>> core dumps or crashed kernels. This should allow a debugger to identify
>> whether memory pressure was a factor in the issue under investigation.
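>>
>> A sample session might look like this (the figures below are invented
>> for illustration only):
>>
>>   (gdb) lx-meminfo
>>   MemTotal:         510724 kB
>>   MemFree:          395156 kB
>>   MemAvailable:     434580 kB
>>   ...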
>>
>
> Sounds useful.
>
>> Signed-off-by: Kieran Bingham <kieran.bingham@...aro.org>
>>
>> ---
>>
>> Changes from v1:
>> - Updated to use LX_ macros for constants
>> - Utilise the LX_CONFIG() options for conditional printing
>> - Fixed meminfo command on Jan's target .config
>> - Added missing segments to meminfo command (HUGEPAGE, QUICKLIST)
>> - Adjusted for new list_for_each_entry() function
>> - Fixed up for !CONFIG_SWAP and !CONFIG_MMU targets (Tested STM32)
>>
>> Changes from v2:
>> - Reduce line size on output lines causing pep8 warnings
>> - Remove crept in 'pass' statement
>> ---
>> scripts/gdb/linux/constants.py.in | 34 ++++++
>> scripts/gdb/linux/proc.py | 228 ++++++++++++++++++++++++++++++++++++++
>> 2 files changed, 262 insertions(+)
>>
>> diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
>> index 57213ad8cf75..66562a8242bd 100644
>> --- a/scripts/gdb/linux/constants.py.in
>> +++ b/scripts/gdb/linux/constants.py.in
>> @@ -12,8 +12,16 @@
>> *
>> */
>>
>> +#include <asm/page.h>
>> +#include <asm/pgtable.h>
>> +#include <asm/thread_info.h>
>> +
>> #include <linux/fs.h>
>> +#include <linux/swap.h>
>> #include <linux/mount.h>
>> +#include <linux/huge_mm.h>
>> +#include <linux/vmalloc.h>
>> +
>>
>> /* We need to stringify expanded macros so that they can be parsed */
>>
>> @@ -51,3 +59,29 @@ LX_VALUE(MNT_NOATIME)
>> LX_VALUE(MNT_NODIRATIME)
>> LX_VALUE(MNT_RELATIME)
>>
>> +/* asm/page.h */
>> +LX_GDBPARSED(PAGE_SHIFT)
>> +
>> +/* asm/thread_info.h */
>> +LX_GDBPARSED(THREAD_SIZE)
>> +
>> +/* linux/vmalloc.h */
>> +LX_GDBPARSED(VMALLOC_TOTAL)
>> +
>> +/* linux/swap.h */
>> +LX_GDBPARSED(MAX_SWAPFILES)
>> +
>> +
>> +/* Kernel Configs */
>> +LX_CONFIG(CONFIG_HIGHMEM)
>> +LX_CONFIG(CONFIG_MEMORY_FAILURE)
>> +LX_CONFIG(CONFIG_TRANSPARENT_HUGEPAGE)
>> +LX_CONFIG(CONFIG_CMA)
>> +LX_CONFIG(CONFIG_MMU)
>> +LX_CONFIG(CONFIG_SWAP)
>> +
>> +#ifndef CONFIG_NR_QUICK
>> +#define CONFIG_NR_QUICK 0
>> +#endif
>> +LX_VALUE(CONFIG_NR_QUICK)
>> +LX_CONFIG(CONFIG_QUICKLIST)
>> diff --git a/scripts/gdb/linux/proc.py b/scripts/gdb/linux/proc.py
>> index 115f20b07a54..e5a8dbe3aa3a 100644
>> --- a/scripts/gdb/linux/proc.py
>> +++ b/scripts/gdb/linux/proc.py
>> @@ -195,3 +195,231 @@ values of that process namespace"""
>> info_opts(MNT_INFO, m_flags)))
>>
>> LxMounts()
>> +
>> +
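>> +# Cached type and derived pointer type used to walk the global
>> +# all_bdevs list in nr_blockdev_pages() below.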
>> +bdev_type = utils.CachedType("struct block_device")
>> +bdev_ptr_type = bdev_type.get_type().pointer()
>> +
>> +
>> +class LxMeminfo(gdb.Command):
>> + """ Identify the memory usage, statistics, and availability
>> +
>> +Equivalent to cat /proc/meminfo on a running target """
>> +
>> + def __init__(self):
>> + super(LxMeminfo, self).__init__("lx-meminfo", gdb.COMMAND_DATA)
>> +
>> + def K(self, val):
>> + # Convert from PAGES to KB
>> + return int(val << (constants.LX_PAGE_SHIFT - 10))
>> +
>> + def page_K(self, remote_value):
>> + # Obtain the page value, and convert from PAGES to KB
>> + val = int(gdb.parse_and_eval(remote_value))
>> + return self.K(val)
>> +
>> + def gps(self, enum_zone_stat_item):
>> + # Access the Global Page State structure
>> + # I would prefer to read this structure in one go and then index
>> + # from the enum. But we can't determine the enum values without
>> + # a call to GDB anyway, so we may as well take the easy route and
>> + # get the value.
>> + remote_value = "vm_stat[" + enum_zone_stat_item + "].counter"
>> + return int(gdb.parse_and_eval(remote_value))
>> +
>> + def gps_K(self, enum_zone_stat_item):
>> + return self.K(self.gps(enum_zone_stat_item))
>> +
>> + def nr_blockdev_pages(self):
>> + bdevs_head = gdb.parse_and_eval("all_bdevs")
>> + pages = 0
>> + for bdev in lists.list_for_each_entry(bdevs_head,
>> + bdev_ptr_type,
>> + "bd_list"):
>> + try:
>> + pages += bdev['bd_inode']['i_mapping']['nrpages']
>> + except gdb.error:
>> + # Any memory read failures are simply not counted
>> + pass
>> + return pages
>> +
>> + def total_swapcache_pages(self):
>> + pages = 0
>> + if not constants.LX_CONFIG_SWAP:
>> + return 0
>> +
>> + for i in range(0, int(constants.LX_MAX_SWAPFILES)):
>> + swap_space = "swapper_spaces[" + str(i) + "].nrpages"
>> + pages += int(gdb.parse_and_eval(swap_space))
>> + return pages
>> +
>> + def vm_commit_limit(self, totalram_pages):
>> + total_swap_pages = 0
>> + overcommit = int(gdb.parse_and_eval("sysctl_overcommit_kbytes"))
>> + overcommit_ratio = int(gdb.parse_and_eval("sysctl_overcommit_ratio"))
>> +
>> + if constants.LX_CONFIG_SWAP:
>> + total_swap_pages = int(gdb.parse_and_eval("total_swap_pages"))
>> +
>> + hugetlb_total_pages = 0 # hugetlb_total_pages()
>> +
>> + if overcommit:
>> + allowed = overcommit >> (constants.LX_PAGE_SHIFT - 10)
>> + else:
>> + allowed = ((totalram_pages - hugetlb_total_pages) *
>> + overcommit_ratio / 100)
>> +
>> + allowed += total_swap_pages
>> + return allowed
>> +
>> + def quicklist_total_size(self):
>> + count = 0
>> + quicklist = utils.gdb_eval_or_none("quicklist")
>> + if quicklist is None:
>> + return 0
>> +
>> + for cpu in cpus.each_online_cpu():
>> + ql = cpus.per_cpu(quicklist, cpu)
>> + for q in range(0, constants.LX_CONFIG_NR_QUICK):
>> + # for (q = ql; q < ql + CONFIG_NR_QUICK; q++)
>> + # count += q->nr_pages
>> + count += ql[q]['nr_pages']
>> +
>> + return count
>> +
>> + # Main lx-meminfo command execution
>> + # See fs/proc/meminfo.c:meminfo_proc_show()
>> + def invoke(self, arg, from_tty):
>> + totalram = int(gdb.parse_and_eval("totalram_pages"))
>> + freeram = self.gps("NR_FREE_PAGES")
>> + reclaimable = self.gps("NR_SLAB_RECLAIMABLE")
>> + unreclaimable = self.gps("NR_SLAB_UNRECLAIMABLE")
>> + slab = reclaimable + unreclaimable
>> + # for_each_zone(zone)
>> + # wmark_low += zone->watermark[WMARK_LOW];
>> + wmark_low = 0 # Zone parsing is unimplemented
>> +
>> + available = freeram - wmark_low
>> + available += reclaimable - min(reclaimable // 2, wmark_low)
>> +
>> + bufferram = self.nr_blockdev_pages()
>> + swapcached = self.total_swapcache_pages()
>> +
>> + file_pages = self.gps("NR_FILE_PAGES")
>> + cached = file_pages - swapcached - bufferram
>> +
>> + # LRU Pages
>> + active_pages_anon = self.gps("NR_ACTIVE_ANON")
>> + inactive_pages_anon = self.gps("NR_INACTIVE_ANON")
>> + active_pages_file = self.gps("NR_ACTIVE_FILE")
>> + inactive_pages_file = self.gps("NR_INACTIVE_FILE")
>> + unevictable_pages = self.gps("NR_UNEVICTABLE")
>> + active_pages = active_pages_anon + active_pages_file
>> + inactive_pages = inactive_pages_anon + inactive_pages_file
>> +
>> + kernelstack = int(self.gps("NR_KERNEL_STACK") *
>> + constants.LX_THREAD_SIZE / 1024)
>> +
>> + commitlimit = int(self.vm_commit_limit(totalram))
>> + committed_as = int(gdb.parse_and_eval("vm_committed_as.count"))
>> +
>> + vmalloc_total = int(constants.LX_VMALLOC_TOTAL >> 10)
>> +
>> + gdb.write(
>> + "MemTotal: {:8d} kB\n".format(self.K(totalram)) +
>> + "MemFree: {:8d} kB\n".format(self.K(freeram)) +
>> + "MemAvailable: {:8d} kB\n".format(self.K(available)) +
>> + "Buffers: {:8d} kB\n".format(self.K(bufferram)) +
>> + "Cached: {:8d} kB\n".format(self.K(cached)) +
>> + "SwapCached: {:8d} kB\n".format(self.K(swapcached)) +
>> + "Active: {:8d} kB\n".format(self.K(active_pages)) +
>> + "Inactive: {:8d} kB\n".format(self.K(inactive_pages)) +
>> + "Active(anon): {:8d} kB\n".format(self.K(active_pages_anon)) +
>> + "Inactive(anon): {:8d} kB\n".format(self.K(inactive_pages_anon)) +
>> + "Active(file): {:8d} kB\n".format(self.K(active_pages_file)) +
>> + "Inactive(file): {:8d} kB\n".format(self.K(inactive_pages_file)) +
>> + "Unevictable: {:8d} kB\n".format(self.K(unevictable_pages)) +
>> + "Mlocked: {:8d} kB\n".format(self.gps_K("NR_MLOCK"))
>> + )
>> +
>> + if constants.LX_CONFIG_HIGHMEM:
>> + totalhigh = int(gdb.parse_and_eval("totalhigh_pages"))
>> + freehigh = int(gdb.parse_and_eval("nr_free_highpages()"))
>> + lowtotal = totalram - totalhigh
>> + lowfree = freeram - freehigh
>> + gdb.write(
>> + "HighTotal: {:8d} kB\n".format(self.K(totalhigh)) +
>> + "HighFree: {:8d} kB\n".format(self.K(freehigh)) +
>> + "LowTotal: {:8d} kB\n".format(self.K(lowtotal)) +
>> + "LowFree: {:8d} kB\n".format(self.K(lowfree))
>> + )
>> +
>> + if not constants.LX_CONFIG_MMU:
>> + mmap_pg_alloc = gdb.parse_and_eval("mmap_pages_allocated.counter")
>> + gdb.write(
>> + "MmapCopy: {:8d} kB\n".format(self.K(mmap_pg_alloc))
>> + )
>> +
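>> + # Swap totals are not yet calculated here; zero is reported
>> + # as a placeholder.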
>> + gdb.write(
>> + "SwapTotal: {:8d} kB\n".format(self.K(0)) +
>> + "SwapFree: {:8d} kB\n".format(self.K(0)) +
>> + "Dirty: {:8d} kB\n".format(self.gps_K("NR_FILE_DIRTY")) +
>> + "Writeback: {:8d} kB\n".format(self.gps_K("NR_WRITEBACK")) +
>> + "AnonPages: {:8d} kB\n".format(self.gps_K("NR_ANON_PAGES")) +
>> + "Mapped: {:8d} kB\n".format(self.gps_K("NR_FILE_MAPPED")) +
>> + "Shmem: {:8d} kB\n".format(self.gps_K("NR_SHMEM")) +
>> + "Slab: {:8d} kB\n".format(self.K(slab)) +
>> + "SReclaimable: {:8d} kB\n".format(self.K(reclaimable)) +
>> + "SUnreclaim: {:8d} kB\n".format(self.K(unreclaimable)) +
>> + "KernelStack: {:8d} kB\n".format(kernelstack) +
>> + "PageTables: {:8d} kB\n".format(self.gps_K("NR_PAGETABLE"))
>> + )
>> +
>> + if constants.LX_CONFIG_QUICKLIST:
>> + quicklist = self.quicklist_total_size()
>> + gdb.write(
>> + "Quicklists: {:8d} kB\n".format(self.K(quicklist))
>
> scripts/gdb/linux/proc.py:381:16: E121 continuation line under-indented
> for hanging indent
>
> Please make sure to run pep8 on the series before posting.
My apologies for the error, but pep8 runs clean on my laptop.
Which version are you running?
I have the following:
$ pep8 --version
1.6.2
Perhaps I need to update something here.
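
For reference, I assume something like this should reproduce the
warning (modulo pep8 version differences):

$ pep8 scripts/gdb/linux/proc.py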
Kieran