diff --git a/mm/vmstat.c b/mm/vmstat.c index e361c2b3a8bc..2579d4a59364 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -31,7 +31,10 @@ #include "internal.h" +#ifdef CONFIG_SMP static int vmstat_late_init_done; +#endif /* CONFIG_SMP */ + #ifdef CONFIG_NUMA int sysctl_vm_numa_stat = ENABLE_NUMA_STAT; @@ -102,7 +105,7 @@ int sysctl_vm_numa_stat_handler(const struct ctl_table *table, int write, mutex_unlock(&vm_numa_stat_lock); return ret; } -#endif +#endif /* CONFIG_NUMA */ #ifdef CONFIG_VM_EVENT_COUNTERS DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}}; @@ -192,7 +195,7 @@ void fold_vm_numa_events(void) for_each_populated_zone(zone) fold_vm_zone_numa_events(zone); } -#endif +#endif /* CONFIG_NUMA */ #ifdef CONFIG_SMP @@ -542,7 +545,7 @@ void __dec_node_page_state(struct page *page, enum node_stat_item item) } EXPORT_SYMBOL(__dec_node_page_state); -#ifdef CONFIG_HAVE_CMPXCHG_LOCAL +# ifdef CONFIG_HAVE_CMPXCHG_LOCAL /* * If we have cmpxchg_local support then we do not need to incur the overhead * that comes with local_irq_save/restore if we use this_cpu_cmpxchg. @@ -686,7 +689,7 @@ void dec_node_page_state(struct page *page, enum node_stat_item item) mod_node_state(page_pgdat(page), item, -1, -1); } EXPORT_SYMBOL(dec_node_page_state); -#else +# else /* * Use interrupt disable to serialize counter updates */ @@ -765,7 +768,7 @@ void dec_node_page_state(struct page *page, enum node_stat_item item) local_irq_restore(flags); } EXPORT_SYMBOL(dec_node_page_state); -#endif +# endif /* CONFIG_HAVE_CMPXCHG_LOCAL */ /* * Fold a differential into the global counters. 
@@ -830,7 +833,7 @@ static int refresh_cpu_vm_stats(bool do_pagesets) #ifdef CONFIG_NUMA /* 3 seconds idle till flush */ __this_cpu_write(pcp->expire, 3); -#endif +#endif /* CONFIG_NUMA */ } } @@ -867,7 +870,7 @@ static int refresh_cpu_vm_stats(bool do_pagesets) drain_zone_pages(zone, this_cpu_ptr(pcp)); changes++; } -#endif +#endif /* CONFIG_NUMA */ } } @@ -927,7 +930,7 @@ void cpu_vm_stats_fold(int cpu) zone_numa_event_add(v, zone, i); } } -#endif +#endif /* CONFIG_NUMA */ } for_each_online_pgdat(pgdat) { @@ -974,9 +977,9 @@ void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *pzstats) zone_numa_event_add(v, zone, i); } } -#endif +#endif /* CONFIG_NUMA */ } -#endif +#endif /* CONFIG_SMP */ #ifdef CONFIG_NUMA /* @@ -1018,10 +1021,10 @@ unsigned long node_page_state_pages(struct pglist_data *pgdat, enum node_stat_item item) { long x = atomic_long_read(&pgdat->vm_stat[item]); -#ifdef CONFIG_SMP +# ifdef CONFIG_SMP if (x < 0) x = 0; -#endif +# endif /* CONFIG_SMP */ return x; } @@ -1032,7 +1035,7 @@ unsigned long node_page_state(struct pglist_data *pgdat, return node_page_state_pages(pgdat, item); } -#endif +#endif /* CONFIG_NUMA */ /* * Count number of pages "struct page" and "struct page_ext" consume. 
@@ -1156,35 +1159,35 @@ int fragmentation_index(struct zone *zone, unsigned int order) fill_contig_page_info(zone, order, &info); return __fragmentation_index(order, &info); } -#endif +#endif /* CONFIG_COMPACTION */ #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \ defined(CONFIG_NUMA) || defined(CONFIG_MEMCG) -#ifdef CONFIG_ZONE_DMA -#define TEXT_FOR_DMA(xx) xx "_dma", -#else -#define TEXT_FOR_DMA(xx) -#endif - -#ifdef CONFIG_ZONE_DMA32 -#define TEXT_FOR_DMA32(xx) xx "_dma32", -#else -#define TEXT_FOR_DMA32(xx) -#endif - -#ifdef CONFIG_HIGHMEM -#define TEXT_FOR_HIGHMEM(xx) xx "_high", -#else -#define TEXT_FOR_HIGHMEM(xx) -#endif - -#ifdef CONFIG_ZONE_DEVICE -#define TEXT_FOR_DEVICE(xx) xx "_device", -#else -#define TEXT_FOR_DEVICE(xx) -#endif - -#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \ +# ifdef CONFIG_ZONE_DMA +# define TEXT_FOR_DMA(xx) xx "_dma", +# else +# define TEXT_FOR_DMA(xx) +# endif + +# ifdef CONFIG_ZONE_DMA32 +# define TEXT_FOR_DMA32(xx) xx "_dma32", +# else +# define TEXT_FOR_DMA32(xx) +# endif + +# ifdef CONFIG_HIGHMEM +# define TEXT_FOR_HIGHMEM(xx) xx "_high", +# else +# define TEXT_FOR_HIGHMEM(xx) +# endif + +# ifdef CONFIG_ZONE_DEVICE +# define TEXT_FOR_DEVICE(xx) xx "_device", +# else +# define TEXT_FOR_DEVICE(xx) +# endif + +# define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \ TEXT_FOR_HIGHMEM(xx) xx "_movable", \ TEXT_FOR_DEVICE(xx) @@ -1199,23 +1202,23 @@ const char * const vmstat_text[] = { "nr_zone_write_pending", "nr_mlock", "nr_bounce", -#if IS_ENABLED(CONFIG_ZSMALLOC) +# if IS_ENABLED(CONFIG_ZSMALLOC) "nr_zspages", -#endif +# endif "nr_free_cma", -#ifdef CONFIG_UNACCEPTED_MEMORY +# ifdef CONFIG_UNACCEPTED_MEMORY "nr_unaccepted", -#endif +# endif /* enum numa_stat_item counters */ -#ifdef CONFIG_NUMA +# ifdef CONFIG_NUMA "numa_hit", "numa_miss", "numa_foreign", "numa_interleave", "numa_local", "numa_other", -#endif +# endif /* enum node_stat_item counters */ 
"nr_inactive_anon", @@ -1256,21 +1259,21 @@ const char * const vmstat_text[] = { "nr_foll_pin_acquired", "nr_foll_pin_released", "nr_kernel_stack", -#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK) +# if IS_ENABLED(CONFIG_SHADOW_CALL_STACK) "nr_shadow_call_stack", -#endif +# endif "nr_page_table_pages", "nr_sec_page_table_pages", -#ifdef CONFIG_IOMMU_SUPPORT +# ifdef CONFIG_IOMMU_SUPPORT "nr_iommu_pages", -#endif -#ifdef CONFIG_SWAP +# endif +# ifdef CONFIG_SWAP "nr_swapcached", -#endif -#ifdef CONFIG_NUMA_BALANCING +# endif +# ifdef CONFIG_NUMA_BALANCING "pgpromote_success", "pgpromote_candidate", -#endif +# endif "pgdemote_kswapd", "pgdemote_direct", "pgdemote_khugepaged", @@ -1280,7 +1283,7 @@ const char * const vmstat_text[] = { "nr_memmap_pages", "nr_memmap_boot_pages", -#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG) +# if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG) /* enum vm_event_item counters */ "pgpgin", "pgpgout", @@ -1314,10 +1317,10 @@ const char * const vmstat_text[] = { "pgsteal_anon", "pgsteal_file", -#ifdef CONFIG_NUMA +# ifdef CONFIG_NUMA "zone_reclaim_success", "zone_reclaim_failed", -#endif +# endif "pginodesteal", "slabs_scanned", "kswapd_inodesteal", @@ -1331,21 +1334,21 @@ const char * const vmstat_text[] = { "drop_slab", "oom_kill", -#ifdef CONFIG_NUMA_BALANCING +# ifdef CONFIG_NUMA_BALANCING "numa_pte_updates", "numa_huge_pte_updates", "numa_hint_faults", "numa_hint_faults_local", "numa_pages_migrated", -#endif -#ifdef CONFIG_MIGRATION +# endif +# ifdef CONFIG_MIGRATION "pgmigrate_success", "pgmigrate_fail", "thp_migration_success", "thp_migration_fail", "thp_migration_split", -#endif -#ifdef CONFIG_COMPACTION +# endif +# ifdef CONFIG_COMPACTION "compact_migrate_scanned", "compact_free_scanned", "compact_isolated", @@ -1355,16 +1358,16 @@ const char * const vmstat_text[] = { "compact_daemon_wake", "compact_daemon_migrate_scanned", "compact_daemon_free_scanned", -#endif +# endif -#ifdef CONFIG_HUGETLB_PAGE +# ifdef 
CONFIG_HUGETLB_PAGE "htlb_buddy_alloc_success", "htlb_buddy_alloc_fail", -#endif -#ifdef CONFIG_CMA +# endif +# ifdef CONFIG_CMA "cma_alloc_success", "cma_alloc_fail", -#endif +# endif "unevictable_pgs_culled", "unevictable_pgs_scanned", "unevictable_pgs_rescued", @@ -1373,7 +1376,7 @@ const char * const vmstat_text[] = { "unevictable_pgs_cleared", "unevictable_pgs_stranded", -#ifdef CONFIG_TRANSPARENT_HUGEPAGE +# ifdef CONFIG_TRANSPARENT_HUGEPAGE "thp_fault_alloc", "thp_fault_fallback", "thp_fault_fallback_charge", @@ -1390,78 +1393,78 @@ const char * const vmstat_text[] = { "thp_scan_exceed_none_pte", "thp_scan_exceed_swap_pte", "thp_scan_exceed_share_pte", -#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +# ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD "thp_split_pud", -#endif +# endif "thp_zero_page_alloc", "thp_zero_page_alloc_failed", "thp_swpout", "thp_swpout_fallback", -#endif -#ifdef CONFIG_MEMORY_BALLOON +# endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +# ifdef CONFIG_MEMORY_BALLOON "balloon_inflate", "balloon_deflate", -#ifdef CONFIG_BALLOON_COMPACTION +# ifdef CONFIG_BALLOON_COMPACTION "balloon_migrate", -#endif -#endif /* CONFIG_MEMORY_BALLOON */ -#ifdef CONFIG_DEBUG_TLBFLUSH +# endif +# endif /* CONFIG_MEMORY_BALLOON */ +# ifdef CONFIG_DEBUG_TLBFLUSH "nr_tlb_remote_flush", "nr_tlb_remote_flush_received", "nr_tlb_local_flush_all", "nr_tlb_local_flush_one", -#endif /* CONFIG_DEBUG_TLBFLUSH */ +# endif /* CONFIG_DEBUG_TLBFLUSH */ -#ifdef CONFIG_SWAP +# ifdef CONFIG_SWAP "swap_ra", "swap_ra_hit", -#ifdef CONFIG_KSM +# ifdef CONFIG_KSM "ksm_swpin_copy", -#endif -#endif -#ifdef CONFIG_KSM +# endif /* CONFIG_KSM */ +# endif /* CONFIG_SWAP */ +# ifdef CONFIG_KSM "cow_ksm", -#endif -#ifdef CONFIG_ZSWAP +# endif +# ifdef CONFIG_ZSWAP "zswpin", "zswpout", "zswpwb", -#endif -#ifdef CONFIG_X86 +# endif +# ifdef CONFIG_X86 "direct_map_level2_splits", "direct_map_level3_splits", -#endif -#ifdef CONFIG_PER_VMA_LOCK_STATS +# endif +# ifdef CONFIG_PER_VMA_LOCK_STATS 
"vma_lock_success", "vma_lock_abort", "vma_lock_retry", "vma_lock_miss", -#endif -#ifdef CONFIG_DEBUG_STACK_USAGE +# endif +# ifdef CONFIG_DEBUG_STACK_USAGE "kstack_1k", -#if THREAD_SIZE > 1024 +# if THREAD_SIZE > 1024 "kstack_2k", -#endif -#if THREAD_SIZE > 2048 +# endif +# if THREAD_SIZE > 2048 "kstack_4k", -#endif -#if THREAD_SIZE > 4096 +# endif +# if THREAD_SIZE > 4096 "kstack_8k", -#endif -#if THREAD_SIZE > 8192 +# endif +# if THREAD_SIZE > 8192 "kstack_16k", -#endif -#if THREAD_SIZE > 16384 +# endif +# if THREAD_SIZE > 16384 "kstack_32k", -#endif -#if THREAD_SIZE > 32768 +# endif +# if THREAD_SIZE > 32768 "kstack_64k", -#endif -#if THREAD_SIZE > 65536 +# endif +# if THREAD_SIZE > 65536 "kstack_rest", -#endif -#endif -#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */ +# endif +# endif /* CONFIG_DEBUG_STACK_USAGE */ +# endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */ }; #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */ @@ -1515,7 +1518,7 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat, spin_unlock_irqrestore(&zone->lock, flags); } } -#endif +#endif /* CONFIG_DEBUG_FS && CONFIG_COMPACTION || CONFIG_PROC_FS */ #ifdef CONFIG_PROC_FS static void frag_show_print(struct seq_file *m, pg_data_t *pgdat, @@ -1654,7 +1657,7 @@ static void pagetypeinfo_showblockcount(struct seq_file *m, void *arg) */ static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat) { -#ifdef CONFIG_PAGE_OWNER +# ifdef CONFIG_PAGE_OWNER int mtype; if (!static_branch_unlikely(&page_owner_inited)) @@ -1669,7 +1672,7 @@ static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat) walk_zones_in_node(m, pgdat, true, true, pagetypeinfo_showmixedcount_print); -#endif /* CONFIG_PAGE_OWNER */ +# endif /* CONFIG_PAGE_OWNER */ } /* @@ -1777,11 +1780,11 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, seq_printf(m, "\n %-12s %lu", zone_stat_name(i), zone_page_state(zone, i)); -#ifdef 
CONFIG_NUMA +# ifdef CONFIG_NUMA for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) seq_printf(m, "\n %-12s %lu", numa_stat_name(i), zone_numa_event_state(zone, i)); -#endif +# endif seq_printf(m, "\n pagesets"); for_each_online_cpu(i) { @@ -1798,11 +1801,11 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, pcp->count, pcp->high, pcp->batch); -#ifdef CONFIG_SMP +# ifdef CONFIG_SMP pzstats = per_cpu_ptr(zone->per_cpu_zonestats, i); seq_printf(m, "\n vm stats threshold: %d", pzstats->stat_threshold); -#endif +# endif } seq_printf(m, "\n node_unreclaimable: %u" @@ -1858,11 +1861,11 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos) v[i] = global_zone_page_state(i); v += NR_VM_ZONE_STAT_ITEMS; -#ifdef CONFIG_NUMA +# ifdef CONFIG_NUMA for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) v[i] = global_numa_event_state(i); v += NR_VM_NUMA_EVENT_ITEMS; -#endif +# endif for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) { v[i] = global_node_page_state_pages(i); @@ -1877,11 +1880,11 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos) v[NR_MEMMAP_BOOT_PAGES] = atomic_long_read(&nr_memmap_boot_pages); v += NR_VM_STAT_ITEMS; -#ifdef CONFIG_VM_EVENT_COUNTERS +# ifdef CONFIG_VM_EVENT_COUNTERS all_vm_events(v); v[PGPGIN] /= 2; /* sectors -> kbytes */ v[PGPGOUT] /= 2; -#endif +# endif return (unsigned long *)m->private + *pos; } @@ -1930,7 +1933,7 @@ static const struct seq_operations vmstat_op = { static DEFINE_PER_CPU(struct delayed_work, vmstat_work); int sysctl_stat_interval __read_mostly = HZ; -#ifdef CONFIG_PROC_FS +# ifdef CONFIG_PROC_FS static void refresh_vm_stats(struct work_struct *work) { refresh_cpu_vm_stats(true); @@ -1993,7 +1996,7 @@ int vmstat_refresh(const struct ctl_table *table, int write, *lenp = 0; return 0; } -#endif /* CONFIG_PROC_FS */ +# endif /* CONFIG_PROC_FS */ static void vmstat_update(struct work_struct *w) { @@ -2172,7 +2175,7 @@ static int __init vmstat_late_init(void) return 0; } late_initcall(vmstat_late_init); -#endif 
+#endif /* CONFIG_SMP */ struct workqueue_struct *mm_percpu_wq; @@ -2199,13 +2202,13 @@ void __init init_mm_internals(void) cpus_read_unlock(); start_shepherd_timer(); -#endif +#endif /* CONFIG_SMP */ #ifdef CONFIG_PROC_FS proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op); proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op); proc_create_seq("vmstat", 0444, NULL, &vmstat_op); proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op); -#endif +#endif /* CONFIG_PROC_FS */ } #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION) @@ -2341,4 +2344,4 @@ static int __init extfrag_debug_init(void) module_init(extfrag_debug_init); -#endif +#endif /* CONFIG_DEBUG_FS && CONFIG_COMPACTION */