Message-Id: <20200615221607.7764-3-peterx@redhat.com>
Date: Mon, 15 Jun 2020 18:15:44 -0400
From: Peter Xu <peterx@...hat.com>
To: linux-kernel@...r.kernel.org
Cc: Gerald Schaefer <gerald.schaefer@...ibm.com>,
Andrew Morton <akpm@...ux-foundation.org>, peterx@...hat.com,
Linus Torvalds <torvalds@...ux-foundation.org>,
Andrea Arcangeli <aarcange@...hat.com>
Subject: [PATCH 02/25] mm: Introduce mm_fault_accounting()

Provide a helper for doing memory page fault accounting across archs.
It can be defined unconditionally because perf_sw_event() is always
defined and compiles to a no-op when !CONFIG_PERF_EVENTS. Besides
raising the perf software events, the helper also bumps the task's
maj_flt/min_flt counters.
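
As a usage sketch (a hypothetical caller, not part of this patch,
since arch fault handlers differ in detail), an arch's fault path
could drop its open-coded accounting and make a single call once
handle_mm_fault() has returned:

	/*
	 * Hypothetical arch fault-handler excerpt: account the fault
	 * once, passing whether it was a major fault.
	 */
	fault = handle_mm_fault(vma, address, flags);
	if (!(fault & (VM_FAULT_ERROR | VM_FAULT_RETRY)))
		mm_fault_accounting(current, regs, address,
				    fault & VM_FAULT_MAJOR);

The counters updated here are the ones already exposed today: maj_flt
and min_flt feed /proc/<pid>/stat, and the perf software events map to
the "page-faults", "minor-faults" and "major-faults" counters.
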
Signed-off-by: Peter Xu <peterx@...hat.com>
---
include/linux/mm.h |  2 ++
mm/memory.c        | 18 ++++++++++++++++++
2 files changed, 20 insertions(+)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index f3fe7371855c..b96db64125be 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1649,6 +1649,8 @@ void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);
+void mm_fault_accounting(struct task_struct *task, struct pt_regs *regs,
+			 unsigned long address, bool major);

#ifdef CONFIG_MMU
extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
diff --git a/mm/memory.c b/mm/memory.c
index f703fe8c8346..c4f8319f0ed8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -71,6 +71,8 @@
#include <linux/dax.h>
#include <linux/oom.h>
#include <linux/numa.h>
+#include <linux/perf_event.h>
+#include <linux/ptrace.h>

#include <trace/events/kmem.h>

@@ -4397,6 +4399,22 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
}
EXPORT_SYMBOL_GPL(handle_mm_fault);

+void mm_fault_accounting(struct task_struct *task, struct pt_regs *regs,
+			 unsigned long address, bool major)
+{
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+	if (major) {
+		task->maj_flt++;
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+			      regs, address);
+	} else {
+		task->min_flt++;
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+			      regs, address);
+	}
+}
+EXPORT_SYMBOL_GPL(mm_fault_accounting);
+
#ifndef __PAGETABLE_P4D_FOLDED
/*
* Allocate p4d page table.
--
2.26.2