Message-Id: <20251020031655.1093-2-laoar.shao@gmail.com>
Date: Mon, 20 Oct 2025 11:16:51 +0800
From: Yafang Shao <laoar.shao@...il.com>
To: akpm@...ux-foundation.org,
ast@...nel.org,
daniel@...earbox.net,
andrii@...nel.org,
martin.lau@...ux.dev,
eddyz87@...il.com,
song@...nel.org,
yonghong.song@...ux.dev,
john.fastabend@...il.com,
kpsingh@...nel.org,
sdf@...ichev.me,
haoluo@...gle.com,
jolsa@...nel.org,
david@...hat.com,
ziy@...dia.com,
lorenzo.stoakes@...cle.com,
Liam.Howlett@...cle.com,
npache@...hat.com,
ryan.roberts@....com,
dev.jain@....com,
hannes@...xchg.org,
usamaarif642@...il.com,
gutierrez.asier@...wei-partners.com,
willy@...radead.org,
ameryhung@...il.com,
rientjes@...gle.com,
corbet@....net,
21cnbao@...il.com,
shakeel.butt@...ux.dev,
tj@...nel.org,
lance.yang@...ux.dev,
rdunlap@...radead.org
Cc: bpf@...r.kernel.org,
linux-mm@...ck.org,
linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org,
Yafang Shao <laoar.shao@...il.com>
Subject: [PATCH v11 mm-new 06/10] mm: bpf-thp: add support for global mode
The per-process BPF-THP mode is unsuitable for managing shared resources
such as shmem THP and file-backed THP. This mirrors a known limitation of
cgroups in similar scenarios [0].
Introduce a global BPF-THP mode to address this gap. While a global
instance is registered:
- All existing per-process instances stop taking effect
- New per-process registrations are blocked
- Existing per-process instances remain registered (no forced unregistration)
The global mode takes precedence over per-process instances. Updates are
confined to the same scope: a global instance can only be replaced by
another global instance, and a per-process instance by another per-process
instance.
Link: https://lore.kernel.org/linux-mm/YwNold0GMOappUxc@slm.duckdns.org/ [0]
Signed-off-by: Yafang Shao <laoar.shao@...il.com>
---
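Not part of the commit message, just an illustration of the intended flow: a
minimal, untested BPF-side sketch of a global registration. The struct and
field names (struct bpf_thp_ops, thp_get_order, pid) come from this series;
the section names, the program name, and the choice of order 9 are
assumptions made here for illustration. Leaving .pid at 0 selects the global
mode added by this patch.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

/*
 * Return the single order to allow; the kernel masks the candidate
 * orders with BIT(return value), so returning 9 permits only
 * PMD-sized THP on x86-64 with 4K pages.
 */
SEC("struct_ops/thp_get_order")
int BPF_PROG(thp_get_order, struct vm_area_struct *vma,
             enum tva_type type, unsigned long orders)
{
        return 9;
}

SEC(".struct_ops.link")
struct bpf_thp_ops global_thp = {
        .thp_get_order = (void *)thp_get_order,
        /* .pid deliberately left at 0: register in global mode. */
};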
mm/huge_memory_bpf.c | 109 ++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 107 insertions(+), 2 deletions(-)
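And a matching userspace loader sketch using the existing libbpf struct_ops
API. The object file name "thp_global.bpf.o" and the map name "global_thp"
are illustrative only; the point is that attaching the struct_ops map reaches
bpf_thp_reg() with pid == 0 and therefore bpf_thp_reg_global(), while
destroying the link ends up in bpf_thp_unreg_global().

#include <bpf/libbpf.h>

int main(void)
{
        struct bpf_object *obj;
        struct bpf_link *link;
        int err;

        obj = bpf_object__open_file("thp_global.bpf.o", NULL);
        if (!obj)
                return 1;

        err = bpf_object__load(obj);
        if (err)
                goto out;

        /* Registers the global BPF-THP instance (pid == 0). */
        link = bpf_map__attach_struct_ops(
                        bpf_object__find_map_by_name(obj, "global_thp"));
        if (!link) {
                err = 1;
                goto out;
        }

        /* ... keep the link alive while the policy should apply ... */

        /* Unregisters the global instance. */
        bpf_link__destroy(link);
out:
        bpf_object__close(obj);
        return err ? 1 : 0;
}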
diff --git a/mm/huge_memory_bpf.c b/mm/huge_memory_bpf.c
index e8894c10d1d9..cad1ca6f59a4 100644
--- a/mm/huge_memory_bpf.c
+++ b/mm/huge_memory_bpf.c
@@ -33,6 +33,28 @@ struct bpf_thp_ops {
};
static DEFINE_SPINLOCK(thp_ops_lock);
+static struct bpf_thp_ops __rcu *bpf_thp_global; /* global mode */
+
+static unsigned long
+bpf_hook_thp_get_orders_global(struct vm_area_struct *vma,
+ enum tva_type type,
+ unsigned long orders)
+{
+ struct bpf_thp_ops *bpf_thp;
+ int bpf_order;
+
+ rcu_read_lock();
+ bpf_thp = rcu_dereference(bpf_thp_global);
+ if (!bpf_thp || !bpf_thp->thp_get_order)
+ goto out;
+
+ bpf_order = bpf_thp->thp_get_order(vma, type, orders);
+ orders &= BIT(bpf_order);
+
+out:
+ rcu_read_unlock();
+ return orders;
+}
unsigned long bpf_hook_thp_get_orders(struct vm_area_struct *vma,
enum tva_type type,
@@ -45,6 +67,10 @@ unsigned long bpf_hook_thp_get_orders(struct vm_area_struct *vma,
if (!mm)
return orders;
+ /* Global BPF-THP takes precedence over per-process BPF-THP. */
+ if (rcu_access_pointer(bpf_thp_global))
+ return bpf_hook_thp_get_orders_global(vma, type, orders);
+
rcu_read_lock();
bpf_thp = rcu_dereference(mm->bpf_mm.bpf_thp);
if (!bpf_thp || !bpf_thp->thp_get_order)
@@ -177,6 +203,23 @@ static int bpf_thp_init_member(const struct btf_type *t,
return 0;
}
+static int bpf_thp_reg_global(void *kdata, struct bpf_link *link)
+{
+ struct bpf_thp_ops *ops = kdata;
+
+ /* Protect the global pointer bpf_thp_global from concurrent writes. */
+ spin_lock(&thp_ops_lock);
+ /* Only one instance is allowed. */
+ if (rcu_access_pointer(bpf_thp_global)) {
+ spin_unlock(&thp_ops_lock);
+ return -EBUSY;
+ }
+
+ rcu_assign_pointer(bpf_thp_global, ops);
+ spin_unlock(&thp_ops_lock);
+ return 0;
+}
+
static int bpf_thp_reg(void *kdata, struct bpf_link *link)
{
struct bpf_thp_ops *bpf_thp = kdata;
@@ -187,6 +230,11 @@ static int bpf_thp_reg(void *kdata, struct bpf_link *link)
pid_t pid;
pid = bpf_thp->pid;
+
+ /* Fall back to global mode if pid is not set. */
+ if (!pid)
+ return bpf_thp_reg_global(kdata, link);
+
p = find_get_task_by_vpid(pid);
if (!p)
return -ESRCH;
@@ -207,8 +255,10 @@ static int bpf_thp_reg(void *kdata, struct bpf_link *link)
* might register this task simultaneously.
*/
spin_lock(&thp_ops_lock);
- /* Each process is exclusively managed by a single BPF-THP. */
- if (rcu_access_pointer(mm->bpf_mm.bpf_thp))
+ /* Each process is exclusively managed by a single BPF-THP.
+ * Global mode disables per-process instances.
+ */
+ if (rcu_access_pointer(mm->bpf_mm.bpf_thp) || rcu_access_pointer(bpf_thp_global))
goto out_lock;
err = 0;
rcu_assign_pointer(mm->bpf_mm.bpf_thp, bpf_thp);
@@ -224,12 +274,33 @@ static int bpf_thp_reg(void *kdata, struct bpf_link *link)
return err;
}
+static void bpf_thp_unreg_global(void *kdata, struct bpf_link *link)
+{
+ struct bpf_thp_ops *bpf_thp;
+
+ spin_lock(&thp_ops_lock);
+ if (!rcu_access_pointer(bpf_thp_global)) {
+ spin_unlock(&thp_ops_lock);
+ return;
+ }
+
+ bpf_thp = rcu_replace_pointer(bpf_thp_global, NULL,
+ lockdep_is_held(&thp_ops_lock));
+ WARN_ON_ONCE(!bpf_thp);
+ spin_unlock(&thp_ops_lock);
+
+ synchronize_rcu();
+}
+
static void bpf_thp_unreg(void *kdata, struct bpf_link *link)
{
struct bpf_thp_ops *bpf_thp = kdata;
struct bpf_mm_ops *bpf_mm;
struct list_head *pos, *n;
+ if (!bpf_thp->pid)
+ return bpf_thp_unreg_global(kdata, link);
+
spin_lock(&thp_ops_lock);
list_for_each_safe(pos, n, &bpf_thp->mm_list) {
bpf_mm = list_entry(pos, struct bpf_mm_ops, bpf_thp_list);
@@ -242,6 +313,31 @@ static void bpf_thp_unreg(void *kdata, struct bpf_link *link)
synchronize_rcu();
}
+static int bpf_thp_update_global(void *kdata, void *old_kdata, struct bpf_link *link)
+{
+ struct bpf_thp_ops *old_bpf_thp = old_kdata;
+ struct bpf_thp_ops *bpf_thp = kdata;
+ struct bpf_thp_ops *old_global;
+
+ if (!old_bpf_thp || !bpf_thp)
+ return -EINVAL;
+
+ spin_lock(&thp_ops_lock);
+ /* BPF-THP global instance has already been removed. */
+ if (!rcu_access_pointer(bpf_thp_global)) {
+ spin_unlock(&thp_ops_lock);
+ return -ENOENT;
+ }
+
+ old_global = rcu_replace_pointer(bpf_thp_global, bpf_thp,
+ lockdep_is_held(&thp_ops_lock));
+ WARN_ON_ONCE(!old_global);
+ spin_unlock(&thp_ops_lock);
+
+ synchronize_rcu();
+ return 0;
+}
+
static int bpf_thp_update(void *kdata, void *old_kdata, struct bpf_link *link)
{
struct bpf_thp_ops *old_bpf_thp = old_kdata;
@@ -249,6 +345,15 @@ static int bpf_thp_update(void *kdata, void *old_kdata, struct bpf_link *link)
struct bpf_mm_ops *bpf_mm;
struct list_head *pos, *n;
+ /* Updates are confined to instances of the same scope:
+ * global to global, process-local to process-local.
+ */
+ if (!!old_bpf_thp->pid != !!bpf_thp->pid)
+ return -EINVAL;
+
+ if (!old_bpf_thp->pid)
+ return bpf_thp_update_global(kdata, old_kdata, link);
+
INIT_LIST_HEAD(&bpf_thp->mm_list);
/* Could be optimized to a per-instance lock if this lock becomes a bottleneck. */
--
2.47.3