Message-Id: <1645095472-26530-12-git-send-email-byungchul.park@lge.com>
Date: Thu, 17 Feb 2022 19:57:47 +0900
From: Byungchul Park <byungchul.park@....com>
To: torvalds@...ux-foundation.org
Cc: damien.lemoal@...nsource.wdc.com, linux-ide@...r.kernel.org,
adilger.kernel@...ger.ca, linux-ext4@...r.kernel.org,
mingo@...hat.com, linux-kernel@...r.kernel.org,
peterz@...radead.org, will@...nel.org, tglx@...utronix.de,
rostedt@...dmis.org, joel@...lfernandes.org, sashal@...nel.org,
daniel.vetter@...ll.ch, chris@...is-wilson.co.uk,
duyuyang@...il.com, johannes.berg@...el.com, tj@...nel.org,
tytso@....edu, willy@...radead.org, david@...morbit.com,
amir73il@...il.com, bfields@...ldses.org,
gregkh@...uxfoundation.org, kernel-team@....com,
linux-mm@...ck.org, akpm@...ux-foundation.org, mhocko@...nel.org,
minchan@...nel.org, hannes@...xchg.org, vdavydov.dev@...il.com,
sj@...nel.org, jglisse@...hat.com, dennis@...nel.org, cl@...ux.com,
penberg@...nel.org, rientjes@...gle.com, vbabka@...e.cz,
ngupta@...are.org, linux-block@...r.kernel.org, axboe@...nel.dk,
paolo.valente@...aro.org, josef@...icpanda.com,
linux-fsdevel@...r.kernel.org, viro@...iv.linux.org.uk,
jack@...e.cz, jack@...e.com, jlayton@...nel.org,
dan.j.williams@...el.com, hch@...radead.org, djwong@...nel.org,
dri-devel@...ts.freedesktop.org, airlied@...ux.ie,
rodrigosiqueiramelo@...il.com, melissa.srw@...il.com,
hamohammed.sa@...il.com
Subject: [PATCH 11/16] dept: Introduce split map concept and new APIs for them
There are cases where the total size of the maps used for one type of
wait/event becomes very large. For instance, struct page can be such a
type for (un)lock_page(). If each struct page kept its own map, the
additional memory would be 'the # of pages * sizeof(struct dept_map)',
which might be too big to accept on some systems.
It'd be better to have a split map, where one part is kept in each
instance and the other part is shared by all instances of the type,
along with new APIs working on them. So introduce the split map and
the new APIs for it.
Signed-off-by: Byungchul Park <byungchul.park@....com>
---
include/linux/dept.h | 36 ++++++++++++++
kernel/dependency/dept.c | 122 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 158 insertions(+)
diff --git a/include/linux/dept.h b/include/linux/dept.h
index 2ac4bca..531065a 100644
--- a/include/linux/dept.h
+++ b/include/linux/dept.h
@@ -351,6 +351,30 @@ struct dept_map {
bool nocheck;
};
+struct dept_map_each {
+ /*
+ * wait timestamp associated with this map
+ */
+ unsigned int wgen;
+};
+
+struct dept_map_common {
+ const char *name;
+ struct dept_key *keys;
+ int sub_usr;
+
+ /*
+ * It's a local copy for fast access to the associated classes.
+ * Also used as the dept_key instance for a statically defined map.
+ */
+ struct dept_key keys_local;
+
+ /*
+ * whether this map should be checked or not
+ */
+ bool nocheck;
+};
+
struct dept_task {
/*
* all event contexts that have entered and before exiting
@@ -441,6 +465,11 @@ struct dept_task {
extern void dept_ecxt_exit(struct dept_map *m, unsigned long ip);
extern struct dept_map *dept_top_map(void);
extern void dept_warn_on(bool cond);
+extern void dept_split_map_each_init(struct dept_map_each *me);
+extern void dept_split_map_common_init(struct dept_map_common *mc, struct dept_key *k, const char *n);
+extern void dept_wait_split_map(struct dept_map_each *me, struct dept_map_common *mc, unsigned long ip, const char *w_fn, int ne);
+extern void dept_event_split_map(struct dept_map_each *me, struct dept_map_common *mc, unsigned long ip, const char *e_fn);
+extern void dept_ask_event_split_map(struct dept_map_each *me, struct dept_map_common *mc);
/*
* for users who want to manage external keys
@@ -450,6 +479,8 @@ struct dept_task {
#else /* !CONFIG_DEPT */
struct dept_key { };
struct dept_map { };
+struct dept_map_each { };
+struct dept_map_common { };
struct dept_task { };
#define DEPT_TASK_INITIALIZER(t)
@@ -474,6 +505,11 @@ struct dept_task {
#define dept_ecxt_exit(m, ip) do { } while (0)
#define dept_top_map() NULL
#define dept_warn_on(c) do { } while (0)
+#define dept_split_map_each_init(me) do { } while (0)
+#define dept_split_map_common_init(mc, k, n) do { (void)(n); (void)(k); } while (0)
+#define dept_wait_split_map(me, mc, ip, w_fn, ne) do { } while (0)
+#define dept_event_split_map(me, mc, ip, e_fn) do { } while (0)
+#define dept_ask_event_split_map(me, mc) do { } while (0)
#define dept_key_init(k) do { (void)(k); } while (0)
#define dept_key_destroy(k) do { (void)(k); } while (0)
#endif
diff --git a/kernel/dependency/dept.c b/kernel/dependency/dept.c
index b8d56075..4510dbb 100644
--- a/kernel/dependency/dept.c
+++ b/kernel/dependency/dept.c
@@ -2339,6 +2339,128 @@ void dept_event(struct dept_map *m, unsigned long e_f, unsigned long ip,
}
EXPORT_SYMBOL_GPL(dept_event);
+void dept_split_map_each_init(struct dept_map_each *me)
+{
+ struct dept_task *dt = dept_task();
+ unsigned long flags;
+
+ if (READ_ONCE(dept_stop) || dt->recursive)
+ return;
+
+ flags = dept_enter();
+
+ me->wgen = 0U;
+
+ dept_exit(flags);
+}
+EXPORT_SYMBOL_GPL(dept_split_map_each_init);
+
+void dept_split_map_common_init(struct dept_map_common *mc,
+ struct dept_key *k, const char *n)
+{
+ struct dept_task *dt = dept_task();
+ unsigned long flags;
+
+ if (READ_ONCE(dept_stop) || dt->recursive)
+ return;
+
+ flags = dept_enter();
+
+ if (mc->keys != k)
+ mc->keys = k;
+ clean_classes_cache(&mc->keys_local);
+
+ /*
+ * sub_usr is not used with split map.
+ */
+ mc->sub_usr = 0;
+ mc->name = n;
+ mc->nocheck = false;
+
+ dept_exit(flags);
+}
+EXPORT_SYMBOL_GPL(dept_split_map_common_init);
+
+void dept_wait_split_map(struct dept_map_each *me,
+ struct dept_map_common *mc,
+ unsigned long ip, const char *w_fn, int ne)
+{
+ struct dept_task *dt = dept_task();
+ struct dept_class *c;
+ struct dept_key *k;
+ unsigned long flags;
+
+ if (READ_ONCE(dept_stop) || dt->recursive)
+ return;
+
+ if (mc->nocheck)
+ return;
+
+ flags = dept_enter();
+
+ k = mc->keys ?: &mc->keys_local;
+ c = check_new_class(&mc->keys_local, k, 0, mc->name);
+ if (c)
+ add_wait(c, ip, w_fn, ne);
+
+ dept_exit(flags);
+}
+EXPORT_SYMBOL_GPL(dept_wait_split_map);
+
+void dept_ask_event_split_map(struct dept_map_each *me,
+ struct dept_map_common *mc)
+{
+ struct dept_task *dt = dept_task();
+ unsigned long flags;
+ unsigned int wg;
+
+ if (READ_ONCE(dept_stop) || dt->recursive)
+ return;
+
+ if (mc->nocheck)
+ return;
+
+ flags = dept_enter();
+
+ /*
+ * Avoid zero wgen.
+ */
+ wg = atomic_inc_return(&wgen) ?: atomic_inc_return(&wgen);
+ WRITE_ONCE(me->wgen, wg);
+
+ dept_exit(flags);
+}
+EXPORT_SYMBOL_GPL(dept_ask_event_split_map);
+
+void dept_event_split_map(struct dept_map_each *me,
+ struct dept_map_common *mc,
+ unsigned long ip, const char *e_fn)
+{
+ struct dept_task *dt = dept_task();
+ struct dept_class *c;
+ struct dept_key *k;
+ unsigned long flags;
+
+ if (READ_ONCE(dept_stop) || dt->recursive)
+ return;
+
+ if (mc->nocheck)
+ return;
+
+ flags = dept_enter();
+
+ k = mc->keys ?: &mc->keys_local;
+ c = check_new_class(&mc->keys_local, k, 0, mc->name);
+ if (c) {
+ add_ecxt((void *)me, c, 0UL, NULL, e_fn, 0);
+ do_event((void *)me, c, READ_ONCE(me->wgen), ip);
+ pop_ecxt((void *)me);
+ }
+
+ dept_exit(flags);
+}
+EXPORT_SYMBOL_GPL(dept_event_split_map);
+
void dept_ecxt_exit(struct dept_map *m, unsigned long ip)
{
struct dept_task *dt = dept_task();
--
1.9.1