Message-Id: <51ba5cba7c43ec5ddce8023054c99943fad02b6d.1532563124.git.osandov@fb.com>
Date: Wed, 25 Jul 2018 16:59:14 -0700
From: Omar Sandoval <osandov@...ndov.com>
To: linux-kernel@...r.kernel.org, linux-fsdevel@...r.kernel.org,
Andrew Morton <akpm@...ux-foundation.org>
Cc: Alexey Dobriyan <adobriyan@...il.com>,
Eric Biederman <ebiederm@...ssion.com>,
James Morse <james.morse@....com>,
Bhupesh Sharma <bhsharma@...hat.com>, kernel-team@...com
Subject: [PATCH v4 3/9] proc/kcore: replace kclist_lock rwlock with rwsem
From: Omar Sandoval <osandov@...com>
Now that kclist_lock is only needed from user context and at fs init time, and
the following changes will need to sleep while holding it, replace the rwlock
with a rw_semaphore.
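For readers less familiar with the two locking primitives, here is a minimal
sketch of the conversion (hypothetical example_* names, not part of this
patch); unlike the spinlock-based rwlock_t, a struct rw_semaphore lets the
holder sleep:

#include <linux/rwsem.h>

/*
 * Before: DEFINE_RWLOCK(example_lock) declared a spinning reader/writer
 * lock, so the holder could not sleep.  After: a reader/writer semaphore
 * allows blocking while the lock is held.
 */
static DECLARE_RWSEM(example_sem);

static void example_reader(void)
{
	down_read(&example_sem);	/* was read_lock(&example_lock) */
	/* may block here, e.g. a GFP_KERNEL allocation */
	up_read(&example_sem);		/* was read_unlock(&example_lock) */
}

static void example_writer(void)
{
	down_write(&example_sem);	/* was write_lock(&example_lock) */
	/* may block here as well */
	up_write(&example_sem);		/* was write_unlock(&example_lock) */
}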
Signed-off-by: Omar Sandoval <osandov@...com>
---
fs/proc/kcore.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index e83f15a4f66d..ae43a97d511d 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -59,7 +59,7 @@ struct memelfnote
 };
 
 static LIST_HEAD(kclist_head);
-static DEFINE_RWLOCK(kclist_lock);
+static DECLARE_RWSEM(kclist_lock);
 static int kcore_need_update = 1;
 
 /* This doesn't grab kclist_lock, so it should only be used at init time. */
@@ -117,7 +117,7 @@ static void __kcore_update_ram(struct list_head *list)
 	struct kcore_list *tmp, *pos;
 	LIST_HEAD(garbage);
 
-	write_lock(&kclist_lock);
+	down_write(&kclist_lock);
 	if (xchg(&kcore_need_update, 0)) {
 		list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
 			if (pos->type == KCORE_RAM
@@ -128,7 +128,7 @@ static void __kcore_update_ram(struct list_head *list)
 	} else
 		list_splice(list, &garbage);
 	proc_root_kcore->size = get_kcore_size(&nphdr, &size);
-	write_unlock(&kclist_lock);
+	up_write(&kclist_lock);
 
 	free_kclist_ents(&garbage);
 }
@@ -451,11 +451,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 	int nphdr;
 	unsigned long start;
 
-	read_lock(&kclist_lock);
+	down_read(&kclist_lock);
 	size = get_kcore_size(&nphdr, &elf_buflen);
 
 	if (buflen == 0 || *fpos >= size) {
-		read_unlock(&kclist_lock);
+		up_read(&kclist_lock);
 		return 0;
 	}
 
@@ -472,11 +472,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 			tsz = buflen;
 		elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
 		if (!elf_buf) {
-			read_unlock(&kclist_lock);
+			up_read(&kclist_lock);
 			return -ENOMEM;
 		}
 		elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
-		read_unlock(&kclist_lock);
+		up_read(&kclist_lock);
 		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
 			kfree(elf_buf);
 			return -EFAULT;
@@ -491,7 +491,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 		if (buflen == 0)
 			return acc;
 	} else
-		read_unlock(&kclist_lock);
+		up_read(&kclist_lock);
 
 	/*
 	 * Check to see if our file offset matches with any of
@@ -504,12 +504,12 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 	while (buflen) {
 		struct kcore_list *m;
 
-		read_lock(&kclist_lock);
+		down_read(&kclist_lock);
 		list_for_each_entry(m, &kclist_head, list) {
 			if (start >= m->addr && start < (m->addr+m->size))
 				break;
 		}
-		read_unlock(&kclist_lock);
+		up_read(&kclist_lock);
 
 		if (&m->list == &kclist_head) {
 			if (clear_user(buffer, tsz))
--
2.18.0