[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-ID: <ZivhG0yrbpFqORDw@casper.infradead.org>
Date: Fri, 26 Apr 2024 18:15:07 +0100
From: Matthew Wilcox <willy@...radead.org>
To: linux-mm@...ck.org, netdev@...r.kernel.org
Cc: Suren Baghdasaryan <surenb@...gle.com>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Arjun Roy <arjunroy@...gle.com>
Subject: [RFC] Make find_tcp_vma() more efficient
Liam asked me if we could do away with the "bool *mmap_locked"
parameter, and the problem is that some architectures don't support
CONFIG_PER_VMA_LOCK yet. But we can abstract it ... something like this
maybe?
(not particularly proposing this for inclusion; just wrote it and want
to get it out of my tree so I can get back to other projects. If anyone
wants it, they can test it and submit it for inclusion and stick my
S-o-B on it)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9849dfda44d4..570763351508 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -779,11 +779,22 @@ static inline void assert_fault_locked(struct vm_fault *vmf)
struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
unsigned long address);
+static inline void lock_vma_under_mmap_lock(struct vm_area_struct *vma)
+{
+ down_read(&vma->vm_lock->lock);
+ mmap_read_unlock(vma->vm_mm);
+}
+
#else /* CONFIG_PER_VMA_LOCK */
static inline bool vma_start_read(struct vm_area_struct *vma)
{ return false; }
-static inline void vma_end_read(struct vm_area_struct *vma) {}
+static inline void vma_end_read(struct vm_area_struct *vma)
+{
+ mmap_read_unlock(vma->vm_mm);
+}
+
+static inline void lock_vma_under_mmap_lock(struct vm_area_struct *vma) {}
static inline void vma_start_write(struct vm_area_struct *vma) {}
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{ mmap_assert_write_locked(vma->vm_mm); }
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f23b97777ea5..e763916e5185 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2051,27 +2051,25 @@ static void tcp_zc_finalize_rx_tstamp(struct sock *sk,
}
static struct vm_area_struct *find_tcp_vma(struct mm_struct *mm,
- unsigned long address,
- bool *mmap_locked)
+ unsigned long address)
{
struct vm_area_struct *vma = lock_vma_under_rcu(mm, address);
- if (vma) {
- if (vma->vm_ops != &tcp_vm_ops) {
- vma_end_read(vma);
+ if (!vma) {
+ mmap_read_lock(mm);
+ vma = vma_lookup(mm, address);
+ if (vma) {
+ lock_vma_under_mmap_lock(vma);
+ } else {
+ mmap_read_unlock(mm);
return NULL;
}
- *mmap_locked = false;
- return vma;
}
-
- mmap_read_lock(mm);
- vma = vma_lookup(mm, address);
- if (!vma || vma->vm_ops != &tcp_vm_ops) {
- mmap_read_unlock(mm);
+ if (vma->vm_ops != &tcp_vm_ops) {
+ vma_end_read(vma);
return NULL;
}
- *mmap_locked = true;
+
return vma;
}
@@ -2092,7 +2090,6 @@ static int tcp_zerocopy_receive(struct sock *sk,
u32 seq = tp->copied_seq;
u32 total_bytes_to_map;
int inq = tcp_inq(sk);
- bool mmap_locked;
int ret;
zc->copybuf_len = 0;
@@ -2117,7 +2114,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
return 0;
}
- vma = find_tcp_vma(current->mm, address, &mmap_locked);
+ vma = find_tcp_vma(current->mm, address);
if (!vma)
return -EINVAL;
@@ -2194,10 +2191,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
zc, total_bytes_to_map);
}
out:
- if (mmap_locked)
- mmap_read_unlock(current->mm);
- else
- vma_end_read(vma);
+ vma_end_read(vma);
/* Try to copy straggler data. */
if (!ret)
copylen = tcp_zc_handle_leftover(zc, sk, skb, &seq, copybuf_len, tss);
Powered by blists - more mailing lists