lists.openwall.net | lists / announce owl-users owl-dev john-users john-dev passwdqc-users yescrypt popa3d-users / oss-security kernel-hardening musl sabotage tlsify passwords / crypt-dev xvendor / Bugtraq Full-Disclosure linux-kernel linux-netdev linux-ext4 linux-hardening PHC | |
Open Source and information security mailing list archives
Date: Tue, 12 Jan 2021 11:11:57 -0500
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
To: maple-tree@...ts.infradead.org, linux-mm@...ck.org,
	linux-kernel@...r.kernel.org
Cc: Andrew Morton <akpm@...gle.com>, Song Liu <songliubraving@...com>,
	Davidlohr Bueso <dave@...olabs.net>,
	"Paul E . McKenney" <paulmck@...nel.org>,
	Matthew Wilcox <willy@...radead.org>,
	Jerome Glisse <jglisse@...hat.com>,
	David Rientjes <rientjes@...gle.com>,
	Axel Rasmussen <axelrasmussen@...gle.com>,
	Suren Baghdasaryan <surenb@...gle.com>,
	Vlastimil Babka <vbabka@...e.cz>,
	Rik van Riel <riel@...riel.com>,
	Peter Zijlstra <peterz@...radead.org>
Subject: [PATCH v2 27/70] mm: Introduce vma_next() and vma_prev()

Signed-off-by: Liam R. Howlett <Liam.Howlett@...cle.com>
---
 include/linux/mm.h | 35 ++++++++++++++++++++++++++++++++++-
 1 file changed, 34 insertions(+), 1 deletion(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 680dcfe07dbb6..99b1dec97495a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1647,7 +1647,7 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		    unsigned long size);
 void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
-		unsigned long start, unsigned long end);
+		struct ma_state *mas, unsigned long start, unsigned long end);
 
 struct mmu_notifier_range;
 
@@ -2654,6 +2654,24 @@ extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned lon
 extern struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
 			     unsigned long start_addr, unsigned long end_addr);
 
+static inline struct vm_area_struct *vma_next(struct mm_struct *mm,
+		const struct vm_area_struct *vma)
+{
+	MA_STATE(mas, &mm->mm_mt, 0, 0);
+
+	mas_set(&mas, vma->vm_end);
+	return mas_next(&mas, ULONG_MAX);
+}
+
+static inline struct vm_area_struct *vma_prev(struct mm_struct *mm,
+		const struct vm_area_struct *vma)
+{
+	MA_STATE(mas, &mm->mm_mt, 0, 0);
+
+	mas_set(&mas, vma->vm_start);
+	return mas_prev(&mas, 0);
+}
+
 static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
 {
 	unsigned long vm_start = vma->vm_start;
@@ -2695,6 +2713,21 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
 	return vma;
 }
 
+static inline struct vm_area_struct *vma_mas_next(struct ma_state *mas)
+{
+	struct ma_state tmp;
+
+	memcpy(&tmp, mas, sizeof(tmp));
+	return mas_next(&tmp, ULONG_MAX);
+}
+
+static inline struct vm_area_struct *vma_mas_prev(struct ma_state *mas)
+{
+	struct ma_state tmp;
+
+	memcpy(&tmp, mas, sizeof(tmp));
+	return mas_prev(&tmp, 0);
+}
+
 static inline bool range_in_vma(struct vm_area_struct *vma,
 				unsigned long start, unsigned long end)
 {
-- 
2.28.0
Powered by blists - more mailing lists