Message-ID: <20210607075855.5084-7-apopple@nvidia.com>
Date: Mon, 7 Jun 2021 17:58:51 +1000
From: Alistair Popple <apopple@...dia.com>
To: <linux-mm@...ck.org>, <akpm@...ux-foundation.org>
CC: <rcampbell@...dia.com>, <linux-doc@...r.kernel.org>,
<nouveau@...ts.freedesktop.org>, <hughd@...gle.com>,
<linux-kernel@...r.kernel.org>, <dri-devel@...ts.freedesktop.org>,
<hch@...radead.org>, <bskeggs@...hat.com>, <jgg@...dia.com>,
<peterx@...hat.com>, <shakeelb@...gle.com>, <jhubbard@...dia.com>,
<willy@...radead.org>, Alistair Popple <apopple@...dia.com>
Subject: [PATCH v10 06/10] mm/memory.c: Allow different return codes for copy_nonpresent_pte()
Currently, if copy_nonpresent_pte() returns a non-zero value it is
assumed to be a swap entry, which requires further processing outside
the loop in copy_pte_range() after dropping locks. This prevents other
values from being returned to signal conditions such as failure, which
a subsequent change requires.
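
For context, the call site in copy_pte_range() currently follows
roughly this pattern (a simplified sketch, not the verbatim kernel
source):

        entry.val = copy_nonpresent_pte(dst_mm, src_mm, dst_pte, src_pte,
                                        src_vma, addr, rss);
        if (entry.val)
                break;  /* any non-zero value is treated as a swap entry */
        ...
        /* later, after pte_unmap_unlock() has dropped the ptl: */
        if (entry.val) {
                if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
                        ret = -ENOMEM;
                        goto out;
                }
                entry.val = 0;
        }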
Instead, make copy_nonpresent_pte() return an error code if further
processing is required, and read the value for the swap entry in the
main loop under the ptl.
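
With this change the call site checks the return code explicitly and
re-reads the swap entry while the ptl is still held (again a simplified
sketch; the exact hunks are in the diff below):

        ret = copy_nonpresent_pte(dst_mm, src_mm, dst_pte, src_pte,
                                  src_vma, addr, rss);
        if (ret == -EIO) {
                /* Still under the ptl, so *src_pte is stable to re-read. */
                entry = pte_to_swp_entry(*src_pte);
                break;
        }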
Signed-off-by: Alistair Popple <apopple@...dia.com>
---
v10:
Use a unique error code and only check return codes for handling.
v9:
New for v9 to allow device exclusive handling to occur in
copy_nonpresent_pte().
---
mm/memory.c | 26 ++++++++++++++++----------
1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 2fb455c365c2..0982cab37ecb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -718,7 +718,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 
         if (likely(!non_swap_entry(entry))) {
                 if (swap_duplicate(entry) < 0)
-                        return entry.val;
+                        return -EIO;
 
                 /* make sure dst_mm is on swapoff's mmlist. */
                 if (unlikely(list_empty(&dst_mm->mmlist))) {
@@ -974,11 +974,13 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
                         continue;
                 }
                 if (unlikely(!pte_present(*src_pte))) {
-                        entry.val = copy_nonpresent_pte(dst_mm, src_mm,
-                                                        dst_pte, src_pte,
-                                                        src_vma, addr, rss);
-                        if (entry.val)
+                        ret = copy_nonpresent_pte(dst_mm, src_mm,
+                                                  dst_pte, src_pte,
+                                                  src_vma, addr, rss);
+                        if (ret == -EIO) {
+                                entry = pte_to_swp_entry(*src_pte);
                                 break;
+                        }
                         progress += 8;
                         continue;
                 }
@@ -1011,20 +1013,24 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
         pte_unmap_unlock(orig_dst_pte, dst_ptl);
         cond_resched();
 
-        if (entry.val) {
+        if (ret == -EIO) {
+                VM_WARN_ON_ONCE(!entry.val);
                 if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
                         ret = -ENOMEM;
                         goto out;
                 }
                 entry.val = 0;
-        } else if (ret) {
-                WARN_ON_ONCE(ret != -EAGAIN);
+        } else if (ret == -EAGAIN) {
                 prealloc = page_copy_prealloc(src_mm, src_vma, addr);
                 if (!prealloc)
                         return -ENOMEM;
-                /* We've captured and resolved the error. Reset, try again. */
-                ret = 0;
+        } else if (ret) {
+                VM_WARN_ON_ONCE(1);
         }
+
+        /* We've captured and resolved the error. Reset, try again. */
+        ret = 0;
+
         if (addr != end)
                 goto again;
 out:
--
2.20.1