Message-ID: <202212202240.itYRnf5x-lkp@intel.com>
Date: Tue, 20 Dec 2022 22:23:49 +0800
From: kernel test robot <lkp@...el.com>
To: Chih-En Lin <shiyn.lin@...il.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Qi Zheng <zhengqi.arch@...edance.com>,
David Hildenbrand <david@...hat.com>,
Matthew Wilcox <willy@...radead.org>,
Christophe Leroy <christophe.leroy@...roup.eu>,
John Hubbard <jhubbard@...dia.com>,
Nadav Amit <namit@...are.com>
Cc: llvm@...ts.linux.dev, oe-kbuild-all@...ts.linux.dev,
Linux Memory Management List <linux-mm@...ck.org>,
linux-kernel@...r.kernel.org, Steven Rostedt <rostedt@...dmis.org>,
Masami Hiramatsu <mhiramat@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Yang Shi <shy828301@...il.com>, Peter Xu <peterx@...hat.com>,
Zach O'Keefe <zokeefe@...gle.com>,
"Liam R . Howlett" <Liam.Howlett@...cle.com>,
Alex Sierra <alex.sierra@....com>,
Xianting Tian <xianting.tian@...ux.alibaba.com>,
Colin Cross <ccross@...gle.com>,
Suren Baghdasaryan <surenb@...gle.com>,
Barry Song <baohua@...nel.org>,
Pasha Tatashin <pasha.tatashin@...een.com>,
Suleiman Souhlal <suleiman@...gle.com>,
Brian Geffon <bgeffon@...gle.com>
Subject: Re: [PATCH v3 11/14] mm/migrate_device: Support COW PTE
Hi Chih-En,

Thank you for the patch! There is still something to improve:
[auto build test ERROR on v6.1-rc7]
[cannot apply to akpm-mm/mm-everything tip/perf/core acme/perf/core linus/master v6.1 v6.1-rc8 next-20221220]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
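For reference, one possible invocation of the suggested '--base' option (the
'-v3' revision marker and '-14' patch count here are only illustrative, and
'--base=auto' needs a configured upstream branch to compute the base from):

        git format-patch --base=auto -v3 -14

This records the base tree information in the generated series, so CI systems
know exactly which commit the patches should be applied on.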
url: https://github.com/intel-lab-lkp/linux/commits/Chih-En-Lin/Introduce-Copy-On-Write-to-Page-Table/20221220-153207
patch link: https://lore.kernel.org/r/20221220072743.3039060-12-shiyn.lin%40gmail.com
patch subject: [PATCH v3 11/14] mm/migrate_device: Support COW PTE
config: x86_64-rhel-8.3-rust
compiler: clang version 14.0.6 (https://github.com/llvm/llvm-project f28c006a5895fc0e329fe15fead81e37457cb1d1)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/77cd28466a15d4d3fd3d6f23044a9196d543dba2
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Chih-En-Lin/Introduce-Copy-On-Write-to-Page-Table/20221220-153207
        git checkout 77cd28466a15d4d3fd3d6f23044a9196d543dba2
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=x86_64 olddefconfig
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash
If you fix the issue, kindly add the following tag where applicable
| Reported-by: kernel test robot <lkp@...el.com>
All errors (new ones prefixed by >>):
>> mm/migrate_device.c:109:45: error: too many arguments to function call, expected 3, have 4
           if (!break_cow_pte_range(vma, pmdp, start, end))
                ~~~~~~~~~~~~~~~~~~~                   ^~~
   include/linux/mm.h:1898:5: note: 'break_cow_pte_range' declared here
   int break_cow_pte_range(struct vm_area_struct *vma, unsigned long start,
       ^
1 error generated.
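For context, the declaration visible to this translation unit takes three
arguments, while the patched call site passes four (it additionally passes
pmdp). Either side could be adjusted; the two sketches below are assumptions
about the intended API, not the author's actual fix:

        /* Assumption A: keep the 3-argument declaration from
         * include/linux/mm.h and drop pmdp at the call site. */
        if (!break_cow_pte_range(vma, start, end))
                return migrate_vma_collect_skip(start, end, walk);

        /* Assumption B: extend the declaration to carry the pmd,
         * matching the call site quoted below. */
        int break_cow_pte_range(struct vm_area_struct *vma, pmd_t *pmdp,
                                unsigned long start, unsigned long end);

Whichever direction is taken, the declaration in include/linux/mm.h, its
definition, and every caller must agree on the signature.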
vim +109 mm/migrate_device.c
    56  
    57  static int migrate_vma_collect_pmd(pmd_t *pmdp,
    58                                      unsigned long start,
    59                                      unsigned long end,
    60                                      struct mm_walk *walk)
    61  {
    62          struct migrate_vma *migrate = walk->private;
    63          struct vm_area_struct *vma = walk->vma;
    64          struct mm_struct *mm = vma->vm_mm;
    65          unsigned long addr = start, unmapped = 0;
    66          spinlock_t *ptl;
    67          pte_t *ptep;
    68  
    69  again:
    70          if (pmd_none(*pmdp))
    71                  return migrate_vma_collect_hole(start, end, -1, walk);
    72  
    73          if (pmd_trans_huge(*pmdp)) {
    74                  struct page *page;
    75  
    76                  ptl = pmd_lock(mm, pmdp);
    77                  if (unlikely(!pmd_trans_huge(*pmdp))) {
    78                          spin_unlock(ptl);
    79                          goto again;
    80                  }
    81  
    82                  page = pmd_page(*pmdp);
    83                  if (is_huge_zero_page(page)) {
    84                          spin_unlock(ptl);
    85                          split_huge_pmd(vma, pmdp, addr);
    86                          if (pmd_trans_unstable(pmdp))
    87                                  return migrate_vma_collect_skip(start, end,
    88                                                                  walk);
    89                  } else {
    90                          int ret;
    91  
    92                          get_page(page);
    93                          spin_unlock(ptl);
    94                          if (unlikely(!trylock_page(page)))
    95                                  return migrate_vma_collect_skip(start, end,
    96                                                                  walk);
    97                          ret = split_huge_page(page);
    98                          unlock_page(page);
    99                          put_page(page);
   100                          if (ret)
   101                                  return migrate_vma_collect_skip(start, end,
   102                                                                  walk);
   103                          if (pmd_none(*pmdp))
   104                                  return migrate_vma_collect_hole(start, end, -1,
   105                                                                  walk);
   106                  }
   107          }
   108  
 > 109          if (!break_cow_pte_range(vma, pmdp, start, end))
   110                  return migrate_vma_collect_skip(start, end, walk);
   111          if (unlikely(pmd_bad(*pmdp)))
   112                  return migrate_vma_collect_skip(start, end, walk);
   113  
   114          ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
   115          arch_enter_lazy_mmu_mode();
   116  
   117          for (; addr < end; addr += PAGE_SIZE, ptep++) {
   118                  unsigned long mpfn = 0, pfn;
   119                  struct page *page;
   120                  swp_entry_t entry;
   121                  pte_t pte;
   122  
   123                  pte = *ptep;
   124  
   125                  if (pte_none(pte)) {
   126                          if (vma_is_anonymous(vma)) {
   127                                  mpfn = MIGRATE_PFN_MIGRATE;
   128                                  migrate->cpages++;
   129                          }
   130                          goto next;
   131                  }
   132  
   133                  if (!pte_present(pte)) {
   134                          /*
   135                           * Only care about unaddressable device page special
   136                           * page table entry. Other special swap entries are not
   137                           * migratable, and we ignore regular swapped page.
   138                           */
   139                          entry = pte_to_swp_entry(pte);
   140                          if (!is_device_private_entry(entry))
   141                                  goto next;
   142  
   143                          page = pfn_swap_entry_to_page(entry);
   144                          if (!(migrate->flags &
   145                                MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
   146                              page->pgmap->owner != migrate->pgmap_owner)
   147                                  goto next;
   148  
   149                          mpfn = migrate_pfn(page_to_pfn(page)) |
   150                                 MIGRATE_PFN_MIGRATE;
   151                          if (is_writable_device_private_entry(entry))
   152                                  mpfn |= MIGRATE_PFN_WRITE;
   153                  } else {
   154                          pfn = pte_pfn(pte);
   155                          if (is_zero_pfn(pfn) &&
   156                              (migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
   157                                  mpfn = MIGRATE_PFN_MIGRATE;
   158                                  migrate->cpages++;
   159                                  goto next;
   160                          }
   161                          page = vm_normal_page(migrate->vma, addr, pte);
   162                          if (page && !is_zone_device_page(page) &&
   163                              !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
   164                                  goto next;
   165                          else if (page && is_device_coherent_page(page) &&
   166                              (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_COHERENT) ||
   167                               page->pgmap->owner != migrate->pgmap_owner))
   168                                  goto next;
   169                          mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
   170                          mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
   171                  }
   172  
   173                  /* FIXME support THP */
   174                  if (!page || !page->mapping || PageTransCompound(page)) {
   175                          mpfn = 0;
   176                          goto next;
   177                  }
   178  
   179                  /*
   180                   * By getting a reference on the page we pin it and that blocks
   181                   * any kind of migration. Side effect is that it "freezes" the
   182                   * pte.
   183                   *
   184                   * We drop this reference after isolating the page from the lru
   185                   * for non device page (device page are not on the lru and thus
   186                   * can't be dropped from it).
   187                   */
   188                  get_page(page);
   189  
   190                  /*
   191                   * We rely on trylock_page() to avoid deadlock between
   192                   * concurrent migrations where each is waiting on the others
   193                   * page lock. If we can't immediately lock the page we fail this
   194                   * migration as it is only best effort anyway.
   195                   *
   196                   * If we can lock the page it's safe to set up a migration entry
   197                   * now. In the common case where the page is mapped once in a
   198                   * single process setting up the migration entry now is an
   199                   * optimisation to avoid walking the rmap later with
   200                   * try_to_migrate().
   201                   */
   202                  if (trylock_page(page)) {
   203                          bool anon_exclusive;
   204                          pte_t swp_pte;
   205  
   206                          flush_cache_page(vma, addr, pte_pfn(*ptep));
   207                          anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
   208                          if (anon_exclusive) {
   209                                  pte = ptep_clear_flush(vma, addr, ptep);
   210  
   211                                  if (page_try_share_anon_rmap(page)) {
   212                                          set_pte_at(mm, addr, ptep, pte);
   213                                          unlock_page(page);
   214                                          put_page(page);
   215                                          mpfn = 0;
   216                                          goto next;
   217                                  }
   218                          } else {
   219                                  pte = ptep_get_and_clear(mm, addr, ptep);
   220                          }
   221  
   222                          migrate->cpages++;
   223  
   224                          /* Set the dirty flag on the folio now the pte is gone. */
   225                          if (pte_dirty(pte))
   226                                  folio_mark_dirty(page_folio(page));
   227  
   228                          /* Setup special migration page table entry */
   229                          if (mpfn & MIGRATE_PFN_WRITE)
   230                                  entry = make_writable_migration_entry(
   231                                                  page_to_pfn(page));
   232                          else if (anon_exclusive)
   233                                  entry = make_readable_exclusive_migration_entry(
   234                                                  page_to_pfn(page));
   235                          else
   236                                  entry = make_readable_migration_entry(
   237                                                  page_to_pfn(page));
   238                          if (pte_present(pte)) {
   239                                  if (pte_young(pte))
   240                                          entry = make_migration_entry_young(entry);
   241                                  if (pte_dirty(pte))
   242                                          entry = make_migration_entry_dirty(entry);
   243                          }
   244                          swp_pte = swp_entry_to_pte(entry);
   245                          if (pte_present(pte)) {
   246                                  if (pte_soft_dirty(pte))
   247                                          swp_pte = pte_swp_mksoft_dirty(swp_pte);
   248                                  if (pte_uffd_wp(pte))
   249                                          swp_pte = pte_swp_mkuffd_wp(swp_pte);
   250                          } else {
   251                                  if (pte_swp_soft_dirty(pte))
   252                                          swp_pte = pte_swp_mksoft_dirty(swp_pte);
   253                                  if (pte_swp_uffd_wp(pte))
   254                                          swp_pte = pte_swp_mkuffd_wp(swp_pte);
   255                          }
   256                          set_pte_at(mm, addr, ptep, swp_pte);
   257  
   258                          /*
   259                           * This is like regular unmap: we remove the rmap and
   260                           * drop page refcount. Page won't be freed, as we took
   261                           * a reference just above.
   262                           */
   263                          page_remove_rmap(page, vma, false);
   264                          put_page(page);
   265  
   266                          if (pte_present(pte))
   267                                  unmapped++;
   268                  } else {
   269                          put_page(page);
   270                          mpfn = 0;
   271                  }
   272  
   273  next:
   274                  migrate->dst[migrate->npages] = 0;
   275                  migrate->src[migrate->npages++] = mpfn;
   276          }
   277  
   278          /* Only flush the TLB if we actually modified any entries */
   279          if (unmapped)
   280                  flush_tlb_range(walk->vma, start, end);
   281  
   282          arch_leave_lazy_mmu_mode();
   283          pte_unmap_unlock(ptep - 1, ptl);
   284  
   285          return 0;
   286  }
   287  
--
0-DAY CI Kernel Test Service
https://01.org/lkp