@@ -53,18 +53,6 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
 	return true;
 }
 
-static inline bool pfn_is_match(struct page *page, unsigned long pfn)
-{
-	unsigned long page_pfn = page_to_pfn(page);
-
-	/* normal page and hugetlbfs page */
-	if (!PageTransCompound(page) || PageHuge(page))
-		return page_pfn == pfn;
-
-	/* THP can be referenced by any subpage */
-	return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
-}
-
 /**
  * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
  * @pvmw: page_vma_mapped_walk struct, includes a pair pte and page for checking
@@ -116,7 +104,17 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
 		pfn = pte_pfn(*pvmw->pte);
 	}
 
-	return pfn_is_match(pvmw->page, pfn);
+	return (pfn - pvmw->pfn) < pvmw->nr_pages;
+}
+
+/* Returns true if the two ranges overlap. Careful to not overflow. */
+static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
+{
+	if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
+		return false;
+	if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
+		return false;
+	return true;
 }
 
 static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
@@ -127,7 +125,7 @@ static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
 }
 
 /**
- * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
+ * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
  * @pvmw->address
  * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
  * must be set. pmd, pte and ptl must be NULL.
@@ -152,8 +150,8 @@ static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
  */
 bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 {
-	struct mm_struct *mm = pvmw->vma->vm_mm;
-	struct page *page = pvmw->page;
+	struct vm_area_struct *vma = pvmw->vma;
+	struct mm_struct *mm = vma->vm_mm;
 	unsigned long end;
 	pgd_t *pgd;
 	p4d_t *p4d;
@@ -164,32 +162,26 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 	if (pvmw->pmd && !pvmw->pte)
 		return not_found(pvmw);
 
-	if (unlikely(PageHuge(page))) {
+	if (unlikely(is_vm_hugetlb_page(vma))) {
+		unsigned long size = pvmw->nr_pages * PAGE_SIZE;
 		/* The only possible mapping was handled on last iteration */
 		if (pvmw->pte)
 			return not_found(pvmw);
 
 		/* when pud is not present, pte will be NULL */
-		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
+		pvmw->pte = huge_pte_offset(mm, pvmw->address, size);
 		if (!pvmw->pte)
 			return false;
 
-		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
+		pvmw->ptl = huge_pte_lockptr(size_to_hstate(size), mm,
+					     pvmw->pte);
 		spin_lock(pvmw->ptl);
 		if (!check_pte(pvmw))
 			return not_found(pvmw);
 		return true;
 	}
 
-	/*
-	 * Seek to next pte only makes sense for THP.
-	 * But more important than that optimization, is to filter out
-	 * any PageKsm page: whose page->index misleads vma_address()
-	 * and vma_address_end() to disaster.
-	 */
-	end = PageTransCompound(page) ?
-		vma_address_end(page, pvmw->vma) :
-		pvmw->address + PAGE_SIZE;
+	end = vma_address_end(pvmw);
 	if (pvmw->pte)
 		goto next_pte;
 restart:
@@ -224,7 +216,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 		if (likely(pmd_trans_huge(pmde))) {
 			if (pvmw->flags & PVMW_MIGRATION)
 				return not_found(pvmw);
-			if (pmd_page(pmde) != page)
+			if (!check_pmd(pmd_pfn(pmde), pvmw))
 				return not_found(pvmw);
 			return true;
 		}
@@ -236,7 +228,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 				return not_found(pvmw);
 			entry = pmd_to_swp_entry(pmde);
 			if (!is_migration_entry(entry) ||
-			    pfn_swap_entry_to_page(entry) != page)
+			    !check_pmd(swp_offset(entry), pvmw))
 				return not_found(pvmw);
 			return true;
 		}
@@ -250,7 +242,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 		 * cleared *pmd but not decremented compound_mapcount().
 		 */
 		if ((pvmw->flags & PVMW_SYNC) &&
-		    PageTransCompound(page)) {
+		    transparent_hugepage_active(vma) &&
+		    (pvmw->nr_pages >= HPAGE_PMD_NR)) {
 			spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
 
 			spin_unlock(ptl);
@@ -307,7 +300,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 {
 	struct page_vma_mapped_walk pvmw = {
-		.page = page,
+		.pfn = page_to_pfn(page),
+		.nr_pages = 1,
 		.vma = vma,
 		.flags = PVMW_SYNC,
 	};
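
The rewritten return statement in check_pte() folds the old pfn_is_match() logic into one unsigned comparison: since pfn and pvmw->pfn are unsigned longs, a pte pfn below pvmw->pfn wraps around on subtraction and fails the nr_pages test, so a single comparison covers both bounds. A minimal userspace sketch of that containment test (stand-in names and values, not kernel code):

#include <assert.h>

/*
 * Userspace mirror of the containment test in check_pte(): true iff
 * pfn lies in [start, start + nr_pages). When pfn < start the unsigned
 * subtraction wraps to a huge value, so no separate lower-bound check
 * is needed.
 */
static int pfn_in_range(unsigned long pfn, unsigned long start,
			unsigned long nr_pages)
{
	return (pfn - start) < nr_pages;
}

int main(void)
{
	assert(pfn_in_range(0x1000, 0x1000, 1));	/* first pfn of the range */
	assert(pfn_in_range(0x11ff, 0x1000, 512));	/* last pfn of a 512-page range */
	assert(!pfn_in_range(0x1200, 0x1000, 512));	/* one past the end */
	assert(!pfn_in_range(0x0fff, 0x1000, 512));	/* below the range: wraps, fails */
	return 0;
}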
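
Similarly, check_pmd() asks whether the PMD-sized block of pfns starting at pfn overlaps the walk range [pvmw->pfn, pvmw->pfn + nr_pages), comparing the end of each range against the start of the other so the boundary arithmetic stays well-defined. A hedged userspace sketch of the same overlap test, with HPAGE_PMD_NR replaced by a stand-in value of 512:

#include <assert.h>

#define PMD_NR_STANDIN 512UL	/* stand-in for HPAGE_PMD_NR (512 on x86-64 with 4K pages) */

/*
 * Userspace mirror of check_pmd(): does the PMD-sized block of pfns
 * starting at @pfn overlap the walk range [start, start + nr_pages)?
 */
static int pmd_range_overlaps(unsigned long pfn, unsigned long start,
			      unsigned long nr_pages)
{
	if ((pfn + PMD_NR_STANDIN - 1) < start)	/* block ends before the walk range */
		return 0;
	if (pfn > start + nr_pages - 1)		/* block starts after the walk range */
		return 0;
	return 1;
}

int main(void)
{
	/* A one-page walk whose pfn falls inside the block overlaps it. */
	assert(pmd_range_overlaps(0x1000, 0x1100, 1));
	/* A block starting just past the end of the walk does not. */
	assert(!pmd_range_overlaps(0x1200, 0x1000, 512));
	/* A block ending just below the start of the walk does not. */
	assert(!pmd_range_overlaps(0x0e00, 0x1000, 512));
	return 0;
}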