 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 
-/*
- * We need to delay page freeing for SMP as other CPUs can access pages
- * which have been removed but not yet had their TLB entries invalidated.
- * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
- * we need to apply this same delaying tactic to ensure correct operation.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
-#define tlb_fast_mode(tlb)	0
-#else
-#define tlb_fast_mode(tlb)	1
-#endif
-
 #define MMU_GATHER_BUNDLE	8
 
 /*
@@ -55,6 +43,7 @@ struct mmu_gather {
 	struct mm_struct	*mm;
 	unsigned int		fullmm;
 	struct vm_area_struct	*vma;
+	unsigned long		start, end;
 	unsigned long		range_start;
 	unsigned long		range_end;
 	unsigned int		nr;
@@ -112,19 +101,19 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	tlb_flush(tlb);
-	if (!tlb_fast_mode(tlb)) {
-		free_pages_and_swap_cache(tlb->pages, tlb->nr);
-		tlb->nr = 0;
-		if (tlb->pages == tlb->local)
-			__tlb_alloc_page(tlb);
-	}
+	free_pages_and_swap_cache(tlb->pages, tlb->nr);
+	tlb->nr = 0;
+	if (tlb->pages == tlb->local)
+		__tlb_alloc_page(tlb);
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
-	tlb->fullmm = fullmm;
+	tlb->fullmm = !(start | (end + 1));
+	tlb->start = start;
+	tlb->end = end;
 	tlb->vma = NULL;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
@@ -178,11 +167,6 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 
 static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-	if (tlb_fast_mode(tlb)) {
-		free_page_and_swap_cache(page);
-		return 1; /* avoid calling tlb_flush_mmu */
-	}
-
 	tlb->pages[tlb->nr++] = page;
 	VM_BUG_ON(tlb->nr > tlb->max);
 	return tlb->max - tlb->nr;
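
Note on the new fullmm test: tlb_gather_mmu() now infers a full-address-space teardown from the range it is handed instead of taking an explicit flag. The stand-alone sketch below is not part of the patch; the is_fullmm() helper and the example ranges are illustrative only, and it assumes the full-mm case passes start == 0 and end == ~0UL (all bits set), so (end + 1) wraps to zero and !(start | (end + 1)) evaluates to 1, while any bounded range leaves a nonzero bit and yields 0.

#include <stdio.h>

/*
 * Illustration of the fullmm expression used in tlb_gather_mmu() above.
 * Assumption (not shown in this diff): a full-mm teardown passes
 * start == 0 and end == ~0UL, so (end + 1) wraps to 0 and the whole
 * expression becomes 1; any bounded range yields 0.
 */
static unsigned int is_fullmm(unsigned long start, unsigned long end)
{
	return !(start | (end + 1));
}

int main(void)
{
	/* Full address space: start = 0, end = ~0UL  ->  prints 1 */
	printf("fullmm(0, ~0UL)        = %u\n", is_fullmm(0, ~0UL));

	/* Bounded range, e.g. one unmapped region  ->  prints 0 */
	printf("fullmm(0x8000, 0x9000) = %u\n", is_fullmm(0x8000, 0x9000));

	return 0;
}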