
Commit 285994a

arm64: Invalidate the TLB corresponding to intermediate page table levels
The ARM architecture allows the caching of intermediate page table levels and page table freeing requires a sequence like:

	pmd_clear()
	TLB invalidation
	pte page freeing

With commit 5e5f6dc ("arm64: mm: enable HAVE_RCU_TABLE_FREE logic"), the page table freeing batching was moved from tlb_remove_page() to tlb_remove_table(). The former takes care of TLB invalidation as this is also shared with pte clearing and page cache page freeing. The latter, however, does not invalidate the TLBs for intermediate page table levels as it probably relies on the architecture code to do it if required. When mm->mm_users < 2, tlb_remove_table() does not do any batching and page table pages are freed before tlb_finish_mmu(), which performs the actual TLB invalidation.

This patch introduces __flush_tlb_pgtable() for arm64 and calls it from {pte,pmd,pud}_free_tlb() directly, without relying on deferred page table freeing.

Fixes: 5e5f6dc ("arm64: mm: enable HAVE_RCU_TABLE_FREE logic")
Reported-by: Jon Masters <jcm@redhat.com>
Tested-by: Jon Masters <jcm@redhat.com>
Tested-by: Steve Capper <steve.capper@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
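To make the ordering requirement above concrete, here is a minimal standalone C sketch of the sequence the commit message describes; pmd_entry_t, walk_cache_invalidate() and teardown_pte_table() are hypothetical stand-ins, not kernel APIs. The point is only that the walk-cache invalidation must sit between clearing the pmd entry and handing the pte page back to the allocator, otherwise the MMU may keep walking through the stale, freed table.

	#include <stdint.h>
	#include <stdlib.h>

	/* Hypothetical stand-ins: pmd_entry_t models a pmd slot, and
	 * walk_cache_invalidate() models the TLBI step added by this patch. */
	typedef uint64_t pmd_entry_t;

	static void walk_cache_invalidate(uint64_t addr)
	{
		/* On arm64 this is where __flush_tlb_pgtable() would run:
		 * DSB ISHST; TLBI VAE1IS; DSB ISH. Stubbed out here. */
		(void)addr;
	}

	static void teardown_pte_table(pmd_entry_t *pmd, uint64_t addr, void *pte_page)
	{
		*pmd = 0;			/* 1. pmd_clear(): unhook the pte table */
		walk_cache_invalidate(addr);	/* 2. flush cached intermediate entries */
		free(pte_page);			/* 3. only now is reusing the page safe */
	}

	int main(void)
	{
		pmd_entry_t pmd = 0xdead;
		void *pte_page = malloc(4096);	/* stands in for the pte table page */

		teardown_pte_table(&pmd, 0x7f1234567000ULL, pte_page);
		return 0;
	}

Step 2 is exactly what was silently skipped on the non-batched (mm_users < 2) path before this patch.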
1 parent 9eccca0 commit 285994a

2 files changed: +16, -0 lines
arch/arm64/include/asm/tlb.h

Lines changed: 3 additions & 0 deletions
@@ -48,6 +48,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				  unsigned long addr)
 {
+	__flush_tlb_pgtable(tlb->mm, addr);
 	pgtable_page_dtor(pte);
 	tlb_remove_entry(tlb, pte);
 }
@@ -56,6 +57,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 				  unsigned long addr)
 {
+	__flush_tlb_pgtable(tlb->mm, addr);
 	tlb_remove_entry(tlb, virt_to_page(pmdp));
 }
 #endif
@@ -64,6 +66,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
 				  unsigned long addr)
 {
+	__flush_tlb_pgtable(tlb->mm, addr);
 	tlb_remove_entry(tlb, virt_to_page(pudp));
 }
 #endif
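For orientation, a rough sketch of the call path these hooks sit on during address-space teardown; the function names come from the generic mm code, but the exact wrappers and macro bodies vary by kernel version, so treat the shape below as an approximation rather than verbatim kernel code.

	/*
	 * Approximate call path during munmap()/exit_mmap(); not verbatim kernel code.
	 *
	 *   unmap_region() / exit_mmap()
	 *     -> free_pgtables(&tlb, ...)
	 *          -> free_pte_range() -> pte_free_tlb(tlb, pte, addr)
	 *                                   -> __pte_free_tlb()   (hook patched above)
	 *   ...
	 *   tlb_finish_mmu(&tlb, start, end)   (final TLB flush / batched page freeing)
	 */

The patch makes each arch hook flush the walk caches itself, so correctness no longer depends on the batched freeing reaching tlb_finish_mmu() first.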

arch/arm64/include/asm/tlbflush.h

Lines changed: 13 additions & 0 deletions
@@ -143,6 +143,19 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 	flush_tlb_all();
 }
 
+/*
+ * Used to invalidate the TLB (walk caches) corresponding to intermediate page
+ * table levels (pgd/pud/pmd).
+ */
+static inline void __flush_tlb_pgtable(struct mm_struct *mm,
+				       unsigned long uaddr)
+{
+	unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48);
+
+	dsb(ishst);
+	asm("tlbi	vae1is, %0" : : "r" (addr));
+	dsb(ish);
+}
 /*
  * On AArch64, the cache coherency is handled via the set_pte_at() function.
  */
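The operand built in __flush_tlb_pgtable() follows the TLBI VAE1IS register layout: the low bits carry the virtual address shifted right by 12 (the page number) and bits [63:48] carry the ASID. A small standalone C sketch that mirrors the same packing; the address and ASID values are hypothetical examples.

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors the packing used in __flush_tlb_pgtable():
	 * operand = (uaddr >> 12) | ((uint64_t)asid << 48)
	 * Low bits hold the page-aligned VA >> 12, bits [63:48] hold the ASID.
	 */
	static uint64_t tlbi_vae1is_operand(uint64_t uaddr, uint16_t asid)
	{
		return (uaddr >> 12) | ((uint64_t)asid << 48);
	}

	int main(void)
	{
		uint64_t uaddr = 0x00007f1234567000ULL;	/* example user VA (hypothetical) */
		uint16_t asid  = 0x0042;		/* example ASID (hypothetical) */

		printf("TLBI VAE1IS operand: 0x%016llx\n",
		       (unsigned long long)tlbi_vae1is_operand(uaddr, asid));
		return 0;
	}

With these example values the operand comes out as 0x00420007f1234567: ASID 0x42 in the top 16 bits, page number of the faulting address below. The dsb(ishst) before the TLBI orders the earlier page table update, and the dsb(ish) after it makes the broadcast invalidation complete before the page is freed.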
