Skip to content

Commit 11f6dd8

Browse files
ryanhrob authored and ctmarinas committed
arm64: mm: Refactor __flush_tlb_range() to take flags
We have function variants with "_nosync", "_local", "_nonotify" as well as the "last_level" parameter. Let's generalize and simplify by using a flags parameter to encode all these variants. As a first step, convert the "last_level" boolean parameter to a flags parameter and create the first flag, TLBF_NOWALKCACHE. When present, walk cache entries are not evicted, which is the same as the old last_level=true.

Reviewed-by: Linu Cherian <linu.cherian@arm.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
1 parent 64212d6 commit 11f6dd8

7 files changed

Lines changed: 37 additions & 28 deletions

File tree

arch/arm64/include/asm/hugetlb.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -71,23 +71,23 @@ static inline void __flush_hugetlb_tlb_range(struct vm_area_struct *vma,
7171
unsigned long start,
7272
unsigned long end,
7373
unsigned long stride,
74-
bool last_level)
74+
tlbf_t flags)
7575
{
7676
switch (stride) {
7777
#ifndef __PAGETABLE_PMD_FOLDED
7878
case PUD_SIZE:
79-
__flush_tlb_range(vma, start, end, PUD_SIZE, last_level, 1);
79+
__flush_tlb_range(vma, start, end, PUD_SIZE, 1, flags);
8080
break;
8181
#endif
8282
case CONT_PMD_SIZE:
8383
case PMD_SIZE:
84-
__flush_tlb_range(vma, start, end, PMD_SIZE, last_level, 2);
84+
__flush_tlb_range(vma, start, end, PMD_SIZE, 2, flags);
8585
break;
8686
case CONT_PTE_SIZE:
87-
__flush_tlb_range(vma, start, end, PAGE_SIZE, last_level, 3);
87+
__flush_tlb_range(vma, start, end, PAGE_SIZE, 3, flags);
8888
break;
8989
default:
90-
__flush_tlb_range(vma, start, end, PAGE_SIZE, last_level, TLBI_TTL_UNKNOWN);
90+
__flush_tlb_range(vma, start, end, PAGE_SIZE, TLBI_TTL_UNKNOWN, flags);
9191
}
9292
}
9393

@@ -98,7 +98,7 @@ static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
9898
{
9999
unsigned long stride = huge_page_size(hstate_vma(vma));
100100

101-
__flush_hugetlb_tlb_range(vma, start, end, stride, false);
101+
__flush_hugetlb_tlb_range(vma, start, end, stride, TLBF_NONE);
102102
}
103103

104104
#endif /* __ASM_HUGETLB_H */

arch/arm64/include/asm/pgtable.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -89,9 +89,9 @@ static inline void arch_leave_lazy_mmu_mode(void)
8989

9090
/* Set stride and tlb_level in flush_*_tlb_range */
9191
#define flush_pmd_tlb_range(vma, addr, end) \
92-
__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
92+
__flush_tlb_range(vma, addr, end, PMD_SIZE, 2, TLBF_NONE)
9393
#define flush_pud_tlb_range(vma, addr, end) \
94-
__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
94+
__flush_tlb_range(vma, addr, end, PUD_SIZE, 1, TLBF_NONE)
9595
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
9696

9797
/*

arch/arm64/include/asm/tlb.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ static inline int tlb_get_level(struct mmu_gather *tlb)
5353
static inline void tlb_flush(struct mmu_gather *tlb)
5454
{
5555
struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
56-
bool last_level = !tlb->freed_tables;
56+
tlbf_t flags = tlb->freed_tables ? TLBF_NONE : TLBF_NOWALKCACHE;
5757
unsigned long stride = tlb_get_unmap_size(tlb);
5858
int tlb_level = tlb_get_level(tlb);
5959

@@ -63,13 +63,13 @@ static inline void tlb_flush(struct mmu_gather *tlb)
6363
* reallocate our ASID without invalidating the entire TLB.
6464
*/
6565
if (tlb->fullmm) {
66-
if (!last_level)
66+
if (tlb->freed_tables)
6767
flush_tlb_mm(tlb->mm);
6868
return;
6969
}
7070

7171
__flush_tlb_range(&vma, tlb->start, tlb->end, stride,
72-
last_level, tlb_level);
72+
tlb_level, flags);
7373
}
7474

7575
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,

arch/arm64/include/asm/tlbflush.h

Lines changed: 20 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -286,16 +286,16 @@ static inline void __tlbi_sync_s1ish_hyp(void)
286286
* CPUs, ensuring that any walk-cache entries associated with the
287287
* translation are also invalidated.
288288
*
289-
* __flush_tlb_range(vma, start, end, stride, last_level, tlb_level)
289+
* __flush_tlb_range(vma, start, end, stride, tlb_level, flags)
290290
* Invalidate the virtual-address range '[start, end)' on all
291291
* CPUs for the user address space corresponding to 'vma->mm'.
292292
* The invalidation operations are issued at a granularity
293-
* determined by 'stride' and only affect any walk-cache entries
294-
* if 'last_level' is equal to false. tlb_level is the level at
293+
* determined by 'stride'. tlb_level is the level at
295294
* which the invalidation must take place. If the level is wrong,
296295
* no invalidation may take place. In the case where the level
297296
* cannot be easily determined, the value TLBI_TTL_UNKNOWN will
298-
* perform a non-hinted invalidation.
297+
* perform a non-hinted invalidation. flags may be TLBF_NONE (0) or
298+
* TLBF_NOWALKCACHE (elide eviction of walk cache entries).
299299
*
300300
* local_flush_tlb_page(vma, addr)
301301
* Local variant of flush_tlb_page(). Stale TLB entries may
@@ -544,10 +544,18 @@ static inline bool __flush_tlb_range_limit_excess(unsigned long pages,
544544
return pages >= (MAX_DVM_OPS * stride) >> PAGE_SHIFT;
545545
}
546546

547+
typedef unsigned __bitwise tlbf_t;
548+
549+
/* No special behaviour. */
550+
#define TLBF_NONE ((__force tlbf_t)0)
551+
552+
/* Invalidate tlb entries only, leaving the page table walk cache intact. */
553+
#define TLBF_NOWALKCACHE ((__force tlbf_t)BIT(0))
554+
547555
static inline void __flush_tlb_range_nosync(struct mm_struct *mm,
548556
unsigned long start, unsigned long end,
549-
unsigned long stride, bool last_level,
550-
int tlb_level)
557+
unsigned long stride, int tlb_level,
558+
tlbf_t flags)
551559
{
552560
unsigned long asid, pages;
553561

@@ -563,7 +571,7 @@ static inline void __flush_tlb_range_nosync(struct mm_struct *mm,
563571
dsb(ishst);
564572
asid = ASID(mm);
565573

566-
if (last_level)
574+
if (flags & TLBF_NOWALKCACHE)
567575
__flush_s1_tlb_range_op(vale1is, start, pages, stride,
568576
asid, tlb_level);
569577
else
@@ -575,11 +583,11 @@ static inline void __flush_tlb_range_nosync(struct mm_struct *mm,
575583

576584
static inline void __flush_tlb_range(struct vm_area_struct *vma,
577585
unsigned long start, unsigned long end,
578-
unsigned long stride, bool last_level,
579-
int tlb_level)
586+
unsigned long stride, int tlb_level,
587+
tlbf_t flags)
580588
{
581589
__flush_tlb_range_nosync(vma->vm_mm, start, end, stride,
582-
last_level, tlb_level);
590+
tlb_level, flags);
583591
__tlbi_sync_s1ish();
584592
}
585593

@@ -607,7 +615,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
607615
* Set the tlb_level to TLBI_TTL_UNKNOWN because we can not get enough
608616
* information here.
609617
*/
610-
__flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN);
618+
__flush_tlb_range(vma, start, end, PAGE_SIZE, TLBI_TTL_UNKNOWN, TLBF_NONE);
611619
}
612620

613621
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -648,7 +656,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
648656
static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
649657
struct mm_struct *mm, unsigned long start, unsigned long end)
650658
{
651-
__flush_tlb_range_nosync(mm, start, end, PAGE_SIZE, true, 3);
659+
__flush_tlb_range_nosync(mm, start, end, PAGE_SIZE, 3, TLBF_NOWALKCACHE);
652660
}
653661

654662
static inline bool __pte_flags_need_flush(ptdesc_t oldval, ptdesc_t newval)

arch/arm64/mm/contpte.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -225,7 +225,8 @@ static void contpte_convert(struct mm_struct *mm, unsigned long addr,
225225
*/
226226

227227
if (!system_supports_bbml2_noabort())
228-
__flush_tlb_range(&vma, start_addr, addr, PAGE_SIZE, true, 3);
228+
__flush_tlb_range(&vma, start_addr, addr, PAGE_SIZE, 3,
229+
TLBF_NOWALKCACHE);
229230

230231
__set_ptes(mm, start_addr, start_ptep, pte, CONT_PTES);
231232
}
@@ -552,7 +553,7 @@ int contpte_clear_flush_young_ptes(struct vm_area_struct *vma,
552553
* eliding the trailing DSB applies here.
553554
*/
554555
__flush_tlb_range_nosync(vma->vm_mm, addr, end,
555-
PAGE_SIZE, true, 3);
556+
PAGE_SIZE, 3, TLBF_NOWALKCACHE);
556557
}
557558

558559
return young;

arch/arm64/mm/hugetlbpage.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -181,7 +181,7 @@ static pte_t get_clear_contig_flush(struct mm_struct *mm,
181181
struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
182182
unsigned long end = addr + (pgsize * ncontig);
183183

184-
__flush_hugetlb_tlb_range(&vma, addr, end, pgsize, true);
184+
__flush_hugetlb_tlb_range(&vma, addr, end, pgsize, TLBF_NOWALKCACHE);
185185
return orig_pte;
186186
}
187187

@@ -209,7 +209,7 @@ static void clear_flush(struct mm_struct *mm,
209209
if (mm == &init_mm)
210210
flush_tlb_kernel_range(saddr, addr);
211211
else
212-
__flush_hugetlb_tlb_range(&vma, saddr, addr, pgsize, true);
212+
__flush_hugetlb_tlb_range(&vma, saddr, addr, pgsize, TLBF_NOWALKCACHE);
213213
}
214214

215215
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,

arch/arm64/mm/mmu.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2149,7 +2149,7 @@ pte_t modify_prot_start_ptes(struct vm_area_struct *vma, unsigned long addr,
21492149
*/
21502150
if (pte_accessible(vma->vm_mm, pte) && pte_user_exec(pte))
21512151
__flush_tlb_range(vma, addr, nr * PAGE_SIZE,
2152-
PAGE_SIZE, true, 3);
2152+
PAGE_SIZE, 3, TLBF_NOWALKCACHE);
21532153
}
21542154

21552155
return pte;

0 commit comments

Comments (0)