Skip to content

Commit a371003

Browse files
willdeacon authored and ctmarinas committed
arm64: mm: Push __TLBI_VADDR() into __tlbi_level()
The __TLBI_VADDR() macro takes an ASID and an address and converts them into a single argument formatted correctly for a TLB invalidation instruction. Rather than have callers worry about this (especially in the case where the ASID is zero), push the macro down into __tlbi_level() via a new __tlbi_level_asid() helper. Signed-off-by: Will Deacon <will@kernel.org> Reviewed-by: Linu Cherian <linu.cherian@arm.com> Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
1 parent edc55b7 commit a371003

6 files changed

Lines changed: 14 additions & 12 deletions

File tree

arch/arm64/include/asm/tlbflush.h

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -142,9 +142,10 @@ static __always_inline void ipas2e1is(u64 arg)
142142
__tlbi(ipas2e1is, arg);
143143
}
144144

145-
static __always_inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
145+
static __always_inline void __tlbi_level_asid(tlbi_op op, u64 addr, u32 level,
146+
u16 asid)
146147
{
147-
u64 arg = addr;
148+
u64 arg = __TLBI_VADDR(addr, asid);
148149

149150
if (alternative_has_cap_unlikely(ARM64_HAS_ARMv8_4_TTL) && level <= 3) {
150151
u64 ttl = level | (get_trans_granule() << 2);
@@ -155,6 +156,11 @@ static __always_inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
155156
op(arg);
156157
}
157158

159+
static inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
160+
{
161+
__tlbi_level_asid(op, addr, level, 0);
162+
}
163+
158164
/*
159165
* This macro creates a properly formatted VA operand for the TLB RANGE. The
160166
* value bit assignments are:
@@ -511,8 +517,7 @@ do { \
511517
if (!system_supports_tlb_range() || \
512518
__flush_pages == 1 || \
513519
(lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) { \
514-
addr = __TLBI_VADDR(__flush_start, asid); \
515-
__tlbi_level(op, addr, tlb_level); \
520+
__tlbi_level_asid(op, __flush_start, tlb_level, asid); \
516521
__flush_start += stride; \
517522
__flush_pages -= stride >> PAGE_SHIFT; \
518523
continue; \
@@ -685,6 +690,7 @@ static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
685690
#define huge_pmd_needs_flush huge_pmd_needs_flush
686691

687692
#undef __tlbi_user
693+
#undef __TLBI_VADDR
688694
#endif
689695

690696
#endif

arch/arm64/kernel/sys_compat.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
3636
* The workaround requires an inner-shareable tlbi.
3737
* We pick the reserved-ASID to minimise the impact.
3838
*/
39-
__tlbi(aside1is, __TLBI_VADDR(0, 0));
39+
__tlbi(aside1is, 0UL);
4040
__tlbi_sync_s1ish();
4141
}
4242

arch/arm64/kvm/hyp/nvhe/mm.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -270,7 +270,7 @@ static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
270270
* https://lore.kernel.org/kvm/20221017115209.2099-1-will@kernel.org/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03
271271
*/
272272
dsb(ishst);
273-
__tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level);
273+
__tlbi_level(vale2is, addr, level);
274274
__tlbi_sync_s1ish_hyp();
275275
isb();
276276
}

arch/arm64/kvm/hyp/nvhe/tlb.c

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -158,7 +158,6 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
158158
* Instead, we invalidate Stage-2 for this IPA, and the
159159
* whole of Stage-1. Weep...
160160
*/
161-
ipa >>= 12;
162161
__tlbi_level(ipas2e1is, ipa, level);
163162

164163
/*
@@ -188,7 +187,6 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
188187
* Instead, we invalidate Stage-2 for this IPA, and the
189188
* whole of Stage-1. Weep...
190189
*/
191-
ipa >>= 12;
192190
__tlbi_level(ipas2e1, ipa, level);
193191

194192
/*

arch/arm64/kvm/hyp/pgtable.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -490,14 +490,14 @@ static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
490490

491491
kvm_clear_pte(ctx->ptep);
492492
dsb(ishst);
493-
__tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), TLBI_TTL_UNKNOWN);
493+
__tlbi_level(vae2is, ctx->addr, TLBI_TTL_UNKNOWN);
494494
} else {
495495
if (ctx->end - ctx->addr < granule)
496496
return -EINVAL;
497497

498498
kvm_clear_pte(ctx->ptep);
499499
dsb(ishst);
500-
__tlbi_level(vale2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
500+
__tlbi_level(vale2is, ctx->addr, ctx->level);
501501
*unmapped += granule;
502502
}
503503

arch/arm64/kvm/hyp/vhe/tlb.c

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -104,7 +104,6 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
104104
* Instead, we invalidate Stage-2 for this IPA, and the
105105
* whole of Stage-1. Weep...
106106
*/
107-
ipa >>= 12;
108107
__tlbi_level(ipas2e1is, ipa, level);
109108

110109
/*
@@ -136,7 +135,6 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
136135
* Instead, we invalidate Stage-2 for this IPA, and the
137136
* whole of Stage-1. Weep...
138137
*/
139-
ipa >>= 12;
140138
__tlbi_level(ipas2e1, ipa, level);
141139

142140
/*

0 commit comments

Comments (0)