@@ -295,7 +295,10 @@ static inline void __tlbi_sync_s1ish_hyp(void)
295295 * no invalidation may take place. In the case where the level
296296 * cannot be easily determined, the value TLBI_TTL_UNKNOWN will
297297 * perform a non-hinted invalidation. flags may be TLBF_NONE (0) or
298- * TLBF_NOWALKCACHE (elide eviction of walk cache entries).
298+ * any combination of TLBF_NOWALKCACHE (elide eviction of walk
299+ * cache entries), TLBF_NONOTIFY (don't call MMU notifiers),
300+ * TLBF_NOSYNC (don't issue the trailing DSB) and TLBF_NOBROADCAST
301+ * (only perform the invalidation on the local CPU).
299302 *
300303 * local_flush_tlb_page(vma, addr)
301304 * Local variant of flush_tlb_page(). Stale TLB entries may
@@ -305,12 +308,6 @@ static inline void __tlbi_sync_s1ish_hyp(void)
305308 * Same as local_flush_tlb_page() except MMU notifier will not be
306309 * called.
307310 *
308- * local_flush_tlb_contpte(vma, addr)
309- * Invalidate the virtual-address range
310- * '[addr, addr+CONT_PTE_SIZE)' mapped with contpte on local CPU
311- * for the user address space corresponding to 'vma->mm'. Stale
312- * TLB entries may remain in remote CPUs.
313- *
314311 * Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
315312 * on top of these routines, since that is our interface to the mmu_gather
316313 * API as used by munmap() and friends.
@@ -552,58 +549,75 @@ typedef unsigned __bitwise tlbf_t;
552549/* Invalidate tlb entries only, leaving the page table walk cache intact. */
553550#define TLBF_NOWALKCACHE ((__force tlbf_t)BIT(0))
554551
555- static inline void __flush_tlb_range_nosync (struct mm_struct * mm ,
556- unsigned long start , unsigned long end ,
557- unsigned long stride , int tlb_level ,
558- tlbf_t flags )
552+ /* Skip the trailing dsb after issuing tlbi. */
553+ #define TLBF_NOSYNC ((__force tlbf_t)BIT(1))
554+
555+ /* Suppress MMU notifier callbacks for this flush operation. */
556+ #define TLBF_NONOTIFY ((__force tlbf_t)BIT(2))
557+
558+ /* Perform the tlbi locally without broadcasting to other CPUs. */
559+ #define TLBF_NOBROADCAST ((__force tlbf_t)BIT(3))
560+
561+ static __always_inline void __do_flush_tlb_range (struct vm_area_struct * vma ,
562+ unsigned long start , unsigned long end ,
563+ unsigned long stride , int tlb_level ,
564+ tlbf_t flags )
559565{
566+ struct mm_struct * mm = vma -> vm_mm ;
560567 unsigned long asid , pages ;
561568
562- start = round_down (start , stride );
563- end = round_up (end , stride );
564569 pages = (end - start ) >> PAGE_SHIFT ;
565570
566571 if (__flush_tlb_range_limit_excess (pages , stride )) {
567572 flush_tlb_mm (mm );
568573 return ;
569574 }
570575
571- dsb (ishst );
576+ if (!(flags & TLBF_NOBROADCAST ))
577+ dsb (ishst );
578+ else
579+ dsb (nshst );
580+
572581 asid = ASID (mm );
573582
574- if (flags & TLBF_NOWALKCACHE )
575- __flush_s1_tlb_range_op (vale1is , start , pages , stride ,
576- asid , tlb_level );
577- else
583+ switch (flags & (TLBF_NOWALKCACHE | TLBF_NOBROADCAST )) {
584+ case TLBF_NONE :
578585 __flush_s1_tlb_range_op (vae1is , start , pages , stride ,
579- asid , tlb_level );
586+ asid , tlb_level );
587+ break ;
588+ case TLBF_NOWALKCACHE :
589+ __flush_s1_tlb_range_op (vale1is , start , pages , stride ,
590+ asid , tlb_level );
591+ break ;
592+ case TLBF_NOBROADCAST :
593+ /* TLBF_NOBROADCAST without TLBF_NOWALKCACHE has no users */
594+ BUG ();
595+ break ;
596+ case TLBF_NOWALKCACHE | TLBF_NOBROADCAST :
597+ __flush_s1_tlb_range_op (vale1 , start , pages , stride ,
598+ asid , tlb_level );
599+ break ;
600+ }
601+
602+ if (!(flags & TLBF_NONOTIFY ))
603+ mmu_notifier_arch_invalidate_secondary_tlbs (mm , start , end );
580604
581- mmu_notifier_arch_invalidate_secondary_tlbs (mm , start , end );
605+ if (!(flags & TLBF_NOSYNC )) {
606+ if (!(flags & TLBF_NOBROADCAST ))
607+ __tlbi_sync_s1ish ();
608+ else
609+ dsb (nsh );
610+ }
582611}
583612
584613static inline void __flush_tlb_range (struct vm_area_struct * vma ,
585614 unsigned long start , unsigned long end ,
586615 unsigned long stride , int tlb_level ,
587616 tlbf_t flags )
588617{
589- __flush_tlb_range_nosync (vma -> vm_mm , start , end , stride ,
590- tlb_level , flags );
591- __tlbi_sync_s1ish ();
592- }
593-
594- static inline void local_flush_tlb_contpte (struct vm_area_struct * vma ,
595- unsigned long addr )
596- {
597- unsigned long asid ;
598-
599- addr = round_down (addr , CONT_PTE_SIZE );
600-
601- dsb (nshst );
602- asid = ASID (vma -> vm_mm );
603- __flush_s1_tlb_range_op (vale1 , addr , CONT_PTES , PAGE_SIZE , asid , 3 );
604- mmu_notifier_arch_invalidate_secondary_tlbs (vma -> vm_mm , addr ,
605- addr + CONT_PTE_SIZE );
606- dsb (nsh );
618+ start = round_down (start , stride );
619+ end = round_up (end , stride );
620+ __do_flush_tlb_range (vma , start , end , stride , tlb_level , flags );
607621}
608622
609623static inline void flush_tlb_range (struct vm_area_struct * vma ,
@@ -656,7 +670,10 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
656670static inline void arch_tlbbatch_add_pending (struct arch_tlbflush_unmap_batch * batch ,
657671 struct mm_struct * mm , unsigned long start , unsigned long end )
658672{
659- __flush_tlb_range_nosync (mm , start , end , PAGE_SIZE , 3 , TLBF_NOWALKCACHE );
673+ struct vm_area_struct vma = { .vm_mm = mm , .vm_flags = 0 };
674+
675+ __flush_tlb_range (& vma , start , end , PAGE_SIZE , 3 ,
676+ TLBF_NOWALKCACHE | TLBF_NOSYNC );
660677}
661678
662679static inline bool __pte_flags_need_flush (ptdesc_t oldval , ptdesc_t newval )
0 commit comments