@@ -429,12 +429,13 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 /*
  * __flush_tlb_range_op - Perform TLBI operation upon a range
  *
- * @op:	TLBI instruction that operates on a range (has 'r' prefix)
+ * @lop:	TLBI level operation to perform
+ * @rop:	TLBI range operation to perform
  * @start:	The start address of the range
  * @pages:	Range as the number of pages from 'start'
  * @stride:	Flush granularity
  * @asid:	The ASID of the task (0 for IPA instructions)
- * @tlb_level:	Translation Table level hint, if known
+ * @level:	Translation Table level hint, if known
  * @lpa2:	If 'true', the lpa2 scheme is used as set out below
  *
  * When the CPU does not support TLB range operations, flush the TLB
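The scale/num arithmetic that the range path relies on is easier to follow outside the kernel. Below is a minimal, stand-alone model of how a page count is decomposed into TLBI-RANGE chunks, assuming the usual encoding in which one range operation covers (num + 1) << (5 * scale + 1) pages with num in [0, 31] and scale in [0, 3]; range_pages() and range_num() are illustrative stand-ins for the kernel's __TLBI_RANGE_PAGES() and __TLBI_RANGE_NUM() macros, not their actual definitions.

	/* Illustration only: user-space model of the TLBI-RANGE decomposition. */
	#include <stdio.h>

	/* Pages covered by one range op at (num, scale), per the assumed encoding. */
	static long range_pages(int num, int scale)
	{
		return (long)(num + 1) << (5 * scale + 1);
	}

	/* num to use at this scale, or -1 if the remaining range is too small. */
	static int range_num(long pages, int scale)
	{
		long capped = pages > range_pages(31, scale) ? range_pages(31, scale) : pages;

		return (int)(capped >> (5 * scale + 1)) - 1;
	}

	int main(void)
	{
		long pages = 4100;	/* example: flush 4100 pages */
		int scale = 3;

		while (pages > 0 && scale >= 0) {
			int num = range_num(pages, scale);

			if (num >= 0) {
				printf("scale=%d num=%2d -> one TLBI covers %ld pages\n",
				       scale, num, range_pages(num, scale));
				pages -= range_pages(num, scale);
			}
			scale--;
		}
		if (pages > 0)
			printf("%ld page(s) left for the single-entry fallback\n", pages);
		return 0;
	}

For 4100 pages this prints one 4096-page operation at scale 2 followed by a 4-page operation at scale 0, mirroring how the loop below walks the scales downward.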
@@ -501,36 +502,44 @@ static __always_inline void __tlbi_range(tlbi_op op, u64 addr,
 	op(arg);
 }
 
-#define __flush_tlb_range_op(op, start, pages, stride,			\
-				asid, tlb_level, lpa2)			\
-do {									\
-	typeof(start) __flush_start = start;				\
-	typeof(pages) __flush_pages = pages;				\
-	int num = 0;							\
-	int scale = 3;							\
-									\
-	while (__flush_pages > 0) {					\
-		if (!system_supports_tlb_range() ||			\
-		    __flush_pages == 1 ||				\
-		    (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) { \
-			__tlbi_level_asid(op, __flush_start, tlb_level, asid); \
-			__flush_start += stride;			\
-			__flush_pages -= stride >> PAGE_SHIFT;		\
-			continue;					\
-		}							\
-									\
-		num = __TLBI_RANGE_NUM(__flush_pages, scale);		\
-		if (num >= 0) {						\
-			__tlbi_range(r##op, __flush_start, asid, scale, num, tlb_level, lpa2); \
-			__flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
-			__flush_pages -= __TLBI_RANGE_PAGES(num, scale);\
-		}							\
-		scale--;						\
-	}								\
-} while (0)
+static __always_inline void __flush_tlb_range_op(tlbi_op lop, tlbi_op rop,
+						 u64 start, size_t pages,
+						 u64 stride, u16 asid,
+						 u32 level, bool lpa2)
+{
+	u64 addr = start, end = start + pages * PAGE_SIZE;
+	int scale = 3;
+
+	while (addr != end) {
+		int num;
+
+		pages = (end - addr) >> PAGE_SHIFT;
+
+		if (!system_supports_tlb_range() || pages == 1)
+			goto invalidate_one;
+
+		if (lpa2 && !IS_ALIGNED(addr, SZ_64K))
+			goto invalidate_one;
+
+		num = __TLBI_RANGE_NUM(pages, scale);
+		if (num >= 0) {
+			__tlbi_range(rop, addr, asid, scale, num, level, lpa2);
+			addr += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;
+		}
+
+		scale--;
+		continue;
+invalidate_one:
+		__tlbi_level_asid(lop, addr, level, asid);
+		addr += stride;
+	}
+}
+
+#define __flush_s1_tlb_range_op(op, start, pages, stride, asid, tlb_level)	\
+	__flush_tlb_range_op(op, r##op, start, pages, stride, asid, tlb_level, lpa2_is_enabled())
 
 #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level)	\
-	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, kvm_lpa2_is_enabled());
+	__flush_tlb_range_op(op, r##op, start, pages, stride, 0, tlb_level, kvm_lpa2_is_enabled())
 
 static inline bool __flush_tlb_range_limit_excess(unsigned long start,
 		unsigned long end, unsigned long pages, unsigned long stride)
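For reference, the new stage-1 wrapper simply pairs the level and range forms of an instruction and fills in the LPA2 state, so a converted call such as the vale1is one in the next hunk expands roughly to:

	__flush_s1_tlb_range_op(vale1is, start, pages, stride, asid, tlb_level);
	/* ... becomes ... */
	__flush_tlb_range_op(vale1is, rvale1is, start, pages, stride, asid,
			     tlb_level, lpa2_is_enabled());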
@@ -569,11 +578,11 @@ static inline void __flush_tlb_range_nosync(struct mm_struct *mm,
 	asid = ASID(mm);
 
 	if (last_level)
-		__flush_tlb_range_op(vale1is, start, pages, stride, asid,
-				     tlb_level, lpa2_is_enabled());
+		__flush_s1_tlb_range_op(vale1is, start, pages, stride,
+					asid, tlb_level);
 	else
-		__flush_tlb_range_op(vae1is, start, pages, stride, asid,
-				     tlb_level, lpa2_is_enabled());
+		__flush_s1_tlb_range_op(vae1is, start, pages, stride,
+					asid, tlb_level);
 
 	mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
 }
@@ -597,8 +606,7 @@ static inline void local_flush_tlb_contpte(struct vm_area_struct *vma,
 
 	dsb(nshst);
 	asid = ASID(vma->vm_mm);
-	__flush_tlb_range_op(vale1, addr, CONT_PTES, PAGE_SIZE, asid,
-			     3, lpa2_is_enabled());
+	__flush_s1_tlb_range_op(vale1, addr, CONT_PTES, PAGE_SIZE, asid, 3);
 	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, addr,
 						    addr + CONT_PTE_SIZE);
 	dsb(nsh);
@@ -631,8 +639,8 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 	}
 
 	dsb(ishst);
-	__flush_tlb_range_op(vaale1is, start, pages, stride, 0,
-			     TLBI_TTL_UNKNOWN, lpa2_is_enabled());
+	__flush_s1_tlb_range_op(vaale1is, start, pages, stride, 0,
+				TLBI_TTL_UNKNOWN);
 	__tlbi_sync_s1ish();
 	isb();
 }
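As a usage sketch (the caller, address and size below are made up, not taken from this patch), code that has just torn down a kernel virtual mapping would invoke the helper above over the affected range:

	/* Hypothetical caller: flush a freshly unmapped 64 KiB kernel VA window. */
	static void example_unmap_flush(void *vaddr)
	{
		unsigned long start = (unsigned long)vaddr;

		flush_tlb_kernel_range(start, start + SZ_64K);
	}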