Skip to content

Commit c753d66

Browse files
willdeacon authored and ctmarinas committed
arm64: mm: Simplify __flush_tlb_range_limit_excess()
__flush_tlb_range_limit_excess() is unnecessarily complicated:

- It takes a 'start', 'end' and 'pages' argument, whereas it only needs
  'pages' (which the caller has computed from the other two arguments!).

- It erroneously compares 'pages' with MAX_TLBI_RANGE_PAGES when the
  system doesn't support range-based invalidation but the range to be
  invalidated would result in fewer than MAX_DVM_OPS invalidations.

Simplify the function so that it no longer takes the 'start' and 'end'
arguments and only considers the MAX_TLBI_RANGE_PAGES threshold on
systems that implement range-based invalidation.

Signed-off-by: Will Deacon <will@kernel.org>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
1 parent 057bbd8 commit c753d66

1 file changed

Lines changed: 11 additions & 13 deletions

File tree

arch/arm64/include/asm/tlbflush.h

Lines changed: 11 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -537,21 +537,19 @@ static __always_inline void __flush_tlb_range_op(tlbi_op lop, tlbi_op rop,
 #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
 	__flush_tlb_range_op(op, r##op, start, pages, stride, 0, tlb_level, kvm_lpa2_is_enabled())
 
-static inline bool __flush_tlb_range_limit_excess(unsigned long start,
-		unsigned long end, unsigned long pages, unsigned long stride)
+static inline bool __flush_tlb_range_limit_excess(unsigned long pages,
+		unsigned long stride)
 {
 	/*
-	 * When the system does not support TLB range based flush
-	 * operation, (MAX_DVM_OPS - 1) pages can be handled. But
-	 * with TLB range based operation, MAX_TLBI_RANGE_PAGES
-	 * pages can be handled.
+	 * Assume that the worst case number of DVM ops required to flush a
+	 * given range on a system that supports tlb-range is 20 (4 scales, 1
+	 * final page, 15 for alignment on LPA2 systems), which is much smaller
+	 * than MAX_DVM_OPS.
 	 */
-	if ((!system_supports_tlb_range() &&
-	     (end - start) >= (MAX_DVM_OPS * stride)) ||
-	    pages > MAX_TLBI_RANGE_PAGES)
-		return true;
+	if (system_supports_tlb_range())
+		return pages > MAX_TLBI_RANGE_PAGES;
 
-	return false;
+	return pages >= (MAX_DVM_OPS * stride) >> PAGE_SHIFT;
 }
 
 static inline void __flush_tlb_range_nosync(struct mm_struct *mm,
@@ -565,7 +563,7 @@ static inline void __flush_tlb_range_nosync(struct mm_struct *mm,
 	end = round_up(end, stride);
 	pages = (end - start) >> PAGE_SHIFT;
 
-	if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
+	if (__flush_tlb_range_limit_excess(pages, stride)) {
 		flush_tlb_mm(mm);
 		return;
 	}
@@ -629,7 +627,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 	end = round_up(end, stride);
 	pages = (end - start) >> PAGE_SHIFT;
 
-	if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
+	if (__flush_tlb_range_limit_excess(pages, stride)) {
 		flush_tlb_all();
 		return;
 	}

0 commit comments

Comments (0)