@@ -255,7 +255,6 @@ void cache_tag_unassign_domain(struct dmar_domain *domain,
255255
256256static unsigned long calculate_psi_aligned_address (unsigned long start ,
257257 unsigned long end ,
258- unsigned long * _pages ,
259258 unsigned long * _mask )
260259{
261260 unsigned long pages = aligned_nrpages (start , end - start + 1 );
@@ -281,10 +280,8 @@ static unsigned long calculate_psi_aligned_address(unsigned long start,
281280 */
282281 shared_bits = ~(pfn ^ end_pfn ) & ~bitmask ;
283282 mask = shared_bits ? __ffs (shared_bits ) : MAX_AGAW_PFN_WIDTH ;
284- aligned_pages = 1UL << mask ;
285283 }
286284
287- * _pages = aligned_pages ;
288285 * _mask = mask ;
289286
290287 return ALIGN_DOWN (start , VTD_PAGE_SIZE << mask );
@@ -371,14 +368,13 @@ static bool intel_domain_use_piotlb(struct dmar_domain *domain)
371368}
372369
373370static void cache_tag_flush_iotlb (struct dmar_domain * domain , struct cache_tag * tag ,
374- unsigned long addr , unsigned long pages ,
375- unsigned long mask , int ih )
371+ unsigned long addr , unsigned long mask , int ih )
376372{
377373 struct intel_iommu * iommu = tag -> iommu ;
378374 u64 type = DMA_TLB_PSI_FLUSH ;
379375
380376 if (intel_domain_use_piotlb (domain )) {
381- if (pages == -1 )
377+ if (mask >= MAX_AGAW_PFN_WIDTH )
382378 qi_batch_add_piotlb_all (iommu , tag -> domain_id ,
383379 tag -> pasid , domain -> qi_batch );
384380 else
@@ -392,7 +388,7 @@ static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *
392388 * is too big.
393389 */
394390 if (!cap_pgsel_inv (iommu -> cap ) ||
395- mask > cap_max_amask_val (iommu -> cap ) || pages == -1 ) {
391+ mask > cap_max_amask_val (iommu -> cap )) {
396392 addr = 0 ;
397393 mask = 0 ;
398394 ih = 0 ;
@@ -441,16 +437,15 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
441437 unsigned long end , int ih )
442438{
443439 struct intel_iommu * iommu = NULL ;
444- unsigned long pages , mask , addr ;
440+ unsigned long mask , addr ;
445441 struct cache_tag * tag ;
446442 unsigned long flags ;
447443
448444 if (start == 0 && end == ULONG_MAX ) {
449445 addr = 0 ;
450- pages = -1 ;
451446 mask = MAX_AGAW_PFN_WIDTH ;
452447 } else {
453- addr = calculate_psi_aligned_address (start , end , & pages , & mask );
448+ addr = calculate_psi_aligned_address (start , end , & mask );
454449 }
455450
456451 spin_lock_irqsave (& domain -> cache_lock , flags );
@@ -462,7 +457,7 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
462457 switch (tag -> type ) {
463458 case CACHE_TAG_IOTLB :
464459 case CACHE_TAG_NESTING_IOTLB :
465- cache_tag_flush_iotlb (domain , tag , addr , pages , mask , ih );
460+ cache_tag_flush_iotlb (domain , tag , addr , mask , ih );
466461 break ;
467462 case CACHE_TAG_NESTING_DEVTLB :
468463 /*
@@ -480,7 +475,7 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
480475 break ;
481476 }
482477
483- trace_cache_tag_flush_range (tag , start , end , addr , pages , mask );
478+ trace_cache_tag_flush_range (tag , start , end , addr , mask );
484479 }
485480 qi_batch_flush_descs (iommu , domain -> qi_batch );
486481 spin_unlock_irqrestore (& domain -> cache_lock , flags );
@@ -510,11 +505,11 @@ void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
510505 unsigned long end )
511506{
512507 struct intel_iommu * iommu = NULL ;
513- unsigned long pages , mask , addr ;
508+ unsigned long mask , addr ;
514509 struct cache_tag * tag ;
515510 unsigned long flags ;
516511
517- addr = calculate_psi_aligned_address (start , end , & pages , & mask );
512+ addr = calculate_psi_aligned_address (start , end , & mask );
518513
519514 spin_lock_irqsave (& domain -> cache_lock , flags );
520515 list_for_each_entry (tag , & domain -> cache_tags , node ) {
@@ -530,9 +525,9 @@ void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
530525
531526 if (tag -> type == CACHE_TAG_IOTLB ||
532527 tag -> type == CACHE_TAG_NESTING_IOTLB )
533- cache_tag_flush_iotlb (domain , tag , addr , pages , mask , 0 );
528+ cache_tag_flush_iotlb (domain , tag , addr , mask , 0 );
534529
535- trace_cache_tag_flush_range_np (tag , start , end , addr , pages , mask );
530+ trace_cache_tag_flush_range_np (tag , start , end , addr , mask );
536531 }
537532 qi_batch_flush_descs (iommu , domain -> qi_batch );
538533 spin_unlock_irqrestore (& domain -> cache_lock , flags );