Skip to content

Commit b334d7f

Browse files
jgunthorpe authored and joergroedel committed
iommu/vt-d: Remove the remaining pages along the invalidation path
This was only being used to signal that a flush all should be used. Use mask/size_order >= 52 to signal this instead. Signed-off-by: Jason Gunthorpe <jgg@nvidia.com> Link: https://lore.kernel.org/r/3-v1-f175e27af136+11647-iommupt_inv_vtd_jgg@nvidia.com Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com> Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
1 parent e36ee89 commit b334d7f

2 files changed

Lines changed: 19 additions & 26 deletions

File tree

drivers/iommu/intel/cache.c

Lines changed: 11 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -255,7 +255,6 @@ void cache_tag_unassign_domain(struct dmar_domain *domain,
255255

256256
static unsigned long calculate_psi_aligned_address(unsigned long start,
257257
unsigned long end,
258-
unsigned long *_pages,
259258
unsigned long *_mask)
260259
{
261260
unsigned long pages = aligned_nrpages(start, end - start + 1);
@@ -281,10 +280,8 @@ static unsigned long calculate_psi_aligned_address(unsigned long start,
281280
*/
282281
shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
283282
mask = shared_bits ? __ffs(shared_bits) : MAX_AGAW_PFN_WIDTH;
284-
aligned_pages = 1UL << mask;
285283
}
286284

287-
*_pages = aligned_pages;
288285
*_mask = mask;
289286

290287
return ALIGN_DOWN(start, VTD_PAGE_SIZE << mask);
@@ -371,14 +368,13 @@ static bool intel_domain_use_piotlb(struct dmar_domain *domain)
371368
}
372369

373370
static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *tag,
374-
unsigned long addr, unsigned long pages,
375-
unsigned long mask, int ih)
371+
unsigned long addr, unsigned long mask, int ih)
376372
{
377373
struct intel_iommu *iommu = tag->iommu;
378374
u64 type = DMA_TLB_PSI_FLUSH;
379375

380376
if (intel_domain_use_piotlb(domain)) {
381-
if (pages == -1)
377+
if (mask >= MAX_AGAW_PFN_WIDTH)
382378
qi_batch_add_piotlb_all(iommu, tag->domain_id,
383379
tag->pasid, domain->qi_batch);
384380
else
@@ -392,7 +388,7 @@ static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *
392388
* is too big.
393389
*/
394390
if (!cap_pgsel_inv(iommu->cap) ||
395-
mask > cap_max_amask_val(iommu->cap) || pages == -1) {
391+
mask > cap_max_amask_val(iommu->cap)) {
396392
addr = 0;
397393
mask = 0;
398394
ih = 0;
@@ -441,16 +437,15 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
441437
unsigned long end, int ih)
442438
{
443439
struct intel_iommu *iommu = NULL;
444-
unsigned long pages, mask, addr;
440+
unsigned long mask, addr;
445441
struct cache_tag *tag;
446442
unsigned long flags;
447443

448444
if (start == 0 && end == ULONG_MAX) {
449445
addr = 0;
450-
pages = -1;
451446
mask = MAX_AGAW_PFN_WIDTH;
452447
} else {
453-
addr = calculate_psi_aligned_address(start, end, &pages, &mask);
448+
addr = calculate_psi_aligned_address(start, end, &mask);
454449
}
455450

456451
spin_lock_irqsave(&domain->cache_lock, flags);
@@ -462,7 +457,7 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
462457
switch (tag->type) {
463458
case CACHE_TAG_IOTLB:
464459
case CACHE_TAG_NESTING_IOTLB:
465-
cache_tag_flush_iotlb(domain, tag, addr, pages, mask, ih);
460+
cache_tag_flush_iotlb(domain, tag, addr, mask, ih);
466461
break;
467462
case CACHE_TAG_NESTING_DEVTLB:
468463
/*
@@ -480,7 +475,7 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
480475
break;
481476
}
482477

483-
trace_cache_tag_flush_range(tag, start, end, addr, pages, mask);
478+
trace_cache_tag_flush_range(tag, start, end, addr, mask);
484479
}
485480
qi_batch_flush_descs(iommu, domain->qi_batch);
486481
spin_unlock_irqrestore(&domain->cache_lock, flags);
@@ -510,11 +505,11 @@ void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
510505
unsigned long end)
511506
{
512507
struct intel_iommu *iommu = NULL;
513-
unsigned long pages, mask, addr;
508+
unsigned long mask, addr;
514509
struct cache_tag *tag;
515510
unsigned long flags;
516511

517-
addr = calculate_psi_aligned_address(start, end, &pages, &mask);
512+
addr = calculate_psi_aligned_address(start, end, &mask);
518513

519514
spin_lock_irqsave(&domain->cache_lock, flags);
520515
list_for_each_entry(tag, &domain->cache_tags, node) {
@@ -530,9 +525,9 @@ void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
530525

531526
if (tag->type == CACHE_TAG_IOTLB ||
532527
tag->type == CACHE_TAG_NESTING_IOTLB)
533-
cache_tag_flush_iotlb(domain, tag, addr, pages, mask, 0);
528+
cache_tag_flush_iotlb(domain, tag, addr, mask, 0);
534529

535-
trace_cache_tag_flush_range_np(tag, start, end, addr, pages, mask);
530+
trace_cache_tag_flush_range_np(tag, start, end, addr, mask);
536531
}
537532
qi_batch_flush_descs(iommu, domain->qi_batch);
538533
spin_unlock_irqrestore(&domain->cache_lock, flags);

drivers/iommu/intel/trace.h

Lines changed: 8 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -132,8 +132,8 @@ DEFINE_EVENT(cache_tag_log, cache_tag_unassign,
132132

133133
DECLARE_EVENT_CLASS(cache_tag_flush,
134134
TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
135-
unsigned long addr, unsigned long pages, unsigned long mask),
136-
TP_ARGS(tag, start, end, addr, pages, mask),
135+
unsigned long addr, unsigned long mask),
136+
TP_ARGS(tag, start, end, addr, mask),
137137
TP_STRUCT__entry(
138138
__string(iommu, tag->iommu->name)
139139
__string(dev, dev_name(tag->dev))
@@ -143,7 +143,6 @@ DECLARE_EVENT_CLASS(cache_tag_flush,
143143
__field(unsigned long, start)
144144
__field(unsigned long, end)
145145
__field(unsigned long, addr)
146-
__field(unsigned long, pages)
147146
__field(unsigned long, mask)
148147
),
149148
TP_fast_assign(
@@ -155,31 +154,30 @@ DECLARE_EVENT_CLASS(cache_tag_flush,
155154
__entry->start = start;
156155
__entry->end = end;
157156
__entry->addr = addr;
158-
__entry->pages = pages;
159157
__entry->mask = mask;
160158
),
161-
TP_printk("%s %s[%d] type %s did %d [0x%lx-0x%lx] addr 0x%lx pages 0x%lx mask 0x%lx",
159+
TP_printk("%s %s[%d] type %s did %d [0x%lx-0x%lx] addr 0x%lx mask 0x%lx",
162160
__get_str(iommu), __get_str(dev), __entry->pasid,
163161
__print_symbolic(__entry->type,
164162
{ CACHE_TAG_IOTLB, "iotlb" },
165163
{ CACHE_TAG_DEVTLB, "devtlb" },
166164
{ CACHE_TAG_NESTING_IOTLB, "nesting_iotlb" },
167165
{ CACHE_TAG_NESTING_DEVTLB, "nesting_devtlb" }),
168166
__entry->domain_id, __entry->start, __entry->end,
169-
__entry->addr, __entry->pages, __entry->mask
167+
__entry->addr, __entry->mask
170168
)
171169
);
172170

173171
DEFINE_EVENT(cache_tag_flush, cache_tag_flush_range,
174172
TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
175-
unsigned long addr, unsigned long pages, unsigned long mask),
176-
TP_ARGS(tag, start, end, addr, pages, mask)
173+
unsigned long addr, unsigned long mask),
174+
TP_ARGS(tag, start, end, addr, mask)
177175
);
178176

179177
DEFINE_EVENT(cache_tag_flush, cache_tag_flush_range_np,
180178
TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
181-
unsigned long addr, unsigned long pages, unsigned long mask),
182-
TP_ARGS(tag, start, end, addr, pages, mask)
179+
unsigned long addr, unsigned long mask),
180+
TP_ARGS(tag, start, end, addr, mask)
183181
);
184182
#endif /* _TRACE_INTEL_IOMMU_H */
185183

0 commit comments

Comments
 (0)