Skip to content

Commit 5aac287

Browse files
Magnus Kalland authored and
Joerg Roedel committed
iommu/amd: Invalidate IRT cache for DMA aliases
DMA aliasing causes interrupt remapping table entries (IRTEs) to be shared between multiple device IDs. See commit 3c12443 ("iommu/amd: Support multiple PCI DMA aliases in IRQ Remapping") for more information on this. However, the AMD IOMMU driver currently invalidates IRTE cache entries on a per-device basis whenever an IRTE is updated, not for each alias. This approach leaves stale IRTE cache entries when an IRTE is cached under one DMA alias but later updated and invalidated through a different alias. In such cases, the original device ID is never invalidated, since it is programmed via aliasing. This incoherency bug has been observed when IRTEs are cached for one Non-Transparent Bridge (NTB) DMA alias and later updated via another. Fix this by invalidating the interrupt remapping table cache for all DMA aliases when updating an IRTE. Co-developed-by: Lars B. Kristiansen <larsk@dolphinics.com> Signed-off-by: Lars B. Kristiansen <larsk@dolphinics.com> Co-developed-by: Jonas Markussen <jonas@dolphinics.com> Signed-off-by: Jonas Markussen <jonas@dolphinics.com> Co-developed-by: Tore H. Larsen <torel@simula.no> Signed-off-by: Tore H. Larsen <torel@simula.no> Signed-off-by: Magnus Kalland <magnus@dolphinics.com> Link: https://lore.kernel.org/linux-iommu/9204da81-f821-4034-b8ad-501e43383b56@amd.com/ Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
1 parent faad224 commit 5aac287

1 file changed

Lines changed: 23 additions & 5 deletions

File tree

drivers/iommu/amd/iommu.c

Lines changed: 23 additions & 5 deletions
Original file line number · Diff line number · Diff line change
@@ -3167,26 +3167,44 @@ const struct iommu_ops amd_iommu_ops = {
31673167
static struct irq_chip amd_ir_chip;
31683168
static DEFINE_SPINLOCK(iommu_table_lock);
31693169

3170+
static int iommu_flush_dev_irt(struct pci_dev *unused, u16 devid, void *data)
3171+
{
3172+
int ret;
3173+
struct iommu_cmd cmd;
3174+
struct amd_iommu *iommu = data;
3175+
3176+
build_inv_irt(&cmd, devid);
3177+
ret = __iommu_queue_command_sync(iommu, &cmd, true);
3178+
return ret;
3179+
}
3180+
31703181
static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
31713182
{
31723183
int ret;
31733184
u64 data;
31743185
unsigned long flags;
3175-
struct iommu_cmd cmd, cmd2;
3186+
struct iommu_cmd cmd;
3187+
struct pci_dev *pdev = NULL;
3188+
struct iommu_dev_data *dev_data = search_dev_data(iommu, devid);
31763189

31773190
if (iommu->irtcachedis_enabled)
31783191
return;
31793192

3180-
build_inv_irt(&cmd, devid);
3193+
if (dev_data && dev_data->dev && dev_is_pci(dev_data->dev))
3194+
pdev = to_pci_dev(dev_data->dev);
31813195

31823196
raw_spin_lock_irqsave(&iommu->lock, flags);
31833197
data = get_cmdsem_val(iommu);
3184-
build_completion_wait(&cmd2, iommu, data);
3198+
build_completion_wait(&cmd, iommu, data);
31853199

3186-
ret = __iommu_queue_command_sync(iommu, &cmd, true);
3200+
if (pdev)
3201+
ret = pci_for_each_dma_alias(pdev, iommu_flush_dev_irt, iommu);
3202+
else
3203+
ret = iommu_flush_dev_irt(NULL, devid, iommu);
31873204
if (ret)
31883205
goto out_err;
3189-
ret = __iommu_queue_command_sync(iommu, &cmd2, false);
3206+
3207+
ret = __iommu_queue_command_sync(iommu, &cmd, false);
31903208
if (ret)
31913209
goto out_err;
31923210
raw_spin_unlock_irqrestore(&iommu->lock, flags);

0 commit comments

Comments
 (0)