
Commit f211830

aik authored and gregkh committed
powerpc/book3s64: Fix error handling in mm_iommu_do_alloc()
[ Upstream commit c4b7816 ]

The last jump to free_exit in mm_iommu_do_alloc() happens after page pointers in struct mm_iommu_table_group_mem_t were already converted to physical addresses. Calling put_page() on these physical addresses will likely crash.

This moves the loop which calculates the pageshift and converts page struct pointers to physical addresses to later, after the point where we can no longer fail, thus eliminating the need to convert the pointers back.

Fixes: eb9d7a6 ("powerpc/mm_iommu: Fix potential deadlock")
Reported-by: Jan Kara <jack@suse.cz>
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20191223060351.26359-1-aik@ozlabs.ru
Signed-off-by: Sasha Levin <sashal@kernel.org>
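
To make the failure mode concrete, below is a minimal userspace sketch, not the kernel code: in mm_iommu_table_group_mem_t the hpages and hpas members share storage, so once the loop rewrites the entries as physical addresses, an error path that still treats them as struct page pointers dereferences garbage. The names mem_sketch, put_page_stub and do_alloc_sketch, and the 0x1000-based addresses, are invented for illustration only.

/*
 * Minimal userspace sketch of the bug pattern, NOT the kernel code.
 * "mem_sketch", "put_page_stub", "do_alloc_sketch" and the 0x1000-based
 * addresses are invented for illustration.
 */
struct page { long refcount; };

struct mem_sketch {
	unsigned long entries;
	union {				/* hpages and hpas share storage */
		struct page **hpages;	/* pinned page pointers */
		unsigned long *hpas;	/* physical addresses */
	};
};

static void put_page_stub(struct page *page)
{
	page->refcount--;		/* stand-in for the kernel's put_page() */
}

static long do_alloc_sketch(struct mem_sketch *mem, int late_failure)
{
	unsigned long i;

	/* Old ordering: convert to physical addresses before the last failure check. */
	for (i = 0; i < mem->entries; ++i)
		mem->hpas[i] = 0x1000UL * (i + 1);	/* clobbers hpages[i] */

	if (late_failure)		/* e.g. the region turns out to be already registered */
		goto free_exit;

	return 0;

free_exit:
	for (i = 0; i < mem->entries; ++i)
		put_page_stub(mem->hpages[i]);	/* no longer a valid pointer: crash */
	return -1;
}

The patch fixes this purely by ordering: as the diff below shows, the pageshift/conversion loop now runs after the last check that can fail, and only when mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA (device memory is not backed by page structs), so the free_exit path only ever sees real struct page pointers.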
1 parent efc95f2 commit f211830

1 file changed, 21 additions and 18 deletions


arch/powerpc/mm/book3s64/iommu_api.c

@@ -121,24 +121,6 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 			goto free_exit;
 		}
 
-	pageshift = PAGE_SHIFT;
-	for (i = 0; i < entries; ++i) {
-		struct page *page = mem->hpages[i];
-
-		/*
-		 * Allow to use larger than 64k IOMMU pages. Only do that
-		 * if we are backed by hugetlb.
-		 */
-		if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
-			pageshift = page_shift(compound_head(page));
-		mem->pageshift = min(mem->pageshift, pageshift);
-		/*
-		 * We don't need struct page reference any more, switch
-		 * to physical address.
-		 */
-		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
-	}
-
 good_exit:
 	atomic64_set(&mem->mapped, 1);
 	mem->used = 1;
@@ -158,6 +140,27 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 		}
 	}
 
+	if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
+		/*
+		 * Allow to use larger than 64k IOMMU pages. Only do that
+		 * if we are backed by hugetlb. Skip device memory as it is not
+		 * backed with page structs.
+		 */
+		pageshift = PAGE_SHIFT;
+		for (i = 0; i < entries; ++i) {
+			struct page *page = mem->hpages[i];
+
+			if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
+				pageshift = page_shift(compound_head(page));
+			mem->pageshift = min(mem->pageshift, pageshift);
+			/*
+			 * We don't need struct page reference any more, switch
+			 * to physical address.
+			 */
+			mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
+		}
+	}
+
 	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
 
 	mutex_unlock(&mem_list_mutex);
