
Commit f054804

Jiri Pirko authored and mszyprow committed
dma-mapping: introduce DMA_ATTR_CC_SHARED for shared memory
Current CC designs don't place a vIOMMU in front of untrusted devices.
Instead, the DMA API forces all untrusted device DMA through swiotlb
bounce buffers (is_swiotlb_force_bounce()), which copies data into
shared memory on behalf of the device. When a caller has already
arranged for the memory to be shared via set_memory_decrypted(), the
DMA API needs to know so it can map directly using the unencrypted
physical address rather than bounce buffering.

Following the pattern of DMA_ATTR_MMIO, add DMA_ATTR_CC_SHARED for this
purpose. Like the MMIO case, only the caller knows what kind of memory
it has and must inform the DMA API for it to work correctly.

Signed-off-by: Jiri Pirko <jiri@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20260325192352.437608-2-jiri@resnulli.us
1 parent 27e2e9b commit f054804

4 files changed: 34 additions & 6 deletions
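To illustrate the intended calling convention, here is a minimal, hypothetical driver sketch (the device, buffer handling, and error paths are illustrative and not part of this commit): the caller first shares the pages with the host via set_memory_decrypted(), then passes DMA_ATTR_CC_SHARED so the DMA API maps the unencrypted physical address instead of bouncing through swiotlb.

/* Illustrative sketch only: map a buffer that the caller has already
 * shared (decrypted) with the host in a confidential computing guest.
 * Assumes @size is page aligned.
 */
#include <linux/dma-mapping.h>
#include <linux/set_memory.h>
#include <linux/mm.h>

static dma_addr_t map_cc_shared_buf(struct device *dev, struct page *page,
				    size_t size)
{
	unsigned long vaddr = (unsigned long)page_address(page);
	dma_addr_t dma;

	/* Share the memory with the host; any CPU mapping of these pages
	 * must use pgprot_decrypted() from now on.
	 */
	if (set_memory_decrypted(vaddr, size >> PAGE_SHIFT))
		return DMA_MAPPING_ERROR;

	/* Tell the DMA API the memory is already shared, so it uses the
	 * unencrypted physical address rather than a swiotlb bounce buffer.
	 */
	dma = dma_map_phys(dev, page_to_phys(page), size, DMA_TO_DEVICE,
			   DMA_ATTR_CC_SHARED);
	return dma;
}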


include/linux/dma-mapping.h

Lines changed: 10 additions & 0 deletions

@@ -92,6 +92,16 @@
  * flushing.
  */
 #define DMA_ATTR_REQUIRE_COHERENT (1UL << 12)
+/*
+ * DMA_ATTR_CC_SHARED: Indicates the DMA mapping is shared (decrypted) for
+ * confidential computing guests. For normal system memory the caller must have
+ * called set_memory_decrypted(), and pgprot_decrypted must be used when
+ * creating CPU PTEs for the mapping. The same shared semantic may be passed
+ * to the vIOMMU when it sets up the IOPTE. For MMIO use together with
+ * DMA_ATTR_MMIO to indicate shared MMIO. Unless DMA_ATTR_MMIO is provided
+ * a struct page is required.
+ */
+#define DMA_ATTR_CC_SHARED (1UL << 13)
 
 /*
  * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
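As the new comment notes, shared MMIO is expressed by combining the two attributes. A hypothetical fragment (the BAR address, length, and direction below are placeholders, not taken from this patch):

	/* Hypothetical: map host-shared (unencrypted) MMIO, e.g. a peer BAR. */
	dma_addr_t dma = dma_map_phys(dev, bar_phys, bar_len, DMA_BIDIRECTIONAL,
				      DMA_ATTR_MMIO | DMA_ATTR_CC_SHARED);
	if (dma == DMA_MAPPING_ERROR)
		return -EIO;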

include/trace/events/dma.h

Lines changed: 2 additions & 1 deletion

@@ -34,7 +34,8 @@ TRACE_DEFINE_ENUM(DMA_NONE);
 	{ DMA_ATTR_PRIVILEGED, "PRIVILEGED" }, \
 	{ DMA_ATTR_MMIO, "MMIO" }, \
 	{ DMA_ATTR_DEBUGGING_IGNORE_CACHELINES, "CACHELINES_OVERLAP" }, \
-	{ DMA_ATTR_REQUIRE_COHERENT, "REQUIRE_COHERENT" })
+	{ DMA_ATTR_REQUIRE_COHERENT, "REQUIRE_COHERENT" }, \
+	{ DMA_ATTR_CC_SHARED, "CC_SHARED" })
 
 DECLARE_EVENT_CLASS(dma_map,
 	TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,

kernel/dma/direct.h

Lines changed: 11 additions & 3 deletions

@@ -89,16 +89,24 @@ static inline dma_addr_t dma_direct_map_phys(struct device *dev,
 	dma_addr_t dma_addr;
 
 	if (is_swiotlb_force_bounce(dev)) {
-		if (attrs & (DMA_ATTR_MMIO | DMA_ATTR_REQUIRE_COHERENT))
-			return DMA_MAPPING_ERROR;
+		if (!(attrs & DMA_ATTR_CC_SHARED)) {
+			if (attrs & (DMA_ATTR_MMIO | DMA_ATTR_REQUIRE_COHERENT))
+				return DMA_MAPPING_ERROR;
 
-		return swiotlb_map(dev, phys, size, dir, attrs);
+			return swiotlb_map(dev, phys, size, dir, attrs);
+		}
+	} else if (attrs & DMA_ATTR_CC_SHARED) {
+		return DMA_MAPPING_ERROR;
 	}
 
 	if (attrs & DMA_ATTR_MMIO) {
 		dma_addr = phys;
 		if (unlikely(!dma_capable(dev, dma_addr, size, false)))
 			goto err_overflow;
+	} else if (attrs & DMA_ATTR_CC_SHARED) {
+		dma_addr = phys_to_dma_unencrypted(dev, phys);
+		if (unlikely(!dma_capable(dev, dma_addr, size, false)))
+			goto err_overflow;
 	} else {
 		dma_addr = phys_to_dma(dev, phys);
 		if (unlikely(!dma_capable(dev, dma_addr, size, true)) ||
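The only difference from the normal direct-mapping branch is the translation helper: phys_to_dma() also applies the platform's memory-encryption bit on top of the bus-address translation, while phys_to_dma_unencrypted() skips it, which is what already-shared memory needs. Roughly, the relationship looks like this (paraphrased from include/linux/dma-direct.h for context, not part of this patch):

/* Paraphrased from include/linux/dma-direct.h; not part of this patch. */
static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
						 phys_addr_t paddr)
{
	if (dev->dma_range_map)
		return translate_phys_to_dma(dev, paddr);
	return paddr;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	/* __sme_set() adds the encryption bit; a no-op without SME/SEV. */
	return __sme_set(phys_to_dma_unencrypted(dev, paddr));
}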

kernel/dma/mapping.c

Lines changed: 11 additions & 2 deletions

@@ -157,6 +157,7 @@ dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	bool is_mmio = attrs & DMA_ATTR_MMIO;
+	bool is_cc_shared = attrs & DMA_ATTR_CC_SHARED;
 	dma_addr_t addr = DMA_MAPPING_ERROR;
 
 	BUG_ON(!valid_dma_direction(dir));
@@ -168,8 +169,11 @@ dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
 		return DMA_MAPPING_ERROR;
 
 	if (dma_map_direct(dev, ops) ||
-	    (!is_mmio && arch_dma_map_phys_direct(dev, phys + size)))
+	    (!is_mmio && !is_cc_shared &&
+	     arch_dma_map_phys_direct(dev, phys + size)))
 		addr = dma_direct_map_phys(dev, phys, size, dir, attrs, true);
+	else if (is_cc_shared)
+		return DMA_MAPPING_ERROR;
 	else if (use_dma_iommu(dev))
 		addr = iommu_dma_map_phys(dev, phys, size, dir, attrs);
 	else if (ops->map_phys)
@@ -206,11 +210,16 @@ void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	bool is_mmio = attrs & DMA_ATTR_MMIO;
+	bool is_cc_shared = attrs & DMA_ATTR_CC_SHARED;
 
 	BUG_ON(!valid_dma_direction(dir));
+
 	if (dma_map_direct(dev, ops) ||
-	    (!is_mmio && arch_dma_unmap_phys_direct(dev, addr + size)))
+	    (!is_mmio && !is_cc_shared &&
+	     arch_dma_unmap_phys_direct(dev, addr + size)))
 		dma_direct_unmap_phys(dev, addr, size, dir, attrs, true);
+	else if (is_cc_shared)
+		return;
 	else if (use_dma_iommu(dev))
 		iommu_dma_unmap_phys(dev, addr, size, dir, attrs);
 	else if (ops->unmap_phys)
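Since unmap follows the matching branch, a caller must pass the same attributes when tearing the mapping down, and should only re-encrypt the memory once the device can no longer touch it. Continuing the hypothetical sketch from above:

	/* Illustrative: unmap with the same attributes used for mapping. */
	dma_unmap_phys(dev, dma, size, DMA_TO_DEVICE, DMA_ATTR_CC_SHARED);

	/* Once the device is done with the buffer, make it private again. */
	WARN_ON(set_memory_encrypted((unsigned long)page_address(page),
				     size >> PAGE_SHIFT));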
