
Commit 370645f

ctmarinas authored and akpm00 committed
dma-mapping: force bouncing if the kmalloc() size is not cache-line-aligned
For direct DMA, if the size is small enough to have originated from a
kmalloc() cache below ARCH_DMA_MINALIGN, check its alignment against
dma_get_cache_alignment() and bounce if necessary. For larger sizes, it is
the responsibility of the DMA API caller to ensure proper alignment. At
this point, the kmalloc() caches are properly aligned but this will change
in a subsequent patch.

Architectures can opt in by selecting DMA_BOUNCE_UNALIGNED_KMALLOC.

Link: https://lkml.kernel.org/r/20230612153201.554742-15-catalin.marinas@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Tested-by: Isaac J. Manjarres <isaacmanjarres@google.com>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Jerry Snitselaar <jsnitsel@redhat.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Jonathan Cameron <jic23@kernel.org>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Lars-Peter Clausen <lars@metafoo.de>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Mark Brown <broonie@kernel.org>
Cc: Mike Snitzer <snitzer@kernel.org>
Cc: "Rafael J. Wysocki" <rafael@kernel.org>
Cc: Saravana Kannan <saravanak@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent cb147bb commit 370645f
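
To make the new check concrete, here is a worked example that is not part of
the commit. Assume a hypothetical non-coherent system where
dma_get_cache_alignment() returns 128 while the smallest kmalloc() caches are
only 64-byte aligned; example_needs_bounce() is an illustrative name that
condenses the size heuristic added by the patch below:

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

/*
 * Illustration only, assuming dma_get_cache_alignment() == 128 and
 * ARCH_DMA_MINALIGN == 128 on a non-coherent device:
 *
 *   kmalloc(56)  -> rounds up to a 64-byte object  -> may be bounced
 *   kmalloc(120) -> rounds up to a 128-byte object -> never bounced
 *   kmalloc(300) -> >= 2 * ARCH_DMA_MINALIGN       -> never bounced
 */
static bool example_needs_bounce(size_t size)
{
	/* small sizes must round up to a whole number of DMA cache lines */
	return size < 2 * ARCH_DMA_MINALIGN &&
	       !IS_ALIGNED(kmalloc_size_roundup(size),
			   dma_get_cache_alignment());
}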

File tree

3 files changed: +67 -1 lines changed

include/linux/dma-map-ops.h

Lines changed: 61 additions & 0 deletions

@@ -8,6 +8,7 @@
 
 #include <linux/dma-mapping.h>
 #include <linux/pgtable.h>
+#include <linux/slab.h>
 
 struct cma;
 
@@ -277,6 +278,66 @@ static inline bool dev_is_dma_coherent(struct device *dev)
 }
 #endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */
 
+/*
+ * Check whether potential kmalloc() buffers are safe for non-coherent DMA.
+ */
+static inline bool dma_kmalloc_safe(struct device *dev,
+				    enum dma_data_direction dir)
+{
+	/*
+	 * If DMA bouncing of kmalloc() buffers is disabled, the kmalloc()
+	 * caches have already been aligned to a DMA-safe size.
+	 */
+	if (!IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
+		return true;
+
+	/*
+	 * kmalloc() buffers are DMA-safe irrespective of size if the device
+	 * is coherent or the direction is DMA_TO_DEVICE (non-destructive
+	 * cache maintenance and benign cache line evictions).
+	 */
+	if (dev_is_dma_coherent(dev) || dir == DMA_TO_DEVICE)
+		return true;
+
+	return false;
+}
+
+/*
+ * Check whether the given size, assuming it is for a kmalloc()'ed buffer, is
+ * sufficiently aligned for non-coherent DMA.
+ */
+static inline bool dma_kmalloc_size_aligned(size_t size)
+{
+	/*
+	 * Larger kmalloc() sizes are guaranteed to be aligned to
+	 * ARCH_DMA_MINALIGN.
+	 */
+	if (size >= 2 * ARCH_DMA_MINALIGN ||
+	    IS_ALIGNED(kmalloc_size_roundup(size), dma_get_cache_alignment()))
+		return true;
+
+	return false;
+}
+
+/*
+ * Check whether the given object size may have originated from a kmalloc()
+ * buffer with a slab alignment below the DMA-safe alignment and needs
+ * bouncing for non-coherent DMA. The pointer alignment is not considered and
+ * in-structure DMA-safe offsets are the responsibility of the caller. Such
+ * code should use the static ARCH_DMA_MINALIGN for compiler annotations.
+ *
+ * The heuristics can have false positives, bouncing unnecessarily, though the
+ * buffers would be small. False negatives are theoretically possible if, for
+ * example, multiple small kmalloc() buffers are coalesced into a larger
+ * buffer that passes the alignment check. There are no such known constructs
+ * in the kernel.
+ */
+static inline bool dma_kmalloc_needs_bounce(struct device *dev, size_t size,
+					    enum dma_data_direction dir)
+{
+	return !dma_kmalloc_safe(dev, dir) && !dma_kmalloc_size_aligned(size);
+}
+
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t gfp, unsigned long attrs);
 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
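
The comment above dma_kmalloc_needs_bounce() notes that pointer alignment and
in-structure offsets remain the caller's responsibility, using the static
ARCH_DMA_MINALIGN for compiler annotations. A minimal sketch of that pattern,
with a hypothetical struct example_ring that is not from this commit:

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/*
 * Hypothetical driver structure: CPU-owned state and a device-written
 * buffer share one allocation, so the DMA area is placed on its own
 * DMA-safe cache line via the compile-time ARCH_DMA_MINALIGN.
 */
struct example_ring {
	spinlock_t lock;	/* CPU-only state */
	u8 rx_buf[64] __aligned(ARCH_DMA_MINALIGN);	/* device writes here */
};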

kernel/dma/Kconfig

Lines changed: 4 additions & 0 deletions

@@ -90,6 +90,10 @@ config SWIOTLB
 	bool
 	select NEED_DMA_MAP_STATE
 
+config DMA_BOUNCE_UNALIGNED_KMALLOC
+	bool
+	depends on SWIOTLB
+
 config DMA_RESTRICTED_POOL
 	bool "DMA Restricted Pool"
 	depends on OF && OF_RESERVED_MEM && SWIOTLB
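
DMA_BOUNCE_UNALIGNED_KMALLOC has no prompt string, so it cannot be enabled
from menuconfig; an architecture opts in by selecting it. As an illustration
(this happens in a later patch of the series, not in this commit), the arm64
opt-in looks roughly like:

# Illustration only: how an architecture opts in, conditional on SWIOTLB
config ARM64
	select DMA_BOUNCE_UNALIGNED_KMALLOC if SWIOTLB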

kernel/dma/direct.h

Lines changed: 2 additions & 1 deletion

@@ -94,7 +94,8 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
 		return swiotlb_map(dev, phys, size, dir, attrs);
 	}
 
-	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
+	if (unlikely(!dma_capable(dev, dma_addr, size, true)) ||
+	    dma_kmalloc_needs_bounce(dev, size, dir)) {
 		if (is_pci_p2pdma_page(page))
 			return DMA_MAPPING_ERROR;
 		if (is_swiotlb_active(dev))
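
From a driver's point of view the bounce is transparent: the usual streaming
DMA calls are unchanged. A minimal sketch with a hypothetical helper
example_map_rx() (assumptions: non-coherent device, small kmalloc() buffer):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/*
 * Hypothetical fragment, not part of this commit. If "dev" is
 * non-coherent and the 32-byte allocation comes from a kmalloc() cache
 * aligned below dma_get_cache_alignment(), dma_direct_map_page() now
 * bounces it through SWIOTLB instead of returning an unsafe address.
 */
static dma_addr_t example_map_rx(struct device *dev, void **buf)
{
	*buf = kmalloc(32, GFP_KERNEL);	/* may be only 64-byte aligned */
	if (!*buf)
		return DMA_MAPPING_ERROR;

	/* DMA_FROM_DEVICE implies destructive cache invalidation */
	return dma_map_single(dev, *buf, 32, DMA_FROM_DEVICE);
}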
