@@ -31,7 +31,6 @@
 #include <linux/pci.h>
 #include <linux/dmar.h>
 #include <linux/dma-mapping.h>
-#include <linux/dma-direct.h>
 #include <linux/mempool.h>
 #include <linux/memory.h>
 #include <linux/cpu.h>
@@ -3713,30 +3712,61 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flags,
 				  unsigned long attrs)
 {
-	void *vaddr;
+	struct page *page = NULL;
+	int order;
 
-	vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs);
-	if (iommu_no_mapping(dev) || !vaddr)
-		return vaddr;
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
 
-	*dma_handle = __intel_map_single(dev, virt_to_phys(vaddr),
-			PAGE_ALIGN(size), DMA_BIDIRECTIONAL,
-			dev->coherent_dma_mask);
-	if (!*dma_handle)
-		goto out_free_pages;
-	return vaddr;
+	if (!iommu_no_mapping(dev))
+		flags &= ~(GFP_DMA | GFP_DMA32);
+	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
+		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+			flags |= GFP_DMA;
+		else
+			flags |= GFP_DMA32;
+	}
+
+	if (gfpflags_allow_blocking(flags)) {
+		unsigned int count = size >> PAGE_SHIFT;
+
+		page = dma_alloc_from_contiguous(dev, count, order, flags);
+		if (page && iommu_no_mapping(dev) &&
+		    page_to_phys(page) + size > dev->coherent_dma_mask) {
+			dma_release_from_contiguous(dev, page, count);
+			page = NULL;
+		}
+	}
+
+	if (!page)
+		page = alloc_pages(flags, order);
+	if (!page)
+		return NULL;
+	memset(page_address(page), 0, size);
+
+	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
+					 DMA_BIDIRECTIONAL,
+					 dev->coherent_dma_mask);
+	if (*dma_handle)
+		return page_address(page);
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, order);
 
-out_free_pages:
-	dma_direct_free(dev, size, vaddr, *dma_handle, attrs);
 	return NULL;
 }
 
 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
 				dma_addr_t dma_handle, unsigned long attrs)
 {
-	if (!iommu_no_mapping(dev))
-		intel_unmap(dev, dma_handle, PAGE_ALIGN(size));
-	dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+	int order;
+	struct page *page = virt_to_page(vaddr);
+
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
+
+	intel_unmap(dev, dma_handle, size);
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, order);
 }
 
 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
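For reference, a minimal sketch (not part of the patch) of how a driver reaches this allocation path through the generic DMA API, which dispatches into intel_alloc_coherent()/intel_free_coherent() above when the VT-d driver handles the device; the function name, device pointer, and buffer size below are illustrative assumptions:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/* Hypothetical caller (not from the patch): dma_alloc_coherent() ends up in
 * intel_alloc_coherent() above, and dma_free_coherent() in
 * intel_free_coherent(). */
static int example_coherent_buffer(struct device *dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	/* GFP_KERNEL may block, so the contiguous (CMA) path guarded by
	 * gfpflags_allow_blocking() in the patch can be taken; the buffer is
	 * zeroed by the memset() in intel_alloc_coherent(). */
	cpu_addr = dma_alloc_coherent(dev, SZ_64K, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... program dma_handle into the device and perform I/O ... */

	dma_free_coherent(dev, SZ_64K, cpu_addr, dma_handle);
	return 0;
}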