intel-iommu sg chaining support
x86_64 defines ARCH_HAS_SG_CHAIN. So if IOMMU implementations don't
support sg chaining, we will get data corruption.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
FUJITA Tomonori authored and Linus Torvalds committed Oct 22, 2007
Parent: 358dd8a · Commit: c03ab37
Showing 1 changed file with 17 additions and 17 deletions.
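Background on why the old iteration pattern is unsafe: with ARCH_HAS_SG_CHAIN (as on x86_64), a scatterlist may be split across several separately allocated chunks stitched together by chain entries, so walking it with plain pointer arithmetic (sg++ or &sg[i], as the old intel-iommu code did) runs off the end of the first chunk instead of following the chain. The patch below switches to for_each_sg(), which steps via sg_next() and follows chain links. The following is a minimal user-space sketch of that idea; struct entry, entry_next() and the chunk layout are invented for illustration and are not the kernel's scatterlist API.

/*
 * Minimal user-space model of scatterlist chaining (illustration only).
 * struct entry and entry_next() are made-up stand-ins for the kernel's
 * struct scatterlist and sg_next(); the real details differ.
 */
#include <stdio.h>

struct entry {
        unsigned int length;    /* payload length; 0 marks a chain entry */
        struct entry *chain;    /* set only on the chain entry */
};

/* Chain-aware step: like sg_next(), follow the link when we hit it. */
static struct entry *entry_next(struct entry *e)
{
        e++;
        if (e->length == 0 && e->chain)
                e = e->chain;
        return e;
}

int main(void)
{
        /* Two separately allocated chunks stitched together by a chain entry. */
        struct entry chunk2[2] = { { .length = 30 }, { .length = 40 } };
        struct entry chunk1[3] = {
                { .length = 10 },
                { .length = 20 },
                { .length = 0, .chain = chunk2 },
        };
        struct entry *e = chunk1;
        int i, nelems = 4;      /* four payload entries across the two chunks */

        /* Chain-aware walk, analogous to for_each_sg(): prints 10 20 30 40. */
        for (i = 0; i < nelems; i++) {
                printf("%u ", e->length);
                if (i + 1 < nelems)
                        e = entry_next(e);
        }
        printf("\n");

        /*
         * A plain "e++" loop over nelems entries, like the code this patch
         * removes, would instead read the chain entry itself and then memory
         * past the end of chunk1 -- the corruption the log message warns about.
         */
        return 0;
}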
drivers/pci/intel-iommu.c (17 additions, 17 deletions)

--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1963,7 +1963,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size,
 }
 
 #define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sg,
+static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
         int nelems, int dir)
 {
         int i;
@@ -1973,16 +1973,17 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sg,
         struct iova *iova;
         size_t size = 0;
         void *addr;
+        struct scatterlist *sg;
 
         if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
                 return;
 
         domain = find_domain(pdev);
 
-        iova = find_iova(&domain->iovad, IOVA_PFN(sg[0].dma_address));
+        iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
         if (!iova)
                 return;
-        for (i = 0; i < nelems; i++, sg++) {
+        for_each_sg(sglist, sg, nelems, i) {
                 addr = SG_ENT_VIRT_ADDRESS(sg);
                 size += aligned_size((u64)addr, sg->length);
         }
@@ -2003,21 +2004,21 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sg,
 }
 
 static int intel_nontranslate_map_sg(struct device *hddev,
-        struct scatterlist *sg, int nelems, int dir)
+        struct scatterlist *sglist, int nelems, int dir)
 {
         int i;
+        struct scatterlist *sg;
 
-        for (i = 0; i < nelems; i++) {
-                struct scatterlist *s = &sg[i];
-                BUG_ON(!s->page);
-                s->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(s));
-                s->dma_length = s->length;
+        for_each_sg(sglist, sg, nelems, i) {
+                BUG_ON(!sg->page);
+                sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg));
+                sg->dma_length = sg->length;
         }
         return nelems;
 }
 
-static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
-        int nelems, int dir)
+static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
+        int nelems, int dir)
 {
         void *addr;
         int i;
@@ -2028,26 +2029,26 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
         size_t offset = 0;
         struct iova *iova = NULL;
         int ret;
-        struct scatterlist *orig_sg = sg;
+        struct scatterlist *sg;
         unsigned long start_addr;
 
         BUG_ON(dir == DMA_NONE);
         if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
-                return intel_nontranslate_map_sg(hwdev, sg, nelems, dir);
+                return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
 
         domain = get_valid_domain_for_dev(pdev);
         if (!domain)
                 return 0;
 
-        for (i = 0; i < nelems; i++, sg++) {
+        for_each_sg(sglist, sg, nelems, i) {
                 addr = SG_ENT_VIRT_ADDRESS(sg);
                 addr = (void *)virt_to_phys(addr);
                 size += aligned_size((u64)addr, sg->length);
         }
 
         iova = __intel_alloc_iova(hwdev, domain, size);
         if (!iova) {
-                orig_sg->dma_length = 0;
+                sglist->dma_length = 0;
                 return 0;
         }
 
@@ -2063,8 +2064,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
 
         start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
         offset = 0;
-        sg = orig_sg;
-        for (i = 0; i < nelems; i++, sg++) {
+        for_each_sg(sglist, sg, nelems, i) {
                 addr = SG_ENT_VIRT_ADDRESS(sg);
                 addr = (void *)virt_to_phys(addr);
                 size = aligned_size((u64)addr, sg->length);
