Skip to content

Commit 6d39bde

Browse files
ssuthiku-amd authored and willdeacon committed
iommu/amd: Enforce 4k mapping for certain IOMMU data structures
AMD IOMMU requires 4k-aligned pages for the event log, the PPR log, and the completion wait write-back regions. However, when allocating the pages, they could be part of large mapping (e.g. 2M) page. This causes #PF due to the SNP RMP hardware enforces the check based on the page level for these data structures. So, fix by calling set_memory_4k() on the allocated pages. Fixes: c69d89a ("iommu/amd: Use 4K page for completion wait write-back semaphore") Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> Cc: Brijesh Singh <brijesh.singh@amd.com> Link: https://lore.kernel.org/r/20201105145832.3065-1-suravee.suthikulpanit@amd.com Signed-off-by: Will Deacon <will@kernel.org>
1 parent 91c2c28 commit 6d39bde

File tree

1 file changed

+22
-5
lines changed

1 file changed

+22
-5
lines changed

drivers/iommu/amd/init.c

Lines changed: 22 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@
2929
#include <asm/iommu_table.h>
3030
#include <asm/io_apic.h>
3131
#include <asm/irq_remapping.h>
32+
#include <asm/set_memory.h>
3233

3334
#include <linux/crash_dump.h>
3435

@@ -672,11 +673,27 @@ static void __init free_command_buffer(struct amd_iommu *iommu)
672673
free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
673674
}
674675

676+
/*
 * Allocate pages for an IOMMU data structure, forcing 4k mappings when
 * the IOMMU supports SNP: the RMP check is done per page level, so the
 * backing pages must not be part of a large (e.g. 2M) kernel mapping.
 *
 * Returns the zero-or-more page allocation on success, NULL on failure
 * (either the allocation itself or the 4k split failed).
 */
static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
					 gfp_t gfp, size_t size)
{
	int order = get_order(size);
	void *buf = (void *)__get_free_pages(gfp, order);

	if (!buf)
		return NULL;

	/*
	 * Only SNP-capable IOMMUs need the split; on set_memory_4k()
	 * failure the allocation is useless, so release it.
	 */
	if (iommu_feature(iommu, FEATURE_SNP) &&
	    set_memory_4k((unsigned long)buf, (1 << order))) {
		free_pages((unsigned long)buf, order);
		return NULL;
	}

	return buf;
}
675692
/* allocates the memory where the IOMMU will log its events to */
676693
static int __init alloc_event_buffer(struct amd_iommu *iommu)
677694
{
678-
iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
679-
get_order(EVT_BUFFER_SIZE));
695+
iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
696+
EVT_BUFFER_SIZE);
680697

681698
return iommu->evt_buf ? 0 : -ENOMEM;
682699
}
@@ -715,8 +732,8 @@ static void __init free_event_buffer(struct amd_iommu *iommu)
715732
/* allocates the memory where the IOMMU will log its events to */
716733
static int __init alloc_ppr_log(struct amd_iommu *iommu)
717734
{
718-
iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
719-
get_order(PPR_LOG_SIZE));
735+
iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
736+
PPR_LOG_SIZE);
720737

721738
return iommu->ppr_log ? 0 : -ENOMEM;
722739
}
@@ -838,7 +855,7 @@ static int iommu_init_ga(struct amd_iommu *iommu)
838855

839856
/*
 * Allocate the completion wait write-back semaphore page. Size 1 rounds
 * up to a single zeroed page; 4k mapping is enforced for SNP hardware.
 */
static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
	iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);

	if (!iommu->cmd_sem)
		return -ENOMEM;

	return 0;
}

0 commit comments

Comments
 (0)