Merge branch 'iommu/intel/vt-d' into iommu/next
* iommu/intel/vt-d:
  iommu/vt-d: Fix identity map bounds in si_domain_init()
  iommu/vt-d: Fix aligned pages in calculate_psi_aligned_address()
  iommu/vt-d: Limit max address mask to MAX_AGAW_PFN_WIDTH
  iommu/vt-d: Refactor PCI PRI enabling/disabling callbacks
  iommu/vt-d: Add helper to flush caches for context change
  iommu/vt-d: Add helper to allocate paging domain
  iommu/vt-d: Downgrade warning for pre-enabled IR
  iommu/vt-d: Remove control over Execute-Requested requests
  iommu/vt-d: Remove comment for def_domain_type
  iommu/vt-d: Handle volatile descriptor status read
  iommu/vt-d: Use try_cmpxchg64() in intel_pasid_get_entry()
willdeacon committed Jul 12, 2024
2 parents cbf9520 + 3100073 commit 906fe8b
Showing 7 changed files with 249 additions and 101 deletions.
3 changes: 2 additions & 1 deletion drivers/iommu/intel/cache.c
@@ -245,7 +245,8 @@ static unsigned long calculate_psi_aligned_address(unsigned long start,
* shared_bits are all equal in both pfn and end_pfn.
*/
shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
mask = shared_bits ? __ffs(shared_bits) : MAX_AGAW_PFN_WIDTH;
aligned_pages = 1UL << mask;
}

*_pages = aligned_pages;
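
A small user-space sketch of the shared-bits computation above, with illustrative values; the psi_mask() helper and the hard-coded MAX_AGAW_PFN_WIDTH below are assumptions for this example, not driver code. It shows the fallback case the hunk changes: when pfn and end_pfn share no high bits, the mask is now capped at MAX_AGAW_PFN_WIDTH instead of BITS_PER_LONG.

#include <stdio.h>

/* Assumed to correspond to the kernel's 64 - VTD_PAGE_SHIFT. */
#define MAX_AGAW_PFN_WIDTH	52

/* Sketch of the fallback mask computation for a misaligned flush range. */
static unsigned long psi_mask(unsigned long pfn, unsigned long end_pfn,
			      unsigned long bitmask)
{
	unsigned long shared_bits = ~(pfn ^ end_pfn) & ~bitmask;

	/* __builtin_ctzl() stands in for the kernel's __ffs(). */
	return shared_bits ? (unsigned long)__builtin_ctzl(shared_bits)
			   : MAX_AGAW_PFN_WIDTH;
}

int main(void)
{
	/* Two pages starting at an odd pfn: bitmask = 1, pfn & bitmask != 0. */
	unsigned long mask = psi_mask(0x1003, 0x1004, 1);

	/* Prints mask=3 pages=8: flush 8 pages at 0x1000 to cover 0x1003-0x1004. */
	printf("mask=%lu pages=%lu\n", mask, 1UL << mask);
	return 0;
}
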
2 changes: 1 addition & 1 deletion drivers/iommu/intel/dmar.c
@@ -1446,7 +1446,7 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
*/
writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);

while (qi->desc_status[wait_index] != QI_DONE) {
while (READ_ONCE(qi->desc_status[wait_index]) != QI_DONE) {
/*
* We will leave the interrupts disabled, to prevent interrupt
* context to queue another cmd while a cmd is already submitted
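
The READ_ONCE() added here forces a fresh load of the status word on every iteration: the value is written back by the IOMMU when the wait descriptor completes, not by the polling CPU, so without the annotation the compiler may hoist the load out of the loop and spin on a stale register value. A minimal sketch of the pattern, using a hypothetical wait_for_done() helper (QI_DONE is the driver's own constant from iommu.h):

#include <linux/compiler.h>	/* READ_ONCE() */
#include <linux/processor.h>	/* cpu_relax() */

/* Hypothetical helper: poll a status slot that the hardware updates. */
static void wait_for_done(int *status, unsigned int idx)
{
	/*
	 * READ_ONCE() makes each iteration re-read memory; a plain
	 * status[idx] access could legally be loaded once and cached.
	 */
	while (READ_ONCE(status[idx]) != QI_DONE)
		cpu_relax();
}
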
196 changes: 137 additions & 59 deletions drivers/iommu/intel/iommu.c
@@ -854,7 +854,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
if (domain->use_first_level)
pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
pteval |= DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;

tmp = 0ULL;
if (!try_cmpxchg64(&pte->val, &tmp, pteval))
@@ -1359,21 +1359,6 @@ static void iommu_disable_pci_caps(struct device_domain_info *info)
}
}

static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
u64 addr, unsigned int mask)
{
u16 sid, qdep;

if (!info || !info->ats_enabled)
return;

sid = info->bus << 8 | info->devfn;
qdep = info->ats_qdep;
qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
qdep, addr, mask);
quirk_extra_dev_tlb_flush(info, addr, mask, IOMMU_NO_PASID, qdep);
}

static void intel_flush_iotlb_all(struct iommu_domain *domain)
{
cache_tag_flush_all(to_dmar_domain(domain));
@@ -1872,7 +1857,7 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
attr |= DMA_FL_PTE_PRESENT;
if (domain->use_first_level) {
attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
attr |= DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
if (prot & DMA_PTE_WRITE)
attr |= DMA_FL_PTE_DIRTY;
}
@@ -1959,7 +1944,6 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
{
struct intel_iommu *iommu = info->iommu;
struct context_entry *context;
u16 did_old;

spin_lock(&iommu->lock);
context = iommu_context_addr(iommu, bus, devfn, 0);
@@ -1968,24 +1952,10 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
return;
}

did_old = context_domain_id(context);

context_clear_entry(context);
__iommu_flush_cache(iommu, context, sizeof(*context));
spin_unlock(&iommu->lock);
iommu->flush.flush_context(iommu,
did_old,
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);

iommu->flush.flush_iotlb(iommu,
did_old,
0,
0,
DMA_TLB_DSI_FLUSH);

__iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH);
intel_context_flush_present(info, context, true);
}

static int domain_setup_first_level(struct intel_iommu *iommu,
@@ -2071,7 +2041,7 @@ static int __init si_domain_init(int hw)
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
ret = iommu_domain_identity_map(si_domain,
mm_to_dma_pfn_start(start_pfn),
mm_to_dma_pfn_end(end_pfn));
mm_to_dma_pfn_end(end_pfn-1));
if (ret)
return ret;
}
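
The bounds fix above accounts for for_each_mem_pfn_range() reporting an exclusive end_pfn, while mm_to_dma_pfn_end() expects the last pfn of the range. A stand-alone sketch with illustrative values; the two macros below are assumed to mirror the kernel helpers with 4 KiB MM and VT-d pages.

#include <stdio.h>

/* Assumed to mirror the kernel helpers used in si_domain_init(). */
#define PAGE_SHIFT		12
#define VTD_PAGE_SHIFT		12
#define mm_to_dma_pfn_start(pfn) ((unsigned long)(pfn) << (PAGE_SHIFT - VTD_PAGE_SHIFT))
#define mm_to_dma_pfn_end(pfn)   ((((unsigned long)(pfn) + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1)

int main(void)
{
	/* for_each_mem_pfn_range() yields [start_pfn, end_pfn), end exclusive. */
	unsigned long start_pfn = 0x100, end_pfn = 0x200;

	printf("first pfn: %#lx\n", mm_to_dma_pfn_start(start_pfn));
	/* Old code: last pfn 0x200, one MM page past the memory range. */
	printf("old last pfn: %#lx\n", mm_to_dma_pfn_end(end_pfn));
	/* Fixed code: last pfn 0x1ff, the true end of the range. */
	printf("new last pfn: %#lx\n", mm_to_dma_pfn_end(end_pfn - 1));
	return 0;
}
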
@@ -2177,17 +2147,6 @@ static bool device_rmrr_is_relaxable(struct device *dev)
return false;
}

/*
* Return the required default domain type for a specific device.
*
* @dev: the device in query
* @startup: true if this is during early boot
*
* Returns:
* - IOMMU_DOMAIN_DMA: device requires a dynamic mapping domain
* - IOMMU_DOMAIN_IDENTITY: device requires an identical mapping domain
* - 0: both identity and dynamic domains work for this device
*/
static int device_def_domain_type(struct device *dev)
{
if (dev_is_pci(dev)) {
@@ -3633,6 +3592,79 @@ static struct iommu_domain blocking_domain = {
}
};

static int iommu_superpage_capability(struct intel_iommu *iommu, bool first_stage)
{
if (!intel_iommu_superpage)
return 0;

if (first_stage)
return cap_fl1gp_support(iommu->cap) ? 2 : 1;

return fls(cap_super_page_val(iommu->cap));
}

static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_stage)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
struct dmar_domain *domain;
int addr_width;

domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return ERR_PTR(-ENOMEM);

INIT_LIST_HEAD(&domain->devices);
INIT_LIST_HEAD(&domain->dev_pasids);
INIT_LIST_HEAD(&domain->cache_tags);
spin_lock_init(&domain->lock);
spin_lock_init(&domain->cache_lock);
xa_init(&domain->iommu_array);

domain->nid = dev_to_node(dev);
domain->has_iotlb_device = info->ats_enabled;
domain->use_first_level = first_stage;

/* calculate the address width */
addr_width = agaw_to_width(iommu->agaw);
if (addr_width > cap_mgaw(iommu->cap))
addr_width = cap_mgaw(iommu->cap);
domain->gaw = addr_width;
domain->agaw = iommu->agaw;
domain->max_addr = __DOMAIN_MAX_ADDR(addr_width);

/* iommu memory access coherency */
domain->iommu_coherency = iommu_paging_structure_coherency(iommu);

/* pagesize bitmap */
domain->domain.pgsize_bitmap = SZ_4K;
domain->iommu_superpage = iommu_superpage_capability(iommu, first_stage);
domain->domain.pgsize_bitmap |= domain_super_pgsize_bitmap(domain);

/*
* IOVA aperture: First-level translation restricts the input-address
* to a canonical address (i.e., address bits 63:N have the same value
* as address bit [N-1], where N is 48-bits with 4-level paging and
* 57-bits with 5-level paging). Hence, skip bit [N-1].
*/
domain->domain.geometry.force_aperture = true;
domain->domain.geometry.aperture_start = 0;
if (first_stage)
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
else
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);

/* always allocate the top pgd */
domain->pgd = iommu_alloc_page_node(domain->nid, GFP_KERNEL);
if (!domain->pgd) {
kfree(domain);
return ERR_PTR(-ENOMEM);
}
domain_flush_cache(domain, domain->pgd, PAGE_SIZE);

return domain;
}
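
To illustrate the aperture comment in paging_domain_alloc() above: with 4-level first-stage paging (N = 48), skipping bit N-1 keeps every IOVA in the canonical lower half of the input-address space. A quick sketch, assuming __DOMAIN_MAX_ADDR(gaw) expands to (1ULL << gaw) - 1 as in the driver's iommu.h.

#include <stdio.h>

/* Assumed to match the driver's __DOMAIN_MAX_ADDR() in iommu.h. */
#define DOMAIN_MAX_ADDR(gaw)	((1ULL << (gaw)) - 1)

int main(void)
{
	int gaw = 48;	/* 4-level paging */

	/* Second stage: full input-address width, 0xffffffffffff. */
	printf("second stage aperture end: %#llx\n", DOMAIN_MAX_ADDR(gaw));
	/* First stage: bit 47 skipped, 0x7fffffffffff, so IOVAs stay canonical. */
	printf("first stage aperture end:  %#llx\n", DOMAIN_MAX_ADDR(gaw - 1));
	return 0;
}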

static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
struct dmar_domain *dmar_domain;
@@ -3695,15 +3727,14 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
if (user_data || (dirty_tracking && !ssads_supported(iommu)))
return ERR_PTR(-EOPNOTSUPP);

/*
* domain_alloc_user op needs to fully initialize a domain before
* return, so uses iommu_domain_alloc() here for simple.
*/
domain = iommu_domain_alloc(dev->bus);
if (!domain)
return ERR_PTR(-ENOMEM);

dmar_domain = to_dmar_domain(domain);
/* Do not use first stage for user domain translation. */
dmar_domain = paging_domain_alloc(dev, false);
if (IS_ERR(dmar_domain))
return ERR_CAST(dmar_domain);
domain = &dmar_domain->domain;
domain->type = IOMMU_DOMAIN_UNMANAGED;
domain->owner = &intel_iommu_ops;
domain->ops = intel_iommu_ops.default_domain_ops;

if (nested_parent) {
dmar_domain->nested_parent = true;
@@ -4213,6 +4244,37 @@ static int intel_iommu_enable_sva(struct device *dev)
return 0;
}

static int context_flip_pri(struct device_domain_info *info, bool enable)
{
struct intel_iommu *iommu = info->iommu;
u8 bus = info->bus, devfn = info->devfn;
struct context_entry *context;

spin_lock(&iommu->lock);
if (context_copied(iommu, bus, devfn)) {
spin_unlock(&iommu->lock);
return -EINVAL;
}

context = iommu_context_addr(iommu, bus, devfn, false);
if (!context || !context_present(context)) {
spin_unlock(&iommu->lock);
return -ENODEV;
}

if (enable)
context_set_sm_pre(context);
else
context_clear_sm_pre(context);

if (!ecap_coherent(iommu->ecap))
clflush_cache_range(context, sizeof(*context));
intel_context_flush_present(info, context, true);
spin_unlock(&iommu->lock);

return 0;
}

static int intel_iommu_enable_iopf(struct device *dev)
{
struct pci_dev *pdev = dev_is_pci(dev) ? to_pci_dev(dev) : NULL;
@@ -4242,15 +4304,23 @@ static int intel_iommu_enable_iopf(struct device *dev)
if (ret)
return ret;

ret = context_flip_pri(info, true);
if (ret)
goto err_remove_device;

ret = pci_enable_pri(pdev, PRQ_DEPTH);
if (ret) {
iopf_queue_remove_device(iommu->iopf_queue, dev);
return ret;
}
if (ret)
goto err_clear_pri;

info->pri_enabled = 1;

return 0;
err_clear_pri:
context_flip_pri(info, false);
err_remove_device:
iopf_queue_remove_device(iommu->iopf_queue, dev);

return ret;
}

static int intel_iommu_disable_iopf(struct device *dev)
@@ -4261,6 +4331,15 @@ static int intel_iommu_disable_iopf(struct device *dev)
if (!info->pri_enabled)
return -EINVAL;

/* Disable new PRI reception: */
context_flip_pri(info, false);

/*
* Remove device from fault queue and acknowledge all outstanding
* PRQs to the device:
*/
iopf_queue_remove_device(iommu->iopf_queue, dev);

/*
* PCIe spec states that by clearing PRI enable bit, the Page
* Request Interface will not issue new page requests, but has
@@ -4271,7 +4350,6 @@
*/
pci_disable_pri(to_pci_dev(dev));
info->pri_enabled = 0;
iopf_queue_remove_device(iommu->iopf_queue, dev);

return 0;
}
19 changes: 15 additions & 4 deletions drivers/iommu/intel/iommu.h
@@ -49,7 +49,6 @@
#define DMA_FL_PTE_US BIT_ULL(2)
#define DMA_FL_PTE_ACCESS BIT_ULL(5)
#define DMA_FL_PTE_DIRTY BIT_ULL(6)
#define DMA_FL_PTE_XD BIT_ULL(63)

#define DMA_SL_PTE_DIRTY_BIT 9
#define DMA_SL_PTE_DIRTY BIT_ULL(DMA_SL_PTE_DIRTY_BIT)
@@ -831,11 +830,10 @@ static inline void dma_clear_pte(struct dma_pte *pte)
static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
return pte->val & VTD_PAGE_MASK;
#else
/* Must have a full atomic 64-bit read */
return __cmpxchg64(&pte->val, 0ULL, 0ULL) &
VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

@@ -1047,6 +1045,15 @@ static inline void context_set_sm_pre(struct context_entry *context)
context->lo |= BIT_ULL(4);
}

/*
* Clear the PRE(Page Request Enable) field of a scalable mode context
* entry.
*/
static inline void context_clear_sm_pre(struct context_entry *context)
{
context->lo &= ~BIT_ULL(4);
}

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
{
@@ -1145,6 +1152,10 @@ void cache_tag_flush_all(struct dmar_domain *domain);
void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
unsigned long end);

void intel_context_flush_present(struct device_domain_info *info,
struct context_entry *context,
bool affect_domains);

#ifdef CONFIG_INTEL_IOMMU_SVM
void intel_svm_check(struct intel_iommu *iommu);
int intel_svm_enable_prq(struct intel_iommu *iommu);
4 changes: 2 additions & 2 deletions drivers/iommu/intel/irq_remapping.c
@@ -597,8 +597,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)

if (ir_pre_enabled(iommu)) {
if (!is_kdump_kernel()) {
pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
iommu->name);
pr_info_once("IRQ remapping was enabled on %s but we are not in kdump mode\n",
iommu->name);
clear_ir_pre_enabled(iommu);
iommu_disable_irq_remapping(iommu);
} else if (iommu_load_old_irte(iommu))