 #include <linux/vringh.h>
 #include <linux/vdpa.h>
 #include <linux/vhost_iotlb.h>
+#include <linux/iova.h>
 
 #include "vdpa_sim.h"
 
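The only header change is the new <linux/iova.h> include, which pulls in the generic IOVA allocator this patch switches to. For reference, the subset of that API used below, with signatures paraphrased from the header as of this patch (a reference listing, not part of the diff):

    struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
                            unsigned long limit_pfn, bool size_aligned);
    void __free_iova(struct iova_domain *iovad, struct iova *iova);
    void free_iova(struct iova_domain *iovad, unsigned long pfn);
    dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova);
    unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova);
    void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
                          unsigned long start_pfn);
    void put_iova_domain(struct iova_domain *iovad);
    int iova_cache_get(void);
    void iova_cache_put(void);

Sizes, limits, and pfns are all in granule units; the domain created later in this patch uses a 1-byte granule, so here they are plain byte values.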
@@ -128,71 +129,89 @@ static int dir_to_perm(enum dma_data_direction dir)
 	return perm;
 }
 
+static dma_addr_t vdpasim_map_range(struct vdpasim *vdpasim, phys_addr_t paddr,
+				    size_t size, unsigned int perm)
+{
+	struct iova *iova;
+	dma_addr_t dma_addr;
+	int ret;
+
+	/* We set the limit_pfn to the maximum (ULONG_MAX - 1) */
+	iova = alloc_iova(&vdpasim->iova, size, ULONG_MAX - 1, true);
+	if (!iova)
+		return DMA_MAPPING_ERROR;
+
+	dma_addr = iova_dma_addr(&vdpasim->iova, iova);
+
+	spin_lock(&vdpasim->iommu_lock);
+	ret = vhost_iotlb_add_range(vdpasim->iommu, (u64)dma_addr,
+				    (u64)dma_addr + size - 1, (u64)paddr, perm);
+	spin_unlock(&vdpasim->iommu_lock);
+
+	if (ret) {
+		__free_iova(&vdpasim->iova, iova);
+		return DMA_MAPPING_ERROR;
+	}
+
+	return dma_addr;
+}
+
+static void vdpasim_unmap_range(struct vdpasim *vdpasim, dma_addr_t dma_addr,
+				size_t size)
+{
+	spin_lock(&vdpasim->iommu_lock);
+	vhost_iotlb_del_range(vdpasim->iommu, (u64)dma_addr,
+			      (u64)dma_addr + size - 1);
+	spin_unlock(&vdpasim->iommu_lock);
+
+	free_iova(&vdpasim->iova, iova_pfn(&vdpasim->iova, dma_addr));
+}
+
 static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
 				   unsigned long offset, size_t size,
 				   enum dma_data_direction dir,
 				   unsigned long attrs)
 {
 	struct vdpasim *vdpasim = dev_to_sim(dev);
-	struct vhost_iotlb *iommu = vdpasim->iommu;
-	u64 pa = (page_to_pfn(page) << PAGE_SHIFT) + offset;
-	int ret, perm = dir_to_perm(dir);
+	phys_addr_t paddr = page_to_phys(page) + offset;
+	int perm = dir_to_perm(dir);
 
 	if (perm < 0)
 		return DMA_MAPPING_ERROR;
 
-	/* For simplicity, use identical mapping to avoid e.g iova
-	 * allocator.
-	 */
-	spin_lock(&vdpasim->iommu_lock);
-	ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
-				    pa, dir_to_perm(dir));
-	spin_unlock(&vdpasim->iommu_lock);
-	if (ret)
-		return DMA_MAPPING_ERROR;
-
-	return (dma_addr_t)(pa);
+	return vdpasim_map_range(vdpasim, paddr, size, perm);
 }
 
 static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
 			       size_t size, enum dma_data_direction dir,
 			       unsigned long attrs)
 {
 	struct vdpasim *vdpasim = dev_to_sim(dev);
-	struct vhost_iotlb *iommu = vdpasim->iommu;
 
-	spin_lock(&vdpasim->iommu_lock);
-	vhost_iotlb_del_range(iommu, (u64)dma_addr,
-			      (u64)dma_addr + size - 1);
-	spin_unlock(&vdpasim->iommu_lock);
+	vdpasim_unmap_range(vdpasim, dma_addr, size);
 }
 
 static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
 				    dma_addr_t *dma_addr, gfp_t flag,
 				    unsigned long attrs)
 {
 	struct vdpasim *vdpasim = dev_to_sim(dev);
-	struct vhost_iotlb *iommu = vdpasim->iommu;
-	void *addr = kmalloc(size, flag);
-	int ret;
+	phys_addr_t paddr;
+	void *addr;
 
-	spin_lock(&vdpasim->iommu_lock);
+	addr = kmalloc(size, flag);
 	if (!addr) {
 		*dma_addr = DMA_MAPPING_ERROR;
-	} else {
-		u64 pa = virt_to_phys(addr);
-
-		ret = vhost_iotlb_add_range(iommu, (u64)pa,
-					    (u64)pa + size - 1,
-					    pa, VHOST_MAP_RW);
-		if (ret) {
-			*dma_addr = DMA_MAPPING_ERROR;
-			kfree(addr);
-			addr = NULL;
-		} else
-			*dma_addr = (dma_addr_t)pa;
+		return NULL;
+	}
+
+	paddr = virt_to_phys(addr);
+
+	*dma_addr = vdpasim_map_range(vdpasim, paddr, size, VHOST_MAP_RW);
+	if (*dma_addr == DMA_MAPPING_ERROR) {
+		kfree(addr);
+		return NULL;
 	}
-	spin_unlock(&vdpasim->iommu_lock);
 
 	return addr;
 }
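All three callbacks above now funnel into vdpasim_map_range(), which follows a reserve/publish/rollback pattern: reserve an IOVA range, publish the IOVA-to-physical translation into the IOTLB under iommu_lock, and return the range to the allocator if publishing fails. Note the two free variants: the error path uses __free_iova(), which takes the struct iova pointer still in hand, while vdpasim_unmap_range() must first look the entry up by pfn via free_iova(). A minimal standalone model of the pattern, with hypothetical names and a toy bump allocator plus fixed-size table standing in for the real IOVA allocator and vhost IOTLB:

    /*
     * Standalone model (not kernel code; all names hypothetical) of the
     * reserve/publish/rollback pattern in vdpasim_map_range().
     * Build: cc -o map_model map_model.c && ./map_model
     */
    #include <stdint.h>
    #include <stdio.h>

    #define MODEL_MAPPING_ERROR ((uint64_t)-1)
    #define MODEL_SLOTS 4

    struct model_entry { uint64_t iova, paddr, size; };

    static struct model_entry tlb[MODEL_SLOTS]; /* stands in for the vhost IOTLB */
    static uint64_t next_iova = 0x1000;         /* toy bump allocator, no reuse  */

    /* "Publish" a translation; fails when the table is full. */
    static int tlb_add(uint64_t iova, uint64_t paddr, uint64_t size)
    {
        for (int i = 0; i < MODEL_SLOTS; i++) {
            if (tlb[i].size == 0) {
                tlb[i] = (struct model_entry){ iova, paddr, size };
                return 0;
            }
        }
        return -1;
    }

    static uint64_t model_map(uint64_t paddr, uint64_t size)
    {
        uint64_t iova = next_iova;          /* 1. reserve an IOVA range     */

        next_iova += size;
        if (tlb_add(iova, paddr, size)) {   /* 2. publish the translation   */
            next_iova -= size;              /* 3. roll back the reservation */
            return MODEL_MAPPING_ERROR;
        }
        return iova;
    }

    int main(void)
    {
        for (int i = 0; i < 6; i++)         /* attempts 5 and 6 fail */
            printf("map #%d -> 0x%llx\n", i + 1,
                   (unsigned long long)model_map(0x100000 + i * 64, 64));
        return 0;
    }

Once the toy table fills, model_map() rolls back the reservation and returns MODEL_MAPPING_ERROR, mirroring the DMA_MAPPING_ERROR path in vdpasim_map_range().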
@@ -202,14 +221,10 @@ static void vdpasim_free_coherent(struct device *dev, size_t size,
 				  unsigned long attrs)
 {
 	struct vdpasim *vdpasim = dev_to_sim(dev);
-	struct vhost_iotlb *iommu = vdpasim->iommu;
 
-	spin_lock(&vdpasim->iommu_lock);
-	vhost_iotlb_del_range(iommu, (u64)dma_addr,
-			      (u64)dma_addr + size - 1);
-	spin_unlock(&vdpasim->iommu_lock);
+	vdpasim_unmap_range(vdpasim, dma_addr, size);
 
-	kfree(phys_to_virt((uintptr_t)dma_addr));
+	kfree(vaddr);
 }
 
 static const struct dma_map_ops vdpasim_dma_ops = {
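One subtle change in vdpasim_free_coherent(): the vaddr now passed to kfree() is the coherent buffer's kernel virtual address, a function parameter this hunk does not show; the old code could reconstruct it with phys_to_virt(dma_addr) only because DMA addresses used to be identity-mapped physical addresses, which no longer holds once they are IOVAs. The dma_map_ops table itself is untouched by the patch; it is what routes the generic DMA API onto the four callbacks above, and from the file context it reads roughly as follows (a reconstruction for orientation, not part of this diff):

    static const struct dma_map_ops vdpasim_dma_ops = {
        .map_page = vdpasim_map_page,
        .unmap_page = vdpasim_unmap_page,
        .alloc = vdpasim_alloc_coherent,
        .free = vdpasim_free_coherent,
    };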
@@ -271,6 +286,13 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
 	for (i = 0; i < dev_attr->nvqs; i++)
 		vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu);
 
+	ret = iova_cache_get();
+	if (ret)
+		goto err_iommu;
+
+	/* For simplicity we use an IOVA allocator with byte granularity */
+	init_iova_domain(&vdpasim->iova, 1, 0);
+
 	vdpasim->vdpa.dma_dev = dev;
 
 	return vdpasim;
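init_iova_domain(&vdpasim->iova, 1, 0) creates a domain whose granule is a single byte, so iova_shift() is zero and every pfn-denominated value above (alloc_iova()'s size and limit_pfn, the conversions in iova_dma_addr() and iova_pfn()) is simply a byte count or byte address. The ULONG_MAX - 1 limit also guarantees no returned address can ever equal DMA_MAPPING_ERROR (~0). A standalone check of the shift identity, with hypothetical names:

    /* Standalone check (hypothetical names, not kernel code): with a 1-byte
     * granule the pfn<->address conversions are identity shifts, because the
     * shift is the log2 of the granule. */
    #include <assert.h>

    static unsigned int model_iova_shift(unsigned long granule)
    {
        return __builtin_ctzl(granule); /* kernel: __ffs(granule) */
    }

    int main(void)
    {
        unsigned long dma_addr = 0x12345;

        assert(model_iova_shift(1) == 0);                       /* byte granule      */
        assert((dma_addr >> model_iova_shift(1)) == dma_addr);  /* ~ iova_pfn()      */
        assert((dma_addr << model_iova_shift(1)) == dma_addr);  /* ~ iova_dma_addr() */
        return 0;
    }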
@@ -541,6 +563,8 @@ static void vdpasim_free(struct vdpa_device *vdpa)
 	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
 
 	cancel_work_sync(&vdpasim->work);
+	put_iova_domain(&vdpasim->iova);
+	iova_cache_put();
 	kvfree(vdpasim->buffer);
 	if (vdpasim->iommu)
 		vhost_iotlb_free(vdpasim->iommu);
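Teardown runs in reverse order of creation, and the order matters: put_iova_domain() drains the domain and returns any outstanding struct iova nodes to the shared kmem cache, and only then does iova_cache_put() drop the reference taken by iova_cache_get() in vdpasim_create(); dropping the cache reference first could destroy the cache while the domain still holds nodes. A sketch of the paired lifecycle (kernel-style; the wrapper names are hypothetical, the iova calls are the real API):

    /* Sketch of the setup/teardown pairing added by this patch. */
    static int example_iova_setup(struct vdpasim *vdpasim)
    {
        int ret = iova_cache_get();  /* pin the shared 'struct iova' kmem cache */

        if (ret)
            return ret;

        /* byte granularity, matching vdpasim_create() above */
        init_iova_domain(&vdpasim->iova, 1, 0);
        return 0;
    }

    static void example_iova_teardown(struct vdpasim *vdpasim)
    {
        put_iova_domain(&vdpasim->iova); /* frees any outstanding iova nodes  */
        iova_cache_put();                /* only now drop the cache reference */
    }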