@@ -39,13 +39,20 @@ enum {
3939 HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT ,
4040};
4141
enum {
	/*
	 * These flags are carried from input-to-output: any hmm_pfns[]
	 * entry bits in this mask set by the caller survive a refill
	 * instead of being clobbered.
	 */
	HMM_PFN_INOUT_FLAGS = HMM_PFN_DMA_MAPPED,
};
4247static int hmm_pfns_fill (unsigned long addr , unsigned long end ,
4348 struct hmm_range * range , unsigned long cpu_flags )
4449{
4550 unsigned long i = (addr - range -> start ) >> PAGE_SHIFT ;
4651
47- for (; addr < end ; addr += PAGE_SIZE , i ++ )
48- range -> hmm_pfns [i ] = cpu_flags ;
52+ for (; addr < end ; addr += PAGE_SIZE , i ++ ) {
53+ range -> hmm_pfns [i ] &= HMM_PFN_INOUT_FLAGS ;
54+ range -> hmm_pfns [i ] |= cpu_flags ;
55+ }
4956 return 0 ;
5057}
5158
@@ -202,8 +209,10 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
202209 return hmm_vma_fault (addr , end , required_fault , walk );
203210
204211 pfn = pmd_pfn (pmd ) + ((addr & ~PMD_MASK ) >> PAGE_SHIFT );
205- for (i = 0 ; addr < end ; addr += PAGE_SIZE , i ++ , pfn ++ )
206- hmm_pfns [i ] = pfn | cpu_flags ;
212+ for (i = 0 ; addr < end ; addr += PAGE_SIZE , i ++ , pfn ++ ) {
213+ hmm_pfns [i ] &= HMM_PFN_INOUT_FLAGS ;
214+ hmm_pfns [i ] |= pfn | cpu_flags ;
215+ }
207216 return 0 ;
208217}
209218#else /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -230,14 +239,14 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
230239 unsigned long cpu_flags ;
231240 pte_t pte = ptep_get (ptep );
232241 uint64_t pfn_req_flags = * hmm_pfn ;
242+ uint64_t new_pfn_flags = 0 ;
233243
234244 if (pte_none_mostly (pte )) {
235245 required_fault =
236246 hmm_pte_need_fault (hmm_vma_walk , pfn_req_flags , 0 );
237247 if (required_fault )
238248 goto fault ;
239- * hmm_pfn = 0 ;
240- return 0 ;
249+ goto out ;
241250 }
242251
243252 if (!pte_present (pte )) {
@@ -253,16 +262,14 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
253262 cpu_flags = HMM_PFN_VALID ;
254263 if (is_writable_device_private_entry (entry ))
255264 cpu_flags |= HMM_PFN_WRITE ;
256- * hmm_pfn = swp_offset_pfn (entry ) | cpu_flags ;
257- return 0 ;
265+ new_pfn_flags = swp_offset_pfn (entry ) | cpu_flags ;
266+ goto out ;
258267 }
259268
260269 required_fault =
261270 hmm_pte_need_fault (hmm_vma_walk , pfn_req_flags , 0 );
262- if (!required_fault ) {
263- * hmm_pfn = 0 ;
264- return 0 ;
265- }
271+ if (!required_fault )
272+ goto out ;
266273
267274 if (!non_swap_entry (entry ))
268275 goto fault ;
@@ -304,11 +311,13 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
304311 pte_unmap (ptep );
305312 return - EFAULT ;
306313 }
307- * hmm_pfn = HMM_PFN_ERROR ;
308- return 0 ;
314+ new_pfn_flags = HMM_PFN_ERROR ;
315+ goto out ;
309316 }
310317
311- * hmm_pfn = pte_pfn (pte ) | cpu_flags ;
318+ new_pfn_flags = pte_pfn (pte ) | cpu_flags ;
319+ out :
320+ * hmm_pfn = (* hmm_pfn & HMM_PFN_INOUT_FLAGS ) | new_pfn_flags ;
312321 return 0 ;
313322
314323fault :
@@ -448,8 +457,10 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
448457 }
449458
450459 pfn = pud_pfn (pud ) + ((addr & ~PUD_MASK ) >> PAGE_SHIFT );
451- for (i = 0 ; i < npages ; ++ i , ++ pfn )
452- hmm_pfns [i ] = pfn | cpu_flags ;
460+ for (i = 0 ; i < npages ; ++ i , ++ pfn ) {
461+ hmm_pfns [i ] &= HMM_PFN_INOUT_FLAGS ;
462+ hmm_pfns [i ] |= pfn | cpu_flags ;
463+ }
453464 goto out_unlock ;
454465 }
455466
@@ -507,8 +518,10 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
507518 }
508519
509520 pfn = pte_pfn (entry ) + ((start & ~hmask ) >> PAGE_SHIFT );
510- for (; addr < end ; addr += PAGE_SIZE , i ++ , pfn ++ )
511- range -> hmm_pfns [i ] = pfn | cpu_flags ;
521+ for (; addr < end ; addr += PAGE_SIZE , i ++ , pfn ++ ) {
522+ range -> hmm_pfns [i ] &= HMM_PFN_INOUT_FLAGS ;
523+ range -> hmm_pfns [i ] |= pfn | cpu_flags ;
524+ }
512525
513526 spin_unlock (ptl );
514527 return 0 ;
0 commit comments