@@ -637,44 +637,125 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
637637}
638638
639639/**
640- * gmap_fault - resolve a fault on a guest address
640+ * fixup_user_fault_nowait - manually resolve a user page fault without waiting
641+ * @mm: mm_struct of target mm
642+ * @address: user address
643+ * @fault_flags: flags to pass down to handle_mm_fault()
644+ * @unlocked: did we unlock the mmap_lock while retrying
645+ *
646+ * This function behaves similarly to fixup_user_fault(), but it guarantees
647+ * that the fault will be resolved without waiting. The function might drop
648+ * and re-acquire the mm lock, in which case @unlocked will be set to true.
649+ *
650+ * The guarantee is that the fault is handled without waiting, but the
651+ * function itself might still sleep while acquiring the mmap_lock.
652+ *
653+ * Context: Needs to be called with mm->mmap_lock held in read mode, and will
654+ * return with the lock held in read mode; @unlocked will indicate whether
655+ * the lock has been dropped and re-acquired. This is the same behaviour as
656+ * fixup_user_fault().
657+ *
658+ * Return: 0 on success, -EAGAIN if the fault cannot be resolved without
659+ * waiting, -EFAULT if the fault cannot be resolved, -ENOMEM if out of
660+ * memory.
661+ */
662+static int fixup_user_fault_nowait(struct mm_struct *mm, unsigned long address,
663+				    unsigned int fault_flags, bool *unlocked)
664+{
665+	struct vm_area_struct *vma;
666+	unsigned int test_flags;
667+	vm_fault_t fault;
668+	int rc;
669+
670+	fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
671+	test_flags = fault_flags & FAULT_FLAG_WRITE ? VM_WRITE : VM_READ;
672+
673+	vma = find_vma(mm, address);
674+	if (unlikely(!vma || address < vma->vm_start))
675+		return -EFAULT;
676+	if (unlikely(!(vma->vm_flags & test_flags)))
677+		return -EFAULT;
678+
679+	fault = handle_mm_fault(vma, address, fault_flags, NULL);
680+	/* the mm lock has been dropped, take it again */
681+	if (fault & VM_FAULT_COMPLETED) {
682+		*unlocked = true;
683+		mmap_read_lock(mm);
684+		return 0;
685+	}
686+	/* the mm lock has not been dropped */
687+	if (fault & VM_FAULT_ERROR) {
688+		rc = vm_fault_to_errno(fault, 0);
689+		BUG_ON(!rc);
690+		return rc;
691+	}
692+	/* the mm lock has not been dropped because of FAULT_FLAG_RETRY_NOWAIT */
693+	if (fault & VM_FAULT_RETRY)
694+		return -EAGAIN;
695+	/* nothing needed to be done and the mm lock has not been dropped */
696+	return 0;
697+}
698+
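A minimal caller-side sketch of the locking contract above (illustrative
only, not part of the patch; the name poll_fault_in() is made up): the
caller takes mmap_lock in read mode, and regardless of whether the fault
handling dropped the lock in between, it is held again on return, so the
unlock is always balanced.

static int poll_fault_in(struct mm_struct *mm, unsigned long addr, bool write)
{
	unsigned int flags = write ? FAULT_FLAG_WRITE : 0;
	bool unlocked = false;
	int rc;

	mmap_read_lock(mm);
	rc = fixup_user_fault_nowait(mm, addr, flags, &unlocked);
	/* the lock is guaranteed to be held here, dropped or not */
	mmap_read_unlock(mm);
	return rc;	/* 0, -EAGAIN, -EFAULT or -ENOMEM */
}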
699+/**
700+ * __gmap_fault - resolve a fault on a guest address
641701 * @gmap: pointer to guest mapping meta data structure
642702 * @gaddr: guest address
643703 * @fault_flags: flags to pass down to handle_mm_fault()
644704 *
645- * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
646- * if the vm address is already mapped to a different guest segment.
705+ * Context: Needs to be called with mm->mmap_lock held in read mode. Might
706+ * drop and re-acquire the lock. Will always return with the lock held.
647707 */
648-int gmap_fault(struct gmap *gmap, unsigned long gaddr,
649-	       unsigned int fault_flags)
708+static int __gmap_fault(struct gmap *gmap, unsigned long gaddr, unsigned int fault_flags)
650709{
651710	unsigned long vmaddr;
652-	int rc;
653711	bool unlocked;
654-
655-	mmap_read_lock(gmap->mm);
712+	int rc = 0;
656713
657714retry:
658715	unlocked = false;
716+
659717	vmaddr = __gmap_translate(gmap, gaddr);
660-	if (IS_ERR_VALUE(vmaddr)) {
661-		rc = vmaddr;
662-		goto out_up;
663-	}
664-	if (fixup_user_fault(gmap->mm, vmaddr, fault_flags,
665-			     &unlocked)) {
666-		rc = -EFAULT;
667-		goto out_up;
718+	if (IS_ERR_VALUE(vmaddr))
719+		return vmaddr;
720+
721+	if (fault_flags & FAULT_FLAG_RETRY_NOWAIT) {
722+		rc = fixup_user_fault_nowait(gmap->mm, vmaddr, fault_flags, &unlocked);
723+		if (rc)
724+			return rc;
725+	} else if (fixup_user_fault(gmap->mm, vmaddr, fault_flags, &unlocked)) {
726+		return -EFAULT;
668727	}
669728	/*
670729	 * In the case that fixup_user_fault unlocked the mmap_lock during
671-	 * faultin redo __gmap_translate to not race with a map/unmap_segment.
730+	 * fault-in, redo __gmap_translate() to avoid racing with a
731+	 * map/unmap_segment.
732+	 * In particular, __gmap_translate(), fixup_user_fault{,_nowait}(),
733+	 * and __gmap_link() must all be called atomically in one go; if the
734+	 * lock had been dropped in between, a retry is needed.
672735	 */
673736	if (unlocked)
674737		goto retry;
675738
676-	rc = __gmap_link(gmap, gaddr, vmaddr);
677-out_up:
739+	return __gmap_link(gmap, gaddr, vmaddr);
740+}
741+
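The goto-based retry above can be read as the following loop (an equivalent
restatement for illustration, not the code in the patch; the nowait branch
is elided for brevity): translation, fault-in and link must all happen under
one uninterrupted read-hold of mmap_lock, so whenever the lock was dropped,
the possibly stale translation is thrown away and the sequence restarts.

	do {
		unlocked = false;
		vmaddr = __gmap_translate(gmap, gaddr);	/* may go stale */
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		if (fixup_user_fault(gmap->mm, vmaddr, fault_flags, &unlocked))
			return -EFAULT;
		/* lock dropped and re-taken: vmaddr may be stale, redo */
	} while (unlocked);
	return __gmap_link(gmap, gaddr, vmaddr);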
742+/**
743+ * gmap_fault - resolve a fault on a guest address
744+ * @gmap: pointer to guest mapping meta data structure
745+ * @gaddr: guest address
746+ * @fault_flags: flags to pass down to handle_mm_fault()
747+ *
748+ * Returns 0 on success, -ENOMEM for out of memory conditions, -EFAULT if the
749+ * vm address is already mapped to a different guest segment, and -EAGAIN if
750+ * FAULT_FLAG_RETRY_NOWAIT was specified and the fault could not be processed
751+ * immediately.
752+ */
753+int gmap_fault(struct gmap *gmap, unsigned long gaddr, unsigned int fault_flags)
754+{
755+	int rc;
756+
757+	mmap_read_lock(gmap->mm);
758+	rc = __gmap_fault(gmap, gaddr, fault_flags);
678759	mmap_read_unlock(gmap->mm);
679760	return rc;
680761}
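For context, a hedged usage sketch (an assumed caller, not part of this hunk;
handle_guest_fault() and queue_async_pfault_stub() are made-up names): a path
that must not sleep, such as a KVM interception handler, passes
FAULT_FLAG_RETRY_NOWAIT and falls back to asynchronous handling on -EAGAIN.

static int handle_guest_fault(struct gmap *gmap, unsigned long gaddr)
{
	int rc;

	rc = gmap_fault(gmap, gaddr,
			FAULT_FLAG_WRITE | FAULT_FLAG_RETRY_NOWAIT);
	if (rc == -EAGAIN) {
		/* could not resolve without waiting: defer the fault to a
		 * sleepable context, e.g. an async pfault request */
		return queue_async_pfault_stub(gmap, gaddr);
	}
	return rc;	/* 0, -EFAULT or -ENOMEM */
}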