@@ -1737,6 +1737,24 @@ static bool should_migrate_to_smem(struct xe_bo *bo)
 	       bo->attr.atomic_access == DRM_XE_ATOMIC_CPU;
 }
 
+static int xe_bo_wait_usage_kernel(struct xe_bo *bo, struct ttm_operation_ctx *ctx)
+{
+	long lerr;
+
+	if (ctx->no_wait_gpu)
+		return dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL) ?
+			0 : -EBUSY;
+
+	lerr = dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
+				     ctx->interruptible, MAX_SCHEDULE_TIMEOUT);
+	if (lerr < 0)
+		return lerr;
+	if (lerr == 0)
+		return -EBUSY;
+
+	return 0;
+}
+
 /* Populate the bo if swapped out, or migrate if the access mode requires that. */
 static int xe_bo_fault_migrate(struct xe_bo *bo, struct ttm_operation_ctx *ctx,
 			       struct drm_exec *exec)
@@ -1745,10 +1763,9 @@ static int xe_bo_fault_migrate(struct xe_bo *bo, struct ttm_operation_ctx *ctx,
 	int err = 0;
 
 	if (ttm_manager_type(tbo->bdev, tbo->resource->mem_type)->use_tt) {
-		xe_assert(xe_bo_device(bo),
-			  dma_resv_test_signaled(tbo->base.resv, DMA_RESV_USAGE_KERNEL) ||
-			  (tbo->ttm && ttm_tt_is_populated(tbo->ttm)));
-		err = ttm_bo_populate(&bo->ttm, ctx);
+		err = xe_bo_wait_usage_kernel(bo, ctx);
+		if (!err)
+			err = ttm_bo_populate(&bo->ttm, ctx);
 	} else if (should_migrate_to_smem(bo)) {
 		xe_assert(xe_bo_device(bo), bo->flags & XE_BO_FLAG_SYSTEM);
 		err = xe_bo_migrate(bo, XE_PL_TT, ctx, exec);
@@ -1922,7 +1939,6 @@ static vm_fault_t xe_bo_cpu_fault(struct vm_fault *vmf)
 			.no_wait_gpu = false,
 			.gfp_retry_mayfail = retry_after_wait,
 		};
-		long lerr;
 
 		err = drm_exec_lock_obj(&exec, &tbo->base);
 		drm_exec_retry_on_contention(&exec);
@@ -1942,13 +1958,9 @@ static vm_fault_t xe_bo_cpu_fault(struct vm_fault *vmf)
 			break;
 		}
 
-		lerr = dma_resv_wait_timeout(tbo->base.resv,
-					     DMA_RESV_USAGE_KERNEL, true,
-					     MAX_SCHEDULE_TIMEOUT);
-		if (lerr < 0) {
-			err = lerr;
+		err = xe_bo_wait_usage_kernel(bo, &tctx);
+		if (err)
 			break;
-		}
 
 		if (!retry_after_wait)
 			ret = __xe_bo_cpu_fault(vmf, xe, bo);
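
For reference, a minimal sketch of how a caller is expected to use the new xe_bo_wait_usage_kernel() helper, following the fault-path hunks above. The tctx initializer and the surrounding error handling here are illustrative assumptions, not lines from this commit:

	/*
	 * Sketch only: the helper returns 0 once all DMA_RESV_USAGE_KERNEL
	 * fences on the bo have signaled, -EBUSY if ctx->no_wait_gpu is set
	 * and they have not yet signaled, or a negative errno (e.g.
	 * -ERESTARTSYS) if an interruptible wait is broken by a signal.
	 */
	struct ttm_operation_ctx tctx = {
		.interruptible = true,	/* allow the wait to be interrupted */
		.no_wait_gpu = false,	/* block until kernel fences signal */
	};
	int err;

	err = xe_bo_wait_usage_kernel(bo, &tctx);
	if (err)
		return err;

	/* Kernel fences are idle; safe to populate the bo's backing store. */
	err = ttm_bo_populate(&bo->ttm, &tctx);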