Skip to content

Commit

Permalink
Revert "drm/msm/gpu: Push gpu lock down past runpm"
Browse files Browse the repository at this point in the history
This reverts commit abe2023.

Changing the locking order means that scheduler/msm_job_run() can race
with the recovery kthread worker, with the result that the GPU gets an
extra runpm get when we are trying to power it off, leaving the GPU in
an unrecovered state.

I'll need to come up with a different scheme for appeasing lockdep.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Patchwork: https://patchwork.freedesktop.org/patch/573835/
  • Loading branch information
robclark committed Feb 1, 2024
1 parent 6a0dbcd commit 917e9b7
Show file tree
Hide file tree
Showing 2 changed files with 10 additions and 8 deletions.
11 changes: 5 additions & 6 deletions drivers/gpu/drm/msm/msm_gpu.c
Original file line number Diff line number Diff line change
Expand Up @@ -751,12 +751,14 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_ringbuffer *ring = submit->ring;
unsigned long flags;

pm_runtime_get_sync(&gpu->pdev->dev);
WARN_ON(!mutex_is_locked(&gpu->lock));

mutex_lock(&gpu->lock);
pm_runtime_get_sync(&gpu->pdev->dev);

msm_gpu_hw_init(gpu);

submit->seqno = submit->hw_fence->seqno;

update_sw_cntrs(gpu);

/*
Expand All @@ -781,11 +783,8 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
gpu->funcs->submit(gpu, submit);
gpu->cur_ctx_seqno = submit->queue->ctx->seqno;

hangcheck_timer_reset(gpu);

mutex_unlock(&gpu->lock);

pm_runtime_put(&gpu->pdev->dev);
hangcheck_timer_reset(gpu);
}

/*
Expand Down
7 changes: 5 additions & 2 deletions drivers/gpu/drm/msm/msm_ringbuffer.c
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)

msm_fence_init(submit->hw_fence, fctx);

submit->seqno = submit->hw_fence->seqno;

mutex_lock(&priv->lru.lock);

for (i = 0; i < submit->nr_bos; i++) {
Expand All @@ -35,8 +33,13 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)

mutex_unlock(&priv->lru.lock);

/* TODO move submit path over to using a per-ring lock.. */
mutex_lock(&gpu->lock);

msm_gpu_submit(gpu, submit);

mutex_unlock(&gpu->lock);

return dma_fence_get(submit->hw_fence);
}

Expand Down

0 comments on commit 917e9b7

Please sign in to comment.