Merge tag 'drm-intel-fixes-2022-12-30' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

- Fix TLB invalidation for DG2 and newer platforms (Andrzej)
- Remove __maybe_unused from mtl_info (Lucas)
- Improve the catch-all evict to handle lock contention (Matt Auld)
- Fix two issues with over-size (GuC/HuC) firmware files (John)
- Fix DSI resume issues on ICL+ (Jani)

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/Y662ijDHrZCjTFla@intel.com
danvet committed Jan 1, 2023
2 parents 1b929c0 + 6217e9f commit a9f5a75
Showing 12 changed files with 212 additions and 45 deletions.
94 changes: 91 additions & 3 deletions drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -41,9 +41,11 @@

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
#include "intel_gmbus_regs.h"
#include "vlv_dsi.h"
#include "vlv_dsi_regs.h"
#include "vlv_sideband.h"
@@ -377,15 +379,93 @@ static void icl_exec_gpio(struct intel_connector *connector,
drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n");
}

enum {
MIPI_RESET_1 = 0,
MIPI_AVDD_EN_1,
MIPI_BKLT_EN_1,
MIPI_AVEE_EN_1,
MIPI_VIO_EN_1,
MIPI_RESET_2,
MIPI_AVDD_EN_2,
MIPI_BKLT_EN_2,
MIPI_AVEE_EN_2,
MIPI_VIO_EN_2,
};

static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
int gpio, bool value)
{
int index;

if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 11 && gpio >= MIPI_RESET_2))
return;

switch (gpio) {
case MIPI_RESET_1:
case MIPI_RESET_2:
index = gpio == MIPI_RESET_1 ? HPD_PORT_A : HPD_PORT_B;

/*
* Disable HPD to set the pin to output, and set output
* value. The HPD pin should not be enabled for DSI anyway,
* assuming the board design and VBT are sane, and the pin isn't
* used by a non-DSI encoder.
*
* The locking protects against concurrent SHOTPLUG_CTL_DDI
* modifications in irq setup and handling.
*/
spin_lock_irq(&dev_priv->irq_lock);
intel_de_rmw(dev_priv, SHOTPLUG_CTL_DDI,
SHOTPLUG_CTL_DDI_HPD_ENABLE(index) |
SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index),
value ? SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index) : 0);
spin_unlock_irq(&dev_priv->irq_lock);
break;
case MIPI_AVDD_EN_1:
case MIPI_AVDD_EN_2:
index = gpio == MIPI_AVDD_EN_1 ? 0 : 1;

intel_de_rmw(dev_priv, PP_CONTROL(index), PANEL_POWER_ON,
value ? PANEL_POWER_ON : 0);
break;
case MIPI_BKLT_EN_1:
case MIPI_BKLT_EN_2:
index = gpio == MIPI_BKLT_EN_1 ? 0 : 1;

intel_de_rmw(dev_priv, PP_CONTROL(index), EDP_BLC_ENABLE,
value ? EDP_BLC_ENABLE : 0);
break;
case MIPI_AVEE_EN_1:
case MIPI_AVEE_EN_2:
index = gpio == MIPI_AVEE_EN_1 ? 1 : 2;

intel_de_rmw(dev_priv, GPIO(dev_priv, index),
GPIO_CLOCK_VAL_OUT,
GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_DIR_OUT |
GPIO_CLOCK_VAL_MASK | (value ? GPIO_CLOCK_VAL_OUT : 0));
break;
case MIPI_VIO_EN_1:
case MIPI_VIO_EN_2:
index = gpio == MIPI_VIO_EN_1 ? 1 : 2;

intel_de_rmw(dev_priv, GPIO(dev_priv, index),
GPIO_DATA_VAL_OUT,
GPIO_DATA_DIR_MASK | GPIO_DATA_DIR_OUT |
GPIO_DATA_VAL_MASK | (value ? GPIO_DATA_VAL_OUT : 0));
break;
default:
MISSING_CASE(gpio);
}
}

static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_connector *connector = intel_dsi->attached_connector;
u8 gpio_source, gpio_index = 0, gpio_number;
bool value;

- drm_dbg_kms(&dev_priv->drm, "\n");
+ bool native = DISPLAY_VER(dev_priv) >= 11;

if (connector->panel.vbt.dsi.seq_version >= 3)
gpio_index = *data++;
@@ -398,10 +478,18 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
else
gpio_source = 0;

if (connector->panel.vbt.dsi.seq_version >= 4 && *data & BIT(1))
native = false;

/* pull up/down */
value = *data++ & 1;

- if (DISPLAY_VER(dev_priv) >= 11)
+ drm_dbg_kms(&dev_priv->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
+ gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value));
+
+ if (native)
+ icl_native_gpio_set_value(dev_priv, gpio_number, value);
+ else if (DISPLAY_VER(dev_priv) >= 11)
icl_exec_gpio(connector, gpio_source, gpio_index, value);
else if (IS_VALLEYVIEW(dev_priv))
vlv_exec_gpio(connector, gpio_source, gpio_number, value);
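
The native GPIO helper above is built almost entirely out of intel_de_rmw() calls: read a display register, clear the bits named in the clear mask, OR in the set bits, and write the result back. Below is a minimal standalone sketch of that read-modify-write pattern; the register array, mock_rmw() and the TOY_* names are invented for illustration, and only the clear/set contract mirrors the driver helper.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy register file standing in for the display MMIO window. */
static uint32_t mmio[16];

/* Mirrors the clear/set contract used by intel_de_rmw(reg, clear, set). */
static uint32_t mock_rmw(unsigned int reg, uint32_t clear, uint32_t set)
{
	uint32_t old = mmio[reg];

	mmio[reg] = (old & ~clear) | set;
	return old;
}

#define TOY_PP_CONTROL     3            /* invented register index */
#define TOY_PANEL_POWER_ON (1u << 0)    /* invented bit position */

int main(void)
{
	bool value = true;

	/* Same shape as the MIPI_AVDD_EN_* case: touch one bit, leave the rest. */
	mock_rmw(TOY_PP_CONTROL, TOY_PANEL_POWER_ON,
		 value ? TOY_PANEL_POWER_ON : 0);
	printf("PP_CONTROL = 0x%08x\n", (unsigned int)mmio[TOY_PP_CONTROL]);
	return 0;
}
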
59 changes: 48 additions & 11 deletions drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -730,32 +730,69 @@ static int eb_reserve(struct i915_execbuffer *eb)
bool unpinned;

/*
- * Attempt to pin all of the buffers into the GTT.
- * This is done in 2 phases:
+ * We have one more buffers that we couldn't bind, which could be due to
+ * various reasons. To resolve this we have 4 passes, with every next
+ * level turning the screws tighter:
*
- * 1. Unbind all objects that do not match the GTT constraints for
- * the execbuffer (fenceable, mappable, alignment etc).
- * 2. Bind new objects.
+ * 0. Unbind all objects that do not match the GTT constraints for the
+ * execbuffer (fenceable, mappable, alignment etc). Bind all new
+ * objects. This avoids unnecessary unbinding of later objects in order
+ * to make room for the earlier objects *unless* we need to defragment.
*
- * This avoid unnecessary unbinding of later objects in order to make
- * room for the earlier objects *unless* we need to defragment.
+ * 1. Reorder the buffers, where objects with the most restrictive
+ * placement requirements go first (ignoring fixed location buffers for
+ * now). For example, objects needing the mappable aperture (the first
+ * 256M of GTT), should go first vs objects that can be placed just
+ * about anywhere. Repeat the previous pass.
*
- * Defragmenting is skipped if all objects are pinned at a fixed location.
+ * 2. Consider buffers that are pinned at a fixed location. Also try to
+ * evict the entire VM this time, leaving only objects that we were
+ * unable to lock. Try again to bind the buffers. (still using the new
+ * buffer order).
+ *
+ * 3. We likely have object lock contention for one or more stubborn
+ * objects in the VM, for which we need to evict to make forward
+ * progress (perhaps we are fighting the shrinker?). When evicting the
+ * VM this time around, anything that we can't lock we now track using
+ * the busy_bo, using the full lock (after dropping the vm->mutex to
+ * prevent deadlocks), instead of trylock. We then continue to evict the
+ * VM, this time with the stubborn object locked, which we can now
+ * hopefully unbind (if still bound in the VM). Repeat until the VM is
+ * evicted. Finally we should be able bind everything.
*/
- for (pass = 0; pass <= 2; pass++) {
+ for (pass = 0; pass <= 3; pass++) {
int pin_flags = PIN_USER | PIN_VALIDATE;

if (pass == 0)
pin_flags |= PIN_NONBLOCK;

if (pass >= 1)
- unpinned = eb_unbind(eb, pass == 2);
+ unpinned = eb_unbind(eb, pass >= 2);

if (pass == 2) {
err = mutex_lock_interruptible(&eb->context->vm->mutex);
if (!err) {
- err = i915_gem_evict_vm(eb->context->vm, &eb->ww);
+ err = i915_gem_evict_vm(eb->context->vm, &eb->ww, NULL);
mutex_unlock(&eb->context->vm->mutex);
}
if (err)
return err;
}

if (pass == 3) {
retry:
err = mutex_lock_interruptible(&eb->context->vm->mutex);
if (!err) {
struct drm_i915_gem_object *busy_bo = NULL;

err = i915_gem_evict_vm(eb->context->vm, &eb->ww, &busy_bo);
mutex_unlock(&eb->context->vm->mutex);
if (err && busy_bo) {
err = i915_gem_object_lock(busy_bo, &eb->ww);
i915_gem_object_put(busy_bo);
if (!err)
goto retry;
}
}
if (err)
return err;
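
Pass 3 above is the caller side of the new busy_bo contract: the trylock-only eviction reports the one object it could not lock, the caller drops vm->mutex, takes the full (sleeping) object lock, and retries. A simplified, self-contained sketch of that retry shape follows; toy_obj and toy_evict_vm are invented stand-ins, not the i915 structures.

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins: just enough state to show the pass-3 retry shape. */
struct toy_obj {
	bool locked;   /* someone else holds the object lock */
	bool bound;    /* still has a VMA bound in the VM */
};

/* Trylock-only eviction: report the first contended object via *busy. */
static int toy_evict_vm(struct toy_obj *objs, size_t n, struct toy_obj **busy)
{
	for (size_t i = 0; i < n; i++) {
		if (!objs[i].bound)
			continue;
		if (objs[i].locked) {          /* trylock would fail */
			if (busy) {
				*busy = &objs[i];
				return -EBUSY;
			}
			continue;              /* earlier passes just skip it */
		}
		objs[i].bound = false;         /* evicted */
	}
	return 0;
}

int main(void)
{
	struct toy_obj objs[3] = { { false, true }, { true, true }, { false, true } };
	struct toy_obj *busy = NULL;
	int err;

retry:
	err = toy_evict_vm(objs, 3, &busy);
	if (err && busy) {
		busy->locked = false;  /* stands in for the full, sleeping object lock */
		busy = NULL;
		goto retry;            /* evict again with the stubborn object dealt with */
	}
	printf("evict result: %d\n", err);
	return 0;
}
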
2 changes: 1 addition & 1 deletion drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -369,7 +369,7 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
if (vma == ERR_PTR(-ENOSPC)) {
ret = mutex_lock_interruptible(&ggtt->vm.mutex);
if (!ret) {
- ret = i915_gem_evict_vm(&ggtt->vm, &ww);
+ ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
mutex_unlock(&ggtt->vm.mutex);
}
if (ret)
8 changes: 7 additions & 1 deletion drivers/gpu/drm/i915/gt/intel_gt.c
@@ -1109,9 +1109,15 @@ static void mmio_invalidate_full(struct intel_gt *gt)
continue;

if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
u32 val = BIT(engine->instance);

if (engine->class == VIDEO_DECODE_CLASS ||
engine->class == VIDEO_ENHANCEMENT_CLASS ||
engine->class == COMPUTE_CLASS)
val = _MASKED_BIT_ENABLE(val);
intel_gt_mcr_multicast_write_fw(gt,
xehp_regs[engine->class],
- BIT(engine->instance));
+ val);
} else {
rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
if (!i915_mmio_reg_offset(rb.reg))
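
The TLB fix above hinges on masked registers: the upper 16 bits of a write select which of the lower 16 bits actually change, so writing BIT(instance) alone is silently ignored. The sketch below models that behaviour; masked_write(), the toy state word and TOY_MASKED_BIT_ENABLE() are illustrative stand-ins, with the macro only mirroring the (bit << 16) | bit shape of the kernel's _MASKED_BIT_ENABLE().

#include <stdint.h>
#include <stdio.h>

/* Toy model of a masked register: only bits whose mask half is set change. */
static uint16_t reg_state;

static void masked_write(uint32_t val)
{
	uint16_t mask = val >> 16;
	uint16_t bits = val & 0xffff;

	reg_state = (reg_state & ~mask) | (bits & mask);
}

/* Same shape as the kernel's _MASKED_BIT_ENABLE(): the bit plus its mask. */
#define TOY_MASKED_BIT_ENABLE(b) (((uint32_t)(b) << 16) | (b))

int main(void)
{
	uint32_t bit = 1u << 2; /* think BIT(engine->instance) */

	masked_write(bit);                         /* mask half is 0: write ignored */
	printf("plain write  -> 0x%04x\n", (unsigned int)reg_state);

	masked_write(TOY_MASKED_BIT_ENABLE(bit));  /* mask + value: bit sticks */
	printf("masked write -> 0x%04x\n", (unsigned int)reg_state);
	return 0;
}
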
42 changes: 28 additions & 14 deletions drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -545,6 +545,32 @@ static int check_ccs_header(struct intel_gt *gt,
return 0;
}

static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware **fw)
{
struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
struct device *dev = gt->i915->drm.dev;
int err;

err = firmware_request_nowarn(fw, uc_fw->file_selected.path, dev);

if (err)
return err;

if ((*fw)->size > INTEL_UC_RSVD_GGTT_PER_FW) {
drm_err(&gt->i915->drm,
"%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
(*fw)->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);

/* try to find another blob to load */
release_firmware(*fw);
*fw = NULL;
return -ENOENT;
}

return 0;
}

/**
* intel_uc_fw_fetch - fetch uC firmware
* @uc_fw: uC firmware
@@ -558,7 +584,6 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
struct drm_i915_private *i915 = gt->i915;
struct intel_uc_fw_file file_ideal;
- struct device *dev = i915->drm.dev;
struct drm_i915_gem_object *obj;
const struct firmware *fw = NULL;
bool old_ver = false;
@@ -574,20 +599,9 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
__force_fw_fetch_failures(uc_fw, -EINVAL);
__force_fw_fetch_failures(uc_fw, -ESTALE);

- err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
+ err = try_firmware_load(uc_fw, &fw);
memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal));

- if (!err && fw->size > INTEL_UC_RSVD_GGTT_PER_FW) {
- drm_err(&i915->drm,
- "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- fw->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);
-
- /* try to find another blob to load */
- release_firmware(fw);
- err = -ENOENT;
- }

/* Any error is terminal if overriding. Don't bother searching for older versions */
if (err && intel_uc_fw_is_overridden(uc_fw))
goto fail;
@@ -608,7 +622,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
break;
}

- err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
+ err = try_firmware_load(uc_fw, &fw);
}

if (err)
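
try_firmware_load() above folds the size check into the request itself, so an over-size blob is reported to the caller exactly like a missing one and the existing fallback loop simply moves on to an older firmware file. A toy sketch of that control flow follows; the blob names, sizes and the TOY_RSVD_GGTT_PER_FW budget are invented, and only the "reject over-size as -ENOENT and fall back" logic mirrors the patch.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_RSVD_GGTT_PER_FW (2u * 1024 * 1024)   /* stand-in size budget */

struct toy_blob { const char *path; size_t size; };

/* Treat an over-size blob exactly like a missing one: -ENOENT. */
static int toy_try_load(const struct toy_blob *blob, const struct toy_blob **out)
{
	if (!blob->size)
		return -ENOENT;                       /* file not found */
	if (blob->size > TOY_RSVD_GGTT_PER_FW) {
		fprintf(stderr, "%s: %zu bytes exceeds budget, skipping\n",
			blob->path, blob->size);
		return -ENOENT;                       /* reject as if missing */
	}
	*out = blob;
	return 0;
}

int main(void)
{
	/* Preferred blob first, then progressively older fallbacks. */
	const struct toy_blob candidates[] = {
		{ "guc_71.bin", 3u * 1024 * 1024 },   /* too big for the GGTT slot */
		{ "guc_70.bin", 0 },                  /* not present on disk */
		{ "guc_69.bin", 1u * 1024 * 1024 },   /* fits */
	};
	const struct toy_blob *fw = NULL;

	for (size_t i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++)
		if (!toy_try_load(&candidates[i], &fw))
			break;

	printf("loaded: %s\n", fw ? fw->path : "(none)");
	return 0;
}
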
37 changes: 27 additions & 10 deletions drivers/gpu/drm/i915/i915_gem_evict.c
@@ -416,6 +416,11 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* @vm: Address space to cleanse
* @ww: An optional struct i915_gem_ww_ctx. If not NULL, i915_gem_evict_vm
* will be able to evict vma's locked by the ww as well.
* @busy_bo: Optional pointer to struct drm_i915_gem_object. If not NULL, then
* in the event i915_gem_evict_vm() is unable to trylock an object for eviction,
* then @busy_bo will point to it. -EBUSY is also returned. The caller must drop
* the vm->mutex, before trying again to acquire the contended lock. The caller
* also owns a reference to the object.
*
* This function evicts all vmas from a vm.
*
@@ -425,7 +430,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* To clarify: This is for freeing up virtual address space, not for freeing
* memory in e.g. the shrinker.
*/
- int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
+ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww,
+ struct drm_i915_gem_object **busy_bo)
{
int ret = 0;

@@ -457,41 +463,52 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
* the resv is shared among multiple objects, we still
* need the object ref.
*/
- if (dying_vma(vma) ||
+ if (!i915_gem_object_get_rcu(vma->obj) ||
(ww && (dma_resv_locking_ctx(vma->obj->base.resv) == &ww->ctx))) {
__i915_vma_pin(vma);
list_add(&vma->evict_link, &locked_eviction_list);
continue;
}

- if (!i915_gem_object_trylock(vma->obj, ww))
+ if (!i915_gem_object_trylock(vma->obj, ww)) {
+ if (busy_bo) {
+ *busy_bo = vma->obj; /* holds ref */
+ ret = -EBUSY;
+ break;
+ }
+ i915_gem_object_put(vma->obj);
continue;
+ }

__i915_vma_pin(vma);
list_add(&vma->evict_link, &eviction_list);
}
if (list_empty(&eviction_list) && list_empty(&locked_eviction_list))
break;

ret = 0;
/* Unbind locked objects first, before unlocking the eviction_list */
list_for_each_entry_safe(vma, vn, &locked_eviction_list, evict_link) {
__i915_vma_unpin(vma);

- if (ret == 0)
+ if (ret == 0) {
ret = __i915_vma_unbind(vma);
- if (ret != -EINTR) /* "Get me out of here!" */
- ret = 0;
+ if (ret != -EINTR) /* "Get me out of here!" */
+ ret = 0;
+ }
+ if (!dying_vma(vma))
+ i915_gem_object_put(vma->obj);
}

list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
- if (ret == 0)
+ if (ret == 0) {
ret = __i915_vma_unbind(vma);
- if (ret != -EINTR) /* "Get me out of here!" */
- ret = 0;
+ if (ret != -EINTR) /* "Get me out of here!" */
+ ret = 0;
+ }

i915_gem_object_unlock(vma->obj);
i915_gem_object_put(vma->obj);
}
} while (ret == 0);

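
The switch above from dying_vma() to i915_gem_object_get_rcu() makes the eviction loop hold a real reference for every object it touches: if the get fails, the object is already on its way out and is treated like the "already locked" case. A toy model of that get-unless-zero refcounting follows; toy_obj and its helpers are invented for illustration (in the driver this sits on top of the kernel's kref/refcount machinery), and the toy deliberately ignores the atomicity a real refcount needs.

#include <stdbool.h>
#include <stdio.h>

struct toy_obj { int refcount; };

/* Take a reference only if the object is not already dying. */
static bool toy_get_unless_zero(struct toy_obj *obj)
{
	if (obj->refcount == 0)
		return false;   /* already dying: no new reference allowed */
	obj->refcount++;
	return true;
}

static void toy_put(struct toy_obj *obj)
{
	obj->refcount--;        /* real code frees the object at zero */
}

int main(void)
{
	struct toy_obj live = { .refcount = 1 };
	struct toy_obj dying = { .refcount = 0 };

	printf("live:  got ref? %d (refcount now %d)\n",
	       toy_get_unless_zero(&live), live.refcount);
	printf("dying: got ref? %d (refcount now %d)\n",
	       toy_get_unless_zero(&dying), dying.refcount);

	toy_put(&live);
	return 0;
}
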
(The remaining six changed files in this merge are not shown here.)
