diff --git a/Android.bp b/Android.bp new file mode 100644 index 000000000..604581cd9 --- /dev/null +++ b/Android.bp @@ -0,0 +1,41 @@ +headers_src = [ + "include/uapi/*/**/*.h", +] + +display_headers_out = [ + "display/drm/msm_drm_pp.h", + "display/drm/sde_drm.h", + "display/drm/mi_disp.h", + "display/hdcp/msm_hdmi_hdcp_mgr.h", + "display/media/mmm_color_fmt.h", + "display/media/msm_sde_rotator.h", +] + +display_kernel_headers_verbose = "--verbose " +genrule { + name: "qti_generate_display_kernel_headers", + tools: [ + "headers_install.sh", + "unifdef" + ], + tool_files: [ + "display_kernel_headers.py", + ], + srcs: headers_src, + cmd: "python3 $(location display_kernel_headers.py) " + + display_kernel_headers_verbose + + "--header_arch arm64 " + + "--gen_dir $(genDir) " + + "--display_include_uapi $(locations include/uapi/*/**/*.h) " + + "--unifdef $(location unifdef) " + + "--headers_install $(location headers_install.sh)", + out: display_headers_out, +} + +cc_library_headers { + name: "qti_display_kernel_headers", + generated_headers: ["qti_generate_display_kernel_headers"], + export_generated_headers: ["qti_generate_display_kernel_headers"], + vendor: true, + recovery_available: true +} diff --git a/Android.mk b/Android.mk new file mode 100644 index 000000000..450f24855 --- /dev/null +++ b/Android.mk @@ -0,0 +1,15 @@ +# Android makefile for display kernel modules +ifneq (, $(filter $(call get-component-name), miodm)) + +DISPLAY_DLKM_ENABLE := true +ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true) + ifeq ($(TARGET_KERNEL_DLKM_DISPLAY_OVERRIDE), false) + DISPLAY_DLKM_ENABLE := false + endif +endif + +ifeq ($(DISPLAY_DLKM_ENABLE), true) + LOCAL_PATH := $(call my-dir) + include $(LOCAL_PATH)/msm/Android.mk +endif +endif diff --git a/BUILD.bazel b/BUILD.bazel new file mode 100644 index 000000000..dd029c6b4 --- /dev/null +++ b/BUILD.bazel @@ -0,0 +1,85 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_headers") + +package( + default_visibility = [ + "//visibility:public"], 
+) + +ddk_headers( + name = "linux_includes", + hdrs = glob([ + "include/linux/*.h", + ]), + includes = ["include"] +) + +ddk_headers( + name = "uapi_headers", + hdrs = glob([ + "include/uapi/display/drm/*.h", + "include/uapi/display/hdcp/*.h", + "include/uapi/display/media/*.h", + ]), + includes = ["include/uapi/display"] +) + +ddk_headers( + name = "dp_headers", + hdrs = glob([ + "msm/dp/*.h", + ]), + includes = ["msm/dp"] +) + +ddk_headers( + name = "dsi_headers", + hdrs = glob([ + "msm/dsi/*.h", + ]), + includes = ["msm/dsi"] +) + +ddk_headers( + name = "mi_disp_headers", + hdrs = glob([ + "msm/mi_disp/*.h", + ]), + includes = ["msm/mi_disp"] +) + +ddk_headers( + name = "sde_headers", + hdrs = glob([ + "msm/sde/*.h", + ]), + includes = ["msm/sde"] +) + +ddk_headers( + name = "rotator_headers", + hdrs = glob([ + "rotator/*.h", + ]), + includes = ["rotator"] +) + +ddk_headers( + name = "msm_headers", + hdrs = glob([ + "msm/*.h", + ]), + includes = ["msm"] +) + +ddk_headers( + name = "display_drivers_headers", + hdrs = [ ":linux_includes", ":uapi_headers", ":msm_headers",":dp_headers",":dsi_headers",":sde_headers",":rotator_headers",":mi_disp_headers"] +) + +config_setting( + name = "factory_build", + define_values = { "FACTORY_BUILD" : "1" } +) + +load(":target.bzl", "define_display_target") +define_display_target() diff --git a/Documentation/devicetree/bindings/drm/msm/sde-dp-sim.txt b/Documentation/devicetree/bindings/drm/msm/sde-dp-sim.txt new file mode 100644 index 000000000..f24aadeec --- /dev/null +++ b/Documentation/devicetree/bindings/drm/msm/sde-dp-sim.txt @@ -0,0 +1,58 @@ +QTI Snapdragon Display Engine (SDE) DP-MST sideband message emulation driver + +Required properties: +- compatible: "qcom,dp-mst-sim" + +Each child node represents a port at root branch, with properties: +- qcom,mode-h-active: A u32 property defines the horizontal active size. +- qcom,mode-h-front-porch: A u32 property defines the horizontal front porch. 
+- qcom,mode-h-pulse-width: A u32 property defines the horizontal pulse. +- qcom,mode-h-back-porch: A u32 property defines the horizontal back porch. +- qcom,mode-h-active-high: A boolean property if horizontal polarity is high. +- qcom,mode-v-active: A u32 property defines the vertical active size. +- qcom,mode-v-front-porch: A u32 property defines the vertical front portch. +- qcom,mode-v-pulse-width: A u32 property defines the vertical pulse width. +- qcom,mode-v-back-porch: A u32 property defines the vertical back porch. +- qcom,mode-v-active-high: A boolean property if vertical polarity is high. +- qcom,mode-refresh-rate: A u32 property defines vertial refresh rate. +- qcom,mode-clock-in-khz: A u32 property defines clock in kHz. + +Example: + +/ { + ... + + sde_dp_mst_sim: qcom,dp-mst-sim { + compatible = "qcom,dp-mst-sim"; + + port@0 { + qcom,mode-h-active = <1920>; + qcom,mode-h-front-porch = <88>; + qcom,mode-h-pulse-width = <44>; + qcom,mode-h-back-porch = <148>; + qcom,mode-h-active-high; + qcom,mode-v-active = <1080>; + qcom,mode-v-front-porch = <4>; + qcom,mode-v-pulse-width = <5>; + qcom,mode-v-back-porch = <36>; + qcom,mode-v-active-high; + qcom,mode-refresh-rate = <60>; + qcom,mode-clock-in-khz = <148500>; + }; + + port@1 { + qcom,mode-h-active = <1920>; + qcom,mode-h-front-porch = <88>; + qcom,mode-h-pulse-width = <44>; + qcom,mode-h-back-porch = <148>; + qcom,mode-h-active-high; + qcom,mode-v-active = <1080>; + qcom,mode-v-front-porch = <4>; + qcom,mode-v-pulse-width = <5>; + qcom,mode-v-back-porch = <36>; + qcom,mode-v-active-high; + qcom,mode-refresh-rate = <60>; + qcom,mode-clock-in-khz = <148500>; + }; + }; +}; diff --git a/Kbuild b/Kbuild new file mode 100644 index 000000000..cd50990e1 --- /dev/null +++ b/Kbuild @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only + +ifeq ($(DISPLAY_ROOT),) +DISPLAY_ROOT=$(srctree)/techpack/display +endif + +LINUXINCLUDE += \ + -I$(DISPLAY_ROOT)/include/uapi/display \ + -I$(DISPLAY_ROOT)/include 
+USERINCLUDE += -I$(DISPLAY_ROOT)/include/uapi/display + +obj-$(CONFIG_DRM_MSM) += msm/ diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..4f79116ad --- /dev/null +++ b/Makefile @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0-only + +KBUILD_OPTIONS+= DISPLAY_ROOT=$(KERNEL_SRC)/$(M) + +all: + $(MAKE) -C $(KERNEL_SRC) M=$(M) modules $(KBUILD_OPTIONS) + +modules_install: + $(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install + +%: + $(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS) + +clean: + rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers + rm -rf .tmp_versions diff --git a/Makefile.am b/Makefile.am new file mode 100644 index 000000000..dcd8f984f --- /dev/null +++ b/Makefile.am @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: GPL-2.0-only + +DISPLAY_ROOT=$(ROOTDIR)display/vendor/qcom/opensource/display-drivers +CONFIG_DRM_MSM=$(MODULE_DRM_MSM) +KBUILD_OPTIONS := DISPLAY_ROOT=$(DISPLAY_ROOT) CONFIG_DRM_MSM=$(CONFIG_DRM_MSM) + +ifeq ($(TARGET_SUPPORT),genericarmv8) + KBUILD_OPTIONS += CONFIG_ARCH_PINEAPPLE=y +endif + +obj-m += msm/ + +all: + $(MAKE) -C $(KERNEL_SRC) M=$(M) modules $(KBUILD_OPTIONS) + +modules_install: + $(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install + +%: + $(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS) + +clean: + rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers + rm -rf .tmp_versions diff --git a/NOTICE b/NOTICE new file mode 100644 index 000000000..7d0421439 --- /dev/null +++ b/NOTICE @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. +*/ + +/* + * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. 
+ * Copyright (C) 2013 Red Hat + * Copyright (C) 2014 Red Hat + * Copyright (C) 2016 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . +*/ + + +/* + * Copyright (c) 2009 Keith Packard + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */ + + +/* + * Copyright (c) 2008 Keith Packard + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + + +/* + * Copyright © 2014 Red Hatt. + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. 
+ * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ diff --git a/README.md b/README.md deleted file mode 100644 index 148bbeaef..000000000 --- a/README.md +++ /dev/null @@ -1,22 +0,0 @@ -| Branch | Device | Android version | Base tag | Link | -| :-: | :-: | :-: | :-: | :-: | -| flame-u-oss | Redmi 14R 5G, POCO M7 5G | Android U | LA.VENDOR.1.0.r1-24600-WAIPIO.QSSI14.0-1 | [flame-u-oss](https://github.com/MiCode/vendor_opensource_display-drivers/tree/flame-u-oss) | -| garnet-t-oss | Redmi Note 13 Pro | Android T | KERNEL.PLATFORM.1.0.r1-15100-kernel.0-1* |[garnet-t-oss](https://github.com/MiCode/vendor_opensource_display-drivers/tree/garnet-t-oss) | -| xun-t-oss | Redmi Pad SE | Android T | AU_LINUX_ANDROID_LA.VENDOR.13.2.1.R1.11.00.00.587.064 |[xun-t-oss](https://github.com/MiCode/vendor_opensource_display-drivers/tree/xun-t-oss) | -| shennong-u-oss | Xiaomi 14, Xiaomi 14Pro | Android U | KERNEL.PLATFORM.3.0.r1-03200-kernel.0-1 |[shennong-u-oss](https://github.com/MiCode/vendor_opensource_display-drivers/tree/shennong-u-oss) | -| bsp-vermeer-t-oss | Redmi K70 | Android U | LA.VENDOR.13.2.0.r1-14800-r1.0.r1_00043.0 |[bsp-vermeer-t-oss](https://github.com/MiCode/vendor_opensource_display-drivers/commit/c696b28150245a9ddd1f2701f071e4b580b44ce2) | -| bsp-manet-u-oss | Redmi K70Pro | Android U | LA.VENDOR.14.3.0.r1-04800-r1.0.r1_00016.0 |[bsp-manet-u-oss](https://github.com/MiCode/vendor_opensource_display-drivers/tree/bsp-manet-u-oss) | -| aurora-u-oss | Xiaomi 14 Ultra | Android U | LA.VENDOR.14.3.0.r1-06800-lanai.0-1 
|[aurora-u-oss](https://github.com/MiCode/vendor_opensource_display-drivers/tree/aurora-u-oss) | -| sheng-u-oss | Xiaomi Pad 6S Pro 12.4 | Android U | LA.VENDOR.13.2.0.r1-14800-KAILUA.0-1.36233.3 |[sheng-u-oss](https://github.com/MiCode/vendor_opensource_display-drivers/tree/sheng-u-oss) | -| peridot-u-oss | Redmi Turbo 3 | Android U | AU_LINUX_ANDROID_LA.VENDOR.14.3.0.R1.00.00.00.000.092 |[peridot-u-oss](https://github.com/MiCode/vendor_opensource_display-drivers/tree/peridot-u-oss) | -| chenfeng-u-oss | Civi 4 pro | Android U | AU_LINUX_KERNEL.PLATFORM.3.0.R1.00.00.00.017.065 |[chenfeng-u-oss](https://github.com/MiCode/vendor_opensource_display-drivers/tree/chenfeng-u-oss) | -| breeze-u-oss | Redmi Note 13R | Android U | LA.VENDOR.1.0.r1-24300-WAIPIO.QSSI14.0-1 |[breeze-u-oss](https://github.com/MiCode/vendor_opensource_display-drivers/tree/breeze-u-oss) | -| goku-u-oss | Xiaomi MIX Fold 4 | Android U | AU_LINUX_ANDROID_LA.VENDOR.14.3.0.R1.00.00.00.000.093 |[goku-u-oss](https://github.com/MiCode/vendor_opensource_display-drivers/tree/goku-u-oss-test) | -| muyu-v-oss | Xiaomi Pad 7 Pro | Android U | LA.VENDOR.14.3.0.r1-14500-r1.0.r1_00042.0 |[muyu-v-oss](https://github.com/MiCode/vendor_opensource_display-drivers/tree/muyu-v-oss) | -| uke-v-oss | Xiaomi Pad 7 | Android V | LA.VENDOR.14.3.0.r1-14500-r1.0.r1_00042.0 |[uke-v-oss](https://github.com/MiCode/vendor_opensource_display-drivers/tree/uke-v-oss) | -| bsp-zorn-v-oss | REDMI K80 | Android V | qcom-LA.VENDOR.14.3.0.r1-14500-r1.0.r1_00042.0 |[bsp-zorn-v-oss](https://github.com/MiCode/vendor_opensource_display-drivers/tree/bsp-zorn-v-oss) | -| bsp-miro-v-oss | REDMI K80 PRO | Android V | qcom-LA.VENDOR.14.3.0.r1-14500-r1.0.r1_00042.0 |[bsp-miro-v-oss](https://github.com/MiCode/vendor_opensource_display-drivers/tree/bsp-miro-v-oss) | -| warm-u-oss | Redmi A4 5G / POCO C75 5G | Android U | snapdragon-mid-2024-spf-1-0_r1.0.r1_00006.0 
|[warm-u-oss](https://github.com/MiCode/vendor_opensource_display-drivers/tree/warm-u-oss) | -| dada-v-oss | Xiaomi 15 / Xiaomi 15 Pro | Android V | qcom-LA.VENDOR.15.4.0.r1-08900-r1.0.r1_00022.1 |[dada-v-oss](https://github.com/MiCode/vendor_opensource_display-drivers/tree/dada-v-oss) | -| xuanyuan-v-oss | Xiaomi 15 Ultra | Android V | Snapdragon_Premium_High_2024.SPF.1.0_r1.0.r1_00021.0 |[xuanyuan-v-oss](https://github.com/MiCode/vendor_opensource_display-drivers/tree/xuanyuan-v-oss) | -| onyx-v-oss | REDMI Turbo 4 Pro | Android V | LA.VENDOR.15.4.0.r1-15000-r1.0.r1_00044.1 |[onyx-v-oss](https://github.com/MiCode/vendor_opensource_display-drivers/tree/onyx-v-oss) | diff --git a/bridge-drivers/lt9611uxc.c b/bridge-drivers/lt9611uxc.c new file mode 100644 index 000000000..03c006796 --- /dev/null +++ b/bridge-drivers/lt9611uxc.c @@ -0,0 +1,1030 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2019-2020. Linaro Limited. + */ + +/* + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#define EDID_BLOCK_SIZE 128 +#define EDID_NUM_BLOCKS 2 + +struct lt9611uxc { + struct device *dev; + struct drm_bridge bridge; + struct drm_connector connector; + + struct regmap *regmap; + /* Protects all accesses to registers by stopping the on-chip MCU */ + struct mutex ocm_lock; + + struct wait_queue_head wq; + struct work_struct work; + + struct device_node *dsi0_node; + struct device_node *dsi1_node; + struct mipi_dsi_device *dsi0; + struct mipi_dsi_device *dsi1; + struct platform_device *audio_pdev; + + struct gpio_desc *reset_gpio; + struct gpio_desc *enable_gpio; + + struct regulator_bulk_data supplies[2]; + + struct i2c_client *client; + + bool hpd_supported; + bool edid_read; + /* can be accessed from different threads, so protect this with ocm_lock */ + bool hdmi_connected; + uint8_t fw_version; +}; + +#define LT9611_PAGE_CONTROL 0xff + +static const struct regmap_range_cfg lt9611uxc_ranges[] = { + { + .name = "register_range", + .range_min = 0, + .range_max = 0xd0ff, + .selector_reg = LT9611_PAGE_CONTROL, + .selector_mask = 0xff, + .selector_shift = 0, + .window_start = 0, + .window_len = 0x100, + }, +}; + +static const struct regmap_config lt9611uxc_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0xffff, + .ranges = lt9611uxc_ranges, + .num_ranges = ARRAY_SIZE(lt9611uxc_ranges), +}; + +struct lt9611uxc_mode { + u16 hdisplay; + u16 vdisplay; + u8 vrefresh; +}; + +/* + * This chip supports only a fixed set of modes. + * Enumerate them here to check whether the mode is supported. 
+ */ +static struct lt9611uxc_mode lt9611uxc_modes[] = { + { 1920, 1080, 60 }, + { 1920, 1080, 30 }, + { 1920, 1080, 25 }, + { 1366, 768, 60 }, + { 1360, 768, 60 }, + { 1280, 1024, 60 }, + { 1280, 800, 60 }, + { 1280, 720, 60 }, + { 1280, 720, 50 }, + { 1280, 720, 30 }, + { 1152, 864, 60 }, + { 1024, 768, 60 }, + { 800, 600, 60 }, + { 720, 576, 50 }, + { 720, 480, 60 }, + { 640, 480, 60 }, +}; + +static struct lt9611uxc *bridge_to_lt9611uxc(struct drm_bridge *bridge) +{ + return container_of(bridge, struct lt9611uxc, bridge); +} + +static struct lt9611uxc *connector_to_lt9611uxc(struct drm_connector *connector) +{ + return container_of(connector, struct lt9611uxc, connector); +} + +static void lt9611uxc_lock(struct lt9611uxc *lt9611uxc) +{ + mutex_lock(<9611uxc->ocm_lock); + regmap_write(lt9611uxc->regmap, 0x80ee, 0x01); +} + +static void lt9611uxc_unlock(struct lt9611uxc *lt9611uxc) +{ + regmap_write(lt9611uxc->regmap, 0x80ee, 0x00); + msleep(50); + mutex_unlock(<9611uxc->ocm_lock); +} + +static irqreturn_t lt9611uxc_irq_thread_handler(int irq, void *dev_id) +{ + struct lt9611uxc *lt9611uxc = dev_id; + unsigned int irq_status = 0; + unsigned int hpd_status = 0; + + lt9611uxc_lock(lt9611uxc); + + regmap_read(lt9611uxc->regmap, 0xb022, &irq_status); + regmap_read(lt9611uxc->regmap, 0xb023, &hpd_status); + if (irq_status) + regmap_write(lt9611uxc->regmap, 0xb022, 0); + + if (irq_status & BIT(0)) { + lt9611uxc->edid_read = !!(hpd_status & BIT(0)); + wake_up_all(<9611uxc->wq); + } + + if (irq_status & BIT(1)) { + lt9611uxc->hdmi_connected = hpd_status & BIT(1); + schedule_work(<9611uxc->work); + } + + lt9611uxc_unlock(lt9611uxc); + + return IRQ_HANDLED; +} + +static void lt9611uxc_hpd_work(struct work_struct *work) +{ + struct lt9611uxc *lt9611uxc = container_of(work, struct lt9611uxc, work); + bool connected; + + if (lt9611uxc->connector.dev) { + if (lt9611uxc->connector.dev->mode_config.funcs) + drm_kms_helper_hotplug_event(lt9611uxc->connector.dev); + } else { + + 
mutex_lock(<9611uxc->ocm_lock); + connected = lt9611uxc->hdmi_connected; + mutex_unlock(<9611uxc->ocm_lock); + + drm_bridge_hpd_notify(<9611uxc->bridge, + connected ? + connector_status_connected : + connector_status_disconnected); + } +} + +static void lt9611uxc_reset(struct lt9611uxc *lt9611uxc) +{ + gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 1); + msleep(20); + + gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 0); + msleep(20); + + gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 1); + msleep(300); +} + +static void lt9611uxc_assert_5v(struct lt9611uxc *lt9611uxc) +{ + if (!lt9611uxc->enable_gpio) + return; + + gpiod_set_value_cansleep(lt9611uxc->enable_gpio, 1); + msleep(20); +} + +static int lt9611uxc_regulator_init(struct lt9611uxc *lt9611uxc) +{ + int ret; + + lt9611uxc->supplies[0].supply = "vdd"; + lt9611uxc->supplies[1].supply = "vcc"; + + ret = devm_regulator_bulk_get(lt9611uxc->dev, 2, lt9611uxc->supplies); + if (ret < 0) + return ret; + + return regulator_set_load(lt9611uxc->supplies[0].consumer, 200000); +} + +static int lt9611uxc_regulator_enable(struct lt9611uxc *lt9611uxc) +{ + int ret; + + ret = regulator_enable(lt9611uxc->supplies[0].consumer); + if (ret < 0) + return ret; + + usleep_range(1000, 10000); /* 50000 according to dtsi */ + + ret = regulator_enable(lt9611uxc->supplies[1].consumer); + if (ret < 0) { + regulator_disable(lt9611uxc->supplies[0].consumer); + return ret; + } + + return 0; +} + +static struct lt9611uxc_mode *lt9611uxc_find_mode(const struct drm_display_mode *mode) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(lt9611uxc_modes); i++) { + if (lt9611uxc_modes[i].hdisplay == mode->hdisplay && + lt9611uxc_modes[i].vdisplay == mode->vdisplay && + lt9611uxc_modes[i].vrefresh == drm_mode_vrefresh(mode)) { + return <9611uxc_modes[i]; + } + } + + return NULL; +} + +static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc, + struct device_node *dsi_node) +{ + const struct mipi_dsi_device_info info = { "lt9611uxc", 
0, NULL }; + struct mipi_dsi_device *dsi; + struct mipi_dsi_host *host; + struct device *dev = lt9611uxc->dev; + int ret; + + host = of_find_mipi_dsi_host_by_node(dsi_node); + if (!host) { + dev_err(dev, "failed to find dsi host\n"); + return ERR_PTR(-EPROBE_DEFER); + } + + dsi = devm_mipi_dsi_device_register_full(dev, host, &info); + if (IS_ERR(dsi)) { + dev_err(dev, "failed to create dsi device\n"); + return dsi; + } + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | + MIPI_DSI_MODE_VIDEO_HSE; + + ret = devm_mipi_dsi_attach(dev, dsi); + if (ret < 0) { + dev_err(dev, "failed to attach dsi to host\n"); + return ERR_PTR(ret); + } + + return dsi; +} + +static int lt9611uxc_connector_get_modes(struct drm_connector *connector) +{ + struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector); + unsigned int count; + struct edid *edid; + + edid = lt9611uxc->bridge.funcs->get_edid(<9611uxc->bridge, connector); + drm_connector_update_edid_property(connector, edid); + count = drm_add_edid_modes(connector, edid); + kfree(edid); + + return count; +} + +static enum drm_connector_status lt9611uxc_connector_detect(struct drm_connector *connector, + bool force) +{ + struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector); + + return lt9611uxc->bridge.funcs->detect(<9611uxc->bridge); +} + +static enum drm_mode_status lt9611uxc_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct lt9611uxc_mode *lt9611uxc_mode = lt9611uxc_find_mode(mode); + + return lt9611uxc_mode ? 
MODE_OK : MODE_BAD; +} + +static const struct drm_connector_helper_funcs lt9611uxc_bridge_connector_helper_funcs = { + .get_modes = lt9611uxc_connector_get_modes, + .mode_valid = lt9611uxc_connector_mode_valid, +}; + +static const struct drm_connector_funcs lt9611uxc_bridge_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .detect = lt9611uxc_connector_detect, + .destroy = drm_connector_cleanup, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int lt9611uxc_connector_init(struct drm_bridge *bridge, struct lt9611uxc *lt9611uxc) +{ + int ret; + + if (!bridge->encoder) { + DRM_ERROR("Parent encoder object not found"); + return -ENODEV; + } + + lt9611uxc->connector.polled = DRM_CONNECTOR_POLL_HPD; + + drm_connector_helper_add(<9611uxc->connector, + <9611uxc_bridge_connector_helper_funcs); + ret = drm_connector_init(bridge->dev, <9611uxc->connector, + <9611uxc_bridge_connector_funcs, + DRM_MODE_CONNECTOR_HDMIA); + if (ret) { + DRM_ERROR("Failed to initialize connector with drm\n"); + return ret; + } + + ret = drm_connector_attach_encoder(<9611uxc->connector, bridge->encoder); + if (ret) { + DRM_ERROR("Failed to link up connector to encoder: %d\n", ret); + return ret; + } + + /* Attach primary DSI */ + lt9611uxc->dsi0 = lt9611uxc_attach_dsi(lt9611uxc, lt9611uxc->dsi0_node); + if (IS_ERR(lt9611uxc->dsi0)) { + ret = PTR_ERR(lt9611uxc->dsi0); + drm_bridge_remove(<9611uxc->bridge); + return ret; + } + + /* Attach secondary DSI, if specified */ + if (lt9611uxc->dsi1_node) { + lt9611uxc->dsi1 = lt9611uxc_attach_dsi(lt9611uxc, lt9611uxc->dsi1_node); + if (IS_ERR(lt9611uxc->dsi1)) { + ret = PTR_ERR(lt9611uxc->dsi1); + drm_bridge_remove(<9611uxc->bridge); + return ret; + } + } + + return ret; +} + +static int lt9611uxc_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + 
struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); + int ret; + + ret = lt9611uxc_connector_init(bridge, lt9611uxc); + if (ret < 0) + return ret; + + return 0; +} + +static enum drm_mode_status +lt9611uxc_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct lt9611uxc_mode *lt9611uxc_mode; + + lt9611uxc_mode = lt9611uxc_find_mode(mode); + + return lt9611uxc_mode ? MODE_OK : MODE_BAD; +} + +static void lt9611uxc_video_setup(struct lt9611uxc *lt9611uxc, + const struct drm_display_mode *mode) +{ + u32 h_total, hactive, hsync_len, hfront_porch; + u32 v_total, vactive, vsync_len, vfront_porch; + + h_total = mode->htotal; + v_total = mode->vtotal; + + hactive = mode->hdisplay; + hsync_len = mode->hsync_end - mode->hsync_start; + hfront_porch = mode->hsync_start - mode->hdisplay; + + vactive = mode->vdisplay; + vsync_len = mode->vsync_end - mode->vsync_start; + vfront_porch = mode->vsync_start - mode->vdisplay; + + regmap_write(lt9611uxc->regmap, 0xd00d, (u8)(v_total / 256)); + regmap_write(lt9611uxc->regmap, 0xd00e, (u8)(v_total % 256)); + + regmap_write(lt9611uxc->regmap, 0xd00f, (u8)(vactive / 256)); + regmap_write(lt9611uxc->regmap, 0xd010, (u8)(vactive % 256)); + + regmap_write(lt9611uxc->regmap, 0xd011, (u8)(h_total / 256)); + regmap_write(lt9611uxc->regmap, 0xd012, (u8)(h_total % 256)); + + regmap_write(lt9611uxc->regmap, 0xd013, (u8)(hactive / 256)); + regmap_write(lt9611uxc->regmap, 0xd014, (u8)(hactive % 256)); + + regmap_write(lt9611uxc->regmap, 0xd015, (u8)(vsync_len % 256)); + + regmap_update_bits(lt9611uxc->regmap, 0xd016, 0xf, (u8)(hsync_len / 256)); + regmap_write(lt9611uxc->regmap, 0xd017, (u8)(hsync_len % 256)); + + regmap_update_bits(lt9611uxc->regmap, 0xd018, 0xf, (u8)(vfront_porch / 256)); + regmap_write(lt9611uxc->regmap, 0xd019, (u8)(vfront_porch % 256)); + + regmap_update_bits(lt9611uxc->regmap, 0xd01a, 0xf, (u8)(hfront_porch / 256)); + 
regmap_write(lt9611uxc->regmap, 0xd01b, (u8)(hfront_porch % 256)); +} + +static void lt9611uxc_bridge_mode_set(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adj_mode) +{ + struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); + + lt9611uxc_lock(lt9611uxc); + lt9611uxc_video_setup(lt9611uxc, mode); + lt9611uxc_unlock(lt9611uxc); +} + +static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *bridge) +{ + struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); + unsigned int reg_val = 0; + int ret; + bool connected = true; + + lt9611uxc_lock(lt9611uxc); + + if (lt9611uxc->hpd_supported) { + ret = regmap_read(lt9611uxc->regmap, 0xb023, ®_val); + + if (ret) + dev_err(lt9611uxc->dev, "failed to read hpd status: %d\n", ret); + else + connected = reg_val & BIT(1); + } + lt9611uxc->hdmi_connected = connected; + + lt9611uxc_unlock(lt9611uxc); + + return connected ? connector_status_connected : + connector_status_disconnected; +} + +static int lt9611uxc_wait_for_edid(struct lt9611uxc *lt9611uxc) +{ + return wait_event_interruptible_timeout(lt9611uxc->wq, lt9611uxc->edid_read, + msecs_to_jiffies(500)); +} + +static int lt9611uxc_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len) +{ + struct lt9611uxc *lt9611uxc = data; + int ret; + + if (len > EDID_BLOCK_SIZE) + return -EINVAL; + + if (block >= EDID_NUM_BLOCKS) + return -EINVAL; + + lt9611uxc_lock(lt9611uxc); + + regmap_write(lt9611uxc->regmap, 0xb00b, 0x10); + + regmap_write(lt9611uxc->regmap, 0xb00a, block * EDID_BLOCK_SIZE); + + ret = regmap_noinc_read(lt9611uxc->regmap, 0xb0b0, buf, len); + if (ret) + dev_err(lt9611uxc->dev, "edid read failed: %d\n", ret); + + lt9611uxc_unlock(lt9611uxc); + + return 0; +}; + +static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); + int ret; + + ret = 
lt9611uxc_wait_for_edid(lt9611uxc); + if (ret < 0) { + dev_err(lt9611uxc->dev, "wait for EDID failed: %d\n", ret); + return NULL; + } else if (ret == 0) { + dev_err(lt9611uxc->dev, "wait for EDID timeout\n"); + return NULL; + } + + return drm_do_get_edid(connector, lt9611uxc_get_edid_block, lt9611uxc); +} + +static const struct drm_bridge_funcs lt9611uxc_bridge_funcs = { + .attach = lt9611uxc_bridge_attach, + .mode_valid = lt9611uxc_bridge_mode_valid, + .mode_set = lt9611uxc_bridge_mode_set, + .detect = lt9611uxc_bridge_detect, + .get_edid = lt9611uxc_bridge_get_edid, +}; + +static int lt9611uxc_parse_dt(struct device *dev, + struct lt9611uxc *lt9611uxc) +{ + lt9611uxc->dsi0_node = of_graph_get_remote_node(dev->of_node, 0, -1); + if (!lt9611uxc->dsi0_node) { + dev_err(lt9611uxc->dev, "failed to get remote node for primary dsi\n"); + return -ENODEV; + } + + lt9611uxc->dsi1_node = of_graph_get_remote_node(dev->of_node, 1, -1); + + return 0; +} + +static int lt9611uxc_gpio_init(struct lt9611uxc *lt9611uxc) +{ + struct device *dev = lt9611uxc->dev; + + lt9611uxc->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(lt9611uxc->reset_gpio)) { + dev_err(dev, "failed to acquire reset gpio\n"); + return PTR_ERR(lt9611uxc->reset_gpio); + } + + lt9611uxc->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); + if (IS_ERR(lt9611uxc->enable_gpio)) { + dev_err(dev, "failed to acquire enable gpio\n"); + return PTR_ERR(lt9611uxc->enable_gpio); + } + + return 0; +} + +static int lt9611uxc_read_device_rev(struct lt9611uxc *lt9611uxc) +{ + unsigned int rev0, rev1, rev2; + int ret; + + lt9611uxc_lock(lt9611uxc); + + ret = regmap_read(lt9611uxc->regmap, 0x8100, &rev0); + ret |= regmap_read(lt9611uxc->regmap, 0x8101, &rev1); + ret |= regmap_read(lt9611uxc->regmap, 0x8102, &rev2); + if (ret) + dev_err(lt9611uxc->dev, "failed to read revision: %d\n", ret); + else + dev_info(lt9611uxc->dev, "LT9611 revision: 0x%02x.%02x.%02x\n", rev0, rev1, rev2); + + 
lt9611uxc_unlock(lt9611uxc); + + return ret; +} + +static int lt9611uxc_read_version(struct lt9611uxc *lt9611uxc) +{ + unsigned int rev; + int ret; + + lt9611uxc_lock(lt9611uxc); + + ret = regmap_read(lt9611uxc->regmap, 0xb021, &rev); + if (ret) + dev_err(lt9611uxc->dev, "failed to read revision: %d\n", ret); + else + dev_info(lt9611uxc->dev, "LT9611 version: 0x%02x\n", rev); + + lt9611uxc_unlock(lt9611uxc); + + return ret < 0 ? ret : rev; +} + +static int lt9611uxc_hdmi_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *fmt, + struct hdmi_codec_params *hparms) +{ + /* + * LT9611UXC will automatically detect rate and sample size, so no need + * to setup anything here. + */ + return 0; +} + +static void lt9611uxc_audio_shutdown(struct device *dev, void *data) +{ +} + +static int lt9611uxc_hdmi_i2s_get_dai_id(struct snd_soc_component *component, + struct device_node *endpoint) +{ + struct of_endpoint of_ep; + int ret; + + ret = of_graph_parse_endpoint(endpoint, &of_ep); + if (ret < 0) + return ret; + + /* + * HDMI sound should be located as reg = <2> + * Then, it is sound port 0 + */ + if (of_ep.port == 2) + return 0; + + return -EINVAL; +} + +static const struct hdmi_codec_ops lt9611uxc_codec_ops = { + .hw_params = lt9611uxc_hdmi_hw_params, + .audio_shutdown = lt9611uxc_audio_shutdown, + .get_dai_id = lt9611uxc_hdmi_i2s_get_dai_id, +}; + +static int lt9611uxc_audio_init(struct device *dev, struct lt9611uxc *lt9611uxc) +{ + struct hdmi_codec_pdata codec_data = { + .ops = <9611uxc_codec_ops, + .max_i2s_channels = 2, + .i2s = 1, + .data = lt9611uxc, + }; + + lt9611uxc->audio_pdev = + platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &codec_data, sizeof(codec_data)); + + return PTR_ERR_OR_ZERO(lt9611uxc->audio_pdev); +} + +static void lt9611uxc_audio_exit(struct lt9611uxc *lt9611uxc) +{ + if (lt9611uxc->audio_pdev) { + platform_device_unregister(lt9611uxc->audio_pdev); + lt9611uxc->audio_pdev = NULL; + } +} + +#define 
LT9611UXC_FW_PAGE_SIZE 32 +static void lt9611uxc_firmware_write_page(struct lt9611uxc *lt9611uxc, u16 addr, const u8 *buf) +{ + struct reg_sequence seq_write_prepare[] = { + REG_SEQ0(0x805a, 0x04), + REG_SEQ0(0x805a, 0x00), + + REG_SEQ0(0x805e, 0xdf), + REG_SEQ0(0x805a, 0x20), + REG_SEQ0(0x805a, 0x00), + REG_SEQ0(0x8058, 0x21), + }; + + struct reg_sequence seq_write_addr[] = { + REG_SEQ0(0x805b, (addr >> 16) & 0xff), + REG_SEQ0(0x805c, (addr >> 8) & 0xff), + REG_SEQ0(0x805d, addr & 0xff), + REG_SEQ0(0x805a, 0x10), + REG_SEQ0(0x805a, 0x00), + }; + + regmap_write(lt9611uxc->regmap, 0x8108, 0xbf); + msleep(20); + regmap_write(lt9611uxc->regmap, 0x8108, 0xff); + msleep(20); + regmap_multi_reg_write(lt9611uxc->regmap, seq_write_prepare, ARRAY_SIZE(seq_write_prepare)); + regmap_noinc_write(lt9611uxc->regmap, 0x8059, buf, LT9611UXC_FW_PAGE_SIZE); + regmap_multi_reg_write(lt9611uxc->regmap, seq_write_addr, ARRAY_SIZE(seq_write_addr)); + msleep(20); +} + +static void lt9611uxc_firmware_read_page(struct lt9611uxc *lt9611uxc, u16 addr, char *buf) +{ + struct reg_sequence seq_read_page[] = { + REG_SEQ0(0x805a, 0xa0), + REG_SEQ0(0x805a, 0x80), + REG_SEQ0(0x805b, (addr >> 16) & 0xff), + REG_SEQ0(0x805c, (addr >> 8) & 0xff), + REG_SEQ0(0x805d, addr & 0xff), + REG_SEQ0(0x805a, 0x90), + REG_SEQ0(0x805a, 0x80), + REG_SEQ0(0x8058, 0x21), + }; + + regmap_multi_reg_write(lt9611uxc->regmap, seq_read_page, ARRAY_SIZE(seq_read_page)); + regmap_noinc_read(lt9611uxc->regmap, 0x805f, buf, LT9611UXC_FW_PAGE_SIZE); +} + +static char *lt9611uxc_firmware_read(struct lt9611uxc *lt9611uxc, size_t size) +{ + struct reg_sequence seq_read_setup[] = { + REG_SEQ0(0x805a, 0x84), + REG_SEQ0(0x805a, 0x80), + }; + + char *readbuf; + u16 offset; + + readbuf = kzalloc(ALIGN(size, 32), GFP_KERNEL); + if (!readbuf) + return NULL; + + regmap_multi_reg_write(lt9611uxc->regmap, seq_read_setup, ARRAY_SIZE(seq_read_setup)); + + for (offset = 0; + offset < size; + offset += LT9611UXC_FW_PAGE_SIZE) + 
lt9611uxc_firmware_read_page(lt9611uxc, offset, &readbuf[offset]); + + return readbuf; +} + +static int lt9611uxc_firmware_update(struct lt9611uxc *lt9611uxc) +{ + int ret; + u16 offset; + size_t remain; + char *readbuf; + const struct firmware *fw; + + struct reg_sequence seq_setup[] = { + REG_SEQ0(0x805e, 0xdf), + REG_SEQ0(0x8058, 0x00), + REG_SEQ0(0x8059, 0x50), + REG_SEQ0(0x805a, 0x10), + REG_SEQ0(0x805a, 0x00), + }; + + + struct reg_sequence seq_block_erase[] = { + REG_SEQ0(0x805a, 0x04), + REG_SEQ0(0x805a, 0x00), + REG_SEQ0(0x805b, 0x00), + REG_SEQ0(0x805c, 0x00), + REG_SEQ0(0x805d, 0x00), + REG_SEQ0(0x805a, 0x01), + REG_SEQ0(0x805a, 0x00), + }; + + ret = request_firmware(&fw, "lt9611uxc_fw.bin", lt9611uxc->dev); + if (ret < 0) + return ret; + + dev_info(lt9611uxc->dev, "Updating firmware\n"); + lt9611uxc_lock(lt9611uxc); + + regmap_multi_reg_write(lt9611uxc->regmap, seq_setup, ARRAY_SIZE(seq_setup)); + + /* + * Need erase block 2 times here. Sometimes, block erase can fail. + * This is a workaround. 
+ */ + regmap_multi_reg_write(lt9611uxc->regmap, seq_block_erase, ARRAY_SIZE(seq_block_erase)); + msleep(3000); + regmap_multi_reg_write(lt9611uxc->regmap, seq_block_erase, ARRAY_SIZE(seq_block_erase)); + msleep(3000); + + for (offset = 0, remain = fw->size; + remain >= LT9611UXC_FW_PAGE_SIZE; + offset += LT9611UXC_FW_PAGE_SIZE, remain -= LT9611UXC_FW_PAGE_SIZE) + lt9611uxc_firmware_write_page(lt9611uxc, offset, fw->data + offset); + + if (remain > 0) { + char buf[LT9611UXC_FW_PAGE_SIZE]; + + memset(buf, 0xff, LT9611UXC_FW_PAGE_SIZE); + memcpy(buf, fw->data + offset, remain); + lt9611uxc_firmware_write_page(lt9611uxc, offset, buf); + } + msleep(20); + + readbuf = lt9611uxc_firmware_read(lt9611uxc, fw->size); + if (!readbuf) { + ret = -ENOMEM; + goto out; + } + + if (!memcmp(readbuf, fw->data, fw->size)) { + dev_err(lt9611uxc->dev, "Firmware update failed\n"); + print_hex_dump(KERN_ERR, "fw: ", DUMP_PREFIX_OFFSET, + 16, 1, readbuf, fw->size, false); + ret = -EINVAL; + } else { + dev_info(lt9611uxc->dev, "Firmware updates successfully\n"); + ret = 0; + } + kfree(readbuf); + +out: + lt9611uxc_unlock(lt9611uxc); + lt9611uxc_reset(lt9611uxc); + release_firmware(fw); + + return ret; +} + +static ssize_t lt9611uxc_firmware_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) +{ + struct lt9611uxc *lt9611uxc = dev_get_drvdata(dev); + int ret; + + ret = lt9611uxc_firmware_update(lt9611uxc); + if (ret < 0) + return ret; + return len; +} + +static ssize_t lt9611uxc_firmware_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct lt9611uxc *lt9611uxc = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%02x\n", lt9611uxc->fw_version); +} + +static DEVICE_ATTR_RW(lt9611uxc_firmware); + +static struct attribute *lt9611uxc_attrs[] = { + &dev_attr_lt9611uxc_firmware.attr, + NULL, +}; + +static const struct attribute_group lt9611uxc_attr_group = { + .attrs = lt9611uxc_attrs, +}; + +static const struct attribute_group 
*lt9611uxc_attr_groups[] = { + &lt9611uxc_attr_group, + NULL, +}; + +static int lt9611uxc_probe(struct i2c_client *client) +{ + struct lt9611uxc *lt9611uxc; + struct device *dev = &client->dev; + int ret; + bool fw_updated = false; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + dev_err(dev, "device doesn't support I2C\n"); + return -ENODEV; + } + + lt9611uxc = devm_kzalloc(dev, sizeof(*lt9611uxc), GFP_KERNEL); + if (!lt9611uxc) + return -ENOMEM; + + lt9611uxc->dev = dev; + lt9611uxc->client = client; + mutex_init(&lt9611uxc->ocm_lock); + + lt9611uxc->regmap = devm_regmap_init_i2c(client, &lt9611uxc_regmap_config); + if (IS_ERR(lt9611uxc->regmap)) { + dev_err(lt9611uxc->dev, "regmap i2c init failed\n"); + return PTR_ERR(lt9611uxc->regmap); + } + + ret = lt9611uxc_parse_dt(dev, lt9611uxc); + if (ret) { + dev_err(dev, "failed to parse device tree\n"); + return ret; + } + + ret = lt9611uxc_gpio_init(lt9611uxc); + if (ret < 0) + goto err_of_put; + + ret = lt9611uxc_regulator_init(lt9611uxc); + if (ret < 0) + goto err_of_put; + + lt9611uxc_assert_5v(lt9611uxc); + + ret = lt9611uxc_regulator_enable(lt9611uxc); + if (ret) + goto err_of_put; + + lt9611uxc_reset(lt9611uxc); + + ret = lt9611uxc_read_device_rev(lt9611uxc); + if (ret) { + dev_err(dev, "failed to read chip rev\n"); + goto err_disable_regulators; + } + +retry: + ret = lt9611uxc_read_version(lt9611uxc); + if (ret < 0) { + dev_err(dev, "failed to read FW version\n"); + goto err_disable_regulators; + } else if (ret == 0) { + if (!fw_updated) { + fw_updated = true; + dev_err(dev, "FW version 0, enforcing firmware update\n"); + ret = lt9611uxc_firmware_update(lt9611uxc); + if (ret < 0) + goto err_disable_regulators; + else + goto retry; + } else { + dev_err(dev, "FW version 0, update failed\n"); + ret = -EOPNOTSUPP; + goto err_disable_regulators; + } + } else if (ret < 0x40) { + dev_info(dev, "FW version 0x%x, HPD not supported\n", ret); + } else { + lt9611uxc->hpd_supported = true; + } + 
lt9611uxc->fw_version = ret; + + init_waitqueue_head(&lt9611uxc->wq); + INIT_WORK(&lt9611uxc->work, lt9611uxc_hpd_work); + + ret = devm_request_threaded_irq(dev, client->irq, NULL, + lt9611uxc_irq_thread_handler, + IRQF_ONESHOT, "lt9611uxc", lt9611uxc); + if (ret) { + dev_err(dev, "failed to request irq\n"); + goto err_disable_regulators; + } + + i2c_set_clientdata(client, lt9611uxc); + + lt9611uxc->bridge.funcs = &lt9611uxc_bridge_funcs; + lt9611uxc->bridge.of_node = client->dev.of_node; + lt9611uxc->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID; + if (lt9611uxc->hpd_supported) + lt9611uxc->bridge.ops |= DRM_BRIDGE_OP_HPD; + lt9611uxc->bridge.type = DRM_MODE_CONNECTOR_HDMIA; + + drm_bridge_add(&lt9611uxc->bridge); + + return lt9611uxc_audio_init(dev, lt9611uxc); + +err_disable_regulators: + regulator_bulk_disable(ARRAY_SIZE(lt9611uxc->supplies), lt9611uxc->supplies); + +err_of_put: + of_node_put(lt9611uxc->dsi1_node); + of_node_put(lt9611uxc->dsi0_node); + + return ret; +} + +static void lt9611uxc_remove(struct i2c_client *client) +{ + struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client); + + disable_irq(client->irq); + cancel_work_sync(&lt9611uxc->work); + lt9611uxc_audio_exit(lt9611uxc); + drm_bridge_remove(&lt9611uxc->bridge); + + mutex_destroy(&lt9611uxc->ocm_lock); + + regulator_bulk_disable(ARRAY_SIZE(lt9611uxc->supplies), lt9611uxc->supplies); + + of_node_put(lt9611uxc->dsi1_node); + of_node_put(lt9611uxc->dsi0_node); +} + +static struct i2c_device_id lt9611uxc_id[] = { + { "lt,lt9611uxc", 0 }, + { /* sentinel */ } +}; + +static const struct of_device_id lt9611uxc_match_table[] = { + { .compatible = "lt,lt9611uxc" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, lt9611uxc_match_table); + +static struct i2c_driver lt9611uxc_driver = { + .driver = { + .name = "lt9611uxc", + .of_match_table = lt9611uxc_match_table, + .dev_groups = lt9611uxc_attr_groups, + }, + .probe = lt9611uxc_probe, + .remove = lt9611uxc_remove, + .id_table = lt9611uxc_id, +}; 
+module_i2c_driver(lt9611uxc_driver); + +MODULE_AUTHOR("Dmitry Baryshkov "); +MODULE_LICENSE("GPL v2"); diff --git a/config/augen3disp.conf b/config/augen3disp.conf new file mode 100644 index 000000000..a936c0503 --- /dev/null +++ b/config/augen3disp.conf @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0-only + +export CONFIG_DRM_MSM=y +export CONFIG_DRM_MSM_SDE=y +export CONFIG_SYNC_FILE=y +export CONFIG_DRM_MSM_DSI=y +export CONFIG_DRM_MSM_DP=n +export CONFIG_DRM_MSM_DP_MST=n +export CONFIG_QCOM_MDSS_PLL=y +export CONFIG_DRM_SDE_RSC=n +export CONFIG_DRM_SDE_WB=n +export CONFIG_DRM_MSM_REGISTER_LOGGING=n +export CONFIG_SDE_RECOVERY_MANAGER=n +export CONFIG_DRM_SDE_SHD=n +export CONFIG_DRM_SDE_SHP=n +export CONFIG_DRM_SDE_ROI_MISR=n +export CONFIG_DRM_MSM_LEASE=n +export CONFIG_DISPLAY_BUILD=m diff --git a/config/augen3dispconf.h b/config/augen3dispconf.h new file mode 100644 index 000000000..dea5c76c5 --- /dev/null +++ b/config/augen3dispconf.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#define CONFIG_DRM_MSM 1 +#define CONFIG_DRM_MSM_SDE 1 +#define CONFIG_SYNC_FILE 1 +#define CONFIG_DRM_MSM_DSI 1 +#define CONFIG_DRM_MSM_DP 0 +#define CONFIG_DRM_MSM_DP_MST 0 +#define CONFIG_DRM_SDE_WB 0 +#define CONFIG_DRM_SDE_RSC 0 +#define CONFIG_DRM_MSM_REGISTER_LOGGING 0 +#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1 +#define CONFIG_QCOM_MDSS_PLL 1 +#define CONFIG_GKI_DISPLAY 1 +#define CONFIG_MSM_EXT_DISPLAY 1 +#define CONFIG_DRM_SDE_ROI_MISR 0 +#define CONFIG_DRM_SDE_SHD 0 +#define CONFIG_DRM_SDE_SHP 0 +#define CONFIG_DRM_MSM_LEASE 0 diff --git a/config/bengaldisp.conf b/config/bengaldisp.conf new file mode 100644 index 000000000..1ef288bd4 --- /dev/null +++ b/config/bengaldisp.conf @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2019, The Linux Foundation. All rights reserved. 
+ +export CONFIG_DRM_MSM=y +export CONFIG_DRM_MSM_SDE=y +export CONFIG_SYNC_FILE=y +export CONFIG_DRM_MSM_DSI=y +export CONFIG_DRM_MSM_DP=n +export CONFIG_QCOM_MDSS_DP_PLL=n +export CONFIG_DSI_PARSER=y +export CONFIG_DRM_SDE_WB=n +export CONFIG_DRM_MSM_REGISTER_LOGGING=y +export CONFIG_QCOM_MDSS_PLL=y +export CONFIG_MSM_SDE_ROTATOR=y +export CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y +export CONFIG_DRM_SDE_RSC=n diff --git a/config/bengaldispconf.h b/config/bengaldispconf.h new file mode 100644 index 000000000..c76a073ec --- /dev/null +++ b/config/bengaldispconf.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + */ + +#define CONFIG_DRM_MSM 1 +#define CONFIG_DRM_MSM_SDE 1 +#define CONFIG_SYNC_FILE 1 +#define CONFIG_DRM_MSM_DSI 1 +#define CONFIG_DSI_PARSER 1 +#define CONFIG_DRM_MSM_REGISTER_LOGGING 1 +#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1 +#define CONFIG_QCOM_MDSS_PLL 1 +#define CONFIG_MSM_SDE_ROTATOR 1 +#define CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG 1 diff --git a/config/gki_holidisp.conf b/config/gki_holidisp.conf new file mode 100644 index 000000000..ce2c475e5 --- /dev/null +++ b/config/gki_holidisp.conf @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ +export CONFIG_DRM_MSM=y +export CONFIG_DRM_MSM_SDE=y +export CONFIG_DRM_MSM_DP=n +export CONFIG_DRM_MSM_DP_MST=n +export CONFIG_SYNC_FILE=y +export CONFIG_DRM_MSM_DSI=y +export CONFIG_DSI_PARSER=y +export CONFIG_DRM_SDE_WB=n +export CONFIG_DRM_MSM_REGISTER_LOGGING=y +export CONFIG_QCOM_MDSS_PLL=y +export CONFIG_DRM_SDE_RSC=n +export CONFIG_DISPLAY_BUILD=m +export CONFIG_MSM_SDE_ROTATOR=y +export CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y +export CONFIG_MSM_SDE_ROTATOR_INIT_ONLY=y diff --git a/config/gki_holidispconf.h b/config/gki_holidispconf.h new file mode 100644 index 000000000..535c20d51 --- /dev/null +++ b/config/gki_holidispconf.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#define CONFIG_DRM_MSM 1 +#define CONFIG_DRM_MSM_SDE 1 +#define CONFIG_SYNC_FILE 1 +#define CONFIG_DRM_MSM_DSI 1 +#define CONFIG_DSI_PARSER 1 +#define CONFIG_DRM_MSM_REGISTER_LOGGING 1 +#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1 +#define CONFIG_QCOM_MDSS_PLL 1 +#define CONFIG_GKI_DISPLAY 1 +#define CONFIG_MSM_SDE_ROTATOR 1 +#define CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG 1 +#define CONFIG_MSM_SDE_ROTATOR_INIT_ONLY 1 diff --git a/config/gki_kalamadisp.conf b/config/gki_kalamadisp.conf new file mode 100644 index 000000000..5058638bb --- /dev/null +++ b/config/gki_kalamadisp.conf @@ -0,0 +1,18 @@ +export CONFIG_DRM_MSM=y +export CONFIG_DRM_MSM_SDE=y +export CONFIG_SYNC_FILE=y +export CONFIG_DRM_MSM_DSI=y +export CONFIG_DRM_MSM_DP=y +export CONFIG_DRM_MSM_DP_MST=y +export CONFIG_DSI_PARSER=y +export CONFIG_QCOM_MDSS_PLL=y +export CONFIG_DRM_SDE_RSC=y +export CONFIG_DRM_SDE_WB=y +export CONFIG_DRM_MSM_REGISTER_LOGGING=y +export CONFIG_MSM_MMRM=y +export CONFIG_DISPLAY_BUILD=m +export CONFIG_HDCP_QSEECOM=y +export CONFIG_DRM_SDE_VM=y +export CONFIG_QTI_HW_FENCE=y +export CONFIG_QCOM_SPEC_SYNC=y +export CONFIG_QCOM_FSA4480_I2C=y diff --git a/config/gki_kalamadispconf.h b/config/gki_kalamadispconf.h new file 
mode 100644 index 000000000..f88dec0db --- /dev/null +++ b/config/gki_kalamadispconf.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#define CONFIG_DRM_MSM 1 +#define CONFIG_DRM_MSM_SDE 1 +#define CONFIG_SYNC_FILE 1 +#define CONFIG_DRM_MSM_DSI 1 +#define CONFIG_DRM_MSM_DP 1 +#define CONFIG_DRM_MSM_DP_MST 1 +#define CONFIG_DSI_PARSER 1 +#define CONFIG_DRM_SDE_WB 1 +#define CONFIG_DRM_SDE_RSC 1 +#define CONFIG_DRM_MSM_REGISTER_LOGGING 1 +#define CONFIG_MSM_MMRM 1 +#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1 +#define CONFIG_QCOM_MDSS_PLL 1 +#define CONFIG_GKI_DISPLAY 1 +#define CONFIG_MSM_EXT_DISPLAY 1 +#define CONFIG_HDCP_QSEECOM 1 +#define CONFIG_DRM_SDE_VM 1 +#define CONFIG_QTI_HW_FENCE 1 +#define CONFIG_QCOM_SPEC_SYNC 1 +#define CONFIG_QCOM_FSA4480_I2C 1 diff --git a/config/gki_kalamadisptui.conf b/config/gki_kalamadisptui.conf new file mode 100644 index 000000000..54f40609a --- /dev/null +++ b/config/gki_kalamadisptui.conf @@ -0,0 +1,10 @@ +export CONFIG_DRM_MSM=y +export CONFIG_DRM_MSM_SDE=y +export CONFIG_SYNC_FILE=y +export CONFIG_DRM_MSM_DSI=y +export CONFIG_DSI_PARSER=y +export CONFIG_QCOM_MDSS_PLL=y +export CONFIG_DRM_MSM_REGISTER_LOGGING=y +export CONFIG_DISPLAY_BUILD=m +export CONFIG_DRM_SDE_VM=y +export CONFIG_DRM_LOW_MSM_MEM_FOOTPRINT=y diff --git a/config/gki_kalamadisptuiconf.h b/config/gki_kalamadisptuiconf.h new file mode 100644 index 000000000..e201436a0 --- /dev/null +++ b/config/gki_kalamadisptuiconf.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#define CONFIG_DRM_MSM 1 +#define CONFIG_DRM_MSM_SDE 1 +#define CONFIG_SYNC_FILE 1 +#define CONFIG_DRM_MSM_DSI 1 +#define CONFIG_DSI_PARSER 1 +#define CONFIG_DRM_MSM_REGISTER_LOGGING 1 +#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1 +#define CONFIG_QCOM_MDSS_PLL 1 +#define CONFIG_GKI_DISPLAY 1 +#define CONFIG_DRM_SDE_VM 1 +#define CONFIG_DRM_MSM_LOW_MEM_FOOTPRINT 1 diff --git a/config/gki_lahainadisp.conf b/config/gki_lahainadisp.conf new file mode 100644 index 000000000..fb7a9807d --- /dev/null +++ b/config/gki_lahainadisp.conf @@ -0,0 +1,12 @@ +export CONFIG_DRM_MSM=y +export CONFIG_DRM_MSM_SDE=y +export CONFIG_SYNC_FILE=y +export CONFIG_DRM_MSM_DSI=y +export CONFIG_DRM_MSM_DP=y +export CONFIG_DRM_MSM_DP_MST=y +export CONFIG_DSI_PARSER=y +export CONFIG_QCOM_MDSS_PLL=y +export CONFIG_DRM_SDE_WB=y +export CONFIG_DRM_MSM_REGISTER_LOGGING=y +export CONFIG_DRM_SDE_RSC=y +export CONFIG_DISPLAY_BUILD=m diff --git a/config/gki_lahainadispconf.h b/config/gki_lahainadispconf.h new file mode 100644 index 000000000..6d7c15822 --- /dev/null +++ b/config/gki_lahainadispconf.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#define CONFIG_DRM_MSM 1 +#define CONFIG_DRM_MSM_SDE 1 +#define CONFIG_SYNC_FILE 1 +#define CONFIG_DRM_MSM_DSI 1 +#define CONFIG_DRM_MSM_DP 1 +#define CONFIG_DRM_MSM_DP_MST 1 +#define CONFIG_DSI_PARSER 1 +#define CONFIG_DRM_SDE_WB 1 +#define CONFIG_DRM_MSM_REGISTER_LOGGING 1 +#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1 +#define CONFIG_QCOM_MDSS_PLL 1 +#define CONFIG_GKI_DISPLAY 1 +#define CONFIG_DRM_SDE_RSC 1 + diff --git a/config/gki_neodisp.conf b/config/gki_neodisp.conf new file mode 100644 index 000000000..e37085cf0 --- /dev/null +++ b/config/gki_neodisp.conf @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ +export CONFIG_DRM_MSM=y +export CONFIG_DRM_MSM_SDE=y +export CONFIG_SYNC_FILE=y +export CONFIG_DRM_MSM_DSI=y +export CONFIG_DSI_PARSER=y +export CONFIG_QCOM_MDSS_PLL=y +export CONFIG_DRM_MSM_REGISTER_LOGGING=y +export CONFIG_DISPLAY_BUILD=m diff --git a/config/gki_neodispconf.h b/config/gki_neodispconf.h new file mode 100644 index 000000000..c8bf59de1 --- /dev/null +++ b/config/gki_neodispconf.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#define CONFIG_DRM_MSM 1 +#define CONFIG_DRM_MSM_SDE 1 +#define CONFIG_SYNC_FILE 1 +#define CONFIG_DRM_MSM_DSI 1 +#define CONFIG_DSI_PARSER 1 +#define CONFIG_DRM_MSM_REGISTER_LOGGING 1 +#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1 +#define CONFIG_QCOM_MDSS_PLL 1 +#define CONFIG_GKI_DISPLAY 1 diff --git a/config/gki_niobedisp.conf b/config/gki_niobedisp.conf new file mode 100644 index 000000000..cbcac81fc --- /dev/null +++ b/config/gki_niobedisp.conf @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0-only + +export CONFIG_DRM_MSM=y +export CONFIG_DRM_MSM_SDE=y +export CONFIG_SYNC_FILE=y +export CONFIG_DRM_MSM_DSI=y +export CONFIG_DSI_PARSER=y +export CONFIG_QCOM_MDSS_PLL=y +export CONFIG_DRM_SDE_RSC=y +export CONFIG_DRM_SDE_WB=y +export CONFIG_DRM_MSM_REGISTER_LOGGING=y +export CONFIG_MSM_MMRM=y +export CONFIG_DISPLAY_BUILD=m +export CONFIG_DRM_SDE_SYSTEM_SLEEP_DISABLE=y +export CONFIG_DRM_SDE_IPCC=y +export CONFIG_DRM_SDE_MINIDUMP_DISABLE=y diff --git a/config/gki_niobedispconf.h b/config/gki_niobedispconf.h new file mode 100644 index 000000000..5252de9ff --- /dev/null +++ b/config/gki_niobedispconf.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#define CONFIG_DRM_MSM 1 +#define CONFIG_DRM_MSM_SDE 1 +#define CONFIG_SYNC_FILE 1 +#define CONFIG_DRM_MSM_DSI 1 +#define CONFIG_DSI_PARSER 1 +#define CONFIG_DRM_SDE_WB 1 +#define CONFIG_DRM_SDE_RSC 1 +#define CONFIG_DRM_MSM_REGISTER_LOGGING 1 +#define CONFIG_MSM_MMRM 1 +#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1 +#define CONFIG_QCOM_MDSS_PLL 1 +#define CONFIG_GKI_DISPLAY 1 +#define CONFIG_MSM_EXT_DISPLAY 1 +#define CONFIG_DRM_SDE_SYSTEM_SLEEP_DISABLE 1 +#define CONFIG_DRM_SDE_IPCC 1 +#define CONFIG_DRM_SDE_MINIDUMP_DISABLE 1 diff --git a/config/gki_parrotdisp.conf b/config/gki_parrotdisp.conf new file mode 100644 index 000000000..a41c10f8d --- /dev/null +++ b/config/gki_parrotdisp.conf @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0-only + +export CONFIG_DRM_MSM=y +export CONFIG_DRM_MSM_SDE=y +export CONFIG_SYNC_FILE=y +export CONFIG_DRM_MSM_DSI=y +export CONFIG_DSI_PARSER=y +export CONFIG_DRM_SDE_WB=y +export CONFIG_QCOM_MDSS_PLL=y +export CONFIG_DRM_MSM_REGISTER_LOGGING=y +export CONFIG_DISPLAY_BUILD=m diff --git a/config/gki_parrotdispconf.h b/config/gki_parrotdispconf.h new file mode 100644 index 000000000..a29ab23a4 --- /dev/null +++ b/config/gki_parrotdispconf.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#define CONFIG_DRM_MSM 1 +#define CONFIG_DRM_MSM_SDE 1 +#define CONFIG_SYNC_FILE 1 +#define CONFIG_DRM_MSM_DSI 1 +#define CONFIG_DSI_PARSER 1 +#define CONFIG_DRM_SDE_WB 1 +#define CONFIG_DRM_MSM_REGISTER_LOGGING 1 +#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1 +#define CONFIG_QCOM_MDSS_PLL 1 +#define CONFIG_GKI_DISPLAY 1 diff --git a/config/gki_pineappledisp.conf b/config/gki_pineappledisp.conf new file mode 100644 index 000000000..827aa5c77 --- /dev/null +++ b/config/gki_pineappledisp.conf @@ -0,0 +1,18 @@ +export CONFIG_DRM_MSM=y +export CONFIG_DRM_MSM_SDE=y +export CONFIG_SYNC_FILE=y +export CONFIG_DRM_MSM_DSI=y +export CONFIG_DRM_MSM_DP=y +export CONFIG_DRM_MSM_DP_MST=y +export CONFIG_DSI_PARSER=y +export CONFIG_QCOM_MDSS_PLL=y +export CONFIG_DRM_SDE_RSC=y +export CONFIG_DRM_SDE_WB=y +export CONFIG_DRM_MSM_REGISTER_LOGGING=y +export CONFIG_MSM_MMRM=y +export CONFIG_DISPLAY_BUILD=m +export CONFIG_HDCP_QSEECOM=y +export CONFIG_DRM_SDE_VM=y +export CONFIG_QTI_HW_FENCE=y +export CONFIG_QCOM_SPEC_SYNC=y +export CONFIG_QCOM_WCD939X_I2C=y diff --git a/config/gki_pineappledispconf.h b/config/gki_pineappledispconf.h new file mode 100644 index 000000000..a689598ef --- /dev/null +++ b/config/gki_pineappledispconf.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#define CONFIG_DRM_MSM 1 +#define CONFIG_DRM_MSM_SDE 1 +#define CONFIG_SYNC_FILE 1 +#define CONFIG_DRM_MSM_DSI 1 +#define CONFIG_DRM_MSM_DP 1 +#define CONFIG_DRM_MSM_DP_MST 1 +#define CONFIG_DSI_PARSER 1 +#define CONFIG_DRM_SDE_WB 1 +#define CONFIG_DRM_SDE_RSC 1 +#define CONFIG_DRM_MSM_REGISTER_LOGGING 1 +#define CONFIG_MSM_MMRM 1 +#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1 +#define CONFIG_QCOM_MDSS_PLL 1 +#define CONFIG_GKI_DISPLAY 1 +#define CONFIG_MSM_EXT_DISPLAY 1 +#define CONFIG_HDCP_QSEECOM 1 +#define CONFIG_DRM_SDE_VM 1 +#define CONFIG_QTI_HW_FENCE 1 +#define CONFIG_QCOM_SPEC_SYNC 1 +#define CONFIG_QCOM_WCD939X_I2C 1 diff --git a/config/gki_pineappledisptui.conf b/config/gki_pineappledisptui.conf new file mode 100644 index 000000000..54f40609a --- /dev/null +++ b/config/gki_pineappledisptui.conf @@ -0,0 +1,10 @@ +export CONFIG_DRM_MSM=y +export CONFIG_DRM_MSM_SDE=y +export CONFIG_SYNC_FILE=y +export CONFIG_DRM_MSM_DSI=y +export CONFIG_DSI_PARSER=y +export CONFIG_QCOM_MDSS_PLL=y +export CONFIG_DRM_MSM_REGISTER_LOGGING=y +export CONFIG_DISPLAY_BUILD=m +export CONFIG_DRM_SDE_VM=y +export CONFIG_DRM_LOW_MSM_MEM_FOOTPRINT=y diff --git a/config/gki_pineappledisptuiconf.h b/config/gki_pineappledisptuiconf.h new file mode 100644 index 000000000..cdab7050f --- /dev/null +++ b/config/gki_pineappledisptuiconf.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#define CONFIG_DRM_MSM 1 +#define CONFIG_DRM_MSM_SDE 1 +#define CONFIG_SYNC_FILE 1 +#define CONFIG_DRM_MSM_DSI 1 +#define CONFIG_DSI_PARSER 1 +#define CONFIG_DRM_MSM_REGISTER_LOGGING 1 +#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1 +#define CONFIG_QCOM_MDSS_PLL 1 +#define CONFIG_GKI_DISPLAY 1 +#define CONFIG_DRM_SDE_VM 1 +#define CONFIG_DRM_MSM_LOW_MEM_FOOTPRINT 1 diff --git a/config/gki_pittidisp.conf b/config/gki_pittidisp.conf new file mode 100644 index 000000000..bbda16733 --- /dev/null +++ b/config/gki_pittidisp.conf @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0-only + +export CONFIG_DRM_MSM=y +export CONFIG_DRM_MSM_SDE=y +export CONFIG_SYNC_FILE=y +export CONFIG_DRM_MSM_DSI=y +export CONFIG_DSI_PARSER=y +export CONFIG_QCOM_MDSS_PLL=y +export CONFIG_DRM_MSM_REGISTER_LOGGING=y +export CONFIG_DISPLAY_BUILD=m +export CONFIG_QCOM_SPEC_SYNC=y diff --git a/config/gki_pittidispconf.h b/config/gki_pittidispconf.h new file mode 100644 index 000000000..07e22fcbf --- /dev/null +++ b/config/gki_pittidispconf.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#define CONFIG_DRM_MSM 1 +#define CONFIG_DRM_MSM_SDE 1 +#define CONFIG_SYNC_FILE 1 +#define CONFIG_DRM_MSM_DSI 1 +#define CONFIG_DSI_PARSER 1 +#define CONFIG_DRM_MSM_REGISTER_LOGGING 1 +#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1 +#define CONFIG_QCOM_MDSS_PLL 1 +#define CONFIG_GKI_DISPLAY 1 +#define CONFIG_MSM_EXT_DISPLAY 1 +#define CONFIG_QCOM_SPEC_SYNC 1 diff --git a/config/gki_waipiodisp.conf b/config/gki_waipiodisp.conf new file mode 100644 index 000000000..6822eb311 --- /dev/null +++ b/config/gki_waipiodisp.conf @@ -0,0 +1,15 @@ +export CONFIG_DRM_MSM=y +export CONFIG_DRM_MSM_SDE=y +export CONFIG_SYNC_FILE=y +export CONFIG_DRM_MSM_DSI=y +export CONFIG_DRM_MSM_DP=y +export CONFIG_DRM_MSM_DP_MST=y +export CONFIG_DSI_PARSER=y +export CONFIG_QCOM_MDSS_PLL=y +export CONFIG_DRM_SDE_RSC=y +export CONFIG_DRM_SDE_WB=y +export CONFIG_DRM_MSM_REGISTER_LOGGING=y +export CONFIG_MSM_MMRM=y +export CONFIG_DISPLAY_BUILD=m +export CONFIG_DRM_SDE_VM=y +export CONFIG_HDCP_QSEECOM=y diff --git a/config/gki_waipiodispconf.h b/config/gki_waipiodispconf.h new file mode 100644 index 000000000..530e4aa34 --- /dev/null +++ b/config/gki_waipiodispconf.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#define CONFIG_DRM_MSM 1 +#define CONFIG_DRM_MSM_SDE 1 +#define CONFIG_SYNC_FILE 1 +#define CONFIG_DRM_MSM_DSI 1 +#define CONFIG_DRM_MSM_DP 1 +#define CONFIG_DRM_MSM_DP_MST 1 +#define CONFIG_DSI_PARSER 1 +#define CONFIG_DRM_SDE_WB 1 +#define CONFIG_DRM_SDE_RSC 1 +#define CONFIG_DRM_MSM_REGISTER_LOGGING 1 +#define CONFIG_MSM_MMRM 1 +#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1 +#define CONFIG_QCOM_MDSS_PLL 1 +#define CONFIG_GKI_DISPLAY 1 +#define CONFIG_MSM_EXT_DISPLAY 1 +#define CONFIG_DRM_SDE_VM 1 +#define CONFIG_HDCP_QSEECOM 1 diff --git a/config/gki_waipiodisptui.conf b/config/gki_waipiodisptui.conf new file mode 100644 index 000000000..54f40609a --- /dev/null +++ b/config/gki_waipiodisptui.conf @@ -0,0 +1,10 @@ +export CONFIG_DRM_MSM=y +export CONFIG_DRM_MSM_SDE=y +export CONFIG_SYNC_FILE=y +export CONFIG_DRM_MSM_DSI=y +export CONFIG_DSI_PARSER=y +export CONFIG_QCOM_MDSS_PLL=y +export CONFIG_DRM_MSM_REGISTER_LOGGING=y +export CONFIG_DISPLAY_BUILD=m +export CONFIG_DRM_SDE_VM=y +export CONFIG_DRM_LOW_MSM_MEM_FOOTPRINT=y diff --git a/config/gki_waipiodisptuiconf.h b/config/gki_waipiodisptuiconf.h new file mode 100644 index 000000000..a7b1f035c --- /dev/null +++ b/config/gki_waipiodisptuiconf.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#define CONFIG_DRM_MSM 1 +#define CONFIG_DRM_MSM_SDE 1 +#define CONFIG_SYNC_FILE 1 +#define CONFIG_DRM_MSM_DSI 1 +#define CONFIG_DSI_PARSER 1 +#define CONFIG_DRM_MSM_REGISTER_LOGGING 1 +#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1 +#define CONFIG_QCOM_MDSS_PLL 1 +#define CONFIG_GKI_DISPLAY 1 +#define CONFIG_DRM_SDE_VM 1 +#define CONFIG_DRM_MSM_LOW_MEM_FOOTPRINT 1 diff --git a/config/holidisp.conf b/config/holidisp.conf new file mode 100644 index 000000000..0d3a8a467 --- /dev/null +++ b/config/holidisp.conf @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ +export CONFIG_DRM_MSM=y +export CONFIG_DRM_MSM_SDE=y +export CONFIG_DRM_MSM_DP=n +export CONFIG_DRM_MSM_DP_MST=n +export CONFIG_SYNC_FILE=y +export CONFIG_DRM_MSM_DSI=y +export CONFIG_DSI_PARSER=y +export CONFIG_DRM_SDE_WB=n +export CONFIG_DRM_MSM_REGISTER_LOGGING=y +export CONFIG_QCOM_MDSS_PLL=y +export CONFIG_DRM_SDE_RSC=n +export CONFIG_MSM_SDE_ROTATOR=y +export CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y +export CONFIG_DISPLAY_BUILD=y diff --git a/config/holidispconf.h b/config/holidispconf.h new file mode 100644 index 000000000..7e38d1c53 --- /dev/null +++ b/config/holidispconf.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#define CONFIG_DRM_MSM 1 +#define CONFIG_DRM_MSM_SDE 1 +#define CONFIG_SYNC_FILE 1 +#define CONFIG_DRM_MSM_DSI 1 +#define CONFIG_DSI_PARSER 1 +#define CONFIG_DRM_MSM_REGISTER_LOGGING 1 +#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1 +#define CONFIG_QCOM_MDSS_PLL 1 +#define CONFIG_MSM_SDE_ROTATOR 1 +#define CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG 1 diff --git a/config/konadisp.conf b/config/konadisp.conf new file mode 100644 index 000000000..dbbf3c847 --- /dev/null +++ b/config/konadisp.conf @@ -0,0 +1,13 @@ +export CONFIG_DRM_MSM=y +export CONFIG_DRM_MSM_SDE=y +export CONFIG_SYNC_FILE=y +export CONFIG_DRM_MSM_DSI=y +export CONFIG_DRM_MSM_DP=y +export CONFIG_QCOM_MDSS_DP_PLL=y +export CONFIG_DSI_PARSER=y +export CONFIG_DRM_SDE_WB=y +export CONFIG_DRM_MSM_REGISTER_LOGGING=y +export CONFIG_QCOM_MDSS_PLL=y +export CONFIG_MSM_SDE_ROTATOR=y +export CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y +export CONFIG_DRM_SDE_RSC=y diff --git a/config/konadispconf.h b/config/konadispconf.h new file mode 100644 index 000000000..690d4ec79 --- /dev/null +++ b/config/konadispconf.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. 
+ */ + +#define CONFIG_DRM_MSM 1 +#define CONFIG_DRM_MSM_SDE 1 +#define CONFIG_SYNC_FILE 1 +#define CONFIG_DRM_MSM_DSI 1 +#define CONFIG_DRM_MSM_DP 1 +#define CONFIG_QCOM_MDSS_DP_PLL 1 +#define CONFIG_DSI_PARSER 1 +#define CONFIG_DRM_SDE_WB 1 +#define CONFIG_DRM_MSM_REGISTER_LOGGING 1 +#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1 +#define CONFIG_QCOM_MDSS_PLL 1 +#define CONFIG_MSM_SDE_ROTATOR 1 +#define CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG 1 +#define CONFIG_DRM_SDE_RSC 1 + diff --git a/config/lahainadisp.conf b/config/lahainadisp.conf new file mode 100644 index 000000000..0e977c9d7 --- /dev/null +++ b/config/lahainadisp.conf @@ -0,0 +1,13 @@ +export CONFIG_DRM_MSM=y +export CONFIG_DRM_MSM_SDE=y +export CONFIG_DRM_MSM_DP=y +export CONFIG_DRM_MSM_DP_MST=y +export CONFIG_SYNC_FILE=y +export CONFIG_DRM_MSM_DSI=y +export CONFIG_DSI_PARSER=y +export CONFIG_DRM_SDE_WB=y +export CONFIG_DRM_MSM_REGISTER_LOGGING=y +export CONFIG_QCOM_MDSS_PLL=y +export CONFIG_DRM_SDE_RSC=y +export CONFIG_DISPLAY_BUILD=y +export CONFIG_DRM_SDE_VM=y diff --git a/config/lahainadispconf.h b/config/lahainadispconf.h new file mode 100644 index 000000000..e72e0a43f --- /dev/null +++ b/config/lahainadispconf.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ */ + +#define CONFIG_DRM_MSM 1 +#define CONFIG_DRM_MSM_DP 1 +#define CONFIG_DRM_MSM_DP_MST 1 +#define CONFIG_DRM_MSM_SDE 1 +#define CONFIG_SYNC_FILE 1 +#define CONFIG_DRM_MSM_DSI 1 +#define CONFIG_DSI_PARSER 1 +#define CONFIG_DRM_SDE_WB 1 +#define CONFIG_DRM_MSM_REGISTER_LOGGING 1 +#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1 +#define CONFIG_QCOM_MDSS_PLL 1 +#define CONFIG_DRM_SDE_RSC 1 +#define CONFIG_DRM_SDE_VM 1 diff --git a/config/saipdisp.conf b/config/saipdisp.conf new file mode 100644 index 000000000..dbbf3c847 --- /dev/null +++ b/config/saipdisp.conf @@ -0,0 +1,13 @@ +export CONFIG_DRM_MSM=y +export CONFIG_DRM_MSM_SDE=y +export CONFIG_SYNC_FILE=y +export CONFIG_DRM_MSM_DSI=y +export CONFIG_DRM_MSM_DP=y +export CONFIG_QCOM_MDSS_DP_PLL=y +export CONFIG_DSI_PARSER=y +export CONFIG_DRM_SDE_WB=y +export CONFIG_DRM_MSM_REGISTER_LOGGING=y +export CONFIG_QCOM_MDSS_PLL=y +export CONFIG_MSM_SDE_ROTATOR=y +export CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y +export CONFIG_DRM_SDE_RSC=y diff --git a/config/saipdispconf.h b/config/saipdispconf.h new file mode 100644 index 000000000..049024839 --- /dev/null +++ b/config/saipdispconf.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. 
+ */ + +#define CONFIG_DRM_MSM 1 +#define CONFIG_DRM_MSM_SDE 1 +#define CONFIG_SYNC_FILE 1 +#define CONFIG_DRM_MSM_DSI 1 +#define CONFIG_DRM_MSM_DP 1 +#define CONFIG_QCOM_MDSS_DP_PLL 1 +#define CONFIG_DSI_PARSER 1 +#define CONFIG_DRM_SDE_WB 1 +#define CONFIG_DRM_MSM_REGISTER_LOGGING 1 +#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1 +#define CONFIG_QCOM_MDSS_PLL 1 +#define CONFIG_MSM_SDE_ROTATOR 1 +#define CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG 1 +#define CONFIG_DRM_SDE_RSC 1 diff --git a/display_driver_board.mk b/display_driver_board.mk new file mode 100644 index 000000000..122793479 --- /dev/null +++ b/display_driver_board.mk @@ -0,0 +1,15 @@ +#SPDX-License-Identifier: GPL-2.0-only +DISPLAY_DLKM_ENABLE := true +ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true) + ifeq ($(TARGET_KERNEL_DLKM_DISPLAY_OVERRIDE), false) + DISPLAY_DLKM_ENABLE := false + endif +endif + +ifeq ($(DISPLAY_DLKM_ENABLE), true) + ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true) + BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_drm.ko + BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_drm.ko + BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_drm.ko + endif +endif diff --git a/display_driver_build.bzl b/display_driver_build.bzl new file mode 100644 index 000000000..6205a5ca6 --- /dev/null +++ b/display_driver_build.bzl @@ -0,0 +1,128 @@ +load("//build/kernel/kleaf:kernel.bzl", "ddk_module", "ddk_submodule") +load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir") + +def _register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps, config_deps): + processed_config_srcs = {} + nested_config = {} + processed_config_deps = {} + + for config_src_name in config_srcs: + config_src = config_srcs[config_src_name] + + if type(config_src) == "list": + processed_config_srcs[config_src_name] = {True: config_src} + else: + processed_config_srcs[config_src_name] = config_src + if type(config_src) == "dict": + 
nested_config = config_src + + for nested_src, nest_name in nested_config.items(): + if nested_src == True: + processed_config_srcs[config_src_name] = {True: nest_name} + else: + processed_config_srcs[nested_src] = {True: nest_name} + + for config_deps_name in config_deps: + config_dep = config_deps[config_deps_name] + + if type(config_dep) == "list": + processed_config_deps[config_deps_name] = {True: config_dep} + else: + processed_config_deps[config_deps_name] = config_dep + module = struct( + name = name, + path = path, + srcs = srcs, + config_srcs = processed_config_srcs, + config_option = config_option, + deps = deps, + config_deps = processed_config_deps + ) + + module_map[name] = module + +def _get_config_choices(map, options): + choices = [] + for option in map: + choices.extend(map[option].get(option in options,[])) + return choices + +def _get_kernel_build_options(modules, config_options): + all_options = {option: True for option in config_options} + all_options = all_options | {module.config_option: True for module in modules if module.config_option} + return all_options + +def _get_kernel_build_module_srcs(module, options, formatter): + srcs = module.srcs + _get_config_choices(module.config_srcs, options) + module_path = "{}/".format(module.path) if module.path else "" + return ["{}{}".format(module_path, formatter(src)) for src in srcs] + +def _get_kernel_build_module_deps(module, options, formatter): + deps = module.deps + _get_config_choices(module.config_deps, options) + return [formatter(dep) for dep in deps] + +def display_module_entry(hdrs = []): + module_map = {} + + def register(name, path = None, config_option = None, srcs = [], config_srcs = {}, deps = [], config_deps = {}): + _register_module_to_map(module_map, name, path, config_option, srcs, config_srcs, deps, config_deps) + return struct( + register = register, + get = module_map.get, + hdrs = hdrs, + module_map = module_map + ) + +def define_target_variant_modules(target, variant, 
registry, modules, config_options = [], lunch_target=None): + + kernel_build_hdr = "{}_{}".format(target, variant) + kernel_build_label = "//msm-kernel:{}".format(kernel_build_hdr) + + if lunch_target != None: + kernel_build = "{}_{}_{}".format(target, variant, lunch_target) + else: + kernel_build = "{}_{}".format(target, variant) + + modules = [registry.get(module_name) for module_name in modules] + options = _get_kernel_build_options(modules, config_options) + build_print = lambda message : print("{}: {}".format(kernel_build, message)) + formatter = lambda s : s.replace("%b", kernel_build).replace("%t", target) + formatter_hdr = lambda s : s.replace("%b", kernel_build_hdr).replace("%t", target) + headers = ["//msm-kernel:all_headers"] + registry.hdrs + all_module_rules = [] + + for module in modules: + rule_name = "{}_{}".format(kernel_build, module.name) + module_srcs = _get_kernel_build_module_srcs(module, options, formatter) + print(rule_name) + if not module_srcs: + continue + + ddk_submodule( + name = rule_name, + srcs = module_srcs, + out = "{}.ko".format(module.name), + deps = headers + _get_kernel_build_module_deps(module, options, formatter_hdr), + local_defines = options.keys() + + select({ + ":factory_build" : [ "CONFIG_FACTORY_BUILD" ], + "//conditions:default" : [], + }), + ) + all_module_rules.append(rule_name) + + ddk_module( + name = "{}_display_drivers".format(kernel_build), + kernel_build = kernel_build_label, + deps = all_module_rules, + ) + copy_to_dist_dir( + name = "{}_display_drivers_dist".format(kernel_build), + data = [":{}_display_drivers".format(kernel_build)], + dist_dir = "out/target/product/{}/dlkm/lib/modules/".format(target), + flat = True, + wipe_dist_dir = False, + allow_duplicate_filenames = False, + mode_overrides = {"**/*": "644"}, + log = "info", + ) diff --git a/display_driver_product.mk b/display_driver_product.mk new file mode 100644 index 000000000..9137f94c0 --- /dev/null +++ b/display_driver_product.mk @@ -0,0 +1,14 @@ 
+# SPDX-License-Identifier: GPL-2.0-only + +DISPLAY_DLKM_ENABLE := true +ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true) + ifeq ($(TARGET_KERNEL_DLKM_DISPLAY_OVERRIDE), false) + DISPLAY_DLKM_ENABLE := false + endif +endif + +ifeq ($(DISPLAY_DLKM_ENABLE), true) + PRODUCT_PACKAGES += msm_drm.ko +endif + +DISPLAY_MODULES_DRIVER := msm_drm.ko \ No newline at end of file diff --git a/display_kernel_headers.py b/display_kernel_headers.py new file mode 100644 index 000000000..9a7fa15f2 --- /dev/null +++ b/display_kernel_headers.py @@ -0,0 +1,93 @@ + # Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + # + # This program is free software; you can redistribute it and/or modify it + # under the terms of the GNU General Public License version 2 as published by + # the Free Software Foundation. + # + # This program is distributed in the hope that it will be useful, but WITHOUT + # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + # more details. + # + # You should have received a copy of the GNU General Public License along with + # this program. If not, see . 
def run_headers_install(verbose, gen_dir, headers_install, unifdef, prefix, h):
    """Run headers_install.sh on one UAPI header.

    Args:
        verbose: print the command being executed.
        gen_dir: root of the generated-header output tree.
        headers_install: path to headers_install.sh.
        unifdef: path to the unifdef tool (exported as LOC_UNIFDEF).
        prefix: the .../include/uapi/ prefix the header must carry.
        h: absolute path of the header to process.

    Returns:
        True on success, False on a prefix mismatch or install failure.
    """
    if not h.startswith(prefix):
        print('error: expected prefix [%s] on header [%s]' % (prefix, h),
              file=sys.stderr)
        return False

    # Mirror the header's path (relative to the uapi prefix) under gen_dir.
    out_h = os.path.join(gen_dir, h[len(prefix):])
    # The output tree may not contain intermediate directories yet.
    os.makedirs(os.path.dirname(out_h), exist_ok=True)

    env = os.environ.copy()
    env["LOC_UNIFDEF"] = unifdef  # headers_install.sh reads the tool path here
    cmd = ["sh", headers_install, h, out_h]

    if verbose:
        print('run_headers_install: cmd is %s' % cmd)

    result = subprocess.call(cmd, env=env)

    if result != 0:
        print('error: run_headers_install: cmd %s failed %d' % (cmd, result),
              file=sys.stderr)
        return False
    return True

def gen_display_headers(verbose, gen_dir, headers_install, unifdef,
                        display_include_uapi):
    """Install every display UAPI header; return the number of failures."""
    error_count = 0
    for h in display_include_uapi:
        # Derive the per-header prefix; trailing os.sep keeps the joined
        # output path relative in run_headers_install.
        display_uapi_include_prefix = os.path.join(
            h.split('/include/uapi')[0], 'include', 'uapi') + os.sep
        if not run_headers_install(
                verbose, gen_dir, headers_install, unifdef,
                display_uapi_include_prefix, h):
            error_count += 1
    return error_count
+ parser.add_argument( + '--verbose', action='store_true', + help='Print output that describes the workings of this script.') + parser.add_argument( + '--header_arch', required=True, + help='The arch for which to generate headers.') + parser.add_argument( + '--gen_dir', required=True, + help='Where to place the generated files.') + parser.add_argument( + '--display_include_uapi', required=True, nargs='*', + help='The list of techpack/*/include/uapi header files.') + parser.add_argument( + '--headers_install', required=True, + help='The headers_install tool to process input headers.') + parser.add_argument( + '--unifdef', + required=True, + help='The unifdef tool used by headers_install.') + + args = parser.parse_args() + + if args.verbose: + print('header_arch [%s]' % args.header_arch) + print('gen_dir [%s]' % args.gen_dir) + print('display_include_uapi [%s]' % args.display_include_uapi) + print('headers_install [%s]' % args.headers_install) + print('unifdef [%s]' % args.unifdef) + + return gen_display_headers(args.verbose, args.gen_dir, + args.headers_install, args.unifdef, args.display_include_uapi) + +if __name__ == '__main__': + sys.exit(main()) + diff --git a/display_modules.bzl b/display_modules.bzl new file mode 100644 index 000000000..d606e736f --- /dev/null +++ b/display_modules.bzl @@ -0,0 +1,223 @@ +load(":display_driver_build.bzl", "display_module_entry") + +display_driver_modules = display_module_entry([ + ":display_drivers_headers", + "//msm-kernel:mi_irq_headers", + ]) +module_entry = display_driver_modules.register + +#---------- MSM-DRM MODULE ------------------------- + +module_entry( + name = "msm_drm", + config_option = "CONFIG_DRM_MSM", + path = None, + config_srcs = { + "CONFIG_HDCP_QSEECOM": [ + "hdcp/msm_hdcp.c", + "msm/dp/dp_hdcp2p2.c", + "msm/sde_hdcp_1x.c", + "msm/sde_hdcp_2x.c", + ], + "CONFIG_DRM_SDE_VM" : [ + "msm/sde/sde_vm_common.c", + "msm/sde/sde_vm_primary.c", + "msm/sde/sde_vm_trusted.c", + "msm/sde/sde_vm_msgq.c", + ], + 
"CONFIG_DRM_MSM_DP" : [ + "msm/dp/dp_altmode.c", + "msm/dp/dp_parser.c", + "msm/dp/dp_power.c", + "msm/dp/dp_catalog.c", + "msm/dp/dp_catalog_v420.c", + "msm/dp/dp_catalog_v200.c", + "msm/dp/dp_aux.c", + "msm/dp/dp_panel.c", + "msm/dp/dp_link.c", + "msm/dp/dp_ctrl.c", + "msm/dp/dp_audio.c", + "msm/dp/dp_debug.c", + "msm/dp/dp_hpd.c", + "msm/dp/dp_aux_bridge.c", + "msm/dp/dp_bridge_hpd.c", + "msm/dp/dp_mst_sim.c", + "msm/dp/dp_mst_sim_helper.c", + "msm/dp/dp_gpio_hpd.c", + "msm/dp/dp_lphw_hpd.c", + "msm/dp/dp_display.c", + "msm/dp/dp_drm.c", + "msm/dp/dp_pll.c", + "msm/dp/dp_pll_5nm.c", + "msm/dp/dp_pll_4nm.c", + ], + "CONFIG_DRM_MSM_DP_MST" : [ + "msm/dp/dp_mst_drm.c", + ], + "CONFIG_DRM_MSM_DP_USBPD_LEGACY" : [ + "msm/dp/dp_usbpd.c", + ], + "CONFIG_DRM_MSM_SDE" : [ + "msm/sde/sde_crtc.c", + "msm/sde/sde_encoder.c", + "msm/sde/sde_encoder_dce.c", + "msm/sde/sde_encoder_phys_vid.c", + "msm/sde/sde_encoder_phys_cmd.c", + "msm/sde/sde_irq.c", + "msm/sde/sde_core_irq.c", + "msm/sde/sde_core_perf.c", + "msm/sde/sde_rm.c", + "msm/sde/sde_kms_utils.c", + "msm/sde/sde_kms.c", + "msm/sde/sde_plane.c", + "msm/sde/sde_connector.c", + "msm/sde/sde_color_processing.c", + "msm/sde/sde_vbif.c", + "msm/sde_dbg.c", + "msm/sde_dbg_evtlog.c", + "msm/sde_io_util.c", + "msm/sde_vm_event.c", + "msm/sde/sde_hw_reg_dma_v1_color_proc.c", + "msm/sde/sde_hw_color_proc_v4.c", + "msm/sde/sde_hw_ad4.c", + "msm/sde/sde_hw_uidle.c", + "msm/sde_edid_parser.c", + "msm/sde/sde_hw_catalog.c", + "msm/sde/sde_hw_cdm.c", + "msm/sde/sde_hw_dspp.c", + "msm/sde/sde_hw_intf.c", + "msm/sde/sde_hw_lm.c", + "msm/sde/sde_hw_ctl.c", + "msm/sde/sde_hw_util.c", + "msm/sde/sde_hw_sspp.c", + "msm/sde/sde_hw_wb.c", + "msm/sde/sde_hw_pingpong.c", + "msm/sde/sde_hw_top.c", + "msm/sde/sde_hw_interrupts.c", + "msm/sde/sde_hw_vbif.c", + "msm/sde/sde_formats.c", + "msm/sde_power_handle.c", + "msm/sde/sde_hw_color_processing_v1_7.c", + "msm/sde/sde_reg_dma.c", + "msm/sde/sde_hw_reg_dma_v1.c", + "msm/sde/sde_hw_dsc.c", + 
"msm/sde/sde_hw_dsc_1_2.c", + "msm/sde/sde_hw_vdc.c", + "msm/sde/sde_hw_ds.c", + "msm/sde/sde_fence.c", + "msm/sde/sde_hw_qdss.c", + "msm/sde_dsc_helper.c", + "msm/sde_vdc_helper.c", + "msm/sde/sde_hw_dnsc_blur.c", + "msm/sde/sde_hw_rc.c", + ], + "CONFIG_DRM_SDE_WB" : [ + "msm/sde/sde_wb.c", + "msm/sde/sde_encoder_phys_wb.c" + ], + "CONFIG_DRM_SDE_RSC" : [ + "msm/sde_rsc.c", + "msm/sde_rsc_hw.c", + "msm/sde_rsc_hw_v3.c", + ], + "CONFIG_DRM_MSM_DSI" : [ + "msm/dsi/dsi_phy.c", + "msm/dsi/dsi_pwr.c", + "msm/dsi/dsi_phy_hw_v3_0.c", + "msm/dsi/dsi_phy_hw_v4_0.c", + "msm/dsi/dsi_phy_hw_v5_0.c", + "msm/dsi/dsi_phy_timing_calc.c", + "msm/dsi/dsi_phy_timing_v3_0.c", + "msm/dsi/dsi_phy_timing_v4_0.c", + "msm/dsi/dsi_pll.c", + "msm/dsi/dsi_pll_5nm.c", + "msm/dsi/dsi_pll_4nm.c", + "msm/dsi/dsi_ctrl_hw_cmn.c", + "msm/dsi/dsi_ctrl_hw_2_2.c", + "msm/dsi/dsi_ctrl.c", + "msm/dsi/dsi_catalog.c", + "msm/dsi/dsi_drm.c", + "msm/dsi/dsi_display.c", + "msm/dsi/dsi_panel.c", + "msm/dsi/dsi_clk_manager.c", + "msm/dsi/dsi_display_test.c", + "msm/dsi/lcd_bias.c", + ], + "CONFIG_DRM_MSM_MI_DISP" : [ + "msm/mi_disp/mi_cooling_device.c", + "msm/mi_disp/mi_disp_core.c", + "msm/mi_disp/mi_disp_debugfs.c", + "msm/mi_disp/mi_disp_feature.c", + "msm/mi_disp/mi_disp_file.c", + "msm/mi_disp/mi_disp_flatmode.c", + "msm/mi_disp/mi_disp_lhbm.c", + "msm/mi_disp/mi_disp_log.c", + "msm/mi_disp/mi_disp_parser.c", + "msm/mi_disp/mi_disp_print.c", + "msm/mi_disp/mi_disp_procfs.c", + "msm/mi_disp/mi_disp_sysfs.c", + "msm/mi_disp/mi_dsi_display.c", + "msm/mi_disp/mi_dsi_panel.c", + "msm/mi_disp/mi_dsi_panel_count.c", + "msm/mi_disp/mi_hwconf_manager.c", + "msm/mi_disp/mi_sde_connector.c", + "msm/mi_disp/mi_sde_crtc.c", + "msm/mi_disp/mi_sde_encoder.c", + ], + "CONFIG_DSI_PARSER" : [ + "msm/dsi/dsi_parser.c", + ], + "CONFIG_THERMAL_OF" : [ + "msm/msm_cooling_device.c", + ], + "CONFIG_DRM_MSM" : [ + "msm/msm_atomic.c", + "msm/msm_fb.c", + "msm/msm_drv.c", + "msm/msm_gem.c", + "msm/msm_gem_prime.c", + 
"msm/msm_gem_vma.c", + "msm/msm_smmu.c", + "msm/msm_prop.c", + ], + "CONFIG_MSM_SDE_ROTATOR":{ + True: [ + "rotator/sde_rotator_dev.c", + "rotator/sde_rotator_core.c", + "rotator/sde_rotator_base.c", + "rotator/sde_rotator_formats.c", + "rotator/sde_rotator_util.c", + "rotator/sde_rotator_io_util.c", + "rotator/sde_rotator_smmu.c", + "rotator/sde_rotator_r1_wb.c", + "rotator/sde_rotator_r1_pipe.c", + "rotator/sde_rotator_r1_ctl.c", + "rotator/sde_rotator_r1.c", + "rotator/sde_rotator_r3.c"], + "CONFIG_SYNC_FILE":["rotator/sde_rotator_sync.c"], + "CONFIG_DEBUG_FS":["rotator/sde_rotator_debug.c", + "rotator/sde_rotator_r1_debug.c", + "rotator/sde_rotator_r3_debug.c"], + }, + }, + config_deps = { + "CONFIG_DRM_MSM" : [ + "//vendor/qcom/opensource/touch-drivers:%b_touch_drivers" + ], + "CONFIG_HDCP_QSEECOM" : [ + "//vendor/qcom/opensource/securemsm-kernel:%b_hdcp_qseecom_dlkm" + ], + "CONFIG_MSM_MMRM" : [ + "//vendor/qcom/opensource/mmrm-driver:%b_mmrm_driver" + ], + "CONFIG_QCOM_SPEC_SYNC" : [ + "//vendor/qcom/opensource/mm-drivers/sync_fence:%b_sync_fence" + ], + "CONFIG_QTI_HW_FENCE" : [ + "//vendor/qcom/opensource/mm-drivers/hw_fence:%b_msm_hw_fence" + ], + "CONFIG_MSM_EXT_DISPLAY" : [ + "//vendor/qcom/opensource/mm-drivers/msm_ext_display:%b_msm_ext_display" + ], + } +) diff --git a/hdcp/msm_hdcp.c b/hdcp/msm_hdcp.c new file mode 100644 index 000000000..16292b890 --- /dev/null +++ b/hdcp/msm_hdcp.c @@ -0,0 +1,342 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
+ */ + +#define pr_fmt(fmt) "[msm-hdcp] %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CLASS_NAME "hdcp" +#define DRIVER_NAME "msm_hdcp" + +struct msm_hdcp { + struct platform_device *pdev; + dev_t dev_num; + struct cdev cdev; + struct class *class; + struct device *device; + struct HDCP_V2V1_MSG_TOPOLOGY cached_tp; + u32 tp_msgid; + void *client_ctx; + void (*cb)(void *ctx, u8 data); +}; + +void msm_hdcp_register_cb(struct device *dev, void *ctx, + void (*cb)(void *ctx, u8 data)) +{ + struct msm_hdcp *hdcp = NULL; + + if (!dev) { + pr_err("invalid device pointer\n"); + return; + } + + hdcp = dev_get_drvdata(dev); + if (!hdcp) { + pr_err("invalid driver pointer\n"); + return; + } + + hdcp->cb = cb; + hdcp->client_ctx = ctx; +} +EXPORT_SYMBOL(msm_hdcp_register_cb); + +void msm_hdcp_notify_topology(struct device *dev) +{ + char *envp[4]; + char tp[SZ_16]; + char ver[SZ_16]; + struct msm_hdcp *hdcp = NULL; + + if (!dev) { + pr_err("invalid device pointer\n"); + return; + } + + hdcp = dev_get_drvdata(dev); + if (!hdcp) { + pr_err("invalid driver pointer\n"); + return; + } + + snprintf(tp, SZ_16, "%d", DOWN_CHECK_TOPOLOGY); + snprintf(ver, SZ_16, "%d", HDCP_V1_TX); + + envp[0] = "HDCP_MGR_EVENT=MSG_READY"; + envp[1] = tp; + envp[2] = ver; + envp[3] = NULL; + + kobject_uevent_env(&hdcp->device->kobj, KOBJ_CHANGE, envp); +} +EXPORT_SYMBOL(msm_hdcp_notify_topology); + +void msm_hdcp_cache_repeater_topology(struct device *dev, + struct HDCP_V2V1_MSG_TOPOLOGY *tp) +{ + struct msm_hdcp *hdcp = NULL; + + if (!dev || !tp) { + pr_err("invalid input\n"); + return; + } + + hdcp = dev_get_drvdata(dev); + if (!hdcp) { + pr_err("invalid driver pointer\n"); + return; + } + + memcpy(&hdcp->cached_tp, tp, + sizeof(struct HDCP_V2V1_MSG_TOPOLOGY)); +} +EXPORT_SYMBOL(msm_hdcp_cache_repeater_topology); + +static ssize_t tp_show(struct device *dev, struct device_attribute 
*attr, + char *buf) +{ + ssize_t ret = 0; + struct msm_hdcp *hdcp = NULL; + + if (!dev) { + pr_err("invalid device pointer\n"); + return -ENODEV; + } + + hdcp = dev_get_drvdata(dev); + if (!hdcp) { + pr_err("invalid driver pointer\n"); + return -ENODEV; + } + + switch (hdcp->tp_msgid) { + case DOWN_CHECK_TOPOLOGY: + case DOWN_REQUEST_TOPOLOGY: + buf[MSG_ID_IDX] = hdcp->tp_msgid; + buf[RET_CODE_IDX] = HDCP_AUTHED; + ret = HEADER_LEN; + + memcpy(buf + HEADER_LEN, &hdcp->cached_tp, + sizeof(struct HDCP_V2V1_MSG_TOPOLOGY)); + + ret += sizeof(struct HDCP_V2V1_MSG_TOPOLOGY); + + /* reset the flag once the data is written back to user space */ + hdcp->tp_msgid = DOWN_REQUEST_TOPOLOGY; + break; + default: + ret = -EINVAL; + } + + return ret; +} + +static ssize_t tp_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int msgid = 0; + ssize_t ret = count; + struct msm_hdcp *hdcp = NULL; + + if (!dev) { + pr_err("invalid device pointer\n"); + return -ENODEV; + } + + hdcp = dev_get_drvdata(dev); + if (!hdcp) { + pr_err("invalid driver pointer\n"); + return -ENODEV; + } + + msgid = buf[0]; + + switch (msgid) { + case DOWN_CHECK_TOPOLOGY: + case DOWN_REQUEST_TOPOLOGY: + hdcp->tp_msgid = msgid; + break; + default: + ret = -EINVAL; + } + + return ret; +} + +static ssize_t min_level_change_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int rc; + int min_enc_lvl; + ssize_t ret = count; + struct msm_hdcp *hdcp = NULL; + + if (!dev) { + pr_err("invalid device pointer\n"); + return -ENODEV; + } + + hdcp = dev_get_drvdata(dev); + if (!hdcp) { + pr_err("invalid driver pointer\n"); + return -ENODEV; + } + + rc = kstrtoint(buf, 10, &min_enc_lvl); + if (rc) { + pr_err("kstrtoint failed. 
rc=%d\n", rc); + return -EINVAL; + } + + if (hdcp->cb && hdcp->client_ctx) + hdcp->cb(hdcp->client_ctx, min_enc_lvl); + + return ret; +} + +static DEVICE_ATTR_RW(tp); + +static DEVICE_ATTR_WO(min_level_change); + +static struct attribute *msm_hdcp_fs_attrs[] = { + &dev_attr_tp.attr, + &dev_attr_min_level_change.attr, + NULL +}; + +static struct attribute_group msm_hdcp_fs_attr_group = { + .attrs = msm_hdcp_fs_attrs +}; + +static int msm_hdcp_open(struct inode *inode, struct file *file) +{ + return 0; +} + +static int msm_hdcp_close(struct inode *inode, struct file *file) +{ + return 0; +} + +static const struct file_operations msm_hdcp_fops = { + .owner = THIS_MODULE, + .open = msm_hdcp_open, + .release = msm_hdcp_close, +}; + +static const struct of_device_id msm_hdcp_dt_match[] = { + { .compatible = "qcom,msm-hdcp",}, + {} +}; + +MODULE_DEVICE_TABLE(of, msm_hdcp_dt_match); + +static int msm_hdcp_probe(struct platform_device *pdev) +{ + int ret; + struct msm_hdcp *hdcp; + + hdcp = devm_kzalloc(&pdev->dev, sizeof(struct msm_hdcp), GFP_KERNEL); + if (!hdcp) + return -ENOMEM; + + hdcp->pdev = pdev; + + platform_set_drvdata(pdev, hdcp); + + ret = alloc_chrdev_region(&hdcp->dev_num, 0, 1, DRIVER_NAME); + if (ret < 0) { + pr_err("alloc_chrdev_region failed ret = %d\n", ret); + return ret; + } + + hdcp->class = class_create(THIS_MODULE, CLASS_NAME); + if (IS_ERR(hdcp->class)) { + ret = PTR_ERR(hdcp->class); + pr_err("couldn't create class rc = %d\n", ret); + goto error_class_create; + } + + hdcp->device = device_create(hdcp->class, NULL, + hdcp->dev_num, hdcp, DRIVER_NAME); + if (IS_ERR(hdcp->device)) { + ret = PTR_ERR(hdcp->device); + pr_err("device_create failed %d\n", ret); + goto error_class_device_create; + } + + cdev_init(&hdcp->cdev, &msm_hdcp_fops); + ret = cdev_add(&hdcp->cdev, MKDEV(MAJOR(hdcp->dev_num), 0), 1); + if (ret < 0) { + pr_err("cdev_add failed %d\n", ret); + goto error_cdev_add; + } + + ret = sysfs_create_group(&hdcp->device->kobj, 
/**
 * msm_hdcp_remove() - unbind callback for the msm_hdcp platform driver.
 * @pdev: platform device being removed
 *
 * Tears down everything msm_hdcp_probe() set up, in reverse order:
 * sysfs group, cdev, device node, class, then the chrdev region.
 *
 * Return: 0 on success, -ENODEV if no driver data is attached.
 */
static int msm_hdcp_remove(struct platform_device *pdev)
{
	struct msm_hdcp *hdcp;

	hdcp = platform_get_drvdata(pdev);
	if (!hdcp)
		return -ENODEV;

	/* Reverse order of acquisition in probe; hdcp itself is devm-managed. */
	sysfs_remove_group(&hdcp->device->kobj,
			&msm_hdcp_fs_attr_group);
	cdev_del(&hdcp->cdev);
	device_destroy(hdcp->class, hdcp->dev_num);
	class_destroy(hdcp->class);
	unregister_chrdev_region(hdcp->dev_num, 1);

	return 0;
}

static struct platform_driver msm_hdcp_driver = {
	.probe = msm_hdcp_probe,
	.remove = msm_hdcp_remove,
	.driver = {
		.name = "msm_hdcp",
		.of_match_table = msm_hdcp_dt_match,
		.pm = NULL,
	}
};

/*
 * No module_init()/module_exit() here: registration is driven by the
 * containing module, which calls these entry points explicitly —
 * presumably from the msm_drm module init path (verify against caller).
 */
void __init msm_hdcp_register(void)
{
	platform_driver_register(&msm_hdcp_driver);
}

void __exit msm_hdcp_unregister(void)
{
	platform_driver_unregister(&msm_hdcp_driver);
}
+ */ + +#ifndef __MSM_HDCP_H +#define __MSM_HDCP_H +#include +#include "hdcp/msm_hdmi_hdcp_mgr.h" + +#if IS_ENABLED(CONFIG_HDCP_QSEECOM) +void msm_hdcp_notify_topology(struct device *dev); +void msm_hdcp_cache_repeater_topology(struct device *dev, + struct HDCP_V2V1_MSG_TOPOLOGY *tp); +void msm_hdcp_register_cb(struct device *dev, void *ctx, + void (*cb)(void *ctx, u8 data)); +#else +static inline void msm_hdcp_notify_topology(struct device *dev) +{ +} + +static inline void msm_hdcp_cache_repeater_topology(struct device *dev, + struct HDCP_V2V1_MSG_TOPOLOGY *tp) +{ +} + +static inline void msm_hdcp_register_cb(struct device *dev, void *ctx, + void (*cb)(void *ctx, u8 data)) +{ +} +#endif /* CONFIG_HDCP_QSEECOM*/ + +#endif /* __MSM_HDCP_H */ diff --git a/include/linux/sde_io_util.h b/include/linux/sde_io_util.h new file mode 100644 index 000000000..76b1f2a80 --- /dev/null +++ b/include/linux/sde_io_util.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012, 2017-2021, The Linux Foundation. All rights reserved. + */ + +#ifndef __SDE_IO_UTIL_H__ +#define __SDE_IO_UTIL_H__ + +#include +#include +#include +#include +#include +#include + +#ifdef DEBUG +#define DEV_DBG(fmt, args...) pr_err(fmt, ##args) +#else +#define DEV_DBG(fmt, args...) pr_debug(fmt, ##args) +#endif +#define DEV_INFO(fmt, args...) pr_info(fmt, ##args) +#define DEV_WARN(fmt, args...) pr_warn(fmt, ##args) +#define DEV_ERR(fmt, args...) 
pr_err(fmt, ##args) + +struct dss_io_data { + u32 len; + void __iomem *base; +}; + +void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug); +u32 dss_reg_r(struct dss_io_data *io, u32 offset, u32 debug); +void dss_reg_dump(void __iomem *base, u32 len, const char *prefix, u32 debug); + +#define DSS_REG_W_ND(io, offset, val) dss_reg_w(io, offset, val, false) +#define DSS_REG_W(io, offset, val) dss_reg_w(io, offset, val, true) +#define DSS_REG_R_ND(io, offset) dss_reg_r(io, offset, false) +#define DSS_REG_R(io, offset) dss_reg_r(io, offset, true) + +enum dss_vreg_type { + DSS_REG_LDO, + DSS_REG_VS, +}; + +struct dss_vreg { + struct regulator *vreg; /* vreg handle */ + char vreg_name[32]; + int min_voltage; + int max_voltage; + int enable_load; + int disable_load; + int pre_on_sleep; + int post_on_sleep; + int pre_off_sleep; + int post_off_sleep; +}; + +struct dss_gpio { + unsigned int gpio; + unsigned int value; + char gpio_name[32]; +}; + +enum dss_clk_type { + DSS_CLK_AHB, /* no set rate. 
rate controlled through rpm */ + DSS_CLK_PCLK, + DSS_CLK_MMRM, /* set rate called through mmrm driver */ + DSS_CLK_OTHER, +}; + +struct dss_clk_mmrm_cb { + void *phandle; + struct dss_clk *clk; +}; + +struct dss_clk_mmrm { + unsigned int clk_id; + unsigned int flags; + struct mmrm_client *mmrm_client; + struct dss_clk_mmrm_cb *mmrm_cb_data; + unsigned long mmrm_requested_clk; + wait_queue_head_t mmrm_cb_wq; +}; + +struct dss_clk { + struct clk *clk; /* clk handle */ + char clk_name[32]; + enum dss_clk_type type; + unsigned long rate; + unsigned long max_rate; + struct dss_clk_mmrm mmrm; +}; + +struct dss_module_power { + unsigned int num_vreg; + struct dss_vreg *vreg_config; + unsigned int num_gpio; + struct dss_gpio *gpio_config; + unsigned int num_clk; + struct dss_clk *clk_config; +}; + +int msm_dss_ioremap_byname(struct platform_device *pdev, + struct dss_io_data *io_data, const char *name); +void msm_dss_iounmap(struct dss_io_data *io_data); +int msm_dss_get_io_mem(struct platform_device *pdev, + struct list_head *mem_list); +void msm_dss_clean_io_mem(struct list_head *mem_list); +int msm_dss_get_pmic_io_mem(struct platform_device *pdev, + struct list_head *mem_list); +int msm_dss_get_gpio_io_mem(const int gpio_pin, struct list_head *mem_list); +int msm_dss_get_io_irq(struct platform_device *pdev, + struct list_head *irq_list, u32 label); +void msm_dss_clean_io_irq(struct list_head *irq_list); +int msm_dss_enable_gpio(struct dss_gpio *in_gpio, int num_gpio, int enable); +int msm_dss_gpio_enable(struct dss_gpio *in_gpio, int num_gpio, int enable); + +int msm_dss_get_vreg(struct device *dev, struct dss_vreg *in_vreg, + int num_vreg, int enable); +int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable); + +int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk); +int msm_dss_mmrm_register(struct device *dev, struct dss_module_power *mp, + int (*cb_fnc)(struct mmrm_client_notifier_data *data), void *phandle, + bool 
*mmrm_enable); +void msm_dss_mmrm_deregister(struct device *dev, struct dss_module_power *mp); +void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk); +int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk); +int msm_dss_single_clk_set_rate(struct dss_clk *clk); +int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable); + +int sde_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr, + uint8_t reg_offset, uint8_t *read_buf); +int sde_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr, + uint8_t reg_offset, uint8_t *value); + +#endif /* __SDE_IO_UTIL_H__ */ diff --git a/include/linux/sde_rsc.h b/include/linux/sde_rsc.h new file mode 100644 index 000000000..ecf2b4f51 --- /dev/null +++ b/include/linux/sde_rsc.h @@ -0,0 +1,360 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _SDE_RSC_H_ +#define _SDE_RSC_H_ + +#include + +/* primary display rsc index */ +#define SDE_RSC_INDEX 0 + +#define MAX_RSC_CLIENT_NAME_LEN 128 +#define NUM_RSC_PROFILING_COUNTERS 3 + +/* DRM Object IDs are numbered excluding 0, use 0 to indicate invalid CRTC */ +#define SDE_RSC_INVALID_CRTC_ID 0 + +/** + * event will be triggered before sde core power collapse, + * mdss gdsc is still on + */ +#define SDE_RSC_EVENT_PRE_CORE_PC 0x1 +/** + * event will be triggered after sde core collapse complete, + * mdss gdsc is off now + */ +#define SDE_RSC_EVENT_POST_CORE_PC 0x2 +/** + * event will be triggered before restoring the sde core from power collapse, + * mdss gdsc is still off + */ +#define SDE_RSC_EVENT_PRE_CORE_RESTORE 0x4 +/** + * event will be triggered after restoring the sde core from power collapse, + * mdss gdsc is on now + */ +#define SDE_RSC_EVENT_POST_CORE_RESTORE 0x8 +/** + * event attached with solver state enabled + * all clients in clk_state or cmd_state + */ +#define SDE_RSC_EVENT_SOLVER_ENABLED 0x10 +/** + * event attached with solver state 
disabled + * one of the client requested for vid state + */ +#define SDE_RSC_EVENT_SOLVER_DISABLED 0x20 + +/** + * sde_rsc_client_type: sde rsc client type information + * SDE_RSC_PRIMARY_DISP_CLIENT: A primary display client which can request + * vid or cmd state switch. + * SDE_RSC_EXTERNAL_DISPLAY_CLIENT:An external display client which can + * request only clk state switch. + * SDE_RSC_CLK_CLIENT: A clk client request for only rsc clocks + * enabled and mode_2 exit state. + */ +enum sde_rsc_client_type { + SDE_RSC_PRIMARY_DISP_CLIENT, + SDE_RSC_EXTERNAL_DISP_CLIENT, + SDE_RSC_CLK_CLIENT, + SDE_RSC_INVALID_CLIENT, +}; + +/** + * sde_rsc_state: sde rsc state information + * SDE_RSC_IDLE_STATE: A client requests for idle state when there is no + * pixel or cmd transfer expected. An idle vote from + * all clients lead to power collapse state. + * SDE_RSC_CLK_STATE: A client requests for clk state when it wants to + * only avoid mode-2 entry/exit. For ex: V4L2 driver, + * sde power handle, etc. + * SDE_RSC_CMD_STATE: A client requests for cmd state when it wants to + * enable the solver mode. + * SDE_RSC_VID_STATE: A client requests for vid state it wants to avoid + * solver enable because client is fetching data from + * continuously. + */ +enum sde_rsc_state { + SDE_RSC_IDLE_STATE, + SDE_RSC_CLK_STATE, + SDE_RSC_CMD_STATE, + SDE_RSC_VID_STATE, +}; + +/** + * struct sde_rsc_client: stores the rsc client for sde driver + * @name: name of the client + * @current_state: current client state + * @crtc_id: crtc_id associated with this rsc client. + * @rsc_index: rsc index of a client - only index "0" valid. + * @id: Index of client. 
It will be assigned during client_create call + * @client_type: check sde_rsc_client_type information + * @list: list to attach client master list + */ +struct sde_rsc_client { + char name[MAX_RSC_CLIENT_NAME_LEN]; + short current_state; + int crtc_id; + u32 rsc_index; + u32 id; + enum sde_rsc_client_type client_type; + struct list_head list; +}; + +/** + * struct sde_rsc_event: local event registration entry structure + * @cb_func: Pointer to desired callback function + * @usr: User pointer to pass to callback on event trigger + * @rsc_index: rsc index of a client - only index "0" valid. + * @event_type: refer comments in event_register + * @list: list to attach event master list + */ +struct sde_rsc_event { + void (*cb_func)(uint32_t event_type, void *usr); + void *usr; + u32 rsc_index; + uint32_t event_type; + struct list_head list; +}; + +/** + * struct sde_rsc_cmd_config: provides panel configuration to rsc + * when client is command mode. It is not required to set it during + * video mode. + * + * @fps: panel te interval + * @vtotal: current vertical total (height + vbp + vfp) + * @jitter_numer: panel jitter numerator value. This config causes rsc/solver + * early before te. Default is 0.8% jitter. + * @jitter_denom: panel jitter denominator. + * @prefill_lines: max prefill lines based on panel + */ +struct sde_rsc_cmd_config { + u32 fps; + u32 vtotal; + u32 jitter_numer; + u32 jitter_denom; + u32 prefill_lines; +}; + +#if IS_ENABLED(CONFIG_DRM_SDE_RSC) +/** + * sde_rsc_client_create() - create the client for sde rsc. + * Different displays like DSI, HDMI, DP, WB, etc should call this + * api to register their vote for rpmh. They still need to vote for + * power handle to get the clocks. + + * @rsc_index: A client will be created on this RSC. As of now only + * SDE_RSC_INDEX is valid rsc index. + * @name: Caller needs to provide some valid string to identify + * the client. "primary", "dp", "hdmi" are suggested name. 
+ * @client_type: check client_type enum for information + * @vsync_source: This parameter is only valid for primary display. It provides + * vsync source information + * + * Return: client node pointer. + */ +struct sde_rsc_client *sde_rsc_client_create(u32 rsc_index, char *name, + enum sde_rsc_client_type client_type, u32 vsync_source); + +/** + * sde_rsc_client_destroy() - Destroy the sde rsc client. + * + * @client: Client pointer provided by sde_rsc_client_create(). + * + * Return: none + */ +void sde_rsc_client_destroy(struct sde_rsc_client *client); + +/** + * sde_rsc_client_state_update() - rsc client state update + * Video mode, cmd mode and clk state are supported as modes. A client need to + * set this property during panel time. A switching client can set the + * property to change the state + * + * @client: Client pointer provided by sde_rsc_client_create(). + * @state: Client state - video/cmd + * @config: fps, vtotal, porches, etc configuration for command mode + * panel + * @crtc_id: current client's crtc id + * @wait_vblank_crtc_id: Output parameter. If set to non-zero, rsc hw + * state update requires a wait for one vblank on + * the primary crtc. In that case, this output + * param will be set to the crtc on which to wait. + * If SDE_RSC_INVALID_CRTC_ID, no wait necessary + * + * Return: error code. + */ +int sde_rsc_client_state_update(struct sde_rsc_client *client, + enum sde_rsc_state state, + struct sde_rsc_cmd_config *config, int crtc_id, + int *wait_vblank_crtc_id); + +/** + * sde_rsc_client_get_vsync_refcount() - returns the status of the vsync + * refcount, to signal if the client needs to reset the refcounting logic + * @client: Client pointer provided by sde_rsc_client_create(). + * + * Return: true if the state update has completed. + */ +int sde_rsc_client_get_vsync_refcount( + struct sde_rsc_client *caller_client); + +/** + * sde_rsc_client_reset_vsync_refcount() - reduces the refcounting + * logic that waits for the vsync. 
+ * @client: Client pointer provided by sde_rsc_client_create(). + * + * Return: true if the state update has completed. + */ +int sde_rsc_client_reset_vsync_refcount( + struct sde_rsc_client *caller_client); + +/** + * sde_rsc_client_is_state_update_complete() - check if state update is complete + * RSC state transition is not complete until HW receives VBLANK signal. This + * function checks RSC HW to determine whether that signal has been received. + * @client: Client pointer provided by sde_rsc_client_create(). + * + * Return: true if the state update has completed. + */ +bool sde_rsc_client_is_state_update_complete( + struct sde_rsc_client *caller_client); + +/** + * sde_rsc_client_vote() - stores ab/ib vote for rsc client + * + * @client: Client pointer provided by sde_rsc_client_create(). + * @bus_id: data bus identifier + * @ab: aggregated bandwidth vote from client. + * @ib: instant bandwidth vote from client. + * + * Return: error code. + */ +int sde_rsc_client_vote(struct sde_rsc_client *caller_client, + u32 bus_id, u64 ab_vote, u64 ib_vote); + +/** + * sde_rsc_register_event - register a callback function for an event + * @rsc_index: A client will be created on this RSC. As of now only + * SDE_RSC_INDEX is valid rsc index. + * @event_type: event type to register; client sets 0x3 if it wants + * to register for CORE_PC and CORE_RESTORE - both events. + * @cb_func: Pointer to desired callback function + * @usr: User pointer to pass to callback on event trigger + * Returns: sde_rsc_event pointer on success + */ +struct sde_rsc_event *sde_rsc_register_event(int rsc_index, uint32_t event_type, + void (*cb_func)(uint32_t event_type, void *usr), void *usr); + +/** + * sde_rsc_unregister_event - unregister callback for an event + * @sde_rsc_event: event returned by sde_rsc_register_event + */ +void sde_rsc_unregister_event(struct sde_rsc_event *event); + +/** + * is_sde_rsc_available - check if display rsc available. 
+ * @rsc_index: A client will be created on this RSC. As of now only + * SDE_RSC_INDEX is valid rsc index. + * Returns: true if rsc is available; false in all other cases + */ +bool is_sde_rsc_available(int rsc_index); + +/** + * get_sde_rsc_current_state - gets the current state of sde rsc. + * @rsc_index: A client will be created on this RSC. As of now only + * SDE_RSC_INDEX is valid rsc index. + * Returns: current state if rsc available; SDE_RSC_IDLE_STATE for + * all other cases + */ +enum sde_rsc_state get_sde_rsc_current_state(int rsc_index); + +/** + * sde_rsc_client_trigger_vote() - triggers ab/ib vote for rsc client + * + * @client: Client pointer provided by sde_rsc_client_create(). + * @delta_vote: if bw vote is increased or decreased + * + * Return: error code. + */ +int sde_rsc_client_trigger_vote(struct sde_rsc_client *caller_client, + bool delta_vote); + +#else + +static inline struct sde_rsc_client *sde_rsc_client_create(u32 rsc_index, + char *name, enum sde_rsc_client_type client_type, u32 vsync_source) +{ + return NULL; +} + +static inline void sde_rsc_client_destroy(struct sde_rsc_client *client) +{ +} + +static inline int sde_rsc_client_state_update(struct sde_rsc_client *client, + enum sde_rsc_state state, + struct sde_rsc_cmd_config *config, int crtc_id, + int *wait_vblank_crtc_id) +{ + return 0; +} + +static inline int sde_rsc_client_get_vsync_refcount( + struct sde_rsc_client *caller_client) +{ + return 0; +} + +static inline int sde_rsc_client_reset_vsync_refcount( + struct sde_rsc_client *caller_client) +{ + return 0; +} + +static inline bool sde_rsc_client_is_state_update_complete( + struct sde_rsc_client *caller_client) +{ + return false; +} + +static inline int sde_rsc_client_vote(struct sde_rsc_client *caller_client, + u32 bus_id, u64 ab_vote, u64 ib_vote) +{ + return 0; +} + +static inline struct sde_rsc_event *sde_rsc_register_event(int rsc_index, + uint32_t event_type, + void (*cb_func)(uint32_t event_type, void *usr), void *usr) +{ 
+ return NULL; +} + +static inline void sde_rsc_unregister_event(struct sde_rsc_event *event) +{ +} + +static inline bool is_sde_rsc_available(int rsc_index) +{ + return false; +} + +static inline enum sde_rsc_state get_sde_rsc_current_state(int rsc_index) +{ + return SDE_RSC_IDLE_STATE; +} + +static inline int sde_rsc_client_trigger_vote( + struct sde_rsc_client *caller_client, bool delta_vote) +{ + return 0; +} +#endif /* CONFIG_DRM_SDE_RSC */ + +#endif /* _SDE_RSC_H_ */ diff --git a/include/linux/sde_vm_event.h b/include/linux/sde_vm_event.h new file mode 100644 index 000000000..cdde02e87 --- /dev/null +++ b/include/linux/sde_vm_event.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef __SDE_VM_EVENT_H__ +#define __SDE_VM_EVENT_H__ + +#include +#include +#include +#include +#include + +/** + * struct - msm_io_irq_entry - define irq item + * @label: gh_irq_label for the irq + * @irq_num: linux mapped irq num + * @list: list head pointer + */ +struct msm_io_irq_entry { + u32 label; + u32 irq_num; + struct list_head list; +}; + +/** + * struct - msm_io_mem_entry - define io memory item + * @base: reg base + * @size: size of the reg range + * @list: list head pointer + */ +struct msm_io_mem_entry { + phys_addr_t base; + phys_addr_t size; + struct list_head list; +}; + +/** + * struct - msm_io_res - represents the hw resources for vm sharing + * @irq: list of IRQ's of all the dislay sub-devices + * @mem: list of IO memory ranges of all the display sub-devices + */ +struct msm_io_res { + struct list_head irq; + struct list_head mem; +}; + +/** + * struct msm_vm_ops - hooks for communication with vm clients + * @vm_pre_hw_release: invoked before releasing the HW + * @vm_post_hw_acquire: invoked before pushing the first commit + * @vm_check: invoked to check the readiness of the vm_clients + * before releasing the HW + * @vm_get_io_resources: invoked to collect HW 
resources + */ +struct msm_vm_ops { + int (*vm_pre_hw_release)(void *priv_data); + int (*vm_post_hw_acquire)(void *priv_data); + int (*vm_check)(void *priv_data); + int (*vm_get_io_resources)(struct msm_io_res *io_res, void *priv_data); +}; + +/** + * msm_vm_client_entry - defines the vm client info + * @ops: client vm_ops + * @dev: clients device id. Used in unregister + * @data: client custom data + * @list: linked list entry + */ +struct msm_vm_client_entry { + struct msm_vm_ops ops; + struct device *dev; + void *data; + struct list_head list; +}; + +/** + * msm_register_vm_event - api for display dependent drivers(clients) to + * register for vm events + * @dev: msm device + * @client_dev: client device + * @ops: vm event hooks + * @priv_data: client custom data + */ +int msm_register_vm_event(struct device *dev, struct device *client_dev, + struct msm_vm_ops *ops, void *priv_data); + +/** + * msm_unregister_vm_event - api for display dependent drivers(clients) to + * unregister from vm events + * @dev: msm device + * @client_dev: client device + */ +void msm_unregister_vm_event(struct device *dev, struct device *client_dev); + +#endif //__SDE_VM_EVENT_H__ diff --git a/include/uapi/Kbuild b/include/uapi/Kbuild new file mode 100644 index 000000000..40f6bb96e --- /dev/null +++ b/include/uapi/Kbuild @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note + +# Top-level Makefile calls into asm-$(ARCH) +# List only non-arch directories below + +header-y += display/ diff --git a/include/uapi/display/Kbuild b/include/uapi/display/Kbuild new file mode 100644 index 000000000..5240a26e1 --- /dev/null +++ b/include/uapi/display/Kbuild @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note + +header-y += media/ +header-y += drm/ +header-y += hdcp/ diff --git a/include/uapi/display/drm/Kbuild b/include/uapi/display/drm/Kbuild new file mode 100644 index 000000000..7f043b1bc --- /dev/null +++ b/include/uapi/display/drm/Kbuild 
@@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note + +header-y += msm_drm_pp.h +header-y += sde_drm.h +header-y += mi_disp.h + diff --git a/include/uapi/display/drm/mi_disp.h b/include/uapi/display/drm/mi_disp.h new file mode 100644 index 000000000..b954e15ad --- /dev/null +++ b/include/uapi/display/drm/mi_disp.h @@ -0,0 +1,1046 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _MI_DISP_H_ +#define _MI_DISP_H_ + +#include +#include + +#if defined(__cplusplus) +extern "C" { +#endif + +enum disp_display_type { + MI_DISP_PRIMARY = 0, + MI_DISP_SECONDARY = 1, + MI_DISP_MAX, +}; + +enum common_feature_state { + FEATURE_OFF = 0, + FEATURE_ON = 1, +}; + +enum disp_feature_id { + DISP_FEATURE_DIMMING = 0, + DISP_FEATURE_HBM = 1, + DISP_FEATURE_HBM_FOD = 2, + DISP_FEATURE_DOZE_BRIGHTNESS = 3, + DISP_FEATURE_FOD_CALIBRATION_BRIGHTNESS = 4, + DISP_FEATURE_FOD_CALIBRATION_HBM = 5, + DISP_FEATURE_FLAT_MODE = 6, + DISP_FEATURE_CRC = 7, + DISP_FEATURE_DC = 8, + DISP_FEATURE_LOCAL_HBM = 9, + DISP_FEATURE_SENSOR_LUX = 10, + DISP_FEATURE_LOW_BRIGHTNESS_FOD = 11, + DISP_FEATURE_FP_STATUS = 12, + DISP_FEATURE_FOLD_STATUS = 13, + DISP_FEATURE_NATURE_FLAT_MODE = 14, + DISP_FEATURE_SPR_RENDER = 15, + DISP_FEATURE_AOD_TO_NORMAL = 16, + DISP_FEATURE_COLOR_INVERT = 17, + DISP_FEATURE_DC_BACKLIGHT = 18, + DISP_FEATURE_GIR = 19, + DISP_FEATURE_DBI = 20, + DISP_FEATURE_DDIC_ROUND_CORNER = 21, + DISP_FEATURE_HBM_BACKLIGHT = 22, + DISP_FEATURE_BACKLIGHT = 23, + DISP_FEATURE_BRIGHTNESS = 24, + DISP_FEATURE_LCD_HBM = 25, + DISP_FEATURE_DOZE_STATE =26, + DISP_FEATURE_PEAK_HDR_MODE = 27, + DISP_FEATURE_CABC = 28, + DISP_FEATURE_BIST_MODE = 29, + DISP_FEATURE_BIST_MODE_COLOR = 30, + DISP_FEATURE_ROUND_MODE = 31, + DISP_FEATURE_GAMUT = 32, + DISP_FEATURE_COLORMODE_NOTIFY = 33, + DISP_FEATURE_DOLBY_STATUS = 34, + DISP_FEATURE_MAX, +}; + +/* feature_id: 
DISP_FEATURE_LOCAL_HBM corresponding feature_val */ +enum local_hbm_state { + LOCAL_HBM_OFF_TO_NORMAL = 0, + LOCAL_HBM_NORMAL_WHITE_1000NIT = 1, + LOCAL_HBM_NORMAL_WHITE_750NIT = 2, + LOCAL_HBM_NORMAL_WHITE_500NIT = 3, + LOCAL_HBM_NORMAL_WHITE_110NIT = 4, + LOCAL_HBM_NORMAL_GREEN_500NIT = 5, + LOCAL_HBM_HLPM_WHITE_1000NIT = 6, + LOCAL_HBM_HLPM_WHITE_110NIT = 7, + LOCAL_HBM_OFF_TO_HLPM = 8, + LOCAL_HBM_OFF_TO_LLPM = 9, + LOCAL_HBM_OFF_TO_NORMAL_BACKLIGHT = 10, + LOCAL_HBM_OFF_TO_NORMAL_BACKLIGHT_RESTORE = 11, + LOCAL_HBM_MAX, +}; + +/* feature_id: DISP_FEATURE_DBI corresponding feature_val */ +enum dbi_by_temp_state { + TEMP_INDEX_20 = 20, + TEMP_INDEX_25 = 25, + TEMP_INDEX_28 = 28, + TEMP_INDEX_32 = 32, + TEMP_INDEX_36 = 36, + TEMP_INDEX_37 = 37, + TEMP_INDEX_38 = 38, + TEMP_INDEX_39 = 39, + TEMP_INDEX_40 = 40, + TEMP_INDEX_41 = 41, + TEMP_INDEX_42 = 42, + TEMP_INDEX_43 = 43, + TEMP_INDEX_44 = 44, + TEMP_INDEX_45 = 45, + TEMP_INDEX_46 = 46, + TEMP_INDEX_47 = 47, + TEMP_INDEX_48 = 48, + TEMP_INDEX_49 = 49, + TEMP_INDEX_50 = 50, + TEMP_INDEX_51 = 51, + TEMP_INDEX_52 = 52, + TEMP_INDEX_MAX, +}; + +/* feature_id: DISP_FEATURE_FP_STATUS corresponding feature_val */ +enum fingerprint_status { + FINGERPRINT_NONE = 0, + ENROLL_START = 1, + ENROLL_STOP = 2, + AUTH_START = 3, + AUTH_STOP = 4, + HEART_RATE_START = 5, + HEART_RATE_STOP = 6, +}; + +/* feature_id: DISP_FEATURE_SPR_RENDER corresponding feature_val */ +enum spr_render_status { + SPR_1D_RENDERING = 1, + SPR_2D_RENDERING = 2, +}; + +/* feature_id: DISP_FEATURE_LCD_HBM corresponding feature_val */ +enum lcd_hbm_level { + LCD_HBM_OFF = 0, + LCD_HBM_L1_ON = 1, + LCD_HBM_L2_ON = 2, + LCD_HBM_L3_ON = 3, + LCD_HBM_MAX, +}; + +/* feature_id: DISP_FEATURE_CRC corresponding feature_val */ +enum crc_mode { + CRC_OFF = 0, + CRC_SRGB = 1, + CRC_P3 = 2, + CRC_P3_D65 = 3, + CRC_P3_FLAT = 4, + CRC_SRGB_D65 = 5, + CRC_MODE_MAX, +}; + +/* feature_id: DISP_FEATURE_GIR corresponding feature_val */ +enum gir_mode { + GIR_OFF = 0, + 
GIR_ON = 1, + GIR_MODE_MAX, +}; + +/* feature_id: DISP_FEATURE_CABC corresponding feature_val */ +enum cabc_status { + LCD_CABC_OFF = 0, + LCD_CABC_UI_ON = 1, + LCD_CABC_MOVIE_ON = 2, + LCD_CABC_STILL_ON = 3, +}; + +struct disp_base { + __u32 flag; + __u32 disp_id; +}; + +/* IOCTL: MI_DISP_IOCTL_VERSION parameter */ +struct disp_version { + struct disp_base base; + __u32 version; +}; + +/* IOCTL: MI_DISP_IOCTL_SET_FEATURE parameter */ +struct disp_feature_req { + struct disp_base base; + __u32 feature_id; + __s32 feature_val; + __u32 tx_len; + __u64 tx_ptr; + __u32 rx_len; + __u64 rx_ptr; +}; + +/** + * enum doze_brightness_state - set doze brightness state + * @DOZE_TO_NORMAL : doze mode to normal mode + * @DOZE_BRIGHTNESS_HBM: doze mode high brightness (60 nit) + * @DOZE_BRIGHTNESS_LBM: doze mode low brightness (5 nit) + * @DOZE_BRIGHTNESS_MAX + */ +enum doze_brightness_state { + DOZE_TO_NORMAL = 0, + DOZE_BRIGHTNESS_HBM = 1, + DOZE_BRIGHTNESS_LBM = 2, + DOZE_BRIGHTNESS_MAX, +}; + +/* IOCTL: + * MI_DISP_IOCTL_SET_DOZE_BRIGHTNESS parameter + * MI_DISP_IOCTL_GET_DOZE_BRIGHTNESS parameter + */ +struct disp_doze_brightness_req { + struct disp_base base; + __u32 doze_brightness; +}; + +/* local_hbm_value value */ +enum lhbm_target_brightness_state { + LHBM_TARGET_BRIGHTNESS_OFF_FINGER_UP = 0, + LHBM_TARGET_BRIGHTNESS_OFF_AUTH_STOP = 1, + LHBM_TARGET_BRIGHTNESS_WHITE_1000NIT = 2, + LHBM_TARGET_BRIGHTNESS_WHITE_110NIT = 3, + LHBM_TARGET_BRIGHTNESS_GREEN_500NIT = 4, + LHBM_TARGET_BRIGHTNESS_MAX +}; + +/* feature_id: DISP_FEATURE_COLORMODE_NOTIFY feature_val */ +enum display_color_mode_id { + COLOR_MODE_STANDARD = 0, + COLOR_MODE_ACNORMAL = 1, + COLOR_MODE_CONTRAST = 2, + COLOR_MODE_EXPERT_NATURE = 3, + COLOR_MODE_EXPERT_OTHERS = 4, +}; + +/* IOCTL: MI_DISP_IOCTL_SET_LOCAL_HBM parameter */ +struct disp_local_hbm_req { + struct disp_base base; + __u32 local_hbm_value; +}; + +/* IOCTL: + * MI_DISP_IOCTL_GET_BRIGHTNESS parameter + * MI_DISP_IOCTL_SET_BRIGHTNESS parameter + 
*/ +struct disp_brightness_req { + struct disp_base base; + __u32 brightness; + __u32 brightness_clone; +}; + +/* IOCTL: MI_DISP_IOCTL_GET_PANEL_INFO parameter */ +struct disp_panel_info { + struct disp_base base; + __u32 info_len; + char *info; +}; + +/* IOCTL: MI_DISP_IOCTL_GET_WP_INFO parameter */ +struct disp_wp_info { + struct disp_base base; + __u32 info_len; + char *info; +}; + +struct disp_manufacturer_info_req { + struct disp_base base; + char __user *wp_info; + char __user *maxbrightness; + char __user *manufacturer_time; + __u32 wp_info_len; + __u32 max_brightness_len; + __u32 manufacturer_time_len; +}; + +/** + * enum ddic_mode_type - ddic mode + * @DDIC_MODE_NORMAL: TE frequency is equal to screen refresh rate + * @DDIC_MODE_IDLE: TE frequency is fixed(120/60Hz), idle mode + * the screen refresh rate dimming to ddic minimum + * @DDIC_MODE_AUTO: TE frequency is fixed(120/60Hz), adaptive mode + * the screen refresh rate adaptive app refresh frequency + * @DDIC_MODE_QSYNC: TE frequency and screen refresh rate adaptive app refresh frequency + * @DDIC_MODE_DIFF: The surfaceflinger refresh rate is different with kernel timing refresh rate + * @DDIC_MODE_TEST: Used for testing, or factory version with special refresh rate + */ +enum ddic_mode_type { + DDIC_MODE_NORMAL = 0, + DDIC_MODE_IDLE = 1, + DDIC_MODE_AUTO = 2, + DDIC_MODE_QSYNC = 3, + DDIC_MODE_DIFF = 4, + DDIC_MODE_TEST = 5, + DDIC_MODE_MAX, +}; + +/** + * struct mi_dsi_display_sub_mode - specifies xiaomi timing parameters for dsi display + * @ddic_mode: ddic mode. + * @timing_refresh_rate: kernel timing refresh rate. + * @sf_refresh_rate: surfaceflinger refresh rate. + * @ddic_min_refresh_rate: ddic mininum refresh rate. 
+ */ +struct mi_mode_info { + __u32 ddic_mode; + __u32 timing_refresh_rate; + __u32 sf_refresh_rate; + __u32 ddic_min_refresh_rate; +}; + +/* IOCTL: MI_DISP_IOCTL_GET_FPS parameter */ +struct disp_fps_info { + struct disp_base base; + __u32 fps; + struct mi_mode_info mode; +}; + +enum disp_event_type { + MI_DISP_EVENT_POWER = 0, + MI_DISP_EVENT_BACKLIGHT = 1, + MI_DISP_EVENT_FOD = 2, + MI_DISP_EVENT_DOZE = 3, + MI_DISP_EVENT_FPS = 4, + MI_DISP_EVENT_BRIGHTNESS_CLONE = 5, + MI_DISP_EVENT_51_BRIGHTNESS = 6, + MI_DISP_EVENT_HBM = 7, + MI_DISP_EVENT_DC = 8, + MI_DISP_EVENT_PANEL_DEAD = 9, + MI_DISP_EVENT_PANEL_EVENT = 10, + MI_DISP_EVENT_DDIC_RESOLUTION = 11, + MI_DISP_EVENT_FLAT_MODE = 12, + MI_DISP_EVENT_MAX, +}; + +/* IOCTL: + * MI_DISP_IOCTL_REGISTER_EVENT parameter + * MI_DISP_IOCTL_DEREGISTER_EVENT parameter + */ +struct disp_event_req { + struct disp_base base; + __u32 type; +}; + +struct disp_event { + __s32 disp_id; + __u32 type; + __u32 length; +}; + +struct disp_event_resp { + struct disp_event base; + __u8 data[]; +}; + +/* Define supported power modes */ +enum panel_power_state { + MI_DISP_POWER_ON = 0, + MI_DISP_POWER_LP1 = 1, + MI_DISP_POWER_LP2 = 2, + MI_DISP_POWER_STANDBY = 3, + MI_DISP_POWER_SUSPEND = 4, + MI_DISP_POWER_OFF = 5, +}; + +/** + * enum disp_dsi_cmd_state - command set state + * @MI_DSI_CMD_LP_STATE: dsi low power mode + * @MI_DSI_CMD_HS_STATE: dsi high speed mode + * @MI_DSI_CMD_MAX_STATE + */ +enum disp_dsi_cmd_state { + MI_DSI_CMD_LP_STATE = 0, + MI_DSI_CMD_HS_STATE = 1, + MI_DSI_CMD_MAX_STATE, +}; + +/* IOCTL: + * MI_DISP_IOCTL_WRITE_DSI_CMD parameter + * MI_DISP_IOCTL_READ_DSI_CMD parameter + */ +struct disp_dsi_cmd_req { + struct disp_base base; + __u8 tx_state; + __u32 tx_len; + __u64 tx_ptr; + __u8 rx_state; + __u32 rx_len; + __u64 rx_ptr; +}; + +/* supported count info ids */ +enum disp_count_info_type { + DISP_COUNT_INFO_POWERSTATUS = 0, + DISP_COUNT_INFO_SYSTEM_BUILD_VERSION = 1, + DISP_COUNT_INFO_FRAME_DROP_COUNT = 2, + 
DISP_COUNT_INFO_SWITCH_KERNEL_FUNCTION_TIMER = 3, + DISP_COUNT_INFO_TPIDLE_COUNT = 4, + DISP_COUNT_INFO_MAX, +}; + +/* IOCTL: MI_DISP_IOCTL_SET_COUNT_INFO parameter */ +struct disp_count_info_req { + struct disp_base base; + __u32 count_info_type; + __s32 count_info_val; + __u32 tx_len; + __u64 tx_ptr; + __u32 rx_len; + __u64 rx_ptr; +}; + +#if defined(__KERNEL__) +static inline int is_support_disp_id(__u32 disp_id) +{ + if (disp_id < MI_DISP_MAX) + return 1; + else + return 0; +} + +static inline const char *get_disp_id_name(__u32 disp_id) +{ + switch (disp_id) { + case MI_DISP_PRIMARY: + return "primary"; + case MI_DISP_SECONDARY: + return "secondary"; + default: + return "Unknown"; + } +} + +static inline int is_support_doze_brightness(__u32 doze_brightness) +{ + if (doze_brightness < DOZE_BRIGHTNESS_MAX) + return 1; + else + return 0; +} + +static inline int is_aod_brightness(__u32 doze_brightness) +{ + if (DOZE_BRIGHTNESS_HBM == doze_brightness || + doze_brightness == DOZE_BRIGHTNESS_LBM) + return 1; + else + return 0; +} + +static inline const char *get_doze_brightness_name(__u32 doze_brightness) +{ + switch (doze_brightness) { + case DOZE_TO_NORMAL: + return "doze_to_normal"; + case DOZE_BRIGHTNESS_HBM: + return "doze_brightness_high"; + case DOZE_BRIGHTNESS_LBM: + return "doze_brightness_low"; + default: + return "Unknown"; + } +} + +static inline int is_support_lcd_hbm_level(__u32 lcd_hbm_level) +{ + if (lcd_hbm_level < LCD_HBM_MAX) + return 1; + else + return 0; +} + +static inline int is_support_disp_event_type(__u32 event_type) +{ + if (event_type < MI_DISP_EVENT_MAX) + return 1; + else + return 0; +} + +static inline const char *get_disp_event_type_name(__u32 event_type) +{ + switch (event_type) { + case MI_DISP_EVENT_POWER: + return "Power"; + case MI_DISP_EVENT_BACKLIGHT: + return "Backlight"; + case MI_DISP_EVENT_FOD: + return "Fod"; + case MI_DISP_EVENT_DOZE: + return "Doze"; + case MI_DISP_EVENT_FPS: + return "Fps"; + case 
MI_DISP_EVENT_BRIGHTNESS_CLONE: + return "Brightness_clone"; + case MI_DISP_EVENT_51_BRIGHTNESS: + return "51_brightness"; + case MI_DISP_EVENT_HBM: + return "HBM"; + case MI_DISP_EVENT_DC: + return "DC"; + case MI_DISP_EVENT_PANEL_DEAD: + return "panel_dead"; + case MI_DISP_EVENT_PANEL_EVENT: + return "panel_event"; + case MI_DISP_EVENT_DDIC_RESOLUTION: + return "ddic_resolution"; + case MI_DISP_EVENT_FLAT_MODE: + return "flat_mode"; + default: + return "Unknown"; + } +} + +static inline int is_support_disp_feature_id(__u32 feature_id) +{ + if (feature_id < DISP_FEATURE_MAX) + return 1; + else + return 0; +} + +static inline const char *get_local_hbm_state_name(int state) +{ + switch (state) { + case LOCAL_HBM_OFF_TO_NORMAL: + return "[lhbm off to nomal]"; + case LOCAL_HBM_NORMAL_WHITE_1000NIT: + return "[lhbm normal white 1000nit]"; + case LOCAL_HBM_NORMAL_WHITE_750NIT: + return "[lhbm normal white 750nit]"; + case LOCAL_HBM_NORMAL_WHITE_500NIT: + return "[lhbm normal white 500nit]"; + case LOCAL_HBM_NORMAL_WHITE_110NIT: + return "[lhbm normal white 110nit]"; + case LOCAL_HBM_NORMAL_GREEN_500NIT: + return "[lhbm normal green 500nit]"; + case LOCAL_HBM_HLPM_WHITE_1000NIT: + return "[lhbm H-doze to white 1000nit]"; + case LOCAL_HBM_HLPM_WHITE_110NIT: + return "[lhbm H-doze to white 110nit]"; + case LOCAL_HBM_OFF_TO_HLPM: + return "[lhbm off to H-doze]"; + case LOCAL_HBM_OFF_TO_LLPM: + return "[lhbm off to L-doze]"; + case LOCAL_HBM_OFF_TO_NORMAL_BACKLIGHT: + return "[lhbm off to nomal backlight]"; + case LOCAL_HBM_OFF_TO_NORMAL_BACKLIGHT_RESTORE: + return "[lhbm off to nomal backlight restore]"; + default: + return "Unknown"; + } +} + +static inline const char *get_fingerprint_status_name(int status) +{ + switch (status) { + case FINGERPRINT_NONE: + return "none"; + case ENROLL_START: + return "enroll_start"; + case ENROLL_STOP: + return "enroll_stop"; + case AUTH_START: + return "authenticate_start"; + case AUTH_STOP: + return "authenticate_stop"; + case 
HEART_RATE_START: + return "heart_rate_start"; + case HEART_RATE_STOP: + return "heart_rate_stop"; + default: + return "Unknown"; + } +} + +static inline const char *get_disp_feature_id_name(__u32 feature_id) +{ + switch (feature_id) { + case DISP_FEATURE_DIMMING: + return "dimming"; + case DISP_FEATURE_HBM: + return "hbm"; + case DISP_FEATURE_HBM_FOD: + return "hbm_fod"; + case DISP_FEATURE_DOZE_BRIGHTNESS: + return "doze_brightness"; + case DISP_FEATURE_FOD_CALIBRATION_BRIGHTNESS: + return "fod_calibration_brightness"; + case DISP_FEATURE_FOD_CALIBRATION_HBM: + return "fod_calibration_hbm"; + case DISP_FEATURE_FLAT_MODE: + return "flat_mode"; + case DISP_FEATURE_CRC: + return "crc"; + case DISP_FEATURE_DC: + return "dc_mode"; + case DISP_FEATURE_LOCAL_HBM: + return "local_hbm"; + case DISP_FEATURE_SENSOR_LUX: + return "sensor_lux"; + case DISP_FEATURE_LOW_BRIGHTNESS_FOD: + return "low_brightness_fod"; + case DISP_FEATURE_FP_STATUS: + return "fp_status"; + case DISP_FEATURE_FOLD_STATUS: + return "fold_status"; + case DISP_FEATURE_NATURE_FLAT_MODE: + return "nature_flat_mode"; + case DISP_FEATURE_SPR_RENDER: + return "spr_render"; + case DISP_FEATURE_AOD_TO_NORMAL: + return "aod_to_normal"; + case DISP_FEATURE_COLOR_INVERT: + return "color_invert"; + case DISP_FEATURE_DC_BACKLIGHT: + return "dc_backlight"; + case DISP_FEATURE_GIR: + return "gir"; + case DISP_FEATURE_DBI: + return "dbi"; + case DISP_FEATURE_DDIC_ROUND_CORNER: + return "ddic_round_corner"; + case DISP_FEATURE_HBM_BACKLIGHT: + return "hbm_backlight_level"; + case DISP_FEATURE_BACKLIGHT: + return "backlight"; + case DISP_FEATURE_BRIGHTNESS: + return "brightness"; + case DISP_FEATURE_LCD_HBM: + return "lcd_hbm"; + case DISP_FEATURE_DOZE_STATE: + return "doze_state"; + case DISP_FEATURE_PEAK_HDR_MODE: + return "peak_hdr_mode"; + case DISP_FEATURE_CABC: + return "cabc"; + case DISP_FEATURE_BIST_MODE: + return "bist_mode"; + case DISP_FEATURE_BIST_MODE_COLOR: + return "bist_mode_color"; + case 
DISP_FEATURE_ROUND_MODE: + return "round_mode"; + case DISP_FEATURE_GAMUT: + return "gamut"; + case DISP_FEATURE_COLORMODE_NOTIFY: + return "colormode_notify"; + case DISP_FEATURE_DOLBY_STATUS: + return "dolby_status"; + default: + return "Unknown"; + } +} + +static inline const char *get_lhbm_value_name(__u32 lhbm_value) +{ + switch (lhbm_value) { + case LHBM_TARGET_BRIGHTNESS_OFF_FINGER_UP: + return "LHBM_OFF_FINGER_UP"; + case LHBM_TARGET_BRIGHTNESS_OFF_AUTH_STOP: + return "LHBM_OFF_AUTH_STOP"; + case LHBM_TARGET_BRIGHTNESS_WHITE_1000NIT: + return "LHBM_ON_WHITE_1000NIT"; + case LHBM_TARGET_BRIGHTNESS_WHITE_110NIT: + return "LHBM_ON_WHITE_110NIT"; + case LHBM_TARGET_BRIGHTNESS_GREEN_500NIT: + return "LHBM_ON_GREEN_500NIT"; + default: + return "Unknown"; + } +} + +static inline const char *get_ddic_mode_name(__u32 ddic_mode) +{ + switch (ddic_mode) { + case DDIC_MODE_NORMAL: + return "normal"; + case DDIC_MODE_IDLE: + return "idle"; + case DDIC_MODE_AUTO: + return "auto"; + case DDIC_MODE_QSYNC: + return "qsync"; + case DDIC_MODE_DIFF: + return "diff"; + case DDIC_MODE_TEST: + return "test"; + default: + return "Unknown"; + } +} + +static inline int is_support_disp_count_info_type(__u32 count_info_type) +{ + if (count_info_type < DISP_COUNT_INFO_MAX) + return 1; + else + return 0; +} + +static inline const char *get_disp_count_info_type_name(__u32 count_info_type) +{ + switch (count_info_type) { + case DISP_COUNT_INFO_POWERSTATUS: + return "power_status"; + case DISP_COUNT_INFO_SYSTEM_BUILD_VERSION: + return "system_build_version"; + case DISP_COUNT_INFO_FRAME_DROP_COUNT: + return "frame_drop_count"; + case DISP_COUNT_INFO_SWITCH_KERNEL_FUNCTION_TIMER: + return "swith_function_timer"; + case DISP_COUNT_INFO_TPIDLE_COUNT: + return "tpidle_count"; + default: + return "Unknown count info type"; + } +} + +#else +static inline int isSupportDispId(__u32 disp_id) +{ + if (disp_id < MI_DISP_MAX) + return 1; + else + return 0; +} + +static inline const char 
*getDispIdName(__u32 disp_id) +{ + switch (disp_id) { + case MI_DISP_PRIMARY: + return "primary"; + case MI_DISP_SECONDARY: + return "secondary"; + default: + return "Unknown"; + } +} + +static inline int isSupportDozeBrightness(__u32 doze_brightness) +{ + if (doze_brightness < DOZE_BRIGHTNESS_MAX) + return 1; + else + return 0; +} + +static inline int isAodBrightness(__u32 doze_brightness) +{ + if (DOZE_BRIGHTNESS_HBM == doze_brightness || + doze_brightness == DOZE_BRIGHTNESS_LBM) + return 1; + else + return 0; +} + +static inline const char *getDozeBrightnessName(__u32 doze_brightness) +{ + switch (doze_brightness) { + case DOZE_TO_NORMAL: + return "doze_to_normal"; + case DOZE_BRIGHTNESS_HBM: + return "doze_brightness_high"; + case DOZE_BRIGHTNESS_LBM: + return "doze_brightness_low"; + default: + return "Unknown"; + } +} + +static inline int isSupportLcdHbmLevel(__u32 lcd_hbm_level) +{ + if (lcd_hbm_level < LCD_HBM_MAX) + return 1; + else + return 0; +} + +static inline int isSupportDispEventType(__u32 event_type) +{ + if (event_type < MI_DISP_EVENT_MAX) + return 1; + else + return 0; +} + +static inline const char *getDispEventTypeName(__u32 event_type) +{ + switch (event_type) { + case MI_DISP_EVENT_POWER: + return "Power"; + case MI_DISP_EVENT_BACKLIGHT: + return "Backlight"; + case MI_DISP_EVENT_FOD: + return "Fod"; + case MI_DISP_EVENT_DOZE: + return "Doze"; + case MI_DISP_EVENT_FPS: + return "Fps"; + case MI_DISP_EVENT_BRIGHTNESS_CLONE: + return "Brightness_clone"; + case MI_DISP_EVENT_51_BRIGHTNESS: + return "51_brightness"; + case MI_DISP_EVENT_HBM: + return "HBM"; + case MI_DISP_EVENT_DC: + return "DC"; + case MI_DISP_EVENT_PANEL_DEAD: + return "panel_dead"; + case MI_DISP_EVENT_PANEL_EVENT: + return "panel_event"; + case MI_DISP_EVENT_DDIC_RESOLUTION: + return "ddic_resolution"; + case MI_DISP_EVENT_FLAT_MODE: + return "flat_mode"; + default: + return "Unknown"; + } +} + +static inline int isSupportDispFeatureId(__u32 feature_id) +{ + if (feature_id < 
DISP_FEATURE_MAX) + return 1; + else + return 0; +} + +static inline const char *getLocalHbmStateName(int state) +{ + switch (state) { + case LOCAL_HBM_OFF_TO_NORMAL: + return "[lhbm off to nomal]"; + case LOCAL_HBM_NORMAL_WHITE_1000NIT: + return "[lhbm normal white 1000nit]"; + case LOCAL_HBM_NORMAL_WHITE_750NIT: + return "[lhbm normal white 750nit]"; + case LOCAL_HBM_NORMAL_WHITE_500NIT: + return "[lhbm normal white 500nit]"; + case LOCAL_HBM_NORMAL_WHITE_110NIT: + return "[lhbm normal white 110nit]"; + case LOCAL_HBM_NORMAL_GREEN_500NIT: + return "[lhbm normal green 500nit]"; + case LOCAL_HBM_HLPM_WHITE_1000NIT: + return "[lhbm H-doze to white 1000nit]"; + case LOCAL_HBM_HLPM_WHITE_110NIT: + return "[lhbm H-doze to white 110nit]"; + case LOCAL_HBM_OFF_TO_HLPM: + return "[lhbm off to H-doze]"; + case LOCAL_HBM_OFF_TO_LLPM: + return "[lhbm off to L-doze]"; + case LOCAL_HBM_OFF_TO_NORMAL_BACKLIGHT: + return "[lhbm off to nomal backlight]"; + case LOCAL_HBM_OFF_TO_NORMAL_BACKLIGHT_RESTORE: + return "[lhbm off to nomal backlight restore]"; + default: + return "Unknown"; + } +} + +static inline const char *getFingerprintStatusName(int status) +{ + switch (status) { + case FINGERPRINT_NONE: + return "none"; + case ENROLL_START: + return "enroll_start"; + case ENROLL_STOP: + return "enroll_stop"; + case AUTH_START: + return "authenticate_start"; + case AUTH_STOP: + return "authenticate_stop"; + case HEART_RATE_START: + return "heart_rate_start"; + case HEART_RATE_STOP: + return "heart_rate_stop"; + default: + return "Unknown"; + } +} + +static inline const char *getDispFeatureIdName(__u32 feature_id) +{ + switch (feature_id) { + case DISP_FEATURE_DIMMING: + return "dimming"; + case DISP_FEATURE_HBM: + return "hbm"; + case DISP_FEATURE_HBM_FOD: + return "hbm_fod"; + case DISP_FEATURE_DOZE_BRIGHTNESS: + return "doze_brightness"; + case DISP_FEATURE_FOD_CALIBRATION_BRIGHTNESS: + return "fod_calibration_brightness"; + case DISP_FEATURE_FOD_CALIBRATION_HBM: + return 
"fod_calibration_hbm"; + case DISP_FEATURE_FLAT_MODE: + return "flat_mode"; + case DISP_FEATURE_CRC: + return "crc"; + case DISP_FEATURE_DC: + return "dc_mode"; + case DISP_FEATURE_LOCAL_HBM: + return "local_hbm"; + case DISP_FEATURE_SENSOR_LUX: + return "sensor_lux"; + case DISP_FEATURE_LOW_BRIGHTNESS_FOD: + return "low_brightness_fod"; + case DISP_FEATURE_FP_STATUS: + return "fp_status"; + case DISP_FEATURE_FOLD_STATUS: + return "fold_status"; + case DISP_FEATURE_NATURE_FLAT_MODE: + return "nature_flat_mode"; + case DISP_FEATURE_SPR_RENDER: + return "spr_render"; + case DISP_FEATURE_AOD_TO_NORMAL: + return "aod_to_normal"; + case DISP_FEATURE_COLOR_INVERT: + return "color_invert"; + case DISP_FEATURE_DC_BACKLIGHT: + return "dc_backlight"; + case DISP_FEATURE_GIR: + return "gir"; + case DISP_FEATURE_DBI: + return "dbi"; + case DISP_FEATURE_DDIC_ROUND_CORNER: + return "ddic_round_corner"; + case DISP_FEATURE_HBM_BACKLIGHT: + return "hbm_backlight_level"; + case DISP_FEATURE_BACKLIGHT: + return "backlight"; + case DISP_FEATURE_BRIGHTNESS: + return "brightness"; + case DISP_FEATURE_LCD_HBM: + return "lcd_hbm"; + case DISP_FEATURE_DOZE_STATE: + return "doze_state"; + case DISP_FEATURE_PEAK_HDR_MODE: + return "peak_hdr_mode"; + case DISP_FEATURE_CABC: + return "cabc"; + case DISP_FEATURE_BIST_MODE: + return "bist_mode"; + case DISP_FEATURE_BIST_MODE_COLOR: + return "bist_mode_color"; + case DISP_FEATURE_ROUND_MODE: + return "round_mode"; + case DISP_FEATURE_GAMUT: + return "gamut"; + case DISP_FEATURE_COLORMODE_NOTIFY: + return "colormode_notify"; + case DISP_FEATURE_DOLBY_STATUS: + return "dolby_status"; + default: + return "Unknown"; + } +} + +static inline const char *getLhbmValueName(__u32 lhbm_value) +{ + switch (lhbm_value) { + case LHBM_TARGET_BRIGHTNESS_OFF_FINGER_UP: + return "LHBM_OFF_FINGER_UP"; + case LHBM_TARGET_BRIGHTNESS_OFF_AUTH_STOP: + return "LHBM_OFF_AUTH_STOP"; + case LHBM_TARGET_BRIGHTNESS_WHITE_1000NIT: + return "LHBM_ON_WHITE_1000NIT"; + case 
LHBM_TARGET_BRIGHTNESS_WHITE_110NIT: + return "LHBM_ON_WHITE_110NIT"; + case LHBM_TARGET_BRIGHTNESS_GREEN_500NIT: + return "LHBM_ON_GREEN_500NIT"; + default: + return "Unknown"; + } +} + +static inline const char *getDdicModeName(__u32 ddic_mode) +{ + switch (ddic_mode) { + case DDIC_MODE_NORMAL: + return "normal"; + case DDIC_MODE_IDLE: + return "idle"; + case DDIC_MODE_AUTO: + return "auto"; + case DDIC_MODE_QSYNC: + return "qsync"; + case DDIC_MODE_DIFF: + return "diff"; + case DDIC_MODE_TEST: + return "test"; + default: + return "Unknown"; + } +} + +static inline int isSupportDispCountInfoType(__u32 count_info_type) +{ + if (count_info_type < DISP_COUNT_INFO_MAX) + return 1; + else + return 0; +} + +static inline const char *getDispCountInfoTypeName(__u32 count_info_type) +{ + switch (count_info_type) { + case DISP_COUNT_INFO_POWERSTATUS: + return "power_status"; + case DISP_COUNT_INFO_SYSTEM_BUILD_VERSION: + return "system_build_version"; + case DISP_COUNT_INFO_FRAME_DROP_COUNT: + return "frame_drop_count"; + case DISP_COUNT_INFO_SWITCH_KERNEL_FUNCTION_TIMER: + return "swith_function_timer"; + case DISP_COUNT_INFO_TPIDLE_COUNT: + return "tpidle_count"; + default: + return "Unknown count info type"; + } +} + +#endif + + +#define MI_DISP_FLAG_BLOCK 0x0000 +#define MI_DISP_FLAG_NONBLOCK 0x0001 + +#define MI_DISP_FEATURE_VERSION_MAJOR 1 +#define MI_DISP_FEATURE_VERSION_MINOR 0 +#define MI_DISP_FEATURE_VERSION (((MI_DISP_FEATURE_VERSION_MAJOR & 0xFF) << 8) | \ + (MI_DISP_FEATURE_VERSION_MINOR & 0xFF)) + +#define MI_DISP_IOCTL_VERSION _IOR('D', 0x00, struct disp_version) +#define MI_DISP_IOCTL_SET_FEATURE _IOWR('D', 0x01, struct disp_feature_req) +#define MI_DISP_IOCTL_SET_DOZE_BRIGHTNESS _IOW('D', 0x02, struct disp_doze_brightness_req) +#define MI_DISP_IOCTL_GET_DOZE_BRIGHTNESS _IOR('D', 0x03, struct disp_doze_brightness_req) +#define MI_DISP_IOCTL_GET_PANEL_INFO _IOWR('D', 0x04, struct disp_panel_info) +#define MI_DISP_IOCTL_GET_WP_INFO _IOWR('D', 0x05, struct 
disp_wp_info) +#define MI_DISP_IOCTL_GET_FPS _IOR('D', 0x06, struct disp_fps_info) +#define MI_DISP_IOCTL_REGISTER_EVENT _IOW('D', 0x07, struct disp_event_req) +#define MI_DISP_IOCTL_DEREGISTER_EVENT _IOW('D', 0x08, struct disp_event_req) +#define MI_DISP_IOCTL_WRITE_DSI_CMD _IOW('D', 0x09, struct disp_dsi_cmd_req) +#define MI_DISP_IOCTL_READ_DSI_CMD _IOWR('D', 0x0A, struct disp_dsi_cmd_req) +#define MI_DISP_IOCTL_GET_BRIGHTNESS _IOR('D', 0x0B, struct disp_brightness_req) +#define MI_DISP_IOCTL_SET_BRIGHTNESS _IOW('D', 0x0C, struct disp_brightness_req) +#define MI_DISP_IOCTL_SET_COUNT_INFO _IOWR('D', 0x0D, struct disp_count_info_req) +#define MI_DISP_IOCTL_SET_LOCAL_HBM _IOW('D', 0x0E, struct disp_local_hbm_req) +#define MI_DISP_IOCTL_GET_FEATURE _IOWR('D', 0x0F, struct disp_feature_req) +#define MI_DISP_IOCTL_GET_MANUFACTURER_INFO _IOWR('D', 0x10, struct disp_manufacturer_info_req) +#if defined(__cplusplus) +} +#endif + +#endif /* _MI_DISP_H_ */ + diff --git a/include/uapi/display/drm/msm_drm_pp.h b/include/uapi/display/drm/msm_drm_pp.h new file mode 100644 index 000000000..a4c68f7a0 --- /dev/null +++ b/include/uapi/display/drm/msm_drm_pp.h @@ -0,0 +1,817 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _MSM_DRM_PP_H_ +#define _MSM_DRM_PP_H_ + +#include <linux/types.h> +#include <drm/drm.h> + +#define ENABLE_EVENT_SPR_OPR_VALUE +#define ENABLE_EVENT_INTF_MISR_SIGNATURE +#define MAX_DSI_DISPLAY 4 + +/** + * struct drm_msm_pcc_coeff - PCC coefficient structure for each color + * component. + * @c: constant coefficient. + * @r: red coefficient. + * @g: green coefficient. + * @b: blue coefficient. + * @rg: red green coefficient. + * @gb: green blue coefficient. + * @rb: red blue coefficient. + * @rgb: red blue green coefficient. 
+ */ + +struct drm_msm_pcc_coeff { + __u32 c; + __u32 r; + __u32 g; + __u32 b; + __u32 rg; + __u32 gb; + __u32 rb; + __u32 rgb; +}; + +#define PCC_BEFORE (1 << 0) + +/** + * struct drm_msm_pcc - pcc feature structure + * @flags: for customizing operations. Values can be + * - PCC_BEFORE: Operate PCC using a 'before' arrangement + * @r: red coefficients. + * @g: green coefficients. + * @b: blue coefficients. + * @r_rr: second order coefficients + * @r_gg: second order coefficients + * @r_bb: second order coefficients + * @g_rr: second order coefficients + * @g_gg: second order coefficients + * @g_bb: second order coefficients + * @b_rr: second order coefficients + * @b_gg: second order coefficients + * @b_bb: second order coefficients + */ +#define DRM_MSM_PCC3 +struct drm_msm_pcc { + __u64 flags; + struct drm_msm_pcc_coeff r; + struct drm_msm_pcc_coeff g; + struct drm_msm_pcc_coeff b; + __u32 r_rr; + __u32 r_gg; + __u32 r_bb; + __u32 g_rr; + __u32 g_gg; + __u32 g_bb; + __u32 b_rr; + __u32 b_gg; + __u32 b_bb; +}; + +/* struct drm_msm_pa_vlut - picture adjustment vLUT structure + * flags: for customizing vlut operation + * val: vLUT values + */ +#define PA_VLUT_SIZE 256 +struct drm_msm_pa_vlut { + __u64 flags; + __u32 val[PA_VLUT_SIZE]; +}; + +#define PA_HSIC_HUE_ENABLE (1 << 0) +#define PA_HSIC_SAT_ENABLE (1 << 1) +#define PA_HSIC_VAL_ENABLE (1 << 2) +#define PA_HSIC_CONT_ENABLE (1 << 3) +/** + * struct drm_msm_pa_hsic - pa hsic feature structure + * @flags: flags for the feature customization, values can be: + * - PA_HSIC_HUE_ENABLE: Enable hue adjustment + * - PA_HSIC_SAT_ENABLE: Enable saturation adjustment + * - PA_HSIC_VAL_ENABLE: Enable value adjustment + * - PA_HSIC_CONT_ENABLE: Enable contrast adjustment + * + * @hue: hue setting + * @saturation: saturation setting + * @value: value setting + * @contrast: contrast setting + */ +#define DRM_MSM_PA_HSIC +struct drm_msm_pa_hsic { + __u64 flags; + __u32 hue; + __u32 saturation; + __u32 value; + __u32 contrast; 
+}; + +#define MEMCOL_PROT_HUE (1 << 0) +#define MEMCOL_PROT_SAT (1 << 1) +#define MEMCOL_PROT_VAL (1 << 2) +#define MEMCOL_PROT_CONT (1 << 3) +#define MEMCOL_PROT_SIXZONE (1 << 4) +#define MEMCOL_PROT_BLEND (1 << 5) +/* struct drm_msm_memcol - Memory color feature structure. + * Skin, sky, foliage features are supported. + * @prot_flags: Bit mask for enabling protection feature. + * @color_adjust_p0: Adjustment curve. + * @color_adjust_p1: Adjustment curve. + * @color_adjust_p2: Adjustment curve. + * @blend_gain: Blend gain weightage from othe PA features. + * @sat_hold: Saturation hold value. + * @val_hold: Value hold info. + * @hue_region: Hue qualifier. + * @sat_region: Saturation qualifier. + * @val_region: Value qualifier. + */ +#define DRM_MSM_MEMCOL +struct drm_msm_memcol { + __u64 prot_flags; + __u32 color_adjust_p0; + __u32 color_adjust_p1; + __u32 color_adjust_p2; + __u32 blend_gain; + __u32 sat_hold; + __u32 val_hold; + __u32 hue_region; + __u32 sat_region; + __u32 val_region; +}; + +#define DRM_MSM_SIXZONE +#define SIXZONE_LUT_SIZE 384 +#define SIXZONE_HUE_ENABLE (1 << 0) +#define SIXZONE_SAT_ENABLE (1 << 1) +#define SIXZONE_VAL_ENABLE (1 << 2) +#define SIXZONE_SV_ENABLE (1 << 3) +/* struct drm_msm_sixzone_curve - Sixzone HSV adjustment curve structure. + * @p0: Hue adjustment. + * @p1: Saturation/Value adjustment. + */ +struct drm_msm_sixzone_curve { + __u32 p1; + __u32 p0; +}; + +/* struct drm_msm_sixzone - Sixzone feature structure. + * @flags: for feature customization, values can be: + * - SIXZONE_HUE_ENABLE: Enable hue adjustment + * - SIXZONE_SAT_ENABLE: Enable saturation adjustment + * - SIXZONE_VAL_ENABLE: Enable value adjustment + * - SIXZONE_SV_ENABLE: Enable SV feature + * @threshold: threshold qualifier. + * @adjust_p0: Adjustment curve. + * @adjust_p1: Adjustment curve. + * @sat_hold: Saturation hold info. + * @val_hold: Value hold info. + * @curve: HSV adjustment curve lut. + * @sat_adjust_p0: Saturation adjustment curve. 
+ * @sat_adjust_p1: Saturation adjustment curve. + * @curve_p2: Saturation Mid/Saturation High adjustment + */ +struct drm_msm_sixzone { + __u64 flags; + __u32 threshold; + __u32 adjust_p0; + __u32 adjust_p1; + __u32 sat_hold; + __u32 val_hold; + struct drm_msm_sixzone_curve curve[SIXZONE_LUT_SIZE]; + __u32 sat_adjust_p0; + __u32 sat_adjust_p1; + __u32 curve_p2[SIXZONE_LUT_SIZE]; +}; + +#define GAMUT_3D_MODE_17 1 +#define GAMUT_3D_MODE_5 2 +#define GAMUT_3D_MODE_13 3 + +#define GAMUT_3D_MODE17_TBL_SZ 1229 +#define GAMUT_3D_MODE5_TBL_SZ 32 +#define GAMUT_3D_MODE13_TBL_SZ 550 +#define GAMUT_3D_SCALE_OFF_SZ 16 +#define GAMUT_3D_SCALEB_OFF_SZ 12 +#define GAMUT_3D_TBL_NUM 4 +#define GAMUT_3D_SCALE_OFF_TBL_NUM 3 +#define GAMUT_3D_MAP_EN (1 << 0) + +/** + * struct drm_msm_3d_col - 3d gamut color component structure + * @c0: Holds c0 value + * @c2_c1: Holds c2/c1 values + */ +struct drm_msm_3d_col { + __u32 c2_c1; + __u32 c0; +}; +/** + * struct drm_msm_3d_gamut - 3d gamut feature structure + * @flags: flags for the feature values are: + * 0 - no map + * GAMUT_3D_MAP_EN - enable map + * @mode: lut mode can take following values: + * - GAMUT_3D_MODE_17 + * - GAMUT_3D_MODE_5 + * - GAMUT_3D_MODE_13 + * @scale_off: Scale offset table + * @col: Color component tables + */ +struct drm_msm_3d_gamut { + __u64 flags; + __u32 mode; + __u32 scale_off[GAMUT_3D_SCALE_OFF_TBL_NUM][GAMUT_3D_SCALE_OFF_SZ]; + struct drm_msm_3d_col col[GAMUT_3D_TBL_NUM][GAMUT_3D_MODE17_TBL_SZ]; +}; + +#define PGC_TBL_LEN 512 +#define PGC_8B_ROUND (1 << 0) +/** + * struct drm_msm_pgc_lut - pgc lut feature structure + * @flags: flags for the featue values can be: + * - PGC_8B_ROUND + * @c0: color0 component lut + * @c1: color1 component lut + * @c2: color2 component lut + */ +struct drm_msm_pgc_lut { + __u64 flags; + __u32 c0[PGC_TBL_LEN]; + __u32 c1[PGC_TBL_LEN]; + __u32 c2[PGC_TBL_LEN]; +}; + +#define IGC_TBL_LEN 256 +#define IGC_DITHER_ENABLE (1 << 0) +/** + * struct drm_msm_igc_lut - igc lut feature 
structure + * @flags: flags for the feature customization, values can be: + * - IGC_DITHER_ENABLE: Enable dither functionality + * @c0: color0 component lut + * @c1: color1 component lut + * @c2: color2 component lut + * @strength: dither strength, considered valid when IGC_DITHER_ENABLE + * is set in flags. Strength value based on source bit width. + * @c0_last: color0 lut_last component + * @c1_last: color1 lut_last component + * @c2_last: color2 lut_last component + */ +struct drm_msm_igc_lut { + __u64 flags; + __u32 c0[IGC_TBL_LEN]; + __u32 c1[IGC_TBL_LEN]; + __u32 c2[IGC_TBL_LEN]; + __u32 strength; + __u32 c0_last; + __u32 c1_last; + __u32 c2_last; +}; +#define LAST_LUT 2 + +#define HIST_V_SIZE 256 +/** + * struct drm_msm_hist - histogram feature structure + * @flags: for customizing operations + * @data: histogram data + */ +struct drm_msm_hist { + __u64 flags; + __u32 data[HIST_V_SIZE]; +}; + +#define AD4_LUT_GRP0_SIZE 33 +#define AD4_LUT_GRP1_SIZE 32 +/* + * struct drm_msm_ad4_init - ad4 init structure set by user-space client. + * Init param values can change based on tuning + * hence it is passed by user-space clients. 
+ */ +struct drm_msm_ad4_init { + __u32 init_param_001[AD4_LUT_GRP0_SIZE]; + __u32 init_param_002[AD4_LUT_GRP0_SIZE]; + __u32 init_param_003[AD4_LUT_GRP0_SIZE]; + __u32 init_param_004[AD4_LUT_GRP0_SIZE]; + __u32 init_param_005[AD4_LUT_GRP1_SIZE]; + __u32 init_param_006[AD4_LUT_GRP1_SIZE]; + __u32 init_param_007[AD4_LUT_GRP0_SIZE]; + __u32 init_param_008[AD4_LUT_GRP0_SIZE]; + __u32 init_param_009; + __u32 init_param_010; + __u32 init_param_011; + __u32 init_param_012; + __u32 init_param_013; + __u32 init_param_014; + __u32 init_param_015; + __u32 init_param_016; + __u32 init_param_017; + __u32 init_param_018; + __u32 init_param_019; + __u32 init_param_020; + __u32 init_param_021; + __u32 init_param_022; + __u32 init_param_023; + __u32 init_param_024; + __u32 init_param_025; + __u32 init_param_026; + __u32 init_param_027; + __u32 init_param_028; + __u32 init_param_029; + __u32 init_param_030; + __u32 init_param_031; + __u32 init_param_032; + __u32 init_param_033; + __u32 init_param_034; + __u32 init_param_035; + __u32 init_param_036; + __u32 init_param_037; + __u32 init_param_038; + __u32 init_param_039; + __u32 init_param_040; + __u32 init_param_041; + __u32 init_param_042; + __u32 init_param_043; + __u32 init_param_044; + __u32 init_param_045; + __u32 init_param_046; + __u32 init_param_047; + __u32 init_param_048; + __u32 init_param_049; + __u32 init_param_050; + __u32 init_param_051; + __u32 init_param_052; + __u32 init_param_053; + __u32 init_param_054; + __u32 init_param_055; + __u32 init_param_056; + __u32 init_param_057; + __u32 init_param_058; + __u32 init_param_059; + __u32 init_param_060; + __u32 init_param_061; + __u32 init_param_062; + __u32 init_param_063; + __u32 init_param_064; + __u32 init_param_065; + __u32 init_param_066; + __u32 init_param_067; + __u32 init_param_068; + __u32 init_param_069; + __u32 init_param_070; + __u32 init_param_071; + __u32 init_param_072; + __u32 init_param_073; + __u32 init_param_074; + __u32 init_param_075; +}; + +/* + * 
struct drm_msm_ad4_cfg - ad4 config structure set by user-space client. + * Config param values can vary based on tuning, + * hence it is passed by user-space clients. + */ +struct drm_msm_ad4_cfg { + __u32 cfg_param_001; + __u32 cfg_param_002; + __u32 cfg_param_003; + __u32 cfg_param_004; + __u32 cfg_param_005; + __u32 cfg_param_006; + __u32 cfg_param_007; + __u32 cfg_param_008; + __u32 cfg_param_009; + __u32 cfg_param_010; + __u32 cfg_param_011; + __u32 cfg_param_012; + __u32 cfg_param_013; + __u32 cfg_param_014; + __u32 cfg_param_015; + __u32 cfg_param_016; + __u32 cfg_param_017; + __u32 cfg_param_018; + __u32 cfg_param_019; + __u32 cfg_param_020; + __u32 cfg_param_021; + __u32 cfg_param_022; + __u32 cfg_param_023; + __u32 cfg_param_024; + __u32 cfg_param_025; + __u32 cfg_param_026; + __u32 cfg_param_027; + __u32 cfg_param_028; + __u32 cfg_param_029; + __u32 cfg_param_030; + __u32 cfg_param_031; + __u32 cfg_param_032; + __u32 cfg_param_033; + __u32 cfg_param_034; + __u32 cfg_param_035; + __u32 cfg_param_036; + __u32 cfg_param_037; + __u32 cfg_param_038; + __u32 cfg_param_039; + __u32 cfg_param_040; + __u32 cfg_param_041; + __u32 cfg_param_042; + __u32 cfg_param_043; + __u32 cfg_param_044; + __u32 cfg_param_045; + __u32 cfg_param_046; + __u32 cfg_param_047; + __u32 cfg_param_048; + __u32 cfg_param_049; + __u32 cfg_param_050; + __u32 cfg_param_051; + __u32 cfg_param_052; + __u32 cfg_param_053; +}; + +#define DITHER_MATRIX_SZ 16 +#define DITHER_LUMA_MODE (1 << 0) + +/** + * struct drm_msm_dither - dither feature structure + * @flags: flags for the feature customization, values can be: + -DITHER_LUMA_MODE: Enable LUMA dither mode + * @temporal_en: temperal dither enable + * @c0_bitdepth: c0 component bit depth + * @c1_bitdepth: c1 component bit depth + * @c2_bitdepth: c2 component bit depth + * @c3_bitdepth: c2 component bit depth + * @matrix: dither strength matrix + */ +struct drm_msm_dither { + __u64 flags; + __u32 temporal_en; + __u32 c0_bitdepth; + __u32 
c1_bitdepth; + __u32 c2_bitdepth; + __u32 c3_bitdepth; + __u32 matrix[DITHER_MATRIX_SZ]; +}; + +/** + * struct drm_msm_pa_dither - dspp dither feature structure + * @flags: for customizing operations + * @strength: dither strength + * @offset_en: offset enable bit + * @matrix: dither data matrix + */ +#define DRM_MSM_PA_DITHER +struct drm_msm_pa_dither { + __u64 flags; + __u32 strength; + __u32 offset_en; + __u32 matrix[DITHER_MATRIX_SZ]; +}; + +/** + * struct drm_msm_ad4_roi_cfg - ad4 roi params config set + * by user-space client. + * @h_x - hotizontal direction start + * @h_y - hotizontal direction end + * @v_x - vertical direction start + * @v_y - vertical direction end + * @factor_in - the alpha value for inside roi region + * @factor_out - the alpha value for outside roi region + */ +#define DRM_MSM_AD4_ROI +struct drm_msm_ad4_roi_cfg { + __u32 h_x; + __u32 h_y; + __u32 v_x; + __u32 v_y; + __u32 factor_in; + __u32 factor_out; +}; + +#define LTM_FEATURE_DEF 1 +#define LTM_DATA_SIZE_0 32 +#define LTM_DATA_SIZE_1 128 +#define LTM_DATA_SIZE_2 256 +#define LTM_DATA_SIZE_3 33 +#define LTM_BUFFER_SIZE 5 +#define LTM_GUARD_BYTES 255 +#define LTM_BLOCK_SIZE 4 + +#define LTM_STATS_SAT (1 << 1) +#define LTM_STATS_MERGE_SAT (1 << 2) +#define LTM_HIST_CHECKSUM_SUPPORT (1 << 0) + +/* + * struct drm_msm_ltm_stats_data - LTM stats data structure + */ +struct drm_msm_ltm_stats_data { + __u32 stats_01[LTM_DATA_SIZE_0][LTM_DATA_SIZE_1]; + __u32 stats_02[LTM_DATA_SIZE_2]; + __u32 stats_03[LTM_DATA_SIZE_0]; + __u32 stats_04[LTM_DATA_SIZE_0]; + __u32 stats_05[LTM_DATA_SIZE_0]; + __u32 status_flag; + __u32 display_h; + __u32 display_v; + __u32 init_h[LTM_BLOCK_SIZE]; + __u32 init_v; + __u32 inc_h; + __u32 inc_v; + __u32 portrait_en; + __u32 merge_en; + __u32 cfg_param_01; + __u32 cfg_param_02; + __u32 cfg_param_03; + __u32 cfg_param_04; + __u32 feature_flag; + __u32 checksum; +}; + +/* + * struct drm_msm_ltm_init_param - LTM init param structure + */ +struct drm_msm_ltm_init_param 
{ + __u32 init_param_01; + __u32 init_param_02; + __u32 init_param_03; + __u32 init_param_04; +}; + +/* + * struct drm_msm_ltm_cfg_param - LTM config param structure + */ +struct drm_msm_ltm_cfg_param { + __u32 cfg_param_01; + __u32 cfg_param_02; + __u32 cfg_param_03; + __u32 cfg_param_04; + __u32 cfg_param_05; + __u32 cfg_param_06; +}; + +/* + * struct drm_msm_ltm_data - LTM data structure + */ +struct drm_msm_ltm_data { + __u32 data[LTM_DATA_SIZE_0][LTM_DATA_SIZE_3]; +}; + +/* + * struct drm_msm_ltm_buffers_crtl - LTM buffer control structure. + * This struct will be used to init and + * de-init the LTM buffers in driver. + * @num_of_buffers: valid number of buffers used + * @fds: fd array to for all the valid buffers + */ +struct drm_msm_ltm_buffers_ctrl { + __u32 num_of_buffers; + __u32 fds[LTM_BUFFER_SIZE]; +}; + +/* + * struct drm_msm_ltm_buffer - LTM buffer structure. + * This struct will be passed from driver to user + * space for LTM stats data notification. + * @fd: fd assicated with the buffer that has LTM stats data + * @offset: offset from base address that used for alignment + * @status status flag for error indication + */ +struct drm_msm_ltm_buffer { + __u32 fd; + __u32 offset; + __u32 status; +}; + +#define SPR_INIT_PARAM_SIZE_1 4 +#define SPR_INIT_PARAM_SIZE_2 5 +#define SPR_INIT_PARAM_SIZE_3 16 +#define SPR_INIT_PARAM_SIZE_4 24 +#define SPR_INIT_PARAM_SIZE_5 32 +#define SPR_INIT_PARAM_SIZE_6 7 +#define SPR_FLAG_BYPASS (1 << 0) + +/** + * struct drm_msm_spr_init_cfg - SPR initial configuration structure + */ +struct drm_msm_spr_init_cfg { + __u64 flags; + __u16 cfg0; + __u16 cfg1; + __u16 cfg2; + __u16 cfg3; + __u16 cfg4; + __u16 cfg5; + __u16 cfg6; + __u16 cfg7; + __u16 cfg8; + __u16 cfg9; + __u32 cfg10; + __u16 cfg11[SPR_INIT_PARAM_SIZE_1]; + __u16 cfg12[SPR_INIT_PARAM_SIZE_1]; + __u16 cfg13[SPR_INIT_PARAM_SIZE_1]; + __u16 cfg14[SPR_INIT_PARAM_SIZE_2]; + __u16 cfg15[SPR_INIT_PARAM_SIZE_5]; + int cfg16[SPR_INIT_PARAM_SIZE_3]; + int 
cfg17[SPR_INIT_PARAM_SIZE_4]; + __u16 cfg18_en; + __u8 cfg18[SPR_INIT_PARAM_SIZE_6]; +}; + +/** + * struct drm_msm_spr_udc_cfg - SPR UDC configuration structure + */ + +#define SPR_UDC_PARAM_SIZE_1 27 +#define SPR_UDC_PARAM_SIZE_2 1536 +struct drm_msm_spr_udc_cfg { + __u64 flags; + __u16 init_cfg4; + __u16 init_cfg11[SPR_INIT_PARAM_SIZE_1]; + __u16 cfg1[SPR_UDC_PARAM_SIZE_1]; + __u16 cfg2[SPR_UDC_PARAM_SIZE_2]; +}; + + +#define FEATURE_DEM +#define CFG0_PARAM_LEN 8 +#define CFG1_PARAM_LEN 8 +#define CFG1_PARAM0_LEN 153 +#define CFG0_PARAM2_LEN 256 +#define CFG5_PARAM01_LEN 4 +#define CFG3_PARAM01_LEN 4 +#define DEMURA_FLAG_0 (1 << 0) +#define DEMURA_FLAG_1 (1 << 1) +#define DEMURA_FLAG_2 (3 << 2) +#define DEMURA_SKIP_CFG0_PARAM2 (1 << 4) +#define DEMURA_PRECISION_0 (0 << 2) +#define DEMURA_PRECISION_1 (1 << 2) +#define DEMURA_PRECISION_2 (2 << 2) + +struct drm_msm_dem_cfg { + __u64 flags; + __u32 pentile; + __u32 cfg0_en; + __u32 cfg0_param0_len; + __u32 cfg0_param0[CFG0_PARAM_LEN]; + __u32 cfg0_param1_len; + __u32 cfg0_param1[CFG0_PARAM_LEN]; + __u32 cfg0_param2_len; + __u64 cfg0_param2_c0[CFG0_PARAM2_LEN]; + __u64 cfg0_param2_c1[CFG0_PARAM2_LEN]; + __u64 cfg0_param2_c2[CFG0_PARAM2_LEN]; + __u32 cfg0_param3_len; + __u32 cfg0_param3_c0[CFG0_PARAM_LEN]; + __u32 cfg0_param3_c1[CFG0_PARAM_LEN]; + __u32 cfg0_param3_c2[CFG0_PARAM_LEN]; + __u32 cfg0_param4_len; + __u32 cfg0_param4[CFG0_PARAM_LEN]; + + __u32 cfg1_en; + __u32 cfg1_high_idx; + __u32 cfg1_low_idx; + __u32 cfg01_param0_len; + __u32 cfg01_param0[CFG1_PARAM_LEN]; + __u32 cfg1_param0_len; + __u32 cfg1_param0_c0[CFG1_PARAM0_LEN]; + __u32 cfg1_param0_c1[CFG1_PARAM0_LEN]; + __u32 cfg1_param0_c2[CFG1_PARAM0_LEN]; + + __u32 cfg2_en; + __u32 cfg3_en; + __u32 cfg3_param0_len; + __u32 cfg3_param0_a[CFG3_PARAM01_LEN]; + __u32 cfg3_param0_b[CFG3_PARAM01_LEN]; + __u32 cfg3_ab_adj; + __u32 cfg4_en; + __u32 cfg5_en; + __u32 cfg5_param0_len; + __u32 cfg5_param0[CFG5_PARAM01_LEN]; + __u32 cfg5_param1_len; + __u32 
cfg5_param1[CFG5_PARAM01_LEN]; + + __u32 c0_depth; + __u32 c1_depth; + __u32 c2_depth; + __u32 src_id; + __u32 cfg0_param2_idx; +}; + +struct drm_msm_dem_cfg0_param2 { + __u32 cfg0_param2_len; + __u64 cfg0_param2_c0[CFG0_PARAM2_LEN]; + __u64 cfg0_param2_c1[CFG0_PARAM2_LEN]; + __u64 cfg0_param2_c2[CFG0_PARAM2_LEN]; +}; + +/** + * struct drm_msm_ad4_manual_str_cfg - ad4 manual strength config set + * by user-space client. + * @in_str - strength for inside roi region + * @out_str - strength for outside roi region + */ +#define DRM_MSM_AD4_MANUAL_STRENGTH +struct drm_msm_ad4_manual_str_cfg { + __u32 in_str; + __u32 out_str; +}; + +#define RC_DATA_SIZE_MAX 2720 +#define RC_CFG_SIZE_MAX 4 + +struct drm_msm_rc_mask_cfg { + __u64 flags; + __u32 cfg_param_01; + __u32 cfg_param_02; + __u32 cfg_param_03; + __u32 cfg_param_04[RC_CFG_SIZE_MAX]; + __u32 cfg_param_05[RC_CFG_SIZE_MAX]; + __u32 cfg_param_06[RC_CFG_SIZE_MAX]; + __u64 cfg_param_07; + __u32 cfg_param_08; + __u64 cfg_param_09[RC_DATA_SIZE_MAX]; + __u32 height; + __u32 width; +}; + +#define FP16_SUPPORTED +#define FP16_GC_FLAG_ALPHA_EN (1 << 0) + + /* FP16 GC mode options */ +#define FP16_GC_MODE_INVALID 0 +#define FP16_GC_MODE_SRGB 1 +#define FP16_GC_MODE_PQ 2 + +/** + * struct drm_msm_fp16_gc - FP16 GC configuration structure + * @in flags - Settings flags for FP16 GC + * @in mode - Gamma correction mode to use for FP16 GC + */ +struct drm_msm_fp16_gc { + __u64 flags; + __u64 mode; +}; + +/** + * struct drm_msm_fp16_csc - FP16 CSC configuration structure + * @in flags - Settings flags for FP16 CSC. Currently unused + * @in cfg_param_0_len - Length of data for cfg_param_0 + * @in cfg_param_0 - Data for param 0. Max size is FP16_CSC_CFG0_PARAM_LEN + * @in cfg_param_1_len - Length of data for cfg_param_1 + * @in cfg_param_1 - Data for param 1. 
Max size is FP16_CSC_CFG1_PARAM_LEN + */ +#define FP16_CSC_CFG0_PARAM_LEN 12 +#define FP16_CSC_CFG1_PARAM_LEN 8 +struct drm_msm_fp16_csc { + __u64 flags; + __u32 cfg_param_0_len; + __u32 cfg_param_0[FP16_CSC_CFG0_PARAM_LEN]; + __u32 cfg_param_1_len; + __u32 cfg_param_1[FP16_CSC_CFG1_PARAM_LEN]; +}; + +#define DIMMING_ENABLE (1 << 0) +#define DIMMING_MIN_BL_VALID (1 << 1) +struct drm_msm_backlight_info { + __u32 brightness_max; + __u32 brightness; + __u32 bl_level_max; + __u32 bl_level; + __u32 bl_scale; + __u32 bl_scale_sv; + __u32 status; + __u32 min_bl; + __u32 bl_scale_max; + __u32 bl_scale_sv_max; +}; + +#define DIMMING_BL_LUT_LEN 8192 +struct drm_msm_dimming_bl_lut { + __u32 length; + __u32 mapped_bl[DIMMING_BL_LUT_LEN]; +}; + +struct drm_msm_opr_value { + __u32 num_valid_opr; + __u32 opr_value[MAX_DSI_DISPLAY]; +}; + +#define SDE_MAX_ROI 4 +struct drm_msm_roi { + __u32 num_rects; + struct drm_clip_rect roi[SDE_MAX_ROI]; +}; + +struct drm_msm_misr_sign { + __u64 num_valid_misr; + struct drm_msm_roi roi_list; + __u64 misr_sign_value[MAX_DSI_DISPLAY]; +}; + +#define UCSC_SUPPORTED + +#define UCSC_CSC_CFG0_PARAM_LEN FP16_CSC_CFG0_PARAM_LEN +#define UCSC_CSC_CFG1_PARAM_LEN FP16_CSC_CFG1_PARAM_LEN + +typedef struct drm_msm_fp16_csc drm_msm_ucsc_csc; + +#endif /* _MSM_DRM_PP_H_ */ diff --git a/include/uapi/display/drm/sde_drm.h b/include/uapi/display/drm/sde_drm.h new file mode 100644 index 000000000..db82afd69 --- /dev/null +++ b/include/uapi/display/drm/sde_drm.h @@ -0,0 +1,974 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _SDE_DRM_H_ +#define _SDE_DRM_H_ + +#include <drm/drm.h> + +#if defined(__cplusplus) +extern "C" { +#endif + +/* Total number of supported color planes */ +#define SDE_MAX_PLANES 4 + +/* Total number of parameterized detail enhancer mapping curves */ +#define SDE_MAX_DE_CURVES 3 + + /* Y/RGB and UV filter configuration */ +#define FILTER_EDGE_DIRECTED_2D 0x0 +#define FILTER_CIRCULAR_2D 0x1 +#define FILTER_SEPARABLE_1D 0x2 +#define FILTER_BILINEAR 0x3 + +/* Alpha filters */ +#define FILTER_ALPHA_DROP_REPEAT 0x0 +#define FILTER_ALPHA_BILINEAR 0x1 +#define FILTER_ALPHA_2D 0x3 + +/* Blend filters */ +#define FILTER_BLEND_CIRCULAR_2D 0x0 +#define FILTER_BLEND_SEPARABLE_1D 0x1 + +/* LUT configuration flags */ +#define SCALER_LUT_SWAP 0x1 +#define SCALER_LUT_DIR_WR 0x2 +#define SCALER_LUT_Y_CIR_WR 0x4 +#define SCALER_LUT_UV_CIR_WR 0x8 +#define SCALER_LUT_Y_SEP_WR 0x10 +#define SCALER_LUT_UV_SEP_WR 0x20 + +/** + * DRM format modifier tokens + * + * @DRM_FORMAT_MOD_QCOM_DX: Refers to a DX variant of the base format. + * Implementation may be platform and + * base-format specific. + */ +#define DRM_FORMAT_MOD_QCOM_DX fourcc_mod_code(QCOM, 0x2) + +/** + * @DRM_FORMAT_MOD_QCOM_TIGHT: Refers to a tightly packed variant of the + * base variant. Implementation may be + * platform and base-format specific. + */ +#define DRM_FORMAT_MOD_QCOM_TIGHT fourcc_mod_code(QCOM, 0x4) + +/** + * @DRM_FORMAT_MOD_QCOM_TILE: Refers to a tile variant of the base format. + * Implementation may be platform and + * base-format specific. + */ +#define DRM_FORMAT_MOD_QCOM_TILE fourcc_mod_code(QCOM, 0x8) + +/** + * @DRM_FORMAT_MOD_QCOM_ALPHA_SWAP: Refers to a pixel format for which + * its alpha ordering has been reversed. + * Implementation may be platform and + * base-format specific. + */ +#define DRM_FORMAT_MOD_QCOM_ALPHA_SWAP fourcc_mod_code(QCOM, 0x10) + +/** + * Blend operations for "blend_op" property + * + * @SDE_DRM_BLEND_OP_NOT_DEFINED: No blend operation defined for the layer. 
+ * @SDE_DRM_BLEND_OP_OPAQUE: Apply a constant blend operation. The layer + * would appear opaque in case fg plane alpha + * is 0xff. + * @SDE_DRM_BLEND_OP_PREMULTIPLIED: Apply source over blend rule. Layer already + * has alpha pre-multiplication done. If the fg + * plane alpha is less than 0xff, apply + * modulation as well. This operation is + * intended on layers having alpha channel. + * @SDE_DRM_BLEND_OP_COVERAGE: Apply source over blend rule. Layer is not + * alpha pre-multiplied. Apply + * pre-multiplication. If fg plane alpha is + * less than 0xff, apply modulation as well. + * @SDE_DRM_BLEND_OP_MAX: Used to track maximum blend operation + * possible by mdp. + * @SDE_DRM_BLEND_OP_SKIP: Skip staging the layer in the layer mixer. + */ +#define SDE_DRM_BLEND_OP_NOT_DEFINED 0 +#define SDE_DRM_BLEND_OP_OPAQUE 1 +#define SDE_DRM_BLEND_OP_PREMULTIPLIED 2 +#define SDE_DRM_BLEND_OP_COVERAGE 3 +#define SDE_DRM_BLEND_OP_MAX 4 +#define SDE_DRM_BLEND_OP_SKIP 5 + +/** + * Bit masks for "src_config" property + * construct bitmask via (1UL << SDE_DRM_) + */ +#define SDE_DRM_DEINTERLACE 0 /* Specifies interlaced input */ + +/* DRM bitmasks are restricted to 0..63 */ +#define SDE_DRM_BITMASK_COUNT 64 + +/** + * Framebuffer modes for "fb_translation_mode" PLANE and CONNECTOR property + * + * @SDE_DRM_FB_NON_SEC: IOMMU configuration for this framebuffer mode + * is non-secure domain and requires + * both stage I and stage II translations when + * this buffer is accessed by the display HW. + * This is the default mode of all frambuffers. + * @SDE_DRM_FB_SEC: IOMMU configuration for this framebuffer mode + * is secure domain and requires + * both stage I and stage II translations when + * this buffer is accessed by the display HW. + * @SDE_DRM_FB_NON_SEC_DIR_TRANS: IOMMU configuration for this framebuffer mode + * is non-secure domain and requires + * only stage II translation when + * this buffer is accessed by the display HW. 
+ * @SDE_DRM_FB_SEC_DIR_TRANS: IOMMU configuration for this framebuffer mode + * is secure domain and requires + * only stage II translation when + * this buffer is accessed by the display HW. + */ + +#define SDE_DRM_FB_NON_SEC 0 +#define SDE_DRM_FB_SEC 1 +#define SDE_DRM_FB_NON_SEC_DIR_TRANS 2 +#define SDE_DRM_FB_SEC_DIR_TRANS 3 + +/** + * Secure levels for "security_level" CRTC property. + * CRTC property which specifies what plane types + * can be attached to this CRTC. Plane component + * derives the plane type based on the FB_MODE. + * @ SDE_DRM_SEC_NON_SEC: Both Secure and non-secure plane types can be + * attached to this CRTC. This is the default state of + * the CRTC. + * @ SDE_DRM_SEC_ONLY: Only secure planes can be added to this CRTC. If a + * CRTC is instructed to be in this mode it follows the + * platform dependent restrictions. + */ +#define SDE_DRM_SEC_NON_SEC 0 +#define SDE_DRM_SEC_ONLY 1 + +/** + * struct sde_drm_pix_ext_v1 - version 1 of pixel ext structure + * @num_ext_pxls_lr: Number of total horizontal pixels + * @num_ext_pxls_tb: Number of total vertical lines + * @left_ftch: Number of extra pixels to overfetch from left + * @right_ftch: Number of extra pixels to overfetch from right + * @top_ftch: Number of extra lines to overfetch from top + * @btm_ftch: Number of extra lines to overfetch from bottom + * @left_rpt: Number of extra pixels to repeat from left + * @right_rpt: Number of extra pixels to repeat from right + * @top_rpt: Number of extra lines to repeat from top + * @btm_rpt: Number of extra lines to repeat from bottom + */ +struct sde_drm_pix_ext_v1 { + /* + * Number of pixels ext in left, right, top and bottom direction + * for all color components. + */ + __s32 num_ext_pxls_lr[SDE_MAX_PLANES]; + __s32 num_ext_pxls_tb[SDE_MAX_PLANES]; + + /* + * Number of pixels needs to be overfetched in left, right, top + * and bottom directions from source image for scaling. 
+ */ + __s32 left_ftch[SDE_MAX_PLANES]; + __s32 right_ftch[SDE_MAX_PLANES]; + __s32 top_ftch[SDE_MAX_PLANES]; + __s32 btm_ftch[SDE_MAX_PLANES]; + /* + * Number of pixels needs to be repeated in left, right, top and + * bottom directions for scaling. + */ + __s32 left_rpt[SDE_MAX_PLANES]; + __s32 right_rpt[SDE_MAX_PLANES]; + __s32 top_rpt[SDE_MAX_PLANES]; + __s32 btm_rpt[SDE_MAX_PLANES]; + +}; + +/** + * struct sde_drm_scaler_v1 - version 1 of struct sde_drm_scaler + * @lr: Pixel extension settings for left/right + * @tb: Pixel extension settings for top/botton + * @init_phase_x: Initial scaler phase values for x + * @phase_step_x: Phase step values for x + * @init_phase_y: Initial scaler phase values for y + * @phase_step_y: Phase step values for y + * @horz_filter: Horizontal filter array + * @vert_filter: Vertical filter array + */ +struct sde_drm_scaler_v1 { + /* + * Pix ext settings + */ + struct sde_drm_pix_ext_v1 pe; + /* + * Phase settings + */ + __s32 init_phase_x[SDE_MAX_PLANES]; + __s32 phase_step_x[SDE_MAX_PLANES]; + __s32 init_phase_y[SDE_MAX_PLANES]; + __s32 phase_step_y[SDE_MAX_PLANES]; + + /* + * Filter type to be used for scaling in horizontal and vertical + * directions + */ + __u32 horz_filter[SDE_MAX_PLANES]; + __u32 vert_filter[SDE_MAX_PLANES]; +}; + +/** + * struct sde_drm_de_v1 - version 1 of detail enhancer structure + * @enable: Enables/disables detail enhancer + * @sharpen_level1: Sharpening strength for noise + * @sharpen_level2: Sharpening strength for context + * @clip: Clip coefficient + * @limit: Detail enhancer limit factor + * @thr_quiet: Quite zone threshold + * @thr_dieout: Die-out zone threshold + * @thr_low: Linear zone left threshold + * @thr_high: Linear zone right threshold + * @prec_shift: Detail enhancer precision + * @adjust_a: Mapping curves A coefficients + * @adjust_b: Mapping curves B coefficients + * @adjust_c: Mapping curves C coefficients + */ +struct sde_drm_de_v1 { + __u32 enable; + __s16 sharpen_level1; + __s16 
sharpen_level2; + __u16 clip; + __u16 limit; + __u16 thr_quiet; + __u16 thr_dieout; + __u16 thr_low; + __u16 thr_high; + __u16 prec_shift; + __s16 adjust_a[SDE_MAX_DE_CURVES]; + __s16 adjust_b[SDE_MAX_DE_CURVES]; + __s16 adjust_c[SDE_MAX_DE_CURVES]; +}; + +/* + * Scaler configuration flags + */ + +/* Disable dynamic expansion */ +#define SDE_DYN_EXP_DISABLE 0x1 + +#define SDE_DE_LPF_BLEND_FILT +#define SDE_DE_LPF_BLEND_FLAG_EN (1 << 0) + +#define SDE_DRM_QSEED3LITE +#define SDE_DRM_QSEED4 +#define SDE_DRM_INLINE_PREDOWNSCALE +#define SDE_DRM_QSEED6 + +/** + * struct sde_drm_scaler_v2 - version 2 of struct sde_drm_scaler + * @enable: Scaler enable + * @dir_en: Detail enhancer enable + * @pe: Pixel extension settings + * @horz_decimate: Horizontal decimation factor + * @vert_decimate: Vertical decimation factor + * @init_phase_x: Initial scaler phase values for x + * @phase_step_x: Phase step values for x + * @init_phase_y: Initial scaler phase values for y + * @phase_step_y: Phase step values for y + * @preload_x: Horizontal preload value + * @preload_y: Vertical preload value + * @src_width: Source width + * @src_height: Source height + * @dst_width: Destination width + * @dst_height: Destination height + * @y_rgb_filter_cfg: Y/RGB plane filter configuration + * @uv_filter_cfg: UV plane filter configuration + * @alpha_filter_cfg: Alpha filter configuration + * @blend_cfg: Selection of blend coefficients + * @lut_flag: LUT configuration flags + * @dir_lut_idx: 2d 4x4 LUT index + * @y_rgb_cir_lut_idx: Y/RGB circular LUT index + * @uv_cir_lut_idx: UV circular LUT index + * @y_rgb_sep_lut_idx: Y/RGB separable LUT index + * @uv_sep_lut_idx: UV separable LUT index + * @de: Detail enhancer settings + * @dir_weight: Directional Weight + * @unsharp_mask_blend: Unsharp Blend Filter Ratio + * @de_blend: Ratio of two unsharp mask filters + * @flags: Scaler configuration flags + * @pre_downscale_x_0 Pre-downscale ratio, x-direction, plane 0(Y/RGB) + * @pre_downscale_x_1 
Pre-downscale ratio, x-direction, plane 1(UV) + * @pre_downscale_y_0 Pre-downscale ratio, y-direction, plane 0(Y/RGB) + * @pre_downscale_y_1 Pre-downscale ratio, y-direction, plane 1(UV) + * @de_lpf_flags: Detail enhancer lpf blned configuration flags + * @de_lpf_h: Detail enhancer lpf blend high + * @de_lpf_l: Detail enhancer lpf blend low + * @de_lpf_m: Detail enhancer lpf blend medium + * @dir45_en: 45/-45 degree direction filtering enable + * @cor_en: corner enhancer enable + */ +struct sde_drm_scaler_v2 { + /* + * General definitions + */ + __u32 enable; + __u32 dir_en; + + /* + * Pix ext settings + */ + struct sde_drm_pix_ext_v1 pe; + + /* + * Decimation settings + */ + __u32 horz_decimate; + __u32 vert_decimate; + + /* + * Phase settings + */ + __s32 init_phase_x[SDE_MAX_PLANES]; + __s32 phase_step_x[SDE_MAX_PLANES]; + __s32 init_phase_y[SDE_MAX_PLANES]; + __s32 phase_step_y[SDE_MAX_PLANES]; + + __u32 preload_x[SDE_MAX_PLANES]; + __u32 preload_y[SDE_MAX_PLANES]; + __u32 src_width[SDE_MAX_PLANES]; + __u32 src_height[SDE_MAX_PLANES]; + + __u32 dst_width; + __u32 dst_height; + + __u32 y_rgb_filter_cfg; + __u32 uv_filter_cfg; + __u32 alpha_filter_cfg; + __u32 blend_cfg; + + __u32 lut_flag; + __u32 dir_lut_idx; + + /* for Y(RGB) and UV planes*/ + __u32 y_rgb_cir_lut_idx; + __u32 uv_cir_lut_idx; + __u32 y_rgb_sep_lut_idx; + __u32 uv_sep_lut_idx; + + /* + * Detail enhancer settings + */ + struct sde_drm_de_v1 de; + __u32 dir_weight; + __u32 unsharp_mask_blend; + __u32 de_blend; + __u32 flags; + + /* + * Inline pre-downscale settings + */ + __u32 pre_downscale_x_0; + __u32 pre_downscale_x_1; + __u32 pre_downscale_y_0; + __u32 pre_downscale_y_1; + + __u32 de_lpf_flags; + __u32 de_lpf_h; + __u32 de_lpf_l; + __u32 de_lpf_m; + __u32 dir45_en; + __u32 cor_en; +}; + +/* Number of dest scalers supported */ +#define SDE_MAX_DS_COUNT 4 + +/* + * Destination scaler flag config + */ +#define SDE_DRM_DESTSCALER_ENABLE 0x1 +#define SDE_DRM_DESTSCALER_SCALE_UPDATE 0x2 +#define 
SDE_DRM_DESTSCALER_ENHANCER_UPDATE 0x4 +#define SDE_DRM_DESTSCALER_PU_ENABLE 0x8 + +/** + * struct sde_drm_dest_scaler_cfg - destination scaler config structure + * @flags: Flag to switch between mode for destination scaler + * refer to destination scaler flag config + * @index: Destination scaler selection index + * @lm_width: Layer mixer width configuration + * @lm_height: Layer mixer height configuration + * @scaler_cfg: The scaling parameters for all the mode except disable + * Userspace pointer to struct sde_drm_scaler_v2 + */ +struct sde_drm_dest_scaler_cfg { + __u32 flags; + __u32 index; + __u32 lm_width; + __u32 lm_height; + __u64 scaler_cfg; +}; + +/** + * struct sde_drm_dest_scaler_data - destination scaler data struct + * @num_dest_scaler: Number of dest scalers to be configured + * @ds_cfg: Destination scaler block configuration + */ +struct sde_drm_dest_scaler_data { + __u32 num_dest_scaler; + struct sde_drm_dest_scaler_cfg ds_cfg[SDE_MAX_DS_COUNT]; +}; + +/* + * Define constants for struct sde_drm_csc + */ +#define SDE_CSC_MATRIX_COEFF_SIZE 9 +#define SDE_CSC_CLAMP_SIZE 6 +#define SDE_CSC_BIAS_SIZE 3 + +/** + * struct sde_drm_csc_v1 - version 1 of struct sde_drm_csc + * @ctm_coeff: Matrix coefficients, in S31.32 format + * @pre_bias: Pre-bias array values + * @post_bias: Post-bias array values + * @pre_clamp: Pre-clamp array values + * @post_clamp: Post-clamp array values + */ +struct sde_drm_csc_v1 { + __s64 ctm_coeff[SDE_CSC_MATRIX_COEFF_SIZE]; + __u32 pre_bias[SDE_CSC_BIAS_SIZE]; + __u32 post_bias[SDE_CSC_BIAS_SIZE]; + __u32 pre_clamp[SDE_CSC_CLAMP_SIZE]; + __u32 post_clamp[SDE_CSC_CLAMP_SIZE]; +}; + +/** + * struct sde_drm_color - struct to store the color and alpha values + * @color_0: Color 0 value + * @color_1: Color 1 value + * @color_2: Color 2 value + * @color_3: Color 3 value + */ +struct sde_drm_color { + __u32 color_0; + __u32 color_1; + __u32 color_2; + __u32 color_3; +}; + +/* Total number of supported dim layers */ +#define 
SDE_MAX_DIM_LAYERS 7 + +/* SDE_DRM_DIM_LAYER_CONFIG_FLAG - flags for Dim Layer */ +/* Color fill inside of the rect, including border */ +#define SDE_DRM_DIM_LAYER_INCLUSIVE 0x1 +/* Color fill outside of the rect, excluding border */ +#define SDE_DRM_DIM_LAYER_EXCLUSIVE 0x2 + + /* bitmask for allowed_dsc_reservation_switch property */ +#define SDE_DP_DSC_RESERVATION_SWITCH (1 << 0) + +/** + * struct sde_drm_dim_layer - dim layer cfg struct + * @flags: Refer SDE_DRM_DIM_LAYER_CONFIG_FLAG for possible values + * @stage: Blending stage of the dim layer + * @color_fill: Color fill for dim layer + * @rect: Dim layer coordinates + */ +struct sde_drm_dim_layer_cfg { + __u32 flags; + __u32 stage; + struct sde_drm_color color_fill; + struct drm_clip_rect rect; +}; + +/** + * struct sde_drm_dim_layer_v1 - version 1 of dim layer struct + * @num_layers: Numer of Dim Layers + * @layer: Dim layer user cfgs ptr for the num_layers + */ +struct sde_drm_dim_layer_v1 { + __u32 num_layers; + struct sde_drm_dim_layer_cfg layer_cfg[SDE_MAX_DIM_LAYERS]; +}; + +/* Writeback Config version definition */ +#define SDE_DRM_WB_CFG 0x1 + +/* SDE_DRM_WB_CONFIG_FLAGS - Writeback configuration flags */ +#define SDE_DRM_WB_CFG_FLAGS_CONNECTED (1<<0) + +/** + * struct sde_drm_wb_cfg - Writeback configuration structure + * @flags: see DRM_MSM_WB_CONFIG_FLAGS + * @connector_id: writeback connector identifier + * @count_modes: Count of modes in modes_ptr + * @modes: Pointer to struct drm_mode_modeinfo + */ +struct sde_drm_wb_cfg { + __u32 flags; + __u32 connector_id; + __u32 count_modes; + __u64 modes; +}; + +#define SDE_MAX_ROI_V1 4 +#define SDE_DRM_SPR_ROI 1 +/* DRM_ROI_CONFIG_FLAGS */ +#define SDE_DRM_ROI_SPR_FLAG_EN (1 << 0) + +/** + * struct sde_drm_roi_v1 - list of regions of interest for a drm object + * @num_rects: number of valid rectangles in the roi array + * @roi: list of roi rectangles + * @roi_feature_flags: flags indicates that specific roi rect is valid or not + * @spr_roi: list of roi 
rectangles for spr + */ +struct sde_drm_roi_v1 { + __u32 num_rects; + struct drm_clip_rect roi[SDE_MAX_ROI_V1]; + __u32 roi_feature_flags; + struct drm_clip_rect spr_roi[SDE_MAX_ROI_V1]; +}; + +/** + * Define extended power modes supported by the SDE connectors. + */ +#define SDE_MODE_DPMS_ON 0 +#define SDE_MODE_DPMS_LP1 1 +#define SDE_MODE_DPMS_LP2 2 +#define SDE_MODE_DPMS_STANDBY 3 +#define SDE_MODE_DPMS_SUSPEND 4 +#define SDE_MODE_DPMS_OFF 5 + +/** + * sde recovery events for notifying client + */ +#define SDE_RECOVERY_SUCCESS 0 +#define SDE_RECOVERY_CAPTURE 1 +#define SDE_RECOVERY_HARD_RESET 2 + +/** + * Define UBWC statistics config + */ +#define UBWC_STATS_MAX_ROI 0x3 + +/** + * struct sde_drm_ubwc_stats_roi - region of interest for ubwc stats + * y_coord0: first y offset from top of display + * y_coord1: second y offset from top of display + */ +struct sde_drm_ubwc_stats_roi { + __u16 y_coord0; + __u16 y_coord1; +}; + +/** + * struct sde_drm_ubwc_stats_data: ubwc statistics + * roi: region of interest + * worst_bw: worst bandwidth, per roi + * worst_bw_y_coord: y offset (row) location of worst bandwidth, per roi + * total_bw: total bandwidth, per roi + * error: error status + * meta_error: meta error data + */ +struct sde_drm_ubwc_stats_data { + struct sde_drm_ubwc_stats_roi roi; + __u16 worst_bw[UBWC_STATS_MAX_ROI]; + __u16 worst_bw_y_coord[UBWC_STATS_MAX_ROI]; + __u32 total_bw[UBWC_STATS_MAX_ROI]; + __u32 error; + __u32 meta_error; +}; + +/** + * Define frame data config + */ +#define SDE_FRAME_DATA_BUFFER_MAX 0x3 +#define SDE_FRAME_DATA_GUARD_BYTES 0xFF +#define SDE_FRAME_DATA_MAX_PLANES 0x14 + +/** + * struct sde_drm_frame_data_buffers_ctrl - control frame data buffers + * num_buffers: number of allocated buffers + * fds: fd list for allocated buffers + */ +struct sde_drm_frame_data_buffers_ctrl { + __u32 num_buffers; + __u32 fds[SDE_FRAME_DATA_BUFFER_MAX]; +}; + +/** + * struct sde_drm_frame_data_buf - frame data buffer info sent to userspace + * fd: 
buffer fd + * offset: offset from buffer address + * status: status flag + */ +struct sde_drm_frame_data_buf { + __u32 fd; + __u32 offset; + __u32 status; +}; + +/** + * struct sde_drm_plane_frame_data - definition of plane frame data struct + * plane_id: drm plane id + * ubwc_stats: ubwc statistics + */ +struct sde_drm_plane_frame_data { + __u32 plane_id; + + struct sde_drm_ubwc_stats_data ubwc_stats; +}; + +/** + * struct sde_drm_frame_data_packet - definition of frame data struct + * frame_count: interface frame count + * commit_count: sw commit count + * plane_frame_data: data available per plane + */ +struct sde_drm_frame_data_packet { + __u32 frame_count; + __u64 commit_count; + + struct sde_drm_plane_frame_data plane_frame_data[SDE_FRAME_DATA_MAX_PLANES]; +}; + +/* + * Colorimetry Data Block values + * These bit nums are defined as per the CTA spec + * and indicate the colorspaces supported by the sink + */ +#define DRM_EDID_CLRMETRY_xvYCC_601 (1 << 0) +#define DRM_EDID_CLRMETRY_xvYCC_709 (1 << 1) +#define DRM_EDID_CLRMETRY_sYCC_601 (1 << 2) +#define DRM_EDID_CLRMETRY_ADOBE_YCC_601 (1 << 3) +#define DRM_EDID_CLRMETRY_ADOBE_RGB (1 << 4) +#define DRM_EDID_CLRMETRY_BT2020_CYCC (1 << 5) +#define DRM_EDID_CLRMETRY_BT2020_YCC (1 << 6) +#define DRM_EDID_CLRMETRY_BT2020_RGB (1 << 7) +#define DRM_EDID_CLRMETRY_DCI_P3 (1 << 15) + +/* + * HDR Metadata + * These are defined as per EDID spec and shall be used by the sink + * to set the HDR metadata for playback from userspace. 
+ */ + +#define HDR_PRIMARIES_COUNT 3 + +/* HDR EOTF */ +#define HDR_EOTF_SDR_LUM_RANGE 0x0 +#define HDR_EOTF_HDR_LUM_RANGE 0x1 +#define HDR_EOTF_SMTPE_ST2084 0x2 +#define HDR_EOTF_HLG 0x3 + +#define DRM_MSM_EXT_HDR_METADATA +#define DRM_MSM_EXT_HDR_PLUS_METADATA +struct drm_msm_ext_hdr_metadata { + __u32 hdr_state; /* HDR state */ + __u32 eotf; /* electro optical transfer function */ + __u32 hdr_supported; /* HDR supported */ + __u32 display_primaries_x[HDR_PRIMARIES_COUNT]; /* Primaries x */ + __u32 display_primaries_y[HDR_PRIMARIES_COUNT]; /* Primaries y */ + __u32 white_point_x; /* white_point_x */ + __u32 white_point_y; /* white_point_y */ + __u32 max_luminance; /* Max luminance */ + __u32 min_luminance; /* Min Luminance */ + __u32 max_content_light_level; /* max content light level */ + __u32 max_average_light_level; /* max average light level */ + + __u64 hdr_plus_payload; /* user pointer to dynamic HDR payload */ + __u32 hdr_plus_payload_size;/* size of dynamic HDR payload data */ +}; + +/** + * HDR sink properties + * These are defined as per EDID spec and shall be used by the userspace + * to determine the HDR properties to be set to the sink. 
+ */ +#define DRM_MSM_EXT_HDR_PROPERTIES +#define DRM_MSM_EXT_HDR_PLUS_PROPERTIES +struct drm_msm_ext_hdr_properties { + __u8 hdr_metadata_type_one; /* static metadata type one */ + __u32 hdr_supported; /* HDR supported */ + __u32 hdr_eotf; /* electro optical transfer function */ + __u32 hdr_max_luminance; /* Max luminance */ + __u32 hdr_avg_luminance; /* Avg luminance */ + __u32 hdr_min_luminance; /* Min Luminance */ + + __u32 hdr_plus_supported; /* HDR10+ supported */ +}; + +/* HDR WRGB x and y index */ +#define DISPLAY_PRIMARIES_WX 0 +#define DISPLAY_PRIMARIES_WY 1 +#define DISPLAY_PRIMARIES_RX 2 +#define DISPLAY_PRIMARIES_RY 3 +#define DISPLAY_PRIMARIES_GX 4 +#define DISPLAY_PRIMARIES_GY 5 +#define DISPLAY_PRIMARIES_BX 6 +#define DISPLAY_PRIMARIES_BY 7 +#define DISPLAY_PRIMARIES_MAX 8 + +struct drm_panel_hdr_properties { + __u32 hdr_enabled; + + /* WRGB X and y values arrayed in format */ + /* [WX, WY, RX, RY, GX, GY, BX, BY] */ + __u32 display_primaries[DISPLAY_PRIMARIES_MAX]; + + /* peak brightness supported by panel */ + __u32 peak_brightness; + /* Blackness level supported by panel */ + __u32 blackness_level; +}; + +/** + * struct drm_msm_event_req - Payload to event enable/disable ioctls. + * @object_id: DRM object id. e.g.: for crtc pass crtc id. + * @object_type: DRM object type. e.g.: for crtc set it to DRM_MODE_OBJECT_CRTC. + * @event: Event for which notification is being enabled/disabled. + * e.g.: for Histogram set - DRM_EVENT_HISTOGRAM. + * @client_context: Opaque pointer that will be returned during event response + * notification. + * @index: Object index(e.g.: crtc index), optional for user-space to set. + * Driver will override value based on object_id and object_type. + */ +struct drm_msm_event_req { + __u32 object_id; + __u32 object_type; + __u32 event; + __u64 client_context; + __u32 index; +}; + +/** + * struct drm_msm_event_resp - payload returned when read is called for + * custom notifications. 
+ * @base: Event type and length of complete notification payload. + * @info: Contains information about DRM that which raised this event. + * @data: Custom payload that driver returns for event type. + * size of data = base.length - (sizeof(base) + sizeof(info)) + */ +struct drm_msm_event_resp { + struct drm_event base; + struct drm_msm_event_req info; + __u8 data[]; +}; + +/** + * struct drm_msm_power_ctrl: Payload to enable/disable the power vote + * @enable: enable/disable the power vote + * @flags: operation control flags, for future use + */ +struct drm_msm_power_ctrl { + __u32 enable; + __u32 flags; +}; + +/** + * struct drm_msm_early_wakeup: Payload to early wake up display + * @wakeup_hint: early wakeup hint. + * @connector_id: connector id. e.g.: for connector pass connector id. + */ +struct drm_msm_early_wakeup { + __u32 wakeup_hint; + __u32 connector_id; +}; + +/** + * struct drm_msm_display_hint: Payload for display hint + * @hint_flags: display hint flags. + * @data: data struct. e.g.: for display hint parameter. + * Userspace pointer to struct base on hint flags. 
+ */ +struct drm_msm_display_hint { + __u64 data; + __u32 hint_flags; +}; + +#define DRM_NOISE_LAYER_CFG +#define DRM_NOISE_TEMPORAL_FLAG (1 << 0) +#define DRM_NOISE_ATTN_MAX 255 +#define DRM_NOISE_STREN_MAX 6 + +/** + * struct drm_msm_noise_layer_cfg: Payload to enable/disable noise blend + * @flags: operation control flags, for future use + * @zposn: noise zorder + * @zposattn: attenuation zorder + * @attn_factor: attenuation factor in range of 1 to 255 + * @stength: strength in range of 0 to 6 + * @alpha_noise: attenuation in range of 1 to 255 +*/ +struct drm_msm_noise_layer_cfg { + __u64 flags; + __u32 zposn; + __u32 zposattn; + __u32 attn_factor; + __u32 strength; + __u32 alpha_noise; +}; + +#define FEATURE_DNSC_BLUR +/* Downscale Blur - number of gaussian coefficient LUTs */ +#define DNSC_BLUR_COEF_NUM 64 + +/* Downscale Blur flags */ +#define DNSC_BLUR_EN (1 << 0) +#define DNSC_BLUR_RND_8B_EN (1 << 1) +#define DNSC_BLUR_DITHER_EN (1 << 2) + +#define DNSC_BLUR_MIRROR_BLK_CFG (1 << 16) +#define DNSC_BLUR_INDEPENDENT_BLK_CFG (1 << 17) + +/* Downscale Blur horizontal/vertical filter flags */ +#define DNSC_BLUR_GAUS_FILTER (1 << 0) +#define DNSC_BLUR_PCMN_FILTER (1 << 1) + +/* Downscale Blur Dither matrix size */ +#define DNSC_BLUR_DITHER_MATRIX_SZ 16 + +/* Downscale Blur Dither flags */ +#define DNSC_BLUR_DITHER_LUMA_MODE (1 << 0) + +/** + * struct sde_drm_dnsc_blur_cfg - Downscale Blur config structure + * @flags: Flags to indicate features enabled, values are + * based on "Downscale Blur flags" + * @num_blocks: Active dnsc_blur blocks used for the display + * @src_width: Source width configuration + * @src_height: Source height configuration + * @dst_width: Destination width configuration + * @dst_height: Destination height configuration + * @flags_h: Flags for horizontal downscaling, values are + * based on "Downscale Blur horizontal/vertical filter flags" + * @flags_v: Flags for veritcal downscaling + * @phase_init_h: Initial phase value for horizontal 
downscaling + * @phase_step_h: Phase step value for horizontal downscaling + * @phase_init_v: Initial phase value for vertical downscaling + * @phase_step_v: Phase step value for vertical downscaling + * @norm_h: Horizontal downscale normalization downshift value + * @ratio_h: Horizontal downscale ratio value + * @norm_v: Vertical downscale normalization downshift value + * @ratio_v: Vertical downscale ratio value + * @coef_hori: Horizontal downscale LUT coefficients + * @coef_vert: Vertical downscale LUT coefficients + * @dither_flags: Flags for dither customization, values are + * based on "Downscale Blur Dither flags" + * @temporal_en: Temperal dither enable + * @c0_bitdepth: c0 component bit depth + * @c1_bitdepth: c1 component bit depth + * @c2_bitdepth: c2 component bit depth + * @c3_bitdepth: c2 component bit depth + * @dither_matrix: Dither strength matrix + */ +struct sde_drm_dnsc_blur_cfg { + __u64 flags; + __u32 num_blocks; + + __u32 src_width; + __u32 src_height; + __u32 dst_width; + __u32 dst_height; + + __u32 flags_h; + __u32 flags_v; + + /* pcmn filter parameters */ + __u32 phase_init_h; + __u32 phase_step_h; + __u32 phase_init_v; + __u32 phase_step_v; + + /* gaussian filter parameters */ + __u32 norm_h; + __u32 ratio_h; + __u32 norm_v; + __u32 ratio_v; + __u32 coef_hori[DNSC_BLUR_COEF_NUM]; + __u32 coef_vert[DNSC_BLUR_COEF_NUM]; + + /* dither configs */ + __u64 dither_flags; + __u32 temporal_en; + __u32 c0_bitdepth; + __u32 c1_bitdepth; + __u32 c2_bitdepth; + __u32 c3_bitdepth; + __u32 dither_matrix[DNSC_BLUR_DITHER_MATRIX_SZ]; +}; + +#define DRM_SDE_WB_CONFIG 0x40 +#define DRM_MSM_REGISTER_EVENT 0x41 +#define DRM_MSM_DEREGISTER_EVENT 0x42 +#define DRM_MSM_RMFB2 0x43 +#define DRM_MSM_POWER_CTRL 0x44 +#define DRM_MSM_DISPLAY_HINT 0x45 + +/* sde custom events */ +#define DRM_EVENT_HISTOGRAM 0x80000000 +#define DRM_EVENT_AD_BACKLIGHT 0x80000001 +#define DRM_EVENT_CRTC_POWER 0x80000002 +#define DRM_EVENT_SYS_BACKLIGHT 0x80000003 +#define 
DRM_EVENT_SDE_POWER 0x80000004 +#define DRM_EVENT_IDLE_NOTIFY 0x80000005 +#define DRM_EVENT_PANEL_DEAD 0x80000006 /* ESD event */ +#define DRM_EVENT_SDE_HW_RECOVERY 0X80000007 +#define DRM_EVENT_LTM_HIST 0X80000008 +#define DRM_EVENT_LTM_WB_PB 0X80000009 +#define DRM_EVENT_LTM_OFF 0X8000000A +#define DRM_EVENT_MMRM_CB 0X8000000B +#define DRM_EVENT_FRAME_DATA 0x8000000C +#define DRM_EVENT_DIMMING_BL 0X8000000D +#define DRM_EVENT_VM_RELEASE 0X8000000E +#define DRM_EVENT_OPR_VALUE 0X8000000F +#define DRM_EVENT_MISR_SIGN 0X80000010 + +#ifndef DRM_MODE_FLAG_VID_MODE_PANEL +#define DRM_MODE_FLAG_VID_MODE_PANEL 0x01 +#endif +#ifndef DRM_MODE_FLAG_CMD_MODE_PANEL +#define DRM_MODE_FLAG_CMD_MODE_PANEL 0x02 +#endif + +#ifndef DRM_MODE_FLAG_DSI_24BPP +#define DRM_MODE_FLAG_DSI_24BPP 0x01 +#endif +#ifndef DRM_MODE_FLAG_DSI_30BPP +#define DRM_MODE_FLAG_DSI_30BPP 0x02 +#endif + +/* display hint flags*/ +#define DRM_MSM_DISPLAY_EARLY_WAKEUP_HINT 0x01 +#define DRM_MSM_DISPLAY_POWER_COLLAPSE_HINT 0x02 +#define DRM_MSM_DISPLAY_IDLE_TIMEOUT_HINT 0x04 +#define DRM_MSM_DISPLAY_MODE_CHANGE_HINT 0x08 + +#define DRM_MSM_WAKE_UP_ALL_DISPLAYS 0xFFFFFFFF + +#define DRM_IOCTL_SDE_WB_CONFIG \ + DRM_IOW((DRM_COMMAND_BASE + DRM_SDE_WB_CONFIG), struct sde_drm_wb_cfg) +#define DRM_IOCTL_MSM_REGISTER_EVENT DRM_IOW((DRM_COMMAND_BASE + \ + DRM_MSM_REGISTER_EVENT), struct drm_msm_event_req) +#define DRM_IOCTL_MSM_DEREGISTER_EVENT DRM_IOW((DRM_COMMAND_BASE + \ + DRM_MSM_DEREGISTER_EVENT), struct drm_msm_event_req) +#define DRM_IOCTL_MSM_RMFB2 DRM_IOW((DRM_COMMAND_BASE + \ + DRM_MSM_RMFB2), unsigned int) +#define DRM_IOCTL_MSM_POWER_CTRL DRM_IOW((DRM_COMMAND_BASE + \ + DRM_MSM_POWER_CTRL), struct drm_msm_power_ctrl) +#define DRM_IOCTL_MSM_DISPLAY_HINT DRM_IOW((DRM_COMMAND_BASE + \ + DRM_MSM_DISPLAY_HINT), struct drm_msm_display_hint) + +#if defined(__cplusplus) +} +#endif + +#endif /* _SDE_DRM_H_ */ diff --git a/include/uapi/display/hdcp/Kbuild b/include/uapi/display/hdcp/Kbuild new file mode 100644 
index 000000000..e3bd03da8 --- /dev/null +++ b/include/uapi/display/hdcp/Kbuild @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note + +header-y += msm_hdmi_hdcp_mgr.h diff --git a/include/uapi/display/hdcp/msm_hdmi_hdcp_mgr.h b/include/uapi/display/hdcp/msm_hdmi_hdcp_mgr.h new file mode 100644 index 000000000..78589ce9f --- /dev/null +++ b/include/uapi/display/hdcp/msm_hdmi_hdcp_mgr.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _UAPI__MSM_HDMI_HDCP_MGR_H +#define _UAPI__MSM_HDMI_HDCP_MGR_H + +#include + +enum DS_TYPE { /* type of downstream device */ + DS_UNKNOWN, + DS_RECEIVER, + DS_REPEATER, +}; + +enum { + MSG_ID_IDX, + RET_CODE_IDX, + HEADER_LEN, +}; + +enum RET_CODE { + HDCP_NOT_AUTHED, + HDCP_AUTHED, + HDCP_DISABLE, +}; + +enum MSG_ID { /* List of functions expected to be called after it */ + DOWN_CHECK_TOPOLOGY, + UP_REQUEST_TOPOLOGY, + UP_SEND_TOPOLOGY, + DOWN_REQUEST_TOPOLOGY, + MSG_NUM, +}; + +enum SOURCE_ID { + HDCP_V1_TX, + HDCP_V1_RX, + HDCP_V2_RX, + HDCP_V2_TX, + SRC_NUM, +}; + +/* + * how to parse sysfs params buffer + * from hdcp_tx driver. 
+ */ + +struct HDCP_V2V1_MSG_TOPOLOGY { + /* indicates downstream's type */ + __u32 ds_type; + __u8 bksv[5]; + __u8 dev_count; + __u8 depth; + __u8 ksv_list[5 * 127]; + __u32 max_cascade_exceeded; + __u32 max_dev_exceeded; +}; + +#endif /* _UAPI__MSM_HDMI_HDCP_MGR_H */ diff --git a/include/uapi/display/media/Kbuild b/include/uapi/display/media/Kbuild new file mode 100644 index 000000000..c8bca26ba --- /dev/null +++ b/include/uapi/display/media/Kbuild @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note + +header-y += msm_sde_rotator.h +header-y += mmm_color_fmt.h diff --git a/include/uapi/display/media/mmm_color_fmt.h b/include/uapi/display/media/mmm_color_fmt.h new file mode 100644 index 000000000..917b2d373 --- /dev/null +++ b/include/uapi/display/media/mmm_color_fmt.h @@ -0,0 +1,1472 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#ifndef __MMM_COLOR_FMT_INFO_H__ +#define __MMM_COLOR_FMT_INFO_H__ + +/* Width and Height should be multiple of 16 */ +#define INTERLACE_WIDTH_MAX 1920 +#define INTERLACE_HEIGHT_MAX 1920 +#define INTERLACE_MB_PER_FRAME_MAX ((1920*1088)/256) + +#ifndef MMM_COLOR_FMT_ALIGN +#define MMM_COLOR_FMT_ALIGN(__sz, __align) (((__align) & ((__align) - 1)) ?\ + ((((__sz) + (__align) - 1) / (__align)) * (__align)) :\ + (((__sz) + (__align) - 1) & (~((__align) - 1)))) +#endif + +#ifndef MMM_COLOR_FMT_ROUNDUP +#define MMM_COLOR_FMT_ROUNDUP(__sz, __r) (((__sz) + ((__r) - 1)) / (__r)) +#endif + +enum mmm_color_fmts { + /* Venus NV12: + * YUV 4:2:0 image with a plane of 8 bit Y samples followed + * by an interleaved U/V plane containing 8 bit 2x2 subsampled + * colour difference samples. + * + * <-------- Y/UV_Stride --------> + * <------- Width -------> + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . 
| | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * U V U V U V U V U V U V . . . . ^ + * U V U V U V U V U V U V . . . . | + * U V U V U V U V U V U V . . . . | + * U V U V U V U V U V U V . . . . UV_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . --> Buffer size alignment + * + * Y_Stride : Width aligned to 128 + * UV_Stride : Width aligned to 128 + * Y_Scanlines: Height aligned to 32 + * UV_Scanlines: Height/2 aligned to 16 + * Total size = align(Y_Stride * Y_Scanlines + * + UV_Stride * UV_Scanlines, 4096) + */ + MMM_COLOR_FMT_NV12, + /* Venus NV21: + * YUV 4:2:0 image with a plane of 8 bit Y samples followed + * by an interleaved V/U plane containing 8 bit 2x2 subsampled + * colour difference samples. + * + * <-------- Y/UV_Stride --------> + * <------- Width -------> + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * V U V U V U V U V U V U . . . . ^ + * V U V U V U V U V U V U . . . . | + * V U V U V U V U V U V U . . . . | + * V U V U V U V U V U V U . . . . UV_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . 
--> Padding & Buffer size alignment + * + * Y_Stride : Width aligned to 128 + * UV_Stride : Width aligned to 128 + * Y_Scanlines: Height aligned to 32 + * UV_Scanlines: Height/2 aligned to 16 + * Total size = align(Y_Stride * Y_Scanlines + * + UV_Stride * UV_Scanlines, 4096) + */ + MMM_COLOR_FMT_NV21, + /* + * The buffer can be of 2 types: + * (1) Venus NV12 UBWC Progressive + * (2) Venus NV12 UBWC Interlaced + * + * (1) Venus NV12 UBWC Progressive Buffer Format: + * Compressed Macro-tile format for NV12. + * Contains 4 planes in the following order - + * (A) Y_Meta_Plane + * (B) Y_UBWC_Plane + * (C) UV_Meta_Plane + * (D) UV_UBWC_Plane + * + * Y_Meta_Plane consists of meta information to decode compressed + * tile data in Y_UBWC_Plane. + * Y_UBWC_Plane consists of Y data in compressed macro-tile format. + * UBWC decoder block will use the Y_Meta_Plane data together with + * Y_UBWC_Plane data to produce loss-less uncompressed 8 bit Y samples. + * + * UV_Meta_Plane consists of meta information to decode compressed + * tile data in UV_UBWC_Plane. + * UV_UBWC_Plane consists of UV data in compressed macro-tile format. + * UBWC decoder block will use UV_Meta_Plane data together with + * UV_UBWC_Plane data to produce loss-less uncompressed 8 bit 2x2 + * subsampled color difference samples. + * + * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable + * and randomly accessible. There is no dependency between tiles. + * + * <----- Y_Meta_Stride ----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Height | + * M M M M M M M M M M M M . . | Meta_Y_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . 
V + * <--Compressed tile Y Stride---> + * <------- Width -------> + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^ + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . V + * <----- UV_Meta_Stride ----> + * M M M M M M M M M M M M . . ^ + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . M_UV_Scanlines + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * <--Compressed tile UV Stride---> + * U* V* U* V* U* V* U* V* . . . . ^ + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . UV_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . 
-------> Buffer size aligned to 4k + * + * Y_Stride = align(Width, 128) + * UV_Stride = align(Width, 128) + * Y_Scanlines = align(Height, 32) + * UV_Scanlines = align(Height/2, 32) + * Y_UBWC_Plane_size = align(Y_Stride * Y_Scanlines, 4096) + * UV_UBWC_Plane_size = align(UV_Stride * UV_Scanlines, 4096) + * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64) + * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16) + * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096) + * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64) + * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16) + * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096) + * + * Total size = align( Y_UBWC_Plane_size + UV_UBWC_Plane_size + + * Y_Meta_Plane_size + UV_Meta_Plane_size, 4096) + * + * + * (2) Venus NV12 UBWC Interlaced Buffer Format: + * Compressed Macro-tile format for NV12 interlaced. + * Contains 8 planes in the following order - + * (A) Y_Meta_Top_Field_Plane + * (B) Y_UBWC_Top_Field_Plane + * (C) UV_Meta_Top_Field_Plane + * (D) UV_UBWC_Top_Field_Plane + * (E) Y_Meta_Bottom_Field_Plane + * (F) Y_UBWC_Bottom_Field_Plane + * (G) UV_Meta_Bottom_Field_Plane + * (H) UV_UBWC_Bottom_Field_Plane + * Y_Meta_Top_Field_Plane consists of meta information to decode + * compressed tile data for Y_UBWC_Top_Field_Plane. + * Y_UBWC_Top_Field_Plane consists of Y data in compressed macro-tile + * format for top field of an interlaced frame. + * UBWC decoder block will use the Y_Meta_Top_Field_Plane data together + * with Y_UBWC_Top_Field_Plane data to produce loss-less uncompressed + * 8 bit Y samples for top field of an interlaced frame. + * + * UV_Meta_Top_Field_Plane consists of meta information to decode + * compressed tile data in UV_UBWC_Top_Field_Plane. + * UV_UBWC_Top_Field_Plane consists of UV data in compressed macro-tile + * format for top field of an interlaced frame. 
+ * UBWC decoder block will use UV_Meta_Top_Field_Plane data together + * with UV_UBWC_Top_Field_Plane data to produce loss-less uncompressed + * 8 bit subsampled color difference samples for top field of an + * interlaced frame. + * + * Each tile in Y_UBWC_Top_Field_Plane/UV_UBWC_Top_Field_Plane is + * independently decodable and randomly accessible. There is no + * dependency between tiles. + * + * Y_Meta_Bottom_Field_Plane consists of meta information to decode + * compressed tile data for Y_UBWC_Bottom_Field_Plane. + * Y_UBWC_Bottom_Field_Plane consists of Y data in compressed macro-tile + * format for bottom field of an interlaced frame. + * UBWC decoder block will use the Y_Meta_Bottom_Field_Plane data + * together with Y_UBWC_Bottom_Field_Plane data to produce loss-less + * uncompressed 8 bit Y samples for bottom field of an interlaced frame. + * + * UV_Meta_Bottom_Field_Plane consists of meta information to decode + * compressed tile data in UV_UBWC_Bottom_Field_Plane. + * UV_UBWC_Bottom_Field_Plane consists of UV data in compressed + * macro-tile format for bottom field of an interlaced frame. + * UBWC decoder block will use UV_Meta_Bottom_Field_Plane data together + * with UV_UBWC_Bottom_Field_Plane data to produce loss-less + * uncompressed 8 bit subsampled color difference samples for bottom + * field of an interlaced frame. + * + * Each tile in Y_UBWC_Bottom_Field_Plane/UV_UBWC_Bottom_Field_Plane is + * independently decodable and randomly accessible. There is no + * dependency between tiles. + * + * <-----Y_TF_Meta_Stride----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Half_height | + * M M M M M M M M M M M M . . | Meta_Y_TF_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . 
. . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . V + * <-Compressed tile Y_TF Stride-> + * <------- Width -------> + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^ + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_TF_Scanlines + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . V + * <----UV_TF_Meta_Stride----> + * M M M M M M M M M M M M . . ^ + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . M_UV_TF_Scanlines + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * <-Compressed tile UV_TF Stride-> + * U* V* U* V* U* V* U* V* . . . . ^ + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . UV_TF_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * <-----Y_BF_Meta_Stride----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Half_height | + * M M M M M M M M M M M M . . | Meta_Y_BF_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . V + * <-Compressed tile Y_BF Stride-> + * <------- Width -------> + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . 
^ ^ + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_BF_Scanlines + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . V + * <----UV_BF_Meta_Stride----> + * M M M M M M M M M M M M . . ^ + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . M_UV_BF_Scanlines + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * <-Compressed tile UV_BF Stride-> + * U* V* U* V* U* V* U* V* . . . . ^ + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . UV_BF_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . 
-------> Buffer size aligned to 4k
+ *
+ * Half_height = (Height+1)>>1
+ * Y_TF_Stride = align(Width, 128)
+ * UV_TF_Stride = align(Width, 128)
+ * Y_TF_Scanlines = align(Half_height, 32)
+ * UV_TF_Scanlines = align((Half_height+1)/2, 32)
+ * Y_UBWC_TF_Plane_size = align(Y_TF_Stride * Y_TF_Scanlines, 4096)
+ * UV_UBWC_TF_Plane_size = align(UV_TF_Stride * UV_TF_Scanlines, 4096)
+ * Y_TF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_TF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+ * Y_TF_Meta_Plane_size =
+ * align(Y_TF_Meta_Stride * Y_TF_Meta_Scanlines, 4096)
+ * UV_TF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_TF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+ * UV_TF_Meta_Plane_size =
+ * align(UV_TF_Meta_Stride * UV_TF_Meta_Scanlines, 4096)
+ * Y_BF_Stride = align(Width, 128)
+ * UV_BF_Stride = align(Width, 128)
+ * Y_BF_Scanlines = align(Half_height, 32)
+ * UV_BF_Scanlines = align((Half_height+1)/2, 32)
+ * Y_UBWC_BF_Plane_size = align(Y_BF_Stride * Y_BF_Scanlines, 4096)
+ * UV_UBWC_BF_Plane_size = align(UV_BF_Stride * UV_BF_Scanlines, 4096)
+ * Y_BF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_BF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+ * Y_BF_Meta_Plane_size =
+ * align(Y_BF_Meta_Stride * Y_BF_Meta_Scanlines, 4096)
+ * UV_BF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_BF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+ * UV_BF_Meta_Plane_size =
+ * align(UV_BF_Meta_Stride * UV_BF_Meta_Scanlines, 4096)
+ *
+ * Total size = align( Y_UBWC_TF_Plane_size + UV_UBWC_TF_Plane_size +
+ * Y_TF_Meta_Plane_size + UV_TF_Meta_Plane_size +
+ * Y_UBWC_BF_Plane_size + UV_UBWC_BF_Plane_size +
+ * Y_BF_Meta_Plane_size + UV_BF_Meta_Plane_size,
+ * 4096)
+ */
+ MMM_COLOR_FMT_NV12_UBWC,
+ /* Venus NV12 10-bit UBWC:
+ * Compressed Macro-tile format for NV12.
+ * Contains 4 planes in the following order - + * (A) Y_Meta_Plane + * (B) Y_UBWC_Plane + * (C) UV_Meta_Plane + * (D) UV_UBWC_Plane + * + * Y_Meta_Plane consists of meta information to decode compressed + * tile data in Y_UBWC_Plane. + * Y_UBWC_Plane consists of Y data in compressed macro-tile format. + * UBWC decoder block will use the Y_Meta_Plane data together with + * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples. + * + * UV_Meta_Plane consists of meta information to decode compressed + * tile data in UV_UBWC_Plane. + * UV_UBWC_Plane consists of UV data in compressed macro-tile format. + * UBWC decoder block will use UV_Meta_Plane data together with + * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2 + * subsampled color difference samples. + * + * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable + * and randomly accessible. There is no dependency between tiles. + * + * <----- Y_Meta_Stride -----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Height | + * M M M M M M M M M M M M . . | Meta_Y_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . V + * <--Compressed tile Y Stride---> + * <------- Width -------> + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^ + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . 
-------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ *
+ * Y_Stride = align(Width * 4/3, 256)
+ * UV_Stride = align(Width * 4/3, 256)
+ * Y_Scanlines = align(Height, 16)
+ * UV_Scanlines = align(Height/2, 16)
+ * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ *
+ * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size, 4096)
+ */
+ MMM_COLOR_FMT_NV12_BPP10_UBWC,
+ /* Venus RGBA8888 format:
+ * Contains 1 plane in the following order -
+ * (A) RGBA plane
+ *
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . .
| | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * + * RGB_Stride = align(Width * 4, 256) + * RGB_Scanlines = align(Height, 32) + * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096) + * + * Total size = align(RGB_Plane_size , 4096) + */ + MMM_COLOR_FMT_RGBA8888, + /* Venus RGBA8888 UBWC format: + * Contains 2 planes in the following order - + * (A) Meta plane + * (B) RGBA plane + * + * <--- RGB_Meta_Stride ----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Height | + * M M M M M M M M M M M M . . | Meta_RGB_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . V + * <-------- RGB_Stride --------> + * <------- Width -------> + * R R R R R R R R R R R R . . . . ^ ^ + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . Height | + * R R R R R R R R R R R R . . . . | RGB_Scanlines + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . 
V + * + * RGB_Stride = align(Width * 4, 256) + * RGB_Scanlines = align(Height, 16) + * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096) + * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64) + * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16) + * RGB_Meta_Plane_size = align(RGB_Meta_Stride * + * RGB_Meta_Scanlines, 4096) + * + * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size, 4096) + */ + MMM_COLOR_FMT_RGBA8888_UBWC, + /* Venus RGBA1010102 UBWC format: + * Contains 2 planes in the following order - + * (A) Meta plane + * (B) RGBA plane + * + * <--- RGB_Meta_Stride ----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Height | + * M M M M M M M M M M M M . . | Meta_RGB_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . V + * <-------- RGB_Stride --------> + * <------- Width -------> + * R R R R R R R R R R R R . . . . ^ ^ + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . Height | + * R R R R R R R R R R R R . . . . | RGB_Scanlines + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . 
V + * + * RGB_Stride = align(Width * 4, 256) + * RGB_Scanlines = align(Height, 16) + * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096) + * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64) + * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16) + * RGB_Meta_Plane_size = align(RGB_Meta_Stride * + * RGB_Meta_Scanlines, 4096) + * + * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size, 4096) + */ + MMM_COLOR_FMT_RGBA1010102_UBWC, + /* Venus RGB565 UBWC format: + * Contains 2 planes in the following order - + * (A) Meta plane + * (B) RGB plane + * + * <--- RGB_Meta_Stride ----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Height | + * M M M M M M M M M M M M . . | Meta_RGB_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . V + * <-------- RGB_Stride --------> + * <------- Width -------> + * R R R R R R R R R R R R . . . . ^ ^ + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . Height | + * R R R R R R R R R R R R . . . . | RGB_Scanlines + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . 
V + * + * RGB_Stride = align(Width * 2, 256) + * RGB_Scanlines = align(Height, 16) + * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096) + * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64) + * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16) + * RGB_Meta_Plane_size = align(RGB_Meta_Stride * + * RGB_Meta_Scanlines, 4096) + * + * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size, 4096) + */ + MMM_COLOR_FMT_RGB565_UBWC, + /* P010 UBWC: + * Compressed Macro-tile format for NV12. + * Contains 4 planes in the following order - + * (A) Y_Meta_Plane + * (B) Y_UBWC_Plane + * (C) UV_Meta_Plane + * (D) UV_UBWC_Plane + * + * Y_Meta_Plane consists of meta information to decode compressed + * tile data in Y_UBWC_Plane. + * Y_UBWC_Plane consists of Y data in compressed macro-tile format. + * UBWC decoder block will use the Y_Meta_Plane data together with + * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples. + * + * UV_Meta_Plane consists of meta information to decode compressed + * tile data in UV_UBWC_Plane. + * UV_UBWC_Plane consists of UV data in compressed macro-tile format. + * UBWC decoder block will use UV_Meta_Plane data together with + * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2 + * subsampled color difference samples. + * + * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable + * and randomly accessible. There is no dependency between tiles. + * + * <----- Y_Meta_Stride -----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Height | + * M M M M M M M M M M M M . . | Meta_Y_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . 
V + * <--Compressed tile Y Stride---> + * <------- Width -------> + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^ + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . V + * <----- UV_Meta_Stride ----> + * M M M M M M M M M M M M . . ^ + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . M_UV_Scanlines + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * <--Compressed tile UV Stride---> + * U* V* U* V* U* V* U* V* . . . . ^ + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . UV_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . 
-------> Buffer size aligned to 4k + * + * + * Y_Stride = align(Width * 2, 256) + * UV_Stride = align(Width * 2, 256) + * Y_Scanlines = align(Height, 16) + * UV_Scanlines = align(Height/2, 16) + * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096) + * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096) + * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64) + * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16) + * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096) + * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64) + * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16) + * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096) + * + * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size + + * Y_Meta_Plane_size + UV_Meta_Plane_size, 4096) + */ + MMM_COLOR_FMT_P010_UBWC, + /* Venus P010: + * YUV 4:2:0 image with a plane of 10 bit Y samples followed + * by an interleaved U/V plane containing 10 bit 2x2 subsampled + * colour difference samples. + * + * <-------- Y/UV_Stride --------> + * <------- Width -------> + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * U V U V U V U V U V U V . . . . ^ + * U V U V U V U V U V U V . . . . | + * U V U V U V U V U V U V . . . . | + * U V U V U V U V U V U V . . . . UV_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . 
--> Buffer size alignment + * + * Y_Stride : Width * 2 aligned to 256 + * UV_Stride : Width * 2 aligned to 256 + * Y_Scanlines: Height aligned to 32 + * UV_Scanlines: Height/2 aligned to 16 + * Total size = align(Y_Stride * Y_Scanlines + * + UV_Stride * UV_Scanlines, 4096) + */ + MMM_COLOR_FMT_P010, + /* Venus P010_512: + * YUV 4:2:0 image with a plane of 10 bit Y samples followed + * by an interleaved U/V plane containing 10 bit 2x2 subsampled + * colour difference samples. + * + * <-------- Y/UV_Stride --------> + * <------- Width -------> + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * U V U V U V U V U V U V . . . . ^ + * U V U V U V U V U V U V . . . . | + * U V U V U V U V U V U V . . . . | + * U V U V U V U V U V U V . . . . UV_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . --> Buffer size alignment + * + * Y_Stride : Width * 2 aligned to 512 + * UV_Stride : Width * 2 aligned to 512 + * Y_Scanlines: Height aligned to 32 + * UV_Scanlines: Height/2 aligned to 16 + * Total size = align(Y_Stride * Y_Scanlines + * + UV_Stride * UV_Scanlines, 4096) + */ + MMM_COLOR_FMT_P010_512, + /* Venus NV12_512: + * YUV 4:2:0 image with a plane of 8 bit Y samples followed + * by an interleaved U/V plane containing 8 bit 2x2 subsampled + * colour difference samples. + * + * <-------- Y/UV_Stride --------> + * <------- Width -------> + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . 
Height | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * U V U V U V U V U V U V . . . . ^ + * U V U V U V U V U V U V . . . . | + * U V U V U V U V U V U V . . . . | + * U V U V U V U V U V U V . . . . UV_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . --> Buffer size alignment + * + * Y_Stride : Width aligned to 512 + * UV_Stride : Width aligned to 512 + * Y_Scanlines: Height aligned to 512 + * UV_Scanlines: Height/2 aligned to 256 + * Total size = align((Y_Stride * Y_Scanlines + * + UV_Stride * UV_Scanlines), 4096) + */ + MMM_COLOR_FMT_NV12_512, + /* Venus RGBA FP16 UBWC: + * Contains 2 planes in the following order - + * (A) Meta plane + * (B) RGBA plane + * + * <--- RGB_Meta_Stride ----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Height | + * M M M M M M M M M M M M . . | Meta_RGB_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . V + * <-------- RGB_Stride --------> + * <------- Width -------> + * R R R R R R R R R R R R . . . . ^ ^ + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . Height | + * R R R R R R R R R R R R . . . . | RGB_Scanlines + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . 
V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . V + * + * RGB_Stride = align(Width * 8, 256) + * RGB_Scanlines = align(Height, 16) + * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096) + * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64) + * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16) + * RGB_Meta_Plane_size = align(RGB_Meta_Stride * + * RGB_Meta_Scanlines, 4096) + * RGB_TileWidth = 8 pixels across is 1 tile + * RGB_TileHeight = 4 pixels + * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size, 4096) + */ + MMM_COLOR_FMT_RGBA16161616F_UBWC, +}; + +/* + * Function arguments: + * @color_fmt + * @width + * Progressive: width + * Interlaced: width + */ +static inline unsigned int MMM_COLOR_FMT_Y_STRIDE(unsigned int color_fmt, + unsigned int width) +{ + unsigned int alignment, stride = 0; + + if (!width) + goto invalid_input; + + switch (color_fmt) { + case MMM_COLOR_FMT_NV12_512: + alignment = 512; + stride = MMM_COLOR_FMT_ALIGN(width, alignment); + break; + case MMM_COLOR_FMT_NV12: + case MMM_COLOR_FMT_NV21: + case MMM_COLOR_FMT_NV12_UBWC: + alignment = 128; + stride = MMM_COLOR_FMT_ALIGN(width, alignment); + break; + case MMM_COLOR_FMT_NV12_BPP10_UBWC: + alignment = 256; + stride = MMM_COLOR_FMT_ALIGN(width, 192); + stride = MMM_COLOR_FMT_ALIGN(stride * 4/3, alignment); + break; + case MMM_COLOR_FMT_P010_UBWC: + case MMM_COLOR_FMT_P010: + alignment = 256; + stride = MMM_COLOR_FMT_ALIGN(width * 2, alignment); + break; + case MMM_COLOR_FMT_P010_512: + alignment = 512; + stride = MMM_COLOR_FMT_ALIGN(width * 2, alignment); + break; + default: + break; + } +invalid_input: + return stride; +} + +/* + * Function arguments: + * @color_fmt + * @width + * Progressive: width + * Interlaced: width + */ +static inline unsigned int MMM_COLOR_FMT_UV_STRIDE(unsigned int color_fmt, + unsigned int width) +{ + 
unsigned int alignment, stride = 0; + + if (!width) + goto invalid_input; + + switch (color_fmt) { + case MMM_COLOR_FMT_NV12_512: + alignment = 512; + stride = MMM_COLOR_FMT_ALIGN(width, alignment); + break; + case MMM_COLOR_FMT_NV12: + case MMM_COLOR_FMT_NV21: + case MMM_COLOR_FMT_NV12_UBWC: + alignment = 128; + stride = MMM_COLOR_FMT_ALIGN(width, alignment); + break; + case MMM_COLOR_FMT_NV12_BPP10_UBWC: + alignment = 256; + stride = MMM_COLOR_FMT_ALIGN(width, 192); + stride = MMM_COLOR_FMT_ALIGN(stride * 4/3, alignment); + break; + case MMM_COLOR_FMT_P010_UBWC: + case MMM_COLOR_FMT_P010: + alignment = 256; + stride = MMM_COLOR_FMT_ALIGN(width * 2, alignment); + break; + case MMM_COLOR_FMT_P010_512: + alignment = 512; + stride = MMM_COLOR_FMT_ALIGN(width * 2, alignment); + break; + default: + break; + } +invalid_input: + return stride; +} + +/* + * Function arguments: + * @color_fmt + * @height + * Progressive: height + * Interlaced: (height+1)>>1 + */ +static inline unsigned int MMM_COLOR_FMT_Y_SCANLINES(unsigned int color_fmt, + unsigned int height) +{ + unsigned int alignment, sclines = 0; + + if (!height) + goto invalid_input; + + switch (color_fmt) { + case MMM_COLOR_FMT_NV12_512: + alignment = 512; + break; + case MMM_COLOR_FMT_NV12: + case MMM_COLOR_FMT_NV21: + case MMM_COLOR_FMT_NV12_UBWC: + case MMM_COLOR_FMT_P010_512: + case MMM_COLOR_FMT_P010: + alignment = 32; + break; + case MMM_COLOR_FMT_NV12_BPP10_UBWC: + case MMM_COLOR_FMT_P010_UBWC: + alignment = 16; + break; + default: + return 0; + } + sclines = MMM_COLOR_FMT_ALIGN(height, alignment); +invalid_input: + return sclines; +} + +/* + * Function arguments: + * @color_fmt + * @height + * Progressive: height + * Interlaced: (height+1)>>1 + */ +static inline unsigned int MMM_COLOR_FMT_UV_SCANLINES(unsigned int color_fmt, + unsigned int height) +{ + unsigned int alignment, sclines = 0; + + if (!height) + goto invalid_input; + + switch (color_fmt) { + + case MMM_COLOR_FMT_NV12_512: + alignment = 256; + 
break; + case MMM_COLOR_FMT_NV12: + case MMM_COLOR_FMT_NV21: + case MMM_COLOR_FMT_NV12_BPP10_UBWC: + case MMM_COLOR_FMT_P010_UBWC: + case MMM_COLOR_FMT_P010_512: + case MMM_COLOR_FMT_P010: + alignment = 16; + break; + case MMM_COLOR_FMT_NV12_UBWC: + alignment = 32; + break; + default: + goto invalid_input; + } + + sclines = MMM_COLOR_FMT_ALIGN((height+1)>>1, alignment); + +invalid_input: + return sclines; +} + +/* + * Function arguments: + * @color_fmt + * @width + * Progressive: width + * Interlaced: width + */ +static inline unsigned int MMM_COLOR_FMT_Y_META_STRIDE(unsigned int color_fmt, + unsigned int width) +{ + int y_tile_width = 0, y_meta_stride = 0; + + if (!width) + goto invalid_input; + + switch (color_fmt) { + case MMM_COLOR_FMT_NV12_UBWC: + case MMM_COLOR_FMT_P010_UBWC: + y_tile_width = 32; + break; + case MMM_COLOR_FMT_NV12_BPP10_UBWC: + y_tile_width = 48; + break; + default: + goto invalid_input; + } + + y_meta_stride = MMM_COLOR_FMT_ROUNDUP(width, y_tile_width); + y_meta_stride = MMM_COLOR_FMT_ALIGN(y_meta_stride, 64); + +invalid_input: + return y_meta_stride; +} + +/* + * Function arguments: + * @color_fmt + * @height + * Progressive: height + * Interlaced: (height+1)>>1 + */ +static inline unsigned int MMM_COLOR_FMT_Y_META_SCANLINES( + unsigned int color_fmt, unsigned int height) +{ + int y_tile_height = 0, y_meta_scanlines = 0; + + if (!height) + goto invalid_input; + + switch (color_fmt) { + case MMM_COLOR_FMT_NV12_UBWC: + y_tile_height = 8; + break; + case MMM_COLOR_FMT_NV12_BPP10_UBWC: + case MMM_COLOR_FMT_P010_UBWC: + y_tile_height = 4; + break; + default: + goto invalid_input; + } + + y_meta_scanlines = MMM_COLOR_FMT_ROUNDUP(height, y_tile_height); + y_meta_scanlines = MMM_COLOR_FMT_ALIGN(y_meta_scanlines, 16); + +invalid_input: + return y_meta_scanlines; +} + +/* + * Function arguments: + * @color_fmt + * @width + * Progressive: width + * Interlaced: width + */ +static inline unsigned int MMM_COLOR_FMT_UV_META_STRIDE(unsigned int color_fmt, 
+ unsigned int width) +{ + int uv_tile_width = 0, uv_meta_stride = 0; + + if (!width) + goto invalid_input; + + switch (color_fmt) { + case MMM_COLOR_FMT_NV12_UBWC: + case MMM_COLOR_FMT_P010_UBWC: + uv_tile_width = 16; + break; + case MMM_COLOR_FMT_NV12_BPP10_UBWC: + uv_tile_width = 24; + break; + default: + goto invalid_input; + } + + uv_meta_stride = MMM_COLOR_FMT_ROUNDUP((width+1)>>1, uv_tile_width); + uv_meta_stride = MMM_COLOR_FMT_ALIGN(uv_meta_stride, 64); + +invalid_input: + return uv_meta_stride; +} + +/* + * Function arguments: + * @color_fmt + * @height + * Progressive: height + * Interlaced: (height+1)>>1 + */ +static inline unsigned int MMM_COLOR_FMT_UV_META_SCANLINES( + unsigned int color_fmt, unsigned int height) +{ + int uv_tile_height = 0, uv_meta_scanlines = 0; + + if (!height) + goto invalid_input; + + switch (color_fmt) { + case MMM_COLOR_FMT_NV12_UBWC: + uv_tile_height = 8; + break; + case MMM_COLOR_FMT_NV12_BPP10_UBWC: + case MMM_COLOR_FMT_P010_UBWC: + uv_tile_height = 4; + break; + default: + goto invalid_input; + } + + uv_meta_scanlines = MMM_COLOR_FMT_ROUNDUP((height+1)>>1, + uv_tile_height); + uv_meta_scanlines = MMM_COLOR_FMT_ALIGN(uv_meta_scanlines, 16); + +invalid_input: + return uv_meta_scanlines; +} + +static inline unsigned int MMM_COLOR_FMT_RGB_STRIDE(unsigned int color_fmt, + unsigned int width) +{ + unsigned int alignment = 0, stride = 0, bpp = 4; + + if (!width) + goto invalid_input; + + switch (color_fmt) { + case MMM_COLOR_FMT_RGBA8888: + alignment = 256; + break; + case MMM_COLOR_FMT_RGB565_UBWC: + alignment = 256; + bpp = 2; + break; + case MMM_COLOR_FMT_RGBA8888_UBWC: + case MMM_COLOR_FMT_RGBA1010102_UBWC: + alignment = 256; + break; + case MMM_COLOR_FMT_RGBA16161616F_UBWC: + alignment = 256; + bpp = 8; + break; + default: + goto invalid_input; + } + + stride = MMM_COLOR_FMT_ALIGN(width * bpp, alignment); + +invalid_input: + return stride; +} + +static inline unsigned int MMM_COLOR_FMT_RGB_SCANLINES(unsigned int color_fmt, + 
unsigned int height) +{ + unsigned int alignment = 0, scanlines = 0; + + if (!height) + goto invalid_input; + + switch (color_fmt) { + case MMM_COLOR_FMT_RGBA8888: + alignment = 32; + break; + case MMM_COLOR_FMT_RGBA8888_UBWC: + case MMM_COLOR_FMT_RGBA1010102_UBWC: + case MMM_COLOR_FMT_RGB565_UBWC: + case MMM_COLOR_FMT_RGBA16161616F_UBWC: + alignment = 16; + break; + default: + goto invalid_input; + } + + scanlines = MMM_COLOR_FMT_ALIGN(height, alignment); + +invalid_input: + return scanlines; +} + +static inline unsigned int MMM_COLOR_FMT_RGB_META_STRIDE(unsigned int color_fmt, + unsigned int width) +{ + int rgb_tile_width = 0, rgb_meta_stride = 0; + + if (!width) + goto invalid_input; + + switch (color_fmt) { + case MMM_COLOR_FMT_RGBA8888_UBWC: + case MMM_COLOR_FMT_RGBA1010102_UBWC: + case MMM_COLOR_FMT_RGB565_UBWC: + rgb_tile_width = 16; + break; + case MMM_COLOR_FMT_RGBA16161616F_UBWC: + rgb_tile_width = 8; + break; + default: + goto invalid_input; + } + + rgb_meta_stride = MMM_COLOR_FMT_ROUNDUP(width, rgb_tile_width); + rgb_meta_stride = MMM_COLOR_FMT_ALIGN(rgb_meta_stride, 64); + +invalid_input: + return rgb_meta_stride; +} + +static inline unsigned int MMM_COLOR_FMT_RGB_META_SCANLINES( + unsigned int color_fmt, unsigned int height) +{ + int rgb_tile_height = 0, rgb_meta_scanlines = 0; + + if (!height) + goto invalid_input; + + switch (color_fmt) { + case MMM_COLOR_FMT_RGBA8888_UBWC: + case MMM_COLOR_FMT_RGBA1010102_UBWC: + case MMM_COLOR_FMT_RGB565_UBWC: + case MMM_COLOR_FMT_RGBA16161616F_UBWC: + rgb_tile_height = 4; + break; + default: + goto invalid_input; + } + + rgb_meta_scanlines = MMM_COLOR_FMT_ROUNDUP(height, rgb_tile_height); + rgb_meta_scanlines = MMM_COLOR_FMT_ALIGN(rgb_meta_scanlines, 16); + +invalid_input: + return rgb_meta_scanlines; +} + +/* + * Function arguments: + * @color_fmt + * @width + * Progressive: width + * Interlaced: width + * @height + * Progressive: height + * Interlaced: height + */ +static inline unsigned int 
MMM_COLOR_FMT_BUFFER_SIZE(unsigned int color_fmt, + unsigned int width, unsigned int height) +{ + unsigned int size = 0; + unsigned int y_plane, uv_plane, y_stride, + uv_stride, y_sclines, uv_sclines; + unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0; + unsigned int y_meta_stride = 0, y_meta_scanlines = 0; + unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0; + unsigned int y_meta_plane = 0, uv_meta_plane = 0; + unsigned int rgb_stride = 0, rgb_scanlines = 0; + unsigned int rgb_plane = 0, rgb_ubwc_plane = 0, rgb_meta_plane = 0; + unsigned int rgb_meta_stride = 0, rgb_meta_scanlines = 0; + + if (!width || !height) + goto invalid_input; + + y_stride = MMM_COLOR_FMT_Y_STRIDE(color_fmt, width); + uv_stride = MMM_COLOR_FMT_UV_STRIDE(color_fmt, width); + y_sclines = MMM_COLOR_FMT_Y_SCANLINES(color_fmt, height); + uv_sclines = MMM_COLOR_FMT_UV_SCANLINES(color_fmt, height); + rgb_stride = MMM_COLOR_FMT_RGB_STRIDE(color_fmt, width); + rgb_scanlines = MMM_COLOR_FMT_RGB_SCANLINES(color_fmt, height); + + switch (color_fmt) { + case MMM_COLOR_FMT_NV21: + case MMM_COLOR_FMT_NV12: + case MMM_COLOR_FMT_P010_512: + case MMM_COLOR_FMT_P010: + case MMM_COLOR_FMT_NV12_512: + y_plane = y_stride * y_sclines; + uv_plane = uv_stride * uv_sclines; + size = y_plane + uv_plane; + break; + case MMM_COLOR_FMT_NV12_UBWC: + y_meta_stride = MMM_COLOR_FMT_Y_META_STRIDE(color_fmt, width); + uv_meta_stride = MMM_COLOR_FMT_UV_META_STRIDE(color_fmt, width); + if (width <= INTERLACE_WIDTH_MAX && + height <= INTERLACE_HEIGHT_MAX && + (height * width) / 256 <= INTERLACE_MB_PER_FRAME_MAX) { + y_sclines = MMM_COLOR_FMT_Y_SCANLINES(color_fmt, + (height+1)>>1); + y_ubwc_plane = + MMM_COLOR_FMT_ALIGN(y_stride * y_sclines, 4096); + uv_sclines = MMM_COLOR_FMT_UV_SCANLINES(color_fmt, + (height+1)>>1); + uv_ubwc_plane = MMM_COLOR_FMT_ALIGN( + uv_stride * uv_sclines, 4096); + y_meta_scanlines = MMM_COLOR_FMT_Y_META_SCANLINES( + color_fmt, (height+1)>>1); + y_meta_plane = MMM_COLOR_FMT_ALIGN( + y_meta_stride * 
y_meta_scanlines, 4096); + uv_meta_scanlines = MMM_COLOR_FMT_UV_META_SCANLINES( + color_fmt, (height+1)>>1); + uv_meta_plane = MMM_COLOR_FMT_ALIGN(uv_meta_stride * + uv_meta_scanlines, 4096); + size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane + + uv_meta_plane)*2; + } else { + y_sclines = MMM_COLOR_FMT_Y_SCANLINES(color_fmt, + height); + y_ubwc_plane = + MMM_COLOR_FMT_ALIGN(y_stride * y_sclines, 4096); + uv_sclines = MMM_COLOR_FMT_UV_SCANLINES(color_fmt, + height); + uv_ubwc_plane = + MMM_COLOR_FMT_ALIGN(uv_stride * uv_sclines, + 4096); + y_meta_scanlines = MMM_COLOR_FMT_Y_META_SCANLINES( + color_fmt, height); + y_meta_plane = MMM_COLOR_FMT_ALIGN( + y_meta_stride * y_meta_scanlines, 4096); + uv_meta_scanlines = MMM_COLOR_FMT_UV_META_SCANLINES( + color_fmt, height); + uv_meta_plane = MMM_COLOR_FMT_ALIGN(uv_meta_stride * + uv_meta_scanlines, 4096); + size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane + + uv_meta_plane); + } + break; + case MMM_COLOR_FMT_NV12_BPP10_UBWC: + y_ubwc_plane = MMM_COLOR_FMT_ALIGN(y_stride * y_sclines, 4096); + uv_ubwc_plane = MMM_COLOR_FMT_ALIGN(uv_stride * uv_sclines, + 4096); + y_meta_stride = MMM_COLOR_FMT_Y_META_STRIDE(color_fmt, width); + y_meta_scanlines = MMM_COLOR_FMT_Y_META_SCANLINES(color_fmt, + height); + y_meta_plane = MMM_COLOR_FMT_ALIGN( + y_meta_stride * y_meta_scanlines, 4096); + uv_meta_stride = MMM_COLOR_FMT_UV_META_STRIDE(color_fmt, width); + uv_meta_scanlines = MMM_COLOR_FMT_UV_META_SCANLINES(color_fmt, + height); + uv_meta_plane = MMM_COLOR_FMT_ALIGN(uv_meta_stride * + uv_meta_scanlines, 4096); + + size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane + + uv_meta_plane; + break; + case MMM_COLOR_FMT_P010_UBWC: + y_ubwc_plane = MMM_COLOR_FMT_ALIGN(y_stride * y_sclines, 4096); + uv_ubwc_plane = MMM_COLOR_FMT_ALIGN(uv_stride * uv_sclines, + 4096); + y_meta_stride = MMM_COLOR_FMT_Y_META_STRIDE(color_fmt, width); + y_meta_scanlines = MMM_COLOR_FMT_Y_META_SCANLINES(color_fmt, + height); + y_meta_plane = MMM_COLOR_FMT_ALIGN( 
+ y_meta_stride * y_meta_scanlines, 4096); + uv_meta_stride = MMM_COLOR_FMT_UV_META_STRIDE(color_fmt, width); + uv_meta_scanlines = MMM_COLOR_FMT_UV_META_SCANLINES(color_fmt, + height); + uv_meta_plane = MMM_COLOR_FMT_ALIGN(uv_meta_stride * + uv_meta_scanlines, 4096); + + size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane + + uv_meta_plane; + break; + case MMM_COLOR_FMT_RGBA8888: + rgb_plane = MMM_COLOR_FMT_ALIGN(rgb_stride * rgb_scanlines, + 4096); + size = rgb_plane; + break; + case MMM_COLOR_FMT_RGBA8888_UBWC: + case MMM_COLOR_FMT_RGBA1010102_UBWC: + case MMM_COLOR_FMT_RGB565_UBWC: + case MMM_COLOR_FMT_RGBA16161616F_UBWC: + rgb_ubwc_plane = MMM_COLOR_FMT_ALIGN(rgb_stride * rgb_scanlines, + 4096); + rgb_meta_stride = MMM_COLOR_FMT_RGB_META_STRIDE(color_fmt, + width); + rgb_meta_scanlines = MMM_COLOR_FMT_RGB_META_SCANLINES(color_fmt, + height); + rgb_meta_plane = MMM_COLOR_FMT_ALIGN(rgb_meta_stride * + rgb_meta_scanlines, 4096); + size = rgb_ubwc_plane + rgb_meta_plane; + break; + default: + break; + } +invalid_input: + return MMM_COLOR_FMT_ALIGN(size, 4096); +} + +static inline unsigned int MMM_COLOR_FMT_BUFFER_SIZE_USED( + unsigned int color_fmt, unsigned int width, + unsigned int height, unsigned int interlace) +{ + unsigned int size = 0; + unsigned int y_stride, uv_stride, y_sclines, uv_sclines; + unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0; + unsigned int y_meta_stride = 0, y_meta_scanlines = 0; + unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0; + unsigned int y_meta_plane = 0, uv_meta_plane = 0; + + if (!width || !height) + goto invalid_input; + + if (!interlace && color_fmt == MMM_COLOR_FMT_NV12_UBWC) { + y_stride = MMM_COLOR_FMT_Y_STRIDE(color_fmt, width); + uv_stride = MMM_COLOR_FMT_UV_STRIDE(color_fmt, width); + y_sclines = MMM_COLOR_FMT_Y_SCANLINES(color_fmt, height); + y_ubwc_plane = MMM_COLOR_FMT_ALIGN(y_stride * y_sclines, 4096); + uv_sclines = MMM_COLOR_FMT_UV_SCANLINES(color_fmt, height); + uv_ubwc_plane = MMM_COLOR_FMT_ALIGN(uv_stride 
* uv_sclines, + 4096); + y_meta_stride = MMM_COLOR_FMT_Y_META_STRIDE(color_fmt, width); + y_meta_scanlines = + MMM_COLOR_FMT_Y_META_SCANLINES(color_fmt, height); + y_meta_plane = MMM_COLOR_FMT_ALIGN( + y_meta_stride * y_meta_scanlines, 4096); + uv_meta_stride = MMM_COLOR_FMT_UV_META_STRIDE(color_fmt, width); + uv_meta_scanlines = + MMM_COLOR_FMT_UV_META_SCANLINES(color_fmt, height); + uv_meta_plane = MMM_COLOR_FMT_ALIGN(uv_meta_stride * + uv_meta_scanlines, 4096); + size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane + + uv_meta_plane); + size = MMM_COLOR_FMT_ALIGN(size, 4096); + } else { + size = MMM_COLOR_FMT_BUFFER_SIZE(color_fmt, width, height); + } +invalid_input: + return size; +} + +#endif diff --git a/include/uapi/display/media/msm_sde_rotator.h b/include/uapi/display/media/msm_sde_rotator.h new file mode 100644 index 000000000..e8a2083f9 --- /dev/null +++ b/include/uapi/display/media/msm_sde_rotator.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef __UAPI_MSM_SDE_ROTATOR_H__ +#define __UAPI_MSM_SDE_ROTATOR_H__ + +#include +#include +#include + +/* SDE Rotator pixel format definitions */ +#define SDE_PIX_FMT_XRGB_8888 \ + v4l2_fourcc('X', 'R', '2', '4') /* 32 BGRX-8-8-8-8 */ +#define SDE_PIX_FMT_ARGB_8888 \ + v4l2_fourcc('A', 'R', '2', '4') /* 32 BGRA-8-8-8-8 */ +#define SDE_PIX_FMT_ABGR_8888 \ + v4l2_fourcc('R', 'A', '2', '4') /* 32-bit ABGR 8:8:8:8 */ +#define SDE_PIX_FMT_RGBA_8888 \ + v4l2_fourcc('A', 'B', '2', '4') /* 32-bit RGBA 8:8:8:8 */ +#define SDE_PIX_FMT_BGRA_8888 \ + v4l2_fourcc('B', 'A', '2', '4') /* 32 ARGB-8-8-8-8 */ +#define SDE_PIX_FMT_RGBX_8888 \ + v4l2_fourcc('X', 'B', '2', '4') /* 32-bit RGBX 8:8:8:8 */ +#define SDE_PIX_FMT_BGRX_8888 \ + v4l2_fourcc('B', 'X', '2', '4') /* 32 XRGB-8-8-8-8 */ +#define SDE_PIX_FMT_XBGR_8888 \ + v4l2_fourcc('R', 'X', '2', '4') /* 32-bit XBGR 8:8:8:8 */ +#define SDE_PIX_FMT_RGBA_5551 \ + v4l2_fourcc('R', 'A', '1', '5') /* 16-bit RGBA 5:5:5:1 */ +#define SDE_PIX_FMT_ARGB_1555 \ + v4l2_fourcc('A', 'R', '1', '5') /* 16 ARGB-1-5-5-5 */ +#define SDE_PIX_FMT_ABGR_1555 \ + v4l2_fourcc('A', 'B', '1', '5') /* 16-bit ABGR 1:5:5:5 */ +#define SDE_PIX_FMT_BGRA_5551 \ + v4l2_fourcc('B', 'A', '1', '5') /* 16-bit BGRA 5:5:5:1 */ +#define SDE_PIX_FMT_BGRX_5551 \ + v4l2_fourcc('B', 'X', '1', '5') /* 16-bit BGRX 5:5:5:1 */ +#define SDE_PIX_FMT_RGBX_5551 \ + v4l2_fourcc('R', 'X', '1', '5') /* 16-bit RGBX 5:5:5:1 */ +#define SDE_PIX_FMT_XBGR_1555 \ + v4l2_fourcc('X', 'B', '1', '5') /* 16-bit XBGR 1:5:5:5 */ +#define SDE_PIX_FMT_XRGB_1555 \ + v4l2_fourcc('X', 'R', '1', '5') /* 16 XRGB-1-5-5-5 */ +#define SDE_PIX_FMT_ARGB_4444 \ + v4l2_fourcc('A', 'R', '1', '2') /* 16 aaaarrrr ggggbbbb */ +#define SDE_PIX_FMT_RGBA_4444 \ + v4l2_fourcc('R', 'A', '1', '2') /* 16-bit RGBA 4:4:4:4 */ +#define SDE_PIX_FMT_BGRA_4444 \ + v4l2_fourcc('b', 'A', '1', '2') /* 16-bit BGRA 4:4:4:4 */ +#define SDE_PIX_FMT_ABGR_4444 \ + v4l2_fourcc('A', 'B', '1', '2') /* 16-bit ABGR 4:4:4:4 */ 
+#define SDE_PIX_FMT_RGBX_4444 \ + v4l2_fourcc('R', 'X', '1', '2') /* 16-bit RGBX 4:4:4:4 */ +#define SDE_PIX_FMT_XRGB_4444 \ + v4l2_fourcc('X', 'R', '1', '2') /* 16 xxxxrrrr ggggbbbb */ +#define SDE_PIX_FMT_BGRX_4444 \ + v4l2_fourcc('B', 'X', '1', '2') /* 16-bit BGRX 4:4:4:4 */ +#define SDE_PIX_FMT_XBGR_4444 \ + v4l2_fourcc('X', 'B', '1', '2') /* 16-bit XBGR 4:4:4:4 */ +#define SDE_PIX_FMT_RGB_888 \ + v4l2_fourcc('R', 'G', 'B', '3') /* 24 RGB-8-8-8 */ +#define SDE_PIX_FMT_BGR_888 \ + v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */ +#define SDE_PIX_FMT_RGB_565 \ + v4l2_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */ +#define SDE_PIX_FMT_BGR_565 \ + v4l2_fourcc('B', 'G', '1', '6') /* 16-bit BGR 5:6:5 */ +#define SDE_PIX_FMT_Y_CB_CR_H2V2 \ + v4l2_fourcc('Y', 'U', '1', '2') /* 12 YUV 4:2:0 */ +#define SDE_PIX_FMT_Y_CR_CB_H2V2 \ + v4l2_fourcc('Y', 'V', '1', '2') /* 12 YVU 4:2:0 */ +#define SDE_PIX_FMT_Y_CR_CB_GH2V2 \ + v4l2_fourcc('Y', 'U', '4', '2') /* Planar YVU 4:2:0 A16 */ +#define SDE_PIX_FMT_Y_CBCR_H2V2 \ + v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */ +#define SDE_PIX_FMT_Y_CRCB_H2V2 \ + v4l2_fourcc('N', 'V', '2', '1') /* 12 Y/CrCb 4:2:0 */ +#define SDE_PIX_FMT_Y_CBCR_H1V2 \ + v4l2_fourcc('N', 'H', '1', '6') /* Y/CbCr 4:2:2 */ +#define SDE_PIX_FMT_Y_CRCB_H1V2 \ + v4l2_fourcc('N', 'H', '6', '1') /* Y/CrCb 4:2:2 */ +#define SDE_PIX_FMT_Y_CBCR_H2V1 \ + v4l2_fourcc('N', 'V', '1', '6') /* 16 Y/CbCr 4:2:2 */ +#define SDE_PIX_FMT_Y_CRCB_H2V1 \ + v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */ +#define SDE_PIX_FMT_YCBYCR_H2V1 \ + v4l2_fourcc('Y', 'U', 'Y', 'V') /* 16 YUV 4:2:2 */ +#define SDE_PIX_FMT_Y_CBCR_H2V2_VENUS \ + v4l2_fourcc('Q', 'N', 'V', '2') /* Y/CbCr 4:2:0 Venus */ +#define SDE_PIX_FMT_Y_CRCB_H2V2_VENUS \ + v4l2_fourcc('Q', 'N', 'V', '1') /* Y/CrCb 4:2:0 Venus */ +#define SDE_PIX_FMT_RGBA_8888_UBWC \ + v4l2_fourcc('Q', 'R', 'G', 'B') /* RGBA 8:8:8:8 UBWC */ +#define SDE_PIX_FMT_RGBX_8888_UBWC \ + v4l2_fourcc('Q', 'X', 'B', '4') /* RGBX 
8:8:8:8 UBWC */ +#define SDE_PIX_FMT_RGB_565_UBWC \ + v4l2_fourcc('Q', 'R', 'G', '6') /* RGB 5:6:5 UBWC */ +#define SDE_PIX_FMT_Y_CBCR_H2V2_UBWC \ + v4l2_fourcc('Q', '1', '2', '8') /* UBWC 8-bit Y/CbCr 4:2:0 */ +#define SDE_PIX_FMT_RGBA_1010102 \ + v4l2_fourcc('A', 'B', '3', '0') /* RGBA 10:10:10:2 */ +#define SDE_PIX_FMT_RGBX_1010102 \ + v4l2_fourcc('X', 'B', '3', '0') /* RGBX 10:10:10:2 */ +#define SDE_PIX_FMT_ARGB_2101010 \ + v4l2_fourcc('A', 'R', '3', '0') /* ARGB 2:10:10:10 */ +#define SDE_PIX_FMT_XRGB_2101010 \ + v4l2_fourcc('X', 'R', '3', '0') /* XRGB 2:10:10:10 */ +#define SDE_PIX_FMT_BGRA_1010102 \ + v4l2_fourcc('B', 'A', '3', '0') /* BGRA 10:10:10:2 */ +#define SDE_PIX_FMT_BGRX_1010102 \ + v4l2_fourcc('B', 'X', '3', '0') /* BGRX 10:10:10:2 */ +#define SDE_PIX_FMT_ABGR_2101010 \ + v4l2_fourcc('R', 'A', '3', '0') /* ABGR 2:10:10:10 */ +#define SDE_PIX_FMT_XBGR_2101010 \ + v4l2_fourcc('R', 'X', '3', '0') /* XBGR 2:10:10:10 */ +#define SDE_PIX_FMT_RGBA_1010102_UBWC \ + v4l2_fourcc('Q', 'R', 'B', 'A') /* RGBA 10:10:10:2 UBWC */ +#define SDE_PIX_FMT_RGBX_1010102_UBWC \ + v4l2_fourcc('Q', 'X', 'B', 'A') /* RGBX 10:10:10:2 UBWC */ +#define SDE_PIX_FMT_Y_CBCR_H2V2_P010 \ + v4l2_fourcc('P', '0', '1', '0') /* Y/CbCr 4:2:0 P10 */ +#define SDE_PIX_FMT_Y_CBCR_H2V2_P010_VENUS \ + v4l2_fourcc('Q', 'P', '1', '0') /* Y/CbCr 4:2:0 P10 Venus*/ +#define SDE_PIX_FMT_Y_CBCR_H2V2_TP10 \ + v4l2_fourcc('T', 'P', '1', '0') /* Y/CbCr 4:2:0 TP10 */ +#define SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC \ + v4l2_fourcc('Q', '1', '2', 'A') /* UBWC Y/CbCr 4:2:0 TP10 */ +#define SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC \ + v4l2_fourcc('Q', '1', '2', 'B') /* UBWC Y/CbCr 4:2:0 P10 */ + +/* + * struct msm_sde_rotator_fence - v4l2 buffer fence info + * @index: id number of the buffer + * @type: enum v4l2_buf_type; buffer type + * @fd: file descriptor of the fence associated with this buffer + */ +struct msm_sde_rotator_fence { + __u32 index; + __u32 type; + __s32 fd; + __u32 reserved[5]; +}; + +/* + * struct 
msm_sde_rotator_comp_ratio - v4l2 buffer compression ratio + * @index: id number of the buffer + * @type: enum v4l2_buf_type; buffer type + * @numer: numerator of the ratio + * @denom: denominator of the ratio + */ +struct msm_sde_rotator_comp_ratio { + __u32 index; + __u32 type; + __u32 numer; + __u32 denom; + __u32 reserved[4]; +}; + +/* SDE Rotator private ioctl ID */ +#define VIDIOC_G_SDE_ROTATOR_FENCE \ + _IOWR('V', BASE_VIDIOC_PRIVATE + 10, struct msm_sde_rotator_fence) +#define VIDIOC_S_SDE_ROTATOR_FENCE \ + _IOWR('V', BASE_VIDIOC_PRIVATE + 11, struct msm_sde_rotator_fence) +#define VIDIOC_G_SDE_ROTATOR_COMP_RATIO \ + _IOWR('V', BASE_VIDIOC_PRIVATE + 12, struct msm_sde_rotator_comp_ratio) +#define VIDIOC_S_SDE_ROTATOR_COMP_RATIO \ + _IOWR('V', BASE_VIDIOC_PRIVATE + 13, struct msm_sde_rotator_comp_ratio) + +/* SDE Rotator private control ID's */ +#define V4L2_CID_SDE_ROTATOR_SECURE (V4L2_CID_USER_BASE + 0x1000) + +/* + * This control Id indicates this context is associated with the + * secure camera. 
+ */ +#define V4L2_CID_SDE_ROTATOR_SECURE_CAMERA (V4L2_CID_USER_BASE + 0x2000) + +#endif /* __UAPI_MSM_SDE_ROTATOR_H__ */ diff --git a/msm/Android.mk b/msm/Android.mk new file mode 100644 index 000000000..1ac179452 --- /dev/null +++ b/msm/Android.mk @@ -0,0 +1,88 @@ +DISPLAY_SELECT := CONFIG_DRM_MSM=m + +LOCAL_PATH := $(call my-dir) +ifeq ($(TARGET_BOARD_PLATFORM), niobe) +LOCAL_MODULE_DDK_BUILD := false +else +LOCAL_MODULE_DDK_BUILD := true +endif +include $(CLEAR_VARS) + +LOCAL_MODULE_DDK_SUBTARGET_REGEX := "display_drivers*" +ifeq ($(TARGET_BOARD_PLATFORM), volcano) + LOCAL_MODULE_DDK_SUBTARGET_REGEX := "$(TARGET_BOARD_PLATFORM)_display_drivers.*" +endif + +# This makefile is only for DLKM +ifneq ($(findstring vendor,$(LOCAL_PATH)),) + +ifneq ($(findstring opensource,$(LOCAL_PATH)),) + DISPLAY_BLD_DIR := $(TOP)/vendor/qcom/opensource/display-drivers +endif # opensource + +DLKM_DIR := $(TOP)/device/qcom/common/dlkm + +LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) + +# Build display.ko as msm_drm.ko +########################################################### +# This is set once per LOCAL_PATH, not per (kernel) module +KBUILD_OPTIONS := DISPLAY_ROOT=$(DISPLAY_BLD_DIR) +KBUILD_OPTIONS += MODNAME=msm_drm +KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) +KBUILD_OPTIONS += $(DISPLAY_SELECT) + +ifneq ($(TARGET_BOARD_AUTO),true) +ifneq ($(TARGET_BOARD_PLATFORM), pitti) +KBUILD_OPTIONS += KBUILD_EXTRA_SYMBOLS+=$(PWD)/$(call intermediates-dir-for,DLKM,mmrm-module-symvers)/Module.symvers +endif +ifneq ($(TARGET_BOARD_PLATFORM), taro) + KBUILD_OPTIONS += KBUILD_EXTRA_SYMBOLS+=$(PWD)/$(call intermediates-dir-for,DLKM,sync-fence-module-symvers)/Module.symvers + KBUILD_OPTIONS += KBUILD_EXTRA_SYMBOLS+=$(PWD)/$(call intermediates-dir-for,DLKM,msm-ext-disp-module-symvers)/Module.symvers + KBUILD_OPTIONS += KBUILD_EXTRA_SYMBOLS+=$(PWD)/$(call intermediates-dir-for,DLKM,hw-fence-module-symvers)/Module.symvers + KBUILD_OPTIONS 
+= KBUILD_EXTRA_SYMBOLS+=$(PWD)/$(call intermediates-dir-for,DLKM,sec-module-symvers)/Module.symvers +endif +endif + +# For incremental compilation +include $(CLEAR_VARS) +########################################################### +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := msm_drm-module-symvers +LOCAL_MODULE_STEM := Module.symvers +LOCAL_MODULE_KBUILD_NAME := Module.symvers +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) +# Include kp_module.ko in the /vendor/lib/modules (vendor.img) +# BOARD_VENDOR_KERNEL_MODULES += $(LOCAL_MODULE_PATH)/$(LOCAL_MODULE) +include $(DLKM_DIR)/Build_external_kernelmodule.mk +########################################################### + +########################################################### +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*) +LOCAL_MODULE := msm_drm.ko +LOCAL_MODULE_KBUILD_NAME := msm_drm.ko +LOCAL_MODULE_TAGS := optional +LOCAL_MODULE_DEBUG_ENABLE := true +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) + +ifneq ($(TARGET_BOARD_AUTO),true) +ifneq ($(TARGET_BOARD_PLATFORM), pitti) +LOCAL_REQUIRED_MODULES += mmrm-module-symvers +LOCAL_ADDITIONAL_DEPENDENCIES += $(call intermediates-dir-for,DLKM,mmrm-module-symvers)/Module.symvers +endif +ifneq ($(TARGET_BOARD_PLATFORM), taro) + LOCAL_REQUIRED_MODULES += sync-fence-module-symvers + LOCAL_REQUIRED_MODULES += msm-ext-disp-module-symvers + LOCAL_REQUIRED_MODULES += hw-fence-module-symvers + LOCAL_REQUIRED_MODULES += sec-module-symvers + LOCAL_ADDITIONAL_DEPENDENCIES += $(call intermediates-dir-for,DLKM,sync-fence-module-symvers)/Module.symvers + LOCAL_ADDITIONAL_DEPENDENCIES += $(call intermediates-dir-for,DLKM,msm-ext-disp-module-symvers)/Module.symvers + LOCAL_ADDITIONAL_DEPENDENCIES += $(call intermediates-dir-for,DLKM,hw-fence-module-symvers)/Module.symvers + LOCAL_ADDITIONAL_DEPENDENCIES += $(call intermediates-dir-for,DLKM,sec-module-symvers)/Module.symvers +endif 
+endif + +include $(DLKM_DIR)/Build_external_kernelmodule.mk +########################################################### +endif # DLKM check diff --git a/msm/Kbuild b/msm/Kbuild new file mode 100644 index 000000000..ee3b560e8 --- /dev/null +++ b/msm/Kbuild @@ -0,0 +1,298 @@ +# SPDX-License-Identifier: GPL-2.0-only + +KDIR := $(TOP)/kernel_platform/msm-kernel + +ifeq ($(CONFIG_ARCH_WAIPIO), y) +ifeq ($(CONFIG_ARCH_QTI_VM), y) + include $(DISPLAY_ROOT)/config/gki_waipiodisptui.conf + LINUX_INC += -include $(DISPLAY_ROOT)/config/gki_waipiodisptuiconf.h +else + include $(DISPLAY_ROOT)/config/gki_waipiodisp.conf + LINUX_INC += -include $(DISPLAY_ROOT)/config/gki_waipiodispconf.h +endif +endif + +ifeq ($(CONFIG_ARCH_NEO), y) + include $(DISPLAY_ROOT)/config/gki_neodisp.conf + LINUX_INC += -include $(DISPLAY_ROOT)/config/gki_neodispconf.h +endif + +ifeq ($(CONFIG_ARCH_PARROT), y) + include $(DISPLAY_ROOT)/config/gki_parrotdisp.conf + LINUX_INC += -include $(DISPLAY_ROOT)/config/gki_parrotdispconf.h +endif + +ifeq ($(CONFIG_ARCH_PITTI), y) + include $(DISPLAY_ROOT)/config/gki_pittidisp.conf + LINUX_INC += -include $(DISPLAY_ROOT)/config/gki_pittidispconf.h +endif + +ifeq ($(CONFIG_ARCH_NIOBE), y) + include $(DISPLAY_ROOT)/config/gki_niobedisp.conf + LINUX_INC += -include $(DISPLAY_ROOT)/config/gki_niobedispconf.h +endif + +ifeq ($(CONFIG_ARCH_PINEAPPLE), y) +ifeq ($(CONFIG_ARCH_QTI_VM), y) + include $(DISPLAY_ROOT)/config/gki_pineappledisptui.conf + LINUX_INC += -include $(DISPLAY_ROOT)/config/gki_pineappledisptuiconf.h +else + include $(DISPLAY_ROOT)/config/gki_pineappledisp.conf + LINUX_INC += -include $(DISPLAY_ROOT)/config/gki_pineappledispconf.h +endif +endif + +ifeq ($(CONFIG_ARCH_KALAMA), y) +ifeq ($(CONFIG_ARCH_QTI_VM), y) + include $(DISPLAY_ROOT)/config/gki_kalamadisptui.conf + LINUX_INC += -include $(DISPLAY_ROOT)/config/gki_kalamadisptuiconf.h +else + include $(DISPLAY_ROOT)/config/gki_kalamadisp.conf + LINUX_INC += -include 
$(DISPLAY_ROOT)/config/gki_kalamadispconf.h +endif +endif + +ifeq (y, $(findstring y, $(CONFIG_ARCH_SA8155) $(CONFIG_ARCH_SA6155) $(CONFIG_ARCH_SA8195))) + include $(DISPLAY_ROOT)/config/augen3disp.conf + LINUX_INC += -include $(DISPLAY_ROOT)/config/augen3dispconf.h +endif + +LINUX_INC += -include/linux \ + -include/linux/drm + +LINUX_INC += -I$(DISPLAY_ROOT) \ + -I$(DISPLAY_ROOT)/include \ + -I$(KERNEL_ROOT)/drivers/clk/qcom \ + -I$(KERNEL_SRC)/drivers/clk/qcom \ + -I$(KERNEL_ROOT)/kernel/irq \ + -I$(KERNEL_SRC)/kernel/irq \ + -I$(DISPLAY_ROOT)/include/linux \ + -I$(DISPLAY_ROOT)/rotator \ + -I$(DISPLAY_ROOT)/msm \ + -I$(DISPLAY_ROOT)/msm/dp \ + -I$(DISPLAY_ROOT)/msm/dsi \ + -I$(DISPLAY_ROOT)/msm/sde \ + -I$(DISPLAY_ROOT)/include/uapi/display \ + -I$(DISPLAY_ROOT)/msm/mi_disp + +CDEFINES += -DANI_LITTLE_BYTE_ENDIAN \ + -DANI_LITTLE_BIT_ENDIAN \ + -DDOT11F_LITTLE_ENDIAN_HOST \ + -DANI_COMPILER_TYPE_GCC \ + -DANI_OS_TYPE_ANDROID=6 \ + -DPTT_SOCK_SVC_ENABLE \ + -Wall\ + -Werror\ + -D__linux__ + +# Add FACTORY_BUILD macro to disable Some uesless function on factory_build. 
ifeq ($(FACTORY_BUILD), 1)
	CDEFINES += -DDISPLAY_FACTORY_BUILD
endif

KBUILD_CPPFLAGS += $(CDEFINES)

ccflags-y += $(LINUX_INC)

ifeq ($(call cc-option-yn, -Wmaybe-uninitialized),y)
EXTRA_CFLAGS += -Wmaybe-uninitialized
endif

KBUILD_EXTRA_SYMBOLS +=$(OUT)/obj/vendor/qcom/opensource/display-drivers/hdcp/Module.symvers
KBUILD_EXTRA_SYMBOLS +=$(OUT)/obj/vendor/qcom/opensource/display-drivers/msm/Module.symvers

ifeq ($(call cc-option-yn, -Wheader-guard),y)
EXTRA_CFLAGS += -Wheader-guard
endif

ccflags-y += -Wformat-extra-args -Wstrict-prototypes -Wformat-insufficient-args \
		-Wformat-invalid-specifier -Wformat-zero-length -Wnonnull

ifneq ($(MODNAME), qdsp6v2)
CHIP_NAME ?= $(MODNAME)
CDEFINES += -DMULTI_IF_NAME=\"$(CHIP_NAME)\"
endif

######### CONFIG_DRM_MSM ########
obj-m += msm_drm.o

msm_drm-$(CONFIG_HDCP_QSEECOM) := ../hdcp/msm_hdcp.o \
		dp/dp_hdcp2p2.o \
		sde_hdcp_1x.o \
		sde_hdcp_2x.o

# NOTE: sde_rotator_dev.o was previously listed twice; the duplicate
# entry in a composite-object list is removed here.
msm_drm-$(CONFIG_MSM_SDE_ROTATOR) += ../rotator/sde_rotator_dev.o \
		../rotator/sde_rotator_core.o \
		../rotator/sde_rotator_base.o \
		../rotator/sde_rotator_formats.o \
		../rotator/sde_rotator_util.o \
		../rotator/sde_rotator_io_util.o \
		../rotator/sde_rotator_smmu.o \
		../rotator/sde_rotator_r1_wb.o \
		../rotator/sde_rotator_r1_pipe.o \
		../rotator/sde_rotator_r1_ctl.o \
		../rotator/sde_rotator_r1.o \
		../rotator/sde_rotator_r3.o

ifeq ($(CONFIG_MSM_SDE_ROTATOR), y)
msm_drm-$(CONFIG_SYNC_FILE) += ../rotator/sde_rotator_sync.o

msm_drm-$(CONFIG_DEBUG_FS) += ../rotator/sde_rotator_debug.o \
		../rotator/sde_rotator_r1_debug.o \
		../rotator/sde_rotator_r3_debug.o
endif

msm_drm-$(CONFIG_DRM_SDE_VM) += sde/sde_vm_common.o \
		sde/sde_vm_primary.o \
		sde/sde_vm_trusted.o \
		sde/sde_vm_msgq.o

msm_drm-$(CONFIG_DRM_MSM_DP) += dp/dp_altmode.o \
		dp/dp_parser.o \
		dp/dp_power.o \
		dp/dp_catalog.o \
		dp/dp_catalog_v420.o \
		dp/dp_catalog_v200.o \
		dp/dp_aux.o \
		dp/dp_panel.o \
dp/dp_link.o \ + dp/dp_ctrl.o \ + dp/dp_audio.o \ + dp/dp_debug.o \ + dp/dp_hpd.o \ + dp/dp_aux_bridge.o \ + dp/dp_bridge_hpd.o \ + dp/dp_mst_sim.o \ + dp/dp_mst_sim_helper.o \ + dp/dp_gpio_hpd.o \ + dp/dp_lphw_hpd.o \ + dp/dp_display.o \ + dp/dp_drm.o \ + dp/dp_pll.o \ + dp/dp_pll_5nm.o \ + dp/dp_pll_4nm.o + +msm_drm-$(CONFIG_DRM_MSM_DP_MST) += dp/dp_mst_drm.o + +msm_drm-$(CONFIG_DRM_MSM_DP_USBPD_LEGACY) += dp/dp_usbpd.o + +msm_drm-$(CONFIG_DRM_MSM_SDE) += sde/sde_crtc.o \ + sde/sde_encoder.o \ + sde/sde_encoder_dce.o \ + sde/sde_encoder_phys_vid.o \ + sde/sde_encoder_phys_cmd.o \ + sde/sde_irq.o sde/sde_core_irq.o \ + sde/sde_core_perf.o \ + sde/sde_rm.o \ + sde/sde_kms_utils.o \ + sde/sde_kms.o \ + sde/sde_plane.o \ + sde/sde_connector.o \ + sde/sde_color_processing.o \ + sde/sde_vbif.o \ + sde_dbg.o \ + sde_dbg_evtlog.o \ + sde_io_util.o \ + sde_vm_event.o \ + sde/sde_hw_reg_dma_v1_color_proc.o \ + sde/sde_hw_color_proc_v4.o \ + sde/sde_hw_ad4.o \ + sde/sde_hw_uidle.o \ + sde_edid_parser.o \ + sde/sde_hw_catalog.o \ + sde/sde_hw_cdm.o \ + sde/sde_hw_dspp.o \ + sde/sde_hw_intf.o \ + sde/sde_hw_lm.o \ + sde/sde_hw_ctl.o \ + sde/sde_hw_util.o \ + sde/sde_hw_sspp.o \ + sde/sde_hw_wb.o \ + sde/sde_hw_pingpong.o \ + sde/sde_hw_top.o \ + sde/sde_hw_interrupts.o \ + sde/sde_hw_vbif.o \ + sde/sde_formats.o \ + sde_power_handle.o \ + sde/sde_hw_color_processing_v1_7.o \ + sde/sde_reg_dma.o \ + sde/sde_hw_reg_dma_v1.o \ + sde/sde_hw_dsc.o \ + sde/sde_hw_dsc_1_2.o \ + sde/sde_hw_vdc.o \ + sde/sde_hw_ds.o \ + sde/sde_fence.o \ + sde/sde_hw_qdss.o \ + sde_dsc_helper.o \ + sde_vdc_helper.o \ + sde/sde_hw_dnsc_blur.o \ + sde/sde_hw_rc.o + +msm_drm-$(CONFIG_DRM_SDE_WB) += sde/sde_wb.o \ + sde/sde_encoder_phys_wb.o + +msm_drm-$(CONFIG_DRM_SDE_RSC) += sde_rsc.o \ + sde_rsc_hw.o \ + sde_rsc_hw_v3.o + +msm_drm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi_phy.o \ + dsi/dsi_pwr.o \ + dsi/dsi_phy.o \ + dsi/dsi_phy_hw_v3_0.o \ + dsi/dsi_phy_hw_v4_0.o \ + dsi/dsi_phy_hw_v5_0.o \ + 
dsi/dsi_phy_timing_calc.o \ + dsi/dsi_phy_timing_v3_0.o \ + dsi/dsi_phy_timing_v4_0.o \ + dsi/dsi_pll.o \ + dsi/dsi_pll_5nm.o \ + dsi/dsi_pll_4nm.o \ + dsi/dsi_ctrl_hw_cmn.o \ + dsi/dsi_ctrl_hw_2_2.o \ + dsi/dsi_ctrl.o \ + dsi/dsi_catalog.o \ + dsi/dsi_drm.o \ + dsi/dsi_display.o \ + dsi/dsi_panel.o \ + dsi/dsi_clk_manager.o \ + dsi/dsi_display_test.o \ + dsi/lcd_bias.o + + +msm_drm-$(CONFIG_DSI_PARSER) += dsi/dsi_parser.o + +msm_drm-$(CONFIG_THERMAL_OF) += msm_cooling_device.o + +msm_drm-$(CONFIG_DRM_MSM) += msm_atomic.o \ + msm_fb.o \ + msm_drv.o \ + msm_gem.o \ + msm_gem_prime.o \ + msm_gem_vma.o \ + msm_smmu.o \ + msm_prop.o + +msm_drm-$(CONFIG_DRM_MSM_MI_DISP) += mi_disp/mi_disp_core.o \ + mi_disp/mi_disp_feature.o \ + mi_disp/mi_disp_sysfs.o \ + mi_disp/mi_disp_file.o \ + mi_disp/mi_disp_print.o \ + mi_disp/mi_disp_procfs.o \ + mi_disp/mi_disp_debugfs.o \ + mi_disp/mi_disp_print.o \ + mi_disp/mi_disp_log.o \ + mi_disp/mi_disp_flatmode.o\ + mi_disp/mi_disp_lhbm.o \ + mi_disp/mi_disp_parser.o \ + mi_disp/mi_dsi_display.o \ + mi_disp/mi_dsi_panel.o \ + mi_disp/mi_sde_crtc.o \ + mi_disp/mi_sde_encoder.o \ + mi_disp/mi_sde_connector.o \ + mi_disp/mi_cooling_device.o \ + mi_disp/mi_dsi_panel_count.o \ + mi_disp/mi_hwconf_manager.o + +CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" + diff --git a/msm/Makefile b/msm/Makefile new file mode 100644 index 000000000..edaa73f1e --- /dev/null +++ b/msm/Makefile @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 + +KBUILD_OPTIONS+= DISPLAY_ROOT=$(KERNEL_SRC)/$(M)/../ + +all: modules + +modules_install: + $(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install + +%: + $(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS) + +clean: + rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers + rm -rf .tmp_versions + diff --git a/msm/dp/dp_altmode.c b/msm/dp/dp_altmode.c new file mode 100644 index 000000000..d67030319 --- /dev/null +++ b/msm/dp/dp_altmode.c @@ -0,0 +1,325 @@ +// 
SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dp_altmode.h" +#include "dp_debug.h" +#include "sde_dbg.h" + + +#define ALTMODE_CONFIGURE_MASK (0x3f) +#define ALTMODE_HPD_STATE_MASK (0x40) +#define ALTMODE_HPD_IRQ_MASK (0x80) + +struct dp_altmode_private { + bool forced_disconnect; + struct device *dev; + struct dp_hpd_cb *dp_cb; + struct dp_altmode dp_altmode; + struct altmode_client *amclient; + bool connected; + u32 lanes; +}; + +enum dp_altmode_pin_assignment { + DPAM_HPD_OUT, + DPAM_HPD_A, + DPAM_HPD_B, + DPAM_HPD_C, + DPAM_HPD_D, + DPAM_HPD_E, + DPAM_HPD_F, +}; + +static int dp_altmode_set_usb_dp_mode(struct dp_altmode_private *altmode) +{ + int rc = 0; + struct device_node *np; + struct device_node *usb_node; + struct platform_device *usb_pdev; + int timeout = 250; + + if (!altmode || !altmode->dev) { + DP_ERR("invalid args\n"); + return -EINVAL; + } + + np = altmode->dev->of_node; + + usb_node = of_parse_phandle(np, "usb-controller", 0); + if (!usb_node) { + DP_ERR("unable to get usb node\n"); + return -EINVAL; + } + + usb_pdev = of_find_device_by_node(usb_node); + if (!usb_pdev) { + of_node_put(usb_node); + DP_ERR("unable to get usb pdev\n"); + return -EINVAL; + } + + while (timeout) { + rc = dwc3_msm_set_dp_mode(&usb_pdev->dev, altmode->connected, altmode->lanes); + if (rc != -EBUSY && rc != -EAGAIN) + break; + + DP_WARN("USB busy, retry\n"); + + /* wait for hw recommended delay for usb */ + msleep(20); + timeout--; + } + of_node_put(usb_node); + platform_device_put(usb_pdev); + + if (rc) + DP_ERR("Error releasing SS lanes: %d\n", rc); + + return rc; +} + +static void dp_altmode_send_pan_ack(struct altmode_client *amclient, + u8 port_index) +{ + int rc; + struct altmode_pan_ack_msg ack; + + ack.cmd_type = ALTMODE_PAN_ACK; + ack.port_index = port_index; + + rc = altmode_send_data(amclient, 
&ack, sizeof(ack)); + if (rc < 0) { + DP_ERR("failed: %d\n", rc); + return; + } + + DP_DEBUG("port=%d\n", port_index); +} + +static int dp_altmode_notify(void *priv, void *data, size_t len) +{ + int rc = 0; + struct dp_altmode_private *altmode = + (struct dp_altmode_private *) priv; + u8 port_index, dp_data, orientation; + u8 *payload = (u8 *) data; + u8 pin, hpd_state, hpd_irq; + bool force_multi_func = altmode->dp_altmode.base.force_multi_func; + + port_index = payload[0]; + orientation = payload[1]; + dp_data = payload[8]; + + pin = dp_data & ALTMODE_CONFIGURE_MASK; + hpd_state = (dp_data & ALTMODE_HPD_STATE_MASK) >> 6; + hpd_irq = (dp_data & ALTMODE_HPD_IRQ_MASK) >> 7; + + altmode->dp_altmode.base.hpd_high = !!hpd_state; + altmode->dp_altmode.base.hpd_irq = !!hpd_irq; + altmode->dp_altmode.base.multi_func = force_multi_func ? true : + !(pin == DPAM_HPD_C || pin == DPAM_HPD_E || pin == DPAM_HPD_OUT); + + DP_DEBUG("payload=0x%x\n", dp_data); + DP_DEBUG("port_index=%d, orientation=%d, pin=%d, hpd_state=%d\n", + port_index, orientation, pin, hpd_state); + DP_DEBUG("multi_func=%d, hpd_high=%d, hpd_irq=%d\n", + altmode->dp_altmode.base.multi_func, + altmode->dp_altmode.base.hpd_high, + altmode->dp_altmode.base.hpd_irq); + DP_DEBUG("connected=%d\n", altmode->connected); + SDE_EVT32_EXTERNAL(dp_data, port_index, orientation, pin, hpd_state, + altmode->dp_altmode.base.multi_func, + altmode->dp_altmode.base.hpd_high, + altmode->dp_altmode.base.hpd_irq, altmode->connected); + + if (!pin) { + /* Cable detach */ + if (altmode->connected) { + altmode->connected = false; + altmode->dp_altmode.base.alt_mode_cfg_done = false; + altmode->dp_altmode.base.orientation = ORIENTATION_NONE; + if (altmode->dp_cb && altmode->dp_cb->disconnect) + altmode->dp_cb->disconnect(altmode->dev); + + rc = dp_altmode_set_usb_dp_mode(altmode); + if (rc) + DP_ERR("failed to clear usb dp mode, rc: %d\n", rc); + } + goto ack; + } + + /* Configure */ + if (!altmode->connected) { + altmode->connected = 
true; + altmode->dp_altmode.base.alt_mode_cfg_done = true; + altmode->forced_disconnect = false; + altmode->lanes = 4; + + if (altmode->dp_altmode.base.multi_func) + altmode->lanes = 2; + + DP_DEBUG("Connected=%d, lanes=%d\n",altmode->connected,altmode->lanes); + + switch (orientation) { + case 0: + orientation = ORIENTATION_CC1; + break; + case 1: + orientation = ORIENTATION_CC2; + break; + case 2: + orientation = ORIENTATION_NONE; + break; + default: + orientation = ORIENTATION_NONE; + break; + } + + altmode->dp_altmode.base.orientation = orientation; + + rc = dp_altmode_set_usb_dp_mode(altmode); + if (rc) + goto ack; + + if (altmode->dp_cb && altmode->dp_cb->configure) + altmode->dp_cb->configure(altmode->dev); + goto ack; + } + + /* Attention */ + if (altmode->forced_disconnect) + goto ack; + + if (altmode->dp_cb && altmode->dp_cb->attention) + altmode->dp_cb->attention(altmode->dev); +ack: + dp_altmode_send_pan_ack(altmode->amclient, port_index); + return rc; +} + +static void dp_altmode_register(void *priv) +{ + struct dp_altmode_private *altmode = priv; + struct altmode_client_data cd = { + .callback = &dp_altmode_notify, + }; + + cd.name = "displayport"; + cd.svid = USB_SID_DISPLAYPORT; + cd.priv = altmode; + + altmode->amclient = altmode_register_client(altmode->dev, &cd); + if (IS_ERR_OR_NULL(altmode->amclient)) + DP_ERR("failed to register as client: %ld\n", + PTR_ERR(altmode->amclient)); + else + DP_DEBUG("success\n"); +} + +static int dp_altmode_simulate_connect(struct dp_hpd *dp_hpd, bool hpd) +{ + struct dp_altmode *dp_altmode; + struct dp_altmode_private *altmode; + + dp_altmode = container_of(dp_hpd, struct dp_altmode, base); + altmode = container_of(dp_altmode, struct dp_altmode_private, + dp_altmode); + + dp_altmode->base.hpd_high = hpd; + altmode->forced_disconnect = !hpd; + altmode->dp_altmode.base.alt_mode_cfg_done = hpd; + + if (hpd) + altmode->dp_cb->configure(altmode->dev); + else + altmode->dp_cb->disconnect(altmode->dev); + + return 0; +} 
+ +static int dp_altmode_simulate_attention(struct dp_hpd *dp_hpd, int vdo) +{ + struct dp_altmode *dp_altmode; + struct dp_altmode_private *altmode; + struct dp_altmode *status; + + dp_altmode = container_of(dp_hpd, struct dp_altmode, base); + altmode = container_of(dp_altmode, struct dp_altmode_private, + dp_altmode); + + status = &altmode->dp_altmode; + + status->base.hpd_high = (vdo & BIT(7)) ? true : false; + status->base.hpd_irq = (vdo & BIT(8)) ? true : false; + + if (altmode->dp_cb && altmode->dp_cb->attention) + altmode->dp_cb->attention(altmode->dev); + + return 0; +} + +struct dp_hpd *dp_altmode_get(struct device *dev, struct dp_hpd_cb *cb) +{ + int rc = 0; + struct dp_altmode_private *altmode; + struct dp_altmode *dp_altmode; + + if (!cb) { + DP_ERR("invalid cb data\n"); + return ERR_PTR(-EINVAL); + } + + altmode = kzalloc(sizeof(*altmode), GFP_KERNEL); + if (!altmode) + return ERR_PTR(-ENOMEM); + + altmode->dev = dev; + altmode->dp_cb = cb; + + dp_altmode = &altmode->dp_altmode; + dp_altmode->base.register_hpd = NULL; + dp_altmode->base.simulate_connect = dp_altmode_simulate_connect; + dp_altmode->base.simulate_attention = dp_altmode_simulate_attention; + + rc = altmode_register_notifier(dev, dp_altmode_register, altmode); + if (rc < 0) { + DP_ERR("altmode probe notifier registration failed: %d\n", rc); + goto error; + } + + DP_DEBUG("success\n"); + + return &dp_altmode->base; +error: + kfree(altmode); + return ERR_PTR(rc); +} + +void dp_altmode_put(struct dp_hpd *dp_hpd) +{ + struct dp_altmode *dp_altmode; + struct dp_altmode_private *altmode; + + dp_altmode = container_of(dp_hpd, struct dp_altmode, base); + if (!dp_altmode) + return; + + altmode = container_of(dp_altmode, struct dp_altmode_private, + dp_altmode); + + altmode_deregister_client(altmode->amclient); + altmode_deregister_notifier(altmode->dev, altmode); + + kfree(altmode); +} diff --git a/msm/dp/dp_altmode.h b/msm/dp/dp_altmode.h new file mode 100644 index 000000000..b8e403ed1 --- 
/dev/null +++ b/msm/dp/dp_altmode.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_ALTMODE_H_ +#define _DP_ALTMODE_H_ + +#include +#include "dp_hpd.h" + +struct device; + +struct dp_altmode { + struct dp_hpd base; +}; + +struct dp_hpd *dp_altmode_get(struct device *dev, struct dp_hpd_cb *cb); + +void dp_altmode_put(struct dp_hpd *pd); +#endif /* _DP_ALTMODE_H_ */ + diff --git a/msm/dp/dp_audio.c b/msm/dp/dp_audio.c new file mode 100644 index 000000000..4e1537969 --- /dev/null +++ b/msm/dp/dp_audio.c @@ -0,0 +1,915 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)) +#include +#else +#include +#endif + +#include "dp_catalog.h" +#include "dp_audio.h" +#include "dp_panel.h" +#include "dp_debug.h" + +struct dp_audio_private { + struct platform_device *ext_pdev; + struct platform_device *pdev; + struct dp_catalog_audio *catalog; + struct msm_ext_disp_init_data ext_audio_data; + struct dp_panel *panel; + + bool ack_enabled; + atomic_t session_on; + bool engine_on; + + u32 channels; + + struct completion hpd_comp; + struct workqueue_struct *notify_workqueue; + struct delayed_work notify_delayed_work; + struct mutex ops_lock; + + struct dp_audio dp_audio; + + atomic_t acked; +}; + +static u32 dp_audio_get_header(struct dp_catalog_audio *catalog, + enum dp_catalog_audio_sdp_type sdp, + enum dp_catalog_audio_header_type header) +{ + catalog->sdp_type = sdp; + catalog->sdp_header = header; + catalog->get_header(catalog); + + return catalog->data; +} + +static void dp_audio_set_header(struct dp_catalog_audio *catalog, + u32 data, + enum dp_catalog_audio_sdp_type sdp, + enum dp_catalog_audio_header_type header) +{ + 
catalog->sdp_type = sdp; + catalog->sdp_header = header; + catalog->data = data; + catalog->set_header(catalog); +} + +static void dp_audio_stream_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog_audio *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1); + value &= 0x0000ffff; + + new_value = 0x02; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + DP_DEBUG("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2); + value &= 0xffff0000; + new_value = 0x0; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + DP_DEBUG("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2); + + /* Config header and parity byte 3 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3); + value &= 0x0000ffff; + + new_value = audio->channels - 1; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_3_BIT) + | (parity_byte << PARITY_BYTE_3_BIT)); + DP_DEBUG("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3); +} + +static void dp_audio_timestamp_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog_audio *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + 
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1); + value &= 0x0000ffff; + + new_value = 0x1; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + DP_DEBUG("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2); + value &= 0xffff0000; + + new_value = 0x17; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + DP_DEBUG("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2); + + /* Config header and parity byte 3 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3); + value &= 0x0000ffff; + + new_value = (0x0 | (0x11 << 2)); + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_3_BIT) + | (parity_byte << PARITY_BYTE_3_BIT)); + DP_DEBUG("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3); +} + +static void dp_audio_infoframe_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog_audio *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1); + value &= 0x0000ffff; + + new_value = 0x84; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + DP_DEBUG("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + 
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2); + value &= 0xffff0000; + + new_value = 0x1b; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + DP_DEBUG("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2); + + /* Config header and parity byte 3 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3); + value &= 0x0000ffff; + + new_value = (0x0 | (0x11 << 2)); + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_3_BIT) + | (parity_byte << PARITY_BYTE_3_BIT)); + DP_DEBUG("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", + new_value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3); +} + +static void dp_audio_copy_management_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog_audio *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1); + value &= 0x0000ffff; + + new_value = 0x05; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + DP_DEBUG("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2); + value &= 0xffff0000; + + new_value = 0x0F; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | 
(parity_byte << PARITY_BYTE_2_BIT)); + DP_DEBUG("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2); + + /* Config header and parity byte 3 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3); + value &= 0x0000ffff; + + new_value = 0x0; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_3_BIT) + | (parity_byte << PARITY_BYTE_3_BIT)); + DP_DEBUG("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3); +} + +static void dp_audio_isrc_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog_audio *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1); + value &= 0x0000ffff; + + new_value = 0x06; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + DP_DEBUG("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2); + value &= 0xffff0000; + + new_value = 0x0F; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + DP_DEBUG("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2); +} + +static void dp_audio_setup_sdp(struct dp_audio_private *audio) +{ + if (!atomic_read(&audio->session_on)) { + DP_WARN("session inactive\n"); + return; + } + + /* 
always program stream 0 first before actual stream cfg */ + audio->catalog->stream_id = DP_STREAM_0; + audio->catalog->config_sdp(audio->catalog); + + if (audio->panel->stream_id == DP_STREAM_1) { + audio->catalog->stream_id = DP_STREAM_1; + audio->catalog->config_sdp(audio->catalog); + } + + dp_audio_stream_sdp(audio); + dp_audio_timestamp_sdp(audio); + dp_audio_infoframe_sdp(audio); + dp_audio_copy_management_sdp(audio); + dp_audio_isrc_sdp(audio); +} + +static void dp_audio_setup_acr(struct dp_audio_private *audio) +{ + u32 select = 0; + struct dp_catalog_audio *catalog = audio->catalog; + + if (!atomic_read(&audio->session_on)) { + DP_WARN("session inactive\n"); + return; + } + + switch (audio->dp_audio.bw_code) { + case DP_LINK_BW_1_62: + select = 0; + break; + case DP_LINK_BW_2_7: + select = 1; + break; + case DP_LINK_BW_5_4: + select = 2; + break; + case DP_LINK_BW_8_1: + select = 3; + break; + default: + DP_DEBUG("Unknown link rate\n"); + select = 0; + break; + } + + catalog->data = select; + catalog->config_acr(catalog); +} + +static void dp_audio_enable(struct dp_audio_private *audio, bool enable) +{ + struct dp_catalog_audio *catalog = audio->catalog; + + audio->engine_on = enable; + if (!atomic_read(&audio->session_on)) { + DP_WARN("session inactive. 
enable=%d\n", enable); + return; + } + catalog->data = enable; + + if (audio->panel->get_panel_on(audio->panel)) + catalog->enable(catalog); + +} + +static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev) +{ + struct msm_ext_disp_data *ext_data; + struct dp_audio *dp_audio; + + if (!pdev) { + DP_ERR("invalid input\n"); + return ERR_PTR(-ENODEV); + } + + ext_data = platform_get_drvdata(pdev); + if (!ext_data) { + DP_ERR("invalid ext disp data\n"); + return ERR_PTR(-EINVAL); + } + + dp_audio = ext_data->intf_data; + if (!dp_audio) { + DP_ERR("invalid intf data\n"); + return ERR_PTR(-EINVAL); + } + + return container_of(dp_audio, struct dp_audio_private, dp_audio); +} + +static int dp_audio_info_setup(struct platform_device *pdev, + struct msm_ext_disp_audio_setup_params *params) +{ + int rc = 0; + struct dp_audio_private *audio; + + audio = dp_audio_get_data(pdev); + if (IS_ERR(audio)) { + rc = PTR_ERR(audio); + return rc; + } + + if (audio->dp_audio.tui_active) { + DP_DEBUG("TUI session active\n"); + return 0; + } + + mutex_lock(&audio->ops_lock); + + audio->channels = params->num_of_channels; + + if (audio->panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream id: %d\n", + audio->panel->stream_id); + rc = -EINVAL; + mutex_unlock(&audio->ops_lock); + return rc; + } + + dp_audio_setup_sdp(audio); + dp_audio_setup_acr(audio); + dp_audio_enable(audio, true); + + mutex_unlock(&audio->ops_lock); + + DP_DEBUG("audio stream configured\n"); + + return rc; +} + +static int dp_audio_get_edid_blk(struct platform_device *pdev, + struct msm_ext_disp_audio_edid_blk *blk) +{ + int rc = 0; + struct dp_audio_private *audio; + struct sde_edid_ctrl *edid; + + if (!blk) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + audio = dp_audio_get_data(pdev); + if (IS_ERR(audio)) { + rc = PTR_ERR(audio); + goto end; + } + + if (!audio->panel || !audio->panel->edid_ctrl) { + DP_ERR("invalid panel data\n"); + rc = -EINVAL; + goto end; + } + + edid = 
audio->panel->edid_ctrl; + + blk->audio_data_blk = edid->audio_data_block; + blk->audio_data_blk_size = edid->adb_size; + + blk->spk_alloc_data_blk = edid->spkr_alloc_data_block; + blk->spk_alloc_data_blk_size = edid->sadb_size; +end: + return rc; +} + +static int dp_audio_get_cable_status(struct platform_device *pdev, u32 vote) +{ + int rc = 0; + struct dp_audio_private *audio; + + audio = dp_audio_get_data(pdev); + if (IS_ERR(audio)) { + rc = PTR_ERR(audio); + goto end; + } + + return atomic_read(&audio->session_on); +end: + return rc; +} + +static int dp_audio_get_intf_id(struct platform_device *pdev) +{ + int rc = 0; + struct dp_audio_private *audio; + + audio = dp_audio_get_data(pdev); + if (IS_ERR(audio)) { + rc = PTR_ERR(audio); + goto end; + } + + return EXT_DISPLAY_TYPE_DP; +end: + return rc; +} + +static void dp_audio_teardown_done(struct platform_device *pdev) +{ + struct dp_audio_private *audio; + + audio = dp_audio_get_data(pdev); + if (IS_ERR(audio)) + return; + + if (audio->dp_audio.tui_active) { + DP_DEBUG("TUI session active\n"); + return; + } + + if (audio->panel->stream_id >= DP_STREAM_MAX) { + DP_WARN("invalid stream id: %d\n", + audio->panel->stream_id); + return; + } + + mutex_lock(&audio->ops_lock); + dp_audio_enable(audio, false); + mutex_unlock(&audio->ops_lock); + + atomic_set(&audio->acked, 1); + complete_all(&audio->hpd_comp); + + DP_DEBUG("audio engine disabled\n"); +} + +static int dp_audio_ack_done(struct platform_device *pdev, u32 ack) +{ + int rc = 0, ack_hpd; + struct dp_audio_private *audio; + + audio = dp_audio_get_data(pdev); + if (IS_ERR(audio)) { + rc = PTR_ERR(audio); + goto end; + } + + if (ack & AUDIO_ACK_SET_ENABLE) { + audio->ack_enabled = ack & AUDIO_ACK_ENABLE ? + true : false; + + DP_DEBUG("audio ack feature %s\n", + audio->ack_enabled ? 
"enabled" : "disabled"); + goto end; + } + + if (!audio->ack_enabled) + goto end; + + ack_hpd = ack & AUDIO_ACK_CONNECT; + + DP_DEBUG("acknowledging audio (%d)\n", ack_hpd); + + if (!audio->engine_on) { + atomic_set(&audio->acked, 1); + complete_all(&audio->hpd_comp); + } +end: + return rc; +} + +static int dp_audio_codec_ready(struct platform_device *pdev) +{ + int rc = 0; + struct dp_audio_private *audio; + + audio = dp_audio_get_data(pdev); + if (IS_ERR(audio)) { + DP_ERR("invalid input\n"); + rc = PTR_ERR(audio); + goto end; + } + + queue_delayed_work(audio->notify_workqueue, + &audio->notify_delayed_work, HZ/4); +end: + return rc; +} + +static int dp_audio_register_ext_disp(struct dp_audio_private *audio) +{ + int rc = 0; + struct device_node *pd = NULL; + const char *phandle = "qcom,ext-disp"; + struct msm_ext_disp_init_data *ext; + struct msm_ext_disp_audio_codec_ops *ops; + + ext = &audio->ext_audio_data; + ops = &ext->codec_ops; + + ext->codec.type = EXT_DISPLAY_TYPE_DP; + ext->codec.ctrl_id = 0; + ext->codec.stream_id = audio->panel->stream_id; + ext->pdev = audio->pdev; + ext->intf_data = &audio->dp_audio; + + ops->audio_info_setup = dp_audio_info_setup; + ops->get_audio_edid_blk = dp_audio_get_edid_blk; + ops->cable_status = dp_audio_get_cable_status; + ops->get_intf_id = dp_audio_get_intf_id; + ops->teardown_done = dp_audio_teardown_done; + ops->acknowledge = dp_audio_ack_done; + ops->ready = dp_audio_codec_ready; + + if (!audio->pdev->dev.of_node) { + DP_ERR("cannot find audio dev.of_node\n"); + rc = -ENODEV; + goto end; + } + + pd = of_parse_phandle(audio->pdev->dev.of_node, phandle, 0); + if (!pd) { + DP_ERR("cannot parse %s handle\n", phandle); + rc = -ENODEV; + goto end; + } + + audio->ext_pdev = of_find_device_by_node(pd); + if (!audio->ext_pdev) { + DP_ERR("cannot find %s pdev\n", phandle); + rc = -ENODEV; + goto end; + } +#if IS_ENABLED(CONFIG_MSM_EXT_DISPLAY) + rc = msm_ext_disp_register_intf(audio->ext_pdev, ext); + if (rc) + DP_ERR("failed 
to register disp\n"); +#endif +end: + if (pd) + of_node_put(pd); + + return rc; +} + +static int dp_audio_deregister_ext_disp(struct dp_audio_private *audio) +{ + int rc = 0; + struct device_node *pd = NULL; + const char *phandle = "qcom,ext-disp"; + struct msm_ext_disp_init_data *ext; + + ext = &audio->ext_audio_data; + + if (!audio->pdev->dev.of_node) { + DP_ERR("cannot find audio dev.of_node\n"); + rc = -ENODEV; + goto end; + } + + pd = of_parse_phandle(audio->pdev->dev.of_node, phandle, 0); + if (!pd) { + DP_ERR("cannot parse %s handle\n", phandle); + rc = -ENODEV; + goto end; + } + + audio->ext_pdev = of_find_device_by_node(pd); + if (!audio->ext_pdev) { + DP_ERR("cannot find %s pdev\n", phandle); + rc = -ENODEV; + goto end; + } + +#if IS_ENABLED(CONFIG_MSM_EXT_DISPLAY) + rc = msm_ext_disp_deregister_intf(audio->ext_pdev, ext); + if (rc) + DP_ERR("failed to deregister disp\n"); +#endif + +end: + return rc; +} + +static int dp_audio_notify(struct dp_audio_private *audio, u32 state) +{ + int rc = 0; + struct msm_ext_disp_init_data *ext = &audio->ext_audio_data; + + atomic_set(&audio->acked, 0); + + if (!ext->intf_ops.audio_notify) { + DP_ERR("audio notify not defined\n"); + goto end; + } + + reinit_completion(&audio->hpd_comp); + rc = ext->intf_ops.audio_notify(audio->ext_pdev, + &ext->codec, state); + if (rc) + goto end; + + if (atomic_read(&audio->acked)) + goto end; + + if (state == EXT_DISPLAY_CABLE_DISCONNECT && !audio->engine_on) + goto end; + + if (state == EXT_DISPLAY_CABLE_CONNECT) + goto end; + + rc = wait_for_completion_timeout(&audio->hpd_comp, HZ * 4); + if (!rc) { + DP_ERR("timeout. 
state=%d err=%d\n", state, rc); + rc = -ETIMEDOUT; + goto end; + } + + DP_DEBUG("success\n"); +end: + return rc; +} + +static int dp_audio_config(struct dp_audio_private *audio, u32 state) +{ + int rc = 0; + struct msm_ext_disp_init_data *ext = &audio->ext_audio_data; + + if (!ext || !ext->intf_ops.audio_config) { + DP_ERR("audio_config not defined\n"); + goto end; + } + + /* + * DP Audio sets default STREAM_0 only, other streams are + * set by audio driver based on the hardware/software support. + */ + if (audio->panel->stream_id == DP_STREAM_0) { + rc = ext->intf_ops.audio_config(audio->ext_pdev, + &ext->codec, state); + if (rc) + DP_ERR("failed to config audio, err=%d\n", + rc); + } +end: + return rc; +} + +static int dp_audio_on(struct dp_audio *dp_audio) +{ + int rc = 0; + struct dp_audio_private *audio; + struct msm_ext_disp_init_data *ext; + + if (!dp_audio) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + audio = container_of(dp_audio, struct dp_audio_private, dp_audio); + if (IS_ERR(audio)) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp_audio_register_ext_disp(audio); + + ext = &audio->ext_audio_data; + + atomic_set(&audio->session_on, 1); + + rc = dp_audio_config(audio, EXT_DISPLAY_CABLE_CONNECT); + if (rc) + goto end; + + rc = dp_audio_notify(audio, EXT_DISPLAY_CABLE_CONNECT); + if (rc) + goto end; + + DP_DEBUG("success\n"); +end: + return rc; +} + +static int dp_audio_off(struct dp_audio *dp_audio, bool skip_wait) +{ + int rc = 0; + struct dp_audio_private *audio; + struct msm_ext_disp_init_data *ext; + bool work_pending = false; + + if (!dp_audio) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + audio = container_of(dp_audio, struct dp_audio_private, dp_audio); + + if (!atomic_read(&audio->session_on)) { + DP_DEBUG("audio already off\n"); + return rc; + } + + ext = &audio->ext_audio_data; + + work_pending = cancel_delayed_work_sync(&audio->notify_delayed_work); + if (work_pending) + DP_DEBUG("pending notification work 
completed\n"); + + if (!skip_wait) { + rc = dp_audio_notify(audio, EXT_DISPLAY_CABLE_DISCONNECT); + if (rc) + goto end; + } + + DP_DEBUG("success\n"); +end: + dp_audio_config(audio, EXT_DISPLAY_CABLE_DISCONNECT); + + atomic_set(&audio->session_on, 0); + audio->engine_on = false; + + dp_audio_deregister_ext_disp(audio); + + return rc; +} + +static void dp_audio_notify_work_fn(struct work_struct *work) +{ + struct dp_audio_private *audio; + struct delayed_work *dw = to_delayed_work(work); + + audio = container_of(dw, struct dp_audio_private, notify_delayed_work); + + dp_audio_notify(audio, EXT_DISPLAY_CABLE_CONNECT); +} + +static int dp_audio_create_notify_workqueue(struct dp_audio_private *audio) +{ + audio->notify_workqueue = create_workqueue("sdm_dp_audio_notify"); + if (IS_ERR_OR_NULL(audio->notify_workqueue)) { + DP_ERR("Error creating notify_workqueue\n"); + return -EPERM; + } + + INIT_DELAYED_WORK(&audio->notify_delayed_work, dp_audio_notify_work_fn); + + return 0; +} + +static void dp_audio_destroy_notify_workqueue(struct dp_audio_private *audio) +{ + if (audio->notify_workqueue) + destroy_workqueue(audio->notify_workqueue); +} + +struct dp_audio *dp_audio_get(struct platform_device *pdev, + struct dp_panel *panel, + struct dp_catalog_audio *catalog) +{ + int rc = 0; + struct dp_audio_private *audio; + struct dp_audio *dp_audio; + + if (!pdev || !panel || !catalog) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + audio = devm_kzalloc(&pdev->dev, sizeof(*audio), GFP_KERNEL); + if (!audio) { + rc = -ENOMEM; + goto error; + } + + rc = dp_audio_create_notify_workqueue(audio); + if (rc) + goto error_notify_workqueue; + + init_completion(&audio->hpd_comp); + + audio->pdev = pdev; + audio->panel = panel; + audio->catalog = catalog; + + atomic_set(&audio->acked, 0); + + dp_audio = &audio->dp_audio; + + mutex_init(&audio->ops_lock); + + dp_audio->on = dp_audio_on; + dp_audio->off = dp_audio_off; + + catalog->init(catalog); + + return dp_audio; + 
+error_notify_workqueue: + devm_kfree(&pdev->dev, audio); +error: + return ERR_PTR(rc); +} + +void dp_audio_put(struct dp_audio *dp_audio) +{ + struct dp_audio_private *audio; + + if (!dp_audio) + return; + + audio = container_of(dp_audio, struct dp_audio_private, dp_audio); + + mutex_destroy(&audio->ops_lock); + + dp_audio_destroy_notify_workqueue(audio); + + devm_kfree(&audio->pdev->dev, audio); +} diff --git a/msm/dp/dp_audio.h b/msm/dp/dp_audio.h new file mode 100644 index 000000000..f81f0079e --- /dev/null +++ b/msm/dp/dp_audio.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_AUDIO_H_ +#define _DP_AUDIO_H_ + +#include + +#include "dp_panel.h" +#include "dp_catalog.h" + +/** + * struct dp_audio + * @lane_count: number of lanes configured in current session + * @bw_code: link rate's bandwidth code for current session + * @tui_active: set to true if TUI is active in the system + */ +struct dp_audio { + u32 lane_count; + u32 bw_code; + bool tui_active; + + /** + * on() + * + * Notifies user mode clients that DP is powered on, and that audio + * playback can start on the external display. + * + * @dp_audio: an instance of struct dp_audio. + * + * Returns the error code in case of failure, 0 in success case. + */ + int (*on)(struct dp_audio *dp_audio); + + /** + * off() + * + * Notifies user mode clients that DP is shutting down, and audio + * playback should be stopped on the external display. + * + * @dp_audio: an instance of struct dp_audio. + * @skip_wait: flag to skip any waits + * + * Returns the error code in case of failure, 0 in success case. + */ + int (*off)(struct dp_audio *dp_audio, bool skip_wait); +}; + +/** + * dp_audio_get() + * + * Creates and instance of dp audio. + * + * @pdev: caller's platform device instance. + * @panel: an instance of dp_panel module. 
+ * @catalog: an instance of dp_catalog_audio module. + * + * Returns the error code in case of failure, otherwize + * an instance of newly created dp_module. + */ +struct dp_audio *dp_audio_get(struct platform_device *pdev, + struct dp_panel *panel, + struct dp_catalog_audio *catalog); + +/** + * dp_audio_put() + * + * Cleans the dp_audio instance. + * + * @dp_audio: an instance of dp_audio. + */ +void dp_audio_put(struct dp_audio *dp_audio); +#endif /* _DP_AUDIO_H_ */ diff --git a/msm/dp/dp_aux.c b/msm/dp/dp_aux.c new file mode 100644 index 000000000..e764d10a4 --- /dev/null +++ b/msm/dp/dp_aux.c @@ -0,0 +1,958 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + */ + +#include + +#if IS_ENABLED(CONFIG_QCOM_FSA4480_I2C) +#include +#endif +#if IS_ENABLED(CONFIG_QCOM_WCD939X_I2C) +#include +#endif + +#include "dp_aux.h" +#include "dp_hpd.h" +#include "dp_debug.h" + +#define DP_AUX_ENUM_STR(x) #x +#define DP_AUX_IPC_NUM_PAGES 10 + +#define DP_AUX_DEBUG(dp_aux, fmt, ...) \ + do { \ + if (dp_aux) \ + ipc_log_string(dp_aux->ipc_log_context, "[d][%-4d]"fmt,\ + current->pid, ##__VA_ARGS__); \ + DP_DEBUG_V(fmt, ##__VA_ARGS__); \ + } while (0) + +#define DP_AUX_WARN(dp_aux, fmt, ...) \ + do { \ + if (dp_aux) \ + ipc_log_string(dp_aux->ipc_log_context, "[w][%-4d]"fmt,\ + current->pid, ##__VA_ARGS__); \ + DP_WARN_V(fmt, ##__VA_ARGS__); \ + } while (0) + +#define DP_AUX_WARN_RATELIMITED(dp_aux, fmt, ...) \ + do { \ + if (dp_aux) \ + ipc_log_string(dp_aux->ipc_log_context, "[w][%-4d]"fmt,\ + current->pid, ##__VA_ARGS__); \ + DP_WARN_RATELIMITED_V(fmt, ##__VA_ARGS__); \ + } while (0) + +#define DP_AUX_ERR(dp_aux, fmt, ...) 
\ + do { \ + if (dp_aux) \ + ipc_log_string(dp_aux->ipc_log_context, "[e][%-4d]"fmt,\ + current->pid, ##__VA_ARGS__); \ + DP_ERR_V(fmt, ##__VA_ARGS__); \ + } while (0) + +#define DP_AUX_ERR_RATELIMITED(dp_aux, fmt, ...) \ + do { \ + if (dp_aux) \ + ipc_log_string(dp_aux->ipc_log_context, "[e][%-4d]"fmt,\ + current->pid, ##__VA_ARGS__); \ + DP_ERR_RATELIMITED_V(fmt, ##__VA_ARGS__); \ + } while (0) + +enum { + DP_AUX_DATA_INDEX_WRITE = BIT(31), +}; + +struct dp_aux_private { + struct device *dev; + struct dp_aux dp_aux; + struct dp_catalog_aux *catalog; + struct dp_aux_cfg *cfg; + struct device_node *aux_switch_node; + struct mutex mutex; + struct completion comp; + struct drm_dp_aux drm_aux; + + struct dp_aux_bridge *aux_bridge; + struct dp_aux_bridge *sim_bridge; + bool bridge_in_transfer; + bool sim_in_transfer; + + bool cmd_busy; + bool native; + bool read; + bool no_send_addr; + bool no_send_stop; + bool enabled; + + u32 offset; + u32 segment; + u32 aux_error_num; + u32 retry_cnt; + + bool switch_enable; + int switch_orientation; + + atomic_t aborted; +}; + +static void dp_aux_hex_dump(struct drm_dp_aux *drm_aux, + struct drm_dp_aux_msg *msg) +{ + char prefix[64]; + int i, linelen, remaining = msg->size; + const int rowsize = 16; + u8 linebuf[64]; + struct dp_aux_private *aux = container_of(drm_aux, + struct dp_aux_private, drm_aux); + struct dp_aux *dp_aux = &aux->dp_aux; + + snprintf(prefix, sizeof(prefix), "%s %s %4xh(%2zu): ", + (msg->request & DP_AUX_I2C_MOT) ? "I2C" : "NAT", + (msg->request & DP_AUX_I2C_READ) ? 
"RD" : "WR", + msg->address, msg->size); + + for (i = 0; i < msg->size; i += rowsize) { + linelen = min(remaining, rowsize); + remaining -= rowsize; + + hex_dump_to_buffer(msg->buffer + i, linelen, rowsize, 1, + linebuf, sizeof(linebuf), false); + + if (msg->size == 1 && msg->address == 0) + DP_DEBUG_V("%s%s\n", prefix, linebuf); + else + DP_AUX_DEBUG(dp_aux, "%s%s\n", prefix, linebuf); + } +} + +static char *dp_aux_get_error(u32 aux_error) +{ + switch (aux_error) { + case DP_AUX_ERR_NONE: + return DP_AUX_ENUM_STR(DP_AUX_ERR_NONE); + case DP_AUX_ERR_ADDR: + return DP_AUX_ENUM_STR(DP_AUX_ERR_ADDR); + case DP_AUX_ERR_TOUT: + return DP_AUX_ENUM_STR(DP_AUX_ERR_TOUT); + case DP_AUX_ERR_NACK: + return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK); + case DP_AUX_ERR_DEFER: + return DP_AUX_ENUM_STR(DP_AUX_ERR_DEFER); + case DP_AUX_ERR_NACK_DEFER: + return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK_DEFER); + default: + return "unknown"; + } +} + +static u32 dp_aux_write(struct dp_aux_private *aux, + struct drm_dp_aux_msg *msg) +{ + u32 data[4], reg, len; + u8 *msgdata = msg->buffer; + int const aux_cmd_fifo_len = 128; + int i = 0; + struct dp_aux *dp_aux = &aux->dp_aux; + + if (aux->read) + len = 4; + else + len = msg->size + 4; + + /* + * cmd fifo only has depth of 144 bytes + * limit buf length to 128 bytes here + */ + if (len > aux_cmd_fifo_len) { + DP_AUX_ERR(dp_aux, "buf len error\n"); + return 0; + } + + /* Pack cmd and write to HW */ + data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */ + if (aux->read) + data[0] |= BIT(4); /* R/W */ + + data[1] = (msg->address >> 8) & 0xff; /* addr[15:8] */ + data[2] = msg->address & 0xff; /* addr[7:0] */ + data[3] = (msg->size - 1) & 0xff; /* len[7:0] */ + + for (i = 0; i < len; i++) { + reg = (i < 4) ? 
data[i] : msgdata[i - 4]; + reg = ((reg) << 8) & 0x0000ff00; /* index = 0, write */ + if (i == 0) + reg |= DP_AUX_DATA_INDEX_WRITE; + aux->catalog->data = reg; + aux->catalog->write_data(aux->catalog); + } + + aux->catalog->clear_trans(aux->catalog, false); + aux->catalog->clear_hw_interrupts(aux->catalog); + + reg = 0; /* Transaction number == 1 */ + if (!aux->native) { /* i2c */ + reg |= BIT(8); + + if (aux->no_send_addr) + reg |= BIT(10); + + if (aux->no_send_stop) + reg |= BIT(11); + } + + reg |= BIT(9); + aux->catalog->data = reg; + aux->catalog->write_trans(aux->catalog); + + return len; +} + +static int dp_aux_cmd_fifo_tx(struct dp_aux_private *aux, + struct drm_dp_aux_msg *msg) +{ + u32 ret = 0, len = 0, timeout; + int const aux_timeout_ms = HZ/4; + struct dp_aux *dp_aux = &aux->dp_aux; + char prefix[64]; + + snprintf(prefix, sizeof(prefix), "%s %s %4xh(%2zu): ", + (msg->request & DP_AUX_I2C_MOT) ? "I2C" : "NAT", + (msg->request & DP_AUX_I2C_READ) ? "RD" : "WR", + msg->address, msg->size); + + reinit_completion(&aux->comp); + + len = dp_aux_write(aux, msg); + if (len == 0) { + DP_AUX_ERR(dp_aux, "DP AUX write failed: %s\n", prefix); + return -EINVAL; + } + + timeout = wait_for_completion_timeout(&aux->comp, aux_timeout_ms); + if (!timeout) { + DP_AUX_WARN_RATELIMITED(dp_aux, "aux timeout during [%s]\n", prefix); + return -ETIMEDOUT; + } + + if (aux->aux_error_num == DP_AUX_ERR_NONE) { + ret = len; + } else { + DP_AUX_WARN_RATELIMITED(dp_aux, "aux err [%s] during [%s]\n", + dp_aux_get_error(aux->aux_error_num), prefix); + ret = -EINVAL; + } + + return ret; +} + +static void dp_aux_cmd_fifo_rx(struct dp_aux_private *aux, + struct drm_dp_aux_msg *msg) +{ + u32 data; + u8 *dp; + u32 i, actual_i; + u32 len = msg->size; + struct dp_aux *dp_aux = &aux->dp_aux; + + aux->catalog->clear_trans(aux->catalog, true); + + data = 0; + data |= DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */ + data |= BIT(0); /* read */ + + aux->catalog->data = data; + 
aux->catalog->write_data(aux->catalog); + + dp = msg->buffer; + + /* discard first byte */ + data = aux->catalog->read_data(aux->catalog); + + for (i = 0; i < len; i++) { + data = aux->catalog->read_data(aux->catalog); + *dp++ = (u8)((data >> 8) & 0xff); + + actual_i = (data >> 16) & 0xFF; + if (i != actual_i) + DP_AUX_WARN(dp_aux, "Index mismatch: expected %d, found %d\n", + i, actual_i); + } +} + +static void dp_aux_native_handler(struct dp_aux_private *aux) +{ + u32 isr = aux->catalog->isr; + + if (isr & DP_INTR_AUX_I2C_DONE) + aux->aux_error_num = DP_AUX_ERR_NONE; + else if (isr & DP_INTR_WRONG_ADDR) + aux->aux_error_num = DP_AUX_ERR_ADDR; + else if (isr & DP_INTR_TIMEOUT) + aux->aux_error_num = DP_AUX_ERR_TOUT; + if (isr & DP_INTR_NACK_DEFER) + aux->aux_error_num = DP_AUX_ERR_NACK; + if (isr & DP_INTR_AUX_ERROR) { + aux->aux_error_num = DP_AUX_ERR_PHY; + aux->catalog->clear_hw_interrupts(aux->catalog); + } + + complete(&aux->comp); +} + +static void dp_aux_i2c_handler(struct dp_aux_private *aux) +{ + u32 isr = aux->catalog->isr; + + if (isr & DP_INTR_AUX_I2C_DONE) { + if (isr & (DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER)) + aux->aux_error_num = DP_AUX_ERR_NACK; + else + aux->aux_error_num = DP_AUX_ERR_NONE; + } else { + if (isr & DP_INTR_WRONG_ADDR) + aux->aux_error_num = DP_AUX_ERR_ADDR; + else if (isr & DP_INTR_TIMEOUT) + aux->aux_error_num = DP_AUX_ERR_TOUT; + if (isr & DP_INTR_NACK_DEFER) + aux->aux_error_num = DP_AUX_ERR_NACK_DEFER; + if (isr & DP_INTR_I2C_NACK) + aux->aux_error_num = DP_AUX_ERR_NACK; + if (isr & DP_INTR_I2C_DEFER) + aux->aux_error_num = DP_AUX_ERR_DEFER; + if (isr & DP_INTR_AUX_ERROR) { + aux->aux_error_num = DP_AUX_ERR_PHY; + aux->catalog->clear_hw_interrupts(aux->catalog); + } + } + + complete(&aux->comp); +} + +static void dp_aux_isr(struct dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + if (!dp_aux) { + DP_AUX_ERR(dp_aux, "invalid input\n"); + return; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + 
aux->catalog->get_irq(aux->catalog, aux->cmd_busy); + + if (!aux->cmd_busy) + return; + + if (aux->native) + dp_aux_native_handler(aux); + else + dp_aux_i2c_handler(aux); +} + +static void dp_aux_reconfig(struct dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + if (!dp_aux) { + DP_AUX_ERR(dp_aux, "invalid input\n"); + return; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + aux->catalog->update_aux_cfg(aux->catalog, + aux->cfg, PHY_AUX_CFG1); + aux->catalog->reset(aux->catalog); +} + +static void dp_aux_abort_transaction(struct dp_aux *dp_aux, bool abort) +{ + struct dp_aux_private *aux; + + if (!dp_aux) { + DP_AUX_ERR(dp_aux, "invalid input\n"); + return; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + atomic_set(&aux->aborted, abort); +} + +static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux, + struct drm_dp_aux_msg *input_msg) +{ + u32 const edid_address = 0x50; + u32 const segment_address = 0x30; + bool i2c_read = input_msg->request & + (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); + u8 *data = NULL; + + if (aux->native || i2c_read || ((input_msg->address != edid_address) && + (input_msg->address != segment_address))) + return; + + + data = input_msg->buffer; + if (input_msg->address == segment_address) + aux->segment = *data; + else + aux->offset = *data; +} + +/** + * dp_aux_transfer_helper() - helper function for EDID read transactions + * + * @aux: DP AUX private structure + * @input_msg: input message from DRM upstream APIs + * @send_seg: send the seg to sink + * + * return: void + * + * This helper function is used to fix EDID reads for non-compliant + * sinks that do not handle the i2c middle-of-transaction flag correctly. 
+ */ +static void dp_aux_transfer_helper(struct dp_aux_private *aux, + struct drm_dp_aux_msg *input_msg, bool send_seg) +{ + struct drm_dp_aux_msg helper_msg; + u32 const message_size = 0x10; + u32 const segment_address = 0x30; + u32 const edid_block_length = 0x80; + bool i2c_mot = input_msg->request & DP_AUX_I2C_MOT; + bool i2c_read = input_msg->request & + (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); + + if (!i2c_mot || !i2c_read || (input_msg->size == 0)) + return; + + /* + * Sending the segment value and EDID offset will be performed + * from the DRM upstream EDID driver for each block. Avoid + * duplicate AUX transactions related to this while reading the + * first 16 bytes of each block. + */ + if (!(aux->offset % edid_block_length) || !send_seg) + goto end; + + aux->read = false; + aux->cmd_busy = true; + aux->no_send_addr = true; + aux->no_send_stop = true; + + /* + * Send the segment address for i2c reads for segment > 0 and for which + * the middle-of-transaction flag is set. This is required to support + * EDID reads of more than 2 blocks as the segment address is reset to 0 + * since we are overriding the middle-of-transaction flag for read + * transactions. + */ + if (aux->segment) { + memset(&helper_msg, 0, sizeof(helper_msg)); + helper_msg.address = segment_address; + helper_msg.buffer = &aux->segment; + helper_msg.size = 1; + dp_aux_cmd_fifo_tx(aux, &helper_msg); + } + + /* + * Send the offset address for every i2c read in which the + * middle-of-transaction flag is set. This will ensure that the sink + * will update its read pointer and return the correct portion of the + * EDID buffer in the subsequent i2c read trasntion triggered in the + * native AUX transfer function. 
+ */ + memset(&helper_msg, 0, sizeof(helper_msg)); + helper_msg.address = input_msg->address; + helper_msg.buffer = &aux->offset; + helper_msg.size = 1; + dp_aux_cmd_fifo_tx(aux, &helper_msg); +end: + aux->offset += message_size; + if (aux->offset == 0x80 || aux->offset == 0x100) + aux->segment = 0x0; /* reset segment at end of block */ +} + +static int dp_aux_transfer_ready(struct dp_aux_private *aux, + struct drm_dp_aux_msg *msg, bool send_seg) +{ + int ret = 0; + int const aux_cmd_native_max = 16; + int const aux_cmd_i2c_max = 128; + struct dp_aux *dp_aux = &aux->dp_aux; + + if (atomic_read(&aux->aborted)) { + ret = -ETIMEDOUT; + goto error; + } + + aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ); + + /* Ignore address only message */ + if ((msg->size == 0) || (msg->buffer == NULL)) { + msg->reply = aux->native ? + DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK; + goto error; + } + + /* msg sanity check */ + if ((aux->native && (msg->size > aux_cmd_native_max)) || + (msg->size > aux_cmd_i2c_max)) { + DP_AUX_ERR(dp_aux, "%s: invalid msg: size(%zu), request(%x)\n", + __func__, msg->size, msg->request); + ret = -EINVAL; + goto error; + } + + dp_aux_update_offset_and_segment(aux, msg); + + dp_aux_transfer_helper(aux, msg, send_seg); + + aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); + + if (aux->read) { + aux->no_send_addr = true; + aux->no_send_stop = false; + } else { + aux->no_send_addr = true; + aux->no_send_stop = true; + } + + aux->cmd_busy = true; +error: + return ret; +} + +static inline bool dp_aux_is_sideband_msg(u32 address, size_t size) +{ + return (address >= 0x1000 && address + size < 0x1800) || + (address >= 0x2000 && address + size < 0x2200); +} + +/* + * This function does the real job to process an AUX transaction. + * It will call aux_reset() function to reset the AUX channel, + * if the waiting is timeout. 
+ */ +static ssize_t dp_aux_transfer(struct drm_dp_aux *drm_aux, + struct drm_dp_aux_msg *msg) +{ + ssize_t ret; + int const retry_count = 5; + struct dp_aux_private *aux = container_of(drm_aux, + struct dp_aux_private, drm_aux); + + mutex_lock(&aux->mutex); + + ret = dp_aux_transfer_ready(aux, msg, true); + if (ret) + goto unlock_exit; + + if (!aux->cmd_busy) { + ret = msg->size; + goto unlock_exit; + } + + ret = dp_aux_cmd_fifo_tx(aux, msg); + if ((ret < 0) && !atomic_read(&aux->aborted)) { + aux->retry_cnt++; + if (!(aux->retry_cnt % retry_count)) + aux->catalog->update_aux_cfg(aux->catalog, + aux->cfg, PHY_AUX_CFG1); + aux->catalog->reset(aux->catalog); + goto unlock_exit; + } else if (ret < 0) { + goto unlock_exit; + } + + if (aux->aux_error_num == DP_AUX_ERR_NONE) { + if (aux->read) + dp_aux_cmd_fifo_rx(aux, msg); + + dp_aux_hex_dump(drm_aux, msg); + + msg->reply = aux->native ? + DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK; + } else { + /* Reply defer to retry */ + msg->reply = aux->native ? 
+ DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER; + } + + /* Return requested size for success or retry */ + ret = msg->size; + aux->retry_cnt = 0; + +unlock_exit: + aux->cmd_busy = false; + mutex_unlock(&aux->mutex); + return ret; +} + +static ssize_t dp_aux_bridge_transfer(struct drm_dp_aux *drm_aux, + struct drm_dp_aux_msg *msg) +{ + struct dp_aux_private *aux = container_of(drm_aux, + struct dp_aux_private, drm_aux); + ssize_t size; + + if (aux->bridge_in_transfer) { + size = dp_aux_transfer(drm_aux, msg); + } else { + aux->bridge_in_transfer = true; + size = aux->aux_bridge->transfer(aux->aux_bridge, + drm_aux, msg); + aux->bridge_in_transfer = false; + dp_aux_hex_dump(drm_aux, msg); + } + + return size; +} + +static ssize_t dp_aux_transfer_debug(struct drm_dp_aux *drm_aux, + struct drm_dp_aux_msg *msg) +{ + struct dp_aux_private *aux = container_of(drm_aux, + struct dp_aux_private, drm_aux); + ssize_t size; + int aborted; + + mutex_lock(&aux->mutex); + aborted = atomic_read(&aux->aborted); + mutex_unlock(&aux->mutex); + if (aborted) { + size = -ETIMEDOUT; + goto end; + } + + if (aux->sim_in_transfer) { + if (aux->aux_bridge && aux->aux_bridge->transfer) + size = dp_aux_bridge_transfer(drm_aux, msg); + else + size = dp_aux_transfer(drm_aux, msg); + } else { + aux->sim_in_transfer = true; + size = aux->sim_bridge->transfer(aux->sim_bridge, + drm_aux, msg); + aux->sim_in_transfer = false; + dp_aux_hex_dump(drm_aux, msg); + } +end: + return size; +} + +static void dp_aux_reset_phy_config_indices(struct dp_aux_cfg *aux_cfg) +{ + int i = 0; + + for (i = 0; i < PHY_AUX_CFG_MAX; i++) + aux_cfg[i].current_index = 0; +} + +static void dp_aux_init(struct dp_aux *dp_aux, struct dp_aux_cfg *aux_cfg) +{ + struct dp_aux_private *aux; + + if (!dp_aux || !aux_cfg) { + DP_AUX_ERR(dp_aux, "invalid input\n"); + return; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + if (aux->enabled) + return; + + dp_aux_reset_phy_config_indices(aux_cfg); + 
aux->catalog->setup(aux->catalog, aux_cfg); + aux->catalog->reset(aux->catalog); + aux->catalog->enable(aux->catalog, true); + atomic_set(&aux->aborted, 0); + aux->retry_cnt = 0; + aux->enabled = true; +} + +static void dp_aux_deinit(struct dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + if (!dp_aux) { + DP_AUX_ERR(dp_aux, "invalid input\n"); + return; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + if (!aux->enabled) + return; + + atomic_set(&aux->aborted, 1); + aux->catalog->enable(aux->catalog, false); + aux->enabled = false; +} + +static int dp_aux_register(struct dp_aux *dp_aux, struct drm_device *drm_dev) +{ + struct dp_aux_private *aux; + int ret = 0; + + if (!dp_aux) { + DP_AUX_ERR(dp_aux, "invalid input\n"); + ret = -EINVAL; + goto exit; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + aux->drm_aux.name = "sde_dp_aux"; + aux->drm_aux.dev = aux->dev; + aux->drm_aux.transfer = dp_aux_transfer; +#if (KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE) + aux->drm_aux.drm_dev = drm_dev; +#endif + atomic_set(&aux->aborted, 1); + ret = drm_dp_aux_register(&aux->drm_aux); + if (ret) { + DP_AUX_ERR(dp_aux, "%s: failed to register drm aux: %d\n", __func__, ret); + goto exit; + } + dp_aux->drm_aux = &aux->drm_aux; + + /* if bridge is defined, override transfer function */ + if (aux->aux_bridge && aux->aux_bridge->transfer) + aux->drm_aux.transfer = dp_aux_bridge_transfer; +exit: + return ret; +} + +static void dp_aux_deregister(struct dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + if (!dp_aux) { + DP_AUX_ERR(dp_aux, "invalid input\n"); + return; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + drm_dp_aux_unregister(&aux->drm_aux); +} + +static void dp_aux_set_sim_mode(struct dp_aux *dp_aux, + struct dp_aux_bridge *sim_bridge) +{ + struct dp_aux_private *aux; + + if (!dp_aux) { + DP_AUX_ERR(dp_aux, "invalid input\n"); + return; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + 
mutex_lock(&aux->mutex); + + aux->sim_bridge = sim_bridge; + + if (sim_bridge) { + atomic_set(&aux->aborted, 0); + aux->drm_aux.transfer = dp_aux_transfer_debug; + } else if (aux->aux_bridge && aux->aux_bridge->transfer) { + aux->drm_aux.transfer = dp_aux_bridge_transfer; + } else { + aux->drm_aux.transfer = dp_aux_transfer; + } + + mutex_unlock(&aux->mutex); +} + +#if IS_ENABLED(CONFIG_QCOM_FSA4480_I2C) +static int dp_aux_configure_fsa_switch(struct dp_aux *dp_aux, + bool enable, int orientation) +{ + struct dp_aux_private *aux; + int rc = 0; + enum fsa_function event = FSA_USBC_DISPLAYPORT_DISCONNECTED; + + if (!dp_aux) { + DP_AUX_ERR(dp_aux, "invalid input\n"); + rc = -EINVAL; + goto end; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + if (!aux->aux_switch_node) { + DP_AUX_DEBUG(dp_aux, "undefined fsa4480 handle\n"); + rc = -EINVAL; + goto end; + } + + if (enable) { + switch (orientation) { + case ORIENTATION_CC1: + event = FSA_USBC_ORIENTATION_CC1; + break; + case ORIENTATION_CC2: + event = FSA_USBC_ORIENTATION_CC2; + break; + default: + DP_AUX_ERR(dp_aux, "invalid orientation\n"); + rc = -EINVAL; + goto end; + } + } + + DP_AUX_DEBUG(dp_aux, "enable=%d, orientation=%d, event=%d\n", + enable, orientation, event); + + rc = fsa4480_switch_event(aux->aux_switch_node, event); + + if (rc) + DP_AUX_ERR(dp_aux, "failed to configure fsa4480 i2c device (%d)\n", rc); +end: + return rc; +} +#endif + +#if IS_ENABLED(CONFIG_QCOM_WCD939X_I2C) +static int dp_aux_configure_wcd_switch(struct dp_aux *dp_aux, + bool enable, int orientation) +{ + struct dp_aux_private *aux; + int rc = 0; + enum wcd_usbss_cable_status status = WCD_USBSS_CABLE_DISCONNECT; + enum wcd_usbss_cable_types event = WCD_USBSS_DP_AUX_CC1; + + if (!dp_aux) { + DP_AUX_ERR(dp_aux, "invalid input\n"); + rc = -EINVAL; + goto end; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + if (!aux->aux_switch_node) { + DP_AUX_DEBUG(dp_aux, "undefined wcd939x switch handle\n"); + 
rc = -EINVAL; + goto end; + } + + if ((aux->switch_enable == enable) && (aux->switch_orientation == orientation)) + goto end; + + if (enable) { + status = WCD_USBSS_CABLE_CONNECT; + + switch (orientation) { + case ORIENTATION_CC1: + event = WCD_USBSS_DP_AUX_CC1; + break; + case ORIENTATION_CC2: + event = WCD_USBSS_DP_AUX_CC2; + break; + default: + DP_AUX_ERR(dp_aux, "invalid orientation\n"); + rc = -EINVAL; + goto end; + } + } + + DP_AUX_DEBUG(dp_aux, "enable=%d, orientation=%d, event=%d\n", + enable, orientation, event); + + rc = wcd_usbss_switch_update(event, status); + if (rc) { + DP_AUX_ERR(dp_aux, "failed to configure wcd939x i2c device (%d)\n", rc); + } else { + aux->switch_enable = enable; + aux->switch_orientation = orientation; + } +end: + return rc; +} +#endif + +struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog, + struct dp_parser *parser, struct device_node *aux_switch, + struct dp_aux_bridge *aux_bridge, void *ipc_log_context, + enum dp_aux_switch_type switch_type) +{ + int rc = 0; + struct dp_aux_private *aux; + struct dp_aux *dp_aux = NULL; + + if (!catalog || !parser) { + DP_AUX_ERR(dp_aux, "invalid input\n"); + rc = -ENODEV; + goto error; + } + + aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL); + if (!aux) { + rc = -ENOMEM; + goto error; + } + + init_completion(&aux->comp); + aux->cmd_busy = false; + mutex_init(&aux->mutex); + + aux->dev = dev; + aux->catalog = catalog; + aux->cfg = parser->aux_cfg; + aux->aux_switch_node = aux_switch; + aux->aux_bridge = aux_bridge; + dp_aux = &aux->dp_aux; + aux->retry_cnt = 0; + aux->switch_orientation = -1; + + dp_aux->isr = dp_aux_isr; + dp_aux->init = dp_aux_init; + dp_aux->deinit = dp_aux_deinit; + dp_aux->drm_aux_register = dp_aux_register; + dp_aux->drm_aux_deregister = dp_aux_deregister; + dp_aux->reconfig = dp_aux_reconfig; + dp_aux->abort = dp_aux_abort_transaction; + dp_aux->set_sim_mode = dp_aux_set_sim_mode; + dp_aux->ipc_log_context = ipc_log_context; + + /*Condition to 
avoid allocating function pointers for aux bypass mode*/ + if (switch_type != DP_AUX_SWITCH_BYPASS) { +#if IS_ENABLED(CONFIG_QCOM_FSA4480_I2C) + if (switch_type == DP_AUX_SWITCH_FSA4480) { + dp_aux->switch_configure = dp_aux_configure_fsa_switch; + dp_aux->switch_register_notifier = fsa4480_reg_notifier; + dp_aux->switch_unregister_notifier = fsa4480_unreg_notifier; + } +#endif +#if IS_ENABLED(CONFIG_QCOM_WCD939X_I2C) + if (switch_type == DP_AUX_SWITCH_WCD939x) { + dp_aux->switch_configure = dp_aux_configure_wcd_switch; + dp_aux->switch_register_notifier = wcd_usbss_reg_notifier; + dp_aux->switch_unregister_notifier = wcd_usbss_unreg_notifier; + } +#endif + } + + return dp_aux; +error: + return ERR_PTR(rc); +} + +void dp_aux_put(struct dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + if (!dp_aux) + return; + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + mutex_destroy(&aux->mutex); + + devm_kfree(aux->dev, aux); +} diff --git a/msm/dp/dp_aux.h b/msm/dp/dp_aux.h new file mode 100644 index 000000000..846c7a952 --- /dev/null +++ b/msm/dp/dp_aux.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DP_AUX_H_ +#define _DP_AUX_H_ + +#include "dp_catalog.h" +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)) +#include +#else +#include +#endif +#include "dp_aux_bridge.h" + +#define DP_STATE_NOTIFICATION_SENT BIT(0) +#define DP_STATE_TRAIN_1_STARTED BIT(1) +#define DP_STATE_TRAIN_1_SUCCEEDED BIT(2) +#define DP_STATE_TRAIN_1_FAILED BIT(3) +#define DP_STATE_TRAIN_2_STARTED BIT(4) +#define DP_STATE_TRAIN_2_SUCCEEDED BIT(5) +#define DP_STATE_TRAIN_2_FAILED BIT(6) +#define DP_STATE_CTRL_POWERED_ON BIT(7) +#define DP_STATE_CTRL_POWERED_OFF BIT(8) +#define DP_STATE_LINK_MAINTENANCE_STARTED BIT(9) +#define DP_STATE_LINK_MAINTENANCE_COMPLETED BIT(10) +#define DP_STATE_LINK_MAINTENANCE_FAILED BIT(11) +#define DP_STATE_AUX_TIMEOUT BIT(12) +#define DP_STATE_PLL_LOCKED BIT(13) + +enum dp_aux_switch_type { + DP_AUX_SWITCH_BYPASS, + DP_AUX_SWITCH_FSA4480, + DP_AUX_SWITCH_WCD939x, +}; + +enum dp_aux_error { + DP_AUX_ERR_NONE = 0, + DP_AUX_ERR_ADDR = -1, + DP_AUX_ERR_TOUT = -2, + DP_AUX_ERR_NACK = -3, + DP_AUX_ERR_DEFER = -4, + DP_AUX_ERR_NACK_DEFER = -5, + DP_AUX_ERR_PHY = -6, +}; + +struct dp_aux { + u32 state; + + bool read; + + struct mutex *access_lock; + void *ipc_log_context; + + struct drm_dp_aux *drm_aux; + int (*drm_aux_register)(struct dp_aux *aux, struct drm_device *drm_dev); + void (*drm_aux_deregister)(struct dp_aux *aux); + void (*isr)(struct dp_aux *aux); + void (*init)(struct dp_aux *aux, struct dp_aux_cfg *aux_cfg); + void (*deinit)(struct dp_aux *aux); + void (*reconfig)(struct dp_aux *aux); + void (*abort)(struct dp_aux *aux, bool abort); + void (*set_sim_mode)(struct dp_aux *aux, struct dp_aux_bridge *sim_bridge); + int (*switch_configure)(struct dp_aux *aux, bool enable, int orientation); + int (*switch_register_notifier)(struct notifier_block *nb, struct device_node *node); + int (*switch_unregister_notifier)(struct notifier_block *nb, struct device_node *node); +}; + +struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux 
*catalog, + struct dp_parser *parser, struct device_node *aux_switch, + struct dp_aux_bridge *aux_bridge, void *ipc_log_context, + enum dp_aux_switch_type switch_type); +void dp_aux_put(struct dp_aux *aux); + +#endif /*__DP_AUX_H_*/ diff --git a/msm/dp/dp_aux_bridge.c b/msm/dp/dp_aux_bridge.c new file mode 100644 index 000000000..f21a3aeb9 --- /dev/null +++ b/msm/dp/dp_aux_bridge.c @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* + * Copyright (c) 2014 Samsung Electronics Co., Ltd + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "dp_aux_bridge.h" + +static DEFINE_MUTEX(dp_aux_bridge_lock); +static LIST_HEAD(du_aux_bridge_list); + +int dp_aux_add_bridge(struct dp_aux_bridge *bridge) +{ + mutex_lock(&dp_aux_bridge_lock); + list_add_tail(&bridge->head, &du_aux_bridge_list); + mutex_unlock(&dp_aux_bridge_lock); + + return 0; +} + +#if IS_ENABLED(CONFIG_OF) +struct dp_aux_bridge *of_dp_aux_find_bridge(struct device_node *np) +{ + struct dp_aux_bridge *bridge; + + mutex_lock(&dp_aux_bridge_lock); + + list_for_each_entry(bridge, &du_aux_bridge_list, head) { + if (bridge->of_node == np) { + mutex_unlock(&dp_aux_bridge_lock); + return bridge; + } + } + + mutex_unlock(&dp_aux_bridge_lock); + return NULL; +} +#endif /* CONFIG_OF */ + diff --git a/msm/dp/dp_aux_bridge.h b/msm/dp/dp_aux_bridge.h new file mode 100644 index 000000000..d39a63fdd --- /dev/null +++ b/msm/dp/dp_aux_bridge.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */ + +#ifndef _DP_AUX_BRIDGE_H_ +#define _DP_AUX_BRIDGE_H_ + +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)) +#include +#else +#include +#endif + +/** + * enum dp_aux_bridge_flag - DP aux bridge capability flag + * DP_AUX_BRIDGE_HPD: HPD will be generated by DP aux bridge + * DP_AUX_BRIDGE_MST: MST simulator is used by DP aux bridge + */ +enum dp_aux_bridge_flag { + DP_AUX_BRIDGE_HPD = (1 << 0), + DP_AUX_BRIDGE_MST = (1 << 1), +}; + +/** + * struct dp_aux_bridge - DP aux bridge control structure + * @of_node: device node pointer to the bridge + * @dev_priv: pointer to the bridge driver's internal context + * @flag: flag for capability + * @mst_ctx: pointer to mst context when DP_AUX_BRIDGE_MST is set + * @head: to keep track of all added bridges + */ +struct dp_aux_bridge { +#if IS_ENABLED(CONFIG_OF) + struct device_node *of_node; +#endif /* CONFIG_OF */ + void *dev_priv; + u32 flag; + void *mst_ctx; + struct list_head head; + + /** + * @register_hpd: + * + * This callback is invoked whenever bridge is registered + * for HPD handling + * + * The attach callback is optional. + * + * Host will pass HPD callback handle to bridge, with + * arguments @hpd_cb(void* dev, bool hpd, bool hpd_irq): + * + * @dev: private handle passed in register_hpd + * @hpd: true if HPD is high, false if HPD is low + * @hpd_irq: true if this is a HPD irq. @hpd will be + * ignored when hpd_irq is true. + * + * RETURNS: + * + * Zero on success, error code on failure. + */ + int (*register_hpd)(struct dp_aux_bridge *bridge, + int (*hpd_cb)(void *, bool, bool), void *dev); + + /** + * @transfer: + * + * This callback is invoked whenever dp_aux transfer + * is called from host. Inside @transfer bridge can still + * call @drm_aux->transfer to trigger the actual + * DPCD/I2C transfer at host side. + * + * The attach callback is optional. + * + * RETURNS: + * + * Size of the bytes transferred, error code on failure. 
+ */ + ssize_t (*transfer)(struct dp_aux_bridge *bridge, + struct drm_dp_aux *drm_aux, + struct drm_dp_aux_msg *msg); +}; + +/** + * dp_aux_add_bridge - Register DP aux bridge + * @bridge: bridge pointer + * return: 0 if successful + */ +int dp_aux_add_bridge(struct dp_aux_bridge *bridge); + +#if IS_ENABLED(CONFIG_OF) +/** + * of_dp_aux_find_bridge - Find registered DP aux bridge + * @np: device node pointer to the bridge + * return: DP aux bridge pointer, NULL if not found + */ +struct dp_aux_bridge *of_dp_aux_find_bridge(struct device_node *np); +#endif /* CONFIG_OF */ + +#endif /* _DP_AUX_BRIDGE_H_ */ + diff --git a/msm/dp/dp_bridge_hpd.c b/msm/dp/dp_bridge_hpd.c new file mode 100644 index 000000000..53f4e10ba --- /dev/null +++ b/msm/dp/dp_bridge_hpd.c @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include "dp_bridge_hpd.h" + +struct dp_bridge_hpd_private { + struct device *dev; + struct dp_hpd base; + struct dp_aux_bridge *bridge; + struct delayed_work work; + struct dp_hpd_cb *cb; + bool hpd; + bool hpd_irq; + struct mutex hpd_lock; +}; + +static int dp_bridge_hpd_connect(struct dp_bridge_hpd_private *bridge_hpd, + bool hpd) +{ + int rc = 0; + + if (!bridge_hpd) { + pr_err("invalid input\n"); + rc = -EINVAL; + goto error; + } + + bridge_hpd->base.hpd_high = hpd; + bridge_hpd->base.alt_mode_cfg_done = hpd; + bridge_hpd->base.hpd_irq = false; + + if (!bridge_hpd->cb || + !bridge_hpd->cb->configure || + !bridge_hpd->cb->disconnect) { + pr_err("invalid cb\n"); + rc = -EINVAL; + goto error; + } + + if (hpd) + rc = bridge_hpd->cb->configure(bridge_hpd->dev); + else + rc = bridge_hpd->cb->disconnect(bridge_hpd->dev); + +error: + return rc; +} + +static int dp_bridge_hpd_attention(struct dp_bridge_hpd_private *bridge_hpd) +{ + int rc = 0; + + if (!bridge_hpd) { + pr_err("invalid input\n"); + rc = -EINVAL; + goto error; + } + + bridge_hpd->base.hpd_irq = true; + + if (bridge_hpd->cb && bridge_hpd->cb->attention) + rc = bridge_hpd->cb->attention(bridge_hpd->dev); + +error: + return rc; +} + +static void dp_bridge_hpd_work(struct work_struct *work) +{ + struct delayed_work *dw = to_delayed_work(work); + struct dp_bridge_hpd_private *bridge_hpd = container_of(dw, + struct dp_bridge_hpd_private, work); + + mutex_lock(&bridge_hpd->hpd_lock); + + if (bridge_hpd->hpd_irq) + dp_bridge_hpd_attention(bridge_hpd); + else + dp_bridge_hpd_connect(bridge_hpd, bridge_hpd->hpd); + + mutex_unlock(&bridge_hpd->hpd_lock); +} + +static int dp_bridge_hpd_simulate_connect(struct dp_hpd *dp_hpd, bool hpd) +{ + int rc = 0; + struct dp_bridge_hpd_private *bridge_hpd; + + if (!dp_hpd) { + pr_err("invalid input\n"); + rc = -EINVAL; + goto error; + } + + bridge_hpd = 
container_of(dp_hpd, struct dp_bridge_hpd_private, base); + + dp_bridge_hpd_connect(bridge_hpd, hpd); +error: + return rc; +} + +static int dp_bridge_hpd_simulate_attention(struct dp_hpd *dp_hpd, int vdo) +{ + int rc = 0; + struct dp_bridge_hpd_private *bridge_hpd; + + if (!dp_hpd) { + pr_err("invalid input\n"); + rc = -EINVAL; + goto error; + } + + bridge_hpd = container_of(dp_hpd, struct dp_bridge_hpd_private, base); + + dp_bridge_hpd_attention(bridge_hpd); +error: + return rc; +} + +static int dp_bridge_hpd_cb(void *dp_hpd, bool hpd, bool hpd_irq) +{ + struct dp_bridge_hpd_private *bridge_hpd = dp_hpd; + + mutex_lock(&bridge_hpd->hpd_lock); + + bridge_hpd->hpd = hpd; + bridge_hpd->hpd_irq = hpd_irq; + queue_delayed_work(system_wq, &bridge_hpd->work, 0); + + mutex_unlock(&bridge_hpd->hpd_lock); + + return 0; +} + +static int dp_bridge_hpd_register(struct dp_hpd *dp_hpd) +{ + struct dp_bridge_hpd_private *bridge_hpd; + + if (!dp_hpd) + return -EINVAL; + + bridge_hpd = container_of(dp_hpd, struct dp_bridge_hpd_private, base); + + return bridge_hpd->bridge->register_hpd(bridge_hpd->bridge, + dp_bridge_hpd_cb, bridge_hpd); +} + +struct dp_hpd *dp_bridge_hpd_get(struct device *dev, + struct dp_hpd_cb *cb, struct dp_aux_bridge *aux_bridge) +{ + int rc = 0; + struct dp_bridge_hpd_private *bridge_hpd; + + if (!dev || !cb) { + pr_err("invalid device\n"); + rc = -EINVAL; + goto error; + } + + bridge_hpd = devm_kzalloc(dev, sizeof(*bridge_hpd), GFP_KERNEL); + if (!bridge_hpd) { + rc = -ENOMEM; + goto error; + } + + bridge_hpd->dev = dev; + bridge_hpd->cb = cb; + bridge_hpd->bridge = aux_bridge; + mutex_init(&bridge_hpd->hpd_lock); + INIT_DELAYED_WORK(&bridge_hpd->work, dp_bridge_hpd_work); + bridge_hpd->base.simulate_connect = dp_bridge_hpd_simulate_connect; + bridge_hpd->base.simulate_attention = dp_bridge_hpd_simulate_attention; + bridge_hpd->base.register_hpd = dp_bridge_hpd_register; + + return &bridge_hpd->base; +error: + return ERR_PTR(rc); +} + +void 
dp_bridge_hpd_put(struct dp_hpd *dp_hpd)
+{
+	struct dp_bridge_hpd_private *bridge_hpd;
+
+	if (!dp_hpd)
+		return;
+
+	bridge_hpd = container_of(dp_hpd, struct dp_bridge_hpd_private, base);
+
+	devm_kfree(bridge_hpd->dev, bridge_hpd);
+}
diff --git a/msm/dp/dp_bridge_hpd.h b/msm/dp/dp_bridge_hpd.h
new file mode 100644
index 000000000..6f640c832
--- /dev/null
+++ b/msm/dp/dp_bridge_hpd.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_BRIDGE_HPD_H_
+#define _DP_BRIDGE_HPD_H_
+
+#include "dp_hpd.h"
+
+/**
+ * dp_bridge_hpd_get() - configure and get the DisplayPort HPD module data
+ *
+ * @dev: device instance of the caller
+ * @cb: callback function for HPD response
+ * @aux_bridge: handle for aux_bridge driver data
+ * return: pointer to allocated bridge hpd module data
+ *
+ * This function sets up the bridge hpd module
+ */
+struct dp_hpd *dp_bridge_hpd_get(struct device *dev,
+		struct dp_hpd_cb *cb, struct dp_aux_bridge *aux_bridge);
+
+/**
+ * dp_bridge_hpd_put()
+ *
+ * Cleans up dp_hpd instance
+ *
+ * @hpd: instance of bridge_hpd
+ */
+void dp_bridge_hpd_put(struct dp_hpd *hpd);
+
+#endif /* _DP_BRIDGE_HPD_H_ */
diff --git a/msm/dp/dp_catalog.c b/msm/dp/dp_catalog.c
new file mode 100644
index 000000000..4144942c4
--- /dev/null
+++ b/msm/dp/dp_catalog.c
@@ -0,0 +1,3130 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + */ + + +#include +#include + +#include "dp_catalog.h" +#include "dp_reg.h" +#include "dp_debug.h" +#include "dp_link.h" + +#define DP_GET_MSB(x) (x >> 8) +#define DP_GET_LSB(x) (x & 0xff) + +#define DP_PHY_READY BIT(1) + +#define dp_catalog_get_priv(x) ({ \ + struct dp_catalog *dp_catalog; \ + dp_catalog = container_of(x, struct dp_catalog, x); \ + container_of(dp_catalog, struct dp_catalog_private, \ + dp_catalog); \ +}) + +#define DP_INTERRUPT_STATUS1 \ + (DP_INTR_AUX_I2C_DONE| \ + DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \ + DP_INTR_NACK_DEFER | DP_INTR_WRONG_DATA_CNT | \ + DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER | \ + DP_INTR_PLL_UNLOCKED | DP_INTR_AUX_ERROR) + +#define DP_INTR_MASK1 (DP_INTERRUPT_STATUS1 << 2) + +#define DP_INTERRUPT_STATUS2 \ + (DP_INTR_READY_FOR_VIDEO | DP_INTR_IDLE_PATTERN_SENT | \ + DP_INTR_FRAME_END | DP_INTR_CRC_UPDATED | DP_INTR_SST_FIFO_UNDERFLOW) + +#define DP_INTR_MASK2 (DP_INTERRUPT_STATUS2 << 2) + + +#define DP_INTERRUPT_STATUS3 \ + (DP_INTR_SST_ML_FIFO_OVERFLOW | DP_INTR_MST0_ML_FIFO_OVERFLOW | \ + DP_INTR_MST1_ML_FIFO_OVERFLOW | DP_INTR_DP1_FRAME_END | DP_INTR_SDP0_COLLISION | \ + DP_INTR_SDP1_COLLISION) + +#define DP_INTR_MASK3 (DP_INTERRUPT_STATUS3 << 2) + +#define DP_INTERRUPT_STATUS5 \ + (DP_INTR_MST_DP0_VCPF_SENT | DP_INTR_MST_DP1_VCPF_SENT) + +#define DP_INTR_MASK5 (DP_INTERRUPT_STATUS5 << 2) +#define DP_TPG_PATTERN_MAX 9 +#define DP_TPG_PATTERN_DEFAULT 8 + +#define DP_INTERRUPT_STATUS6 \ + (DP_INTR_SST_BS_LATE | DP_INTR_DP0_BACKPRESSURE_ERROR | DP_INTR_DP1_BACKPRESSURE_ERROR) + +#define DP_INTR_MASK6 (DP_INTERRUPT_STATUS6 << 2) + +#define dp_catalog_fill_io(x) { \ + catalog->io.x = parser->get_io(parser, #x); \ +} + +#define dp_catalog_fill_io_buf(x) { \ + parser->get_io_buf(parser, #x); \ +} + +#define dp_read(x) ({ \ + catalog->read(catalog, io_data, x); \ +}) + +#define dp_write(x, y) ({ \ + catalog->write(catalog, io_data, x, y); \ +}) + +static u8 
const vm_pre_emphasis[4][4] = { + {0x00, 0x0B, 0x12, 0xFF}, /* pe0, 0 db */ + {0x00, 0x0A, 0x12, 0xFF}, /* pe1, 3.5 db */ + {0x00, 0x0C, 0xFF, 0xFF}, /* pe2, 6.0 db */ + {0xFF, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */ +}; + +/* voltage swing, 0.2v and 1.0v are not support */ +static u8 const vm_voltage_swing[4][4] = { + {0x07, 0x0F, 0x14, 0xFF}, /* sw0, 0.4v */ + {0x11, 0x1D, 0x1F, 0xFF}, /* sw1, 0.6 v */ + {0x18, 0x1F, 0xFF, 0xFF}, /* sw1, 0.8 v */ + {0xFF, 0xFF, 0xFF, 0xFF} /* sw1, 1.2 v, optional */ +}; + +static u8 const vm_pre_emphasis_hbr3_hbr2[4][4] = { + {0x00, 0x0C, 0x15, 0x1A}, + {0x02, 0x0E, 0x16, 0xFF}, + {0x02, 0x11, 0xFF, 0xFF}, + {0x04, 0xFF, 0xFF, 0xFF} +}; + +static u8 const vm_voltage_swing_hbr3_hbr2[4][4] = { + {0x02, 0x12, 0x16, 0x1A}, + {0x09, 0x19, 0x1F, 0xFF}, + {0x10, 0x1F, 0xFF, 0xFF}, + {0x1F, 0xFF, 0xFF, 0xFF} +}; + +static u8 const vm_pre_emphasis_hbr_rbr[4][4] = { + {0x00, 0x0C, 0x14, 0x19}, + {0x00, 0x0B, 0x12, 0xFF}, + {0x00, 0x0B, 0xFF, 0xFF}, + {0x04, 0xFF, 0xFF, 0xFF} +}; + +static u8 const vm_voltage_swing_hbr_rbr[4][4] = { + {0x08, 0x0F, 0x16, 0x1F}, + {0x11, 0x1E, 0x1F, 0xFF}, + {0x19, 0x1F, 0xFF, 0xFF}, + {0x1F, 0xFF, 0xFF, 0xFF} +}; + +enum dp_flush_bit { + DP_PPS_FLUSH, + DP_DHDR_FLUSH, +}; + +/* audio related catalog functions */ +struct dp_catalog_private { + struct device *dev; + struct dp_catalog_io io; + struct dp_parser *parser; + + u32 (*read)(struct dp_catalog_private *catalog, + struct dp_io_data *io_data, u32 offset); + void (*write)(struct dp_catalog_private *catlog, + struct dp_io_data *io_data, u32 offset, u32 data); + + u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX]; + struct dp_catalog dp_catalog; + + char exe_mode[SZ_4]; + u32 dp_core_version; + u32 dp_phy_version; +}; + +static u32 dp_read_sw(struct dp_catalog_private *catalog, + struct dp_io_data *io_data, u32 offset) +{ + u32 data = 0; + + if (io_data->buf) + memcpy(&data, io_data->buf + offset, sizeof(offset)); + + return data; +} + +static void dp_write_sw(struct 
dp_catalog_private *catalog, + struct dp_io_data *io_data, u32 offset, u32 data) +{ + if (io_data->buf) + memcpy(io_data->buf + offset, &data, sizeof(data)); +} + +static u32 dp_read_hw(struct dp_catalog_private *catalog, + struct dp_io_data *io_data, u32 offset) +{ + u32 data = 0; + + data = readl_relaxed(io_data->io.base + offset); + + return data; +} + +static void dp_write_hw(struct dp_catalog_private *catalog, + struct dp_io_data *io_data, u32 offset, u32 data) +{ + writel_relaxed(data, io_data->io.base + offset); +} + +static u32 dp_read_sub_sw(struct dp_catalog *dp_catalog, + struct dp_io_data *io_data, u32 offset) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + return dp_read_sw(catalog, io_data, offset); +} + +static void dp_write_sub_sw(struct dp_catalog *dp_catalog, + struct dp_io_data *io_data, u32 offset, u32 data) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_sw(catalog, io_data, offset, data); +} + +static u32 dp_read_sub_hw(struct dp_catalog *dp_catalog, + struct dp_io_data *io_data, u32 offset) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + return dp_read_hw(catalog, io_data, offset); +} + +static void dp_write_sub_hw(struct dp_catalog *dp_catalog, + struct dp_io_data *io_data, u32 offset, u32 data) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_hw(catalog, io_data, offset, data); +} + +/* aux related catalog functions */ +static u32 dp_catalog_aux_read_data(struct dp_catalog_aux *aux) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!aux) { + DP_ERR("invalid input\n"); + goto end; + } + + catalog = dp_catalog_get_priv(aux); + io_data = catalog->io.dp_aux; + + return dp_read(DP_AUX_DATA); +end: + return 0; +} + +static int 
dp_catalog_aux_write_data(struct dp_catalog_aux *aux) +{ + int rc = 0; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!aux) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto end; + } + + catalog = dp_catalog_get_priv(aux); + io_data = catalog->io.dp_aux; + + dp_write(DP_AUX_DATA, aux->data); +end: + return rc; +} + +static int dp_catalog_aux_write_trans(struct dp_catalog_aux *aux) +{ + int rc = 0; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!aux) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto end; + } + + catalog = dp_catalog_get_priv(aux); + io_data = catalog->io.dp_aux; + + dp_write(DP_AUX_TRANS_CTRL, aux->data); +end: + return rc; +} + +static int dp_catalog_aux_clear_trans(struct dp_catalog_aux *aux, bool read) +{ + int rc = 0; + u32 data = 0; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!aux) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto end; + } + + catalog = dp_catalog_get_priv(aux); + io_data = catalog->io.dp_aux; + + if (read) { + data = dp_read(DP_AUX_TRANS_CTRL); + data &= ~BIT(9); + dp_write(DP_AUX_TRANS_CTRL, data); + } else { + dp_write(DP_AUX_TRANS_CTRL, 0); + } +end: + return rc; +} + +static void dp_catalog_aux_clear_hw_interrupts(struct dp_catalog_aux *aux) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 data = 0; + + if (!aux) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(aux); + io_data = catalog->io.dp_phy; + + data = dp_read(DP_PHY_AUX_INTERRUPT_STATUS); + + dp_write(DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f); + wmb(); /* make sure 0x1f is written before next write */ + dp_write(DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f); + wmb(); /* make sure 0x9f is written before next write */ + dp_write(DP_PHY_AUX_INTERRUPT_CLEAR, 0); + wmb(); /* make sure register is cleared */ +} + +static void dp_catalog_aux_reset(struct dp_catalog_aux *aux) +{ + u32 aux_ctrl; + struct dp_catalog_private *catalog; + 
struct dp_io_data *io_data; + + if (!aux) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(aux); + io_data = catalog->io.dp_aux; + + aux_ctrl = dp_read(DP_AUX_CTRL); + + aux_ctrl |= BIT(1); + dp_write(DP_AUX_CTRL, aux_ctrl); + usleep_range(1000, 1010); /* h/w recommended delay */ + + aux_ctrl &= ~BIT(1); + + dp_write(DP_AUX_CTRL, aux_ctrl); + wmb(); /* make sure AUX reset is done here */ +} + +static void dp_catalog_aux_enable(struct dp_catalog_aux *aux, bool enable) +{ + u32 aux_ctrl; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!aux) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(aux); + io_data = catalog->io.dp_aux; + + aux_ctrl = dp_read(DP_AUX_CTRL); + + if (enable) { + aux_ctrl |= BIT(0); + dp_write(DP_AUX_CTRL, aux_ctrl); + wmb(); /* make sure AUX module is enabled */ + + dp_write(DP_TIMEOUT_COUNT, 0xffff); + dp_write(DP_AUX_LIMITS, 0xffff); + } else { + aux_ctrl &= ~BIT(0); + dp_write(DP_AUX_CTRL, aux_ctrl); + } +} + +static void dp_catalog_aux_update_cfg(struct dp_catalog_aux *aux, + struct dp_aux_cfg *cfg, enum dp_phy_aux_config_type type) +{ + struct dp_catalog_private *catalog; + u32 new_index = 0, current_index = 0; + struct dp_io_data *io_data; + + if (!aux || !cfg || (type >= PHY_AUX_CFG_MAX)) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(aux); + + io_data = catalog->io.dp_phy; + + current_index = cfg[type].current_index; + new_index = (current_index + 1) % cfg[type].cfg_cnt; + DP_DEBUG("Updating %s from 0x%08x to 0x%08x\n", + dp_phy_aux_config_type_to_string(type), + cfg[type].lut[current_index], cfg[type].lut[new_index]); + + dp_write(cfg[type].offset, cfg[type].lut[new_index]); + cfg[type].current_index = new_index; +} + +static void dp_catalog_aux_setup(struct dp_catalog_aux *aux, + struct dp_aux_cfg *cfg) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + int i = 0; + + if (!aux || !cfg) { + 
DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(aux); + + io_data = catalog->io.dp_phy; + dp_write(DP_PHY_PD_CTL, 0x65); + wmb(); /* make sure PD programming happened */ + + /* Turn on BIAS current for PHY/PLL */ + io_data = catalog->io.dp_pll; + dp_write(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1b); + + io_data = catalog->io.dp_phy; + dp_write(DP_PHY_PD_CTL, 0x02); + wmb(); /* make sure PD programming happened */ + dp_write(DP_PHY_PD_CTL, 0x7d); + + /* Turn on BIAS current for PHY/PLL */ + io_data = catalog->io.dp_pll; + dp_write(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3f); + + /* DP AUX CFG register programming */ + io_data = catalog->io.dp_phy; + for (i = 0; i < PHY_AUX_CFG_MAX; i++) + dp_write(cfg[i].offset, cfg[i].lut[cfg[i].current_index]); + + dp_write(DP_PHY_AUX_INTERRUPT_MASK, 0x1F); + wmb(); /* make sure AUX configuration is done before enabling it */ +} + +static void dp_catalog_aux_get_irq(struct dp_catalog_aux *aux, bool cmd_busy) +{ + u32 ack; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!aux) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(aux); + io_data = catalog->io.dp_ahb; + + aux->isr = dp_read(DP_INTR_STATUS); + aux->isr &= ~DP_INTR_MASK1; + ack = aux->isr & DP_INTERRUPT_STATUS1; + ack <<= 1; + ack |= DP_INTR_MASK1; + dp_write(DP_INTR_STATUS, ack); +} + +static bool dp_catalog_ctrl_wait_for_phy_ready( + struct dp_catalog_private *catalog) +{ + u32 phy_version; + u32 reg, state; + void __iomem *base = catalog->io.dp_phy->io.base; + bool success = true; + u32 const poll_sleep_us = 500; + u32 const pll_timeout_us = 10000; + + phy_version = dp_catalog_get_dp_phy_version(&catalog->dp_catalog); + if (phy_version >= 0x60000000) { + reg = DP_PHY_STATUS_V600; + } else { + reg = DP_PHY_STATUS; + } + + if (readl_poll_timeout_atomic((base + reg), state, + ((state & DP_PHY_READY) > 0), + poll_sleep_us, pll_timeout_us)) { + DP_ERR("PHY status failed, status=%x\n", state); + + 
success = false; + } + + return success; +} + +/* controller related catalog functions */ +static int dp_catalog_ctrl_late_phy_init(struct dp_catalog_ctrl *ctrl, + u8 lane_cnt, bool flipped) +{ + int rc = 0; + u32 bias0_en, drvr0_en, bias1_en, drvr1_en; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + catalog = dp_catalog_get_priv(ctrl); + + switch (lane_cnt) { + case 1: + drvr0_en = flipped ? 0x13 : 0x10; + bias0_en = flipped ? 0x3E : 0x15; + drvr1_en = flipped ? 0x10 : 0x13; + bias1_en = flipped ? 0x15 : 0x3E; + break; + case 2: + drvr0_en = flipped ? 0x10 : 0x10; + bias0_en = flipped ? 0x3F : 0x15; + drvr1_en = flipped ? 0x10 : 0x10; + bias1_en = flipped ? 0x15 : 0x3F; + break; + case 4: + default: + drvr0_en = 0x10; + bias0_en = 0x3F; + drvr1_en = 0x10; + bias1_en = 0x3F; + break; + } + + io_data = catalog->io.dp_ln_tx0; + dp_write(TXn_HIGHZ_DRVR_EN_V420, drvr0_en); + dp_write(TXn_TRANSCEIVER_BIAS_EN_V420, bias0_en); + + io_data = catalog->io.dp_ln_tx1; + dp_write(TXn_HIGHZ_DRVR_EN_V420, drvr1_en); + dp_write(TXn_TRANSCEIVER_BIAS_EN_V420, bias1_en); + + io_data = catalog->io.dp_phy; + dp_write(DP_PHY_CFG, 0x18); + /* add hardware recommended delay */ + udelay(2000); + dp_write(DP_PHY_CFG, 0x19); + + /* + * Make sure all the register writes are completed before + * doing any other operation + */ + wmb(); + + if (!dp_catalog_ctrl_wait_for_phy_ready(catalog)) { + rc = -EINVAL; + goto lock_err; + } + + io_data = catalog->io.dp_ln_tx0; + dp_write(TXn_TX_POL_INV_V420, 0x0a); + io_data = catalog->io.dp_ln_tx1; + dp_write(TXn_TX_POL_INV_V420, 0x0a); + + io_data = catalog->io.dp_ln_tx0; + dp_write(TXn_TX_DRV_LVL_V420, 0x27); + io_data = catalog->io.dp_ln_tx1; + dp_write(TXn_TX_DRV_LVL_V420, 0x27); + + io_data = catalog->io.dp_ln_tx0; + dp_write(TXn_TX_EMP_POST1_LVL, 0x20); + io_data = catalog->io.dp_ln_tx1; + dp_write(TXn_TX_EMP_POST1_LVL, 0x20); + /* Make sure the PHY register 
writes are done */ + wmb(); +lock_err: + return rc; +} + +static u32 dp_catalog_ctrl_read_hdcp_status(struct dp_catalog_ctrl *ctrl) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + catalog = dp_catalog_get_priv(ctrl); + io_data = catalog->io.dp_ahb; + + return dp_read(DP_HDCP_STATUS); +} + +static void dp_catalog_panel_sdp_update(struct dp_catalog_panel *panel) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 sdp_cfg3_off = 0; + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + if (panel->stream_id == DP_STREAM_1) + sdp_cfg3_off = MMSS_DP1_SDP_CFG3 - MMSS_DP_SDP_CFG3; + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + dp_write(MMSS_DP_SDP_CFG3 + sdp_cfg3_off, 0x01); + dp_write(MMSS_DP_SDP_CFG3 + sdp_cfg3_off, 0x00); +} + +static void dp_catalog_panel_setup_vsif_infoframe_sdp( + struct dp_catalog_panel *panel) +{ + struct dp_catalog_private *catalog; + struct drm_msm_ext_hdr_metadata *hdr; + struct dp_io_data *io_data; + u32 header, parity, data, mst_offset = 0; + u8 buf[SZ_64], off = 0; + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + if (panel->stream_id == DP_STREAM_1) + mst_offset = MMSS_DP1_VSCEXT_0 - MMSS_DP_VSCEXT_0; + + catalog = dp_catalog_get_priv(panel); + hdr = &panel->hdr_meta; + io_data = catalog->io.dp_link; + + /* HEADER BYTE 1 */ + header = panel->dhdr_vsif_sdp.HB1; + parity = dp_header_get_parity(header); + data = ((header << HEADER_BYTE_1_BIT) + | (parity << PARITY_BYTE_1_BIT)); + dp_write(MMSS_DP_VSCEXT_0 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + /* HEADER BYTE 2 */ + header = panel->dhdr_vsif_sdp.HB2; + parity = dp_header_get_parity(header); + data = ((header << HEADER_BYTE_2_BIT) + | (parity << PARITY_BYTE_2_BIT)); + 
dp_write(MMSS_DP_VSCEXT_1 + mst_offset, data); + + /* HEADER BYTE 3 */ + header = panel->dhdr_vsif_sdp.HB3; + parity = dp_header_get_parity(header); + data = ((header << HEADER_BYTE_3_BIT) + | (parity << PARITY_BYTE_3_BIT)); + data |= dp_read(MMSS_DP_VSCEXT_1 + mst_offset); + dp_write(MMSS_DP_VSCEXT_1 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + print_hex_dump_debug("[drm-dp] VSCEXT: ", + DUMP_PREFIX_NONE, 16, 4, buf, off, false); +} + +static void dp_catalog_panel_setup_hdr_infoframe_sdp( + struct dp_catalog_panel *panel) +{ + struct dp_catalog_private *catalog; + struct drm_msm_ext_hdr_metadata *hdr; + struct dp_io_data *io_data; + u32 header, parity, data, mst_offset = 0; + u8 buf[SZ_64], off = 0; + u32 const version = 0x01; + u32 const length = 0x1a; + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + if (panel->stream_id == DP_STREAM_1) + mst_offset = MMSS_DP1_GENERIC2_0 - MMSS_DP_GENERIC2_0; + + catalog = dp_catalog_get_priv(panel); + hdr = &panel->hdr_meta; + io_data = catalog->io.dp_link; + + /* HEADER BYTE 1 */ + header = panel->shdr_if_sdp.HB1; + parity = dp_header_get_parity(header); + data = ((header << HEADER_BYTE_1_BIT) + | (parity << PARITY_BYTE_1_BIT)); + dp_write(MMSS_DP_GENERIC2_0 + mst_offset, + data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + /* HEADER BYTE 2 */ + header = panel->shdr_if_sdp.HB2; + parity = dp_header_get_parity(header); + data = ((header << HEADER_BYTE_2_BIT) + | (parity << PARITY_BYTE_2_BIT)); + dp_write(MMSS_DP_GENERIC2_1 + mst_offset, data); + + /* HEADER BYTE 3 */ + header = panel->shdr_if_sdp.HB3; + parity = dp_header_get_parity(header); + data = ((header << HEADER_BYTE_3_BIT) + | (parity << PARITY_BYTE_3_BIT)); + data |= dp_read(MMSS_DP_VSCEXT_1 + mst_offset); + dp_write(MMSS_DP_GENERIC2_1 + mst_offset, + data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + data = 
version; + data |= length << 8; + data |= hdr->eotf << 16; + dp_write(MMSS_DP_GENERIC2_2 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + data = (DP_GET_LSB(hdr->display_primaries_x[0]) | + (DP_GET_MSB(hdr->display_primaries_x[0]) << 8) | + (DP_GET_LSB(hdr->display_primaries_y[0]) << 16) | + (DP_GET_MSB(hdr->display_primaries_y[0]) << 24)); + dp_write(MMSS_DP_GENERIC2_3 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + data = (DP_GET_LSB(hdr->display_primaries_x[1]) | + (DP_GET_MSB(hdr->display_primaries_x[1]) << 8) | + (DP_GET_LSB(hdr->display_primaries_y[1]) << 16) | + (DP_GET_MSB(hdr->display_primaries_y[1]) << 24)); + dp_write(MMSS_DP_GENERIC2_4 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + data = (DP_GET_LSB(hdr->display_primaries_x[2]) | + (DP_GET_MSB(hdr->display_primaries_x[2]) << 8) | + (DP_GET_LSB(hdr->display_primaries_y[2]) << 16) | + (DP_GET_MSB(hdr->display_primaries_y[2]) << 24)); + dp_write(MMSS_DP_GENERIC2_5 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + data = (DP_GET_LSB(hdr->white_point_x) | + (DP_GET_MSB(hdr->white_point_x) << 8) | + (DP_GET_LSB(hdr->white_point_y) << 16) | + (DP_GET_MSB(hdr->white_point_y) << 24)); + dp_write(MMSS_DP_GENERIC2_6 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + data = (DP_GET_LSB(hdr->max_luminance) | + (DP_GET_MSB(hdr->max_luminance) << 8) | + (DP_GET_LSB(hdr->min_luminance) << 16) | + (DP_GET_MSB(hdr->min_luminance) << 24)); + dp_write(MMSS_DP_GENERIC2_7 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + data = (DP_GET_LSB(hdr->max_content_light_level) | + (DP_GET_MSB(hdr->max_content_light_level) << 8) | + (DP_GET_LSB(hdr->max_average_light_level) << 16) | + (DP_GET_MSB(hdr->max_average_light_level) << 24)); + dp_write(MMSS_DP_GENERIC2_8 + mst_offset, data); + 
memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + data = 0; + dp_write(MMSS_DP_GENERIC2_9 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + print_hex_dump_debug("[drm-dp] HDR: ", + DUMP_PREFIX_NONE, 16, 4, buf, off, false); +} + +static void dp_catalog_panel_setup_vsc_sdp(struct dp_catalog_panel *panel) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 header, parity, data, mst_offset = 0; + u8 off = 0; + u8 buf[SZ_128]; + + if (!panel) { + DP_ERR("invalid input\n"); + return; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + if (panel->stream_id == DP_STREAM_1) + mst_offset = MMSS_DP1_GENERIC0_0 - MMSS_DP_GENERIC0_0; + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + /* HEADER BYTE 1 */ + header = panel->vsc_colorimetry.header.HB1; + parity = dp_header_get_parity(header); + data = ((header << HEADER_BYTE_1_BIT) + | (parity << PARITY_BYTE_1_BIT)); + dp_write(MMSS_DP_GENERIC0_0 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + /* HEADER BYTE 2 */ + header = panel->vsc_colorimetry.header.HB2; + parity = dp_header_get_parity(header); + data = ((header << HEADER_BYTE_2_BIT) + | (parity << PARITY_BYTE_2_BIT)); + dp_write(MMSS_DP_GENERIC0_1 + mst_offset, data); + + /* HEADER BYTE 3 */ + header = panel->vsc_colorimetry.header.HB3; + parity = dp_header_get_parity(header); + data = ((header << HEADER_BYTE_3_BIT) + | (parity << PARITY_BYTE_3_BIT)); + data |= dp_read(MMSS_DP_GENERIC0_1 + mst_offset); + dp_write(MMSS_DP_GENERIC0_1 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + data = 0; + dp_write(MMSS_DP_GENERIC0_2 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + dp_write(MMSS_DP_GENERIC0_3 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); 
+ + dp_write(MMSS_DP_GENERIC0_4 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + dp_write(MMSS_DP_GENERIC0_5 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + data = (panel->vsc_colorimetry.data[16] & 0xFF) | + ((panel->vsc_colorimetry.data[17] & 0xFF) << 8) | + ((panel->vsc_colorimetry.data[18] & 0x7) << 16); + + dp_write(MMSS_DP_GENERIC0_6 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + data = 0; + dp_write(MMSS_DP_GENERIC0_7 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + dp_write(MMSS_DP_GENERIC0_8 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + dp_write(MMSS_DP_GENERIC0_9 + mst_offset, data); + memcpy(buf + off, &data, sizeof(data)); + off += sizeof(data); + + print_hex_dump_debug("[drm-dp] VSC: ", + DUMP_PREFIX_NONE, 16, 4, buf, off, false); +} + +static void dp_catalog_panel_config_sdp(struct dp_catalog_panel *panel, + bool en) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 cfg, cfg2; + u32 sdp_cfg_off = 0; + u32 sdp_cfg2_off = 0; + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + if (panel->stream_id == DP_STREAM_1) { + sdp_cfg_off = MMSS_DP1_SDP_CFG - MMSS_DP_SDP_CFG; + sdp_cfg2_off = MMSS_DP1_SDP_CFG2 - MMSS_DP_SDP_CFG2; + } + + cfg = dp_read(MMSS_DP_SDP_CFG + sdp_cfg_off); + cfg2 = dp_read(MMSS_DP_SDP_CFG2 + sdp_cfg2_off); + + if (en) { + /* GEN0_SDP_EN */ + cfg |= BIT(17); + dp_write(MMSS_DP_SDP_CFG + sdp_cfg_off, cfg); + + /* GENERIC0_SDPSIZE */ + cfg2 |= BIT(16); + dp_write(MMSS_DP_SDP_CFG2 + sdp_cfg2_off, cfg2); + + /* setup the GENERIC0 in case of en = true */ + dp_catalog_panel_setup_vsc_sdp(panel); + + } else { + /* GEN0_SDP_EN */ + cfg &= ~BIT(17); + dp_write(MMSS_DP_SDP_CFG 
+ sdp_cfg_off, cfg); + + /* GENERIC0_SDPSIZE */ + cfg2 &= ~BIT(16); + dp_write(MMSS_DP_SDP_CFG2 + sdp_cfg2_off, cfg2); + } + + dp_catalog_panel_sdp_update(panel); +} + +static void dp_catalog_panel_config_misc(struct dp_catalog_panel *panel) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 reg_offset = 0; + + if (!panel) { + DP_ERR("invalid input\n"); + return; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + if (panel->stream_id == DP_STREAM_1) + reg_offset = DP1_MISC1_MISC0 - DP_MISC1_MISC0; + + DP_DEBUG("misc settings = 0x%x\n", panel->misc_val); + dp_write(DP_MISC1_MISC0 + reg_offset, panel->misc_val); +} + +static int dp_catalog_panel_set_colorspace(struct dp_catalog_panel *panel, +bool vsc_supported) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return -EINVAL; + } + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + if (vsc_supported) { + dp_catalog_panel_setup_vsc_sdp(panel); + dp_catalog_panel_sdp_update(panel); + } else + dp_catalog_panel_config_misc(panel); + + return 0; +} + +static void dp_catalog_panel_config_hdr(struct dp_catalog_panel *panel, bool en, + u32 dhdr_max_pkts, bool flush) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 cfg, cfg2, cfg4, misc; + u32 sdp_cfg_off = 0; + u32 sdp_cfg2_off = 0; + u32 sdp_cfg4_off = 0; + u32 misc1_misc0_off = 0; + + if (!panel) { + DP_ERR("invalid input\n"); + return; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + if (panel->stream_id == 
DP_STREAM_1) { + sdp_cfg_off = MMSS_DP1_SDP_CFG - MMSS_DP_SDP_CFG; + sdp_cfg2_off = MMSS_DP1_SDP_CFG2 - MMSS_DP_SDP_CFG2; + sdp_cfg4_off = MMSS_DP1_SDP_CFG4 - MMSS_DP_SDP_CFG4; + misc1_misc0_off = DP1_MISC1_MISC0 - DP_MISC1_MISC0; + } + + cfg = dp_read(MMSS_DP_SDP_CFG + sdp_cfg_off); + cfg2 = dp_read(MMSS_DP_SDP_CFG2 + sdp_cfg2_off); + misc = dp_read(DP_MISC1_MISC0 + misc1_misc0_off); + + if (en) { + if (dhdr_max_pkts) { + /* VSCEXT_SDP_EN */ + cfg |= BIT(16); + /* DHDR_EN, DHDR_PACKET_LIMIT */ + cfg4 = (dhdr_max_pkts << 1) | BIT(0); + dp_write(MMSS_DP_SDP_CFG4 + sdp_cfg4_off, cfg4); + dp_catalog_panel_setup_vsif_infoframe_sdp(panel); + } + + /* GEN2_SDP_EN */ + cfg |= BIT(19); + dp_write(MMSS_DP_SDP_CFG + sdp_cfg_off, cfg); + + /* GENERIC2_SDPSIZE */ + cfg2 |= BIT(20); + dp_write(MMSS_DP_SDP_CFG2 + sdp_cfg2_off, cfg2); + + dp_catalog_panel_setup_hdr_infoframe_sdp(panel); + + if (panel->hdr_meta.eotf) + DP_DEBUG("Enabled\n"); + else + DP_DEBUG("Reset\n"); + } else { + /* VSCEXT_SDP_ENG */ + cfg &= ~BIT(16) & ~BIT(19); + dp_write(MMSS_DP_SDP_CFG + sdp_cfg_off, cfg); + + /* GENERIC0_SDPSIZE GENERIC2_SDPSIZE */ + cfg2 &= ~BIT(20); + dp_write(MMSS_DP_SDP_CFG2 + sdp_cfg2_off, cfg2); + + /* DHDR_EN, DHDR_PACKET_LIMIT */ + cfg4 = 0; + dp_write(MMSS_DP_SDP_CFG4 + sdp_cfg4_off, cfg4); + + DP_DEBUG("Disabled\n"); + } + + if (flush) { + DP_DEBUG("flushing HDR metadata\n"); + dp_catalog_panel_sdp_update(panel); + } +} + +static void dp_catalog_panel_update_transfer_unit( + struct dp_catalog_panel *panel) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!panel || panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + dp_write(DP_VALID_BOUNDARY, panel->valid_boundary); + dp_write(DP_TU, panel->dp_tu); + dp_write(DP_VALID_BOUNDARY_2, panel->valid_boundary2); +} + +static void dp_catalog_ctrl_state_ctrl(struct dp_catalog_ctrl *ctrl, u32 state) +{ + 
struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + io_data = catalog->io.dp_link; + + dp_write(DP_STATE_CTRL, state); + /* make sure to change the hw state */ + wmb(); +} + +static void dp_catalog_ctrl_config_ctrl(struct dp_catalog_ctrl *ctrl, u8 ln_cnt) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 cfg; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + io_data = catalog->io.dp_link; + + cfg = dp_read(DP_CONFIGURATION_CTRL); + /* + * Reset ASSR (alternate scrambler seed reset) by resetting BIT(10). + * ASSR should be set to disable for TPS4 link training pattern. + * Forcing it to 0 as the power on reset value of register enables it. + */ + cfg &= ~(BIT(4) | BIT(5) | BIT(10)); + cfg |= (ln_cnt - 1) << 4; + dp_write(DP_CONFIGURATION_CTRL, cfg); + + cfg = dp_read(DP_MAINLINK_CTRL); + cfg |= 0x02000000; + dp_write(DP_MAINLINK_CTRL, cfg); + + DP_DEBUG("DP_MAINLINK_CTRL=0x%x\n", cfg); +} + +static void dp_catalog_panel_config_ctrl(struct dp_catalog_panel *panel, + u32 cfg) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 strm_reg_off = 0, mainlink_ctrl; + u32 reg; + + if (!panel) { + DP_ERR("invalid input\n"); + return; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + if (panel->stream_id == DP_STREAM_1) + strm_reg_off = DP1_CONFIGURATION_CTRL - DP_CONFIGURATION_CTRL; + + DP_DEBUG("DP_CONFIGURATION_CTRL=0x%x\n", cfg); + + dp_write(DP_CONFIGURATION_CTRL + strm_reg_off, cfg); + + mainlink_ctrl = dp_read(DP_MAINLINK_CTRL); + + if (panel->stream_id == DP_STREAM_0) + io_data = catalog->io.dp_p0; + else if (panel->stream_id == DP_STREAM_1) + io_data = catalog->io.dp_p1; + + if (mainlink_ctrl & BIT(8)) + 
dp_write(MMSS_DP_ASYNC_FIFO_CONFIG, 0x01);
+	else
+		dp_write(MMSS_DP_ASYNC_FIFO_CONFIG, 0x00);
+
+	reg = dp_read(MMSS_DP_TIMING_ENGINE_EN);
+	reg |= BIT(8);
+	dp_write(MMSS_DP_TIMING_ENGINE_EN, reg);
+}
+
+/*
+ * dp_catalog_panel_config_dto() - set or clear the DSC DTO ack bit (BIT(1))
+ * in MMSS_DP_DSC_DTO for the pixel path of the panel's stream.
+ */
+static void dp_catalog_panel_config_dto(struct dp_catalog_panel *panel,
+		bool ack)
+{
+	struct dp_catalog_private *catalog;
+	struct dp_io_data *io_data;
+	u32 dsc_dto;
+
+	if (!panel) {
+		DP_ERR("invalid input\n");
+		return;
+	}
+
+	if (panel->stream_id >= DP_STREAM_MAX) {
+		DP_ERR("invalid stream_id:%d\n", panel->stream_id);
+		return;
+	}
+
+	catalog = dp_catalog_get_priv(panel);
+	io_data = catalog->io.dp_link;
+
+	/* select the per-stream pixel path register block */
+	switch (panel->stream_id) {
+	case DP_STREAM_0:
+		io_data = catalog->io.dp_p0;
+		break;
+	case DP_STREAM_1:
+		io_data = catalog->io.dp_p1;
+		break;
+	default:
+		DP_ERR("invalid stream id\n");
+		return;
+	}
+
+	dsc_dto = dp_read(MMSS_DP_DSC_DTO);
+	if (ack)
+		/*
+		 * OR in the ack bit instead of overwriting the register, so
+		 * the DTO configuration programmed by
+		 * dp_catalog_panel_dsc_cfg() (BIT(0)/BIT(3) enables and the
+		 * n/d divider fields) is preserved across the ack.
+		 */
+		dsc_dto |= BIT(1);
+	else
+		dsc_dto &= ~BIT(1);
+	dp_write(MMSS_DP_DSC_DTO, dsc_dto);
+}
+
+/*
+ * dp_catalog_ctrl_lane_mapping() - program the logical-to-physical lane map.
+ * NOTE(review): always writes the identity mapping (0xe4 = lanes 3,2,1,0);
+ * the flipped/lane_map arguments are currently unused -- confirm intended.
+ */
+static void dp_catalog_ctrl_lane_mapping(struct dp_catalog_ctrl *ctrl,
+		bool flipped, char *lane_map)
+{
+	struct dp_catalog_private *catalog;
+	struct dp_io_data *io_data;
+
+	if (!ctrl) {
+		DP_ERR("invalid input\n");
+		return;
+	}
+
+	catalog = dp_catalog_get_priv(ctrl);
+	io_data = catalog->io.dp_link;
+
+	dp_write(DP_LOGICAL2PHYSICAL_LANE_MAPPING, 0xe4);
+}
+
+/*
+ * dp_catalog_ctrl_lane_pnswap() - program P/N polarity inversion per lane.
+ * ln_pnswap carries one swap bit per lane in bits 0..3; lanes 0/1 go to the
+ * tx0 PHY block, lanes 2/3 to the tx1 block.
+ */
+static void dp_catalog_ctrl_lane_pnswap(struct dp_catalog_ctrl *ctrl,
+		u8 ln_pnswap)
+{
+	struct dp_catalog_private *catalog;
+	struct dp_io_data *io_data;
+	u32 cfg0, cfg1;
+
+	/* guard against NULL like every other ctrl op; was missing here */
+	if (!ctrl) {
+		DP_ERR("invalid input\n");
+		return;
+	}
+
+	catalog = dp_catalog_get_priv(ctrl);
+
+	cfg0 = 0x0a;
+	cfg1 = 0x0a;
+
+	cfg0 |= ((ln_pnswap >> 0) & 0x1) << 0;
+	cfg0 |= ((ln_pnswap >> 1) & 0x1) << 2;
+	cfg1 |= ((ln_pnswap >> 2) & 0x1) << 0;
+	cfg1 |= ((ln_pnswap >> 3) & 0x1) << 2;
+
+	io_data = catalog->io.dp_ln_tx0;
+	dp_write(TXn_TX_POL_INV, cfg0);
+
+	io_data = catalog->io.dp_ln_tx1;
+	dp_write(TXn_TX_POL_INV, cfg1);
+}
+
+static void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog_ctrl *ctrl,
+		bool enable)
+{
+ u32 mainlink_ctrl, reg; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + io_data = catalog->io.dp_link; + + if (enable) { + reg = dp_read(DP_MAINLINK_CTRL); + mainlink_ctrl = reg & ~(0x03); + dp_write(DP_MAINLINK_CTRL, mainlink_ctrl); + wmb(); /* make sure mainlink is turned off before reset */ + mainlink_ctrl = reg | 0x02; + dp_write(DP_MAINLINK_CTRL, mainlink_ctrl); + wmb(); /* make sure mainlink entered reset */ + mainlink_ctrl = reg & ~(0x03); + dp_write(DP_MAINLINK_CTRL, mainlink_ctrl); + wmb(); /* make sure mainlink reset done */ + mainlink_ctrl = reg | 0x01; + dp_write(DP_MAINLINK_CTRL, mainlink_ctrl); + wmb(); /* make sure mainlink turned on */ + } else { + mainlink_ctrl = dp_read(DP_MAINLINK_CTRL); + mainlink_ctrl &= ~BIT(0); + dp_write(DP_MAINLINK_CTRL, mainlink_ctrl); + } +} + +static void dp_catalog_panel_config_msa(struct dp_catalog_panel *panel, + u32 rate, u32 stream_rate_khz) +{ + u32 pixel_m, pixel_n; + u32 mvid, nvid; + u32 const nvid_fixed = 0x8000; + u32 const link_rate_hbr2 = 540000; + u32 const link_rate_hbr3 = 810000; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 strm_reg_off = 0; + u32 mvid_reg_off = 0, nvid_reg_off = 0; + + if (!panel) { + DP_ERR("invalid input\n"); + return; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_mmss_cc; + + if (panel->stream_id == DP_STREAM_1) + strm_reg_off = MMSS_DP_PIXEL1_M - MMSS_DP_PIXEL_M; + + pixel_m = dp_read(MMSS_DP_PIXEL_M + strm_reg_off); + pixel_n = dp_read(MMSS_DP_PIXEL_N + strm_reg_off); + DP_DEBUG("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n); + + mvid = (pixel_m & 0xFFFF) * 5; + nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF); + + if (nvid < nvid_fixed) { + u32 temp; + + temp = (nvid_fixed / nvid) * nvid; 
+		mvid = (nvid_fixed / nvid) * mvid;
+		nvid = temp;
+	}
+
+	DP_DEBUG("rate = %d\n", rate);
+
+	/* two pixels are clocked per cycle in widebus mode */
+	if (panel->widebus_en)
+		mvid <<= 1;
+
+	if (link_rate_hbr2 == rate)
+		nvid *= 2;
+
+	if (link_rate_hbr3 == rate)
+		nvid *= 3;
+
+	io_data = catalog->io.dp_link;
+
+	if (panel->stream_id == DP_STREAM_1) {
+		mvid_reg_off = DP1_SOFTWARE_MVID - DP_SOFTWARE_MVID;
+		nvid_reg_off = DP1_SOFTWARE_NVID - DP_SOFTWARE_NVID;
+	}
+
+	DP_DEBUG("mvid=0x%x, nvid=0x%x\n", mvid, nvid);
+	dp_write(DP_SOFTWARE_MVID + mvid_reg_off, mvid);
+	dp_write(DP_SOFTWARE_NVID + nvid_reg_off, nvid);
+}
+
+/*
+ * dp_catalog_ctrl_set_pattern() - request a link training pattern and poll
+ * DP_MAINLINK_READY until the hardware reports the pattern is active.
+ */
+static void dp_catalog_ctrl_set_pattern(struct dp_catalog_ctrl *ctrl,
+		u32 pattern)
+{
+	int bit, cnt = 10;
+	u32 data;
+	const u32 link_training_offset = 3;
+	struct dp_catalog_private *catalog;
+	struct dp_io_data *io_data;
+
+	if (!ctrl) {
+		DP_ERR("invalid input\n");
+		return;
+	}
+
+	catalog = dp_catalog_get_priv(ctrl);
+	io_data = catalog->io.dp_link;
+
+	switch (pattern) {
+	case DP_TRAINING_PATTERN_4:
+		bit = 3;
+		break;
+	case DP_TRAINING_PATTERN_3:
+	case DP_TRAINING_PATTERN_2:
+	case DP_TRAINING_PATTERN_1:
+		bit = pattern - 1;
+		break;
+	default:
+		DP_ERR("invalid pattern\n");
+		return;
+	}
+
+	DP_DEBUG("hw: bit=%d train=%d\n", bit, pattern);
+	dp_write(DP_STATE_CTRL, BIT(bit));
+
+	/* pattern-active status bits sit at this offset in MAINLINK_READY */
+	bit += link_training_offset;
+
+	while (cnt--) {
+		data = dp_read(DP_MAINLINK_READY);
+		if (data & BIT(bit))
+			break;
+	}
+
+	/*
+	 * On timeout the post-decrement loop leaves cnt == -1 (and a
+	 * successful break in the final iteration leaves cnt == 0), so the
+	 * failure test must be "< 0": the previous "cnt == 0" check could
+	 * never report a real timeout and reported a false failure when the
+	 * ready bit appeared on the last poll.
+	 */
+	if (cnt < 0)
+		DP_ERR("set link_train=%d failed\n", pattern);
+}
+
+/*
+ * dp_catalog_ctrl_usb_reset() - switch the shared USB3/DP combo PHY into DP
+ * mode and apply the type-C orientation (flip selects CC2).
+ */
+static void dp_catalog_ctrl_usb_reset(struct dp_catalog_ctrl *ctrl, bool flip)
+{
+	struct dp_catalog_private *catalog;
+	struct dp_io_data *io_data;
+
+	if (!ctrl) {
+		DP_ERR("invalid input\n");
+		return;
+	}
+
+	catalog = dp_catalog_get_priv(ctrl);
+
+	io_data = catalog->io.usb3_dp_com;
+
+	DP_DEBUG("Program PHYMODE to DP only\n");
+	dp_write(USB3_DP_COM_RESET_OVRD_CTRL, 0x0a);
+	dp_write(USB3_DP_COM_PHY_MODE_CTRL, 0x02);
+	dp_write(USB3_DP_COM_SW_RESET, 0x01);
+	/* make sure usb3 com phy software reset is done */
+	
wmb(); + + if (!flip) /* CC1 */ + dp_write(USB3_DP_COM_TYPEC_CTRL, 0x02); + else /* CC2 */ + dp_write(USB3_DP_COM_TYPEC_CTRL, 0x03); + + dp_write(USB3_DP_COM_SWI_CTRL, 0x00); + dp_write(USB3_DP_COM_SW_RESET, 0x00); + /* make sure the software reset is done */ + wmb(); + + dp_write(USB3_DP_COM_POWER_DOWN_CTRL, 0x01); + dp_write(USB3_DP_COM_RESET_OVRD_CTRL, 0x00); + /* make sure phy is brought out of reset */ + wmb(); +} + +static int dp_catalog_ctrl_setup_misr(struct dp_catalog_ctrl *ctrl) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 val; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_phy; + dp_write(DP_PHY_MISR_CTRL, 0x3); + /* make sure misr hw is reset */ + wmb(); + dp_write(DP_PHY_MISR_CTRL, 0x1); + /* make sure misr is brought out of reset */ + wmb(); + + io_data = catalog->io.dp_link; + val = 1; // frame count + val |= BIT(10); // clear status + val |= BIT(8); // enable + dp_write(DP_MISR40_CTRL, val); + /* make sure misr control is applied */ + wmb(); + + return 0; +} + +static int dp_catalog_ctrl_read_misr(struct dp_catalog_ctrl *ctrl, struct dp_misr40_data *data) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 val; + int i, j; + u32 addr; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_phy; + val = dp_read(DP_PHY_MISR_STATUS); + if (!val) { + DP_WARN("phy misr not ready!"); + return -EAGAIN; + } + + addr = DP_PHY_MISR_TX0; + for (i = 0; i < 8; i++) { + data->phy_misr[i] = 0; + for (j = 0; j < 4; j++) { + val = dp_read(addr) & 0xff; + data->phy_misr[i] |= val << (j * 8); + addr += 4; + } + } + + io_data = catalog->io.dp_link; + for (i = 0; i < 8; i++) + data->ctrl_misr[i] = dp_read(DP_MISR40_TX0 + (i * 4)); + + return 0; +} + +static void dp_catalog_panel_tpg_cfg(struct dp_catalog_panel *panel, u32 pattern) +{ + struct 
dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 reg; + + if (!panel) { + DP_ERR("invalid input\n"); + return; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + catalog = dp_catalog_get_priv(panel); + + if (panel->stream_id == DP_STREAM_0) + io_data = catalog->io.dp_p0; + else if (panel->stream_id == DP_STREAM_1) + io_data = catalog->io.dp_p1; + + if (!pattern) { + dp_write(MMSS_DP_TPG_MAIN_CONTROL, 0x0); + dp_write(MMSS_DP_BIST_ENABLE, 0x0); + reg = dp_read(MMSS_DP_TIMING_ENGINE_EN); + reg &= ~0x1; + dp_write(MMSS_DP_TIMING_ENGINE_EN, reg); + wmb(); /* ensure Timing generator is turned off */ + return; + } + + if (pattern > DP_TPG_PATTERN_MAX) + pattern = DP_TPG_PATTERN_DEFAULT; + + dp_write(MMSS_DP_INTF_HSYNC_CTL, + panel->hsync_ctl); + dp_write(MMSS_DP_INTF_VSYNC_PERIOD_F0, + panel->vsync_period * panel->hsync_period); + dp_write(MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, + panel->v_sync_width * panel->hsync_period); + dp_write(MMSS_DP_INTF_VSYNC_PERIOD_F1, 0); + dp_write(MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0); + dp_write(MMSS_DP_INTF_DISPLAY_HCTL, panel->display_hctl); + dp_write(MMSS_DP_INTF_ACTIVE_HCTL, 0); + dp_write(MMSS_INTF_DISPLAY_V_START_F0, panel->display_v_start); + dp_write(MMSS_DP_INTF_DISPLAY_V_END_F0, panel->display_v_end); + dp_write(MMSS_INTF_DISPLAY_V_START_F1, 0); + dp_write(MMSS_DP_INTF_DISPLAY_V_END_F1, 0); + dp_write(MMSS_DP_INTF_ACTIVE_V_START_F0, 0); + dp_write(MMSS_DP_INTF_ACTIVE_V_END_F0, 0); + dp_write(MMSS_DP_INTF_ACTIVE_V_START_F1, 0); + dp_write(MMSS_DP_INTF_ACTIVE_V_END_F1, 0); + dp_write(MMSS_DP_INTF_POLARITY_CTL, 0); + wmb(); /* ensure TPG registers are programmed */ + + dp_write(MMSS_DP_TPG_MAIN_CONTROL, (1 << pattern)); + dp_write(MMSS_DP_TPG_VIDEO_CONFIG, 0x5); + wmb(); /* ensure TPG config is programmed */ + dp_write(MMSS_DP_BIST_ENABLE, 0x1); + reg = dp_read(MMSS_DP_TIMING_ENGINE_EN); + reg |= 0x1; + dp_write(MMSS_DP_TIMING_ENGINE_EN, reg); + 
wmb(); /* ensure Timing generator is turned on */ +} + +static void dp_catalog_panel_dsc_cfg(struct dp_catalog_panel *panel) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 reg, offset; + int i; + + if (!panel) { + DP_ERR("invalid input\n"); + return; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + catalog = dp_catalog_get_priv(panel); + + if (panel->stream_id == DP_STREAM_0) + io_data = catalog->io.dp_p0; + else + io_data = catalog->io.dp_p1; + + dp_write(MMSS_DP_DSC_DTO_COUNT, panel->dsc.dto_count); + + reg = dp_read(MMSS_DP_DSC_DTO); + if (panel->dsc.dto_en) { + reg |= BIT(0); + reg |= BIT(3); + reg |= (panel->dsc.dto_n << 8); + reg |= (panel->dsc.dto_d << 16); + } + dp_write(MMSS_DP_DSC_DTO, reg); + + io_data = catalog->io.dp_link; + + if (panel->stream_id == DP_STREAM_0) + offset = 0; + else + offset = DP1_COMPRESSION_MODE_CTRL - DP_COMPRESSION_MODE_CTRL; + + dp_write(DP_PPS_HB_0_3 + offset, 0x7F1000); + dp_write(DP_PPS_PB_0_3 + offset, 0xA22300); + + for (i = 0; i < panel->dsc.parity_word_len; i++) + dp_write(DP_PPS_PB_4_7 + (i << 2) + offset, + panel->dsc.parity_word[i]); + + for (i = 0; i < panel->dsc.pps_word_len; i++) + dp_write(DP_PPS_PPS_0_3 + (i << 2) + offset, + panel->dsc.pps_word[i]); + + reg = 0; + if (panel->dsc.dsc_en) { + reg = BIT(0); + reg |= (panel->dsc.eol_byte_num << 3); + reg |= (panel->dsc.slice_per_pkt << 5); + reg |= (panel->dsc.bytes_per_pkt << 16); + reg |= (panel->dsc.be_in_lane << 10); + } + dp_write(DP_COMPRESSION_MODE_CTRL + offset, reg); + + DP_DEBUG("compression:0x%x for stream:%d\n", + reg, panel->stream_id); +} + +static void dp_catalog_panel_dp_flush(struct dp_catalog_panel *panel, + enum dp_flush_bit flush_bit) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 dp_flush, offset; + struct dp_dsc_cfg_data *dsc; + + if (!panel) { + DP_ERR("invalid input\n"); + return; + } + + if (panel->stream_id >= 
DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + dsc = &panel->dsc; + + if (panel->stream_id == DP_STREAM_0) + offset = 0; + else + offset = MMSS_DP1_FLUSH - MMSS_DP_FLUSH; + + dp_flush = dp_read(MMSS_DP_FLUSH + offset); + + if ((flush_bit == DP_PPS_FLUSH) && + dsc->continuous_pps) + dp_flush &= ~BIT(2); + + dp_flush |= BIT(flush_bit); + dp_write(MMSS_DP_FLUSH + offset, dp_flush); +} + +static void dp_catalog_panel_pps_flush(struct dp_catalog_panel *panel) +{ + dp_catalog_panel_dp_flush(panel, DP_PPS_FLUSH); + DP_DEBUG("pps flush for stream:%d\n", panel->stream_id); +} + +static void dp_catalog_panel_dhdr_flush(struct dp_catalog_panel *panel) +{ + dp_catalog_panel_dp_flush(panel, DP_DHDR_FLUSH); + DP_DEBUG("dhdr flush for stream:%d\n", panel->stream_id); +} + + +static bool dp_catalog_panel_dhdr_busy(struct dp_catalog_panel *panel) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 dp_flush, offset; + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return false; + } + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + if (panel->stream_id == DP_STREAM_0) + offset = 0; + else + offset = MMSS_DP1_FLUSH - MMSS_DP_FLUSH; + + dp_flush = dp_read(MMSS_DP_FLUSH + offset); + + return dp_flush & BIT(DP_DHDR_FLUSH) ? 
true : false; +} + +static int dp_catalog_panel_get_src_crc(struct dp_catalog_panel *panel, u16 *crc) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 offset; + u32 reg; + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return -EINVAL; + } + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + if (panel->stream_id == DP_STREAM_0) + offset = MMSS_DP_PSR_CRC_RG; + else + offset = MMSS_DP1_CRC_RG; + + reg = dp_read(offset); //GR + crc[0] = reg & 0xffff; + crc[1] = reg >> 16; + crc[2] = dp_read(offset + 4); //B + + return 0; +} + +static void dp_catalog_ctrl_reset(struct dp_catalog_ctrl *ctrl) +{ + u32 sw_reset; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + io_data = catalog->io.dp_ahb; + + sw_reset = dp_read(DP_SW_RESET); + + sw_reset |= BIT(0); + dp_write(DP_SW_RESET, sw_reset); + usleep_range(1000, 1010); /* h/w recommended delay */ + + sw_reset &= ~BIT(0); + dp_write(DP_SW_RESET, sw_reset); +} + +static bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog_ctrl *ctrl) +{ + u32 data; + int cnt = 10; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!ctrl) { + DP_ERR("invalid input\n"); + goto end; + } + + catalog = dp_catalog_get_priv(ctrl); + io_data = catalog->io.dp_link; + + while (--cnt) { + /* DP_MAINLINK_READY */ + data = dp_read(DP_MAINLINK_READY); + if (data & BIT(0)) + return true; + + usleep_range(1000, 1010); /* 1ms wait before next reg read */ + } + DP_ERR("mainlink not ready\n"); +end: + return false; +} + +static void dp_catalog_ctrl_enable_irq(struct dp_catalog_ctrl *ctrl, + bool enable) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + io_data = catalog->io.dp_ahb; + + if 
(enable) { + dp_write(DP_INTR_STATUS, DP_INTR_MASK1); + dp_write(DP_INTR_STATUS2, DP_INTR_MASK2); + dp_write(DP_INTR_STATUS3, DP_INTR_MASK3); + dp_write(DP_INTR_STATUS5, DP_INTR_MASK5); + dp_write(DP_INTR_STATUS6, DP_INTR_MASK6); + } else { + /* disable interrupts */ + dp_write(DP_INTR_STATUS, 0x00); + dp_write(DP_INTR_STATUS2, 0x00); + dp_write(DP_INTR_STATUS3, 0x00); + dp_write(DP_INTR_STATUS5, 0x00); + dp_write(DP_INTR_STATUS6, 0x00); + wmb(); + + /* clear all pending interrupts */ + dp_write(DP_INTR_STATUS, DP_INTERRUPT_STATUS1 << 1); + dp_write(DP_INTR_STATUS2, DP_INTERRUPT_STATUS2 << 1); + dp_write(DP_INTR_STATUS3, DP_INTERRUPT_STATUS3 << 1); + dp_write(DP_INTR_STATUS5, DP_INTERRUPT_STATUS5 << 1); + dp_write(DP_INTR_STATUS6, DP_INTERRUPT_STATUS6 << 1); + + wmb(); + } +} + +static void dp_catalog_ctrl_get_interrupt(struct dp_catalog_ctrl *ctrl) +{ + u32 ack = 0; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + io_data = catalog->io.dp_ahb; + + ctrl->isr = dp_read(DP_INTR_STATUS2); + ctrl->isr &= ~DP_INTR_MASK2; + ack = ctrl->isr & DP_INTERRUPT_STATUS2; + ack <<= 1; + ack |= DP_INTR_MASK2; + dp_write(DP_INTR_STATUS2, ack); + + ctrl->isr3 = dp_read(DP_INTR_STATUS3); + ctrl->isr3 &= ~DP_INTR_MASK3; + ack = ctrl->isr3 & DP_INTERRUPT_STATUS3; + ack <<= 1; + ack |= DP_INTR_MASK3; + dp_write(DP_INTR_STATUS3, ack); + + ctrl->isr5 = dp_read(DP_INTR_STATUS5); + ctrl->isr5 &= ~DP_INTR_MASK5; + ack = ctrl->isr5 & DP_INTERRUPT_STATUS5; + ack <<= 1; + ack |= DP_INTR_MASK5; + dp_write(DP_INTR_STATUS5, ack); + + ctrl->isr6 = dp_read(DP_INTR_STATUS6); + ctrl->isr6 &= ~DP_INTR_MASK6; + ack = ctrl->isr6 & DP_INTERRUPT_STATUS6; + ack <<= 1; + ack |= DP_INTR_MASK6; + dp_write(DP_INTR_STATUS6, ack); + +} + +static void dp_catalog_ctrl_phy_reset(struct dp_catalog_ctrl *ctrl) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!ctrl) { + 
DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + io_data = catalog->io.dp_ahb; + + dp_write(DP_PHY_CTRL, 0x5); /* bit 0 & 2 */ + usleep_range(1000, 1010); /* h/w recommended delay */ + dp_write(DP_PHY_CTRL, 0x0); + wmb(); /* make sure PHY reset done */ +} + +static void dp_catalog_ctrl_phy_lane_cfg(struct dp_catalog_ctrl *ctrl, + bool flipped, u8 ln_cnt) +{ + u32 info = 0x0; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u8 orientation = BIT(!!flipped); + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_phy; + + info |= (ln_cnt & 0x0F); + info |= ((orientation & 0x0F) << 4); + DP_DEBUG("Shared Info = 0x%x\n", info); + + dp_write(DP_PHY_SPARE0, info); +} + +static void dp_catalog_ctrl_update_vx_px(struct dp_catalog_ctrl *ctrl, + u8 v_level, u8 p_level, bool high) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u8 value0, value1; + u32 version; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + + DP_DEBUG("hw: v=%d p=%d\n", v_level, p_level); + + io_data = catalog->io.dp_ahb; + version = dp_read(DP_HW_VERSION); + + if (version == 0x10020004) { + if (high) { + value0 = vm_voltage_swing_hbr3_hbr2[v_level][p_level]; + value1 = vm_pre_emphasis_hbr3_hbr2[v_level][p_level]; + } else { + value0 = vm_voltage_swing_hbr_rbr[v_level][p_level]; + value1 = vm_pre_emphasis_hbr_rbr[v_level][p_level]; + } + } else { + value0 = vm_voltage_swing[v_level][p_level]; + value1 = vm_pre_emphasis[v_level][p_level]; + } + + /* program default setting first */ + + io_data = catalog->io.dp_ln_tx0; + dp_write(TXn_TX_DRV_LVL, 0x2A); + dp_write(TXn_TX_EMP_POST1_LVL, 0x20); + + io_data = catalog->io.dp_ln_tx1; + dp_write(TXn_TX_DRV_LVL, 0x2A); + dp_write(TXn_TX_EMP_POST1_LVL, 0x20); + + /* Enable MUX to use Cursor values from these registers */ + value0 |= BIT(5); + value1 |= BIT(5); + + 
/* Configure host and panel only if both values are allowed */ + if (value0 != 0xFF && value1 != 0xFF) { + io_data = catalog->io.dp_ln_tx0; + dp_write(TXn_TX_DRV_LVL, value0); + dp_write(TXn_TX_EMP_POST1_LVL, value1); + + io_data = catalog->io.dp_ln_tx1; + dp_write(TXn_TX_DRV_LVL, value0); + dp_write(TXn_TX_EMP_POST1_LVL, value1); + + DP_DEBUG("hw: vx_value=0x%x px_value=0x%x\n", + value0, value1); + } else { + DP_ERR("invalid vx (0x%x=0x%x), px (0x%x=0x%x\n", + v_level, value0, p_level, value1); + } +} + +static void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog_ctrl *ctrl, + u32 pattern) +{ + struct dp_catalog_private *catalog; + u32 value = 0x0; + struct dp_io_data *io_data = NULL; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_link; + + dp_write(DP_STATE_CTRL, 0x0); + + switch (pattern) { + case DP_PHY_TEST_PATTERN_D10_2: + dp_write(DP_STATE_CTRL, 0x1); + break; + case DP_PHY_TEST_PATTERN_ERROR_COUNT: + value &= ~(1 << 16); + dp_write(DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); + value |= 0xFC; + dp_write(DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); + dp_write(DP_MAINLINK_LEVELS, 0x2); + dp_write(DP_STATE_CTRL, 0x10); + break; + case DP_PHY_TEST_PATTERN_PRBS7: + dp_write(DP_STATE_CTRL, 0x20); + break; + case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: + dp_write(DP_STATE_CTRL, 0x40); + /* 00111110000011111000001111100000 */ + dp_write(DP_TEST_80BIT_CUSTOM_PATTERN_REG0, 0x3E0F83E0); + /* 00001111100000111110000011111000 */ + dp_write(DP_TEST_80BIT_CUSTOM_PATTERN_REG1, 0x0F83E0F8); + /* 1111100000111110 */ + dp_write(DP_TEST_80BIT_CUSTOM_PATTERN_REG2, 0x0000F83E); + break; + case DP_PHY_TEST_PATTERN_CP2520: + value = dp_read(DP_MAINLINK_CTRL); + value &= ~BIT(4); + dp_write(DP_MAINLINK_CTRL, value); + + value = BIT(16); + dp_write(DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); + value |= 0xFC; + dp_write(DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, value); + dp_write(DP_MAINLINK_LEVELS, 0x2); + 
dp_write(DP_STATE_CTRL, 0x10); + + value = dp_read(DP_MAINLINK_CTRL); + value |= BIT(0); + dp_write(DP_MAINLINK_CTRL, value); + break; + case DP_PHY_TEST_PATTERN_CP2520_3: + dp_write(DP_MAINLINK_CTRL, 0x01); + dp_write(DP_STATE_CTRL, 0x8); + break; + default: + DP_DEBUG("No valid test pattern requested: 0x%x\n", pattern); + return; + } + + /* Make sure the test pattern is programmed in the hardware */ + wmb(); +} + +static u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog_ctrl *ctrl) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data = NULL; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return 0; + } + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_link; + + return dp_read(DP_MAINLINK_READY); +} + +static void dp_catalog_ctrl_fec_config(struct dp_catalog_ctrl *ctrl, + bool enable) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data = NULL; + u32 reg; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + io_data = catalog->io.dp_link; + + reg = dp_read(DP_MAINLINK_CTRL); + + /* + * fec_en = BIT(12) + * fec_seq_mode = BIT(22) + * sde_flush = BIT(23) | BIT(24) + * fb_boundary_sel = BIT(25) + */ + if (enable) + reg |= BIT(12) | BIT(22) | BIT(23) | BIT(24) | BIT(25); + else + reg &= ~BIT(12); + + dp_write(DP_MAINLINK_CTRL, reg); + /* make sure mainlink configuration is updated with fec sequence */ + wmb(); +} + +u32 dp_catalog_get_dp_core_version(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!dp_catalog) { + DP_ERR("invalid input\n"); + return 0; + } + + catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); + if (catalog->dp_core_version) + return catalog->dp_core_version; + + io_data = catalog->io.dp_ahb; + + return dp_read(DP_HW_VERSION); +} + +u32 dp_catalog_get_dp_phy_version(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + struct dp_io_data 
*io_data; + + if (!dp_catalog) { + DP_ERR("invalid input\n"); + return 0; + } + + catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); + if (catalog->dp_phy_version) + return catalog->dp_phy_version; + + io_data = catalog->io.dp_phy; + catalog->dp_phy_version = (dp_read(DP_PHY_REVISION_ID3) << 24) | + (dp_read(DP_PHY_REVISION_ID2) << 16) | + (dp_read(DP_PHY_REVISION_ID1) << 8) | + dp_read(DP_PHY_REVISION_ID0); + + return catalog->dp_phy_version; +} + +static int dp_catalog_reg_dump(struct dp_catalog *dp_catalog, + char *name, u8 **out_buf, u32 *out_buf_len) +{ + int ret = 0; + u8 *buf; + u32 len; + struct dp_io_data *io_data; + struct dp_catalog_private *catalog; + struct dp_parser *parser; + + if (!dp_catalog) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + catalog = container_of(dp_catalog, struct dp_catalog_private, + dp_catalog); + + parser = catalog->parser; + parser->get_io_buf(parser, name); + io_data = parser->get_io(parser, name); + if (!io_data) { + DP_ERR("IO %s not found\n", name); + ret = -EINVAL; + goto end; + } + + buf = io_data->buf; + len = io_data->io.len; + + if (!buf || !len) { + DP_ERR("no buffer available\n"); + ret = -ENOMEM; + goto end; + } + + if (!strcmp(catalog->exe_mode, "hw") || + !strcmp(catalog->exe_mode, "all")) { + u32 i, data; + u32 const rowsize = 4; + void __iomem *addr = io_data->io.base; + + memset(buf, 0, len); + + for (i = 0; i < len / rowsize; i++) { + data = readl_relaxed(addr); + memcpy(buf + (rowsize * i), &data, sizeof(u32)); + + addr += rowsize; + } + } + + *out_buf = buf; + *out_buf_len = len; +end: + if (ret) + parser->clear_io_buf(parser); + + return ret; +} + +static void dp_catalog_ctrl_mst_config(struct dp_catalog_ctrl *ctrl, + bool enable) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data = NULL; + u32 reg; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_link; + + reg = 
dp_read(DP_MAINLINK_CTRL); + if (enable) + reg |= (0x04000100); + else + reg &= ~(0x04000100); + + dp_write(DP_MAINLINK_CTRL, reg); + /* make sure mainlink MST configuration is updated */ + wmb(); +} + +static void dp_catalog_ctrl_trigger_act(struct dp_catalog_ctrl *ctrl) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data = NULL; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_link; + + dp_write(DP_MST_ACT, 0x1); + /* make sure ACT signal is performed */ + wmb(); +} + +static void dp_catalog_ctrl_read_act_complete_sts(struct dp_catalog_ctrl *ctrl, + bool *sts) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data = NULL; + u32 reg; + + if (!ctrl || !sts) { + DP_ERR("invalid input\n"); + return; + } + + *sts = false; + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_link; + + reg = dp_read(DP_MST_ACT); + + if (!reg) + *sts = true; +} + +static void dp_catalog_ctrl_channel_alloc(struct dp_catalog_ctrl *ctrl, + u32 ch, u32 ch_start_slot, u32 tot_slot_cnt) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data = NULL; + u32 i, slot_reg_1, slot_reg_2, slot; + u32 reg_off = 0; + int const num_slots_per_reg = 32; + + if (!ctrl || ch >= DP_STREAM_MAX) { + DP_ERR("invalid input. 
ch %d\n", ch); + return; + } + + if (ch_start_slot > DP_MAX_TIME_SLOTS || + (ch_start_slot + tot_slot_cnt > DP_MAX_TIME_SLOTS)) { + DP_ERR("invalid slots start %d, tot %d\n", + ch_start_slot, tot_slot_cnt); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_link; + + DP_DEBUG("ch %d, start_slot %d, tot_slot %d\n", + ch, ch_start_slot, tot_slot_cnt); + + if (ch == DP_STREAM_1) + reg_off = DP_DP1_TIMESLOT_1_32 - DP_DP0_TIMESLOT_1_32; + + slot_reg_1 = 0; + slot_reg_2 = 0; + + if (ch_start_slot && tot_slot_cnt) { + ch_start_slot--; + for (i = 0; i < tot_slot_cnt; i++) { + if (ch_start_slot < num_slots_per_reg) { + slot_reg_1 |= BIT(ch_start_slot); + } else { + slot = ch_start_slot - num_slots_per_reg; + slot_reg_2 |= BIT(slot); + } + ch_start_slot++; + } + } + + DP_DEBUG("ch:%d slot_reg_1:%d, slot_reg_2:%d\n", ch, + slot_reg_1, slot_reg_2); + + dp_write(DP_DP0_TIMESLOT_1_32 + reg_off, slot_reg_1); + dp_write(DP_DP0_TIMESLOT_33_63 + reg_off, slot_reg_2); +} + +static void dp_catalog_ctrl_channel_dealloc(struct dp_catalog_ctrl *ctrl, + u32 ch, u32 ch_start_slot, u32 tot_slot_cnt) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data = NULL; + u32 i, slot_reg_1, slot_reg_2, slot; + u32 reg_off = 0; + + if (!ctrl || ch >= DP_STREAM_MAX) { + DP_ERR("invalid input. 
ch %d\n", ch); + return; + } + + if (ch_start_slot > DP_MAX_TIME_SLOTS || + (ch_start_slot + tot_slot_cnt > DP_MAX_TIME_SLOTS)) { + DP_ERR("invalid slots start %d, tot %d\n", + ch_start_slot, tot_slot_cnt); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_link; + + DP_DEBUG("dealloc ch %d, start_slot %d, tot_slot %d\n", + ch, ch_start_slot, tot_slot_cnt); + + if (ch == DP_STREAM_1) + reg_off = DP_DP1_TIMESLOT_1_32 - DP_DP0_TIMESLOT_1_32; + + slot_reg_1 = dp_read(DP_DP0_TIMESLOT_1_32 + reg_off); + slot_reg_2 = dp_read(DP_DP0_TIMESLOT_33_63 + reg_off); + + ch_start_slot = ch_start_slot - 1; + for (i = 0; i < tot_slot_cnt; i++) { + if (ch_start_slot < 33) { + slot_reg_1 &= ~BIT(ch_start_slot); + } else { + slot = ch_start_slot - 33; + slot_reg_2 &= ~BIT(slot); + } + ch_start_slot++; + } + + DP_DEBUG("dealloc ch:%d slot_reg_1:%d, slot_reg_2:%d\n", ch, + slot_reg_1, slot_reg_2); + + dp_write(DP_DP0_TIMESLOT_1_32 + reg_off, slot_reg_1); + dp_write(DP_DP0_TIMESLOT_33_63 + reg_off, slot_reg_2); +} + +static void dp_catalog_ctrl_update_rg(struct dp_catalog_ctrl *ctrl, u32 ch, + u32 x_int, u32 y_frac_enum) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data = NULL; + u32 rg, reg_off = 0; + + if (!ctrl || ch >= DP_STREAM_MAX) { + DP_ERR("invalid input. 
ch %d\n", ch); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_link; + + rg = y_frac_enum; + rg |= (x_int << 16); + + DP_DEBUG("ch: %d x_int:%d y_frac_enum:%d rg:%d\n", ch, x_int, + y_frac_enum, rg); + + if (ch == DP_STREAM_1) + reg_off = DP_DP1_RG - DP_DP0_RG; + + dp_write(DP_DP0_RG + reg_off, rg); +} + +static void dp_catalog_ctrl_mainlink_levels(struct dp_catalog_ctrl *ctrl, + u8 lane_cnt) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 mainlink_levels, safe_to_exit_level = 14; + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_link; + + switch (lane_cnt) { + case 1: + safe_to_exit_level = 14; + break; + case 2: + safe_to_exit_level = 8; + break; + case 4: + safe_to_exit_level = 5; + break; + default: + DP_DEBUG("setting the default safe_to_exit_level = %u\n", + safe_to_exit_level); + break; + } + + mainlink_levels = dp_read(DP_MAINLINK_LEVELS); + mainlink_levels &= 0xFE0; + mainlink_levels |= safe_to_exit_level; + + DP_DEBUG("mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n", + mainlink_levels, safe_to_exit_level); + + dp_write(DP_MAINLINK_LEVELS, mainlink_levels); +} + + +/* panel related catalog functions */ +static int dp_catalog_panel_timing_cfg(struct dp_catalog_panel *panel) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 offset = 0, reg; + + if (!panel) { + DP_ERR("invalid input\n"); + goto end; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + goto end; + } + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + if (panel->stream_id == DP_STREAM_1) + offset = DP1_TOTAL_HOR_VER - DP_TOTAL_HOR_VER; + + dp_write(DP_TOTAL_HOR_VER + offset, panel->total); + dp_write(DP_START_HOR_VER_FROM_SYNC + offset, panel->sync_start); + dp_write(DP_HSYNC_VSYNC_WIDTH_POLARITY + offset, panel->width_blanking); + dp_write(DP_ACTIVE_HOR_VER + offset, panel->dp_active); + + if 
(panel->stream_id == DP_STREAM_0) + io_data = catalog->io.dp_p0; + else + io_data = catalog->io.dp_p1; + + reg = dp_read(MMSS_DP_INTF_CONFIG); + + if (panel->widebus_en) + reg |= BIT(4); + else + reg &= ~BIT(4); + + dp_write(MMSS_DP_INTF_CONFIG, reg); +end: + return 0; +} + +static void dp_catalog_hpd_config_hpd(struct dp_catalog_hpd *hpd, bool en) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!hpd) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv(hpd); + io_data = catalog->io.dp_aux; + + if (en) { + u32 reftimer = dp_read(DP_DP_HPD_REFTIMER); + + /* Arm only the UNPLUG and HPD_IRQ interrupts */ + dp_write(DP_DP_HPD_INT_ACK, 0xF); + dp_write(DP_DP_HPD_INT_MASK, 0xA); + + /* Enable REFTIMER to count 1ms */ + reftimer |= BIT(16); + dp_write(DP_DP_HPD_REFTIMER, reftimer); + + /* Connect_time is 250us & disconnect_time is 2ms */ + dp_write(DP_DP_HPD_EVENT_TIME_0, 0x3E800FA); + dp_write(DP_DP_HPD_EVENT_TIME_1, 0x1F407D0); + + /* Enable HPD */ + dp_write(DP_DP_HPD_CTRL, 0x1); + + } else { + /* Disable HPD */ + dp_write(DP_DP_HPD_CTRL, 0x0); + } +} + +static u32 dp_catalog_hpd_get_interrupt(struct dp_catalog_hpd *hpd) +{ + u32 isr = 0; + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + + if (!hpd) { + DP_ERR("invalid input\n"); + return isr; + } + + catalog = dp_catalog_get_priv(hpd); + + io_data = catalog->io.dp_aux; + isr = dp_read(DP_DP_HPD_INT_STATUS); + dp_write(DP_DP_HPD_INT_ACK, (isr & 0xf)); + + return isr; +} + +static void dp_catalog_audio_init(struct dp_catalog_audio *audio) +{ + struct dp_catalog_private *catalog; + static u32 sdp_map[][DP_AUDIO_SDP_HEADER_MAX] = { + { + MMSS_DP_AUDIO_STREAM_0, + MMSS_DP_AUDIO_STREAM_1, + MMSS_DP_AUDIO_STREAM_1, + }, + { + MMSS_DP_AUDIO_TIMESTAMP_0, + MMSS_DP_AUDIO_TIMESTAMP_1, + MMSS_DP_AUDIO_TIMESTAMP_1, + }, + { + MMSS_DP_AUDIO_INFOFRAME_0, + MMSS_DP_AUDIO_INFOFRAME_1, + MMSS_DP_AUDIO_INFOFRAME_1, + }, + { + MMSS_DP_AUDIO_COPYMANAGEMENT_0, + 
MMSS_DP_AUDIO_COPYMANAGEMENT_1, + MMSS_DP_AUDIO_COPYMANAGEMENT_1, + }, + { + MMSS_DP_AUDIO_ISRC_0, + MMSS_DP_AUDIO_ISRC_1, + MMSS_DP_AUDIO_ISRC_1, + }, + }; + + if (!audio) + return; + + catalog = dp_catalog_get_priv(audio); + + catalog->audio_map = sdp_map; +} + +/* Enable the audio SDPs (timestamp, stream, copy-management, ISRC, infoframe) + * in MMSS_DP_SDP_CFG and select non-register header sources in + * MMSS_DP_SDP_CFG2, using the register offsets of the panel's stream. + */ +static void dp_catalog_audio_config_sdp(struct dp_catalog_audio *audio) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 sdp_cfg = 0, sdp_cfg_off = 0; + u32 sdp_cfg2 = 0, sdp_cfg2_off = 0; + + if (!audio) + return; + + if (audio->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream id:%d\n", audio->stream_id); + return; + } + + if (audio->stream_id == DP_STREAM_1) { + sdp_cfg_off = MMSS_DP1_SDP_CFG - MMSS_DP_SDP_CFG; + sdp_cfg2_off = MMSS_DP1_SDP_CFG2 - MMSS_DP_SDP_CFG2; + } + + catalog = dp_catalog_get_priv(audio); + io_data = catalog->io.dp_link; + + sdp_cfg = dp_read(MMSS_DP_SDP_CFG + sdp_cfg_off); + + /* AUDIO_TIMESTAMP_SDP_EN */ + sdp_cfg |= BIT(1); + /* AUDIO_STREAM_SDP_EN */ + sdp_cfg |= BIT(2); + /* AUDIO_COPY_MANAGEMENT_SDP_EN */ + sdp_cfg |= BIT(5); + /* AUDIO_ISRC_SDP_EN */ + sdp_cfg |= BIT(6); + /* AUDIO_INFOFRAME_SDP_EN */ + sdp_cfg |= BIT(20); + + DP_DEBUG("sdp_cfg = 0x%x\n", sdp_cfg); + dp_write(MMSS_DP_SDP_CFG + sdp_cfg_off, sdp_cfg); + + /* use the CFG2 offset for CFG2 accesses; sdp_cfg2_off was previously + * computed but never used, matching dp_catalog_panel_config_spd() + */ + sdp_cfg2 = dp_read(MMSS_DP_SDP_CFG2 + sdp_cfg2_off); + /* IFRM_REGSRC -> Do not use reg values */ + sdp_cfg2 &= ~BIT(0); + /* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */ + sdp_cfg2 &= ~BIT(1); + + DP_DEBUG("sdp_cfg2 = 0x%x\n", sdp_cfg2); + dp_write(MMSS_DP_SDP_CFG2 + sdp_cfg2_off, sdp_cfg2); +} + +/* Read the SDP header register selected by audio->sdp_type/sdp_header + * (via the map installed by dp_catalog_audio_init()) into audio->data. + */ +static void dp_catalog_audio_get_header(struct dp_catalog_audio *audio) +{ + struct dp_catalog_private *catalog; + u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX]; + struct dp_io_data *io_data; + enum dp_catalog_audio_sdp_type sdp; + enum dp_catalog_audio_header_type header; + + if (!audio) + return; + + catalog = dp_catalog_get_priv(audio); + + io_data = catalog->io.dp_link; + sdp_map = catalog->audio_map; + sdp = audio->sdp_type; + 
header = audio->sdp_header; + + audio->data = dp_read(sdp_map[sdp][header]); +} + +/* Write audio->data into the SDP header register selected by + * audio->sdp_type/sdp_header. + */ +static void dp_catalog_audio_set_header(struct dp_catalog_audio *audio) +{ + struct dp_catalog_private *catalog; + u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX]; + struct dp_io_data *io_data; + enum dp_catalog_audio_sdp_type sdp; + enum dp_catalog_audio_header_type header; + u32 data; + + if (!audio) + return; + + catalog = dp_catalog_get_priv(audio); + + io_data = catalog->io.dp_link; + sdp_map = catalog->audio_map; + sdp = audio->sdp_type; + header = audio->sdp_header; + data = audio->data; + + dp_write(sdp_map[sdp][header], data); +} + +/* Program MMSS_DP_AUDIO_ACR_CTRL: the select value from audio->data goes + * into bits 7:4, OR'd with fixed control bits 31, 14 and 8 (meaning per the + * register spec -- TODO confirm). + */ +static void dp_catalog_audio_config_acr(struct dp_catalog_audio *audio) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 acr_ctrl, select; + + /* NULL guard for consistency with the other audio catalog ops */ + if (!audio) + return; + + catalog = dp_catalog_get_priv(audio); + + select = audio->data; + io_data = catalog->io.dp_link; + + acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14); + + DP_DEBUG("select = 0x%x, acr_ctrl = 0x%x\n", select, acr_ctrl); + + dp_write(MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl); +} + +/* Enable or disable the audio engine (bit 0 of MMSS_DP_AUDIO_CFG) based on + * the boolean value passed in audio->data. + */ +static void dp_catalog_audio_enable(struct dp_catalog_audio *audio) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + bool enable; + u32 audio_ctrl; + + /* NULL guard for consistency with the other audio catalog ops */ + if (!audio) + return; + + catalog = dp_catalog_get_priv(audio); + + io_data = catalog->io.dp_link; + enable = !!audio->data; + + audio_ctrl = dp_read(MMSS_DP_AUDIO_CFG); + + if (enable) + audio_ctrl |= BIT(0); + else + audio_ctrl &= ~BIT(0); + + DP_DEBUG("dp_audio_cfg = 0x%x\n", audio_ctrl); + dp_write(MMSS_DP_AUDIO_CFG, audio_ctrl); + + /* order the audio enable/disable write before any subsequent writes */ + wmb(); +} + +static void dp_catalog_config_spd_header(struct dp_catalog_panel *panel) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 value, new_value, offset = 0; + u8 parity_byte; + + if (!panel || panel->stream_id >= DP_STREAM_MAX) + return; + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + if (panel->stream_id == DP_STREAM_1) + 
offset = MMSS_DP1_GENERIC0_0 - MMSS_DP_GENERIC0_0; + + /* Config header and parity byte 1 */ + value = dp_read(MMSS_DP_GENERIC1_0 + offset); + + new_value = 0x83; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + DP_DEBUG("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_write(MMSS_DP_GENERIC1_0 + offset, value); + + /* Config header and parity byte 2 */ + value = dp_read(MMSS_DP_GENERIC1_1 + offset); + + new_value = 0x1b; + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + DP_DEBUG("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_write(MMSS_DP_GENERIC1_1 + offset, value); + + /* Config header and parity byte 3 */ + value = dp_read(MMSS_DP_GENERIC1_1 + offset); + + new_value = (0x0 | (0x12 << 2)); + parity_byte = dp_header_get_parity(new_value); + value |= ((new_value << HEADER_BYTE_3_BIT) + | (parity_byte << PARITY_BYTE_3_BIT)); + DP_DEBUG("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", + new_value, parity_byte); + dp_write(MMSS_DP_GENERIC1_1 + offset, value); +} + +static void dp_catalog_panel_config_spd(struct dp_catalog_panel *panel) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data; + u32 spd_cfg = 0, spd_cfg2 = 0; + u8 *vendor = NULL, *product = NULL; + u32 offset = 0; + u32 sdp_cfg_off = 0; + u32 sdp_cfg2_off = 0; + + /* + * Source Device Information + * 00h unknown + * 01h Digital STB + * 02h DVD + * 03h D-VHS + * 04h HDD Video + * 05h DVC + * 06h DSC + * 07h Video CD + * 08h Game + * 09h PC general + * 0ah Bluray-Disc + * 0bh Super Audio CD + * 0ch HD DVD + * 0dh PMP + * 0eh-ffh reserved + */ + u32 device_type = 0; + + if (!panel || panel->stream_id >= DP_STREAM_MAX) + return; + + catalog = dp_catalog_get_priv(panel); + io_data = catalog->io.dp_link; + + if (panel->stream_id == DP_STREAM_1) + offset = 
MMSS_DP1_GENERIC0_0 - MMSS_DP_GENERIC0_0; + + dp_catalog_config_spd_header(panel); + + vendor = panel->spd_vendor_name; + product = panel->spd_product_description; + + dp_write(MMSS_DP_GENERIC1_2 + offset, + ((vendor[0] & 0x7f) | + ((vendor[1] & 0x7f) << 8) | + ((vendor[2] & 0x7f) << 16) | + ((vendor[3] & 0x7f) << 24))); + dp_write(MMSS_DP_GENERIC1_3 + offset, + ((vendor[4] & 0x7f) | + ((vendor[5] & 0x7f) << 8) | + ((vendor[6] & 0x7f) << 16) | + ((vendor[7] & 0x7f) << 24))); + dp_write(MMSS_DP_GENERIC1_4 + offset, + ((product[0] & 0x7f) | + ((product[1] & 0x7f) << 8) | + ((product[2] & 0x7f) << 16) | + ((product[3] & 0x7f) << 24))); + dp_write(MMSS_DP_GENERIC1_5 + offset, + ((product[4] & 0x7f) | + ((product[5] & 0x7f) << 8) | + ((product[6] & 0x7f) << 16) | + ((product[7] & 0x7f) << 24))); + dp_write(MMSS_DP_GENERIC1_6 + offset, + ((product[8] & 0x7f) | + ((product[9] & 0x7f) << 8) | + ((product[10] & 0x7f) << 16) | + ((product[11] & 0x7f) << 24))); + dp_write(MMSS_DP_GENERIC1_7 + offset, + ((product[12] & 0x7f) | + ((product[13] & 0x7f) << 8) | + ((product[14] & 0x7f) << 16) | + ((product[15] & 0x7f) << 24))); + dp_write(MMSS_DP_GENERIC1_8 + offset, device_type); + dp_write(MMSS_DP_GENERIC1_9 + offset, 0x00); + + if (panel->stream_id == DP_STREAM_1) { + sdp_cfg_off = MMSS_DP1_SDP_CFG - MMSS_DP_SDP_CFG; + sdp_cfg2_off = MMSS_DP1_SDP_CFG2 - MMSS_DP_SDP_CFG2; + } + + spd_cfg = dp_read(MMSS_DP_SDP_CFG + sdp_cfg_off); + /* GENERIC1_SDP for SPD Infoframe */ + spd_cfg |= BIT(18); + dp_write(MMSS_DP_SDP_CFG + sdp_cfg_off, spd_cfg); + + spd_cfg2 = dp_read(MMSS_DP_SDP_CFG2 + sdp_cfg2_off); + /* 28 data bytes for SPD Infoframe with GENERIC1 set */ + spd_cfg2 |= BIT(17); + dp_write(MMSS_DP_SDP_CFG2 + sdp_cfg2_off, spd_cfg2); + + dp_catalog_panel_sdp_update(panel); +} + +static void dp_catalog_get_io_buf(struct dp_catalog_private *catalog) +{ + struct dp_parser *parser = catalog->parser; + + dp_catalog_fill_io_buf(dp_ahb); + dp_catalog_fill_io_buf(dp_aux); + 
dp_catalog_fill_io_buf(dp_link); + dp_catalog_fill_io_buf(dp_p0); + dp_catalog_fill_io_buf(dp_phy); + dp_catalog_fill_io_buf(dp_ln_tx0); + dp_catalog_fill_io_buf(dp_ln_tx1); + dp_catalog_fill_io_buf(dp_pll); + dp_catalog_fill_io_buf(usb3_dp_com); + dp_catalog_fill_io_buf(dp_mmss_cc); + dp_catalog_fill_io_buf(hdcp_physical); + dp_catalog_fill_io_buf(dp_p1); + dp_catalog_fill_io_buf(dp_tcsr); +} + +/* Resolve every DP io region from the parser into catalog->io. */ +static void dp_catalog_get_io(struct dp_catalog_private *catalog) +{ + struct dp_parser *parser = catalog->parser; + + dp_catalog_fill_io(dp_ahb); + dp_catalog_fill_io(dp_aux); + dp_catalog_fill_io(dp_link); + dp_catalog_fill_io(dp_p0); + dp_catalog_fill_io(dp_phy); + dp_catalog_fill_io(dp_ln_tx0); + dp_catalog_fill_io(dp_ln_tx1); + dp_catalog_fill_io(dp_pll); + dp_catalog_fill_io(usb3_dp_com); + dp_catalog_fill_io(dp_mmss_cc); + dp_catalog_fill_io(hdcp_physical); + dp_catalog_fill_io(dp_p1); + dp_catalog_fill_io(dp_tcsr); +} + +/* Switch the catalog between "hw", "sw" and "all" execution modes by + * swapping the read/write accessors; "hw" drops any io shadow buffers, + * other modes (re)allocate them. + */ +static void dp_catalog_set_exe_mode(struct dp_catalog *dp_catalog, char *mode) +{ + struct dp_catalog_private *catalog; + + if (!dp_catalog) { + DP_ERR("invalid input\n"); + return; + } + + catalog = container_of(dp_catalog, struct dp_catalog_private, + dp_catalog); + + strlcpy(catalog->exe_mode, mode, sizeof(catalog->exe_mode)); + + if (!strcmp(catalog->exe_mode, "hw")) + catalog->parser->clear_io_buf(catalog->parser); + else + dp_catalog_get_io_buf(catalog); + + if (!strcmp(catalog->exe_mode, "hw") || + !strcmp(catalog->exe_mode, "all")) { + catalog->read = dp_read_hw; + catalog->write = dp_write_hw; + + /* sub can be NULL when dp_catalog_init() matched no phy version */ + if (dp_catalog->sub) { + dp_catalog->sub->read = dp_read_sub_hw; + dp_catalog->sub->write = dp_write_sub_hw; + } + } else { + catalog->read = dp_read_sw; + catalog->write = dp_write_sw; + + if (dp_catalog->sub) { + dp_catalog->sub->read = dp_read_sub_sw; + dp_catalog->sub->write = dp_write_sub_sw; + } + } +} + +static int dp_catalog_init(struct device *dev, struct dp_catalog *dp_catalog, + struct dp_parser *parser) +{ + int rc = 0; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct
dp_catalog_private, dp_catalog); + + if (parser->hw_cfg.phy_version >= DP_PHY_VERSION_4_2_0) + dp_catalog->sub = dp_catalog_get_v420(dev, dp_catalog, &catalog->io); + else if (parser->hw_cfg.phy_version == DP_PHY_VERSION_2_0_0) + dp_catalog->sub = dp_catalog_get_v200(dev, dp_catalog, &catalog->io); + else + goto end; + + if (IS_ERR(dp_catalog->sub)) { + rc = PTR_ERR(dp_catalog->sub); + dp_catalog->sub = NULL; + } else { + dp_catalog->sub->read = dp_read_sub_hw; + dp_catalog->sub->write = dp_write_sub_hw; + } +end: + return rc; +} + +void dp_catalog_put(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + + if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, struct dp_catalog_private, + dp_catalog); + + if (dp_catalog->sub && dp_catalog->sub->put) + dp_catalog->sub->put(dp_catalog); + + catalog->parser->clear_io_buf(catalog->parser); + devm_kfree(catalog->dev, catalog); +} + +struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_parser *parser) +{ + int rc = 0; + struct dp_catalog *dp_catalog; + struct dp_catalog_private *catalog; + struct dp_catalog_aux aux = { + .read_data = dp_catalog_aux_read_data, + .write_data = dp_catalog_aux_write_data, + .write_trans = dp_catalog_aux_write_trans, + .clear_trans = dp_catalog_aux_clear_trans, + .reset = dp_catalog_aux_reset, + .update_aux_cfg = dp_catalog_aux_update_cfg, + .enable = dp_catalog_aux_enable, + .setup = dp_catalog_aux_setup, + .get_irq = dp_catalog_aux_get_irq, + .clear_hw_interrupts = dp_catalog_aux_clear_hw_interrupts, + }; + struct dp_catalog_ctrl ctrl = { + .state_ctrl = dp_catalog_ctrl_state_ctrl, + .config_ctrl = dp_catalog_ctrl_config_ctrl, + .lane_mapping = dp_catalog_ctrl_lane_mapping, + .lane_pnswap = dp_catalog_ctrl_lane_pnswap, + .mainlink_ctrl = dp_catalog_ctrl_mainlink_ctrl, + .set_pattern = dp_catalog_ctrl_set_pattern, + .reset = dp_catalog_ctrl_reset, + .usb_reset = dp_catalog_ctrl_usb_reset, + .mainlink_ready = dp_catalog_ctrl_mainlink_ready, + 
.enable_irq = dp_catalog_ctrl_enable_irq, + .phy_reset = dp_catalog_ctrl_phy_reset, + .phy_lane_cfg = dp_catalog_ctrl_phy_lane_cfg, + .update_vx_px = dp_catalog_ctrl_update_vx_px, + .get_interrupt = dp_catalog_ctrl_get_interrupt, + .read_hdcp_status = dp_catalog_ctrl_read_hdcp_status, + .send_phy_pattern = dp_catalog_ctrl_send_phy_pattern, + .read_phy_pattern = dp_catalog_ctrl_read_phy_pattern, + .mst_config = dp_catalog_ctrl_mst_config, + .trigger_act = dp_catalog_ctrl_trigger_act, + .read_act_complete_sts = dp_catalog_ctrl_read_act_complete_sts, + .channel_alloc = dp_catalog_ctrl_channel_alloc, + .update_rg = dp_catalog_ctrl_update_rg, + .channel_dealloc = dp_catalog_ctrl_channel_dealloc, + .fec_config = dp_catalog_ctrl_fec_config, + .mainlink_levels = dp_catalog_ctrl_mainlink_levels, + .late_phy_init = dp_catalog_ctrl_late_phy_init, + .setup_misr = dp_catalog_ctrl_setup_misr, + .read_misr = dp_catalog_ctrl_read_misr, + }; + struct dp_catalog_hpd hpd = { + .config_hpd = dp_catalog_hpd_config_hpd, + .get_interrupt = dp_catalog_hpd_get_interrupt, + }; + struct dp_catalog_audio audio = { + .init = dp_catalog_audio_init, + .config_acr = dp_catalog_audio_config_acr, + .enable = dp_catalog_audio_enable, + .config_sdp = dp_catalog_audio_config_sdp, + .set_header = dp_catalog_audio_set_header, + .get_header = dp_catalog_audio_get_header, + }; + struct dp_catalog_panel panel = { + .timing_cfg = dp_catalog_panel_timing_cfg, + .config_hdr = dp_catalog_panel_config_hdr, + .config_sdp = dp_catalog_panel_config_sdp, + .tpg_config = dp_catalog_panel_tpg_cfg, + .config_spd = dp_catalog_panel_config_spd, + .config_misc = dp_catalog_panel_config_misc, + .set_colorspace = dp_catalog_panel_set_colorspace, + .config_msa = dp_catalog_panel_config_msa, + .update_transfer_unit = dp_catalog_panel_update_transfer_unit, + .config_ctrl = dp_catalog_panel_config_ctrl, + .config_dto = dp_catalog_panel_config_dto, + .dsc_cfg = dp_catalog_panel_dsc_cfg, + .pps_flush = 
dp_catalog_panel_pps_flush, + .dhdr_flush = dp_catalog_panel_dhdr_flush, + .dhdr_busy = dp_catalog_panel_dhdr_busy, + .get_src_crc = dp_catalog_panel_get_src_crc, + }; + + if (!dev || !parser) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + catalog = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL); + if (!catalog) { + rc = -ENOMEM; + goto error; + } + + catalog->dev = dev; + catalog->parser = parser; + + catalog->read = dp_read_hw; + catalog->write = dp_write_hw; + + dp_catalog_get_io(catalog); + + strlcpy(catalog->exe_mode, "hw", sizeof(catalog->exe_mode)); + + if (parser->valid_lt_params) { + ctrl.swing_hbr2_3 = parser->swing_hbr2_3; + ctrl.pre_emp_hbr2_3 = parser->pre_emp_hbr2_3; + + ctrl.swing_hbr_rbr = parser->swing_hbr_rbr; + ctrl.pre_emp_hbr_rbr = parser->pre_emp_hbr_rbr; + ctrl.valid_lt_params = true; + } else { + ctrl.swing_hbr2_3 = NULL; + ctrl.pre_emp_hbr2_3 = NULL; + + ctrl.swing_hbr_rbr = NULL; + ctrl.pre_emp_hbr_rbr = NULL; + ctrl.valid_lt_params = false; + } + + dp_catalog = &catalog->dp_catalog; + + dp_catalog->aux = aux; + dp_catalog->ctrl = ctrl; + dp_catalog->hpd = hpd; + dp_catalog->audio = audio; + dp_catalog->panel = panel; + + rc = dp_catalog_init(dev, dp_catalog, parser); + if (rc) { + dp_catalog_put(dp_catalog); + goto error; + } + + dp_catalog->set_exe_mode = dp_catalog_set_exe_mode; + dp_catalog->get_reg_dump = dp_catalog_reg_dump; + + return dp_catalog; +error: + return ERR_PTR(rc); +} diff --git a/msm/dp/dp_catalog.h b/msm/dp/dp_catalog.h new file mode 100644 index 000000000..adbaa8981 --- /dev/null +++ b/msm/dp/dp_catalog.h @@ -0,0 +1,376 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DP_CATALOG_H_ +#define _DP_CATALOG_H_ + +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)) +#include +#else +#include +#endif +#include + +#include "dp_parser.h" + +/* interrupts */ +#define DP_INTR_HPD BIT(0) +#define DP_INTR_AUX_I2C_DONE BIT(3) +#define DP_INTR_WRONG_ADDR BIT(6) +#define DP_INTR_TIMEOUT BIT(9) +#define DP_INTR_NACK_DEFER BIT(12) +#define DP_INTR_WRONG_DATA_CNT BIT(15) +#define DP_INTR_I2C_NACK BIT(18) +#define DP_INTR_I2C_DEFER BIT(21) +#define DP_INTR_PLL_UNLOCKED BIT(24) +#define DP_INTR_AUX_ERROR BIT(27) + +#define DP_INTR_READY_FOR_VIDEO BIT(0) +#define DP_INTR_IDLE_PATTERN_SENT BIT(3) +#define DP_INTR_FRAME_END BIT(6) +#define DP_INTR_CRC_UPDATED BIT(9) +#define DP_INTR_SST_FIFO_UNDERFLOW BIT(28) + +#define DP_INTR_MST_DP0_VCPF_SENT BIT(0) +#define DP_INTR_MST_DP1_VCPF_SENT BIT(3) + +#define DP_INTR_SST_ML_FIFO_OVERFLOW BIT(12) +#define DP_INTR_MST0_ML_FIFO_OVERFLOW BIT(15) +#define DP_INTR_MST1_ML_FIFO_OVERFLOW BIT(18) +#define DP_INTR_DP1_FRAME_END BIT(21) +#define DP_INTR_SDP0_COLLISION BIT(24) +#define DP_INTR_SDP1_COLLISION BIT(27) + +#define DP_INTR_DP0_BACKPRESSURE_ERROR (BIT(1) | BIT(0)) +#define DP_INTR_DP1_BACKPRESSURE_ERROR (BIT(5) | BIT(4)) +#define DP_INTR_SST_BS_LATE BIT(8) + + +#define DP_MAX_TIME_SLOTS 64 + +/* stream id */ +enum dp_stream_id { + DP_STREAM_0, + DP_STREAM_1, + DP_STREAM_MAX, +}; + +struct dp_catalog_vsc_sdp_colorimetry { + struct dp_sdp_header header; + u8 data[32]; +}; + +struct dp_misr40_data { + u32 ctrl_misr[8]; + u32 phy_misr[8]; +}; + +struct dp_catalog_aux { + u32 data; + u32 isr; + + u32 (*read_data)(struct dp_catalog_aux *aux); + int (*write_data)(struct dp_catalog_aux *aux); + int (*write_trans)(struct dp_catalog_aux *aux); + int (*clear_trans)(struct dp_catalog_aux *aux, bool read); + void (*reset)(struct dp_catalog_aux *aux); + void (*enable)(struct dp_catalog_aux *aux, bool enable); + void (*update_aux_cfg)(struct dp_catalog_aux *aux, + struct dp_aux_cfg *cfg, enum 
dp_phy_aux_config_type type); + void (*setup)(struct dp_catalog_aux *aux, + struct dp_aux_cfg *aux_cfg); + void (*get_irq)(struct dp_catalog_aux *aux, bool cmd_busy); + void (*clear_hw_interrupts)(struct dp_catalog_aux *aux); +}; + +struct dp_catalog_ctrl { + u32 isr; + u32 isr3; + u32 isr5; + u32 isr6; + + u8 *swing_hbr2_3; + u8 *pre_emp_hbr2_3; + u8 *swing_hbr_rbr; + u8 *pre_emp_hbr_rbr; + bool valid_lt_params; + + void (*state_ctrl)(struct dp_catalog_ctrl *ctrl, u32 state); + void (*config_ctrl)(struct dp_catalog_ctrl *ctrl, u8 ln_cnt); + void (*lane_mapping)(struct dp_catalog_ctrl *ctrl, bool flipped, + char *lane_map); + void (*lane_pnswap)(struct dp_catalog_ctrl *ctrl, u8 ln_pnswap); + void (*mainlink_ctrl)(struct dp_catalog_ctrl *ctrl, bool enable); + void (*set_pattern)(struct dp_catalog_ctrl *ctrl, u32 pattern); + void (*reset)(struct dp_catalog_ctrl *ctrl); + void (*usb_reset)(struct dp_catalog_ctrl *ctrl, bool flip); + bool (*mainlink_ready)(struct dp_catalog_ctrl *ctrl); + void (*enable_irq)(struct dp_catalog_ctrl *ctrl, bool enable); + void (*phy_reset)(struct dp_catalog_ctrl *ctrl); + void (*phy_lane_cfg)(struct dp_catalog_ctrl *ctrl, bool flipped, + u8 lane_cnt); + void (*update_vx_px)(struct dp_catalog_ctrl *ctrl, u8 v_level, + u8 p_level, bool high); + void (*get_interrupt)(struct dp_catalog_ctrl *ctrl); + u32 (*read_hdcp_status)(struct dp_catalog_ctrl *ctrl); + void (*send_phy_pattern)(struct dp_catalog_ctrl *ctrl, + u32 pattern); + u32 (*read_phy_pattern)(struct dp_catalog_ctrl *ctrl); + void (*mst_config)(struct dp_catalog_ctrl *ctrl, bool enable); + void (*trigger_act)(struct dp_catalog_ctrl *ctrl); + void (*read_act_complete_sts)(struct dp_catalog_ctrl *ctrl, bool *sts); + void (*channel_alloc)(struct dp_catalog_ctrl *ctrl, + u32 ch, u32 ch_start_timeslot, u32 tot_ch_cnt); + void (*update_rg)(struct dp_catalog_ctrl *ctrl, u32 ch, u32 x_int, + u32 y_frac_enum); + void (*channel_dealloc)(struct dp_catalog_ctrl *ctrl, + u32 ch, u32 
ch_start_timeslot, u32 tot_ch_cnt); + void (*fec_config)(struct dp_catalog_ctrl *ctrl, bool enable); + void (*mainlink_levels)(struct dp_catalog_ctrl *ctrl, u8 lane_cnt); + + int (*late_phy_init)(struct dp_catalog_ctrl *ctrl, + u8 lane_cnt, bool flipped); + int (*setup_misr)(struct dp_catalog_ctrl *ctrl); + int (*read_misr)(struct dp_catalog_ctrl *ctrl, struct dp_misr40_data *data); +}; + +struct dp_catalog_hpd { + void (*config_hpd)(struct dp_catalog_hpd *hpd, bool en); + u32 (*get_interrupt)(struct dp_catalog_hpd *hpd); +}; + +#define HEADER_BYTE_2_BIT 0 +#define PARITY_BYTE_2_BIT 8 +#define HEADER_BYTE_1_BIT 16 +#define PARITY_BYTE_1_BIT 24 +#define HEADER_BYTE_3_BIT 16 +#define PARITY_BYTE_3_BIT 24 + +enum dp_catalog_audio_sdp_type { + DP_AUDIO_SDP_STREAM, + DP_AUDIO_SDP_TIMESTAMP, + DP_AUDIO_SDP_INFOFRAME, + DP_AUDIO_SDP_COPYMANAGEMENT, + DP_AUDIO_SDP_ISRC, + DP_AUDIO_SDP_MAX, +}; + +enum dp_catalog_audio_header_type { + DP_AUDIO_SDP_HEADER_1, + DP_AUDIO_SDP_HEADER_2, + DP_AUDIO_SDP_HEADER_3, + DP_AUDIO_SDP_HEADER_MAX, +}; + +struct dp_catalog_audio { + enum dp_catalog_audio_sdp_type sdp_type; + enum dp_catalog_audio_header_type sdp_header; + u32 data; + + enum dp_stream_id stream_id; + + void (*init)(struct dp_catalog_audio *audio); + void (*enable)(struct dp_catalog_audio *audio); + void (*config_acr)(struct dp_catalog_audio *audio); + void (*config_sdp)(struct dp_catalog_audio *audio); + void (*set_header)(struct dp_catalog_audio *audio); + void (*get_header)(struct dp_catalog_audio *audio); +}; + +struct dp_dsc_cfg_data { + bool dsc_en; + bool continuous_pps; + char pps[128]; + u32 pps_len; + u32 pps_word[32]; + u32 pps_word_len; + u8 parity[32]; + u8 parity_len; + u32 parity_word[8]; + u32 parity_word_len; + u32 slice_per_pkt; + u32 bytes_per_pkt; + u32 eol_byte_num; + u32 be_in_lane; + u32 dto_en; + u32 dto_n; + u32 dto_d; + u32 dto_count; +}; + +struct dp_catalog_panel { + u32 total; + u32 sync_start; + u32 width_blanking; + u32 dp_active; + u8 
*spd_vendor_name; + u8 *spd_product_description; + + struct dp_catalog_vsc_sdp_colorimetry vsc_colorimetry; + struct dp_sdp_header dhdr_vsif_sdp; + struct dp_sdp_header shdr_if_sdp; + struct drm_msm_ext_hdr_metadata hdr_meta; + + /* TPG */ + u32 hsync_period; + u32 vsync_period; + u32 display_v_start; + u32 display_v_end; + u32 v_sync_width; + u32 hsync_ctl; + u32 display_hctl; + + /* TU */ + u32 dp_tu; + u32 valid_boundary; + u32 valid_boundary2; + + u32 misc_val; + + enum dp_stream_id stream_id; + + bool widebus_en; + struct dp_dsc_cfg_data dsc; + + int (*timing_cfg)(struct dp_catalog_panel *panel); + void (*config_hdr)(struct dp_catalog_panel *panel, bool en, + u32 dhdr_max_pkts, bool flush); + void (*config_sdp)(struct dp_catalog_panel *panel, bool en); + int (*set_colorspace)(struct dp_catalog_panel *panel, + bool vsc_supported); + void (*tpg_config)(struct dp_catalog_panel *panel, u32 pattern); + void (*config_spd)(struct dp_catalog_panel *panel); + void (*config_misc)(struct dp_catalog_panel *panel); + void (*config_msa)(struct dp_catalog_panel *panel, + u32 rate, u32 stream_rate_khz); + void (*update_transfer_unit)(struct dp_catalog_panel *panel); + void (*config_ctrl)(struct dp_catalog_panel *panel, u32 cfg); + void (*config_dto)(struct dp_catalog_panel *panel, bool ack); + void (*dsc_cfg)(struct dp_catalog_panel *panel); + void (*pps_flush)(struct dp_catalog_panel *panel); + void (*dhdr_flush)(struct dp_catalog_panel *panel); + bool (*dhdr_busy)(struct dp_catalog_panel *panel); + int (*get_src_crc)(struct dp_catalog_panel *panel, u16 *crc); +}; + +struct dp_catalog; +struct dp_catalog_sub { + u32 (*read)(struct dp_catalog *dp_catalog, + struct dp_io_data *io_data, u32 offset); + void (*write)(struct dp_catalog *dp_catalog, + struct dp_io_data *io_data, u32 offset, u32 data); + + void (*put)(struct dp_catalog *catalog); +}; + +struct dp_catalog_io { + struct dp_io_data *dp_ahb; + struct dp_io_data *dp_aux; + struct dp_io_data *dp_link; + struct dp_io_data 
*dp_p0; + struct dp_io_data *dp_phy; + struct dp_io_data *dp_ln_tx0; + struct dp_io_data *dp_ln_tx1; + struct dp_io_data *dp_mmss_cc; + struct dp_io_data *dp_pll; + struct dp_io_data *usb3_dp_com; + struct dp_io_data *hdcp_physical; + struct dp_io_data *dp_p1; + struct dp_io_data *dp_tcsr; +}; + +/* Top-level catalog: per-module op tables plus the version-specific + * sub-catalog accessors. + */ +struct dp_catalog { + struct dp_catalog_aux aux; + struct dp_catalog_ctrl ctrl; + struct dp_catalog_audio audio; + struct dp_catalog_panel panel; + struct dp_catalog_hpd hpd; + + struct dp_catalog_sub *sub; + + void (*set_exe_mode)(struct dp_catalog *dp_catalog, char *mode); + int (*get_reg_dump)(struct dp_catalog *dp_catalog, + char *mode, u8 **out_buf, u32 *out_buf_len); +}; + +/* Nibble generator g0 used by dp_header_get_parity(); together with g1 this + * implements a nibble-wise ECC over SDP header bytes -- presumably the header + * parity defined by the DP spec; confirm against the VESA DP standard. + */ +static inline u8 dp_ecc_get_g0_value(u8 data) +{ + u8 c[4]; + u8 g[4]; + u8 ret_data = 0; + u8 i; + + /* split the low nibble into individual bits */ + for (i = 0; i < 4; i++) + c[i] = (data >> i) & 0x01; + + g[0] = c[3]; + g[1] = c[0] ^ c[3]; + g[2] = c[1]; + g[3] = c[2]; + + /* reassemble the generated bits into a nibble */ + for (i = 0; i < 4; i++) + ret_data = ((g[i] & 0x01) << i) | ret_data; + + return ret_data; +} + +/* Nibble generator g1, companion to dp_ecc_get_g0_value(). */ +static inline u8 dp_ecc_get_g1_value(u8 data) +{ + u8 c[4]; + u8 g[4]; + u8 ret_data = 0; + u8 i; + + for (i = 0; i < 4; i++) + c[i] = (data >> i) & 0x01; + + g[0] = c[0] ^ c[3]; + g[1] = c[0] ^ c[1] ^ c[3]; + g[2] = c[1] ^ c[2]; + g[3] = c[2] ^ c[3]; + + for (i = 0; i < 4; i++) + ret_data = ((g[i] & 0x01) << i) | ret_data; + + return ret_data; +} + +/* Compute the parity/ECC byte for an SDP header word: feeds the input one + * nibble at a time through the g0/g1 generators and returns (x0 << 4) | x1. + */ +static inline u8 dp_header_get_parity(u32 data) +{ + u8 x0 = 0; + u8 x1 = 0; + u8 ci = 0; + u8 iData = 0; + u8 i = 0; + u8 parity_byte; + u8 num_byte = (data > 0xFF) ?
8 : 2; + + for (i = 0; i < num_byte; i++) { + iData = (data >> i*4) & 0xF; + + ci = iData ^ x1; + x1 = x0 ^ dp_ecc_get_g1_value(ci); + x0 = dp_ecc_get_g0_value(ci); + } + + parity_byte = x1 | (x0 << 4); + + return parity_byte; +} + +struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_parser *parser); +void dp_catalog_put(struct dp_catalog *catalog); + +struct dp_catalog_sub *dp_catalog_get_v420(struct device *dev, + struct dp_catalog *catalog, struct dp_catalog_io *io); + +struct dp_catalog_sub *dp_catalog_get_v200(struct device *dev, + struct dp_catalog *catalog, struct dp_catalog_io *io); + +u32 dp_catalog_get_dp_core_version(struct dp_catalog *dp_catalog); +u32 dp_catalog_get_dp_phy_version(struct dp_catalog *dp_catalog); +#endif /* _DP_CATALOG_H_ */ diff --git a/msm/dp/dp_catalog_v200.c b/msm/dp/dp_catalog_v200.c new file mode 100644 index 000000000..97d78a120 --- /dev/null +++ b/msm/dp/dp_catalog_v200.c @@ -0,0 +1,272 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. 
+ */ + +#include + +#include "dp_catalog.h" +#include "dp_reg.h" +#include "dp_debug.h" + +#define dp_catalog_get_priv_v200(x) ({ \ + struct dp_catalog *catalog; \ + catalog = container_of(x, struct dp_catalog, x); \ + container_of(catalog->sub, \ + struct dp_catalog_private_v200, sub); \ +}) + +#define dp_read(x) ({ \ + catalog->sub.read(catalog->dpc, io_data, x); \ +}) + +#define dp_write(x, y) ({ \ + catalog->sub.write(catalog->dpc, io_data, x, y); \ +}) + +struct dp_catalog_private_v200 { + struct device *dev; + struct dp_catalog_io *io; + struct dp_catalog *dpc; + struct dp_catalog_sub sub; +}; + +static void dp_catalog_aux_clear_hw_int_v200(struct dp_catalog_aux *aux) +{ + struct dp_catalog_private_v200 *catalog; + struct dp_io_data *io_data; + u32 data = 0; + + if (!aux) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv_v200(aux); + io_data = catalog->io->dp_phy; + + data = dp_read(DP_PHY_AUX_INTERRUPT_STATUS_V200); + + dp_write(DP_PHY_AUX_INTERRUPT_CLEAR_V200, 0x1f); + wmb(); /* make sure 0x1f is written before next write */ + + dp_write(DP_PHY_AUX_INTERRUPT_CLEAR_V200, 0x9f); + wmb(); /* make sure 0x9f is written before next write */ + + dp_write(DP_PHY_AUX_INTERRUPT_CLEAR_V200, 0); + wmb(); /* make sure register is cleared */ +} + +static void dp_catalog_aux_setup_v200(struct dp_catalog_aux *aux, + struct dp_aux_cfg *cfg) +{ + struct dp_catalog_private_v200 *catalog; + struct dp_io_data *io_data; + int i = 0, sw_reset = 0; + + if (!aux || !cfg) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv_v200(aux); + io_data = catalog->io->dp_ahb; + + sw_reset = dp_read(DP_SW_RESET); + + sw_reset |= BIT(0); + dp_write(DP_SW_RESET, sw_reset); + usleep_range(1000, 1010); /* h/w recommended delay */ + + sw_reset &= ~BIT(0); + dp_write(DP_SW_RESET, sw_reset); + + dp_write(DP_PHY_CTRL, 0x4); /* bit 2 */ + udelay(1000); + dp_write(DP_PHY_CTRL, 0x0); /* bit 2 */ + wmb(); /* make sure programming happened */ + + 
io_data = catalog->io->dp_tcsr; + dp_write(0x4c, 0x1); /* bit 0 & 2 */ + wmb(); /* make sure programming happened */ + + io_data = catalog->io->dp_phy; + dp_write(DP_PHY_PD_CTL, 0x3c); + wmb(); /* make sure PD programming happened */ + dp_write(DP_PHY_PD_CTL, 0x3d); + wmb(); /* make sure PD programming happened */ + + /* DP AUX CFG register programming */ + io_data = catalog->io->dp_phy; + for (i = 0; i < PHY_AUX_CFG_MAX; i++) + dp_write(cfg[i].offset, cfg[i].lut[cfg[i].current_index]); + + dp_write(DP_PHY_AUX_INTERRUPT_MASK_V200, 0x1F); + wmb(); /* make sure AUX configuration is done before enabling it */ +} + +static void dp_catalog_panel_config_msa_v200(struct dp_catalog_panel *panel, + u32 rate, u32 stream_rate_khz) +{ + u32 pixel_m, pixel_n; + u32 mvid, nvid; + u32 const nvid_fixed = 0x8000; + u32 const link_rate_hbr2 = 540000; + u32 const link_rate_hbr3 = 810000; + struct dp_catalog_private_v200 *catalog; + struct dp_io_data *io_data; + u32 strm_reg_off = 0; + u32 mvid_reg_off = 0, nvid_reg_off = 0; + + if (!panel) { + DP_ERR("invalid input\n"); + return; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id:%d\n", panel->stream_id); + return; + } + + catalog = dp_catalog_get_priv_v200(panel); + io_data = catalog->io->dp_mmss_cc; + + if (panel->stream_id == DP_STREAM_1) + strm_reg_off = MMSS_DP_PIXEL1_M_V200 - + MMSS_DP_PIXEL_M_V200; + + pixel_m = dp_read(MMSS_DP_PIXEL_M_V200 + strm_reg_off); + pixel_n = dp_read(MMSS_DP_PIXEL_N_V200 + strm_reg_off); + DP_DEBUG("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n); + + mvid = (pixel_m & 0xFFFF) * 5; + nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF); + + if (nvid < nvid_fixed) { + u32 temp; + + temp = (nvid_fixed / nvid) * nvid; + mvid = (nvid_fixed / nvid) * mvid; + nvid = temp; + } + + DP_DEBUG("rate = %d\n", rate); + + if (panel->widebus_en) + mvid <<= 1; + + if (link_rate_hbr2 == rate) + nvid *= 2; + + if (link_rate_hbr3 == rate) + nvid *= 3; + + io_data = catalog->io->dp_link; + + if 
(panel->stream_id == DP_STREAM_1) { + mvid_reg_off = DP1_SOFTWARE_MVID - DP_SOFTWARE_MVID; + nvid_reg_off = DP1_SOFTWARE_NVID - DP_SOFTWARE_NVID; + } + + DP_DEBUG("mvid=0x%x, nvid=0x%x\n", mvid, nvid); + dp_write(DP_SOFTWARE_MVID + mvid_reg_off, mvid); + dp_write(DP_SOFTWARE_NVID + nvid_reg_off, nvid); +} + +static void dp_catalog_ctrl_lane_mapping_v200(struct dp_catalog_ctrl *ctrl, + bool flipped, char *lane_map) +{ + struct dp_catalog_private_v200 *catalog; + struct dp_io_data *io_data; + u8 l_map[4] = { 0 }, i = 0, j = 0; + u32 lane_map_reg = 0; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv_v200(ctrl); + io_data = catalog->io->dp_link; + + /* For flip case, swap phy lanes with ML0 and ML3, ML1 and ML2 */ + if (flipped) { + for (i = 0; i < DP_MAX_PHY_LN; i++) { + if (lane_map[i] == DP_ML0) { + for (j = 0; j < DP_MAX_PHY_LN; j++) { + if (lane_map[j] == DP_ML3) { + l_map[i] = DP_ML3; + l_map[j] = DP_ML0; + break; + } + } + } else if (lane_map[i] == DP_ML1) { + for (j = 0; j < DP_MAX_PHY_LN; j++) { + if (lane_map[j] == DP_ML2) { + l_map[i] = DP_ML2; + l_map[j] = DP_ML1; + break; + } + } + } + } + } else { + /* Normal orientation */ + for (i = 0; i < DP_MAX_PHY_LN; i++) + l_map[i] = lane_map[i]; + } + + lane_map_reg = ((l_map[3]&3)<<6)|((l_map[2]&3)<<4)|((l_map[1]&3)<<2) + |(l_map[0]&3); + + dp_write(DP_LOGICAL2PHYSICAL_LANE_MAPPING, lane_map_reg); +} + +static void dp_catalog_ctrl_usb_reset_v200(struct dp_catalog_ctrl *ctrl, + bool flip) +{ +} + +static void dp_catalog_put_v200(struct dp_catalog *catalog) +{ + struct dp_catalog_private_v200 *catalog_priv; + + if (!catalog) + return; + + catalog_priv = container_of(catalog->sub, + struct dp_catalog_private_v200, sub); + + devm_kfree(catalog_priv->dev, catalog_priv); +} + +struct dp_catalog_sub *dp_catalog_get_v200(struct device *dev, + struct dp_catalog *catalog, struct dp_catalog_io *io) +{ + struct dp_catalog_private_v200 *catalog_priv; + + if (!dev || !catalog) { + 
DP_ERR("invalid input\n"); + return ERR_PTR(-EINVAL); + } + + catalog_priv = devm_kzalloc(dev, sizeof(*catalog_priv), GFP_KERNEL); + if (!catalog_priv) + return ERR_PTR(-ENOMEM); + + catalog_priv->dev = dev; + catalog_priv->io = io; + catalog_priv->dpc = catalog; + + catalog_priv->sub.put = dp_catalog_put_v200; + + catalog->aux.clear_hw_interrupts = dp_catalog_aux_clear_hw_int_v200; + catalog->aux.setup = dp_catalog_aux_setup_v200; + + catalog->panel.config_msa = dp_catalog_panel_config_msa_v200; + + catalog->ctrl.lane_mapping = dp_catalog_ctrl_lane_mapping_v200; + catalog->ctrl.usb_reset = dp_catalog_ctrl_usb_reset_v200; + + return &catalog_priv->sub; +} diff --git a/msm/dp/dp_catalog_v420.c b/msm/dp/dp_catalog_v420.c new file mode 100644 index 000000000..cbadd9251 --- /dev/null +++ b/msm/dp/dp_catalog_v420.c @@ -0,0 +1,346 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + + +#include "dp_catalog.h" +#include "dp_reg.h" +#include "dp_debug.h" +#include "dp_pll.h" +#include +#include + +#define dp_catalog_get_priv_v420(x) ({ \ + struct dp_catalog *catalog; \ + catalog = container_of(x, struct dp_catalog, x); \ + container_of(catalog->sub, \ + struct dp_catalog_private_v420, sub); \ +}) + +#define dp_read(x) ({ \ + catalog->sub.read(catalog->dpc, io_data, x); \ +}) + +#define dp_write(x, y) ({ \ + catalog->sub.write(catalog->dpc, io_data, x, y); \ +}) + +#define MAX_VOLTAGE_LEVELS 4 +#define MAX_PRE_EMP_LEVELS 4 + +static u8 const vm_pre_emphasis[MAX_VOLTAGE_LEVELS][MAX_PRE_EMP_LEVELS] = { + {0x00, 0x0E, 0x16, 0xFF}, /* pe0, 0 db */ + {0x00, 0x0E, 0x16, 0xFF}, /* pe1, 3.5 db */ + {0x00, 0x0E, 0xFF, 0xFF}, /* pe2, 6.0 db */ + {0xFF, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */ +}; + +/* voltage swing, 0.2v and 1.0v are not supported */ +static u8 const vm_voltage_swing[MAX_VOLTAGE_LEVELS][MAX_PRE_EMP_LEVELS] = { + {0x07, 0x0F, 0x16, 0xFF}, /* sw0, 0.4v */ + {0x11, 0x1E, 0x1F, 0xFF}, /* sw1, 0.6 v */ + {0x1A, 0x1F, 0xFF, 0xFF}, /* sw2, 0.8 v */ + {0xFF, 0xFF, 0xFF, 0xFF} /* sw3, 1.2 v, optional */ +}; + +struct dp_catalog_private_v420 { + struct device *dev; + struct dp_catalog_sub sub; + struct dp_catalog_io *io; + struct dp_catalog *dpc; +}; + +static void dp_catalog_aux_setup_v420(struct dp_catalog_aux *aux, + struct dp_aux_cfg *cfg) +{ + struct dp_catalog_private_v420 *catalog; + struct dp_io_data *io_data; + int i = 0; + u32 phy_version; + if (!aux || !cfg) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv_v420(aux); + + io_data = catalog->io->dp_phy; + dp_write(DP_PHY_PD_CTL, 0x67); + wmb(); /* make sure PD programming happened */ + + phy_version = dp_catalog_get_dp_phy_version(catalog->dpc); + if (phy_version >= 0x60000000) { + /* Turn on BIAS current for PHY/PLL */ + io_data = catalog->io->dp_pll; + dp_write(QSERDES_COM_BIAS_EN_CLKBUFLR_EN_V600, 0x17); + wmb(); /* make sure BIAS programming happened */ + 
} else { + /* Turn on BIAS current for PHY/PLL */ + io_data = catalog->io->dp_pll; + dp_write(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x17); + wmb(); /* make sure BIAS programming happened */ + } + + io_data = catalog->io->dp_phy; + /* DP AUX CFG register programming */ + for (i = 0; i < PHY_AUX_CFG_MAX; i++) { + DP_DEBUG("%s: offset=0x%08x, value=0x%08x\n", + dp_phy_aux_config_type_to_string(i), + cfg[i].offset, cfg[i].lut[cfg[i].current_index]); + dp_write(cfg[i].offset, cfg[i].lut[cfg[i].current_index]); + } + wmb(); /* make sure DP AUX CFG programming happened */ + + dp_write(DP_PHY_AUX_INTERRUPT_MASK_V420, 0x1F); +} + +static void dp_catalog_aux_clear_hw_int_v420(struct dp_catalog_aux *aux) +{ + struct dp_catalog_private_v420 *catalog; + struct dp_io_data *io_data; + u32 data = 0; + u32 phy_version; + if (!aux) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv_v420(aux); + phy_version = dp_catalog_get_dp_phy_version(catalog->dpc); + io_data = catalog->io->dp_phy; + if (phy_version >= 0x60000000) + data = dp_read(DP_PHY_AUX_INTERRUPT_STATUS_V600); + else + data = dp_read(DP_PHY_AUX_INTERRUPT_STATUS_V420); + + dp_write(DP_PHY_AUX_INTERRUPT_CLEAR_V420, 0x1f); + wmb(); /* make sure 0x1f is written before next write */ + + dp_write(DP_PHY_AUX_INTERRUPT_CLEAR_V420, 0x9f); + wmb(); /* make sure 0x9f is written before next write */ + + dp_write(DP_PHY_AUX_INTERRUPT_CLEAR_V420, 0); + wmb(); /* make sure register is cleared */ +} + +static void dp_catalog_panel_config_msa_v420(struct dp_catalog_panel *panel, + u32 rate, u32 stream_rate_khz) +{ + u32 mvid, nvid, mvid_off = 0, nvid_off = 0; + u32 const nvid_fixed = 0x8000; + struct dp_catalog *dp_catalog; + struct dp_catalog_private_v420 *catalog; + struct dp_io_data *io_data; + unsigned long num, den; + u32 const input_scale = 10; + u64 f1, f2; + + if (!panel || !rate) { + DP_ERR("invalid input\n"); + return; + } + + if (panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream id:%d\n", 
panel->stream_id); + return; + } + + dp_catalog = container_of(panel, struct dp_catalog, panel); + catalog = container_of(dp_catalog->sub, struct dp_catalog_private_v420, sub); + + /* + * MND calculator requires the target clock to be less than half the input clock. To meet + * this requirement, the input clock is scaled here and then the resulting M value is + * scaled by the same factor to offset the pre-scale. + */ + rational_best_approximation(rate * input_scale, stream_rate_khz, + (unsigned long)(1 << 16) - 1, + (unsigned long)(1 << 16) - 1, &den, &num); + + mvid = (num & 0xFFFF); + nvid = (den & 0xFFFF); + mvid *= input_scale; + + if (nvid < nvid_fixed) { + f1 = drm_fixp_from_fraction(nvid_fixed, nvid); + f2 = drm_fixp_from_fraction(mvid, 1); + f1 = drm_fixp_mul(f1, f2); + mvid = drm_fixp2int(f1); + nvid = nvid_fixed; + } + + io_data = catalog->io->dp_link; + + if (panel->stream_id == DP_STREAM_1) { + mvid_off = DP1_SOFTWARE_MVID - DP_SOFTWARE_MVID; + nvid_off = DP1_SOFTWARE_NVID - DP_SOFTWARE_NVID; + } + + DP_DEBUG("pclk=%u, lclk=%u, mvid=0x%x, nvid=0x%x\n", stream_rate_khz, rate, mvid, nvid); + dp_write(DP_SOFTWARE_MVID + mvid_off, mvid); + dp_write(DP_SOFTWARE_NVID + nvid_off, nvid); +} + +static void dp_catalog_ctrl_phy_lane_cfg_v420(struct dp_catalog_ctrl *ctrl, + bool flipped, u8 ln_cnt) +{ + u32 info = 0x0; + struct dp_catalog_private_v420 *catalog; + struct dp_io_data *io_data; + u8 orientation = BIT(!!flipped); + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + catalog = dp_catalog_get_priv_v420(ctrl); + io_data = catalog->io->dp_phy; + + info |= (ln_cnt & 0x0F); + info |= ((orientation & 0x0F) << 4); + DP_DEBUG("Shared Info = 0x%x\n", info); + + dp_write(DP_PHY_SPARE0_V420, info); +} + +static void dp_catalog_ctrl_update_vx_px_v420(struct dp_catalog_ctrl *ctrl, + u8 v_level, u8 p_level, bool high) +{ + struct dp_catalog_private_v420 *catalog; + struct dp_io_data *io_data; + u8 value0, value1; + u32 version; + u32 phy_version; + int 
idx; + + if (!ctrl || !((v_level < MAX_VOLTAGE_LEVELS) + && (p_level < MAX_PRE_EMP_LEVELS))) { + DP_ERR("invalid input\n"); + return; + } + + DP_DEBUG("hw: v=%d p=%d, high=%d\n", v_level, p_level, high); + + catalog = dp_catalog_get_priv_v420(ctrl); + phy_version = dp_catalog_get_dp_phy_version(catalog->dpc); + + io_data = catalog->io->dp_ahb; + version = dp_read(DP_HW_VERSION); + DP_DEBUG("version: 0x%x\n", version); + + /* + * For DP controller versions >= 1.2.3 + */ + if (version >= 0x10020003 && ctrl->valid_lt_params) { + idx = v_level * MAX_VOLTAGE_LEVELS + p_level; + if (high) { + value0 = ctrl->swing_hbr2_3[idx]; + value1 = ctrl->pre_emp_hbr2_3[idx]; + } else { + value0 = ctrl->swing_hbr_rbr[idx]; + value1 = ctrl->pre_emp_hbr_rbr[idx]; + } + } else { + value0 = vm_voltage_swing[v_level][p_level]; + value1 = vm_pre_emphasis[v_level][p_level]; + } + + /* program default setting first */ + io_data = catalog->io->dp_ln_tx0; + dp_write(TXn_TX_DRV_LVL_V420, 0x2A); + dp_write(TXn_TX_EMP_POST1_LVL, 0x20); + + io_data = catalog->io->dp_ln_tx1; + dp_write(TXn_TX_DRV_LVL_V420, 0x2A); + dp_write(TXn_TX_EMP_POST1_LVL, 0x20); + + /* Enable MUX to use Cursor values from these registers */ + value0 |= BIT(5); + value1 |= BIT(5); + + /* Configure host and panel only if both values are allowed */ + if (value0 != 0xFF && value1 != 0xFF) { + io_data = catalog->io->dp_ln_tx0; + dp_write(TXn_TX_DRV_LVL_V420, value0); + dp_write(TXn_TX_EMP_POST1_LVL, value1); + + io_data = catalog->io->dp_ln_tx1; + dp_write(TXn_TX_DRV_LVL_V420, value0); + dp_write(TXn_TX_EMP_POST1_LVL, value1); + + DP_DEBUG("hw: vx_value=0x%x px_value=0x%x\n", + value0, value1); + } else { + DP_ERR("invalid vx (0x%x=0x%x), px (0x%x=0x%x)\n", + v_level, value0, p_level, value1); + } +} + +static void dp_catalog_ctrl_lane_pnswap_v420(struct dp_catalog_ctrl *ctrl, + u8 ln_pnswap) +{ + struct dp_catalog_private_v420 *catalog; + struct dp_io_data *io_data; + u32 cfg0, cfg1; + + catalog = dp_catalog_get_priv_v420(ctrl); 
+ + cfg0 = 0x0a; + cfg1 = 0x0a; + + cfg0 |= ((ln_pnswap >> 0) & 0x1) << 0; + cfg0 |= ((ln_pnswap >> 1) & 0x1) << 2; + cfg1 |= ((ln_pnswap >> 2) & 0x1) << 0; + cfg1 |= ((ln_pnswap >> 3) & 0x1) << 2; + + io_data = catalog->io->dp_ln_tx0; + dp_write(TXn_TX_POL_INV_V420, cfg0); + + io_data = catalog->io->dp_ln_tx1; + dp_write(TXn_TX_POL_INV_V420, cfg1); +} + +static void dp_catalog_put_v420(struct dp_catalog *catalog) +{ + struct dp_catalog_private_v420 *catalog_priv; + + if (!catalog) + return; + + catalog_priv = container_of(catalog->sub, + struct dp_catalog_private_v420, sub); + devm_kfree(catalog_priv->dev, catalog_priv); +} + +struct dp_catalog_sub *dp_catalog_get_v420(struct device *dev, + struct dp_catalog *catalog, struct dp_catalog_io *io) +{ + struct dp_catalog_private_v420 *catalog_priv; + + if (!dev || !catalog) { + DP_ERR("invalid input\n"); + return ERR_PTR(-EINVAL); + } + + catalog_priv = devm_kzalloc(dev, sizeof(*catalog_priv), GFP_KERNEL); + if (!catalog_priv) + return ERR_PTR(-ENOMEM); + + catalog_priv->dev = dev; + catalog_priv->io = io; + catalog_priv->dpc = catalog; + + catalog_priv->sub.put = dp_catalog_put_v420; + + catalog->aux.setup = dp_catalog_aux_setup_v420; + catalog->aux.clear_hw_interrupts = dp_catalog_aux_clear_hw_int_v420; + catalog->panel.config_msa = dp_catalog_panel_config_msa_v420; + catalog->ctrl.phy_lane_cfg = dp_catalog_ctrl_phy_lane_cfg_v420; + catalog->ctrl.update_vx_px = dp_catalog_ctrl_update_vx_px_v420; + catalog->ctrl.lane_pnswap = dp_catalog_ctrl_lane_pnswap_v420; + + return &catalog_priv->sub; +} diff --git a/msm/dp/dp_ctrl.c b/msm/dp/dp_ctrl.c new file mode 100644 index 000000000..ae023255d --- /dev/null +++ b/msm/dp/dp_ctrl.c @@ -0,0 +1,1617 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include + +#include "dp_ctrl.h" +#include "dp_debug.h" +#include "sde_dbg.h" + +#define DP_MST_DEBUG(fmt, ...) DP_DEBUG(fmt, ##__VA_ARGS__) + +#define DP_CTRL_INTR_READY_FOR_VIDEO BIT(0) +#define DP_CTRL_INTR_IDLE_PATTERN_SENT BIT(3) + +#define DP_CTRL_INTR_MST_DP0_VCPF_SENT BIT(0) +#define DP_CTRL_INTR_MST_DP1_VCPF_SENT BIT(3) + +/* dp state ctrl */ +#define ST_TRAIN_PATTERN_1 BIT(0) +#define ST_TRAIN_PATTERN_2 BIT(1) +#define ST_TRAIN_PATTERN_3 BIT(2) +#define ST_TRAIN_PATTERN_4 BIT(3) +#define ST_SYMBOL_ERR_RATE_MEASUREMENT BIT(4) +#define ST_PRBS7 BIT(5) +#define ST_CUSTOM_80_BIT_PATTERN BIT(6) +#define ST_SEND_VIDEO BIT(7) +#define ST_PUSH_IDLE BIT(8) +#define MST_DP0_PUSH_VCPF BIT(12) +#define MST_DP0_FORCE_VCPF BIT(13) +#define MST_DP1_PUSH_VCPF BIT(14) +#define MST_DP1_FORCE_VCPF BIT(15) + +#define MR_LINK_TRAINING1 0x8 +#define MR_LINK_SYMBOL_ERM 0x80 +#define MR_LINK_PRBS7 0x100 +#define MR_LINK_CUSTOM80 0x200 +#define MR_LINK_TRAINING4 0x40 + +#define DP_MAX_LANES 4 + +struct dp_mst_ch_slot_info { + u32 start_slot; + u32 tot_slots; +}; + +struct dp_mst_channel_info { + struct dp_mst_ch_slot_info slot_info[DP_STREAM_MAX]; +}; + +struct dp_ctrl_private { + struct dp_ctrl dp_ctrl; + + struct device *dev; + struct dp_aux *aux; + struct dp_panel *panel; + struct dp_link *link; + struct dp_power *power; + struct dp_parser *parser; + struct dp_catalog_ctrl *catalog; + struct dp_pll *pll; + + struct completion idle_comp; + struct completion video_comp; + + bool orientation; + bool power_on; + bool mst_mode; + bool fec_mode; + bool dsc_mode; + bool sim_mode; + + atomic_t aborted; + + u8 initial_lane_count; + u8 initial_bw_code; + + u32 vic; + u32 stream_count; + u32 training_2_pattern; + struct dp_mst_channel_info mst_ch_info; +}; + +enum notification_status { + NOTIFY_UNKNOWN, + NOTIFY_CONNECT, + NOTIFY_DISCONNECT, + NOTIFY_CONNECT_IRQ_HPD, + NOTIFY_DISCONNECT_IRQ_HPD, +}; + +static void 
dp_ctrl_idle_patterns_sent(struct dp_ctrl_private *ctrl) +{ + complete(&ctrl->idle_comp); +} + +static void dp_ctrl_video_ready(struct dp_ctrl_private *ctrl) +{ + complete(&ctrl->video_comp); +} + +static void dp_ctrl_abort(struct dp_ctrl *dp_ctrl, bool abort) +{ + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl) { + DP_ERR("Invalid input data\n"); + return; + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + atomic_set(&ctrl->aborted, abort); +} + +static void dp_ctrl_state_ctrl(struct dp_ctrl_private *ctrl, u32 state) +{ + ctrl->catalog->state_ctrl(ctrl->catalog, state); +} + +static void dp_ctrl_push_idle(struct dp_ctrl_private *ctrl, + enum dp_stream_id strm) +{ + int const idle_pattern_completion_timeout_ms = HZ / 10; + u32 state = 0x0; + + if (!ctrl->power_on) + return; + + if (!ctrl->mst_mode) { + state = ST_PUSH_IDLE; + goto trigger_idle; + } + + if (strm >= DP_STREAM_MAX) { + DP_ERR("mst push idle, invalid stream:%d\n", strm); + return; + } + + state |= (strm == DP_STREAM_0) ? MST_DP0_PUSH_VCPF : MST_DP1_PUSH_VCPF; + +trigger_idle: + reinit_completion(&ctrl->idle_comp); + dp_ctrl_state_ctrl(ctrl, state); + + if (!wait_for_completion_timeout(&ctrl->idle_comp, + idle_pattern_completion_timeout_ms)) + DP_WARN("time out\n"); + else + DP_DEBUG("mainlink off done\n"); +} + +/** + * dp_ctrl_configure_source_link_params() - configures DP TX source params + * @ctrl: Display Port Driver data + * @enable: enable or disable DP transmitter + * + * Configures the DP transmitter source params including details such as lane + * configuration, output format and sink/panel timing information. 
+ */ +static void dp_ctrl_configure_source_link_params(struct dp_ctrl_private *ctrl, + bool enable) +{ + if (!ctrl->power->clk_status(ctrl->power, DP_LINK_PM)) { + DP_WARN("DP link clocks are off\n"); + return; + } + + if (!ctrl->power->clk_status(ctrl->power, DP_CORE_PM)) { + DP_WARN("DP core clocks are off\n"); + return; + } + + if (enable) { + ctrl->catalog->lane_mapping(ctrl->catalog, ctrl->orientation, + ctrl->parser->l_map); + ctrl->catalog->lane_pnswap(ctrl->catalog, + ctrl->parser->l_pnswap); + ctrl->catalog->mst_config(ctrl->catalog, ctrl->mst_mode); + ctrl->catalog->config_ctrl(ctrl->catalog, + ctrl->link->link_params.lane_count); + ctrl->catalog->mainlink_levels(ctrl->catalog, + ctrl->link->link_params.lane_count); + ctrl->catalog->mainlink_ctrl(ctrl->catalog, true); + } else { + ctrl->catalog->mainlink_ctrl(ctrl->catalog, false); + } +} + +static void dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl) +{ + if (!wait_for_completion_timeout(&ctrl->video_comp, HZ / 2)) + DP_WARN("SEND_VIDEO time out\n"); + else + DP_DEBUG("SEND_VIDEO triggered\n"); +} + +static int dp_ctrl_update_sink_vx_px(struct dp_ctrl_private *ctrl) +{ + int i, ret; + u8 buf[DP_MAX_LANES]; + u8 v_level = ctrl->link->phy_params.v_level; + u8 p_level = ctrl->link->phy_params.p_level; + u8 size = min_t(u8, sizeof(buf), ctrl->link->link_params.lane_count); + u32 max_level_reached = 0; + + if (v_level == ctrl->link->phy_params.max_v_level) { + DP_DEBUG("max voltage swing level reached %d\n", v_level); + max_level_reached |= DP_TRAIN_MAX_SWING_REACHED; + } + + if (p_level == ctrl->link->phy_params.max_p_level) { + DP_DEBUG("max pre-emphasis level reached %d\n", p_level); + max_level_reached |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + } + + p_level <<= DP_TRAIN_PRE_EMPHASIS_SHIFT; + + for (i = 0; i < size; i++) + buf[i] = v_level | p_level | max_level_reached; + + DP_DEBUG("lanes: %d, swing: 0x%x, pre-emp: 0x%x\n", + size, v_level, p_level); + + ret = drm_dp_dpcd_write(ctrl->aux->drm_aux, + 
DP_TRAINING_LANE0_SET, buf, size); + + return ret <= 0 ? -EINVAL : 0; +} + +static void dp_ctrl_update_hw_vx_px(struct dp_ctrl_private *ctrl) +{ + struct dp_link *link = ctrl->link; + bool high = false; + + if (ctrl->link->link_params.bw_code == DP_LINK_BW_5_4 || + ctrl->link->link_params.bw_code == DP_LINK_BW_8_1) + high = true; + + ctrl->catalog->update_vx_px(ctrl->catalog, + link->phy_params.v_level, link->phy_params.p_level, high); +} + +static int dp_ctrl_update_sink_pattern(struct dp_ctrl_private *ctrl, u8 pattern) +{ + u8 buf = pattern; + int ret; + + DP_DEBUG("sink: pattern=%x\n", pattern); + + if (pattern && pattern != DP_TRAINING_PATTERN_4) + buf |= DP_LINK_SCRAMBLING_DISABLE; + + ret = drm_dp_dpcd_writeb(ctrl->aux->drm_aux, + DP_TRAINING_PATTERN_SET, buf); + + return ret <= 0 ? -EINVAL : 0; +} + +static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl, + u8 *link_status) +{ + int ret = 0, len; + u32 const offset = DP_LANE_ALIGN_STATUS_UPDATED - DP_LANE0_1_STATUS; + u32 link_status_read_max_retries = 100; + + while (--link_status_read_max_retries) { + len = drm_dp_dpcd_read_link_status(ctrl->aux->drm_aux, + link_status); + if (len != DP_LINK_STATUS_SIZE) { + DP_ERR("DP link status read failed, err: %d\n", len); + ret = len; + break; + } + + if (!(link_status[offset] & DP_LINK_STATUS_UPDATED)) + break; + } + + return ret; +} + +static int dp_ctrl_lane_count_down_shift(struct dp_ctrl_private *ctrl) +{ + int ret = -EAGAIN; + u8 lanes = ctrl->link->link_params.lane_count; + + if (ctrl->panel->link_info.revision != 0x14) + return -EINVAL; + + switch (lanes) { + case 4: + ctrl->link->link_params.lane_count = 2; + break; + case 2: + ctrl->link->link_params.lane_count = 1; + break; + default: + if (lanes != ctrl->initial_lane_count) + ret = -EINVAL; + break; + } + + DP_DEBUG("new lane count=%d\n", ctrl->link->link_params.lane_count); + + return ret; +} + +static bool dp_ctrl_is_link_rate_rbr(struct dp_ctrl_private *ctrl) +{ + return 
ctrl->link->link_params.bw_code == DP_LINK_BW_1_62; +} + +static u8 dp_ctrl_get_active_lanes(struct dp_ctrl_private *ctrl, + u8 *link_status) +{ + u8 lane, count = 0; + + for (lane = 0; lane < ctrl->link->link_params.lane_count; lane++) { + if (link_status[lane / 2] & (1 << (lane * 4))) + count++; + else + break; + } + + return count; +} + +static int dp_ctrl_link_training_1(struct dp_ctrl_private *ctrl) +{ + int tries, old_v_level, ret = -EINVAL; + u8 link_status[DP_LINK_STATUS_SIZE]; + u8 pattern = 0; + int const maximum_retries = 5; + + ctrl->aux->state &= ~DP_STATE_TRAIN_1_FAILED; + ctrl->aux->state &= ~DP_STATE_TRAIN_1_SUCCEEDED; + ctrl->aux->state |= DP_STATE_TRAIN_1_STARTED; + + if (ctrl->sim_mode) { + DP_DEBUG("simulation enabled, skip clock recovery\n"); + ret = 0; + goto skip_training; + } + + dp_ctrl_state_ctrl(ctrl, 0); + /* Make sure to clear the current pattern before starting a new one */ + wmb(); + + tries = 0; + old_v_level = ctrl->link->phy_params.v_level; + while (!atomic_read(&ctrl->aborted)) { + /* update hardware with current swing/pre-emp values */ + dp_ctrl_update_hw_vx_px(ctrl); + + if (!pattern) { + pattern = DP_TRAINING_PATTERN_1; + + ctrl->catalog->set_pattern(ctrl->catalog, pattern); + + /* update sink with current settings */ + ret = dp_ctrl_update_sink_pattern(ctrl, pattern); + if (ret) + break; + } + + ret = dp_ctrl_update_sink_vx_px(ctrl); + if (ret) + break; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) + drm_dp_link_train_clock_recovery_delay(ctrl->aux->drm_aux, ctrl->panel->dpcd); +#else + drm_dp_link_train_clock_recovery_delay(ctrl->panel->dpcd); +#endif + + ret = dp_ctrl_read_link_status(ctrl, link_status); + if (ret) + break; + + if (!drm_dp_clock_recovery_ok(link_status, + ctrl->link->link_params.lane_count)) + ret = -EINVAL; + else + break; + + if (ctrl->link->phy_params.v_level == ctrl->link->phy_params.max_v_level) { + DP_ERR_RATELIMITED_V("max v_level reached\n"); + break; + } + + if (old_v_level == 
ctrl->link->phy_params.v_level) { + if (++tries >= maximum_retries) { + DP_ERR("max tries reached\n"); + ret = -ETIMEDOUT; + break; + } + } else { + tries = 0; + old_v_level = ctrl->link->phy_params.v_level; + } + + DP_DEBUG("clock recovery not done, adjusting vx px\n"); + + ctrl->link->adjust_levels(ctrl->link, link_status); + } + + if (ret && dp_ctrl_is_link_rate_rbr(ctrl)) { + u8 active_lanes = dp_ctrl_get_active_lanes(ctrl, link_status); + + if (active_lanes) { + ctrl->link->link_params.lane_count = active_lanes; + ctrl->link->link_params.bw_code = ctrl->initial_bw_code; + + /* retry with new settings */ + ret = -EAGAIN; + } + } + +skip_training: + ctrl->aux->state &= ~DP_STATE_TRAIN_1_STARTED; + + if (ret) + ctrl->aux->state |= DP_STATE_TRAIN_1_FAILED; + else + ctrl->aux->state |= DP_STATE_TRAIN_1_SUCCEEDED; + + return ret; +} + +static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + + if (!ctrl) + return -EINVAL; + + switch (ctrl->link->link_params.bw_code) { + case DP_LINK_BW_8_1: + ctrl->link->link_params.bw_code = DP_LINK_BW_5_4; + break; + case DP_LINK_BW_5_4: + ctrl->link->link_params.bw_code = DP_LINK_BW_2_7; + break; + case DP_LINK_BW_2_7: + case DP_LINK_BW_1_62: + default: + ctrl->link->link_params.bw_code = DP_LINK_BW_1_62; + break; + } + + DP_DEBUG("new bw code=0x%x\n", ctrl->link->link_params.bw_code); + + return ret; +} + +static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl) +{ + dp_ctrl_update_sink_pattern(ctrl, 0); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) + drm_dp_link_train_channel_eq_delay(ctrl->aux->drm_aux, ctrl->panel->dpcd); +#else + drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd); +#endif +} + +static int dp_ctrl_link_training_2(struct dp_ctrl_private *ctrl) +{ + int tries = 0, ret = -EINVAL; + u8 dpcd_pattern, pattern = 0; + int const maximum_retries = 5; + u8 link_status[DP_LINK_STATUS_SIZE]; + + ctrl->aux->state &= ~DP_STATE_TRAIN_2_FAILED; + ctrl->aux->state &= 
~DP_STATE_TRAIN_2_SUCCEEDED; + ctrl->aux->state |= DP_STATE_TRAIN_2_STARTED; + + if (ctrl->sim_mode) { + DP_DEBUG("simulation enabled, skip channel equalization\n"); + ret = 0; + goto skip_training; + } + + dp_ctrl_state_ctrl(ctrl, 0); + /* Make sure to clear the current pattern before starting a new one */ + wmb(); + + dpcd_pattern = ctrl->training_2_pattern; + + while (!atomic_read(&ctrl->aborted)) { + /* update hardware with current swing/pre-emp values */ + dp_ctrl_update_hw_vx_px(ctrl); + + if (!pattern) { + pattern = dpcd_pattern; + + /* program hw to send pattern */ + ctrl->catalog->set_pattern(ctrl->catalog, pattern); + + /* update sink with current pattern */ + ret = dp_ctrl_update_sink_pattern(ctrl, pattern); + if (ret) + break; + } + + ret = dp_ctrl_update_sink_vx_px(ctrl); + if (ret) + break; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) + drm_dp_link_train_channel_eq_delay(ctrl->aux->drm_aux, ctrl->panel->dpcd); +#else + drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd); +#endif + + ret = dp_ctrl_read_link_status(ctrl, link_status); + if (ret) + break; + + /* check if CR bits still remain set */ + if (!drm_dp_clock_recovery_ok(link_status, + ctrl->link->link_params.lane_count)) { + ret = -EINVAL; + break; + } + + if (!drm_dp_channel_eq_ok(link_status, + ctrl->link->link_params.lane_count)) + ret = -EINVAL; + else + break; + + if (tries >= maximum_retries) { + ret = dp_ctrl_lane_count_down_shift(ctrl); + break; + } + tries++; + + ctrl->link->adjust_levels(ctrl->link, link_status); + } + +skip_training: + ctrl->aux->state &= ~DP_STATE_TRAIN_2_STARTED; + + if (ret) + ctrl->aux->state |= DP_STATE_TRAIN_2_FAILED; + else + ctrl->aux->state |= DP_STATE_TRAIN_2_SUCCEEDED; + return ret; +} + +static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + u8 const encoding = 0x1, downspread = 0x00; + struct drm_dp_link link_info = {0}; + + ctrl->link->phy_params.p_level = 0; + ctrl->link->phy_params.v_level = 0; + + 
link_info.num_lanes = ctrl->link->link_params.lane_count; + link_info.rate = drm_dp_bw_code_to_link_rate( + ctrl->link->link_params.bw_code); + link_info.capabilities = ctrl->panel->link_info.capabilities; + + ret = dp_link_configure(ctrl->aux->drm_aux, &link_info); + if (ret) + goto end; + + ret = drm_dp_dpcd_writeb(ctrl->aux->drm_aux, + DP_DOWNSPREAD_CTRL, downspread); + if (ret <= 0) { + ret = -EINVAL; + goto end; + } + + ret = drm_dp_dpcd_writeb(ctrl->aux->drm_aux, + DP_MAIN_LINK_CHANNEL_CODING_SET, encoding); + if (ret <= 0) { + ret = -EINVAL; + goto end; + } + + /* disable FEC before link training */ + ctrl->catalog->fec_config(ctrl->catalog, false); + + ret = dp_ctrl_link_training_1(ctrl); + if (ret) { + DP_ERR("link training #1 failed\n"); + goto end; + } + + /* print success info as this is a result of user initiated action */ + DP_INFO("link training #1 successful\n"); + + ret = dp_ctrl_link_training_2(ctrl); + if (ret) { + DP_ERR("link training #2 failed\n"); + goto end; + } + + /* print success info as this is a result of user initiated action */ + DP_INFO("link training #2 successful\n"); + +end: + dp_ctrl_state_ctrl(ctrl, 0); + /* Make sure to clear the current pattern before starting a new one */ + wmb(); + + dp_ctrl_clear_training_pattern(ctrl); + return ret; +} + +static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + + if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) + goto end; + + /* + * As part of previous calls, DP controller state might have + * transitioned to PUSH_IDLE. In order to start transmitting a link + * training pattern, we have to first to a DP software reset. 
+ */ + ctrl->catalog->reset(ctrl->catalog); + + if (ctrl->fec_mode) + drm_dp_dpcd_writeb(ctrl->aux->drm_aux, DP_FEC_CONFIGURATION, + 0x01); + + ret = dp_ctrl_link_train(ctrl); + +end: + return ret; +} + +static void dp_ctrl_set_clock_rate(struct dp_ctrl_private *ctrl, + char *name, enum dp_pm_type clk_type, u32 rate) +{ + u32 num = ctrl->parser->mp[clk_type].num_clk; + struct dss_clk *cfg = ctrl->parser->mp[clk_type].clk_config; + + /* convert to HZ for byte2 ops */ + rate *= ctrl->pll->clk_factor; + + while (num && strcmp(cfg->clk_name, name)) { + num--; + cfg++; + } + + DP_DEBUG("setting rate=%d on clk=%s\n", rate, name); + + if (num) + cfg->rate = rate; + else + DP_ERR("%s clock could not be set with rate %d\n", name, rate); +} + +static int dp_ctrl_enable_link_clock(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + u32 rate = drm_dp_bw_code_to_link_rate(ctrl->link->link_params.bw_code); + enum dp_pm_type type = DP_LINK_PM; + + DP_DEBUG("rate=%d\n", rate); + + dp_ctrl_set_clock_rate(ctrl, "link_clk_src", type, rate); + + if (ctrl->pll->pll_cfg) { + ret = ctrl->pll->pll_cfg(ctrl->pll, rate); + if (ret < 0) { + DP_ERR("DP pll cfg failed\n"); + return ret; + } + } + + if (ctrl->pll->pll_prepare) { + ret = ctrl->pll->pll_prepare(ctrl->pll); + if (ret < 0) { + DP_ERR("DP pll prepare failed\n"); + return ret; + } + } + + ret = ctrl->power->clk_enable(ctrl->power, type, true); + if (ret) { + DP_ERR("Unabled to start link clocks\n"); + ret = -EINVAL; + } + + return ret; +} + +static void dp_ctrl_disable_link_clock(struct dp_ctrl_private *ctrl) +{ + int rc = 0; + + ctrl->power->clk_enable(ctrl->power, DP_LINK_PM, false); + if (ctrl->pll->pll_unprepare) { + rc = ctrl->pll->pll_unprepare(ctrl->pll); + if (rc < 0) + DP_ERR("pll unprepare failed\n"); + } +} + +static void dp_ctrl_select_training_pattern(struct dp_ctrl_private *ctrl, + bool downgrade) +{ + u32 pattern; + + if (drm_dp_tps4_supported(ctrl->panel->dpcd)) + pattern = DP_TRAINING_PATTERN_4; + else if 
(drm_dp_tps3_supported(ctrl->panel->dpcd)) + pattern = DP_TRAINING_PATTERN_3; + else + pattern = DP_TRAINING_PATTERN_2; + + if (!downgrade) + goto end; + + switch (pattern) { + case DP_TRAINING_PATTERN_4: + pattern = DP_TRAINING_PATTERN_3; + break; + case DP_TRAINING_PATTERN_3: + pattern = DP_TRAINING_PATTERN_2; + break; + default: + break; + } +end: + ctrl->training_2_pattern = pattern; +} + +static int dp_ctrl_link_setup(struct dp_ctrl_private *ctrl, bool shallow) +{ + int rc = -EINVAL; + bool downgrade = false; + u32 link_train_max_retries = 100; + struct dp_catalog_ctrl *catalog; + struct dp_link_params *link_params; + + catalog = ctrl->catalog; + link_params = &ctrl->link->link_params; + + catalog->phy_lane_cfg(catalog, ctrl->orientation, + link_params->lane_count); + + while (1) { + DP_DEBUG("bw_code=%d, lane_count=%d\n", + link_params->bw_code, link_params->lane_count); + + rc = dp_ctrl_enable_link_clock(ctrl); + if (rc) + break; + + ctrl->catalog->late_phy_init(ctrl->catalog, + ctrl->link->link_params.lane_count, + ctrl->orientation); + + dp_ctrl_configure_source_link_params(ctrl, true); + + if (!(--link_train_max_retries % 10)) { + struct dp_link_params *link = &ctrl->link->link_params; + + link->lane_count = ctrl->initial_lane_count; + link->bw_code = ctrl->initial_bw_code; + downgrade = true; + } + + dp_ctrl_select_training_pattern(ctrl, downgrade); + + rc = dp_ctrl_setup_main_link(ctrl); + if (!rc) + break; + + /* + * Shallow means link training failure is not important. + * If it fails, we still keep the link clocks on. + * In this mode, the system expects DP to be up + * even though the cable is removed. Disconnect interrupt + * will eventually trigger and shutdown DP. 
+ */ + if (shallow) { + rc = 0; + break; + } + + if (!link_train_max_retries || atomic_read(&ctrl->aborted)) { + dp_ctrl_disable_link_clock(ctrl); + break; + } + + if (rc != -EAGAIN) { + dp_ctrl_link_rate_down_shift(ctrl); + ctrl->panel->init(ctrl->panel); + } + + dp_ctrl_configure_source_link_params(ctrl, false); + dp_ctrl_disable_link_clock(ctrl); + + /* hw recommended delays before retrying link training */ + msleep(20); + } + + return rc; +} + +static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl, + struct dp_panel *dp_panel) +{ + int ret = 0; + u32 pclk; + enum dp_pm_type clk_type; + char clk_name[32] = ""; + + ret = ctrl->power->set_pixel_clk_parent(ctrl->power, + dp_panel->stream_id); + + if (ret) + return ret; + + if (dp_panel->stream_id == DP_STREAM_0) { + clk_type = DP_STREAM0_PM; + strlcpy(clk_name, "strm0_pixel_clk", 32); + } else if (dp_panel->stream_id == DP_STREAM_1) { + clk_type = DP_STREAM1_PM; + strlcpy(clk_name, "strm1_pixel_clk", 32); + } else { + DP_ERR("Invalid stream:%d for clk enable\n", + dp_panel->stream_id); + return -EINVAL; + } + + pclk = dp_panel->pinfo.widebus_en ? 
+ (dp_panel->pinfo.pixel_clk_khz >> 1) : + (dp_panel->pinfo.pixel_clk_khz); + + dp_ctrl_set_clock_rate(ctrl, clk_name, clk_type, pclk); + + ret = ctrl->power->clk_enable(ctrl->power, clk_type, true); + if (ret) { + DP_ERR("Unabled to start stream:%d clocks\n", + dp_panel->stream_id); + ret = -EINVAL; + } + + return ret; +} + +static int dp_ctrl_disable_stream_clocks(struct dp_ctrl_private *ctrl, + struct dp_panel *dp_panel) +{ + int ret = 0; + + if (dp_panel->stream_id == DP_STREAM_0) { + return ctrl->power->clk_enable(ctrl->power, + DP_STREAM0_PM, false); + } else if (dp_panel->stream_id == DP_STREAM_1) { + return ctrl->power->clk_enable(ctrl->power, + DP_STREAM1_PM, false); + } else { + DP_ERR("Invalid stream:%d for clk disable\n", + dp_panel->stream_id); + ret = -EINVAL; + } + return ret; +} +static int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip, bool reset) +{ + struct dp_ctrl_private *ctrl; + struct dp_catalog_ctrl *catalog; + + if (!dp_ctrl) { + DP_ERR("Invalid input data\n"); + return -EINVAL; + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + ctrl->orientation = flip; + catalog = ctrl->catalog; + + if (reset) { + catalog->usb_reset(ctrl->catalog, flip); + catalog->phy_reset(ctrl->catalog); + } + catalog->enable_irq(ctrl->catalog, true); + atomic_set(&ctrl->aborted, 0); + + return 0; +} + +/** + * dp_ctrl_host_deinit() - Uninitialize DP controller + * @ctrl: Display Port Driver data + * + * Perform required steps to uninitialize DP controller + * and its resources. 
+ */ +static void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl) { + DP_ERR("Invalid input data\n"); + return; + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + ctrl->catalog->enable_irq(ctrl->catalog, false); + + DP_DEBUG("Host deinitialized successfully\n"); +} + +static void dp_ctrl_send_video(struct dp_ctrl_private *ctrl) +{ + reinit_completion(&ctrl->video_comp); + ctrl->catalog->state_ctrl(ctrl->catalog, ST_SEND_VIDEO); +} + +static void dp_ctrl_fec_setup(struct dp_ctrl_private *ctrl) +{ + u8 fec_sts = 0; + int i, max_retries = 3; + bool fec_en_detected = false; + + if (!ctrl->fec_mode) + return; + + /* FEC should be set only for the first stream */ + if (ctrl->stream_count > 1) + return; + + /* Need to try to enable multiple times due to BS symbols collisions */ + for (i = 0; i < max_retries; i++) { + ctrl->catalog->fec_config(ctrl->catalog, ctrl->fec_mode); + + /* wait for controller to start fec sequence */ + usleep_range(900, 1000); + + /* read back FEC status and check if it is enabled */ + drm_dp_dpcd_readb(ctrl->aux->drm_aux, DP_FEC_STATUS, &fec_sts); + if (fec_sts & DP_FEC_DECODE_EN_DETECTED) { + fec_en_detected = true; + break; + } + } + + SDE_EVT32_EXTERNAL(i, fec_en_detected); + DP_DEBUG("retries %d, fec_en_detected %d\n", i, fec_en_detected); + + if (!fec_en_detected) + DP_WARN("failed to enable sink fec\n"); +} + +static int dp_ctrl_mst_send_act(struct dp_ctrl_private *ctrl) +{ + bool act_complete; + + if (!ctrl->mst_mode) + return 0; + + ctrl->catalog->trigger_act(ctrl->catalog); + msleep(20); /* needs 1 frame time */ + + ctrl->catalog->read_act_complete_sts(ctrl->catalog, &act_complete); + + if (!act_complete) + DP_ERR("mst act trigger complete failed\n"); + else + DP_MST_DEBUG("mst ACT trigger complete SUCCESS\n"); + + return 0; +} + +static int dp_ctrl_link_maintenance(struct dp_ctrl *dp_ctrl) +{ + int ret = 0; + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl) { + 
DP_ERR("Invalid input data\n"); + return -EINVAL; + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + ctrl->aux->state &= ~DP_STATE_LINK_MAINTENANCE_COMPLETED; + ctrl->aux->state &= ~DP_STATE_LINK_MAINTENANCE_FAILED; + + if (!ctrl->power_on) { + DP_ERR("ctrl off\n"); + ret = -EINVAL; + goto end; + } + + if (atomic_read(&ctrl->aborted)) + goto end; + + ctrl->aux->state |= DP_STATE_LINK_MAINTENANCE_STARTED; + ret = dp_ctrl_setup_main_link(ctrl); + ctrl->aux->state &= ~DP_STATE_LINK_MAINTENANCE_STARTED; + + if (ret) { + ctrl->aux->state |= DP_STATE_LINK_MAINTENANCE_FAILED; + goto end; + } + + ctrl->aux->state |= DP_STATE_LINK_MAINTENANCE_COMPLETED; + + if (ctrl->stream_count) { + dp_ctrl_send_video(ctrl); + dp_ctrl_mst_send_act(ctrl); + dp_ctrl_wait4video_ready(ctrl); + dp_ctrl_fec_setup(ctrl); + } +end: + return ret; +} + +static void dp_ctrl_process_phy_test_request(struct dp_ctrl *dp_ctrl) +{ + int ret = 0; + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl) { + DP_ERR("Invalid input data\n"); + return; + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + if (!ctrl->link->phy_params.phy_test_pattern_sel) { + DP_DEBUG("no test pattern selected by sink\n"); + return; + } + + DP_DEBUG("start\n"); + + /* + * The global reset will need DP link ralated clocks to be + * running. Add the global reset just before disabling the + * link clocks and core clocks. 
+ */ + ctrl->catalog->reset(ctrl->catalog); + ctrl->dp_ctrl.stream_pre_off(&ctrl->dp_ctrl, ctrl->panel); + ctrl->dp_ctrl.stream_off(&ctrl->dp_ctrl, ctrl->panel); + ctrl->dp_ctrl.off(&ctrl->dp_ctrl); + + ctrl->aux->init(ctrl->aux, ctrl->parser->aux_cfg); + + ret = ctrl->dp_ctrl.on(&ctrl->dp_ctrl, ctrl->mst_mode, + ctrl->fec_mode, ctrl->dsc_mode, false); + if (ret) + DP_ERR("failed to enable DP controller\n"); + + ctrl->dp_ctrl.stream_on(&ctrl->dp_ctrl, ctrl->panel); + DP_DEBUG("end\n"); +} + +static void dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl) +{ + bool success = false; + u32 pattern_sent = 0x0; + u32 pattern_requested = ctrl->link->phy_params.phy_test_pattern_sel; + + dp_ctrl_update_hw_vx_px(ctrl); + ctrl->catalog->send_phy_pattern(ctrl->catalog, pattern_requested); + dp_ctrl_update_sink_vx_px(ctrl); + ctrl->link->send_test_response(ctrl->link); + + pattern_sent = ctrl->catalog->read_phy_pattern(ctrl->catalog); + DP_DEBUG("pattern_request: %s. pattern_sent: 0x%x\n", + dp_link_get_phy_test_pattern(pattern_requested), + pattern_sent); + + switch (pattern_sent) { + case MR_LINK_TRAINING1: + if (pattern_requested == DP_PHY_TEST_PATTERN_D10_2) + success = true; + break; + case MR_LINK_SYMBOL_ERM: + if ((pattern_requested == DP_PHY_TEST_PATTERN_ERROR_COUNT) + || (pattern_requested == DP_PHY_TEST_PATTERN_CP2520)) + success = true; + break; + case MR_LINK_PRBS7: + if (pattern_requested == DP_PHY_TEST_PATTERN_PRBS7) + success = true; + break; + case MR_LINK_CUSTOM80: + if (pattern_requested == DP_PHY_TEST_PATTERN_80BIT_CUSTOM) + success = true; + break; + case MR_LINK_TRAINING4: + if (pattern_requested == DP_PHY_TEST_PATTERN_CP2520_3) + success = true; + break; + default: + success = false; + break; + } + + DP_DEBUG("%s: %s\n", success ? 
"success" : "failed", + dp_link_get_phy_test_pattern(pattern_requested)); +} + +static void dp_ctrl_mst_calculate_rg(struct dp_ctrl_private *ctrl, + struct dp_panel *panel, u32 *p_x_int, u32 *p_y_frac_enum) +{ + u64 min_slot_cnt, max_slot_cnt; + u64 raw_target_sc, target_sc_fixp; + u64 ts_denom, ts_enum, ts_int; + u64 pclk = panel->pinfo.pixel_clk_khz; + u64 lclk = 0; + u64 lanes = ctrl->link->link_params.lane_count; + u64 bpp = panel->pinfo.bpp; + u64 pbn = panel->pinfo.pbn_no_overhead; // before dsc/fec overhead + u64 numerator, denominator, temp, temp1, temp2; + u32 x_int = 0, y_frac_enum = 0; + u64 target_strm_sym, ts_int_fixp, ts_frac_fixp, y_frac_enum_fixp; + + lclk = drm_dp_bw_code_to_link_rate(ctrl->link->link_params.bw_code); + if (panel->pinfo.comp_info.enabled) + bpp = panel->pinfo.comp_info.tgt_bpp; + + /* min_slot_cnt */ + numerator = pclk * bpp * 64 * 1000; + denominator = lclk * lanes * 8 * 1000; + min_slot_cnt = drm_fixp_from_fraction(numerator, denominator); + + /* max_slot_cnt */ + numerator = pbn * 54 * 1000; + denominator = lclk * lanes; + max_slot_cnt = drm_fixp_from_fraction(numerator, denominator); + + /* raw_target_sc */ + numerator = max_slot_cnt + min_slot_cnt; + denominator = drm_fixp_from_fraction(2, 1); + raw_target_sc = drm_fixp_div(numerator, denominator); + + DP_DEBUG("raw_target_sc before overhead:0x%llx\n", raw_target_sc); + DP_DEBUG("dsc_overhead_fp:0x%llx\n", panel->pinfo.dsc_overhead_fp); + + /* apply fec and dsc overhead factor */ + if (panel->pinfo.dsc_overhead_fp) + raw_target_sc = drm_fixp_mul(raw_target_sc, + panel->pinfo.dsc_overhead_fp); + + if (panel->fec_overhead_fp) + raw_target_sc = drm_fixp_mul(raw_target_sc, + panel->fec_overhead_fp); + + DP_DEBUG("raw_target_sc after overhead:0x%llx\n", raw_target_sc); + + /* target_sc */ + temp = drm_fixp_from_fraction(256 * lanes, 1); + numerator = drm_fixp_mul(raw_target_sc, temp); + denominator = drm_fixp_from_fraction(256 * lanes, 1); + target_sc_fixp = drm_fixp_div(numerator, 
denominator); + + ts_enum = 256 * lanes; + ts_denom = drm_fixp_from_fraction(256 * lanes, 1); + ts_int = drm_fixp2int(target_sc_fixp); + + temp = drm_fixp2int_ceil(raw_target_sc); + if (temp != ts_int) { + temp = drm_fixp_from_fraction(ts_int, 1); + temp1 = raw_target_sc - temp; + temp2 = drm_fixp_mul(temp1, ts_denom); + ts_enum = drm_fixp2int(temp2); + } + + /* target_strm_sym */ + ts_int_fixp = drm_fixp_from_fraction(ts_int, 1); + ts_frac_fixp = drm_fixp_from_fraction(ts_enum, drm_fixp2int(ts_denom)); + temp = ts_int_fixp + ts_frac_fixp; + temp1 = drm_fixp_from_fraction(lanes, 1); + target_strm_sym = drm_fixp_mul(temp, temp1); + + /* x_int */ + x_int = drm_fixp2int(target_strm_sym); + + /* y_enum_frac */ + temp = drm_fixp_from_fraction(x_int, 1); + temp1 = target_strm_sym - temp; + temp2 = drm_fixp_from_fraction(256, 1); + y_frac_enum_fixp = drm_fixp_mul(temp1, temp2); + + temp1 = drm_fixp2int(y_frac_enum_fixp); + temp2 = drm_fixp2int_ceil(y_frac_enum_fixp); + + y_frac_enum = (u32)((temp1 == temp2) ? 
temp1 : temp1 + 1); + + panel->mst_target_sc = raw_target_sc; + *p_x_int = x_int; + *p_y_frac_enum = y_frac_enum; + + DP_DEBUG("x_int: %d, y_frac_enum: %d\n", x_int, y_frac_enum); +} + +static void dp_ctrl_mst_stream_setup(struct dp_ctrl_private *ctrl, + struct dp_panel *panel) +{ + u32 x_int, y_frac_enum, lanes, bw_code; + int i; + + if (!ctrl->mst_mode) + return; + + DP_MST_DEBUG("mst stream channel allocation\n"); + + for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) { + ctrl->catalog->channel_alloc(ctrl->catalog, + i, + ctrl->mst_ch_info.slot_info[i].start_slot, + ctrl->mst_ch_info.slot_info[i].tot_slots); + } + + lanes = ctrl->link->link_params.lane_count; + bw_code = ctrl->link->link_params.bw_code; + + dp_ctrl_mst_calculate_rg(ctrl, panel, &x_int, &y_frac_enum); + + ctrl->catalog->update_rg(ctrl->catalog, panel->stream_id, + x_int, y_frac_enum); + + DP_MST_DEBUG("mst stream:%d, start_slot:%d, tot_slots:%d\n", + panel->stream_id, + panel->channel_start_slot, panel->channel_total_slots); + + DP_MST_DEBUG("mst lane_cnt:%d, bw:%d, x_int:%d, y_frac:%d\n", + lanes, bw_code, x_int, y_frac_enum); +} + +static void dp_ctrl_dsc_setup(struct dp_ctrl_private *ctrl, struct dp_panel *panel) +{ + int rlen; + u32 dsc_enable; + struct dp_panel_info *pinfo = &panel->pinfo; + + if (!ctrl->fec_mode) + return; + + /* Set DP_DSC_ENABLE DPCD register if compression is enabled for SST monitor. + * Set DP_DSC_ENABLE DPCD register if compression is enabled for + * atleast 1 of the MST monitor. + */ + dsc_enable = (pinfo->comp_info.enabled == true) ? 
1 : 0; + + if (ctrl->mst_mode && (panel->stream_id == DP_STREAM_1) && !dsc_enable) + return; + + rlen = drm_dp_dpcd_writeb(ctrl->aux->drm_aux, DP_DSC_ENABLE, + dsc_enable); + if (rlen < 1) + DP_WARN("failed to enable sink dsc\n"); +} + +static int dp_ctrl_stream_on(struct dp_ctrl *dp_ctrl, struct dp_panel *panel) +{ + int rc = 0; + bool link_ready = false; + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl || !panel) + return -EINVAL; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + if (!ctrl->power_on) { + DP_DEBUG("controller powered off\n"); + return -EPERM; + } + + rc = dp_ctrl_enable_stream_clocks(ctrl, panel); + if (rc) { + DP_ERR("failure on stream clock enable\n"); + return rc; + } + + panel->pclk_on = true; + rc = panel->hw_cfg(panel, true); + if (rc) + return rc; + + if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { + dp_ctrl_send_phy_test_pattern(ctrl); + return 0; + } + + dp_ctrl_mst_stream_setup(ctrl, panel); + + dp_ctrl_send_video(ctrl); + + dp_ctrl_mst_send_act(ctrl); + + dp_ctrl_wait4video_ready(ctrl); + + ctrl->stream_count++; + + link_ready = ctrl->catalog->mainlink_ready(ctrl->catalog); + DP_DEBUG("mainlink %s\n", link_ready ? 
"READY" : "NOT READY"); + + /* wait for link training completion before fec config as per spec */ + dp_ctrl_fec_setup(ctrl); + dp_ctrl_dsc_setup(ctrl, panel); + panel->sink_crc_enable(panel, true); + + return rc; +} + +static void dp_ctrl_mst_stream_pre_off(struct dp_ctrl *dp_ctrl, + struct dp_panel *panel) +{ + struct dp_ctrl_private *ctrl; + bool act_complete; + int i; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + if (!ctrl->mst_mode) + return; + + for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) { + ctrl->catalog->channel_alloc(ctrl->catalog, + i, + ctrl->mst_ch_info.slot_info[i].start_slot, + ctrl->mst_ch_info.slot_info[i].tot_slots); + } + + ctrl->catalog->trigger_act(ctrl->catalog); + msleep(20); /* needs 1 frame time */ + ctrl->catalog->read_act_complete_sts(ctrl->catalog, &act_complete); + + if (!act_complete) + DP_ERR("mst stream_off act trigger complete failed\n"); + else + DP_MST_DEBUG("mst stream_off ACT trigger complete SUCCESS\n"); +} + +static void dp_ctrl_stream_pre_off(struct dp_ctrl *dp_ctrl, + struct dp_panel *panel) +{ + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl || !panel) { + DP_ERR("invalid input\n"); + return; + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + dp_ctrl_push_idle(ctrl, panel->stream_id); + + dp_ctrl_mst_stream_pre_off(dp_ctrl, panel); +} + +static void dp_ctrl_stream_off(struct dp_ctrl *dp_ctrl, struct dp_panel *panel) +{ + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl || !panel) + return; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + if (!ctrl->power_on) + return; + + panel->hw_cfg(panel, false); + + panel->pclk_on = false; + dp_ctrl_disable_stream_clocks(ctrl, panel); + ctrl->stream_count--; +} + +static int dp_ctrl_on(struct dp_ctrl *dp_ctrl, bool mst_mode, + bool fec_mode, bool dsc_mode, bool shallow) +{ + int rc = 0; + struct dp_ctrl_private *ctrl; + u32 rate = 0; + + if (!dp_ctrl) { + rc = -EINVAL; + goto end; + } + + ctrl = container_of(dp_ctrl, 
struct dp_ctrl_private, dp_ctrl); + + if (ctrl->power_on) + goto end; + + if (atomic_read(&ctrl->aborted)) { + rc = -EPERM; + goto end; + } + + ctrl->mst_mode = mst_mode; + if (fec_mode) { + ctrl->fec_mode = fec_mode; + ctrl->dsc_mode = dsc_mode; + } + + rate = ctrl->panel->link_info.rate; + + if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { + DP_DEBUG("using phy test link parameters\n"); + } else { + ctrl->link->link_params.bw_code = + drm_dp_link_rate_to_bw_code(rate); + ctrl->link->link_params.lane_count = + ctrl->panel->link_info.num_lanes; + } + + DP_DEBUG("bw_code=%d, lane_count=%d\n", + ctrl->link->link_params.bw_code, + ctrl->link->link_params.lane_count); + + /* backup initial lane count and bw code */ + ctrl->initial_lane_count = ctrl->link->link_params.lane_count; + ctrl->initial_bw_code = ctrl->link->link_params.bw_code; + + rc = dp_ctrl_link_setup(ctrl, shallow); + if (!rc) + ctrl->power_on = true; +end: + return rc; +} + +static void dp_ctrl_off(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl) + return; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + if (!ctrl->power_on) + return; + + ctrl->catalog->fec_config(ctrl->catalog, false); + dp_ctrl_configure_source_link_params(ctrl, false); + dp_ctrl_state_ctrl(ctrl, 0); + + /* Make sure DP is disabled before clk disable */ + wmb(); + + dp_ctrl_disable_link_clock(ctrl); + + ctrl->mst_mode = false; + ctrl->fec_mode = false; + ctrl->dsc_mode = false; + ctrl->power_on = false; + memset(&ctrl->mst_ch_info, 0, sizeof(ctrl->mst_ch_info)); + DP_DEBUG("DP off done\n"); +} + +static void dp_ctrl_set_mst_channel_info(struct dp_ctrl *dp_ctrl, + enum dp_stream_id strm, + u32 start_slot, u32 tot_slots) +{ + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl || strm >= DP_STREAM_MAX) { + DP_ERR("invalid input\n"); + return; + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + ctrl->mst_ch_info.slot_info[strm].start_slot = start_slot; + 
ctrl->mst_ch_info.slot_info[strm].tot_slots = tot_slots; +} + +static void dp_ctrl_isr(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl) + return; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + ctrl->catalog->get_interrupt(ctrl->catalog); + SDE_EVT32_EXTERNAL(ctrl->catalog->isr, ctrl->catalog->isr3, ctrl->catalog->isr5, + ctrl->catalog->isr6); + + if (ctrl->catalog->isr & DP_CTRL_INTR_READY_FOR_VIDEO) + dp_ctrl_video_ready(ctrl); + + if (ctrl->catalog->isr & DP_CTRL_INTR_IDLE_PATTERN_SENT) + dp_ctrl_idle_patterns_sent(ctrl); + + if (ctrl->catalog->isr5 & DP_CTRL_INTR_MST_DP0_VCPF_SENT) + dp_ctrl_idle_patterns_sent(ctrl); + + if (ctrl->catalog->isr5 & DP_CTRL_INTR_MST_DP1_VCPF_SENT) + dp_ctrl_idle_patterns_sent(ctrl); +} + +void dp_ctrl_set_sim_mode(struct dp_ctrl *dp_ctrl, bool en) +{ + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl) + return; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + ctrl->sim_mode = en; + DP_INFO("sim_mode=%d\n", ctrl->sim_mode); +} + +int dp_ctrl_setup_misr(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl) + return -EINVAL; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + return ctrl->catalog->setup_misr(ctrl->catalog); +} + +int dp_ctrl_read_misr(struct dp_ctrl *dp_ctrl, struct dp_misr40_data *data) +{ + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl) + return -EINVAL; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + return ctrl->catalog->read_misr(ctrl->catalog, data); +} + +struct dp_ctrl *dp_ctrl_get(struct dp_ctrl_in *in) +{ + int rc = 0; + struct dp_ctrl_private *ctrl; + struct dp_ctrl *dp_ctrl; + + if (!in->dev || !in->panel || !in->aux || + !in->link || !in->catalog) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + ctrl = devm_kzalloc(in->dev, sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) { + rc = -ENOMEM; + goto error; + } + + init_completion(&ctrl->idle_comp); + 
init_completion(&ctrl->video_comp); + + /* in parameters */ + ctrl->parser = in->parser; + ctrl->panel = in->panel; + ctrl->power = in->power; + ctrl->aux = in->aux; + ctrl->link = in->link; + ctrl->catalog = in->catalog; + ctrl->pll = in->pll; + ctrl->dev = in->dev; + ctrl->mst_mode = false; + ctrl->fec_mode = false; + + dp_ctrl = &ctrl->dp_ctrl; + + /* out parameters */ + dp_ctrl->init = dp_ctrl_host_init; + dp_ctrl->deinit = dp_ctrl_host_deinit; + dp_ctrl->on = dp_ctrl_on; + dp_ctrl->off = dp_ctrl_off; + dp_ctrl->abort = dp_ctrl_abort; + dp_ctrl->isr = dp_ctrl_isr; + dp_ctrl->link_maintenance = dp_ctrl_link_maintenance; + dp_ctrl->process_phy_test_request = dp_ctrl_process_phy_test_request; + dp_ctrl->stream_on = dp_ctrl_stream_on; + dp_ctrl->stream_off = dp_ctrl_stream_off; + dp_ctrl->stream_pre_off = dp_ctrl_stream_pre_off; + dp_ctrl->set_mst_channel_info = dp_ctrl_set_mst_channel_info; + dp_ctrl->set_sim_mode = dp_ctrl_set_sim_mode; + dp_ctrl->setup_misr = dp_ctrl_setup_misr; + dp_ctrl->read_misr = dp_ctrl_read_misr; + + return dp_ctrl; +error: + return ERR_PTR(rc); +} + +void dp_ctrl_put(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + + if (!dp_ctrl) + return; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + devm_kfree(ctrl->dev, ctrl); +} diff --git a/msm/dp/dp_ctrl.h b/msm/dp/dp_ctrl.h new file mode 100644 index 000000000..37f78d043 --- /dev/null +++ b/msm/dp/dp_ctrl.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DP_CTRL_H_ +#define _DP_CTRL_H_ + +#include "dp_aux.h" +#include "dp_panel.h" +#include "dp_link.h" +#include "dp_parser.h" +#include "dp_power.h" +#include "dp_catalog.h" +#include "dp_debug.h" + +struct dp_ctrl { + int (*init)(struct dp_ctrl *dp_ctrl, bool flip, bool reset); + void (*deinit)(struct dp_ctrl *dp_ctrl); + int (*on)(struct dp_ctrl *dp_ctrl, bool mst_mode, bool fec_en, + bool dsc_en, bool shallow); + void (*off)(struct dp_ctrl *dp_ctrl); + void (*abort)(struct dp_ctrl *dp_ctrl, bool abort); + void (*isr)(struct dp_ctrl *dp_ctrl); + bool (*handle_sink_request)(struct dp_ctrl *dp_ctrl); + void (*process_phy_test_request)(struct dp_ctrl *dp_ctrl); + int (*link_maintenance)(struct dp_ctrl *dp_ctrl); + int (*stream_on)(struct dp_ctrl *dp_ctrl, struct dp_panel *panel); + void (*stream_off)(struct dp_ctrl *dp_ctrl, struct dp_panel *panel); + void (*stream_pre_off)(struct dp_ctrl *dp_ctrl, struct dp_panel *panel); + void (*set_mst_channel_info)(struct dp_ctrl *dp_ctrl, + enum dp_stream_id strm, + u32 ch_start_slot, u32 ch_tot_slots); + void (*set_sim_mode)(struct dp_ctrl *dp_ctrl, bool en); + int (*setup_misr)(struct dp_ctrl *dp_ctrl); + int (*read_misr)(struct dp_ctrl *dp_ctrl, struct dp_misr40_data *data); +}; + +struct dp_ctrl_in { + struct device *dev; + struct dp_panel *panel; + struct dp_aux *aux; + struct dp_link *link; + struct dp_parser *parser; + struct dp_power *power; + struct dp_catalog_ctrl *catalog; + struct dp_pll *pll; +}; + +struct dp_ctrl *dp_ctrl_get(struct dp_ctrl_in *in); +void dp_ctrl_put(struct dp_ctrl *dp_ctrl); + +#endif /* _DP_CTRL_H_ */ diff --git a/msm/dp/dp_debug.c b/msm/dp/dp_debug.c new file mode 100644 index 000000000..2add356ac --- /dev/null +++ b/msm/dp/dp_debug.c @@ -0,0 +1,2598 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)) +#include +#else +#include +#endif +#include + +#include "dp_power.h" +#include "dp_catalog.h" +#include "dp_aux.h" +#include "dp_debug.h" +#include "drm/drm_connector.h" +#include "sde_connector.h" +#include "dp_display.h" +#include "dp_pll.h" +#include "dp_hpd.h" +#include "dp_mst_sim.h" +#include "dp_mst_drm.h" + +#define DEBUG_NAME "drm_dp" + +struct dp_debug_private { + struct dentry *root; + + u32 dpcd_offset; + u32 dpcd_size; + + u32 mst_con_id; + u32 mst_edid_idx; + bool hotplug; + u32 sim_mode; + + char exe_mode[SZ_32]; + char reg_dump[SZ_32]; + + struct dp_hpd *hpd; + struct dp_link *link; + struct dp_panel *panel; + struct dp_aux *aux; + struct dp_catalog *catalog; + struct drm_connector **connector; + struct device *dev; + struct dp_debug dp_debug; + struct dp_parser *parser; + struct dp_ctrl *ctrl; + struct dp_pll *pll; + struct dp_display *display; + struct mutex lock; + struct dp_aux_bridge *sim_bridge; +}; + +static int dp_debug_sim_hpd_cb(void *arg, bool hpd, bool hpd_irq) +{ + struct dp_debug_private *debug = arg; + int vdo = 0; + + if (hpd_irq) { + vdo |= BIT(7); + + if (hpd) + vdo |= BIT(8); + + return debug->hpd->simulate_attention(debug->hpd, vdo); + } else { + return debug->hpd->simulate_connect(debug->hpd, hpd); + } +} + +static int dp_debug_attach_sim_bridge(struct dp_debug_private *debug) +{ + int ret; + + if (!debug->sim_bridge) { + ret = dp_sim_create_bridge(debug->dev, &debug->sim_bridge); + if (ret) + return ret; + + if (debug->sim_bridge->register_hpd) + debug->sim_bridge->register_hpd(debug->sim_bridge, + dp_debug_sim_hpd_cb, debug); + } + + dp_sim_update_port_num(debug->sim_bridge, 1); + + return 0; +} + +static void dp_debug_enable_sim_mode(struct dp_debug_private *debug, + u32 mode_mask) +{ + /* return if mode is already enabled */ + if ((debug->sim_mode & mode_mask) == mode_mask) + return; + + /* create bridge if not yet */ + if 
(dp_debug_attach_sim_bridge(debug)) + return; + + /* switch to bridge mode */ + if (!debug->sim_mode) + debug->aux->set_sim_mode(debug->aux, debug->sim_bridge); + + /* update sim mode */ + debug->sim_mode |= mode_mask; + dp_sim_set_sim_mode(debug->sim_bridge, debug->sim_mode); +} + +static void dp_debug_disable_sim_mode(struct dp_debug_private *debug, + u32 mode_mask) +{ + /* return if mode is already disabled */ + if (!(debug->sim_mode & mode_mask)) + return; + + /* update sim mode */ + debug->sim_mode &= ~mode_mask; + dp_sim_set_sim_mode(debug->sim_bridge, debug->sim_mode); + + dp_sim_update_port_num(debug->sim_bridge, 0); + + /* switch to normal mode */ + if (!debug->sim_mode) + debug->aux->set_sim_mode(debug->aux, NULL); +} + +static ssize_t dp_debug_write_edid(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + u8 *buf = NULL, *buf_t = NULL, *edid = NULL; + const int char_to_nib = 2; + size_t edid_size = 0; + size_t size = 0, edid_buf_index = 0; + ssize_t rc = count; + + if (!debug) + return -ENODEV; + + mutex_lock(&debug->lock); + + if (*ppos) + goto bail; + + size = min_t(size_t, count, SZ_1K); + + buf = kzalloc(size, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(buf)) { + rc = -ENOMEM; + goto bail; + } + + if (copy_from_user(buf, user_buff, size)) + goto bail; + + edid_size = size / char_to_nib; + buf_t = buf; + size = edid_size; + + edid = kzalloc(size, GFP_KERNEL); + if (!edid) + goto bail; + + while (size--) { + char t[3]; + int d; + + memcpy(t, buf_t, sizeof(char) * char_to_nib); + t[char_to_nib] = '\0'; + + if (kstrtoint(t, 16, &d)) { + DP_ERR("kstrtoint error\n"); + goto bail; + } + + edid[edid_buf_index++] = d; + buf_t += char_to_nib; + } + + dp_debug_enable_sim_mode(debug, DP_SIM_MODE_EDID); + dp_mst_clear_edid_cache(debug->display); + dp_sim_update_port_edid(debug->sim_bridge, debug->mst_edid_idx, + edid, edid_size); +bail: + kfree(buf); + kfree(edid); + + 
mutex_unlock(&debug->lock); + return rc; +} + +static ssize_t dp_debug_write_dpcd(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + u8 *buf = NULL, *buf_t = NULL, *dpcd = NULL; + const int char_to_nib = 2; + size_t dpcd_size = 0; + size_t size = 0, dpcd_buf_index = 0; + ssize_t rc = count; + char offset_ch[5]; + u32 offset, data_len; + + if (!debug) + return -ENODEV; + + mutex_lock(&debug->lock); + + if (*ppos) + goto bail; + + size = min_t(size_t, count, SZ_2K); + + if (size < 4) + goto bail; + + buf = kzalloc(size, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(buf)) { + rc = -ENOMEM; + goto bail; + } + + if (copy_from_user(buf, user_buff, size)) + goto bail; + + memcpy(offset_ch, buf, 4); + offset_ch[4] = '\0'; + + if (kstrtoint(offset_ch, 16, &offset)) { + DP_ERR("offset kstrtoint error\n"); + goto bail; + } + debug->dpcd_offset = offset; + + size -= 4; + if (size < char_to_nib) + goto bail; + + dpcd_size = size / char_to_nib; + data_len = dpcd_size; + buf_t = buf + 4; + + dpcd = kzalloc(dpcd_size, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(dpcd)) { + rc = -ENOMEM; + goto bail; + } + + while (dpcd_size--) { + char t[3]; + int d; + + memcpy(t, buf_t, sizeof(char) * char_to_nib); + t[char_to_nib] = '\0'; + + if (kstrtoint(t, 16, &d)) { + DP_ERR("kstrtoint error\n"); + goto bail; + } + + dpcd[dpcd_buf_index++] = d; + + buf_t += char_to_nib; + } + + /* + * if link training status registers are reprogramed, + * read link training status from simulator, otherwise + * read link training status from real aux channel. 
+ */ + if (offset <= DP_LANE0_1_STATUS && + offset + dpcd_buf_index > DP_LANE0_1_STATUS) + dp_debug_enable_sim_mode(debug, + DP_SIM_MODE_DPCD_READ | DP_SIM_MODE_LINK_TRAIN); + else + dp_debug_enable_sim_mode(debug, DP_SIM_MODE_DPCD_READ); + + dp_sim_write_dpcd_reg(debug->sim_bridge, + dpcd, dpcd_buf_index, offset); + debug->dpcd_size = dpcd_buf_index; + +bail: + kfree(buf); + kfree(dpcd); + + mutex_unlock(&debug->lock); + return rc; +} + +static ssize_t dp_debug_read_dpcd(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char *buf; + int const buf_size = SZ_4K; + u32 offset = 0; + u32 len = 0; + u8 *dpcd; + + if (!debug || !debug->aux) + return -ENODEV; + + if (*ppos) + return 0; + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&debug->lock); + dpcd = kzalloc(buf_size, GFP_KERNEL); + if (!dpcd) + goto bail; + + /* + * In simulation mode, this function returns the last written DPCD node. 
+ * For a real monitor plug in, it dumps the first byte at the last written DPCD address + * unless the address is 0, in which case the first 20 bytes are dumped + */ + if (debug->dp_debug.sim_mode) { + dp_sim_read_dpcd_reg(debug->sim_bridge, dpcd, debug->dpcd_size, debug->dpcd_offset); + } else { + if (debug->dpcd_offset) { + debug->dpcd_size = 1; + if (drm_dp_dpcd_read(debug->aux->drm_aux, debug->dpcd_offset, dpcd, + debug->dpcd_size) != 1) + goto bail; + } else { + debug->dpcd_size = sizeof(debug->panel->dpcd); + memcpy(dpcd, debug->panel->dpcd, debug->dpcd_size); + } + } + + len += scnprintf(buf + len, buf_size - len, "%04x: ", debug->dpcd_offset); + + while (offset < debug->dpcd_size) + len += scnprintf(buf + len, buf_size - len, "%02x ", dpcd[offset++]); + + kfree(dpcd); + + len = min_t(size_t, count, len); + if (!copy_to_user(user_buff, buf, len)) + *ppos += len; + +bail: + mutex_unlock(&debug->lock); + kfree(buf); + + return len; +} + +static ssize_t dp_debug_read_crc(struct file *file, char __user *user_buff, size_t count, + loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char *buf; + int const buf_size = SZ_4K; + u32 len = 0; + u16 src_crc[3] = {0}; + u16 sink_crc[3] = {0}; + struct dp_misr40_data misr40 = {0}; + u32 retries = 2; + struct drm_connector *drm_conn; + struct sde_connector *sde_conn; + struct dp_panel *panel; + int i; + int rc; + + if (!debug || !debug->aux) + return -ENODEV; + + if (*ppos) + return 0; + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&debug->lock); + + if (!debug->panel || !debug->ctrl) + goto bail; + + if (debug->panel->mst_state) { + drm_conn = drm_connector_lookup((*debug->connector)->dev, NULL, debug->mst_con_id); + if (!drm_conn) { + DP_ERR("connector %u not in mst list\n", debug->mst_con_id); + goto bail; + } + + sde_conn = to_sde_connector(drm_conn); + panel = sde_conn->drv_panel; + drm_connector_put(drm_conn); + + if (!panel) + goto bail; + } else { + 
panel = debug->panel; + } + + if (!panel->pclk_on) + goto bail; + + panel->get_sink_crc(panel, sink_crc); + if (!(sink_crc[0] + sink_crc[1] + sink_crc[2])) { + panel->sink_crc_enable(panel, true); + mutex_unlock(&debug->lock); + msleep(30); + mutex_lock(&debug->lock); + panel->get_sink_crc(panel, sink_crc); + } + + panel->get_src_crc(panel, src_crc); + + len += scnprintf(buf + len, buf_size - len, "FRAME_CRC:\nSource vs Sink\n"); + + len += scnprintf(buf + len, buf_size - len, "CRC_R: %04X %04X\n", src_crc[0], sink_crc[0]); + len += scnprintf(buf + len, buf_size - len, "CRC_G: %04X %04X\n", src_crc[1], sink_crc[1]); + len += scnprintf(buf + len, buf_size - len, "CRC_B: %04X %04X\n", src_crc[2], sink_crc[2]); + + debug->ctrl->setup_misr(debug->ctrl); + + while (retries--) { + mutex_unlock(&debug->lock); + msleep(30); + mutex_lock(&debug->lock); + + rc = debug->ctrl->read_misr(debug->ctrl, &misr40); + if (rc != -EAGAIN) + break; + } + + len += scnprintf(buf + len, buf_size - len, "\nMISR40:\nCTLR vs PHY\n"); + for (i = 0; i < 4; i++) { + len += scnprintf(buf + len, buf_size - len, "Lane%d %08X%08X %08X%08X\n", i, + misr40.ctrl_misr[2 * i], misr40.ctrl_misr[(2 * i) + 1], + misr40.phy_misr[2 * i], misr40.phy_misr[(2 * i) + 1]); + } + + len = min_t(size_t, count, len); + if (!copy_to_user(user_buff, buf, len)) + *ppos += len; + +bail: + mutex_unlock(&debug->lock); + kfree(buf); + + return len; +} + +static ssize_t dp_debug_write_hpd(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + int const hpd_data_mask = 0x7; + int hpd = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &hpd) != 0) + goto end; + + hpd &= hpd_data_mask; + debug->hotplug = !!(hpd & 
BIT(0)); + + debug->dp_debug.psm_enabled = !!(hpd & BIT(1)); + + /* + * print hotplug value as this code is executed + * only while running in debug mode which is manually + * triggered by a tester or a script. + */ + DP_INFO("%s\n", debug->hotplug ? "[CONNECT]" : "[DISCONNECT]"); + + debug->hpd->simulate_connect(debug->hpd, debug->hotplug); +end: + return len; +} + +static ssize_t dp_debug_write_edid_modes(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + struct dp_panel *panel; + char buf[SZ_32]; + size_t len = 0; + int hdisplay = 0, vdisplay = 0, vrefresh = 0, aspect_ratio; + + if (!debug) + return -ENODEV; + + if (*ppos) + goto end; + + panel = debug->panel; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_32 - 1); + if (copy_from_user(buf, user_buff, len)) + goto clear; + + buf[len] = '\0'; + + if (sscanf(buf, "%d %d %d %d", &hdisplay, &vdisplay, &vrefresh, + &aspect_ratio) != 4) + goto clear; + + if (!hdisplay || !vdisplay || !vrefresh) + goto clear; + + panel->mode_override = true; + panel->hdisplay = hdisplay; + panel->vdisplay = vdisplay; + panel->vrefresh = vrefresh; + panel->aspect_ratio = aspect_ratio; + goto end; +clear: + DP_DEBUG("clearing debug modes\n"); + panel->mode_override = false; +end: + return len; +} + +static ssize_t dp_debug_write_edid_modes_mst(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + struct drm_connector *connector; + struct sde_connector *sde_conn; + struct dp_panel *panel = NULL; + char buf[SZ_512]; + char *read_buf; + size_t len = 0; + + int hdisplay = 0, vdisplay = 0, vrefresh = 0, aspect_ratio = 0; + int con_id = 0, offset = 0, debug_en = 0; + + if (!debug) + return -ENODEV; + + mutex_lock(&debug->lock); + + if (*ppos) + goto end; + + len = min_t(size_t, count, SZ_512 - 1); + if (copy_from_user(buf, user_buff, len)) + goto 
end; + + buf[len] = '\0'; + read_buf = buf; + + while (sscanf(read_buf, "%d %d %d %d %d %d%n", &debug_en, &con_id, + &hdisplay, &vdisplay, &vrefresh, &aspect_ratio, + &offset) == 6) { + connector = drm_connector_lookup((*debug->connector)->dev, + NULL, con_id); + if (connector) { + sde_conn = to_sde_connector(connector); + panel = sde_conn->drv_panel; + if (panel && sde_conn->mst_port) { + panel->mode_override = debug_en; + panel->hdisplay = hdisplay; + panel->vdisplay = vdisplay; + panel->vrefresh = vrefresh; + panel->aspect_ratio = aspect_ratio; + } else { + DP_ERR("connector id %d is not mst\n", con_id); + } + drm_connector_put(connector); + } else { + DP_ERR("invalid connector id %d\n", con_id); + } + + read_buf += offset; + } +end: + mutex_unlock(&debug->lock); + return len; +} + +static ssize_t dp_debug_write_mst_con_id(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + struct drm_connector *connector; + struct sde_connector *sde_conn; + struct drm_dp_mst_port *mst_port; + struct dp_panel *dp_panel; + char buf[SZ_32]; + size_t len = 0; + int con_id = 0, status; + + if (!debug) + return -ENODEV; + + mutex_lock(&debug->lock); + + if (*ppos) + goto end; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_32 - 1); + if (copy_from_user(buf, user_buff, len)) + goto clear; + + buf[len] = '\0'; + + if (sscanf(buf, "%d %d", &con_id, &status) != 2) + goto end; + + if (!con_id) + goto clear; + + connector = drm_connector_lookup((*debug->connector)->dev, + NULL, con_id); + if (!connector) { + DP_ERR("invalid connector id %u\n", con_id); + goto end; + } + + sde_conn = to_sde_connector(connector); + + if (!sde_conn->drv_panel || !sde_conn->mst_port) { + DP_ERR("invalid connector state %d\n", con_id); + goto out; + } + + debug->mst_con_id = con_id; + + if (status == connector_status_unknown) + goto out; + + if (status == connector_status_connected) + DP_INFO("plug 
mst connector %d\n", con_id); + else if (status == connector_status_disconnected) + DP_INFO("unplug mst connector %d\n", con_id); + + mst_port = sde_conn->mst_port; + dp_panel = sde_conn->drv_panel; + if (!dp_panel) + goto out; + + if (debug->dp_debug.sim_mode) + dp_sim_update_port_status(debug->sim_bridge, mst_port->port_num, status); + else + dp_panel->mst_hide = (status == connector_status_disconnected); + + drm_kms_helper_hotplug_event(connector->dev); + +out: + drm_connector_put(connector); + goto end; +clear: + DP_DEBUG("clearing mst_con_id\n"); + debug->mst_con_id = 0; +end: + mutex_unlock(&debug->lock); + return len; +} + +static ssize_t dp_debug_write_mst_con_add(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_32]; + size_t len = 0; + const int dp_en = BIT(3), hpd_high = BIT(7), hpd_irq = BIT(8); + int vdo = dp_en | hpd_high | hpd_irq; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_32 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + debug->dp_debug.mst_sim_add_con = true; + debug->hpd->simulate_attention(debug->hpd, vdo); +end: + return len; +} + +static ssize_t dp_debug_write_mst_con_remove(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; + char buf[SZ_32]; + size_t len = 0; + int con_id = 0; + bool in_list = false; + const int dp_en = BIT(3), hpd_high = BIT(7), hpd_irq = BIT(8); + int vdo = dp_en | hpd_high | hpd_irq; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_32 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + buf[len] = '\0'; + + if (sscanf(buf, "%d", &con_id) != 1) { + len = 0; 
+ goto end; + } + + if (!con_id) + goto end; + + drm_connector_list_iter_begin((*debug->connector)->dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->base.id == con_id) { + in_list = true; + break; + } + } + drm_connector_list_iter_end(&conn_iter); + + if (!in_list) { + DRM_ERROR("invalid connector id %u\n", con_id); + goto end; + } + + debug->dp_debug.mst_sim_remove_con = true; + debug->dp_debug.mst_sim_remove_con_id = con_id; + debug->hpd->simulate_attention(debug->hpd, vdo); +end: + return len; +} + +static ssize_t dp_debug_mmrm_clk_cb_write(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + struct dss_clk_mmrm_cb mmrm_cb_data; + struct mmrm_client_notifier_data notifier_data; + struct dp_display *dp_display; + int cb_type; + + if (!debug) + return -ENODEV; + if (*ppos) + return 0; + + dp_display = debug->display; + + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + return 0; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &cb_type) != 0) + return 0; + if (cb_type != MMRM_CLIENT_RESOURCE_VALUE_CHANGE) + return 0; + + notifier_data.cb_type = MMRM_CLIENT_RESOURCE_VALUE_CHANGE; + mmrm_cb_data.phandle = (void *)dp_display; + notifier_data.pvt_data = (void *)&mmrm_cb_data; + + dp_display_mmrm_callback(¬ifier_data); + + return len; +} + +static ssize_t dp_debug_bw_code_write(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + u32 max_bw_code = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + return 0; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &max_bw_code) != 0) + return 0; + + if 
(!is_link_rate_valid(max_bw_code)) { + DP_ERR("Unsupported bw code %d\n", max_bw_code); + return len; + } + debug->panel->max_bw_code = max_bw_code; + DP_DEBUG("max_bw_code: %d\n", max_bw_code); + + return len; +} + +static ssize_t dp_debug_mst_mode_read(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[64]; + ssize_t len; + + len = scnprintf(buf, sizeof(buf), + "mst_mode = %d, mst_state = %d\n", + debug->parser->has_mst, + debug->panel->mst_state); + + return simple_read_from_buffer(user_buff, count, ppos, buf, len); +} + +static ssize_t dp_debug_mst_mode_write(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + u32 mst_mode = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + return 0; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &mst_mode) != 0) + return 0; + + debug->parser->has_mst = mst_mode ? 
true : false; + DP_DEBUG("mst_enable: %d\n", mst_mode); + + return len; +} + +static ssize_t dp_debug_max_pclk_khz_write(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + u32 max_pclk = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + return 0; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &max_pclk) != 0) + return 0; + + if (max_pclk > debug->parser->max_pclk_khz) + DP_ERR("requested: %d, max_pclk_khz:%d\n", max_pclk, + debug->parser->max_pclk_khz); + else + debug->dp_debug.max_pclk_khz = max_pclk; + + DP_DEBUG("max_pclk_khz: %d\n", max_pclk); + + return len; +} + +static ssize_t dp_debug_max_pclk_khz_read(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char *buf; + u32 len = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + buf = kzalloc(SZ_4K, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(buf)) + return -ENOMEM; + + len += snprintf(buf + len, (SZ_4K - len), + "max_pclk_khz = %d, org: %d\n", + debug->dp_debug.max_pclk_khz, + debug->parser->max_pclk_khz); + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) { + kfree(buf); + return -EFAULT; + } + + *ppos += len; + kfree(buf); + return len; +} + +static ssize_t dp_debug_mst_sideband_mode_write(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + int mst_sideband_mode = 0; + u32 mst_port_cnt = 0; + + if (!debug) + return -ENODEV; + + mutex_lock(&debug->lock); + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + return -EFAULT; + + buf[len] = '\0'; + + if (sscanf(buf, "%d %u", 
&mst_sideband_mode, &mst_port_cnt) != 2) { + DP_ERR("invalid input\n"); + goto bail; + } + + if (!mst_port_cnt) + mst_port_cnt = 1; + + debug->mst_edid_idx = 0; + + if (mst_sideband_mode) + dp_debug_disable_sim_mode(debug, DP_SIM_MODE_MST); + else + dp_debug_enable_sim_mode(debug, DP_SIM_MODE_MST); + + dp_sim_update_port_num(debug->sim_bridge, mst_port_cnt); + + buf[0] = !mst_sideband_mode; + dp_sim_write_dpcd_reg(debug->sim_bridge, buf, 1, DP_MSTM_CAP); + + DP_DEBUG("mst_sideband_mode: %d port_cnt:%d\n", + mst_sideband_mode, mst_port_cnt); + +bail: + mutex_unlock(&debug->lock); + return count; +} + +static ssize_t dp_debug_tpg_write(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + u32 tpg_pattern = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + goto bail; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &tpg_pattern) != 0) + goto bail; + + DP_DEBUG("tpg_pattern: %d\n", tpg_pattern); + + if (tpg_pattern == debug->dp_debug.tpg_pattern) + goto bail; + + if (debug->panel) + debug->panel->tpg_config(debug->panel, tpg_pattern); + + debug->dp_debug.tpg_pattern = tpg_pattern; +bail: + return len; +} + +static ssize_t dp_debug_write_exe_mode(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_32]; + size_t len = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + len = min_t(size_t, count, SZ_32 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + buf[len] = '\0'; + + if (sscanf(buf, "%3s", debug->exe_mode) != 1) + goto end; + + if (strcmp(debug->exe_mode, "hw") && + strcmp(debug->exe_mode, "sw") && + strcmp(debug->exe_mode, "all")) + goto end; + + 
debug->catalog->set_exe_mode(debug->catalog, debug->exe_mode); +end: + return len; +} + +static ssize_t dp_debug_read_connected(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + u32 len = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + len += snprintf(buf, SZ_8, "%d\n", debug->hpd->hpd_high); + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) + return -EFAULT; + + *ppos += len; + return len; +} + +static ssize_t dp_debug_write_hdcp(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + int hdcp = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &hdcp) != 0) + goto end; + + debug->dp_debug.hdcp_disabled = !hdcp; +end: + return len; +} + +static ssize_t dp_debug_read_hdcp(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + u32 len = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + len = sizeof(debug->dp_debug.hdcp_status); + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, debug->dp_debug.hdcp_status, len)) + return -EFAULT; + + *ppos += len; + return len; +} + +static int dp_debug_check_buffer_overflow(int rc, int *max_size, int *len) +{ + if (rc >= *max_size) { + DP_ERR("buffer overflow\n"); + return -EINVAL; + } + *len += rc; + *max_size = SZ_4K - *len; + + return 0; +} + +static ssize_t dp_debug_read_edid_modes(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char *buf; + u32 len = 0, ret = 0, max_size = 
SZ_4K; + int rc = 0; + struct drm_connector *connector; + struct drm_display_mode *mode; + + if (!debug) { + DP_ERR("invalid data\n"); + rc = -ENODEV; + goto error; + } + + connector = *debug->connector; + + if (!connector) { + DP_ERR("connector is NULL\n"); + rc = -EINVAL; + goto error; + } + + if (*ppos) + goto error; + + buf = kzalloc(SZ_4K, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(buf)) { + rc = -ENOMEM; + goto error; + } + + mutex_lock(&connector->dev->mode_config.mutex); + list_for_each_entry(mode, &connector->modes, head) { + ret = snprintf(buf + len, max_size, + "%s %d %d %d %d %d 0x%x\n", + mode->name, drm_mode_vrefresh(mode), mode->picture_aspect_ratio, + mode->htotal, mode->vtotal, mode->clock, mode->flags); + if (dp_debug_check_buffer_overflow(ret, &max_size, &len)) + break; + } + mutex_unlock(&connector->dev->mode_config.mutex); + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) { + kfree(buf); + rc = -EFAULT; + goto error; + } + + *ppos += len; + kfree(buf); + + return len; +error: + return rc; +} + +static ssize_t dp_debug_read_edid_modes_mst(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char *buf; + u32 len = 0, ret = 0, max_size = SZ_4K; + struct drm_connector *connector; + struct drm_display_mode *mode; + + if (!debug) { + DP_ERR("invalid data\n"); + return -ENODEV; + } + + if (*ppos) + return 0; + + connector = drm_connector_lookup((*debug->connector)->dev, + NULL, debug->mst_con_id); + if (!connector) { + DP_ERR("connector %u not in mst list\n", debug->mst_con_id); + return 0; + } + + buf = kzalloc(SZ_4K, GFP_KERNEL); + if (!buf) + goto clean; + + mutex_lock(&connector->dev->mode_config.mutex); + list_for_each_entry(mode, &connector->modes, head) { + ret = snprintf(buf + len, max_size, + "%s %d %d %d %d %d 0x%x\n", + mode->name, drm_mode_vrefresh(mode), + mode->picture_aspect_ratio, mode->htotal, + mode->vtotal, mode->clock, mode->flags); 
+ if (dp_debug_check_buffer_overflow(ret, &max_size, &len)) + break; + } + mutex_unlock(&connector->dev->mode_config.mutex); + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) { + len = -EFAULT; + goto clean; + } + + *ppos += len; +clean: + kfree(buf); + drm_connector_put(connector); + return len; +} + +static ssize_t dp_debug_read_mst_con_id(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char *buf; + u32 len = 0, ret = 0, max_size = SZ_4K; + int rc = 0; + + if (!debug) { + DP_ERR("invalid data\n"); + rc = -ENODEV; + goto error; + } + + if (*ppos) + goto error; + + buf = kzalloc(SZ_4K, GFP_KERNEL); + if (!buf) { + rc = -ENOMEM; + goto error; + } + + ret = snprintf(buf, max_size, "%u\n", debug->mst_con_id); + len += ret; + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) { + kfree(buf); + rc = -EFAULT; + goto error; + } + + *ppos += len; + kfree(buf); + + return len; +error: + return rc; +} + +static ssize_t dp_debug_read_mst_conn_info(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; + struct sde_connector *sde_conn; + struct dp_display *display; + char *buf; + u32 len = 0, ret = 0, max_size = SZ_4K; + int rc = 0; + + if (!debug) { + DP_ERR("invalid data\n"); + rc = -ENODEV; + goto error; + } + + if (*ppos) + goto error; + + buf = kzalloc(SZ_4K, GFP_KERNEL); + if (!buf) { + rc = -ENOMEM; + goto error; + } + + drm_connector_list_iter_begin((*debug->connector)->dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + sde_conn = to_sde_connector(connector); + display = sde_conn->display; + if (!sde_conn->mst_port || + display->base_connector != (*debug->connector)) + continue; + ret = scnprintf(buf + len, max_size, + "conn name:%s, conn id:%d state:%d\n", 
+ connector->name, connector->base.id, + connector->status); + if (dp_debug_check_buffer_overflow(ret, &max_size, &len)) + break; + } + drm_connector_list_iter_end(&conn_iter); + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) { + kfree(buf); + rc = -EFAULT; + goto error; + } + + *ppos += len; + kfree(buf); + + return len; +error: + return rc; +} + +static ssize_t dp_debug_read_info(struct file *file, char __user *user_buff, + size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char *buf; + u32 len = 0, rc = 0; + u32 max_size = SZ_4K; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + buf = kzalloc(SZ_4K, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(buf)) + return -ENOMEM; + + rc = snprintf(buf + len, max_size, "\tstate=0x%x\n", debug->aux->state); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "\tlink_rate=%u\n", + debug->panel->link_info.rate); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "\tnum_lanes=%u\n", + debug->panel->link_info.num_lanes); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "\tresolution=%dx%d@%dHz\n", + debug->panel->pinfo.h_active, + debug->panel->pinfo.v_active, + debug->panel->pinfo.refresh_rate); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "\tpclock=%dKHz\n", + debug->panel->pinfo.pixel_clk_khz); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "\tbpp=%d\n", + debug->panel->pinfo.bpp); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + /* Link Information */ + rc = snprintf(buf + len, max_size, "\ttest_req=%s\n", + dp_link_get_test_name(debug->link->sink_request)); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + 
goto error; + + rc = snprintf(buf + len, max_size, + "\tlane_count=%d\n", debug->link->link_params.lane_count); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\tbw_code=%d\n", debug->link->link_params.bw_code); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\tv_level=%d\n", debug->link->phy_params.v_level); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "\tp_level=%d\n", debug->link->phy_params.p_level); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) + goto error; + + *ppos += len; + + kfree(buf); + return len; +error: + kfree(buf); + return -EINVAL; +} + +static ssize_t dp_debug_bw_code_read(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char *buf; + u32 len = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + buf = kzalloc(SZ_4K, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(buf)) + return -ENOMEM; + + len += snprintf(buf + len, (SZ_4K - len), + "max_bw_code = %d\n", debug->panel->max_bw_code); + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) { + kfree(buf); + return -EFAULT; + } + + *ppos += len; + kfree(buf); + return len; +} + +static ssize_t dp_debug_tpg_read(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + u32 len = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + len += scnprintf(buf, SZ_8, "%d\n", debug->dp_debug.tpg_pattern); + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) + return -EFAULT; + + *ppos += len; + return len; +} + +static int dp_debug_print_hdr_params_to_buf(struct 
drm_connector *connector, + char *buf, u32 size) +{ + int rc; + u32 i, len = 0, max_size = size; + struct sde_connector *c_conn; + struct sde_connector_state *c_state; + struct drm_msm_ext_hdr_metadata *hdr; + + c_conn = to_sde_connector(connector); + c_state = to_sde_connector_state(connector->state); + + hdr = &c_state->hdr_meta; + + rc = snprintf(buf + len, max_size, + "============SINK HDR PARAMETERS===========\n"); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "eotf = %d\n", + c_conn->hdr_eotf); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "type_one = %d\n", + c_conn->hdr_metadata_type_one); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "hdr_plus_app_ver = %d\n", + c_conn->hdr_plus_app_ver); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "max_luminance = %d\n", + c_conn->hdr_max_luminance); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "avg_luminance = %d\n", + c_conn->hdr_avg_luminance); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "min_luminance = %d\n", + c_conn->hdr_min_luminance); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, + "============VIDEO HDR PARAMETERS===========\n"); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "hdr_state = %d\n", hdr->hdr_state); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "hdr_supported = %d\n", + hdr->hdr_supported); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "eotf = 
%d\n", hdr->eotf); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "white_point_x = %d\n", + hdr->white_point_x); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "white_point_y = %d\n", + hdr->white_point_y); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "max_luminance = %d\n", + hdr->max_luminance); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "min_luminance = %d\n", + hdr->min_luminance); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "max_content_light_level = %d\n", + hdr->max_content_light_level); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "min_content_light_level = %d\n", + hdr->max_average_light_level); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + for (i = 0; i < HDR_PRIMARIES_COUNT; i++) { + rc = snprintf(buf + len, max_size, "primaries_x[%d] = %d\n", + i, hdr->display_primaries_x[i]); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + + rc = snprintf(buf + len, max_size, "primaries_y[%d] = %d\n", + i, hdr->display_primaries_y[i]); + if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) + goto error; + } + + if (hdr->hdr_plus_payload && hdr->hdr_plus_payload_size) { + u32 rowsize = 16, rem; + struct sde_connector_dyn_hdr_metadata *dhdr = + &c_state->dyn_hdr_meta; + + /** + * Do not use user pointer from hdr->hdr_plus_payload directly, + * instead use kernel's cached copy of payload data. 
+ */ + for (i = 0; i < dhdr->dynamic_hdr_payload_size; i += rowsize) { + rc = snprintf(buf + len, max_size, "DHDR: "); + if (dp_debug_check_buffer_overflow(rc, &max_size, + &len)) + goto error; + + rem = dhdr->dynamic_hdr_payload_size - i; + rc = hex_dump_to_buffer(&dhdr->dynamic_hdr_payload[i], + min(rowsize, rem), rowsize, 1, buf + len, + max_size, false); + if (dp_debug_check_buffer_overflow(rc, &max_size, + &len)) + goto error; + + rc = snprintf(buf + len, max_size, "\n"); + if (dp_debug_check_buffer_overflow(rc, &max_size, + &len)) + goto error; + } + } + + return len; +error: + return -EOVERFLOW; +} + +static ssize_t dp_debug_read_hdr(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char *buf = NULL; + u32 len = 0; + u32 max_size = SZ_4K; + struct drm_connector *connector; + + if (!debug) { + DP_ERR("invalid data\n"); + return -ENODEV; + } + + connector = *debug->connector; + + if (!connector) { + DP_ERR("connector is NULL\n"); + return -EINVAL; + } + + if (*ppos) + return 0; + + buf = kzalloc(max_size, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(buf)) + return -ENOMEM; + + len = dp_debug_print_hdr_params_to_buf(connector, buf, max_size); + if (len == -EOVERFLOW) { + kfree(buf); + return len; + } + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) { + kfree(buf); + return -EFAULT; + } + + *ppos += len; + kfree(buf); + return len; +} + +static ssize_t dp_debug_read_hdr_mst(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char *buf = NULL; + u32 len = 0, max_size = SZ_4K; + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; + bool in_list = false; + + if (!debug) { + DP_ERR("invalid data\n"); + return -ENODEV; + } + + drm_connector_list_iter_begin((*debug->connector)->dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if 
(connector->base.id == debug->mst_con_id) { + in_list = true; + break; + } + } + drm_connector_list_iter_end(&conn_iter); + + if (!in_list) { + DP_ERR("connector %u not in mst list\n", debug->mst_con_id); + return -EINVAL; + } + + if (!connector) { + DP_ERR("connector is NULL\n"); + return -EINVAL; + } + + if (*ppos) + return 0; + + + buf = kzalloc(max_size, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(buf)) + return -ENOMEM; + + len = dp_debug_print_hdr_params_to_buf(connector, buf, max_size); + if (len == -EOVERFLOW) { + kfree(buf); + return len; + } + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) { + kfree(buf); + return -EFAULT; + } + + *ppos += len; + kfree(buf); + return len; +} + +static void dp_debug_set_sim_mode(struct dp_debug_private *debug, bool sim) +{ + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; + struct sde_connector *sde_conn; + struct dp_display *display; + struct dp_panel *panel; + + if (sim) { + debug->dp_debug.sim_mode = true; + dp_debug_enable_sim_mode(debug, DP_SIM_MODE_ALL); + } else { + if (debug->hotplug) { + DP_WARN("sim mode off before hotplug disconnect\n"); + debug->hpd->simulate_connect(debug->hpd, false); + debug->hotplug = false; + } + debug->aux->abort(debug->aux, true); + debug->ctrl->abort(debug->ctrl, true); + + debug->dp_debug.sim_mode = false; + + debug->mst_edid_idx = 0; + dp_debug_disable_sim_mode(debug, DP_SIM_MODE_ALL); + } + + /* clear override settings in panel */ + drm_connector_list_iter_begin((*debug->connector)->dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + sde_conn = to_sde_connector(connector); + display = sde_conn->display; + if (display->base_connector == (*debug->connector)) { + panel = sde_conn->drv_panel; + if (panel) { + panel->mode_override = false; + panel->mst_hide = false; + } + } + } + drm_connector_list_iter_end(&conn_iter); + + /* + * print simulation status as this code is executed + * only while running in debug mode 
which is manually + * triggered by a tester or a script. + */ + DP_INFO("%s\n", sim ? "[ON]" : "[OFF]"); +} + +static ssize_t dp_debug_write_sim(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + int sim; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + mutex_lock(&debug->lock); + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &sim) != 0) + goto end; + + dp_debug_set_sim_mode(debug, sim); +end: + mutex_unlock(&debug->lock); + return len; +} + +static ssize_t dp_debug_write_attention(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + int vdo; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &vdo) != 0) + goto end; + + debug->hpd->simulate_attention(debug->hpd, vdo); +end: + return len; +} + +static ssize_t dp_debug_write_dump(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_32]; + size_t len = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_32 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + buf[len] = '\0'; + + if (sscanf(buf, "%31s", debug->reg_dump) != 1) + goto end; + + /* qfprom register dump not supported */ + if (!strcmp(debug->reg_dump, "qfprom_physical")) + strlcpy(debug->reg_dump, "clear", sizeof(debug->reg_dump)); +end: + return len; +} + +static ssize_t 
dp_debug_read_dump(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + int rc = 0; + struct dp_debug_private *debug = file->private_data; + u8 *buf = NULL; + u32 len = 0; + char prefix[SZ_32]; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + if (!debug->hpd->hpd_high || !strlen(debug->reg_dump)) + goto end; + + rc = debug->catalog->get_reg_dump(debug->catalog, + debug->reg_dump, &buf, &len); + if (rc) + goto end; + + snprintf(prefix, sizeof(prefix), "%s: ", debug->reg_dump); + print_hex_dump_debug(prefix, DUMP_PREFIX_NONE, + 16, 4, buf, len, false); + + len = min_t(size_t, count, len); + if (copy_to_user(user_buff, buf, len)) + return -EFAULT; + + *ppos += len; +end: + return len; +} + +static const struct file_operations dp_debug_fops = { + .open = simple_open, + .read = dp_debug_read_info, +}; + +static const struct file_operations edid_modes_fops = { + .open = simple_open, + .read = dp_debug_read_edid_modes, + .write = dp_debug_write_edid_modes, +}; + +static const struct file_operations edid_modes_mst_fops = { + .open = simple_open, + .read = dp_debug_read_edid_modes_mst, + .write = dp_debug_write_edid_modes_mst, +}; + +static const struct file_operations mst_conn_info_fops = { + .open = simple_open, + .read = dp_debug_read_mst_conn_info, +}; + +static const struct file_operations mst_con_id_fops = { + .open = simple_open, + .read = dp_debug_read_mst_con_id, + .write = dp_debug_write_mst_con_id, +}; + +static const struct file_operations mst_con_add_fops = { + .open = simple_open, + .write = dp_debug_write_mst_con_add, +}; + +static const struct file_operations mst_con_remove_fops = { + .open = simple_open, + .write = dp_debug_write_mst_con_remove, +}; + +static const struct file_operations hpd_fops = { + .open = simple_open, + .write = dp_debug_write_hpd, +}; + +static const struct file_operations edid_fops = { + .open = simple_open, + .write = dp_debug_write_edid, +}; + +static const struct file_operations dpcd_fops = 
{ + .open = simple_open, + .write = dp_debug_write_dpcd, + .read = dp_debug_read_dpcd, +}; + +static const struct file_operations crc_fops = { + .open = simple_open, + .read = dp_debug_read_crc, +}; + +static const struct file_operations connected_fops = { + .open = simple_open, + .read = dp_debug_read_connected, +}; + +static const struct file_operations bw_code_fops = { + .open = simple_open, + .read = dp_debug_bw_code_read, + .write = dp_debug_bw_code_write, +}; +static const struct file_operations exe_mode_fops = { + .open = simple_open, + .write = dp_debug_write_exe_mode, +}; + +static const struct file_operations tpg_fops = { + .open = simple_open, + .read = dp_debug_tpg_read, + .write = dp_debug_tpg_write, +}; + +static const struct file_operations hdr_fops = { + .open = simple_open, + .read = dp_debug_read_hdr, +}; + +static const struct file_operations hdr_mst_fops = { + .open = simple_open, + .read = dp_debug_read_hdr_mst, +}; + +static const struct file_operations sim_fops = { + .open = simple_open, + .write = dp_debug_write_sim, +}; + +static const struct file_operations attention_fops = { + .open = simple_open, + .write = dp_debug_write_attention, +}; + +static const struct file_operations dump_fops = { + .open = simple_open, + .write = dp_debug_write_dump, + .read = dp_debug_read_dump, +}; + +static const struct file_operations mst_mode_fops = { + .open = simple_open, + .write = dp_debug_mst_mode_write, + .read = dp_debug_mst_mode_read, +}; + +static const struct file_operations mst_sideband_mode_fops = { + .open = simple_open, + .write = dp_debug_mst_sideband_mode_write, +}; + +static const struct file_operations max_pclk_khz_fops = { + .open = simple_open, + .write = dp_debug_max_pclk_khz_write, + .read = dp_debug_max_pclk_khz_read, +}; + +static const struct file_operations hdcp_fops = { + .open = simple_open, + .write = dp_debug_write_hdcp, + .read = dp_debug_read_hdcp, +}; + +static const struct file_operations mmrm_clk_cb_fops = { + .open = 
simple_open, + .write = dp_debug_mmrm_clk_cb_write, +}; + +static int dp_debug_init_mst(struct dp_debug_private *debug, struct dentry *dir) +{ + int rc = 0; + struct dentry *file; + + file = debugfs_create_file("mst_con_id", 0644, dir, + debug, &mst_con_id_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs create mst_con_id failed, rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + file = debugfs_create_file("mst_con_info", 0644, dir, + debug, &mst_conn_info_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs create mst_conn_info failed, rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + file = debugfs_create_file("mst_con_add", 0644, dir, + debug, &mst_con_add_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DRM_ERROR("[%s] debugfs create mst_con_add failed, rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + file = debugfs_create_file("mst_con_remove", 0644, dir, + debug, &mst_con_remove_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DRM_ERROR("[%s] debugfs create mst_con_remove failed, rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + file = debugfs_create_file("mst_mode", 0644, dir, + debug, &mst_mode_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs mst_mode failed, rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + file = debugfs_create_file("mst_sideband_mode", 0644, dir, + debug, &mst_sideband_mode_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs mst_sideband_mode failed, rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + debugfs_create_u32("mst_edid_idx", 0644, dir, &debug->mst_edid_idx); + + return rc; +} + +static int dp_debug_init_link(struct dp_debug_private *debug, + struct dentry *dir) +{ + int rc = 0; + struct dentry *file; + + file = debugfs_create_file("max_bw_code", 0644, dir, + debug, &bw_code_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs max_bw_code failed, 
rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + file = debugfs_create_file("max_pclk_khz", 0644, dir, + debug, &max_pclk_khz_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs max_pclk_khz failed, rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + debugfs_create_u32("max_lclk_khz", 0644, dir, &debug->parser->max_lclk_khz); + + debugfs_create_u32("lane_count", 0644, dir, &debug->panel->lane_count); + + debugfs_create_u32("link_bw_code", 0644, dir, &debug->panel->link_bw_code); + + debugfs_create_u32("max_bpp", 0644, dir, &debug->panel->max_supported_bpp); + + file = debugfs_create_file("mmrm_clk_cb", 0644, dir, debug, &mmrm_clk_cb_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs mmrm_clk_cb failed, rc=%d\n", DEBUG_NAME, rc); + return rc; + } + + return rc; +} + +static int dp_debug_init_hdcp(struct dp_debug_private *debug, + struct dentry *dir) +{ + int rc = 0; + + debugfs_create_bool("hdcp_wait_sink_sync", 0644, dir, &debug->dp_debug.hdcp_wait_sink_sync); + + debugfs_create_bool("force_encryption", 0644, dir, &debug->dp_debug.force_encryption); + + return rc; +} + +static int dp_debug_init_sink_caps(struct dp_debug_private *debug, + struct dentry *dir) +{ + int rc = 0; + struct dentry *file; + + file = debugfs_create_file("edid_modes", 0644, dir, + debug, &edid_modes_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs create edid_modes failed, rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + file = debugfs_create_file("edid_modes_mst", 0644, dir, + debug, &edid_modes_mst_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs create edid_modes_mst failed, rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + file = debugfs_create_file("edid", 0644, dir, + debug, &edid_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs edid failed, rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + file = debugfs_create_file("dpcd", 
0644, dir, + debug, &dpcd_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs dpcd failed, rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + file = debugfs_create_file("crc", 0644, dir, debug, &crc_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs crc failed, rc=%d\n", DEBUG_NAME, rc); + return rc; + } + + return rc; +} + +static int dp_debug_init_status(struct dp_debug_private *debug, + struct dentry *dir) +{ + int rc = 0; + struct dentry *file; + + file = debugfs_create_file("dp_debug", 0444, dir, + debug, &dp_debug_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs create file failed, rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + file = debugfs_create_file("connected", 0444, dir, + debug, &connected_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs connected failed, rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + file = debugfs_create_file("hdr", 0400, dir, debug, &hdr_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs hdr failed, rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + file = debugfs_create_file("hdr_mst", 0400, dir, debug, &hdr_mst_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs hdr_mst failed, rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + file = debugfs_create_file("hdcp", 0644, dir, debug, &hdcp_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs hdcp failed, rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + return rc; +} + +static int dp_debug_init_sim(struct dp_debug_private *debug, struct dentry *dir) +{ + int rc = 0; + struct dentry *file; + + file = debugfs_create_file("hpd", 0644, dir, debug, &hpd_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs hpd failed, rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + file = debugfs_create_file("sim", 0644, dir, debug, &sim_fops); + if 
(IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs sim failed, rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + file = debugfs_create_file("attention", 0644, dir, + debug, &attention_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs attention failed, rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + debugfs_create_bool("skip_uevent", 0644, dir, &debug->dp_debug.skip_uevent); + + debugfs_create_bool("force_multi_func", 0644, dir, &debug->hpd->force_multi_func); + + return rc; +} + +static int dp_debug_init_dsc_fec(struct dp_debug_private *debug, + struct dentry *dir) +{ + int rc = 0; + + debugfs_create_bool("dsc_feature_enable", 0644, dir, &debug->parser->dsc_feature_enable); + + debugfs_create_bool("fec_feature_enable", 0644, dir, &debug->parser->fec_feature_enable); + + return rc; +} + +static int dp_debug_init_tpg(struct dp_debug_private *debug, struct dentry *dir) +{ + int rc = 0; + struct dentry *file; + + file = debugfs_create_file("tpg_ctrl", 0644, dir, + debug, &tpg_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs tpg failed, rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + return rc; +} + +static int dp_debug_init_reg_dump(struct dp_debug_private *debug, + struct dentry *dir) +{ + int rc = 0; + struct dentry *file; + + file = debugfs_create_file("exe_mode", 0644, dir, + debug, &exe_mode_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs register failed, rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + file = debugfs_create_file("dump", 0644, dir, + debug, &dump_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs dump failed, rc=%d\n", + DEBUG_NAME, rc); + return rc; + } + + return rc; +} + +static int dp_debug_init_feature_toggle(struct dp_debug_private *debug, + struct dentry *dir) +{ + int rc = 0; + + debugfs_create_bool("ssc_enable", 0644, dir, &debug->pll->ssc_en); + + debugfs_create_bool("widebus_mode", 
0644, dir, &debug->parser->has_widebus); + + return rc; +} + +static int dp_debug_init_configs(struct dp_debug_private *debug, + struct dentry *dir) +{ + int rc = 0; + + debugfs_create_ulong("connect_notification_delay_ms", 0644, dir, + &debug->dp_debug.connect_notification_delay_ms); + + debug->dp_debug.connect_notification_delay_ms = + DEFAULT_CONNECT_NOTIFICATION_DELAY_MS; + + debugfs_create_u32("disconnect_delay_ms", 0644, dir, &debug->dp_debug.disconnect_delay_ms); + + debug->dp_debug.disconnect_delay_ms = DEFAULT_DISCONNECT_DELAY_MS; + + return rc; + +} + +static int dp_debug_init(struct dp_debug *dp_debug) +{ + int rc = 0; + struct dp_debug_private *debug = container_of(dp_debug, + struct dp_debug_private, dp_debug); + struct dentry *dir; + + if (!IS_ENABLED(CONFIG_DEBUG_FS)) { + DP_WARN("Not creating debug root dir."); + debug->root = NULL; + return 0; + } + + dir = debugfs_create_dir(DEBUG_NAME, NULL); + if (IS_ERR_OR_NULL(dir)) { + if (!dir) + rc = -EINVAL; + else + rc = PTR_ERR(dir); + DP_ERR("[%s] debugfs create dir failed, rc = %d\n", + DEBUG_NAME, rc); + goto error; + } + + debug->root = dir; + + rc = dp_debug_init_status(debug, dir); + if (rc) + goto error_remove_dir; + + rc = dp_debug_init_sink_caps(debug, dir); + if (rc) + goto error_remove_dir; + + rc = dp_debug_init_mst(debug, dir); + if (rc) + goto error_remove_dir; + + rc = dp_debug_init_link(debug, dir); + if (rc) + goto error_remove_dir; + + rc = dp_debug_init_hdcp(debug, dir); + if (rc) + goto error_remove_dir; + + rc = dp_debug_init_sim(debug, dir); + if (rc) + goto error_remove_dir; + + rc = dp_debug_init_dsc_fec(debug, dir); + if (rc) + goto error_remove_dir; + + rc = dp_debug_init_tpg(debug, dir); + if (rc) + goto error_remove_dir; + + rc = dp_debug_init_reg_dump(debug, dir); + if (rc) + goto error_remove_dir; + + rc = dp_debug_init_feature_toggle(debug, dir); + if (rc) + goto error_remove_dir; + + rc = dp_debug_init_configs(debug, dir); + if (rc) + goto error_remove_dir; + + return 0; + 
+error_remove_dir: + debugfs_remove_recursive(dir); +error: + return rc; +} + +static void dp_debug_abort(struct dp_debug *dp_debug) +{ + struct dp_debug_private *debug; + + if (!dp_debug) + return; + + debug = container_of(dp_debug, struct dp_debug_private, dp_debug); + + mutex_lock(&debug->lock); + // disconnect has already been handled. so clear hotplug + debug->hotplug = false; + dp_debug_set_sim_mode(debug, false); + mutex_unlock(&debug->lock); +} + +static void dp_debug_set_mst_con(struct dp_debug *dp_debug, int con_id) +{ + struct dp_debug_private *debug; + + if (!dp_debug) + return; + + debug = container_of(dp_debug, struct dp_debug_private, dp_debug); + mutex_lock(&debug->lock); + debug->mst_con_id = con_id; + mutex_unlock(&debug->lock); + DP_INFO("Selecting mst connector %d\n", con_id); +} + +struct dp_debug *dp_debug_get(struct dp_debug_in *in) +{ + int rc = 0; + struct dp_debug_private *debug; + struct dp_debug *dp_debug; + + if (!in->dev || !in->panel || !in->hpd || !in->link || + !in->catalog || !in->ctrl || !in->pll) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + debug = devm_kzalloc(in->dev, sizeof(*debug), GFP_KERNEL); + if (!debug) { + rc = -ENOMEM; + goto error; + } + + debug->hpd = in->hpd; + debug->link = in->link; + debug->panel = in->panel; + debug->aux = in->aux; + debug->dev = in->dev; + debug->connector = in->connector; + debug->catalog = in->catalog; + debug->parser = in->parser; + debug->ctrl = in->ctrl; + debug->pll = in->pll; + debug->display = in->display; + + dp_debug = &debug->dp_debug; + + mutex_init(&debug->lock); + + rc = dp_debug_init(dp_debug); + if (rc) { + devm_kfree(in->dev, debug); + goto error; + } + + debug->aux->access_lock = &debug->lock; + dp_debug->abort = dp_debug_abort; + dp_debug->set_mst_con = dp_debug_set_mst_con; + + dp_debug->max_pclk_khz = debug->parser->max_pclk_khz; + + return dp_debug; +error: + return ERR_PTR(rc); +} + +static int dp_debug_deinit(struct dp_debug *dp_debug) +{ + struct 
dp_debug_private *debug; + + if (!dp_debug) + return -EINVAL; + + debug = container_of(dp_debug, struct dp_debug_private, dp_debug); + + debugfs_remove_recursive(debug->root); + + if (debug->sim_bridge) + dp_sim_destroy_bridge(debug->sim_bridge); + + return 0; +} + +void dp_debug_put(struct dp_debug *dp_debug) +{ + struct dp_debug_private *debug; + + if (!dp_debug) + return; + + debug = container_of(dp_debug, struct dp_debug_private, dp_debug); + + dp_debug_deinit(dp_debug); + + mutex_destroy(&debug->lock); + + devm_kfree(debug->dev, debug); +} diff --git a/msm/dp/dp_debug.h b/msm/dp/dp_debug.h new file mode 100644 index 000000000..1a5816f58 --- /dev/null +++ b/msm/dp/dp_debug.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_DEBUG_H_ +#define _DP_DEBUG_H_ + +#include "dp_panel.h" +#include "dp_ctrl.h" +#include "dp_link.h" +#include "dp_aux.h" +#include "dp_display.h" +#include "dp_pll.h" +#include + +#define DP_IPC_LOG(fmt, ...) \ + do { \ + void *ipc_logging_context = get_ipc_log_context(); \ + ipc_log_string(ipc_logging_context, fmt, ##__VA_ARGS__); \ + } while (0) + +#define DP_DEBUG(fmt, ...) \ + do { \ + DP_IPC_LOG("[d][%-4d]"fmt, current->pid, ##__VA_ARGS__); \ + DP_DEBUG_V(fmt, ##__VA_ARGS__); \ + } while (0) + +#define DP_INFO(fmt, ...) \ + do { \ + DP_IPC_LOG("[i][%-4d]"fmt, current->pid, ##__VA_ARGS__); \ + DP_INFO_V(fmt, ##__VA_ARGS__); \ + } while (0) + +#define DP_WARN(fmt, ...) \ + do { \ + DP_IPC_LOG("[w][%-4d]"fmt, current->pid, ##__VA_ARGS__); \ + DP_WARN_V(fmt, ##__VA_ARGS__); \ + } while (0) + +#define DP_ERR(fmt, ...) \ + do { \ + DP_IPC_LOG("[e][%-4d]"fmt, current->pid, ##__VA_ARGS__); \ + DP_ERR_V(fmt, ##__VA_ARGS__); \ + } while (0) + +#define DP_DEBUG_V(fmt, ...) 
\ + do { \ + if (drm_debug_enabled(DRM_UT_KMS)) \ + DRM_DEBUG("[msm-dp-debug][%-4d]"fmt, current->pid, \ + ##__VA_ARGS__); \ + else \ + pr_debug("[drm:%s][msm-dp-debug][%-4d]"fmt, __func__,\ + current->pid, ##__VA_ARGS__); \ + } while (0) + +#define DP_INFO_V(fmt, ...) \ + do { \ + if (drm_debug_enabled(DRM_UT_KMS)) \ + DRM_INFO("[msm-dp-info][%-4d]"fmt, current->pid, \ + ##__VA_ARGS__); \ + else \ + pr_info("[drm:%s][msm-dp-info][%-4d]"fmt, __func__, \ + current->pid, ##__VA_ARGS__); \ + } while (0) + +#define DP_WARN_V(fmt, ...) \ + pr_warn("[drm:%s][msm-dp-warn][%-4d]"fmt, __func__, \ + current->pid, ##__VA_ARGS__) + +#define DP_WARN_RATELIMITED_V(fmt, ...) \ + pr_warn_ratelimited("[drm:%s][msm-dp-warn][%-4d]"fmt, __func__, \ + current->pid, ##__VA_ARGS__) + +#define DP_ERR_V(fmt, ...) \ + pr_err("[drm:%s][msm-dp-err][%-4d]"fmt, __func__, \ + current->pid, ##__VA_ARGS__) + +#define DP_ERR_RATELIMITED_V(fmt, ...) \ + pr_err_ratelimited("[drm:%s][msm-dp-err][%-4d]"fmt, __func__, \ + current->pid, ##__VA_ARGS__) + +#define DEFAULT_DISCONNECT_DELAY_MS 0 +#define MAX_DISCONNECT_DELAY_MS 10000 +#define DEFAULT_CONNECT_NOTIFICATION_DELAY_MS 150 +#define MAX_CONNECT_NOTIFICATION_DELAY_MS 5000 + +/** + * struct dp_debug + * @sim_mode: specifies whether sim mode enabled + * @psm_enabled: specifies whether psm enabled + * @hdcp_disabled: specifies if hdcp is disabled + * @hdcp_wait_sink_sync: used to wait for sink synchronization before HDCP auth + * @tpg_pattern: selects tpg pattern on the controller + * @max_pclk_khz: max pclk supported + * @force_encryption: enable/disable forced encryption for HDCP 2.2 + * @skip_uevent: skip hotplug uevent to the user space + * @hdcp_status: string holding hdcp status information + * @mst_sim_add_con: specifies whether new sim connector is to be added + * @mst_sim_remove_con: specifies whether sim connector is to be removed + * @mst_sim_remove_con_id: specifies id of sim connector to be removed + * @connect_notification_delay_ms: time 
(in ms) to wait for any attention + * messages before sending the connect notification uevent + * @disconnect_delay_ms: time (in ms) to wait before turning off the mainlink + * in response to HPD low of cable disconnect event + */ +struct dp_debug { + bool sim_mode; + bool psm_enabled; + bool hdcp_disabled; + bool hdcp_wait_sink_sync; + u32 tpg_pattern; + u32 max_pclk_khz; + bool force_encryption; + bool skip_uevent; + char hdcp_status[SZ_128]; + bool mst_sim_add_con; + bool mst_sim_remove_con; + int mst_sim_remove_con_id; + unsigned long connect_notification_delay_ms; + u32 disconnect_delay_ms; + + void (*abort)(struct dp_debug *dp_debug); + void (*set_mst_con)(struct dp_debug *dp_debug, int con_id); +}; + +/** + * struct dp_debug_in + * @dev: device instance of the caller + * @panel: instance of panel module + * @hpd: instance of hpd module + * @link: instance of link module + * @aux: instance of aux module + * @connector: double pointer to display connector + * @catalog: instance of catalog module + * @parser: instance of parser module + * @ctrl: instance of controller module + * @pll: instance of pll module + * @display: instance of display module + */ +struct dp_debug_in { + struct device *dev; + struct dp_panel *panel; + struct dp_hpd *hpd; + struct dp_link *link; + struct dp_aux *aux; + struct drm_connector **connector; + struct dp_catalog *catalog; + struct dp_parser *parser; + struct dp_ctrl *ctrl; + struct dp_pll *pll; + struct dp_display *display; +}; + +/** + * dp_debug_get() - configure and get the DisplayPlot debug module data + * + * @in: input structure containing data to initialize the debug module + * return: pointer to allocated debug module data + * + * This function sets up the debug module and provides a way + * for debugfs input to be communicated with existing modules + */ +struct dp_debug *dp_debug_get(struct dp_debug_in *in); + +/** + * dp_debug_put() + * + * Cleans up dp_debug instance + * + * @dp_debug: instance of dp_debug + */ +void 
dp_debug_put(struct dp_debug *dp_debug); +#endif /* _DP_DEBUG_H_ */ diff --git a/msm/dp/dp_display.c b/msm/dp/dp_display.c new file mode 100644 index 000000000..503f8be56 --- /dev/null +++ b/msm/dp/dp_display.c @@ -0,0 +1,3986 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021-2024, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sde_connector.h" + +#include "msm_drv.h" +#include "dp_hpd.h" +#include "dp_parser.h" +#include "dp_power.h" +#include "dp_catalog.h" +#include "dp_aux.h" +#include "dp_link.h" +#include "dp_panel.h" +#include "dp_ctrl.h" +#include "dp_audio.h" +#include "dp_display.h" +#include "sde_hdcp.h" +#include "dp_debug.h" +#include "dp_pll.h" +#include "sde_dbg.h" + +#define DRM_DP_IPC_NUM_PAGES 10 +#define DP_MST_DEBUG(fmt, ...) DP_DEBUG(fmt, ##__VA_ARGS__) + +#define dp_display_state_show(x) { \ + DP_ERR("%s: state (0x%x): %s\n", x, dp->state, \ + dp_display_state_name(dp->state)); \ + SDE_EVT32_EXTERNAL(dp->state); } + +#define dp_display_state_warn(x) { \ + DP_WARN("%s: state (0x%x): %s\n", x, dp->state, \ + dp_display_state_name(dp->state)); \ + SDE_EVT32_EXTERNAL(dp->state); } + +#define dp_display_state_log(x) { \ + DP_DEBUG("%s: state (0x%x): %s\n", x, dp->state, \ + dp_display_state_name(dp->state)); \ + SDE_EVT32_EXTERNAL(dp->state); } + +#define dp_display_state_is(x) (dp->state & (x)) +#define dp_display_state_add(x) { \ + (dp->state |= (x)); \ + dp_display_state_log("add "#x); } +#define dp_display_state_remove(x) { \ + (dp->state &= ~(x)); \ + dp_display_state_log("remove "#x); } + +#define MAX_TMDS_CLOCK_HDMI_1_4 340000 + +enum dp_display_states { + DP_STATE_DISCONNECTED = 0, + DP_STATE_CONFIGURED = BIT(0), + DP_STATE_INITIALIZED = BIT(1), + DP_STATE_READY = BIT(2), + DP_STATE_CONNECTED = BIT(3), + 
DP_STATE_CONNECT_NOTIFIED = BIT(4), + DP_STATE_DISCONNECT_NOTIFIED = BIT(5), + DP_STATE_ENABLED = BIT(6), + DP_STATE_SUSPENDED = BIT(7), + DP_STATE_ABORTED = BIT(8), + DP_STATE_HDCP_ABORTED = BIT(9), + DP_STATE_SRC_PWRDN = BIT(10), + DP_STATE_TUI_ACTIVE = BIT(11), +}; + +static char *dp_display_state_name(enum dp_display_states state) +{ + static char buf[SZ_1K]; + u32 len = 0; + + memset(buf, 0, SZ_1K); + + if (state & DP_STATE_CONFIGURED) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "CONFIGURED"); + + if (state & DP_STATE_INITIALIZED) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "INITIALIZED"); + + if (state & DP_STATE_READY) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "READY"); + + if (state & DP_STATE_CONNECTED) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "CONNECTED"); + + if (state & DP_STATE_CONNECT_NOTIFIED) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "CONNECT_NOTIFIED"); + + if (state & DP_STATE_DISCONNECT_NOTIFIED) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "DISCONNECT_NOTIFIED"); + + if (state & DP_STATE_ENABLED) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "ENABLED"); + + if (state & DP_STATE_SUSPENDED) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "SUSPENDED"); + + if (state & DP_STATE_ABORTED) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "ABORTED"); + + if (state & DP_STATE_HDCP_ABORTED) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "HDCP_ABORTED"); + + if (state & DP_STATE_SRC_PWRDN) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "SRC_PWRDN"); + + if (state & DP_STATE_TUI_ACTIVE) + len += scnprintf(buf + len, sizeof(buf) - len, "|%s|", + "TUI_ACTIVE"); + + if (!strlen(buf)) + return "DISCONNECTED"; + + return buf; +} + +static struct dp_display *g_dp_display; +#define HPD_STRING_SIZE 30 + +struct dp_hdcp_dev { + void *fd; + struct sde_hdcp_ops *ops; + enum sde_hdcp_version ver; +}; + +struct dp_hdcp 
{ + void *data; + struct sde_hdcp_ops *ops; + + u32 source_cap; + + struct dp_hdcp_dev dev[HDCP_VERSION_MAX]; +}; + +struct dp_mst { + bool mst_active; + + bool drm_registered; + struct dp_mst_drm_cbs cbs; +}; + +struct dp_display_private { + char *name; + int irq; + + enum drm_connector_status cached_connector_status; + enum dp_display_states state; + enum dp_aux_switch_type switch_type; + + struct platform_device *pdev; + struct device_node *aux_switch_node; + bool aux_switch_ready; + struct dp_aux_bridge *aux_bridge; + struct dentry *root; + struct completion notification_comp; + struct completion attention_comp; + + struct dp_hpd *hpd; + struct dp_parser *parser; + struct dp_power *power; + struct dp_catalog *catalog; + struct dp_aux *aux; + struct dp_link *link; + struct dp_panel *panel; + struct dp_ctrl *ctrl; + struct dp_debug *debug; + struct dp_pll *pll; + + struct dp_panel *active_panels[DP_STREAM_MAX]; + struct dp_hdcp hdcp; + + struct dp_hpd_cb hpd_cb; + struct dp_display_mode mode; + struct dp_display dp_display; + struct msm_drm_private *priv; + + struct workqueue_struct *wq; + struct delayed_work hdcp_cb_work; + struct work_struct connect_work; + struct work_struct attention_work; + struct work_struct disconnect_work; + struct mutex session_lock; + struct mutex accounting_lock; + bool hdcp_delayed_off; + bool no_aux_switch; + + u32 active_stream_cnt; + struct dp_mst mst; + + u32 tot_dsc_blks_in_use; + u32 tot_lm_blks_in_use; + + bool process_hpd_connect; + struct dev_pm_qos_request pm_qos_req[NR_CPUS]; + bool pm_qos_requested; + + struct notifier_block usb_nb; +}; + +static const struct of_device_id dp_dt_match[] = { + {.compatible = "qcom,dp-display"}, + {} +}; + +static inline bool dp_display_is_hdcp_enabled(struct dp_display_private *dp) +{ + return dp->link->hdcp_status.hdcp_version && dp->hdcp.ops; +} + +static irqreturn_t dp_display_irq(int irq, void *dev_id) +{ + struct dp_display_private *dp = dev_id; + + if (!dp) { + DP_ERR("invalid 
data\n"); + return IRQ_NONE; + } + + /* DP HPD isr */ + if (dp->hpd->type == DP_HPD_LPHW) + dp->hpd->isr(dp->hpd); + + /* DP controller isr */ + dp->ctrl->isr(dp->ctrl); + + /* DP aux isr */ + dp->aux->isr(dp->aux); + + /* HDCP isr */ + if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->isr) { + if (dp->hdcp.ops->isr(dp->hdcp.data)) + DP_ERR("dp_hdcp_isr failed\n"); + } + + return IRQ_HANDLED; +} +static bool dp_display_is_ds_bridge(struct dp_panel *panel) +{ + return (panel->dpcd[DP_DOWNSTREAMPORT_PRESENT] & + DP_DWN_STRM_PORT_PRESENT); +} + +static bool dp_display_is_sink_count_zero(struct dp_display_private *dp) +{ + return dp_display_is_ds_bridge(dp->panel) && + (dp->link->sink_count.count == 0); +} + +static bool dp_display_is_ready(struct dp_display_private *dp) +{ + return dp->hpd->hpd_high && dp_display_state_is(DP_STATE_CONNECTED) && + !dp_display_is_sink_count_zero(dp) && + dp->hpd->alt_mode_cfg_done; +} + +static void dp_audio_enable(struct dp_display_private *dp, bool enable) +{ + struct dp_panel *dp_panel; + int idx; + + for (idx = DP_STREAM_0; idx < DP_STREAM_MAX; idx++) { + if (!dp->active_panels[idx]) + continue; + dp_panel = dp->active_panels[idx]; + + if (dp_panel->audio_supported) { + if (enable) { + dp_panel->audio->bw_code = + dp->link->link_params.bw_code; + dp_panel->audio->lane_count = + dp->link->link_params.lane_count; + dp_panel->audio->on(dp_panel->audio); + } else { + dp_panel->audio->off(dp_panel->audio, false); + } + } + } +} + +static void dp_display_qos_request(struct dp_display_private *dp, bool add_vote) +{ + struct device *cpu_dev; + int cpu = 0; + struct cpumask *cpu_mask; + u32 latency = dp->parser->qos_cpu_latency; + unsigned long mask = dp->parser->qos_cpu_mask; + + if (!dp->parser->qos_cpu_mask || (dp->pm_qos_requested == add_vote)) + return; + + cpu_mask = to_cpumask(&mask); + for_each_cpu(cpu, cpu_mask) { + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) { + SDE_DEBUG("%s: failed to get cpu%d device\n", __func__, cpu); + 
continue; + } + + if (add_vote) + dev_pm_qos_add_request(cpu_dev, &dp->pm_qos_req[cpu], + DEV_PM_QOS_RESUME_LATENCY, latency); + else + dev_pm_qos_remove_request(&dp->pm_qos_req[cpu]); + } + + SDE_EVT32_EXTERNAL(add_vote, mask, latency); + dp->pm_qos_requested = add_vote; +} + +static void dp_display_update_hdcp_status(struct dp_display_private *dp, + bool reset) +{ + if (reset) { + dp->link->hdcp_status.hdcp_state = HDCP_STATE_INACTIVE; + dp->link->hdcp_status.hdcp_version = HDCP_VERSION_NONE; + } + + memset(dp->debug->hdcp_status, 0, sizeof(dp->debug->hdcp_status)); + + snprintf(dp->debug->hdcp_status, sizeof(dp->debug->hdcp_status), + "%s: %s\ncaps: %d\n", + sde_hdcp_version(dp->link->hdcp_status.hdcp_version), + sde_hdcp_state_name(dp->link->hdcp_status.hdcp_state), + dp->hdcp.source_cap); +} + +static void dp_display_update_hdcp_info(struct dp_display_private *dp) +{ + void *fd = NULL; + struct dp_hdcp_dev *dev = NULL; + struct sde_hdcp_ops *ops = NULL; + int i = HDCP_VERSION_2P2; + + dp_display_update_hdcp_status(dp, true); + + dp->hdcp.data = NULL; + dp->hdcp.ops = NULL; + + if (dp->debug->hdcp_disabled || dp->debug->sim_mode) + return; + + while (i) { + dev = &dp->hdcp.dev[i]; + ops = dev->ops; + fd = dev->fd; + + i >>= 1; + + if (!(dp->hdcp.source_cap & dev->ver)) + continue; + + if (ops->sink_support(fd)) { + dp->hdcp.data = fd; + dp->hdcp.ops = ops; + dp->link->hdcp_status.hdcp_version = dev->ver; + break; + } + } + + DP_DEBUG("HDCP version supported: %s\n", + sde_hdcp_version(dp->link->hdcp_status.hdcp_version)); +} + +static void dp_display_check_source_hdcp_caps(struct dp_display_private *dp) +{ + int i; + struct dp_hdcp_dev *hdcp_dev = dp->hdcp.dev; + + if (dp->debug->hdcp_disabled) { + DP_DEBUG("hdcp disabled\n"); + return; + } + + for (i = 0; i < HDCP_VERSION_MAX; i++) { + struct dp_hdcp_dev *dev = &hdcp_dev[i]; + struct sde_hdcp_ops *ops = dev->ops; + void *fd = dev->fd; + + if (!fd || !ops) + continue; + + if (ops->set_mode && ops->set_mode(fd, 
dp->mst.mst_active)) + continue; + + if (!(dp->hdcp.source_cap & dev->ver) && + ops->feature_supported && + ops->feature_supported(fd)) + dp->hdcp.source_cap |= dev->ver; + } + + dp_display_update_hdcp_status(dp, false); +} + +static void dp_display_hdcp_register_streams(struct dp_display_private *dp) +{ + int rc; + size_t i; + struct sde_hdcp_ops *ops = dp->hdcp.ops; + void *data = dp->hdcp.data; + + if (dp_display_is_ready(dp) && dp->mst.mst_active && ops && + ops->register_streams){ + struct stream_info streams[DP_STREAM_MAX]; + int index = 0; + + DP_DEBUG("Registering all active panel streams with HDCP\n"); + for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) { + if (!dp->active_panels[i]) + continue; + streams[index].stream_id = i; + streams[index].virtual_channel = + dp->active_panels[i]->vcpi; + index++; + } + + if (index > 0) { + rc = ops->register_streams(data, index, streams); + if (rc) + DP_ERR("failed to register streams. rc = %d\n", + rc); + } + } +} + +static void dp_display_hdcp_deregister_stream(struct dp_display_private *dp, + enum dp_stream_id stream_id) +{ + if (dp->hdcp.ops->deregister_streams && dp->active_panels[stream_id]) { + struct stream_info stream = {stream_id, + dp->active_panels[stream_id]->vcpi}; + + DP_DEBUG("Deregistering stream within HDCP library\n"); + dp->hdcp.ops->deregister_streams(dp->hdcp.data, 1, &stream); + } +} + +static void dp_display_hdcp_process_delayed_off(struct dp_display_private *dp) +{ + if (dp->hdcp_delayed_off) { + if (dp->hdcp.ops && dp->hdcp.ops->off) + dp->hdcp.ops->off(dp->hdcp.data); + dp_display_update_hdcp_status(dp, true); + dp->hdcp_delayed_off = false; + } +} + +static int dp_display_hdcp_process_sink_sync(struct dp_display_private *dp) +{ + u8 sink_status = 0; + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY); + + if (dp->debug->hdcp_wait_sink_sync) { + drm_dp_dpcd_readb(dp->aux->drm_aux, DP_SINK_STATUS, + &sink_status); + sink_status &= (DP_RECEIVE_PORT_0_STATUS | + DP_RECEIVE_PORT_1_STATUS); + if (sink_status 
< 1) { + DP_DEBUG("Sink not synchronized. Queuing again then exiting\n"); + queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ); + return -EAGAIN; + } + /* + * Some sinks need more time to stabilize after synchronization + * and before it can handle an HDCP authentication request. + * Adding the delay for better interoperability. + */ + msleep(6000); + } + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT); + + return 0; +} + +static int dp_display_hdcp_start(struct dp_display_private *dp) +{ + if (dp->link->hdcp_status.hdcp_state != HDCP_STATE_INACTIVE) + return -EINVAL; + + dp_display_check_source_hdcp_caps(dp); + dp_display_update_hdcp_info(dp); + + if (dp_display_is_hdcp_enabled(dp)) { + if (dp->hdcp.ops && dp->hdcp.ops->on && + dp->hdcp.ops->on(dp->hdcp.data)) { + dp_display_update_hdcp_status(dp, true); + return 0; + } + } else { + dp_display_update_hdcp_status(dp, true); + return 0; + } + + return -EINVAL; +} + +static void dp_display_hdcp_print_auth_state(struct dp_display_private *dp) +{ + u32 hdcp_auth_state; + int rc; + + rc = dp->catalog->ctrl.read_hdcp_status(&dp->catalog->ctrl); + if (rc >= 0) { + hdcp_auth_state = (rc >> 20) & 0x3; + DP_DEBUG("hdcp auth state %d\n", hdcp_auth_state); + } +} + +static void dp_display_hdcp_process_state(struct dp_display_private *dp) +{ + struct dp_link_hdcp_status *status; + struct sde_hdcp_ops *ops; + void *data; + int rc = 0; + + status = &dp->link->hdcp_status; + + ops = dp->hdcp.ops; + data = dp->hdcp.data; + + if (status->hdcp_state != HDCP_STATE_AUTHENTICATED && + dp->debug->force_encryption && ops && ops->force_encryption) + ops->force_encryption(data, dp->debug->force_encryption); + + if (status->hdcp_state == HDCP_STATE_AUTHENTICATED) + dp_display_qos_request(dp, false); + else + dp_display_qos_request(dp, true); + + switch (status->hdcp_state) { + case HDCP_STATE_INACTIVE: + dp_display_hdcp_register_streams(dp); + if (dp->hdcp.ops && dp->hdcp.ops->authenticate) + rc = dp->hdcp.ops->authenticate(data); + if (!rc) + 
status->hdcp_state = HDCP_STATE_AUTHENTICATING; + break; + case HDCP_STATE_AUTH_FAIL: + if (dp_display_is_ready(dp) && + dp_display_state_is(DP_STATE_ENABLED)) { + if (ops && ops->on && ops->on(data)) { + dp_display_update_hdcp_status(dp, true); + return; + } + dp_display_hdcp_register_streams(dp); + if (ops && ops->reauthenticate) { + rc = ops->reauthenticate(data); + if (rc) + DP_ERR("failed rc=%d\n", rc); + } + status->hdcp_state = HDCP_STATE_AUTHENTICATING; + } else { + DP_DEBUG("not reauthenticating, cable disconnected\n"); + } + break; + default: + dp_display_hdcp_register_streams(dp); + break; + } +} + +static void dp_display_abort_hdcp(struct dp_display_private *dp, + bool abort) +{ + u8 i = HDCP_VERSION_2P2; + struct dp_hdcp_dev *dev = NULL; + + while (i) { + dev = &dp->hdcp.dev[i]; + i >>= 1; + if (!(dp->hdcp.source_cap & dev->ver)) + continue; + + dev->ops->abort(dev->fd, abort); + } +} + +static void dp_display_hdcp_cb_work(struct work_struct *work) +{ + struct dp_display_private *dp; + struct delayed_work *dw = to_delayed_work(work); + struct dp_link_hdcp_status *status; + int rc = 0; + + dp = container_of(dw, struct dp_display_private, hdcp_cb_work); + + if (!dp_display_state_is(DP_STATE_ENABLED | DP_STATE_CONNECTED) || + dp_display_state_is(DP_STATE_ABORTED | DP_STATE_HDCP_ABORTED)) + return; + + if (dp_display_state_is(DP_STATE_SUSPENDED)) { + DP_DEBUG("System suspending. 
Delay HDCP operations\n"); + queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ); + return; + } + + dp_display_hdcp_process_delayed_off(dp); + + rc = dp_display_hdcp_process_sink_sync(dp); + if (rc) + return; + + rc = dp_display_hdcp_start(dp); + if (!rc) + return; + + dp_display_hdcp_print_auth_state(dp); + + status = &dp->link->hdcp_status; + DP_DEBUG("%s: %s\n", sde_hdcp_version(status->hdcp_version), + sde_hdcp_state_name(status->hdcp_state)); + + dp_display_update_hdcp_status(dp, false); + + dp_display_hdcp_process_state(dp); +} + +static void dp_display_notify_hdcp_status_cb(void *ptr, + enum sde_hdcp_state state) +{ + struct dp_display_private *dp = ptr; + + if (!dp) { + DP_ERR("invalid input\n"); + return; + } + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, + dp->link->hdcp_status.hdcp_state); + + dp->link->hdcp_status.hdcp_state = state; + + queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ/4); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, + dp->link->hdcp_status.hdcp_state); +} + +static void dp_display_deinitialize_hdcp(struct dp_display_private *dp) +{ + if (!dp) { + DP_ERR("invalid input\n"); + return; + } + + sde_hdcp_1x_deinit(dp->hdcp.dev[HDCP_VERSION_1X].fd); + sde_dp_hdcp2p2_deinit(dp->hdcp.dev[HDCP_VERSION_2P2].fd); +} + +static int dp_display_initialize_hdcp(struct dp_display_private *dp) +{ + struct sde_hdcp_init_data hdcp_init_data; + struct dp_parser *parser; + void *fd; + int rc = 0; + + if (!dp) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + parser = dp->parser; + + hdcp_init_data.client_id = HDCP_CLIENT_DP; + hdcp_init_data.drm_aux = dp->aux->drm_aux; + hdcp_init_data.cb_data = (void *)dp; + hdcp_init_data.workq = dp->wq; + hdcp_init_data.sec_access = true; + hdcp_init_data.notify_status = dp_display_notify_hdcp_status_cb; + hdcp_init_data.dp_ahb = &parser->get_io(parser, "dp_ahb")->io; + hdcp_init_data.dp_aux = &parser->get_io(parser, "dp_aux")->io; + hdcp_init_data.dp_link = &parser->get_io(parser, "dp_link")->io; + 
hdcp_init_data.dp_p0 = &parser->get_io(parser, "dp_p0")->io; + hdcp_init_data.hdcp_io = &parser->get_io(parser, + "hdcp_physical")->io; + hdcp_init_data.revision = &dp->panel->link_info.revision; + hdcp_init_data.msm_hdcp_dev = dp->parser->msm_hdcp_dev; + + fd = sde_hdcp_1x_init(&hdcp_init_data); + if (IS_ERR_OR_NULL(fd)) { + DP_DEBUG("Error initializing HDCP 1.x\n"); + return -EINVAL; + } + + dp->hdcp.dev[HDCP_VERSION_1X].fd = fd; + dp->hdcp.dev[HDCP_VERSION_1X].ops = sde_hdcp_1x_get(fd); + dp->hdcp.dev[HDCP_VERSION_1X].ver = HDCP_VERSION_1X; + DP_INFO("HDCP 1.3 initialized\n"); + + fd = sde_dp_hdcp2p2_init(&hdcp_init_data); + if (IS_ERR_OR_NULL(fd)) { + DP_DEBUG("Error initializing HDCP 2.x\n"); + rc = -EINVAL; + goto error; + } + + dp->hdcp.dev[HDCP_VERSION_2P2].fd = fd; + dp->hdcp.dev[HDCP_VERSION_2P2].ops = sde_dp_hdcp2p2_get(fd); + dp->hdcp.dev[HDCP_VERSION_2P2].ver = HDCP_VERSION_2P2; + DP_INFO("HDCP 2.2 initialized\n"); + + return 0; +error: + sde_hdcp_1x_deinit(dp->hdcp.dev[HDCP_VERSION_1X].fd); + + return rc; +} + +static void dp_display_pause_audio(struct dp_display_private *dp, bool pause) +{ + struct dp_panel *dp_panel; + int idx; + + for (idx = DP_STREAM_0; idx < DP_STREAM_MAX; idx++) { + if (!dp->active_panels[idx]) + continue; + dp_panel = dp->active_panels[idx]; + + if (dp_panel->audio_supported) + dp_panel->audio->tui_active = pause; + } +} + +static int dp_display_pre_hw_release(void *data) +{ + struct dp_display_private *dp; + struct dp_display *dp_display = data; + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY); + + if (!dp_display) + return -EINVAL; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + mutex_lock(&dp->session_lock); + + dp_display_state_add(DP_STATE_TUI_ACTIVE); + cancel_work_sync(&dp->connect_work); + cancel_work_sync(&dp->attention_work); + cancel_work_sync(&dp->disconnect_work); + flush_workqueue(dp->wq); + + dp_display_pause_audio(dp, true); + disable_irq(dp->irq); + + mutex_unlock(&dp->session_lock); 

	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT);
	return 0;
}

/*
 * VM event: re-acquire DP hardware after a Trusted UI session ends.
 * Clears the TUI-active state, resumes audio on all active panels and
 * re-enables the DP interrupt. Returns 0, or -EINVAL without a handle.
 */
static int dp_display_post_hw_acquire(void *data)
{
	struct dp_display_private *dp;
	struct dp_display *dp_display = data;

	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY);

	if (!dp_display)
		return -EINVAL;

	dp = container_of(dp_display, struct dp_display_private, dp_display);

	mutex_lock(&dp->session_lock);

	dp_display_state_remove(DP_STATE_TUI_ACTIVE);
	dp_display_pause_audio(dp, false);
	enable_irq(dp->irq);

	mutex_unlock(&dp->session_lock);

	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT);
	return 0;
}


/*
 * Component-framework bind callback: attach this DP device to the DRM
 * device owned by the master and register the VM (TUI) event hooks.
 */
static int dp_display_bind(struct device *dev, struct device *master,
		void *data)
{
	int rc = 0;
	struct dp_display_private *dp;
	struct drm_device *drm;
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_vm_ops vm_event_ops = {
		.vm_pre_hw_release = dp_display_pre_hw_release,
		.vm_post_hw_acquire = dp_display_post_hw_acquire,
	};

	if (!dev || !pdev || !master) {
		DP_ERR("invalid param(s), dev %pK, pdev %pK, master %pK\n",
				dev, pdev, master);
		rc = -EINVAL;
		goto end;
	}

	drm = dev_get_drvdata(master);
	dp = platform_get_drvdata(pdev);
	if (!drm || !dp) {
		DP_ERR("invalid param(s), drm %pK, dp %pK\n",
				drm, dp);
		rc = -EINVAL;
		goto end;
	}

	dp->dp_display.drm_dev = drm;
	dp->priv = drm->dev_private;
	msm_register_vm_event(master, dev, &vm_event_ops,
			(void *)&dp->dp_display);
end:
	return rc;
}

/*
 * Component-framework unbind callback: release the power and AUX
 * clients (best effort) and tear down the HDCP library instances.
 */
static void dp_display_unbind(struct device *dev, struct device *master,
		void *data)
{
	struct dp_display_private *dp;
	struct platform_device *pdev = to_platform_device(dev);

	if (!dev || !pdev) {
		DP_ERR("invalid param(s)\n");
		return;
	}

	dp = platform_get_drvdata(pdev);
	if (!dp) {
		DP_ERR("Invalid params\n");
		return;
	}

	if (dp->power)
		(void)dp->power->power_client_deinit(dp->power);
	if (dp->aux)
		(void)dp->aux->drm_aux_deregister(dp->aux);
	dp_display_deinitialize_hdcp(dp);
}

static const
struct component_ops dp_display_comp_ops = { + .bind = dp_display_bind, + .unbind = dp_display_unbind, +}; + +static bool dp_display_send_hpd_event(struct dp_display_private *dp) +{ + struct drm_device *dev = NULL; + struct drm_connector *connector; + char name[HPD_STRING_SIZE], status[HPD_STRING_SIZE], + bpp[HPD_STRING_SIZE], pattern[HPD_STRING_SIZE]; + char *envp[5]; + struct dp_display *display; + int rc = 0; + + connector = dp->dp_display.base_connector; + display = &dp->dp_display; + + if (!connector) { + DP_ERR("connector not set\n"); + return false; + } + + connector->status = display->is_sst_connected ? connector_status_connected : + connector_status_disconnected; + + if (dp->cached_connector_status == connector->status) { + DP_DEBUG("connector status (%d) unchanged, skipping uevent\n", + dp->cached_connector_status); + return false; + } + + dp->cached_connector_status = connector->status; + + dev = connector->dev; + + if (dp->debug->skip_uevent) { + DP_INFO("skipping uevent\n"); + return false; + } + + snprintf(name, HPD_STRING_SIZE, "name=%s", connector->name); + snprintf(status, HPD_STRING_SIZE, "status=%s", + drm_get_connector_status_name(connector->status)); + snprintf(bpp, HPD_STRING_SIZE, "bpp=%d", + dp_link_bit_depth_to_bpp( + dp->link->test_video.test_bit_depth)); + snprintf(pattern, HPD_STRING_SIZE, "pattern=%d", + dp->link->test_video.test_video_pattern); + + DP_INFO("[%s]:[%s] [%s] [%s]\n", name, status, bpp, pattern); + envp[0] = name; + envp[1] = status; + envp[2] = bpp; + envp[3] = pattern; + envp[4] = NULL; + + rc = kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp); + DP_INFO("uevent %s: %d\n", rc ? 
"failure" : "success", rc); + + return true; +} + +static int dp_display_send_hpd_notification(struct dp_display_private *dp, bool skip_wait) +{ + int ret = 0; + bool hpd = !!dp_display_state_is(DP_STATE_CONNECTED); + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state, hpd); + + /* + * Send the notification only if there is any change. This check is + * necessary since it is possible that the connect_work may or may not + * skip sending the notification in order to respond to a pending + * attention message. Attention work thread will always attempt to + * send the notification after successfully handling the attention + * message. This check here will avoid any unintended duplicate + * notifications. + */ + if (dp_display_state_is(DP_STATE_CONNECT_NOTIFIED) && hpd) { + DP_DEBUG("connection notified already, skip notification\n"); + goto skip_wait; + } else if (dp_display_state_is(DP_STATE_DISCONNECT_NOTIFIED) && !hpd) { + DP_DEBUG("disonnect notified already, skip notification\n"); + goto skip_wait; + } + + dp->aux->state |= DP_STATE_NOTIFICATION_SENT; + + reinit_completion(&dp->notification_comp); + + if (!dp->mst.mst_active) { + dp->dp_display.is_sst_connected = hpd; + + if (!dp_display_send_hpd_event(dp)) + goto skip_wait; + } else { + dp->dp_display.is_sst_connected = false; + + if (!dp->mst.cbs.hpd) + goto skip_wait; + + dp->mst.cbs.hpd(&dp->dp_display, hpd); + } + + if (hpd) { + dp_display_state_add(DP_STATE_CONNECT_NOTIFIED); + dp_display_state_remove(DP_STATE_DISCONNECT_NOTIFIED); + } else { + dp_display_state_add(DP_STATE_DISCONNECT_NOTIFIED); + dp_display_state_remove(DP_STATE_CONNECT_NOTIFIED); + } + + /* + * Skip the wait if TUI is active considering that the user mode will + * not act on the notification until after the TUI session is over. 
+ */ + if (dp_display_state_is(DP_STATE_TUI_ACTIVE)) { + dp_display_state_log("[TUI is active, skipping wait]"); + goto skip_wait; + } + + if (skip_wait || (hpd && dp->mst.mst_active)) + goto skip_wait; + + if (!dp->mst.mst_active && + (!!dp_display_state_is(DP_STATE_ENABLED) == hpd)) + goto skip_wait; + + // wait 2 seconds + if (wait_for_completion_timeout(&dp->notification_comp, HZ * 2)) + goto skip_wait; + + //resend notification + if (dp->mst.mst_active) + dp->mst.cbs.hpd(&dp->dp_display, hpd); + else + dp_display_send_hpd_event(dp); + + // wait another 3 seconds + if (!wait_for_completion_timeout(&dp->notification_comp, HZ * 3)) { + DP_WARN("%s timeout\n", hpd ? "connect" : "disconnect"); + ret = -EINVAL; + } + +skip_wait: + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state, hpd, ret); + return ret; +} + +static void dp_display_update_mst_state(struct dp_display_private *dp, + bool state) +{ + dp->mst.mst_active = state; + dp->panel->mst_state = state; +} + +static void dp_display_mst_init(struct dp_display_private *dp) +{ + bool is_mst_receiver; + const unsigned long clear_mstm_ctrl_timeout_us = 100000; + u8 old_mstm_ctrl; + int ret; + + if (!dp->parser->has_mst || !dp->mst.drm_registered) { + DP_MST_DEBUG("mst not enabled. 
has_mst:%d, registered:%d\n", + dp->parser->has_mst, dp->mst.drm_registered); + return; + } + + is_mst_receiver = dp->panel->read_mst_cap(dp->panel); + + if (!is_mst_receiver) { + DP_MST_DEBUG("sink doesn't support mst\n"); + return; + } + + /* clear sink mst state */ + drm_dp_dpcd_readb(dp->aux->drm_aux, DP_MSTM_CTRL, &old_mstm_ctrl); + drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL, 0); + + /* add extra delay if MST state is not cleared */ + if (old_mstm_ctrl) { + DP_MST_DEBUG("MSTM_CTRL is not cleared, wait %luus\n", + clear_mstm_ctrl_timeout_us); + usleep_range(clear_mstm_ctrl_timeout_us, + clear_mstm_ctrl_timeout_us + 1000); + } + + ret = drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL, + DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); + if (ret < 0) { + DP_ERR("sink mst enablement failed\n"); + return; + } + + dp_display_update_mst_state(dp, true); +} + +static void dp_display_set_mst_mgr_state(struct dp_display_private *dp, + bool state) +{ + if (!dp->mst.mst_active) + return; + + if (dp->mst.cbs.set_mgr_state) + dp->mst.cbs.set_mgr_state(&dp->dp_display, state); + + DP_MST_DEBUG("mst_mgr_state: %d\n", state); +} + +static int dp_display_host_init(struct dp_display_private *dp) +{ + bool flip = false; + bool reset; + int rc = 0; + + if (dp_display_state_is(DP_STATE_INITIALIZED)) { + dp_display_state_log("[already initialized]"); + return rc; + } + + if (dp->hpd->orientation == ORIENTATION_CC2) + flip = true; + + reset = dp->debug->sim_mode ? 
false : !dp->hpd->multi_func; + + rc = dp->power->init(dp->power, flip); + if (rc) { + DP_WARN("Power init failed.\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_CASE1, dp->state); + return rc; + } + + dp->hpd->host_init(dp->hpd, &dp->catalog->hpd); + rc = dp->ctrl->init(dp->ctrl, flip, reset); + if (rc) { + DP_WARN("Ctrl init Failed.\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_CASE2, dp->state); + goto error_ctrl; + } + + enable_irq(dp->irq); + dp_display_abort_hdcp(dp, false); + + dp_display_state_add(DP_STATE_INITIALIZED); + + /* log this as it results from user action of cable connection */ + DP_INFO("[OK]\n"); + return rc; + +error_ctrl: + dp->hpd->host_deinit(dp->hpd, &dp->catalog->hpd); + dp->power->deinit(dp->power); + return rc; +} + +static int dp_display_host_ready(struct dp_display_private *dp) +{ + int rc = 0; + + if (!dp_display_state_is(DP_STATE_INITIALIZED)) { + rc = dp_display_host_init(dp); + if (rc) { + dp_display_state_show("[not initialized]"); + return rc; + } + } + + if (dp_display_state_is(DP_STATE_READY)) { + dp_display_state_log("[already ready]"); + return rc; + } + + /* + * Reset the aborted state for AUX and CTRL modules. This will + * allow these modules to execute normally in response to the + * cable connection event. + * + * One corner case still exists. While the execution flow ensures + * that cable disconnection flushes all pending work items on the DP + * workqueue, and waits for the user module to clean up the DP + * connection session, it is possible that the system delays can + * lead to timeouts in the connect path. As a result, the actual + * connection callback from user modules can come in late and can + * race against a subsequent connection event here which would have + * reset the aborted flags. There is no clear solution for this since + * the connect/disconnect notifications do not currently have any + * sessions IDs. 
 */
	dp->aux->abort(dp->aux, false);
	dp->ctrl->abort(dp->ctrl, false);

	dp->aux->init(dp->aux, dp->parser->aux_cfg);
	dp->panel->init(dp->panel);

	dp_display_state_add(DP_STATE_READY);
	/* log this as it results from user action of cable connection */
	DP_INFO("[OK]\n");
	return rc;
}

/*
 * Undo dp_display_host_ready(): drop the READY state and deinit AUX.
 * Safe to call in any state; logs and returns when there is nothing
 * to unready.
 */
static void dp_display_host_unready(struct dp_display_private *dp)
{
	if (!dp_display_state_is(DP_STATE_INITIALIZED)) {
		dp_display_state_warn("[not initialized]");
		return;
	}

	if (!dp_display_state_is(DP_STATE_READY)) {
		dp_display_state_show("[not ready]");
		return;
	}

	dp_display_state_remove(DP_STATE_READY);
	dp->aux->deinit(dp->aux);
	/* log this as it results from user action of cable disconnection */
	DP_INFO("[OK]\n");
}

/*
 * Power down the DP host block. Refuses while streams are still
 * active; forces an unready first if the link is still marked READY,
 * then tears down ctrl, HPD and power, and quiesces the IRQ and AUX.
 */
static void dp_display_host_deinit(struct dp_display_private *dp)
{
	/* streams still own the controller; defer the teardown */
	if (dp->active_stream_cnt) {
		SDE_EVT32_EXTERNAL(dp->state, dp->active_stream_cnt);
		DP_DEBUG("active stream present\n");
		return;
	}

	if (!dp_display_state_is(DP_STATE_INITIALIZED)) {
		dp_display_state_show("[not initialized]");
		return;
	}

	if (dp_display_state_is(DP_STATE_READY)) {
		DP_DEBUG("dp deinit before unready\n");
		dp_display_host_unready(dp);
	}

	/* block HDCP work before pulling the hardware out from under it */
	dp_display_abort_hdcp(dp, true);
	dp->ctrl->deinit(dp->ctrl);
	dp->hpd->host_deinit(dp->hpd, &dp->catalog->hpd);
	dp->power->deinit(dp->power);
	disable_irq(dp->irq);
	dp->aux->state = 0;

	dp_display_state_remove(DP_STATE_INITIALIZED);

	/* log this as it results from user action of cable dis-connection */
	DP_INFO("[OK]\n");
}

static bool dp_display_hpd_irq_pending(struct dp_display_private *dp)
{

	unsigned long wait_timeout_ms = 0;
	unsigned long t_out = 0;
	unsigned long wait_time = 0;

	do {
		/*
		 * If an IRQ HPD is pending, then do not send a connect notification.
		 * Once this work returns, the IRQ HPD would be processed and any
		 * required actions (such as link maintenance) would be done which
		 * will subsequently send the HPD notification.
To keep things simple, + * do this only for SST use-cases. MST use cases require additional + * care in order to handle the side-band communications as well. + * + * One of the main motivations for this is DP LL 1.4 CTS use case + * where it is possible that we could get a test request right after + * a connection, and the strict timing requriements of the test can + * only be met if we do not wait for the e2e connection to be set up. + */ + if (!dp->mst.mst_active && (work_busy(&dp->attention_work) == WORK_BUSY_PENDING)) { + SDE_EVT32_EXTERNAL(dp->state, 99, jiffies_to_msecs(t_out)); + DP_DEBUG("Attention pending, skip HPD notification\n"); + return true; + } + + /* + * If no IRQ HPD, delay the HPD connect notification for + * MAX_CONNECT_NOTIFICATION_DELAY_MS to see if sink generates any IRQ HPDs + * after the HPD high. Wait for + * MAX_CONNECT_NOTIFICATION_DELAY_MS to make sure any IRQ HPD from test + * requests aren't missed. + */ + reinit_completion(&dp->attention_comp); + wait_timeout_ms = min_t(unsigned long, dp->debug->connect_notification_delay_ms, + (unsigned long) MAX_CONNECT_NOTIFICATION_DELAY_MS - wait_time); + t_out = wait_for_completion_timeout(&dp->attention_comp, + msecs_to_jiffies(wait_timeout_ms)); + wait_time += (t_out == 0) ? 
wait_timeout_ms : t_out; + + } while ((wait_timeout_ms < wait_time) && (wait_time < MAX_CONNECT_NOTIFICATION_DELAY_MS)); + + DP_DEBUG("wait_timeout=%lu ms, time_waited=%lu ms\n", wait_timeout_ms, wait_time); + + return false; + +} + +static int dp_display_process_hpd_high(struct dp_display_private *dp) +{ + int rc = -EINVAL; + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state); + mutex_lock(&dp->session_lock); + + if (dp_display_state_is(DP_STATE_CONNECTED)) { + DP_DEBUG("dp already connected, skipping hpd high\n"); + mutex_unlock(&dp->session_lock); + return -EISCONN; + } + + dp_display_state_add(DP_STATE_CONNECTED); + + dp->dp_display.max_pclk_khz = min(dp->parser->max_pclk_khz, + dp->debug->max_pclk_khz); + + if (!dp->debug->sim_mode && !dp->no_aux_switch && !dp->parser->gpio_aux_switch + && dp->aux_switch_node && dp->aux->switch_configure) { + rc = dp->aux->switch_configure(dp->aux, true, dp->hpd->orientation); + if (rc) + goto err_state; + } + + /* + * If dp video session is not restored from a previous session teardown + * by userspace, ensure the host_init is executed, in such a scenario, + * so that all the required DP resources are enabled. + * + * Below is one of the sequences of events which describe the above + * scenario: + * a. Source initiated power down resulting in host_deinit. + * b. Sink issues hpd low attention without physical cable disconnect. + * c. Source initiated power up sequence returns early because hpd is + * not high. + * d. Sink issues a hpd high attention event. + */ + if (dp_display_state_is(DP_STATE_SRC_PWRDN) && + dp_display_state_is(DP_STATE_CONFIGURED)) { + rc = dp_display_host_init(dp); + if (rc) { + DP_WARN("Host init Failed"); + if (!dp_display_state_is(DP_STATE_SUSPENDED)) { + /* + * If not suspended no point of going forward if + * resource is not enabled. 
+ */ + dp_display_state_remove(DP_STATE_CONNECTED); + } + goto err_unlock; + } + + /* + * If device is suspended and host_init fails, there is + * one more chance for host init to happen in prepare which + * is why DP_STATE_SRC_PWRDN is removed only at success. + */ + dp_display_state_remove(DP_STATE_SRC_PWRDN); + } + + rc = dp_display_host_ready(dp); + if (rc) { + dp_display_state_show("[ready failed]"); + goto err_state; + } + + dp->link->psm_config(dp->link, &dp->panel->link_info, false); + dp->debug->psm_enabled = false; + + if (!dp->dp_display.base_connector) + goto err_unready; + + rc = dp->panel->read_sink_caps(dp->panel, + dp->dp_display.base_connector, dp->hpd->multi_func); + /* + * ETIMEDOUT --> cable may have been removed + * ENOTCONN --> no downstream device connected + */ + if (rc == -ETIMEDOUT || rc == -ENOTCONN) + goto err_unready; + + dp->link->process_request(dp->link); + dp->panel->handle_sink_request(dp->panel); + + dp_display_mst_init(dp); + + rc = dp->ctrl->on(dp->ctrl, dp->mst.mst_active, + dp->panel->fec_en, dp->panel->dsc_en, false); + if (rc) + goto err_mst; + + dp->process_hpd_connect = false; + + dp_display_set_mst_mgr_state(dp, true); + + mutex_unlock(&dp->session_lock); + + if (dp_display_hpd_irq_pending(dp)) + goto end; + + if (!rc && !dp_display_state_is(DP_STATE_ABORTED)) + dp_display_send_hpd_notification(dp, false); + + goto end; + +err_mst: + dp_display_update_mst_state(dp, false); +err_unready: + dp_display_host_unready(dp); +err_state: + dp_display_state_remove(DP_STATE_CONNECTED); +err_unlock: + mutex_unlock(&dp->session_lock); +end: + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state, rc); + return rc; +} + +static void dp_display_process_mst_hpd_low(struct dp_display_private *dp, bool skip_wait) +{ + int rc = 0; + + if (dp->mst.mst_active) { + DP_MST_DEBUG("mst_hpd_low work\n"); + + /* + * HPD unplug callflow: + * 1. send hpd unplug on base connector so usermode can disable + * all external displays. + * 2. 
unset mst state in the topology mgr so the branch device
		 * can be cleaned up.
		 */

		if ((dp_display_state_is(DP_STATE_CONNECT_NOTIFIED) ||
				dp_display_state_is(DP_STATE_ENABLED)))
			rc = dp_display_send_hpd_notification(dp, skip_wait);

		dp_display_set_mst_mgr_state(dp, false);
		dp_display_update_mst_state(dp, false);
	}

	DP_MST_DEBUG("mst_hpd_low. mst_active:%d\n", dp->mst.mst_active);
}

/*
 * Handle HPD low: notify userspace of the disconnect (SST or MST
 * flavor), stop audio, and power the controller off once no stream
 * remains active. Returns the notification result.
 */
static int dp_display_process_hpd_low(struct dp_display_private *dp, bool skip_wait)
{
	int rc = 0;

	dp_display_state_remove(DP_STATE_CONNECTED);
	dp->process_hpd_connect = false;
	dp_audio_enable(dp, false);

	if (dp->mst.mst_active) {
		dp_display_process_mst_hpd_low(dp, skip_wait);
	} else {
		/* only notify if userspace was told about the connect */
		if ((dp_display_state_is(DP_STATE_CONNECT_NOTIFIED) ||
				dp_display_state_is(DP_STATE_ENABLED)))
			rc = dp_display_send_hpd_notification(dp, skip_wait);
	}

	mutex_lock(&dp->session_lock);
	if (!dp->active_stream_cnt)
		dp->ctrl->off(dp->ctrl);
	mutex_unlock(&dp->session_lock);

	dp->panel->video_test = false;

	return rc;
}

/*
 * Placeholder AUX-switch notifier callback: a successful registration
 * is the only signal needed, so the callback itself does nothing.
 */
static int dp_display_aux_switch_callback(struct notifier_block *self,
		unsigned long event, void *data)
{
	return 0;
}

static int dp_display_init_aux_switch(struct dp_display_private *dp)
{
	int rc = 0;
	struct notifier_block nb;
	const u32 max_retries = 50;
	u32 retry;

	if (dp->aux_switch_ready)
		return rc;

	if (!dp->aux->switch_register_notifier)
		return rc;

	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY);

	nb.notifier_call = dp_display_aux_switch_callback;
	nb.priority = 0;

	/*
	 * Iteratively wait for reg notifier which confirms that fsa driver is probed.
	 * Bootup DP with cable connected usecase can hit this scenario.
 */
	for (retry = 0; retry < max_retries; retry++) {
		rc = dp->aux->switch_register_notifier(&nb, dp->aux_switch_node);
		if (rc == 0) {
			DP_DEBUG("registered notifier successfully\n");
			dp->aux_switch_ready = true;
			break;
		} else {
			DP_DEBUG("failed to register notifier retry=%d rc=%d\n", retry, rc);
			msleep(100);
		}
	}

	if (retry == max_retries) {
		DP_WARN("Failed to register fsa notifier\n");
		dp->aux_switch_ready = false;
		return rc;
	}

	if (dp->aux->switch_unregister_notifier)
		dp->aux->switch_unregister_notifier(&nb, dp->aux_switch_node);

	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, rc);
	return rc;
}

/*
 * USBPD "configure" callback: program the AUX switch when one is
 * present, mark the session CONFIGURED, initialize the host block,
 * then either queue the connect work (HPD already high) or arm a
 * deferred connect. Returns 0 or a negative errno.
 */
static int dp_display_usbpd_configure_cb(struct device *dev)
{
	int rc = 0;
	struct dp_display_private *dp;

	if (!dev) {
		DP_ERR("invalid dev\n");
		return -EINVAL;
	}

	dp = dev_get_drvdata(dev);
	if (!dp) {
		DP_ERR("no driver data found\n");
		return -ENODEV;
	}

	if (!dp->debug->sim_mode && !dp->no_aux_switch
			&& !dp->parser->gpio_aux_switch && dp->aux_switch_node && dp->aux->switch_configure) {
		rc = dp_display_init_aux_switch(dp);
		if (rc)
			return rc;

		rc = dp->aux->switch_configure(dp->aux, true, dp->hpd->orientation);
		if (rc)
			return rc;
	}

	mutex_lock(&dp->session_lock);

	/* Trusted UI owns the hardware; do nothing until it is released */
	if (dp_display_state_is(DP_STATE_TUI_ACTIVE)) {
		dp_display_state_log("[TUI is active]");
		mutex_unlock(&dp->session_lock);
		return 0;
	}

	dp_display_state_remove(DP_STATE_ABORTED);
	dp_display_state_add(DP_STATE_CONFIGURED);

	rc = dp_display_host_init(dp);
	if (rc) {
		DP_ERR("Host init Failed");
		mutex_unlock(&dp->session_lock);
		return rc;
	}

	/* check for hpd high */
	if (dp->hpd->hpd_high)
		queue_work(dp->wq, &dp->connect_work);
	else
		dp->process_hpd_connect = true;
	mutex_unlock(&dp->session_lock);

	return 0;
}

static void dp_display_clear_reservation(struct dp_display *dp, struct dp_panel *panel)
{
	struct dp_display_private *dp_display;

	if (!dp || !panel) {
		DP_ERR("invalid
params\n"); + return; + } + + dp_display = container_of(dp, struct dp_display_private, dp_display); + + mutex_lock(&dp_display->accounting_lock); + + dp_display->tot_lm_blks_in_use -= panel->max_lm; + panel->max_lm = 0; + + if (!dp_display->active_stream_cnt) + dp_display->tot_lm_blks_in_use = 0; + + mutex_unlock(&dp_display->accounting_lock); +} + +static void dp_display_clear_dsc_resources(struct dp_display_private *dp, + struct dp_panel *panel) +{ + dp->tot_dsc_blks_in_use -= panel->dsc_blks_in_use; + panel->dsc_blks_in_use = 0; +} + +static int dp_display_get_mst_pbn_div(struct dp_display *dp_display) +{ + struct dp_display_private *dp; + u32 link_rate, lane_count; + + if (!dp_display) { + DP_ERR("invalid params\n"); + return 0; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + link_rate = drm_dp_bw_code_to_link_rate(dp->link->link_params.bw_code); + lane_count = dp->link->link_params.lane_count; + + return link_rate * lane_count / 54000; +} + +static int dp_display_stream_pre_disable(struct dp_display_private *dp, + struct dp_panel *dp_panel) +{ + if (!dp->active_stream_cnt) { + DP_WARN("streams already disabled cnt=%d\n", + dp->active_stream_cnt); + return 0; + } + + dp->ctrl->stream_pre_off(dp->ctrl, dp_panel); + + return 0; +} + +static void dp_display_stream_disable(struct dp_display_private *dp, + struct dp_panel *dp_panel) +{ + if (!dp->active_stream_cnt) { + DP_WARN("streams already disabled cnt=%d\n", + dp->active_stream_cnt); + return; + } + + if (dp_panel->stream_id == DP_STREAM_MAX || + !dp->active_panels[dp_panel->stream_id]) { + DP_ERR("panel is already disabled\n"); + return; + } + + dp_display_clear_dsc_resources(dp, dp_panel); + + DP_DEBUG("stream_id=%d, active_stream_cnt=%d, tot_dsc_blks_in_use=%d\n", + dp_panel->stream_id, dp->active_stream_cnt, + dp->tot_dsc_blks_in_use); + + dp->ctrl->stream_off(dp->ctrl, dp_panel); + dp->active_panels[dp_panel->stream_id] = NULL; + dp->active_stream_cnt--; +} + +static void 
/*
 * Tear down every active stream: stop HDCP, silence audio, pre-off and
 * off each stream, release panel reservations, then turn the ctrl off.
 * Skipped entirely while Trusted UI owns the hardware.
 */
dp_display_clean(struct dp_display_private *dp, bool skip_wait)
{
	int idx;
	struct dp_panel *dp_panel;
	struct dp_link_hdcp_status *status = &dp->link->hdcp_status;

	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state);

	if (dp_display_state_is(DP_STATE_TUI_ACTIVE)) {
		DP_WARN("TUI is active\n");
		return;
	}

	/* stop the HDCP worker before pulling streams out from under it */
	if (dp_display_is_hdcp_enabled(dp) &&
			status->hdcp_state != HDCP_STATE_INACTIVE) {
		cancel_delayed_work_sync(&dp->hdcp_cb_work);
		if (dp->hdcp.ops->off)
			dp->hdcp.ops->off(dp->hdcp.data);

		dp_display_update_hdcp_status(dp, true);
	}

	for (idx = DP_STREAM_0; idx < DP_STREAM_MAX; idx++) {
		if (!dp->active_panels[idx])
			continue;

		dp_panel = dp->active_panels[idx];
		if (dp_panel->audio_supported)
			dp_panel->audio->off(dp_panel->audio, skip_wait);

		if (!skip_wait)
			dp_display_stream_pre_disable(dp, dp_panel);
		dp_display_stream_disable(dp, dp_panel);
		dp_display_clear_reservation(&dp->dp_display, dp_panel);
		dp_panel->deinit(dp_panel, 0);
	}

	dp_display_state_remove(DP_STATE_ENABLED | DP_STATE_CONNECTED);

	dp->ctrl->off(dp->ctrl);
	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
}

/*
 * Common disconnect path: run HPD-low processing, abort any pending
 * AUX/CTRL work on failure, clean all enabled streams, and unready the
 * host. Returns the HPD-low processing result.
 */
static int dp_display_handle_disconnect(struct dp_display_private *dp, bool skip_wait)
{
	int rc;

	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state);
	rc = dp_display_process_hpd_low(dp, skip_wait);
	if (rc) {
		/* cancel any pending request */
		dp->ctrl->abort(dp->ctrl, true);
		dp->aux->abort(dp->aux, true);
	}

	mutex_lock(&dp->session_lock);
	if (dp_display_state_is(DP_STATE_ENABLED))
		dp_display_clean(dp, skip_wait);

	dp_display_host_unready(dp);

	dp->tot_lm_blks_in_use = 0;

	mutex_unlock(&dp->session_lock);

	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
	return rc;
}

static void dp_display_disconnect_sync(struct dp_display_private *dp)
{
	int disconnect_delay_ms;

	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state);
	/* cancel any pending request */
dp_display_state_add(DP_STATE_ABORTED); + + dp->ctrl->abort(dp->ctrl, true); + dp->aux->abort(dp->aux, true); + + /* wait for idle state */ + cancel_work_sync(&dp->connect_work); + cancel_work_sync(&dp->attention_work); + cancel_work_sync(&dp->disconnect_work); + flush_workqueue(dp->wq); + + /* + * Delay the teardown of the mainlink for better interop experience. + * It is possible that certain sinks can issue an HPD high immediately + * following an HPD low as soon as they detect the mainlink being + * turned off. This can sometimes result in the HPD low pulse getting + * lost with certain cable. This issue is commonly seen when running + * DP LL CTS test 4.2.1.3. + */ + disconnect_delay_ms = min_t(u32, dp->debug->disconnect_delay_ms, + (u32) MAX_DISCONNECT_DELAY_MS); + DP_DEBUG("disconnect delay = %d ms\n", disconnect_delay_ms); + msleep(disconnect_delay_ms); + + dp_display_handle_disconnect(dp, false); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state, + disconnect_delay_ms); +} + +static int dp_display_usbpd_disconnect_cb(struct device *dev) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!dev) { + DP_ERR("invalid dev\n"); + rc = -EINVAL; + goto end; + } + + dp = dev_get_drvdata(dev); + if (!dp) { + DP_ERR("no driver data found\n"); + rc = -ENODEV; + goto end; + } + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state, + dp->debug->psm_enabled); + + /* skip if a disconnect is already in progress */ + if (dp_display_state_is(DP_STATE_ABORTED) && + dp_display_state_is(DP_STATE_READY)) { + DP_DEBUG("disconnect already in progress\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_CASE1, dp->state); + return 0; + } + + if (dp->debug->psm_enabled && dp_display_state_is(DP_STATE_READY)) + dp->link->psm_config(dp->link, &dp->panel->link_info, true); + + dp->ctrl->abort(dp->ctrl, true); + dp->aux->abort(dp->aux, true); + + if (!dp->debug->sim_mode && !dp->no_aux_switch + && !dp->parser->gpio_aux_switch && dp->aux->switch_configure) + 
dp->aux->switch_configure(dp->aux, false, ORIENTATION_NONE); + + dp_display_disconnect_sync(dp); + + mutex_lock(&dp->session_lock); + dp_display_host_deinit(dp); + dp_display_state_remove(DP_STATE_CONFIGURED); + mutex_unlock(&dp->session_lock); + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state); +end: + return rc; +} + +static int dp_display_stream_enable(struct dp_display_private *dp, + struct dp_panel *dp_panel) +{ + int rc = 0; + + rc = dp->ctrl->stream_on(dp->ctrl, dp_panel); + + if (dp->debug->tpg_pattern) + dp_panel->tpg_config(dp_panel, dp->debug->tpg_pattern); + + if (!rc) { + dp->active_panels[dp_panel->stream_id] = dp_panel; + dp->active_stream_cnt++; + } + + + DP_DEBUG("dp active_stream_cnt:%d, tot_dsc_blks_in_use=%d\n", + dp->active_stream_cnt, dp->tot_dsc_blks_in_use); + + return rc; +} + +static void dp_display_mst_attention(struct dp_display_private *dp) +{ + if (dp->mst.mst_active && dp->mst.cbs.hpd_irq) + dp->mst.cbs.hpd_irq(&dp->dp_display); + + DP_MST_DEBUG("mst_attention_work. 
mst_active:%d\n", dp->mst.mst_active); +} + +static void dp_display_attention_work(struct work_struct *work) +{ + struct dp_display_private *dp = container_of(work, + struct dp_display_private, attention_work); + int rc = 0; + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state); + mutex_lock(&dp->session_lock); + SDE_EVT32_EXTERNAL(dp->state); + + if (dp_display_state_is(DP_STATE_ABORTED)) { + DP_INFO("Hpd off, not handling any attention\n"); + mutex_unlock(&dp->session_lock); + goto exit; + } + + if (!dp_display_state_is(DP_STATE_READY)) { + mutex_unlock(&dp->session_lock); + goto mst_attention; + } + + if (dp->link->process_request(dp->link)) { + mutex_unlock(&dp->session_lock); + goto cp_irq; + } + + mutex_unlock(&dp->session_lock); + SDE_EVT32_EXTERNAL(dp->state, dp->link->sink_request); + + if (dp->link->sink_request & DS_PORT_STATUS_CHANGED) { + SDE_EVT32_EXTERNAL(dp->state, DS_PORT_STATUS_CHANGED); + if (!dp->mst.mst_active) { + if (dp_display_is_sink_count_zero(dp)) { + dp_display_handle_disconnect(dp, false); + } else { + /* + * connect work should take care of sending + * the HPD notification. + */ + queue_work(dp->wq, &dp->connect_work); + } + } + + goto mst_attention; + } + + if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) { + SDE_EVT32_EXTERNAL(dp->state, DP_TEST_LINK_VIDEO_PATTERN); + dp_display_handle_disconnect(dp, false); + + dp->panel->video_test = true; + /* + * connect work should take care of sending + * the HPD notification. 
+ */ + queue_work(dp->wq, &dp->connect_work); + + goto mst_attention; + } + + if (dp->link->sink_request & (DP_TEST_LINK_PHY_TEST_PATTERN | + DP_TEST_LINK_TRAINING | DP_LINK_STATUS_UPDATED)) { + + mutex_lock(&dp->session_lock); + dp_audio_enable(dp, false); + + if (dp->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { + SDE_EVT32_EXTERNAL(dp->state, + DP_TEST_LINK_PHY_TEST_PATTERN); + dp->ctrl->process_phy_test_request(dp->ctrl); + } + + if (dp->link->sink_request & DP_TEST_LINK_TRAINING) { + SDE_EVT32_EXTERNAL(dp->state, DP_TEST_LINK_TRAINING); + dp->link->send_test_response(dp->link); + rc = dp->ctrl->link_maintenance(dp->ctrl); + } + + if (dp->link->sink_request & DP_LINK_STATUS_UPDATED) { + SDE_EVT32_EXTERNAL(dp->state, DP_LINK_STATUS_UPDATED); + rc = dp->ctrl->link_maintenance(dp->ctrl); + } + + if (!rc) + dp_audio_enable(dp, true); + + mutex_unlock(&dp->session_lock); + if (rc) + goto exit; + + if (dp->link->sink_request & (DP_TEST_LINK_PHY_TEST_PATTERN | + DP_TEST_LINK_TRAINING)) + goto mst_attention; + } + +cp_irq: + if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->cp_irq) + dp->hdcp.ops->cp_irq(dp->hdcp.data); + + if (!dp->mst.mst_active) { + /* + * It is possible that the connect_work skipped sending + * the HPD notification if the attention message was + * already pending. Send the notification here to + * account for that. It is possible that the test sequence + * can trigger an unplug after DP_LINK_STATUS_UPDATED, before + * starting the next test case. Make sure to check the HPD status. 
+ */ + if (!dp_display_state_is(DP_STATE_ABORTED)) + dp_display_send_hpd_notification(dp, false); + } + +mst_attention: + dp_display_mst_attention(dp); +exit: + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state); +} + +static int dp_display_usbpd_attention_cb(struct device *dev) +{ + struct dp_display_private *dp; + + if (!dev) { + DP_ERR("invalid dev\n"); + return -EINVAL; + } + + dp = dev_get_drvdata(dev); + if (!dp) { + DP_ERR("no driver data found\n"); + return -ENODEV; + } + + DP_DEBUG("hpd_irq:%d, hpd_high:%d, power_on:%d, is_connected:%d\n", + dp->hpd->hpd_irq, dp->hpd->hpd_high, + !!dp_display_state_is(DP_STATE_ENABLED), + !!dp_display_state_is(DP_STATE_CONNECTED)); + SDE_EVT32_EXTERNAL(dp->state, dp->hpd->hpd_irq, dp->hpd->hpd_high, + !!dp_display_state_is(DP_STATE_ENABLED), + !!dp_display_state_is(DP_STATE_CONNECTED)); + + if (!dp->hpd->hpd_high) { + dp_display_disconnect_sync(dp); + return 0; + } + + /* + * Ignore all the attention messages except HPD LOW when TUI is + * active, so user mode can be notified of the disconnect event. This + * allows user mode to tear down the control path after the TUI + * session is over. Ideally this should never happen, but on the off + * chance that there is a race condition in which there is a IRQ HPD + * during tear down of DP at TUI start then this check might help avoid + * a potential issue accessing registers in attention processing. 
+ */ + if (dp_display_state_is(DP_STATE_TUI_ACTIVE)) { + DP_WARN("TUI is active\n"); + return 0; + } + + if (dp->hpd->hpd_irq && dp_display_state_is(DP_STATE_READY)) { + queue_work(dp->wq, &dp->attention_work); + complete_all(&dp->attention_comp); + } else if (dp->process_hpd_connect || + !dp_display_state_is(DP_STATE_CONNECTED)) { + dp_display_state_remove(DP_STATE_ABORTED); + queue_work(dp->wq, &dp->connect_work); + } else { + DP_DEBUG("ignored\n"); + } + + return 0; +} + +static void dp_display_connect_work(struct work_struct *work) +{ + int rc = 0; + struct dp_display_private *dp = container_of(work, + struct dp_display_private, connect_work); + + if (dp_display_state_is(DP_STATE_TUI_ACTIVE)) { + dp_display_state_log("[TUI is active]"); + return; + } + + if (dp_display_state_is(DP_STATE_ABORTED)) { + DP_WARN("HPD off requested\n"); + return; + } + + if (!dp->hpd->hpd_high) { + DP_WARN("Sink disconnected\n"); + return; + } + + rc = dp_display_process_hpd_high(dp); + + if (!rc && dp->panel->video_test) + dp->link->send_test_response(dp->link); +} + +static void dp_display_disconnect_work(struct work_struct *work) +{ + struct dp_display_private *dp = container_of(work, + struct dp_display_private, disconnect_work); + + dp_display_handle_disconnect(dp, false); + dp->debug->abort(dp->debug); +} + +static int dp_display_usb_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct dp_display_private *dp = container_of(nb, + struct dp_display_private, usb_nb); + + SDE_EVT32_EXTERNAL(dp->state, dp->debug->sim_mode, action); + if (!action && dp->debug->sim_mode) { + DP_WARN("usb disconnected during simulation\n"); + dp_display_state_add(DP_STATE_ABORTED); + dp->ctrl->abort(dp->ctrl, true); + dp->aux->abort(dp->aux, true); + queue_work(dp->wq, &dp->disconnect_work); + } + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state, NOTIFY_DONE); + return NOTIFY_DONE; +} + +static void dp_display_register_usb_notifier(struct dp_display_private *dp) +{ + 
int rc = 0; + const char *phandle = "usb-phy"; + struct usb_phy *usbphy; + + usbphy = devm_usb_get_phy_by_phandle(&dp->pdev->dev, phandle, 0); + if (IS_ERR_OR_NULL(usbphy)) { + DP_DEBUG("unable to get usbphy\n"); + return; + } + + dp->usb_nb.notifier_call = dp_display_usb_notifier; + dp->usb_nb.priority = 2; + rc = usb_register_notifier(usbphy, &dp->usb_nb); + if (rc) + DP_DEBUG("failed to register for usb event: %d\n", rc); +} + +int dp_display_mmrm_callback(struct mmrm_client_notifier_data *notifier_data) +{ + struct dss_clk_mmrm_cb *mmrm_cb_data = (struct dss_clk_mmrm_cb *)notifier_data->pvt_data; + struct dp_display *dp_display = (struct dp_display *)mmrm_cb_data->phandle; + struct dp_display_private *dp = + container_of(dp_display, struct dp_display_private, dp_display); + int ret = 0; + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state, notifier_data->cb_type); + if (notifier_data->cb_type == MMRM_CLIENT_RESOURCE_VALUE_CHANGE + && dp_display_state_is(DP_STATE_ENABLED) + && !dp_display_state_is(DP_STATE_ABORTED)) { + ret = dp_display_handle_disconnect(dp, false); + if (ret) + DP_ERR("mmrm callback error reducing clk, ret:%d\n", ret); + } + + DP_DEBUG("mmrm callback handled, state: 0x%x rc:%d\n", dp->state, ret); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state, notifier_data->cb_type); + return ret; +} + +static void dp_display_deinit_sub_modules(struct dp_display_private *dp) +{ + dp_debug_put(dp->debug); + dp_hpd_put(dp->hpd); + if (dp->panel) + dp_audio_put(dp->panel->audio); + dp_ctrl_put(dp->ctrl); + dp_panel_put(dp->panel); + dp_link_put(dp->link); + dp_power_put(dp->power); + dp_pll_put(dp->pll); + dp_aux_put(dp->aux); + dp_catalog_put(dp->catalog); + dp_parser_put(dp->parser); + mutex_destroy(&dp->session_lock); +} + +static int dp_init_sub_modules(struct dp_display_private *dp) +{ + int rc = 0; + u32 dp_core_revision = 0; + bool hdcp_disabled; + const char *phandle = "qcom,dp-aux-switch"; + struct device *dev = &dp->pdev->dev; + struct 
dp_hpd_cb *cb = &dp->hpd_cb; + struct dp_ctrl_in ctrl_in = { + .dev = dev, + }; + struct dp_panel_in panel_in = { + .dev = dev, + }; + struct dp_debug_in debug_in = { + .dev = dev, + }; + struct dp_pll_in pll_in = { + .pdev = dp->pdev, + }; + + mutex_init(&dp->session_lock); + mutex_init(&dp->accounting_lock); + + dp->parser = dp_parser_get(dp->pdev); + if (IS_ERR(dp->parser)) { + rc = PTR_ERR(dp->parser); + DP_ERR("failed to initialize parser, rc = %d\n", rc); + dp->parser = NULL; + goto error; + } + + rc = dp->parser->parse(dp->parser); + if (rc) { + DP_ERR("device tree parsing failed\n"); + goto error_catalog; + } + + g_dp_display->is_mst_supported = dp->parser->has_mst; + g_dp_display->dsc_cont_pps = dp->parser->dsc_continuous_pps; + + dp->catalog = dp_catalog_get(dev, dp->parser); + if (IS_ERR(dp->catalog)) { + rc = PTR_ERR(dp->catalog); + DP_ERR("failed to initialize catalog, rc = %d\n", rc); + dp->catalog = NULL; + goto error_catalog; + } + + dp_core_revision = dp_catalog_get_dp_core_version(dp->catalog); + + dp->aux_switch_node = of_parse_phandle(dp->pdev->dev.of_node, phandle, 0); + if (!dp->aux_switch_node) { + dp->no_aux_switch = true; + DP_WARN("Aux switch node not found, assigning bypass mode as switch type\n"); + dp->switch_type = DP_AUX_SWITCH_BYPASS; + goto skip_node_name; + } + + if (!strcmp(dp->aux_switch_node->name, "fsa4480")) + dp->switch_type = DP_AUX_SWITCH_FSA4480; + else if (!strcmp(dp->aux_switch_node->name, "wcd939x_i2c")) + dp->switch_type = DP_AUX_SWITCH_WCD939x; + else + dp->switch_type = DP_AUX_SWITCH_BYPASS; + +skip_node_name: + dp->aux = dp_aux_get(dev, &dp->catalog->aux, dp->parser, + dp->aux_switch_node, dp->aux_bridge, g_dp_display->dp_aux_ipc_log, + dp->switch_type); + if (IS_ERR(dp->aux)) { + rc = PTR_ERR(dp->aux); + DP_ERR("failed to initialize aux, rc = %d\n", rc); + dp->aux = NULL; + goto error_aux; + } + + rc = dp->aux->drm_aux_register(dp->aux, dp->dp_display.drm_dev); + if (rc) { + DP_ERR("DRM DP AUX register failed\n"); 
+ goto error_pll; + } + + pll_in.aux = dp->aux; + pll_in.parser = dp->parser; + pll_in.dp_core_revision = dp_core_revision; + + dp->pll = dp_pll_get(&pll_in); + if (IS_ERR(dp->pll)) { + rc = PTR_ERR(dp->pll); + DP_ERR("failed to initialize pll, rc = %d\n", rc); + dp->pll = NULL; + goto error_pll; + } + + dp->power = dp_power_get(dp->parser, dp->pll); + if (IS_ERR(dp->power)) { + rc = PTR_ERR(dp->power); + DP_ERR("failed to initialize power, rc = %d\n", rc); + dp->power = NULL; + goto error_power; + } + + rc = dp->power->power_client_init(dp->power, &dp->priv->phandle, + dp->dp_display.drm_dev); + if (rc) { + DP_ERR("Power client create failed\n"); + goto error_link; + } + + rc = dp->power->power_mmrm_init(dp->power, &dp->priv->phandle, + (void *)&dp->dp_display, dp_display_mmrm_callback); + if (rc) { + DP_ERR("failed to initialize mmrm, rc = %d\n", rc); + goto error_link; + } + + dp->link = dp_link_get(dev, dp->aux, dp_core_revision); + if (IS_ERR(dp->link)) { + rc = PTR_ERR(dp->link); + DP_ERR("failed to initialize link, rc = %d\n", rc); + dp->link = NULL; + goto error_link; + } + + panel_in.aux = dp->aux; + panel_in.catalog = &dp->catalog->panel; + panel_in.link = dp->link; + panel_in.connector = dp->dp_display.base_connector; + panel_in.base_panel = NULL; + panel_in.parser = dp->parser; + + dp->panel = dp_panel_get(&panel_in); + if (IS_ERR(dp->panel)) { + rc = PTR_ERR(dp->panel); + DP_ERR("failed to initialize panel, rc = %d\n", rc); + dp->panel = NULL; + goto error_panel; + } + + ctrl_in.link = dp->link; + ctrl_in.panel = dp->panel; + ctrl_in.aux = dp->aux; + ctrl_in.power = dp->power; + ctrl_in.catalog = &dp->catalog->ctrl; + ctrl_in.parser = dp->parser; + ctrl_in.pll = dp->pll; + + dp->ctrl = dp_ctrl_get(&ctrl_in); + if (IS_ERR(dp->ctrl)) { + rc = PTR_ERR(dp->ctrl); + DP_ERR("failed to initialize ctrl, rc = %d\n", rc); + dp->ctrl = NULL; + goto error_ctrl; + } + + dp->panel->audio = dp_audio_get(dp->pdev, dp->panel, + &dp->catalog->audio); + if 
(IS_ERR(dp->panel->audio)) { + rc = PTR_ERR(dp->panel->audio); + DP_ERR("failed to initialize audio, rc = %d\n", rc); + dp->panel->audio = NULL; + goto error_audio; + } + + memset(&dp->mst, 0, sizeof(dp->mst)); + dp->active_stream_cnt = 0; + + cb->configure = dp_display_usbpd_configure_cb; + cb->disconnect = dp_display_usbpd_disconnect_cb; + cb->attention = dp_display_usbpd_attention_cb; + + dp->hpd = dp_hpd_get(dev, dp->parser, &dp->catalog->hpd, + dp->aux_bridge, cb); + if (IS_ERR(dp->hpd)) { + rc = PTR_ERR(dp->hpd); + DP_ERR("failed to initialize hpd, rc = %d\n", rc); + dp->hpd = NULL; + goto error_hpd; + } + + hdcp_disabled = !!dp_display_initialize_hdcp(dp); + + debug_in.panel = dp->panel; + debug_in.hpd = dp->hpd; + debug_in.link = dp->link; + debug_in.aux = dp->aux; + debug_in.connector = &dp->dp_display.base_connector; + debug_in.catalog = dp->catalog; + debug_in.parser = dp->parser; + debug_in.ctrl = dp->ctrl; + debug_in.pll = dp->pll; + debug_in.display = &dp->dp_display; + + dp->debug = dp_debug_get(&debug_in); + if (IS_ERR(dp->debug)) { + rc = PTR_ERR(dp->debug); + DP_ERR("failed to initialize debug, rc = %d\n", rc); + dp->debug = NULL; + goto error_debug; + } + + dp->cached_connector_status = connector_status_disconnected; + dp->tot_dsc_blks_in_use = 0; + dp->tot_lm_blks_in_use = 0; + + dp->debug->hdcp_disabled = hdcp_disabled; + dp_display_update_hdcp_status(dp, true); + + dp_display_register_usb_notifier(dp); + + if (dp->hpd->register_hpd) { + rc = dp->hpd->register_hpd(dp->hpd); + if (rc) { + DP_ERR("failed register hpd\n"); + goto error_hpd_reg; + } + } + + return rc; +error_hpd_reg: + dp_debug_put(dp->debug); +error_debug: + dp_hpd_put(dp->hpd); +error_hpd: + dp_audio_put(dp->panel->audio); +error_audio: + dp_ctrl_put(dp->ctrl); +error_ctrl: + dp_panel_put(dp->panel); +error_panel: + dp_link_put(dp->link); +error_link: + dp_power_put(dp->power); +error_power: + dp_pll_put(dp->pll); +error_pll: + dp_aux_put(dp->aux); +error_aux: + 
dp_catalog_put(dp->catalog); +error_catalog: + dp_parser_put(dp->parser); +error: + mutex_destroy(&dp->session_lock); + return rc; +} + +static int dp_display_post_init(struct dp_display *dp_display) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!dp_display) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto end; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + if (IS_ERR_OR_NULL(dp)) { + DP_ERR("invalid params\n"); + rc = -EINVAL; + goto end; + } + + rc = dp_init_sub_modules(dp); + if (rc) + goto end; + + dp_display->post_init = NULL; +end: + DP_DEBUG("%s\n", rc ? "failed" : "success"); + return rc; +} + +static int dp_display_set_mode(struct dp_display *dp_display, void *panel, + struct dp_display_mode *mode) +{ + const u32 num_components = 3, default_bpp = 24; + struct dp_display_private *dp; + struct dp_panel *dp_panel; + bool dsc_en = (mode->capabilities & DP_PANEL_CAPS_DSC) ? true : false; + + if (!dp_display || !panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp_panel = panel; + if (!dp_panel->connector) { + DP_ERR("invalid connector input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state, + mode->timing.h_active, mode->timing.v_active, + mode->timing.refresh_rate); + + mutex_lock(&dp->session_lock); + mode->timing.bpp = + dp_panel->connector->display_info.bpc * num_components; + if (!mode->timing.bpp) + mode->timing.bpp = default_bpp; + + mode->timing.bpp = dp->panel->get_mode_bpp(dp->panel, + mode->timing.bpp, mode->timing.pixel_clk_khz, dsc_en); + + if (dp->mst.mst_active) + dp->mst.cbs.set_mst_mode_params(&dp->dp_display, mode); + + dp_panel->pinfo = mode->timing; + mutex_unlock(&dp->session_lock); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state); + + return 0; +} + +static int dp_display_prepare(struct dp_display *dp_display, void *panel) +{ + struct dp_display_private *dp; + struct 
dp_panel *dp_panel; + int rc = 0; + + if (!dp_display || !panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp_panel = panel; + if (!dp_panel->connector) { + DP_ERR("invalid connector input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state); + mutex_lock(&dp->session_lock); + + /* + * If DP video session is restored by the userspace after display + * disconnect notification from dongle i.e. typeC cable connected to + * source but disconnected at the display side, the DP controller is + * not restored to the desired configured state. So, ensure host_init + * is executed in such a scenario so that all the DP controller + * resources are enabled for the next connection event. + */ + if (dp_display_state_is(DP_STATE_SRC_PWRDN) && + dp_display_state_is(DP_STATE_CONFIGURED)) { + rc = dp_display_host_init(dp); + if (rc) { + /* + * Skip all the events that are similar to abort case, just that + * the stream clks should be enabled so that no commit failure can + * be seen. + */ + DP_ERR("Host init failed.\n"); + goto end; + } + + /* + * Remove DP_STATE_SRC_PWRDN flag on successful host_init to + * prevent cases such as below. + * 1. MST stream 1 failed to do host init then stream 2 can retry again. + * 2. Resume path fails, now sink sends hpd_high=0 and hpd_high=1. + */ + dp_display_state_remove(DP_STATE_SRC_PWRDN); + } + + /* + * If the physical connection to the sink is already lost by the time + * we try to set up the connection, we can just skip all the steps + * here safely. + */ + if (dp_display_state_is(DP_STATE_ABORTED)) { + dp_display_state_log("[aborted]"); + goto end; + } + + /* + * If DP_STATE_ENABLED, there is nothing left to do. + * This would happen during MST flow. So, log this. 
+ */ + if (dp_display_state_is(DP_STATE_ENABLED)) { + dp_display_state_warn("[already enabled]"); + goto end; + } + + if (!dp_display_is_ready(dp)) { + dp_display_state_show("[not ready]"); + goto end; + } + + /* For supporting DP_PANEL_SRC_INITIATED_POWER_DOWN case */ + rc = dp_display_host_ready(dp); + if (rc) { + dp_display_state_show("[ready failed]"); + goto end; + } + + if (dp->debug->psm_enabled) { + dp->link->psm_config(dp->link, &dp->panel->link_info, false); + dp->debug->psm_enabled = false; + } + + /* + * Execute the dp controller power on in shallow mode here. + * In normal cases, controller should have been powered on + * by now. In some cases like suspend/resume or framework + * reboot, we end up here without a powered on controller. + * Cable may have been removed in suspended state. In that + * case, link training is bound to fail on system resume. + * So, we execute in shallow mode here to do only minimal + * and required things. + */ + rc = dp->ctrl->on(dp->ctrl, dp->mst.mst_active, dp_panel->fec_en, + dp_panel->dsc_en, true); + if (rc) + goto end; + +end: + mutex_unlock(&dp->session_lock); + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state, rc); + return rc; +} + +static int dp_display_set_stream_info(struct dp_display *dp_display, + void *panel, u32 strm_id, u32 start_slot, + u32 num_slots, u32 pbn, int vcpi) +{ + int rc = 0; + struct dp_panel *dp_panel; + struct dp_display_private *dp; + const int max_slots = 64; + + if (!dp_display) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + if (strm_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream id:%d\n", strm_id); + return -EINVAL; + } + + if (start_slot + num_slots > max_slots) { + DP_ERR("invalid channel info received. 
start:%d, slots:%d\n", + start_slot, num_slots); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state, strm_id, + start_slot, num_slots); + + mutex_lock(&dp->session_lock); + + dp->ctrl->set_mst_channel_info(dp->ctrl, strm_id, + start_slot, num_slots); + + if (panel) { + dp_panel = panel; + dp_panel->set_stream_info(dp_panel, strm_id, start_slot, + num_slots, pbn, vcpi); + } + + mutex_unlock(&dp->session_lock); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state, rc); + + return rc; +} + +static int dp_display_enable(struct dp_display *dp_display, void *panel) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!dp_display || !panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state); + mutex_lock(&dp->session_lock); + + /* + * If DP_STATE_READY is not set, we should not do any HW + * programming. + */ + if (!dp_display_state_is(DP_STATE_READY)) { + dp_display_state_show("[host not ready]"); + goto end; + } + + /* + * It is possible that by the time we get call back to establish + * the DP pipeline e2e, the physical DP connection to the sink is + * already lost. In such cases, the DP_STATE_ABORTED would be set. + * However, it is necessary to NOT abort the display setup here so as + * to ensure that the rest of the system is in a stable state prior to + * handling the disconnect notification. 
+ */ + if (dp_display_state_is(DP_STATE_ABORTED)) + dp_display_state_log("[aborted, but continue on]"); + + rc = dp_display_stream_enable(dp, panel); + if (rc) + goto end; + + dp_display_state_add(DP_STATE_ENABLED); +end: + mutex_unlock(&dp->session_lock); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state, rc); + return rc; +} + +static void dp_display_stream_post_enable(struct dp_display_private *dp, + struct dp_panel *dp_panel) +{ + dp_panel->spd_config(dp_panel); + dp_panel->setup_hdr(dp_panel, NULL, false, 0, true); +} + +static int dp_display_post_enable(struct dp_display *dp_display, void *panel) +{ + struct dp_display_private *dp; + struct dp_panel *dp_panel; + + if (!dp_display || !panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + dp_panel = panel; + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state); + mutex_lock(&dp->session_lock); + + /* + * If DP_STATE_READY is not set, we should not do any HW + * programming. + */ + if (!dp_display_state_is(DP_STATE_ENABLED)) { + dp_display_state_show("[not enabled]"); + goto end; + } + + /* + * If the physical connection to the sink is already lost by the time + * we try to set up the connection, we can just skip all the steps + * here safely. 
+ */ + if (dp_display_state_is(DP_STATE_ABORTED)) { + dp_display_state_log("[aborted]"); + goto end; + } + + if (!dp_display_is_ready(dp) || !dp_display_state_is(DP_STATE_READY)) { + dp_display_state_show("[not ready]"); + goto end; + } + + dp_display_stream_post_enable(dp, dp_panel); + + cancel_delayed_work_sync(&dp->hdcp_cb_work); + queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ); + + if (dp_panel->audio_supported) { + dp_panel->audio->bw_code = dp->link->link_params.bw_code; + dp_panel->audio->lane_count = dp->link->link_params.lane_count; + dp_panel->audio->on(dp_panel->audio); + } + + dp->aux->state &= ~DP_STATE_CTRL_POWERED_OFF; + dp->aux->state |= DP_STATE_CTRL_POWERED_ON; + complete_all(&dp->notification_comp); + DP_DEBUG("display post enable complete. state: 0x%x\n", dp->state); +end: + mutex_unlock(&dp->session_lock); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state); + return 0; +} + +static void dp_display_clear_colorspaces(struct dp_display *dp_display) +{ + struct drm_connector *connector; + struct sde_connector *sde_conn; + + connector = dp_display->base_connector; + sde_conn = to_sde_connector(connector); + sde_conn->color_enc_fmt = 0; +} + +static int dp_display_pre_disable(struct dp_display *dp_display, void *panel) +{ + struct dp_display_private *dp; + struct dp_panel *dp_panel = panel; + struct dp_link_hdcp_status *status; + int rc = 0; + size_t i; + + if (!dp_display || !panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state); + mutex_lock(&dp->session_lock); + + status = &dp->link->hdcp_status; + + if (!dp_display_state_is(DP_STATE_ENABLED)) { + dp_display_state_show("[not enabled]"); + goto end; + } + + dp_display_state_add(DP_STATE_HDCP_ABORTED); + cancel_delayed_work_sync(&dp->hdcp_cb_work); + if (dp_display_is_hdcp_enabled(dp) && + status->hdcp_state != HDCP_STATE_INACTIVE) { + bool off = true; + + if 
(dp_display_state_is(DP_STATE_SUSPENDED)) { + DP_DEBUG("Can't perform HDCP cleanup while suspended. Defer\n"); + dp->hdcp_delayed_off = true; + goto clean; + } + + flush_delayed_work(&dp->hdcp_cb_work); + if (dp->mst.mst_active) { + dp_display_hdcp_deregister_stream(dp, + dp_panel->stream_id); + for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) { + if (i != dp_panel->stream_id && + dp->active_panels[i]) { + DP_DEBUG("Streams are still active. Skip disabling HDCP\n"); + off = false; + } + } + } + + if (off) { + if (dp->hdcp.ops->off) + dp->hdcp.ops->off(dp->hdcp.data); + dp_display_update_hdcp_status(dp, true); + } + } + + dp_display_clear_colorspaces(dp_display); + +clean: + if (dp_panel->audio_supported) + dp_panel->audio->off(dp_panel->audio, false); + + rc = dp_display_stream_pre_disable(dp, dp_panel); + +end: + mutex_unlock(&dp->session_lock); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state); + return 0; +} + +static int dp_display_disable(struct dp_display *dp_display, void *panel) +{ + int i; + struct dp_display_private *dp = NULL; + struct dp_panel *dp_panel = NULL; + struct dp_link_hdcp_status *status; + + if (!dp_display || !panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + dp_panel = panel; + status = &dp->link->hdcp_status; + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state); + mutex_lock(&dp->session_lock); + + if (!dp_display_state_is(DP_STATE_ENABLED)) { + dp_display_state_show("[not enabled]"); + goto end; + } + + if (!dp_display_state_is(DP_STATE_READY)) { + dp_display_state_show("[not ready]"); + goto end; + } + + dp_display_stream_disable(dp, dp_panel); + + dp_display_state_remove(DP_STATE_HDCP_ABORTED); + for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) { + if (dp->active_panels[i]) { + if (status->hdcp_state != HDCP_STATE_AUTHENTICATED) + queue_delayed_work(dp->wq, &dp->hdcp_cb_work, + HZ/4); + break; + } + } +end: + mutex_unlock(&dp->session_lock); + 
SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state); + return 0; +} + +static int dp_request_irq(struct dp_display *dp_display) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!dp_display) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + dp->irq = irq_of_parse_and_map(dp->pdev->dev.of_node, 0); + if (dp->irq < 0) { + rc = dp->irq; + DP_ERR("failed to get irq: %d\n", rc); + return rc; + } + + rc = devm_request_irq(&dp->pdev->dev, dp->irq, dp_display_irq, + IRQF_TRIGGER_HIGH, "dp_display_isr", dp); + if (rc < 0) { + DP_ERR("failed to request IRQ%u: %d\n", + dp->irq, rc); + return rc; + } + disable_irq(dp->irq); + + return 0; +} + +static struct dp_debug *dp_get_debug(struct dp_display *dp_display) +{ + struct dp_display_private *dp; + + if (!dp_display) { + DP_ERR("invalid input\n"); + return ERR_PTR(-EINVAL); + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + return dp->debug; +} + +static int dp_display_unprepare(struct dp_display *dp_display, void *panel) +{ + struct dp_display_private *dp; + struct dp_panel *dp_panel = panel; + u32 flags = 0; + + if (!dp_display || !panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state); + mutex_lock(&dp->session_lock); + + /* + * Check if the power off sequence was triggered + * by a source initiated action like framework + * reboot or suspend-resume but not from normal + * hot plug. If connector is in MST mode, skip + * powering down host as aux needs to be kept + * alive to handle hot-plug sideband message. 
+ */ + if (dp_display_is_ready(dp) && + (dp_display_state_is(DP_STATE_SUSPENDED) || + !dp->mst.mst_active)) + flags |= DP_PANEL_SRC_INITIATED_POWER_DOWN; + + if (dp->active_stream_cnt) + goto end; + + if (flags & DP_PANEL_SRC_INITIATED_POWER_DOWN) { + dp->link->psm_config(dp->link, &dp->panel->link_info, true); + dp->debug->psm_enabled = true; + + dp->ctrl->off(dp->ctrl); + dp_display_host_unready(dp); + dp_display_host_deinit(dp); + dp_display_state_add(DP_STATE_SRC_PWRDN); + } + + dp_display_state_remove(DP_STATE_ENABLED); + + dp->aux->state &= ~DP_STATE_CTRL_POWERED_ON; + dp->aux->state |= DP_STATE_CTRL_POWERED_OFF; + + complete_all(&dp->notification_comp); + + /* log this as it results from user action of cable dis-connection */ + DP_INFO("[OK]\n"); +end: + mutex_lock(&dp->accounting_lock); + dp->tot_lm_blks_in_use -= dp_panel->max_lm; + dp_panel->max_lm = 0; + mutex_unlock(&dp->accounting_lock); + dp_panel->deinit(dp_panel, flags); + mutex_unlock(&dp->session_lock); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state); + + return 0; +} + +static int dp_display_validate_link_clock(struct dp_display_private *dp, + struct drm_display_mode *mode, struct dp_display_mode dp_mode) +{ + u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0; + u32 mode_bpc = 0, tmds_clock = 0; + bool dsc_en; + int rate; + struct msm_compression_info *c_info = &dp_mode.timing.comp_info; + + dsc_en = c_info->enabled; + + if (dsc_en) { + mode_bpp = DSC_BPP(c_info->dsc_info.config); + mode_bpc = c_info->dsc_info.config.bits_per_component; + } else { + mode_bpp = dp_mode.timing.bpp; + mode_bpc = mode_bpp / 3; + } + + mode_rate_khz = mode->clock * mode_bpp; + rate = drm_dp_bw_code_to_link_rate(dp->link->link_params.bw_code); + tmds_clock = mode->clock * mode_bpc / 8; + + /* + * For a HBR 2 dongle, limit TMDS clock to ensure a max resolution + * of 4k@30fps for each MST port + */ + if (dp->mst.mst_active && rate <= 540000 && tmds_clock > MAX_TMDS_CLOCK_HDMI_1_4) { + DP_DEBUG("Limit 
mode clock: %d kHz\n", mode->clock); + return -EPERM; + } + + supported_rate_khz = dp->link->link_params.lane_count * rate * 8; + + if (mode_rate_khz > supported_rate_khz) { + DP_DEBUG("mode_rate: %d kHz, supported_rate: %d kHz\n", + mode_rate_khz, supported_rate_khz); + return -EPERM; + } + + return 0; +} + +static int dp_display_validate_pixel_clock(struct dp_display_mode dp_mode, + u32 max_pclk_khz) +{ + u32 pclk_khz = dp_mode.timing.widebus_en ? + (dp_mode.timing.pixel_clk_khz >> 1) : + dp_mode.timing.pixel_clk_khz; + + if (pclk_khz > max_pclk_khz) { + DP_DEBUG("clk: %d kHz, max: %d kHz\n", pclk_khz, max_pclk_khz); + return -EPERM; + } + + return 0; +} + +static int dp_display_validate_topology(struct dp_display_private *dp, + struct dp_panel *dp_panel, struct drm_display_mode *mode, + struct dp_display_mode *dp_mode, + const struct msm_resource_caps_info *avail_res) +{ + int rc; + struct msm_drm_private *priv = dp->priv; + const u32 dual = 2, quad = 4; + u32 num_lm = 0, num_dsc = 0, num_3dmux = 0; + bool dsc_capable = dp_mode->capabilities & DP_PANEL_CAPS_DSC; + u32 fps = dp_mode->timing.refresh_rate; + int avail_lm = 0; + + mutex_lock(&dp->accounting_lock); + + rc = msm_get_mixer_count(priv, mode, avail_res, &num_lm); + if (rc) { + DP_ERR("error getting mixer count. rc:%d\n", rc); + goto end; + } + + /* Merge using DSC, if enabled */ + if (dp_panel->dsc_en && dsc_capable) { + rc = msm_get_dsc_count(priv, mode->hdisplay, &num_dsc); + if (rc) { + DP_ERR("error getting dsc count. 
rc:%d\n", rc); + goto end; + } + + num_dsc = max(num_lm, num_dsc); + if ((num_dsc > avail_res->num_lm) || (num_dsc > avail_res->num_dsc)) { + DP_DEBUG("mode %sx%d: not enough resources for dsc %d dsc_a:%d lm_a:%d\n", + mode->name, fps, num_dsc, avail_res->num_dsc, + avail_res->num_lm); + /* Clear DSC caps and retry */ + dp_mode->capabilities &= ~DP_PANEL_CAPS_DSC; + rc = -EAGAIN; + goto end; + } else { + /* Only DSCMERGE is supported on DP */ + num_lm = num_dsc; + } + } + + if (!num_dsc && (num_lm == 2) && avail_res->num_3dmux) { + num_3dmux = 1; + } + + avail_lm = avail_res->num_lm + avail_res->num_lm_in_use - dp->tot_lm_blks_in_use + + dp_panel->max_lm; + + if (num_lm > avail_lm) { + DP_DEBUG("mode %sx%d is invalid, not enough lm req:%d avail:%d\n", + mode->name, fps, num_lm, avail_lm); + rc = -EPERM; + goto end; + } else if (!num_dsc && (num_lm == dual && !num_3dmux)) { + DP_DEBUG("mode %sx%d is invalid, not enough 3dmux %d %d\n", + mode->name, fps, num_3dmux, avail_res->num_3dmux); + rc = -EPERM; + goto end; + } else if (num_lm == quad && num_dsc != quad) { + DP_DEBUG("mode %sx%d is invalid, unsupported DP topology lm:%d dsc:%d\n", + mode->name, fps, num_lm, num_dsc); + rc = -EPERM; + goto end; + } + + DP_DEBUG_V("mode %sx%d is valid, supported DP topology lm:%d dsc:%d 3dmux:%d\n", + mode->name, fps, num_lm, num_dsc, num_3dmux); + + dp_mode->lm_count = num_lm; + rc = 0; + +end: + mutex_unlock(&dp->accounting_lock); + return rc; +} + +static enum drm_mode_status dp_display_validate_mode( + struct dp_display *dp_display, + void *panel, struct drm_display_mode *mode, + const struct msm_resource_caps_info *avail_res) +{ + struct dp_display_private *dp; + struct dp_panel *dp_panel; + struct dp_debug *debug; + enum drm_mode_status mode_status = MODE_BAD; + struct dp_display_mode dp_mode; + int rc = 0; + + if (!dp_display || !mode || !panel || + !avail_res || !avail_res->max_mixer_width) { + DP_ERR("invalid params\n"); + return mode_status; + } + + dp = 
container_of(dp_display, struct dp_display_private, dp_display); + + mutex_lock(&dp->session_lock); + + dp_panel = panel; + if (!dp_panel->connector) { + DP_ERR("invalid connector\n"); + goto end; + } + + debug = dp->debug; + if (!debug) + goto end; + + dp_display->convert_to_dp_mode(dp_display, panel, mode, &dp_mode); + + /* As per spec, 640x480 mode should always be present as fail-safe */ + if ((dp_mode.timing.h_active == 640) && (dp_mode.timing.v_active == 480) && + (dp_mode.timing.pixel_clk_khz == 25175)) { + goto skip_validation; + } + + rc = dp_display_validate_topology(dp, dp_panel, mode, &dp_mode, avail_res); + if (rc == -EAGAIN) { + dp_panel->convert_to_dp_mode(dp_panel, mode, &dp_mode); + rc = dp_display_validate_topology(dp, dp_panel, mode, &dp_mode, avail_res); + } + + if (rc) + goto end; + + rc = dp_display_validate_link_clock(dp, mode, dp_mode); + if (rc) + goto end; + + rc = dp_display_validate_pixel_clock(dp_mode, dp_display->max_pclk_khz); + if (rc) + goto end; + +skip_validation: + mode_status = MODE_OK; + + if (!avail_res->num_lm_in_use) { + mutex_lock(&dp->accounting_lock); + dp->tot_lm_blks_in_use -= dp_panel->max_lm; + dp_panel->max_lm = max(dp_panel->max_lm, dp_mode.lm_count); + dp->tot_lm_blks_in_use += dp_panel->max_lm; + mutex_unlock(&dp->accounting_lock); + } + +end: + mutex_unlock(&dp->session_lock); + + DP_DEBUG_V("[%s clk:%d] mode is %s\n", mode->name, mode->clock, + (mode_status == MODE_OK) ? 
"valid" : "invalid"); + + return mode_status; +} + +static int dp_display_get_available_dp_resources(struct dp_display *dp_display, + const struct msm_resource_caps_info *avail_res, + struct msm_resource_caps_info *max_dp_avail_res) +{ + if (!dp_display || !avail_res || !max_dp_avail_res) { + DP_ERR("invalid arguments\n"); + return -EINVAL; + } + + memcpy(max_dp_avail_res, avail_res, + sizeof(struct msm_resource_caps_info)); + + max_dp_avail_res->num_lm = min(avail_res->num_lm, + dp_display->max_mixer_count); + max_dp_avail_res->num_dsc = min(avail_res->num_dsc, + dp_display->max_dsc_count); + + DP_DEBUG_V("max_lm:%d, avail_lm:%d, dp_avail_lm:%d\n", + dp_display->max_mixer_count, avail_res->num_lm, + max_dp_avail_res->num_lm); + + DP_DEBUG_V("max_dsc:%d, avail_dsc:%d, dp_avail_dsc:%d\n", + dp_display->max_dsc_count, avail_res->num_dsc, + max_dp_avail_res->num_dsc); + + return 0; +} + +static int dp_display_get_modes(struct dp_display *dp, void *panel, + struct dp_display_mode *dp_mode) +{ + struct dp_display_private *dp_display; + struct dp_panel *dp_panel; + int ret = 0; + + if (!dp || !panel) { + DP_ERR("invalid params\n"); + return 0; + } + + dp_panel = panel; + if (!dp_panel->connector) { + DP_ERR("invalid connector\n"); + return 0; + } + + dp_display = container_of(dp, struct dp_display_private, dp_display); + + ret = dp_panel->get_modes(dp_panel, dp_panel->connector, dp_mode); + if (dp_mode->timing.pixel_clk_khz) + dp->max_pclk_khz = dp_mode->timing.pixel_clk_khz; + return ret; +} + +static void dp_display_convert_to_dp_mode(struct dp_display *dp_display, + void *panel, + const struct drm_display_mode *drm_mode, + struct dp_display_mode *dp_mode) +{ + int rc; + struct dp_display_private *dp; + struct dp_panel *dp_panel; + u32 free_dsc_blks = 0, required_dsc_blks = 0, curr_dsc = 0, new_dsc = 0; + + if (!dp_display || !drm_mode || !dp_mode || !panel) { + DP_ERR("invalid input\n"); + return; + } + + dp = container_of(dp_display, struct dp_display_private, 
dp_display); + dp_panel = panel; + + memset(dp_mode, 0, sizeof(*dp_mode)); + + if (dp_panel->dsc_en) { + free_dsc_blks = dp_display->max_dsc_count - + dp->tot_dsc_blks_in_use + + dp_panel->dsc_blks_in_use; + DP_DEBUG_V("Before: in_use:%d, max:%d, free:%d\n", + dp->tot_dsc_blks_in_use, + dp_display->max_dsc_count, free_dsc_blks); + + rc = msm_get_dsc_count(dp->priv, drm_mode->hdisplay, + &required_dsc_blks); + if (rc) { + DP_ERR("error getting dsc count. rc:%d\n", rc); + return; + } + + curr_dsc = dp_panel->dsc_blks_in_use; + dp->tot_dsc_blks_in_use -= dp_panel->dsc_blks_in_use; + dp_panel->dsc_blks_in_use = 0; + + if (free_dsc_blks >= required_dsc_blks) { + dp_mode->capabilities |= DP_PANEL_CAPS_DSC; + new_dsc = max(curr_dsc, required_dsc_blks); + dp_panel->dsc_blks_in_use = new_dsc; + dp->tot_dsc_blks_in_use += new_dsc; + } + + DP_DEBUG_V("After: in_use:%d, max:%d, free:%d, req:%d, caps:0x%x\n", + dp->tot_dsc_blks_in_use, + dp_display->max_dsc_count, + free_dsc_blks, required_dsc_blks, + dp_mode->capabilities); + } + + dp_panel->convert_to_dp_mode(dp_panel, drm_mode, dp_mode); +} + +static int dp_display_config_hdr(struct dp_display *dp_display, void *panel, + struct drm_msm_ext_hdr_metadata *hdr, bool dhdr_update) +{ + struct dp_panel *dp_panel; + struct sde_connector *sde_conn; + struct dp_display_private *dp; + u64 core_clk_rate; + bool flush_hdr; + + if (!dp_display || !panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp_panel = panel; + dp = container_of(dp_display, struct dp_display_private, dp_display); + sde_conn = to_sde_connector(dp_panel->connector); + + core_clk_rate = dp->power->clk_get_rate(dp->power, "core_clk"); + if (!core_clk_rate) { + DP_ERR("invalid rate for core_clk\n"); + return -EINVAL; + } + + if (!dp_display_state_is(DP_STATE_ENABLED)) { + dp_display_state_show("[not enabled]"); + return 0; + } + + /* + * In rare cases where HDR metadata is updated independently + * flush the HDR metadata immediately instead of relying on + 
* the colorspace + */ + flush_hdr = !sde_conn->colorspace_updated; + + if (flush_hdr) + DP_DEBUG("flushing the HDR metadata\n"); + else + DP_DEBUG("piggy-backing with colorspace\n"); + + return dp_panel->setup_hdr(dp_panel, hdr, dhdr_update, + core_clk_rate, flush_hdr); +} + +static int dp_display_setup_colospace(struct dp_display *dp_display, + void *panel, + u32 colorspace) +{ + struct dp_panel *dp_panel; + struct dp_display_private *dp; + + if (!dp_display || !panel) { + pr_err("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + if (!dp_display_state_is(DP_STATE_ENABLED)) { + dp_display_state_show("[not enabled]"); + return 0; + } + + dp_panel = panel; + + return dp_panel->set_colorspace(dp_panel, colorspace); +} + +static int dp_display_create_workqueue(struct dp_display_private *dp) +{ + dp->wq = create_singlethread_workqueue("drm_dp"); + if (IS_ERR_OR_NULL(dp->wq)) { + DP_ERR("Error creating wq\n"); + return -EPERM; + } + + INIT_DELAYED_WORK(&dp->hdcp_cb_work, dp_display_hdcp_cb_work); + INIT_WORK(&dp->connect_work, dp_display_connect_work); + INIT_WORK(&dp->attention_work, dp_display_attention_work); + INIT_WORK(&dp->disconnect_work, dp_display_disconnect_work); + + return 0; +} + +static int dp_display_bridge_internal_hpd(void *dev, bool hpd, bool hpd_irq) +{ + struct dp_display_private *dp = dev; + struct drm_device *drm_dev = dp->dp_display.drm_dev; + + if (!drm_dev || !drm_dev->mode_config.poll_enabled) + return -EBUSY; + + if (hpd_irq) + dp_display_mst_attention(dp); + else + dp->hpd->simulate_connect(dp->hpd, hpd); + + return 0; +} + +static int dp_display_init_aux_bridge(struct dp_display_private *dp) +{ + int rc = 0; + const char *phandle = "qcom,dp-aux-bridge"; + struct device_node *bridge_node; + + if (!dp->pdev->dev.of_node) { + pr_err("cannot find dev.of_node\n"); + rc = -ENODEV; + goto end; + } + + bridge_node = of_parse_phandle(dp->pdev->dev.of_node, + phandle, 0); + if 
(!bridge_node) + goto end; + + dp->aux_bridge = of_dp_aux_find_bridge(bridge_node); + if (!dp->aux_bridge) { + pr_err("failed to find dp aux bridge\n"); + rc = -EPROBE_DEFER; + goto end; + } + + if (dp->aux_bridge->register_hpd && + !(dp->aux_bridge->flag & DP_AUX_BRIDGE_HPD)) + dp->aux_bridge->register_hpd(dp->aux_bridge, + dp_display_bridge_internal_hpd, dp); + +end: + return rc; +} + +static int dp_display_mst_install(struct dp_display *dp_display, + struct dp_mst_drm_install_info *mst_install_info) +{ + struct dp_display_private *dp; + + if (!dp_display || !mst_install_info) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state); + + if (!mst_install_info->cbs->hpd || !mst_install_info->cbs->hpd_irq) { + DP_ERR("invalid mst cbs\n"); + return -EINVAL; + } + + dp_display->dp_mst_prv_info = mst_install_info->dp_mst_prv_info; + + if (!dp->parser->has_mst) { + DP_DEBUG("mst not enabled\n"); + return -EPERM; + } + + memcpy(&dp->mst.cbs, mst_install_info->cbs, sizeof(dp->mst.cbs)); + dp->mst.drm_registered = true; + + DP_MST_DEBUG("dp mst drm installed\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state); + + return 0; +} + +static int dp_display_mst_uninstall(struct dp_display *dp_display) +{ + struct dp_display_private *dp; + + if (!dp_display) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state); + + if (!dp->mst.drm_registered) { + DP_DEBUG("drm mst not registered\n"); + return -EPERM; + } + + dp = container_of(dp_display, struct dp_display_private, + dp_display); + memset(&dp->mst.cbs, 0, sizeof(dp->mst.cbs)); + dp->mst.drm_registered = false; + + DP_MST_DEBUG("dp mst drm uninstalled\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state); + + return 0; +} + +static int 
dp_display_mst_connector_install(struct dp_display *dp_display, + struct drm_connector *connector) +{ + int rc = 0; + struct dp_panel_in panel_in; + struct dp_panel *dp_panel; + struct dp_display_private *dp; + + if (!dp_display || !connector) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state); + mutex_lock(&dp->session_lock); + + if (!dp->mst.drm_registered) { + DP_DEBUG("drm mst not registered\n"); + rc = -EPERM; + goto end; + } + + panel_in.dev = &dp->pdev->dev; + panel_in.aux = dp->aux; + panel_in.catalog = &dp->catalog->panel; + panel_in.link = dp->link; + panel_in.connector = connector; + panel_in.base_panel = dp->panel; + panel_in.parser = dp->parser; + + dp_panel = dp_panel_get(&panel_in); + if (IS_ERR(dp_panel)) { + rc = PTR_ERR(dp_panel); + DP_ERR("failed to initialize panel, rc = %d\n", rc); + goto end; + } + + dp_panel->audio = dp_audio_get(dp->pdev, dp_panel, &dp->catalog->audio); + if (IS_ERR(dp_panel->audio)) { + rc = PTR_ERR(dp_panel->audio); + DP_ERR("[mst] failed to initialize audio, rc = %d\n", rc); + dp_panel->audio = NULL; + goto end; + } + + DP_MST_DEBUG("dp mst connector installed. 
conn:%d\n", + connector->base.id); + +end: + mutex_unlock(&dp->session_lock); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state, rc); + + return rc; +} + +static int dp_display_mst_connector_uninstall(struct dp_display *dp_display, + struct drm_connector *connector) +{ + int rc = 0; + struct sde_connector *sde_conn; + struct dp_panel *dp_panel; + struct dp_display_private *dp; + struct dp_audio *audio = NULL; + + if (!dp_display || !connector) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state); + mutex_lock(&dp->session_lock); + + if (!dp->mst.drm_registered) { + DP_DEBUG("drm mst not registered\n"); + mutex_unlock(&dp->session_lock); + return -EPERM; + } + + sde_conn = to_sde_connector(connector); + if (!sde_conn->drv_panel) { + DP_ERR("invalid panel for connector:%d\n", connector->base.id); + mutex_unlock(&dp->session_lock); + return -EINVAL; + } + + dp_panel = sde_conn->drv_panel; + + /* Make a copy of audio structure to call into dp_audio_put later */ + audio = dp_panel->audio; + dp_panel_put(dp_panel); + + DP_MST_DEBUG("dp mst connector uninstalled. 
conn:%d\n", + connector->base.id); + + mutex_unlock(&dp->session_lock); + + dp_audio_put(audio); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state); + + return rc; +} + +static int dp_display_mst_connector_update_edid(struct dp_display *dp_display, + struct drm_connector *connector, + struct edid *edid) +{ + int rc = 0; + struct sde_connector *sde_conn; + struct dp_panel *dp_panel; + struct dp_display_private *dp; + + if (!dp_display || !connector || !edid) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + if (!dp->mst.drm_registered) { + DP_DEBUG("drm mst not registered\n"); + return -EPERM; + } + + sde_conn = to_sde_connector(connector); + if (!sde_conn->drv_panel) { + DP_ERR("invalid panel for connector:%d\n", connector->base.id); + return -EINVAL; + } + + dp_panel = sde_conn->drv_panel; + rc = dp_panel->update_edid(dp_panel, edid); + + DP_MST_DEBUG("dp mst connector:%d edid updated. mode_cnt:%d\n", + connector->base.id, rc); + + return rc; +} + +static int dp_display_update_pps(struct dp_display *dp_display, + struct drm_connector *connector, char *pps_cmd) +{ + struct sde_connector *sde_conn; + struct dp_panel *dp_panel; + struct dp_display_private *dp; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + sde_conn = to_sde_connector(connector); + if (!sde_conn->drv_panel) { + DP_ERR("invalid panel for connector:%d\n", connector->base.id); + return -EINVAL; + } + + if (!dp_display_state_is(DP_STATE_ENABLED)) { + dp_display_state_show("[not enabled]"); + return 0; + } + + dp_panel = sde_conn->drv_panel; + dp_panel->update_pps(dp_panel, pps_cmd); + return 0; +} + +static int dp_display_mst_connector_update_link_info( + struct dp_display *dp_display, + struct drm_connector *connector) +{ + int rc = 0; + struct sde_connector *sde_conn; + struct dp_panel *dp_panel; + struct dp_display_private *dp; + + if (!dp_display || !connector) { + DP_ERR("invalid 
input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + if (!dp->mst.drm_registered) { + DP_DEBUG("drm mst not registered\n"); + return -EPERM; + } + + sde_conn = to_sde_connector(connector); + if (!sde_conn->drv_panel) { + DP_ERR("invalid panel for connector:%d\n", connector->base.id); + return -EINVAL; + } + + dp_panel = sde_conn->drv_panel; + + memcpy(dp_panel->dpcd, dp->panel->dpcd, + DP_RECEIVER_CAP_SIZE + 1); + memcpy(dp_panel->dsc_dpcd, dp->panel->dsc_dpcd, + DP_RECEIVER_DSC_CAP_SIZE + 1); + memcpy(&dp_panel->link_info, &dp->panel->link_info, + sizeof(dp_panel->link_info)); + + DP_MST_DEBUG("dp mst connector:%d link info updated\n", + connector->base.id); + + return rc; +} + +static int dp_display_mst_get_fixed_topology_port( + struct dp_display *dp_display, + u32 strm_id, u32 *port_num) +{ + struct dp_display_private *dp; + u32 port; + + if (!dp_display) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + if (strm_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream id:%d\n", strm_id); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + port = dp->parser->mst_fixed_port[strm_id]; + + if (!port || port > 255) + return -ENOENT; + + if (port_num) + *port_num = port; + + return 0; +} + +static int dp_display_get_mst_caps(struct dp_display *dp_display, + struct dp_mst_caps *mst_caps) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!dp_display || !mst_caps) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + mst_caps->has_mst = dp->parser->has_mst; + mst_caps->max_streams_supported = (mst_caps->has_mst) ? 2 : 0; + mst_caps->max_dpcd_transaction_bytes = (mst_caps->has_mst) ? 
16 : 0; + mst_caps->drm_aux = dp->aux->drm_aux; + + return rc; +} + +static void dp_display_wakeup_phy_layer(struct dp_display *dp_display, + bool wakeup) +{ + struct dp_display_private *dp; + struct dp_hpd *hpd; + + if (!dp_display) { + DP_ERR("invalid input\n"); + return; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + if (!dp->mst.drm_registered) { + DP_DEBUG("drm mst not registered\n"); + return; + } + + hpd = dp->hpd; + if (hpd && hpd->wakeup_phy) + hpd->wakeup_phy(hpd, wakeup); +} + +static int dp_display_probe(struct platform_device *pdev) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!pdev || !pdev->dev.of_node) { + DP_ERR("pdev not found\n"); + rc = -ENODEV; + goto bail; + } + + dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL); + if (!dp) { + rc = -ENOMEM; + goto bail; + } + + init_completion(&dp->notification_comp); + init_completion(&dp->attention_comp); + + dp->pdev = pdev; + dp->name = "drm_dp"; + + memset(&dp->mst, 0, sizeof(dp->mst)); + + rc = dp_display_init_aux_bridge(dp); + if (rc) + goto error; + + rc = dp_display_create_workqueue(dp); + if (rc) { + DP_ERR("Failed to create workqueue\n"); + goto error; + } + + platform_set_drvdata(pdev, dp); + + g_dp_display = &dp->dp_display; + + g_dp_display->dp_ipc_log = ipc_log_context_create(DRM_DP_IPC_NUM_PAGES, "drm_dp", 0); + if (!g_dp_display->dp_ipc_log) + DP_WARN("Error in creating ipc_log_context for drm_dp\n"); + g_dp_display->dp_aux_ipc_log = ipc_log_context_create(DRM_DP_IPC_NUM_PAGES, "drm_dp_aux", + 0); + if (!g_dp_display->dp_aux_ipc_log) + DP_WARN("Error in creating ipc_log_context for drm_dp_aux\n"); + + g_dp_display->enable = dp_display_enable; + g_dp_display->post_enable = dp_display_post_enable; + g_dp_display->pre_disable = dp_display_pre_disable; + g_dp_display->disable = dp_display_disable; + g_dp_display->set_mode = dp_display_set_mode; + g_dp_display->validate_mode = dp_display_validate_mode; + g_dp_display->get_modes = 
dp_display_get_modes; + g_dp_display->prepare = dp_display_prepare; + g_dp_display->unprepare = dp_display_unprepare; + g_dp_display->request_irq = dp_request_irq; + g_dp_display->get_debug = dp_get_debug; + g_dp_display->post_open = NULL; + g_dp_display->post_init = dp_display_post_init; + g_dp_display->config_hdr = dp_display_config_hdr; + g_dp_display->mst_install = dp_display_mst_install; + g_dp_display->mst_uninstall = dp_display_mst_uninstall; + g_dp_display->mst_connector_install = dp_display_mst_connector_install; + g_dp_display->mst_connector_uninstall = + dp_display_mst_connector_uninstall; + g_dp_display->mst_connector_update_edid = + dp_display_mst_connector_update_edid; + g_dp_display->mst_connector_update_link_info = + dp_display_mst_connector_update_link_info; + g_dp_display->get_mst_caps = dp_display_get_mst_caps; + g_dp_display->set_stream_info = dp_display_set_stream_info; + g_dp_display->update_pps = dp_display_update_pps; + g_dp_display->convert_to_dp_mode = dp_display_convert_to_dp_mode; + g_dp_display->mst_get_fixed_topology_port = + dp_display_mst_get_fixed_topology_port; + g_dp_display->wakeup_phy_layer = + dp_display_wakeup_phy_layer; + g_dp_display->set_colorspace = dp_display_setup_colospace; + g_dp_display->get_available_dp_resources = + dp_display_get_available_dp_resources; + g_dp_display->clear_reservation = dp_display_clear_reservation; + g_dp_display->get_mst_pbn_div = dp_display_get_mst_pbn_div; + + rc = component_add(&pdev->dev, &dp_display_comp_ops); + if (rc) { + DP_ERR("component add failed, rc=%d\n", rc); + goto error; + } + + return 0; +error: + devm_kfree(&pdev->dev, dp); +bail: + return rc; +} + +int dp_display_get_displays(void **displays, int count) +{ + if (!displays) { + DP_ERR("invalid data\n"); + return -EINVAL; + } + + if (count != 1) { + DP_ERR("invalid number of displays\n"); + return -EINVAL; + } + + displays[0] = g_dp_display; + return count; +} + +int dp_display_get_num_of_displays(void) +{ + if (!g_dp_display) 
+ return 0; + + return 1; +} + +int dp_display_get_num_of_streams(void) +{ + return DP_STREAM_MAX; +} + +static void dp_display_set_mst_state(void *dp_display, + enum dp_drv_state mst_state) +{ + struct dp_display_private *dp; + + if (!g_dp_display) { + DP_DEBUG("dp display not initialized\n"); + return; + } + + dp = container_of(g_dp_display, struct dp_display_private, dp_display); + SDE_EVT32_EXTERNAL(mst_state, dp->mst.mst_active); + + if (dp->mst.mst_active && dp->mst.cbs.set_drv_state) + dp->mst.cbs.set_drv_state(g_dp_display, mst_state); +} + +static int dp_display_remove(struct platform_device *pdev) +{ + struct dp_display_private *dp; + + if (!pdev) + return -EINVAL; + + dp = platform_get_drvdata(pdev); + + dp_display_deinit_sub_modules(dp); + + if (dp->wq) + destroy_workqueue(dp->wq); + + platform_set_drvdata(pdev, NULL); + devm_kfree(&pdev->dev, dp); + + if (g_dp_display->dp_ipc_log) { + ipc_log_context_destroy(g_dp_display->dp_ipc_log); + g_dp_display->dp_ipc_log = NULL; + } + + if (g_dp_display->dp_aux_ipc_log) { + ipc_log_context_destroy(g_dp_display->dp_aux_ipc_log); + g_dp_display->dp_aux_ipc_log = NULL; + } + + return 0; +} + +static int dp_pm_prepare(struct device *dev) +{ + struct dp_display_private *dp = container_of(g_dp_display, + struct dp_display_private, dp_display); + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY); + mutex_lock(&dp->session_lock); + dp_display_set_mst_state(g_dp_display, PM_SUSPEND); + + /* + * There are a few instances where the DP is hotplugged when the device + * is in PM suspend state. After hotplug, it is observed the device + * enters and exits the PM suspend multiple times while aux transactions + * are taking place. This may sometimes cause an unclocked register + * access error. So, abort aux transactions when such a situation + * arises i.e. when DP is connected but display not enabled yet. 
+ */ + if (dp_display_state_is(DP_STATE_CONNECTED) && + !dp_display_state_is(DP_STATE_ENABLED)) { + dp->aux->abort(dp->aux, true); + dp->ctrl->abort(dp->ctrl, true); + } + + dp_display_state_add(DP_STATE_SUSPENDED); + mutex_unlock(&dp->session_lock); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state); + + return 0; +} + +static void dp_pm_complete(struct device *dev) +{ + struct dp_display_private *dp = container_of(g_dp_display, + struct dp_display_private, dp_display); + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY); + mutex_lock(&dp->session_lock); + dp_display_set_mst_state(g_dp_display, PM_DEFAULT); + + /* + * There are multiple PM suspend entry and exits observed before + * the connect uevent is issued to userspace. The aux transactions are + * aborted during PM suspend entry in dp_pm_prepare to prevent unclocked + * register access. On PM suspend exit, there will be no host_init call + * to reset the abort flags for ctrl and aux in case DP is connected + * but display not enabled. So, resetting abort flags for aux and ctrl. 
+ */ + if (dp_display_state_is(DP_STATE_CONNECTED) && + !dp_display_state_is(DP_STATE_ENABLED)) { + dp->aux->abort(dp->aux, false); + dp->ctrl->abort(dp->ctrl, false); + } + + dp_display_state_remove(DP_STATE_SUSPENDED); + mutex_unlock(&dp->session_lock); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state); +} + +void *get_ipc_log_context(void) +{ + if (g_dp_display && g_dp_display->dp_ipc_log) + return g_dp_display->dp_ipc_log; + return NULL; +} + +static const struct dev_pm_ops dp_pm_ops = { + .prepare = dp_pm_prepare, + .complete = dp_pm_complete, +}; + +static struct platform_driver dp_display_driver = { + .probe = dp_display_probe, + .remove = dp_display_remove, + .driver = { + .name = "msm-dp-display", + .of_match_table = dp_dt_match, + .suppress_bind_attrs = true, + .pm = &dp_pm_ops, + }, +}; + +void __init dp_display_register(void) +{ + + platform_driver_register(&dp_display_driver); +} + +void __exit dp_display_unregister(void) +{ + platform_driver_unregister(&dp_display_driver); +} diff --git a/msm/dp/dp_display.h b/msm/dp/dp_display.h new file mode 100644 index 000000000..277dfcdd4 --- /dev/null +++ b/msm/dp/dp_display.h @@ -0,0 +1,144 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DP_DISPLAY_H_ +#define _DP_DISPLAY_H_ + +#include +#include + +#include "dp_panel.h" + + +enum dp_drv_state { + PM_DEFAULT, + PM_SUSPEND, +}; + +struct dp_mst_drm_cbs { + void (*hpd)(void *display, bool hpd_status); + void (*hpd_irq)(void *display); + void (*set_drv_state)(void *dp_display, + enum dp_drv_state mst_state); + int (*set_mgr_state)(void *dp_display, bool state); + void (*set_mst_mode_params)(void *dp_display, struct dp_display_mode *mode); +}; + +struct dp_mst_drm_install_info { + void *dp_mst_prv_info; + const struct dp_mst_drm_cbs *cbs; +}; + +struct dp_mst_caps { + bool has_mst; + u32 max_streams_supported; + u32 max_dpcd_transaction_bytes; + struct drm_dp_aux *drm_aux; +}; + +struct dp_display { + struct drm_device *drm_dev; + struct dp_bridge *bridge; + struct drm_connector *base_connector; + void *base_dp_panel; + bool is_sst_connected; + bool is_mst_supported; + bool dsc_cont_pps; + u32 max_pclk_khz; + void *dp_mst_prv_info; + u32 max_mixer_count; + u32 max_dsc_count; + void *dp_ipc_log; + void *dp_aux_ipc_log; + + int (*enable)(struct dp_display *dp_display, void *panel); + int (*post_enable)(struct dp_display *dp_display, void *panel); + + int (*pre_disable)(struct dp_display *dp_display, void *panel); + int (*disable)(struct dp_display *dp_display, void *panel); + + int (*set_mode)(struct dp_display *dp_display, void *panel, + struct dp_display_mode *mode); + enum drm_mode_status (*validate_mode)(struct dp_display *dp_display, + void *panel, struct drm_display_mode *mode, + const struct msm_resource_caps_info *avail_res); + int (*get_modes)(struct dp_display *dp_display, void *panel, + struct dp_display_mode *dp_mode); + int (*prepare)(struct dp_display *dp_display, void *panel); + int (*unprepare)(struct dp_display *dp_display, void *panel); + int (*request_irq)(struct dp_display *dp_display); + struct dp_debug *(*get_debug)(struct dp_display *dp_display); + void (*post_open)(struct dp_display *dp_display); + int 
(*config_hdr)(struct dp_display *dp_display, void *panel, + struct drm_msm_ext_hdr_metadata *hdr_meta, + bool dhdr_update); + int (*set_colorspace)(struct dp_display *dp_display, void *panel, + u32 colorspace); + int (*post_init)(struct dp_display *dp_display); + int (*mst_install)(struct dp_display *dp_display, + struct dp_mst_drm_install_info *mst_install_info); + int (*mst_uninstall)(struct dp_display *dp_display); + int (*mst_connector_install)(struct dp_display *dp_display, + struct drm_connector *connector); + int (*mst_connector_uninstall)(struct dp_display *dp_display, + struct drm_connector *connector); + int (*mst_connector_update_edid)(struct dp_display *dp_display, + struct drm_connector *connector, + struct edid *edid); + int (*mst_connector_update_link_info)(struct dp_display *dp_display, + struct drm_connector *connector); + int (*mst_get_fixed_topology_port)(struct dp_display *dp_display, + u32 strm_id, u32 *port_num); + int (*get_mst_caps)(struct dp_display *dp_display, + struct dp_mst_caps *mst_caps); + int (*set_stream_info)(struct dp_display *dp_display, void *panel, + u32 strm_id, u32 start_slot, u32 num_slots, u32 pbn, + int vcpi); + void (*convert_to_dp_mode)(struct dp_display *dp_display, void *panel, + const struct drm_display_mode *drm_mode, + struct dp_display_mode *dp_mode); + int (*update_pps)(struct dp_display *dp_display, + struct drm_connector *connector, char *pps_cmd); + void (*wakeup_phy_layer)(struct dp_display *dp_display, + bool wakeup); + int (*get_available_dp_resources)(struct dp_display *dp_display, + const struct msm_resource_caps_info *avail_res, + struct msm_resource_caps_info *max_dp_avail_res); + void (*clear_reservation)(struct dp_display *dp, struct dp_panel *panel); + int (*get_mst_pbn_div)(struct dp_display *dp); +}; + +void *get_ipc_log_context(void); + +#if IS_ENABLED(CONFIG_DRM_MSM_DP) +int dp_display_get_num_of_displays(void); +int dp_display_get_displays(void **displays, int count); +int 
dp_display_get_num_of_streams(void); +int dp_display_mmrm_callback(struct mmrm_client_notifier_data *notifier_data); +#else +static inline int dp_display_get_num_of_displays(void) +{ + return 0; +} +static inline int dp_display_get_displays(void **displays, int count) +{ + return 0; +} +static inline int dp_display_get_num_of_streams(void) +{ + return 0; +} +static inline int dp_connector_update_pps(struct drm_connector *connector, + char *pps_cmd, void *display) +{ + return 0; +} +static inline int dp_display_mmrm_callback(struct mmrm_client_notifier_data *notifier_data) +{ + return 0; +} +#endif /* CONFIG_DRM_MSM_DP */ +#endif /* _DP_DISPLAY_H_ */ diff --git a/msm/dp/dp_drm.c b/msm/dp/dp_drm.c new file mode 100644 index 000000000..7cc9d6acf --- /dev/null +++ b/msm/dp/dp_drm.c @@ -0,0 +1,794 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include + +#include "msm_drv.h" +#include "msm_kms.h" +#include "sde_connector.h" +#include "dp_drm.h" +#include "dp_mst_drm.h" +#include "dp_debug.h" + +#define DP_MST_DEBUG(fmt, ...) 
DP_DEBUG(fmt, ##__VA_ARGS__) + +#define to_dp_bridge(x) container_of((x), struct dp_bridge, base) + +void convert_to_drm_mode(const struct dp_display_mode *dp_mode, + struct drm_display_mode *drm_mode) +{ + u32 flags = 0; + + memset(drm_mode, 0, sizeof(*drm_mode)); + + drm_mode->hdisplay = dp_mode->timing.h_active; + drm_mode->hsync_start = drm_mode->hdisplay + + dp_mode->timing.h_front_porch; + drm_mode->hsync_end = drm_mode->hsync_start + + dp_mode->timing.h_sync_width; + drm_mode->htotal = drm_mode->hsync_end + dp_mode->timing.h_back_porch; + drm_mode->hskew = dp_mode->timing.h_skew; + + drm_mode->vdisplay = dp_mode->timing.v_active; + drm_mode->vsync_start = drm_mode->vdisplay + + dp_mode->timing.v_front_porch; + drm_mode->vsync_end = drm_mode->vsync_start + + dp_mode->timing.v_sync_width; + drm_mode->vtotal = drm_mode->vsync_end + dp_mode->timing.v_back_porch; + + drm_mode->clock = dp_mode->timing.pixel_clk_khz; + + if (dp_mode->timing.h_active_low) + flags |= DRM_MODE_FLAG_NHSYNC; + else + flags |= DRM_MODE_FLAG_PHSYNC; + + if (dp_mode->timing.v_active_low) + flags |= DRM_MODE_FLAG_NVSYNC; + else + flags |= DRM_MODE_FLAG_PVSYNC; + + drm_mode->flags = flags; + + drm_mode->type = 0x48; + drm_mode_set_name(drm_mode); +} + +static int dp_bridge_attach(struct drm_bridge *dp_bridge, + enum drm_bridge_attach_flags flags) +{ + struct dp_bridge *bridge = to_dp_bridge(dp_bridge); + + if (!dp_bridge) { + DP_ERR("Invalid params\n"); + return -EINVAL; + } + + DP_DEBUG("[%d] attached\n", bridge->id); + + return 0; +} + +static void dp_bridge_pre_enable(struct drm_bridge *drm_bridge) +{ + int rc = 0; + struct dp_bridge *bridge; + struct dp_display *dp; + + if (!drm_bridge) { + DP_ERR("Invalid params\n"); + return; + } + + bridge = to_dp_bridge(drm_bridge); + dp = bridge->display; + + if (!bridge->connector) { + DP_ERR("Invalid connector\n"); + return; + } + + if (!bridge->dp_panel) { + DP_ERR("Invalid dp_panel\n"); + return; + } + + /* By this point mode should have been 
validated through mode_fixup */ + rc = dp->set_mode(dp, bridge->dp_panel, &bridge->dp_mode); + if (rc) { + DP_ERR("[%d] failed to perform a mode set, rc=%d\n", + bridge->id, rc); + return; + } + + rc = dp->prepare(dp, bridge->dp_panel); + if (rc) { + DP_ERR("[%d] DP display prepare failed, rc=%d\n", + bridge->id, rc); + return; + } + + /* for SST force stream id, start slot and total slots to 0 */ + dp->set_stream_info(dp, bridge->dp_panel, 0, 0, 0, 0, 0); + + rc = dp->enable(dp, bridge->dp_panel); + if (rc) + DP_ERR("[%d] DP display enable failed, rc=%d\n", + bridge->id, rc); +} + +static void dp_bridge_enable(struct drm_bridge *drm_bridge) +{ + int rc = 0; + struct dp_bridge *bridge; + struct dp_display *dp; + + if (!drm_bridge) { + DP_ERR("Invalid params\n"); + return; + } + + bridge = to_dp_bridge(drm_bridge); + if (!bridge->connector) { + DP_ERR("Invalid connector\n"); + return; + } + + if (!bridge->dp_panel) { + DP_ERR("Invalid dp_panel\n"); + return; + } + + dp = bridge->display; + + rc = dp->post_enable(dp, bridge->dp_panel); + if (rc) + DP_ERR("[%d] DP display post enable failed, rc=%d\n", + bridge->id, rc); +} + +static void dp_bridge_disable(struct drm_bridge *drm_bridge) +{ + int rc = 0; + struct dp_bridge *bridge; + struct dp_display *dp; + + if (!drm_bridge) { + DP_ERR("Invalid params\n"); + return; + } + + bridge = to_dp_bridge(drm_bridge); + if (!bridge->connector) { + DP_ERR("Invalid connector\n"); + return; + } + + if (!bridge->dp_panel) { + DP_ERR("Invalid dp_panel\n"); + return; + } + + dp = bridge->display; + + if (!dp) { + DP_ERR("dp is null\n"); + return; + } + + if (dp) + sde_connector_helper_bridge_disable(bridge->connector); + + rc = dp->pre_disable(dp, bridge->dp_panel); + if (rc) { + DP_ERR("[%d] DP display pre disable failed, rc=%d\n", + bridge->id, rc); + } +} + +static void dp_bridge_post_disable(struct drm_bridge *drm_bridge) +{ + int rc = 0; + struct dp_bridge *bridge; + struct dp_display *dp; + + if (!drm_bridge) { + 
DP_ERR("Invalid params\n"); + return; + } + + bridge = to_dp_bridge(drm_bridge); + if (!bridge->connector) { + DP_ERR("Invalid connector\n"); + return; + } + + if (!bridge->dp_panel) { + DP_ERR("Invalid dp_panel\n"); + return; + } + + dp = bridge->display; + + rc = dp->disable(dp, bridge->dp_panel); + if (rc) { + DP_ERR("[%d] DP display disable failed, rc=%d\n", + bridge->id, rc); + return; + } + + rc = dp->unprepare(dp, bridge->dp_panel); + if (rc) { + DP_ERR("[%d] DP display unprepare failed, rc=%d\n", + bridge->id, rc); + return; + } +} + +static void dp_bridge_mode_set(struct drm_bridge *drm_bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode) +{ + struct dp_bridge *bridge; + struct dp_display *dp; + + if (!drm_bridge || !mode || !adjusted_mode) { + DP_ERR("Invalid params\n"); + return; + } + + bridge = to_dp_bridge(drm_bridge); + if (!bridge->connector) { + DP_ERR("Invalid connector\n"); + return; + } + + if (!bridge->dp_panel) { + DP_ERR("Invalid dp_panel\n"); + return; + } + + dp = bridge->display; + + dp->convert_to_dp_mode(dp, bridge->dp_panel, adjusted_mode, + &bridge->dp_mode); + + dp->clear_reservation(dp, bridge->dp_panel); +} + +static bool dp_bridge_mode_fixup(struct drm_bridge *drm_bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + bool ret = true; + struct dp_display_mode dp_mode; + struct dp_bridge *bridge; + struct dp_display *dp; + + if (!drm_bridge || !mode || !adjusted_mode) { + DP_ERR("Invalid params\n"); + ret = false; + goto end; + } + + bridge = to_dp_bridge(drm_bridge); + if (!bridge->connector) { + DP_ERR("Invalid connector\n"); + ret = false; + goto end; + } + + if (!bridge->dp_panel) { + DP_ERR("Invalid dp_panel\n"); + ret = false; + goto end; + } + + dp = bridge->display; + + dp->convert_to_dp_mode(dp, bridge->dp_panel, mode, &dp_mode); + dp->clear_reservation(dp, bridge->dp_panel); + convert_to_drm_mode(&dp_mode, adjusted_mode); +end: + return ret; 
+} + +static const struct drm_bridge_funcs dp_bridge_ops = { + .attach = dp_bridge_attach, + .mode_fixup = dp_bridge_mode_fixup, + .pre_enable = dp_bridge_pre_enable, + .enable = dp_bridge_enable, + .disable = dp_bridge_disable, + .post_disable = dp_bridge_post_disable, + .mode_set = dp_bridge_mode_set, +}; + +int dp_connector_add_custom_mode(struct drm_connector *conn, struct dp_display_mode *dp_mode) +{ + struct drm_display_mode *m, drm_mode; + + memset(&drm_mode, 0x0, sizeof(drm_mode)); + convert_to_drm_mode(dp_mode, &drm_mode); + m = drm_mode_duplicate(conn->dev, &drm_mode); + if (!m) { + DP_ERR("failed to add mode %ux%u\n", drm_mode.hdisplay, drm_mode.vdisplay); + return 0; + } + m->width_mm = conn->display_info.width_mm; + m->height_mm = conn->display_info.height_mm; + drm_mode_probed_add(conn, m); + + return 1; +} + +void init_failsafe_mode(struct dp_display_mode *dp_mode) +{ + static const struct dp_panel_info fail_safe = { + .h_active = 640, + .v_active = 480, + .h_back_porch = 48, + .h_front_porch = 16, + .h_sync_width = 96, + .h_active_low = 1, + .v_back_porch = 33, + .v_front_porch = 10, + .v_sync_width = 2, + .v_active_low = 1, + .h_skew = 0, + .refresh_rate = 60, + .pixel_clk_khz = 25175, + .bpp = 24, + .widebus_en = true, + }; + + memcpy(&dp_mode->timing, &fail_safe, sizeof(fail_safe)); +} + +int dp_connector_config_hdr(struct drm_connector *connector, void *display, + struct sde_connector_state *c_state) +{ + struct dp_display *dp = display; + struct sde_connector *sde_conn; + + if (!display || !c_state || !connector) { + DP_ERR("invalid params\n"); + return -EINVAL; + } + + sde_conn = to_sde_connector(connector); + if (!sde_conn->drv_panel) { + DP_ERR("invalid dp panel\n"); + return -EINVAL; + } + + return dp->config_hdr(dp, sde_conn->drv_panel, &c_state->hdr_meta, + c_state->dyn_hdr_meta.dynamic_hdr_update); +} + +int dp_connector_set_colorspace(struct drm_connector *connector, + void *display) +{ + struct dp_display *dp_display = display; + 
struct sde_connector *sde_conn; + + if (!dp_display || !connector) + return -EINVAL; + + sde_conn = to_sde_connector(connector); + if (!sde_conn->drv_panel) { + pr_err("invalid dp panel\n"); + return -EINVAL; + } + + return dp_display->set_colorspace(dp_display, + sde_conn->drv_panel, connector->state->colorspace); +} + +int dp_connector_post_init(struct drm_connector *connector, void *display) +{ + int rc; + struct dp_display *dp_display = display; + struct sde_connector *sde_conn; + + if (!dp_display || !connector) + return -EINVAL; + + dp_display->base_connector = connector; + dp_display->bridge->connector = connector; + + if (dp_display->post_init) { + rc = dp_display->post_init(dp_display); + if (rc) + goto end; + } + + sde_conn = to_sde_connector(connector); + dp_display->bridge->dp_panel = sde_conn->drv_panel; + + rc = dp_mst_init(dp_display); + + if (dp_display->dsc_cont_pps) + sde_conn->ops.update_pps = NULL; + +end: + return rc; +} + +int dp_connector_get_mode_info(struct drm_connector *connector, + const struct drm_display_mode *drm_mode, + struct msm_sub_mode *sub_mode, + struct msm_mode_info *mode_info, + void *display, const struct msm_resource_caps_info *avail_res) +{ + const u32 single_intf = 1; + const u32 no_enc = 0; + struct msm_display_topology *topology; + struct sde_connector *sde_conn; + struct dp_panel *dp_panel; + struct dp_display_mode dp_mode; + struct dp_display *dp_disp = display; + struct msm_drm_private *priv; + struct msm_resource_caps_info avail_dp_res; + int rc = 0; + + if (!drm_mode || !mode_info || !avail_res || + !avail_res->max_mixer_width || !connector || !display || + !connector->dev || !connector->dev->dev_private) { + DP_ERR("invalid params\n"); + return -EINVAL; + } + + memset(mode_info, 0, sizeof(*mode_info)); + + sde_conn = to_sde_connector(connector); + dp_panel = sde_conn->drv_panel; + priv = connector->dev->dev_private; + + topology = &mode_info->topology; + + rc = dp_disp->get_available_dp_resources(dp_disp, 
avail_res, + &avail_dp_res); + if (rc) { + DP_ERR("error getting max dp resources. rc:%d\n", rc); + return rc; + } + + rc = msm_get_mixer_count(priv, drm_mode, &avail_dp_res, + &topology->num_lm); + if (rc) { + DP_ERR("error getting mixer count. rc:%d\n", rc); + return rc; + } + /* reset dp connector lm_mask for every connection event and + * this will get re-populated in resource manager based on + * resolution and topology of dp display. + */ + sde_conn->lm_mask = 0; + + topology->num_enc = no_enc; + topology->num_intf = single_intf; + + mode_info->frame_rate = drm_mode_vrefresh(drm_mode); + mode_info->vtotal = drm_mode->vtotal; + + mode_info->wide_bus_en = dp_panel->widebus_en; + + dp_disp->convert_to_dp_mode(dp_disp, dp_panel, drm_mode, &dp_mode); + + if (dp_mode.timing.comp_info.enabled) { + memcpy(&mode_info->comp_info, + &dp_mode.timing.comp_info, + sizeof(mode_info->comp_info)); + + topology->num_enc = topology->num_lm; + topology->comp_type = mode_info->comp_info.comp_type; + } + + return 0; +} + +int dp_connector_get_info(struct drm_connector *connector, + struct msm_display_info *info, void *data) +{ + struct dp_display *display = data; + + if (!info || !display || !display->drm_dev) { + DP_ERR("invalid params\n"); + return -EINVAL; + } + + info->intf_type = DRM_MODE_CONNECTOR_DisplayPort; + + info->num_of_h_tiles = 1; + info->h_tile_instance[0] = 0; + info->is_connected = display->is_sst_connected; + info->curr_panel_mode = MSM_DISPLAY_VIDEO_MODE; + info->capabilities = MSM_DISPLAY_CAP_VID_MODE | MSM_DISPLAY_CAP_EDID | + MSM_DISPLAY_CAP_HOT_PLUG; + + return 0; +} + +enum drm_connector_status dp_connector_detect(struct drm_connector *conn, + bool force, + void *display) +{ + enum drm_connector_status status = connector_status_unknown; + struct msm_display_info info; + int rc; + + if (!conn || !display) + return status; + + /* get display dp_info */ + memset(&info, 0x0, sizeof(info)); + rc = dp_connector_get_info(conn, &info, display); + if (rc) { + 
DP_ERR("failed to get display info, rc=%d\n", rc); + return connector_status_disconnected; + } + + if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG) + status = (info.is_connected ? connector_status_connected : + connector_status_disconnected); + else + status = connector_status_connected; + + conn->display_info.width_mm = info.width_mm; + conn->display_info.height_mm = info.height_mm; + + return status; +} + +void dp_connector_post_open(struct drm_connector *connector, void *display) +{ + struct dp_display *dp; + + if (!display) { + DP_ERR("invalid input\n"); + return; + } + + dp = display; + + if (dp->post_open) + dp->post_open(dp); +} + +int dp_connector_atomic_check(struct drm_connector *connector, + void *display, + struct drm_atomic_state *a_state) +{ + struct sde_connector *sde_conn; + struct drm_connector_state *old_state; + struct drm_connector_state *c_state; + + if (!connector || !display || !a_state) + return -EINVAL; + + c_state = drm_atomic_get_new_connector_state(a_state, connector); + old_state = + drm_atomic_get_old_connector_state(a_state, connector); + + if (!old_state || !c_state) + return -EINVAL; + + sde_conn = to_sde_connector(connector); + + /* + * Marking the colorspace has been changed + * the flag shall be checked in the pre_kickoff + * to configure the new colorspace in HW + */ + if (c_state->colorspace != old_state->colorspace) { + DP_DEBUG("colorspace has been updated\n"); + sde_conn->colorspace_updated = true; + } + + return 0; +} + +int dp_connector_get_modes(struct drm_connector *connector, + void *display, const struct msm_resource_caps_info *avail_res) +{ + int rc = 0; + struct dp_display *dp; + struct dp_display_mode *dp_mode = NULL; + struct sde_connector *sde_conn; + + if (!connector || !display) + return 0; + + sde_conn = to_sde_connector(connector); + if (!sde_conn->drv_panel) { + DP_ERR("invalid dp panel\n"); + return 0; + } + + dp = display; + + dp_mode = kzalloc(sizeof(*dp_mode), GFP_KERNEL); + if (!dp_mode) + return 0; + + 
/* pluggable case assumes EDID is read when HPD */ + if (dp->is_sst_connected) { + /* + * 1. for test request, rc = 1, and dp_mode will have test mode populated + * 2. During normal operation, dp_mode will be untouched + * a. if mode query succeeds rc >= 0, valid modes will be added to connector + * b. if edid read failed, then connector mode list will be empty and rc <= 0 + */ + rc = dp->get_modes(dp, sde_conn->drv_panel, dp_mode); + if (!rc) { + DP_WARN("failed to get DP sink modes, adding failsafe"); + init_failsafe_mode(dp_mode); + } + if (dp_mode->timing.pixel_clk_khz) /* valid DP mode */ + rc = dp_connector_add_custom_mode(connector, dp_mode); + } else { + DP_ERR("No sink connected\n"); + } + kfree(dp_mode); + + return rc; +} + +int dp_drm_bridge_init(void *data, struct drm_encoder *encoder, + u32 max_mixer_count, u32 max_dsc_count) +{ + int rc = 0; + struct dp_bridge *bridge; + struct drm_device *dev; + struct dp_display *display = data; + struct msm_drm_private *priv = NULL; + + bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); + if (!bridge) { + rc = -ENOMEM; + goto error; + } + + dev = display->drm_dev; + bridge->display = display; + bridge->base.funcs = &dp_bridge_ops; + bridge->base.encoder = encoder; + + priv = dev->dev_private; + + rc = drm_bridge_attach(encoder, &bridge->base, NULL, 0); + if (rc) { + DP_ERR("failed to attach bridge, rc=%d\n", rc); + goto error_free_bridge; + } + + rc = display->request_irq(display); + if (rc) { + DP_ERR("request_irq failed, rc=%d\n", rc); + goto error_free_bridge; + } + + priv->bridges[priv->num_bridges++] = &bridge->base; + display->bridge = bridge; + display->max_mixer_count = max_mixer_count; + display->max_dsc_count = max_dsc_count; + + return 0; +error_free_bridge: + kfree(bridge); +error: + return rc; +} + +void dp_drm_bridge_deinit(void *data) +{ + struct dp_display *display = data; + struct dp_bridge *bridge = display->bridge; + + kfree(bridge); +} + +enum drm_mode_status dp_connector_mode_valid(struct 
drm_connector *connector, + struct drm_display_mode *mode, void *display, + const struct msm_resource_caps_info *avail_res) +{ + int rc = 0, vrefresh; + struct dp_display *dp_disp; + struct sde_connector *sde_conn; + struct msm_resource_caps_info avail_dp_res; + struct dp_panel *dp_panel; + + if (!mode || !display || !connector) { + DP_ERR("invalid params\n"); + return MODE_ERROR; + } + + sde_conn = to_sde_connector(connector); + if (!sde_conn->drv_panel) { + DP_ERR("invalid dp panel\n"); + return MODE_ERROR; + } + + dp_disp = display; + dp_panel = sde_conn->drv_panel; + + vrefresh = drm_mode_vrefresh(mode); + + rc = dp_disp->get_available_dp_resources(dp_disp, avail_res, + &avail_dp_res); + if (rc) { + DP_ERR("error getting max dp resources. rc:%d\n", rc); + return MODE_ERROR; + } + + /* As per spec, failsafe mode should always be present */ + if ((mode->hdisplay == 640) && (mode->vdisplay == 480) && (mode->clock == 25175)) + goto validate_mode; + + if (dp_panel->mode_override && (mode->hdisplay != dp_panel->hdisplay || + mode->vdisplay != dp_panel->vdisplay || + vrefresh != dp_panel->vrefresh || + mode->picture_aspect_ratio != dp_panel->aspect_ratio)) + return MODE_BAD; + +validate_mode: + return dp_disp->validate_mode(dp_disp, sde_conn->drv_panel, + mode, &avail_dp_res); +} + +int dp_connector_update_pps(struct drm_connector *connector, + char *pps_cmd, void *display) +{ + struct dp_display *dp_disp; + struct sde_connector *sde_conn; + + if (!display || !connector) { + DP_ERR("invalid params\n"); + return -EINVAL; + } + + sde_conn = to_sde_connector(connector); + if (!sde_conn->drv_panel) { + DP_ERR("invalid dp panel\n"); + return MODE_ERROR; + } + + dp_disp = display; + return dp_disp->update_pps(dp_disp, connector, pps_cmd); +} + +int dp_connector_install_properties(void *display, struct drm_connector *conn) +{ + struct dp_display *dp_display = display; + struct drm_connector *base_conn; + int rc; + + if (!display || !conn) { + DP_ERR("invalid params\n"); + 
return -EINVAL; + } + + base_conn = dp_display->base_connector; + + /* + * Create the property on the base connector during probe time and then + * attach the same property onto new connector objects created for MST + */ + if (!base_conn->colorspace_property) { + /* This is the base connector. create the drm property */ + rc = drm_mode_create_dp_colorspace_property(base_conn); + if (rc) + return rc; + } else { + conn->colorspace_property = base_conn->colorspace_property; + } + + drm_object_attach_property(&conn->base, conn->colorspace_property, 0); + + return 0; +} diff --git a/msm/dp/dp_drm.h b/msm/dp/dp_drm.h new file mode 100644 index 000000000..6ceb3ab3f --- /dev/null +++ b/msm/dp/dp_drm.h @@ -0,0 +1,275 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_DRM_H_ +#define _DP_DRM_H_ + +#include +#include +#include + +#include "msm_drv.h" +#include "dp_display.h" + +struct dp_bridge { + struct drm_bridge base; + u32 id; + + struct drm_connector *connector; + struct dp_display *display; + struct dp_display_mode dp_mode; + void *dp_panel; +}; + + +#if IS_ENABLED(CONFIG_DRM_MSM_DP) +/** + * dp_connector_config_hdr - callback to configure HDR + * @connector: Pointer to drm connector structure + * @display: Pointer to private display handle + * @c_state: connect state data + * Returns: Zero on success + */ +int dp_connector_config_hdr(struct drm_connector *connector, + void *display, + struct sde_connector_state *c_state); + +/** + * dp_connector_atomic_check - callback to perform atomic + * check for DP + * @connector: Pointer to drm connector structure + * @display: Pointer to private display handle + * @c_state: connect state data + * Returns: Zero on success + */ +int dp_connector_atomic_check(struct drm_connector *connector, + void *display, + struct drm_atomic_state *state); + +/** + * 
dp_connector_set_colorspace - callback to set new colorspace + * @connector: Pointer to drm connector structure + * @display: Pointer to private display handle + * Returns: Zero on success + */ +int dp_connector_set_colorspace(struct drm_connector *connector, + void *display); + +/** + * dp_connector_post_init - callback to perform additional initialization steps + * @connector: Pointer to drm connector structure + * @display: Pointer to private display handle + * Returns: Zero on success + */ +int dp_connector_post_init(struct drm_connector *connector, void *display); + +/** + * dp_connector_detect - callback to determine if connector is connected + * @connector: Pointer to drm connector structure + * @force: Force detect setting from drm framework + * @display: Pointer to private display handle + * Returns: Connector 'is connected' status + */ +enum drm_connector_status dp_connector_detect(struct drm_connector *conn, + bool force, + void *display); + +/** + * dp_connector_get_modes - callback to add drm modes via drm_mode_probed_add() + * @connector: Pointer to drm connector structure + * @display: Pointer to private display handle + * @avail_res: Pointer with curr available resources + * Returns: Number of modes added + */ +int dp_connector_get_modes(struct drm_connector *connector, + void *display, const struct msm_resource_caps_info *avail_res); + +/** + * dp_connector_mode_valid - callback to determine if specified mode is valid + * @connector: Pointer to drm connector structure + * @mode: Pointer to drm mode structure + * @display: Pointer to private display handle + * @avail_res: Pointer with curr available resources + * Returns: Validity status for specified mode + */ +enum drm_mode_status dp_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode, + void *display, const struct msm_resource_caps_info *avail_res); + +/** + * dp_connector_get_mode_info - retrieve information of the mode selected + * @connector: Pointer to drm 
connector structure + * @drm_mode: Display mode set for the display + * @mode_info: Out parameter. Information of the mode + * @sub_mode: Additional mode info to drm display mode + * @display: Pointer to private display structure + * @avail_res: Pointer with curr available resources + * Returns: zero on success + */ +int dp_connector_get_mode_info(struct drm_connector *connector, + const struct drm_display_mode *drm_mode, + struct msm_sub_mode *sub_mode, + struct msm_mode_info *mode_info, + void *display, const struct msm_resource_caps_info *avail_res); + +/** + * dp_connector_get_info - retrieve connector display info + * @connector: Pointer to drm connector structure + * @info: Out parameter. Information of the connected display + * @display: Pointer to private display structure + * Returns: zero on success + */ +int dp_connector_get_info(struct drm_connector *connector, + struct msm_display_info *info, void *display); + +/** + * dp_connector_post_open - handle the post open functionalities + * @connector: Pointer to drm connector structure + * @display: Pointer to private display structure + */ +void dp_connector_post_open(struct drm_connector *connector, void *display); + +/** + * dp_drm_bridge_init- drm dp bridge initialize + * @display: Pointer to private display structure + * @encoder: encoder for this dp bridge + * @max_mixer_count: max available mixers for dp display + * @max_dsc_count: max available dsc for dp display + */ +int dp_drm_bridge_init(void *display, struct drm_encoder *encoder, + u32 max_mixer_count, u32 max_dsc_count); + +void dp_drm_bridge_deinit(void *display); + +/** + * convert_to_drm_mode - convert dp mode to drm mode + * @dp_mode: Point to dp mode + * @drm_mode: Pointer to drm mode + */ +void convert_to_drm_mode(const struct dp_display_mode *dp_mode, + struct drm_display_mode *drm_mode); + +/** + * dp_connector_update_pps - update pps for given connector + * @dp_mode: Point to dp mode + * @pps_cmd: PPS packet + * @display: Pointer to 
private display structure + */ +int dp_connector_update_pps(struct drm_connector *connector, + char *pps_cmd, void *display); + +/** + * dp_connector_install_properties - install drm properties + * @display: Pointer to private display structure + * @conn: Pointer to connector + */ +int dp_connector_install_properties(void *display, + struct drm_connector *conn); + +/** + * init_failsafe_mode - add failsafe edid mode + * @dp_mode: Pointer to mode + */ +void init_failsafe_mode(struct dp_display_mode *dp_mode); + +/** + * dp_connector_add_custom_mode - add edid mode to connector + * @conn: Pointer to connector + * @dp_mode: Pointer to mode + */ +int dp_connector_add_custom_mode(struct drm_connector *conn, struct dp_display_mode *dp_mode); + +#else +static inline int dp_connector_config_hdr(struct drm_connector *connector, + void *display, struct sde_connector_state *c_state) +{ + return 0; +} + +static inline int dp_connector_atomic_check(struct drm_connector *connector, + void *display, struct drm_atomic_state *state) +{ + return 0; +} + +static inline int dp_connector_set_colorspace(struct drm_connector *connector, + void *display) +{ + return 0; +} + +static inline int dp_connector_post_init(struct drm_connector *connector, + void *display) +{ + return 0; +} + +static inline enum drm_connector_status dp_connector_detect( + struct drm_connector *conn, + bool force, + void *display) +{ + return 0; +} + + +static inline int dp_connector_get_modes(struct drm_connector *connector, + void *display, const struct msm_resource_caps_info *avail_res) +{ + return 0; +} + +static inline enum drm_mode_status dp_connector_mode_valid( + struct drm_connector *connector, + struct drm_display_mode *mode, + void *display, const struct msm_resource_caps_info *avail_res) +{ + return MODE_OK; +} + +static inline int dp_connector_get_mode_info(struct drm_connector *connector, + const struct drm_display_mode *drm_mode, + struct msm_sub_mode *sub_mode, + struct msm_mode_info *mode_info, + 
void *display, const struct msm_resource_caps_info *avail_res) +{ + return 0; +} + +static inline int dp_connector_get_info(struct drm_connector *connector, + struct msm_display_info *info, void *display) +{ + return 0; +} + +static inline void dp_connector_post_open(struct drm_connector *connector, + void *display) +{ +} + +static inline int dp_drm_bridge_init(void *display, struct drm_encoder *encoder, + u32 max_mixer_count, u32 max_dsc_count) +{ + return 0; +} + +static inline void dp_drm_bridge_deinit(void *display) +{ +} + +static inline void convert_to_drm_mode(const struct dp_display_mode *dp_mode, + struct drm_display_mode *drm_mode) +{ +} + +static int dp_connector_install_properties(void *display, + struct drm_connector *conn) +{ + return 0; +} +#endif /* CONFIG_DRM_MSM_DP */ + +#endif /* _DP_DRM_H_ */ diff --git a/msm/dp/dp_gpio_hpd.c b/msm/dp/dp_gpio_hpd.c new file mode 100644 index 000000000..e2aa54ca1 --- /dev/null +++ b/msm/dp/dp_gpio_hpd.c @@ -0,0 +1,298 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "dp_gpio_hpd.h" +#include "dp_debug.h" + +struct dp_gpio_hpd_private { + struct device *dev; + struct dp_hpd base; + struct dss_gpio gpio_cfg; + struct delayed_work work; + struct dp_hpd_cb *cb; + int irq; + bool hpd; +}; + +static int dp_gpio_hpd_connect(struct dp_gpio_hpd_private *gpio_hpd, bool hpd) +{ + int rc = 0; + + if (!gpio_hpd) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + gpio_hpd->base.hpd_high = hpd; + gpio_hpd->base.alt_mode_cfg_done = hpd; + gpio_hpd->base.hpd_irq = false; + + if (!gpio_hpd->cb || + !gpio_hpd->cb->configure || + !gpio_hpd->cb->disconnect) { + DP_ERR("invalid cb\n"); + rc = -EINVAL; + goto error; + } + + if (hpd) + rc = gpio_hpd->cb->configure(gpio_hpd->dev); + else + rc = gpio_hpd->cb->disconnect(gpio_hpd->dev); + +error: + return rc; +} + +static int dp_gpio_hpd_attention(struct dp_gpio_hpd_private *gpio_hpd) +{ + int rc = 0; + + if (!gpio_hpd) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + gpio_hpd->base.hpd_irq = true; + + if (gpio_hpd->cb && gpio_hpd->cb->attention) + rc = gpio_hpd->cb->attention(gpio_hpd->dev); + +error: + return rc; +} + +static irqreturn_t dp_gpio_isr(int unused, void *data) +{ + struct dp_gpio_hpd_private *gpio_hpd = data; + u32 const disconnect_timeout_retry = 50; + bool hpd; + int i; + + if (!gpio_hpd) + return IRQ_NONE; + + hpd = gpio_get_value_cansleep(gpio_hpd->gpio_cfg.gpio); + + if (!gpio_hpd->hpd && hpd) { + gpio_hpd->hpd = true; + queue_delayed_work(system_wq, &gpio_hpd->work, 0); + return IRQ_HANDLED; + } + + if (!gpio_hpd->hpd) + return IRQ_HANDLED; + + /* In DP 1.2 spec, 100msec is recommended for the detection + * of HPD connect event. Here we'll poll HPD status for + * 50x2ms = 100ms and if HPD is always low, we know DP is + * disconnected. 
If HPD is high, HPD_IRQ will be handled + */ + for (i = 0; i < disconnect_timeout_retry; i++) { + if (hpd) { + dp_gpio_hpd_attention(gpio_hpd); + return IRQ_HANDLED; + } + usleep_range(2000, 2100); + hpd = gpio_get_value_cansleep(gpio_hpd->gpio_cfg.gpio); + } + + gpio_hpd->hpd = false; + queue_delayed_work(system_wq, &gpio_hpd->work, 0); + return IRQ_HANDLED; +} + +static void dp_gpio_hpd_work(struct work_struct *work) +{ + struct delayed_work *dw = to_delayed_work(work); + struct dp_gpio_hpd_private *gpio_hpd = container_of(dw, + struct dp_gpio_hpd_private, work); + int ret; + + if (gpio_hpd->hpd) { + devm_free_irq(gpio_hpd->dev, + gpio_hpd->irq, gpio_hpd); + ret = devm_request_threaded_irq(gpio_hpd->dev, + gpio_hpd->irq, NULL, + dp_gpio_isr, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + "dp-gpio-intp", gpio_hpd); + dp_gpio_hpd_connect(gpio_hpd, true); + } else { + devm_free_irq(gpio_hpd->dev, + gpio_hpd->irq, gpio_hpd); + ret = devm_request_threaded_irq(gpio_hpd->dev, + gpio_hpd->irq, NULL, + dp_gpio_isr, + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + "dp-gpio-intp", gpio_hpd); + dp_gpio_hpd_connect(gpio_hpd, false); + } + + if (ret < 0) + DP_ERR("Cannot claim IRQ dp-gpio-intp\n"); +} + +static int dp_gpio_hpd_simulate_connect(struct dp_hpd *dp_hpd, bool hpd) +{ + int rc = 0; + struct dp_gpio_hpd_private *gpio_hpd; + + if (!dp_hpd) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + gpio_hpd = container_of(dp_hpd, struct dp_gpio_hpd_private, base); + + dp_gpio_hpd_connect(gpio_hpd, hpd); +error: + return rc; +} + +static int dp_gpio_hpd_simulate_attention(struct dp_hpd *dp_hpd, int vdo) +{ + int rc = 0; + struct dp_gpio_hpd_private *gpio_hpd; + + if (!dp_hpd) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + gpio_hpd = container_of(dp_hpd, struct dp_gpio_hpd_private, base); + + dp_gpio_hpd_attention(gpio_hpd); +error: + return rc; +} + +int dp_gpio_hpd_register(struct dp_hpd *dp_hpd) +{ + struct dp_gpio_hpd_private *gpio_hpd; + int edge; + 
int rc = 0; + + if (!dp_hpd) + return -EINVAL; + + gpio_hpd = container_of(dp_hpd, struct dp_gpio_hpd_private, base); + + gpio_hpd->hpd = gpio_get_value_cansleep(gpio_hpd->gpio_cfg.gpio); + + edge = gpio_hpd->hpd ? IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING; + rc = devm_request_threaded_irq(gpio_hpd->dev, gpio_hpd->irq, NULL, + dp_gpio_isr, + edge | IRQF_ONESHOT, + "dp-gpio-intp", gpio_hpd); + if (rc) { + DP_ERR("Failed to request INTP threaded IRQ: %d\n", rc); + return rc; + } + + if (gpio_hpd->hpd) + queue_delayed_work(system_wq, &gpio_hpd->work, 0); + + return rc; +} + +struct dp_hpd *dp_gpio_hpd_get(struct device *dev, + struct dp_hpd_cb *cb) +{ + int rc = 0; + const char *hpd_gpio_name = "qcom,dp-hpd-gpio"; + struct dp_gpio_hpd_private *gpio_hpd; + struct dp_pinctrl pinctrl = {0}; + unsigned int gpio; + + if (!dev || !cb) { + DP_ERR("invalid device\n"); + rc = -EINVAL; + goto error; + } + + gpio = of_get_named_gpio(dev->of_node, hpd_gpio_name, 0); + if (!gpio_is_valid(gpio)) { + DP_DEBUG("%s gpio not specified\n", hpd_gpio_name); + rc = -EINVAL; + goto error; + } + + gpio_hpd = devm_kzalloc(dev, sizeof(*gpio_hpd), GFP_KERNEL); + if (!gpio_hpd) { + rc = -ENOMEM; + goto error; + } + + pinctrl.pin = devm_pinctrl_get(dev); + if (!IS_ERR_OR_NULL(pinctrl.pin)) { + pinctrl.state_hpd_active = pinctrl_lookup_state(pinctrl.pin, + "mdss_dp_hpd_active"); + if (!IS_ERR_OR_NULL(pinctrl.state_hpd_active)) { + rc = pinctrl_select_state(pinctrl.pin, + pinctrl.state_hpd_active); + if (rc) { + DP_ERR("failed to set hpd active state\n"); + goto gpio_error; + } + } + } + + gpio_hpd->gpio_cfg.gpio = gpio; + strlcpy(gpio_hpd->gpio_cfg.gpio_name, hpd_gpio_name, + sizeof(gpio_hpd->gpio_cfg.gpio_name)); + gpio_hpd->gpio_cfg.value = 0; + + rc = gpio_request(gpio_hpd->gpio_cfg.gpio, + gpio_hpd->gpio_cfg.gpio_name); + if (rc) { + DP_ERR("%s: failed to request gpio\n", hpd_gpio_name); + goto gpio_error; + } + gpio_direction_input(gpio_hpd->gpio_cfg.gpio); + + gpio_hpd->dev = dev; + 
gpio_hpd->cb = cb; + gpio_hpd->irq = gpio_to_irq(gpio_hpd->gpio_cfg.gpio); + INIT_DELAYED_WORK(&gpio_hpd->work, dp_gpio_hpd_work); + + gpio_hpd->base.simulate_connect = dp_gpio_hpd_simulate_connect; + gpio_hpd->base.simulate_attention = dp_gpio_hpd_simulate_attention; + gpio_hpd->base.register_hpd = dp_gpio_hpd_register; + + return &gpio_hpd->base; + +gpio_error: + devm_kfree(dev, gpio_hpd); +error: + return ERR_PTR(rc); +} + +void dp_gpio_hpd_put(struct dp_hpd *dp_hpd) +{ + struct dp_gpio_hpd_private *gpio_hpd; + + if (!dp_hpd) + return; + + gpio_hpd = container_of(dp_hpd, struct dp_gpio_hpd_private, base); + + gpio_free(gpio_hpd->gpio_cfg.gpio); + devm_kfree(gpio_hpd->dev, gpio_hpd); +} diff --git a/msm/dp/dp_gpio_hpd.h b/msm/dp/dp_gpio_hpd.h new file mode 100644 index 000000000..0ed305cb9 --- /dev/null +++ b/msm/dp/dp_gpio_hpd.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + */ + + +#ifndef _DP_GPIO_HPD_H_ +#define _DP_GPIO_HPD_H_ + +#include "dp_hpd.h" + +/** + * dp_gpio_hpd_get() - configure and get the DisplayPlot HPD module data + * + * @dev: device instance of the caller + * return: pointer to allocated gpio hpd module data + * + * This function sets up the gpio hpd module + */ +struct dp_hpd *dp_gpio_hpd_get(struct device *dev, + struct dp_hpd_cb *cb); + +/** + * dp_gpio_hpd_put() + * + * Cleans up dp_hpd instance + * + * @hpd: instance of gpio_hpd + */ +void dp_gpio_hpd_put(struct dp_hpd *hpd); + +#endif /* _DP_GPIO_HPD_H_ */ diff --git a/msm/dp/dp_hdcp2p2.c b/msm/dp/dp_hdcp2p2.c new file mode 100644 index 000000000..7b98e23ec --- /dev/null +++ b/msm/dp/dp_hdcp2p2.c @@ -0,0 +1,1039 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)) +#include +#else +#include +#endif + +#include "sde_hdcp_2x.h" +#include "dp_debug.h" + +#define DP_INTR_STATUS2 (0x00000024) +#define DP_INTR_STATUS3 (0x00000028) +#define dp_read(offset) readl_relaxed((offset)) +#define dp_write(offset, data) writel_relaxed((data), (offset)) +#define DP_HDCP_RXCAPS_LENGTH 3 + +enum dp_hdcp2p2_sink_status { + SINK_DISCONNECTED, + SINK_CONNECTED +}; + +struct dp_hdcp2p2_ctrl { + DECLARE_KFIFO(cmd_q, enum hdcp_transport_wakeup_cmd, 8); + wait_queue_head_t wait_q; + atomic_t auth_state; + atomic_t abort; + enum dp_hdcp2p2_sink_status sink_status; /* Is sink connected */ + struct dp_hdcp2p2_interrupts *intr; + struct sde_hdcp_init_data init_data; + struct mutex mutex; /* mutex to protect access to ctrl */ + struct mutex msg_lock; /* mutex to protect access to msg buffer */ + struct sde_hdcp_ops *ops; + void *lib_ctx; /* Handle to HDCP 2.2 Trustzone library */ + struct sde_hdcp_2x_ops *lib; /* Ops for driver to call into TZ */ + + struct task_struct *thread; + struct hdcp2_buffer response; + struct hdcp2_buffer request; + uint32_t total_message_length; + uint32_t transaction_delay; + uint32_t transaction_timeout; + struct sde_hdcp_2x_msg_part msg_part[HDCP_MAX_MESSAGE_PARTS]; + u8 sink_rx_status; + u8 rx_status; + char abort_mask; + + bool polling; +}; + +struct dp_hdcp2p2_int_set { + u32 interrupt; + char *name; + void (*func)(struct dp_hdcp2p2_ctrl *ctrl); +}; + +struct dp_hdcp2p2_interrupts { + u32 reg; + struct dp_hdcp2p2_int_set *int_set; +}; + +static inline int dp_hdcp2p2_valid_handle(struct dp_hdcp2p2_ctrl *ctrl) +{ + if (!ctrl) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + if (!ctrl->lib_ctx) { + DP_ERR("HDCP library needs to be acquired\n"); + return -EINVAL; + } + + if (!ctrl->lib) { + DP_ERR("invalid lib ops data\n"); + return -EINVAL; + } + return 0; +} + +static 
inline bool dp_hdcp2p2_is_valid_state(struct dp_hdcp2p2_ctrl *ctrl) +{ + enum hdcp_transport_wakeup_cmd cmd; + + if (kfifo_peek(&ctrl->cmd_q, &cmd) && + cmd == HDCP_TRANSPORT_CMD_AUTHENTICATE) + return true; + + if (atomic_read(&ctrl->auth_state) != HDCP_STATE_INACTIVE) + return true; + + return false; +} + +static int dp_hdcp2p2_copy_buf(struct dp_hdcp2p2_ctrl *ctrl, + struct hdcp_transport_wakeup_data *data) +{ + int i = 0; + uint32_t num_messages = 0; + + if (!data || !data->message_data) + return 0; + + mutex_lock(&ctrl->msg_lock); + + num_messages = data->message_data->num_messages; + ctrl->total_message_length = 0; /* Total length of all messages */ + + for (i = 0; i < num_messages; i++) + ctrl->total_message_length += + data->message_data->messages[i].length; + + memcpy(ctrl->msg_part, data->message_data->messages, + sizeof(data->message_data->messages)); + + ctrl->rx_status = data->message_data->rx_status; + ctrl->abort_mask = data->abort_mask; + + if (!ctrl->total_message_length) { + mutex_unlock(&ctrl->msg_lock); + return 0; + } + + ctrl->response.data = data->buf; + ctrl->response.length = ctrl->total_message_length; + ctrl->request.data = data->buf; + ctrl->request.length = ctrl->total_message_length; + + ctrl->transaction_delay = data->transaction_delay; + ctrl->transaction_timeout = data->transaction_timeout; + + mutex_unlock(&ctrl->msg_lock); + + return 0; +} + +static void dp_hdcp2p2_send_auth_status(struct dp_hdcp2p2_ctrl *ctrl) +{ + ctrl->init_data.notify_status(ctrl->init_data.cb_data, + atomic_read(&ctrl->auth_state)); +} + +static void dp_hdcp2p2_set_interrupts(struct dp_hdcp2p2_ctrl *ctrl, bool enable) +{ + void __iomem *base = ctrl->init_data.dp_ahb->base; + struct dp_hdcp2p2_interrupts *intr = ctrl->intr; + + if (atomic_read(&ctrl->abort)) + return; + + while (intr && intr->reg) { + struct dp_hdcp2p2_int_set *int_set = intr->int_set; + u32 interrupts = 0; + + while (int_set && int_set->interrupt) { + interrupts |= int_set->interrupt; + 
int_set++; + } + + if (enable) + dp_write(base + intr->reg, + dp_read(base + intr->reg) | interrupts); + else + dp_write(base + intr->reg, + dp_read(base + intr->reg) & ~interrupts); + intr++; + } +} + +static int dp_hdcp2p2_wakeup(struct hdcp_transport_wakeup_data *data) +{ + struct dp_hdcp2p2_ctrl *ctrl; + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY); + if (!data) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + ctrl = data->context; + if (!ctrl) { + DP_ERR("invalid ctrl\n"); + return -EINVAL; + } + + if (dp_hdcp2p2_copy_buf(ctrl, data)) + goto exit; + + ctrl->polling = false; + switch (data->cmd) { + case HDCP_TRANSPORT_CMD_STATUS_SUCCESS: + atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATED); + kfifo_put(&ctrl->cmd_q, data->cmd); + wake_up(&ctrl->wait_q); + break; + case HDCP_TRANSPORT_CMD_STATUS_FAILED: + atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL); + kfifo_put(&ctrl->cmd_q, data->cmd); + kthread_park(ctrl->thread); + break; + default: + kfifo_put(&ctrl->cmd_q, data->cmd); + wake_up(&ctrl->wait_q); + break; + } + +exit: + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, data->cmd); + return 0; +} + +static inline void dp_hdcp2p2_wakeup_lib(struct dp_hdcp2p2_ctrl *ctrl, + struct sde_hdcp_2x_wakeup_data *data) +{ + int rc = 0; + + if (ctrl && ctrl->lib && ctrl->lib->wakeup && + data && (data->cmd != HDCP_2X_CMD_INVALID)) { + rc = ctrl->lib->wakeup(data); + if (rc) + DP_ERR("error sending %s to lib\n", + sde_hdcp_2x_cmd_to_str(data->cmd)); + } +} + +static void dp_hdcp2p2_reset(struct dp_hdcp2p2_ctrl *ctrl) +{ + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + ctrl->sink_status = SINK_DISCONNECTED; + atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE); +} + +static int dp_hdcp2p2_register(void *input, bool mst_enabled) +{ + int rc; + struct dp_hdcp2p2_ctrl *ctrl = input; + struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_ENABLE}; + + rc = dp_hdcp2p2_valid_handle(ctrl); + if (rc) + return rc; + + if (mst_enabled) + cdata.device_type = 
HDCP_TXMTR_DP_MST; + else + cdata.device_type = HDCP_TXMTR_DP; + + cdata.context = ctrl->lib_ctx; + rc = ctrl->lib->wakeup(&cdata); + + return rc; +} + +static int dp_hdcp2p2_on(void *input) +{ + int rc = 0; + struct dp_hdcp2p2_ctrl *ctrl = input; + struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID}; + + rc = dp_hdcp2p2_valid_handle(ctrl); + if (rc) + return rc; + + cdata.cmd = HDCP_2X_CMD_START; + cdata.context = ctrl->lib_ctx; + rc = ctrl->lib->wakeup(&cdata); + if (rc) + DP_ERR("Unable to start the HDCP 2.2 library (%d)\n", rc); + + return rc; +} + +static void dp_hdcp2p2_off(void *input) +{ + int rc; + struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input; + struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_DISABLE}; + + rc = dp_hdcp2p2_valid_handle(ctrl); + if (rc) + return; + + dp_hdcp2p2_set_interrupts(ctrl, false); + + dp_hdcp2p2_reset(ctrl); + + kthread_park(ctrl->thread); + + cdata.context = ctrl->lib_ctx; + ctrl->lib->wakeup(&cdata); +} + +static int dp_hdcp2p2_authenticate(void *input) +{ + int rc; + struct dp_hdcp2p2_ctrl *ctrl = input; + struct hdcp_transport_wakeup_data cdata = { + HDCP_TRANSPORT_CMD_AUTHENTICATE}; + rc = dp_hdcp2p2_valid_handle(ctrl); + if (rc) + return rc; + + dp_hdcp2p2_set_interrupts(ctrl, true); + + ctrl->sink_status = SINK_CONNECTED; + atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATING); + + if (kthread_should_park()) + kthread_park(ctrl->thread); + kfifo_reset(&ctrl->cmd_q); + kthread_unpark(ctrl->thread); + + cdata.context = input; + dp_hdcp2p2_wakeup(&cdata); + + return rc; +} + +static int dp_hdcp2p2_reauthenticate(void *input) +{ + struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + dp_hdcp2p2_reset((struct dp_hdcp2p2_ctrl *)input); + + return dp_hdcp2p2_authenticate(input); +} + +static void dp_hdcp2p2_min_level_change(void *client_ctx, + u8 min_enc_level) +{ + struct dp_hdcp2p2_ctrl *ctrl = (struct 
dp_hdcp2p2_ctrl *)client_ctx; + struct sde_hdcp_2x_wakeup_data cdata = { + HDCP_2X_CMD_MIN_ENC_LEVEL}; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + cdata.context = ctrl->lib_ctx; + cdata.min_enc_level = min_enc_level; + dp_hdcp2p2_wakeup_lib(ctrl, &cdata); +} + +static int dp_hdcp2p2_aux_read_message(struct dp_hdcp2p2_ctrl *ctrl) +{ + int rc = 0, max_size = 16, read_size = 0, bytes_read = 0; + int size = ctrl->request.length, offset = ctrl->msg_part->offset; + u8 *buf = ctrl->request.data; + s64 diff_ms; + ktime_t start_read, finish_read; + + if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE || + atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTH_FAIL) { + DP_ERR("invalid hdcp state\n"); + rc = -EINVAL; + goto exit; + } + + if (!buf) { + DP_ERR("invalid request buffer\n"); + rc = -EINVAL; + goto exit; + } + + DP_DEBUG("offset(0x%x), size(%d)\n", offset, size); + + start_read = ktime_get(); + do { + read_size = min(size, max_size); + + bytes_read = drm_dp_dpcd_read(ctrl->init_data.drm_aux, + offset, buf, read_size); + if (bytes_read != read_size) { + DP_ERR("fail: offset(0x%x), size(0x%x), rc(0x%x)\n", + offset, read_size, bytes_read); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, + offset, + read_size, + bytes_read); + rc = -EINVAL; + break; + } + + buf += read_size; + offset += read_size; + size -= read_size; + } while (size > 0); + finish_read = ktime_get(); + diff_ms = ktime_ms_delta(finish_read, start_read); + + if (ctrl->transaction_timeout && diff_ms > ctrl->transaction_timeout) { + DP_ERR("HDCP read timeout exceeded (%lldms > %ums)\n", diff_ms, + ctrl->transaction_timeout); + rc = -ETIMEDOUT; + } +exit: + return rc; +} + +static int dp_hdcp2p2_aux_write_message(struct dp_hdcp2p2_ctrl *ctrl, + u8 *buf, int size, uint offset, uint timeout) +{ + int const max_size = 16; + int rc = 0, write_size = 0, bytes_written = 0; + + DP_DEBUG("offset(0x%x), size(%d)\n", offset, size); + + do { + write_size = min(size, max_size); + + bytes_written = 
drm_dp_dpcd_write(ctrl->init_data.drm_aux, + offset, buf, write_size); + if (bytes_written != write_size) { + DP_ERR("fail: offset(0x%x), size(0x%x), rc(0x%x)\n", + offset, write_size, bytes_written); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, + offset, + write_size, + bytes_written); + rc = -EINVAL; + break; + } + + buf += write_size; + offset += write_size; + size -= write_size; + } while (size > 0); + + return rc; +} + +static bool dp_hdcp2p2_feature_supported(void *input) +{ + int rc; + struct dp_hdcp2p2_ctrl *ctrl = input; + struct sde_hdcp_2x_ops *lib = NULL; + bool supported = false; + + rc = dp_hdcp2p2_valid_handle(ctrl); + if (rc) + return supported; + + lib = ctrl->lib; + if (lib->feature_supported) + supported = lib->feature_supported( + ctrl->lib_ctx); + + return supported; +} + +static void dp_hdcp2p2_force_encryption(void *data, bool enable) +{ + int rc; + struct dp_hdcp2p2_ctrl *ctrl = data; + struct sde_hdcp_2x_ops *lib = NULL; + + rc = dp_hdcp2p2_valid_handle(ctrl); + if (rc) + return; + + lib = ctrl->lib; + if (lib->force_encryption) + lib->force_encryption(ctrl->lib_ctx, enable); +} + +static void dp_hdcp2p2_send_msg(struct dp_hdcp2p2_ctrl *ctrl) +{ + int rc = 0; + struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID}; + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY); + if (!ctrl) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto exit; + } + + cdata.context = ctrl->lib_ctx; + + if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) { + DP_ERR("hdcp is off\n"); + goto exit; + } + + mutex_lock(&ctrl->msg_lock); + + rc = dp_hdcp2p2_aux_write_message(ctrl, ctrl->response.data, + ctrl->response.length, ctrl->msg_part->offset, + ctrl->transaction_delay); + if (rc) { + DP_ERR("Error sending msg to sink %d\n", rc); + mutex_unlock(&ctrl->msg_lock); + goto exit; + } + + cdata.cmd = HDCP_2X_CMD_MSG_SEND_SUCCESS; + cdata.timeout = ctrl->transaction_delay; + mutex_unlock(&ctrl->msg_lock); + +exit: + if (rc == -ETIMEDOUT) + cdata.cmd = 
HDCP_2X_CMD_MSG_SEND_TIMEOUT; + else if (rc) + cdata.cmd = HDCP_2X_CMD_MSG_SEND_FAILED; + + dp_hdcp2p2_wakeup_lib(ctrl, &cdata); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, cdata.cmd); +} + +static int dp_hdcp2p2_get_msg_from_sink(struct dp_hdcp2p2_ctrl *ctrl) +{ + int rc = 0; + struct sde_hdcp_2x_wakeup_data cdata = { HDCP_2X_CMD_INVALID }; + + cdata.context = ctrl->lib_ctx; + + rc = dp_hdcp2p2_aux_read_message(ctrl); + if (rc) { + DP_ERR("error reading message %d\n", rc); + goto exit; + } + + cdata.total_message_length = ctrl->total_message_length; + cdata.timeout = ctrl->transaction_delay; +exit: + if (rc == -ETIMEDOUT) + cdata.cmd = HDCP_2X_CMD_MSG_RECV_TIMEOUT; + else if (rc) + cdata.cmd = HDCP_2X_CMD_MSG_RECV_FAILED; + else + cdata.cmd = HDCP_2X_CMD_MSG_RECV_SUCCESS; + + dp_hdcp2p2_wakeup_lib(ctrl, &cdata); + + return rc; +} + +static void dp_hdcp2p2_recv_msg(struct dp_hdcp2p2_ctrl *ctrl) +{ + struct sde_hdcp_2x_wakeup_data cdata = { HDCP_2X_CMD_INVALID }; + + cdata.context = ctrl->lib_ctx; + + if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) { + DP_ERR("hdcp is off\n"); + return; + } + + if (ctrl->transaction_delay) + msleep(ctrl->transaction_delay); + + dp_hdcp2p2_get_msg_from_sink(ctrl); +} + +static void dp_hdcp2p2_link_check(struct dp_hdcp2p2_ctrl *ctrl) +{ + int rc = 0; + struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID}; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTH_FAIL || + atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) { + DP_ERR("invalid hdcp state\n"); + return; + } + + cdata.context = ctrl->lib_ctx; + + if (ctrl->sink_rx_status & ctrl->abort_mask) { + if (ctrl->sink_rx_status & BIT(3)) + DP_WARN("reauth_req set by sink\n"); + + if (ctrl->sink_rx_status & BIT(4)) + DP_WARN("link failure reported by sink\n"); + + ctrl->sink_rx_status = 0; + ctrl->rx_status = 0; + + rc = -ENOLINK; + + cdata.cmd = HDCP_2X_CMD_LINK_FAILED; + 
atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL); + goto exit; + } + + /* check if sink has made a message available */ + if (ctrl->polling && (ctrl->sink_rx_status & ctrl->rx_status)) { + ctrl->sink_rx_status = 0; + ctrl->rx_status = 0; + + dp_hdcp2p2_get_msg_from_sink(ctrl); + + ctrl->polling = false; + } +exit: + if (rc) + dp_hdcp2p2_wakeup_lib(ctrl, &cdata); +} + +static void dp_hdcp2p2_start_auth(struct dp_hdcp2p2_ctrl *ctrl) +{ + struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_START_AUTH}; + cdata.context = ctrl->lib_ctx; + + if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTHENTICATING) + dp_hdcp2p2_wakeup_lib(ctrl, &cdata); +} + +static int dp_hdcp2p2_read_rx_status(struct dp_hdcp2p2_ctrl *ctrl, + u8 *rx_status) +{ + u32 const cp_irq_dpcd_offset = 0x201; + u32 const rxstatus_dpcd_offset = 0x69493; + ssize_t const bytes_to_read = 1; + ssize_t bytes_read = 0; + u8 buf = 0; + int rc = 0; + bool cp_irq = false; + + *rx_status = 0; + + bytes_read = drm_dp_dpcd_read(ctrl->init_data.drm_aux, + cp_irq_dpcd_offset, &buf, bytes_to_read); + if (bytes_read != bytes_to_read) { + DP_ERR("cp irq read failed\n"); + rc = bytes_read; + goto error; + } + + cp_irq = buf & BIT(2); + DP_DEBUG("cp_irq=0x%x\n", cp_irq); + buf = 0; + + if (cp_irq) { + bytes_read = drm_dp_dpcd_read(ctrl->init_data.drm_aux, + rxstatus_dpcd_offset, &buf, bytes_to_read); + if (bytes_read != bytes_to_read) { + DP_ERR("rxstatus read failed\n"); + rc = bytes_read; + goto error; + } + *rx_status = buf; + DP_DEBUG("rx_status=0x%x\n", *rx_status); + } + +error: + return rc; +} + +static int dp_hdcp2p2_cp_irq(void *input) +{ + int rc, retries = 15; + struct dp_hdcp2p2_ctrl *ctrl = input; + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY); + + rc = dp_hdcp2p2_valid_handle(ctrl); + if (rc) + return rc; + + if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTH_FAIL || + atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) { + DP_DEBUG("invalid hdcp state\n"); + return -EINVAL; + } + + ctrl->sink_rx_status = 
0; + rc = dp_hdcp2p2_read_rx_status(ctrl, &ctrl->sink_rx_status); + if (rc) { + DP_ERR("failed to read rx status\n"); + return rc; + } + + DP_DEBUG("sink_rx_status=0x%x\n", ctrl->sink_rx_status); + + if (!ctrl->sink_rx_status) { + DP_DEBUG("not a hdcp 2.2 irq\n"); + return -EINVAL; + } + + /* + * Wait for link to be transitioned to polling mode. This wait + * should be done in this CP_IRQ handler and NOT in the event thread + * as the transition to link polling happens in the event thread + * as part of the wake up from the HDCP engine. + * + * One specific case where this sequence of event commonly happens + * is when executing HDCP 2.3 CTS test 1B-09 with Unigraf UCD-400 + * test equipment (TE). As part of this test, the TE issues a CP-IRQ + * right after the successful completion of the HDCP authentication + * part 2. This CP-IRQ handler gets invoked even before the HDCP + * state engine gets transitioned to the polling mode, which can + * cause the test to fail as we would not read the + * RepeaterAuth_Send_ReceiverID_List from the TE in response to the + * CP_IRQ. + * + * Skip this wait when any of the fields in the abort mask is set. 
+ */ + if (ctrl->sink_rx_status & ctrl->abort_mask) + goto exit; + + while (!ctrl->polling && retries--) + msleep(20); + +exit: + kfifo_put(&ctrl->cmd_q, HDCP_TRANSPORT_CMD_LINK_CHECK); + wake_up(&ctrl->wait_q); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT); + + return 0; +} + +static int dp_hdcp2p2_isr(void *input) +{ + struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input; + int rc = 0; + struct dss_io_data *io; + struct dp_hdcp2p2_interrupts *intr; + u32 hdcp_int_val = 0; + + if (!ctrl || !ctrl->init_data.dp_ahb) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto end; + } + + io = ctrl->init_data.dp_ahb; + intr = ctrl->intr; + + while (intr && intr->reg) { + struct dp_hdcp2p2_int_set *int_set = intr->int_set; + + hdcp_int_val = dp_read(io->base + intr->reg); + + while (int_set && int_set->interrupt) { + if (hdcp_int_val & (int_set->interrupt >> 2)) { + DP_DEBUG("%s\n", int_set->name); + + if (int_set->func) + int_set->func(ctrl); + + dp_write(io->base + intr->reg, hdcp_int_val | + (int_set->interrupt >> 1)); + } + int_set++; + } + intr++; + } +end: + return rc; +} + +static bool dp_hdcp2p2_supported(void *input) +{ + struct dp_hdcp2p2_ctrl *ctrl = input; + u32 const rxcaps_dpcd_offset = 0x6921d; + ssize_t bytes_read = 0; + u8 buf[DP_HDCP_RXCAPS_LENGTH]; + + DP_DEBUG("Checking sink capability\n"); + + bytes_read = drm_dp_dpcd_read(ctrl->init_data.drm_aux, + rxcaps_dpcd_offset, &buf, DP_HDCP_RXCAPS_LENGTH); + if (bytes_read != DP_HDCP_RXCAPS_LENGTH) { + DP_ERR("RxCaps read failed\n"); + goto error; + } + + DP_DEBUG("HDCP_CAPABLE=%lu\n", (buf[2] & BIT(1)) >> 1); + DP_DEBUG("VERSION=%d\n", buf[0]); + + if ((buf[2] & BIT(1)) && (buf[0] == 0x2)) + return true; +error: + return false; +} + +static int dp_hdcp2p2_change_streams(struct dp_hdcp2p2_ctrl *ctrl, + struct sde_hdcp_2x_wakeup_data *cdata) +{ + if (!ctrl || cdata->num_streams == 0 || !cdata->streams) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + if (!ctrl->lib_ctx) { + DP_ERR("HDCP library 
needs to be acquired\n"); + return -EINVAL; + } + + if (!ctrl->lib) { + DP_ERR("invalid lib ops data\n"); + return -EINVAL; + } + + cdata->context = ctrl->lib_ctx; + return ctrl->lib->wakeup(cdata); +} + + +static int dp_hdcp2p2_register_streams(void *input, u8 num_streams, + struct stream_info *streams) +{ + struct dp_hdcp2p2_ctrl *ctrl = input; + struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_OPEN_STREAMS}; + + cdata.streams = streams; + cdata.num_streams = num_streams; + return dp_hdcp2p2_change_streams(ctrl, &cdata); +} + +static int dp_hdcp2p2_deregister_streams(void *input, u8 num_streams, + struct stream_info *streams) +{ + struct dp_hdcp2p2_ctrl *ctrl = input; + struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_CLOSE_STREAMS}; + + cdata.streams = streams; + cdata.num_streams = num_streams; + return dp_hdcp2p2_change_streams(ctrl, &cdata); +} + +void sde_dp_hdcp2p2_deinit(void *input) +{ + struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input; + struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID}; + + if (!ctrl) { + DP_ERR("invalid input\n"); + return; + } + + if (atomic_read(&ctrl->auth_state) != HDCP_STATE_AUTH_FAIL) { + cdata.cmd = HDCP_2X_CMD_STOP; + cdata.context = ctrl->lib_ctx; + dp_hdcp2p2_wakeup_lib(ctrl, &cdata); + } + + sde_hdcp_2x_deregister(ctrl->lib_ctx); + + kthread_stop(ctrl->thread); + + mutex_destroy(&ctrl->mutex); + mutex_destroy(&ctrl->msg_lock); + kfree(ctrl); +} + +static int dp_hdcp2p2_main(void *data) +{ + struct dp_hdcp2p2_ctrl *ctrl = data; + enum hdcp_transport_wakeup_cmd cmd; + + while (1) { + wait_event_idle(ctrl->wait_q, + !kfifo_is_empty(&ctrl->cmd_q) || + kthread_should_stop() || + kthread_should_park()); + + if (kthread_should_stop()) + break; + + if (kfifo_is_empty(&ctrl->cmd_q) && kthread_should_park()) { + kthread_parkme(); + continue; + } + + if (!kfifo_get(&ctrl->cmd_q, &cmd)) + continue; + + switch (cmd) { + case HDCP_TRANSPORT_CMD_SEND_MESSAGE: + dp_hdcp2p2_send_msg(ctrl); + break; + case 
HDCP_TRANSPORT_CMD_RECV_MESSAGE: + if (ctrl->rx_status) + ctrl->polling = true; + else + dp_hdcp2p2_recv_msg(ctrl); + break; + case HDCP_TRANSPORT_CMD_STATUS_SUCCESS: + dp_hdcp2p2_send_auth_status(ctrl); + break; + case HDCP_TRANSPORT_CMD_STATUS_FAILED: + dp_hdcp2p2_set_interrupts(ctrl, false); + dp_hdcp2p2_send_auth_status(ctrl); + break; + case HDCP_TRANSPORT_CMD_LINK_POLL: + ctrl->polling = true; + break; + case HDCP_TRANSPORT_CMD_LINK_CHECK: + dp_hdcp2p2_link_check(ctrl); + break; + case HDCP_TRANSPORT_CMD_AUTHENTICATE: + dp_hdcp2p2_start_auth(ctrl); + break; + default: + break; + } + } + + return 0; +} + +static void dp_hdcp2p2_abort(void *input, bool abort) +{ + struct dp_hdcp2p2_ctrl *ctrl = input; + + atomic_set(&ctrl->abort, abort); +} + +void *sde_dp_hdcp2p2_init(struct sde_hdcp_init_data *init_data) +{ + int rc; + struct dp_hdcp2p2_ctrl *ctrl; + static struct sde_hdcp_ops ops = { + .isr = dp_hdcp2p2_isr, + .reauthenticate = dp_hdcp2p2_reauthenticate, + .authenticate = dp_hdcp2p2_authenticate, + .feature_supported = dp_hdcp2p2_feature_supported, + .force_encryption = dp_hdcp2p2_force_encryption, + .sink_support = dp_hdcp2p2_supported, + .set_mode = dp_hdcp2p2_register, + .on = dp_hdcp2p2_on, + .off = dp_hdcp2p2_off, + .abort = dp_hdcp2p2_abort, + .cp_irq = dp_hdcp2p2_cp_irq, + .register_streams = dp_hdcp2p2_register_streams, + .deregister_streams = dp_hdcp2p2_deregister_streams, + }; + + static struct hdcp_transport_ops client_ops = { + .wakeup = dp_hdcp2p2_wakeup, + }; + static struct dp_hdcp2p2_int_set int_set1[] = { + {BIT(17), "authentication successful", NULL}, + {BIT(20), "authentication failed", NULL}, + {BIT(24), "encryption enabled", NULL}, + {BIT(27), "encryption disabled", NULL}, + {0}, + }; + static struct dp_hdcp2p2_int_set int_set2[] = { + {BIT(2), "key fifo underflow", NULL}, + {0}, + }; + static struct dp_hdcp2p2_interrupts intr[] = { + {DP_INTR_STATUS2, int_set1}, + {DP_INTR_STATUS3, int_set2}, + {0} + }; + static struct sde_hdcp_2x_ops 
hdcp2x_ops; + struct sde_hdcp_2x_register_data register_data = {0}; + + if (!init_data || !init_data->cb_data || + !init_data->notify_status || !init_data->drm_aux) { + DP_ERR("invalid input\n"); + return ERR_PTR(-EINVAL); + } + + ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) + return ERR_PTR(-ENOMEM); + + ctrl->init_data = *init_data; + ctrl->lib = &hdcp2x_ops; + ctrl->response.data = NULL; + ctrl->request.data = NULL; + + ctrl->sink_status = SINK_DISCONNECTED; + ctrl->intr = intr; + + INIT_KFIFO(ctrl->cmd_q); + + init_waitqueue_head(&ctrl->wait_q); + atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE); + + ctrl->ops = &ops; + mutex_init(&ctrl->mutex); + mutex_init(&ctrl->msg_lock); + + register_data.hdcp_data = &ctrl->lib_ctx; + register_data.client_ops = &client_ops; + register_data.ops = &hdcp2x_ops; + register_data.client_data = ctrl; + + rc = sde_hdcp_2x_register(®ister_data); + if (rc) { + DP_ERR("Unable to register with HDCP 2.2 library\n"); + goto error; + } + + if (IS_ENABLED(CONFIG_HDCP_QSEECOM)) + msm_hdcp_register_cb(init_data->msm_hdcp_dev, ctrl, + dp_hdcp2p2_min_level_change); + + ctrl->thread = kthread_run(dp_hdcp2p2_main, ctrl, "dp_hdcp2p2"); + + if (IS_ERR(ctrl->thread)) { + DP_ERR("unable to start DP hdcp2p2 thread\n"); + rc = PTR_ERR(ctrl->thread); + ctrl->thread = NULL; + goto error; + } + + return ctrl; +error: + kfree(ctrl); + return ERR_PTR(rc); +} + +struct sde_hdcp_ops *sde_dp_hdcp2p2_get(void *input) +{ + return ((struct dp_hdcp2p2_ctrl *)input)->ops; +} diff --git a/msm/dp/dp_hpd.c b/msm/dp/dp_hpd.c new file mode 100644 index 000000000..c24eac7b4 --- /dev/null +++ b/msm/dp/dp_hpd.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include + +#include "dp_hpd.h" +#include "dp_altmode.h" +#include "dp_usbpd.h" +#include "dp_gpio_hpd.h" +#include "dp_lphw_hpd.h" +#include "dp_debug.h" +#include "dp_bridge_hpd.h" + +static void dp_hpd_host_init(struct dp_hpd *dp_hpd, + struct dp_catalog_hpd *catalog) +{ + if (!catalog) { + DP_ERR("invalid input\n"); + return; + } + catalog->config_hpd(catalog, true); +} + +static void dp_hpd_host_deinit(struct dp_hpd *dp_hpd, + struct dp_catalog_hpd *catalog) +{ + if (!catalog) { + DP_ERR("invalid input\n"); + return; + } + catalog->config_hpd(catalog, false); +} + +static void dp_hpd_isr(struct dp_hpd *dp_hpd) +{ +} + +struct dp_hpd *dp_hpd_get(struct device *dev, struct dp_parser *parser, + struct dp_catalog_hpd *catalog, + struct dp_aux_bridge *aux_bridge, + struct dp_hpd_cb *cb) +{ + struct dp_hpd *dp_hpd = NULL; + + if (aux_bridge && (aux_bridge->flag & DP_AUX_BRIDGE_HPD)) { + dp_hpd = dp_bridge_hpd_get(dev, cb, aux_bridge); + if (!IS_ERR(dp_hpd)) { + dp_hpd->type = DP_HPD_BRIDGE; + goto config; + } + } + + dp_hpd = dp_lphw_hpd_get(dev, parser, catalog, cb); + if (!IS_ERR_OR_NULL(dp_hpd)) { + dp_hpd->type = DP_HPD_LPHW; + goto config; + } + + dp_hpd = dp_gpio_hpd_get(dev, cb); + if (!IS_ERR_OR_NULL(dp_hpd)) { + dp_hpd->type = DP_HPD_GPIO; + goto config; + } + + dp_hpd = dp_altmode_get(dev, cb); + if (!IS_ERR_OR_NULL(dp_hpd)) { + dp_hpd->type = DP_HPD_ALTMODE; + goto config; + } + + dp_hpd = dp_usbpd_get(dev, cb); + if (!IS_ERR_OR_NULL(dp_hpd)) { + dp_hpd->type = DP_HPD_USBPD; + goto config; + } + + DP_ERR("Failed to detect HPD type\n"); + goto end; + +config: + if (!dp_hpd->host_init) + dp_hpd->host_init = dp_hpd_host_init; + if (!dp_hpd->host_deinit) + dp_hpd->host_deinit = dp_hpd_host_deinit; + if (!dp_hpd->isr) + dp_hpd->isr = dp_hpd_isr; + +end: + return dp_hpd; +} + +void dp_hpd_put(struct dp_hpd *dp_hpd) +{ + if (!dp_hpd) + return; + + switch (dp_hpd->type) { + case DP_HPD_USBPD: + dp_usbpd_put(dp_hpd); + break; 
+ case DP_HPD_ALTMODE: + dp_altmode_put(dp_hpd); + break; + case DP_HPD_GPIO: + dp_gpio_hpd_put(dp_hpd); + break; + case DP_HPD_LPHW: + dp_lphw_hpd_put(dp_hpd); + break; + case DP_HPD_BRIDGE: + dp_bridge_hpd_put(dp_hpd); + break; + default: + DP_ERR("unknown hpd type %d\n", dp_hpd->type); + break; + } +} diff --git a/msm/dp/dp_hpd.h b/msm/dp/dp_hpd.h new file mode 100644 index 000000000..2ea903be3 --- /dev/null +++ b/msm/dp/dp_hpd.h @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_HPD_H_ +#define _DP_HPD_H_ + +#include +#include "dp_parser.h" +#include "dp_catalog.h" +#include "dp_aux_bridge.h" + +struct device; + +/** + * enum dp_hpd_plug_orientation - plug orientation + * @ORIENTATION_NONE: Undefined or unspecified + * @ORIENTATION_CC1: CC1 + * @ORIENTATION_CC2: CC2 + */ +enum dp_hpd_plug_orientation { + ORIENTATION_NONE, + ORIENTATION_CC1, + ORIENTATION_CC2, +}; + +/** + * enum dp_hpd_type - dp hpd type + * @DP_HPD_ALTMODE: AltMode over G-Link based HPD + * @DP_HPD_USBPD: USB type-c based HPD + * @DP_HPD_GPIO: GPIO based HPD + * @DP_HPD_LPHW: LPHW based HPD + * @DP_HPD_BRIDGE: External bridge HPD + */ + +enum dp_hpd_type { + DP_HPD_ALTMODE, + DP_HPD_USBPD, + DP_HPD_GPIO, + DP_HPD_LPHW, + DP_HPD_BRIDGE, +}; + +/** + * struct dp_hpd_cb - callback functions provided by the client + * + * @configure: called when dp connection is ready. + * @disconnect: notify the cable disconnect event. + * @attention: notify any attention message event. + */ +struct dp_hpd_cb { + int (*configure)(struct device *dev); + int (*disconnect)(struct device *dev); + int (*attention)(struct device *dev); +}; + +/** + * struct dp_hpd - DisplayPort HPD status + * + * @type: type of HPD + * @orientation: plug orientation configuration, USBPD type only. + * @hpd_high: Hot Plug Detect signal is high. 
+ * @hpd_irq: Change in the status since last message
+ * @alt_mode_cfg_done: bool to specify alt mode status
+ * @multi_func: multi-function preferred, USBPD type only
+ * @peer_usb_comm: downstream supports usb data communication
+ * @force_multi_func: force multi-function preferred
+ * @isr: event interrupt, BUILTIN and LPHW type only
+ * @register_hpd: register hardware callback
+ * @host_init: source or host side setup for hpd
+ * @host_deinit: source or host side de-initializations
+ * @simulate_connect: simulate disconnect or connect for debug mode
+ * @simulate_attention: simulate attention messages for debug mode
+ * @wakeup_phy: wakeup USBPD phy layer
+ */
+struct dp_hpd {
+ enum dp_hpd_type type;
+ u32 orientation;
+ bool hpd_high;
+ bool hpd_irq;
+ bool alt_mode_cfg_done;
+ bool multi_func;
+ bool peer_usb_comm;
+ bool force_multi_func;
+
+ void (*isr)(struct dp_hpd *dp_hpd);
+ int (*register_hpd)(struct dp_hpd *dp_hpd);
+ void (*host_init)(struct dp_hpd *hpd, struct dp_catalog_hpd *catalog);
+ void (*host_deinit)(struct dp_hpd *hpd, struct dp_catalog_hpd *catalog);
+ int (*simulate_connect)(struct dp_hpd *dp_hpd, bool hpd);
+ int (*simulate_attention)(struct dp_hpd *dp_hpd, int vdo);
+ void (*wakeup_phy)(struct dp_hpd *dp_hpd, bool wakeup);
+};
+
+/**
+ * dp_hpd_get() - configure and get the DisplayPort HPD module data
+ *
+ * @dev: device instance of the caller
+ * @parser: pointer to DP parser module
+ * @catalog: pointer to DP catalog module
+ * @aux_bridge: handle for aux_bridge driver data
+ * @cb: callback function for HPD response
+ * return: pointer to allocated hpd module data
+ *
+ * This function sets up the hpd module
+ */
+struct dp_hpd *dp_hpd_get(struct device *dev, struct dp_parser *parser,
+ struct dp_catalog_hpd *catalog,
+ struct dp_aux_bridge *aux_bridge,
+ struct dp_hpd_cb *cb);
+
+/**
+ * dp_hpd_put()
+ *
+ * Cleans up dp_hpd instance
+ *
+ * @dp_hpd: instance of dp_hpd
+ */
+void dp_hpd_put(struct dp_hpd *dp_hpd);
+
+#endif /* 
_DP_HPD_H_ */ diff --git a/msm/dp/dp_link.c b/msm/dp/dp_link.c new file mode 100644 index 000000000..13cbc4e2e --- /dev/null +++ b/msm/dp/dp_link.c @@ -0,0 +1,1691 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + * + * Copyright (c) 2009 Keith Packard + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */ + +#include "dp_link.h" +#include "dp_panel.h" +#include "dp_debug.h" + +enum dynamic_range { + DP_DYNAMIC_RANGE_RGB_VESA = 0x00, + DP_DYNAMIC_RANGE_RGB_CEA = 0x01, + DP_DYNAMIC_RANGE_UNKNOWN = 0xFFFFFFFF, +}; + +enum audio_sample_rate { + AUDIO_SAMPLE_RATE_32_KHZ = 0x00, + AUDIO_SAMPLE_RATE_44_1_KHZ = 0x01, + AUDIO_SAMPLE_RATE_48_KHZ = 0x02, + AUDIO_SAMPLE_RATE_88_2_KHZ = 0x03, + AUDIO_SAMPLE_RATE_96_KHZ = 0x04, + AUDIO_SAMPLE_RATE_176_4_KHZ = 0x05, + AUDIO_SAMPLE_RATE_192_KHZ = 0x06, +}; + +enum audio_pattern_type { + AUDIO_TEST_PATTERN_OPERATOR_DEFINED = 0x00, + AUDIO_TEST_PATTERN_SAWTOOTH = 0x01, +}; + +struct dp_link_request { + u32 test_requested; + u32 test_link_rate; + u32 test_lane_count; +}; + +struct dp_link_private { + u32 prev_sink_count; + struct device *dev; + struct dp_aux *aux; + struct dp_link dp_link; + + struct dp_link_request request; + u8 link_status[DP_LINK_STATUS_SIZE]; +}; + +static char *dp_link_get_audio_test_pattern(u32 pattern) +{ + switch (pattern) { + case AUDIO_TEST_PATTERN_OPERATOR_DEFINED: + return DP_LINK_ENUM_STR(AUDIO_TEST_PATTERN_OPERATOR_DEFINED); + case AUDIO_TEST_PATTERN_SAWTOOTH: + return DP_LINK_ENUM_STR(AUDIO_TEST_PATTERN_SAWTOOTH); + default: + return "unknown"; + } +} + +static char *dp_link_get_audio_sample_rate(u32 rate) +{ + switch (rate) { + case AUDIO_SAMPLE_RATE_32_KHZ: + return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_32_KHZ); + case AUDIO_SAMPLE_RATE_44_1_KHZ: + return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_44_1_KHZ); + case AUDIO_SAMPLE_RATE_48_KHZ: + return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_48_KHZ); + case AUDIO_SAMPLE_RATE_88_2_KHZ: + return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_88_2_KHZ); + case AUDIO_SAMPLE_RATE_96_KHZ: + return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_96_KHZ); + case AUDIO_SAMPLE_RATE_176_4_KHZ: + return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_176_4_KHZ); + case AUDIO_SAMPLE_RATE_192_KHZ: + return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_192_KHZ); + default: + return "unknown"; + } +} + +static int 
dp_link_get_period(struct dp_link_private *link, int const addr) +{ + int ret = 0; + u8 bp; + u8 data; + u32 const param_len = 0x1; + u32 const max_audio_period = 0xA; + + /* TEST_AUDIO_PERIOD_CH_XX */ + if (drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp, + param_len) < param_len) { + DP_ERR("failed to read test_audio_period (0x%x)\n", addr); + ret = -EINVAL; + goto exit; + } + + data = bp; + + /* Period - Bits 3:0 */ + data = data & 0xF; + if ((int)data > max_audio_period) { + DP_ERR("invalid test_audio_period_ch_1 = 0x%x\n", data); + ret = -EINVAL; + goto exit; + } + + ret = data; +exit: + return ret; +} + +static int dp_link_parse_audio_channel_period(struct dp_link_private *link) +{ + int ret = 0; + struct dp_link_test_audio *req = &link->dp_link.test_audio; + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH1); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_1 = ret; + DP_DEBUG("test_audio_period_ch_1 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH2); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_2 = ret; + DP_DEBUG("test_audio_period_ch_2 = 0x%x\n", ret); + + /* TEST_AUDIO_PERIOD_CH_3 (Byte 0x275) */ + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH3); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_3 = ret; + DP_DEBUG("test_audio_period_ch_3 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH4); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_4 = ret; + DP_DEBUG("test_audio_period_ch_4 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH5); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_5 = ret; + DP_DEBUG("test_audio_period_ch_5 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH6); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_6 = ret; + DP_DEBUG("test_audio_period_ch_6 = 0x%x\n", ret); + + ret = dp_link_get_period(link, 
DP_TEST_AUDIO_PERIOD_CH7); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_7 = ret; + DP_DEBUG("test_audio_period_ch_7 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH8); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_8 = ret; + DP_DEBUG("test_audio_period_ch_8 = 0x%x\n", ret); +exit: + return ret; +} + +static int dp_link_parse_audio_pattern_type(struct dp_link_private *link) +{ + int ret = 0; + u8 bp; + u8 data; + int rlen; + int const param_len = 0x1; + int const max_audio_pattern_type = 0x1; + + rlen = drm_dp_dpcd_read(link->aux->drm_aux, + DP_TEST_AUDIO_PATTERN_TYPE, &bp, param_len); + if (rlen < param_len) { + DP_ERR("failed to read link audio mode data\n"); + ret = -EINVAL; + goto exit; + } + data = bp; + + /* Audio Pattern Type - Bits 7:0 */ + if ((int)data > max_audio_pattern_type) { + DP_ERR("invalid audio pattern type = 0x%x\n", data); + ret = -EINVAL; + goto exit; + } + + link->dp_link.test_audio.test_audio_pattern_type = data; + DP_DEBUG("audio pattern type = %s\n", + dp_link_get_audio_test_pattern(data)); +exit: + return ret; +} + +static int dp_link_parse_audio_mode(struct dp_link_private *link) +{ + int ret = 0; + u8 bp; + u8 data; + int rlen; + int const param_len = 0x1; + int const max_audio_sampling_rate = 0x6; + int const max_audio_channel_count = 0x8; + int sampling_rate = 0x0; + int channel_count = 0x0; + + rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_AUDIO_MODE, + &bp, param_len); + if (rlen < param_len) { + DP_ERR("failed to read link audio mode data\n"); + ret = -EINVAL; + goto exit; + } + data = bp; + + /* Sampling Rate - Bits 3:0 */ + sampling_rate = data & 0xF; + if (sampling_rate > max_audio_sampling_rate) { + DP_ERR("sampling rate (0x%x) greater than max (0x%x)\n", + sampling_rate, max_audio_sampling_rate); + ret = -EINVAL; + goto exit; + } + + /* Channel Count - Bits 7:4 */ + channel_count = ((data & 0xF0) >> 4) + 1; + if (channel_count > max_audio_channel_count) { + 
DP_ERR("channel_count (0x%x) greater than max (0x%x)\n", + channel_count, max_audio_channel_count); + ret = -EINVAL; + goto exit; + } + + link->dp_link.test_audio.test_audio_sampling_rate = sampling_rate; + link->dp_link.test_audio.test_audio_channel_count = channel_count; + DP_DEBUG("sampling_rate = %s, channel_count = 0x%x\n", + dp_link_get_audio_sample_rate(sampling_rate), channel_count); +exit: + return ret; +} + +/** + * dp_parse_audio_pattern_params() - parses audio pattern parameters from DPCD + * @link: Display Port Driver data + * + * Returns 0 if it successfully parses the audio link pattern parameters. + */ +static int dp_link_parse_audio_pattern_params(struct dp_link_private *link) +{ + int ret = 0; + + ret = dp_link_parse_audio_mode(link); + if (ret) + goto exit; + + ret = dp_link_parse_audio_pattern_type(link); + if (ret) + goto exit; + + ret = dp_link_parse_audio_channel_period(link); + +exit: + return ret; +} + +/** + * dp_link_is_video_pattern_valid() - validates the video pattern + * @pattern: video pattern requested by the sink + * + * Returns true if the requested video pattern is supported. 
+ */ +static bool dp_link_is_video_pattern_valid(u32 pattern) +{ + switch (pattern) { + case DP_NO_TEST_PATTERN: + case DP_COLOR_RAMP: + case DP_BLACK_AND_WHITE_VERTICAL_LINES: + case DP_COLOR_SQUARE: + return true; + default: + return false; + } +} + +static char *dp_link_video_pattern_to_string(u32 test_video_pattern) +{ + switch (test_video_pattern) { + case DP_NO_TEST_PATTERN: + return DP_LINK_ENUM_STR(DP_NO_TEST_PATTERN); + case DP_COLOR_RAMP: + return DP_LINK_ENUM_STR(DP_COLOR_RAMP); + case DP_BLACK_AND_WHITE_VERTICAL_LINES: + return DP_LINK_ENUM_STR(DP_BLACK_AND_WHITE_VERTICAL_LINES); + case DP_COLOR_SQUARE: + return DP_LINK_ENUM_STR(DP_COLOR_SQUARE); + default: + return "unknown"; + } +} + +/** + * dp_link_is_dynamic_range_valid() - validates the dynamic range + * @bit_depth: the dynamic range value to be checked + * + * Returns true if the dynamic range value is supported. + */ +static bool dp_link_is_dynamic_range_valid(u32 dr) +{ + switch (dr) { + case DP_DYNAMIC_RANGE_RGB_VESA: + case DP_DYNAMIC_RANGE_RGB_CEA: + return true; + default: + return false; + } +} + +static char *dp_link_dynamic_range_to_string(u32 dr) +{ + switch (dr) { + case DP_DYNAMIC_RANGE_RGB_VESA: + return DP_LINK_ENUM_STR(DP_DYNAMIC_RANGE_RGB_VESA); + case DP_DYNAMIC_RANGE_RGB_CEA: + return DP_LINK_ENUM_STR(DP_DYNAMIC_RANGE_RGB_CEA); + case DP_DYNAMIC_RANGE_UNKNOWN: + default: + return "unknown"; + } +} + +/** + * dp_link_is_bit_depth_valid() - validates the bit depth requested + * @bit_depth: bit depth requested by the sink + * + * Returns true if the requested bit depth is supported. 
+ */ +static bool dp_link_is_bit_depth_valid(u32 tbd) +{ + /* DP_TEST_VIDEO_PATTERN_NONE is treated as invalid */ + switch (tbd) { + case DP_TEST_BIT_DEPTH_6: + case DP_TEST_BIT_DEPTH_8: + case DP_TEST_BIT_DEPTH_10: + return true; + default: + return false; + } +} + +static char *dp_link_bit_depth_to_string(u32 tbd) +{ + switch (tbd) { + case DP_TEST_BIT_DEPTH_6: + return DP_LINK_ENUM_STR(DP_TEST_BIT_DEPTH_6); + case DP_TEST_BIT_DEPTH_8: + return DP_LINK_ENUM_STR(DP_TEST_BIT_DEPTH_8); + case DP_TEST_BIT_DEPTH_10: + return DP_LINK_ENUM_STR(DP_TEST_BIT_DEPTH_10); + case DP_TEST_BIT_DEPTH_UNKNOWN: + default: + return "unknown"; + } +} + +static int dp_link_parse_timing_params1(struct dp_link_private *link, + int const addr, int const len, u32 *val) +{ + u8 bp[2]; + int rlen; + + if (len < 2) + return -EINVAL; + + /* Read the requested video link pattern (Byte 0x221). */ + rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, bp, len); + if (rlen < len) { + DP_ERR("failed to read 0x%x\n", addr); + return -EINVAL; + } + + *val = bp[1] | (bp[0] << 8); + + return 0; +} + +static int dp_link_parse_timing_params2(struct dp_link_private *link, + int const addr, int const len, u32 *val1, u32 *val2) +{ + u8 bp[2]; + int rlen; + + if (len < 2) + return -EINVAL; + + /* Read the requested video link pattern (Byte 0x221). 
*/ + rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, bp, len); + if (rlen < len) { + DP_ERR("failed to read 0x%x\n", addr); + return -EINVAL; + } + + *val1 = (bp[0] & BIT(7)) >> 7; + *val2 = bp[1] | ((bp[0] & 0x7F) << 8); + + return 0; +} + +static int dp_link_parse_timing_params3(struct dp_link_private *link, + int const addr, u32 *val) +{ + u8 bp; + u32 len = 1; + int rlen; + + rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp, len); + if (rlen < 1) { + DP_ERR("failed to read 0x%x\n", addr); + return -EINVAL; + } + *val = bp; + + return 0; +} + +/** + * dp_parse_video_pattern_params() - parses video pattern parameters from DPCD + * @link: Display Port Driver data + * + * Returns 0 if it successfully parses the video link pattern and the link + * bit depth requested by the sink and, and if the values parsed are valid. + */ +static int dp_link_parse_video_pattern_params(struct dp_link_private *link) +{ + int ret = 0; + int rlen; + u8 bp; + u8 data; + u32 dyn_range; + int const param_len = 0x1; + + rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_PATTERN, + &bp, param_len); + if (rlen < param_len) { + DP_ERR("failed to read link video pattern\n"); + ret = -EINVAL; + goto exit; + } + data = bp; + + if (!dp_link_is_video_pattern_valid(data)) { + DP_ERR("invalid link video pattern = 0x%x\n", data); + ret = -EINVAL; + goto exit; + } + + link->dp_link.test_video.test_video_pattern = data; + DP_DEBUG("link video pattern = 0x%x (%s)\n", + link->dp_link.test_video.test_video_pattern, + dp_link_video_pattern_to_string( + link->dp_link.test_video.test_video_pattern)); + + /* Read the requested color bit depth and dynamic range (Byte 0x232) */ + rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_MISC0, + &bp, param_len); + if (rlen < param_len) { + DP_ERR("failed to read link bit depth\n"); + ret = -EINVAL; + goto exit; + } + data = bp; + + /* Dynamic Range */ + dyn_range = (data & DP_TEST_DYNAMIC_RANGE_CEA) >> 3; + if (!dp_link_is_dynamic_range_valid(dyn_range)) { + 
DP_ERR("invalid link dynamic range = 0x%x\n", dyn_range); + ret = -EINVAL; + goto exit; + } + link->dp_link.test_video.test_dyn_range = dyn_range; + DP_DEBUG("link dynamic range = 0x%x (%s)\n", + link->dp_link.test_video.test_dyn_range, + dp_link_dynamic_range_to_string( + link->dp_link.test_video.test_dyn_range)); + + /* Color bit depth */ + data &= DP_TEST_BIT_DEPTH_MASK; + if (!dp_link_is_bit_depth_valid(data)) { + DP_ERR("invalid link bit depth = 0x%x\n", data); + ret = -EINVAL; + goto exit; + } + + link->dp_link.test_video.test_bit_depth = data; + DP_DEBUG("link bit depth = 0x%x (%s)\n", + link->dp_link.test_video.test_bit_depth, + dp_link_bit_depth_to_string( + link->dp_link.test_video.test_bit_depth)); + + /* resolution timing params */ + ret = dp_link_parse_timing_params1(link, DP_TEST_H_TOTAL_HI, 2, + &link->dp_link.test_video.test_h_total); + if (ret) { + DP_ERR("failed to parse test_h_total (DP_TEST_H_TOTAL_HI)\n"); + goto exit; + } + DP_DEBUG("TEST_H_TOTAL = %d\n", link->dp_link.test_video.test_h_total); + + ret = dp_link_parse_timing_params1(link, DP_TEST_V_TOTAL_HI, 2, + &link->dp_link.test_video.test_v_total); + if (ret) { + DP_ERR("failed to parse test_v_total (DP_TEST_V_TOTAL_HI)\n"); + goto exit; + } + DP_DEBUG("TEST_V_TOTAL = %d\n", link->dp_link.test_video.test_v_total); + + ret = dp_link_parse_timing_params1(link, DP_TEST_H_START_HI, 2, + &link->dp_link.test_video.test_h_start); + if (ret) { + DP_ERR("failed to parse test_h_start (DP_TEST_H_START_HI)\n"); + goto exit; + } + DP_DEBUG("TEST_H_START = %d\n", link->dp_link.test_video.test_h_start); + + ret = dp_link_parse_timing_params1(link, DP_TEST_V_START_HI, 2, + &link->dp_link.test_video.test_v_start); + if (ret) { + DP_ERR("failed to parse test_v_start (DP_TEST_V_START_HI)\n"); + goto exit; + } + DP_DEBUG("TEST_V_START = %d\n", link->dp_link.test_video.test_v_start); + + ret = dp_link_parse_timing_params2(link, DP_TEST_HSYNC_HI, 2, + &link->dp_link.test_video.test_hsync_pol, + 
&link->dp_link.test_video.test_hsync_width); + if (ret) { + DP_ERR("failed to parse (DP_TEST_HSYNC_HI)\n"); + goto exit; + } + DP_DEBUG("TEST_HSYNC_POL = %d\n", + link->dp_link.test_video.test_hsync_pol); + DP_DEBUG("TEST_HSYNC_WIDTH = %d\n", + link->dp_link.test_video.test_hsync_width); + + ret = dp_link_parse_timing_params2(link, DP_TEST_VSYNC_HI, 2, + &link->dp_link.test_video.test_vsync_pol, + &link->dp_link.test_video.test_vsync_width); + if (ret) { + DP_ERR("failed to parse (DP_TEST_VSYNC_HI)\n"); + goto exit; + } + DP_DEBUG("TEST_VSYNC_POL = %d\n", + link->dp_link.test_video.test_vsync_pol); + DP_DEBUG("TEST_VSYNC_WIDTH = %d\n", + link->dp_link.test_video.test_vsync_width); + + ret = dp_link_parse_timing_params1(link, DP_TEST_H_WIDTH_HI, 2, + &link->dp_link.test_video.test_h_width); + if (ret) { + DP_ERR("failed to parse test_h_width (DP_TEST_H_WIDTH_HI)\n"); + goto exit; + } + DP_DEBUG("TEST_H_WIDTH = %d\n", link->dp_link.test_video.test_h_width); + + ret = dp_link_parse_timing_params1(link, DP_TEST_V_HEIGHT_HI, 2, + &link->dp_link.test_video.test_v_height); + if (ret) { + DP_ERR("failed to parse test_v_height (DP_TEST_V_HEIGHT_HI)\n"); + goto exit; + } + DP_DEBUG("TEST_V_HEIGHT = %d\n", + link->dp_link.test_video.test_v_height); + + ret = dp_link_parse_timing_params3(link, DP_TEST_MISC1, + &link->dp_link.test_video.test_rr_d); + link->dp_link.test_video.test_rr_d &= DP_TEST_REFRESH_DENOMINATOR; + if (ret) { + DP_ERR("failed to parse test_rr_d (DP_TEST_MISC1)\n"); + goto exit; + } + DP_DEBUG("TEST_REFRESH_DENOMINATOR = %d\n", + link->dp_link.test_video.test_rr_d); + + ret = dp_link_parse_timing_params3(link, DP_TEST_REFRESH_RATE_NUMERATOR, + &link->dp_link.test_video.test_rr_n); + if (ret) { + DP_ERR("failed to parse test_rr_n (DP_TEST_REFRESH_RATE_NUMERATOR)\n"); + goto exit; + } + DP_DEBUG("TEST_REFRESH_NUMERATOR = %d\n", + link->dp_link.test_video.test_rr_n); +exit: + return ret; +} + +/** + * dp_link_parse_link_training_params() - parses link training 
parameters from + * DPCD + * @link: Display Port Driver data + * + * Returns 0 if it successfully parses the link rate (Byte 0x219) and lane + * count (Byte 0x220), and if these values parse are valid. + */ +static int dp_link_parse_link_training_params(struct dp_link_private *link) +{ + u8 bp; + u8 data; + int ret = 0; + int rlen; + int const param_len = 0x1; + + rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_LINK_RATE, + &bp, param_len); + if (rlen < param_len) { + DP_ERR("failed to read link rate\n"); + ret = -EINVAL; + goto exit; + } + data = bp; + + if (!is_link_rate_valid(data)) { + DP_ERR("invalid link rate = 0x%x\n", data); + ret = -EINVAL; + goto exit; + } + + link->request.test_link_rate = data; + DP_DEBUG("link rate = 0x%x\n", link->request.test_link_rate); + + rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_LANE_COUNT, + &bp, param_len); + if (rlen < param_len) { + DP_ERR("failed to read lane count\n"); + ret = -EINVAL; + goto exit; + } + data = bp; + data &= 0x1F; + + if (!is_lane_count_valid(data)) { + DP_ERR("invalid lane count = 0x%x\n", data); + ret = -EINVAL; + goto exit; + } + + link->request.test_lane_count = data; + DP_DEBUG("lane count = 0x%x\n", link->request.test_lane_count); +exit: + return ret; +} + +static bool dp_link_is_phy_test_pattern_supported(u32 phy_test_pattern_sel) +{ + switch (phy_test_pattern_sel) { + case DP_PHY_TEST_PATTERN_NONE: + case DP_PHY_TEST_PATTERN_D10_2: + case DP_PHY_TEST_PATTERN_ERROR_COUNT: + case DP_PHY_TEST_PATTERN_PRBS7: + case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: + case DP_PHY_TEST_PATTERN_CP2520: + case DP_PHY_TEST_PATTERN_CP2520_3: + return true; + default: + return false; + } +} + +/** + * dp_parse_phy_test_params() - parses the phy link parameters + * @link: Display Port Driver data + * + * Parses the DPCD (Byte 0x248) for the DP PHY link pattern that is being + * requested. 
+ */ +static int dp_link_parse_phy_test_params(struct dp_link_private *link) +{ + u8 bp; + u8 data; + int rlen; + int const param_len = 0x1; + int ret = 0; + + rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_PHY_TEST_PATTERN, + &bp, param_len); + if (rlen < param_len) { + DP_ERR("failed to read phy link pattern\n"); + ret = -EINVAL; + goto end; + } + + data = bp; + + link->dp_link.phy_params.phy_test_pattern_sel = data; + + DP_DEBUG("phy_test_pattern_sel = %s\n", + dp_link_get_phy_test_pattern(data)); + + if (!dp_link_is_phy_test_pattern_supported(data)) + ret = -EINVAL; +end: + return ret; +} + +/** + * dp_link_is_video_audio_test_requested() - checks for audio/video link request + * @link: link requested by the sink + * + * Returns true if the requested link is a permitted audio/video link. + */ +static bool dp_link_is_video_audio_test_requested(u32 link) +{ + return (link == DP_TEST_LINK_VIDEO_PATTERN) || + (link == (DP_TEST_LINK_AUDIO_PATTERN | + DP_TEST_LINK_VIDEO_PATTERN)) || + (link == DP_TEST_LINK_AUDIO_PATTERN) || + (link == (DP_TEST_LINK_AUDIO_PATTERN | + DP_TEST_LINK_AUDIO_DISABLED_VIDEO)); +} + +/** + * dp_link_supported() - checks if link requested by sink is supported + * @test_requested: link requested by the sink + * + * Returns true if the requested link is supported. 
+ */ +static bool dp_link_is_test_supported(u32 test_requested) +{ + return (test_requested == DP_TEST_LINK_TRAINING) || + (test_requested == DP_TEST_LINK_EDID_READ) || + (test_requested == DP_TEST_LINK_PHY_TEST_PATTERN) || + dp_link_is_video_audio_test_requested(test_requested); +} + +static bool dp_link_is_test_edid_read(struct dp_link_private *link) +{ + return (link->request.test_requested == DP_TEST_LINK_EDID_READ); +} + +/** + * dp_sink_parse_test_request() - parses link request parameters from sink + * @link: Display Port Driver data + * + * Parses the DPCD to check if an automated link is requested (Byte 0x201), + * and what type of link automation is being requested (Byte 0x218). + */ +static int dp_link_parse_request(struct dp_link_private *link) +{ + int ret = 0; + u8 bp; + u8 data; + int rlen; + u32 const param_len = 0x1; + + /** + * Read the device service IRQ vector (Byte 0x201) to determine + * whether an automated link has been requested by the sink. + */ + rlen = drm_dp_dpcd_read(link->aux->drm_aux, + DP_DEVICE_SERVICE_IRQ_VECTOR, &bp, param_len); + if (rlen < param_len) { + DP_ERR("aux read failed\n"); + ret = -EINVAL; + goto end; + } + + data = bp; + + if (!(data & DP_AUTOMATED_TEST_REQUEST)) + return 0; + + /** + * Read the link request byte (Byte 0x218) to determine what type + * of automated link has been requested by the sink. 
+ */ + rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_REQUEST, + &bp, param_len); + if (rlen < param_len) { + DP_ERR("aux read failed\n"); + ret = -EINVAL; + goto end; + } + + data = bp; + + if (!dp_link_is_test_supported(data)) { + DP_DEBUG("link 0x%x not supported\n", data); + goto end; + } + + link->request.test_requested = data; + + if (link->request.test_requested == DP_TEST_LINK_PHY_TEST_PATTERN) { + ret = dp_link_parse_phy_test_params(link); + if (ret) + goto end; + ret = dp_link_parse_link_training_params(link); + } + + if (link->request.test_requested == DP_TEST_LINK_TRAINING) + ret = dp_link_parse_link_training_params(link); + + if (dp_link_is_video_audio_test_requested( + link->request.test_requested)) { + ret = dp_link_parse_video_pattern_params(link); + if (ret) + goto end; + + ret = dp_link_parse_audio_pattern_params(link); + } +end: + /** + * Send a DP_TEST_ACK if all link parameters are valid, otherwise send + * a DP_TEST_NAK. + */ + if (ret) { + link->dp_link.test_response = DP_TEST_NAK; + } else { + if (!dp_link_is_test_edid_read(link)) + link->dp_link.test_response = DP_TEST_ACK; + else + link->dp_link.test_response = + DP_TEST_EDID_CHECKSUM_WRITE; + } + + return ret; +} + +/** + * dp_link_parse_sink_count() - parses the sink count + * + * Parses the DPCD to check if there is an update to the sink count + * (Byte 0x200), and whether all the sink devices connected have Content + * Protection enabled. 
+ */ +static int dp_link_parse_sink_count(struct dp_link *dp_link) +{ + int rlen; + int const param_len = 0x1; + struct dp_link_private *link = container_of(dp_link, + struct dp_link_private, dp_link); + + rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_SINK_COUNT, + &link->dp_link.sink_count.count, param_len); + if (rlen < param_len) { + DP_ERR("failed to read sink count\n"); + return -EINVAL; + } + + link->dp_link.sink_count.cp_ready = + link->dp_link.sink_count.count & DP_SINK_CP_READY; + /* BIT 7, BIT 5:0 */ + link->dp_link.sink_count.count = + DP_GET_SINK_COUNT(link->dp_link.sink_count.count); + + DP_DEBUG("sink_count = 0x%x, cp_ready = 0x%x\n", + link->dp_link.sink_count.count, + link->dp_link.sink_count.cp_ready); + return 0; +} + +static void dp_link_parse_sink_status_field(struct dp_link_private *link) +{ + int len = 0; + + link->prev_sink_count = link->dp_link.sink_count.count; + dp_link_parse_sink_count(&link->dp_link); + + len = drm_dp_dpcd_read_link_status(link->aux->drm_aux, + link->link_status); + if (len < DP_LINK_STATUS_SIZE) + DP_ERR("DP link status read failed\n"); + dp_link_parse_request(link); +} + +static bool dp_link_is_link_training_requested(struct dp_link_private *link) +{ + return (link->request.test_requested == DP_TEST_LINK_TRAINING); +} + +/** + * dp_link_process_link_training_request() - processes new training requests + * @link: Display Port link data + * + * This function will handle new link training requests that are initiated by + * the sink. In particular, it will update the requested lane count and link + * link rate, and then trigger the link retraining procedure. + * + * The function will return 0 if a link training request has been processed, + * otherwise it will return -EINVAL. 
+ */ +static int dp_link_process_link_training_request(struct dp_link_private *link) +{ + if (!dp_link_is_link_training_requested(link)) + return -EINVAL; + + DP_DEBUG("%s link rate = 0x%x, lane count = 0x%x\n", + dp_link_get_test_name(DP_TEST_LINK_TRAINING), + link->request.test_link_rate, + link->request.test_lane_count); + + link->dp_link.link_params.lane_count = link->request.test_lane_count; + link->dp_link.link_params.bw_code = link->request.test_link_rate; + + return 0; +} + +static void dp_link_send_test_response(struct dp_link *dp_link) +{ + struct dp_link_private *link = NULL; + u32 const response_len = 0x1; + + if (!dp_link) { + DP_ERR("invalid input\n"); + return; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + drm_dp_dpcd_write(link->aux->drm_aux, DP_TEST_RESPONSE, + &dp_link->test_response, response_len); +} + +static int dp_link_psm_config(struct dp_link *dp_link, + struct drm_dp_link *link_info, bool enable) +{ + struct dp_link_private *link = NULL; + int ret = 0; + + if (!dp_link) { + DP_ERR("invalid params\n"); + return -EINVAL; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + if (enable) + ret = dp_link_power_down(link->aux->drm_aux, link_info); + else + ret = dp_link_power_up(link->aux->drm_aux, link_info); + + if (ret) + DP_ERR("Failed to %s low power mode\n", + (enable ? 
"enter" : "exit")); + + return ret; +} + +static void dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum) +{ + struct dp_link_private *link = NULL; + u32 const response_len = 0x1; + + if (!dp_link) { + DP_ERR("invalid input\n"); + return; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + drm_dp_dpcd_write(link->aux->drm_aux, DP_TEST_EDID_CHECKSUM, + &checksum, response_len); +} + +static int dp_link_parse_vx_px(struct dp_link_private *link) +{ + u8 bp; + u8 data; + int const param_len = 0x1; + int ret = 0; + u32 v0, p0, v1, p1, v2, p2, v3, p3; + int rlen; + + DP_DEBUG("\n"); + + rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_ADJUST_REQUEST_LANE0_1, + &bp, param_len); + if (rlen < param_len) { + DP_ERR("failed reading lanes 0/1\n"); + ret = -EINVAL; + goto end; + } + + data = bp; + + DP_DEBUG("lanes 0/1 (Byte 0x206): 0x%x\n", data); + + v0 = data & 0x3; + data = data >> 2; + p0 = data & 0x3; + data = data >> 2; + + v1 = data & 0x3; + data = data >> 2; + p1 = data & 0x3; + data = data >> 2; + + rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_ADJUST_REQUEST_LANE2_3, + &bp, param_len); + if (rlen < param_len) { + DP_ERR("failed reading lanes 2/3\n"); + ret = -EINVAL; + goto end; + } + + data = bp; + + DP_DEBUG("lanes 2/3 (Byte 0x207): 0x%x\n", data); + + v2 = data & 0x3; + data = data >> 2; + p2 = data & 0x3; + data = data >> 2; + + v3 = data & 0x3; + data = data >> 2; + p3 = data & 0x3; + data = data >> 2; + + DP_DEBUG("vx: 0=%d, 1=%d, 2=%d, 3=%d\n", v0, v1, v2, v3); + DP_DEBUG("px: 0=%d, 1=%d, 2=%d, 3=%d\n", p0, p1, p2, p3); + + /** + * Update the voltage and pre-emphasis levels as per DPCD request + * vector. 
+ */ + DP_DEBUG("Current: v_level = 0x%x, p_level = 0x%x\n", + link->dp_link.phy_params.v_level, + link->dp_link.phy_params.p_level); + DP_DEBUG("Requested: v_level = 0x%x, p_level = 0x%x\n", v0, p0); + link->dp_link.phy_params.v_level = v0; + link->dp_link.phy_params.p_level = p0; + + DP_DEBUG("Success\n"); +end: + return ret; +} + +/** + * dp_link_process_phy_test_pattern_request() - process new phy link requests + * @link: Display Port Driver data + * + * This function will handle new phy link pattern requests that are initiated + * by the sink. The function will return 0 if a phy link pattern has been + * processed, otherwise it will return -EINVAL. + */ +static int dp_link_process_phy_test_pattern_request( + struct dp_link_private *link) +{ + u32 test_link_rate = 0, test_lane_count = 0; + + if (!(link->request.test_requested & DP_TEST_LINK_PHY_TEST_PATTERN)) { + DP_DEBUG("no phy test\n"); + return -EINVAL; + } + + test_link_rate = link->request.test_link_rate; + test_lane_count = link->request.test_lane_count; + + if (!is_link_rate_valid(test_link_rate) || + !is_lane_count_valid(test_lane_count)) { + DP_ERR("Invalid params: link rate = 0x%x, lane count = 0x%x\n", + test_link_rate, test_lane_count); + return -EINVAL; + } + + DP_DEBUG("start\n"); + + DP_INFO("Current: bw_code = 0x%x, lane count = 0x%x\n", + link->dp_link.link_params.bw_code, + link->dp_link.link_params.lane_count); + + DP_INFO("Requested: bw_code = 0x%x, lane count = 0x%x\n", + test_link_rate, test_lane_count); + + link->dp_link.link_params.lane_count = link->request.test_lane_count; + link->dp_link.link_params.bw_code = link->request.test_link_rate; + + dp_link_parse_vx_px(link); + + DP_DEBUG("end\n"); + + return 0; +} + +static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r) +{ + return link_status[r - DP_LANE0_1_STATUS]; +} + +/** + * dp_link_process_link_status_update() - processes link status updates + * @link: Display Port link module data + * + * This function will 
check for changes in the link status, e.g. clock + * recovery done on all lanes, and trigger link training if there is a + * failure/error on the link. + * + * The function will return 0 if the a link status update has been processed, + * otherwise it will return -EINVAL. + */ +static int dp_link_process_link_status_update(struct dp_link_private *link) +{ + bool channel_eq_done = drm_dp_channel_eq_ok(link->link_status, + link->dp_link.link_params.lane_count); + bool clock_recovery_done = drm_dp_clock_recovery_ok(link->link_status, + link->dp_link.link_params.lane_count); + DP_DEBUG("channel_eq_done = %d, clock_recovery_done = %d\n", + channel_eq_done, clock_recovery_done); + + if (channel_eq_done && clock_recovery_done) + return -EINVAL; + + return 0; +} + +static bool dp_link_is_ds_port_status_changed(struct dp_link_private *link) +{ + if (get_link_status(link->link_status, DP_LANE_ALIGN_STATUS_UPDATED) & + DP_DOWNSTREAM_PORT_STATUS_CHANGED) /* port status changed */ + return true; + + if (link->prev_sink_count != link->dp_link.sink_count.count) + return true; + + return false; +} + +/** + * dp_link_process_downstream_port_status_change() - process port status changes + * @link: Display Port Driver data + * + * This function will handle downstream port updates that are initiated by + * the sink. If the downstream port status has changed, the EDID is read via + * AUX. + * + * The function will return 0 if a downstream port update has been + * processed, otherwise it will return -EINVAL. 
+ */ +static int dp_link_process_ds_port_status_change(struct dp_link_private *link) +{ + if (!dp_link_is_ds_port_status_changed(link)) + return -EINVAL; + + /* reset prev_sink_count */ + link->prev_sink_count = link->dp_link.sink_count.count; + + return 0; +} + +static bool dp_link_is_video_pattern_requested(struct dp_link_private *link) +{ + return (link->request.test_requested & DP_TEST_LINK_VIDEO_PATTERN) + && !(link->request.test_requested & + DP_TEST_LINK_AUDIO_DISABLED_VIDEO); +} + +static bool dp_link_is_audio_pattern_requested(struct dp_link_private *link) +{ + return (link->request.test_requested & DP_TEST_LINK_AUDIO_PATTERN); +} + +/** + * dp_link_process_video_pattern_request() - process new video pattern request + * @link: Display Port link module's data + * + * This function will handle a new video pattern request that are initiated by + * the sink. This is acheieved by first sending a disconnect notification to + * the sink followed by a subsequent connect notification to the user modules, + * where it is expected that the user modules would draw the required link + * pattern. + */ +static int dp_link_process_video_pattern_request(struct dp_link_private *link) +{ + if (!dp_link_is_video_pattern_requested(link)) + goto end; + + DP_DEBUG("%s: bit depth=%d(%d bpp) pattern=%s\n", + dp_link_get_test_name(DP_TEST_LINK_VIDEO_PATTERN), + link->dp_link.test_video.test_bit_depth, + dp_link_bit_depth_to_bpp( + link->dp_link.test_video.test_bit_depth), + dp_link_video_pattern_to_string( + link->dp_link.test_video.test_video_pattern)); + + return 0; +end: + return -EINVAL; +} + +/** + * dp_link_process_audio_pattern_request() - process new audio pattern request + * @link: Display Port link module data + * + * This function will handle a new audio pattern request that is initiated by + * the sink. This is acheieved by sending the necessary secondary data packets + * to the sink. 
It is expected that any simulatenous requests for video + * patterns will be handled before the audio pattern is sent to the sink. + */ +static int dp_link_process_audio_pattern_request(struct dp_link_private *link) +{ + if (!dp_link_is_audio_pattern_requested(link)) + return -EINVAL; + + DP_DEBUG("sampling_rate=%s, channel_count=%d, pattern_type=%s\n", + dp_link_get_audio_sample_rate( + link->dp_link.test_audio.test_audio_sampling_rate), + link->dp_link.test_audio.test_audio_channel_count, + dp_link_get_audio_test_pattern( + link->dp_link.test_audio.test_audio_pattern_type)); + + DP_DEBUG("audio_period: ch1=0x%x, ch2=0x%x, ch3=0x%x, ch4=0x%x\n", + link->dp_link.test_audio.test_audio_period_ch_1, + link->dp_link.test_audio.test_audio_period_ch_2, + link->dp_link.test_audio.test_audio_period_ch_3, + link->dp_link.test_audio.test_audio_period_ch_4); + + DP_DEBUG("audio_period: ch5=0x%x, ch6=0x%x, ch7=0x%x, ch8=0x%x\n", + link->dp_link.test_audio.test_audio_period_ch_5, + link->dp_link.test_audio.test_audio_period_ch_6, + link->dp_link.test_audio.test_audio_period_ch_7, + link->dp_link.test_audio.test_audio_period_ch_8); + + return 0; +} + +static void dp_link_reset_data(struct dp_link_private *link) +{ + link->request = (const struct dp_link_request){ 0 }; + link->dp_link.test_video = (const struct dp_link_test_video){ 0 }; + link->dp_link.test_video.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN; + link->dp_link.test_audio = (const struct dp_link_test_audio){ 0 }; + link->dp_link.phy_params.phy_test_pattern_sel = 0; + link->dp_link.sink_request = 0; + link->dp_link.test_response = 0; +} + +/** + * dp_link_process_request() - handle HPD IRQ transition to HIGH + * @link: pointer to link module data + * + * This function will handle the HPD IRQ state transitions from LOW to HIGH + * (including cases when there are back to back HPD IRQ HIGH) indicating + * the start of a new link training request or sink status update. 
+ */ +static int dp_link_process_request(struct dp_link *dp_link) +{ + int ret = 0; + struct dp_link_private *link; + + if (!dp_link) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + dp_link_reset_data(link); + + dp_link_parse_sink_status_field(link); + + if (dp_link_is_test_edid_read(link)) { + dp_link->sink_request |= DP_TEST_LINK_EDID_READ; + goto exit; + } + + ret = dp_link_process_ds_port_status_change(link); + if (!ret) { + dp_link->sink_request |= DS_PORT_STATUS_CHANGED; + goto exit; + } + + ret = dp_link_process_link_training_request(link); + if (!ret) { + dp_link->sink_request |= DP_TEST_LINK_TRAINING; + goto exit; + } + + ret = dp_link_process_phy_test_pattern_request(link); + if (!ret) { + dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN; + goto exit; + } + + ret = dp_link_process_link_status_update(link); + if (!ret) { + dp_link->sink_request |= DP_LINK_STATUS_UPDATED; + goto exit; + } + + ret = dp_link_process_video_pattern_request(link); + if (!ret) { + dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN; + goto exit; + } + + ret = dp_link_process_audio_pattern_request(link); + if (!ret) { + dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN; + goto exit; + } + + DP_DEBUG("no test requested\n"); + return ret; +exit: + /* + * log this as it can be a use initiated action to run a DP CTS + * test or in normal cases, sink has encountered a problem and + * and want source to redo some part of initialization which can + * be helpful in debugging. 
+ */ + DP_INFO("event: %s\n", + dp_link_get_test_name(dp_link->sink_request)); + return 0; +} + +static int dp_link_get_colorimetry_config(struct dp_link *dp_link) +{ + u32 cc; + enum dynamic_range dr; + struct dp_link_private *link; + + if (!dp_link) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + /* unless a video pattern CTS test is ongoing, use CEA_VESA */ + if (dp_link_is_video_pattern_requested(link)) + dr = link->dp_link.test_video.test_dyn_range; + else + dr = DP_DYNAMIC_RANGE_RGB_VESA; + + /* Only RGB_VESA nd RGB_CEA supported for now */ + switch (dr) { + case DP_DYNAMIC_RANGE_RGB_CEA: + cc = BIT(2); + break; + case DP_DYNAMIC_RANGE_RGB_VESA: + default: + cc = 0; + } + + return cc; +} + +static int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status) +{ + int i; + int max = 0; + u8 data; + struct dp_link_private *link; + u8 buf[8] = {0}, offset = 0; + + if (!dp_link) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + /* use the max level across lanes */ + for (i = 0; i < dp_link->link_params.lane_count; i++) { + data = drm_dp_get_adjust_request_voltage(link_status, i); + data >>= DP_TRAIN_VOLTAGE_SWING_SHIFT; + + offset = i * 2; + if (offset < sizeof(buf)) + buf[offset] = data; + + if (max < data) + max = data; + } + + dp_link->phy_params.v_level = max; + + /* use the max level across lanes */ + max = 0; + for (i = 0; i < dp_link->link_params.lane_count; i++) { + data = drm_dp_get_adjust_request_pre_emphasis(link_status, i); + data >>= DP_TRAIN_PRE_EMPHASIS_SHIFT; + + offset = (i * 2) + 1; + if (offset < sizeof(buf)) + buf[offset] = data; + + if (max < data) + max = data; + } + + dp_link->phy_params.p_level = max; + + print_hex_dump_debug("[drm-dp] Req (VxPx): ", + DUMP_PREFIX_NONE, 8, 2, buf, sizeof(buf), false); + + DP_DEBUG("Current (VxPx): 0x%x, 0x%x\n", + dp_link->phy_params.v_level, 
dp_link->phy_params.p_level); + + /** + * Adjust the voltage swing and pre-emphasis level combination to within + * the allowable range. + */ + if (dp_link->phy_params.v_level > dp_link->phy_params.max_v_level) + dp_link->phy_params.v_level = dp_link->phy_params.max_v_level; + + if (dp_link->phy_params.p_level > dp_link->phy_params.max_p_level) + dp_link->phy_params.p_level = dp_link->phy_params.max_p_level; + + if ((dp_link->phy_params.p_level > DP_LINK_PRE_EMPHASIS_LEVEL_1) + && (dp_link->phy_params.v_level == DP_LINK_VOLTAGE_LEVEL_2)) + dp_link->phy_params.p_level = DP_LINK_PRE_EMPHASIS_LEVEL_1; + + if ((dp_link->phy_params.p_level > DP_LINK_PRE_EMPHASIS_LEVEL_2) + && (dp_link->phy_params.v_level == DP_LINK_VOLTAGE_LEVEL_1)) + dp_link->phy_params.p_level = DP_LINK_PRE_EMPHASIS_LEVEL_2; + + DP_DEBUG("Set (VxPx): 0x%x, 0x%x\n", + dp_link->phy_params.v_level, dp_link->phy_params.p_level); + + return 0; +} + +static int dp_link_send_psm_request(struct dp_link *dp_link, bool req) +{ + struct dp_link_private *link; + + if (!dp_link) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + return 0; +} + +static u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp) +{ + u32 tbd; + + /* + * Few simplistic rules and assumptions made here: + * 1. Test bit depth is bit depth per color component + * 2. 
Assume 3 color components + */ + switch (bpp) { + case 18: + tbd = DP_TEST_BIT_DEPTH_6; + break; + case 24: + tbd = DP_TEST_BIT_DEPTH_8; + break; + case 30: + tbd = DP_TEST_BIT_DEPTH_10; + break; + default: + tbd = DP_TEST_BIT_DEPTH_UNKNOWN; + break; + } + + if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN) + tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT); + + return tbd; +} + +/** + * dp_link_probe() - probe a DisplayPort link for capabilities + * @aux: DisplayPort AUX channel + * @link: pointer to structure in which to return link capabilities + * + * The structure filled in by this function can usually be passed directly + * into dp_link_power_up() and dp_link_configure() to power up and + * configure the link based on the link's capabilities. + * + * Returns 0 on success or a negative error code on failure. + */ +int dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 values[3]; + int ret; + + memset(link, 0, sizeof(*link)); + + ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values)); + if (ret < 3) { + DP_ERR("failed to probe link, ret:%d\n", ret); + ret = -EIO; + } + + link->revision = values[0]; + link->rate = drm_dp_bw_code_to_link_rate(values[1]); + link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK; + + if (values[2] & DP_ENHANCED_FRAME_CAP) + link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING; + + return 0; +} + +/** + * dp_link_power_up() - power up a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. 
+ */ +int dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 value; + int ret; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (link->revision < 0x11) + return 0; + + ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (ret != 1) { + DP_ERR("failed to read sink power when powering up, ret:%d\n", ret); + return -EIO; + } + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D0; + + ret = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (ret != 1) { + DP_ERR("failed to power up[0x%x] sink, ret:%d\n", value, ret); + return -EIO; + } + + /* + * According to the DP 1.1 specification, a "Sink Device must exit the + * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink + * Control Field" (register 0x600). + */ + usleep_range(1000, 2000); + + return 0; +} + +/** + * dp_link_power_down() - power down a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. + */ +int dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 value; + int ret; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (link->revision < 0x11) + return 0; + + ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (ret != 1) { + DP_ERR("failed to read sink power when powering down, ret:%d\n", ret); + return -EIO; + } + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D3; + + ret = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (ret != 1) { + DP_ERR("failed to power down[0x%x] sink, ret:%d\n", value, ret); + return -EIO; + } + + return 0; +} + +/** + * dp_link_configure() - configure a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. 
+ */ +int dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 values[2]; + int ret; + + values[0] = drm_dp_link_rate_to_bw_code(link->rate); + values[1] = link->num_lanes; + + if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING) + values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + ret = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values)); + if (ret != 2) { + DP_ERR("failed to configure link, ret:%d\n", ret); + return -EIO; + } + + return 0; +} + +struct dp_link *dp_link_get(struct device *dev, struct dp_aux *aux, u32 dp_core_revision) +{ + int rc = 0; + struct dp_link_private *link; + struct dp_link *dp_link; + + if (!dev || !aux) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + link = devm_kzalloc(dev, sizeof(*link), GFP_KERNEL); + if (!link) { + rc = -EINVAL; + goto error; + } + + link->dev = dev; + link->aux = aux; + + dp_link = &link->dp_link; + + if (dp_core_revision >= 0x10020003) + dp_link->phy_params.max_v_level = DP_LINK_VOLTAGE_LEVEL_3; + else + dp_link->phy_params.max_v_level = DP_LINK_VOLTAGE_LEVEL_2; + + dp_link->phy_params.max_p_level = DP_LINK_PRE_EMPHASIS_LEVEL_3; + + dp_link->process_request = dp_link_process_request; + dp_link->get_test_bits_depth = dp_link_get_test_bits_depth; + dp_link->get_colorimetry_config = dp_link_get_colorimetry_config; + dp_link->adjust_levels = dp_link_adjust_levels; + dp_link->send_psm_request = dp_link_send_psm_request; + dp_link->send_test_response = dp_link_send_test_response; + dp_link->psm_config = dp_link_psm_config; + dp_link->send_edid_checksum = dp_link_send_edid_checksum; + + return dp_link; +error: + return ERR_PTR(rc); +} + +void dp_link_put(struct dp_link *dp_link) +{ + struct dp_link_private *link; + + if (!dp_link) + return; + + link = container_of(dp_link, struct dp_link_private, dp_link); + + devm_kfree(link->dev, link); +} diff --git a/msm/dp/dp_link.h b/msm/dp/dp_link.h new file mode 100644 index 000000000..ca419f132 --- /dev/null +++ 
b/msm/dp/dp_link.h @@ -0,0 +1,248 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + * + * Copyright (c) 2008 Keith Packard + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */ + +#ifndef _DP_LINK_H_ +#define _DP_LINK_H_ + +#include "dp_aux.h" + +#define DS_PORT_STATUS_CHANGED 0x200 +#define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF +#define DP_LINK_ENUM_STR(x) #x + +#define DP_PHY_TEST_PATTERN_CP2520_2 0x6 +#define DP_PHY_TEST_PATTERN_CP2520_3 0x7 + +struct drm_dp_aux; + +#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0) +#define DP_LINK_CAP_CRC (1 << 1) + +struct drm_dp_link { + unsigned char revision; + unsigned int rate; + unsigned int num_lanes; + unsigned long capabilities; +}; + +int dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link); +int dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link); +int dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link); +int dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link); + +enum dp_link_voltage_level { + DP_LINK_VOLTAGE_LEVEL_0, + DP_LINK_VOLTAGE_LEVEL_1, + DP_LINK_VOLTAGE_LEVEL_2, + DP_LINK_VOLTAGE_LEVEL_3, +}; + +enum dp_link_preemaphasis_level { + DP_LINK_PRE_EMPHASIS_LEVEL_0, + DP_LINK_PRE_EMPHASIS_LEVEL_1, + DP_LINK_PRE_EMPHASIS_LEVEL_2, + DP_LINK_PRE_EMPHASIS_LEVEL_3, +}; + +struct dp_link_sink_count { + u32 count; + bool cp_ready; +}; + +struct dp_link_test_video { + u32 test_video_pattern; + u32 test_bit_depth; + u32 test_dyn_range; + u32 test_h_total; + u32 test_v_total; + u32 test_h_start; + u32 test_v_start; + u32 test_hsync_pol; + u32 test_hsync_width; + u32 test_vsync_pol; + u32 test_vsync_width; + u32 test_h_width; + u32 test_v_height; + u32 test_rr_d; + u32 test_rr_n; +}; + +struct dp_link_test_audio { + u32 test_audio_sampling_rate; + u32 test_audio_channel_count; + u32 test_audio_pattern_type; + u32 test_audio_period_ch_1; + u32 test_audio_period_ch_2; + u32 test_audio_period_ch_3; + u32 test_audio_period_ch_4; + u32 test_audio_period_ch_5; + u32 test_audio_period_ch_6; + u32 test_audio_period_ch_7; + u32 test_audio_period_ch_8; +}; + +struct dp_link_hdcp_status { + int hdcp_state; + int hdcp_version; +}; + +struct 
dp_link_phy_params { + u32 phy_test_pattern_sel; + u8 v_level; + u8 p_level; + u8 max_v_level; + u8 max_p_level; +}; + +struct dp_link_params { + u32 lane_count; + u32 bw_code; +}; + +static inline char *dp_link_get_test_name(u32 test_requested) +{ + switch (test_requested) { + case DP_TEST_LINK_TRAINING: + return DP_LINK_ENUM_STR(DP_TEST_LINK_TRAINING); + case DP_TEST_LINK_VIDEO_PATTERN: + return DP_LINK_ENUM_STR(DP_TEST_LINK_VIDEO_PATTERN); + case DP_TEST_LINK_EDID_READ: + return DP_LINK_ENUM_STR(DP_TEST_LINK_EDID_READ); + case DP_TEST_LINK_PHY_TEST_PATTERN: + return DP_LINK_ENUM_STR(DP_TEST_LINK_PHY_TEST_PATTERN); + case DP_TEST_LINK_AUDIO_PATTERN: + return DP_LINK_ENUM_STR(DP_TEST_LINK_AUDIO_PATTERN); + case DS_PORT_STATUS_CHANGED: + return DP_LINK_ENUM_STR(DS_PORT_STATUS_CHANGED); + case DP_LINK_STATUS_UPDATED: + return DP_LINK_ENUM_STR(DP_LINK_STATUS_UPDATED); + default: + return "unknown"; + } +} + +struct dp_link { + u32 sink_request; + u32 test_response; + + struct dp_link_sink_count sink_count; + struct dp_link_test_video test_video; + struct dp_link_test_audio test_audio; + struct dp_link_phy_params phy_params; + struct dp_link_params link_params; + struct dp_link_hdcp_status hdcp_status; + + u32 (*get_test_bits_depth)(struct dp_link *dp_link, u32 bpp); + int (*process_request)(struct dp_link *dp_link); + int (*get_colorimetry_config)(struct dp_link *dp_link); + int (*adjust_levels)(struct dp_link *dp_link, u8 *link_status); + int (*send_psm_request)(struct dp_link *dp_link, bool req); + void (*send_test_response)(struct dp_link *dp_link); + int (*psm_config)(struct dp_link *dp_link, + struct drm_dp_link *link_info, bool enable); + void (*send_edid_checksum)(struct dp_link *dp_link, u8 checksum); +}; + +static inline char *dp_link_get_phy_test_pattern(u32 phy_test_pattern_sel) +{ + switch (phy_test_pattern_sel) { + case DP_PHY_TEST_PATTERN_NONE: + return DP_LINK_ENUM_STR(DP_PHY_TEST_PATTERN_NONE); + case DP_PHY_TEST_PATTERN_D10_2: + return 
DP_LINK_ENUM_STR( + DP_PHY_TEST_PATTERN_D10_2); + case DP_PHY_TEST_PATTERN_ERROR_COUNT: + return DP_LINK_ENUM_STR( + DP_PHY_TEST_PATTERN_ERROR_COUNT); + case DP_PHY_TEST_PATTERN_PRBS7: + return DP_LINK_ENUM_STR(DP_PHY_TEST_PATTERN_PRBS7); + case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: + return DP_LINK_ENUM_STR( + DP_PHY_TEST_PATTERN_80BIT_CUSTOM); + case DP_PHY_TEST_PATTERN_CP2520: + return DP_LINK_ENUM_STR(DP_PHY_TEST_PATTERN_CP2520); + case DP_PHY_TEST_PATTERN_CP2520_2: + return DP_LINK_ENUM_STR(DP_PHY_TEST_PATTERN_CP2520_2); + case DP_PHY_TEST_PATTERN_CP2520_3: + return DP_LINK_ENUM_STR(DP_PHY_TEST_PATTERN_CP2520_3); + default: + return "unknown"; + } +} + +/** + * mdss_dp_test_bit_depth_to_bpp() - convert test bit depth to bpp + * @tbd: test bit depth + * + * Returns the bits per pixel (bpp) to be used corresponding to the + * git bit depth value. This function assumes that bit depth has + * already been validated. + */ +static inline u32 dp_link_bit_depth_to_bpp(u32 tbd) +{ + u32 bpp; + + /* + * Few simplistic rules and assumptions made here: + * 1. Bit depth is per color component + * 2. If bit depth is unknown return 0 + * 3. 
Assume 3 color components + */ + switch (tbd) { + case DP_TEST_BIT_DEPTH_6: + bpp = 18; + break; + case DP_TEST_BIT_DEPTH_8: + bpp = 24; + break; + case DP_TEST_BIT_DEPTH_10: + bpp = 30; + break; + case DP_TEST_BIT_DEPTH_UNKNOWN: + default: + bpp = 0; + } + + return bpp; +} + +/** + * dp_link_get() - get the functionalities of dp test module + * + * + * return: a pointer to dp_link struct + */ +struct dp_link *dp_link_get(struct device *dev, struct dp_aux *aux, u32 dp_core_revision); + +/** + * dp_link_put() - releases the dp test module's resources + * + * @dp_link: an instance of dp_link module + * + */ +void dp_link_put(struct dp_link *dp_link); + +#endif /* _DP_LINK_H_ */ diff --git a/msm/dp/dp_lphw_hpd.c b/msm/dp/dp_lphw_hpd.c new file mode 100644 index 000000000..add17505b --- /dev/null +++ b/msm/dp/dp_lphw_hpd.c @@ -0,0 +1,424 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "dp_lphw_hpd.h" +#include "dp_debug.h" + +struct dp_lphw_hpd_private { + struct device *dev; + struct dp_hpd base; + struct dp_parser *parser; + struct dp_catalog_hpd *catalog; + struct dss_gpio gpio_cfg; + struct workqueue_struct *connect_wq; + struct delayed_work work; + struct work_struct connect; + struct work_struct disconnect; + struct work_struct attention; + struct dp_hpd_cb *cb; + int irq; + bool hpd; +}; + +static void dp_lphw_hpd_attention(struct work_struct *work) +{ + struct dp_lphw_hpd_private *lphw_hpd = container_of(work, + struct dp_lphw_hpd_private, attention); + + if (!lphw_hpd) { + DP_ERR("invalid input\n"); + return; + } + + lphw_hpd->base.hpd_irq = true; + + if (lphw_hpd->cb && lphw_hpd->cb->attention) + lphw_hpd->cb->attention(lphw_hpd->dev); +} + +static void dp_lphw_hpd_connect(struct work_struct *work) +{ + struct dp_lphw_hpd_private *lphw_hpd = container_of(work, + struct dp_lphw_hpd_private, connect); + + if (!lphw_hpd) { + DP_ERR("invalid input\n"); + return; + } + + lphw_hpd->base.hpd_high = true; + lphw_hpd->base.alt_mode_cfg_done = true; + lphw_hpd->base.hpd_irq = false; + + if (lphw_hpd->cb && lphw_hpd->cb->configure) + lphw_hpd->cb->configure(lphw_hpd->dev); +} + +static void dp_lphw_hpd_disconnect(struct work_struct *work) +{ + struct dp_lphw_hpd_private *lphw_hpd = container_of(work, + struct dp_lphw_hpd_private, disconnect); + + if (!lphw_hpd) { + DP_ERR("invalid input\n"); + return; + } + + lphw_hpd->base.hpd_high = false; + lphw_hpd->base.alt_mode_cfg_done = false; + lphw_hpd->base.hpd_irq = false; + + if (lphw_hpd->cb && lphw_hpd->cb->disconnect) + lphw_hpd->cb->disconnect(lphw_hpd->dev); +} + +static irqreturn_t dp_tlmm_isr(int unused, void *data) +{ + struct dp_lphw_hpd_private *lphw_hpd = data; + bool hpd; + + if (!lphw_hpd) + return IRQ_NONE; + + /* + * According to the DP spec, HPD high event can be confirmed only 
after + * the HPD line has een asserted continuously for more than 100ms + */ + usleep_range(99000, 100000); + + hpd = gpio_get_value_cansleep(lphw_hpd->gpio_cfg.gpio); + + DP_DEBUG("lphw_hpd state = %d, new hpd state = %d\n", + lphw_hpd->hpd, hpd); + if (!lphw_hpd->hpd && hpd) { + lphw_hpd->hpd = true; + queue_work(lphw_hpd->connect_wq, &lphw_hpd->connect); + } + + return IRQ_HANDLED; +} + +static void dp_lphw_hpd_host_init(struct dp_hpd *dp_hpd, + struct dp_catalog_hpd *catalog) +{ + struct dp_lphw_hpd_private *lphw_hpd; + + if (!dp_hpd) { + DP_ERR("invalid input\n"); + return; + } + + lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base); + + lphw_hpd->catalog->config_hpd(lphw_hpd->catalog, true); + + /* + * Changing the gpio function to dp controller for the hpd line is not + * stopping the tlmm interrupts generation on function 0. + * So, as an additional step, disable the gpio interrupt irq also + */ + disable_irq(lphw_hpd->irq); +} + +static void dp_lphw_hpd_host_deinit(struct dp_hpd *dp_hpd, + struct dp_catalog_hpd *catalog) +{ + struct dp_lphw_hpd_private *lphw_hpd; + + if (!dp_hpd) { + DP_ERR("invalid input\n"); + return; + } + + lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base); + + /* Enable the tlmm interrupt irq which was disabled in host_init */ + enable_irq(lphw_hpd->irq); + + lphw_hpd->catalog->config_hpd(lphw_hpd->catalog, false); +} + +static void dp_lphw_hpd_isr(struct dp_hpd *dp_hpd) +{ + struct dp_lphw_hpd_private *lphw_hpd; + u32 isr = 0; + int rc = 0; + + if (!dp_hpd) { + DP_ERR("invalid input\n"); + return; + } + + lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base); + + isr = lphw_hpd->catalog->get_interrupt(lphw_hpd->catalog); + + if (isr & DP_HPD_UNPLUG_INT_STATUS) { /* disconnect interrupt */ + + DP_DEBUG("disconnect interrupt, hpd isr state: 0x%x\n", isr); + + if (lphw_hpd->base.hpd_high) { + lphw_hpd->hpd = false; + lphw_hpd->base.hpd_high = false; + lphw_hpd->base.alt_mode_cfg_done = false; + 
lphw_hpd->base.hpd_irq = false; + + rc = queue_work(lphw_hpd->connect_wq, + &lphw_hpd->disconnect); + if (!rc) + DP_DEBUG("disconnect not queued\n"); + } else { + DP_ERR("already disconnected\n"); + } + + } else if (isr & DP_IRQ_HPD_INT_STATUS) { /* attention interrupt */ + + DP_DEBUG("hpd_irq interrupt, hpd isr state: 0x%x\n", isr); + + rc = queue_work(lphw_hpd->connect_wq, &lphw_hpd->attention); + if (!rc) + DP_DEBUG("attention not queued\n"); + } +} + +static int dp_lphw_hpd_simulate_connect(struct dp_hpd *dp_hpd, bool hpd) +{ + struct dp_lphw_hpd_private *lphw_hpd; + + if (!dp_hpd) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base); + + lphw_hpd->base.hpd_high = hpd; + lphw_hpd->base.alt_mode_cfg_done = hpd; + lphw_hpd->base.hpd_irq = false; + + if (!lphw_hpd->cb || !lphw_hpd->cb->configure || + !lphw_hpd->cb->disconnect) { + DP_ERR("invalid callback\n"); + return -EINVAL; + } + + if (hpd) + lphw_hpd->cb->configure(lphw_hpd->dev); + else + lphw_hpd->cb->disconnect(lphw_hpd->dev); + + return 0; +} + +static int dp_lphw_hpd_simulate_attention(struct dp_hpd *dp_hpd, int vdo) +{ + struct dp_lphw_hpd_private *lphw_hpd; + + if (!dp_hpd) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base); + + lphw_hpd->base.hpd_irq = true; + + if (lphw_hpd->cb && lphw_hpd->cb->attention) + lphw_hpd->cb->attention(lphw_hpd->dev); + + return 0; +} + +int dp_lphw_hpd_register(struct dp_hpd *dp_hpd) +{ + struct dp_lphw_hpd_private *lphw_hpd; + int rc = 0; + + if (!dp_hpd) + return -EINVAL; + + lphw_hpd = container_of(dp_hpd, struct dp_lphw_hpd_private, base); + + lphw_hpd->hpd = gpio_get_value_cansleep(lphw_hpd->gpio_cfg.gpio); + + rc = devm_request_threaded_irq(lphw_hpd->dev, lphw_hpd->irq, NULL, + dp_tlmm_isr, + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + "dp-gpio-intp", lphw_hpd); + if (rc) { + DP_ERR("Failed to request INTP threaded IRQ: %d\n", rc); 
+ return rc; + } + enable_irq_wake(lphw_hpd->irq); + + if (lphw_hpd->hpd) + queue_work(lphw_hpd->connect_wq, &lphw_hpd->connect); + + return rc; +} + +static void dp_lphw_hpd_deinit(struct dp_lphw_hpd_private *lphw_hpd) +{ + struct dp_parser *parser = lphw_hpd->parser; + int i = 0; + + for (i = 0; i < parser->mp[DP_PHY_PM].num_vreg; i++) { + + if (!strcmp(parser->mp[DP_PHY_PM].vreg_config[i].vreg_name, + "hpd-pwr")) { + /* disable the hpd-pwr voltage regulator */ + if (msm_dss_enable_vreg( + &parser->mp[DP_PHY_PM].vreg_config[i], 1, + false)) + DP_ERR("hpd-pwr vreg not disabled\n"); + + break; + } + } +} + +static void dp_lphw_hpd_init(struct dp_lphw_hpd_private *lphw_hpd) +{ + struct dp_pinctrl pinctrl = {0}; + struct dp_parser *parser = lphw_hpd->parser; + int i = 0, rc = 0; + + for (i = 0; i < parser->mp[DP_PHY_PM].num_vreg; i++) { + + if (!strcmp(parser->mp[DP_PHY_PM].vreg_config[i].vreg_name, + "hpd-pwr")) { + /* enable the hpd-pwr voltage regulator */ + if (msm_dss_enable_vreg( + &parser->mp[DP_PHY_PM].vreg_config[i], 1, + true)) + DP_ERR("hpd-pwr vreg not enabled\n"); + + break; + } + } + + pinctrl.pin = devm_pinctrl_get(lphw_hpd->dev); + + if (!IS_ERR_OR_NULL(pinctrl.pin)) { + pinctrl.state_hpd_active = pinctrl_lookup_state(pinctrl.pin, + "mdss_dp_hpd_active"); + + if (!IS_ERR_OR_NULL(pinctrl.state_hpd_active)) { + rc = pinctrl_select_state(pinctrl.pin, + pinctrl.state_hpd_active); + if (rc) + DP_ERR("failed to set hpd_active state\n"); + } + pinctrl.state_hpd_tlmm = pinctrl.state_hpd_ctrl = NULL; + } +} + +static int dp_lphw_hpd_create_workqueue(struct dp_lphw_hpd_private *lphw_hpd) +{ + lphw_hpd->connect_wq = create_singlethread_workqueue("dp_lphw_work"); + if (IS_ERR_OR_NULL(lphw_hpd->connect_wq)) { + DP_ERR("Error creating connect_wq\n"); + return -EPERM; + } + + INIT_WORK(&lphw_hpd->connect, dp_lphw_hpd_connect); + INIT_WORK(&lphw_hpd->disconnect, dp_lphw_hpd_disconnect); + INIT_WORK(&lphw_hpd->attention, dp_lphw_hpd_attention); + + return 0; +} + 
+struct dp_hpd *dp_lphw_hpd_get(struct device *dev, struct dp_parser *parser, + struct dp_catalog_hpd *catalog, struct dp_hpd_cb *cb) +{ + int rc = 0; + const char *hpd_gpio_name = "qcom,dp-hpd-gpio"; + struct dp_lphw_hpd_private *lphw_hpd = NULL; + unsigned int gpio; + + if (!dev || !parser || !cb) { + DP_ERR("invalid device\n"); + rc = -EINVAL; + goto error; + } + + gpio = of_get_named_gpio(dev->of_node, hpd_gpio_name, 0); + if (!gpio_is_valid(gpio)) { + DP_DEBUG("%s gpio not specified\n", hpd_gpio_name); + rc = -EINVAL; + goto error; + } + + lphw_hpd = devm_kzalloc(dev, sizeof(*lphw_hpd), GFP_KERNEL); + if (!lphw_hpd) { + rc = -ENOMEM; + goto error; + } + + lphw_hpd->gpio_cfg.gpio = gpio; + strlcpy(lphw_hpd->gpio_cfg.gpio_name, hpd_gpio_name, + sizeof(lphw_hpd->gpio_cfg.gpio_name)); + lphw_hpd->gpio_cfg.value = 0; + + rc = gpio_request(lphw_hpd->gpio_cfg.gpio, + lphw_hpd->gpio_cfg.gpio_name); + if (rc) { + DP_ERR("%s: failed to request gpio\n", hpd_gpio_name); + goto gpio_error; + } + gpio_direction_input(lphw_hpd->gpio_cfg.gpio); + + lphw_hpd->dev = dev; + lphw_hpd->cb = cb; + lphw_hpd->irq = gpio_to_irq(lphw_hpd->gpio_cfg.gpio); + + rc = dp_lphw_hpd_create_workqueue(lphw_hpd); + if (rc) { + DP_ERR("Failed to create a dp_hpd workqueue\n"); + goto gpio_error; + } + + lphw_hpd->parser = parser; + lphw_hpd->catalog = catalog; + lphw_hpd->base.isr = dp_lphw_hpd_isr; + lphw_hpd->base.host_init = dp_lphw_hpd_host_init; + lphw_hpd->base.host_deinit = dp_lphw_hpd_host_deinit; + lphw_hpd->base.simulate_connect = dp_lphw_hpd_simulate_connect; + lphw_hpd->base.simulate_attention = dp_lphw_hpd_simulate_attention; + lphw_hpd->base.register_hpd = dp_lphw_hpd_register; + + dp_lphw_hpd_init(lphw_hpd); + + return &lphw_hpd->base; + +gpio_error: + devm_kfree(dev, lphw_hpd); +error: + return ERR_PTR(rc); +} + +void dp_lphw_hpd_put(struct dp_hpd *dp_hpd) +{ + struct dp_lphw_hpd_private *lphw_hpd; + + if (!dp_hpd) + return; + + lphw_hpd = container_of(dp_hpd, struct 
 dp_lphw_hpd_private, base);
+
+	dp_lphw_hpd_deinit(lphw_hpd);
+	gpio_free(lphw_hpd->gpio_cfg.gpio);
+	devm_kfree(lphw_hpd->dev, lphw_hpd);
+}
diff --git a/msm/dp/dp_lphw_hpd.h b/msm/dp/dp_lphw_hpd.h
new file mode 100644
index 000000000..9779331bd
--- /dev/null
+++ b/msm/dp/dp_lphw_hpd.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_LPHW_HPD_H_
+#define _DP_LPHW_HPD_H_
+
+#include "dp_hpd.h"
+
+#define DP_HPD_PLUG_INT_STATUS		BIT(0)
+#define DP_IRQ_HPD_INT_STATUS		BIT(1)
+#define DP_HPD_REPLUG_INT_STATUS	BIT(2)
+#define DP_HPD_UNPLUG_INT_STATUS	BIT(3)
+
+/**
+ * dp_lphw_hpd_get() - configure and get the DisplayPort HPD module data
+ *
+ * @dev: device instance of the caller
+ * return: pointer to allocated gpio hpd module data
+ *
+ * This function sets up the lphw hpd module
+ */
+struct dp_hpd *dp_lphw_hpd_get(struct device *dev, struct dp_parser *parser,
+	struct dp_catalog_hpd *catalog, struct dp_hpd_cb *cb);
+
+/**
+ * dp_lphw_hpd_put()
+ *
+ * Cleans up dp_hpd instance
+ *
+ * @hpd: instance of lphw_hpd
+ */
+void dp_lphw_hpd_put(struct dp_hpd *hpd);
+
+#endif /* _DP_LPHW_HPD_H_ */
diff --git a/msm/dp/dp_mst_drm.c b/msm/dp/dp_mst_drm.c
new file mode 100644
index 000000000..ef3fa1e02
--- /dev/null
+++ b/msm/dp/dp_mst_drm.c
@@ -0,0 +1,2245 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * Copyright © 2014 Red Hat.
+ * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) +#include +#include +#else +#include +#include +#endif + +#include + +#include "msm_drv.h" +#include "msm_kms.h" +#include "sde_connector.h" +#include "dp_drm.h" +#include "dp_debug.h" +#include "dp_parser.h" + +#define DP_MST_DEBUG(fmt, ...) DP_DEBUG(fmt, ##__VA_ARGS__) +#define DP_MST_INFO(fmt, ...) DP_INFO(fmt, ##__VA_ARGS__) +#define DP_MST_DEBUG_V(fmt, ...) DP_DEBUG_V(fmt, ##__VA_ARGS__) +#define DP_MST_INFO_V(fmt, ...) DP_INFO_V(fmt, ##__VA_ARGS__) + +#define MAX_DP_MST_DRM_ENCODERS 2 +#define MAX_DP_MST_DRM_BRIDGES 2 +#define HPD_STRING_SIZE 30 +#define DP_MST_CONN_ID(bridge) ((bridge)->connector ? 
\ + (bridge)->connector->base.id : 0) + +struct dp_drm_mst_fw_helper_ops { +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + int (*atomic_find_time_slots)(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int pbn); + int (*update_payload_part1)(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state, + struct drm_dp_mst_atomic_payload *payload); + int (*update_payload_part2)(struct drm_dp_mst_topology_mgr *mgr, + struct drm_atomic_state *state, + struct drm_dp_mst_atomic_payload *payload); +#if (KERNEL_VERSION(6, 1, 25) <= LINUX_VERSION_CODE) + void (*reset_vcpi_slots)(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state, + const struct drm_dp_mst_atomic_payload *old_payload, + struct drm_dp_mst_atomic_payload *new_payload); +#else + void (*reset_vcpi_slots)(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state, + struct drm_dp_mst_atomic_payload *payload); +#endif +#else + + int (*atomic_find_vcpi_slots)(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int pbn, int pbn_div); + int (*update_payload_part1)(struct drm_dp_mst_topology_mgr *mgr); + int (*update_payload_part2)(struct drm_dp_mst_topology_mgr *mgr); + void (*reset_vcpi_slots)(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); +#endif + int (*atomic_release_time_slots)(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); + int (*calc_pbn_mode)(struct dp_display_mode *dp_mode); + int (*find_vcpi_slots)(struct drm_dp_mst_topology_mgr *mgr, int pbn); + bool (*allocate_vcpi)(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int pbn, int slots); + int (*check_act_status)(struct drm_dp_mst_topology_mgr *mgr); + int (*detect_port_ctx)( + struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + struct 
drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); + struct edid *(*get_edid)(struct drm_connector *connector, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); + int (*topology_mgr_set_mst)(struct drm_dp_mst_topology_mgr *mgr, + bool mst_state); + void (*get_vcpi_info)(struct drm_dp_mst_topology_mgr *mgr, + int vcpi, int *start_slot, int *num_slots); + void (*deallocate_vcpi)(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); +}; + +struct dp_mst_sim_port_edid { + u8 port_number; + u8 edid[SZ_256]; + bool valid; +}; + +struct dp_mst_bridge { + struct drm_bridge base; + struct drm_private_obj obj; + u32 id; + + bool in_use; + + struct dp_display *display; + struct drm_encoder *encoder; + + struct drm_display_mode drm_mode; + struct dp_display_mode dp_mode; + struct drm_connector *connector; + void *dp_panel; + + int vcpi; + int pbn; + int num_slots; + int start_slot; + + u32 fixed_port_num; + bool fixed_port_added; + struct drm_connector *fixed_connector; +}; + +struct dp_mst_bridge_state { + struct drm_private_state base; + struct drm_connector *connector; + void *dp_panel; + int num_slots; +}; + +struct dp_mst_private { + bool mst_initialized; + struct dp_mst_caps caps; + struct drm_dp_mst_topology_mgr mst_mgr; + struct dp_mst_bridge mst_bridge[MAX_DP_MST_DRM_BRIDGES]; + struct dp_display *dp_display; + const struct dp_drm_mst_fw_helper_ops *mst_fw_cbs; + struct mutex mst_lock; + struct mutex edid_lock; + enum dp_drv_state state; + bool mst_session_state; +}; + +struct dp_mst_encoder_info_cache { + u8 cnt; + struct drm_encoder *mst_enc[MAX_DP_MST_DRM_BRIDGES]; +}; + +#define to_dp_mst_bridge(x) container_of((x), struct dp_mst_bridge, base) +#define to_dp_mst_bridge_priv(x) \ + container_of((x), struct dp_mst_bridge, obj) +#define to_dp_mst_bridge_priv_state(x) \ + container_of((x), struct dp_mst_bridge_state, base) +#define to_dp_mst_bridge_state(x) \ + to_dp_mst_bridge_priv_state((x)->obj.state) + +struct 
dp_mst_private dp_mst; +struct dp_mst_encoder_info_cache dp_mst_enc_cache; + +static struct drm_private_state *dp_mst_duplicate_bridge_state( + struct drm_private_obj *obj) +{ + struct dp_mst_bridge_state *state; + + state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); + + return &state->base; +} + +static void dp_mst_destroy_bridge_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + struct dp_mst_bridge_state *priv_state = + to_dp_mst_bridge_priv_state(state); + + kfree(priv_state); +} + +static const struct drm_private_state_funcs dp_mst_bridge_state_funcs = { + .atomic_duplicate_state = dp_mst_duplicate_bridge_state, + .atomic_destroy_state = dp_mst_destroy_bridge_state, +}; + +static struct dp_mst_bridge_state *dp_mst_get_bridge_atomic_state( + struct drm_atomic_state *state, struct dp_mst_bridge *bridge) +{ + struct drm_device *dev = bridge->base.dev; + + WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + + return to_dp_mst_bridge_priv_state( + drm_atomic_get_private_obj_state(state, &bridge->obj)); +} + +static int dp_mst_detect_port( + struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) +{ + struct dp_mst_private *mst = container_of(mgr, + struct dp_mst_private, mst_mgr); + int status = connector_status_disconnected; + + if (mst->mst_session_state) + status = drm_dp_mst_detect_port(connector, ctx, mgr, port); + + DP_MST_DEBUG("mst port status: %d, session state: %d\n", + status, mst->mst_session_state); + return status; +} + +static void _dp_mst_get_vcpi_info( + struct drm_dp_mst_topology_mgr *mgr, + int vcpi, int *start_slot, int *num_slots) +{ +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + struct drm_dp_mst_topology_state *state; + struct drm_dp_mst_atomic_payload *payload; +#else + int i; +#endif + + *start_slot 
= 0; + *num_slots = 0; + +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + state = to_drm_dp_mst_topology_state(mgr->base.state); + list_for_each_entry(payload, &state->payloads, next) { + if (payload->vcpi == vcpi) { + *start_slot = payload->vc_start_slot; + *num_slots = payload->time_slots; + break; + } + } +#else + mutex_lock(&mgr->payload_lock); + for (i = 0; i < mgr->max_payloads; i++) { + if (mgr->payloads[i].vcpi == vcpi) { + *start_slot = mgr->payloads[i].start_slot; + *num_slots = mgr->payloads[i].num_slots; + break; + } + } + mutex_unlock(&mgr->payload_lock); +#endif + DP_INFO("vcpi_info. vcpi:%d, start_slot:%d, num_slots:%d\n", + vcpi, *start_slot, *num_slots); +} + +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) +/** + * dp_mst_find_vcpi_slots() - Find VCPI slots for this PBN value + * @mgr: manager to use + * @pbn: payload bandwidth to convert into slots. + * + * Calculate the number of VCPI slots that will be required for the given PBN + * value. + * + * RETURNS: + * The total slots required for this port, or error. + */ +static int dp_mst_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, int pbn) +{ + int num_slots; + struct drm_dp_mst_topology_state *state; + + state = to_drm_dp_mst_topology_state(mgr->base.state); + num_slots = DIV_ROUND_UP(pbn, state->pbn_div); + + /* max. time slots - one slot for MTP header */ + if (num_slots > 63) + return -ENOSPC; + return num_slots; +} +#endif + +static int dp_mst_calc_pbn_mode(struct dp_display_mode *dp_mode) +{ + int pbn, bpp; + bool dsc_en; + s64 pbn_fp; + struct dp_panel_info *pinfo = &dp_mode->timing; + + dsc_en = pinfo->comp_info.enabled; + bpp = dsc_en ? 
DSC_BPP(pinfo->comp_info.dsc_info.config) : pinfo->bpp; + + pbn = drm_dp_calc_pbn_mode(pinfo->pixel_clk_khz, bpp, false); + pbn_fp = drm_fixp_from_fraction(pbn, 1); + pinfo->pbn_no_overhead = pbn; + + if (dsc_en) + pbn_fp = drm_fixp_mul(pbn_fp, dp_mode->dsc_overhead_fp); + + if (dp_mode->fec_overhead_fp) + pbn_fp = drm_fixp_mul(pbn_fp, dp_mode->fec_overhead_fp); + + pbn = drm_fixp2int(pbn_fp); + pinfo->pbn = pbn; + + DP_DEBUG_V("pbn before overhead:%d pbn final:%d, bpp:%d\n", pinfo->pbn_no_overhead, pbn, + bpp); + + return pbn; +} + +static const struct dp_drm_mst_fw_helper_ops drm_dp_mst_fw_helper_ops = { +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + .calc_pbn_mode = dp_mst_calc_pbn_mode, + .find_vcpi_slots = dp_mst_find_vcpi_slots, + .atomic_find_time_slots = drm_dp_atomic_find_time_slots, + .update_payload_part1 = drm_dp_add_payload_part1, + .check_act_status = drm_dp_check_act_status, + .update_payload_part2 = drm_dp_add_payload_part2, + .detect_port_ctx = dp_mst_detect_port, + .get_edid = drm_dp_mst_get_edid, + .topology_mgr_set_mst = drm_dp_mst_topology_mgr_set_mst, + .get_vcpi_info = _dp_mst_get_vcpi_info, + .atomic_release_time_slots = drm_dp_atomic_release_time_slots, + .reset_vcpi_slots = drm_dp_remove_payload, +#else + .calc_pbn_mode = dp_mst_calc_pbn_mode, + .find_vcpi_slots = drm_dp_find_vcpi_slots, + .atomic_find_vcpi_slots = drm_dp_atomic_find_vcpi_slots, + .allocate_vcpi = drm_dp_mst_allocate_vcpi, + .update_payload_part1 = drm_dp_update_payload_part1, + .check_act_status = drm_dp_check_act_status, + .update_payload_part2 = drm_dp_update_payload_part2, + .detect_port_ctx = dp_mst_detect_port, + .get_edid = drm_dp_mst_get_edid, + .topology_mgr_set_mst = drm_dp_mst_topology_mgr_set_mst, + .get_vcpi_info = _dp_mst_get_vcpi_info, + .atomic_release_time_slots = drm_dp_atomic_release_vcpi_slots, + .reset_vcpi_slots = drm_dp_mst_reset_vcpi_slots, + .deallocate_vcpi = drm_dp_mst_deallocate_vcpi, +#endif +}; + +/* DP MST Bridge OPs */ + +static int 
dp_mst_bridge_attach(struct drm_bridge *dp_bridge, + enum drm_bridge_attach_flags flags) +{ + struct dp_mst_bridge *bridge; + + DP_MST_DEBUG_V("enter\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY); + + if (!dp_bridge) { + DP_ERR("Invalid params\n"); + return -EINVAL; + } + + bridge = to_dp_mst_bridge(dp_bridge); + + DP_MST_DEBUG("mst bridge [%d] attached\n", bridge->id); + + return 0; +} + +static bool dp_mst_bridge_mode_fixup(struct drm_bridge *drm_bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + bool ret = true; + struct dp_display_mode dp_mode; + struct dp_mst_bridge *bridge; + struct dp_display *dp; + struct drm_crtc_state *crtc_state; + struct dp_mst_bridge_state *bridge_state; + + DP_MST_DEBUG_V("enter\n"); + + if (!drm_bridge || !mode || !adjusted_mode) { + DP_ERR("Invalid params\n"); + ret = false; + goto end; + } + + bridge = to_dp_mst_bridge(drm_bridge); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, DP_MST_CONN_ID(bridge)); + + crtc_state = container_of(mode, struct drm_crtc_state, mode); + bridge_state = dp_mst_get_bridge_atomic_state(crtc_state->state, + bridge); + if (IS_ERR(bridge_state)) { + DP_ERR("invalid bridge state\n"); + ret = false; + goto end; + } + + if (!bridge_state->dp_panel) { + DP_ERR("Invalid dp_panel\n"); + ret = false; + goto end; + } + + dp = bridge->display; + + dp->convert_to_dp_mode(dp, bridge_state->dp_panel, mode, &dp_mode); + dp->clear_reservation(dp, bridge_state->dp_panel); + convert_to_drm_mode(&dp_mode, adjusted_mode); + + DP_MST_DEBUG("mst bridge [%d] mode:%s fixup\n", bridge->id, mode->name); +end: + return ret; +} + +static int _dp_mst_compute_config(struct drm_atomic_state *state, + struct dp_mst_private *mst, struct drm_connector *connector, + struct dp_display_mode *mode) +{ + int slots = 0, pbn; + struct sde_connector *c_conn = to_sde_connector(connector); + int rc = 0; +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + struct drm_dp_mst_topology_state *mst_state; 
+#endif + + DP_MST_DEBUG_V("enter\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, connector->base.id); + + pbn = mst->mst_fw_cbs->calc_pbn_mode(mode); + +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + mst_state = to_drm_dp_mst_topology_state(mst->mst_mgr.base.state); + + if (!mst_state->pbn_div) + mst_state->pbn_div = mst->dp_display->get_mst_pbn_div(mst->dp_display); + + rc = mst->mst_fw_cbs->atomic_find_time_slots(state, &mst->mst_mgr, c_conn->mst_port, pbn); + if (rc < 0) { + DP_ERR("conn:%d failed to find vcpi slots. pbn:%d, rc:%d\n", + connector->base.id, pbn, rc); + goto end; + } + + slots = rc; + + rc = drm_dp_mst_atomic_check(state); + if (rc) { + DP_ERR("conn:%d mst atomic check failed: rc=%d\n", connector->base.id, rc); + slots = 0; + goto end; + } +#else + slots = mst->mst_fw_cbs->atomic_find_vcpi_slots(state, + &mst->mst_mgr, c_conn->mst_port, pbn, 0); + if (slots < 0) { + DP_ERR("conn:%d failed to find vcpi slots. pbn:%d, slots:%d\n", + connector->base.id, pbn, slots); + rc = slots; + slots = 0; + goto end; + } +#endif + +end: + DP_MST_DEBUG("conn:%d pbn:%d slots:%d rc:%d\n", connector->base.id, pbn, slots, rc); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, connector->base.id, pbn, slots, rc); + + return (rc < 0 ? 
rc : slots); +} + +static void _dp_mst_update_timeslots(struct dp_mst_private *mst, + struct dp_mst_bridge *mst_bridge, struct drm_dp_mst_port *port) +{ + int i; + struct dp_mst_bridge *dp_bridge; +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + struct drm_dp_mst_topology_state *mst_state; + struct drm_dp_mst_atomic_payload *payload; + int prev_start = 0; + int prev_slots = 0; + + mst_state = to_drm_dp_mst_topology_state(mst->mst_mgr.base.state); + payload = drm_atomic_get_mst_payload_state(mst_state, port); + + if (!payload) { + DP_ERR("mst bridge [%d] update_timeslots failed, null payload\n", + mst_bridge->id); + return; + } + + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + dp_bridge = &mst->mst_bridge[i]; + if (mst_bridge == dp_bridge) { + /* + * When a payload was removed make sure to move any payloads after it + * to the left so all payloads are aligned to the left. + */ + if (payload->vc_start_slot < 0) { + // cache the payload + prev_start = dp_bridge->start_slot; + prev_slots = dp_bridge->num_slots; + dp_bridge->pbn = 0; + dp_bridge->start_slot = 1; + dp_bridge->num_slots = 0; + dp_bridge->vcpi = 0; + } else { //add payload + dp_bridge->pbn = payload->pbn; + dp_bridge->start_slot = payload->vc_start_slot; + dp_bridge->num_slots = payload->time_slots; + dp_bridge->vcpi = payload->vcpi; + } + } + } + + // Now commit all the updated payloads + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + dp_bridge = &mst->mst_bridge[i]; + + //Shift payloads to the left if there was a removed payload. 
+ if ((payload->vc_start_slot < 0) && (dp_bridge->start_slot > prev_start)) { + dp_bridge->start_slot -= prev_slots; + } + + mst->dp_display->set_stream_info(mst->dp_display, dp_bridge->dp_panel, + dp_bridge->id, dp_bridge->start_slot, dp_bridge->num_slots, + dp_bridge->pbn, dp_bridge->vcpi); + DP_INFO("conn:%d vcpi:%d start_slot:%d num_slots:%d, pbn:%d\n", + DP_MST_CONN_ID(dp_bridge), dp_bridge->vcpi, dp_bridge->start_slot, + dp_bridge->num_slots, dp_bridge->pbn); + } +#else + int pbn, start_slot, num_slots; + + mst->mst_fw_cbs->update_payload_part1(&mst->mst_mgr); + + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + dp_bridge = &mst->mst_bridge[i]; + + pbn = 0; + start_slot = 0; + num_slots = 0; + + if (dp_bridge->vcpi) { + mst->mst_fw_cbs->get_vcpi_info(&mst->mst_mgr, + dp_bridge->vcpi, + &start_slot, &num_slots); + pbn = dp_bridge->pbn; + } + + if (mst_bridge == dp_bridge) + dp_bridge->num_slots = num_slots; + + mst->dp_display->set_stream_info(mst->dp_display, + dp_bridge->dp_panel, + dp_bridge->id, start_slot, num_slots, pbn, + dp_bridge->vcpi); + + DP_INFO("conn:%d vcpi:%d start_slot:%d num_slots:%d, pbn:%d\n", + DP_MST_CONN_ID(dp_bridge), dp_bridge->vcpi, + start_slot, num_slots, pbn); + } +#endif +} + +static void _dp_mst_update_single_timeslot(struct dp_mst_private *mst, + struct dp_mst_bridge *mst_bridge) +{ + int pbn = 0, start_slot = 0, num_slots = 0; + + if (mst->state == PM_SUSPEND) { + if (mst_bridge->vcpi) { + mst->mst_fw_cbs->get_vcpi_info(&mst->mst_mgr, + mst_bridge->vcpi, + &start_slot, &num_slots); + pbn = mst_bridge->pbn; + } + + mst_bridge->num_slots = num_slots; + + mst->dp_display->set_stream_info(mst->dp_display, + mst_bridge->dp_panel, + mst_bridge->id, start_slot, num_slots, pbn, + mst_bridge->vcpi); + } +} + +static int _dp_mst_bridge_pre_enable_part1(struct dp_mst_bridge *dp_bridge) +{ + struct dp_display *dp_display = dp_bridge->display; + struct sde_connector *c_conn = + to_sde_connector(dp_bridge->connector); + struct dp_mst_private 
*mst = dp_display->dp_mst_prv_info; + struct drm_dp_mst_port *port = c_conn->mst_port; +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + struct drm_dp_mst_topology_state *mst_state; + struct drm_dp_mst_atomic_payload *payload; +#endif + bool ret; + int pbn, slots; + int rc = 0; + + DP_MST_DEBUG_V("enter\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, DP_MST_CONN_ID(dp_bridge)); + + /* skip mst specific disable operations during suspend */ + if (mst->state == PM_SUSPEND) { + dp_display->wakeup_phy_layer(dp_display, true); + drm_dp_send_power_updown_phy(&mst->mst_mgr, port, true); + dp_display->wakeup_phy_layer(dp_display, false); + _dp_mst_update_single_timeslot(mst, dp_bridge); + return rc; + } + + pbn = mst->mst_fw_cbs->calc_pbn_mode(&dp_bridge->dp_mode); + + slots = mst->mst_fw_cbs->find_vcpi_slots(&mst->mst_mgr, pbn); + + DP_INFO("conn:%d pbn:%d, slots:%d\n", DP_MST_CONN_ID(dp_bridge), pbn, slots); + + ret = false; +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + mst_state = to_drm_dp_mst_topology_state(mst->mst_mgr.base.state); + payload = drm_atomic_get_mst_payload_state(mst_state, port); + if (!payload || payload->time_slots <= 0) { + DP_ERR("time slots not allocated for conn:%d\n", DP_MST_CONN_ID(dp_bridge)); + rc = -EINVAL; + goto end; + } + + drm_dp_mst_update_slots(mst_state, DP_CAP_ANSI_8B10B); + + rc = mst->mst_fw_cbs->update_payload_part1(&mst->mst_mgr, + mst_state, payload); + if (rc) { + DP_ERR("payload allocation failure for conn:%d\n", DP_MST_CONN_ID(dp_bridge)); + goto end; + } + +#else + ret = mst->mst_fw_cbs->allocate_vcpi(&mst->mst_mgr, port, pbn, slots); + if (!ret) { + DP_ERR("mst: failed to allocate vcpi. 
bridge:%d\n", dp_bridge->id); + rc = -EINVAL; + goto end; + } + + dp_bridge->vcpi = port->vcpi.vcpi; + dp_bridge->pbn = pbn; +#endif + _dp_mst_update_timeslots(mst, dp_bridge, port); + +end: + return rc; +} + +static void _dp_mst_bridge_pre_enable_part2(struct dp_mst_bridge *dp_bridge) +{ + struct dp_display *dp_display = dp_bridge->display; + struct dp_mst_private *mst = dp_display->dp_mst_prv_info; +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + struct sde_connector *c_conn = to_sde_connector(dp_bridge->connector); + struct drm_dp_mst_port *port = c_conn->mst_port; + struct drm_dp_mst_topology_state *mst_state; + struct drm_dp_mst_atomic_payload *payload; +#endif + + DP_MST_DEBUG_V("enter\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, DP_MST_CONN_ID(dp_bridge)); + + /* skip mst specific disable operations during suspend */ + if (mst->state == PM_SUSPEND) + return; + + mst->mst_fw_cbs->check_act_status(&mst->mst_mgr); + +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + mst_state = to_drm_dp_mst_topology_state(mst->mst_mgr.base.state); + payload = drm_atomic_get_mst_payload_state(mst_state, port); + + if (!payload) { + DP_ERR("mst bridge [%d] _pre enable part-2 failed, null payload\n", dp_bridge->id); + return; + } + + if (!payload->port) { + DP_ERR("mst bridge [%d] _pre enable part-2 failed, null port\n", dp_bridge->id); + return; + } + + if (!payload->port->connector) { + DP_ERR("mst bridge [%d] _pre enable part-2 failed, null connector\n", + dp_bridge->id); + return; + } + + if (payload->vc_start_slot == -1) { + DP_ERR("mst bridge [%d] _pre enable part-2 failed, payload alloc part 1 failed\n", + dp_bridge->id); + return; + } + + mst->mst_fw_cbs->update_payload_part2(&mst->mst_mgr, mst_state->base.state, payload); +#else + mst->mst_fw_cbs->update_payload_part2(&mst->mst_mgr); +#endif + DP_MST_DEBUG("mst bridge [%d] _pre enable part-2 complete\n", + dp_bridge->id); +} + +static void _dp_mst_bridge_pre_disable_part1(struct dp_mst_bridge *dp_bridge) +{ 
+ struct dp_display *dp_display = dp_bridge->display; + struct sde_connector *c_conn = + to_sde_connector(dp_bridge->connector); + struct dp_mst_private *mst = dp_display->dp_mst_prv_info; + struct drm_dp_mst_port *port = c_conn->mst_port; +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + struct drm_dp_mst_topology_state *mst_state; + struct drm_dp_mst_atomic_payload *payload; +#endif + DP_MST_DEBUG_V("enter\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, DP_MST_CONN_ID(dp_bridge)); + + /* skip mst specific disable operations during suspend */ + if (mst->state == PM_SUSPEND) { + _dp_mst_update_single_timeslot(mst, dp_bridge); + return; + } + +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + mst_state = to_drm_dp_mst_topology_state(mst->mst_mgr.base.state); + payload = drm_atomic_get_mst_payload_state(mst_state, port); + + if (!payload) { + DP_ERR("mst bridge [%d] _pre disable part-1 failed, null payload\n", + dp_bridge->id); + return; + } + +#if (KERNEL_VERSION(6, 1, 25) <= LINUX_VERSION_CODE) + mst->mst_fw_cbs->reset_vcpi_slots(&mst->mst_mgr, mst_state, payload, payload); +#else + mst->mst_fw_cbs->reset_vcpi_slots(&mst->mst_mgr, mst_state, payload); +#endif +#else + mst->mst_fw_cbs->reset_vcpi_slots(&mst->mst_mgr, port); +#endif + _dp_mst_update_timeslots(mst, dp_bridge, port); + + DP_MST_DEBUG("mst bridge [%d] _pre disable part-1 complete\n", + dp_bridge->id); +} + +static void _dp_mst_bridge_pre_disable_part2(struct dp_mst_bridge *dp_bridge) +{ + struct dp_display *dp_display = dp_bridge->display; + struct dp_mst_private *mst = dp_display->dp_mst_prv_info; + struct sde_connector *c_conn = + to_sde_connector(dp_bridge->connector); + struct drm_dp_mst_port *port = c_conn->mst_port; + + DP_MST_DEBUG_V("enter\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, DP_MST_CONN_ID(dp_bridge)); + + /* skip mst specific disable operations during suspend */ + if (mst->state == PM_SUSPEND) { + dp_display->wakeup_phy_layer(dp_display, true); + 
drm_dp_send_power_updown_phy(&mst->mst_mgr, port, false); + dp_display->wakeup_phy_layer(dp_display, false); + return; + } + + mst->mst_fw_cbs->check_act_status(&mst->mst_mgr); + +#if (KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE) + mst->mst_fw_cbs->update_payload_part2(&mst->mst_mgr); + + port->vcpi.vcpi = dp_bridge->vcpi; + mst->mst_fw_cbs->deallocate_vcpi(&mst->mst_mgr, port); + dp_bridge->vcpi = 0; + dp_bridge->pbn = 0; +#endif + + DP_MST_DEBUG("mst bridge [%d] _pre disable part-2 complete\n", + dp_bridge->id); +} + +static void dp_mst_bridge_pre_enable(struct drm_bridge *drm_bridge) +{ + int rc = 0; + struct dp_mst_bridge *bridge; + struct dp_display *dp; + struct dp_mst_private *mst; + + if (!drm_bridge) { + DP_ERR("Invalid params\n"); + return; + } + + DP_MST_DEBUG_V("enter\n"); + + bridge = to_dp_mst_bridge(drm_bridge); + dp = bridge->display; + + if (!bridge->connector) { + DP_ERR("Invalid connector\n"); + return; + } + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, DP_MST_CONN_ID(bridge)); + mst = dp->dp_mst_prv_info; + + mutex_lock(&mst->mst_lock); + + /* By this point mode should have been validated through mode_fixup */ + rc = dp->set_mode(dp, bridge->dp_panel, &bridge->dp_mode); + if (rc) { + DP_ERR("[%d] failed to perform a mode set, rc=%d\n", + bridge->id, rc); + goto end; + } + + rc = dp->prepare(dp, bridge->dp_panel); + if (rc) { + DP_ERR("[%d] DP display prepare failed, rc=%d\n", + bridge->id, rc); + goto end; + } + + rc = _dp_mst_bridge_pre_enable_part1(bridge); + if (rc) { + DP_ERR("[%d] DP display pre-enable failed, rc=%d\n", bridge->id, rc); + dp->unprepare(dp, bridge->dp_panel); + goto end; + } + + rc = dp->enable(dp, bridge->dp_panel); + if (rc) { + DP_ERR("[%d] DP display enable failed, rc=%d\n", + bridge->id, rc); + dp->unprepare(dp, bridge->dp_panel); + goto end; + } else { + _dp_mst_bridge_pre_enable_part2(bridge); + } + + DP_MST_INFO("conn:%d mode:%s fps:%d dsc:%d vcpi:%d slots:%d to %d\n", + DP_MST_CONN_ID(bridge), bridge->drm_mode.name, 
+ drm_mode_vrefresh(&bridge->drm_mode), + bridge->dp_mode.timing.comp_info.enabled, + bridge->vcpi, bridge->start_slot, + bridge->start_slot + bridge->num_slots); +end: + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, DP_MST_CONN_ID(bridge)); + mutex_unlock(&mst->mst_lock); +} + +static void dp_mst_bridge_enable(struct drm_bridge *drm_bridge) +{ + int rc = 0; + struct dp_mst_bridge *bridge; + struct dp_display *dp; + + if (!drm_bridge) { + DP_ERR("Invalid params\n"); + return; + } + + bridge = to_dp_mst_bridge(drm_bridge); + if (!bridge->connector) { + DP_ERR("Invalid connector\n"); + return; + } + + DP_MST_DEBUG_V("enter\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, DP_MST_CONN_ID(bridge)); + + dp = bridge->display; + + rc = dp->post_enable(dp, bridge->dp_panel); + if (rc) { + DP_ERR("mst bridge [%d] post enable failed, rc=%d\n", + bridge->id, rc); + return; + } + + DP_MST_INFO("mst bridge:%d conn:%d post enable complete\n", + bridge->id, DP_MST_CONN_ID(bridge)); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, DP_MST_CONN_ID(bridge)); +} + +static void dp_mst_bridge_disable(struct drm_bridge *drm_bridge) +{ + int rc = 0; + struct dp_mst_bridge *bridge; + struct dp_display *dp; + struct dp_mst_private *mst; + + if (!drm_bridge) { + DP_ERR("Invalid params\n"); + return; + } + + DP_MST_DEBUG_V("enter\n"); + + bridge = to_dp_mst_bridge(drm_bridge); + if (!bridge->connector) { + DP_ERR("Invalid connector\n"); + return; + } + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, DP_MST_CONN_ID(bridge)); + dp = bridge->display; + + mst = dp->dp_mst_prv_info; + + sde_connector_helper_bridge_disable(bridge->connector); + + mutex_lock(&mst->mst_lock); + + _dp_mst_bridge_pre_disable_part1(bridge); + + rc = dp->pre_disable(dp, bridge->dp_panel); + if (rc) + DP_ERR("[%d] DP display pre disable failed, rc=%d\n", + bridge->id, rc); + + _dp_mst_bridge_pre_disable_part2(bridge); + + DP_MST_INFO("mst bridge:%d conn:%d disable complete\n", bridge->id, + DP_MST_CONN_ID(bridge)); + 
SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, DP_MST_CONN_ID(bridge)); + mutex_unlock(&mst->mst_lock); +} + +static void dp_mst_bridge_post_disable(struct drm_bridge *drm_bridge) +{ + int rc = 0, conn = 0; + struct dp_mst_bridge *bridge; + struct dp_display *dp; + struct dp_mst_private *mst; + + if (!drm_bridge) { + DP_ERR("Invalid params\n"); + return; + } + + bridge = to_dp_mst_bridge(drm_bridge); + if (!bridge->connector) { + DP_ERR("Invalid connector\n"); + return; + } + + conn = DP_MST_CONN_ID(bridge); + + DP_MST_DEBUG_V("enter\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, conn); + + dp = bridge->display; + mst = dp->dp_mst_prv_info; + + rc = dp->disable(dp, bridge->dp_panel); + if (rc) + DP_MST_INFO("bridge:%d conn:%d display disable failed, rc=%d\n", + bridge->id, conn, rc); + + rc = dp->unprepare(dp, bridge->dp_panel); + if (rc) + DP_MST_INFO("bridge:%d conn:%d display unprepare failed, rc=%d\n", + bridge->id, conn, rc); + + bridge->connector = NULL; + bridge->dp_panel = NULL; + + DP_MST_INFO("mst bridge:%d conn:%d post disable complete\n", + bridge->id, conn); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, conn); +} + +static void dp_mst_bridge_mode_set(struct drm_bridge *drm_bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode) +{ + struct dp_mst_bridge *bridge; + struct dp_mst_bridge_state *dp_bridge_state; + struct dp_display *dp; + + DP_MST_DEBUG_V("enter\n"); + + if (!drm_bridge || !mode || !adjusted_mode) { + DP_ERR("Invalid params\n"); + return; + } + + bridge = to_dp_mst_bridge(drm_bridge); + + dp_bridge_state = to_dp_mst_bridge_state(bridge); + bridge->connector = dp_bridge_state->connector; + bridge->dp_panel = dp_bridge_state->dp_panel; + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, DP_MST_CONN_ID(bridge)); + + dp = bridge->display; + + memset(&bridge->dp_mode, 0x0, sizeof(struct dp_display_mode)); + memcpy(&bridge->drm_mode, adjusted_mode, sizeof(bridge->drm_mode)); + dp->convert_to_dp_mode(dp, bridge->dp_panel, 
adjusted_mode, + &bridge->dp_mode); + dp->clear_reservation(dp, dp_bridge_state->dp_panel); + + DP_MST_INFO("mst bridge:%d conn:%d mode set complete %s\n", bridge->id, + DP_MST_CONN_ID(bridge), mode->name); +} + +/* DP MST Bridge APIs */ + +static struct drm_connector * +dp_mst_drm_fixed_connector_init(struct dp_display *dp_display, + struct drm_encoder *encoder); + +static const struct drm_bridge_funcs dp_mst_bridge_ops = { + .attach = dp_mst_bridge_attach, + .mode_fixup = dp_mst_bridge_mode_fixup, + .pre_enable = dp_mst_bridge_pre_enable, + .enable = dp_mst_bridge_enable, + .disable = dp_mst_bridge_disable, + .post_disable = dp_mst_bridge_post_disable, + .mode_set = dp_mst_bridge_mode_set, +}; + +int dp_mst_drm_bridge_init(void *data, struct drm_encoder *encoder) +{ + int rc = 0; + struct dp_mst_bridge *bridge = NULL; + struct dp_mst_bridge_state *state; + struct drm_device *dev; + struct dp_display *display = data; + struct msm_drm_private *priv = NULL; + struct dp_mst_private *mst = display->dp_mst_prv_info; + int i; + + if (!mst || !mst->mst_initialized) { + if (dp_mst_enc_cache.cnt >= MAX_DP_MST_DRM_BRIDGES) { + DP_MST_INFO("exceeding max bridge cnt %d\n", + dp_mst_enc_cache.cnt); + return 0; + } + + dp_mst_enc_cache.mst_enc[dp_mst_enc_cache.cnt] = encoder; + dp_mst_enc_cache.cnt++; + DP_MST_INFO("mst not initialized. 
cache encoder information\n"); + return 0; + } + + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + if (!mst->mst_bridge[i].in_use) { + bridge = &mst->mst_bridge[i]; + bridge->encoder = encoder; + bridge->in_use = true; + bridge->id = i; + break; + } + } + + if (i == MAX_DP_MST_DRM_BRIDGES) { + DP_ERR("mst supports only %d bridges\n", i); + rc = -EACCES; + goto end; + } + + dev = display->drm_dev; + bridge->display = display; + bridge->base.funcs = &dp_mst_bridge_ops; + bridge->base.encoder = encoder; + + priv = dev->dev_private; + + rc = drm_bridge_attach(encoder, &bridge->base, NULL, 0); + if (rc) { + DP_ERR("failed to attach bridge, rc=%d\n", rc); + goto end; + } + + priv->bridges[priv->num_bridges++] = &bridge->base; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state == NULL) { + rc = -ENOMEM; + goto end; + } + + drm_atomic_private_obj_init(dev, &bridge->obj, + &state->base, + &dp_mst_bridge_state_funcs); + + DP_MST_DEBUG("mst drm bridge init. bridge id:%d\n", i); + + /* + * If fixed topology port is defined, connector will be created + * immediately. 
+ */ + rc = display->mst_get_fixed_topology_port(display, bridge->id, + &bridge->fixed_port_num); + if (!rc) { + bridge->fixed_connector = + dp_mst_drm_fixed_connector_init(display, + bridge->encoder); + if (bridge->fixed_connector == NULL) { + DP_ERR("failed to create fixed connector\n"); + kfree(state); + rc = -ENOMEM; + goto end; + } + } + + return 0; + +end: + return rc; +} + +void dp_mst_drm_bridge_deinit(void *display) +{ + DP_MST_DEBUG("mst bridge deinit\n"); +} + +/* DP MST Connector OPs */ + +static int +dp_mst_connector_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force, + void *display) +{ + struct sde_connector *c_conn = to_sde_connector(connector); + struct dp_display *dp_display = c_conn->display; + struct dp_mst_private *mst = dp_display->dp_mst_prv_info; + struct dp_panel *dp_panel; + enum drm_connector_status status; + + DP_MST_DEBUG_V("enter:\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY); + + dp_panel = c_conn->drv_panel; + + if (dp_panel->mst_hide) + return connector_status_disconnected; + + status = mst->mst_fw_cbs->detect_port_ctx(connector, + ctx, &mst->mst_mgr, c_conn->mst_port); + + DP_MST_INFO("conn:%d status:%d\n", connector->base.id, status); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, connector->base.id, status); + + return (int)status; +} + +void dp_mst_clear_edid_cache(void *dp_display) { + struct dp_display *dp = dp_display; + struct drm_connector_list_iter conn_iter; + struct drm_connector *conn; + struct sde_connector *c_conn; + + DP_MST_DEBUG_V("enter:\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY); + + if (!dp) { + DP_ERR("invalid input\n"); + return; + } + + drm_connector_list_iter_begin(dp->drm_dev, &conn_iter); + drm_for_each_connector_iter(conn, &conn_iter) { + c_conn = to_sde_connector(conn); + if (!c_conn->mst_port) + continue; + + mutex_lock(&dp_mst.edid_lock); + kfree(c_conn->cached_edid); + c_conn->cached_edid = NULL; + mutex_unlock(&dp_mst.edid_lock); + } + + 
drm_connector_list_iter_end(&conn_iter); + + DP_MST_DEBUG_V("exit:\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT); +} + +static int dp_mst_connector_get_modes(struct drm_connector *connector, + void *display, const struct msm_resource_caps_info *avail_res) +{ + struct sde_connector *c_conn = to_sde_connector(connector); + struct dp_display *dp_display = display; + struct dp_mst_private *mst = dp_display->dp_mst_prv_info; + struct dp_display_mode *dp_mode = NULL; + int rc = 0; + struct edid *edid = NULL; + + DP_MST_DEBUG_V("enter:\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, connector->base.id); + + mutex_lock(&mst->edid_lock); + + if (c_conn->cached_edid) + goto duplicate_edid; + + mutex_unlock(&mst->edid_lock); + + edid = mst->mst_fw_cbs->get_edid(connector, + &mst->mst_mgr, c_conn->mst_port); + + if (!edid) { + DP_WARN("get edid failed. id: %d\n", connector->base.id); + goto end; + } + + mutex_lock(&mst->edid_lock); + c_conn->cached_edid = edid; + +duplicate_edid: + + edid = drm_edid_duplicate(c_conn->cached_edid); + + mutex_unlock(&mst->edid_lock); + + if (IS_ERR(edid)) { + DP_MST_DEBUG("edid duplication failed. id: %d\n", + connector->base.id); + goto end; + } + + rc = dp_display->mst_connector_update_edid(dp_display, + connector, edid); + +end: + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, connector->base.id, rc); + if (rc <= 0) { + DP_WARN("conn:%d has no modes, adding failsafe. 
rc=%d\n", connector->base.id, rc); + dp_mode = kzalloc(sizeof(*dp_mode), GFP_KERNEL); + if (!dp_mode) + return 0; + + init_failsafe_mode(dp_mode); + rc = dp_connector_add_custom_mode(connector, dp_mode); + } else { + DP_MST_INFO("conn:%d has %d modes\n", connector->base.id, rc); + } + + return rc; +} + +enum drm_mode_status dp_mst_connector_mode_valid( + struct drm_connector *connector, + struct drm_display_mode *mode, + void *display, const struct msm_resource_caps_info *avail_res) +{ + struct dp_display *dp_display = display; + struct dp_mst_private *mst; + struct sde_connector *c_conn; + struct drm_dp_mst_port *mst_port; + struct dp_display_mode dp_mode; + struct dp_panel *dp_panel; + uint16_t full_pbn, required_pbn; + int available_slots, required_slots; + struct dp_mst_bridge_state *dp_bridge_state; + int i, vrefresh, slots_in_use = 0, active_enc_cnt = 0; + const u32 tot_slots = 63; + + if (!connector || !mode || !display) { + DP_ERR("invalid input\n"); + return 0; + } + + mst = dp_display->dp_mst_prv_info; + c_conn = to_sde_connector(connector); + mst_port = c_conn->mst_port; + dp_panel = c_conn->drv_panel; + + if (!dp_panel || !mst_port) + return MODE_ERROR; + + vrefresh = drm_mode_vrefresh(mode); + + /* As per spec, failsafe mode should always be present */ + if ((mode->hdisplay == 640) && (mode->vdisplay == 480) && (mode->clock == 25175)) + goto validate_mode; + + if (dp_panel->mode_override && (mode->hdisplay != dp_panel->hdisplay || + mode->vdisplay != dp_panel->vdisplay || + vrefresh != dp_panel->vrefresh || + mode->picture_aspect_ratio != dp_panel->aspect_ratio)) + return MODE_BAD; + + /* dp bridge state is protected by drm_mode_config.connection_mutex */ + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + dp_bridge_state = to_dp_mst_bridge_state(&mst->mst_bridge[i]); + if (dp_bridge_state->connector && + dp_bridge_state->connector != connector) { + active_enc_cnt++; + slots_in_use += dp_bridge_state->num_slots; + } + } + + if (active_enc_cnt < 
DP_STREAM_MAX) { + full_pbn = mst_port->full_pbn; + available_slots = tot_slots - slots_in_use; + } else { + DP_DEBUG("all mst streams are active\n"); + return MODE_BAD; + } + + dp_display->convert_to_dp_mode(dp_display, c_conn->drv_panel, + mode, &dp_mode); + + required_pbn = mst->mst_fw_cbs->calc_pbn_mode(&dp_mode); + required_slots = mst->mst_fw_cbs->find_vcpi_slots( + &mst->mst_mgr, required_pbn); + + if (required_pbn > full_pbn || required_slots > available_slots) { + DP_DEBUG("mode:%s not supported. pbn %d vs %d slots %d vs %d\n", + mode->name, required_pbn, full_pbn, + required_slots, available_slots); + return MODE_BAD; + } + +validate_mode: + return dp_display->validate_mode(dp_display, dp_panel, mode, avail_res); +} + +int dp_mst_connector_get_mode_info(struct drm_connector *connector, + const struct drm_display_mode *drm_mode, + struct msm_sub_mode *sub_mode, + struct msm_mode_info *mode_info, + void *display, + const struct msm_resource_caps_info *avail_res) +{ + int rc; + + DP_MST_DEBUG_V("enter:\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, connector->base.id); + + rc = dp_connector_get_mode_info(connector, drm_mode, NULL, mode_info, + display, avail_res); + + DP_MST_DEBUG_V("mst connector:%d get mode info. 
rc:%d\n", + connector->base.id, rc); + + DP_MST_DEBUG_V("exit:\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, connector->base.id); + + return rc; +} + +static struct drm_encoder * +dp_mst_atomic_best_encoder(struct drm_connector *connector, + void *display, struct drm_connector_state *state) +{ + struct dp_display *dp_display = display; + struct dp_mst_private *mst = dp_display->dp_mst_prv_info; + struct sde_connector *conn = to_sde_connector(connector); + struct drm_encoder *enc = NULL; + struct dp_mst_bridge_state *bridge_state; + u32 i; + + if (state->best_encoder) + return state->best_encoder; + + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + bridge_state = dp_mst_get_bridge_atomic_state( + state->state, &mst->mst_bridge[i]); + if (IS_ERR(bridge_state)) + goto end; + + if (bridge_state->connector == connector) { + enc = mst->mst_bridge[i].encoder; + goto end; + } + } + + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + if (mst->mst_bridge[i].fixed_connector) + continue; + + bridge_state = dp_mst_get_bridge_atomic_state( + state->state, &mst->mst_bridge[i]); + + if (!bridge_state->connector) { + bridge_state->connector = connector; + bridge_state->dp_panel = conn->drv_panel; + enc = mst->mst_bridge[i].encoder; + break; + } + + } + +end: + if (enc) + DP_MST_DEBUG("mst connector:%d atomic best encoder:%d\n", + connector->base.id, i); + else + DP_MST_DEBUG("mst connector:%d atomic best encoder failed\n", + connector->base.id); + + return enc; +} + +static int dp_mst_connector_atomic_check(struct drm_connector *connector, + void *display, struct drm_atomic_state *state) +{ + int rc = 0, slots, i; + bool vcpi_released = false; + struct drm_connector_state *old_conn_state; + struct drm_connector_state *new_conn_state; + struct drm_crtc *old_crtc; + struct drm_crtc_state *crtc_state; + struct dp_mst_bridge *bridge; + struct dp_mst_bridge_state *bridge_state; + struct drm_bridge *drm_bridge; + struct dp_display *dp_display = display; + struct dp_mst_private *mst = 
dp_display->dp_mst_prv_info; + struct sde_connector *c_conn = to_sde_connector(connector); + struct dp_display_mode dp_mode; + + DP_MST_DEBUG_V("enter:\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, connector->base.id); + + if (!state) + return rc; + + new_conn_state = drm_atomic_get_new_connector_state(state, connector); + if (!new_conn_state) + return rc; + + old_conn_state = drm_atomic_get_old_connector_state(state, connector); + if (!old_conn_state) + goto mode_set; + + old_crtc = old_conn_state->crtc; + if (!old_crtc) + goto mode_set; + + crtc_state = drm_atomic_get_new_crtc_state(state, old_crtc); + + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + bridge = &mst->mst_bridge[i]; + DP_MST_DEBUG("bridge id:%d, vcpi:%d, pbn:%d, slots:%d\n", + bridge->id, bridge->vcpi, bridge->pbn, + bridge->num_slots); + } + + /*attempt to release vcpi slots on a modeset change for crtc state*/ + if (drm_atomic_crtc_needs_modeset(crtc_state)) { + if (WARN_ON(!old_conn_state->best_encoder)) { + rc = -EINVAL; + goto end; + } + + drm_bridge = drm_bridge_chain_get_first_bridge( + old_conn_state->best_encoder); + if (WARN_ON(!drm_bridge)) { + rc = -EINVAL; + goto end; + } + bridge = to_dp_mst_bridge(drm_bridge); + + bridge_state = dp_mst_get_bridge_atomic_state(state, bridge); + if (IS_ERR(bridge_state)) { + rc = PTR_ERR(bridge_state); + goto end; + } + + if (WARN_ON(bridge_state->connector != connector)) { + rc = -EINVAL; + goto end; + } + + slots = bridge_state->num_slots; + if (slots > 0) { + rc = mst->mst_fw_cbs->atomic_release_time_slots(state, + &mst->mst_mgr, c_conn->mst_port); + if (rc) { + DP_ERR("failed releasing %d vcpi slots %d\n", + slots, rc); + goto end; + } + vcpi_released = true; + } + + bridge_state->num_slots = 0; + + if (!new_conn_state->crtc && mst->state != PM_SUSPEND) { + bridge_state->connector = NULL; + bridge_state->dp_panel = NULL; + + DP_MST_DEBUG("clear best encoder: %d\n", bridge->id); + } + } + +mode_set: + if (!new_conn_state->crtc) + goto end; + + 
crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); + + if (drm_atomic_crtc_needs_modeset(crtc_state) && crtc_state->active) { + c_conn = to_sde_connector(connector); + + if (WARN_ON(!new_conn_state->best_encoder)) { + rc = -EINVAL; + goto end; + } + + drm_bridge = drm_bridge_chain_get_first_bridge( + new_conn_state->best_encoder); + if (WARN_ON(!drm_bridge)) { + rc = -EINVAL; + goto end; + } + bridge = to_dp_mst_bridge(drm_bridge); + + bridge_state = dp_mst_get_bridge_atomic_state(state, bridge); + if (IS_ERR(bridge_state)) { + rc = PTR_ERR(bridge_state); + goto end; + } + + if (WARN_ON(bridge_state->connector != connector)) { + rc = -EINVAL; + goto end; + } + + /* + * check if vcpi slots are trying to get allocated in same phase + * as deallocation. If so, go to end to avoid allocation. + */ + if (vcpi_released) { + DP_WARN("skipping allocation since vcpi was released in the same state\n"); + goto end; + } + + if (WARN_ON(bridge_state->num_slots)) { + rc = -EINVAL; + goto end; + } + + dp_display->convert_to_dp_mode(dp_display, c_conn->drv_panel, + &crtc_state->mode, &dp_mode); + + slots = _dp_mst_compute_config(state, mst, connector, &dp_mode); + if (slots < 0) { + rc = slots; + goto end; + } + + bridge_state->num_slots = slots; + } + +end: + DP_MST_DEBUG("mst connector:%d atomic check ret %d\n", + connector->base.id, rc); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, connector->base.id, rc); + return rc; +} + +static int dp_mst_connector_config_hdr(struct drm_connector *connector, + void *display, struct sde_connector_state *c_state) +{ + int rc; + + DP_MST_DEBUG_V("enter:\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, connector->base.id); + + rc = dp_connector_config_hdr(connector, display, c_state); + + DP_MST_DEBUG("mst connector:%d cfg hdr. 
rc:%d\n", + connector->base.id, rc); + + DP_MST_DEBUG_V("exit:\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, connector->base.id, rc); + + return rc; +} + +static void dp_mst_connector_pre_destroy(struct drm_connector *connector, + void *display) +{ + struct dp_display *dp_display = display; + struct sde_connector *c_conn = to_sde_connector(connector); + u32 conn_id = connector->base.id; + + DP_MST_DEBUG_V("enter:\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, conn_id); + + kfree(c_conn->cached_edid); + c_conn->cached_edid = NULL; + + drm_dp_mst_put_port_malloc(c_conn->mst_port); + + dp_display->mst_connector_uninstall(dp_display, connector); + DP_MST_DEBUG_V("exit:\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, conn_id); +} + +static int dp_mst_connector_post_init(struct drm_connector *connector, + void *display) +{ + struct dp_display *dp_display = display; + struct sde_connector *sde_conn = to_sde_connector(connector); + + if (!dp_display || !connector) + return -EINVAL; + + if (dp_display->dsc_cont_pps) + sde_conn->ops.update_pps = NULL; + + return 0; +} + +/* DRM MST callbacks */ + +static struct drm_connector * +dp_mst_add_connector(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, const char *pathprop) +{ + static const struct sde_connector_ops dp_mst_connector_ops = { + .post_init = dp_mst_connector_post_init, + .detect_ctx = dp_mst_connector_detect, + .get_modes = dp_mst_connector_get_modes, + .mode_valid = dp_mst_connector_mode_valid, + .get_info = dp_connector_get_info, + .get_mode_info = dp_mst_connector_get_mode_info, + .atomic_best_encoder = dp_mst_atomic_best_encoder, + .atomic_check = dp_mst_connector_atomic_check, + .config_hdr = dp_mst_connector_config_hdr, + .pre_destroy = dp_mst_connector_pre_destroy, + .update_pps = dp_connector_update_pps, + .install_properties = dp_connector_install_properties, + }; + struct dp_mst_private *dp_mst; + struct drm_device *dev; + struct dp_display *dp_display; + struct drm_connector 
*connector; + struct sde_connector *c_conn; + int rc, i; + + DP_MST_DEBUG_V("enter\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY); + + dp_mst = container_of(mgr, struct dp_mst_private, mst_mgr); + + dp_display = dp_mst->dp_display; + dev = dp_display->drm_dev; + + /* make sure connector is not accessed before reset */ + drm_modeset_lock_all(dev); + + connector = sde_connector_init(dev, + dp_mst->mst_bridge[0].encoder, + NULL, + dp_display, + &dp_mst_connector_ops, + DRM_CONNECTOR_POLL_HPD, + DRM_MODE_CONNECTOR_DisplayPort); + + if (IS_ERR_OR_NULL(connector)) { + DP_ERR("mst sde_connector_init failed\n"); + drm_modeset_unlock_all(dev); + return NULL; + } + + rc = dp_display->mst_connector_install(dp_display, connector); + if (rc) { + DP_ERR("mst connector install failed\n"); + sde_connector_destroy(connector); + drm_modeset_unlock_all(dev); + return NULL; + } + + c_conn = to_sde_connector(connector); + c_conn->mst_port = port; + drm_dp_mst_get_port_malloc(c_conn->mst_port); + + if (connector->funcs->reset) + connector->funcs->reset(connector); + + for (i = 1; i < MAX_DP_MST_DRM_BRIDGES; i++) { + drm_connector_attach_encoder(connector, + dp_mst->mst_bridge[i].encoder); + } + + drm_object_attach_property(&connector->base, + dev->mode_config.path_property, 0); + drm_object_attach_property(&connector->base, + dev->mode_config.tile_property, 0); + + /* unlock connector and make it accessible */ + drm_modeset_unlock_all(dev); + + DP_MST_INFO("add mst connector id:%d\n", connector->base.id); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, connector->base.id); + + return connector; +} + +static int +dp_mst_fixed_connector_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force, + void *display) +{ + struct dp_display *dp_display = display; + struct dp_mst_private *mst = dp_display->dp_mst_prv_info; + int i; + + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + if (mst->mst_bridge[i].fixed_connector != connector) + continue; + + if 
(!mst->mst_bridge[i].fixed_port_added) + break; + + return dp_mst_connector_detect(connector, ctx, force, display); + } + + return (int)connector_status_disconnected; +} + +static struct drm_encoder * +dp_mst_fixed_atomic_best_encoder(struct drm_connector *connector, + void *display, struct drm_connector_state *state) +{ + struct dp_display *dp_display = display; + struct dp_mst_private *mst = dp_display->dp_mst_prv_info; + struct sde_connector *conn = to_sde_connector(connector); + struct drm_encoder *enc = NULL; + struct dp_mst_bridge_state *bridge_state; + u32 i; + + if (state->best_encoder) + return state->best_encoder; + + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + if (mst->mst_bridge[i].fixed_connector == connector) { + bridge_state = dp_mst_get_bridge_atomic_state( + state->state, &mst->mst_bridge[i]); + if (IS_ERR(bridge_state)) + goto end; + + bridge_state->connector = connector; + bridge_state->dp_panel = conn->drv_panel; + enc = mst->mst_bridge[i].encoder; + break; + } + } + +end: + if (enc) + DP_MST_DEBUG("mst connector:%d atomic best encoder:%d\n", + connector->base.id, i); + else + DP_MST_DEBUG("mst connector:%d atomic best encoder failed\n", + connector->base.id); + + return enc; +} + +static u32 dp_mst_find_fixed_port_num(struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_port *target) +{ + struct drm_dp_mst_port *port; + u32 port_num = 0; + + /* + * search through reversed order of adding sequence, so the port number + * will be unique once topology is fixed + */ + list_for_each_entry_reverse(port, &mstb->ports, next) { + if (port->mstb) + port_num += dp_mst_find_fixed_port_num(port->mstb, + target); + else if (!port->input) { + ++port_num; + if (port == target) + break; + } + } + + return port_num; +} + +static struct drm_connector * +dp_mst_find_fixed_connector(struct dp_mst_private *dp_mst, + struct drm_dp_mst_port *port) +{ + struct dp_display *dp_display = dp_mst->dp_display; + struct drm_connector *connector = NULL; + struct 
sde_connector *c_conn; + u32 port_num; + int i; + + mutex_lock(&port->mgr->lock); + port_num = dp_mst_find_fixed_port_num(port->mgr->mst_primary, port); + mutex_unlock(&port->mgr->lock); + + if (!port_num) + return NULL; + + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + if (dp_mst->mst_bridge[i].fixed_port_num == port_num) { + connector = dp_mst->mst_bridge[i].fixed_connector; + c_conn = to_sde_connector(connector); + c_conn->mst_port = port; + dp_display->mst_connector_update_link_info(dp_display, + connector); + dp_mst->mst_bridge[i].fixed_port_added = true; + DP_MST_DEBUG("found fixed connector %d\n", + DRMID(connector)); + break; + } + } + + return connector; +} + +static int +dp_mst_find_first_available_encoder_idx(struct dp_mst_private *dp_mst) +{ + int enc_idx = MAX_DP_MST_DRM_BRIDGES; + int i; + + for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) { + if (!dp_mst->mst_bridge[i].fixed_connector) { + enc_idx = i; + break; + } + } + + return enc_idx; +} + +static struct drm_connector * +dp_mst_add_fixed_connector(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, const char *pathprop) +{ + struct dp_mst_private *dp_mst; + struct drm_device *dev; + struct dp_display *dp_display; + struct drm_connector *connector; + int i, enc_idx; + + DP_MST_DEBUG_V("enter\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY); + + dp_mst = container_of(mgr, struct dp_mst_private, mst_mgr); + + dp_display = dp_mst->dp_display; + dev = dp_display->drm_dev; + + if (port->input || port->mstb) + enc_idx = MAX_DP_MST_DRM_BRIDGES; + else { + /* if port is already reserved, return immediately */ + connector = dp_mst_find_fixed_connector(dp_mst, port); + if (connector != NULL) + return connector; + + /* first available bridge index for non-reserved port */ + enc_idx = dp_mst_find_first_available_encoder_idx(dp_mst); + } + + /* add normal connector */ + connector = dp_mst_add_connector(mgr, port, pathprop); + if (!connector) { + DP_MST_DEBUG("failed to add connector\n"); + 
return NULL; + } + + drm_modeset_lock_all(dev); + + /* clear encoder list */ + connector->possible_encoders = 0; + + /* re-attach encoders from first available encoders */ + for (i = enc_idx; i < MAX_DP_MST_DRM_BRIDGES; i++) + drm_connector_attach_encoder(connector, + dp_mst->mst_bridge[i].encoder); + + drm_modeset_unlock_all(dev); + + DP_MST_DEBUG("add mst connector:%d\n", connector->base.id); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, connector->base.id); + + return connector; +} + +static struct drm_connector * +dp_mst_drm_fixed_connector_init(struct dp_display *dp_display, + struct drm_encoder *encoder) +{ + static const struct sde_connector_ops dp_mst_connector_ops = { + .post_init = dp_mst_connector_post_init, + .detect_ctx = dp_mst_fixed_connector_detect, + .get_modes = dp_mst_connector_get_modes, + .mode_valid = dp_mst_connector_mode_valid, + .get_info = dp_connector_get_info, + .get_mode_info = dp_mst_connector_get_mode_info, + .atomic_best_encoder = dp_mst_fixed_atomic_best_encoder, + .atomic_check = dp_mst_connector_atomic_check, + .config_hdr = dp_mst_connector_config_hdr, + .pre_destroy = dp_mst_connector_pre_destroy, + }; + struct drm_device *dev; + struct drm_connector *connector; + int rc; + + DP_MST_DEBUG_V("enter\n"); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY); + + dev = dp_display->drm_dev; + + connector = sde_connector_init(dev, + encoder, + NULL, + dp_display, + &dp_mst_connector_ops, + DRM_CONNECTOR_POLL_HPD, + DRM_MODE_CONNECTOR_DisplayPort); + + if (IS_ERR_OR_NULL(connector)) { + DP_ERR("mst sde_connector_init failed\n"); + return NULL; + } + + rc = dp_display->mst_connector_install(dp_display, connector); + if (rc) { + DP_ERR("mst connector install failed\n"); + sde_connector_destroy(connector); + return NULL; + } + + drm_object_attach_property(&connector->base, + dev->mode_config.path_property, 0); + drm_object_attach_property(&connector->base, + dev->mode_config.tile_property, 0); + + DP_MST_DEBUG("add mst fixed connector:%d\n", 
connector->base.id); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, connector->base.id); + + return connector; +} + +static void dp_mst_hpd_event_notify(struct dp_mst_private *mst, bool hpd_status) +{ + struct drm_device *dev = mst->dp_display->drm_dev; + char event_string[] = "HOTPLUG=1"; + char status[HPD_STRING_SIZE]; + char *envp[3]; + + if (hpd_status) + snprintf(status, HPD_STRING_SIZE, "status=connected"); + else + snprintf(status, HPD_STRING_SIZE, "status=disconnected"); + + envp[0] = event_string; + envp[1] = status; + envp[2] = NULL; + + kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp); + + DP_MST_INFO("%s finished. hpd_status:%d\n", __func__, hpd_status); + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, hpd_status); +} + +/* DP Driver Callback OPs */ + +static int dp_mst_display_set_mgr_state(void *dp_display, bool state) +{ + int rc; + struct dp_display *dp = dp_display; + struct dp_mst_private *mst = dp->dp_mst_prv_info; + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, state); + /* + * on hpd high, set_mgr_state is called before hotplug event is sent + * to usermode and mst_session_state should be updated here. + * on hpd_low, set_mgr_state is called after hotplug event is sent and + * the session_state was already updated prior to that. + */ + if (state) + mst->mst_session_state = state; + + dp_mst_clear_edid_cache(dp); + mst->mst_fw_cbs = &drm_dp_mst_fw_helper_ops; + + rc = mst->mst_fw_cbs->topology_mgr_set_mst(&mst->mst_mgr, state); + if (rc < 0) { + DP_ERR("failed to set topology mgr state to %d. rc %d\n", + state, rc); + } + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, rc); + + return rc; +} + +static void dp_mst_display_hpd(void *dp_display, bool hpd_status) +{ + struct dp_display *dp = dp_display; + struct dp_mst_private *mst = dp->dp_mst_prv_info; + + /* + * on hpd high, set_mgr_state is called before hotplug event is sent + * to usermode and mst_session_state was already updated there. 
+ * on hpd_low, hotplug event is sent before set_mgr_state and the + * session state should be unset here for the connection status to be + * updated accordingly. + */ + if (!hpd_status) + mst->mst_session_state = hpd_status; + + dp_mst_hpd_event_notify(mst, hpd_status); +} + +static void dp_mst_display_hpd_irq(void *dp_display) +{ + int rc; + struct dp_display *dp = dp_display; + struct dp_mst_private *mst = dp->dp_mst_prv_info; + u8 esi[14]; + u8 ack[8] = {}; + unsigned int esi_res = DP_SINK_COUNT_ESI + 1; + bool handled; + + if (!mst->mst_session_state) { + DP_ERR("mst_hpd_irq received before mst session start\n"); + return; + } + + rc = drm_dp_dpcd_read(mst->caps.drm_aux, DP_SINK_COUNT_ESI, + esi, 14); + if (rc != 14) { + DP_ERR("dpcd sink status read failed, rlen=%d\n", rc); + return; + } + + DP_MST_DEBUG("mst irq: esi1[0x%x] esi2[0x%x] esi3[%x]\n", + esi[1], esi[2], esi[3]); + + rc = drm_dp_mst_hpd_irq_handle_event(&mst->mst_mgr, esi, ack, &handled); + + /* ack the request */ + if (handled) { + rc = drm_dp_dpcd_writeb(mst->caps.drm_aux, esi_res, ack[1]); + + if (esi[1] & DP_UP_REQ_MSG_RDY) + dp_mst_clear_edid_cache(dp); + + if (rc != 1) + DP_ERR("dpcd esi_res failed. 
rlen=%d\n", rc); + else + drm_dp_mst_hpd_irq_send_new_request(&mst->mst_mgr); + } + + DP_MST_DEBUG("mst display hpd_irq handled:%d rc:%d\n", handled, rc); +} + +static void dp_mst_set_state(void *dp_display, enum dp_drv_state mst_state) +{ + struct dp_display *dp = dp_display; + struct dp_mst_private *mst = dp->dp_mst_prv_info; + + if (!mst) { + DP_DEBUG("mst not initialized\n"); + return; + } + + SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, mst_state); + mst->state = mst_state; + DP_MST_INFO("mst power state:%d\n", mst_state); +} + +static void dp_mst_display_set_mst_mode_params(void *dp_display, struct dp_display_mode *mode) +{ + // update pbn values that will later be used for rg calculation + dp_mst_calc_pbn_mode(mode); +} + +/* DP MST APIs */ + +static const struct dp_mst_drm_cbs dp_mst_display_cbs = { + .hpd = dp_mst_display_hpd, + .hpd_irq = dp_mst_display_hpd_irq, + .set_drv_state = dp_mst_set_state, + .set_mgr_state = dp_mst_display_set_mgr_state, + .set_mst_mode_params = dp_mst_display_set_mst_mode_params, +}; + +static const struct drm_dp_mst_topology_cbs dp_mst_drm_cbs = { + .add_connector = dp_mst_add_connector, +}; + +static const struct drm_dp_mst_topology_cbs dp_mst_fixed_drm_cbs = { + .add_connector = dp_mst_add_fixed_connector, +}; + +int dp_mst_init(struct dp_display *dp_display) +{ + struct drm_device *dev; + int conn_base_id = 0; + int ret, i; + struct dp_mst_drm_install_info install_info; + + memset(&dp_mst, 0, sizeof(dp_mst)); + + if (!dp_display) { + DP_ERR("invalid params\n"); + return 0; + } + + dev = dp_display->drm_dev; + + /* register with DP driver */ + install_info.dp_mst_prv_info = &dp_mst; + install_info.cbs = &dp_mst_display_cbs; + dp_display->mst_install(dp_display, &install_info); + + dp_display->get_mst_caps(dp_display, &dp_mst.caps); + + if (!dp_mst.caps.has_mst) { + DP_MST_DEBUG("mst not supported\n"); + return 0; + } + + dp_mst.mst_fw_cbs = &drm_dp_mst_fw_helper_ops; + + memset(&dp_mst.mst_mgr, 0, sizeof(dp_mst.mst_mgr)); + 
dp_mst.mst_mgr.cbs = &dp_mst_drm_cbs; + conn_base_id = dp_display->base_connector->base.id; + dp_mst.dp_display = dp_display; + + mutex_init(&dp_mst.mst_lock); + mutex_init(&dp_mst.edid_lock); + +/* + * Upstream driver modified drm_dp_mst_topology_mgr_init signature + * in 5.15 kernel and reverted it back in 6.1 + */ +#if ((KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE) && \ + (KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE)) + ret = drm_dp_mst_topology_mgr_init(&dp_mst.mst_mgr, dev, + dp_mst.caps.drm_aux, + dp_mst.caps.max_dpcd_transaction_bytes, + dp_mst.caps.max_streams_supported, + 4, DP_MAX_LINK_CLK_KHZ, conn_base_id); +#else + ret = drm_dp_mst_topology_mgr_init(&dp_mst.mst_mgr, dev, + dp_mst.caps.drm_aux, + dp_mst.caps.max_dpcd_transaction_bytes, + dp_mst.caps.max_streams_supported, + conn_base_id); +#endif + if (ret) { + DP_ERR("dp drm mst topology manager init failed\n"); + goto error; + } + + dp_mst.mst_initialized = true; + + /* create drm_bridges for cached mst encoders and clear cache */ + for (i = 0; i < dp_mst_enc_cache.cnt; i++) { + ret = dp_mst_drm_bridge_init(dp_display, + dp_mst_enc_cache.mst_enc[i]); + } + memset(&dp_mst_enc_cache, 0, sizeof(dp_mst_enc_cache)); + + /* choose fixed callback function if fixed topology is found */ + if (!dp_display->mst_get_fixed_topology_port(dp_display, 0, NULL)) + dp_mst.mst_mgr.cbs = &dp_mst_fixed_drm_cbs; + + DP_MST_INFO("dp drm mst topology manager init completed\n"); + + return ret; + +error: + mutex_destroy(&dp_mst.mst_lock); + mutex_destroy(&dp_mst.edid_lock); + return ret; +} + +void dp_mst_deinit(struct dp_display *dp_display) +{ + struct dp_mst_private *mst; + + if (!dp_display) { + DP_ERR("invalid params\n"); + return; + } + + mst = dp_display->dp_mst_prv_info; + + if (!mst->mst_initialized) + return; + + dp_display->mst_uninstall(dp_display); + + drm_dp_mst_topology_mgr_destroy(&mst->mst_mgr); + + dp_mst.mst_initialized = false; + + mutex_destroy(&mst->mst_lock); + mutex_destroy(&mst->edid_lock); + + 
DP_MST_INFO("dp drm mst topology manager deinit completed\n");
+}
+
diff --git a/msm/dp/dp_mst_drm.h b/msm/dp/dp_mst_drm.h
new file mode 100644
index 000000000..89a3e5385
--- /dev/null
+++ b/msm/dp/dp_mst_drm.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_MST_DRM_H_
+#define _DP_MST_DRM_H_
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_encoder.h>
+
+#include "dp_display.h"
+
+#if IS_ENABLED(CONFIG_DRM_MSM_DP_MST)
+
+/**
+ * dp_mst_drm_bridge_init - initialize mst bridge
+ * @display: Pointer to private display structure
+ * @encoder: Pointer to encoder for mst bridge mapping
+ */
+int dp_mst_drm_bridge_init(void *display,
+		struct drm_encoder *encoder);
+
+/**
+ * dp_mst_drm_bridge_deinit - de-initialize mst bridges
+ * @display: Pointer to private display structure
+ */
+void dp_mst_drm_bridge_deinit(void *display);
+
+/**
+ * dp_mst_init - initialize mst objects for the given display
+ * @display: Pointer to private display structure
+ */
+int dp_mst_init(struct dp_display *dp_display);
+
+/**
+ * dp_mst_deinit - de-initialize mst objects for the given display
+ * @display: Pointer to private display structure
+ */
+void dp_mst_deinit(struct dp_display *dp_display);
+
+/**
+ * dp_mst_clear_edid_cache - clear mst edid cache for the given display
+ * @display: Pointer to private display structure
+ */
+void dp_mst_clear_edid_cache(void *dp_display);
+#else
+
+static inline int dp_mst_drm_bridge_init(void *display,
+		struct drm_encoder *encoder)
+{
+	return 0;
+}
+
+static inline void dp_mst_drm_bridge_deinit(void *display)
+{
+}
+
+static inline int dp_mst_init(struct dp_display *dp_display)
+{
+	return 0;
+}
+
+static inline void dp_mst_deinit(struct dp_display *dp_display)
+{
+	/* no-op stub; return type matches the void declaration above */
+}
+
+static inline void dp_mst_clear_edid_cache(void *display)
+{
+}
+#endif /* CONFIG_DRM_MSM_DP_MST */
+
+#endif /* _DP_MST_DRM_H_ */
diff --git a/msm/dp/dp_mst_drm.h b/msm/dp/dp_mst_sim.c
new file mode 100644 index 000000000..39517b2b2 --- /dev/null +++ b/msm/dp/dp_mst_sim.c @@ -0,0 +1,1705 @@ +/* + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)) +#include +#else +#include +#endif +#include "dp_debug.h" +#include "dp_mst_sim.h" + +struct dp_sim_dpcd_reg { + struct list_head head; + u32 addr; + u8 val; +}; + +#define DP_SIM_BRIDGE_PRIV_FLAG (1 << 31) + +#define MAX_BUILTIN_DPCD_ADDR SZ_2K +#define MAX_MST_PORT 8 + +struct dp_sim_device { + struct device *dev; + struct dp_aux_bridge bridge; + void *host_dev; + int (*hpd_cb)(void *, bool, bool); + + struct mutex lock; + const char *label; + + struct dentry *debugfs_dir; + struct dentry *debugfs_edid_dir; + + u8 dpcd_reg[MAX_BUILTIN_DPCD_ADDR]; + struct list_head dpcd_reg_list; + u32 dpcd_write_addr; + u32 dpcd_write_size; + + u32 link_training_cnt; + u32 link_training_remain; + u32 link_training_lane_cnt; + bool link_training_mismatch; + + struct dp_mst_sim_port *ports; + u32 port_num; + u32 current_port_num; + u32 sim_mode; + + u32 edid_seg; + u32 edid_seg_int; + u32 edid_addr; + + bool skip_edid; + bool skip_dpcd; + bool skip_link_training; + bool skip_config; + bool skip_hpd; + bool skip_mst; +}; + +struct dp_sim_debug_edid_entry { + struct dp_sim_device *sim_dev; + u32 index; +}; + +#define to_dp_sim_dev(x) 
container_of((x), struct dp_sim_device, bridge) + +static const struct dp_mst_sim_port output_port = { + false, false, true, 3, false, 0x12, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + 0, 0, 2520, 2520, NULL, 0 +}; + +static int dp_sim_register_hpd(struct dp_aux_bridge *bridge, + int (*hpd_cb)(void *, bool, bool), void *dev) +{ + struct dp_sim_device *sim_dev = to_dp_sim_dev(bridge); + + sim_dev->host_dev = dev; + sim_dev->hpd_cb = hpd_cb; + + if (sim_dev->skip_hpd) + hpd_cb(dev, true, false); + + return 0; +} + +static u8 dp_sim_read_dpcd(struct dp_sim_device *sim_dev, + u32 addr) +{ + struct dp_sim_dpcd_reg *reg; + + if (addr < MAX_BUILTIN_DPCD_ADDR) { + return sim_dev->dpcd_reg[addr]; + } else { + list_for_each_entry(reg, &sim_dev->dpcd_reg_list, head) { + if (reg->addr == addr) + return reg->val; + } + } + + return 0; +} + +static void dp_sim_write_dpcd(struct dp_sim_device *sim_dev, + u32 addr, u8 val) +{ + struct dp_sim_dpcd_reg *dpcd_reg; + + if (addr < MAX_BUILTIN_DPCD_ADDR) { + sim_dev->dpcd_reg[addr] = val; + } else { + list_for_each_entry(dpcd_reg, &sim_dev->dpcd_reg_list, head) { + if (dpcd_reg->addr == addr) { + dpcd_reg->val = val; + return; + } + } + + dpcd_reg = devm_kzalloc(sim_dev->dev, + sizeof(*dpcd_reg), GFP_KERNEL); + if (!dpcd_reg) + return; + + dpcd_reg->addr = addr; + dpcd_reg->val = val; + list_add_tail(&dpcd_reg->head, &sim_dev->dpcd_reg_list); + } +} + +static int dp_sim_read_dpcd_regs(struct dp_sim_device *sim_dev, + u8 *buf, u32 size, u32 offset) +{ + u32 i; + + if (offset + size <= MAX_BUILTIN_DPCD_ADDR) { + memcpy(buf, &sim_dev->dpcd_reg[offset], size); + } else { + for (i = 0; i < size; i++) + buf[i] = dp_sim_read_dpcd(sim_dev, offset + i); + } + + return size; +} + +static int dp_sim_read_edid(struct dp_sim_device *sim_dev, + struct drm_dp_aux_msg *msg) +{ + u8 *buf = (u8 *)msg->buffer; + u32 addr; + + if (!sim_dev->port_num || !msg->size) + return 0; + + if (msg->request 
& DP_AUX_I2C_READ) { + addr = (sim_dev->edid_seg_int << 8) + sim_dev->edid_addr; + if (addr + msg->size <= sim_dev->ports[0].edid_size) { + memcpy(msg->buffer, &sim_dev->ports[0].edid[addr], + msg->size); + } else if (addr < sim_dev->ports[0].edid_size) { + memcpy(msg->buffer, &sim_dev->ports[0].edid[addr], + sim_dev->ports[0].edid_size - addr); + } + sim_dev->edid_addr += msg->size; + sim_dev->edid_addr &= 0xFF; + } else { + if (msg->address == 0x30) { + sim_dev->edid_seg = buf[0]; + } else if (msg->address == 0x50) { + sim_dev->edid_seg_int = sim_dev->edid_seg; + sim_dev->edid_addr = buf[0]; + sim_dev->edid_seg = 0; + } + } + + return msg->size; +} + +static int dp_sim_link_training(struct dp_sim_device *sim_dev, + struct drm_dp_aux *drm_aux, + struct drm_dp_aux_msg *msg) +{ + u8 *link_status = msg->buffer; + int ret, i; + + if (msg->request == DP_AUX_NATIVE_READ && + msg->address == DP_LANE0_1_STATUS) { + /* + * remain is an option to allow limited actual + * link training. this is needed for some device + * when actual read is needed. + */ + if (sim_dev->link_training_remain) { + sim_dev->link_training_remain--; + ret = drm_aux->transfer(drm_aux, msg); + if (ret >= 0) + link_status[2] &= ~DP_LINK_STATUS_UPDATED; + return ret; + } + + memcpy(msg->buffer, &sim_dev->dpcd_reg[msg->address], + msg->size); + + /* + * when mismatch happens, clear status and fail the link + * training. + */ + if (sim_dev->link_training_mismatch) { + link_status[0] = 0; + link_status[1] = 0; + } + + return msg->size; + } + + if (msg->request == DP_AUX_NATIVE_WRITE) { + if (msg->address == DP_TRAINING_LANE0_SET) { + const u8 mask = DP_TRAIN_VOLTAGE_SWING_MASK | + DP_TRAIN_PRE_EMPHASIS_MASK; + /* + * when link training is set, only pre-set vx/px is + * going through. here we will fail the initial + * vx/px and correct them automatically. 
+ */ + sim_dev->link_training_mismatch = false; + for (i = 0; i < sim_dev->link_training_lane_cnt; i++) { + if ((link_status[i] & mask) != + (sim_dev->dpcd_reg[ + DP_TRAINING_LANE0_SET + i] & mask)) { + sim_dev->link_training_mismatch = true; + break; + } + } + } else if (msg->address == DP_TRAINING_PATTERN_SET) { + sim_dev->link_training_remain = + sim_dev->link_training_cnt; + } else if (msg->address == DP_LINK_BW_SET) { + sim_dev->link_training_lane_cnt = + link_status[1] & 0x1F; + } + } + + return 0; +} + +static ssize_t dp_sim_transfer(struct dp_aux_bridge *bridge, + struct drm_dp_aux *drm_aux, + struct drm_dp_aux_msg *msg) +{ + struct dp_sim_device *sim_dev = to_dp_sim_dev(bridge); + int ret; + + mutex_lock(&sim_dev->lock); + + if (sim_dev->skip_link_training && + !(sim_dev->sim_mode & DP_SIM_MODE_LINK_TRAIN)) { + ret = dp_sim_link_training(sim_dev, drm_aux, msg); + if (ret) + goto end; + } + + if ((sim_dev->sim_mode & DP_SIM_MODE_MST) || sim_dev->skip_mst) { + ret = dp_mst_sim_transfer(sim_dev->bridge.mst_ctx, msg); + if (ret >= 0) { + ret = msg->size; + goto end; + } + } + + if (msg->request == DP_AUX_NATIVE_WRITE) { + sim_dev->dpcd_write_addr = msg->address; + sim_dev->dpcd_write_size = msg->size; + } + + if (((sim_dev->sim_mode & DP_SIM_MODE_EDID) || + sim_dev->skip_edid) && + (msg->request & DP_AUX_I2C_MOT)) + ret = dp_sim_read_edid(sim_dev, msg); + else if (((sim_dev->sim_mode & DP_SIM_MODE_DPCD_READ) || + sim_dev->skip_dpcd) && + msg->request == DP_AUX_NATIVE_READ) + ret = dp_sim_read_dpcd_regs(sim_dev, msg->buffer, + msg->size, msg->address); + else if (((sim_dev->sim_mode & DP_SIM_MODE_DPCD_WRITE) || + sim_dev->skip_config) && + msg->request == DP_AUX_NATIVE_WRITE) + ret = msg->size; + else + ret = drm_aux->transfer(drm_aux, msg); + +end: + mutex_unlock(&sim_dev->lock); + + return ret; +} + +static void dp_sim_host_hpd_irq(void *host_dev) +{ + struct dp_sim_device *sim_dev = host_dev; + + if (sim_dev->hpd_cb) + sim_dev->hpd_cb(sim_dev->host_dev, 
true, true); +} + +int dp_sim_set_sim_mode(struct dp_aux_bridge *bridge, u32 sim_mode) +{ + struct dp_sim_device *sim_dev; + + if (!bridge || !(bridge->flag & DP_SIM_BRIDGE_PRIV_FLAG)) + return -EINVAL; + + sim_dev = to_dp_sim_dev(bridge); + + mutex_lock(&sim_dev->lock); + sim_dev->sim_mode = sim_mode; + mutex_unlock(&sim_dev->lock); + + return 0; +} + +int dp_sim_update_port_num(struct dp_aux_bridge *bridge, u32 port_num) +{ + struct dp_sim_device *sim_dev; + struct dp_mst_sim_port *ports; + u32 i, rc; + + if (!bridge || !(bridge->flag & DP_SIM_BRIDGE_PRIV_FLAG)) + return -EINVAL; + + sim_dev = to_dp_sim_dev(bridge); + DP_INFO("Update port count from %d to %d\n", sim_dev->port_num, port_num); + + mutex_lock(&sim_dev->lock); + + if (port_num > sim_dev->port_num) { + ports = devm_kzalloc(sim_dev->dev, + port_num * sizeof(*ports), GFP_KERNEL); + if (!ports) { + rc = -ENOMEM; + goto bail; + } + + memcpy(ports, sim_dev->ports, + sim_dev->port_num * sizeof(*ports)); + + if (sim_dev->ports) + devm_kfree(sim_dev->dev, sim_dev->ports); + + sim_dev->ports = ports; + + for (i = sim_dev->port_num; i < port_num; i++) { + memcpy(&ports[i], &output_port, sizeof(*ports)); + ports[i].peer_guid[0] = i; + } + } + + sim_dev->port_num = port_num; + rc = dp_mst_sim_update(sim_dev->bridge.mst_ctx, + port_num, sim_dev->ports); + if (rc) + goto bail; + + sim_dev->current_port_num = port_num; + +bail: + mutex_unlock(&sim_dev->lock); + + return rc; +} + +int dp_sim_update_port_status(struct dp_aux_bridge *bridge, + int port, enum drm_connector_status status) +{ + struct dp_sim_device *sim_dev; + int rc; + + if (!bridge || !(bridge->flag & DP_SIM_BRIDGE_PRIV_FLAG)) + return -EINVAL; + + sim_dev = to_dp_sim_dev(bridge); + + mutex_lock(&sim_dev->lock); + + if (port < 0 || port >= sim_dev->current_port_num) { + rc = -EINVAL; + goto bail; + } + + sim_dev->ports[port].pdt = (status == connector_status_connected) ? 
+ DP_PEER_DEVICE_SST_SINK : DP_PEER_DEVICE_NONE; + + rc = dp_mst_sim_update(sim_dev->bridge.mst_ctx, sim_dev->current_port_num, sim_dev->ports); + +bail: + mutex_unlock(&sim_dev->lock); + + return rc; +} + +int dp_sim_update_port_edid(struct dp_aux_bridge *bridge, + int port, const u8 *edid, u32 size) +{ + struct dp_sim_device *sim_dev; + struct dp_mst_sim_port *sim_port; + int rc; + + if (!bridge || !(bridge->flag & DP_SIM_BRIDGE_PRIV_FLAG)) + return -EINVAL; + + sim_dev = to_dp_sim_dev(bridge); + + mutex_lock(&sim_dev->lock); + + if (port < 0 || port >= sim_dev->current_port_num) { + rc = -EINVAL; + goto bail; + } + + sim_port = &sim_dev->ports[port]; + + if (size != sim_port->edid_size) { + if (sim_port->edid) + devm_kfree(sim_dev->dev, (u8 *)sim_port->edid); + + sim_port->edid = devm_kzalloc(sim_dev->dev, + size, GFP_KERNEL); + if (!sim_port->edid) + { rc = -ENOMEM; goto bail; } + + sim_port->edid_size = size; + } + + memcpy((u8 *)sim_port->edid, edid, size); + + rc = dp_mst_sim_update(sim_dev->bridge.mst_ctx, sim_dev->current_port_num, sim_dev->ports); +bail: + mutex_unlock(&sim_dev->lock); + + return rc; +} + +int dp_sim_write_dpcd_reg(struct dp_aux_bridge *bridge, + const u8 *dpcd, u32 size, u32 offset) +{ + struct dp_sim_device *sim_dev; + int i; + + if (!bridge || !(bridge->flag & DP_SIM_BRIDGE_PRIV_FLAG)) + return -EINVAL; + + sim_dev = to_dp_sim_dev(bridge); + mutex_lock(&sim_dev->lock); + for (i = 0; i < size; i++) + dp_sim_write_dpcd(sim_dev, offset + i, dpcd[i]); + mutex_unlock(&sim_dev->lock); + + return 0; +} + +int dp_sim_read_dpcd_reg(struct dp_aux_bridge *bridge, + u8 *dpcd, u32 size, u32 offset) +{ + struct dp_sim_device *sim_dev; + int rc; + + if (!bridge || !(bridge->flag & DP_SIM_BRIDGE_PRIV_FLAG)) + return -EINVAL; + + sim_dev = to_dp_sim_dev(bridge); + mutex_lock(&sim_dev->lock); + rc = dp_sim_read_dpcd_regs(sim_dev, dpcd, size, offset); + mutex_unlock(&sim_dev->lock); + + return rc; +} + +static void dp_sim_update_dtd(struct edid *edid, + struct 
drm_display_mode *mode) +{ + struct detailed_timing *dtd = &edid->detailed_timings[0]; + struct detailed_pixel_timing *pd = &dtd->data.pixel_data; + u32 h_blank = mode->htotal - mode->hdisplay; + u32 v_blank = mode->vtotal - mode->vdisplay; + u32 h_img = 0, v_img = 0; + + dtd->pixel_clock = cpu_to_le16(mode->clock / 10); + + pd->hactive_lo = mode->hdisplay & 0xFF; + pd->hblank_lo = h_blank & 0xFF; + pd->hactive_hblank_hi = ((h_blank >> 8) & 0xF) | + ((mode->hdisplay >> 8) & 0xF) << 4; + + pd->vactive_lo = mode->vdisplay & 0xFF; + pd->vblank_lo = v_blank & 0xFF; + pd->vactive_vblank_hi = ((v_blank >> 8) & 0xF) | + ((mode->vdisplay >> 8) & 0xF) << 4; + + pd->hsync_offset_lo = + (mode->hsync_start - mode->hdisplay) & 0xFF; + pd->hsync_pulse_width_lo = + (mode->hsync_end - mode->hsync_start) & 0xFF; + pd->vsync_offset_pulse_width_lo = + (((mode->vsync_start - mode->vdisplay) & 0xF) << 4) | + ((mode->vsync_end - mode->vsync_start) & 0xF); + + pd->hsync_vsync_offset_pulse_width_hi = + ((((mode->hsync_start - mode->hdisplay) >> 8) & 0x3) << 6) | + ((((mode->hsync_end - mode->hsync_start) >> 8) & 0x3) << 4) | + ((((mode->vsync_start - mode->vdisplay) >> 4) & 0x3) << 2) | + ((((mode->vsync_end - mode->vsync_start) >> 4) & 0x3) << 0); + + pd->width_mm_lo = h_img & 0xFF; + pd->height_mm_lo = v_img & 0xFF; + pd->width_height_mm_hi = (((h_img >> 8) & 0xF) << 4) | + ((v_img >> 8) & 0xF); + + pd->hborder = 0; + pd->vborder = 0; + pd->misc = 0; +} + +static void dp_sim_update_checksum(struct edid *edid) +{ + u8 *data = (u8 *)edid; + u32 i, sum = 0; + + for (i = 0; i < EDID_LENGTH - 1; i++) + sum += data[i]; + + edid->checksum = 0x100 - (sum & 0xFF); +} + +static int dp_sim_parse_edid_from_node(struct dp_sim_device *sim_dev, + int index, struct device_node *node) +{ + struct dp_mst_sim_port *port; + struct drm_display_mode mode_buf, *mode = &mode_buf; + u16 h_front_porch, h_pulse_width, h_back_porch; + u16 v_front_porch, v_pulse_width, v_back_porch; + bool h_active_high, 
v_active_high; + u32 flags = 0; + int rc; + struct edid *edid; + + const u8 edid_buf[EDID_LENGTH] = { + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x44, 0x6D, + 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x1B, 0x10, 0x01, 0x03, + 0x80, 0x50, 0x2D, 0x78, 0x0A, 0x0D, 0xC9, 0xA0, 0x57, 0x47, + 0x98, 0x27, 0x12, 0x48, 0x4C, 0x00, 0x00, 0x00, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, + }; + + rc = of_property_read_u16(node, "qcom,mode-h-active", + &mode->hdisplay); + if (rc) { + DP_ERR("failed to read h-active, rc=%d\n", rc); + goto fail; + } + + rc = of_property_read_u16(node, "qcom,mode-h-front-porch", + &h_front_porch); + if (rc) { + DP_ERR("failed to read h-front-porch, rc=%d\n", rc); + goto fail; + } + + rc = of_property_read_u16(node, "qcom,mode-h-pulse-width", + &h_pulse_width); + if (rc) { + DP_ERR("failed to read h-pulse-width, rc=%d\n", rc); + goto fail; + } + + rc = of_property_read_u16(node, "qcom,mode-h-back-porch", + &h_back_porch); + if (rc) { + DP_ERR("failed to read h-back-porch, rc=%d\n", rc); + goto fail; + } + + h_active_high = of_property_read_bool(node, + "qcom,mode-h-active-high"); + + rc = of_property_read_u16(node, "qcom,mode-v-active", + &mode->vdisplay); + if (rc) { + DP_ERR("failed to read v-active, rc=%d\n", rc); + goto fail; + } + + rc = of_property_read_u16(node, "qcom,mode-v-front-porch", + &v_front_porch); + if (rc) { + DP_ERR("failed to read v-front-porch, rc=%d\n", rc); + goto fail; + } + + rc = of_property_read_u16(node, "qcom,mode-v-pulse-width", + &v_pulse_width); + if (rc) { + DP_ERR("failed to read v-pulse-width, rc=%d\n", rc); + goto fail; + } + + rc = of_property_read_u16(node, "qcom,mode-v-back-porch", + &v_back_porch); + if (rc) { + DP_ERR("failed to read v-back-porch, rc=%d\n", rc); + goto fail; + } + + v_active_high = of_property_read_bool(node, + "qcom,mode-v-active-high"); + + rc = of_property_read_u32(node, "qcom,mode-clock-in-khz", + &mode->clock); + if (rc) { + 
DP_ERR("failed to read clock, rc=%d\n", rc); + goto fail; + } + + mode->hsync_start = mode->hdisplay + h_front_porch; + mode->hsync_end = mode->hsync_start + h_pulse_width; + mode->htotal = mode->hsync_end + h_back_porch; + mode->vsync_start = mode->vdisplay + v_front_porch; + mode->vsync_end = mode->vsync_start + v_pulse_width; + mode->vtotal = mode->vsync_end + v_back_porch; + if (h_active_high) + flags |= DRM_MODE_FLAG_PHSYNC; + else + flags |= DRM_MODE_FLAG_NHSYNC; + if (v_active_high) + flags |= DRM_MODE_FLAG_PVSYNC; + else + flags |= DRM_MODE_FLAG_NVSYNC; + mode->flags = flags; + + edid = devm_kzalloc(sim_dev->dev, sizeof(*edid), GFP_KERNEL); + if (!edid) { + rc = -ENOMEM; + goto fail; + } + + memcpy(edid, edid_buf, sizeof(edid_buf)); + dp_sim_update_dtd(edid, mode); + dp_sim_update_checksum(edid); + + port = &sim_dev->ports[index]; + memcpy(port, &output_port, sizeof(*port)); + port->peer_guid[0] = index; + + if (port->edid) + devm_kfree(sim_dev->dev, (u8 *)port->edid); + + port->edid = (u8 *)edid; + port->edid_size = sizeof(*edid); + +fail: + return rc; +} + +static int dp_sim_parse_edid_from_data(struct dp_sim_device *sim_dev, + int index, const char *data, int len) +{ + struct dp_mst_sim_port *port; + u8 *edid_data; + + edid_data = devm_kzalloc(sim_dev->dev, len, GFP_KERNEL); + if (!edid_data) + return -ENOMEM; + + memcpy(edid_data, data, len); + + port = &sim_dev->ports[index]; + memcpy(port, &output_port, sizeof(*port)); + port->peer_guid[0] = index; + + if (port->edid) + devm_kfree(sim_dev->dev, (u8 *)port->edid); + + port->edid = edid_data; + port->edid_size = len; + + return 0; +} + +static int dp_sim_parse_edid(struct dp_sim_device *sim_dev) +{ + struct dp_mst_sim_port *ports; + struct device_node *of_node = sim_dev->bridge.of_node; + struct device_node *node; + const char *data; + int rc, port_num, i, len; + + port_num = of_get_child_count(of_node); + + if (!port_num) + port_num = 1; + + if (port_num >= 15) + return -EINVAL; + + ports = 
devm_kzalloc(sim_dev->dev, + port_num * sizeof(*ports), GFP_KERNEL); + if (!ports) + return -ENOMEM; + + sim_dev->ports = ports; + sim_dev->port_num = port_num; + sim_dev->current_port_num = port_num; + + i = 0; + for_each_child_of_node(of_node, node) { + data = of_get_property(node, "qcom,edid", &len); + + if (data) + rc = dp_sim_parse_edid_from_data(sim_dev, i, + data, len); + else + rc = dp_sim_parse_edid_from_node(sim_dev, i, + node); + + if (rc) + return rc; + + i++; + } + + if (i == 0) + memcpy(ports, &output_port, sizeof(*ports)); + + return 0; +} + +static int dp_sim_parse_dpcd(struct dp_sim_device *sim_dev) +{ + struct device_node *node = sim_dev->bridge.of_node; + u32 val, i; + const __be32 *arr; + int rc; + + rc = of_property_read_u32(node, "qcom,dpcd-max-rate", &val); + if (!rc) + sim_dev->dpcd_reg[DP_MAX_LINK_RATE] = val; + + rc = of_property_read_u32(node, "qcom,dpcd-max-lane", &val); + if (!rc) + sim_dev->dpcd_reg[DP_MAX_LANE_COUNT] = val; + + rc = of_property_read_u32(node, "qcom,dpcd-mst", &val); + if (!rc) + sim_dev->dpcd_reg[DP_MSTM_CAP] = val; + + arr = of_get_property(node, "qcom,dpcd-regs", &val); + if (arr) { + val /= sizeof(u32); + val &= ~0x1; + for (i = 0; i < val; i += 2) + dp_sim_write_dpcd(sim_dev, + be32_to_cpu(arr[i]), + be32_to_cpu(arr[i+1])); + } + + rc = of_property_read_u32(node, "qcom,voltage-swing", &val); + if (!rc) + for (i = 0; i < 4; i++) { + sim_dev->dpcd_reg[DP_TRAINING_LANE0_SET + i] |= + val; + sim_dev->dpcd_reg[DP_ADJUST_REQUEST_LANE0_1 + (i/2)] |= + (val & 0x3) << ((i & 0x1) << 2); + } + + rc = of_property_read_u32(node, "qcom,pre-emphasis", &val); + if (!rc) + for (i = 0; i < 4; i++) { + sim_dev->dpcd_reg[DP_TRAINING_LANE0_SET + i] |= + val << 3; + sim_dev->dpcd_reg[DP_ADJUST_REQUEST_LANE0_1 + (i/2)] |= + (val & 0x3) << (((i & 0x1) << 2) + 2); + } + + rc = of_property_read_u32(node, "qcom,link-training-cnt", &val); + if (!rc) + sim_dev->link_training_cnt = val; + else + sim_dev->link_training_cnt = 0; + + return 0; +} 
+ +static int dp_sim_parse_misc(struct dp_sim_device *sim_dev) +{ + struct device_node *node = sim_dev->bridge.of_node; + + sim_dev->skip_edid = of_property_read_bool(node, + "qcom,skip-edid"); + + sim_dev->skip_dpcd = of_property_read_bool(node, + "qcom,skip-dpcd-read"); + + sim_dev->skip_link_training = of_property_read_bool(node, + "qcom,skip-link-training"); + + sim_dev->skip_config = of_property_read_bool(node, + "qcom,skip-dpcd-write"); + + sim_dev->skip_hpd = of_property_read_bool(node, + "qcom,skip-hpd"); + + sim_dev->skip_mst = of_property_read_bool(node, + "qcom,skip-mst"); + + DP_DEBUG("skip: edid=%d dpcd=%d LT=%d config=%d hpd=%d mst=%d\n", + sim_dev->skip_edid, + sim_dev->skip_dpcd, + sim_dev->skip_link_training, + sim_dev->skip_config, + sim_dev->skip_hpd, + sim_dev->skip_mst); + + return 0; +} + +static ssize_t dp_sim_debug_write_edid(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_sim_debug_edid_entry *entry = file->private_data; + struct dp_sim_device *debug; + struct dp_mst_sim_port *port; + u8 *buf = NULL, *buf_t = NULL; + const int char_to_nib = 2; + size_t edid_size = 0; + size_t size = 0, edid_buf_index = 0; + ssize_t rc = count; + + if (!entry) + return -ENODEV; + + debug = entry->sim_dev; + if (!debug || entry->index >= debug->port_num) + return -EINVAL; + + port = &debug->ports[entry->index]; + + mutex_lock(&debug->lock); + + if (*ppos) + goto bail; + + size = min_t(size_t, count, SZ_1K); + + buf = kzalloc(size, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(buf)) { + rc = -ENOMEM; + goto bail; + } + + if (copy_from_user(buf, user_buff, size)) + goto bail; + + edid_size = size / char_to_nib; + buf_t = buf; + + if (edid_size != port->edid_size) { + if (port->edid) + devm_kfree(debug->dev, (u8 *)port->edid); + + port->edid = devm_kzalloc(debug->dev, + edid_size, GFP_KERNEL); + if (!port->edid) { + rc = -ENOMEM; + goto bail; + } + port->edid_size = edid_size; + } + + while (edid_size--) { + char t[3]; + int 
d; + + memcpy(t, buf_t, sizeof(char) * char_to_nib); + t[char_to_nib] = '\0'; + + if (kstrtoint(t, 16, &d)) { + DP_ERR("kstrtoint error\n"); + goto bail; + } + + if (port->edid && (edid_buf_index < port->edid_size)) + ((u8 *)port->edid)[edid_buf_index++] = d; + + buf_t += char_to_nib; + } + + if (debug->skip_mst) + dp_mst_sim_update(debug->bridge.mst_ctx, + debug->port_num, debug->ports); + + debug->skip_edid = true; + +bail: + kfree(buf); + + mutex_unlock(&debug->lock); + return rc; +} + +static ssize_t dp_sim_debug_write_dpcd(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_sim_device *debug = file->private_data; + u8 *buf = NULL, *buf_t = NULL; + const int char_to_nib = 2; + size_t dpcd_size = 0; + size_t size = 0, dpcd_buf_index = 0; + ssize_t rc = count; + char offset_ch[5]; + u32 offset, data_len; + + if (!debug) + return -ENODEV; + + mutex_lock(&debug->lock); + + if (*ppos) + goto bail; + + size = min_t(size_t, count, SZ_2K); + if (size < 4) + goto bail; + + buf = kzalloc(size, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(buf)) { + rc = -ENOMEM; + goto bail; + } + + if (copy_from_user(buf, user_buff, size)) + goto bail; + + memcpy(offset_ch, buf, 4); + offset_ch[4] = '\0'; + + if (kstrtoint(offset_ch, 16, &offset)) { + DP_ERR("offset kstrtoint error\n"); + goto bail; + } + + if (offset == 0xFFFF) { + DP_ERR("clearing dpcd\n"); + memset(debug->dpcd_reg, 0, sizeof(debug->dpcd_reg)); + goto bail; + } + + size -= 4; + if (size == 0) + goto bail; + + dpcd_size = size / char_to_nib; + data_len = dpcd_size; + buf_t = buf + 4; + + dpcd_buf_index = offset; + + while (dpcd_size--) { + char t[3]; + int d; + + memcpy(t, buf_t, sizeof(char) * char_to_nib); + t[char_to_nib] = '\0'; + + if (kstrtoint(t, 16, &d)) { + DP_ERR("kstrtoint error\n"); + goto bail; + } + + dp_sim_write_dpcd(debug, dpcd_buf_index, d); + dpcd_buf_index++; + + buf_t += char_to_nib; + } + + debug->skip_dpcd = true; + debug->skip_config = true; + +bail: + kfree(buf); 
+ + mutex_unlock(&debug->lock); + return rc; +} + +static ssize_t dp_sim_debug_read_dpcd(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_sim_device *debug = file->private_data; + char *buf; + int const buf_size = SZ_4K; + u32 offset = 0; + u32 len = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&debug->lock); + len += snprintf(buf, buf_size, "0x%x", debug->dpcd_write_addr); + + while (1) { + if (debug->dpcd_write_addr + offset >= buf_size || + offset >= debug->dpcd_write_size) + break; + + len += snprintf(buf + len, buf_size - len, "0x%x", + debug->dpcd_reg[debug->dpcd_write_addr + offset++]); + } + + mutex_unlock(&debug->lock); + len = min_t(size_t, count, len); + if (!copy_to_user(user_buff, buf, len)) + *ppos += len; + + kfree(buf); + return len; +} + +static ssize_t dp_sim_debug_write_hpd(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_sim_device *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + int hpd = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &hpd) != 0) + goto end; + + mutex_lock(&debug->lock); + if (debug->hpd_cb) + debug->hpd_cb(debug->host_dev, !!hpd, false); + mutex_unlock(&debug->lock); + +end: + return len; +} + +static ssize_t dp_sim_debug_write_skip_link_training(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_sim_device *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + int skip_lk, lk_cnt; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + buf[len] = '\0'; + + if (sscanf(buf, "%d %u", &skip_lk, &lk_cnt) != 2) 
{ + DP_ERR("invalid input\n"); + return -EINVAL; + } + + mutex_lock(&debug->lock); + debug->skip_link_training = !!skip_lk; + debug->link_training_cnt = lk_cnt; + mutex_unlock(&debug->lock); +end: + return len; +} + +static ssize_t dp_sim_debug_write_skip_edid(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_sim_device *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + int val = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &val) != 0) + goto end; + + mutex_lock(&debug->lock); + debug->skip_edid = !!val; + mutex_unlock(&debug->lock); +end: + return len; +} + +static ssize_t dp_sim_debug_write_skip_dpcd(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_sim_device *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + int val = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &val) != 0) + goto end; + + mutex_lock(&debug->lock); + debug->skip_dpcd = !!val; + mutex_unlock(&debug->lock); +end: + return len; +} + +static ssize_t dp_sim_debug_write_skip_config(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_sim_device *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + int val = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &val) != 0) + goto end; + + mutex_lock(&debug->lock); + debug->skip_config = !!val; + mutex_unlock(&debug->lock); +end: + return len; +} + +static ssize_t dp_sim_debug_write_mst_hpd(struct file 
*file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_sim_debug_edid_entry *entry = file->private_data; + struct dp_sim_device *debug; + char buf[SZ_8]; + size_t len = 0; + int hpd = 0; + + if (!entry) + return -ENODEV; + + debug = entry->sim_dev; + if (!debug || entry->index >= debug->port_num) + return -EINVAL; + + if (*ppos) + return 0; + + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + goto end; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &hpd) != 0) + goto end; + + mutex_lock(&debug->lock); + dp_sim_update_port_status(&debug->bridge, + entry->index, hpd ? + connector_status_connected : + connector_status_disconnected); + mutex_unlock(&debug->lock); + +end: + return len; +} + +static const struct file_operations sim_edid_fops = { + .open = simple_open, + .write = dp_sim_debug_write_edid, +}; + +static const struct file_operations sim_mst_hpd_fops = { + .open = simple_open, + .write = dp_sim_debug_write_mst_hpd, +}; + +static ssize_t dp_sim_debug_write_mst_mode(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_sim_device *debug = file->private_data; + char buf[SZ_16]; + size_t len = 0; + int mst_sideband_mode = 0; + u32 mst_port_cnt = 0; + u32 mst_old_port_cnt; + struct dp_sim_debug_edid_entry *edid_entry; + u8 *edid; + u32 i, rc; + + if (!debug) + return -ENODEV; + + /* Leave room for termination char */ + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + return -EFAULT; + + buf[len] = '\0'; + + if (sscanf(buf, "%d %u", &mst_sideband_mode, &mst_port_cnt) != 2) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + if (mst_port_cnt >= MAX_MST_PORT) { + DP_ERR("port cnt:%d exceeding max:%d\n", mst_port_cnt, + MAX_MST_PORT); + return -EINVAL; + } + + mutex_lock(&debug->lock); + + if (!mst_port_cnt) + mst_port_cnt = 1; + + debug->skip_mst = !mst_sideband_mode; + DP_DEBUG("mst_sideband_mode: %d port_cnt:%d\n", + 
mst_sideband_mode, mst_port_cnt); + + mst_old_port_cnt = debug->port_num; + rc = dp_sim_update_port_num(&debug->bridge, mst_port_cnt); + if (rc) + goto bail; + + /* write mst */ + dp_sim_write_dpcd(debug, DP_MSTM_CAP, debug->skip_mst); + + /* create default edid nodes */ + for (i = mst_old_port_cnt; i < mst_port_cnt; i++) { + edid_entry = devm_kzalloc(debug->dev, + sizeof(*edid_entry), GFP_KERNEL); + if (!edid_entry) + continue; + + edid_entry->index = i; + edid_entry->sim_dev = debug; + scnprintf(buf, sizeof(buf), "edid-%d", i); + debugfs_create_file(buf, + 0444, + debug->debugfs_edid_dir, + edid_entry, + &sim_edid_fops); + scnprintf(buf, sizeof(buf), "hpd-%d", i); + debugfs_create_file(buf, + 0444, + debug->debugfs_edid_dir, + edid_entry, + &sim_mst_hpd_fops); + + if (!debug->ports[0].edid_size) + continue; + + edid = devm_kzalloc(debug->dev, + debug->ports[0].edid_size, GFP_KERNEL); + if (!edid) { + rc = -ENOMEM; + goto bail; + } + + memcpy(edid, debug->ports[0].edid, debug->ports[0].edid_size); + debug->ports[i].edid = edid; + debug->ports[i].edid_size = debug->ports[0].edid_size; + } + + rc = count; +bail: + mutex_unlock(&debug->lock); + + return rc; +} + +static const struct file_operations sim_dpcd_fops = { + .open = simple_open, + .write = dp_sim_debug_write_dpcd, + .read = dp_sim_debug_read_dpcd, +}; + +static const struct file_operations sim_hpd_fops = { + .open = simple_open, + .write = dp_sim_debug_write_hpd, +}; + +static const struct file_operations sim_skip_link_training_fops = { + .open = simple_open, + .write = dp_sim_debug_write_skip_link_training, +}; + +static const struct file_operations sim_skip_edid_fops = { + .open = simple_open, + .write = dp_sim_debug_write_skip_edid, +}; + +static const struct file_operations sim_skip_dpcd_fops = { + .open = simple_open, + .write = dp_sim_debug_write_skip_dpcd, +}; + +static const struct file_operations sim_skip_config_fops = { + .open = simple_open, + .write = dp_sim_debug_write_skip_config, +}; + 
+static const struct file_operations sim_mst_mode_fops = { + .open = simple_open, + .write = dp_sim_debug_write_mst_mode, +}; + +static int dp_sim_debug_init(struct dp_sim_device *sim_dev) +{ + struct dp_sim_debug_edid_entry *edid_entry; + struct dentry *dir, *file, *edid_dir; + char name[SZ_16]; + int rc = 0, i; + + if (!sim_dev->label) + return 0; + + dir = debugfs_create_dir(sim_dev->label, NULL); + if (IS_ERR_OR_NULL(dir)) { + rc = PTR_ERR(dir); + DP_ERR("[%s] debugfs create dir failed, rc = %d\n", + sim_dev->label, rc); + goto error; + } + + edid_dir = debugfs_create_dir("mst_edid", dir); + if (IS_ERR_OR_NULL(edid_dir)) { + rc = PTR_ERR(edid_dir); + DP_ERR("[%s] debugfs create dir failed, rc = %d\n", + sim_dev->label, rc); + goto error_remove_dir; + } + + for (i = 0; i < sim_dev->port_num; i++) { + edid_entry = devm_kzalloc(sim_dev->dev, + sizeof(*edid_entry), GFP_KERNEL); + edid_entry->index = i; + edid_entry->sim_dev = sim_dev; + scnprintf(name, sizeof(name), "edid-%d", i); + file = debugfs_create_file(name, + 0444, + edid_dir, + edid_entry, + &sim_edid_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs create edid failed, rc=%d\n", + sim_dev->label, rc); + goto error_remove_dir; + } + scnprintf(name, sizeof(name), "hpd-%d", i); + file = debugfs_create_file(name, + 0444, + edid_dir, + edid_entry, + &sim_mst_hpd_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs create hpd failed, rc=%d\n", + sim_dev->label, rc); + goto error_remove_dir; + } + } + + file = debugfs_create_symlink("edid", dir, "./mst_edid/edid-0"); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs create edid link failed, rc=%d\n", + sim_dev->label, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("dpcd", + 0444, + dir, + sim_dev, + &sim_dpcd_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs create failed, rc=%d\n", + sim_dev->label, rc); + goto 
error_remove_dir; + } + + file = debugfs_create_file("hpd", + 0444, + dir, + sim_dev, + &sim_hpd_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs create failed, rc=%d\n", + sim_dev->label, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("skip_link_training", + 0444, + dir, + sim_dev, + &sim_skip_link_training_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs create failed, rc=%d\n", + sim_dev->label, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("skip_edid", + 0444, + dir, + sim_dev, + &sim_skip_edid_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs create failed, rc=%d\n", + sim_dev->label, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("skip_dpcd_read", + 0444, + dir, + sim_dev, + &sim_skip_dpcd_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs create failed, rc=%d\n", + sim_dev->label, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("skip_dpcd_write", + 0444, + dir, + sim_dev, + &sim_skip_config_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs create failed, rc=%d\n", + sim_dev->label, rc); + goto error_remove_dir; + } + + file = debugfs_create_file("mst_sideband_mode", + 0444, + dir, + sim_dev, + &sim_mst_mode_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + DP_ERR("[%s] debugfs create failed, rc=%d\n", + sim_dev->label, rc); + goto error_remove_dir; + } + + sim_dev->debugfs_dir = dir; + sim_dev->debugfs_edid_dir = edid_dir; + + return 0; + +error_remove_dir: + debugfs_remove_recursive(dir); +error: + return rc; +} + +static int dp_sim_parse(struct dp_sim_device *sim_dev) +{ + int rc; + + sim_dev->label = of_get_property(sim_dev->bridge.of_node, + "label", NULL); + + rc = dp_sim_parse_dpcd(sim_dev); + if (rc) { + DP_ERR("failed to parse DPCD nodes\n"); + return rc; + } + + rc = dp_sim_parse_edid(sim_dev); + if (rc) { + 
DP_ERR("failed to parse EDID nodes\n"); + return rc; + } + + rc = dp_sim_parse_misc(sim_dev); + if (rc) { + DP_ERR("failed to parse misc nodes\n"); + return rc; + } + + return 0; +} + +int dp_sim_create_bridge(struct device *dev, struct dp_aux_bridge **bridge) +{ + struct dp_sim_device *dp_sim_dev; + struct dp_mst_sim_cfg cfg; + int ret; + + dp_sim_dev = devm_kzalloc(dev, sizeof(*dp_sim_dev), GFP_KERNEL); + if (!dp_sim_dev) + return -ENOMEM; + + dp_sim_dev->dev = dev; + dp_sim_dev->bridge.of_node = dev->of_node; + dp_sim_dev->bridge.register_hpd = dp_sim_register_hpd; + dp_sim_dev->bridge.transfer = dp_sim_transfer; + dp_sim_dev->bridge.dev_priv = dp_sim_dev; + dp_sim_dev->bridge.flag = DP_AUX_BRIDGE_MST | DP_SIM_BRIDGE_PRIV_FLAG; + INIT_LIST_HEAD(&dp_sim_dev->dpcd_reg_list); + mutex_init(&dp_sim_dev->lock); + + memset(&cfg, 0, sizeof(cfg)); + cfg.host_dev = dp_sim_dev; + cfg.host_hpd_irq = dp_sim_host_hpd_irq; + + ret = dp_mst_sim_create(&cfg, &dp_sim_dev->bridge.mst_ctx); + if (ret) { + devm_kfree(dev, dp_sim_dev); + return ret; + } + + /* default dpcd reg value */ + dp_sim_dev->dpcd_reg[DP_DPCD_REV] = 0x12; + dp_sim_dev->dpcd_reg[DP_MAX_LINK_RATE] = 0x14; + dp_sim_dev->dpcd_reg[DP_MAX_LANE_COUNT] = 0xc4; + dp_sim_dev->dpcd_reg[DP_SINK_COUNT] = 0x1; + dp_sim_dev->dpcd_reg[DP_LANE0_1_STATUS] = 0x77; + dp_sim_dev->dpcd_reg[DP_LANE2_3_STATUS] = 0x77; + dp_sim_dev->dpcd_reg[DP_LANE_ALIGN_STATUS_UPDATED] = 0x1; + dp_sim_dev->dpcd_reg[DP_SINK_STATUS] = 0x3; + dp_sim_dev->dpcd_reg[DP_PAYLOAD_TABLE_UPDATE_STATUS] = 0x3; + + /* enable link training by default */ + dp_sim_dev->skip_link_training = true; + dp_sim_dev->link_training_cnt = (u32)-1; + + *bridge = &dp_sim_dev->bridge; + return 0; +} + +int dp_sim_destroy_bridge(struct dp_aux_bridge *bridge) +{ + struct dp_sim_device *dp_sim_dev; + struct dp_sim_dpcd_reg *reg, *p; + + if (!bridge || !(bridge->flag & DP_SIM_BRIDGE_PRIV_FLAG)) + return -EINVAL; + + dp_sim_dev = to_dp_sim_dev(bridge); + + 
dp_mst_sim_destroy(dp_sim_dev->bridge.mst_ctx); + + list_for_each_entry_safe(reg, p, &dp_sim_dev->dpcd_reg_list, head) { + list_del(&reg->head); + devm_kfree(dp_sim_dev->dev, reg); + } + + if (dp_sim_dev->ports) + devm_kfree(dp_sim_dev->dev, dp_sim_dev->ports); + + devm_kfree(dp_sim_dev->dev, dp_sim_dev); + + return 0; +} + +int dp_sim_probe(struct platform_device *pdev) +{ + struct dp_sim_device *dp_sim_dev; + struct dp_aux_bridge *bridge; + int ret; + + ret = dp_sim_create_bridge(&pdev->dev, &bridge); + if (ret) + return ret; + + dp_sim_dev = to_dp_sim_dev(bridge); + + ret = dp_sim_parse(dp_sim_dev); + if (ret) + goto fail; + + if (dp_sim_dev->skip_hpd) + dp_sim_dev->bridge.flag |= DP_AUX_BRIDGE_HPD; + + ret = dp_mst_sim_update(dp_sim_dev->bridge.mst_ctx, + dp_sim_dev->port_num, dp_sim_dev->ports); + if (ret) + goto fail; + + ret = dp_sim_debug_init(dp_sim_dev); + if (ret) + goto fail; + + ret = dp_aux_add_bridge(&dp_sim_dev->bridge); + if (ret) + goto fail; + + platform_set_drvdata(pdev, dp_sim_dev); + + return 0; + +fail: + dp_sim_destroy_bridge(bridge); + return ret; +} + +int dp_sim_remove(struct platform_device *pdev) +{ + struct dp_sim_device *dp_sim_dev; + + dp_sim_dev = platform_get_drvdata(pdev); + if (!dp_sim_dev) + return 0; + + debugfs_remove_recursive(dp_sim_dev->debugfs_dir); + + dp_sim_destroy_bridge(&dp_sim_dev->bridge); + + return 0; +} diff --git a/msm/dp/dp_mst_sim.h b/msm/dp/dp_mst_sim.h new file mode 100644 index 000000000..d652f29da --- /dev/null +++ b/msm/dp/dp_mst_sim.h @@ -0,0 +1,51 @@ +/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DP_MST_SIM_H_ +#define _DP_MST_SIM_H_ + +#include "dp_aux_bridge.h" +#include "dp_mst_sim_helper.h" +#include +#include + +enum dp_sim_mode_type { + DP_SIM_MODE_EDID = 0x00000001, + DP_SIM_MODE_DPCD_READ = 0x00000002, + DP_SIM_MODE_DPCD_WRITE = 0x00000004, + DP_SIM_MODE_LINK_TRAIN = 0x00000008, + DP_SIM_MODE_MST = 0x00000010, + DP_SIM_MODE_ALL = 0x0000001F, +}; + +int dp_sim_create_bridge(struct device *dev, + struct dp_aux_bridge **bridge); + +int dp_sim_destroy_bridge(struct dp_aux_bridge *bridge); + +int dp_sim_set_sim_mode(struct dp_aux_bridge *bridge, u32 sim_mode); + +int dp_sim_update_port_num(struct dp_aux_bridge *bridge, u32 port_num); + +int dp_sim_update_port_status(struct dp_aux_bridge *bridge, + int port, enum drm_connector_status status); + +int dp_sim_update_port_edid(struct dp_aux_bridge *bridge, + int port, const u8 *edid, u32 size); + +int dp_sim_write_dpcd_reg(struct dp_aux_bridge *bridge, + const u8 *dpcd, u32 size, u32 offset); + +int dp_sim_read_dpcd_reg(struct dp_aux_bridge *bridge, + u8 *dpcd, u32 size, u32 offset); + +#endif /* _DP_MST_SIM_H_ */ diff --git a/msm/dp/dp_mst_sim_helper.c b/msm/dp/dp_mst_sim_helper.c new file mode 100644 index 000000000..0b92c648b --- /dev/null +++ b/msm/dp/dp_mst_sim_helper.c @@ -0,0 +1,1193 @@ +/* + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* + * Copyright © 2014 Red Hat + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)) +#include +#else +#include +#endif +#include "dp_mst_sim_helper.h" +#include "dp_debug.h" + +#define DP_MST_DEBUG(fmt, ...) DP_DEBUG(fmt, ##__VA_ARGS__) +#define DP_MST_INFO(fmt, ...) DP_INFO(fmt, ##__VA_ARGS__) +#define DP_MST_DEBUG_V(fmt, ...) DP_DEBUG_V(fmt, ##__VA_ARGS__) +#define DP_MST_INFO_V(fmt, ...) 
DP_INFO_V(fmt, ##__VA_ARGS__) + +#define DDC_SEGMENT_ADDR 0x30 + +struct dp_mst_sim_context { + void *host_dev; + void (*host_hpd_irq)(void *host_dev); + void (*host_req)(void *host_dev, const u8 *in, int in_size, + u8 *out, int *out_size); + + struct dp_mst_sim_port *ports; + u32 port_num; + + struct drm_dp_sideband_msg_rx down_req; + struct drm_dp_sideband_msg_rx down_rep; + + struct mutex session_lock; + struct completion session_comp; + struct workqueue_struct *wq; + int reset_cnt; + + u8 esi[16]; + u8 guid[16]; + u8 dpcd[1024]; +}; + +struct dp_mst_sim_work { + struct work_struct base; + struct dp_mst_sim_context *ctx; + unsigned int address; + u8 buffer[256]; + size_t size; +}; + +struct dp_mst_notify_work { + struct work_struct base; + struct dp_mst_sim_context *ctx; + u32 port_mask; +}; + +#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) +static void dp_sideband_hex_dump(const char *name, + u32 address, u8 *buffer, size_t size) +{ + char prefix[64]; + int i, linelen, remaining = size; + const int rowsize = 16; + u8 linebuf[64]; + + snprintf(prefix, sizeof(prefix), "%s(%d) %4xh(%2zu): ", + name, current->pid, address, size); + + for (i = 0; i < size; i += rowsize) { + linelen = min(remaining, rowsize); + remaining -= rowsize; + + hex_dump_to_buffer(buffer + i, linelen, rowsize, 1, + linebuf, sizeof(linebuf), false); + + DP_MST_DEBUG_V("%s%s\n", prefix, linebuf); + } +} +#else +static void dp_sideband_hex_dump(const char *name, + u32 address, u8 *buffer, size_t size) +{ +} +#endif /* CONFIG_DYNAMIC_DEBUG */ + +static u8 dp_mst_sim_msg_header_crc4(const uint8_t *data, size_t num_nibbles) +{ + u8 bitmask = 0x80; + u8 bitshift = 7; + u8 array_index = 0; + int number_of_bits = num_nibbles * 4; + u8 remainder = 0; + + while (number_of_bits != 0) { + number_of_bits--; + remainder <<= 1; + remainder |= (data[array_index] & bitmask) >> bitshift; + bitmask >>= 1; + bitshift--; + if (bitmask == 0) { + bitmask = 0x80; + bitshift = 7; + array_index++; + } + if ((remainder & 0x10) == 
0x10) + remainder ^= 0x13; + } + + number_of_bits = 4; + while (number_of_bits != 0) { + number_of_bits--; + remainder <<= 1; + if ((remainder & 0x10) != 0) + remainder ^= 0x13; + } + + return remainder; +} + +static u8 dp_mst_sim_msg_data_crc4(const uint8_t *data, u8 number_of_bytes) +{ + u8 bitmask = 0x80; + u8 bitshift = 7; + u8 array_index = 0; + int number_of_bits = number_of_bytes * 8; + u16 remainder = 0; + + while (number_of_bits != 0) { + number_of_bits--; + remainder <<= 1; + remainder |= (data[array_index] & bitmask) >> bitshift; + bitmask >>= 1; + bitshift--; + if (bitmask == 0) { + bitmask = 0x80; + bitshift = 7; + array_index++; + } + if ((remainder & 0x100) == 0x100) + remainder ^= 0xd5; + } + + number_of_bits = 8; + while (number_of_bits != 0) { + number_of_bits--; + remainder <<= 1; + if ((remainder & 0x100) != 0) + remainder ^= 0xd5; + } + + return remainder & 0xff; +} + +static bool dp_mst_sim_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr, + u8 *buf, int buflen, u8 *hdrlen) +{ + u8 crc4; + u8 len; + int i; + u8 idx; + + if (buf[0] == 0) + return false; + len = 3; + len += ((buf[0] & 0xf0) >> 4) / 2; + if (len > buflen) + return false; + crc4 = dp_mst_sim_msg_header_crc4(buf, (len * 2) - 1); + + if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) { + DP_MST_DEBUG("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]); + return false; + } + + hdr->lct = (buf[0] & 0xf0) >> 4; + hdr->lcr = (buf[0] & 0xf); + idx = 1; + for (i = 0; i < (hdr->lct / 2); i++) + hdr->rad[i] = buf[idx++]; + hdr->broadcast = (buf[idx] >> 7) & 0x1; + hdr->path_msg = (buf[idx] >> 6) & 0x1; + hdr->msg_len = buf[idx] & 0x3f; + idx++; + hdr->somt = (buf[idx] >> 7) & 0x1; + hdr->eomt = (buf[idx] >> 6) & 0x1; + hdr->seqno = (buf[idx] >> 4) & 0x1; + idx++; + *hdrlen = idx; + return true; +} + +static bool dp_mst_sim_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg, + u8 *replybuf, u8 replybuflen, bool hdr) +{ + int ret; + u8 crc4; + + if (hdr) { + u8 hdrlen; + struct 
drm_dp_sideband_msg_hdr recv_hdr; + + ret = dp_mst_sim_decode_sideband_msg_hdr(&recv_hdr, + replybuf, replybuflen, &hdrlen); + if (ret == false) + return false; + + /* + * ignore out-of-order messages or messages that are part of a + * failed transaction + */ + if (!recv_hdr.somt && !msg->have_somt) + return false; + + /* get length contained in this portion */ + msg->curchunk_len = recv_hdr.msg_len; + msg->curchunk_hdrlen = hdrlen; + + /* we have already gotten an somt - don't bother parsing */ + if (recv_hdr.somt && msg->have_somt) + return false; + + if (recv_hdr.somt) { + memcpy(&msg->initial_hdr, &recv_hdr, + sizeof(struct drm_dp_sideband_msg_hdr)); + msg->have_somt = true; + } + if (recv_hdr.eomt) + msg->have_eomt = true; + + /* copy the bytes for the remainder of this header chunk */ + msg->curchunk_idx = min(msg->curchunk_len, + (u8)(replybuflen - hdrlen)); + memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx); + } else { + memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen); + msg->curchunk_idx += replybuflen; + } + + if (msg->curchunk_idx >= msg->curchunk_len) { + /* do CRC */ + crc4 = dp_mst_sim_msg_data_crc4(msg->chunk, msg->curchunk_len - 1); + /* copy chunk into bigger msg */ + memcpy(&msg->msg[msg->curlen], msg->chunk, + msg->curchunk_len - 1); + msg->curlen += msg->curchunk_len - 1; + } + return true; +} + +static void dp_mst_sim_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr, + u8 *buf, int *len) +{ + int idx = 0; + int i; + u8 crc4; + + buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf); + for (i = 0; i < (hdr->lct / 2); i++) + buf[idx++] = hdr->rad[i]; + buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) | + (hdr->msg_len & 0x3f); + buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4); + + crc4 = dp_mst_sim_msg_header_crc4(buf, (idx * 2) - 1); + buf[idx - 1] |= (crc4 & 0xf); + + *len = idx; +} + +static bool dp_get_one_sb_msg(struct drm_dp_sideband_msg_rx *msg, + struct drm_dp_aux_msg 
*aux_msg) +{ + int ret; + + if (!msg->have_somt) { + ret = dp_mst_sim_sideband_msg_build(msg, + aux_msg->buffer, aux_msg->size, true); + if (!ret) { + DP_ERR("sideband hdr build failed\n"); + return false; + } + } else { + ret = dp_mst_sim_sideband_msg_build(msg, + aux_msg->buffer, aux_msg->size, false); + if (!ret) { + DP_ERR("sideband msg build failed\n"); + return false; + } + } + + return true; +} + +static int dp_sideband_build_nak_rep( + struct dp_mst_sim_context *ctx) +{ + struct drm_dp_sideband_msg_rx *msg = &ctx->down_req; + u8 *buf = ctx->down_rep.msg; + int idx = 0; + + buf[idx] = msg->msg[0] | 0x80; + idx++; + + memcpy(&buf[idx], ctx->guid, 16); + idx += 16; + + buf[idx] = 0x4; + idx++; + + buf[idx] = 0; + idx++; + + return idx; +} + + +static int dp_sideband_build_link_address_rep( + struct dp_mst_sim_context *ctx) +{ + struct dp_mst_sim_port *port; + u8 *buf = ctx->down_rep.msg; + int idx = 0; + u32 i, tmp; + + buf[idx] = DP_LINK_ADDRESS; + idx++; + + memcpy(&buf[idx], ctx->guid, 16); + idx += 16; + + buf[idx] = ctx->port_num; + idx++; + + for (i = 0; i < ctx->port_num; i++) { + port = &ctx->ports[i]; + + tmp = 0; + if (port->input) + tmp |= 0x80; + tmp |= port->pdt << 4; + tmp |= i & 0xF; + buf[idx] = tmp; + idx++; + + tmp = 0; + if (port->mcs) + tmp |= 0x80; + if (port->ddps) + tmp |= 0x40; + + if (port->input) { + buf[idx] = tmp; + idx++; + continue; + } + + if (port->ldps) + tmp |= 0x20; + buf[idx] = tmp; + idx++; + + buf[idx] = port->dpcd_rev; + idx++; + + memcpy(&buf[idx], port->peer_guid, 16); + idx += 16; + + buf[idx] = (port->num_sdp_streams << 4) | + (port->num_sdp_stream_sinks); + idx++; + } + + return idx; +} + +static int dp_sideband_build_remote_i2c_read_rep( + struct dp_mst_sim_context *ctx) +{ + struct dp_mst_sim_port *port; + struct drm_dp_remote_i2c_read i2c_read; + u8 *buf; + int idx; + u32 i, start, len; + + buf = ctx->down_req.msg; + idx = 1; + + i2c_read.num_transactions = buf[idx] & 0x3; + i2c_read.port_number = buf[idx] >> 4; + 
idx++; + + if (i2c_read.port_number >= ctx->port_num) + goto err; + + for (i = 0; i < i2c_read.num_transactions; i++) { + i2c_read.transactions[i].i2c_dev_id = buf[idx] & 0x7f; + idx++; + + i2c_read.transactions[i].num_bytes = buf[idx]; + idx++; + + i2c_read.transactions[i].bytes = &buf[idx]; + idx += i2c_read.transactions[i].num_bytes; + + i2c_read.transactions[i].no_stop_bit = (buf[idx] >> 4) & 0x1; + i2c_read.transactions[i].i2c_transaction_delay = buf[idx] & 0xf; + idx++; + } + + i2c_read.read_i2c_device_id = buf[idx]; + idx++; + + i2c_read.num_bytes_read = buf[idx]; + idx++; + + port = &ctx->ports[i2c_read.port_number]; + + if (i2c_read.num_transactions == 1) { + if (i2c_read.transactions[0].i2c_dev_id != DDC_ADDR || + i2c_read.transactions[0].num_bytes != 1) { + DP_ERR("unsupported i2c address\n"); + goto err; + } + + start = i2c_read.transactions[0].bytes[0]; + } else if (i2c_read.num_transactions == 2) { + if (i2c_read.transactions[0].i2c_dev_id != DDC_SEGMENT_ADDR || + i2c_read.transactions[0].num_bytes != 1 || + i2c_read.transactions[1].i2c_dev_id != DDC_ADDR || + i2c_read.transactions[1].num_bytes != 1) { + DP_ERR("unsupported i2c address\n"); + goto err; + } + + start = i2c_read.transactions[0].bytes[0] * EDID_LENGTH * 2 + + i2c_read.transactions[1].bytes[0]; + } else { + DP_ERR("unsupported i2c transaction\n"); + goto err; + } + + len = i2c_read.num_bytes_read; + + if (start + len > port->edid_size) { + DP_ERR("edid data exceeds maximum\n"); + goto err; + } + + buf = ctx->down_rep.msg; + idx = 0; + + buf[idx] = DP_REMOTE_I2C_READ; + idx++; + + buf[idx] = i2c_read.port_number; + idx++; + + buf[idx] = len; + idx++; + + memcpy(&buf[idx], &port->edid[start], len); + idx += len; + + return idx; +err: + return dp_sideband_build_nak_rep(ctx); +} + +static int dp_sideband_build_enum_path_resources_rep( + struct dp_mst_sim_context *ctx) +{ + struct dp_mst_sim_port *port; + u8 port_num; + u8 *buf; + int idx; + + buf = ctx->down_req.msg; + port_num = buf[1] >> 4; 
+ + if (port_num >= ctx->port_num) { + DP_ERR("invalid port num\n"); + goto err; + } + + port = &ctx->ports[port_num]; + + buf = ctx->down_rep.msg; + idx = 0; + + buf[idx] = DP_ENUM_PATH_RESOURCES; + idx++; + + buf[idx] = port_num << 4; + idx++; + + buf[idx] = port->full_pbn >> 8; + idx++; + + buf[idx] = port->full_pbn & 0xFF; + idx++; + + buf[idx] = port->avail_pbn >> 8; + idx++; + + buf[idx] = port->avail_pbn & 0xFF; + idx++; + + return idx; +err: + return dp_sideband_build_nak_rep(ctx); +} + +static int dp_sideband_build_allocate_payload_rep( + struct dp_mst_sim_context *ctx) +{ + struct drm_dp_allocate_payload allocate_payload; + u8 *buf; + int idx; + u32 i; + + buf = ctx->down_req.msg; + idx = 1; + + allocate_payload.port_number = buf[idx] >> 4; + allocate_payload.number_sdp_streams = buf[idx] & 0xF; + idx++; + + allocate_payload.vcpi = buf[idx]; + idx++; + + allocate_payload.pbn = (buf[idx] << 8) | buf[idx+1]; + idx += 2; + + for (i = 0; i < allocate_payload.number_sdp_streams / 2; i++) { + allocate_payload.sdp_stream_sink[i * 2] = buf[idx] >> 4; + allocate_payload.sdp_stream_sink[i * 2 + 1] = buf[idx] & 0xf; + idx++; + } + if (allocate_payload.number_sdp_streams & 1) { + i = allocate_payload.number_sdp_streams - 1; + allocate_payload.sdp_stream_sink[i] = buf[idx] >> 4; + idx++; + } + + if (allocate_payload.port_number >= ctx->port_num) { + DP_ERR("invalid port num\n"); + goto err; + } + + buf = ctx->down_rep.msg; + idx = 0; + + buf[idx] = DP_ALLOCATE_PAYLOAD; + idx++; + + buf[idx] = allocate_payload.port_number; + idx++; + + buf[idx] = allocate_payload.vcpi; + idx++; + + buf[idx] = allocate_payload.pbn >> 8; + idx++; + + buf[idx] = allocate_payload.pbn & 0xFF; + idx++; + + return idx; +err: + return dp_sideband_build_nak_rep(ctx); +} + +static int dp_sideband_build_power_updown_phy_rep( + struct dp_mst_sim_context *ctx) +{ + u8 port_num; + u8 *buf; + int idx; + + buf = ctx->down_req.msg; + port_num = buf[1] >> 4; + + if (port_num >= ctx->port_num) { + 
DP_ERR("invalid port num\n"); + goto err; + } + + buf = ctx->down_rep.msg; + idx = 0; + + buf[idx] = ctx->down_req.msg[0]; + idx++; + + buf[idx] = port_num; + idx++; + + return idx; +err: + return dp_sideband_build_nak_rep(ctx); +} + +static int dp_sideband_build_clear_payload_id_table_rep( + struct dp_mst_sim_context *ctx) +{ + u8 *buf = ctx->down_rep.msg; + int idx = 0; + + buf[idx] = DP_CLEAR_PAYLOAD_ID_TABLE; + idx++; + + return idx; +} + +static int dp_sideband_build_connection_notify_req( + struct dp_mst_sim_context *ctx, int port_idx) +{ + struct dp_mst_sim_port *port = &ctx->ports[port_idx]; + u8 *buf = ctx->down_rep.msg; + int idx = 0; + + buf[idx] = DP_CONNECTION_STATUS_NOTIFY; + idx++; + + buf[idx] = port_idx << 4; + idx++; + + memcpy(&buf[idx], &port->peer_guid, 16); + idx += 16; + + buf[idx] = (port->ldps << 6) | + (port->ddps << 5) | + (port->mcs << 4) | + (port->input << 3) | + (port->pdt & 0x7); + idx++; + + return idx; +} + +static inline int dp_sideband_update_esi( + struct dp_mst_sim_context *ctx, u8 val) +{ + ctx->esi[0] = ctx->port_num; + ctx->esi[1] = val; + ctx->esi[2] = 0; + + return 0; +} + +static inline bool dp_sideband_pending_esi( + struct dp_mst_sim_context *ctx, u8 val) +{ + return !!(ctx->esi[1] & val); +} + +static int dp_mst_sim_clear_esi(struct dp_mst_sim_context *ctx, + struct drm_dp_aux_msg *msg) +{ + size_t i; + u8 old_esi = ctx->esi[1]; + u32 addr = msg->address - DP_SINK_COUNT_ESI; + + if (msg->size - addr >= 16) { + msg->reply = DP_AUX_NATIVE_REPLY_NACK; + return 0; + } + + mutex_lock(&ctx->session_lock); + + for (i = 0; i < msg->size; i++) + ctx->esi[addr + i] &= ~((u8 *)msg->buffer)[i]; + + if (old_esi != ctx->esi[1]) + complete(&ctx->session_comp); + + mutex_unlock(&ctx->session_lock); + + msg->reply = DP_AUX_NATIVE_REPLY_ACK; + return 0; +} + +static int dp_mst_sim_read_esi(struct dp_mst_sim_context *ctx, + struct drm_dp_aux_msg *msg) +{ + u32 addr = msg->address - DP_SINK_COUNT_ESI; + + if (msg->size - addr >= 16) { + 
msg->reply = DP_AUX_NATIVE_REPLY_NACK; + return 0; + } + + memcpy(msg->buffer, &ctx->esi[addr], msg->size); + msg->reply = DP_AUX_NATIVE_REPLY_ACK; + + return 0; +} + +static int dp_mst_sim_down_req_internal(struct dp_mst_sim_context *ctx, + struct drm_dp_aux_msg *aux_msg) +{ + struct drm_dp_sideband_msg_rx *msg = &ctx->down_req; + struct drm_dp_sideband_msg_hdr hdr; + bool seqno; + int ret, size, len, hdr_len; + + ret = dp_get_one_sb_msg(msg, aux_msg); + if (!ret) + return -EINVAL; + + if (!msg->have_eomt) + return 0; + + seqno = msg->initial_hdr.seqno; + + switch (msg->msg[0]) { + case DP_LINK_ADDRESS: + size = dp_sideband_build_link_address_rep(ctx); + break; + case DP_REMOTE_I2C_READ: + size = dp_sideband_build_remote_i2c_read_rep(ctx); + break; + case DP_ENUM_PATH_RESOURCES: + size = dp_sideband_build_enum_path_resources_rep(ctx); + break; + case DP_ALLOCATE_PAYLOAD: + size = dp_sideband_build_allocate_payload_rep(ctx); + break; + case DP_POWER_DOWN_PHY: + case DP_POWER_UP_PHY: + size = dp_sideband_build_power_updown_phy_rep(ctx); + break; + case DP_CLEAR_PAYLOAD_ID_TABLE: + size = dp_sideband_build_clear_payload_id_table_rep(ctx); + break; + default: + size = dp_sideband_build_nak_rep(ctx); + break; + } + + if (ctx->host_req) + ctx->host_req(ctx->host_dev, + ctx->down_req.msg, ctx->down_req.curlen, + ctx->down_rep.msg, &size); + + memset(msg, 0, sizeof(*msg)); + msg = &ctx->down_rep; + msg->curlen = 0; + + mutex_lock(&ctx->session_lock); + + while (msg->curlen < size) { + if (ctx->reset_cnt) + break; + + /* copy data */ + len = min(size - msg->curlen, 44); + memcpy(&ctx->dpcd[3], &msg->msg[msg->curlen], len); + msg->curlen += len; + + /* build header */ + memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr)); + hdr.broadcast = 0; + hdr.path_msg = 0; + hdr.lct = 1; + hdr.lcr = 0; + hdr.seqno = seqno; + hdr.msg_len = len + 1; + hdr.eomt = (msg->curlen == size); + hdr.somt = (msg->curlen == len); + dp_mst_sim_encode_sideband_msg_hdr(&hdr, ctx->dpcd, &hdr_len); 
+ + /* build crc */ + ctx->dpcd[len + 3] = dp_mst_sim_msg_data_crc4(&ctx->dpcd[3], len); + + /* update esi */ + dp_sideband_update_esi(ctx, DP_DOWN_REP_MSG_RDY); + + /* notify host */ + mutex_unlock(&ctx->session_lock); + ctx->host_hpd_irq(ctx->host_dev); + mutex_lock(&ctx->session_lock); + + /* wait until esi is cleared */ + while (dp_sideband_pending_esi(ctx, DP_DOWN_REP_MSG_RDY)) { + if (ctx->reset_cnt) + break; + mutex_unlock(&ctx->session_lock); + wait_for_completion(&ctx->session_comp); + mutex_lock(&ctx->session_lock); + } + } + + mutex_unlock(&ctx->session_lock); + + return 0; +} + +static void dp_mst_sim_down_req_work(struct work_struct *work) +{ + struct dp_mst_sim_work *sim_work = + container_of(work, struct dp_mst_sim_work, base); + struct drm_dp_aux_msg msg; + + msg.address = sim_work->address; + msg.buffer = sim_work->buffer; + msg.size = sim_work->size; + + dp_mst_sim_down_req_internal(sim_work->ctx, &msg); + + kfree(sim_work); +} + +static int dp_mst_sim_down_req(struct dp_mst_sim_context *ctx, + struct drm_dp_aux_msg *aux_msg) +{ + struct dp_mst_sim_work *work; + + if (aux_msg->size >= 256) { + aux_msg->reply = DP_AUX_NATIVE_REPLY_NACK; + return 0; + } + + dp_sideband_hex_dump("request", + aux_msg->address, aux_msg->buffer, aux_msg->size); + + work = kzalloc(sizeof(*work), GFP_KERNEL); + if (!work) { + aux_msg->reply = DP_AUX_NATIVE_REPLY_NACK; + return 0; + } + + work->ctx = ctx; + work->address = aux_msg->address; + work->size = aux_msg->size; + memcpy(work->buffer, aux_msg->buffer, aux_msg->size); + + INIT_WORK(&work->base, dp_mst_sim_down_req_work); + queue_work(ctx->wq, &work->base); + + aux_msg->reply = DP_AUX_NATIVE_REPLY_ACK; + return 0; +} + +static int dp_mst_sim_down_rep(struct dp_mst_sim_context *ctx, + struct drm_dp_aux_msg *msg) +{ + u32 addr = msg->address - DP_SIDEBAND_MSG_DOWN_REP_BASE; + + memcpy(msg->buffer, &ctx->dpcd[addr], msg->size); + msg->reply = DP_AUX_NATIVE_REPLY_ACK; + + dp_sideband_hex_dump("reply", + addr, 
msg->buffer, msg->size); + + return 0; +} + +static int dp_mst_sim_up_req(struct dp_mst_sim_context *ctx, + struct drm_dp_aux_msg *msg) +{ + u32 addr = msg->address - DP_SIDEBAND_MSG_UP_REQ_BASE; + + memcpy(msg->buffer, &ctx->dpcd[addr], msg->size); + msg->reply = DP_AUX_NATIVE_REPLY_ACK; + + dp_sideband_hex_dump("up_req", + addr, msg->buffer, msg->size); + + return 0; +} + +static void dp_mst_sim_reset_work(struct work_struct *work) +{ + struct dp_mst_notify_work *notify_work = + container_of(work, struct dp_mst_notify_work, base); + struct dp_mst_sim_context *ctx = notify_work->ctx; + + mutex_lock(&ctx->session_lock); + --ctx->reset_cnt; + reinit_completion(&ctx->session_comp); + mutex_unlock(&ctx->session_lock); +} + +static int dp_mst_sim_reset(struct dp_mst_sim_context *ctx, + struct drm_dp_aux_msg *msg) +{ + struct dp_mst_notify_work *work; + + if (!msg->size || ((u8 *)msg->buffer)[0]) + return msg->size; + + mutex_lock(&ctx->session_lock); + ++ctx->reset_cnt; + complete(&ctx->session_comp); + mutex_unlock(&ctx->session_lock); + + work = kzalloc(sizeof(*work), GFP_KERNEL); + if (!work) + return msg->size; + + work->ctx = ctx; + INIT_WORK(&work->base, dp_mst_sim_reset_work); + queue_work(ctx->wq, &work->base); + + return msg->size; +} + +int dp_mst_sim_transfer(void *mst_sim_context, struct drm_dp_aux_msg *msg) +{ + struct dp_mst_sim_context *ctx = mst_sim_context; + + if (!ctx || !ctx->port_num || !msg) + return -ENOENT; + + if (msg->request == DP_AUX_NATIVE_WRITE) { + if (msg->address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && + msg->address < DP_SIDEBAND_MSG_DOWN_REQ_BASE + 256) + return dp_mst_sim_down_req(mst_sim_context, msg); + + if (msg->address >= DP_SIDEBAND_MSG_UP_REP_BASE && + msg->address < DP_SIDEBAND_MSG_UP_REP_BASE + 256) + return 0; + + if (msg->address >= DP_SINK_COUNT_ESI && + msg->address < DP_SINK_COUNT_ESI + 14) + return dp_mst_sim_clear_esi(mst_sim_context, msg); + + if (msg->address == DP_MSTM_CTRL) + return dp_mst_sim_reset(mst_sim_context, 
msg); + + } else if (msg->request == DP_AUX_NATIVE_READ) { + if (msg->address >= DP_SIDEBAND_MSG_DOWN_REP_BASE && + msg->address < DP_SIDEBAND_MSG_DOWN_REP_BASE + 256) + return dp_mst_sim_down_rep(mst_sim_context, msg); + + if (msg->address >= DP_SIDEBAND_MSG_UP_REQ_BASE && + msg->address < DP_SIDEBAND_MSG_UP_REQ_BASE + 256) + return dp_mst_sim_up_req(mst_sim_context, msg); + + if (msg->address >= DP_SINK_COUNT_ESI && + msg->address < DP_SINK_COUNT_ESI + 14) + return dp_mst_sim_read_esi(mst_sim_context, msg); + } + + return -EINVAL; +} + +static void dp_mst_sim_up_req_work(struct work_struct *work) +{ + struct dp_mst_notify_work *notify_work = + container_of(work, struct dp_mst_notify_work, base); + struct dp_mst_sim_context *ctx = notify_work->ctx; + struct drm_dp_sideband_msg_rx *msg = &ctx->down_rep; + struct drm_dp_sideband_msg_hdr hdr; + int len, hdr_len, i; + + mutex_lock(&ctx->session_lock); + + for (i = 0; i < ctx->port_num; i++) { + if (ctx->reset_cnt) + break; + + if (!(notify_work->port_mask & (1 << i))) + continue; + + len = dp_sideband_build_connection_notify_req(ctx, i); + + /* copy data */ + memcpy(&ctx->dpcd[3], msg->msg, len); + + /* build header */ + memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr)); + hdr.broadcast = 0; + hdr.path_msg = 0; + hdr.lct = 1; + hdr.lcr = 0; + hdr.seqno = 0; + hdr.msg_len = len + 1; + hdr.eomt = 1; + hdr.somt = 1; + dp_mst_sim_encode_sideband_msg_hdr(&hdr, ctx->dpcd, &hdr_len); + + /* build crc */ + ctx->dpcd[len + 3] = dp_mst_sim_msg_data_crc4(&ctx->dpcd[3], len); + + /* update esi */ + dp_sideband_update_esi(ctx, DP_UP_REQ_MSG_RDY); + + /* notify host */ + mutex_unlock(&ctx->session_lock); + ctx->host_hpd_irq(ctx->host_dev); + mutex_lock(&ctx->session_lock); + + /* wait until esi is cleared */ + while (dp_sideband_pending_esi(ctx, DP_UP_REQ_MSG_RDY)) { + if (ctx->reset_cnt) + break; + mutex_unlock(&ctx->session_lock); + wait_for_completion(&ctx->session_comp); + mutex_lock(&ctx->session_lock); + } + } + + 
mutex_unlock(&ctx->session_lock); + + kfree(notify_work); +} + +static void dp_mst_sim_notify(struct dp_mst_sim_context *ctx, + u32 port_mask) +{ + struct dp_mst_notify_work *work; + + work = kzalloc(sizeof(*work), GFP_KERNEL); + if (!work) + return; + + work->ctx = ctx; + work->port_mask = port_mask; + + INIT_WORK(&work->base, dp_mst_sim_up_req_work); + queue_work(ctx->wq, &work->base); +} + +static void dp_mst_sim_free_ports(struct dp_mst_sim_context *ctx) +{ + u32 i; + + for (i = 0; i < ctx->port_num; i++) + kfree(ctx->ports[i].edid); + + kfree(ctx->ports); + ctx->ports = NULL; + ctx->port_num = 0; +} + +int dp_mst_sim_update(void *mst_sim_context, u32 port_num, + struct dp_mst_sim_port *ports) +{ + struct dp_mst_sim_context *ctx = mst_sim_context; + u8 *edid; + int rc = 0; + u32 update_mask = 0; + u32 i; + + if (!ctx || port_num >= 15 || !ports) + return -EINVAL; + + mutex_lock(&ctx->session_lock); + + /* get update mask */ + if (port_num && ctx->port_num == port_num) { + for (i = 0; i < port_num; i++) { + if (ports[i].pdt != ctx->ports[i].pdt || + ports[i].input != ctx->ports[i].input || + ports[i].ldps != ctx->ports[i].ldps || + ports[i].ddps != ctx->ports[i].ddps || + ports[i].mcs != ctx->ports[i].mcs) + update_mask |= (1 << i); + } + } + + dp_mst_sim_free_ports(ctx); + + if (!port_num) + goto end; + + ctx->ports = kcalloc(port_num, sizeof(*ports), GFP_KERNEL); + if (!ctx->ports) { + rc = -ENOMEM; + goto fail; + } + ctx->port_num = port_num; + + for (i = 0; i < port_num; i++) { + ctx->ports[i] = ports[i]; + if (ports[i].edid_size) { + if (!ports[i].edid) { + rc = -EINVAL; + goto fail; + } + + edid = kzalloc(ports[i].edid_size, + GFP_KERNEL); + if (!edid) { + rc = -ENOMEM; + goto fail; + } + + memcpy(edid, ports[i].edid, ports[i].edid_size); + ctx->ports[i].edid = edid; + } + } + +fail: + if (rc) + dp_mst_sim_free_ports(ctx); + +end: + mutex_unlock(&ctx->session_lock); + + if (update_mask) + dp_mst_sim_notify(ctx, update_mask); + + return rc; +} + +int 
dp_mst_sim_create(const struct dp_mst_sim_cfg *cfg, + void **mst_sim_context) +{ + struct dp_mst_sim_context *ctx; + + if (!cfg || !mst_sim_context) + return -EINVAL; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->host_dev = cfg->host_dev; + ctx->host_hpd_irq = cfg->host_hpd_irq; + ctx->host_req = cfg->host_req; + memcpy(ctx->guid, cfg->guid, 16); + + mutex_init(&ctx->session_lock); + init_completion(&ctx->session_comp); + + ctx->wq = create_singlethread_workqueue("dp_mst_sim"); + if (IS_ERR_OR_NULL(ctx->wq)) { + DP_ERR("Error creating wq\n"); + kfree(ctx); + return -EPERM; + } + + *mst_sim_context = ctx; + return 0; +} + +int dp_mst_sim_destroy(void *mst_sim_context) +{ + struct dp_mst_sim_context *ctx = mst_sim_context; + u32 i; + + if (!ctx) + return -EINVAL; + + for (i = 0; i < ctx->port_num; i++) + kfree(ctx->ports[i].edid); + kfree(ctx->ports); + + destroy_workqueue(ctx->wq); + + return 0; +} + diff --git a/msm/dp/dp_mst_sim_helper.h b/msm/dp/dp_mst_sim_helper.h new file mode 100644 index 000000000..b8ba83431 --- /dev/null +++ b/msm/dp/dp_mst_sim_helper.h @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* + * Copyright (c) 2014 Red Hat. 
+ * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef _DP_MST_SIM_HELPER_H_ +#define _DP_MST_SIM_HELPER_H_ + +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)) +#include +#else +#include +#endif + +/** + * struct dp_mst_sim_port - MST port configuration + * @input: if this port is an input port. + * @mcs: message capability status - DP 1.2 spec. + * @ddps: DisplayPort Device Plug Status - DP 1.2 + * @pdt: Peer Device Type + * @ldps: Legacy Device Plug Status + * @dpcd_rev: DPCD revision of device on this port + * @peer_guid: Peer GUID on this port + * @num_sdp_streams: Number of simultaneous streams + * @num_sdp_stream_sinks: Number of stream sinks + * @full_pbn: Full bandwidth for this port. + * @avail_pbn: Available bandwidth for this port. + * @edid: EDID data on this port. + * @edid_size: size of EDID data on this port. 
+ */ +struct dp_mst_sim_port { + bool input; + bool mcs; + bool ddps; + u8 pdt; + bool ldps; + u8 dpcd_rev; + u8 peer_guid[16]; + u8 num_sdp_streams; + u8 num_sdp_stream_sinks; + u16 full_pbn; + u16 avail_pbn; + const u8 *edid; + u32 edid_size; +}; + +/** + * struct dp_mst_sim_cfg - MST simulator configuration + * @host_dev: host device pointer used in callback functions + * @guid: GUID of the top MST branch. + */ +struct dp_mst_sim_cfg { + void *host_dev; + u8 guid[16]; + + /** + * @host_hpd_irq: + * + * This callback is invoked whenever simulator need to + * notify host that there is a HPD_IRQ. + * @host_dev: host_dev pointer + */ + void (*host_hpd_irq)(void *host_dev); + + /** + * @host_req: + * + * This callback is invoked whenever simulator's reply is ready + * to response downstream request. Host can use this function + * to replace the reply generated by simulator. + * @host_dev: host_dev pointer + * @in: pointer of downstream request buffer to simulator + * @in_size: size of downstream request buffer to simulator + * @out: pointer of downstream reply from simulator + * @out_size: pointer of size of downstream reply from simulator + * + * This callback is optional. 
+ */ + void (*host_req)(void *host_dev, const u8 *in, int in_size, + u8 *out, int *out_size); +}; + +/** + * dp_mst_sim_create - Create simulator context + * @cfg: see dp_mst_sim_cfg + * @mst_sim_context: simulator context returned + * return: 0 if successful + */ +int dp_mst_sim_create(const struct dp_mst_sim_cfg *cfg, + void **mst_sim_context); + +/** + * dp_mst_sim_destroy - Destroy simulator context + * @mst_sim_context: simulator context + * return: 0 if successful + */ +int dp_mst_sim_destroy(void *mst_sim_context); + +/** + * dp_mst_sim_transfer - Send aux message to simulator context + * @mst_sim_context: simulator context + * @msg: aux message + * return: 0 if successful + */ +int dp_mst_sim_transfer(void *mst_sim_context, struct drm_dp_aux_msg *msg); + +/** + * dp_mst_sim_update - Update port configuration + * @mst_sim_context: simulator context + * @port_num: number of ports + * @ports: ports configuration + * return: 0 if successful + */ +int dp_mst_sim_update(void *mst_sim_context, u32 port_num, + struct dp_mst_sim_port *ports); + +#endif /* _DP_MST_SIM_HELPER_H_ */ + diff --git a/msm/dp/dp_panel.c b/msm/dp/dp_panel.c new file mode 100644 index 000000000..cee530f48 --- /dev/null +++ b/msm/dp/dp_panel.c @@ -0,0 +1,3240 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
+ */ + +#include "dp_panel.h" +#include +#include +#include "dp_debug.h" +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0)) +#include +#else +#include +#endif +#include "sde_dsc_helper.h" +#include + +#define DP_KHZ_TO_HZ 1000 +#define DP_PANEL_DEFAULT_BPP 24 +#define DP_MAX_DS_PORT_COUNT 1 +#define DP_PANEL_MAX_SUPPORTED_BPP 30 + +#define DSC_TGT_BPP 8 +#define DPRX_FEATURE_ENUMERATION_LIST 0x2210 +#define DPRX_EXTENDED_DPCD_FIELD 0x2200 +#define VSC_SDP_EXTENSION_FOR_COLORIMETRY_SUPPORTED BIT(3) +#define VSC_EXT_VESA_SDP_SUPPORTED BIT(4) +#define VSC_EXT_VESA_SDP_CHAINING_SUPPORTED BIT(5) + +enum dp_panel_hdr_pixel_encoding { + RGB, + YCbCr444, + YCbCr422, + YCbCr420, + YONLY, + RAW, +}; + +enum dp_panel_hdr_rgb_colorimetry { + sRGB, + RGB_WIDE_GAMUT_FIXED_POINT, + RGB_WIDE_GAMUT_FLOATING_POINT, + ADOBERGB, + DCI_P3, + CUSTOM_COLOR_PROFILE, + ITU_R_BT_2020_RGB, +}; + +enum dp_panel_hdr_dynamic_range { + VESA, + CEA, +}; + +enum dp_panel_hdr_content_type { + NOT_DEFINED, + GRAPHICS, + PHOTO, + VIDEO, + GAME, +}; + +enum dp_panel_hdr_state { + HDR_DISABLED, + HDR_ENABLED, +}; + +struct dp_panel_private { + struct device *dev; + struct dp_panel dp_panel; + struct dp_aux *aux; + struct dp_link *link; + struct dp_parser *parser; + struct dp_catalog_panel *catalog; + struct dp_panel *base; + bool panel_on; + bool vsc_supported; + bool vscext_supported; + bool vscext_chaining_supported; + enum dp_panel_hdr_state hdr_state; + u8 spd_vendor_name[8]; + u8 spd_product_description[16]; + u8 major; + u8 minor; +}; + +/* OEM NAME */ +static const u8 vendor_name[8] = {81, 117, 97, 108, 99, 111, 109, 109}; + +/* MODEL NAME */ +static const u8 product_desc[16] = {83, 110, 97, 112, 100, 114, 97, 103, + 111, 110, 0, 0, 0, 0, 0, 0}; + +struct dp_dhdr_maxpkt_calc_input { + u32 mdp_clk; + u32 lclk; + u32 pclk; + u32 h_active; + u32 nlanes; + s64 mst_target_sc; + bool mst_en; + bool fec_en; +}; + +struct tu_algo_data { + s64 lclk_fp; + s64 orig_lclk_fp; + + s64 pclk_fp; + s64 
orig_pclk_fp; + s64 lwidth; + s64 lwidth_fp; + int orig_lwidth; + s64 hbp_relative_to_pclk; + s64 hbp_relative_to_pclk_fp; + int orig_hbp; + int nlanes; + int bpp; + int pixelEnc; + int dsc_en; + int async_en; + int fec_en; + int bpc; + + int rb2; + uint delay_start_link_extra_pixclk; + int extra_buffer_margin; + s64 ratio_fp; + s64 original_ratio_fp; + + s64 err_fp; + s64 n_err_fp; + s64 n_n_err_fp; + int tu_size; + int tu_size_desired; + int tu_size_minus1; + + int valid_boundary_link; + s64 resulting_valid_fp; + s64 total_valid_fp; + s64 effective_valid_fp; + s64 effective_valid_recorded_fp; + int n_tus; + int n_tus_per_lane; + int paired_tus; + int remainder_tus; + int remainder_tus_upper; + int remainder_tus_lower; + int extra_bytes; + int filler_size; + int delay_start_link; + + int extra_pclk_cycles; + int extra_pclk_cycles_in_link_clk; + s64 ratio_by_tu_fp; + s64 average_valid2_fp; + int new_valid_boundary_link; + int remainder_symbols_exist; + int n_symbols; + s64 n_remainder_symbols_per_lane_fp; + s64 last_partial_tu_fp; + s64 TU_ratio_err_fp; + + int n_tus_incl_last_incomplete_tu; + int extra_pclk_cycles_tmp; + int extra_pclk_cycles_in_link_clk_tmp; + int extra_required_bytes_new_tmp; + int filler_size_tmp; + int lower_filler_size_tmp; + int delay_start_link_tmp; + + bool boundary_moderation_en; + int boundary_mod_lower_err; + int upper_boundary_count; + int lower_boundary_count; + int i_upper_boundary_count; + int i_lower_boundary_count; + int valid_lower_boundary_link; + int even_distribution_BF; + int even_distribution_legacy; + int even_distribution; + int hbp_delayStartCheck; + int pre_tu_hw_pipe_delay; + int post_tu_hw_pipe_delay; + int link_config_hactive_time; + int delay_start_link_lclk; + int tu_active_cycles; + s64 parity_symbols; + int resolution_line_time; + int last_partial_lclk; + int min_hblank_violated; + s64 delay_start_time_fp; + s64 hbp_time_fp; + s64 hactive_time_fp; + s64 diff_abs_fp; + int second_loop_set; + s64 ratio; +}; + +/** + 
* Mapper function which outputs colorimetry and dynamic range + * to be used for a given colorspace value when the vsc sdp + * packets are used to change the colorimetry. + */ +static void get_sdp_colorimetry_range(struct dp_panel_private *panel, + u32 colorspace, u32 *colorimetry, u32 *dynamic_range) +{ + + u32 cc; + + /* + * Some rules being used for assignment of dynamic + * range for colorimetry using SDP: + * + * 1) If compliance test is ongoing return sRGB with + * CEA primaries + * 2) For BT2020 cases, dynamic range shall be CEA + * 3) For DCI-P3 cases, as per HW team dynamic range + * shall be VESA for RGB and CEA for YUV content + * Hence defaulting to RGB and picking VESA + * 4) Default shall be sRGB with VESA + */ + + cc = panel->link->get_colorimetry_config(panel->link); + + if (cc) { + *colorimetry = sRGB; + *dynamic_range = CEA; + return; + } + + switch (colorspace) { + case DRM_MODE_COLORIMETRY_BT2020_RGB: + *colorimetry = ITU_R_BT_2020_RGB; + *dynamic_range = CEA; + break; + case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: + case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: + *colorimetry = DCI_P3; + *dynamic_range = VESA; + break; + default: + *colorimetry = sRGB; + *dynamic_range = VESA; + } +} + +/** + * Mapper function which outputs colorimetry to be used for a + * given colorspace value when misc field of MSA is used to + * change the colorimetry. Currently only RGB formats have been + * added. This API will be extended to YUV once its supported on DP. 
+ */ +static u8 get_misc_colorimetry_val(struct dp_panel_private *panel, + u32 colorspace) +{ + u8 colorimetry; + u32 cc; + + cc = panel->link->get_colorimetry_config(panel->link); + /* + * If there is a non-zero value then compliance test-case + * is going on, otherwise we can honor the colorspace setting + */ + if (cc) + return cc; + + switch (colorspace) { + case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: + case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: + colorimetry = 0x7; + break; + case DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED: + colorimetry = 0x3; + break; + case DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT: + colorimetry = 0xb; + break; + case DRM_MODE_COLORIMETRY_OPRGB: + colorimetry = 0xc; + break; + default: + colorimetry = 0; + } + + return colorimetry; +} + +static int _tu_param_compare(s64 a, s64 b) +{ + u32 a_int, a_frac, a_sign; + u32 b_int, b_frac, b_sign; + s64 a_temp, b_temp, minus_1; + + if (a == b) + return 0; + + minus_1 = drm_fixp_from_fraction(-1, 1); + + a_int = (a >> 32) & 0x7FFFFFFF; + a_frac = a & 0xFFFFFFFF; + a_sign = (a >> 32) & 0x80000000 ? 1 : 0; + + b_int = (b >> 32) & 0x7FFFFFFF; + b_frac = b & 0xFFFFFFFF; + b_sign = (b >> 32) & 0x80000000 ? 1 : 0; + + if (a_sign > b_sign) + return 2; + else if (b_sign > a_sign) + return 1; + + if (!a_sign && !b_sign) { /* positive */ + if (a > b) + return 1; + else + return 2; + } else { /* negative */ + a_temp = drm_fixp_mul(a, minus_1); + b_temp = drm_fixp_mul(b, minus_1); + + if (a_temp > b_temp) + return 2; + else + return 1; + } +} + +static s64 fixp_subtract(s64 a, s64 b) +{ + s64 minus_1 = drm_fixp_from_fraction(-1, 1); + + if (a >= b) + return a - b; + + return drm_fixp_mul(b - a, minus_1); +} + +static inline int fixp2int_ceil(s64 a) +{ + return (a ? 
drm_fixp2int_ceil(a) : 0); +} + +static void dp_panel_update_tu_timings(struct dp_tu_calc_input *in, + struct tu_algo_data *tu) +{ + int nlanes = in->nlanes; + int dsc_num_slices = in->num_of_dsc_slices; + int dsc_num_bytes = 0; + int numerator; + s64 pclk_dsc_fp; + s64 dwidth_dsc_fp; + s64 hbp_dsc_fp; + s64 overhead_dsc; + + int tot_num_eoc_symbols = 0; + int tot_num_hor_bytes = 0; + int tot_num_dummy_bytes = 0; + int dwidth_dsc_bytes = 0; + int eoc_bytes = 0; + + s64 temp1_fp, temp2_fp, temp3_fp; + + tu->lclk_fp = drm_fixp_from_fraction(in->lclk, 1); + tu->orig_lclk_fp = tu->lclk_fp; + tu->pclk_fp = drm_fixp_from_fraction(in->pclk_khz, 1000); + tu->orig_pclk_fp = tu->pclk_fp; + tu->lwidth = in->hactive; + tu->hbp_relative_to_pclk = in->hporch; + tu->nlanes = in->nlanes; + tu->bpp = in->bpp; + tu->pixelEnc = in->pixel_enc; + tu->dsc_en = in->dsc_en; + tu->fec_en = in->fec_en; + tu->async_en = in->async_en; + tu->lwidth_fp = drm_fixp_from_fraction(in->hactive, 1); + tu->orig_lwidth = in->hactive; + tu->hbp_relative_to_pclk_fp = drm_fixp_from_fraction(in->hporch, 1); + tu->orig_hbp = in->hporch; + tu->rb2 = (in->hporch < 160) ? 1 : 0; + + if (tu->pixelEnc == 420) { + temp1_fp = drm_fixp_from_fraction(2, 1); + tu->pclk_fp = drm_fixp_div(tu->pclk_fp, temp1_fp); + tu->lwidth_fp = drm_fixp_div(tu->lwidth_fp, temp1_fp); + tu->hbp_relative_to_pclk_fp = + drm_fixp_div(tu->hbp_relative_to_pclk_fp, 2); + } + + if (tu->pixelEnc == 422) { + switch (tu->bpp) { + case 24: + tu->bpp = 16; + tu->bpc = 8; + break; + case 30: + tu->bpp = 20; + tu->bpc = 10; + break; + default: + tu->bpp = 16; + tu->bpc = 8; + break; + } + } else + tu->bpc = tu->bpp/3; + + if (!in->dsc_en) + goto fec_check; + + tu->bpp = 24; // hardcode to 24 if DSC is enabled. 
+ + temp1_fp = drm_fixp_from_fraction(in->compress_ratio, 100); + temp2_fp = drm_fixp_from_fraction(in->bpp, 1); + temp3_fp = drm_fixp_div(temp2_fp, temp1_fp); + temp2_fp = drm_fixp_mul(tu->lwidth_fp, temp3_fp); + + temp1_fp = drm_fixp_from_fraction(8, 1); + temp3_fp = drm_fixp_div(temp2_fp, temp1_fp); + + numerator = drm_fixp2int(temp3_fp); + + dsc_num_bytes = numerator / dsc_num_slices; + eoc_bytes = dsc_num_bytes % nlanes; + tot_num_eoc_symbols = nlanes * dsc_num_slices; + tot_num_hor_bytes = dsc_num_bytes * dsc_num_slices; + tot_num_dummy_bytes = (nlanes - eoc_bytes) * dsc_num_slices; + + if (dsc_num_bytes == 0) + DP_WARN("incorrect no of bytes per slice=%d\n", dsc_num_bytes); + + dwidth_dsc_bytes = (tot_num_hor_bytes + + tot_num_eoc_symbols + + (eoc_bytes == 0 ? 0 : tot_num_dummy_bytes)); + overhead_dsc = dwidth_dsc_bytes / tot_num_hor_bytes; + + dwidth_dsc_fp = drm_fixp_from_fraction(dwidth_dsc_bytes, 3); + + temp2_fp = drm_fixp_mul(tu->pclk_fp, dwidth_dsc_fp); + temp1_fp = drm_fixp_div(temp2_fp, tu->lwidth_fp); + pclk_dsc_fp = temp1_fp; + + temp1_fp = drm_fixp_div(pclk_dsc_fp, tu->pclk_fp); + temp2_fp = drm_fixp_mul(tu->hbp_relative_to_pclk_fp, temp1_fp); + hbp_dsc_fp = temp2_fp; + + /* output */ + tu->pclk_fp = pclk_dsc_fp; + tu->lwidth_fp = dwidth_dsc_fp; + tu->hbp_relative_to_pclk_fp = hbp_dsc_fp; + +fec_check: + if (in->fec_en) { + temp1_fp = drm_fixp_from_fraction(976, 1000); /* 0.976 */ + tu->lclk_fp = drm_fixp_mul(tu->lclk_fp, temp1_fp); + } +} + +static void _tu_valid_boundary_calc(struct tu_algo_data *tu) +{ + s64 temp1_fp, temp2_fp, temp, temp1, temp2; + int compare_result_1, compare_result_2, compare_result_3; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp); + + tu->new_valid_boundary_link = fixp2int_ceil(temp2_fp); + + temp = (tu->i_upper_boundary_count * + tu->new_valid_boundary_link + + tu->i_lower_boundary_count * + (tu->new_valid_boundary_link - 1)); + tu->average_valid2_fp = 
drm_fixp_from_fraction(temp, + (tu->i_upper_boundary_count + + tu->i_lower_boundary_count)); + + temp1_fp = drm_fixp_from_fraction(tu->bpp, 8); + temp2_fp = tu->lwidth_fp; + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + temp2_fp = drm_fixp_div(temp1_fp, tu->average_valid2_fp); + tu->n_tus = drm_fixp2int(temp2_fp); + if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000) + tu->n_tus += 1; + + temp1_fp = drm_fixp_from_fraction(tu->n_tus, 1); + temp2_fp = drm_fixp_mul(temp1_fp, tu->average_valid2_fp); + temp1_fp = drm_fixp_from_fraction(tu->n_symbols, 1); + temp2_fp = temp1_fp - temp2_fp; + temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1); + temp2_fp = drm_fixp_div(temp2_fp, temp1_fp); + tu->n_remainder_symbols_per_lane_fp = temp2_fp; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + tu->last_partial_tu_fp = + drm_fixp_div(tu->n_remainder_symbols_per_lane_fp, + temp1_fp); + + if (tu->n_remainder_symbols_per_lane_fp != 0) + tu->remainder_symbols_exist = 1; + else + tu->remainder_symbols_exist = 0; + + temp1_fp = drm_fixp_from_fraction(tu->n_tus, tu->nlanes); + tu->n_tus_per_lane = drm_fixp2int(temp1_fp); + + tu->paired_tus = (int)((tu->n_tus_per_lane) / + (tu->i_upper_boundary_count + + tu->i_lower_boundary_count)); + + tu->remainder_tus = tu->n_tus_per_lane - tu->paired_tus * + (tu->i_upper_boundary_count + + tu->i_lower_boundary_count); + + if ((tu->remainder_tus - tu->i_upper_boundary_count) > 0) { + tu->remainder_tus_upper = tu->i_upper_boundary_count; + tu->remainder_tus_lower = tu->remainder_tus - + tu->i_upper_boundary_count; + } else { + tu->remainder_tus_upper = tu->remainder_tus; + tu->remainder_tus_lower = 0; + } + + temp = tu->paired_tus * (tu->i_upper_boundary_count * + tu->new_valid_boundary_link + + tu->i_lower_boundary_count * + (tu->new_valid_boundary_link - 1)) + + (tu->remainder_tus_upper * + tu->new_valid_boundary_link) + + (tu->remainder_tus_lower * + (tu->new_valid_boundary_link - 1)); + tu->total_valid_fp = drm_fixp_from_fraction(temp, 1); + + if 
(tu->remainder_symbols_exist) { + temp1_fp = tu->total_valid_fp + + tu->n_remainder_symbols_per_lane_fp; + temp2_fp = drm_fixp_from_fraction(tu->n_tus_per_lane, 1); + temp2_fp = temp2_fp + tu->last_partial_tu_fp; + temp1_fp = drm_fixp_div(temp1_fp, temp2_fp); + } else { + temp2_fp = drm_fixp_from_fraction(tu->n_tus_per_lane, 1); + temp1_fp = drm_fixp_div(tu->total_valid_fp, temp2_fp); + } + tu->effective_valid_fp = temp1_fp; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp); + tu->n_n_err_fp = fixp_subtract(tu->effective_valid_fp, temp2_fp); + + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp); + tu->n_err_fp = fixp_subtract(tu->average_valid2_fp, temp2_fp); + + tu->even_distribution = tu->n_tus % tu->nlanes == 0 ? 1 : 0; + + temp1_fp = drm_fixp_from_fraction(tu->bpp, 8); + temp2_fp = tu->lwidth_fp; + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + temp2_fp = drm_fixp_div(temp1_fp, tu->average_valid2_fp); + tu->n_tus_incl_last_incomplete_tu = fixp2int_ceil(temp2_fp); + + temp1 = 0; + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp); + temp1_fp = tu->average_valid2_fp - temp2_fp; + temp2_fp = drm_fixp_from_fraction(tu->n_tus_incl_last_incomplete_tu, 1); + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + temp1 = fixp2int_ceil(temp1_fp); + + temp = tu->i_upper_boundary_count * tu->nlanes; + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu->new_valid_boundary_link, 1); + temp2_fp = temp1_fp - temp2_fp; + temp1_fp = drm_fixp_from_fraction(temp, 1); + temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp); + temp2 = fixp2int_ceil(temp2_fp); + + tu->extra_required_bytes_new_tmp = (int)(temp1 + temp2); + + temp1_fp = drm_fixp_from_fraction(8, tu->bpp); + temp2_fp = drm_fixp_from_fraction( + 
tu->extra_required_bytes_new_tmp, 1); + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + tu->extra_pclk_cycles_tmp = fixp2int_ceil(temp1_fp); + + temp1_fp = drm_fixp_from_fraction(tu->extra_pclk_cycles_tmp, 1); + temp2_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp); + temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp); + tu->extra_pclk_cycles_in_link_clk_tmp = fixp2int_ceil(temp1_fp); + + tu->filler_size_tmp = tu->tu_size - tu->new_valid_boundary_link; + + tu->lower_filler_size_tmp = tu->filler_size_tmp + 1; + + tu->delay_start_link_tmp = tu->extra_pclk_cycles_in_link_clk_tmp + + tu->lower_filler_size_tmp + + tu->extra_buffer_margin; + + temp1_fp = drm_fixp_from_fraction(tu->delay_start_link_tmp, 1); + tu->delay_start_time_fp = drm_fixp_div(temp1_fp, tu->lclk_fp); + + if (tu->rb2) + { + temp1_fp = drm_fixp_mul(tu->delay_start_time_fp, tu->lclk_fp); + tu->delay_start_link_lclk = fixp2int_ceil(temp1_fp); + + if (tu->remainder_tus > tu->i_upper_boundary_count) { + temp = (tu->remainder_tus - tu->i_upper_boundary_count) * (tu->new_valid_boundary_link - 1); + temp += (tu->i_upper_boundary_count * tu->new_valid_boundary_link); + temp *= tu->nlanes; + } else { + temp = tu->nlanes * tu->remainder_tus * tu->new_valid_boundary_link; + } + + temp1 = tu->i_lower_boundary_count * (tu->new_valid_boundary_link - 1); + temp1 += tu->i_upper_boundary_count * tu->new_valid_boundary_link; + temp1 *= tu->paired_tus * tu->nlanes; + temp1_fp = drm_fixp_from_fraction(tu->n_symbols - temp1 - temp, tu->nlanes); + tu->last_partial_lclk = fixp2int_ceil(temp1_fp); + + tu->tu_active_cycles = (int)((tu->n_tus_per_lane * tu->tu_size) + tu->last_partial_lclk); + tu->post_tu_hw_pipe_delay = 4 /*BS_on_the_link*/ + 1 /*BE_next_ren*/; + temp = tu->pre_tu_hw_pipe_delay + tu->delay_start_link_lclk + tu->tu_active_cycles + tu->post_tu_hw_pipe_delay; + + if (tu->fec_en == 1) + { + if (tu->nlanes == 1) + { + temp1_fp = drm_fixp_from_fraction(temp, 500); + tu->parity_symbols = fixp2int_ceil(temp1_fp) * 12 + 1; + } + 
else + { + temp1_fp = drm_fixp_from_fraction(temp, 250); + tu->parity_symbols = fixp2int_ceil(temp1_fp) * 6 + 1; + } + } + else //no fec BW impact + { + tu->parity_symbols = 0; + } + + tu->link_config_hactive_time = temp + tu->parity_symbols; + + if (tu->resolution_line_time >= tu->link_config_hactive_time + 1 /*margin*/) + tu->hbp_delayStartCheck = 1; + else + tu->hbp_delayStartCheck = 0; + } else { + compare_result_3 = _tu_param_compare(tu->hbp_time_fp, tu->delay_start_time_fp); + if (compare_result_3 < 2) + tu->hbp_delayStartCheck = 1; + else + tu->hbp_delayStartCheck = 0; + } + + compare_result_1 = _tu_param_compare(tu->n_n_err_fp, tu->diff_abs_fp); + if (compare_result_1 == 2) + compare_result_1 = 1; + else + compare_result_1 = 0; + + compare_result_2 = _tu_param_compare(tu->n_n_err_fp, tu->err_fp); + if (compare_result_2 == 2) + compare_result_2 = 1; + else + compare_result_2 = 0; + + if (((tu->even_distribution == 1) || + ((tu->even_distribution_BF == 0) && + (tu->even_distribution_legacy == 0))) && + tu->n_err_fp >= 0 && tu->n_n_err_fp >= 0 && + compare_result_2 && + (compare_result_1 || (tu->min_hblank_violated == 1)) && + (tu->new_valid_boundary_link - 1) > 0 && + (tu->hbp_delayStartCheck == 1) && + (tu->delay_start_link_tmp <= 1023)) { + tu->upper_boundary_count = tu->i_upper_boundary_count; + tu->lower_boundary_count = tu->i_lower_boundary_count; + tu->err_fp = tu->n_n_err_fp; + tu->boundary_moderation_en = true; + tu->tu_size_desired = tu->tu_size; + tu->valid_boundary_link = tu->new_valid_boundary_link; + tu->effective_valid_recorded_fp = tu->effective_valid_fp; + tu->even_distribution_BF = 1; + tu->delay_start_link = tu->delay_start_link_tmp; + } else if (tu->boundary_mod_lower_err == 0) { + compare_result_1 = _tu_param_compare(tu->n_n_err_fp, + tu->diff_abs_fp); + if (compare_result_1 == 2) + tu->boundary_mod_lower_err = 1; + } +} + +static void _dp_calc_boundary(struct tu_algo_data *tu) +{ + s64 temp1_fp = 0, temp2_fp = 0; + + do { + tu->err_fp = 
drm_fixp_from_fraction(1000, 1); + + temp1_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp); + temp2_fp = drm_fixp_from_fraction( + tu->delay_start_link_extra_pixclk, 1); + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + tu->extra_buffer_margin = fixp2int_ceil(temp1_fp); + + temp1_fp = drm_fixp_from_fraction(tu->bpp, 8); + temp1_fp = drm_fixp_mul(tu->lwidth_fp, temp1_fp); + tu->n_symbols = fixp2int_ceil(temp1_fp); + + for (tu->tu_size = 32; tu->tu_size <= 64; tu->tu_size++) { + for (tu->i_upper_boundary_count = 1; + tu->i_upper_boundary_count <= 15; + tu->i_upper_boundary_count++) { + for (tu->i_lower_boundary_count = 1; + tu->i_lower_boundary_count <= 15; + tu->i_lower_boundary_count++) { + _tu_valid_boundary_calc(tu); + } + } + } + tu->delay_start_link_extra_pixclk--; + } while (!tu->boundary_moderation_en && + tu->boundary_mod_lower_err == 1 && + tu->delay_start_link_extra_pixclk != 0 && + ((tu->second_loop_set == 0 && tu->rb2 == 1) || tu->rb2 == 0)); +} + +static void _dp_calc_extra_bytes(struct tu_algo_data *tu) +{ + u64 temp = 0; + s64 temp1_fp = 0, temp2_fp = 0; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1); + temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu->valid_boundary_link, 1); + temp2_fp = temp1_fp - temp2_fp; + temp1_fp = drm_fixp_from_fraction(tu->n_tus + 1, 1); + temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp); + + temp = drm_fixp2int(temp2_fp); + if (temp) + tu->extra_bytes = fixp2int_ceil(temp2_fp); + else + tu->extra_bytes = 0; + + temp1_fp = drm_fixp_from_fraction(tu->extra_bytes, 1); + temp2_fp = drm_fixp_from_fraction(8, tu->bpp); + temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp); + tu->extra_pclk_cycles = fixp2int_ceil(temp1_fp); + + temp1_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp); + temp2_fp = drm_fixp_from_fraction(tu->extra_pclk_cycles, 1); + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + tu->extra_pclk_cycles_in_link_clk = fixp2int_ceil(temp1_fp); +} + +static void 
_dp_panel_calc_tu(struct dp_tu_calc_input *in, + struct dp_vc_tu_mapping_table *tu_table) +{ + struct tu_algo_data tu; + int compare_result_1, compare_result_2; + u64 temp = 0, temp1; + s64 temp_fp = 0, temp1_fp = 0, temp2_fp = 0; + + s64 LCLK_FAST_SKEW_fp = drm_fixp_from_fraction(6, 10000); /* 0.0006 */ + s64 RATIO_SCALE_fp = drm_fixp_from_fraction(1001, 1000); + + u8 DP_BRUTE_FORCE = 1; + s64 BRUTE_FORCE_THRESHOLD_fp = drm_fixp_from_fraction(1, 10); /* 0.1 */ + uint EXTRA_PIXCLK_CYCLE_DELAY = 4; + s64 HBLANK_MARGIN = drm_fixp_from_fraction(4, 1); + s64 HBLANK_MARGIN_EXTRA = 0; + + memset(&tu, 0, sizeof(tu)); + + dp_panel_update_tu_timings(in, &tu); + + tu.err_fp = drm_fixp_from_fraction(1000, 1); /* 1000 */ + + temp1_fp = drm_fixp_from_fraction(4, 1); + temp2_fp = drm_fixp_mul(temp1_fp, tu.lclk_fp); + temp_fp = drm_fixp_div(temp2_fp, tu.pclk_fp); + tu.extra_buffer_margin = fixp2int_ceil(temp_fp); + + if (in->compress_ratio == 375 && tu.bpp == 30) + temp1_fp = drm_fixp_from_fraction(24, 8); + else + temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); + + temp2_fp = drm_fixp_mul(tu.pclk_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1); + temp2_fp = drm_fixp_div(temp2_fp, temp1_fp); + tu.ratio_fp = drm_fixp_div(temp2_fp, tu.lclk_fp); + + tu.original_ratio_fp = tu.ratio_fp; + tu.boundary_moderation_en = false; + tu.upper_boundary_count = 0; + tu.lower_boundary_count = 0; + tu.i_upper_boundary_count = 0; + tu.i_lower_boundary_count = 0; + tu.valid_lower_boundary_link = 0; + tu.even_distribution_BF = 0; + tu.even_distribution_legacy = 0; + tu.even_distribution = 0; + tu.hbp_delayStartCheck = 0; + tu.pre_tu_hw_pipe_delay = 0; + tu.post_tu_hw_pipe_delay = 0; + tu.link_config_hactive_time = 0; + tu.delay_start_link_lclk = 0; + tu.tu_active_cycles = 0; + tu.resolution_line_time = 0; + tu.last_partial_lclk = 0; + tu.delay_start_time_fp = 0; + tu.second_loop_set = 0; + + tu.err_fp = drm_fixp_from_fraction(1000, 1); + tu.n_err_fp = 0; + tu.n_n_err_fp = 0; + + temp = 
drm_fixp2int(tu.lwidth_fp); + if ((((u32)temp % tu.nlanes) != 0) && (_tu_param_compare(tu.ratio_fp, DRM_FIXED_ONE) == 2) + && (tu.dsc_en == 0)) { + tu.ratio_fp = drm_fixp_mul(tu.ratio_fp, RATIO_SCALE_fp); + if (_tu_param_compare(tu.ratio_fp, DRM_FIXED_ONE) == 1) + tu.ratio_fp = DRM_FIXED_ONE; + } + + if (_tu_param_compare(tu.ratio_fp, DRM_FIXED_ONE) == 1) + tu.ratio_fp = DRM_FIXED_ONE; + + if (HBLANK_MARGIN_EXTRA != 0) { + HBLANK_MARGIN += HBLANK_MARGIN_EXTRA; + DP_DEBUG("Info: increase HBLANK_MARGIN to %d. (PLUS%d)\n", HBLANK_MARGIN, + HBLANK_MARGIN_EXTRA); + } + + for (tu.tu_size = 32; tu.tu_size <= 64; tu.tu_size++) { + temp1_fp = drm_fixp_from_fraction(tu.tu_size, 1); + temp2_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp); + temp = fixp2int_ceil(temp2_fp); + temp1_fp = drm_fixp_from_fraction(temp, 1); + tu.n_err_fp = temp1_fp - temp2_fp; + + if (tu.n_err_fp < tu.err_fp) { + tu.err_fp = tu.n_err_fp; + tu.tu_size_desired = tu.tu_size; + } + } + + tu.tu_size_minus1 = tu.tu_size_desired - 1; + + temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1); + temp2_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp); + tu.valid_boundary_link = fixp2int_ceil(temp2_fp); + + temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); + temp2_fp = tu.lwidth_fp; + temp2_fp = drm_fixp_mul(temp2_fp, temp1_fp); + + temp1_fp = drm_fixp_from_fraction(tu.valid_boundary_link, 1); + temp2_fp = drm_fixp_div(temp2_fp, temp1_fp); + tu.n_tus = drm_fixp2int(temp2_fp); + if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000) + tu.n_tus += 1; + + tu.even_distribution_legacy = tu.n_tus % tu.nlanes == 0 ? 
1 : 0; + DP_DEBUG("Info: n_sym = %d, num_of_tus = %d\n", + tu.valid_boundary_link, tu.n_tus); + + _dp_calc_extra_bytes(&tu); + + tu.filler_size = tu.tu_size_desired - tu.valid_boundary_link; + + temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1); + tu.ratio_by_tu_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp); + + tu.delay_start_link = tu.extra_pclk_cycles_in_link_clk + + tu.filler_size + tu.extra_buffer_margin; + + tu.resulting_valid_fp = + drm_fixp_from_fraction(tu.valid_boundary_link, 1); + + temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1); + temp2_fp = drm_fixp_div(tu.resulting_valid_fp, temp1_fp); + tu.TU_ratio_err_fp = temp2_fp - tu.original_ratio_fp; + + temp1_fp = drm_fixp_from_fraction((tu.hbp_relative_to_pclk - HBLANK_MARGIN), 1); + tu.hbp_time_fp = drm_fixp_div(temp1_fp, tu.pclk_fp); + + temp1_fp = drm_fixp_from_fraction(tu.delay_start_link, 1); + tu.delay_start_time_fp = drm_fixp_div(temp1_fp, tu.lclk_fp); + + compare_result_1 = _tu_param_compare(tu.hbp_time_fp, + tu.delay_start_time_fp); + if (compare_result_1 == 2) /* hbp_time_fp < delay_start_time_fp */ + tu.min_hblank_violated = 1; + + tu.hactive_time_fp = drm_fixp_div(tu.lwidth_fp, tu.pclk_fp); + + compare_result_2 = _tu_param_compare(tu.hactive_time_fp, + tu.delay_start_time_fp); + if (compare_result_2 == 2) + tu.min_hblank_violated = 1; + + /* brute force */ + + tu.delay_start_link_extra_pixclk = EXTRA_PIXCLK_CYCLE_DELAY; + tu.diff_abs_fp = tu.resulting_valid_fp - tu.ratio_by_tu_fp; + + temp = drm_fixp2int(tu.diff_abs_fp); + if (!temp && tu.diff_abs_fp <= 0xffff) + tu.diff_abs_fp = 0; + + /* if(diff_abs < 0) diff_abs *= -1 */ + if (tu.diff_abs_fp < 0) + tu.diff_abs_fp = drm_fixp_mul(tu.diff_abs_fp, -1); + + tu.boundary_mod_lower_err = 0; + + temp1_fp = drm_fixp_div(tu.orig_lclk_fp, tu.orig_pclk_fp); + + temp2_fp = drm_fixp_from_fraction(tu.orig_lwidth + tu.orig_hbp, 2); + temp_fp = drm_fixp_mul(temp1_fp, temp2_fp); + tu.resolution_line_time = drm_fixp2int(temp_fp); + 
tu.pre_tu_hw_pipe_delay = fixp2int_ceil(temp1_fp) + 2 /*cdc fifo write jitter+2*/ + + 3 /*pre-delay start cycles*/ + + 3 /*post-delay start cycles*/ + 1 /*BE on the link*/; + tu.post_tu_hw_pipe_delay = 4 /*BS_on_the_link*/ + 1 /*BE_next_ren*/; + + temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); + temp1_fp = drm_fixp_mul(tu.lwidth_fp, temp1_fp); + tu.n_symbols = fixp2int_ceil(temp1_fp); + + if (tu.rb2) + { + temp1_fp = drm_fixp_mul(tu.delay_start_time_fp, tu.lclk_fp); + tu.delay_start_link_lclk = fixp2int_ceil(temp1_fp); + + tu.new_valid_boundary_link = tu.valid_boundary_link; + tu.i_upper_boundary_count = 1; + tu.i_lower_boundary_count = 0; + + temp1 = tu.i_upper_boundary_count * tu.new_valid_boundary_link; + temp1 += tu.i_lower_boundary_count * (tu.new_valid_boundary_link - 1); + tu.average_valid2_fp = drm_fixp_from_fraction(temp1, (tu.i_upper_boundary_count + tu.i_lower_boundary_count)); + + temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); + temp1_fp = drm_fixp_mul(tu.lwidth_fp, temp1_fp); + temp2_fp = drm_fixp_div(temp1_fp, tu.average_valid2_fp); + tu.n_tus = drm_fixp2int(temp2_fp); + + tu.n_tus_per_lane = tu.n_tus / tu.nlanes; + tu.paired_tus = (int)((tu.n_tus_per_lane) / (tu.i_upper_boundary_count + tu.i_lower_boundary_count)); + + tu.remainder_tus = tu.n_tus_per_lane - tu.paired_tus * (tu.i_upper_boundary_count + tu.i_lower_boundary_count); + + if (tu.remainder_tus > tu.i_upper_boundary_count) { + temp = (tu.remainder_tus - tu.i_upper_boundary_count) * (tu.new_valid_boundary_link - 1); + temp += (tu.i_upper_boundary_count * tu.new_valid_boundary_link); + temp *= tu.nlanes; + } else { + temp = tu.nlanes * tu.remainder_tus * tu.new_valid_boundary_link; + } + + temp1 = tu.i_lower_boundary_count * (tu.new_valid_boundary_link - 1); + temp1 += tu.i_upper_boundary_count * tu.new_valid_boundary_link; + temp1 *= tu.paired_tus * tu.nlanes; + temp1_fp = drm_fixp_from_fraction(tu.n_symbols - temp1 - temp, tu.nlanes); + tu.last_partial_lclk = fixp2int_ceil(temp1_fp); + + 
tu.tu_active_cycles = (int)((tu.n_tus_per_lane * tu.tu_size) + tu.last_partial_lclk); + + temp = tu.pre_tu_hw_pipe_delay + tu.delay_start_link_lclk + tu.tu_active_cycles + tu.post_tu_hw_pipe_delay; + + if (tu.fec_en == 1) + { + if (tu.nlanes == 1) + { + temp1_fp = drm_fixp_from_fraction(temp, 500); + tu.parity_symbols = fixp2int_ceil(temp1_fp) * 12 + 1; + } + else + { + temp1_fp = drm_fixp_from_fraction(temp, 250); + tu.parity_symbols = fixp2int_ceil(temp1_fp) * 6 + 1; + } + } + else //no fec BW impact + { + tu.parity_symbols = 0; + } + + tu.link_config_hactive_time = temp + tu.parity_symbols; + + if (tu.link_config_hactive_time + 1 /*margin*/ >= tu.resolution_line_time) + tu.min_hblank_violated = 1; + } + + tu.delay_start_time_fp = 0; + + if ((tu.diff_abs_fp != 0 && + ((tu.diff_abs_fp > BRUTE_FORCE_THRESHOLD_fp) || + (tu.even_distribution_legacy == 0) || + (DP_BRUTE_FORCE == 1))) || + (tu.min_hblank_violated == 1)) { + _dp_calc_boundary(&tu); + + if (tu.boundary_moderation_en) { + temp1_fp = drm_fixp_from_fraction( + (tu.upper_boundary_count * + tu.valid_boundary_link + + tu.lower_boundary_count * + (tu.valid_boundary_link - 1)), 1); + temp2_fp = drm_fixp_from_fraction( + (tu.upper_boundary_count + + tu.lower_boundary_count), 1); + tu.resulting_valid_fp = + drm_fixp_div(temp1_fp, temp2_fp); + + temp1_fp = drm_fixp_from_fraction( + tu.tu_size_desired, 1); + tu.ratio_by_tu_fp = + drm_fixp_mul(tu.original_ratio_fp, temp1_fp); + + tu.valid_lower_boundary_link = + tu.valid_boundary_link - 1; + + temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); + temp1_fp = drm_fixp_mul(tu.lwidth_fp, temp1_fp); + temp2_fp = drm_fixp_div(temp1_fp, + tu.resulting_valid_fp); + tu.n_tus = drm_fixp2int(temp2_fp); + + tu.tu_size_minus1 = tu.tu_size_desired - 1; + tu.even_distribution_BF = 1; + + temp1_fp = + drm_fixp_from_fraction(tu.tu_size_desired, 1); + temp2_fp = + drm_fixp_div(tu.resulting_valid_fp, temp1_fp); + tu.TU_ratio_err_fp = temp2_fp - tu.original_ratio_fp; + } + } + + if 
(tu.async_en) { + temp2_fp = drm_fixp_mul(LCLK_FAST_SKEW_fp, tu.lwidth_fp); + temp = fixp2int_ceil(temp2_fp); + + temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1); + temp2_fp = drm_fixp_mul(tu.original_ratio_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu.bpp, 8); + temp2_fp = drm_fixp_div(temp1_fp, temp2_fp); + temp1_fp = drm_fixp_from_fraction(temp, 1); + temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp); + temp = drm_fixp2int(temp2_fp); + + tu.delay_start_link += (int)temp; + } + + temp1_fp = drm_fixp_from_fraction(tu.delay_start_link, 1); + tu.delay_start_time_fp = drm_fixp_div(temp1_fp, tu.lclk_fp); + + /* OUTPUTS */ + tu_table->valid_boundary_link = tu.valid_boundary_link; + tu_table->delay_start_link = tu.delay_start_link; + tu_table->boundary_moderation_en = tu.boundary_moderation_en; + tu_table->valid_lower_boundary_link = tu.valid_lower_boundary_link; + tu_table->upper_boundary_count = tu.upper_boundary_count; + tu_table->lower_boundary_count = tu.lower_boundary_count; + tu_table->tu_size_minus1 = tu.tu_size_minus1; + + DP_DEBUG("TU: valid_boundary_link: %d\n", tu_table->valid_boundary_link); + DP_DEBUG("TU: delay_start_link: %d\n", tu_table->delay_start_link); + DP_DEBUG("TU: boundary_moderation_en: %d\n", + tu_table->boundary_moderation_en); + DP_DEBUG("TU: valid_lower_boundary_link: %d\n", + tu_table->valid_lower_boundary_link); + DP_DEBUG("TU: upper_boundary_count: %d\n", + tu_table->upper_boundary_count); + DP_DEBUG("TU: lower_boundary_count: %d\n", + tu_table->lower_boundary_count); + DP_DEBUG("TU: tu_size_minus1: %d\n", tu_table->tu_size_minus1); +} + +static void dp_panel_calc_tu_parameters(struct dp_panel *dp_panel, + struct dp_vc_tu_mapping_table *tu_table) +{ + struct dp_tu_calc_input in; + struct dp_panel_info *pinfo; + struct dp_panel_private *panel; + int bw_code; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + pinfo = &dp_panel->pinfo; + bw_code = panel->link->link_params.bw_code; + + in.lclk = 
drm_dp_bw_code_to_link_rate(bw_code) / 1000; + in.pclk_khz = pinfo->pixel_clk_khz; + in.hactive = pinfo->h_active; + in.hporch = pinfo->h_back_porch + pinfo->h_front_porch + + pinfo->h_sync_width; + in.nlanes = panel->link->link_params.lane_count; + in.bpp = pinfo->bpp; + in.pixel_enc = 444; + in.dsc_en = pinfo->comp_info.enabled; + in.async_en = 0; + in.fec_en = dp_panel->fec_en; + in.num_of_dsc_slices = pinfo->comp_info.dsc_info.slice_per_pkt; + + if (pinfo->comp_info.enabled) + in.compress_ratio = mult_frac(100, pinfo->comp_info.src_bpp, + pinfo->comp_info.tgt_bpp); + + _dp_panel_calc_tu(&in, tu_table); +} + +void dp_panel_calc_tu_test(struct dp_tu_calc_input *in, + struct dp_vc_tu_mapping_table *tu_table) +{ + _dp_panel_calc_tu(in, tu_table); +} + +static void dp_panel_config_tr_unit(struct dp_panel *dp_panel) +{ + struct dp_panel_private *panel; + struct dp_catalog_panel *catalog; + u32 dp_tu = 0x0; + u32 valid_boundary = 0x0; + u32 valid_boundary2 = 0x0; + struct dp_vc_tu_mapping_table tu_calc_table; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + return; + } + + if (dp_panel->stream_id != DP_STREAM_0) + return; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + catalog = panel->catalog; + + dp_panel_calc_tu_parameters(dp_panel, &tu_calc_table); + + dp_tu |= tu_calc_table.tu_size_minus1; + valid_boundary |= tu_calc_table.valid_boundary_link; + valid_boundary |= (tu_calc_table.delay_start_link << 16); + + valid_boundary2 |= (tu_calc_table.valid_lower_boundary_link << 1); + valid_boundary2 |= (tu_calc_table.upper_boundary_count << 16); + valid_boundary2 |= (tu_calc_table.lower_boundary_count << 20); + + if (tu_calc_table.boundary_moderation_en) + valid_boundary2 |= BIT(0); + + DP_DEBUG("dp_tu=0x%x, valid_boundary=0x%x, valid_boundary2=0x%x\n", + dp_tu, valid_boundary, valid_boundary2); + + catalog->dp_tu = dp_tu; + catalog->valid_boundary = valid_boundary; + catalog->valid_boundary2 = valid_boundary2; + + 
catalog->update_transfer_unit(catalog); +} + +static void dp_panel_get_dto_params(u32 src_bpp, u32 tgt_bpp, u32 *num, u32 *denom) +{ + if ((tgt_bpp == 12) && (src_bpp == 24)) { + *num = 1; + *denom = 2; + } else if ((tgt_bpp == 15) && (src_bpp == 30)) { + *num = 5; + *denom = 8; + } else if ((tgt_bpp == 8) && ((src_bpp == 24) || (src_bpp == 30))) { + *num = 1; + *denom = 3; + } else if ((tgt_bpp == 10) && (src_bpp == 30)) { + *num = 5; + *denom = 12; + } else { + DP_ERR("dto params not found\n"); + *num = 0; + *denom = 1; + } +} + +static void dp_panel_dsc_prepare_pps_packet(struct dp_panel *dp_panel) +{ + struct dp_panel_private *panel; + struct dp_dsc_cfg_data *dsc; + u8 *pps, *parity; + u32 *pps_word, *parity_word; + int i, index_4; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + dsc = &panel->catalog->dsc; + pps = dsc->pps; + pps_word = dsc->pps_word; + parity = dsc->parity; + parity_word = dsc->parity_word; + + memset(parity, 0, sizeof(dsc->parity)); + + dsc->pps_word_len = dsc->pps_len >> 2; + dsc->parity_len = dsc->pps_word_len; + dsc->parity_word_len = (dsc->parity_len >> 2) + 1; + + for (i = 0; i < dsc->pps_word_len; i++) { + index_4 = i << 2; + pps_word[i] = pps[index_4 + 0] << 0 | + pps[index_4 + 1] << 8 | + pps[index_4 + 2] << 16 | + pps[index_4 + 3] << 24; + + parity[i] = dp_header_get_parity(pps_word[i]); + } + + for (i = 0; i < dsc->parity_word_len; i++) { + index_4 = i << 2; + parity_word[i] = parity[index_4 + 0] << 0 | + parity[index_4 + 1] << 8 | + parity[index_4 + 2] << 16 | + parity[index_4 + 3] << 24; + } +} + +static void _dp_panel_dsc_get_num_extra_pclk(struct msm_compression_info *comp_info) +{ + unsigned int dto_n = 0, dto_d = 0, remainder; + int ack_required, last_few_ack_required, accum_ack; + int last_few_pclk, last_few_pclk_required; + struct msm_display_dsc_info *dsc = &comp_info->dsc_info; + int start, temp, line_width = dsc->config.pic_width/2; + s64 temp1_fp, temp2_fp; + + 
dp_panel_get_dto_params(comp_info->src_bpp, comp_info->tgt_bpp, &dto_n, &dto_d); + + ack_required = dsc->pclk_per_line; + + /* number of pclk cycles left outside of the complete DTO set */ + last_few_pclk = line_width % dto_d; + + /* number of pclk cycles outside of the complete dto */ + temp1_fp = drm_fixp_from_fraction(line_width, dto_d); + temp2_fp = drm_fixp_from_fraction(dto_n, 1); + temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp); + temp = drm_fixp2int(temp1_fp); + last_few_ack_required = ack_required - temp; + + /* + * check how many more pclk is needed to + * accommodate the last few ack required + */ + remainder = dto_n; + accum_ack = 0; + last_few_pclk_required = 0; + while (accum_ack < last_few_ack_required) { + last_few_pclk_required++; + + if (remainder >= dto_n) + start = remainder; + else + start = remainder + dto_d; + + remainder = start - dto_n; + if (remainder < dto_n) + accum_ack++; + } + + /* if fewer pclk than required */ + if (last_few_pclk < last_few_pclk_required) + dsc->extra_width = last_few_pclk_required - last_few_pclk; + else + dsc->extra_width = 0; + + DP_DEBUG_V("extra pclks required: %d\n", dsc->extra_width); +} + +static void _dp_panel_dsc_bw_overhead_calc(struct dp_panel *dp_panel, + struct msm_display_dsc_info *dsc, + struct dp_display_mode *dp_mode, u32 dsc_byte_cnt) +{ + int num_slices, tot_num_eoc_symbols; + int tot_num_hor_bytes, tot_num_dummy_bytes; + int dwidth_dsc_bytes, eoc_bytes; + u32 num_lanes; + struct dp_panel_private *panel; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + num_lanes = panel->link->link_params.lane_count; + num_slices = dsc->slice_per_pkt; + + eoc_bytes = dsc_byte_cnt % num_lanes; + tot_num_eoc_symbols = num_lanes * num_slices; + tot_num_hor_bytes = dsc_byte_cnt * num_slices; + tot_num_dummy_bytes = (num_lanes - eoc_bytes) * num_slices; + + if (!eoc_bytes) + tot_num_dummy_bytes = 0; + + dwidth_dsc_bytes = tot_num_hor_bytes + tot_num_eoc_symbols + + tot_num_dummy_bytes; + + 
DP_DEBUG_V("dwidth_dsc_bytes:%d, tot_num_hor_bytes:%d\n", + dwidth_dsc_bytes, tot_num_hor_bytes); + + dp_mode->dsc_overhead_fp = drm_fixp_from_fraction(dwidth_dsc_bytes, + tot_num_hor_bytes); + dp_mode->timing.dsc_overhead_fp = dp_mode->dsc_overhead_fp; +} + +static void dp_panel_dsc_pclk_param_calc(struct dp_panel *dp_panel, + struct msm_compression_info *comp_info, + struct dp_display_mode *dp_mode) +{ + int comp_ratio = 100, intf_width; + int slice_per_pkt, slice_per_intf; + s64 temp1_fp, temp2_fp; + s64 numerator_fp, denominator_fp; + s64 dsc_byte_count_fp; + u32 dsc_byte_count, temp1, temp2; + struct msm_display_dsc_info *dsc = &comp_info->dsc_info; + + intf_width = dp_mode->timing.h_active; + if (!dsc || !dsc->config.slice_width || !dsc->slice_per_pkt || + (intf_width < dsc->config.slice_width)) + return; + + slice_per_pkt = dsc->slice_per_pkt; + slice_per_intf = DIV_ROUND_UP(intf_width, + dsc->config.slice_width); + + comp_ratio = mult_frac(100, comp_info->src_bpp, comp_info->tgt_bpp); + + temp1_fp = drm_fixp_from_fraction(comp_ratio, 100); + temp2_fp = drm_fixp_from_fraction(slice_per_pkt * 8, 1); + denominator_fp = drm_fixp_mul(temp1_fp, temp2_fp); + numerator_fp = drm_fixp_from_fraction( + intf_width * dsc->config.bits_per_component * 3, 1); + dsc_byte_count_fp = drm_fixp_div(numerator_fp, denominator_fp); + dsc_byte_count = fixp2int_ceil(dsc_byte_count_fp); + + temp1 = dsc_byte_count * slice_per_intf; + temp2 = temp1; + if (temp1 % 3 != 0) + temp1 += 3 - (temp1 % 3); + + dsc->eol_byte_num = temp1 - temp2; + + temp1_fp = drm_fixp_from_fraction(slice_per_intf, 6); + temp2_fp = drm_fixp_mul(dsc_byte_count_fp, temp1_fp); + dsc->pclk_per_line = fixp2int_ceil(temp2_fp); + + _dp_panel_dsc_get_num_extra_pclk(comp_info); + dsc->pclk_per_line--; + + _dp_panel_dsc_bw_overhead_calc(dp_panel, dsc, dp_mode, dsc_byte_count); +} + +struct dp_dsc_slices_per_line { + u32 min_ppr; + u32 max_ppr; + u8 num_slices; +}; + +struct dp_dsc_peak_throughput { + u32 index; + u32 
peak_throughput; +}; + +struct dp_dsc_slice_caps_bit_map { + u32 num_slices; + u32 bit_index; +}; + +const struct dp_dsc_slices_per_line slice_per_line_tbl[] = { + {0, 340, 1 }, + {340, 680, 2 }, + {680, 1360, 4 }, + {1360, 3200, 8 }, + {3200, 4800, 12 }, + {4800, 6400, 16 }, + {6400, 8000, 20 }, + {8000, 9600, 24 } +}; + +const struct dp_dsc_peak_throughput peak_throughput_mode_0_tbl[] = { + {0, 0}, + {1, 340}, + {2, 400}, + {3, 450}, + {4, 500}, + {5, 550}, + {6, 600}, + {7, 650}, + {8, 700}, + {9, 750}, + {10, 800}, + {11, 850}, + {12, 900}, + {13, 950}, + {14, 1000}, +}; + +const struct dp_dsc_slice_caps_bit_map slice_caps_bit_map_tbl[] = { + {1, 0}, + {2, 1}, + {4, 3}, + {6, 4}, + {8, 5}, + {10, 6}, + {12, 7}, + {16, 0}, + {20, 1}, + {24, 2}, +}; + +static bool dp_panel_check_slice_support(u32 num_slices, u32 raw_data_1, + u32 raw_data_2) +{ + const struct dp_dsc_slice_caps_bit_map *bcap; + u32 raw_data; + int i; + + if (num_slices <= 12) + raw_data = raw_data_1; + else + raw_data = raw_data_2; + + for (i = 0; i < ARRAY_SIZE(slice_caps_bit_map_tbl); i++) { + bcap = &slice_caps_bit_map_tbl[i]; + + if (bcap->num_slices == num_slices) { + raw_data &= (1 << bcap->bit_index); + + if (raw_data) + return true; + else + return false; + } + } + + return false; +} + +static int dp_panel_dsc_prepare_basic_params( + struct msm_compression_info *comp_info, + const struct dp_display_mode *dp_mode, + struct dp_panel *dp_panel) +{ + int i; + const struct dp_dsc_slices_per_line *rec; + const struct dp_dsc_peak_throughput *tput; + u32 slice_width; + u32 ppr = dp_mode->timing.pixel_clk_khz/1000; + u32 max_slice_width; + u32 ppr_max_index; + u32 peak_throughput; + u32 ppr_per_slice; + u32 slice_caps_1; + u32 slice_caps_2; + u32 dsc_version_major, dsc_version_minor; + bool dsc_version_supported = false; + + dsc_version_major = dp_panel->sink_dsc_caps.version & 0xF; + dsc_version_minor = (dp_panel->sink_dsc_caps.version >> 4) & 0xF; + dsc_version_supported = (dsc_version_major == 
0x1 && + (dsc_version_minor == 0x1 || dsc_version_minor == 0x2)) + ? true : false; + + DP_DEBUG_V("DSC version: %d.%d, dpcd value: %x\n", + dsc_version_major, dsc_version_minor, + dp_panel->sink_dsc_caps.version); + + if (!dsc_version_supported) { + dsc_version_major = 1; + dsc_version_minor = 1; + DP_ERR("invalid sink DSC version, fallback to %d.%d\n", + dsc_version_major, dsc_version_minor); + } + + comp_info->dsc_info.config.dsc_version_major = dsc_version_major; + comp_info->dsc_info.config.dsc_version_minor = dsc_version_minor; + comp_info->dsc_info.scr_rev = 0x0; + + comp_info->dsc_info.slice_per_pkt = 0; + for (i = 0; i < ARRAY_SIZE(slice_per_line_tbl); i++) { + rec = &slice_per_line_tbl[i]; + if ((ppr > rec->min_ppr) && (ppr <= rec->max_ppr)) { + comp_info->dsc_info.slice_per_pkt = rec->num_slices; + i++; + break; + } + } + + if (comp_info->dsc_info.slice_per_pkt == 0) + return -EINVAL; + + ppr_max_index = dp_panel->dsc_dpcd[11] &= 0xf; + if (!ppr_max_index || ppr_max_index >= 15) { + DP_DEBUG("Throughput mode 0 not supported"); + return -EINVAL; + } + + tput = &peak_throughput_mode_0_tbl[ppr_max_index]; + peak_throughput = tput->peak_throughput; + + max_slice_width = dp_panel->dsc_dpcd[12] * 320; + slice_width = (dp_mode->timing.h_active / + comp_info->dsc_info.slice_per_pkt); + + ppr_per_slice = ppr/comp_info->dsc_info.slice_per_pkt; + + slice_caps_1 = dp_panel->dsc_dpcd[4]; + slice_caps_2 = dp_panel->dsc_dpcd[13] & 0x7; + + /* + * There are 3 conditions to check for sink support: + * 1. The slice width cannot exceed the maximum. + * 2. The ppr per slice cannot exceed the maximum. + * 3. The number of slices must be explicitly supported. 
+ */ + while (slice_width > max_slice_width || + ppr_per_slice > peak_throughput || + !dp_panel_check_slice_support( + comp_info->dsc_info.slice_per_pkt, slice_caps_1, + slice_caps_2)) { + if (i == ARRAY_SIZE(slice_per_line_tbl)) + return -EINVAL; + + rec = &slice_per_line_tbl[i]; + comp_info->dsc_info.slice_per_pkt = rec->num_slices; + slice_width = (dp_mode->timing.h_active / + comp_info->dsc_info.slice_per_pkt); + ppr_per_slice = ppr/comp_info->dsc_info.slice_per_pkt; + i++; + } + + comp_info->dsc_info.config.block_pred_enable = + dp_panel->sink_dsc_caps.block_pred_en; + + comp_info->dsc_info.config.pic_width = dp_mode->timing.h_active; + comp_info->dsc_info.config.pic_height = dp_mode->timing.v_active; + comp_info->dsc_info.config.slice_width = slice_width; + + if (comp_info->dsc_info.config.pic_height % 108 == 0) + comp_info->dsc_info.config.slice_height = 108; + else if (comp_info->dsc_info.config.pic_height % 16 == 0) + comp_info->dsc_info.config.slice_height = 16; + else if (comp_info->dsc_info.config.pic_height % 12 == 0) + comp_info->dsc_info.config.slice_height = 12; + else + comp_info->dsc_info.config.slice_height = 15; + + comp_info->dsc_info.config.bits_per_component = + (dp_mode->timing.bpp / 3); + comp_info->dsc_info.config.bits_per_pixel = DSC_TGT_BPP << 4; + comp_info->dsc_info.config.slice_count = + DIV_ROUND_UP(dp_mode->timing.h_active, slice_width); + + comp_info->comp_type = MSM_DISPLAY_COMPRESSION_DSC; + comp_info->tgt_bpp = DSC_TGT_BPP; + comp_info->src_bpp = dp_mode->timing.bpp; + comp_info->comp_ratio = mult_frac(100, dp_mode->timing.bpp, DSC_TGT_BPP); + comp_info->enabled = true; + + return 0; +} + +static int dp_panel_read_dpcd(struct dp_panel *dp_panel, bool multi_func) +{ + int rlen, rc = 0; + struct dp_panel_private *panel; + struct drm_dp_link *link_info; + struct drm_dp_aux *drm_aux; + struct drm_connector *connector; + struct sde_connector *sde_conn; + u8 *dpcd, rx_feature, temp; + u32 dfp_count = 0, offset = DP_DPCD_REV; + + if 
(!dp_panel) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto end; + } + + dpcd = dp_panel->dpcd; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + drm_aux = panel->aux->drm_aux; + link_info = &dp_panel->link_info; + + /* reset vsc data */ + panel->vsc_supported = false; + panel->vscext_supported = false; + panel->vscext_chaining_supported = false; + + connector = dp_panel->connector; + sde_conn = to_sde_connector(connector); + rlen = drm_dp_dpcd_read(drm_aux, DP_TRAINING_AUX_RD_INTERVAL, &temp, 1); + if (rlen != 1) { + DP_ERR("error reading DP_TRAINING_AUX_RD_INTERVAL\n"); + rc = -EINVAL; + goto end; + } + + /* check for EXTENDED_RECEIVER_CAPABILITY_FIELD_PRESENT */ + if (temp & BIT(7)) { + DP_DEBUG("using EXTENDED_RECEIVER_CAPABILITY_FIELD\n"); + offset = DPRX_EXTENDED_DPCD_FIELD; + } + + rlen = drm_dp_dpcd_read(drm_aux, offset, + dp_panel->dpcd, (DP_RECEIVER_CAP_SIZE + 1)); + if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) { + DP_ERR("dpcd read failed, rlen=%d\n", rlen); + if (rlen == -ETIMEDOUT) + rc = rlen; + else + rc = -EINVAL; + + goto end; + } + + print_hex_dump_debug("[drm-dp] SINK DPCD: ", + DUMP_PREFIX_NONE, 8, 1, dp_panel->dpcd, rlen, false); + + rlen = drm_dp_dpcd_read(panel->aux->drm_aux, + DPRX_FEATURE_ENUMERATION_LIST, &rx_feature, 1); + if (rlen != 1) { + DP_DEBUG("failed to read DPRX_FEATURE_ENUMERATION_LIST\n"); + rx_feature = 0; + } else { + panel->vsc_supported = !!(rx_feature & + VSC_SDP_EXTENSION_FOR_COLORIMETRY_SUPPORTED); + panel->vscext_supported = !!(rx_feature & + VSC_EXT_VESA_SDP_SUPPORTED); + panel->vscext_chaining_supported = !!(rx_feature & + VSC_EXT_VESA_SDP_CHAINING_SUPPORTED); + + sde_conn->hdr_supported = panel->vsc_supported; + DP_DEBUG("vsc=%d, vscext=%d, vscext_chaining=%d\n", + panel->vsc_supported, panel->vscext_supported, + panel->vscext_chaining_supported); + } + + link_info->revision = dpcd[DP_DPCD_REV]; + panel->major = (link_info->revision >> 4) & 0x0f; + panel->minor = link_info->revision & 0x0f; + 
/* override link params updated in dp_panel_init_panel_info */ + link_info->rate = min_t(unsigned long, panel->parser->max_lclk_khz, + drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE])); + + link_info->num_lanes = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; + + if (is_link_rate_valid(panel->dp_panel.link_bw_code)) { + DP_DEBUG("debug link bandwidth code: 0x%x\n", + panel->dp_panel.link_bw_code); + link_info->rate = drm_dp_bw_code_to_link_rate( + panel->dp_panel.link_bw_code); + } + + if (is_lane_count_valid(panel->dp_panel.lane_count)) { + DP_DEBUG("debug lane count: %d\n", panel->dp_panel.lane_count); + link_info->num_lanes = panel->dp_panel.lane_count; + } + + if (multi_func) + link_info->num_lanes = min_t(unsigned int, + link_info->num_lanes, 2); + + DP_DEBUG("version:%d.%d, rate:%d, lanes:%d\n", panel->major, + panel->minor, link_info->rate, link_info->num_lanes); + + if (drm_dp_enhanced_frame_cap(dpcd)) + link_info->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING; + + rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_TEST_SINK_MISC, &temp, 1); + if ((rlen == 1) && (temp & DP_TEST_CRC_SUPPORTED)) + link_info->capabilities |= DP_LINK_CAP_CRC; + + dfp_count = dpcd[DP_DOWN_STREAM_PORT_COUNT] & + DP_DOWN_STREAM_PORT_COUNT; + + if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) + && (dpcd[DP_DPCD_REV] > 0x10)) { + rlen = drm_dp_dpcd_read(panel->aux->drm_aux, + DP_DOWNSTREAM_PORT_0, dp_panel->ds_ports, + DP_MAX_DOWNSTREAM_PORTS); + if (rlen < DP_MAX_DOWNSTREAM_PORTS) { + DP_ERR("ds port status failed, rlen=%d\n", rlen); + rc = -EINVAL; + goto end; + } + } + + if (dfp_count > DP_MAX_DS_PORT_COUNT) + DP_DEBUG("DS port count %d greater that max (%d) supported\n", + dfp_count, DP_MAX_DS_PORT_COUNT); + +end: + return rc; +} + +static int dp_panel_set_default_link_params(struct dp_panel *dp_panel) +{ + struct drm_dp_link *link_info; + const int default_bw_code = 162000; + const int default_num_lanes = 1; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + 
return -EINVAL; + } + link_info = &dp_panel->link_info; + link_info->rate = default_bw_code; + link_info->num_lanes = default_num_lanes; + DP_DEBUG("link_rate=%d num_lanes=%d\n", + link_info->rate, link_info->num_lanes); + + return 0; +} + +static int dp_panel_read_edid(struct dp_panel *dp_panel, + struct drm_connector *connector) +{ + int ret = 0; + struct dp_panel_private *panel; + struct edid *edid; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + sde_get_edid(connector, &panel->aux->drm_aux->ddc, + (void **)&dp_panel->edid_ctrl); + if (!dp_panel->edid_ctrl->edid) { + DP_ERR("EDID read failed\n"); + ret = -EINVAL; + goto end; + } +end: + edid = dp_panel->edid_ctrl->edid; + dp_panel->audio_supported = drm_detect_monitor_audio(edid); + + return ret; +} + +static void dp_panel_decode_dsc_dpcd(struct dp_panel *dp_panel) +{ + if (dp_panel->dsc_dpcd[0]) { + dp_panel->sink_dsc_caps.dsc_capable = true; + dp_panel->sink_dsc_caps.version = dp_panel->dsc_dpcd[1]; + dp_panel->sink_dsc_caps.block_pred_en = + dp_panel->dsc_dpcd[6] ? 
true : false; + dp_panel->sink_dsc_caps.color_depth = + dp_panel->dsc_dpcd[10]; + + if (dp_panel->sink_dsc_caps.version >= 0x11) + dp_panel->dsc_en = true; + } else { + dp_panel->sink_dsc_caps.dsc_capable = false; + dp_panel->dsc_en = false; + } +} + +static void dp_panel_read_sink_dsc_caps(struct dp_panel *dp_panel) +{ + int rlen; + struct dp_panel_private *panel; + int dpcd_rev; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + return; + } + + dpcd_rev = dp_panel->dpcd[DP_DPCD_REV]; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + if (panel->parser->dsc_feature_enable && dpcd_rev >= 0x14) { + rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_DSC_SUPPORT, + dp_panel->dsc_dpcd, (DP_RECEIVER_DSC_CAP_SIZE + 1)); + if (rlen < (DP_RECEIVER_DSC_CAP_SIZE + 1)) { + DP_DEBUG("dsc dpcd read failed, rlen=%d\n", rlen); + return; + } + + print_hex_dump_debug("[drm-dp] SINK DSC DPCD: ", + DUMP_PREFIX_NONE, 8, 1, dp_panel->dsc_dpcd, rlen, + false); + + dp_panel_decode_dsc_dpcd(dp_panel); + } +} + +static void dp_panel_read_sink_fec_caps(struct dp_panel *dp_panel) +{ + int rlen; + struct dp_panel_private *panel; + s64 fec_overhead_fp = drm_fixp_from_fraction(1, 1); + + if (!dp_panel) { + DP_ERR("invalid input\n"); + return; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + rlen = drm_dp_dpcd_readb(panel->aux->drm_aux, DP_FEC_CAPABILITY, + &dp_panel->fec_dpcd); + if (rlen < 1) { + DP_ERR("fec capability read failed, rlen=%d\n", rlen); + return; + } + + dp_panel->fec_en = dp_panel->fec_dpcd & DP_FEC_CAPABLE; + if (dp_panel->fec_en) + fec_overhead_fp = drm_fixp_from_fraction(100000, 97582); + + dp_panel->fec_overhead_fp = fec_overhead_fp; + + return; +} + +static int dp_panel_read_sink_caps(struct dp_panel *dp_panel, + struct drm_connector *connector, bool multi_func) +{ + int rc = 0, rlen, count, downstream_ports; + const int count_len = 1; + struct dp_panel_private *panel; + + if (!dp_panel || !connector) { + DP_ERR("invalid 
input\n"); + rc = -EINVAL; + goto end; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + rc = dp_panel_read_dpcd(dp_panel, multi_func); + if (rc || !is_link_rate_valid(drm_dp_link_rate_to_bw_code( + dp_panel->link_info.rate)) || !is_lane_count_valid( + dp_panel->link_info.num_lanes) || + ((drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate)) > + dp_panel->max_bw_code)) { + if ((rc == -ETIMEDOUT) || (rc == -ENODEV)) { + DP_ERR("DPCD read failed, return early\n"); + goto end; + } + DP_ERR("panel dpcd read failed/incorrect, set default params\n"); + dp_panel_set_default_link_params(dp_panel); + } + + downstream_ports = dp_panel->dpcd[DP_DOWNSTREAMPORT_PRESENT] & + DP_DWN_STRM_PORT_PRESENT; + + if (downstream_ports) { + rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_SINK_COUNT, + &count, count_len); + if (rlen == count_len) { + count = DP_GET_SINK_COUNT(count); + if (!count) { + DP_ERR("no downstream ports connected\n"); + panel->link->sink_count.count = 0; + rc = -ENOTCONN; + goto end; + } + } + } + + /* There is no need to read EDID from MST branch */ + if (panel->parser->has_mst && dp_panel->read_mst_cap(dp_panel)) + goto skip_edid; + + rc = dp_panel_read_edid(dp_panel, connector); + if (rc) { + DP_ERR("panel edid read failed, set failsafe mode\n"); + return rc; + } + +skip_edid: + dp_panel->widebus_en = panel->parser->has_widebus; + dp_panel->dsc_feature_enable = panel->parser->dsc_feature_enable; + dp_panel->fec_feature_enable = panel->parser->fec_feature_enable; + + dp_panel->fec_en = false; + dp_panel->dsc_en = false; + + if (dp_panel->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 && + dp_panel->fec_feature_enable) { + dp_panel_read_sink_fec_caps(dp_panel); + + if (dp_panel->dsc_feature_enable && dp_panel->fec_en) + dp_panel_read_sink_dsc_caps(dp_panel); + } + + DP_INFO("fec_en=%d, dsc_en=%d, widebus_en=%d\n", dp_panel->fec_en, + dp_panel->dsc_en, dp_panel->widebus_en); +end: + return rc; +} + +static u32 dp_panel_get_supported_bpp(struct 
dp_panel *dp_panel, + u32 mode_edid_bpp, u32 mode_pclk_khz, bool dsc_en) +{ + struct dp_link_params *link_params; + struct dp_panel_private *panel; + u32 max_supported_bpp = dp_panel->max_supported_bpp; + u32 min_supported_bpp = 18; + u32 bpp = 0, link_bitrate = 0, mode_bitrate; + s64 rate_fp = 0; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + if (dp_panel->mst_state && panel->base) + max_supported_bpp = panel->base->max_supported_bpp; + + if (dsc_en) + min_supported_bpp = 24; + + bpp = min_t(u32, mode_edid_bpp, max_supported_bpp); + + link_params = &panel->link->link_params; + + rate_fp = drm_int2fixp(drm_dp_bw_code_to_link_rate(link_params->bw_code) * + link_params->lane_count * 8); + + if (dp_panel->fec_en) + rate_fp = drm_fixp_div(rate_fp, dp_panel->fec_overhead_fp); + + link_bitrate = drm_fixp2int(rate_fp); + + for (; bpp > min_supported_bpp; bpp -= 6) { + if (dsc_en) { + if (bpp == 30 && !(dp_panel->sink_dsc_caps.color_depth & DP_DSC_10_BPC)) + continue; + else if (bpp == 24 && !(dp_panel->sink_dsc_caps.color_depth & DP_DSC_8_BPC)) + continue; + + mode_bitrate = mode_pclk_khz * DSC_TGT_BPP; + } else { + mode_bitrate = mode_pclk_khz * bpp; + } + + if (mode_bitrate <= link_bitrate) + break; + } + + if (bpp < min_supported_bpp) + DP_ERR("bpp %d is below minimum supported bpp %d\n", bpp, + min_supported_bpp); + if (dsc_en && bpp != 24 && bpp != 30 && bpp != 36) + DP_ERR("bpp %d is not supported when dsc is enabled\n", bpp); + + return bpp; +} + +static u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, + u32 mode_edid_bpp, u32 mode_pclk_khz, bool dsc_en) +{ + struct dp_panel_private *panel; + u32 bpp = mode_edid_bpp; + + if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) { + DP_ERR("invalid input\n"); + return 0; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + if (dp_panel->video_test) + bpp = dp_link_bit_depth_to_bpp( + panel->link->test_video.test_bit_depth); + else + bpp = 
dp_panel_get_supported_bpp(dp_panel, mode_edid_bpp, + mode_pclk_khz, dsc_en); + + return bpp; +} + +static void dp_panel_set_test_mode(struct dp_panel_private *panel, + struct dp_display_mode *mode) +{ + struct dp_panel_info *pinfo = NULL; + struct dp_link_test_video *test_info = NULL; + + if (!panel) { + DP_ERR("invalid params\n"); + return; + } + + pinfo = &mode->timing; + test_info = &panel->link->test_video; + + pinfo->h_active = test_info->test_h_width; + pinfo->h_sync_width = test_info->test_hsync_width; + pinfo->h_back_porch = test_info->test_h_start - + test_info->test_hsync_width; + pinfo->h_front_porch = test_info->test_h_total - + (test_info->test_h_start + test_info->test_h_width); + + pinfo->v_active = test_info->test_v_height; + pinfo->v_sync_width = test_info->test_vsync_width; + pinfo->v_back_porch = test_info->test_v_start - + test_info->test_vsync_width; + pinfo->v_front_porch = test_info->test_v_total - + (test_info->test_v_start + test_info->test_v_height); + + pinfo->bpp = dp_link_bit_depth_to_bpp(test_info->test_bit_depth); + pinfo->h_active_low = test_info->test_hsync_pol; + pinfo->v_active_low = test_info->test_vsync_pol; + + pinfo->refresh_rate = test_info->test_rr_n; + pinfo->pixel_clk_khz = test_info->test_h_total * + test_info->test_v_total * pinfo->refresh_rate; + + if (test_info->test_rr_d == 0) + pinfo->pixel_clk_khz /= 1000; + else + pinfo->pixel_clk_khz /= 1001; + + if (test_info->test_h_width == 640) + pinfo->pixel_clk_khz = 25170; +} + +static int dp_panel_get_modes(struct dp_panel *dp_panel, + struct drm_connector *connector, struct dp_display_mode *mode) +{ + struct dp_panel_private *panel; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + if (dp_panel->video_test) { + dp_panel_set_test_mode(panel, mode); + return 1; + } else if (dp_panel->edid_ctrl->edid) { + return _sde_edid_update_modes(connector, dp_panel->edid_ctrl); + } + + 
	return 0;
}

/*
 * Service pending sink (test) requests latched by the link layer.
 * Currently handles only DP_TEST_LINK_EDID_READ: reply with the EDID
 * checksum (real EDID if one was parsed, otherwise the checksum cached
 * on the connector) followed by a TEST_ACK response.
 */
static void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
{
	struct dp_panel_private *panel;

	if (!dp_panel) {
		DP_ERR("invalid input\n");
		return;
	}

	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);

	if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) {
		u8 checksum;

		if (dp_panel->edid_ctrl->edid)
			checksum = sde_get_edid_checksum(dp_panel->edid_ctrl);
		else
			checksum = dp_panel->connector->real_edid_checksum;

		panel->link->send_edid_checksum(panel->link, checksum);
		panel->link->send_test_response(panel->link);
	}
}

/*
 * Program the Test Pattern Generator for this stream.
 * pattern == 0 disables TPG. Timing parameters are derived from the
 * panel's current mode; requires the panel to already be enabled,
 * otherwise the request is deferred to the next panel-on.
 */
static void dp_panel_tpg_config(struct dp_panel *dp_panel, u32 pattern)
{
	u32 hsync_start_x, hsync_end_x, hactive;
	struct dp_catalog_panel *catalog;
	struct dp_panel_private *panel;
	struct dp_panel_info *pinfo;

	if (!dp_panel) {
		DP_ERR("invalid input\n");
		return;
	}

	if (dp_panel->stream_id >= DP_STREAM_MAX) {
		DP_ERR("invalid stream id:%d\n", dp_panel->stream_id);
		return;
	}

	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
	catalog = panel->catalog;
	pinfo = &panel->dp_panel.pinfo;

	if (!panel->panel_on) {
		DP_DEBUG("DP panel not enabled, handle TPG on next panel on\n");
		return;
	}

	if (!pattern) {
		/* pattern 0 == disable: let catalog tear TPG down */
		panel->catalog->tpg_config(catalog, pattern);
		return;
	}

	/* in widebus mode two pixels move per clock, so halve hactive */
	hactive = pinfo->h_active;
	if (pinfo->widebus_en)
		hactive >>= 1;

	/* TPG config */
	catalog->hsync_period = pinfo->h_sync_width + pinfo->h_back_porch +
			hactive + pinfo->h_front_porch;
	catalog->vsync_period = pinfo->v_sync_width + pinfo->v_back_porch +
			pinfo->v_active + pinfo->v_front_porch;

	/* vertical start/end expressed in hsync-period units (pixels) */
	catalog->display_v_start = ((pinfo->v_sync_width +
			pinfo->v_back_porch) * catalog->hsync_period);
	catalog->display_v_end = ((catalog->vsync_period -
			pinfo->v_front_porch) * catalog->hsync_period) - 1;

	catalog->display_v_start += pinfo->h_sync_width + pinfo->h_back_porch;
	catalog->display_v_end -= pinfo->h_front_porch;

	hsync_start_x =
pinfo->h_back_porch + pinfo->h_sync_width; + hsync_end_x = catalog->hsync_period - pinfo->h_front_porch - 1; + + catalog->v_sync_width = pinfo->v_sync_width; + + catalog->hsync_ctl = (catalog->hsync_period << 16) | + pinfo->h_sync_width; + catalog->display_hctl = (hsync_end_x << 16) | hsync_start_x; + + panel->catalog->tpg_config(catalog, pattern); +} + +static int dp_panel_config_timing(struct dp_panel *dp_panel) +{ + int rc = 0; + u32 data, total_ver, total_hor; + struct dp_catalog_panel *catalog; + struct dp_panel_private *panel; + struct dp_panel_info *pinfo; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto end; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + catalog = panel->catalog; + pinfo = &panel->dp_panel.pinfo; + + DP_DEBUG("width=%d hporch= %d %d %d\n", + pinfo->h_active, pinfo->h_back_porch, + pinfo->h_front_porch, pinfo->h_sync_width); + + DP_DEBUG("height=%d vporch= %d %d %d\n", + pinfo->v_active, pinfo->v_back_porch, + pinfo->v_front_porch, pinfo->v_sync_width); + + total_hor = pinfo->h_active + pinfo->h_back_porch + + pinfo->h_front_porch + pinfo->h_sync_width; + + total_ver = pinfo->v_active + pinfo->v_back_porch + + pinfo->v_front_porch + pinfo->v_sync_width; + + data = total_ver; + data <<= 16; + data |= total_hor; + + catalog->total = data; + + data = (pinfo->v_back_porch + pinfo->v_sync_width); + data <<= 16; + data |= (pinfo->h_back_porch + pinfo->h_sync_width); + + catalog->sync_start = data; + + data = pinfo->v_sync_width; + data <<= 16; + data |= (pinfo->v_active_low << 31); + data |= pinfo->h_sync_width; + data |= (pinfo->h_active_low << 15); + + catalog->width_blanking = data; + + data = pinfo->v_active; + data <<= 16; + data |= pinfo->h_active; + + catalog->dp_active = data; + + catalog->widebus_en = pinfo->widebus_en; + + panel->catalog->timing_cfg(catalog); + panel->panel_on = true; +end: + return rc; +} + +static u32 _dp_panel_calc_be_in_lane(struct dp_panel *dp_panel) +{ + struct 
msm_compression_info *comp_info; + u32 htotal, mod_result; + u32 be_in_lane = 10; + + comp_info = &dp_panel->pinfo.comp_info; + + if (!dp_panel->mst_state) + return be_in_lane; + + htotal = comp_info->dsc_info.bytes_per_pkt * comp_info->dsc_info.pkt_per_line; + + mod_result = htotal % 12; + if (mod_result == 0) + be_in_lane = 8; + else if (mod_result <= 3) + be_in_lane = 1; + else if (mod_result <= 6) + be_in_lane = 2; + else if (mod_result <= 9) + be_in_lane = 4; + else if (mod_result <= 11) + be_in_lane = 8; + else + be_in_lane = 10; + + return be_in_lane; +} + +static void dp_panel_config_dsc(struct dp_panel *dp_panel, bool enable) +{ + struct dp_catalog_panel *catalog; + struct dp_panel_private *panel; + struct dp_panel_info *pinfo; + struct msm_compression_info *comp_info; + struct dp_dsc_cfg_data *dsc; + int rc; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + catalog = panel->catalog; + dsc = &catalog->dsc; + pinfo = &dp_panel->pinfo; + comp_info = &pinfo->comp_info; + + if (comp_info->comp_type == MSM_DISPLAY_COMPRESSION_DSC && enable) { + rc = sde_dsc_create_pps_buf_cmd(&comp_info->dsc_info, + dsc->pps, 0, sizeof(dsc->pps)); + if (rc) { + DP_ERR("failed to create pps cmd %d\n", rc); + return; + } + dsc->pps_len = DSC_1_1_PPS_PARAMETER_SET_ELEMENTS; + dp_panel_dsc_prepare_pps_packet(dp_panel); + + dsc->slice_per_pkt = comp_info->dsc_info.slice_per_pkt - 1; + dsc->bytes_per_pkt = comp_info->dsc_info.bytes_per_pkt; + dsc->bytes_per_pkt /= comp_info->dsc_info.slice_per_pkt; + dsc->eol_byte_num = comp_info->dsc_info.eol_byte_num; + dsc->dto_count = comp_info->dsc_info.pclk_per_line; + dsc->be_in_lane = _dp_panel_calc_be_in_lane(dp_panel); + dsc->dsc_en = true; + dsc->dto_en = true; + dsc->continuous_pps = dp_panel->dsc_continuous_pps; + dp_panel_get_dto_params(comp_info->src_bpp, comp_info->tgt_bpp, &dsc->dto_n, + &dsc->dto_d); + } else { + dsc->dsc_en = false; + dsc->dto_en = false; + dsc->dto_n = 0; + dsc->dto_d = 0; + 
		dsc->continuous_pps = false;
	}

	catalog->stream_id = dp_panel->stream_id;
	catalog->dsc_cfg(catalog);

	/* flush the PPS SDP only when DSC was actually enabled */
	if (catalog->dsc.dsc_en && enable)
		catalog->pps_flush(catalog);
}

/*
 * Allocate the EDID parser context for this panel.
 * Returns 0 on success, -ENOMEM if sde_edid_init() fails.
 */
static int dp_panel_edid_register(struct dp_panel_private *panel)
{
	int rc = 0;

	panel->dp_panel.edid_ctrl = sde_edid_init();
	if (!panel->dp_panel.edid_ctrl) {
		DP_ERR("sde edid init for DP failed\n");
		rc = -ENOMEM;
	}

	return rc;
}

/* Release the EDID parser context (NULLs the pointer via double indirection). */
static void dp_panel_edid_deregister(struct dp_panel_private *panel)
{
	sde_edid_deinit((void **)&panel->dp_panel.edid_ctrl);
}

/*
 * Cache MST stream assignment (stream id, timeslot range, PBN, VCPI)
 * on the panel. Note the bound check is deliberately '>' not '>=':
 * DP_STREAM_MAX is a legal sentinel used by deinit to mark
 * "no stream assigned".
 */
static int dp_panel_set_stream_info(struct dp_panel *dp_panel,
		enum dp_stream_id stream_id, u32 ch_start_slot,
		u32 ch_tot_slots, u32 pbn, int vcpi)
{
	if (!dp_panel || stream_id > DP_STREAM_MAX) {
		DP_ERR("invalid input. stream_id: %d\n", stream_id);
		return -EINVAL;
	}

	dp_panel->vcpi = vcpi;
	dp_panel->stream_id = stream_id;
	dp_panel->channel_start_slot = ch_start_slot;
	dp_panel->channel_total_slots = ch_tot_slots;
	dp_panel->pbn = pbn;

	return 0;
}

/*
 * Power-cycle the sink over AUX (D3 then D0) so it starts from a known
 * power state before link training.
 */
static int dp_panel_init_panel_info(struct dp_panel *dp_panel)
{
	int rc = 0;
	struct dp_panel_private *panel;
	struct dp_panel_info *pinfo;

	if (!dp_panel) {
		DP_ERR("invalid input\n");
		rc = -EINVAL;
		goto end;
	}

	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
	pinfo = &dp_panel->pinfo;

	drm_dp_dpcd_writeb(panel->aux->drm_aux, DP_SET_POWER, DP_SET_POWER_D3);
	/* 200us propagation time for the power down to take effect */
	usleep_range(200, 205);
	drm_dp_dpcd_writeb(panel->aux->drm_aux, DP_SET_POWER, DP_SET_POWER_D0);

	/*
	 * According to the DP 1.1 specification, a "Sink Device must exit the
	 * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
	 * Control Field" (register 0x600).
	 */
	usleep_range(1000, 2000);
end:
	return rc;
}

/*
 * Tear down per-connection panel state on disconnect/power-off.
 * Clears cached link capabilities, EDID, stream assignment, timing,
 * HDR/colorimetry SDP state and the connector's HDR properties.
 * If DP_PANEL_SRC_INITIATED_POWER_DOWN is set (source-side power-down
 * with the cable still attached) all state is retained instead.
 */
static int dp_panel_deinit_panel_info(struct dp_panel *dp_panel, u32 flags)
{
	int rc = 0;
	struct dp_panel_private *panel;
	struct drm_msm_ext_hdr_metadata *hdr_meta;
	struct dp_sdp_header *dhdr_vsif_sdp;
	struct sde_connector *sde_conn;
	struct dp_sdp_header *shdr_if_sdp;
	struct dp_catalog_vsc_sdp_colorimetry *vsc_colorimetry;
	struct drm_connector *connector;
	struct sde_connector_state *c_state;

	if (flags & DP_PANEL_SRC_INITIATED_POWER_DOWN) {
		DP_DEBUG("retain states in src initiated power down request\n");
		return 0;
	}

	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
	hdr_meta = &panel->catalog->hdr_meta;
	dhdr_vsif_sdp = &panel->catalog->dhdr_vsif_sdp;
	shdr_if_sdp = &panel->catalog->shdr_if_sdp;
	vsc_colorimetry = &panel->catalog->vsc_colorimetry;

	/* clear cached link capabilities during disconnect */
	dp_panel->link_info.capabilities = 0;

	if (dp_panel->edid_ctrl->edid)
		sde_free_edid((void **)&dp_panel->edid_ctrl);

	/* DP_STREAM_MAX == "no stream assigned" sentinel */
	dp_panel_set_stream_info(dp_panel, DP_STREAM_MAX, 0, 0, 0, 0);
	memset(&dp_panel->pinfo, 0, sizeof(dp_panel->pinfo));
	memset(hdr_meta, 0, sizeof(struct drm_msm_ext_hdr_metadata));
	memset(dhdr_vsif_sdp, 0, sizeof(struct dp_sdp_header));
	memset(shdr_if_sdp, 0, sizeof(struct dp_sdp_header));
	memset(vsc_colorimetry, 0,
		sizeof(struct dp_catalog_vsc_sdp_colorimetry));

	panel->panel_on = false;

	connector = dp_panel->connector;
	sde_conn = to_sde_connector(connector);
	c_state = to_sde_connector_state(connector->state);

	/* reset connector-level HDR capability cache */
	sde_conn->hdr_eotf = 0;
	sde_conn->hdr_metadata_type_one = 0;
	sde_conn->hdr_max_luminance = 0;
	sde_conn->hdr_avg_luminance = 0;
	sde_conn->hdr_min_luminance = 0;
	sde_conn->hdr_supported = false;
	sde_conn->hdr_plus_app_ver = 0;

	sde_conn->colorspace_updated = false;

	memset(&c_state->hdr_meta, 0, sizeof(c_state->hdr_meta));
	memset(&c_state->dyn_hdr_meta, 0,
sizeof(c_state->dyn_hdr_meta)); + + dp_panel->link_bw_code = 0; + dp_panel->lane_count = 0; + + return rc; +} + +static bool dp_panel_hdr_supported(struct dp_panel *dp_panel) +{ + struct dp_panel_private *panel; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + return false; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + return panel->major >= 1 && panel->vsc_supported && + (panel->minor >= 4 || panel->vscext_supported); +} + +static u32 dp_panel_calc_dhdr_pkt_limit(struct dp_panel *dp_panel, + struct dp_dhdr_maxpkt_calc_input *input) +{ + s64 mdpclk_fp = drm_fixp_from_fraction(input->mdp_clk, 1000000); + s64 lclk_fp = drm_fixp_from_fraction(input->lclk, 1000); + s64 pclk_fp = drm_fixp_from_fraction(input->pclk, 1000); + s64 nlanes_fp = drm_int2fixp(input->nlanes); + s64 target_sc = input->mst_target_sc; + s64 hactive_fp = drm_int2fixp(input->h_active); + const s64 i1_fp = DRM_FIXED_ONE; + const s64 i2_fp = drm_int2fixp(2); + const s64 i10_fp = drm_int2fixp(10); + const s64 i56_fp = drm_int2fixp(56); + const s64 i64_fp = drm_int2fixp(64); + s64 mst_bw_fp = i1_fp; + s64 fec_factor_fp = i1_fp; + s64 mst_bw64_fp, mst_bw64_ceil_fp, nlanes56_fp; + u32 f1, f2, f3, f4, f5, deploy_period, target_period; + s64 f3_f5_slot_fp; + u32 calc_pkt_limit; + const u32 max_pkt_limit = 64; + + if (input->fec_en && input->mst_en) + fec_factor_fp = drm_fixp_from_fraction(64000, 65537); + + if (input->mst_en) + mst_bw_fp = drm_fixp_div(target_sc, i64_fp); + + f1 = fixp2int_ceil(drm_fixp_div(drm_fixp_mul(i10_fp, lclk_fp), + mdpclk_fp)); + f2 = fixp2int_ceil(drm_fixp_div(drm_fixp_mul(i2_fp, lclk_fp), + mdpclk_fp)) + fixp2int_ceil(drm_fixp_div( + drm_fixp_mul(i1_fp, lclk_fp), mdpclk_fp)); + + mst_bw64_fp = drm_fixp_mul(mst_bw_fp, i64_fp); + if (drm_fixp2int(mst_bw64_fp) == 0) + f3_f5_slot_fp = drm_fixp_div(i1_fp, drm_int2fixp( + fixp2int_ceil(drm_fixp_div( + i1_fp, mst_bw64_fp)))); + else + f3_f5_slot_fp = drm_int2fixp(drm_fixp2int(mst_bw_fp)); + + 
mst_bw64_ceil_fp = drm_int2fixp(fixp2int_ceil(mst_bw64_fp)); + f3 = drm_fixp2int(drm_fixp_mul(drm_int2fixp(drm_fixp2int( + drm_fixp_div(i2_fp, f3_f5_slot_fp)) + 1), + (i64_fp - mst_bw64_ceil_fp))) + 2; + + if (!input->mst_en) { + f4 = 1 + drm_fixp2int(drm_fixp_div(drm_int2fixp(50), + nlanes_fp)) + drm_fixp2int(drm_fixp_div( + nlanes_fp, i2_fp)); + f5 = 0; + } else { + f4 = 0; + nlanes56_fp = drm_fixp_div(i56_fp, nlanes_fp); + f5 = drm_fixp2int(drm_fixp_mul(drm_int2fixp(drm_fixp2int( + drm_fixp_div(i1_fp + nlanes56_fp, + f3_f5_slot_fp)) + 1), (i64_fp - + mst_bw64_ceil_fp + i1_fp + nlanes56_fp))); + } + + deploy_period = f1 + f2 + f3 + f4 + f5 + 19; + target_period = drm_fixp2int(drm_fixp_mul(fec_factor_fp, drm_fixp_mul( + hactive_fp, drm_fixp_div(lclk_fp, pclk_fp)))); + + calc_pkt_limit = target_period / deploy_period; + + DP_DEBUG("input: %d, %d, %d, %d, %d, 0x%llx, %d, %d\n", + input->mdp_clk, input->lclk, input->pclk, input->h_active, + input->nlanes, input->mst_target_sc, input->mst_en ? 1 : 0, + input->fec_en ? 1 : 0); + DP_DEBUG("factors: %d, %d, %d, %d, %d\n", f1, f2, f3, f4, f5); + DP_DEBUG("d_p: %d, t_p: %d, maxPkts: %d%s\n", deploy_period, + target_period, calc_pkt_limit, calc_pkt_limit > max_pkt_limit ? 
+ " CAPPED" : ""); + + if (calc_pkt_limit > max_pkt_limit) + calc_pkt_limit = max_pkt_limit; + + DP_DEBUG("packet limit per line = %d\n", calc_pkt_limit); + return calc_pkt_limit; +} + +static void dp_panel_setup_colorimetry_sdp(struct dp_panel *dp_panel, + u32 cspace) +{ + struct dp_panel_private *panel; + struct dp_catalog_vsc_sdp_colorimetry *hdr_colorimetry; + u8 bpc; + u32 colorimetry = 0; + u32 dynamic_range = 0; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + hdr_colorimetry = &panel->catalog->vsc_colorimetry; + + hdr_colorimetry->header.HB0 = 0x00; + hdr_colorimetry->header.HB1 = 0x07; + hdr_colorimetry->header.HB2 = 0x05; + hdr_colorimetry->header.HB3 = 0x13; + + get_sdp_colorimetry_range(panel, cspace, &colorimetry, + &dynamic_range); + + /* VSC SDP Payload for DB16 */ + hdr_colorimetry->data[16] = (RGB << 4) | colorimetry; + + /* VSC SDP Payload for DB17 */ + hdr_colorimetry->data[17] = (dynamic_range << 7); + bpc = (dp_panel->pinfo.bpp / 3); + + switch (bpc) { + default: + case 10: + hdr_colorimetry->data[17] |= BIT(1); + break; + case 8: + hdr_colorimetry->data[17] |= BIT(0); + break; + case 6: + hdr_colorimetry->data[17] |= 0; + break; + } + + /* VSC SDP Payload for DB18 */ + hdr_colorimetry->data[18] = GRAPHICS; +} + +static void dp_panel_setup_hdr_if(struct dp_panel_private *panel) +{ + struct dp_sdp_header *shdr_if; + + shdr_if = &panel->catalog->shdr_if_sdp; + + shdr_if->HB0 = 0x00; + shdr_if->HB1 = 0x87; + shdr_if->HB2 = 0x1D; + shdr_if->HB3 = 0x13 << 2; +} + +static void dp_panel_setup_dhdr_vsif(struct dp_panel_private *panel) +{ + struct dp_sdp_header *dhdr_vsif; + + dhdr_vsif = &panel->catalog->dhdr_vsif_sdp; + + dhdr_vsif->HB0 = 0x00; + dhdr_vsif->HB1 = 0x81; + dhdr_vsif->HB2 = 0x1D; + dhdr_vsif->HB3 = 0x13 << 2; +} + +static void dp_panel_setup_misc_colorimetry(struct dp_panel *dp_panel, + u32 colorspace) +{ + struct dp_panel_private *panel; + struct dp_catalog_panel *catalog; + + panel = container_of(dp_panel, 
struct dp_panel_private, dp_panel); + catalog = panel->catalog; + + catalog->misc_val &= ~0x1e; + + catalog->misc_val |= (get_misc_colorimetry_val(panel, + colorspace) << 1); +} + +static int dp_panel_set_colorspace(struct dp_panel *dp_panel, + u32 colorspace) +{ + int rc = 0; + struct dp_panel_private *panel; + + if (!dp_panel) { + pr_err("invalid input\n"); + rc = -EINVAL; + goto end; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + if (panel->vsc_supported) + dp_panel_setup_colorimetry_sdp(dp_panel, + colorspace); + else + dp_panel_setup_misc_colorimetry(dp_panel, + colorspace); + + /* + * During the first frame update panel_on will be false and + * the colorspace will be cached in the connector's state which + * shall be used in the dp_panel_hw_cfg + */ + if (panel->panel_on) { + DP_DEBUG("panel is ON programming colorspace\n"); + rc = panel->catalog->set_colorspace(panel->catalog, + panel->vsc_supported); + } + +end: + return rc; +} + +static int dp_panel_setup_hdr(struct dp_panel *dp_panel, + struct drm_msm_ext_hdr_metadata *hdr_meta, + bool dhdr_update, u64 core_clk_rate, bool flush) +{ + int rc = 0, max_pkts = 0; + struct dp_panel_private *panel; + struct dp_dhdr_maxpkt_calc_input input; + struct drm_msm_ext_hdr_metadata *catalog_hdr_meta; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto end; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + catalog_hdr_meta = &panel->catalog->hdr_meta; + + /* use cached meta data in case meta data not provided */ + if (!hdr_meta) { + if (catalog_hdr_meta->hdr_state) + goto cached; + else + goto end; + } + + panel->hdr_state = hdr_meta->hdr_state; + + dp_panel_setup_hdr_if(panel); + + if (panel->hdr_state) { + memcpy(catalog_hdr_meta, hdr_meta, + sizeof(struct drm_msm_ext_hdr_metadata)); + } else { + memset(catalog_hdr_meta, 0, + sizeof(struct drm_msm_ext_hdr_metadata)); + } +cached: + if (dhdr_update) { + dp_panel_setup_dhdr_vsif(panel); + + 
input.mdp_clk = core_clk_rate; + input.lclk = drm_dp_bw_code_to_link_rate( + panel->link->link_params.bw_code); + input.nlanes = panel->link->link_params.lane_count; + input.pclk = dp_panel->pinfo.pixel_clk_khz; + input.h_active = dp_panel->pinfo.h_active; + input.mst_target_sc = dp_panel->mst_target_sc; + input.mst_en = dp_panel->mst_state; + input.fec_en = dp_panel->fec_en; + max_pkts = dp_panel_calc_dhdr_pkt_limit(dp_panel, &input); + } + + if (panel->panel_on) { + panel->catalog->stream_id = dp_panel->stream_id; + panel->catalog->config_hdr(panel->catalog, panel->hdr_state, + max_pkts, flush); + if (dhdr_update) + panel->catalog->dhdr_flush(panel->catalog); + } +end: + return rc; +} + +static int dp_panel_spd_config(struct dp_panel *dp_panel) +{ + int rc = 0; + struct dp_panel_private *panel; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto end; + } + + if (dp_panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream id:%d\n", dp_panel->stream_id); + return -EINVAL; + } + + if (!dp_panel->spd_enabled) { + DP_DEBUG("SPD Infoframe not enabled\n"); + goto end; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + panel->catalog->spd_vendor_name = panel->spd_vendor_name; + panel->catalog->spd_product_description = + panel->spd_product_description; + + panel->catalog->stream_id = dp_panel->stream_id; + panel->catalog->config_spd(panel->catalog); +end: + return rc; +} + +static void dp_panel_config_ctrl(struct dp_panel *dp_panel) +{ + u32 config = 0, tbd; + u8 *dpcd = dp_panel->dpcd; + struct dp_panel_private *panel; + struct dp_catalog_panel *catalog; + struct msm_compression_info *comp_info; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + catalog = panel->catalog; + comp_info = &dp_panel->pinfo.comp_info; + + config |= (2 << 13); /* Default-> LSCLK DIV: 1/4 LCLK */ + config |= (0 << 11); /* RGB */ + + tbd = panel->link->get_test_bits_depth(panel->link, + dp_panel->pinfo.bpp); + + if (tbd 
== DP_TEST_BIT_DEPTH_UNKNOWN || comp_info->enabled) + tbd = (DP_TEST_BIT_DEPTH_8 >> DP_TEST_BIT_DEPTH_SHIFT); + + config |= tbd << 8; + + /* Num of Lanes */ + config |= ((panel->link->link_params.lane_count - 1) << 4); + + if (drm_dp_enhanced_frame_cap(dpcd)) + config |= 0x40; + + config |= 0x04; /* progressive video */ + + config |= 0x03; /* sycn clock & static Mvid */ + + catalog->config_ctrl(catalog, config); +} + +static void dp_panel_config_misc(struct dp_panel *dp_panel) +{ + struct dp_panel_private *panel; + struct dp_catalog_panel *catalog; + struct drm_connector *connector; + u32 misc_val; + u32 tb, cc, colorspace; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + catalog = panel->catalog; + connector = dp_panel->connector; + cc = 0; + + tb = panel->link->get_test_bits_depth(panel->link, dp_panel->pinfo.bpp); + colorspace = connector->state->colorspace; + + + cc = (get_misc_colorimetry_val(panel, colorspace) << 1); + + misc_val = cc; + misc_val |= (tb << 5); + misc_val |= BIT(0); /* Configure clock to synchronous mode */ + + /* if VSC is supported then set bit 6 of MISC1 */ + if (panel->vsc_supported) + misc_val |= BIT(14); + + catalog->misc_val = misc_val; + catalog->config_misc(catalog); +} + +static void dp_panel_config_msa(struct dp_panel *dp_panel) +{ + struct dp_panel_private *panel; + struct dp_catalog_panel *catalog; + u32 rate; + u32 stream_rate_khz; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + catalog = panel->catalog; + + catalog->widebus_en = dp_panel->widebus_en; + + rate = drm_dp_bw_code_to_link_rate(panel->link->link_params.bw_code); + stream_rate_khz = dp_panel->pinfo.pixel_clk_khz; + + catalog->config_msa(catalog, rate, stream_rate_khz); +} + +static void dp_panel_resolution_info(struct dp_panel_private *panel) +{ + struct dp_panel_info *pinfo = &panel->dp_panel.pinfo; + + /* + * print resolution info as this is a result + * of user initiated action of cable connection + */ + DP_INFO("DP 
RESOLUTION: active(back|front|width|low)\n"); + DP_INFO("%d(%d|%d|%d|%d)x%d(%d|%d|%d|%d)@%dfps %dbpp %dKhz %dLR %dLn\n", + pinfo->h_active, pinfo->h_back_porch, pinfo->h_front_porch, + pinfo->h_sync_width, pinfo->h_active_low, + pinfo->v_active, pinfo->v_back_porch, pinfo->v_front_porch, + pinfo->v_sync_width, pinfo->v_active_low, + pinfo->refresh_rate, pinfo->bpp, pinfo->pixel_clk_khz, + panel->link->link_params.bw_code, + panel->link->link_params.lane_count); +} + +static void dp_panel_config_sdp(struct dp_panel *dp_panel, + bool en) +{ + struct dp_panel_private *panel; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel->catalog->stream_id = dp_panel->stream_id; + + panel->catalog->config_sdp(panel->catalog, en); +} + +static int dp_panel_hw_cfg(struct dp_panel *dp_panel, bool enable) +{ + struct dp_panel_private *panel; + struct drm_connector *connector; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + return -EINVAL; + } + + if (dp_panel->stream_id >= DP_STREAM_MAX) { + DP_ERR("invalid stream_id: %d\n", dp_panel->stream_id); + return -EINVAL; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + panel->catalog->stream_id = dp_panel->stream_id; + connector = dp_panel->connector; + + if (enable) { + dp_panel_config_ctrl(dp_panel); + dp_panel_config_misc(dp_panel); + dp_panel_config_msa(dp_panel); + if (panel->vsc_supported) { + dp_panel_setup_colorimetry_sdp(dp_panel, + connector->state->colorspace); + dp_panel_config_sdp(dp_panel, true); + } + dp_panel_config_dsc(dp_panel, enable); + dp_panel_config_tr_unit(dp_panel); + dp_panel_config_timing(dp_panel); + dp_panel_resolution_info(panel); + } else { + dp_panel_config_sdp(dp_panel, false); + } + + panel->catalog->config_dto(panel->catalog, !enable); + + return 0; +} + +static int dp_panel_read_sink_sts(struct dp_panel *dp_panel, u8 *sts, u32 size) +{ + int rlen, rc = 0; + struct dp_panel_private *panel; + + if (!dp_panel || !sts || !size) { + DP_ERR("invalid 
input\n"); + rc = -EINVAL; + return rc; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_SINK_COUNT_ESI, + sts, size); + if (rlen != size) { + DP_ERR("dpcd sink sts fail rlen:%d size:%d\n", rlen, size); + rc = -EINVAL; + return rc; + } + + return 0; +} + +static int dp_panel_update_edid(struct dp_panel *dp_panel, struct edid *edid) +{ + int rc; + + dp_panel->edid_ctrl->edid = edid; + sde_parse_edid(dp_panel->edid_ctrl); + + rc = _sde_edid_update_modes(dp_panel->connector, dp_panel->edid_ctrl); + dp_panel->audio_supported = drm_detect_monitor_audio(edid); + + return rc; +} + +static bool dp_panel_read_mst_cap(struct dp_panel *dp_panel) +{ + int rlen; + struct dp_panel_private *panel; + u8 dpcd; + bool mst_cap = false; + + if (!dp_panel) { + DP_ERR("invalid input\n"); + return 0; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_MSTM_CAP, + &dpcd, 1); + if (rlen < 1) { + DP_ERR("dpcd mstm_cap read failed, rlen=%d\n", rlen); + goto end; + } + + mst_cap = (dpcd & DP_MST_CAP) ? true : false; + +end: + DP_DEBUG("dp mst-cap: %d\n", mst_cap); + + return mst_cap; +} + +static void dp_panel_convert_to_dp_mode(struct dp_panel *dp_panel, + const struct drm_display_mode *drm_mode, + struct dp_display_mode *dp_mode) +{ + const u32 num_components = 3, default_bpp = 24; + struct msm_compression_info *comp_info; + bool dsc_en = (dp_mode->capabilities & DP_PANEL_CAPS_DSC) ? 
true : false; + int rc; + + dp_mode->timing.h_active = drm_mode->hdisplay; + dp_mode->timing.h_back_porch = drm_mode->htotal - drm_mode->hsync_end; + dp_mode->timing.h_sync_width = drm_mode->htotal - + (drm_mode->hsync_start + dp_mode->timing.h_back_porch); + dp_mode->timing.h_front_porch = drm_mode->hsync_start - + drm_mode->hdisplay; + dp_mode->timing.h_skew = drm_mode->hskew; + + dp_mode->timing.v_active = drm_mode->vdisplay; + dp_mode->timing.v_back_porch = drm_mode->vtotal - drm_mode->vsync_end; + dp_mode->timing.v_sync_width = drm_mode->vtotal - + (drm_mode->vsync_start + dp_mode->timing.v_back_porch); + + dp_mode->timing.v_front_porch = drm_mode->vsync_start - + drm_mode->vdisplay; + + dp_mode->timing.refresh_rate = drm_mode_vrefresh(drm_mode); + dp_mode->timing.pixel_clk_khz = drm_mode->clock; + + dp_mode->timing.v_active_low = + !!(drm_mode->flags & DRM_MODE_FLAG_NVSYNC); + + dp_mode->timing.h_active_low = + !!(drm_mode->flags & DRM_MODE_FLAG_NHSYNC); + + dp_mode->timing.bpp = + dp_panel->connector->display_info.bpc * num_components; + if (!dp_mode->timing.bpp) + dp_mode->timing.bpp = default_bpp; + + dp_mode->timing.widebus_en = dp_panel->widebus_en; + dp_mode->timing.dsc_overhead_fp = 0; + + comp_info = &dp_mode->timing.comp_info; + comp_info->src_bpp = default_bpp; + comp_info->tgt_bpp = default_bpp; + comp_info->comp_type = MSM_DISPLAY_COMPRESSION_NONE; + comp_info->comp_ratio = MSM_DISPLAY_COMPRESSION_RATIO_NONE; + comp_info->enabled = false; + + /* As YUV was not supported now, so set the default format to RGB */ + dp_mode->output_format = DP_OUTPUT_FORMAT_RGB; + /* + * If a given videomode can be only supported in YCBCR420, set + * the output format to YUV420. While now our driver did not + * support YUV display over DP, so just place this flag here. + * When we want to support YUV, we can use this flag to do + * a lot of settings, like CDM, CSC and pixel_clock. 
+ */ + if (drm_mode_is_420_only(&dp_panel->connector->display_info, + drm_mode)) { + dp_mode->output_format = DP_OUTPUT_FORMAT_YCBCR420; + DP_DEBUG("YCBCR420 was not supported"); + } + + dp_mode->timing.bpp = dp_panel_get_mode_bpp(dp_panel, + dp_mode->timing.bpp, dp_mode->timing.pixel_clk_khz, dsc_en); + + if (dsc_en) { + if (dp_panel_dsc_prepare_basic_params(comp_info, + dp_mode, dp_panel)) { + DP_DEBUG("prepare DSC basic params failed\n"); + return; + } + + rc = sde_dsc_populate_dsc_config(&comp_info->dsc_info.config, 0); + if (rc) { + DP_DEBUG("failed populating dsc params \n"); + return; + } + + rc = sde_dsc_populate_dsc_private_params(&comp_info->dsc_info, + dp_mode->timing.h_active, dp_mode->timing.widebus_en); + if (rc) { + DP_DEBUG("failed populating other dsc params\n"); + return; + } + + dp_panel_dsc_pclk_param_calc(dp_panel, comp_info, dp_mode); + } + dp_mode->fec_overhead_fp = dp_panel->fec_overhead_fp; +} + +static void dp_panel_update_pps(struct dp_panel *dp_panel, char *pps_cmd) +{ + struct dp_catalog_panel *catalog; + struct dp_panel_private *panel; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + catalog = panel->catalog; + catalog->stream_id = dp_panel->stream_id; + catalog->pps_flush(catalog); +} + +int dp_panel_get_src_crc(struct dp_panel *dp_panel, u16 *crc) +{ + struct dp_catalog_panel *catalog; + struct dp_panel_private *panel; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + catalog = panel->catalog; + return catalog->get_src_crc(catalog, crc); +} + +int dp_panel_get_sink_crc(struct dp_panel *dp_panel, u16 *crc) +{ + int rc = 0; + struct dp_panel_private *panel; + struct drm_dp_aux *drm_aux; + u8 crc_bytes[6]; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + drm_aux = panel->aux->drm_aux; + + /* + * At DP_TEST_CRC_R_CR, there's 6 bytes containing CRC data, 2 bytes + * per component (RGB or CrYCb). 
+ */ + rc = drm_dp_dpcd_read(drm_aux, DP_TEST_CRC_R_CR, crc_bytes, 6); + if (rc != 6) { + DP_ERR("failed to read sink CRC, ret:%d\n", rc); + return -EIO; + } + + rc = 0; + crc[0] = crc_bytes[0] | crc_bytes[1] << 8; + crc[1] = crc_bytes[2] | crc_bytes[3] << 8; + crc[2] = crc_bytes[4] | crc_bytes[5] << 8; + + return rc; +} + +int dp_panel_sink_crc_enable(struct dp_panel *dp_panel, bool enable) +{ + int rc = 0; + struct dp_panel_private *panel; + struct drm_dp_aux *drm_aux; + ssize_t ret; + u8 buf; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + drm_aux = panel->aux->drm_aux; + + if (dp_panel->link_info.capabilities & DP_LINK_CAP_CRC) { + ret = drm_dp_dpcd_readb(drm_aux, DP_TEST_SINK, &buf); + if (ret != 1) { + DP_ERR("failed to read CRC cap, ret:%d\n", ret); + return -EIO; + } + + ret = drm_dp_dpcd_writeb(drm_aux, DP_TEST_SINK, buf | DP_TEST_SINK_START); + if (ret != 1) { + DP_ERR("failed to enable Sink CRC, ret:%d\n", ret); + return -EIO; + } + + drm_dp_dpcd_readb(drm_aux, DP_TEST_SINK, &buf); + } + + return rc; +} + +bool dp_panel_get_panel_on(struct dp_panel *dp_panel) +{ + struct dp_panel_private *panel; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + return panel->panel_on; +} + +struct dp_panel *dp_panel_get(struct dp_panel_in *in) +{ + int rc = 0; + struct dp_panel_private *panel; + struct dp_panel *dp_panel; + struct sde_connector *sde_conn; + + if (!in->dev || !in->catalog || !in->aux || + !in->link || !in->connector) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + panel = devm_kzalloc(in->dev, sizeof(*panel), GFP_KERNEL); + if (!panel) { + rc = -ENOMEM; + goto error; + } + + panel->dev = in->dev; + panel->aux = in->aux; + panel->catalog = in->catalog; + panel->link = in->link; + panel->parser = in->parser; + + dp_panel = &panel->dp_panel; + dp_panel->max_bw_code = DP_LINK_BW_8_1; + dp_panel->spd_enabled = true; + dp_panel->link_bw_code = 0; + dp_panel->lane_count = 0; + 
dp_panel->max_supported_bpp = DP_PANEL_MAX_SUPPORTED_BPP; + + memcpy(panel->spd_vendor_name, vendor_name, (sizeof(u8) * 8)); + memcpy(panel->spd_product_description, product_desc, (sizeof(u8) * 16)); + dp_panel->connector = in->connector; + + dp_panel->dsc_feature_enable = panel->parser->dsc_feature_enable; + dp_panel->fec_feature_enable = panel->parser->fec_feature_enable; + dp_panel->dsc_continuous_pps = panel->parser->dsc_continuous_pps; + + if (in->base_panel) { + panel->base = in->base_panel; + memcpy(dp_panel->dpcd, in->base_panel->dpcd, + DP_RECEIVER_CAP_SIZE + 1); + memcpy(dp_panel->dsc_dpcd, in->base_panel->dsc_dpcd, + DP_RECEIVER_DSC_CAP_SIZE + 1); + memcpy(&dp_panel->link_info, &in->base_panel->link_info, + sizeof(dp_panel->link_info)); + dp_panel->mst_state = in->base_panel->mst_state; + dp_panel->widebus_en = in->base_panel->widebus_en; + dp_panel->fec_en = in->base_panel->fec_en; + dp_panel->dsc_en = in->base_panel->dsc_en; + dp_panel->fec_overhead_fp = in->base_panel->fec_overhead_fp; + dp_panel->sink_dsc_caps = in->base_panel->sink_dsc_caps; + } + + dp_panel->init = dp_panel_init_panel_info; + dp_panel->deinit = dp_panel_deinit_panel_info; + dp_panel->hw_cfg = dp_panel_hw_cfg; + dp_panel->read_sink_caps = dp_panel_read_sink_caps; + dp_panel->get_mode_bpp = dp_panel_get_mode_bpp; + dp_panel->get_modes = dp_panel_get_modes; + dp_panel->handle_sink_request = dp_panel_handle_sink_request; + dp_panel->tpg_config = dp_panel_tpg_config; + dp_panel->spd_config = dp_panel_spd_config; + dp_panel->setup_hdr = dp_panel_setup_hdr; + dp_panel->set_colorspace = dp_panel_set_colorspace; + dp_panel->hdr_supported = dp_panel_hdr_supported; + dp_panel->set_stream_info = dp_panel_set_stream_info; + dp_panel->read_sink_status = dp_panel_read_sink_sts; + dp_panel->update_edid = dp_panel_update_edid; + dp_panel->read_mst_cap = dp_panel_read_mst_cap; + dp_panel->convert_to_dp_mode = dp_panel_convert_to_dp_mode; + dp_panel->update_pps = dp_panel_update_pps; + 
dp_panel->get_src_crc = dp_panel_get_src_crc; + dp_panel->get_sink_crc = dp_panel_get_sink_crc; + dp_panel->sink_crc_enable = dp_panel_sink_crc_enable; + dp_panel->get_panel_on = dp_panel_get_panel_on; + + sde_conn = to_sde_connector(dp_panel->connector); + sde_conn->drv_panel = dp_panel; + + dp_panel_edid_register(panel); + + return dp_panel; +error: + return ERR_PTR(rc); +} + +void dp_panel_put(struct dp_panel *dp_panel) +{ + struct dp_panel_private *panel; + struct sde_connector *sde_conn; + + if (!dp_panel) + return; + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + dp_panel_edid_deregister(panel); + sde_conn = to_sde_connector(dp_panel->connector); + if (sde_conn) + sde_conn->drv_panel = NULL; + + devm_kfree(panel->dev, panel); +} diff --git a/msm/dp/dp_panel.h b/msm/dp/dp_panel.h new file mode 100644 index 000000000..18ae11f5f --- /dev/null +++ b/msm/dp/dp_panel.h @@ -0,0 +1,267 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_PANEL_H_ +#define _DP_PANEL_H_ + +#include + +#include "dp_aux.h" +#include "dp_link.h" +#include "sde_edid_parser.h" +#include "sde_connector.h" +#include "msm_drv.h" + +#define DP_RECEIVER_DSC_CAP_SIZE 15 +#define DP_RECEIVER_FEC_STATUS_SIZE 3 +#define DP_RECEIVER_EXT_CAP_SIZE 4 +/* + * A source initiated power down flag is set + * when the DP is powered off while physical + * DP cable is still connected i.e. without + * HPD or not initiated by sink like HPD_IRQ. + * This can happen if framework reboots or + * device suspends. 
+ */ +#define DP_PANEL_SRC_INITIATED_POWER_DOWN BIT(0) + +#define DP_EXT_REC_CAP_FIELD BIT(7) + +enum dp_lane_count { + DP_LANE_COUNT_1 = 1, + DP_LANE_COUNT_2 = 2, + DP_LANE_COUNT_4 = 4, +}; + +enum dp_output_format { + DP_OUTPUT_FORMAT_RGB, + DP_OUTPUT_FORMAT_YCBCR420, + DP_OUTPUT_FORMAT_YCBCR422, + DP_OUTPUT_FORMAT_YCBCR444, + DP_OUTPUT_FORMAT_INVALID, +}; + +#define DP_MAX_DOWNSTREAM_PORTS 0x10 + +struct dp_panel_info { + u32 h_active; + u32 v_active; + u32 h_back_porch; + u32 h_front_porch; + u32 h_sync_width; + u32 h_active_low; + u32 v_back_porch; + u32 v_front_porch; + u32 v_sync_width; + u32 v_active_low; + u32 h_skew; + u32 refresh_rate; + u32 pixel_clk_khz; + u32 bpp; + bool widebus_en; + struct msm_compression_info comp_info; + s64 dsc_overhead_fp; + u32 pbn_no_overhead; + u32 pbn; +}; + +struct dp_display_mode { + struct dp_panel_info timing; + u32 capabilities; + s64 fec_overhead_fp; + s64 dsc_overhead_fp; + /** + * @output_format: + * + * This is used to indicate DP output format. + * The output format can be read from drm_mode. 
+ */ + enum dp_output_format output_format; + u32 lm_count; +}; + +struct dp_panel; + +struct dp_panel_in { + struct device *dev; + struct dp_aux *aux; + struct dp_link *link; + struct dp_catalog_panel *catalog; + struct drm_connector *connector; + struct dp_panel *base_panel; + struct dp_parser *parser; +}; + +struct dp_dsc_caps { + bool dsc_capable; + u8 version; + bool block_pred_en; + u8 color_depth; +}; + +struct dp_audio; + +#define DP_PANEL_CAPS_DSC BIT(0) + +struct dp_panel { + /* dpcd raw data */ + u8 dpcd[DP_RECEIVER_CAP_SIZE + DP_RECEIVER_EXT_CAP_SIZE + 1]; + u8 ds_ports[DP_MAX_DOWNSTREAM_PORTS]; + u8 dsc_dpcd[DP_RECEIVER_DSC_CAP_SIZE + 1]; + u8 fec_dpcd; + u8 fec_sts_dpcd[DP_RECEIVER_FEC_STATUS_SIZE + 1]; + + struct drm_dp_link link_info; + struct sde_edid_ctrl *edid_ctrl; + struct dp_panel_info pinfo; + bool video_test; + bool spd_enabled; + + u32 vic; + u32 max_pclk_khz; + s64 mst_target_sc; + + /* debug */ + u32 max_bw_code; + u32 lane_count; + u32 link_bw_code; + u32 max_supported_bpp; + + /* By default, stream_id is assigned to DP_INVALID_STREAM. + * Client sets the stream id value using set_stream_id interface. 
+ */ + enum dp_stream_id stream_id; + int vcpi; + + u32 channel_start_slot; + u32 channel_total_slots; + u32 pbn; + + u32 dsc_blks_in_use; + u32 max_lm; + /* DRM connector associated with this panel */ + struct drm_connector *connector; + + struct dp_audio *audio; + bool audio_supported; + + struct dp_dsc_caps sink_dsc_caps; + bool dsc_feature_enable; + bool fec_feature_enable; + bool dsc_en; + bool fec_en; + bool widebus_en; + bool dsc_continuous_pps; + bool mst_state; + bool pclk_on; + + /* override debug option */ + bool mst_hide; + bool mode_override; + int hdisplay; + int vdisplay; + int vrefresh; + int aspect_ratio; + + s64 fec_overhead_fp; + + int (*init)(struct dp_panel *dp_panel); + int (*deinit)(struct dp_panel *dp_panel, u32 flags); + int (*hw_cfg)(struct dp_panel *dp_panel, bool enable); + int (*read_sink_caps)(struct dp_panel *dp_panel, + struct drm_connector *connector, bool multi_func); + u32 (*get_mode_bpp)(struct dp_panel *dp_panel, u32 mode_max_bpp, + u32 mode_pclk_khz, bool dsc_en); + int (*get_modes)(struct dp_panel *dp_panel, + struct drm_connector *connector, struct dp_display_mode *mode); + void (*handle_sink_request)(struct dp_panel *dp_panel); + int (*setup_hdr)(struct dp_panel *dp_panel, + struct drm_msm_ext_hdr_metadata *hdr_meta, + bool dhdr_update, u64 core_clk_rate, bool flush); + int (*set_colorspace)(struct dp_panel *dp_panel, + u32 colorspace); + void (*tpg_config)(struct dp_panel *dp_panel, u32 pattern); + int (*spd_config)(struct dp_panel *dp_panel); + bool (*hdr_supported)(struct dp_panel *dp_panel); + + int (*set_stream_info)(struct dp_panel *dp_panel, + enum dp_stream_id stream_id, u32 ch_start_slot, + u32 ch_tot_slots, u32 pbn, int vcpi); + + int (*read_sink_status)(struct dp_panel *dp_panel, u8 *sts, u32 size); + int (*update_edid)(struct dp_panel *dp_panel, struct edid *edid); + bool (*read_mst_cap)(struct dp_panel *dp_panel); + void (*convert_to_dp_mode)(struct dp_panel *dp_panel, + const struct drm_display_mode *drm_mode, 
+ struct dp_display_mode *dp_mode); + void (*update_pps)(struct dp_panel *dp_panel, char *pps_cmd); + int (*sink_crc_enable)(struct dp_panel *dp_panel, bool enable); + int (*get_src_crc)(struct dp_panel *dp_panel, u16 *crc); + int (*get_sink_crc)(struct dp_panel *dp_panel, u16 *crc); + bool (*get_panel_on)(struct dp_panel *dp_panel); +}; + +struct dp_tu_calc_input { + u64 lclk; /* 162, 270, 540 and 810 */ + u64 pclk_khz; /* in KHz */ + u64 hactive; /* active h-width */ + u64 hporch; /* bp + fp + pulse */ + int nlanes; /* no.of.lanes */ + int bpp; /* bits */ + int pixel_enc; /* 444, 420, 422 */ + int dsc_en; /* dsc on/off */ + int async_en; /* async mode */ + int fec_en; /* fec */ + int compress_ratio; /* 2:1 = 200, 3:1 = 300, 3.75:1 = 375 */ + int num_of_dsc_slices; /* number of slices per line */ +}; + +struct dp_vc_tu_mapping_table { + u32 vic; + u8 lanes; + u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20), 810 (30) */ + u8 bpp; + u32 valid_boundary_link; + u32 delay_start_link; + bool boundary_moderation_en; + u32 valid_lower_boundary_link; + u32 upper_boundary_count; + u32 lower_boundary_count; + u32 tu_size_minus1; +}; + +/** + * is_link_rate_valid() - validates the link rate + * @bw_code: link rate requested by the sink + * + * Returns true if the requested link rate is supported. + */ +static inline bool is_link_rate_valid(u32 bw_code) +{ + return ((bw_code == DP_LINK_BW_1_62) || + (bw_code == DP_LINK_BW_2_7) || + (bw_code == DP_LINK_BW_5_4) || + (bw_code == DP_LINK_BW_8_1)); +} + +/** + * is_lane_count_valid() - validates the lane count + * @lane_count: lane count requested by the sink + * + * Returns true if the requested lane count is supported. 
+ */ +static inline bool is_lane_count_valid(u32 lane_count) +{ + return (lane_count == DP_LANE_COUNT_1) || + (lane_count == DP_LANE_COUNT_2) || + (lane_count == DP_LANE_COUNT_4); +} + +struct dp_panel *dp_panel_get(struct dp_panel_in *in); +void dp_panel_put(struct dp_panel *dp_panel); +void dp_panel_calc_tu_test(struct dp_tu_calc_input *in, + struct dp_vc_tu_mapping_table *tu_table); +#endif /* _DP_PANEL_H_ */ diff --git a/msm/dp/dp_parser.c b/msm/dp/dp_parser.c new file mode 100644 index 000000000..48c76dc80 --- /dev/null +++ b/msm/dp/dp_parser.c @@ -0,0 +1,1014 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + */ + +#include +#include + +#include "dp_parser.h" +#include "dp_debug.h" + +static void dp_parser_unmap_io_resources(struct dp_parser *parser) +{ + int i = 0; + struct dp_io *io = &parser->io; + + for (i = 0; i < io->len; i++) + msm_dss_iounmap(&io->data[i].io); +} + +static int dp_parser_reg(struct dp_parser *parser) +{ + int rc = 0, i = 0; + u32 reg_count; + struct platform_device *pdev = parser->pdev; + struct dp_io *io = &parser->io; + struct device *dev = &pdev->dev; + + reg_count = of_property_count_strings(dev->of_node, "reg-names"); + if (reg_count <= 0) { + DP_ERR("no reg defined\n"); + return -EINVAL; + } + + io->len = reg_count; + io->data = devm_kzalloc(dev, sizeof(struct dp_io_data) * reg_count, + GFP_KERNEL); + if (!io->data) + return -ENOMEM; + + for (i = 0; i < reg_count; i++) { + of_property_read_string_index(dev->of_node, + "reg-names", i, &io->data[i].name); + rc = msm_dss_ioremap_byname(pdev, &io->data[i].io, + io->data[i].name); + if (rc) { + DP_ERR("unable to remap %s resources\n", + io->data[i].name); + goto err; + } + } + + return 0; +err: + dp_parser_unmap_io_resources(parser); + return rc; +} + +static const char *dp_get_phy_aux_config_property(u32 cfg_type) +{ + switch 
(cfg_type) { + case PHY_AUX_CFG0: + return "qcom,aux-cfg0-settings"; + case PHY_AUX_CFG1: + return "qcom,aux-cfg1-settings"; + case PHY_AUX_CFG2: + return "qcom,aux-cfg2-settings"; + case PHY_AUX_CFG3: + return "qcom,aux-cfg3-settings"; + case PHY_AUX_CFG4: + return "qcom,aux-cfg4-settings"; + case PHY_AUX_CFG5: + return "qcom,aux-cfg5-settings"; + case PHY_AUX_CFG6: + return "qcom,aux-cfg6-settings"; + case PHY_AUX_CFG7: + return "qcom,aux-cfg7-settings"; + case PHY_AUX_CFG8: + return "qcom,aux-cfg8-settings"; + case PHY_AUX_CFG9: + return "qcom,aux-cfg9-settings"; + default: + return "unknown"; + } +} + +static void dp_parser_phy_aux_cfg_reset(struct dp_parser *parser) +{ + int i = 0; + + for (i = 0; i < PHY_AUX_CFG_MAX; i++) + parser->aux_cfg[i] = (const struct dp_aux_cfg){ 0 }; +} + +static int dp_parser_aux(struct dp_parser *parser) +{ + struct device_node *of_node = parser->pdev->dev.of_node; + int len = 0, i = 0, j = 0, config_count = 0; + const char *data; + int const minimum_config_count = 1; + + for (i = 0; i < PHY_AUX_CFG_MAX; i++) { + const char *property = dp_get_phy_aux_config_property(i); + + data = of_get_property(of_node, property, &len); + if (!data) { + DP_ERR("Unable to read %s\n", property); + goto error; + } + + config_count = len - 1; + if ((config_count < minimum_config_count) || + (config_count > DP_AUX_CFG_MAX_VALUE_CNT)) { + DP_ERR("Invalid config count (%d) configs for %s\n", + config_count, property); + goto error; + } + + parser->aux_cfg[i].offset = data[0]; + parser->aux_cfg[i].cfg_cnt = config_count; + DP_DEBUG("%s offset=0x%x, cfg_cnt=%d\n", + property, + parser->aux_cfg[i].offset, + parser->aux_cfg[i].cfg_cnt); + for (j = 1; j < len; j++) { + parser->aux_cfg[i].lut[j - 1] = data[j]; + DP_DEBUG("%s lut[%d]=0x%x\n", + property, + i, + parser->aux_cfg[i].lut[j - 1]); + } + } + return 0; + +error: + dp_parser_phy_aux_cfg_reset(parser); + return -EINVAL; +} + +static int dp_parser_misc(struct dp_parser *parser) +{ + int rc = 0, len = 0, 
i = 0; + const char *data = NULL; + + struct device_node *of_node = parser->pdev->dev.of_node; + + data = of_get_property(of_node, "qcom,logical2physical-lane-map", &len); + if (data && (len == DP_MAX_PHY_LN)) { + for (i = 0; i < len; i++) + parser->l_map[i] = data[i]; + } + + data = of_get_property(of_node, "qcom,pn-swap-lane-map", &len); + if (data && (len == DP_MAX_PHY_LN)) { + for (i = 0; i < len; i++) + parser->l_pnswap |= (data[i] & 0x01) << i; + } + + rc = of_property_read_u32(of_node, + "qcom,max-pclk-frequency-khz", &parser->max_pclk_khz); + if (rc) + parser->max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ; + + rc = of_property_read_u32(of_node, + "qcom,max-lclk-frequency-khz", &parser->max_lclk_khz); + if (rc) + parser->max_lclk_khz = DP_MAX_LINK_CLK_KHZ; + + return 0; +} + +static int dp_parser_msm_hdcp_dev(struct dp_parser *parser) +{ + struct device_node *node; + struct platform_device *pdev; + + node = of_find_compatible_node(NULL, NULL, "qcom,msm-hdcp"); + if (!node) { + // This is a non-fatal error, module initialization can proceed + DP_WARN("couldn't find msm-hdcp node\n"); + return 0; + } + + pdev = of_find_device_by_node(node); + if (!pdev) { + // This is a non-fatal error, module initialization can proceed + DP_WARN("couldn't find msm-hdcp pdev\n"); + return 0; + } + + parser->msm_hdcp_dev = &pdev->dev; + + return 0; +} + +static int dp_parser_pinctrl(struct dp_parser *parser) +{ + int rc = 0; + struct dp_pinctrl *pinctrl = &parser->pinctrl; + + pinctrl->pin = devm_pinctrl_get(&parser->pdev->dev); + + if (IS_ERR_OR_NULL(pinctrl->pin)) { + DP_DEBUG("failed to get pinctrl, rc=%d\n", rc); + goto error; + } + + pinctrl->state_active = pinctrl_lookup_state(pinctrl->pin, + "mdss_dp_active"); + if (IS_ERR_OR_NULL(pinctrl->state_active)) { + rc = PTR_ERR(pinctrl->state_active); + DP_ERR("failed to get pinctrl active state, rc=%d\n", rc); + goto error; + } + + pinctrl->state_suspend = pinctrl_lookup_state(pinctrl->pin, + "mdss_dp_sleep"); + if 
(IS_ERR_OR_NULL(pinctrl->state_suspend)) { + rc = PTR_ERR(pinctrl->state_suspend); + DP_ERR("failed to get pinctrl suspend state, rc=%d\n", rc); + goto error; + } +error: + return rc; +} + +static int dp_parser_gpio(struct dp_parser *parser) +{ + int i = 0; + struct device *dev = &parser->pdev->dev; + struct device_node *of_node = dev->of_node; + struct dss_module_power *mp = &parser->mp[DP_CORE_PM]; + static const char * const dp_gpios[] = { + "qcom,aux-en-gpio", + "qcom,aux-sel-gpio", + "qcom,usbplug-cc-gpio", + }; + + if (of_find_property(of_node, "qcom,dp-gpio-aux-switch", NULL)) + parser->gpio_aux_switch = true; + mp->gpio_config = devm_kzalloc(dev, + sizeof(struct dss_gpio) * ARRAY_SIZE(dp_gpios), GFP_KERNEL); + if (!mp->gpio_config) + return -ENOMEM; + + mp->num_gpio = ARRAY_SIZE(dp_gpios); + + for (i = 0; i < ARRAY_SIZE(dp_gpios); i++) { + mp->gpio_config[i].gpio = of_get_named_gpio(of_node, + dp_gpios[i], 0); + + if (!gpio_is_valid(mp->gpio_config[i].gpio)) { + DP_DEBUG("%s gpio not specified\n", dp_gpios[i]); + /* In case any gpio was not specified, we think gpio + * aux switch also was not specified. 
+ */ + parser->gpio_aux_switch = false; + continue; + } + + strlcpy(mp->gpio_config[i].gpio_name, dp_gpios[i], + sizeof(mp->gpio_config[i].gpio_name)); + + mp->gpio_config[i].value = 0; + } + + return 0; +} + +static const char *dp_parser_supply_node_name(enum dp_pm_type module) +{ + switch (module) { + case DP_CORE_PM: return "qcom,core-supply-entries"; + case DP_CTRL_PM: return "qcom,ctrl-supply-entries"; + case DP_PHY_PM: return "qcom,phy-supply-entries"; + case DP_PLL_PM: return "qcom,pll-supply-entries"; + default: return "???"; + } +} + +static int dp_parser_get_vreg(struct dp_parser *parser, + enum dp_pm_type module) +{ + int i = 0, rc = 0; + u32 tmp = 0; + const char *pm_supply_name = NULL; + struct device_node *supply_node = NULL; + struct device_node *of_node = parser->pdev->dev.of_node; + struct device_node *supply_root_node = NULL; + struct dss_module_power *mp = &parser->mp[module]; + + mp->num_vreg = 0; + pm_supply_name = dp_parser_supply_node_name(module); + supply_root_node = of_get_child_by_name(of_node, pm_supply_name); + if (!supply_root_node) { + DP_DEBUG("no supply entry present: %s\n", pm_supply_name); + goto novreg; + } + + mp->num_vreg = of_get_available_child_count(supply_root_node); + + if (mp->num_vreg == 0) { + DP_DEBUG("no vreg\n"); + goto novreg; + } else { + DP_DEBUG("vreg found. count=%d\n", mp->num_vreg); + } + + mp->vreg_config = devm_kzalloc(&parser->pdev->dev, + sizeof(struct dss_vreg) * mp->num_vreg, GFP_KERNEL); + if (!mp->vreg_config) { + rc = -ENOMEM; + goto error; + } + + for_each_child_of_node(supply_root_node, supply_node) { + const char *st = NULL; + /* vreg-name */ + rc = of_property_read_string(supply_node, + "qcom,supply-name", &st); + if (rc) { + DP_ERR("error reading name. 
rc=%d\n", + rc); + goto error; + } + snprintf(mp->vreg_config[i].vreg_name, + ARRAY_SIZE((mp->vreg_config[i].vreg_name)), "%s", st); + /* vreg-min-voltage */ + rc = of_property_read_u32(supply_node, + "qcom,supply-min-voltage", &tmp); + if (rc) { + DP_ERR("error reading min volt. rc=%d\n", + rc); + goto error; + } + mp->vreg_config[i].min_voltage = tmp; + + /* vreg-max-voltage */ + rc = of_property_read_u32(supply_node, + "qcom,supply-max-voltage", &tmp); + if (rc) { + DP_ERR("error reading max volt. rc=%d\n", + rc); + goto error; + } + mp->vreg_config[i].max_voltage = tmp; + + /* enable-load */ + rc = of_property_read_u32(supply_node, + "qcom,supply-enable-load", &tmp); + if (rc) { + DP_ERR("error reading enable load. rc=%d\n", + rc); + goto error; + } + mp->vreg_config[i].enable_load = tmp; + + /* disable-load */ + rc = of_property_read_u32(supply_node, + "qcom,supply-disable-load", &tmp); + if (rc) { + DP_ERR("error reading disable load. rc=%d\n", + rc); + goto error; + } + mp->vreg_config[i].disable_load = tmp; + + DP_DEBUG("%s min=%d, max=%d, enable=%d, disable=%d\n", + mp->vreg_config[i].vreg_name, + mp->vreg_config[i].min_voltage, + mp->vreg_config[i].max_voltage, + mp->vreg_config[i].enable_load, + mp->vreg_config[i].disable_load + ); + ++i; + } + + return rc; + +error: + if (mp->vreg_config) { + devm_kfree(&parser->pdev->dev, mp->vreg_config); + mp->vreg_config = NULL; + } +novreg: + mp->num_vreg = 0; + + return rc; +} + +static void dp_parser_put_vreg_data(struct device *dev, + struct dss_module_power *mp) +{ + if (!mp) { + DEV_ERR("invalid input\n"); + return; + } + + if (mp->vreg_config) { + devm_kfree(dev, mp->vreg_config); + mp->vreg_config = NULL; + } + mp->num_vreg = 0; +} + +static int dp_parser_regulator(struct dp_parser *parser) +{ + int i, rc = 0; + struct platform_device *pdev = parser->pdev; + + /* Parse the regulator information */ + for (i = DP_CORE_PM; i < DP_MAX_PM; i++) { + rc = dp_parser_get_vreg(parser, i); + if (rc) { + 
DP_ERR("get_dt_vreg_data failed for %s. rc=%d\n", + dp_parser_pm_name(i), rc); + i--; + for (; i >= DP_CORE_PM; i--) + dp_parser_put_vreg_data(&pdev->dev, + &parser->mp[i]); + break; + } + } + + return rc; +} + +static bool dp_parser_check_prefix(const char *clk_prefix, const char *clk_name) +{ + return !!strnstr(clk_name, clk_prefix, strlen(clk_name)); +} + +static void dp_parser_put_clk_data(struct device *dev, + struct dss_module_power *mp) +{ + if (!mp) { + DEV_ERR("%s: invalid input\n", __func__); + return; + } + + if (mp->clk_config) { + devm_kfree(dev, mp->clk_config); + mp->clk_config = NULL; + } + + mp->num_clk = 0; +} + +static void dp_parser_put_gpio_data(struct device *dev, + struct dss_module_power *mp) +{ + if (!mp) { + DEV_ERR("%s: invalid input\n", __func__); + return; + } + + if (mp->gpio_config) { + devm_kfree(dev, mp->gpio_config); + mp->gpio_config = NULL; + } + + mp->num_gpio = 0; +} + +static int dp_parser_init_clk_data(struct dp_parser *parser) +{ + int num_clk = 0, i = 0, rc = 0; + int core_clk_count = 0, link_clk_count = 0; + int strm0_clk_count = 0, strm1_clk_count = 0; + const char *core_clk = "core"; + const char *strm0_clk = "strm0"; + const char *strm1_clk = "strm1"; + const char *link_clk = "link"; + const char *clk_name; + struct device *dev = &parser->pdev->dev; + struct dss_module_power *core_power = &parser->mp[DP_CORE_PM]; + struct dss_module_power *strm0_power = &parser->mp[DP_STREAM0_PM]; + struct dss_module_power *strm1_power = &parser->mp[DP_STREAM1_PM]; + struct dss_module_power *link_power = &parser->mp[DP_LINK_PM]; + + num_clk = of_property_count_strings(dev->of_node, "clock-names"); + if (num_clk <= 0) { + DP_ERR("no clocks are defined\n"); + rc = -EINVAL; + goto exit; + } + + for (i = 0; i < num_clk; i++) { + of_property_read_string_index(dev->of_node, + "clock-names", i, &clk_name); + + if (dp_parser_check_prefix(core_clk, clk_name)) + core_clk_count++; + + if (dp_parser_check_prefix(strm0_clk, clk_name)) + 
strm0_clk_count++; + + if (dp_parser_check_prefix(strm1_clk, clk_name)) + strm1_clk_count++; + + if (dp_parser_check_prefix(link_clk, clk_name)) + link_clk_count++; + } + + /* Initialize the CORE power module */ + if (core_clk_count <= 0) { + DP_ERR("no core clocks are defined\n"); + rc = -EINVAL; + goto exit; + } + + core_power->num_clk = core_clk_count; + core_power->clk_config = devm_kzalloc(dev, + sizeof(struct dss_clk) * core_power->num_clk, + GFP_KERNEL); + if (!core_power->clk_config) { + rc = -EINVAL; + goto exit; + } + + /* Initialize the STREAM0 power module */ + if (strm0_clk_count <= 0) { + DP_DEBUG("no strm0 clocks are defined\n"); + } else { + strm0_power->num_clk = strm0_clk_count; + strm0_power->clk_config = devm_kzalloc(dev, + sizeof(struct dss_clk) * strm0_power->num_clk, + GFP_KERNEL); + if (!strm0_power->clk_config) { + strm0_power->num_clk = 0; + rc = -EINVAL; + goto strm0_clock_error; + } + } + + /* Initialize the STREAM1 power module */ + if (strm1_clk_count <= 0) { + DP_DEBUG("no strm1 clocks are defined\n"); + } else { + strm1_power->num_clk = strm1_clk_count; + strm1_power->clk_config = devm_kzalloc(dev, + sizeof(struct dss_clk) * strm1_power->num_clk, + GFP_KERNEL); + if (!strm1_power->clk_config) { + strm1_power->num_clk = 0; + rc = -EINVAL; + goto strm1_clock_error; + } + } + + /* Initialize the link power module */ + if (link_clk_count <= 0) { + DP_ERR("no link clocks are defined\n"); + rc = -EINVAL; + goto link_clock_error; + } + + link_power->num_clk = link_clk_count; + link_power->clk_config = devm_kzalloc(dev, + sizeof(struct dss_clk) * link_power->num_clk, + GFP_KERNEL); + if (!link_power->clk_config) { + link_power->num_clk = 0; + rc = -EINVAL; + goto link_clock_error; + } + + return rc; + +link_clock_error: + dp_parser_put_clk_data(dev, strm1_power); +strm1_clock_error: + dp_parser_put_clk_data(dev, strm0_power); +strm0_clock_error: + dp_parser_put_clk_data(dev, core_power); +exit: + return rc; +} + +static int 
dp_parser_clock(struct dp_parser *parser) +{ + int rc = 0, i = 0; + int num_clk = 0; + int core_clk_index = 0, link_clk_index = 0; + int core_clk_count = 0, link_clk_count = 0; + int strm0_clk_index = 0, strm1_clk_index = 0; + int strm0_clk_count = 0, strm1_clk_count = 0; + int clock_mmrm = 0; + const char *clk_name; + const char *core_clk = "core"; + const char *strm0_clk = "strm0"; + const char *strm1_clk = "strm1"; + const char *link_clk = "link"; + struct device *dev = &parser->pdev->dev; + struct dss_module_power *core_power; + struct dss_module_power *strm0_power; + struct dss_module_power *strm1_power; + struct dss_module_power *link_power; + + core_power = &parser->mp[DP_CORE_PM]; + strm0_power = &parser->mp[DP_STREAM0_PM]; + strm1_power = &parser->mp[DP_STREAM1_PM]; + link_power = &parser->mp[DP_LINK_PM]; + + rc = dp_parser_init_clk_data(parser); + if (rc) { + DP_ERR("failed to initialize power data\n"); + rc = -EINVAL; + goto exit; + } + + core_clk_count = core_power->num_clk; + link_clk_count = link_power->num_clk; + strm0_clk_count = strm0_power->num_clk; + strm1_clk_count = strm1_power->num_clk; + + num_clk = of_property_count_strings(dev->of_node, "clock-names"); + + for (i = 0; i < num_clk; i++) { + of_property_read_string_index(dev->of_node, "clock-names", + i, &clk_name); + + if (dp_parser_check_prefix(core_clk, clk_name) && + core_clk_index < core_clk_count) { + struct dss_clk *clk = + &core_power->clk_config[core_clk_index]; + strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name)); + clk->type = DSS_CLK_AHB; + core_clk_index++; + } else if (dp_parser_check_prefix(link_clk, clk_name) && + link_clk_index < link_clk_count) { + struct dss_clk *clk = + &link_power->clk_config[link_clk_index]; + strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name)); + link_clk_index++; + clock_mmrm = 0; + of_property_read_u32_index(dev->of_node, "clock-mmrm", i, &clock_mmrm); + if (clock_mmrm) { + clk->type = DSS_CLK_MMRM; + clk->mmrm.clk_id = clock_mmrm; + } else 
if (!strcmp(clk_name, "link_clk_src")) { + clk->type = DSS_CLK_PCLK; + } else { + clk->type = DSS_CLK_AHB; + } + } else if (dp_parser_check_prefix(strm0_clk, clk_name) && + strm0_clk_index < strm0_clk_count) { + struct dss_clk *clk = + &strm0_power->clk_config[strm0_clk_index]; + strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name)); + strm0_clk_index++; + + clk->type = DSS_CLK_PCLK; + } else if (dp_parser_check_prefix(strm1_clk, clk_name) && + strm1_clk_index < strm1_clk_count) { + struct dss_clk *clk = + &strm1_power->clk_config[strm1_clk_index]; + strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name)); + strm1_clk_index++; + + clk->type = DSS_CLK_PCLK; + } + } + + DP_DEBUG("clock parsing successful\n"); + +exit: + return rc; +} + +static int dp_parser_catalog(struct dp_parser *parser) +{ + int rc; + u32 version; + struct device *dev = &parser->pdev->dev; + + rc = of_property_read_u32(dev->of_node, "qcom,phy-version", &version); + + if (!rc) + parser->hw_cfg.phy_version = version; + + return 0; +} + +static int dp_parser_mst(struct dp_parser *parser) +{ + struct device *dev = &parser->pdev->dev; + int i; + + parser->has_mst = of_property_read_bool(dev->of_node, + "qcom,mst-enable"); + parser->has_mst_sideband = parser->has_mst; + + DP_DEBUG("mst parsing successful. mst:%d\n", parser->has_mst); + + for (i = 0; i < MAX_DP_MST_STREAMS; i++) { + of_property_read_u32_index(dev->of_node, + "qcom,mst-fixed-topology-ports", i, + &parser->mst_fixed_port[i]); + } + + return 0; +} + +static void dp_parser_dsc(struct dp_parser *parser) +{ + struct device *dev = &parser->pdev->dev; + + parser->dsc_feature_enable = of_property_read_bool(dev->of_node, + "qcom,dsc-feature-enable"); + + parser->dsc_continuous_pps = of_property_read_bool(dev->of_node, + "qcom,dsc-continuous-pps"); + + DP_DEBUG("dsc parsing successful. 
dsc:%d\n", + parser->dsc_feature_enable); + DP_DEBUG("cont_pps:%d\n", + parser->dsc_continuous_pps); +} + +static void dp_parser_qos(struct dp_parser *parser) +{ + struct device *dev = &parser->pdev->dev; + u32 mask, latency; + int rc; + + rc = of_property_read_u32(dev->of_node, "qcom,qos-cpu-latency-us", &latency); + if (rc) + return; + + rc = of_property_read_u32(dev->of_node, "qcom,qos-cpu-mask", &mask); + if (rc) + return; + + parser->qos_cpu_mask = mask; + parser->qos_cpu_latency = latency; + + DP_DEBUG("qos parsing successful. mask:%x latency:%ld\n", mask, latency); +} + +static void dp_parser_fec(struct dp_parser *parser) +{ + struct device *dev = &parser->pdev->dev; + + parser->fec_feature_enable = of_property_read_bool(dev->of_node, + "qcom,fec-feature-enable"); + + DP_DEBUG("fec parsing successful. fec:%d\n", + parser->fec_feature_enable); +} + +static void dp_parser_widebus(struct dp_parser *parser) +{ + struct device *dev = &parser->pdev->dev; + + parser->has_widebus = of_property_read_bool(dev->of_node, + "qcom,widebus-enable"); + + DP_DEBUG("widebus parsing successful. 
widebus:%d\n", + parser->has_widebus); +} + +static int parse_lt_param(struct device *dev, u8 **ptr, char *property) { + int ret = 0, i = 0, j = 0, index = 0; + u32 out_val = 0; + u32 expected_elems = MAX_SWING_LEVELS * MAX_PRE_EMP_LEVELS; + u8 parsed_val = 0; + + ret = of_property_count_u32_elems(dev->of_node, property); + if (ret != expected_elems) { + return ret; + } + + *ptr = devm_kzalloc(dev, sizeof(u8) * expected_elems, GFP_KERNEL); + if (!*ptr) + return -ENOMEM; + + for (i = 0; i < MAX_SWING_LEVELS; i++) { + for (j = 0; j < MAX_PRE_EMP_LEVELS; j++) { + index = i * MAX_SWING_LEVELS + j; + + ret = of_property_read_u32_index(dev->of_node, property, index, &out_val); + if (ret) + return ret; + + parsed_val = out_val & 0xFF; + + ((u8 *)*ptr)[index] = parsed_val; + } + } + + return ret; +} + +static void dp_parser_clear_link_training_params(struct dp_parser *dp_parser) +{ + devm_kfree(&dp_parser->pdev->dev, dp_parser->swing_hbr2_3); + devm_kfree(&dp_parser->pdev->dev, dp_parser->pre_emp_hbr2_3); + devm_kfree(&dp_parser->pdev->dev, dp_parser->swing_hbr_rbr); + devm_kfree(&dp_parser->pdev->dev, dp_parser->pre_emp_hbr_rbr); + + dp_parser->swing_hbr2_3 = NULL; + dp_parser->pre_emp_hbr2_3 = NULL; + dp_parser->swing_hbr_rbr = NULL; + dp_parser->pre_emp_hbr_rbr = NULL; + + dp_parser->valid_lt_params = false; +} + +static void dp_parser_link_training_params(struct dp_parser *parser) +{ + struct device *dev = &parser->pdev->dev; + int ret = 0; + + ret = parse_lt_param(dev, &parser->swing_hbr2_3, "qcom,hbr2-3-voltage-swing"); + if (ret) + goto early_exit; + + ret = parse_lt_param(dev, &parser->pre_emp_hbr2_3, "qcom,hbr2-3-pre-emphasis"); + if (ret) + goto early_exit; + + ret = parse_lt_param(dev, &parser->swing_hbr_rbr, "qcom,hbr-rbr-voltage-swing"); + if (ret) + goto early_exit; + + ret = parse_lt_param(dev, &parser->pre_emp_hbr_rbr, "qcom,hbr-rbr-pre-emphasis"); + if (ret) + goto early_exit; + + parser->valid_lt_params = true; + + DP_DEBUG("link training parameters 
parsing success\n"); + goto end; + +early_exit: + if(ret == -EINVAL) + DP_WARN("link training parameters not found - using default values\n"); + else + DP_ERR("link training parameters parsing failure ret: %d\n", ret); + + dp_parser_clear_link_training_params(parser); +end: + return; +} + +static int dp_parser_parse(struct dp_parser *parser) +{ + int rc = 0; + + if (!parser) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto err; + } + + rc = dp_parser_reg(parser); + if (rc) + goto err; + + rc = dp_parser_aux(parser); + if (rc) + goto err; + + rc = dp_parser_misc(parser); + if (rc) + goto err; + + rc = dp_parser_clock(parser); + if (rc) + goto err; + + rc = dp_parser_regulator(parser); + if (rc) + goto err; + + rc = dp_parser_gpio(parser); + if (rc) + goto err; + + rc = dp_parser_catalog(parser); + if (rc) + goto err; + + rc = dp_parser_pinctrl(parser); + if (rc) + goto err; + + rc = dp_parser_msm_hdcp_dev(parser); + if (rc) + goto err; + + rc = dp_parser_mst(parser); + if (rc) + goto err; + + dp_parser_dsc(parser); + dp_parser_fec(parser); + dp_parser_widebus(parser); + dp_parser_qos(parser); + dp_parser_link_training_params(parser); +err: + return rc; +} + +static struct dp_io_data *dp_parser_get_io(struct dp_parser *dp_parser, + char *name) +{ + int i = 0; + struct dp_io *io; + + if (!dp_parser) { + DP_ERR("invalid input\n"); + goto err; + } + + io = &dp_parser->io; + + for (i = 0; i < io->len; i++) { + struct dp_io_data *data = &io->data[i]; + + if (!strcmp(data->name, name)) + return data; + } +err: + return NULL; +} + +static void dp_parser_get_io_buf(struct dp_parser *dp_parser, char *name) +{ + int i = 0; + struct dp_io *io; + + if (!dp_parser) { + DP_ERR("invalid input\n"); + return; + } + + io = &dp_parser->io; + + for (i = 0; i < io->len; i++) { + struct dp_io_data *data = &io->data[i]; + + if (!strcmp(data->name, name)) { + if (!data->buf) + data->buf = devm_kzalloc(&dp_parser->pdev->dev, + data->io.len, GFP_KERNEL); + } + } +} + +static void 
dp_parser_clear_io_buf(struct dp_parser *dp_parser) +{ + int i = 0; + struct dp_io *io; + + if (!dp_parser) { + DP_ERR("invalid input\n"); + return; + } + + io = &dp_parser->io; + + for (i = 0; i < io->len; i++) { + struct dp_io_data *data = &io->data[i]; + + if (data->buf) + devm_kfree(&dp_parser->pdev->dev, data->buf); + + data->buf = NULL; + } +} + +struct dp_parser *dp_parser_get(struct platform_device *pdev) +{ + struct dp_parser *parser; + + parser = devm_kzalloc(&pdev->dev, sizeof(*parser), GFP_KERNEL); + if (!parser) + return ERR_PTR(-ENOMEM); + + parser->parse = dp_parser_parse; + parser->get_io = dp_parser_get_io; + parser->get_io_buf = dp_parser_get_io_buf; + parser->clear_io_buf = dp_parser_clear_io_buf; + parser->pdev = pdev; + + return parser; +} + +void dp_parser_put(struct dp_parser *parser) +{ + int i = 0; + struct dss_module_power *power = NULL; + + if (!parser) { + DP_ERR("invalid parser module\n"); + return; + } + + power = parser->mp; + + for (i = 0; i < DP_MAX_PM; i++) { + dp_parser_put_clk_data(&parser->pdev->dev, &power[i]); + dp_parser_put_vreg_data(&parser->pdev->dev, &power[i]); + dp_parser_put_gpio_data(&parser->pdev->dev, &power[i]); + } + + dp_parser_clear_link_training_params(parser); + dp_parser_clear_io_buf(parser); + devm_kfree(&parser->pdev->dev, parser->io.data); + devm_kfree(&parser->pdev->dev, parser); +} diff --git a/msm/dp/dp_parser.h b/msm/dp/dp_parser.h new file mode 100644 index 000000000..1ddd2af45 --- /dev/null +++ b/msm/dp/dp_parser.h @@ -0,0 +1,289 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DP_PARSER_H_ +#define _DP_PARSER_H_ + +#include + +#define DP_LABEL "MDSS DP DISPLAY" +#define AUX_CFG_LEN 10 +#define DP_MAX_PIXEL_CLK_KHZ 675000 +#define DP_MAX_LINK_CLK_KHZ 810000 +#define MAX_DP_MST_STREAMS 2 +#define MAX_SWING_LEVELS 4 +#define MAX_PRE_EMP_LEVELS 4 + +enum dp_pm_type { + DP_CORE_PM, + DP_CTRL_PM, + DP_PHY_PM, + DP_STREAM0_PM, + DP_STREAM1_PM, + DP_LINK_PM, + DP_PLL_PM, + DP_MAX_PM +}; + +static inline const char *dp_parser_pm_name(enum dp_pm_type module) +{ + switch (module) { + case DP_CORE_PM: return "DP_CORE_PM"; + case DP_CTRL_PM: return "DP_CTRL_PM"; + case DP_PHY_PM: return "DP_PHY_PM"; + case DP_STREAM0_PM: return "DP_STREAM0_PM"; + case DP_STREAM1_PM: return "DP_STREAM1_PM"; + case DP_LINK_PM: return "DP_LINK_PM"; + case DP_PLL_PM: return "DP_PLL_PM"; + default: return "???"; + } +} + +/** + * struct dp_display_data - display related device tree data. + * + * @ctrl_node: reference to controller device + * @phy_node: reference to phy device + * @is_active: is the controller currently active + * @name: name of the display + * @display_type: type of the display + */ +struct dp_display_data { + struct device_node *ctrl_node; + struct device_node *phy_node; + bool is_active; + const char *name; + const char *display_type; +}; + +/** + * struct dp_io_data - data structure to store DP IO related info + * @name: name of the IO + * @buf: buffer corresponding to IO for debugging + * @io: io data which give len and mapped address + */ +struct dp_io_data { + const char *name; + u8 *buf; + struct dss_io_data io; +}; + +/** + * struct dp_io - data struct to store array of DP IO info + * @len: total number of IOs + * @data: pointer to an array of DP IO data structures.
+ */ +struct dp_io { + u32 len; + struct dp_io_data *data; +}; + +/** + * struct dp_pinctrl - DP's pin control + * + * @pin: pin-controller's instance + * @state_active: active state pin control + * @state_hpd_active: hpd active state pin control + * @state_suspend: suspend state pin control + */ +struct dp_pinctrl { + struct pinctrl *pin; + struct pinctrl_state *state_active; + struct pinctrl_state *state_hpd_active; + struct pinctrl_state *state_hpd_tlmm; + struct pinctrl_state *state_hpd_ctrl; + struct pinctrl_state *state_suspend; +}; + +#define DP_ENUM_STR(x) #x +#define DP_AUX_CFG_MAX_VALUE_CNT 3 +/** + * struct dp_aux_cfg - DP's AUX configuration settings + * + * @cfg_cnt: count of the configurable settings for the AUX register + * @current_index: current index of the AUX config lut + * @offset: register offset of the AUX config register + * @lut: look up table for the AUX config values for this register + */ +struct dp_aux_cfg { + u32 cfg_cnt; + u32 current_index; + u32 offset; + u32 lut[DP_AUX_CFG_MAX_VALUE_CNT]; +}; + +/* PHY AUX config registers */ +enum dp_phy_aux_config_type { + PHY_AUX_CFG0, + PHY_AUX_CFG1, + PHY_AUX_CFG2, + PHY_AUX_CFG3, + PHY_AUX_CFG4, + PHY_AUX_CFG5, + PHY_AUX_CFG6, + PHY_AUX_CFG7, + PHY_AUX_CFG8, + PHY_AUX_CFG9, + PHY_AUX_CFG_MAX, +}; + +/** + * enum dp_phy_version - version of the dp phy + * @DP_PHY_VERSION_UNKNOWN: Unknown controller version + * @DP_PHY_VERSION_4_2_0: DP phy v4.2.0 controller + * @DP_PHY_VERSION_6_0_0: DP phy v6.0.0 controller + * @DP_PHY_VERSION_MAX: max version + */ +enum dp_phy_version { + DP_PHY_VERSION_UNKNOWN, + DP_PHY_VERSION_2_0_0 = 0x200, + DP_PHY_VERSION_4_2_0 = 0x420, + DP_PHY_VERSION_6_0_0 = 0x600, + DP_PHY_VERSION_MAX +}; + +/** + * struct dp_hw_cfg - DP HW specific configuration + * + * @phy_version: DP PHY HW version + */ +struct dp_hw_cfg { + enum dp_phy_version phy_version; +}; + +static inline char *dp_phy_aux_config_type_to_string(u32 cfg_type) +{ + switch (cfg_type) { + case PHY_AUX_CFG0: + 
return DP_ENUM_STR(PHY_AUX_CFG0); + case PHY_AUX_CFG1: + return DP_ENUM_STR(PHY_AUX_CFG1); + case PHY_AUX_CFG2: + return DP_ENUM_STR(PHY_AUX_CFG2); + case PHY_AUX_CFG3: + return DP_ENUM_STR(PHY_AUX_CFG3); + case PHY_AUX_CFG4: + return DP_ENUM_STR(PHY_AUX_CFG4); + case PHY_AUX_CFG5: + return DP_ENUM_STR(PHY_AUX_CFG5); + case PHY_AUX_CFG6: + return DP_ENUM_STR(PHY_AUX_CFG6); + case PHY_AUX_CFG7: + return DP_ENUM_STR(PHY_AUX_CFG7); + case PHY_AUX_CFG8: + return DP_ENUM_STR(PHY_AUX_CFG8); + case PHY_AUX_CFG9: + return DP_ENUM_STR(PHY_AUX_CFG9); + default: + return "unknown"; + } +} + +/** + * struct dp_parser - DP parser's data exposed to clients + * + * @pdev: platform data of the client + * @msm_hdcp_dev: device pointer for the HDCP driver + * @mp: gpio, regulator and clock related data + * @pinctrl: pin-control related data + * @disp_data: controller's display related data + * @l_pnswap: P/N swap status on each lane + * @max_pclk_khz: maximum pixel clock supported for the platform + * @max_lclk_khz: maximum link clock supported for the platform + * @hw_cfg: DP HW specific settings + * @has_mst: MST feature enable status + * @has_mst_sideband: MST sideband feature enable status + * @gpio_aux_switch: presence GPIO AUX switch status + * @dsc_feature_enable: DSC feature enable status + * @fec_feature_enable: FEC feature enable status + * @dsc_continuous_pps: PPS sent every frame by HW + * @has_widebus: widebus (2PPC) feature eanble status + *@mst_fixed_port: mst port_num reserved for fixed topology + * @qos_cpu_mask: CPU mask for QOS + * @qos_cpu_latency: CPU Latency setting for QOS + * @swing_hbr2_3: Voltage swing levels for HBR2 and HBR3 rates + * @pre_emp_hbr2_3: Pre-emphasis for HBR2 and HBR3 rates + * @swing_hbr_rbr: Voltage swing levels for HBR and RBR rates + * @pre_emp_hbr_rbr: Pre-emphasis for HBR and RBR rates + * @valid_lt_params: valid lt params + * @parse: function to be called by client to parse device tree. 
+ * @get_io: function to be called by client to get io data. + * @get_io_buf: function to be called by client to get io buffers. + * @clear_io_buf: function to be called by client to clear io buffers. + */ +struct dp_parser { + struct platform_device *pdev; + struct device *msm_hdcp_dev; + struct dss_module_power mp[DP_MAX_PM]; + struct dp_pinctrl pinctrl; + struct dp_io io; + struct dp_display_data disp_data; + + u8 l_map[4]; + u8 l_pnswap; + struct dp_aux_cfg aux_cfg[AUX_CFG_LEN]; + u32 max_pclk_khz; + u32 max_lclk_khz; + struct dp_hw_cfg hw_cfg; + bool has_mst; + bool has_mst_sideband; + bool dsc_feature_enable; + bool fec_feature_enable; + bool dsc_continuous_pps; + bool has_widebus; + bool gpio_aux_switch; + u32 mst_fixed_port[MAX_DP_MST_STREAMS]; + u32 qos_cpu_mask; + unsigned long qos_cpu_latency; + + u8 *swing_hbr2_3; + u8 *pre_emp_hbr2_3; + + u8 *swing_hbr_rbr; + u8 *pre_emp_hbr_rbr; + bool valid_lt_params; + + int (*parse)(struct dp_parser *parser); + struct dp_io_data *(*get_io)(struct dp_parser *parser, char *name); + void (*get_io_buf)(struct dp_parser *parser, char *name); + void (*clear_io_buf)(struct dp_parser *parser); +}; + +enum dp_phy_lane_num { + DP_PHY_LN0 = 0, + DP_PHY_LN1 = 1, + DP_PHY_LN2 = 2, + DP_PHY_LN3 = 3, + DP_MAX_PHY_LN = 4, +}; + +enum dp_mainlink_lane_num { + DP_ML0 = 0, + DP_ML1 = 1, + DP_ML2 = 2, + DP_ML3 = 3, +}; + +/** + * dp_parser_get() - get the DP's device tree parser module + * + * @pdev: platform data of the client + * return: pointer to dp_parser structure. + * + * This function provides client capability to parse the + * device tree and populate the data structures. The data + * related to clock, regulators, pin-control and other + * can be parsed using this module. + */ +struct dp_parser *dp_parser_get(struct platform_device *pdev); + +/** + * dp_parser_put() - cleans the dp_parser module + * + * @parser: pointer to the parser's data. 
+ */ +void dp_parser_put(struct dp_parser *parser); +#endif diff --git a/msm/dp/dp_pll.c b/msm/dp/dp_pll.c new file mode 100644 index 000000000..2a727ec2e --- /dev/null +++ b/msm/dp/dp_pll.c @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include "dp_debug.h" +#include "dp_pll.h" + +static int dp_pll_fill_io(struct dp_pll *pll) +{ + struct dp_parser *parser = pll->parser; + + pll->io.dp_phy = parser->get_io(parser, "dp_phy"); + if (!pll->io.dp_phy) { + DP_ERR("Invalid dp_phy resource\n"); + return -ENOMEM; + } + + pll->io.dp_pll = parser->get_io(parser, "dp_pll"); + if (!pll->io.dp_pll) { + DP_ERR("Invalid dp_pll resource\n"); + return -ENOMEM; + } + + pll->io.dp_ln_tx0 = parser->get_io(parser, "dp_ln_tx0"); + if (!pll->io.dp_ln_tx0) { + DP_ERR("Invalid dp_ln_tx0 resource\n"); + return -ENOMEM; + } + + pll->io.dp_ln_tx1 = parser->get_io(parser, "dp_ln_tx1"); + if (!pll->io.dp_ln_tx1) { + DP_ERR("Invalid dp_ln_tx1 resource\n"); + return -ENOMEM; + } + + pll->io.gdsc = parser->get_io(parser, "gdsc"); + if (!pll->io.gdsc) { + DP_ERR("Invalid gdsc resource\n"); + return -ENOMEM; + } + + return 0; +} + +static int dp_pll_clock_register(struct dp_pll *pll) +{ + int rc; + + switch (pll->revision) { + case DP_PLL_5NM_V1: + case DP_PLL_5NM_V2: + rc = dp_pll_clock_register_5nm(pll); + break; + case DP_PLL_4NM_V1: + case DP_PLL_4NM_V1_1: + rc = dp_pll_clock_register_4nm(pll); + break; + default: + rc = -ENOTSUPP; + break; + } + + return rc; +} + +static void dp_pll_clock_unregister(struct dp_pll *pll) +{ + switch (pll->revision) { + case DP_PLL_5NM_V1: + case DP_PLL_5NM_V2: + dp_pll_clock_unregister_5nm(pll); + break; + case DP_PLL_4NM_V1: + case DP_PLL_4NM_V1_1: + dp_pll_clock_unregister_4nm(pll); + break; + default: + break; + } +} + +int dp_pll_clock_register_helper(struct dp_pll
*pll, struct dp_pll_vco_clk *clks, int num_clks) +{ + int rc = 0, i = 0; + struct platform_device *pdev; + struct clk *clk; + + if (!pll || !clks) { + DP_ERR("input not initialized\n"); + return -EINVAL; + } + + pdev = pll->pdev; + + for (i = 0; i < num_clks; i++) { + clks[i].priv = pll; + + clk = clk_register(&pdev->dev, &clks[i].hw); + if (IS_ERR(clk)) { + DP_ERR("%s registration failed for DP: %d\n", + clk_hw_get_name(&clks[i].hw), pll->index); + return -EINVAL; + } + pll->clk_data->clks[i] = clk; + } + + return rc; +} + +struct dp_pll *dp_pll_get(struct dp_pll_in *in) +{ + int rc = 0; + struct dp_pll *pll; + struct dp_parser *parser; + const char *label = NULL; + struct platform_device *pdev; + + if (!in || !in->pdev || !in->pdev->dev.of_node || !in->parser) { + DP_ERR("Invalid resource pointers\n"); + return ERR_PTR(-EINVAL); + } + + pll = kzalloc(sizeof(*pll), GFP_KERNEL); + if (!pll) + return ERR_PTR(-ENOMEM); + pll->pdev = in->pdev; + pll->parser = in->parser; + pll->aux = in->aux; + pll->dp_core_revision = in->dp_core_revision; + parser = pll->parser; + pdev = pll->pdev; + + label = of_get_property(pdev->dev.of_node, "qcom,pll-revision", NULL); + if (label) { + if (!strcmp(label, "5nm-v1")) { + pll->revision = DP_PLL_5NM_V1; + } else if (!strcmp(label, "5nm-v2")) { + pll->revision = DP_PLL_5NM_V2; + } else if (!strcmp(label, "4nm-v1")) { + pll->revision = DP_PLL_4NM_V1; + } else if (!strcmp(label, "4nm-v1.1")) { + pll->revision = DP_PLL_4NM_V1_1; + } else { + DP_ERR("Unsupported pll revision\n"); + rc = -ENOTSUPP; + goto error; + } + } else { + DP_ERR("pll revision not specified\n"); + rc = -EINVAL; + goto error; + } + + pll->ssc_en = of_property_read_bool(pdev->dev.of_node, + "qcom,ssc-feature-enable"); + pll->bonding_en = of_property_read_bool(pdev->dev.of_node, + "qcom,bonding-feature-enable"); + + rc = dp_pll_fill_io(pll); + if (rc) + goto error; + + rc = dp_pll_clock_register(pll); + if (rc) + goto error; + + DP_INFO("revision=%s, ssc_en=%d, 
bonding_en=%d\n", + dp_pll_get_revision(pll->revision), pll->ssc_en, + pll->bonding_en); + + return pll; +error: + kfree(pll); + return ERR_PTR(rc); +} + +void dp_pll_put(struct dp_pll *pll) +{ + dp_pll_clock_unregister(pll); + kfree(pll); +} diff --git a/msm/dp/dp_pll.h b/msm/dp/dp_pll.h new file mode 100644 index 000000000..529670569 --- /dev/null +++ b/msm/dp/dp_pll.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + */ + +#ifndef __DP_PLL_H +#define __DP_PLL_H + +#include +#include +#include +#include "dp_parser.h" +#include "sde_dbg.h" + +#define DP_VCO_HSCLK_RATE_1620MHZDIV1000 1620000UL +#define DP_VCO_HSCLK_RATE_2700MHZDIV1000 2700000UL +#define DP_VCO_HSCLK_RATE_5400MHZDIV1000 5400000UL +#define DP_VCO_HSCLK_RATE_8100MHZDIV1000 8100000UL +#define DP_PHY_VCO_DIV 0x0070 + +#define dp_pll_get_base(x) pll->io.x->io.base + +#define dp_pll_read(x, offset) ({ \ + readl_relaxed((dp_pll_get_base(x)) + (offset)); \ +}) + +#define dp_pll_write(x, offset, data) ({ \ + DP_DEBUG(#offset", addr=0x%llx, val=0x%x\n", \ + ((u64)(dp_pll_get_base(x)) + (offset)), (data)); \ + SDE_EVT32_VERBOSE((dp_pll_get_base(x)) + (offset), (data)); \ + writel_relaxed((data), (dp_pll_get_base(x)) + (offset)); \ +}) + +enum dp_pll_revision { + DP_PLL_UNKNOWN, + DP_PLL_5NM_V1, + DP_PLL_5NM_V2, + DP_PLL_4NM_V1, + DP_PLL_4NM_V1_1, +}; + +enum hsclk_rate { + HSCLK_RATE_1620MHZ, + HSCLK_RATE_2700MHZ, + HSCLK_RATE_5400MHZ, + HSCLK_RATE_8100MHZ, + HSCLK_RATE_MAX, +}; + +static inline const char *dp_pll_get_revision(enum dp_pll_revision rev) +{ + switch (rev) { + case DP_PLL_UNKNOWN: return "DP_PLL_UNKNOWN"; + case DP_PLL_5NM_V1: return "DP_PLL_5NM_V1"; + case DP_PLL_5NM_V2: return "DP_PLL_5NM_V2"; + case DP_PLL_4NM_V1: return "DP_PLL_4NM_V1"; + case DP_PLL_4NM_V1_1: return "DP_PLL_4NM_V1_1"; + default: return "???"; + } +} + +struct 
dp_pll_io { + struct dp_io_data *dp_phy; + struct dp_io_data *dp_pll; + struct dp_io_data *dp_ln_tx0; + struct dp_io_data *dp_ln_tx1; + struct dp_io_data *gdsc; +}; + +struct dp_pll_vco_clk { + struct clk_hw hw; + void *priv; +}; + +struct dp_pll { + /* target pll revision information */ + u32 revision; + /* save vco current rate */ + unsigned long vco_rate; + /* + * PLL index if multiple index are available. Eg. in case of + * DSI we have 2 plls. + */ + uint32_t index; + + bool ssc_en; + bool bonding_en; + + void *priv; + struct platform_device *pdev; + struct dp_parser *parser; + struct dp_power *power; + struct dp_aux *aux; + struct dp_pll_io io; + struct clk_onecell_data *clk_data; + u32 dp_core_revision; + u32 clk_factor; + + int (*pll_cfg)(struct dp_pll *pll, unsigned long rate); + int (*pll_prepare)(struct dp_pll *pll); + int (*pll_unprepare)(struct dp_pll *pll); +}; + +struct dp_pll_params { + /* COM PHY settings */ + u32 hsclk_sel; + u32 integloop_gain0_mode0; + u32 integloop_gain1_mode0; + u32 lock_cmp_en; + /* PHY vco divider */ + u32 phy_vco_div; + u32 dec_start_mode0; + u32 div_frac_start1_mode0; + u32 div_frac_start2_mode0; + u32 div_frac_start3_mode0; + u32 lock_cmp1_mode0; + u32 lock_cmp2_mode0; + u32 ssc_step_size1_mode0; + u32 ssc_step_size2_mode0; + u32 ssc_per1; + u32 ssc_per2; + u32 cmp_code1_mode0; + u32 cmp_code2_mode0; + u32 pll_ivco; + u32 bg_timer; + u32 core_clk_en; + u32 lane_offset_tx; + u32 lane_offset_rx; +}; + +struct dp_pll_db { + struct dp_pll *pll; + /* lane and orientation settings */ + u8 lane_cnt; + u8 orientation; + u32 rate_idx; + const struct dp_pll_params *pll_params; +}; + +static inline struct dp_pll_vco_clk *to_dp_vco_hw(struct clk_hw *hw) +{ + return container_of(hw, struct dp_pll_vco_clk, hw); +} + +static inline bool is_gdsc_disabled(struct dp_pll *pll) +{ + return (dp_pll_read(gdsc, 0x0) & BIT(31)) ? 
false : true; +} + +int dp_pll_clock_register_5nm(struct dp_pll *pll); +void dp_pll_clock_unregister_5nm(struct dp_pll *pll); +int dp_pll_clock_register_4nm(struct dp_pll *pll); +void dp_pll_clock_unregister_4nm(struct dp_pll *pll); + +struct dp_pll_in { + struct platform_device *pdev; + struct dp_aux *aux; + struct dp_parser *parser; + u32 dp_core_revision; +}; + +int dp_pll_clock_register_helper(struct dp_pll *pll, struct dp_pll_vco_clk *clks, int num_clks); +struct dp_pll *dp_pll_get(struct dp_pll_in *in); +void dp_pll_put(struct dp_pll *pll); +#endif /* __DP_PLL_H */ diff --git a/msm/dp/dp_pll_4nm.c b/msm/dp/dp_pll_4nm.c new file mode 100644 index 000000000..25cca3f43 --- /dev/null +++ b/msm/dp/dp_pll_4nm.c @@ -0,0 +1,904 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +/* + * Display Port PLL driver block diagram for branch clocks + * + * +------------------------+ +------------------------+ + * | dp_phy_pll_link_clk | | dp_phy_pll_vco_div_clk | + * +------------------------+ +------------------------+ + * | | + * | | + * V V + * dp_link_clk dp_pixel_clk + * + * + */ + +#include +#include +#include +#include +#include +#include +#include "dp_hpd.h" +#include "dp_debug.h" +#include "dp_pll.h" + +#define DP_PHY_CFG 0x0010 +#define DP_PHY_CFG_1 0x0014 +#define DP_PHY_PD_CTL 0x0018 +#define DP_PHY_MODE 0x001C + +#define DP_PHY_AUX_CFG1 0x0024 +#define DP_PHY_AUX_CFG2 0x0028 + +#define DP_PHY_TX0_TX1_LANE_CTL 0x0078 +#define DP_PHY_TX2_TX3_LANE_CTL 0x009C + +#define DP_PHY_SPARE0 0x00C8 +#define DP_PHY_STATUS 0x00E4 + +/* Tx registers */ +#define TXn_CLKBUF_ENABLE 0x0008 +#define TXn_TX_EMP_POST1_LVL 0x000C + +#define TXn_TX_DRV_LVL 0x0014 + +#define TXn_RESET_TSYNC_EN 0x001C +#define TXn_PRE_STALL_LDO_BOOST_EN 0x0020 +#define TXn_TX_BAND 0x0024 +#define TXn_INTERFACE_SELECT 0x002C + +#define TXn_RES_CODE_LANE_OFFSET_TX 0x003C +#define TXn_RES_CODE_LANE_OFFSET_RX 0x0040 + 
+#define TXn_TRANSCEIVER_BIAS_EN 0x0054 +#define TXn_HIGHZ_DRVR_EN 0x0058 +#define TXn_TX_POL_INV 0x005C +#define TXn_PARRATE_REC_DETECT_IDLE_EN 0x0060 + +/* PLL register offset */ +#define QSERDES_COM_BG_TIMER 0x00BC +#define QSERDES_COM_SSC_EN_CENTER 0x00C0 +#define QSERDES_COM_SSC_ADJ_PER1 0x00C4 +#define QSERDES_COM_SSC_PER1 0x00CC +#define QSERDES_COM_SSC_PER2 0x00D0 +#define QSERDES_COM_SSC_STEP_SIZE1_MODE0 0x0060 +#define QSERDES_COM_SSC_STEP_SIZE2_MODE0 0X0064 +#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x00DC +#define QSERDES_COM_CLK_ENABLE1 0x00E0 +#define QSERDES_COM_SYS_CLK_CTRL 0x00E4 +#define QSERDES_COM_SYSCLK_BUF_ENABLE 0x00E8 +#define QSERDES_COM_PLL_IVCO 0x00F4 + +#define QSERDES_COM_CP_CTRL_MODE0 0x0070 +#define QSERDES_COM_PLL_RCTRL_MODE0 0x0074 +#define QSERDES_COM_PLL_CCTRL_MODE0 0x0078 +#define QSERDES_COM_SYSCLK_EN_SEL 0x0110 +#define QSERDES_COM_RESETSM_CNTRL 0x0118 +#define QSERDES_COM_LOCK_CMP_EN 0x0120 +#define QSERDES_COM_LOCK_CMP1_MODE0 0x0080 +#define QSERDES_COM_LOCK_CMP2_MODE0 0x0084 + +#define QSERDES_COM_DEC_START_MODE0 0x0088 +#define QSERDES_COM_DIV_FRAC_START1_MODE0 0x0090 +#define QSERDES_COM_DIV_FRAC_START2_MODE0 0x0094 +#define QSERDES_COM_DIV_FRAC_START3_MODE0 0x0098 +#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x00A0 +#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x00A4 +#define QSERDES_COM_VCO_TUNE_CTRL 0x013C +#define QSERDES_COM_VCO_TUNE_MAP 0x0140 + +#define QSERDES_COM_CMN_STATUS 0x01D0 +#define QSERDES_COM_CLK_SEL 0x0164 +#define QSERDES_COM_HSCLK_SEL_1 0x003C + +#define QSERDES_COM_CORECLK_DIV_MODE0 0x007C + +#define QSERDES_COM_CORE_CLK_EN 0x0170 +#define QSERDES_COM_C_READY_STATUS 0x01F8 +#define QSERDES_COM_CMN_CONFIG_1 0x0174 + +#define QSERDES_COM_SVS_MODE_CLK_SEL 0x017C +#define QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x0058 +#define QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x005C +/* Tx tran offsets */ +#define DP_TRAN_DRVR_EMP_EN 0x00C0 +#define DP_TX_INTERFACE_MODE 0x00C4 + +/* Tx VMODE offsets */ +#define 
DP_VMODE_CTRL1 0x00C8 + +#define DP_PHY_PLL_POLL_SLEEP_US 500 +#define DP_PHY_PLL_POLL_TIMEOUT_US 10000 + +#define DP_VCO_RATE_8100MHZDIV1000 8100000UL +#define DP_VCO_RATE_9720MHZDIV1000 9720000UL +#define DP_VCO_RATE_10800MHZDIV1000 10800000UL + +#define DP_PLL_NUM_CLKS 2 + +#define DP_4NM_C_READY BIT(0) +#define DP_4NM_FREQ_DONE BIT(0) +#define DP_4NM_PLL_LOCKED BIT(1) +#define DP_4NM_PHY_READY BIT(1) +#define DP_4NM_TSYNC_DONE BIT(0) + +static const struct dp_pll_params pll_params_v1[HSCLK_RATE_MAX] = { + {0x05, 0x3f, 0x00, 0x04, 0x01, 0x69, 0x00, 0x80, 0x07, 0x6f, 0x08, 0x45, 0x06, 0x36, 0x01, + 0xe2, 0x18, 0x0f, 0x0e, 0x0f, 0x0c, 0x0c}, + {0x03, 0x3f, 0x00, 0x08, 0x01, 0x69, 0x00, 0x80, 0x07, 0x0f, 0x0e, 0x45, 0x06, 0x36, 0x01, + 0xe2, 0x18, 0x0f, 0x0e, 0x0f, 0x0c, 0x0c}, + {0x01, 0x3f, 0x00, 0x08, 0x02, 0x8c, 0x00, 0x00, 0x0a, 0x1f, 0x1c, 0x5c, 0x08, 0x36, 0x01, + 0x2e, 0x21, 0x0f, 0x0e, 0x0f, 0x0c, 0x0c}, + {0x00, 0x3f, 0x00, 0x08, 0x00, 0x69, 0x00, 0x80, 0x07, 0x2f, 0x2a, 0x45, 0x06, 0x36, 0x01, + 0xe2, 0x18, 0x0f, 0x0e, 0x0f, 0x0c, 0x0c}, +}; + +static const struct dp_pll_params pll_params_v1_1[HSCLK_RATE_MAX] = { + {0x05, 0x3f, 0x00, 0x04, 0x01, 0x34, 0x00, 0xc0, 0x0b, 0x37, 0x04, 0x92, 0x01, 0x6b, 0x02, + 0x71, 0x0c, 0x0f, 0x0a, 0x0f, 0x0c, 0x0c}, + {0x03, 0x3f, 0x00, 0x08, 0x01, 0x34, 0x00, 0xc0, 0x0b, 0x07, 0x07, 0x92, 0x01, 0x6b, 0x02, + 0x71, 0x0c, 0x0f, 0x0a, 0x0f, 0x0c, 0x0c}, + {0x01, 0x3f, 0x00, 0x08, 0x02, 0x46, 0x00, 0x00, 0x05, 0x0f, 0x0e, 0x18, 0x02, 0x6b, 0x02, + 0x97, 0x10, 0x0f, 0x0a, 0x0f, 0x0c, 0x0c}, + {0x00, 0x3f, 0x00, 0x08, 0x00, 0x34, 0x00, 0xc0, 0x0b, 0x17, 0x15, 0x92, 0x01, 0x6b, 0x02, + 0x71, 0x0c, 0x0f, 0x0a, 0x0f, 0x0c, 0x0c} + +}; + +static int set_vco_div(struct dp_pll *pll, unsigned long rate) +{ + u32 div, val; + + if (!pll) + return -EINVAL; + + if (is_gdsc_disabled(pll)) + return -EINVAL; + + val = dp_pll_read(dp_phy, DP_PHY_VCO_DIV); + val &= ~0x03; + + switch (rate) { + case DP_VCO_HSCLK_RATE_1620MHZDIV1000: + case 
DP_VCO_HSCLK_RATE_2700MHZDIV1000: + div = 2; + val |= 1; + break; + case DP_VCO_HSCLK_RATE_5400MHZDIV1000: + div = 4; + val |= 2; + break; + case DP_VCO_HSCLK_RATE_8100MHZDIV1000: + div = 6; + /* val = 0 for this case, so no update needed */ + break; + default: + /* No other link rates are supported */ + return -EINVAL; + } + + dp_pll_write(dp_phy, DP_PHY_VCO_DIV, val); + /* Make sure the PHY registers writes are done */ + wmb(); + + /* + * Set the rate for the link and pixel clock sources so that the + * linux clock framework can appropriately compute the MND values + * whenever the pixel clock rate is set. + */ + clk_set_rate(pll->clk_data->clks[0], pll->vco_rate / 10); + clk_set_rate(pll->clk_data->clks[1], pll->vco_rate / div); + + DP_DEBUG("val=%#x div=%x link_clk rate=%lu vco_div_clk rate=%lu\n", + val, div, pll->vco_rate / 10, pll->vco_rate / div); + + return 0; +} + +static int dp_vco_pll_init_db_4nm(struct dp_pll_db *pdb, + unsigned long rate) +{ + struct dp_pll *pll = pdb->pll; + u32 spare_value = 0; + + spare_value = dp_pll_read(dp_phy, DP_PHY_SPARE0); + pdb->lane_cnt = spare_value & 0x0F; + pdb->orientation = (spare_value & 0xF0) >> 4; + + DP_DEBUG("spare_value=0x%x, ln_cnt=0x%x, orientation=0x%x\n", + spare_value, pdb->lane_cnt, pdb->orientation); + + switch (rate) { + case DP_VCO_HSCLK_RATE_1620MHZDIV1000: + DP_DEBUG("VCO rate: %ld\n", DP_VCO_RATE_9720MHZDIV1000); + pdb->rate_idx = HSCLK_RATE_1620MHZ; + break; + case DP_VCO_HSCLK_RATE_2700MHZDIV1000: + DP_DEBUG("VCO rate: %ld\n", DP_VCO_RATE_10800MHZDIV1000); + pdb->rate_idx = HSCLK_RATE_2700MHZ; + break; + case DP_VCO_HSCLK_RATE_5400MHZDIV1000: + DP_DEBUG("VCO rate: %ld\n", DP_VCO_RATE_10800MHZDIV1000); + pdb->rate_idx = HSCLK_RATE_5400MHZ; + break; + case DP_VCO_HSCLK_RATE_8100MHZDIV1000: + DP_DEBUG("VCO rate: %ld\n", DP_VCO_RATE_8100MHZDIV1000); + pdb->rate_idx = HSCLK_RATE_8100MHZ; + break; + default: + DP_ERR("unsupported rate %ld\n", rate); + return -EINVAL; + } + return 0; +} + +static int 
dp_config_vco_rate_4nm(struct dp_pll *pll, + unsigned long rate) +{ + int rc = 0; + struct dp_pll_db *pdb = (struct dp_pll_db *)pll->priv; + const struct dp_pll_params *params; + + rc = dp_vco_pll_init_db_4nm(pdb, rate); + if (rc < 0) { + DP_ERR("VCO Init DB failed\n"); + return rc; + } + + dp_pll_write(dp_phy, DP_PHY_CFG_1, 0x0F); + + if (pdb->lane_cnt != 4) { + if (pdb->orientation == ORIENTATION_CC2) + dp_pll_write(dp_phy, DP_PHY_PD_CTL, 0x6d); + else + dp_pll_write(dp_phy, DP_PHY_PD_CTL, 0x75); + } else { + dp_pll_write(dp_phy, DP_PHY_PD_CTL, 0x7d); + } + + if (pdb->rate_idx < HSCLK_RATE_MAX) { + params = &pdb->pll_params[pdb->rate_idx]; + } else { + DP_ERR("link rate not set\n"); + return -EINVAL; + } + + /* Make sure the PHY register writes are done */ + wmb(); + + dp_pll_write(dp_pll, QSERDES_COM_SVS_MODE_CLK_SEL, 0x15); + dp_pll_write(dp_pll, QSERDES_COM_SYSCLK_EN_SEL, 0x3b); + dp_pll_write(dp_pll, QSERDES_COM_SYS_CLK_CTRL, 0x02); + dp_pll_write(dp_pll, QSERDES_COM_CLK_ENABLE1, 0x0c); + dp_pll_write(dp_pll, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x06); + dp_pll_write(dp_pll, QSERDES_COM_CLK_SEL, 0x30); + /* Make sure the PHY register writes are done */ + wmb(); + + /* PLL Optimization */ + dp_pll_write(dp_pll, QSERDES_COM_PLL_IVCO, params->pll_ivco); + dp_pll_write(dp_pll, QSERDES_COM_PLL_CCTRL_MODE0, 0x36); + dp_pll_write(dp_pll, QSERDES_COM_PLL_RCTRL_MODE0, 0x16); + dp_pll_write(dp_pll, QSERDES_COM_CP_CTRL_MODE0, 0x06); + /* Make sure the PLL register writes are done */ + wmb(); + + /* link rate dependent params */ + dp_pll_write(dp_pll, QSERDES_COM_HSCLK_SEL_1, params->hsclk_sel); + dp_pll_write(dp_pll, QSERDES_COM_DEC_START_MODE0, params->dec_start_mode0); + dp_pll_write(dp_pll, + QSERDES_COM_DIV_FRAC_START1_MODE0, params->div_frac_start1_mode0); + dp_pll_write(dp_pll, + QSERDES_COM_DIV_FRAC_START2_MODE0, params->div_frac_start2_mode0); + dp_pll_write(dp_pll, + QSERDES_COM_DIV_FRAC_START3_MODE0, params->div_frac_start3_mode0); + dp_pll_write(dp_pll, 
QSERDES_COM_LOCK_CMP1_MODE0, params->lock_cmp1_mode0); + dp_pll_write(dp_pll, QSERDES_COM_LOCK_CMP2_MODE0, params->lock_cmp2_mode0); + dp_pll_write(dp_pll, QSERDES_COM_LOCK_CMP_EN, params->lock_cmp_en); + dp_pll_write(dp_phy, DP_PHY_VCO_DIV, params->phy_vco_div); + /* Make sure the PLL register writes are done */ + wmb(); + + dp_pll_write(dp_pll, QSERDES_COM_CMN_CONFIG_1, 0x12); + dp_pll_write(dp_pll, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, + params->integloop_gain0_mode0); + dp_pll_write(dp_pll, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, + params->integloop_gain1_mode0); + dp_pll_write(dp_pll, QSERDES_COM_VCO_TUNE_MAP, 0x00); + /* Make sure the PHY register writes are done */ + wmb(); + + dp_pll_write(dp_pll, QSERDES_COM_BG_TIMER, params->bg_timer); + dp_pll_write(dp_pll, QSERDES_COM_CORECLK_DIV_MODE0, 0x14); + dp_pll_write(dp_pll, QSERDES_COM_VCO_TUNE_CTRL, 0x00); + + if (pll->bonding_en) + dp_pll_write(dp_pll, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1f); + else + dp_pll_write(dp_pll, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x17); + + dp_pll_write(dp_pll, QSERDES_COM_CORE_CLK_EN, params->core_clk_en); + dp_pll_write(dp_pll, QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE0, + params->cmp_code1_mode0); + dp_pll_write(dp_pll, QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE0, + params->cmp_code2_mode0); + /* Make sure the PHY register writes are done */ + wmb(); + + if (pll->ssc_en) { + dp_pll_write(dp_pll, QSERDES_COM_SSC_EN_CENTER, 0x01); + dp_pll_write(dp_pll, QSERDES_COM_SSC_ADJ_PER1, 0x00); + dp_pll_write(dp_pll, QSERDES_COM_SSC_PER1, params->ssc_per1); + dp_pll_write(dp_pll, QSERDES_COM_SSC_PER2, params->ssc_per2); + dp_pll_write(dp_pll, QSERDES_COM_SSC_STEP_SIZE1_MODE0, + params->ssc_step_size1_mode0); + dp_pll_write(dp_pll, QSERDES_COM_SSC_STEP_SIZE2_MODE0, + params->ssc_step_size2_mode0); + } + + if (pdb->orientation == ORIENTATION_CC2) + dp_pll_write(dp_phy, DP_PHY_MODE, 0x4c); + else + dp_pll_write(dp_phy, DP_PHY_MODE, 0x5c); + + dp_pll_write(dp_phy, DP_PHY_AUX_CFG1, 0x13); + dp_pll_write(dp_phy, 
DP_PHY_AUX_CFG2, 0xA4); + /* Make sure the PLL register writes are done */ + wmb(); + + /* TX-0 register configuration */ + dp_pll_write(dp_phy, DP_PHY_TX0_TX1_LANE_CTL, 0x05); + dp_pll_write(dp_ln_tx0, DP_VMODE_CTRL1, 0x40); + dp_pll_write(dp_ln_tx0, TXn_PRE_STALL_LDO_BOOST_EN, 0x30); + dp_pll_write(dp_ln_tx0, TXn_INTERFACE_SELECT, 0x3b); + dp_pll_write(dp_ln_tx0, TXn_CLKBUF_ENABLE, 0x0f); + dp_pll_write(dp_ln_tx0, TXn_RESET_TSYNC_EN, 0x03); + dp_pll_write(dp_ln_tx0, DP_TRAN_DRVR_EMP_EN, 0xf); + dp_pll_write(dp_ln_tx0, TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00); + dp_pll_write(dp_ln_tx0, DP_TX_INTERFACE_MODE, 0x00); + dp_pll_write(dp_ln_tx0, TXn_RES_CODE_LANE_OFFSET_TX, params->lane_offset_tx); + dp_pll_write(dp_ln_tx0, TXn_RES_CODE_LANE_OFFSET_RX, params->lane_offset_rx); + dp_pll_write(dp_ln_tx0, TXn_TX_BAND, 0x04); + /* Make sure the PLL register writes are done */ + wmb(); + + /* TX-1 register configuration */ + dp_pll_write(dp_phy, DP_PHY_TX2_TX3_LANE_CTL, 0x05); + dp_pll_write(dp_ln_tx1, DP_VMODE_CTRL1, 0x40); + dp_pll_write(dp_ln_tx1, TXn_PRE_STALL_LDO_BOOST_EN, 0x30); + dp_pll_write(dp_ln_tx1, TXn_INTERFACE_SELECT, 0x3b); + dp_pll_write(dp_ln_tx1, TXn_CLKBUF_ENABLE, 0x0f); + dp_pll_write(dp_ln_tx1, TXn_RESET_TSYNC_EN, 0x03); + dp_pll_write(dp_ln_tx1, DP_TRAN_DRVR_EMP_EN, 0xf); + dp_pll_write(dp_ln_tx1, TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00); + dp_pll_write(dp_ln_tx1, DP_TX_INTERFACE_MODE, 0x00); + dp_pll_write(dp_ln_tx1, TXn_RES_CODE_LANE_OFFSET_TX, params->lane_offset_tx); + dp_pll_write(dp_ln_tx1, TXn_RES_CODE_LANE_OFFSET_RX, params->lane_offset_rx); + dp_pll_write(dp_ln_tx1, TXn_TX_BAND, 0x04); + /* Make sure the PHY register writes are done */ + wmb(); + + return set_vco_div(pll, rate); +} + +enum dp_4nm_pll_status { + C_READY, + FREQ_DONE, + PLL_LOCKED, + PHY_READY, + TSYNC_DONE, +}; + +char *dp_4nm_pll_get_status_name(enum dp_4nm_pll_status status) +{ + switch (status) { + case C_READY: + return "C_READY"; + case FREQ_DONE: + return "FREQ_DONE"; + case 
PLL_LOCKED: + return "PLL_LOCKED"; + case PHY_READY: + return "PHY_READY"; + case TSYNC_DONE: + return "TSYNC_DONE"; + default: + return "unknown"; + } +} + +static bool dp_4nm_pll_get_status(struct dp_pll *pll, + enum dp_4nm_pll_status status) +{ + u32 reg, state, bit; + void __iomem *base; + bool success = true; + + switch (status) { + case C_READY: + base = dp_pll_get_base(dp_pll); + reg = QSERDES_COM_C_READY_STATUS; + bit = DP_4NM_C_READY; + break; + case FREQ_DONE: + base = dp_pll_get_base(dp_pll); + reg = QSERDES_COM_CMN_STATUS; + bit = DP_4NM_FREQ_DONE; + break; + case PLL_LOCKED: + base = dp_pll_get_base(dp_pll); + reg = QSERDES_COM_CMN_STATUS; + bit = DP_4NM_PLL_LOCKED; + break; + case PHY_READY: + base = dp_pll_get_base(dp_phy); + reg = DP_PHY_STATUS; + bit = DP_4NM_PHY_READY; + break; + case TSYNC_DONE: + base = dp_pll_get_base(dp_phy); + reg = DP_PHY_STATUS; + bit = DP_4NM_TSYNC_DONE; + break; + default: + return false; + } + + if (readl_poll_timeout_atomic((base + reg), state, + ((state & bit) > 0), + DP_PHY_PLL_POLL_SLEEP_US, + DP_PHY_PLL_POLL_TIMEOUT_US)) { + DP_ERR("%s failed, status=%x\n", + dp_4nm_pll_get_status_name(status), state); + + success = false; + } + + return success; +} + +static int dp_pll_enable_4nm(struct dp_pll *pll) +{ + int rc = 0; + + pll->aux->state &= ~DP_STATE_PLL_LOCKED; + + dp_pll_write(dp_phy, DP_PHY_CFG, 0x01); + dp_pll_write(dp_phy, DP_PHY_CFG, 0x05); + dp_pll_write(dp_phy, DP_PHY_CFG, 0x01); + dp_pll_write(dp_phy, DP_PHY_CFG, 0x09); + dp_pll_write(dp_pll, QSERDES_COM_RESETSM_CNTRL, 0x20); + wmb(); /* Make sure the PLL register writes are done */ + + if (!dp_4nm_pll_get_status(pll, C_READY)) { + rc = -EINVAL; + goto lock_err; + } + + if (!dp_4nm_pll_get_status(pll, FREQ_DONE)) { + rc = -EINVAL; + goto lock_err; + } + + if (!dp_4nm_pll_get_status(pll, PLL_LOCKED)) { + rc = -EINVAL; + goto lock_err; + } + + dp_pll_write(dp_phy, DP_PHY_CFG, 0x19); + /* Make sure the PHY register writes are done */ + wmb(); + + if 
(!dp_4nm_pll_get_status(pll, TSYNC_DONE)) { + rc = -EINVAL; + goto lock_err; + } + + if (!dp_4nm_pll_get_status(pll, PHY_READY)) { + rc = -EINVAL; + goto lock_err; + } + + pll->aux->state |= DP_STATE_PLL_LOCKED; + DP_DEBUG("PLL is locked\n"); + +lock_err: + return rc; +} + +static void dp_pll_disable_4nm(struct dp_pll *pll) +{ + /* Assert DP PHY power down */ + dp_pll_write(dp_phy, DP_PHY_PD_CTL, 0x2); + /* + * Make sure all the register writes to disable PLL are + * completed before doing any other operation + */ + wmb(); +} + +static int dp_vco_set_rate_4nm(struct dp_pll *pll, unsigned long rate) +{ + int rc = 0; + + if (!pll) { + DP_ERR("invalid input parameters\n"); + return -EINVAL; + } + + DP_DEBUG("DP lane CLK rate=%ld\n", rate); + + rc = dp_config_vco_rate_4nm(pll, rate); + if (rc < 0) { + DP_ERR("Failed to set clk rate\n"); + return rc; + } + + return rc; +} + +static int dp_regulator_enable_4nm(struct dp_parser *parser, + enum dp_pm_type pm_type, bool enable) +{ + int rc = 0; + struct dss_module_power mp; + + if (pm_type < DP_CORE_PM || pm_type >= DP_MAX_PM) { + DP_ERR("invalid resource: %d %s\n", pm_type, + dp_parser_pm_name(pm_type)); + return -EINVAL; + } + + mp = parser->mp[pm_type]; + rc = msm_dss_enable_vreg(mp.vreg_config, mp.num_vreg, enable); + if (rc) { + DP_ERR("failed to '%s' vregs for %s\n", + enable ? "enable" : "disable", + dp_parser_pm_name(pm_type)); + return rc; + } + + DP_DEBUG("success: '%s' vregs for %s\n", enable ? 
"enable" : "disable", + dp_parser_pm_name(pm_type)); + return rc; +} + +static int dp_pll_configure(struct dp_pll *pll, unsigned long rate) +{ + int rc = 0; + + if (!pll || !rate) { + DP_ERR("invalid input parameters rate = %lu\n", rate); + return -EINVAL; + } + + rate = rate * 10; + + if (rate <= DP_VCO_HSCLK_RATE_1620MHZDIV1000) + rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000; + else if (rate <= DP_VCO_HSCLK_RATE_2700MHZDIV1000) + rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000; + else if (rate <= DP_VCO_HSCLK_RATE_5400MHZDIV1000) + rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000; + else + rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000; + + pll->vco_rate = rate; + rc = dp_vco_set_rate_4nm(pll, rate); + if (rc < 0) { + DP_ERR("pll rate %s set failed\n", rate); + pll->vco_rate = 0; + return rc; + } + + DP_DEBUG("pll rate %lu set success\n", rate); + return rc; +} + +static int dp_pll_prepare(struct dp_pll *pll) +{ + int rc = 0; + + if (!pll) { + DP_ERR("invalid input parameters\n"); + return -EINVAL; + } + + /* + * Enable DP_PM_PLL regulator if the PLL revision is 4nm-V1 and the + * link rate is 8.1Gbps. This will result in voting to place Mx rail in + * turbo as required for V1 hardware PLL functionality. 
+ */ + if (pll->revision >= DP_PLL_4NM_V1 && + pll->vco_rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000) { + rc = dp_regulator_enable_4nm(pll->parser, DP_PLL_PM, true); + if (rc < 0) { + DP_ERR("enable pll power failed\n"); + return rc; + } + } + + rc = dp_pll_enable_4nm(pll); + if (rc < 0) + DP_ERR("ndx=%d failed to enable dp pll\n", pll->index); + + return rc; +} + +static int dp_pll_unprepare(struct dp_pll *pll) +{ + int rc = 0; + + if (!pll) { + DP_ERR("invalid input parameter\n"); + return -EINVAL; + } + + if (pll->revision >= DP_PLL_4NM_V1 && + pll->vco_rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000) { + rc = dp_regulator_enable_4nm(pll->parser, DP_PLL_PM, false); + if (rc < 0) { + DP_ERR("disable pll power failed\n"); + return rc; + } + } + + dp_pll_disable_4nm(pll); + pll->vco_rate = 0; + + return rc; +} + +unsigned long dp_vco_recalc_rate_4nm(struct dp_pll *pll) +{ + u32 hsclk_sel, link_clk_divsel, hsclk_div, link_clk_div = 0; + unsigned long vco_rate = 0; + + if (!pll) { + DP_ERR("invalid input parameters\n"); + return -EINVAL; + } + + if (is_gdsc_disabled(pll)) + return 0; + + hsclk_sel = dp_pll_read(dp_pll, QSERDES_COM_HSCLK_SEL_1); + hsclk_sel &= 0x0f; + + switch (hsclk_sel) { + case 5: + hsclk_div = 5; + break; + case 3: + hsclk_div = 3; + break; + case 1: + hsclk_div = 2; + break; + case 0: + hsclk_div = 1; + break; + default: + DP_DEBUG("unknown divider. forcing to default\n"); + hsclk_div = 5; + break; + } + + link_clk_divsel = dp_pll_read(dp_phy, DP_PHY_AUX_CFG2); + link_clk_divsel >>= 2; + link_clk_divsel &= 0x3; + + if (link_clk_divsel == 0) + link_clk_div = 5; + else if (link_clk_divsel == 1) + link_clk_div = 10; + else if (link_clk_divsel == 2) + link_clk_div = 20; + else + DP_ERR("unsupported div. 
Phy_mode: %d\n", link_clk_divsel); + + if (link_clk_div == 20) { + vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000; + } else { + if (hsclk_div == 5) + vco_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000; + else if (hsclk_div == 3) + vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000; + else if (hsclk_div == 2) + vco_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000; + else + vco_rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000; + } + + DP_DEBUG("hsclk: sel=0x%x, div=0x%x; lclk: sel=%u, div=%u, rate=%lu\n", + hsclk_sel, hsclk_div, link_clk_divsel, link_clk_div, vco_rate); + + return vco_rate; +} + +static unsigned long dp_pll_link_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct dp_pll *pll = NULL; + struct dp_pll_vco_clk *pll_link = NULL; + unsigned long rate = 0; + + if (!hw) { + DP_ERR("invalid input parameters\n"); + return -EINVAL; + } + + pll_link = to_dp_vco_hw(hw); + pll = pll_link->priv; + + rate = pll->vco_rate * pll->clk_factor / 10; + + return rate; +} + +static long dp_pll_link_clk_round(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct dp_pll *pll = NULL; + struct dp_pll_vco_clk *pll_link = NULL; + + if (!hw) { + DP_ERR("invalid input parameters\n"); + return -EINVAL; + } + + pll_link = to_dp_vco_hw(hw); + pll = pll_link->priv; + + rate = pll->vco_rate * pll->clk_factor / 10; + + return rate; +} + +static unsigned long dp_pll_vco_div_clk_get_rate(struct dp_pll *pll) +{ + if (pll->vco_rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000) + return ((pll->vco_rate / 6) * pll->clk_factor); + else if (pll->vco_rate == DP_VCO_HSCLK_RATE_5400MHZDIV1000) + return ((pll->vco_rate / 4) * pll->clk_factor); + else + return ((pll->vco_rate / 2) * pll->clk_factor); +} + +static unsigned long dp_pll_vco_div_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct dp_pll *pll = NULL; + struct dp_pll_vco_clk *pll_link = NULL; + + if (!hw) { + DP_ERR("invalid input parameters\n"); + return -EINVAL; + } + + pll_link = to_dp_vco_hw(hw); + pll = 
pll_link->priv; + + return dp_pll_vco_div_clk_get_rate(pll); +} + +static long dp_pll_vco_div_clk_round(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + return dp_pll_vco_div_clk_recalc_rate(hw, *parent_rate); +} + +static const struct clk_ops pll_link_clk_ops = { + .recalc_rate = dp_pll_link_clk_recalc_rate, + .round_rate = dp_pll_link_clk_round, +}; + +static const struct clk_ops pll_vco_div_clk_ops = { + .recalc_rate = dp_pll_vco_div_clk_recalc_rate, + .round_rate = dp_pll_vco_div_clk_round, +}; + +static struct dp_pll_vco_clk dp0_phy_pll_clks[DP_PLL_NUM_CLKS] = { + { + .hw.init = &(struct clk_init_data) { + .name = "dp0_phy_pll_link_clk", + .ops = &pll_link_clk_ops, + }, + }, + { + .hw.init = &(struct clk_init_data) { + .name = "dp0_phy_pll_vco_div_clk", + .ops = &pll_vco_div_clk_ops, + }, + }, +}; + +static struct dp_pll_vco_clk dp_phy_pll_clks[DP_PLL_NUM_CLKS] = { + { + .hw.init = &(struct clk_init_data) { + .name = "dp_phy_pll_link_clk", + .ops = &pll_link_clk_ops, + }, + }, + { + .hw.init = &(struct clk_init_data) { + .name = "dp_phy_pll_vco_div_clk", + .ops = &pll_vco_div_clk_ops, + }, + }, +}; + +static struct dp_pll_db dp_pdb; + +int dp_pll_clock_register_4nm(struct dp_pll *pll) +{ + int rc = 0; + struct platform_device *pdev; + struct dp_pll_vco_clk *pll_clks; + + if (!pll) { + DP_ERR("pll data not initialized\n"); + return -EINVAL; + } + pdev = pll->pdev; + + pll->clk_data = kzalloc(sizeof(*pll->clk_data), GFP_KERNEL); + if (!pll->clk_data) + return -ENOMEM; + + pll->clk_data->clks = kcalloc(DP_PLL_NUM_CLKS, sizeof(struct clk *), + GFP_KERNEL); + if (!pll->clk_data->clks) { + kfree(pll->clk_data); + return -ENOMEM; + } + + pll->clk_data->clk_num = DP_PLL_NUM_CLKS; + pll->priv = &dp_pdb; + dp_pdb.pll = pll; + + if (pll->revision == DP_PLL_4NM_V1_1) + dp_pdb.pll_params = pll_params_v1_1; + else + dp_pdb.pll_params = pll_params_v1; + + pll->pll_cfg = dp_pll_configure; + pll->pll_prepare = dp_pll_prepare; + pll->pll_unprepare = 
dp_pll_unprepare; + + if (pll->dp_core_revision >= 0x10040000) + pll_clks = dp0_phy_pll_clks; + else + pll_clks = dp_phy_pll_clks; + + rc = dp_pll_clock_register_helper(pll, pll_clks, DP_PLL_NUM_CLKS); + if (rc) { + DP_ERR("Clock register failed rc=%d\n", rc); + goto clk_reg_fail; + } + + rc = of_clk_add_provider(pdev->dev.of_node, + of_clk_src_onecell_get, pll->clk_data); + if (rc) { + DP_ERR("Clock add provider failed rc=%d\n", rc); + goto clk_reg_fail; + } + + DP_DEBUG("success\n"); + return rc; + +clk_reg_fail: + dp_pll_clock_unregister_4nm(pll); + return rc; +} + +void dp_pll_clock_unregister_4nm(struct dp_pll *pll) +{ + kfree(pll->clk_data->clks); + kfree(pll->clk_data); +} diff --git a/msm/dp/dp_pll_5nm.c b/msm/dp/dp_pll_5nm.c new file mode 100644 index 000000000..fdc698969 --- /dev/null +++ b/msm/dp/dp_pll_5nm.c @@ -0,0 +1,882 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. 
+ */ + +/* + * Display Port PLL driver block diagram for branch clocks + * + * +------------------------+ +------------------------+ + * | dp_phy_pll_link_clk | | dp_phy_pll_vco_div_clk | + * +------------------------+ +------------------------+ + * | | + * | | + * V V + * dp_link_clk dp_pixel_clk + * + * + */ + +#include +#include +#include +#include +#include +#include +#include "dp_hpd.h" +#include "dp_debug.h" +#include "dp_pll.h" + +#define DP_PHY_CFG 0x0010 +#define DP_PHY_CFG_1 0x0014 +#define DP_PHY_PD_CTL 0x0018 +#define DP_PHY_MODE 0x001C + +#define DP_PHY_AUX_CFG1 0x0024 +#define DP_PHY_AUX_CFG2 0x0028 + +#define DP_PHY_TX0_TX1_LANE_CTL 0x0078 +#define DP_PHY_TX2_TX3_LANE_CTL 0x009C + +#define DP_PHY_SPARE0 0x00C8 +#define DP_PHY_STATUS 0x00DC + +/* Tx registers */ +#define TXn_CLKBUF_ENABLE 0x0008 +#define TXn_TX_EMP_POST1_LVL 0x000C + +#define TXn_TX_DRV_LVL 0x0014 + +#define TXn_RESET_TSYNC_EN 0x001C +#define TXn_PRE_STALL_LDO_BOOST_EN 0x0020 +#define TXn_TX_BAND 0x0024 +#define TXn_INTERFACE_SELECT 0x002C + +#define TXn_RES_CODE_LANE_OFFSET_TX 0x003C +#define TXn_RES_CODE_LANE_OFFSET_RX 0x0040 + +#define TXn_TRANSCEIVER_BIAS_EN 0x0054 +#define TXn_HIGHZ_DRVR_EN 0x0058 +#define TXn_TX_POL_INV 0x005C +#define TXn_PARRATE_REC_DETECT_IDLE_EN 0x0060 + +/* PLL register offset */ +#define QSERDES_COM_BG_TIMER 0x000C +#define QSERDES_COM_SSC_EN_CENTER 0x0010 +#define QSERDES_COM_SSC_ADJ_PER1 0x0014 +#define QSERDES_COM_SSC_PER1 0x001C +#define QSERDES_COM_SSC_PER2 0x0020 +#define QSERDES_COM_SSC_STEP_SIZE1_MODE0 0x0024 +#define QSERDES_COM_SSC_STEP_SIZE2_MODE0 0X0028 +#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x0044 +#define QSERDES_COM_CLK_ENABLE1 0x0048 +#define QSERDES_COM_SYS_CLK_CTRL 0x004C +#define QSERDES_COM_SYSCLK_BUF_ENABLE 0x0050 +#define QSERDES_COM_PLL_IVCO 0x0058 + +#define QSERDES_COM_CP_CTRL_MODE0 0x0074 +#define QSERDES_COM_PLL_RCTRL_MODE0 0x007C +#define QSERDES_COM_PLL_CCTRL_MODE0 0x0084 +#define QSERDES_COM_SYSCLK_EN_SEL 0x0094 +#define 
QSERDES_COM_RESETSM_CNTRL 0x009C +#define QSERDES_COM_LOCK_CMP_EN 0x00A4 +#define QSERDES_COM_LOCK_CMP1_MODE0 0x00AC +#define QSERDES_COM_LOCK_CMP2_MODE0 0x00B0 + +#define QSERDES_COM_DEC_START_MODE0 0x00BC +#define QSERDES_COM_DIV_FRAC_START1_MODE0 0x00CC +#define QSERDES_COM_DIV_FRAC_START2_MODE0 0x00D0 +#define QSERDES_COM_DIV_FRAC_START3_MODE0 0x00D4 +#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x00EC +#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x00F0 +#define QSERDES_COM_VCO_TUNE_CTRL 0x0108 +#define QSERDES_COM_VCO_TUNE_MAP 0x010C + +#define QSERDES_COM_CMN_STATUS 0x0140 +#define QSERDES_COM_CLK_SEL 0x0154 +#define QSERDES_COM_HSCLK_SEL 0x0158 + +#define QSERDES_COM_CORECLK_DIV_MODE0 0x0168 + +#define QSERDES_COM_CORE_CLK_EN 0x0174 +#define QSERDES_COM_C_READY_STATUS 0x0178 +#define QSERDES_COM_CMN_CONFIG 0x017C + +#define QSERDES_COM_SVS_MODE_CLK_SEL 0x0184 + +/* Tx tran offsets */ +#define DP_TRAN_DRVR_EMP_EN 0x00C0 +#define DP_TX_INTERFACE_MODE 0x00C4 + +/* Tx VMODE offsets */ +#define DP_VMODE_CTRL1 0x00C8 + +#define DP_PHY_PLL_POLL_SLEEP_US 500 +#define DP_PHY_PLL_POLL_TIMEOUT_US 10000 + +#define DP_VCO_RATE_8100MHZDIV1000 8100000UL +#define DP_VCO_RATE_9720MHZDIV1000 9720000UL +#define DP_VCO_RATE_10800MHZDIV1000 10800000UL + +#define DP_PLL_NUM_CLKS 2 + +#define DP_5NM_C_READY BIT(0) +#define DP_5NM_FREQ_DONE BIT(0) +#define DP_5NM_PLL_LOCKED BIT(1) +#define DP_5NM_PHY_READY BIT(1) +#define DP_5NM_TSYNC_DONE BIT(0) + +static const struct dp_pll_params pll_params[HSCLK_RATE_MAX] = { + {0x05, 0x3f, 0x00, 0x04, 0x01, 0x69, 0x00, 0x80, 0x07, 0x6f, 0x08, 0x45, 0x06, 0x36, 0x01, + 0x00, 0x00, 0x0f, 0x0a, 0x1f, 0x0a, 0x11}, + {0x03, 0x3f, 0x00, 0x08, 0x01, 0x69, 0x00, 0x80, 0x07, 0x0f, 0x0e, 0x45, 0x06, 0x36, 0x01, + 0x00, 0x00, 0x0f, 0x0a, 0x1f, 0x0a, 0x11}, + {0x01, 0x3f, 0x00, 0x08, 0x02, 0x8c, 0x00, 0x00, 0x0a, 0x1f, 0x1c, 0x5c, 0x08, 0x36, 0x01, + 0x00, 0x00, 0x0f, 0x0a, 0x1f, 0x0a, 0x11}, + {0x00, 0x3f, 0x00, 0x08, 0x00, 0x69, 0x00, 0x80, 0x07, 0x2f, 0x2a, 
0x45, 0x06, 0x36, 0x01, + 0x00, 0x00, 0x0f, 0x0a, 0x1f, 0x0a, 0x11}, +}; + +static int set_vco_div(struct dp_pll *pll, unsigned long rate) +{ + u32 div, val; + + if (!pll) + return -EINVAL; + + if (is_gdsc_disabled(pll)) + return -EINVAL; + + val = dp_pll_read(dp_phy, DP_PHY_VCO_DIV); + val &= ~0x03; + + switch (rate) { + case DP_VCO_HSCLK_RATE_1620MHZDIV1000: + case DP_VCO_HSCLK_RATE_2700MHZDIV1000: + div = 2; + val |= 1; + break; + case DP_VCO_HSCLK_RATE_5400MHZDIV1000: + div = 4; + val |= 2; + break; + case DP_VCO_HSCLK_RATE_8100MHZDIV1000: + div = 6; + /* val = 0 for this case, so no update needed */ + break; + default: + /* No other link rates are supported */ + return -EINVAL; + } + + dp_pll_write(dp_phy, DP_PHY_VCO_DIV, val); + /* Make sure the PHY registers writes are done */ + wmb(); + + /* + * Set the rate for the link and pixel clock sources so that the + * linux clock framework can appropriately compute the MND values + * whenever the pixel clock rate is set. + */ + clk_set_rate(pll->clk_data->clks[0], pll->vco_rate / 10); + clk_set_rate(pll->clk_data->clks[1], pll->vco_rate / div); + + DP_DEBUG("val=%#x div=%x link_clk rate=%lu vco_div_clk rate=%lu\n", + val, div, pll->vco_rate / 10, pll->vco_rate / div); + + return 0; +} + +static int dp_vco_pll_init_db_5nm(struct dp_pll_db *pdb, + unsigned long rate) +{ + struct dp_pll *pll = pdb->pll; + u32 spare_value = 0; + + spare_value = dp_pll_read(dp_phy, DP_PHY_SPARE0); + pdb->lane_cnt = spare_value & 0x0F; + pdb->orientation = (spare_value & 0xF0) >> 4; + + DP_DEBUG("spare_value=0x%x, ln_cnt=0x%x, orientation=0x%x\n", + spare_value, pdb->lane_cnt, pdb->orientation); + + switch (rate) { + case DP_VCO_HSCLK_RATE_1620MHZDIV1000: + DP_DEBUG("VCO rate: %ld\n", DP_VCO_RATE_9720MHZDIV1000); + pdb->rate_idx = HSCLK_RATE_1620MHZ; + break; + case DP_VCO_HSCLK_RATE_2700MHZDIV1000: + DP_DEBUG("VCO rate: %ld\n", DP_VCO_RATE_10800MHZDIV1000); + pdb->rate_idx = HSCLK_RATE_2700MHZ; + break; + case 
DP_VCO_HSCLK_RATE_5400MHZDIV1000: + DP_DEBUG("VCO rate: %ld\n", DP_VCO_RATE_10800MHZDIV1000); + pdb->rate_idx = HSCLK_RATE_5400MHZ; + break; + case DP_VCO_HSCLK_RATE_8100MHZDIV1000: + DP_DEBUG("VCO rate: %ld\n", DP_VCO_RATE_8100MHZDIV1000); + pdb->rate_idx = HSCLK_RATE_8100MHZ; + break; + default: + DP_ERR("unsupported rate %ld\n", rate); + return -EINVAL; + } + return 0; +} + +static int dp_config_vco_rate_5nm(struct dp_pll *pll, + unsigned long rate) +{ + int rc = 0; + struct dp_pll_db *pdb = (struct dp_pll_db *)pll->priv; + const struct dp_pll_params *params; + + rc = dp_vco_pll_init_db_5nm(pdb, rate); + if (rc < 0) { + DP_ERR("VCO Init DB failed\n"); + return rc; + } + + dp_pll_write(dp_phy, DP_PHY_CFG_1, 0x0F); + + if (pdb->lane_cnt != 4) { + if (pdb->orientation == ORIENTATION_CC2) + dp_pll_write(dp_phy, DP_PHY_PD_CTL, 0x6d); + else + dp_pll_write(dp_phy, DP_PHY_PD_CTL, 0x75); + } else { + dp_pll_write(dp_phy, DP_PHY_PD_CTL, 0x7d); + } + + /* Make sure the PHY register writes are done */ + wmb(); + + if (pdb->rate_idx < HSCLK_RATE_MAX) { + params = &pdb->pll_params[pdb->rate_idx]; + } else { + DP_ERR("link rate not set\n"); + return -EINVAL; + } + + dp_pll_write(dp_pll, QSERDES_COM_SVS_MODE_CLK_SEL, 0x05); + dp_pll_write(dp_pll, QSERDES_COM_SYSCLK_EN_SEL, 0x3b); + dp_pll_write(dp_pll, QSERDES_COM_SYS_CLK_CTRL, 0x02); + dp_pll_write(dp_pll, QSERDES_COM_CLK_ENABLE1, 0x0c); + dp_pll_write(dp_pll, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x06); + dp_pll_write(dp_pll, QSERDES_COM_CLK_SEL, 0x30); + /* Make sure the PHY register writes are done */ + wmb(); + + /* PLL Optimization */ + dp_pll_write(dp_pll, QSERDES_COM_PLL_IVCO, params->pll_ivco); + dp_pll_write(dp_pll, QSERDES_COM_PLL_CCTRL_MODE0, 0x36); + dp_pll_write(dp_pll, QSERDES_COM_PLL_RCTRL_MODE0, 0x16); + dp_pll_write(dp_pll, QSERDES_COM_CP_CTRL_MODE0, 0x06); + /* Make sure the PLL register writes are done */ + wmb(); + + /* link rate dependent params */ + dp_pll_write(dp_pll, QSERDES_COM_HSCLK_SEL, 
params->hsclk_sel); + dp_pll_write(dp_pll, QSERDES_COM_DEC_START_MODE0, params->dec_start_mode0); + dp_pll_write(dp_pll, + QSERDES_COM_DIV_FRAC_START1_MODE0, params->div_frac_start1_mode0); + dp_pll_write(dp_pll, + QSERDES_COM_DIV_FRAC_START2_MODE0, params->div_frac_start2_mode0); + dp_pll_write(dp_pll, + QSERDES_COM_DIV_FRAC_START3_MODE0, params->div_frac_start3_mode0); + dp_pll_write(dp_pll, QSERDES_COM_LOCK_CMP1_MODE0, params->lock_cmp1_mode0); + dp_pll_write(dp_pll, QSERDES_COM_LOCK_CMP2_MODE0, params->lock_cmp2_mode0); + dp_pll_write(dp_pll, QSERDES_COM_LOCK_CMP_EN, params->lock_cmp_en); + dp_pll_write(dp_phy, DP_PHY_VCO_DIV, params->phy_vco_div); + /* Make sure the PLL register writes are done */ + wmb(); + + dp_pll_write(dp_pll, QSERDES_COM_CMN_CONFIG, 0x02); + dp_pll_write(dp_pll, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, + params->integloop_gain0_mode0); + dp_pll_write(dp_pll, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, + params->integloop_gain1_mode0); + dp_pll_write(dp_pll, QSERDES_COM_VCO_TUNE_MAP, 0x00); + /* Make sure the PHY register writes are done */ + wmb(); + + dp_pll_write(dp_pll, QSERDES_COM_BG_TIMER, params->bg_timer); + dp_pll_write(dp_pll, QSERDES_COM_CORECLK_DIV_MODE0, 0x0a); + dp_pll_write(dp_pll, QSERDES_COM_VCO_TUNE_CTRL, 0x00); + if (pll->bonding_en) + dp_pll_write(dp_pll, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1f); + else + dp_pll_write(dp_pll, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x17); + dp_pll_write(dp_pll, QSERDES_COM_CORE_CLK_EN, params->core_clk_en); + /* Make sure the PHY register writes are done */ + wmb(); + + if (pll->ssc_en) { + dp_pll_write(dp_pll, QSERDES_COM_SSC_EN_CENTER, 0x01); + dp_pll_write(dp_pll, QSERDES_COM_SSC_ADJ_PER1, 0x00); + dp_pll_write(dp_pll, QSERDES_COM_SSC_PER1, params->ssc_per1); + dp_pll_write(dp_pll, QSERDES_COM_SSC_PER2, params->ssc_per2); + dp_pll_write(dp_pll, QSERDES_COM_SSC_STEP_SIZE1_MODE0, + params->ssc_step_size1_mode0); + dp_pll_write(dp_pll, QSERDES_COM_SSC_STEP_SIZE2_MODE0, + params->ssc_step_size2_mode0); + } + + 
if (pdb->orientation == ORIENTATION_CC2) + dp_pll_write(dp_phy, DP_PHY_MODE, 0x4c); + else + dp_pll_write(dp_phy, DP_PHY_MODE, 0x5c); + + dp_pll_write(dp_phy, DP_PHY_AUX_CFG1, 0x13); + dp_pll_write(dp_phy, DP_PHY_AUX_CFG2, 0xA4); + /* Make sure the PLL register writes are done */ + wmb(); + + /* TX-0 register configuration */ + dp_pll_write(dp_phy, DP_PHY_TX0_TX1_LANE_CTL, 0x05); + dp_pll_write(dp_ln_tx0, DP_VMODE_CTRL1, 0x40); + dp_pll_write(dp_ln_tx0, TXn_PRE_STALL_LDO_BOOST_EN, 0x30); + dp_pll_write(dp_ln_tx0, TXn_INTERFACE_SELECT, 0x3b); + dp_pll_write(dp_ln_tx0, TXn_CLKBUF_ENABLE, 0x0f); + dp_pll_write(dp_ln_tx0, TXn_RESET_TSYNC_EN, 0x03); + dp_pll_write(dp_ln_tx0, DP_TRAN_DRVR_EMP_EN, 0xf); + dp_pll_write(dp_ln_tx0, TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00); + dp_pll_write(dp_ln_tx0, DP_TX_INTERFACE_MODE, 0x00); + dp_pll_write(dp_ln_tx0, TXn_RES_CODE_LANE_OFFSET_TX, params->lane_offset_tx); + dp_pll_write(dp_ln_tx0, TXn_RES_CODE_LANE_OFFSET_RX, params->lane_offset_rx); + dp_pll_write(dp_ln_tx0, TXn_TX_BAND, 0x04); + /* Make sure the PLL register writes are done */ + wmb(); + + /* TX-1 register configuration */ + dp_pll_write(dp_phy, DP_PHY_TX2_TX3_LANE_CTL, 0x05); + dp_pll_write(dp_ln_tx1, DP_VMODE_CTRL1, 0x40); + dp_pll_write(dp_ln_tx1, TXn_PRE_STALL_LDO_BOOST_EN, 0x30); + dp_pll_write(dp_ln_tx1, TXn_INTERFACE_SELECT, 0x3b); + dp_pll_write(dp_ln_tx1, TXn_CLKBUF_ENABLE, 0x0f); + dp_pll_write(dp_ln_tx1, TXn_RESET_TSYNC_EN, 0x03); + dp_pll_write(dp_ln_tx1, DP_TRAN_DRVR_EMP_EN, 0xf); + dp_pll_write(dp_ln_tx1, TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00); + dp_pll_write(dp_ln_tx1, DP_TX_INTERFACE_MODE, 0x00); + dp_pll_write(dp_ln_tx1, TXn_RES_CODE_LANE_OFFSET_TX, params->lane_offset_tx); + dp_pll_write(dp_ln_tx1, TXn_RES_CODE_LANE_OFFSET_RX, params->lane_offset_rx); + dp_pll_write(dp_ln_tx1, TXn_TX_BAND, 0x04); + /* Make sure the PHY register writes are done */ + wmb(); + + return set_vco_div(pll, rate); +} + +enum dp_5nm_pll_status { + C_READY, + FREQ_DONE, + PLL_LOCKED, + 
PHY_READY, + TSYNC_DONE, +}; + +char *dp_5nm_pll_get_status_name(enum dp_5nm_pll_status status) +{ + switch (status) { + case C_READY: + return "C_READY"; + case FREQ_DONE: + return "FREQ_DONE"; + case PLL_LOCKED: + return "PLL_LOCKED"; + case PHY_READY: + return "PHY_READY"; + case TSYNC_DONE: + return "TSYNC_DONE"; + default: + return "unknown"; + } +} + +static bool dp_5nm_pll_get_status(struct dp_pll *pll, + enum dp_5nm_pll_status status) +{ + u32 reg, state, bit; + void __iomem *base; + bool success = true; + + switch (status) { + case C_READY: + base = dp_pll_get_base(dp_pll); + reg = QSERDES_COM_C_READY_STATUS; + bit = DP_5NM_C_READY; + break; + case FREQ_DONE: + base = dp_pll_get_base(dp_pll); + reg = QSERDES_COM_CMN_STATUS; + bit = DP_5NM_FREQ_DONE; + break; + case PLL_LOCKED: + base = dp_pll_get_base(dp_pll); + reg = QSERDES_COM_CMN_STATUS; + bit = DP_5NM_PLL_LOCKED; + break; + case PHY_READY: + base = dp_pll_get_base(dp_phy); + reg = DP_PHY_STATUS; + bit = DP_5NM_PHY_READY; + break; + case TSYNC_DONE: + base = dp_pll_get_base(dp_phy); + reg = DP_PHY_STATUS; + bit = DP_5NM_TSYNC_DONE; + break; + default: + return false; + } + + if (readl_poll_timeout_atomic((base + reg), state, + ((state & bit) > 0), + DP_PHY_PLL_POLL_SLEEP_US, + DP_PHY_PLL_POLL_TIMEOUT_US)) { + DP_ERR("%s failed, status=%x\n", + dp_5nm_pll_get_status_name(status), state); + + success = false; + } + + return success; +} + +static int dp_pll_enable_5nm(struct dp_pll *pll) +{ + int rc = 0; + + pll->aux->state &= ~DP_STATE_PLL_LOCKED; + + dp_pll_write(dp_phy, DP_PHY_CFG, 0x01); + dp_pll_write(dp_phy, DP_PHY_CFG, 0x05); + dp_pll_write(dp_phy, DP_PHY_CFG, 0x01); + dp_pll_write(dp_phy, DP_PHY_CFG, 0x09); + dp_pll_write(dp_pll, QSERDES_COM_RESETSM_CNTRL, 0x20); + wmb(); /* Make sure the PLL register writes are done */ + + if (!dp_5nm_pll_get_status(pll, C_READY)) { + rc = -EINVAL; + goto lock_err; + } + + if (!dp_5nm_pll_get_status(pll, FREQ_DONE)) { + rc = -EINVAL; + goto lock_err; + } + + if 
(!dp_5nm_pll_get_status(pll, PLL_LOCKED)) { + rc = -EINVAL; + goto lock_err; + } + + dp_pll_write(dp_phy, DP_PHY_CFG, 0x19); + /* Make sure the PHY register writes are done */ + wmb(); + + if (!dp_5nm_pll_get_status(pll, TSYNC_DONE)) { + rc = -EINVAL; + goto lock_err; + } + + if (!dp_5nm_pll_get_status(pll, PHY_READY)) { + rc = -EINVAL; + goto lock_err; + } + + pll->aux->state |= DP_STATE_PLL_LOCKED; + DP_DEBUG("PLL is locked\n"); + +lock_err: + return rc; +} + +static void dp_pll_disable_5nm(struct dp_pll *pll) +{ + /* Assert DP PHY power down */ + dp_pll_write(dp_phy, DP_PHY_PD_CTL, 0x2); + /* + * Make sure all the register writes to disable PLL are + * completed before doing any other operation + */ + wmb(); +} + +static int dp_vco_set_rate_5nm(struct dp_pll *pll, unsigned long rate) +{ + int rc = 0; + + if (!pll) { + DP_ERR("invalid input parameters\n"); + return -EINVAL; + } + + DP_DEBUG("DP lane CLK rate=%ld\n", rate); + + rc = dp_config_vco_rate_5nm(pll, rate); + if (rc < 0) { + DP_ERR("Failed to set clk rate\n"); + return rc; + } + + return rc; +} + +static int dp_regulator_enable_5nm(struct dp_parser *parser, + enum dp_pm_type pm_type, bool enable) +{ + int rc = 0; + struct dss_module_power mp; + + if (pm_type < DP_CORE_PM || pm_type >= DP_MAX_PM) { + DP_ERR("invalid resource: %d %s\n", pm_type, + dp_parser_pm_name(pm_type)); + return -EINVAL; + } + + mp = parser->mp[pm_type]; + rc = msm_dss_enable_vreg(mp.vreg_config, mp.num_vreg, enable); + if (rc) { + DP_ERR("failed to '%s' vregs for %s\n", + enable ? "enable" : "disable", + dp_parser_pm_name(pm_type)); + return rc; + } + + DP_DEBUG("success: '%s' vregs for %s\n", enable ? 
"enable" : "disable", + dp_parser_pm_name(pm_type)); + return rc; +} + +static int dp_pll_configure(struct dp_pll *pll, unsigned long rate) +{ + int rc = 0; + + if (!pll || !rate) { + DP_ERR("invalid input parameters rate = %lu\n", rate); + return -EINVAL; + } + + rate = rate * 10; + + if (rate <= DP_VCO_HSCLK_RATE_1620MHZDIV1000) + rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000; + else if (rate <= DP_VCO_HSCLK_RATE_2700MHZDIV1000) + rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000; + else if (rate <= DP_VCO_HSCLK_RATE_5400MHZDIV1000) + rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000; + else + rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000; + + pll->vco_rate = rate; + rc = dp_vco_set_rate_5nm(pll, rate); + if (rc < 0) { + DP_ERR("pll rate %s set failed\n", rate); + pll->vco_rate = 0; + return rc; + } + + DP_DEBUG("pll rate %lu set success\n", rate); + return rc; +} + +static int dp_pll_prepare(struct dp_pll *pll) +{ + int rc = 0; + + if (!pll) { + DP_ERR("invalid input parameters\n"); + return -EINVAL; + } + + /* + * Enable DP_PM_PLL regulator if the PLL revision is 5nm-V1 and the + * link rate is 8.1Gbps. This will result in voting to place Mx rail in + * turbo as required for V1 hardware PLL functionality. 
+ */ + if (pll->revision == DP_PLL_5NM_V1 && + pll->vco_rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000) { + rc = dp_regulator_enable_5nm(pll->parser, DP_PLL_PM, true); + if (rc < 0) { + DP_ERR("enable pll power failed\n"); + return rc; + } + } + + rc = dp_pll_enable_5nm(pll); + if (rc < 0) + DP_ERR("ndx=%d failed to enable dp pll\n", pll->index); + + return rc; +} + +static int dp_pll_unprepare(struct dp_pll *pll) +{ + int rc = 0; + + if (!pll) { + DP_ERR("invalid input parameter\n"); + return -EINVAL; + } + + if (pll->revision == DP_PLL_5NM_V1 && + pll->vco_rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000) { + rc = dp_regulator_enable_5nm(pll->parser, DP_PLL_PM, false); + if (rc < 0) { + DP_ERR("disable pll power failed\n"); + return rc; + } + } + + dp_pll_disable_5nm(pll); + pll->vco_rate = 0; + + return rc; +} + +unsigned long dp_vco_recalc_rate_5nm(struct dp_pll *pll) +{ + u32 hsclk_sel, link_clk_divsel, hsclk_div, link_clk_div = 0; + unsigned long vco_rate = 0; + + if (!pll) { + DP_ERR("invalid input parameters\n"); + return -EINVAL; + } + + if (is_gdsc_disabled(pll)) + return 0; + + hsclk_sel = dp_pll_read(dp_pll, QSERDES_COM_HSCLK_SEL); + hsclk_sel &= 0x0f; + + switch (hsclk_sel) { + case 5: + hsclk_div = 5; + break; + case 3: + hsclk_div = 3; + break; + case 1: + hsclk_div = 2; + break; + case 0: + hsclk_div = 1; + break; + default: + DP_DEBUG("unknown divider. forcing to default\n"); + hsclk_div = 5; + break; + } + + link_clk_divsel = dp_pll_read(dp_phy, DP_PHY_AUX_CFG2); + link_clk_divsel >>= 2; + link_clk_divsel &= 0x3; + + if (link_clk_divsel == 0) + link_clk_div = 5; + else if (link_clk_divsel == 1) + link_clk_div = 10; + else if (link_clk_divsel == 2) + link_clk_div = 20; + else + DP_ERR("unsupported div. 
Phy_mode: %d\n", link_clk_divsel); + + if (link_clk_div == 20) { + vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000; + } else { + if (hsclk_div == 5) + vco_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000; + else if (hsclk_div == 3) + vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000; + else if (hsclk_div == 2) + vco_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000; + else + vco_rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000; + } + + DP_DEBUG("hsclk: sel=0x%x, div=0x%x; lclk: sel=%u, div=%u, rate=%lu\n", + hsclk_sel, hsclk_div, link_clk_divsel, link_clk_div, vco_rate); + + return vco_rate; +} + +static unsigned long dp_pll_link_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct dp_pll *pll = NULL; + struct dp_pll_vco_clk *pll_link = NULL; + unsigned long rate = 0; + + if (!hw) { + DP_ERR("invalid input parameters\n"); + return -EINVAL; + } + + pll_link = to_dp_vco_hw(hw); + pll = pll_link->priv; + + rate = pll->vco_rate * pll->clk_factor / 10; + + return rate; +} + +static long dp_pll_link_clk_round(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct dp_pll *pll = NULL; + struct dp_pll_vco_clk *pll_link = NULL; + + if (!hw) { + DP_ERR("invalid input parameters\n"); + return -EINVAL; + } + + pll_link = to_dp_vco_hw(hw); + pll = pll_link->priv; + + rate = pll->vco_rate * pll->clk_factor / 10; + + return rate; +} + +static unsigned long dp_pll_vco_div_clk_get_rate(struct dp_pll *pll) +{ + if (pll->vco_rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000) + return (pll->vco_rate / (6 * pll->clk_factor)); + else if (pll->vco_rate == DP_VCO_HSCLK_RATE_5400MHZDIV1000) + return (pll->vco_rate / (4 * pll->clk_factor)); + else + return (pll->vco_rate / (2 * pll->clk_factor)); +} + +static unsigned long dp_pll_vco_div_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct dp_pll *pll = NULL; + struct dp_pll_vco_clk *pll_link = NULL; + + if (!hw) { + DP_ERR("invalid input parameters\n"); + return -EINVAL; + } + + pll_link = to_dp_vco_hw(hw); + pll = 
pll_link->priv; + + return dp_pll_vco_div_clk_get_rate(pll); +} + +static long dp_pll_vco_div_clk_round(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + return dp_pll_vco_div_clk_recalc_rate(hw, *parent_rate); +} + +static const struct clk_ops pll_link_clk_ops = { + .recalc_rate = dp_pll_link_clk_recalc_rate, + .round_rate = dp_pll_link_clk_round, +}; + +static const struct clk_ops pll_vco_div_clk_ops = { + .recalc_rate = dp_pll_vco_div_clk_recalc_rate, + .round_rate = dp_pll_vco_div_clk_round, +}; + +static struct dp_pll_vco_clk dp0_phy_pll_clks[DP_PLL_NUM_CLKS] = { + { + .hw.init = &(struct clk_init_data) { + .name = "dp0_phy_pll_link_clk", + .ops = &pll_link_clk_ops, + }, + }, + { + .hw.init = &(struct clk_init_data) { + .name = "dp0_phy_pll_vco_div_clk", + .ops = &pll_vco_div_clk_ops, + }, + }, +}; + +static struct dp_pll_vco_clk dp_phy_pll_clks[DP_PLL_NUM_CLKS] = { + { + .hw.init = &(struct clk_init_data) { + .name = "dp_phy_pll_link_clk", + .ops = &pll_link_clk_ops, + }, + }, + { + .hw.init = &(struct clk_init_data) { + .name = "dp_phy_pll_vco_div_clk", + .ops = &pll_vco_div_clk_ops, + }, + }, +}; + +static struct dp_pll_db dp_pdb; + +int dp_pll_clock_register_5nm(struct dp_pll *pll) +{ + int rc = 0; + struct platform_device *pdev; + struct dp_pll_vco_clk *pll_clks; + + if (!pll) { + DP_ERR("pll data not initialized\n"); + return -EINVAL; + } + pdev = pll->pdev; + + pll->clk_data = kzalloc(sizeof(*pll->clk_data), GFP_KERNEL); + if (!pll->clk_data) + return -ENOMEM; + + pll->clk_data->clks = kcalloc(DP_PLL_NUM_CLKS, sizeof(struct clk *), + GFP_KERNEL); + if (!pll->clk_data->clks) { + kfree(pll->clk_data); + return -ENOMEM; + } + + pll->clk_data->clk_num = DP_PLL_NUM_CLKS; + pll->priv = &dp_pdb; + dp_pdb.pll = pll; + dp_pdb.pll_params = pll_params; + + pll->pll_cfg = dp_pll_configure; + pll->pll_prepare = dp_pll_prepare; + pll->pll_unprepare = dp_pll_unprepare; + + if (pll->dp_core_revision >= 0x10040000) + pll_clks = dp0_phy_pll_clks; 
+ else + pll_clks = dp_phy_pll_clks; + + rc = dp_pll_clock_register_helper(pll, pll_clks, DP_PLL_NUM_CLKS); + if (rc) { + DP_ERR("Clock register failed rc=%d\n", rc); + goto clk_reg_fail; + } + + rc = of_clk_add_provider(pdev->dev.of_node, + of_clk_src_onecell_get, pll->clk_data); + if (rc) { + DP_ERR("Clock add provider failed rc=%d\n", rc); + goto clk_reg_fail; + } + + DP_DEBUG("success\n"); + return rc; + +clk_reg_fail: + dp_pll_clock_unregister_5nm(pll); + return rc; +} + +void dp_pll_clock_unregister_5nm(struct dp_pll *pll) +{ + kfree(pll->clk_data->clks); + kfree(pll->clk_data); +} diff --git a/msm/dp/dp_power.c b/msm/dp/dp_power.c new file mode 100644 index 000000000..40e2f4b2c --- /dev/null +++ b/msm/dp/dp_power.c @@ -0,0 +1,926 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include "dp_power.h" +#include "dp_catalog.h" +#include "dp_debug.h" +#include "dp_pll.h" + +#define DP_CLIENT_NAME_SIZE 20 +#define XO_CLK_KHZ 19200 + +struct dp_power_private { + struct dp_parser *parser; + struct dp_pll *pll; + struct platform_device *pdev; + struct clk *pixel_clk_rcg; + struct clk *pixel_parent; + struct clk *pixel1_clk_rcg; + struct clk *xo_clk; + struct clk *link_clk_rcg; + struct clk *link_parent; + + struct dp_power dp_power; + + bool core_clks_on; + bool link_clks_on; + bool strm0_clks_on; + bool strm1_clks_on; + bool strm0_clks_parked; + bool strm1_clks_parked; +}; + +static int dp_power_regulator_init(struct dp_power_private *power) +{ + int rc = 0, i = 0, j = 0; + struct platform_device *pdev; + struct dp_parser *parser; + + parser = power->parser; + pdev = power->pdev; + + for (i = DP_CORE_PM; !rc && (i < DP_MAX_PM); i++) { + rc = msm_dss_get_vreg(&pdev->dev, + parser->mp[i].vreg_config, + parser->mp[i].num_vreg, 1); + if (rc) { + DP_ERR("failed to init vregs for %s\n", + 
				dp_parser_pm_name(i));
			/* unwind: release vregs acquired in earlier iterations */
			for (j = i - 1; j >= DP_CORE_PM; j--) {
				msm_dss_get_vreg(&pdev->dev,
					parser->mp[j].vreg_config,
					parser->mp[j].num_vreg, 0);
			}

			goto error;
		}
	}
error:
	return rc;
}

/*
 * dp_power_regulator_deinit() - release the vreg handles acquired by
 * dp_power_regulator_init(). Failures are only logged so the remaining
 * modules are still released.
 */
static void dp_power_regulator_deinit(struct dp_power_private *power)
{
	int rc = 0, i = 0;
	struct platform_device *pdev;
	struct dp_parser *parser;

	parser = power->parser;
	pdev = power->pdev;

	for (i = DP_CORE_PM; (i < DP_MAX_PM); i++) {
		rc = msm_dss_get_vreg(&pdev->dev,
			parser->mp[i].vreg_config,
			parser->mp[i].num_vreg, 0);
		if (rc)
			DP_ERR("failed to deinit vregs for %s\n",
				dp_parser_pm_name(i));
	}
}

/*
 * dp_power_phy_gdsc() - vote (on=true) or unvote the optional DP PHY
 * GDSC regulator. No-op when the regulator was not found in DT.
 */
static void dp_power_phy_gdsc(struct dp_power *dp_power, bool on)
{
	int rc = 0;

	if (IS_ERR_OR_NULL(dp_power->dp_phy_gdsc))
		return;

	if (on)
		rc = regulator_enable(dp_power->dp_phy_gdsc);
	else
		rc = regulator_disable(dp_power->dp_phy_gdsc);

	if (rc)
		DP_ERR("Fail to %s dp_phy_gdsc regulator ret =%d\n",
			on ? "enable" : "disable", rc);
}

/*
 * dp_power_regulator_ctrl() - enable/disable the vregs of every power
 * module. DP_PLL_PM is skipped (owned by dp_display based on the link
 * configuration); only the GDSC vote is applied for it. On enable
 * failure, modules enabled so far are rolled back.
 */
static int dp_power_regulator_ctrl(struct dp_power_private *power, bool enable)
{
	int rc = 0, i = 0, j = 0;
	struct dp_parser *parser;

	parser = power->parser;

	for (i = DP_CORE_PM; i < DP_MAX_PM; i++) {
		/*
		 * The DP_PLL_PM regulator is controlled by dp_display based
		 * on the link configuration.
		 */
		if (i == DP_PLL_PM) {
			/* DP GDSC vote is needed for new chipsets, define gdsc phandle if needed */
			dp_power_phy_gdsc(&power->dp_power, enable);
			DP_DEBUG("skipping: '%s' vregs for %s\n",
				enable ? "enable" : "disable",
				dp_parser_pm_name(i));
			continue;
		}

		rc = msm_dss_enable_vreg(
			parser->mp[i].vreg_config,
			parser->mp[i].num_vreg, enable);
		if (rc) {
			DP_ERR("failed to '%s' vregs for %s\n",
				enable ? "enable" : "disable",
				dp_parser_pm_name(i));
			if (enable) {
				/* roll back the modules enabled so far */
				for (j = i-1; j >= DP_CORE_PM; j--) {
					msm_dss_enable_vreg(
						parser->mp[j].vreg_config,
						parser->mp[j].num_vreg, 0);
				}
			}
			goto error;
		}
	}
error:
	return rc;
}

/*
 * dp_power_pinctrl_set() - move the DP pins to the active or suspend
 * pinctrl state. Returns 0 silently when no pinctrl handle exists.
 */
static int dp_power_pinctrl_set(struct dp_power_private *power, bool active)
{
	int rc = -EFAULT;
	struct pinctrl_state *pin_state;
	struct dp_parser *parser;

	parser = power->parser;

	if (IS_ERR_OR_NULL(parser->pinctrl.pin))
		return 0;

	pin_state = active ? parser->pinctrl.state_active
				: parser->pinctrl.state_suspend;
	if (!IS_ERR_OR_NULL(pin_state)) {
		rc = pinctrl_select_state(parser->pinctrl.pin,
				pin_state);
		if (rc)
			DP_ERR("can not set %s pins\n",
				active ? "dp_active"
				: "dp_sleep");
	} else {
		DP_ERR("invalid '%s' pinstate\n",
			active ? "dp_active"
			: "dp_sleep");
	}

	return rc;
}

/*
 * dp_power_clk_put() - deregister the MMRM client and drop the clock
 * handles of every power module that has clocks.
 */
static void dp_power_clk_put(struct dp_power_private *power)
{
	enum dp_pm_type module;

	for (module = DP_CORE_PM; module < DP_MAX_PM; module++) {
		struct dss_module_power *pm = &power->parser->mp[module];

		if (!pm->num_clk)
			continue;

		msm_dss_mmrm_deregister(&power->pdev->dev, pm);

		msm_dss_put_clk(pm->clk_config, pm->num_clk);
	}
}

/*
 * dp_power_clk_init() - acquire (enable=true) or release (enable=false)
 * all module clocks plus the RCG/parent/XO handles used for pixel-clock
 * parenting and clock parking.
 */
static int dp_power_clk_init(struct dp_power_private *power, bool enable)
{
	int rc = 0;
	struct device *dev;
	enum dp_pm_type module;

	dev = &power->pdev->dev;

	if (enable) {
		for (module = DP_CORE_PM; module < DP_MAX_PM; module++) {
			struct dss_module_power *pm =
				&power->parser->mp[module];

			if (!pm->num_clk)
				continue;

			rc = msm_dss_get_clk(dev, pm->clk_config, pm->num_clk);
			if (rc) {
				DP_ERR("failed to get %s clk. 
err=%d\n", + dp_parser_pm_name(module), rc); + goto exit; + } + } + + power->pixel_clk_rcg = clk_get(dev, "pixel_clk_rcg"); + if (IS_ERR(power->pixel_clk_rcg)) { + DP_ERR("Unable to get DP pixel clk RCG: %ld\n", + PTR_ERR(power->pixel_clk_rcg)); + rc = PTR_ERR(power->pixel_clk_rcg); + power->pixel_clk_rcg = NULL; + goto err_pixel_clk_rcg; + } + + power->pixel_parent = clk_get(dev, "pixel_parent"); + if (IS_ERR(power->pixel_parent)) { + DP_ERR("Unable to get DP pixel RCG parent: %d\n", + PTR_ERR(power->pixel_parent)); + rc = PTR_ERR(power->pixel_parent); + power->pixel_parent = NULL; + goto err_pixel_parent; + } + + power->xo_clk = clk_get(dev, "rpmh_cxo_clk"); + if (IS_ERR(power->xo_clk)) { + DP_ERR("Unable to get XO clk: %d\n", PTR_ERR(power->xo_clk)); + rc = PTR_ERR(power->xo_clk); + power->xo_clk = NULL; + goto err_xo_clk; + } + + if (power->parser->has_mst) { + power->pixel1_clk_rcg = clk_get(dev, "pixel1_clk_rcg"); + if (IS_ERR(power->pixel1_clk_rcg)) { + DP_ERR("Unable to get DP pixel1 clk RCG: %d\n", + PTR_ERR(power->pixel1_clk_rcg)); + rc = PTR_ERR(power->pixel1_clk_rcg); + power->pixel1_clk_rcg = NULL; + goto err_pixel1_clk_rcg; + } + } + + power->link_clk_rcg = clk_get(dev, "link_clk_src"); + if (IS_ERR(power->link_clk_rcg)) { + DP_ERR("Unable to get DP link clk RCG: %ld\n", + PTR_ERR(power->link_clk_rcg)); + rc = PTR_ERR(power->link_clk_rcg); + power->link_clk_rcg = NULL; + goto err_link_clk_rcg; + } + + /* If link_parent node is available, convert clk rates to HZ for byte2 ops */ + power->pll->clk_factor = 1000; + power->link_parent = clk_get(dev, "link_parent"); + if (IS_ERR(power->link_parent)) { + DP_WARN("Unable to get DP link parent: %ld\n", + PTR_ERR(power->link_parent)); + power->link_parent = NULL; + power->pll->clk_factor = 1; + } + } else { + if (power->pixel1_clk_rcg) + clk_put(power->pixel1_clk_rcg); + + if (power->pixel_parent) + clk_put(power->pixel_parent); + + if (power->pixel_clk_rcg) + clk_put(power->pixel_clk_rcg); + + if 
(power->link_parent) + clk_put(power->link_parent); + + if (power->link_clk_rcg) + clk_put(power->link_clk_rcg); + + dp_power_clk_put(power); + } + + return rc; +err_link_clk_rcg: + if (power->pixel1_clk_rcg) + clk_put(power->pixel1_clk_rcg); +err_pixel1_clk_rcg: + clk_put(power->xo_clk); +err_xo_clk: + clk_put(power->pixel_parent); +err_pixel_parent: + clk_put(power->pixel_clk_rcg); +err_pixel_clk_rcg: + dp_power_clk_put(power); +exit: + return rc; +} + +static int dp_power_park_module(struct dp_power_private *power, enum dp_pm_type module) +{ + struct dss_module_power *mp; + struct clk *clk = NULL; + int rc = 0; + bool *parked; + + mp = &power->parser->mp[module]; + + if (module == DP_STREAM0_PM) { + clk = power->pixel_clk_rcg; + parked = &power->strm0_clks_parked; + } else if (module == DP_STREAM1_PM) { + clk = power->pixel1_clk_rcg; + parked = &power->strm1_clks_parked; + } else { + goto exit; + } + + if (!clk) { + DP_WARN("clk type %d not supported\n", module); + rc = -EINVAL; + goto exit; + } + + if (!power->xo_clk) { + rc = -EINVAL; + goto exit; + } + + if (*parked) + goto exit; + + rc = clk_set_parent(clk, power->xo_clk); + if (rc) { + DP_ERR("unable to set xo parent on clk %d\n", module); + goto exit; + } + + mp->clk_config->rate = XO_CLK_KHZ; + rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk); + if (rc) { + DP_ERR("failed to set clk rate.\n"); + goto exit; + } + + *parked = true; + +exit: + return rc; +} + + +static int dp_power_clk_set_rate(struct dp_power_private *power, + enum dp_pm_type module, bool enable) +{ + int rc = 0; + struct dss_module_power *mp; + + if (!power) { + DP_ERR("invalid power data\n"); + rc = -EINVAL; + goto exit; + } + + mp = &power->parser->mp[module]; + + if (enable) { + rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk); + if (rc) { + DP_ERR("failed to set clks rate.\n"); + goto exit; + } + + rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, 1); + if (rc) { + DP_ERR("failed to enable clks\n"); + goto exit; + } + } 
else { + rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, 0); + if (rc) { + DP_ERR("failed to disable clks\n"); + goto exit; + } + + dp_power_park_module(power, module); + } +exit: + return rc; +} + +static bool dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type) +{ + struct dp_power_private *power; + + if (!dp_power) { + DP_ERR("invalid power data\n"); + return false; + } + + power = container_of(dp_power, struct dp_power_private, dp_power); + + if (pm_type == DP_LINK_PM) + return power->link_clks_on; + else if (pm_type == DP_CORE_PM) + return power->core_clks_on; + else if (pm_type == DP_STREAM0_PM) + return power->strm0_clks_on; + else if (pm_type == DP_STREAM1_PM) + return power->strm1_clks_on; + else + return false; +} + +static int dp_power_clk_enable(struct dp_power *dp_power, + enum dp_pm_type pm_type, bool enable) +{ + int rc = 0; + struct dss_module_power *mp; + struct dp_power_private *power; + + if (!dp_power) { + DP_ERR("invalid power data\n"); + rc = -EINVAL; + goto error; + } + + power = container_of(dp_power, struct dp_power_private, dp_power); + + mp = &power->parser->mp[pm_type]; + + if (pm_type >= DP_MAX_PM) { + DP_ERR("unsupported power module: %s\n", + dp_parser_pm_name(pm_type)); + return -EINVAL; + } + + if (enable) { + if (dp_power_clk_status(dp_power, pm_type)) { + DP_DEBUG("%s clks already enabled\n", dp_parser_pm_name(pm_type)); + return 0; + } + + if ((pm_type == DP_CTRL_PM) && (!power->core_clks_on)) { + DP_DEBUG("Need to enable core clks before link clks\n"); + + rc = dp_power_clk_set_rate(power, pm_type, enable); + if (rc) { + DP_ERR("failed to enable clks: %s. 
err=%d\n", + dp_parser_pm_name(DP_CORE_PM), rc); + goto error; + } else { + power->core_clks_on = true; + } + } + + if (pm_type == DP_LINK_PM && power->link_parent) { + rc = clk_set_parent(power->link_clk_rcg, power->link_parent); + if (rc) { + DP_ERR("failed to set link parent\n"); + goto error; + } + } + + if (((pm_type == DP_STREAM0_PM) || (pm_type == DP_STREAM1_PM)) + && (!power->link_clks_on)) { + DP_ERR("Need to enable link clk before stream clks\n"); + goto error; + } + } + + rc = dp_power_clk_set_rate(power, pm_type, enable); + if (rc) { + DP_ERR("failed to '%s' clks for: %s. err=%d\n", + enable ? "enable" : "disable", + dp_parser_pm_name(pm_type), rc); + goto error; + } + + if (pm_type == DP_CORE_PM) + power->core_clks_on = enable; + else if (pm_type == DP_STREAM0_PM) + power->strm0_clks_on = enable; + else if (pm_type == DP_STREAM1_PM) + power->strm1_clks_on = enable; + else if (pm_type == DP_LINK_PM) + power->link_clks_on = enable; + + if (pm_type == DP_STREAM0_PM) + power->strm0_clks_parked = false; + if (pm_type == DP_STREAM1_PM) + power->strm1_clks_parked = false; + + /* + * This log is printed only when user connects or disconnects + * a DP cable. As this is a user-action and not a frequent + * usecase, it is not going to flood the kernel logs. Also, + * helpful in debugging the NOC issues. + */ + DP_INFO("core:%s link:%s strm0:%s strm1:%s\n", + power->core_clks_on ? "on" : "off", + power->link_clks_on ? "on" : "off", + power->strm0_clks_on ? "on" : "off", + power->strm1_clks_on ? 
"on" : "off"); +error: + return rc; +} + +static int dp_power_request_gpios(struct dp_power_private *power) +{ + int rc = 0, i; + struct device *dev; + struct dss_module_power *mp; + static const char * const gpio_names[] = { + "aux_enable", "aux_sel", "usbplug_cc", + }; + + if (!power) { + DP_ERR("invalid power data\n"); + return -EINVAL; + } + + dev = &power->pdev->dev; + mp = &power->parser->mp[DP_CORE_PM]; + + for (i = 0; i < ARRAY_SIZE(gpio_names); i++) { + unsigned int gpio = mp->gpio_config[i].gpio; + + if (gpio_is_valid(gpio)) { + rc = gpio_request(gpio, gpio_names[i]); + if (rc) { + DP_ERR("request %s gpio failed, rc=%d\n", + gpio_names[i], rc); + goto error; + } + } + } + return 0; +error: + for (i = 0; i < ARRAY_SIZE(gpio_names); i++) { + unsigned int gpio = mp->gpio_config[i].gpio; + + if (gpio_is_valid(gpio)) + gpio_free(gpio); + } + return rc; +} + +static bool dp_power_find_gpio(const char *gpio1, const char *gpio2) +{ + return !!strnstr(gpio1, gpio2, strlen(gpio1)); +} + +static void dp_power_set_gpio(struct dp_power_private *power, bool flip) +{ + int i; + struct dss_module_power *mp = &power->parser->mp[DP_CORE_PM]; + struct dss_gpio *config = mp->gpio_config; + + for (i = 0; i < mp->num_gpio; i++) { + if (dp_power_find_gpio(config->gpio_name, "aux-sel")) + config->value = flip; + + if (gpio_is_valid(config->gpio)) { + DP_DEBUG("gpio %s, value %d\n", config->gpio_name, + config->value); + + if (dp_power_find_gpio(config->gpio_name, "aux-en") || + dp_power_find_gpio(config->gpio_name, "aux-sel")) + gpio_direction_output(config->gpio, + config->value); + else + gpio_set_value(config->gpio, config->value); + + } + config++; + } +} + +static int dp_power_config_gpios(struct dp_power_private *power, bool flip, + bool enable) +{ + int rc = 0, i; + struct dss_module_power *mp; + struct dss_gpio *config; + + mp = &power->parser->mp[DP_CORE_PM]; + config = mp->gpio_config; + + if (enable) { + rc = dp_power_request_gpios(power); + if (rc) { + DP_ERR("gpio 
request failed\n"); + return rc; + } + + dp_power_set_gpio(power, flip); + } else { + for (i = 0; i < mp->num_gpio; i++) { + if (gpio_is_valid(config[i].gpio)) { + gpio_set_value(config[i].gpio, 0); + gpio_free(config[i].gpio); + } + } + } + + return 0; +} + +static int dp_power_mmrm_init(struct dp_power *dp_power, struct sde_power_handle *phandle, void *dp, + int (*dp_display_mmrm_callback)(struct mmrm_client_notifier_data *notifier_data)) +{ + int rc = 0; + enum dp_pm_type module; + struct dp_power_private *power = container_of(dp_power, struct dp_power_private, dp_power); + struct device *dev = &power->pdev->dev; + + for (module = DP_CORE_PM; module < DP_MAX_PM; module++) { + struct dss_module_power *pm = &power->parser->mp[module]; + if (!pm->num_clk) + continue; + + rc = msm_dss_mmrm_register(dev, pm, dp_display_mmrm_callback, + dp, &phandle->mmrm_enable); + if (rc) + DP_ERR("mmrm register failed rc=%d\n", rc); + } + + return rc; +} + +static int dp_power_client_init(struct dp_power *dp_power, + struct sde_power_handle *phandle, struct drm_device *drm_dev) +{ + int rc = 0; + struct dp_power_private *power; + + if (!drm_dev) { + DP_ERR("invalid drm_dev\n"); + return -EINVAL; + } + + power = container_of(dp_power, struct dp_power_private, dp_power); + + rc = dp_power_regulator_init(power); + if (rc) { + DP_ERR("failed to init regulators\n"); + goto error_power; + } + + rc = dp_power_clk_init(power, true); + if (rc) { + DP_ERR("failed to init clocks\n"); + goto error_clk; + } + dp_power->phandle = phandle; + dp_power->drm_dev = drm_dev; + + return 0; + +error_clk: + dp_power_regulator_deinit(power); +error_power: + return rc; +} + +static void dp_power_client_deinit(struct dp_power *dp_power) +{ + struct dp_power_private *power; + + if (!dp_power) { + DP_ERR("invalid power data\n"); + return; + } + + power = container_of(dp_power, struct dp_power_private, dp_power); + + dp_power_clk_init(power, false); + dp_power_regulator_deinit(power); +} + +static int 
dp_power_park_clocks(struct dp_power *dp_power)
{
	int rc = 0;
	struct dp_power_private *power;

	if (!dp_power) {
		DP_ERR("invalid power data\n");
		return -EINVAL;
	}

	power = container_of(dp_power, struct dp_power_private, dp_power);

	/* park both stream RCGs on XO; stop at the first failure */
	rc = dp_power_park_module(power, DP_STREAM0_PM);
	if (rc) {
		DP_ERR("failed to park stream 0. err=%d\n", rc);
		goto error;
	}

	rc = dp_power_park_module(power, DP_STREAM1_PM);
	if (rc) {
		DP_ERR("failed to park stream 1. err=%d\n", rc);
		goto error;
	}

error:
	return rc;
}

/*
 * dp_power_set_pixel_clk_parent() - reparent the given stream's pixel
 * RCG to the PLL-driven pixel parent. Skipped (with a warning) when the
 * RCG or parent handle is missing.
 */
static int dp_power_set_pixel_clk_parent(struct dp_power *dp_power, u32 strm_id)
{
	int rc = 0;
	struct dp_power_private *power;

	if (!dp_power || strm_id >= DP_STREAM_MAX) {
		DP_ERR("invalid power data. stream %d\n", strm_id);
		rc = -EINVAL;
		goto exit;
	}

	power = container_of(dp_power, struct dp_power_private, dp_power);

	if (strm_id == DP_STREAM_0) {
		if (power->pixel_clk_rcg && power->pixel_parent)
			rc = clk_set_parent(power->pixel_clk_rcg,
				power->pixel_parent);
		else
			DP_WARN("skipped for strm_id=%d\n", strm_id);
	} else if (strm_id == DP_STREAM_1) {
		if (power->pixel1_clk_rcg && power->pixel_parent)
			rc = clk_set_parent(power->pixel1_clk_rcg,
				power->pixel_parent);
		else
			DP_WARN("skipped for strm_id=%d\n", strm_id);
	}

	if (rc)
		DP_ERR("failed. strm_id=%d, rc=%d\n", strm_id, rc);
exit:
	return rc;
}

/*
 * dp_power_clk_get_rate() - look up clk_name first in the sde power
 * handle's clock list, then in every DP power module, and return its
 * current rate (0 when not found or clk_name is NULL).
 *
 * NOTE(review): unlike the sibling callbacks there is no !dp_power guard
 * here before container_of()/phandle deref — confirm callers never pass
 * NULL.
 */
static u64 dp_power_clk_get_rate(struct dp_power *dp_power, char *clk_name)
{
	size_t i;
	enum dp_pm_type j;
	struct dss_module_power *mp;
	struct dp_power_private *power;
	bool clk_found = false;
	u64 rate = 0;

	if (!clk_name) {
		DP_ERR("invalid pointer for clk_name\n");
		return 0;
	}

	power = container_of(dp_power, struct dp_power_private, dp_power);
	mp = &dp_power->phandle->mp;
	for (i = 0; i < mp->num_clk; i++) {
		if (!strcmp(mp->clk_config[i].clk_name, clk_name)) {
			rate = clk_get_rate(mp->clk_config[i].clk);
			clk_found = true;
			break;
		}
	}

	for (j = DP_CORE_PM; j < DP_MAX_PM && !clk_found; j++) {
		mp = &power->parser->mp[j];
		for (i = 0; i < mp->num_clk; i++) {
			if (!strcmp(mp->clk_config[i].clk_name, clk_name)) {
				rate = clk_get_rate(mp->clk_config[i].clk);
				clk_found = true;
				break;
			}
		}
	}

	return rate;
}

/*
 * dp_power_init() - bring up the DP power path in order: regulators,
 * pinctrl, GPIOs (with CC-orientation flip), runtime PM vote, core
 * clocks. Each failure unwinds exactly the steps completed before it.
 */
static int dp_power_init(struct dp_power *dp_power, bool flip)
{
	int rc = 0;
	struct dp_power_private *power;

	if (!dp_power) {
		DP_ERR("invalid power data\n");
		rc = -EINVAL;
		goto exit;
	}

	power = container_of(dp_power, struct dp_power_private, dp_power);

	rc = dp_power_regulator_ctrl(power, true);
	if (rc) {
		DP_ERR("failed to enable regulators\n");
		goto exit;
	}

	rc = dp_power_pinctrl_set(power, true);
	if (rc) {
		DP_ERR("failed to set pinctrl state\n");
		goto err_pinctrl;
	}

	rc = dp_power_config_gpios(power, flip, true);
	if (rc) {
		DP_ERR("failed to enable gpios\n");
		goto err_gpio;
	}

	rc = pm_runtime_resume_and_get(dp_power->drm_dev->dev);
	if (rc < 0) {
		DP_ERR("failed to enable power resource %d\n", rc);
		goto err_sde_power;
	}

	rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true);
	if (rc) {
		DP_ERR("failed to enable DP core clocks\n");
		goto err_clk;
	}

	return 0;

err_clk:
	pm_runtime_put_sync(dp_power->drm_dev->dev);
err_sde_power:
	dp_power_config_gpios(power, flip, false);
err_gpio:
	dp_power_pinctrl_set(power, false);
err_pinctrl:
	dp_power_regulator_ctrl(power, false);
exit:
	return rc;
}

/*
 * dp_power_deinit() - tear down the DP power path in reverse order of
 * dp_power_init(), disabling link clocks first if they are still on.
 */
static int dp_power_deinit(struct dp_power *dp_power)
{
	int rc = 0;
	struct dp_power_private *power;

	if (!dp_power) {
		DP_ERR("invalid power data\n");
		rc = -EINVAL;
		goto exit;
	}

	power = container_of(dp_power, struct dp_power_private, dp_power);

	if (power->link_clks_on)
		dp_power_clk_enable(dp_power, DP_LINK_PM, false);

	dp_power_clk_enable(dp_power, DP_CORE_PM, false);
	pm_runtime_put_sync(dp_power->drm_dev->dev);

	dp_power_config_gpios(power, false, false);
	dp_power_pinctrl_set(power, false);
	dp_power_regulator_ctrl(power, false);
exit:
	return rc;
}

/*
 * dp_power_get() - allocate the power module, wire up its callbacks and
 * grab the optional DP PHY GDSC regulator. Returns the public dp_power
 * handle or ERR_PTR on failure; release with dp_power_put().
 */
struct dp_power *dp_power_get(struct dp_parser *parser, struct dp_pll *pll)
{
	int rc = 0;
	struct dp_power_private *power;
	struct dp_power *dp_power;
	struct device *dev;

	if (!parser || !pll) {
		DP_ERR("invalid input\n");
		rc = -EINVAL;
		goto error;
	}

	power = kzalloc(sizeof(*power), GFP_KERNEL);
	if (!power) {
		rc = -ENOMEM;
		goto error;
	}

	power->parser = parser;
	power->pll = pll;
	power->pdev = parser->pdev;

	dp_power = &power->dp_power;
	dev = &power->pdev->dev;

	dp_power->init = dp_power_init;
	dp_power->deinit = dp_power_deinit;
	dp_power->clk_enable = dp_power_clk_enable;
	dp_power->clk_status = dp_power_clk_status;
	dp_power->set_pixel_clk_parent = dp_power_set_pixel_clk_parent;
	dp_power->park_clocks = dp_power_park_clocks;
	dp_power->clk_get_rate = dp_power_clk_get_rate;
	dp_power->power_client_init = dp_power_client_init;
	dp_power->power_client_deinit = dp_power_client_deinit;
	dp_power->power_mmrm_init = dp_power_mmrm_init;

	/* optional: absence is expected on targets without a DP GDSC */
	dp_power->dp_phy_gdsc = devm_regulator_get(dev, "dp_phy_gdsc");
	if (IS_ERR(dp_power->dp_phy_gdsc)) {
		dp_power->dp_phy_gdsc = NULL;
		DP_DEBUG("Optional GDSC regulator is missing\n");
	}

	return dp_power;
error:
	return ERR_PTR(rc);
}

void
dp_power_put(struct dp_power *dp_power)
{
	struct dp_power_private *power = NULL;

	if (!dp_power)
		return;

	power = container_of(dp_power, struct dp_power_private, dp_power);

	kfree(power);
}
diff --git a/msm/dp/dp_power.h b/msm/dp/dp_power.h
new file mode 100644
index 000000000..e9984f054
--- /dev/null
+++ b/msm/dp/dp_power.h
@@ -0,0 +1,69 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
 */

#ifndef _DP_POWER_H_
#define _DP_POWER_H_

#include "dp_parser.h"
#include "dp_pll.h"
#include "sde_power_handle.h"

/**
 * struct dp_power - DisplayPort's power related data
 *
 * @drm_dev: drm device handle, set by power_client_init
 * @phandle: sde power handle, set by power_client_init
 * @dp_phy_gdsc: GDSC regulator
 * @init: initializes the regulators/core clocks/GPIOs/pinctrl
 * @deinit: turns off the regulators/core clocks/GPIOs/pinctrl
 * @clk_enable: enable/disable the DP clocks
 * @clk_status: check for clock status
 * @set_pixel_clk_parent: set the parent of DP pixel clock
 * @park_clocks: park all clocks driven by PLL
 * @clk_get_rate: get the current rate for provided clk_name
 * @power_client_init: configures clocks and regulators
 * @power_client_deinit: frees clock and regulator resources
 * @power_mmrm_init: configures mmrm client registration
 */
struct dp_power {
	struct drm_device *drm_dev;
	struct sde_power_handle *phandle;
	struct regulator *dp_phy_gdsc;
	int (*init)(struct dp_power *power, bool flip);
	int (*deinit)(struct dp_power *power);
	int (*clk_enable)(struct dp_power *power, enum dp_pm_type pm_type,
		bool enable);
	bool (*clk_status)(struct dp_power *power, enum dp_pm_type pm_type);
	int (*set_pixel_clk_parent)(struct dp_power *power, u32 stream_id);
	int (*park_clocks)(struct dp_power *power);
	u64 (*clk_get_rate)(struct dp_power *power, char *clk_name);
	int (*power_client_init)(struct dp_power *power,
		struct sde_power_handle *phandle,
		struct drm_device *drm_dev);
	void (*power_client_deinit)(struct dp_power *power);
	int (*power_mmrm_init)(struct dp_power *power,
		struct sde_power_handle *phandle, void *dp,
		int (*dp_display_mmrm_callback)(struct mmrm_client_notifier_data *notifier_data));
};

/**
 * dp_power_get() - configure and get the DisplayPort power module data
 *
 * @parser: instance of parser module
 * @pll: instance of pll module
 * return: pointer to allocated power module data
 *
 * This API will configure the DisplayPort's power module and provides
 * methods to be called by the client to configure the power related
 * modules.
 */
struct dp_power *dp_power_get(struct dp_parser *parser, struct dp_pll *pll);

/**
 * dp_power_put() - release the power related resources
 *
 * @power: pointer to the power module's data
 */
void dp_power_put(struct dp_power *power);
#endif /* _DP_POWER_H_ */
diff --git a/msm/dp/dp_reg.h b/msm/dp/dp_reg.h
new file mode 100644
index 000000000..501975e63
--- /dev/null
+++ b/msm/dp/dp_reg.h
@@ -0,0 +1,453 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
+ */ + +#ifndef _DP_REG_H_ +#define _DP_REG_H_ + +/* DP_TX Registers */ +#define DP_HW_VERSION (0x00000000) +#define DP_SW_RESET (0x00000010) +#define DP_PHY_CTRL (0x00000014) +#define DP_CLK_CTRL (0x00000018) +#define DP_CLK_ACTIVE (0x0000001C) +#define DP_INTR_STATUS (0x00000020) +#define DP_INTR_STATUS2 (0x00000024) +#define DP_INTR_STATUS3 (0x00000028) +#define DP_INTR_STATUS5 (0x00000034) +#define DP_INTR_STATUS6 (0x00000038) + +#define DP_DP_HPD_CTRL (0x00000000) +#define DP_DP_HPD_INT_STATUS (0x00000004) +#define DP_DP_HPD_INT_ACK (0x00000008) +#define DP_DP_HPD_INT_MASK (0x0000000C) +#define DP_DP_HPD_REFTIMER (0x00000018) +#define DP_DP_HPD_EVENT_TIME_0 (0x0000001C) +#define DP_DP_HPD_EVENT_TIME_1 (0x00000020) +#define DP_AUX_CTRL (0x00000030) +#define DP_AUX_DATA (0x00000034) +#define DP_AUX_TRANS_CTRL (0x00000038) +#define DP_TIMEOUT_COUNT (0x0000003C) +#define DP_AUX_LIMITS (0x00000040) +#define DP_AUX_STATUS (0x00000044) + +#define DP_DPCD_CP_IRQ (0x201) +#define DP_DPCD_RXSTATUS (0x69493) + +#define DP_INTERRUPT_TRANS_NUM (0x000000A0) + +#define DP_MAINLINK_CTRL (0x00000000) +#define DP_STATE_CTRL (0x00000004) +#define DP_CONFIGURATION_CTRL (0x00000008) +#define DP_SOFTWARE_MVID (0x00000010) +#define DP_SOFTWARE_NVID (0x00000018) +#define DP_TOTAL_HOR_VER (0x0000001C) +#define DP_START_HOR_VER_FROM_SYNC (0x00000020) +#define DP_HSYNC_VSYNC_WIDTH_POLARITY (0x00000024) +#define DP_ACTIVE_HOR_VER (0x00000028) +#define DP_MISC1_MISC0 (0x0000002C) +#define DP_VALID_BOUNDARY (0x00000030) +#define DP_VALID_BOUNDARY_2 (0x00000034) +#define DP_LOGICAL2PHYSICAL_LANE_MAPPING (0x00000038) + +#define DP1_CONFIGURATION_CTRL (0x00000400) +#define DP_DP0_TIMESLOT_1_32 (0x00000404) +#define DP_DP0_TIMESLOT_33_63 (0x00000408) +#define DP_DP1_TIMESLOT_1_32 (0x0000040C) +#define DP_DP1_TIMESLOT_33_63 (0x00000410) +#define DP1_SOFTWARE_MVID (0x00000414) +#define DP1_SOFTWARE_NVID (0x00000418) +#define DP1_TOTAL_HOR_VER (0x0000041C) +#define DP1_START_HOR_VER_FROM_SYNC 
(0x00000420) +#define DP1_HSYNC_VSYNC_WIDTH_POLARITY (0x00000424) +#define DP1_ACTIVE_HOR_VER (0x00000428) +#define DP1_MISC1_MISC0 (0x0000042C) +#define DP_DP0_RG (0x000004F8) +#define DP_DP1_RG (0x000004FC) + +#define DP_MST_ACT (0x00000500) +#define DP_MST_MAINLINK_READY (0x00000504) + +#define DP_MAINLINK_READY (0x00000040) +#define DP_MAINLINK_LEVELS (0x00000044) +#define DP_TU (0x0000004C) + +#define DP_HBR2_COMPLIANCE_SCRAMBLER_RESET (0x00000054) +#define DP_TEST_80BIT_CUSTOM_PATTERN_REG0 (0x000000C0) +#define DP_TEST_80BIT_CUSTOM_PATTERN_REG1 (0x000000C4) +#define DP_TEST_80BIT_CUSTOM_PATTERN_REG2 (0x000000C8) + +#define MMSS_DP_MISC1_MISC0 (0x0000002C) +#define MMSS_DP_AUDIO_TIMING_GEN (0x00000080) +#define MMSS_DP_AUDIO_TIMING_RBR_32 (0x00000084) +#define MMSS_DP_AUDIO_TIMING_HBR_32 (0x00000088) +#define MMSS_DP_AUDIO_TIMING_RBR_44 (0x0000008C) +#define MMSS_DP_AUDIO_TIMING_HBR_44 (0x00000090) +#define MMSS_DP_AUDIO_TIMING_RBR_48 (0x00000094) +#define MMSS_DP_AUDIO_TIMING_HBR_48 (0x00000098) + +#define DP_MISR40_CTRL (0x000000D0) +#define DP_MISR40_TX0 (0x000000D4) +#define DP_MISR40_TX1 (0x000000DC) +#define DP_MISR40_TX2 (0x000000E4) +#define DP_MISR40_TX3 (0x000000EC) +#define MMSS_DP_PSR_CRC_RG (0x00000154) +#define MMSS_DP_PSR_CRC_B (0x00000158) + +#define MMSS_DP1_CRC_RG (0x00000164) +#define MMSS_DP1_CRC_B (0x00000168) +#define DP_COMPRESSION_MODE_CTRL (0x00000180) +#define DP_PPS_HB_0_3 (0x00000184) +#define DP_PPS_PB_0_3 (0x00000188) +#define DP_PPS_PB_4_7 (0x0000018C) +#define DP_PPS_PB_8_11 (0x00000190) +#define DP_PPS_PB_12_15 (0x00000194) +#define DP_PPS_PB_16_19 (0x00000198) +#define DP_PPS_PB_20_23 (0x0000019C) +#define DP_PPS_PB_24_27 (0x000001A0) +#define DP_PPS_PB_28_31 (0x000001A4) +#define DP_PPS_PPS_0_3 (0x000001A8) +#define DP_PPS_PPS_4_7 (0x000001AC) +#define DP_PPS_PPS_8_11 (0x000001B0) +#define DP_PPS_PPS_12_15 (0x000001B4) +#define DP_PPS_PPS_16_19 (0x000001B8) +#define DP_PPS_PPS_20_23 (0x000001BC) +#define DP_PPS_PPS_24_27 
(0x000001C0) +#define DP_PPS_PPS_28_31 (0x000001C4) +#define DP_PPS_PPS_32_35 (0x000001C8) +#define DP_PPS_PPS_36_39 (0x000001CC) +#define DP_PPS_PPS_40_43 (0x000001D0) +#define DP_PPS_PPS_44_47 (0x000001D4) +#define DP_PPS_PPS_48_51 (0x000001D8) +#define DP_PPS_PPS_52_55 (0x000001DC) +#define DP_PPS_PPS_56_59 (0x000001E0) +#define DP_PPS_PPS_60_63 (0x000001E4) +#define DP_PPS_PPS_64_67 (0x000001E8) +#define DP_PPS_PPS_68_71 (0x000001EC) +#define DP_PPS_PPS_72_75 (0x000001F0) +#define DP_PPS_PPS_76_79 (0x000001F4) +#define DP_PPS_PPS_80_83 (0x000001F8) +#define DP_PPS_PPS_84_87 (0x000001FC) + +#define MMSS_DP_AUDIO_CFG (0x00000200) +#define MMSS_DP_AUDIO_STATUS (0x00000204) +#define MMSS_DP_AUDIO_PKT_CTRL (0x00000208) +#define MMSS_DP_AUDIO_PKT_CTRL2 (0x0000020C) +#define MMSS_DP_AUDIO_ACR_CTRL (0x00000210) +#define MMSS_DP_AUDIO_CTRL_RESET (0x00000214) + +#define MMSS_DP_SDP_CFG (0x00000228) +#define MMSS_DP_SDP_CFG2 (0x0000022C) +#define MMSS_DP_SDP_CFG3 (0x0000024C) +#define MMSS_DP_SDP_CFG4 (0x000004EC) +#define MMSS_DP_AUDIO_TIMESTAMP_0 (0x00000230) +#define MMSS_DP_AUDIO_TIMESTAMP_1 (0x00000234) + +#define MMSS_DP_AUDIO_STREAM_0 (0x00000240) +#define MMSS_DP_AUDIO_STREAM_1 (0x00000244) + +#define MMSS_DP_EXTENSION_0 (0x00000250) +#define MMSS_DP_EXTENSION_1 (0x00000254) +#define MMSS_DP_EXTENSION_2 (0x00000258) +#define MMSS_DP_EXTENSION_3 (0x0000025C) +#define MMSS_DP_EXTENSION_4 (0x00000260) +#define MMSS_DP_EXTENSION_5 (0x00000264) +#define MMSS_DP_EXTENSION_6 (0x00000268) +#define MMSS_DP_EXTENSION_7 (0x0000026C) +#define MMSS_DP_EXTENSION_8 (0x00000270) +#define MMSS_DP_EXTENSION_9 (0x00000274) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_0 (0x00000278) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_1 (0x0000027C) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_2 (0x00000280) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_3 (0x00000284) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_4 (0x00000288) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_5 (0x0000028C) +#define MMSS_DP_AUDIO_ISRC_0 (0x00000290) 
+#define MMSS_DP_AUDIO_ISRC_1 (0x00000294) +#define MMSS_DP_AUDIO_ISRC_2 (0x00000298) +#define MMSS_DP_AUDIO_ISRC_3 (0x0000029C) +#define MMSS_DP_AUDIO_ISRC_4 (0x000002A0) +#define MMSS_DP_AUDIO_ISRC_5 (0x000002A4) +#define MMSS_DP_AUDIO_INFOFRAME_0 (0x000002A8) +#define MMSS_DP_AUDIO_INFOFRAME_1 (0x000002AC) +#define MMSS_DP_AUDIO_INFOFRAME_2 (0x000002B0) + +#define MMSS_DP_FLUSH (0x000002F8) +#define MMSS_DP1_FLUSH (0x000002FC) + +#define MMSS_DP_GENERIC0_0 (0x00000300) +#define MMSS_DP_GENERIC0_1 (0x00000304) +#define MMSS_DP_GENERIC0_2 (0x00000308) +#define MMSS_DP_GENERIC0_3 (0x0000030C) +#define MMSS_DP_GENERIC0_4 (0x00000310) +#define MMSS_DP_GENERIC0_5 (0x00000314) +#define MMSS_DP_GENERIC0_6 (0x00000318) +#define MMSS_DP_GENERIC0_7 (0x0000031C) +#define MMSS_DP_GENERIC0_8 (0x00000320) +#define MMSS_DP_GENERIC0_9 (0x00000324) +#define MMSS_DP_GENERIC1_0 (0x00000328) +#define MMSS_DP_GENERIC1_1 (0x0000032C) +#define MMSS_DP_GENERIC1_2 (0x00000330) +#define MMSS_DP_GENERIC1_3 (0x00000334) +#define MMSS_DP_GENERIC1_4 (0x00000338) +#define MMSS_DP_GENERIC1_5 (0x0000033C) +#define MMSS_DP_GENERIC1_6 (0x00000340) +#define MMSS_DP_GENERIC1_7 (0x00000344) +#define MMSS_DP_GENERIC1_8 (0x00000348) +#define MMSS_DP_GENERIC1_9 (0x0000034C) + +#define MMSS_DP1_GENERIC0_0 (0x00000490) +#define MMSS_DP1_GENERIC0_1 (0x00000494) +#define MMSS_DP1_GENERIC0_2 (0x00000498) +#define MMSS_DP1_GENERIC0_3 (0x0000049C) +#define MMSS_DP1_GENERIC0_4 (0x000004A0) +#define MMSS_DP1_GENERIC0_5 (0x000004A4) +#define MMSS_DP1_GENERIC0_6 (0x000004A8) +#define MMSS_DP1_GENERIC0_7 (0x000004AC) +#define MMSS_DP1_GENERIC0_8 (0x000004B0) +#define MMSS_DP1_GENERIC0_9 (0x000004B4) +#define MMSS_DP1_GENERIC1_0 (0x000004B8) +#define MMSS_DP1_GENERIC1_1 (0x000004BC) +#define MMSS_DP1_GENERIC1_2 (0x000004C0) +#define MMSS_DP1_GENERIC1_3 (0x000004C4) +#define MMSS_DP1_GENERIC1_4 (0x000004C8) +#define MMSS_DP1_GENERIC1_5 (0x000004CC) +#define MMSS_DP1_GENERIC1_6 (0x000004D0) +#define 
MMSS_DP1_GENERIC1_7 (0x000004D4) +#define MMSS_DP1_GENERIC1_8 (0x000004D8) +#define MMSS_DP1_GENERIC1_9 (0x000004DC) + +#define MMSS_DP_GENERIC2_0 (0x000003d8) +#define MMSS_DP_GENERIC2_1 (0x000003dc) +#define MMSS_DP_GENERIC2_2 (0x000003e0) +#define MMSS_DP_GENERIC2_3 (0x000003e4) +#define MMSS_DP_GENERIC2_4 (0x000003e8) +#define MMSS_DP_GENERIC2_5 (0x000003ec) +#define MMSS_DP_GENERIC2_6 (0x000003f0) +#define MMSS_DP_GENERIC2_7 (0x000003f4) +#define MMSS_DP_GENERIC2_8 (0x000003f8) +#define MMSS_DP_GENERIC2_9 (0x000003fc) +#define MMSS_DP1_GENERIC2_0 (0x00000510) +#define MMSS_DP1_GENERIC2_1 (0x00000514) +#define MMSS_DP1_GENERIC2_2 (0x00000518) +#define MMSS_DP1_GENERIC2_3 (0x0000051c) +#define MMSS_DP1_GENERIC2_4 (0x00000520) +#define MMSS_DP1_GENERIC2_5 (0x00000524) +#define MMSS_DP1_GENERIC2_6 (0x00000528) +#define MMSS_DP1_GENERIC2_7 (0x0000052C) +#define MMSS_DP1_GENERIC2_8 (0x00000530) +#define MMSS_DP1_GENERIC2_9 (0x00000534) + +#define MMSS_DP1_SDP_CFG (0x000004E0) +#define MMSS_DP1_SDP_CFG2 (0x000004E4) +#define MMSS_DP1_SDP_CFG3 (0x000004E8) +#define MMSS_DP1_SDP_CFG4 (0x000004F0) + +#define DP1_COMPRESSION_MODE_CTRL (0x00000560) +#define DP1_PPS_HB_0_3 (0x00000564) +#define DP1_PPS_PB_0_3 (0x00000568) +#define DP1_PPS_PB_4_7 (0x0000056C) +#define DP1_PPS_PB_8_11 (0x00000570) +#define DP1_PPS_PB_12_15 (0x00000574) +#define DP1_PPS_PB_16_19 (0x00000578) +#define DP1_PPS_PB_20_23 (0x0000057C) +#define DP1_PPS_PB_24_27 (0x00000580) +#define DP1_PPS_PB_28_31 (0x00000584) +#define DP1_PPS_PPS_0_3 (0x00000588) +#define DP1_PPS_PPS_4_7 (0x0000058C) +#define DP1_PPS_PPS_8_11 (0x00000590) +#define DP1_PPS_PPS_12_15 (0x00000594) +#define DP1_PPS_PPS_16_19 (0x00000598) +#define DP1_PPS_PPS_20_23 (0x0000059C) +#define DP1_PPS_PPS_24_27 (0x000005A0) +#define DP1_PPS_PPS_28_31 (0x000005A4) +#define DP1_PPS_PPS_32_35 (0x000005A8) +#define DP1_PPS_PPS_36_39 (0x000005AC) +#define DP1_PPS_PPS_40_43 (0x000005B0) +#define DP1_PPS_PPS_44_47 (0x000005B4) +#define 
DP1_PPS_PPS_48_51 (0x000005B8) +#define DP1_PPS_PPS_52_55 (0x000005BC) +#define DP1_PPS_PPS_56_59 (0x000005C0) +#define DP1_PPS_PPS_60_63 (0x000005C4) +#define DP1_PPS_PPS_64_67 (0x000005C8) +#define DP1_PPS_PPS_68_71 (0x000005CC) +#define DP1_PPS_PPS_72_75 (0x000005D0) +#define DP1_PPS_PPS_76_79 (0x000005D4) +#define DP1_PPS_PPS_80_83 (0x000005D8) +#define DP1_PPS_PPS_84_87 (0x000005DC) + +#define MMSS_DP_VSCEXT_0 (0x000002D0) +#define MMSS_DP_VSCEXT_1 (0x000002D4) +#define MMSS_DP_VSCEXT_2 (0x000002D8) +#define MMSS_DP_VSCEXT_3 (0x000002DC) +#define MMSS_DP_VSCEXT_4 (0x000002E0) +#define MMSS_DP_VSCEXT_5 (0x000002E4) +#define MMSS_DP_VSCEXT_6 (0x000002E8) +#define MMSS_DP_VSCEXT_7 (0x000002EC) +#define MMSS_DP_VSCEXT_8 (0x000002F0) +#define MMSS_DP_VSCEXT_9 (0x000002F4) + +#define MMSS_DP1_VSCEXT_0 (0x00000468) +#define MMSS_DP1_VSCEXT_1 (0x0000046c) +#define MMSS_DP1_VSCEXT_2 (0x00000470) +#define MMSS_DP1_VSCEXT_3 (0x00000474) +#define MMSS_DP1_VSCEXT_4 (0x00000478) +#define MMSS_DP1_VSCEXT_5 (0x0000047c) +#define MMSS_DP1_VSCEXT_6 (0x00000480) +#define MMSS_DP1_VSCEXT_7 (0x00000484) +#define MMSS_DP1_VSCEXT_8 (0x00000488) +#define MMSS_DP1_VSCEXT_9 (0x0000048c) + +#define MMSS_DP_BIST_ENABLE (0x00000000) +#define MMSS_DP_TIMING_ENGINE_EN (0x00000010) +#define MMSS_DP_INTF_CONFIG (0x00000014) +#define MMSS_DP_INTF_HSYNC_CTL (0x00000018) +#define MMSS_DP_INTF_VSYNC_PERIOD_F0 (0x0000001C) +#define MMSS_DP_INTF_VSYNC_PERIOD_F1 (0x00000020) +#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0 (0x00000024) +#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1 (0x00000028) +#define MMSS_INTF_DISPLAY_V_START_F0 (0x0000002C) +#define MMSS_INTF_DISPLAY_V_START_F1 (0x00000030) +#define MMSS_DP_INTF_DISPLAY_V_END_F0 (0x00000034) +#define MMSS_DP_INTF_DISPLAY_V_END_F1 (0x00000038) +#define MMSS_DP_INTF_ACTIVE_V_START_F0 (0x0000003C) +#define MMSS_DP_INTF_ACTIVE_V_START_F1 (0x00000040) +#define MMSS_DP_INTF_ACTIVE_V_END_F0 (0x00000044) +#define MMSS_DP_INTF_ACTIVE_V_END_F1 (0x00000048) +#define 
MMSS_DP_INTF_DISPLAY_HCTL (0x0000004C) +#define MMSS_DP_INTF_ACTIVE_HCTL (0x00000050) +#define MMSS_DP_INTF_POLARITY_CTL (0x00000058) +#define MMSS_DP_TPG_MAIN_CONTROL (0x00000060) +#define MMSS_DP_TPG_VIDEO_CONFIG (0x00000064) +#define MMSS_DP_DSC_DTO (0x0000007C) +#define MMSS_DP_DSC_DTO_COUNT (0x00000084) +#define MMSS_DP_ASYNC_FIFO_CONFIG (0x00000088) + +#define MMSS_DP1_BIST_ENABLE (0x00000000) +#define MMSS_DP1_TIMING_ENGINE_EN (0x00000010) +#define MMSS_DP1_INTF_CONFIG (0x00000014) +#define MMSS_DP1_INTF_HSYNC_CTL (0x00000018) +#define MMSS_DP1_INTF_VSYNC_PERIOD_F0 (0x0000001C) +#define MMSS_DP1_INTF_VSYNC_PERIOD_F1 (0x00000020) +#define MMSS_DP1_INTF_VSYNC_PULSE_WIDTH_F0 (0x00000024) +#define MMSS_DP1_INTF_VSYNC_PULSE_WIDTH_F1 (0x00000028) +#define MMSS_DP1_INTF_DISPLAY_V_START_F0 (0x0000002C) +#define MMSS_DP1_INTF_DISPLAY_V_START_F1 (0x00000030) +#define MMSS_DP1_INTF_DISPLAY_V_END_F0 (0x00000034) +#define MMSS_DP1_INTF_DISPLAY_V_END_F1 (0x00000038) +#define MMSS_DP1_INTF_ACTIVE_V_START_F0 (0x0000003C) +#define MMSS_DP1_INTF_ACTIVE_V_START_F1 (0x00000040) +#define MMSS_DP1_INTF_ACTIVE_V_END_F0 (0x00000044) +#define MMSS_DP1_INTF_ACTIVE_V_END_F1 (0x00000048) +#define MMSS_DP1_INTF_DISPLAY_HCTL (0x0000004C) +#define MMSS_DP1_INTF_ACTIVE_HCTL (0x00000050) +#define MMSS_DP1_INTF_POLARITY_CTL (0x00000058) +#define MMSS_DP1_TPG_MAIN_CONTROL (0x00000060) +#define MMSS_DP1_TPG_VIDEO_CONFIG (0x00000064) +#define MMSS_DP1_DSC_DTO (0x0000007C) +#define MMSS_DP1_DSC_DTO_COUNT (0x00000084) +#define MMSS_DP1_ASYNC_FIFO_CONFIG (0x00000088) + +/*DP PHY Register offsets */ +#define DP_PHY_REVISION_ID0 (0x00000000) +#define DP_PHY_REVISION_ID1 (0x00000004) +#define DP_PHY_REVISION_ID2 (0x00000008) +#define DP_PHY_REVISION_ID3 (0x0000000C) + +#define DP_PHY_CFG (0x00000010) +#define DP_PHY_PD_CTL (0x00000018) +#define DP_PHY_MODE (0x0000001C) + +#define DP_PHY_AUX_CFG0 (0x00000020) +#define DP_PHY_AUX_CFG1 (0x00000024) +#define DP_PHY_AUX_CFG2 (0x00000028) +#define 
DP_PHY_AUX_CFG3 (0x0000002C) +#define DP_PHY_AUX_CFG4 (0x00000030) +#define DP_PHY_AUX_CFG5 (0x00000034) +#define DP_PHY_AUX_CFG6 (0x00000038) +#define DP_PHY_AUX_CFG7 (0x0000003C) +#define DP_PHY_AUX_CFG8 (0x00000040) +#define DP_PHY_AUX_CFG9 (0x00000044) +#define DP_PHY_AUX_INTERRUPT_MASK (0x00000048) +#define DP_PHY_AUX_INTERRUPT_CLEAR (0x0000004C) +#define DP_PHY_AUX_INTERRUPT_STATUS (0x000000BC) +#define DP_PHY_AUX_INTERRUPT_MASK_V200 (0x00000048) +#define DP_PHY_AUX_INTERRUPT_CLEAR_V200 (0x0000004C) +#define DP_PHY_AUX_INTERRUPT_STATUS_V200 (0x000000BC) + +#define DP_PHY_SPARE0 (0x00AC) + +#define TXn_TX_EMP_POST1_LVL (0x000C) +#define TXn_TX_DRV_LVL (0x001C) +#define TXn_TX_POL_INV (0x0064) + +#define TXn_TRANSCEIVER_BIAS_EN (0x005C) +#define TXn_HIGHZ_DRVR_EN (0x0060) + +#define DP_PHY_STATUS (0x00DC) +#define DP_PHY_STATUS_V600 (0x00E4) +#define DP_PHY_AUX_INTERRUPT_MASK_V420 (0x0054) +#define DP_PHY_AUX_INTERRUPT_CLEAR_V420 (0x0058) +#define DP_PHY_AUX_INTERRUPT_STATUS_V420 (0x00D8) +#define DP_PHY_AUX_INTERRUPT_STATUS_V600 (0x00E0) +#define DP_PHY_SPARE0_V420 (0x00C8) +#define DP_PHY_MISR_CTRL (0x00C0) +#define DP_PHY_MISR_STATUS (0x010C) +#define DP_PHY_MISR_TX0 (0x0110) +#define DP_PHY_MISR_TX1 (0x0130) +#define DP_PHY_MISR_TX2 (0x0150) +#define DP_PHY_MISR_TX3 (0x0170) +#define TXn_TX_DRV_LVL_V420 (0x0014) +#define TXn_TRANSCEIVER_BIAS_EN_V420 (0x0054) +#define TXn_HIGHZ_DRVR_EN_V420 (0x0058) +#define TXn_TX_POL_INV_V420 (0x005C) + +#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN (0x044) +#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN_V600 (0x0DC) + +/* DP MMSS_CC registers */ +#define MMSS_DP_PIXEL_M (0x01B4) +#define MMSS_DP_PIXEL_N (0x01B8) +#define MMSS_DP_PIXEL1_M (0x01CC) +#define MMSS_DP_PIXEL1_N (0x01D0) +#define MMSS_DP_PIXEL_M_V200 (0x0130) +#define MMSS_DP_PIXEL_N_V200 (0x0134) +#define MMSS_DP_PIXEL1_M_V200 (0x0148) +#define MMSS_DP_PIXEL1_N_V200 (0x014C) + +/* DP HDCP 1.3 registers */ +#define DP_HDCP_CTRL (0x0A0) +#define DP_HDCP_STATUS (0x0A4) 
+#define DP_HDCP_SW_UPPER_AKSV (0x098) +#define DP_HDCP_SW_LOWER_AKSV (0x09C) +#define DP_HDCP_ENTROPY_CTRL0 (0x350) +#define DP_HDCP_ENTROPY_CTRL1 (0x35C) +#define DP_HDCP_SHA_STATUS (0x0C8) +#define DP_HDCP_RCVPORT_DATA2_0 (0x0B0) +#define DP_HDCP_RCVPORT_DATA3 (0x0A4) +#define DP_HDCP_RCVPORT_DATA4 (0x0A8) +#define DP_HDCP_RCVPORT_DATA5 (0x0C0) +#define DP_HDCP_RCVPORT_DATA6 (0x0C4) + +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_CTRL (0x024) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_DATA (0x028) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA0 (0x004) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA1 (0x008) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA7 (0x00C) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA8 (0x010) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA9 (0x014) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA10 (0x018) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA11 (0x01C) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA12 (0x020) + +/* USB3 DP COM registers */ +#define USB3_DP_COM_PHY_MODE_CTRL (0x00) +#define USB3_DP_COM_SW_RESET (0x04) +#define USB3_DP_COM_POWER_DOWN_CTRL (0x08) +#define USB3_DP_COM_SWI_CTRL (0x0C) +#define USB3_DP_COM_TYPEC_CTRL (0x10) +#define USB3_DP_COM_RESET_OVRD_CTRL (0x1C) + +#endif /* _DP_REG_H_ */ diff --git a/msm/dp/dp_usbpd.c b/msm/dp/dp_usbpd.c new file mode 100644 index 000000000..382d1f5ab --- /dev/null +++ b/msm/dp/dp_usbpd.c @@ -0,0 +1,583 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include + +#include "dp_usbpd.h" +#include "dp_debug.h" + +/* DP specific VDM commands */ +#define DP_USBPD_VDM_STATUS 0x10 +#define DP_USBPD_VDM_CONFIGURE 0x11 + +/* USBPD-TypeC specific Macros */ +#define VDM_VERSION 0x0 +#define USB_C_DP_SID 0xFF01 + +enum dp_usbpd_pin_assignment { + DP_USBPD_PIN_A, + DP_USBPD_PIN_B, + DP_USBPD_PIN_C, + DP_USBPD_PIN_D, + DP_USBPD_PIN_E, + DP_USBPD_PIN_F, + DP_USBPD_PIN_MAX, +}; + +enum dp_usbpd_events { + DP_USBPD_EVT_DISCOVER, + DP_USBPD_EVT_ENTER, + DP_USBPD_EVT_STATUS, + DP_USBPD_EVT_CONFIGURE, + DP_USBPD_EVT_CC_PIN_POLARITY, + DP_USBPD_EVT_EXIT, + DP_USBPD_EVT_ATTENTION, +}; + +enum dp_usbpd_alt_mode { + DP_USBPD_ALT_MODE_NONE = 0, + DP_USBPD_ALT_MODE_INIT = BIT(0), + DP_USBPD_ALT_MODE_DISCOVER = BIT(1), + DP_USBPD_ALT_MODE_ENTER = BIT(2), + DP_USBPD_ALT_MODE_STATUS = BIT(3), + DP_USBPD_ALT_MODE_CONFIGURE = BIT(4), +}; + +struct dp_usbpd_capabilities { + enum dp_usbpd_port port; + bool receptacle_state; + u8 ulink_pin_config; + u8 dlink_pin_config; +}; + +struct dp_usbpd_private { + bool forced_disconnect; + u32 vdo; + struct device *dev; + struct usbpd *pd; + struct usbpd_svid_handler svid_handler; + struct dp_hpd_cb *dp_cb; + struct dp_usbpd_capabilities cap; + struct dp_usbpd dp_usbpd; + enum dp_usbpd_alt_mode alt_mode; + u32 dp_usbpd_config; +}; + +static const char *dp_usbpd_pin_name(u8 pin) +{ + switch (pin) { + case DP_USBPD_PIN_A: return "DP_USBPD_PIN_ASSIGNMENT_A"; + case DP_USBPD_PIN_B: return "DP_USBPD_PIN_ASSIGNMENT_B"; + case DP_USBPD_PIN_C: return "DP_USBPD_PIN_ASSIGNMENT_C"; + case DP_USBPD_PIN_D: return "DP_USBPD_PIN_ASSIGNMENT_D"; + case DP_USBPD_PIN_E: return "DP_USBPD_PIN_ASSIGNMENT_E"; + case DP_USBPD_PIN_F: return "DP_USBPD_PIN_ASSIGNMENT_F"; + default: return "UNKNOWN"; + } +} + +static const char *dp_usbpd_port_name(enum dp_usbpd_port port) +{ + switch (port) { + case DP_USBPD_PORT_NONE: return "DP_USBPD_PORT_NONE"; + case DP_USBPD_PORT_UFP_D: return 
"DP_USBPD_PORT_UFP_D"; + case DP_USBPD_PORT_DFP_D: return "DP_USBPD_PORT_DFP_D"; + case DP_USBPD_PORT_D_UFP_D: return "DP_USBPD_PORT_D_UFP_D"; + default: return "DP_USBPD_PORT_NONE"; + } +} + +static const char *dp_usbpd_cmd_name(u8 cmd) +{ + switch (cmd) { + case USBPD_SVDM_DISCOVER_MODES: return "USBPD_SVDM_DISCOVER_MODES"; + case USBPD_SVDM_ENTER_MODE: return "USBPD_SVDM_ENTER_MODE"; + case USBPD_SVDM_ATTENTION: return "USBPD_SVDM_ATTENTION"; + case DP_USBPD_VDM_STATUS: return "DP_USBPD_VDM_STATUS"; + case DP_USBPD_VDM_CONFIGURE: return "DP_USBPD_VDM_CONFIGURE"; + default: return "DP_USBPD_VDM_ERROR"; + } +} + +static void dp_usbpd_init_port(enum dp_usbpd_port *port, u32 in_port) +{ + switch (in_port) { + case 0: + *port = DP_USBPD_PORT_NONE; + break; + case 1: + *port = DP_USBPD_PORT_UFP_D; + break; + case 2: + *port = DP_USBPD_PORT_DFP_D; + break; + case 3: + *port = DP_USBPD_PORT_D_UFP_D; + break; + default: + *port = DP_USBPD_PORT_NONE; + } + DP_DEBUG("port:%s\n", dp_usbpd_port_name(*port)); +} + +static void dp_usbpd_get_capabilities(struct dp_usbpd_private *pd) +{ + struct dp_usbpd_capabilities *cap = &pd->cap; + u32 buf = pd->vdo; + int port = buf & 0x3; + + cap->receptacle_state = (buf & BIT(6)) ? true : false; + cap->dlink_pin_config = (buf >> 8) & 0xff; + cap->ulink_pin_config = (buf >> 16) & 0xff; + + dp_usbpd_init_port(&cap->port, port); +} + +static void dp_usbpd_get_status(struct dp_usbpd_private *pd) +{ + struct dp_usbpd *status = &pd->dp_usbpd; + u32 buf = pd->vdo; + int port = buf & 0x3; + + status->low_pow_st = (buf & BIT(2)) ? true : false; + status->adaptor_dp_en = (buf & BIT(3)) ? true : false; + status->base.multi_func = (buf & BIT(4)) ? true : false; + status->usb_config_req = (buf & BIT(5)) ? true : false; + status->exit_dp_mode = (buf & BIT(6)) ? true : false; + status->base.hpd_high = (buf & BIT(7)) ? true : false; + status->base.hpd_irq = (buf & BIT(8)) ? 
true : false; + + DP_DEBUG("low_pow_st = %d, adaptor_dp_en = %d, multi_func = %d\n", + status->low_pow_st, status->adaptor_dp_en, + status->base.multi_func); + DP_DEBUG("usb_config_req = %d, exit_dp_mode = %d, hpd_high =%d\n", + status->usb_config_req, + status->exit_dp_mode, status->base.hpd_high); + DP_DEBUG("hpd_irq = %d\n", status->base.hpd_irq); + + dp_usbpd_init_port(&status->port, port); +} + +static u32 dp_usbpd_gen_config_pkt(struct dp_usbpd_private *pd) +{ + u8 pin_cfg, pin; + u32 config = 0; + const u32 ufp_d_config = 0x2, dp_ver = 0x1; + + if (pd->cap.receptacle_state) + pin_cfg = pd->cap.ulink_pin_config; + else + pin_cfg = pd->cap.dlink_pin_config; + + for (pin = DP_USBPD_PIN_A; pin < DP_USBPD_PIN_MAX; pin++) { + if (pin_cfg & BIT(pin)) { + if (pd->dp_usbpd.base.multi_func) { + if (pin == DP_USBPD_PIN_D) + break; + } else { + break; + } + } + } + + if (pin == DP_USBPD_PIN_MAX) + pin = DP_USBPD_PIN_C; + + DP_DEBUG("pin assignment: %s\n", dp_usbpd_pin_name(pin)); + + config |= BIT(pin) << 8; + + config |= (dp_ver << 2); + config |= ufp_d_config; + + DP_DEBUG("config = 0x%x\n", config); + return config; +} + +static void dp_usbpd_send_event(struct dp_usbpd_private *pd, + enum dp_usbpd_events event) +{ + u32 config; + + switch (event) { + case DP_USBPD_EVT_DISCOVER: + usbpd_send_svdm(pd->pd, USB_C_DP_SID, + USBPD_SVDM_DISCOVER_MODES, + SVDM_CMD_TYPE_INITIATOR, 0x0, 0x0, 0x0); + break; + case DP_USBPD_EVT_ENTER: + usbpd_send_svdm(pd->pd, USB_C_DP_SID, + USBPD_SVDM_ENTER_MODE, + SVDM_CMD_TYPE_INITIATOR, 0x1, 0x0, 0x0); + break; + case DP_USBPD_EVT_EXIT: + usbpd_send_svdm(pd->pd, USB_C_DP_SID, + USBPD_SVDM_EXIT_MODE, + SVDM_CMD_TYPE_INITIATOR, 0x1, 0x0, 0x0); + break; + case DP_USBPD_EVT_STATUS: + config = 0x1; /* DFP_D connected */ + usbpd_send_svdm(pd->pd, USB_C_DP_SID, DP_USBPD_VDM_STATUS, + SVDM_CMD_TYPE_INITIATOR, 0x1, &config, 0x1); + break; + case DP_USBPD_EVT_CONFIGURE: + config = dp_usbpd_gen_config_pkt(pd); + usbpd_send_svdm(pd->pd, USB_C_DP_SID, 
DP_USBPD_VDM_CONFIGURE, + SVDM_CMD_TYPE_INITIATOR, 0x1, &config, 0x1); + break; + default: + DP_ERR("unknown event:%d\n", event); + } +} + +static void dp_usbpd_connect_cb(struct usbpd_svid_handler *hdlr, + bool peer_usb_comm) +{ + struct dp_usbpd_private *pd; + + pd = container_of(hdlr, struct dp_usbpd_private, svid_handler); + if (!pd) { + DP_ERR("get_usbpd phandle failed\n"); + return; + } + + DP_DEBUG("peer_usb_comm: %d\n", peer_usb_comm); + pd->dp_usbpd.base.peer_usb_comm = peer_usb_comm; + dp_usbpd_send_event(pd, DP_USBPD_EVT_DISCOVER); +} + +static void dp_usbpd_disconnect_cb(struct usbpd_svid_handler *hdlr) +{ + struct dp_usbpd_private *pd; + + pd = container_of(hdlr, struct dp_usbpd_private, svid_handler); + if (!pd) { + DP_ERR("get_usbpd phandle failed\n"); + return; + } + + pd->alt_mode = DP_USBPD_ALT_MODE_NONE; + pd->dp_usbpd.base.alt_mode_cfg_done = false; + DP_DEBUG("\n"); + + if (pd->dp_cb && pd->dp_cb->disconnect) + pd->dp_cb->disconnect(pd->dev); +} + +static int dp_usbpd_validate_callback(u8 cmd, + enum usbpd_svdm_cmd_type cmd_type, int num_vdos) +{ + int ret = 0; + + if (cmd_type == SVDM_CMD_TYPE_RESP_NAK) { + DP_ERR("error: NACK\n"); + ret = -EINVAL; + goto end; + } + + if (cmd_type == SVDM_CMD_TYPE_RESP_BUSY) { + DP_ERR("error: BUSY\n"); + ret = -EBUSY; + goto end; + } + + if (cmd == USBPD_SVDM_ATTENTION) { + if (cmd_type != SVDM_CMD_TYPE_INITIATOR) { + DP_ERR("error: invalid cmd type for attention\n"); + ret = -EINVAL; + goto end; + } + + if (!num_vdos) { + DP_ERR("error: no vdo provided\n"); + ret = -EINVAL; + goto end; + } + } else { + if (cmd_type != SVDM_CMD_TYPE_RESP_ACK) { + DP_ERR("error: invalid cmd type\n"); + ret = -EINVAL; + } + } +end: + return ret; +} + + +static int dp_usbpd_get_ss_lanes(struct dp_usbpd_private *pd) +{ + int rc = 0; + int timeout = 250; + + /* + * By default, USB reserves two lanes for Super Speed. + * Which means DP has remaining two lanes to operate on. 
+ * If multi-function is not supported, request USB to + * release the Super Speed lanes so that DP can use + * all four lanes in case DPCD indicates support for + * four lanes. + */ + if (!pd->dp_usbpd.base.multi_func) { + while (timeout) { + rc = pd->svid_handler.request_usb_ss_lane( + pd->pd, &pd->svid_handler); + if (rc != -EBUSY) + break; + + DP_WARN("USB busy, retry\n"); + + /* wait for hw recommended delay for usb */ + msleep(20); + timeout--; + } + } + + return rc; +} + +static void dp_usbpd_response_cb(struct usbpd_svid_handler *hdlr, u8 cmd, + enum usbpd_svdm_cmd_type cmd_type, + const u32 *vdos, int num_vdos) +{ + struct dp_usbpd_private *pd; + int rc = 0; + + pd = container_of(hdlr, struct dp_usbpd_private, svid_handler); + + DP_DEBUG("callback -> cmd: %s, *vdos = 0x%x, num_vdos = %d\n", + dp_usbpd_cmd_name(cmd), *vdos, num_vdos); + + if (dp_usbpd_validate_callback(cmd, cmd_type, num_vdos)) { + DP_DEBUG("invalid callback received\n"); + return; + } + + switch (cmd) { + case USBPD_SVDM_DISCOVER_MODES: + pd->vdo = *vdos; + dp_usbpd_get_capabilities(pd); + + pd->alt_mode |= DP_USBPD_ALT_MODE_DISCOVER; + + if (pd->cap.port & BIT(0)) + dp_usbpd_send_event(pd, DP_USBPD_EVT_ENTER); + break; + case USBPD_SVDM_ENTER_MODE: + pd->alt_mode |= DP_USBPD_ALT_MODE_ENTER; + + dp_usbpd_send_event(pd, DP_USBPD_EVT_STATUS); + break; + case USBPD_SVDM_ATTENTION: + if (pd->forced_disconnect) + break; + + pd->vdo = *vdos; + dp_usbpd_get_status(pd); + + if (!pd->dp_usbpd.base.alt_mode_cfg_done) { + if (pd->dp_usbpd.port & BIT(1)) + dp_usbpd_send_event(pd, DP_USBPD_EVT_CONFIGURE); + break; + } + + if (pd->dp_cb && pd->dp_cb->attention) + pd->dp_cb->attention(pd->dev); + + break; + case DP_USBPD_VDM_STATUS: + pd->vdo = *vdos; + dp_usbpd_get_status(pd); + + if (!(pd->alt_mode & DP_USBPD_ALT_MODE_CONFIGURE)) { + pd->alt_mode |= DP_USBPD_ALT_MODE_STATUS; + + if (pd->dp_usbpd.port & BIT(1)) + dp_usbpd_send_event(pd, DP_USBPD_EVT_CONFIGURE); + } + break; + case 
DP_USBPD_VDM_CONFIGURE: + pd->alt_mode |= DP_USBPD_ALT_MODE_CONFIGURE; + pd->dp_usbpd.base.alt_mode_cfg_done = true; + pd->forced_disconnect = false; + dp_usbpd_get_status(pd); + + pd->dp_usbpd.base.orientation = + usbpd_get_plug_orientation(pd->pd); + + rc = dp_usbpd_get_ss_lanes(pd); + if (rc) { + DP_ERR("failed to get SuperSpeed lanes\n"); + break; + } + + if (pd->dp_cb && pd->dp_cb->configure) + pd->dp_cb->configure(pd->dev); + break; + default: + DP_ERR("unknown cmd: %d\n", cmd); + break; + } +} + +static int dp_usbpd_simulate_connect(struct dp_hpd *dp_hpd, bool hpd) +{ + int rc = 0; + struct dp_usbpd *dp_usbpd; + struct dp_usbpd_private *pd; + + if (!dp_hpd) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + dp_usbpd = container_of(dp_hpd, struct dp_usbpd, base); + pd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd); + + dp_usbpd->base.hpd_high = hpd; + pd->forced_disconnect = !hpd; + pd->dp_usbpd.base.alt_mode_cfg_done = hpd; + + DP_DEBUG("hpd_high=%d, forced_disconnect=%d, orientation=%d\n", + dp_usbpd->base.hpd_high, pd->forced_disconnect, + pd->dp_usbpd.base.orientation); + if (hpd) + pd->dp_cb->configure(pd->dev); + else + pd->dp_cb->disconnect(pd->dev); + +error: + return rc; +} + +static int dp_usbpd_simulate_attention(struct dp_hpd *dp_hpd, int vdo) +{ + int rc = 0; + struct dp_usbpd *dp_usbpd; + struct dp_usbpd_private *pd; + + dp_usbpd = container_of(dp_hpd, struct dp_usbpd, base); + if (!dp_usbpd) { + DP_ERR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + pd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd); + + pd->vdo = vdo; + dp_usbpd_get_status(pd); + + if (pd->dp_cb && pd->dp_cb->attention) + pd->dp_cb->attention(pd->dev); +error: + return rc; +} + +int dp_usbpd_register(struct dp_hpd *dp_hpd) +{ + struct dp_usbpd *dp_usbpd; + struct dp_usbpd_private *usbpd; + int rc = 0; + + if (!dp_hpd) + return -EINVAL; + + dp_usbpd = container_of(dp_hpd, struct dp_usbpd, base); + + usbpd = 
container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd); + + rc = usbpd_register_svid(usbpd->pd, &usbpd->svid_handler); + if (rc) + DP_ERR("pd registration failed\n"); + + return rc; +} + +static void dp_usbpd_wakeup_phy(struct dp_hpd *dp_hpd, bool wakeup) +{ + struct dp_usbpd *dp_usbpd; + struct dp_usbpd_private *usbpd; + + dp_usbpd = container_of(dp_hpd, struct dp_usbpd, base); + usbpd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd); + + if (!usbpd->pd) { + DP_ERR("usbpd pointer invalid"); + return; + } + + usbpd_vdm_in_suspend(usbpd->pd, wakeup); +} + +struct dp_hpd *dp_usbpd_get(struct device *dev, struct dp_hpd_cb *cb) +{ + int rc = 0; + const char *pd_phandle = "qcom,dp-usbpd-detection"; + struct usbpd *pd = NULL; + struct dp_usbpd_private *usbpd; + struct dp_usbpd *dp_usbpd; + struct usbpd_svid_handler svid_handler = { + .svid = USB_C_DP_SID, + .vdm_received = NULL, + .connect = &dp_usbpd_connect_cb, + .svdm_received = &dp_usbpd_response_cb, + .disconnect = &dp_usbpd_disconnect_cb, + }; + + if (!cb) { + DP_ERR("invalid cb data\n"); + rc = -EINVAL; + goto error; + } + + pd = devm_usbpd_get_by_phandle(dev, pd_phandle); + if (IS_ERR(pd)) { + DP_DEBUG("usbpd phandle failed (%ld)\n", PTR_ERR(pd)); + rc = PTR_ERR(pd); + goto error; + } + + usbpd = devm_kzalloc(dev, sizeof(*usbpd), GFP_KERNEL); + if (!usbpd) { + rc = -ENOMEM; + goto error; + } + + usbpd->dev = dev; + usbpd->pd = pd; + usbpd->svid_handler = svid_handler; + usbpd->dp_cb = cb; + + dp_usbpd = &usbpd->dp_usbpd; + dp_usbpd->base.simulate_connect = dp_usbpd_simulate_connect; + dp_usbpd->base.simulate_attention = dp_usbpd_simulate_attention; + dp_usbpd->base.register_hpd = dp_usbpd_register; + dp_usbpd->base.wakeup_phy = dp_usbpd_wakeup_phy; + + return &dp_usbpd->base; +error: + return ERR_PTR(rc); +} + +void dp_usbpd_put(struct dp_hpd *dp_hpd) +{ + struct dp_usbpd *dp_usbpd; + struct dp_usbpd_private *usbpd; + + dp_usbpd = container_of(dp_hpd, struct dp_usbpd, base); + if (!dp_usbpd) + 
return; + + usbpd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd); + + usbpd_unregister_svid(usbpd->pd, &usbpd->svid_handler); + + devm_kfree(usbpd->dev, usbpd); +} diff --git a/msm/dp/dp_usbpd.h b/msm/dp/dp_usbpd.h new file mode 100644 index 000000000..e56e3a691 --- /dev/null +++ b/msm/dp/dp_usbpd.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_USBPD_H_ +#define _DP_USBPD_H_ + +#include +#include "dp_hpd.h" + +struct device; + +/** + * enum dp_usbpd_port - usb/dp port type + * @DP_USBPD_PORT_NONE: port not configured + * @DP_USBPD_PORT_UFP_D: Upstream Facing Port - DisplayPort + * @DP_USBPD_PORT_DFP_D: Downstream Facing Port - DisplayPort + * @DP_USBPD_PORT_D_UFP_D: Both UFP & DFP - DisplayPort + */ + +enum dp_usbpd_port { + DP_USBPD_PORT_NONE, + DP_USBPD_PORT_UFP_D, + DP_USBPD_PORT_DFP_D, + DP_USBPD_PORT_D_UFP_D, +}; + +/** + * struct dp_usbpd - DisplayPort status + * + * @port: port configured + * @low_pow_st: low power state + * @adaptor_dp_en: adaptor functionality enabled + * @usb_config_req: request to switch to usb + * @exit_dp_mode: request exit from displayport mode + * @debug_en: bool to specify debug mode + */ +struct dp_usbpd { + struct dp_hpd base; + enum dp_usbpd_port port; + bool low_pow_st; + bool adaptor_dp_en; + bool usb_config_req; + bool exit_dp_mode; + bool debug_en; +}; + +#if IS_ENABLED(CONFIG_DRM_MSM_DP_USBPD_LEGACY) +/** + * dp_usbpd_get() - setup usbpd module + * + * @dev: device instance of the caller + * @cb: struct containing callback function pointers. + * + * This function allows the client to initialize the usbpd + * module. The module will communicate with usb driver and + * handles the power delivery (PD) communication with the + * sink/usb device. This module will notify the client using + * the callback functions about the connection and status. 
+ */ +struct dp_hpd *dp_usbpd_get(struct device *dev, struct dp_hpd_cb *cb); +void dp_usbpd_put(struct dp_hpd *pd); +#else +static inline struct dp_hpd *dp_usbpd_get(struct device *dev, + struct dp_hpd_cb *cb) +{ + return ERR_PTR(-ENODEV); +} + +static inline void dp_usbpd_put(struct dp_hpd *pd) +{ +} +#endif /* CONFIG_DRM_MSM_DP_USBPD_LEGACY */ +#endif /* _DP_USBPD_H_ */ diff --git a/msm/dsi/dsi_catalog.c b/msm/dsi/dsi_catalog.c new file mode 100644 index 000000000..08892968b --- /dev/null +++ b/msm/dsi/dsi_catalog.c @@ -0,0 +1,348 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include + +#include "dsi_catalog.h" + +/** + * dsi_catalog_cmn_init() - catalog init for dsi controller v1.4 + */ +static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl, + enum dsi_ctrl_version version) +{ + /* common functions */ + ctrl->ops.host_setup = dsi_ctrl_hw_cmn_host_setup; + ctrl->ops.video_engine_en = dsi_ctrl_hw_cmn_video_engine_en; + ctrl->ops.video_engine_setup = dsi_ctrl_hw_cmn_video_engine_setup; + ctrl->ops.set_video_timing = dsi_ctrl_hw_cmn_set_video_timing; + ctrl->ops.set_timing_db = dsi_ctrl_hw_cmn_set_timing_db; + ctrl->ops.cmd_engine_setup = dsi_ctrl_hw_cmn_cmd_engine_setup; + ctrl->ops.setup_cmd_stream = dsi_ctrl_hw_cmn_setup_cmd_stream; + ctrl->ops.ctrl_en = dsi_ctrl_hw_cmn_ctrl_en; + ctrl->ops.cmd_engine_en = dsi_ctrl_hw_cmn_cmd_engine_en; + ctrl->ops.phy_sw_reset = dsi_ctrl_hw_cmn_phy_sw_reset; + ctrl->ops.soft_reset = dsi_ctrl_hw_cmn_soft_reset; + ctrl->ops.kickoff_command = dsi_ctrl_hw_cmn_kickoff_command; + ctrl->ops.kickoff_fifo_command = dsi_ctrl_hw_cmn_kickoff_fifo_command; + ctrl->ops.reset_cmd_fifo = dsi_ctrl_hw_cmn_reset_cmd_fifo; + ctrl->ops.trigger_command_dma = dsi_ctrl_hw_cmn_trigger_command_dma; + ctrl->ops.get_interrupt_status = dsi_ctrl_hw_cmn_get_interrupt_status; + 
ctrl->ops.poll_dma_status = dsi_ctrl_hw_cmn_poll_dma_status; + ctrl->ops.get_error_status = dsi_ctrl_hw_cmn_get_error_status; + ctrl->ops.clear_error_status = dsi_ctrl_hw_cmn_clear_error_status; + ctrl->ops.clear_interrupt_status = + dsi_ctrl_hw_cmn_clear_interrupt_status; + ctrl->ops.enable_status_interrupts = + dsi_ctrl_hw_cmn_enable_status_interrupts; + ctrl->ops.enable_error_interrupts = + dsi_ctrl_hw_cmn_enable_error_interrupts; + ctrl->ops.video_test_pattern_setup = + dsi_ctrl_hw_cmn_video_test_pattern_setup; + ctrl->ops.cmd_test_pattern_setup = + dsi_ctrl_hw_cmn_cmd_test_pattern_setup; + ctrl->ops.test_pattern_enable = dsi_ctrl_hw_cmn_test_pattern_enable; + ctrl->ops.trigger_cmd_test_pattern = + dsi_ctrl_hw_cmn_trigger_cmd_test_pattern; + ctrl->ops.clear_phy0_ln_err = dsi_ctrl_hw_dln0_phy_err; + ctrl->ops.phy_reset_config = dsi_ctrl_hw_cmn_phy_reset_config; + ctrl->ops.setup_misr = dsi_ctrl_hw_cmn_setup_misr; + ctrl->ops.collect_misr = dsi_ctrl_hw_cmn_collect_misr; + ctrl->ops.get_cmd_read_data = dsi_ctrl_hw_cmn_get_cmd_read_data; + ctrl->ops.clear_rdbk_register = dsi_ctrl_hw_cmn_clear_rdbk_reg; + ctrl->ops.ctrl_reset = dsi_ctrl_hw_cmn_ctrl_reset; + ctrl->ops.mask_error_intr = dsi_ctrl_hw_cmn_mask_error_intr; + ctrl->ops.error_intr_ctrl = dsi_ctrl_hw_cmn_error_intr_ctrl; + ctrl->ops.get_error_mask = dsi_ctrl_hw_cmn_get_error_mask; + ctrl->ops.get_hw_version = dsi_ctrl_hw_cmn_get_hw_version; + ctrl->ops.wait_for_cmd_mode_mdp_idle = + dsi_ctrl_hw_cmn_wait_for_cmd_mode_mdp_idle; + ctrl->ops.setup_avr = dsi_ctrl_hw_cmn_setup_avr; + ctrl->ops.set_continuous_clk = dsi_ctrl_hw_cmn_set_continuous_clk; + ctrl->ops.wait4dynamic_refresh_done = + dsi_ctrl_hw_cmn_wait4dynamic_refresh_done; + ctrl->ops.hs_req_sel = dsi_ctrl_hw_cmn_hs_req_sel; + ctrl->ops.vid_engine_busy = dsi_ctrl_hw_cmn_vid_engine_busy; + ctrl->ops.init_cmddma_trig_ctrl = dsi_ctrl_hw_cmn_init_cmddma_trig_ctrl; + + switch (version) { + case DSI_CTRL_VERSION_2_2: + case DSI_CTRL_VERSION_2_3: + case 
DSI_CTRL_VERSION_2_4: + case DSI_CTRL_VERSION_2_5: + case DSI_CTRL_VERSION_2_6: + case DSI_CTRL_VERSION_2_7: + case DSI_CTRL_VERSION_2_8: + ctrl->ops.phy_reset_config = dsi_ctrl_hw_22_phy_reset_config; + ctrl->ops.config_clk_gating = dsi_ctrl_hw_22_config_clk_gating; + ctrl->ops.setup_lane_map = dsi_ctrl_hw_22_setup_lane_map; + ctrl->ops.wait_for_lane_idle = + dsi_ctrl_hw_22_wait_for_lane_idle; + ctrl->ops.reg_dump_to_buffer = + dsi_ctrl_hw_22_reg_dump_to_buffer; + ctrl->ops.ulps_ops.ulps_request = dsi_ctrl_hw_cmn_ulps_request; + ctrl->ops.ulps_ops.ulps_exit = dsi_ctrl_hw_cmn_ulps_exit; + ctrl->ops.ulps_ops.get_lanes_in_ulps = + dsi_ctrl_hw_cmn_get_lanes_in_ulps; + ctrl->ops.clamp_enable = NULL; + ctrl->ops.clamp_disable = NULL; + ctrl->ops.schedule_dma_cmd = dsi_ctrl_hw_22_schedule_dma_cmd; + ctrl->ops.kickoff_command_non_embedded_mode = + dsi_ctrl_hw_kickoff_non_embedded_mode; + ctrl->ops.configure_cmddma_window = + dsi_ctrl_hw_22_configure_cmddma_window; + ctrl->ops.reset_trig_ctrl = + dsi_ctrl_hw_22_reset_trigger_controls; + ctrl->ops.log_line_count = dsi_ctrl_hw_22_log_line_count; + ctrl->ops.splitlink_cmd_setup = dsi_ctrl_hw_22_configure_splitlink; + ctrl->ops.setup_misr = dsi_ctrl_hw_22_setup_misr; + ctrl->ops.collect_misr = dsi_ctrl_hw_22_collect_misr; + break; + default: + break; + } +} + +/** + * dsi_catalog_ctrl_setup() - return catalog info for dsi controller + * @ctrl: Pointer to DSI controller hw object. + * @version: DSI controller version. + * @index: DSI controller instance ID. + * @phy_pll_bypass: DSI PHY/PLL drivers bypass HW access. + * @null_insertion_enabled: DSI controller inserts null packet. + * + * This function setups the catalog information in the dsi_ctrl_hw object. + * + * return: error code for failure and 0 for success. 
+ */ +int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl, + enum dsi_ctrl_version version, u32 index, + bool phy_pll_bypass, bool null_insertion_enabled) +{ + int rc = 0; + + if (version == DSI_CTRL_VERSION_UNKNOWN || + version >= DSI_CTRL_VERSION_MAX) { + DSI_ERR("Unsupported version: %d\n", version); + return -ENOTSUPP; + } + + ctrl->index = index; + ctrl->null_insertion_enabled = null_insertion_enabled; + set_bit(DSI_CTRL_VIDEO_TPG, ctrl->feature_map); + set_bit(DSI_CTRL_CMD_TPG, ctrl->feature_map); + set_bit(DSI_CTRL_VARIABLE_REFRESH_RATE, ctrl->feature_map); + set_bit(DSI_CTRL_DYNAMIC_REFRESH, ctrl->feature_map); + set_bit(DSI_CTRL_DESKEW_CALIB, ctrl->feature_map); + set_bit(DSI_CTRL_DPHY, ctrl->feature_map); + + switch (version) { + case DSI_CTRL_VERSION_2_2: + case DSI_CTRL_VERSION_2_3: + case DSI_CTRL_VERSION_2_4: + ctrl->phy_pll_bypass = phy_pll_bypass; + dsi_catalog_cmn_init(ctrl, version); + break; + case DSI_CTRL_VERSION_2_5: + case DSI_CTRL_VERSION_2_6: + case DSI_CTRL_VERSION_2_7: + case DSI_CTRL_VERSION_2_8: + ctrl->widebus_support = true; + ctrl->phy_pll_bypass = phy_pll_bypass; + dsi_catalog_cmn_init(ctrl, version); + break; + default: + return -ENOTSUPP; + } + + return rc; +} + +/** + * dsi_catalog_phy_3_0_init() - catalog init for DSI PHY 10nm + */ +static void dsi_catalog_phy_3_0_init(struct dsi_phy_hw *phy) +{ + phy->ops.regulator_enable = dsi_phy_hw_v3_0_regulator_enable; + phy->ops.regulator_disable = dsi_phy_hw_v3_0_regulator_disable; + phy->ops.enable = dsi_phy_hw_v3_0_enable; + phy->ops.disable = dsi_phy_hw_v3_0_disable; + phy->ops.calculate_timing_params = + dsi_phy_hw_calculate_timing_params; + phy->ops.ulps_ops.wait_for_lane_idle = + dsi_phy_hw_v3_0_wait_for_lane_idle; + phy->ops.ulps_ops.ulps_request = + dsi_phy_hw_v3_0_ulps_request; + phy->ops.ulps_ops.ulps_exit = + dsi_phy_hw_v3_0_ulps_exit; + phy->ops.ulps_ops.get_lanes_in_ulps = + dsi_phy_hw_v3_0_get_lanes_in_ulps; + phy->ops.ulps_ops.is_lanes_in_ulps = + 
dsi_phy_hw_v3_0_is_lanes_in_ulps; + phy->ops.phy_timing_val = dsi_phy_hw_timing_val_v3_0; + phy->ops.clamp_ctrl = dsi_phy_hw_v3_0_clamp_ctrl; + phy->ops.phy_lane_reset = dsi_phy_hw_v3_0_lane_reset; + phy->ops.toggle_resync_fifo = dsi_phy_hw_v3_0_toggle_resync_fifo; + phy->ops.dyn_refresh_ops.dyn_refresh_config = + dsi_phy_hw_v3_0_dyn_refresh_config; + phy->ops.dyn_refresh_ops.dyn_refresh_pipe_delay = + dsi_phy_hw_v3_0_dyn_refresh_pipe_delay; + phy->ops.dyn_refresh_ops.dyn_refresh_helper = + dsi_phy_hw_v3_0_dyn_refresh_helper; + phy->ops.dyn_refresh_ops.dyn_refresh_trigger_sel = NULL; + phy->ops.dyn_refresh_ops.cache_phy_timings = + dsi_phy_hw_v3_0_cache_phy_timings; + phy->ops.phy_idle_off = NULL; +} + +/** + * dsi_catalog_phy_4_0_init() - catalog init for DSI PHY 7nm + */ +static void dsi_catalog_phy_4_0_init(struct dsi_phy_hw *phy) +{ + phy->ops.regulator_enable = NULL; + phy->ops.regulator_disable = NULL; + phy->ops.enable = dsi_phy_hw_v4_0_enable; + phy->ops.disable = dsi_phy_hw_v4_0_disable; + phy->ops.calculate_timing_params = + dsi_phy_hw_calculate_timing_params; + phy->ops.ulps_ops.wait_for_lane_idle = + dsi_phy_hw_v4_0_wait_for_lane_idle; + phy->ops.ulps_ops.ulps_request = + dsi_phy_hw_v4_0_ulps_request; + phy->ops.ulps_ops.ulps_exit = + dsi_phy_hw_v4_0_ulps_exit; + phy->ops.ulps_ops.get_lanes_in_ulps = + dsi_phy_hw_v4_0_get_lanes_in_ulps; + phy->ops.ulps_ops.is_lanes_in_ulps = + dsi_phy_hw_v4_0_is_lanes_in_ulps; + phy->ops.phy_timing_val = dsi_phy_hw_timing_val_v4_0; + phy->ops.phy_lane_reset = dsi_phy_hw_v4_0_lane_reset; + phy->ops.toggle_resync_fifo = dsi_phy_hw_v4_0_toggle_resync_fifo; + phy->ops.reset_clk_en_sel = dsi_phy_hw_v4_0_reset_clk_en_sel; + + phy->ops.dyn_refresh_ops.dyn_refresh_config = + dsi_phy_hw_v4_0_dyn_refresh_config; + phy->ops.dyn_refresh_ops.dyn_refresh_pipe_delay = + dsi_phy_hw_v4_0_dyn_refresh_pipe_delay; + phy->ops.dyn_refresh_ops.dyn_refresh_helper = + dsi_phy_hw_v4_0_dyn_refresh_helper; + 
phy->ops.dyn_refresh_ops.dyn_refresh_trigger_sel = + dsi_phy_hw_v4_0_dyn_refresh_trigger_sel; + phy->ops.dyn_refresh_ops.cache_phy_timings = + dsi_phy_hw_v4_0_cache_phy_timings; + phy->ops.set_continuous_clk = dsi_phy_hw_v4_0_set_continuous_clk; + phy->ops.commit_phy_timing = dsi_phy_hw_v4_0_commit_phy_timing; + phy->ops.phy_idle_off = NULL; +} + +/** + * dsi_catalog_phy_5_0_init() - catalog init for DSI PHY 4nm + */ +static void dsi_catalog_phy_5_0_init(struct dsi_phy_hw *phy) +{ + phy->ops.regulator_enable = NULL; + phy->ops.regulator_disable = NULL; + phy->ops.enable = dsi_phy_hw_v5_0_enable; + phy->ops.disable = dsi_phy_hw_v5_0_disable; + phy->ops.calculate_timing_params = dsi_phy_hw_calculate_timing_params; + phy->ops.ulps_ops.wait_for_lane_idle = dsi_phy_hw_v5_0_wait_for_lane_idle; + phy->ops.ulps_ops.ulps_request = dsi_phy_hw_v5_0_ulps_request; + phy->ops.ulps_ops.ulps_exit = dsi_phy_hw_v5_0_ulps_exit; + phy->ops.ulps_ops.get_lanes_in_ulps = dsi_phy_hw_v5_0_get_lanes_in_ulps; + phy->ops.ulps_ops.is_lanes_in_ulps = dsi_phy_hw_v5_0_is_lanes_in_ulps; + phy->ops.phy_timing_val = dsi_phy_hw_timing_val_v5_0; + phy->ops.phy_lane_reset = dsi_phy_hw_v5_0_lane_reset; + phy->ops.toggle_resync_fifo = dsi_phy_hw_v5_0_toggle_resync_fifo; + phy->ops.reset_clk_en_sel = dsi_phy_hw_v5_0_reset_clk_en_sel; + + phy->ops.dyn_refresh_ops.dyn_refresh_config = dsi_phy_hw_v5_0_dyn_refresh_config; + phy->ops.dyn_refresh_ops.dyn_refresh_pipe_delay = dsi_phy_hw_v5_0_dyn_refresh_pipe_delay; + phy->ops.dyn_refresh_ops.dyn_refresh_helper = dsi_phy_hw_v5_0_dyn_refresh_helper; + phy->ops.dyn_refresh_ops.dyn_refresh_trigger_sel = dsi_phy_hw_v5_0_dyn_refresh_trigger_sel; + phy->ops.dyn_refresh_ops.cache_phy_timings = dsi_phy_hw_v5_0_cache_phy_timings; + phy->ops.set_continuous_clk = dsi_phy_hw_v5_0_set_continuous_clk; + phy->ops.commit_phy_timing = dsi_phy_hw_v5_0_commit_phy_timing; + phy->ops.phy_idle_off = dsi_phy_hw_v5_0_phy_idle_off; +} + +/** + * dsi_catalog_phy_setup() - return catalog
info for dsi phy hardware + * @ctrl: Pointer to DSI PHY hw object. + * @version: DSI PHY version. + * @index: DSI PHY instance ID. + * + * This function setups the catalog information in the dsi_phy_hw object. + * + * return: error code for failure and 0 for success. + */ +int dsi_catalog_phy_setup(struct dsi_phy_hw *phy, + enum dsi_phy_version version, + u32 index) +{ + int rc = 0; + + if (version == DSI_PHY_VERSION_UNKNOWN || + version >= DSI_PHY_VERSION_MAX) { + DSI_ERR("Unsupported version: %d\n", version); + return -ENOTSUPP; + } + + phy->index = index; + phy->version = version; + set_bit(DSI_PHY_DPHY, phy->feature_map); + + dsi_phy_timing_calc_init(phy, version); + + switch (version) { + case DSI_PHY_VERSION_3_0: + dsi_catalog_phy_3_0_init(phy); + break; + case DSI_PHY_VERSION_4_0: + case DSI_PHY_VERSION_4_1: + case DSI_PHY_VERSION_4_2: + case DSI_PHY_VERSION_4_3: + case DSI_PHY_VERSION_4_3_2: + dsi_catalog_phy_4_0_init(phy); + break; + case DSI_PHY_VERSION_5_2: + dsi_catalog_phy_5_0_init(phy); + break; + default: + return -ENOTSUPP; + } + + return rc; +} + +int dsi_catalog_phy_pll_setup(struct dsi_phy_hw *phy, u32 pll_ver) +{ + int rc = 0; + + if (pll_ver >= DSI_PLL_VERSION_UNKNOWN) { + DSI_ERR("Unsupported version: %d\n", pll_ver); + return -EOPNOTSUPP; + } else if (phy->phy_pll_bypass) { + return 0; + } + + switch (pll_ver) { + case DSI_PLL_VERSION_5NM: + phy->ops.configure = dsi_pll_5nm_configure; + phy->ops.pll_toggle = dsi_pll_5nm_toggle; + break; + case DSI_PLL_VERSION_4NM: + phy->ops.configure = dsi_pll_4nm_configure; + phy->ops.pll_toggle = dsi_pll_4nm_toggle; + break; + default: + phy->ops.configure = NULL; + phy->ops.pll_toggle = NULL; + break; + } + + return rc; +} diff --git a/msm/dsi/dsi_catalog.h b/msm/dsi/dsi_catalog.h new file mode 100644 index 000000000..de6bef4f1 --- /dev/null +++ b/msm/dsi/dsi_catalog.h @@ -0,0 +1,337 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _DSI_CATALOG_H_ +#define _DSI_CATALOG_H_ + +#include "dsi_ctrl_hw.h" +#include "dsi_phy_hw.h" + +/** + * dsi_catalog_ctrl_setup() - return catalog info for dsi controller + * @ctrl: Pointer to DSI controller hw object. + * @version: DSI controller version. + * @index: DSI controller instance ID. + * @phy_pll_bypass: DSI PHY/PLL drivers bypass HW access. + * @null_insertion_enabled: DSI controller inserts null packet. + * + * This function setups the catalog information in the dsi_ctrl_hw object. + * + * return: error code for failure and 0 for success. + */ +int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl, + enum dsi_ctrl_version version, u32 index, + bool phy_pll_bypass, bool null_insertion_enabled); + +/** + * dsi_catalog_phy_setup() - return catalog info for dsi phy hardware + * @phy: Pointer to DSI PHY hw object. + * @version: DSI PHY version. + * @index: DSI PHY instance ID. + * + * This function setups the catalog information in the dsi_phy_hw object. + * + * return: error code for failure and 0 for success. + */ +int dsi_catalog_phy_setup(struct dsi_phy_hw *phy, + enum dsi_phy_version version, + u32 index); + +/** + * dsi_phy_timing_calc_init() - initialize info for DSI PHY timing calculations + * @phy: Pointer to DSI PHY hw object. + * @version: DSI PHY version. + * + * This function setups the catalog information in the dsi_phy_hw object. + * + * return: error code for failure and 0 for success. + */ +int dsi_phy_timing_calc_init(struct dsi_phy_hw *phy, + enum dsi_phy_version version); + +/** + * dsi_phy_hw_calculate_timing_params() - DSI PHY timing parameter calculations + * @phy: Pointer to DSI PHY hw object. + * @mode: DSI mode information. + * @host: DSI host configuration. + * @timing: DSI phy lane configurations. + * @use_mode_bit_clk: Boolean to indicate whether to recalculate bit clk. 
+ * + * This function setups the catalog information in the dsi_phy_hw object. + * + * return: error code for failure and 0 for success. + */ +int dsi_phy_hw_calculate_timing_params(struct dsi_phy_hw *phy, + struct dsi_mode_info *mode, + struct dsi_host_common_cfg *host, + struct dsi_phy_per_lane_cfgs *timing, + bool use_mode_bit_clk); + +/* Definitions for 14nm PHY hardware driver */ +void dsi_phy_hw_v2_0_regulator_enable(struct dsi_phy_hw *phy, + struct dsi_phy_per_lane_cfgs *cfg); +void dsi_phy_hw_v2_0_regulator_disable(struct dsi_phy_hw *phy); +void dsi_phy_hw_v2_0_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg); +void dsi_phy_hw_v2_0_disable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg); +void dsi_phy_hw_v2_0_idle_on(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg); +void dsi_phy_hw_v2_0_idle_off(struct dsi_phy_hw *phy); +int dsi_phy_hw_timing_val_v2_0(struct dsi_phy_per_lane_cfgs *timing_cfg, + u32 *timing_val, u32 size); +void dsi_phy_hw_v2_0_clamp_ctrl(struct dsi_phy_hw *phy, bool enable); +void dsi_phy_hw_v2_0_dyn_refresh_helper(struct dsi_phy_hw *phy, u32 offset); +void dsi_phy_hw_v2_0_dyn_refresh_config(struct dsi_phy_hw *phy, + struct dsi_phy_cfg *cfg, bool is_master); +void dsi_phy_hw_v2_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy, + struct dsi_dyn_clk_delay *delay); +int dsi_phy_hw_v2_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings, + u32 *dst, u32 size); + +/* Definitions for 10nm PHY hardware driver */ +void dsi_phy_hw_v3_0_regulator_enable(struct dsi_phy_hw *phy, + struct dsi_phy_per_lane_cfgs *cfg); +void dsi_phy_hw_v3_0_regulator_disable(struct dsi_phy_hw *phy); +void dsi_phy_hw_v3_0_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg); +void dsi_phy_hw_v3_0_disable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg); +int dsi_phy_hw_v3_0_wait_for_lane_idle(struct dsi_phy_hw *phy, u32 lanes); +void dsi_phy_hw_v3_0_ulps_request(struct dsi_phy_hw *phy, + struct dsi_phy_cfg *cfg, u32 lanes); +void 
dsi_phy_hw_v3_0_ulps_exit(struct dsi_phy_hw *phy, + struct dsi_phy_cfg *cfg, u32 lanes); +u32 dsi_phy_hw_v3_0_get_lanes_in_ulps(struct dsi_phy_hw *phy); +bool dsi_phy_hw_v3_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes); +int dsi_phy_hw_timing_val_v3_0(struct dsi_phy_per_lane_cfgs *timing_cfg, + u32 *timing_val, u32 size); +void dsi_phy_hw_v3_0_clamp_ctrl(struct dsi_phy_hw *phy, bool enable); +int dsi_phy_hw_v3_0_lane_reset(struct dsi_phy_hw *phy); +void dsi_phy_hw_v3_0_toggle_resync_fifo(struct dsi_phy_hw *phy); + +/* Definitions for 7nm PHY hardware driver */ +void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg); +void dsi_phy_hw_v4_0_disable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg); +int dsi_phy_hw_v4_0_wait_for_lane_idle(struct dsi_phy_hw *phy, u32 lanes); +void dsi_phy_hw_v4_0_ulps_request(struct dsi_phy_hw *phy, + struct dsi_phy_cfg *cfg, u32 lanes); +void dsi_phy_hw_v4_0_ulps_exit(struct dsi_phy_hw *phy, + struct dsi_phy_cfg *cfg, u32 lanes); +u32 dsi_phy_hw_v4_0_get_lanes_in_ulps(struct dsi_phy_hw *phy); +bool dsi_phy_hw_v4_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes); +int dsi_phy_hw_timing_val_v4_0(struct dsi_phy_per_lane_cfgs *timing_cfg, + u32 *timing_val, u32 size); +int dsi_phy_hw_v4_0_lane_reset(struct dsi_phy_hw *phy); +void dsi_phy_hw_v4_0_toggle_resync_fifo(struct dsi_phy_hw *phy); +void dsi_phy_hw_v4_0_reset_clk_en_sel(struct dsi_phy_hw *phy); +void dsi_phy_hw_v4_0_set_continuous_clk(struct dsi_phy_hw *phy, bool enable); +void dsi_phy_hw_v4_0_commit_phy_timing(struct dsi_phy_hw *phy, + struct dsi_phy_per_lane_cfgs *timing); + +/* Definitions for 4nm PHY hardware driver */ +void dsi_phy_hw_v5_0_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg); +void dsi_phy_hw_v5_0_disable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg); +int dsi_phy_hw_v5_0_wait_for_lane_idle(struct dsi_phy_hw *phy, u32 lanes); +void dsi_phy_hw_v5_0_ulps_request(struct dsi_phy_hw *phy, + struct dsi_phy_cfg *cfg, u32 lanes); +void 
dsi_phy_hw_v5_0_ulps_exit(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg, u32 lanes); +u32 dsi_phy_hw_v5_0_get_lanes_in_ulps(struct dsi_phy_hw *phy); +bool dsi_phy_hw_v5_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes); +int dsi_phy_hw_timing_val_v5_0(struct dsi_phy_per_lane_cfgs *timing_cfg, u32 *timing_val, + u32 size); +int dsi_phy_hw_v5_0_lane_reset(struct dsi_phy_hw *phy); +void dsi_phy_hw_v5_0_toggle_resync_fifo(struct dsi_phy_hw *phy); +void dsi_phy_hw_v5_0_reset_clk_en_sel(struct dsi_phy_hw *phy); +void dsi_phy_hw_v5_0_set_continuous_clk(struct dsi_phy_hw *phy, bool enable); +void dsi_phy_hw_v5_0_commit_phy_timing(struct dsi_phy_hw *phy, + struct dsi_phy_per_lane_cfgs *timing); + +/* DSI controller common ops */ +u32 dsi_ctrl_hw_cmn_get_interrupt_status(struct dsi_ctrl_hw *ctrl); +u32 dsi_ctrl_hw_cmn_poll_dma_status(struct dsi_ctrl_hw *ctrl); +void dsi_ctrl_hw_cmn_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints); +void dsi_ctrl_hw_cmn_enable_status_interrupts(struct dsi_ctrl_hw *ctrl, + u32 ints); + +u64 dsi_ctrl_hw_cmn_get_error_status(struct dsi_ctrl_hw *ctrl); +void dsi_ctrl_hw_cmn_clear_error_status(struct dsi_ctrl_hw *ctrl, u64 errors); +void dsi_ctrl_hw_cmn_enable_error_interrupts(struct dsi_ctrl_hw *ctrl, + u64 errors); + +void dsi_ctrl_hw_cmn_video_test_pattern_setup(struct dsi_ctrl_hw *ctrl, + enum dsi_test_pattern type, + u32 init_val); +void dsi_ctrl_hw_cmn_cmd_test_pattern_setup(struct dsi_ctrl_hw *ctrl, + enum dsi_test_pattern type, + u32 init_val, + u32 stream_id); +void dsi_ctrl_hw_cmn_test_pattern_enable(struct dsi_ctrl_hw *ctrl, bool enable, + enum dsi_ctrl_tpg_pattern pattern, + enum dsi_op_mode panel_mode); +void dsi_ctrl_hw_cmn_trigger_cmd_test_pattern(struct dsi_ctrl_hw *ctrl, + u32 stream_id); + +void dsi_ctrl_hw_cmn_host_setup(struct dsi_ctrl_hw *ctrl, + struct dsi_host_common_cfg *config); +void dsi_ctrl_hw_cmn_video_engine_en(struct dsi_ctrl_hw *ctrl, bool on); +void dsi_ctrl_hw_cmn_video_engine_setup(struct dsi_ctrl_hw 
*ctrl, + struct dsi_host_common_cfg *common_cfg, + struct dsi_video_engine_cfg *cfg); + +void dsi_ctrl_hw_cmn_setup_avr(struct dsi_ctrl_hw *ctrl, bool enable); + +void dsi_ctrl_hw_cmn_set_video_timing(struct dsi_ctrl_hw *ctrl, + struct dsi_mode_info *mode); +void dsi_ctrl_hw_cmn_set_timing_db(struct dsi_ctrl_hw *ctrl, + bool enable); +void dsi_ctrl_hw_cmn_cmd_engine_setup(struct dsi_ctrl_hw *ctrl, + struct dsi_host_common_cfg *common_cfg, + struct dsi_cmd_engine_cfg *cfg); + +void dsi_ctrl_hw_cmn_ctrl_en(struct dsi_ctrl_hw *ctrl, bool on); +void dsi_ctrl_hw_cmn_cmd_engine_en(struct dsi_ctrl_hw *ctrl, bool on); + +void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl, + struct dsi_mode_info *mode, + struct dsi_host_common_cfg *cfg, + u32 vc_id, + struct dsi_rect *roi); +void dsi_ctrl_hw_cmn_phy_sw_reset(struct dsi_ctrl_hw *ctrl); +void dsi_ctrl_hw_cmn_soft_reset(struct dsi_ctrl_hw *ctrl); + +void dsi_ctrl_hw_cmn_setup_misr(struct dsi_ctrl_hw *ctrl, + enum dsi_op_mode panel_mode, + bool enable, u32 frame_count); +u32 dsi_ctrl_hw_cmn_collect_misr(struct dsi_ctrl_hw *ctrl, + enum dsi_op_mode panel_mode); + +void dsi_ctrl_hw_cmn_kickoff_command(struct dsi_ctrl_hw *ctrl, + struct dsi_ctrl_cmd_dma_info *cmd, + u32 flags); + +void dsi_ctrl_hw_cmn_kickoff_fifo_command(struct dsi_ctrl_hw *ctrl, + struct dsi_ctrl_cmd_dma_fifo_info *cmd, + u32 flags); +void dsi_ctrl_hw_cmn_reset_cmd_fifo(struct dsi_ctrl_hw *ctrl); +void dsi_ctrl_hw_cmn_trigger_command_dma(struct dsi_ctrl_hw *ctrl); +void dsi_ctrl_hw_dln0_phy_err(struct dsi_ctrl_hw *ctrl); +void dsi_ctrl_hw_cmn_phy_reset_config(struct dsi_ctrl_hw *ctrl, + bool enable); +void dsi_ctrl_hw_22_phy_reset_config(struct dsi_ctrl_hw *ctrl, + bool enable); +u32 dsi_ctrl_hw_cmn_get_cmd_read_data(struct dsi_ctrl_hw *ctrl, + u8 *rd_buf, + u32 read_offset, + u32 rx_byte, + u32 pkt_size, u32 *hw_read_cnt); +void dsi_ctrl_hw_cmn_clear_rdbk_reg(struct dsi_ctrl_hw *ctrl); +void dsi_ctrl_hw_22_schedule_dma_cmd(struct dsi_ctrl_hw *ctrl, 
int line_on); +int dsi_ctrl_hw_cmn_ctrl_reset(struct dsi_ctrl_hw *ctrl, + int mask); +void dsi_ctrl_hw_cmn_mask_error_intr(struct dsi_ctrl_hw *ctrl, u32 idx, + bool en); +void dsi_ctrl_hw_cmn_error_intr_ctrl(struct dsi_ctrl_hw *ctrl, bool en); +u32 dsi_ctrl_hw_cmn_get_error_mask(struct dsi_ctrl_hw *ctrl); +u32 dsi_ctrl_hw_cmn_get_hw_version(struct dsi_ctrl_hw *ctrl); +int dsi_ctrl_hw_cmn_wait_for_cmd_mode_mdp_idle(struct dsi_ctrl_hw *ctrl); +void dsi_ctrl_hw_cmn_init_cmddma_trig_ctrl(struct dsi_ctrl_hw *ctrl, + struct dsi_host_common_cfg *cfg); + +/* Definitions specific to 1.4 DSI controller hardware */ +int dsi_ctrl_hw_14_wait_for_lane_idle(struct dsi_ctrl_hw *ctrl, u32 lanes); +void dsi_ctrl_hw_14_setup_lane_map(struct dsi_ctrl_hw *ctrl, + struct dsi_lane_map *lane_map); +void dsi_ctrl_hw_cmn_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes); +void dsi_ctrl_hw_cmn_ulps_exit(struct dsi_ctrl_hw *ctrl, u32 lanes); +u32 dsi_ctrl_hw_cmn_get_lanes_in_ulps(struct dsi_ctrl_hw *ctrl); + +void dsi_ctrl_hw_14_clamp_enable(struct dsi_ctrl_hw *ctrl, + u32 lanes, + bool enable_ulps); + +void dsi_ctrl_hw_14_clamp_disable(struct dsi_ctrl_hw *ctrl, + u32 lanes, + bool disable_ulps); +ssize_t dsi_ctrl_hw_14_reg_dump_to_buffer(struct dsi_ctrl_hw *ctrl, + char *buf, + u32 size); + +/* Definitions specific to 2.0 DSI controller hardware */ +void dsi_ctrl_hw_20_setup_lane_map(struct dsi_ctrl_hw *ctrl, + struct dsi_lane_map *lane_map); +int dsi_ctrl_hw_20_wait_for_lane_idle(struct dsi_ctrl_hw *ctrl, u32 lanes); +ssize_t dsi_ctrl_hw_20_reg_dump_to_buffer(struct dsi_ctrl_hw *ctrl, + char *buf, + u32 size); +void dsi_ctrl_hw_kickoff_non_embedded_mode(struct dsi_ctrl_hw *ctrl, + struct dsi_ctrl_cmd_dma_info *cmd, + u32 flags); + +/* Definitions specific to 2.2 DSI controller hardware */ +void dsi_ctrl_hw_22_setup_lane_map(struct dsi_ctrl_hw *ctrl, + struct dsi_lane_map *lane_map); +int dsi_ctrl_hw_22_wait_for_lane_idle(struct dsi_ctrl_hw *ctrl, u32 lanes); +ssize_t 
dsi_ctrl_hw_22_reg_dump_to_buffer(struct dsi_ctrl_hw *ctrl, + char *buf, u32 size); + +void dsi_ctrl_hw_22_config_clk_gating(struct dsi_ctrl_hw *ctrl, bool enable, + enum dsi_clk_gate_type clk_selection); + +void dsi_ctrl_hw_cmn_set_continuous_clk(struct dsi_ctrl_hw *ctrl, bool enable); +void dsi_ctrl_hw_cmn_hs_req_sel(struct dsi_ctrl_hw *ctrl, bool sel_phy); + +void dsi_ctrl_hw_22_setup_misr(struct dsi_ctrl_hw *ctrl, enum dsi_op_mode panel_mode, + bool enable, u32 frame_count); +u32 dsi_ctrl_hw_22_collect_misr(struct dsi_ctrl_hw *ctrl, enum dsi_op_mode panel_mode); + +/* dynamic refresh specific functions */ +void dsi_phy_hw_v3_0_dyn_refresh_helper(struct dsi_phy_hw *phy, u32 offset); +void dsi_phy_hw_v3_0_dyn_refresh_config(struct dsi_phy_hw *phy, + struct dsi_phy_cfg *cfg, bool is_master); +void dsi_phy_hw_v3_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy, + struct dsi_dyn_clk_delay *delay); + +int dsi_ctrl_hw_cmn_wait4dynamic_refresh_done(struct dsi_ctrl_hw *ctrl); +bool dsi_ctrl_hw_cmn_vid_engine_busy(struct dsi_ctrl_hw *ctrl); +int dsi_phy_hw_v3_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings, + u32 *dst, u32 size); + +void dsi_phy_hw_v4_0_dyn_refresh_trigger_sel(struct dsi_phy_hw *phy, + bool is_master); +void dsi_phy_hw_v4_0_dyn_refresh_helper(struct dsi_phy_hw *phy, u32 offset); +void dsi_phy_hw_v4_0_dyn_refresh_config(struct dsi_phy_hw *phy, + struct dsi_phy_cfg *cfg, bool is_master); +void dsi_phy_hw_v4_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy, + struct dsi_dyn_clk_delay *delay); + +int dsi_phy_hw_v4_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings, + u32 *dst, u32 size); + +void dsi_phy_hw_v5_0_dyn_refresh_trigger_sel(struct dsi_phy_hw *phy, + bool is_master); +void dsi_phy_hw_v5_0_dyn_refresh_helper(struct dsi_phy_hw *phy, u32 offset); +void dsi_phy_hw_v5_0_dyn_refresh_config(struct dsi_phy_hw *phy, + struct dsi_phy_cfg *cfg, bool is_master); +void dsi_phy_hw_v5_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy, + struct 
dsi_dyn_clk_delay *delay); + +int dsi_phy_hw_v5_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings, + u32 *dst, u32 size); +void dsi_phy_hw_v5_0_phy_idle_off(struct dsi_phy_hw *phy, + struct dsi_phy_cfg *cfg); +void dsi_ctrl_hw_22_configure_cmddma_window(struct dsi_ctrl_hw *ctrl, + struct dsi_ctrl_cmd_dma_info *cmd, + u32 line_no, u32 window); +void dsi_ctrl_hw_22_reset_trigger_controls(struct dsi_ctrl_hw *ctrl, + struct dsi_host_common_cfg *cfg); +u32 dsi_ctrl_hw_22_log_line_count(struct dsi_ctrl_hw *ctrl, bool cmd_mode); + +/* PLL specific functions */ +int dsi_catalog_phy_pll_setup(struct dsi_phy_hw *phy, u32 pll_ver); +int dsi_pll_5nm_configure(void *pll, bool commit); +int dsi_pll_5nm_toggle(void *pll, bool prepare); +int dsi_pll_4nm_configure(void *pll, bool commit); +int dsi_pll_4nm_toggle(void *pll, bool prepare); + +void dsi_ctrl_hw_22_configure_splitlink(struct dsi_ctrl_hw *ctrl, + struct dsi_host_common_cfg *common_cfg, u32 sublink); +#endif /* _DSI_CATALOG_H_ */ diff --git a/msm/dsi/dsi_clk.h b/msm/dsi/dsi_clk.h new file mode 100644 index 000000000..443d5d063 --- /dev/null +++ b/msm/dsi/dsi_clk.h @@ -0,0 +1,377 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _DSI_CLK_H_ +#define _DSI_CLK_H_ + +#include +#include +#include +#include + +#define MAX_STRING_LEN 32 +#define MAX_DSI_CTRL 2 + +enum dsi_clk_state { + DSI_CLK_OFF, + DSI_CLK_ON, + DSI_CLK_EARLY_GATE, +}; + +enum clk_req_client { + DSI_CLK_REQ_MDP_CLIENT = 0, + DSI_CLK_REQ_DSI_CLIENT, +}; + +enum dsi_link_clk_type { + DSI_LINK_ESC_CLK, + DSI_LINK_BYTE_CLK, + DSI_LINK_PIX_CLK, + DSI_LINK_BYTE_INTF_CLK, + DSI_LINK_CLK_MAX, +}; + +enum dsi_link_clk_op_type { + DSI_LINK_CLK_SET_RATE = BIT(0), + DSI_LINK_CLK_PREPARE = BIT(1), + DSI_LINK_CLK_ENABLE = BIT(2), + DSI_LINK_CLK_START = BIT(0) | BIT(1) | BIT(2), +}; + +enum dsi_clk_type { + DSI_CORE_CLK = BIT(0), + DSI_LINK_CLK = BIT(1), + DSI_ALL_CLKS = (BIT(0) | BIT(1)), + DSI_CLKS_MAX = BIT(2), +}; + +enum dsi_lclk_type { + DSI_LINK_NONE = 0, + DSI_LINK_LP_CLK = BIT(0), + DSI_LINK_HS_CLK = BIT(1), +}; + +struct dsi_clk_ctrl_info { + enum dsi_clk_type clk_type; + enum dsi_clk_state clk_state; + enum clk_req_client client; +}; + +struct clk_ctrl_cb { + void *priv; + int (*dsi_clk_cb)(void *priv, struct dsi_clk_ctrl_info clk_ctrl_info); +}; + +/** + * struct dsi_core_clk_info - Core clock information for DSI hardware + * @mdp_core_clk: Handle to MDP core clock. + * @iface_clk: Handle to MDP interface clock. + * @core_mmss_clk: Handle to MMSS core clock. + * @bus_clk: Handle to bus clock. + * @mnoc_clk: Handle to MMSS NOC clock. + * @drm: Pointer to drm device node + */ +struct dsi_core_clk_info { + struct clk *mdp_core_clk; + struct clk *iface_clk; + struct clk *core_mmss_clk; + struct clk *bus_clk; + struct clk *mnoc_clk; + struct drm_device *drm; +}; + +/** + * struct dsi_link_hs_clk_info - Set of high speed link clocks for DSI HW + * @byte_clk: Handle to DSI byte_clk. + * @pixel_clk: Handle to DSI pixel_clk. + * @byte_intf_clk: Handle to DSI byte intf. clock. 
+ */ +struct dsi_link_hs_clk_info { + struct clk *byte_clk; + struct clk *pixel_clk; + struct clk *byte_intf_clk; +}; + +/** + * struct dsi_link_lp_clk_info - Set of low power link clocks for DSI HW. + * @esc_clk: Handle to DSI escape clock. + */ +struct dsi_link_lp_clk_info { + struct clk *esc_clk; +}; + +/** + * struct link_clk_freq - Clock frequency information for Link clocks + * @byte_clk_rate: Frequency of DSI byte_clk in Hz. + * @byte_intf_clk_rate: Frequency of DSI byte_intf_clk in Hz. + * @pix_clk_rate: Frequency of DSI pixel_clk in Hz. + * @esc_clk_rate: Frequency of DSI escape clock in Hz. + */ +struct link_clk_freq { + u32 byte_clk_rate; + u32 byte_intf_clk_rate; + u32 pix_clk_rate; + u32 esc_clk_rate; +}; + +/** + * typedef *pre_clockoff_cb() - Callback before clock is turned off + * @priv: private data pointer. + * @clk_type: clock which is being turned off. + * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks. + * @new_state: next state for the clock. + * + * @return: error code. + */ +typedef int (*pre_clockoff_cb)(void *priv, + enum dsi_clk_type clk_type, + enum dsi_lclk_type l_type, + enum dsi_clk_state new_state); + +/** + * typedef *post_clockoff_cb() - Callback after clock is turned off + * @priv: private data pointer. + * @clk_type: clock which was turned off. + * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks. + * @curr_state: current state for the clock. + * + * @return: error code. + */ +typedef int (*post_clockoff_cb)(void *priv, + enum dsi_clk_type clk_type, + enum dsi_lclk_type l_type, + enum dsi_clk_state curr_state); + +/** + * typedef *post_clockon_cb() - Callback after clock is turned on + * @priv: private data pointer. + * @clk_type: clock which was turned on. + * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks. + * @curr_state: current state for the clock. + * + * @return: error code.
+ */ +typedef int (*post_clockon_cb)(void *priv, + enum dsi_clk_type clk_type, + enum dsi_lclk_type l_type, + enum dsi_clk_state curr_state); + +/** + * typedef *pre_clockon_cb() - Callback before clock is turned on + * @priv: private data pointer. + * @clk_type: clock which is being turned on. + * @l_type: specifies if the clock is HS or LP type.Valid only for link clocks. + * @new_state: next state for the clock. + * + * @return: error code. + */ +typedef int (*pre_clockon_cb)(void *priv, + enum dsi_clk_type clk_type, + enum dsi_lclk_type l_type, + enum dsi_clk_state new_state); + + +/** + * typedef *phy_configure_cb() - Callback to configure PHY for PLL clocks + * @priv: private data pointer. + * @commit: boolean to specify if calculated PHY configuration needs to be + * committed. Set to false in case of dynamic clock switch. + * + * @return: error code. + */ +typedef int (*phy_configure_cb)(void *priv, bool commit); + +/** + * typedef *pll_toggle_cb() - Callback to toggle PHY PLL + * @priv: private data pointer. + * @prepare: specifies if the PLL needs to be turned on or off. + * + * @return: error code. + */ +typedef int (*pll_toggle_cb)(void *priv, bool prepare); + +/** + * struct dsi_clk_info - clock information for DSI hardware. + * @name: client name. 
+ * @c_clks[MAX_DSI_CTRL] array of core clock configurations + * @l_lp_clks[MAX_DSI_CTRL] array of low power(esc) clock configurations + * @l_hs_clks[MAX_DSI_CTRL] array of high speed clock configurations + * @ctrl_index[MAX_DSI_CTRL] array of DSI controller indexes mapped + * to core and link clock configurations + * @pre_clkoff_cb callback before clock is turned off + * @post_clkoff_cb callback after clock is turned off + * @post_clkon_cb callback after clock is turned on + * @pre_clkon_cb callback before clock is turned on + * @phy_config_cb callback to configure PHY PLL + * @phy_pll_toggle_cb callback to toggle PHY PLL state + * @priv_data pointer to private data + * @master_ndx master DSI controller index + * @dsi_ctrl_count number of DSI controllers + * @phy_pll_bypass bypass PLL clock related operations + */ +struct dsi_clk_info { + char name[MAX_STRING_LEN]; + struct dsi_core_clk_info c_clks[MAX_DSI_CTRL]; + struct dsi_link_lp_clk_info l_lp_clks[MAX_DSI_CTRL]; + struct dsi_link_hs_clk_info l_hs_clks[MAX_DSI_CTRL]; + u32 ctrl_index[MAX_DSI_CTRL]; + pre_clockoff_cb pre_clkoff_cb; + post_clockoff_cb post_clkoff_cb; + post_clockon_cb post_clkon_cb; + pre_clockon_cb pre_clkon_cb; + phy_configure_cb phy_config_cb; + pll_toggle_cb phy_pll_toggle_cb; + void *priv_data; + u32 master_ndx; + u32 dsi_ctrl_count; + bool phy_pll_bypass; +}; + +/** + * struct dsi_clk_link_set - Pair of clock handles to describe link clocks + * @byte_clk: Handle to DSi byte_clk. + * @pixel_clk: Handle to DSI pixel_clk. 
+ */ +struct dsi_clk_link_set { + struct clk *byte_clk; + struct clk *pixel_clk; +}; + +/** + * dsi_display_clk_mngr_update_splash_status() - Update splash stattus + * @clk_mngr: Structure containing DSI clock information + * @status: Splash status + */ +void dsi_display_clk_mngr_update_splash_status(void *clk_mgr, bool status); + +/** + * dsi_display_clk_mgr_register() - Register DSI clock manager + * @info: Structure containing DSI clock information + */ +void *dsi_display_clk_mngr_register(struct dsi_clk_info *info); + +/** + * dsi_display_clk_mngr_deregister() - Deregister DSI clock manager + * @clk_mngr: DSI clock manager pointer + */ +int dsi_display_clk_mngr_deregister(void *clk_mngr); + +/** + * dsi_register_clk_handle() - Register clock handle with DSI clock manager + * @clk_mngr: DSI clock manager pointer + * @client: DSI clock client pointer. + */ +void *dsi_register_clk_handle(void *clk_mngr, char *client); + +/** + * dsi_deregister_clk_handle() - Deregister clock handle from DSI clock manager + * @client: DSI clock client pointer. + * + * return: error code in case of failure or 0 for success. + */ +int dsi_deregister_clk_handle(void *client); + +/** + * dsi_display_link_clk_force_update_ctrl() - force to set link clks + * @handle: Handle of desired DSI clock client. + * + * return: error code in case of failure or 0 for success. + */ + +int dsi_display_link_clk_force_update_ctrl(void *handle); + +/** + * dsi_display_clk_ctrl() - set frequencies for link clks + * @handle: Handle of desired DSI clock client. + * @clk_type: Clock which is being controlled. + * @clk_state: Desired state of clock + * + * return: error code in case of failure or 0 for success. + */ +int dsi_display_clk_ctrl(void *handle, u32 clk_type, u32 clk_state); + +/** + * dsi_clk_set_link_frequencies() - set frequencies for link clks + * @client: DSI clock client pointer. + * @freq: Structure containing link clock frequencies. + * @index: Index of the DSI controller. 
+ * + * return: error code in case of failure or 0 for success. + */ +int dsi_clk_set_link_frequencies(void *client, struct link_clk_freq freq, + u32 index); + +/** + * dsi_clk_get_link_frequencies() - get link clk frequencies + * @link_freq: Structure to get link clock frequencies + * @client: DSI clock client pointer. + * @index: Index of the DSI controller. + * + * return: error code in case of failure or 0 for success. + */ +int dsi_clk_get_link_frequencies(struct link_clk_freq *link_freq, void *client, u32 index); + +/** + * dsi_clk_set_pixel_clk_rate() - set frequency for pixel_clk + * @client: DSI clock client pointer. + * @pixel_clk: Pixel_clk rate in Hz. + * @index: Index of the DSI controller. + * return: error code in case of failure or 0 for success. + */ +int dsi_clk_set_pixel_clk_rate(void *client, u64 pixel_clk, u32 index); + +/** + * dsi_clk_set_byte_clk_rate() - set frequency for byte clock + * @client: DSI clock client pointer. + * @byte_clk: Byte clock rate in Hz. + * @byte_intf_clk: Byte interface clock rate in Hz. + * @index: Index of the DSI controller. + * return: error code in case of failure or 0 for success. + */ +int dsi_clk_set_byte_clk_rate(void *client, u64 byte_clk, + u64 byte_intf_clk, u32 index); + +/** + * dsi_clk_update_parent() - update parent clocks for specified clock + * @parent: link clock pair which are set as parent. + * @child: link clock pair whose parent has to be set. + */ +int dsi_clk_update_parent(struct dsi_clk_link_set *parent, + struct dsi_clk_link_set *child); + +/** + * dsi_clk_prepare_enable() - prepare and enable dsi src clocks + * @clk: list of src clocks. + * + * @return: Zero on success and err no on failure + */ +int dsi_clk_prepare_enable(struct dsi_clk_link_set *clk); + +/** + * dsi_clk_disable_unprepare() - disable and unprepare dsi src clocks + * @clk: list of src clocks.
+ */ +void dsi_clk_disable_unprepare(struct dsi_clk_link_set *clk); + +/** + * dsi_display_dump_clk_handle_state() - dump client clock state + * @client: DSI clock client pointer. + */ +int dsi_display_dump_clk_handle_state(void *client); + +/** + * dsi_clk_acquire_mngr_lock() - acquire clk manager mutex lock + * @client: DSI clock client pointer. + */ +void dsi_clk_acquire_mngr_lock(void *client); + +/** + * dsi_clk_release_mngr_lock() - release clk manager mutex lock + * @client: DSI clock client pointer. + */ +void dsi_clk_release_mngr_lock(void *client); + +#endif /* _DSI_CLK_H_ */ diff --git a/msm/dsi/dsi_clk_manager.c b/msm/dsi/dsi_clk_manager.c new file mode 100644 index 000000000..a0e7d28ae --- /dev/null +++ b/msm/dsi/dsi_clk_manager.c @@ -0,0 +1,1557 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include "dsi_clk.h" +#include "dsi_defs.h" + +struct dsi_core_clks { + struct dsi_core_clk_info clks; +}; + +struct dsi_link_clks { + struct dsi_link_hs_clk_info hs_clks; + struct dsi_link_lp_clk_info lp_clks; + struct link_clk_freq freq; +}; + +struct dsi_clk_mngr { + char name[MAX_STRING_LEN]; + struct mutex clk_mutex; + struct list_head client_list; + + u32 dsi_ctrl_count; + u32 master_ndx; + struct dsi_core_clks core_clks[MAX_DSI_CTRL]; + struct dsi_link_clks link_clks[MAX_DSI_CTRL]; + u32 ctrl_index[MAX_DSI_CTRL]; + u32 core_clk_state; + u32 link_clk_state; + + phy_configure_cb phy_config_cb; + pll_toggle_cb phy_pll_toggle_cb; + pre_clockoff_cb pre_clkoff_cb; + post_clockoff_cb post_clkoff_cb; + post_clockon_cb post_clkon_cb; + pre_clockon_cb pre_clkon_cb; + + bool is_cont_splash_enabled; + bool phy_pll_bypass; + void *priv_data; +}; + +struct dsi_clk_client_info { + char name[MAX_STRING_LEN]; + u32 core_refcount; + u32 link_refcount; + u32 core_clk_state; + 
u32 link_clk_state; + struct list_head list; + struct dsi_clk_mngr *mngr; +}; + +static int _get_clk_mngr_index(struct dsi_clk_mngr *mngr, + u32 dsi_ctrl_index, + u32 *clk_mngr_index) +{ + int i; + + for (i = 0; i < mngr->dsi_ctrl_count; i++) { + if (mngr->ctrl_index[i] == dsi_ctrl_index) { + *clk_mngr_index = i; + return 0; + } + } + + return -EINVAL; +} + +/** + * dsi_clk_set_link_frequencies() - set frequencies for link clks + * @client: DSI clock client pointer. + * @freq: Structure containing link clock frequencies. + * @index: Index of the DSI controller; mapped via _get_clk_mngr_index(). + * + * return: error code in case of failure or 0 for success. + */ +int dsi_clk_set_link_frequencies(void *client, struct link_clk_freq freq, + u32 index) +{ + int rc = 0, clk_mngr_index = 0; + struct dsi_clk_client_info *c = client; + struct dsi_clk_mngr *mngr; + + if (!client) { + DSI_ERR("invalid params\n"); + return -EINVAL; + } + + mngr = c->mngr; + rc = _get_clk_mngr_index(mngr, index, &clk_mngr_index); + if (rc) { + DSI_ERR("failed to map control index %d\n", index); + return -EINVAL; + } + + memcpy(&mngr->link_clks[clk_mngr_index].freq, &freq, + sizeof(struct link_clk_freq)); + + return rc; +} + +/** + * dsi_clk_get_link_frequencies() - get link clk frequencies + * @link_freq: Structure to get link clock frequencies + * @client: DSI clock client pointer. + * @index: Index of the DSI controller. NOTE(review): used to index link_clks[] directly, without the _get_clk_mngr_index() mapping the setter applies — confirm intended. + * + * return: error code in case of failure or 0 for success. + */ +int dsi_clk_get_link_frequencies(struct link_clk_freq *link_freq, void *client, u32 index) +{ + struct dsi_clk_client_info *c = client; + struct dsi_clk_mngr *mngr; + + if (!client || !link_freq) { + DSI_ERR("invalid params\n"); + return -EINVAL; + } + + mngr = c->mngr; + memcpy(link_freq, &mngr->link_clks[index].freq, sizeof(struct link_clk_freq)); + + return 0; +} + +/** + * dsi_clk_set_pixel_clk_rate() - set frequency for pixel clock + * @client: DSI clock client pointer.
+ * @pixel_clk: Pixel clock rate in KHz. + * @index: Index of the DSI controller. + * + * return: error code in case of failure or 0 for success. + */ +int dsi_clk_set_pixel_clk_rate(void *client, u64 pixel_clk, u32 index) +{ + int rc = 0; + struct dsi_clk_client_info *c = client; + struct dsi_clk_mngr *mngr; + + mngr = c->mngr; + + if (mngr->phy_pll_bypass) + return 0; + + rc = clk_set_rate(mngr->link_clks[index].hs_clks.pixel_clk, pixel_clk); + if (rc) + DSI_ERR("failed to set clk rate for pixel clk, rc=%d\n", rc); + else + mngr->link_clks[index].freq.pix_clk_rate = pixel_clk; + + return rc; +} + +/** + * dsi_clk_set_byte_clk_rate() - set frequency for byte clock + * @client: DSI clock client pointer. + * @byte_clk: Byte clock rate in Hz. + * @byte_intf_clk: Byte interface clock rate in Hz. + * @index: Index of the DSI controller. + * return: error code in case of failure or 0 for success. + */ +int dsi_clk_set_byte_clk_rate(void *client, u64 byte_clk, + u64 byte_intf_clk, u32 index) +{ + int rc = 0; + struct dsi_clk_client_info *c = client; + struct dsi_clk_mngr *mngr; + + mngr = c->mngr; + + if (mngr->phy_pll_bypass) + return 0; + + rc = clk_set_rate(mngr->link_clks[index].hs_clks.byte_clk, byte_clk); + if (rc) + DSI_ERR("failed to set clk rate for byte clk, rc=%d\n", rc); + else + mngr->link_clks[index].freq.byte_clk_rate = byte_clk; + + if (mngr->link_clks[index].hs_clks.byte_intf_clk) { + rc = clk_set_rate(mngr->link_clks[index].hs_clks.byte_intf_clk, + byte_intf_clk); + if (rc) + DSI_ERR("failed to set clk rate for byte intf clk=%d\n", + rc); + else + mngr->link_clks[index].freq.byte_intf_clk_rate = + byte_intf_clk; + } + + return rc; +} + +/** + * dsi_clk_update_parent() - update parent clocks for specified clock + * @parent: link clock pair which are set as parent. + * @child: link clock pair whose parent has to be set. 
+ */ +int dsi_clk_update_parent(struct dsi_clk_link_set *parent, + struct dsi_clk_link_set *child) +{ + int rc = 0; + + rc = clk_set_parent(child->byte_clk, parent->byte_clk); + if (rc) { + DSI_ERR("failed to set byte clk parent\n"); + goto error; + } + + rc = clk_set_parent(child->pixel_clk, parent->pixel_clk); + if (rc) { + DSI_ERR("failed to set pixel clk parent\n"); + goto error; + } +error: + return rc; +} + +/** + * dsi_clk_prepare_enable() - prepare and enable dsi src clocks + * @clk: list of src clocks. + * + * @return: Zero on success and err no on failure. + */ +int dsi_clk_prepare_enable(struct dsi_clk_link_set *clk) +{ + int rc; + + rc = clk_prepare_enable(clk->byte_clk); + if (rc) { + DSI_ERR("failed to enable byte src clk %d\n", rc); + return rc; + } + + rc = clk_prepare_enable(clk->pixel_clk); + if (rc) { + DSI_ERR("failed to enable pixel src clk %d\n", rc); + return rc; + } + + return 0; +} + +/** + * dsi_clk_disable_unprepare() - disable and unprepare dsi src clocks + * @clk: list of src clocks. 
 */
void dsi_clk_disable_unprepare(struct dsi_clk_link_set *clk)
{
	/* Reverse order of dsi_clk_prepare_enable(): pixel first, then byte. */
	clk_disable_unprepare(clk->pixel_clk);
	clk_disable_unprepare(clk->byte_clk);
}

/*
 * dsi_core_clk_start() - prepare+enable the DSI core clocks in order
 * (mdp_core -> mnoc -> iface -> bus -> core_mmss). Each clock handle is
 * optional (NULL handles are skipped); on any failure the clocks already
 * enabled are unwound via the goto chain and the error code is returned.
 */
int dsi_core_clk_start(struct dsi_core_clks *c_clks)
{
	int rc = 0;

	if (c_clks->clks.mdp_core_clk) {
		rc = clk_prepare_enable(c_clks->clks.mdp_core_clk);
		if (rc) {
			DSI_ERR("failed to enable mdp_core_clk, rc=%d\n", rc);
			goto error;
		}
	}

	if (c_clks->clks.mnoc_clk) {
		rc = clk_prepare_enable(c_clks->clks.mnoc_clk);
		if (rc) {
			DSI_ERR("failed to enable mnoc_clk, rc=%d\n", rc);
			goto error_disable_core_clk;
		}
	}

	if (c_clks->clks.iface_clk) {
		rc = clk_prepare_enable(c_clks->clks.iface_clk);
		if (rc) {
			DSI_ERR("failed to enable iface_clk, rc=%d\n", rc);
			goto error_disable_mnoc_clk;
		}
	}

	if (c_clks->clks.bus_clk) {
		rc = clk_prepare_enable(c_clks->clks.bus_clk);
		if (rc) {
			DSI_ERR("failed to enable bus_clk, rc=%d\n", rc);
			goto error_disable_iface_clk;
		}
	}

	if (c_clks->clks.core_mmss_clk) {
		rc = clk_prepare_enable(c_clks->clks.core_mmss_clk);
		if (rc) {
			DSI_ERR("failed to enable core_mmss_clk, rc=%d\n", rc);
			goto error_disable_bus_clk;
		}
	}

	return rc;

	/* Unwind chain: disable in reverse order of enabling. */
error_disable_bus_clk:
	if (c_clks->clks.bus_clk)
		clk_disable_unprepare(c_clks->clks.bus_clk);
error_disable_iface_clk:
	if (c_clks->clks.iface_clk)
		clk_disable_unprepare(c_clks->clks.iface_clk);
error_disable_mnoc_clk:
	if (c_clks->clks.mnoc_clk)
		clk_disable_unprepare(c_clks->clks.mnoc_clk);
error_disable_core_clk:
	if (c_clks->clks.mdp_core_clk)
		clk_disable_unprepare(c_clks->clks.mdp_core_clk);
error:
	return rc;
}

/*
 * dsi_core_clk_stop() - disable+unprepare the DSI core clocks, in exact
 * reverse order of dsi_core_clk_start(). Always returns 0.
 */
int dsi_core_clk_stop(struct dsi_core_clks *c_clks)
{
	int rc = 0;

	if (c_clks->clks.core_mmss_clk)
		clk_disable_unprepare(c_clks->clks.core_mmss_clk);

	if (c_clks->clks.bus_clk)
		clk_disable_unprepare(c_clks->clks.bus_clk);

	if (c_clks->clks.iface_clk)
		clk_disable_unprepare(c_clks->clks.iface_clk);

	if (c_clks->clks.mnoc_clk)
		clk_disable_unprepare(c_clks->clks.mnoc_clk);

	if (c_clks->clks.mdp_core_clk)
		clk_disable_unprepare(c_clks->clks.mdp_core_clk);

	return rc;
}

/*
 * dsi_link_hs_clk_set_rate() - apply the cached byte/pixel (and optional
 * byte_intf) rates for controller slot @index. Skipped entirely during
 * continuous splash and in PHY-PLL-bypass mode.
 *
 * NOTE(review): the container_of() back-walk assumes @link_hs_clks is the
 * hs_clks member of the manager's link_clks[index] slot — callers must pass
 * a matching index.
 */
static int dsi_link_hs_clk_set_rate(struct dsi_link_hs_clk_info *link_hs_clks,
	int index)
{
	int rc = 0;
	struct dsi_clk_mngr *mngr;
	struct dsi_link_clks *l_clks;

	if (index >= MAX_DSI_CTRL) {
		DSI_ERR("Invalid DSI ctrl index\n");
		return -EINVAL;
	}

	l_clks = container_of(link_hs_clks, struct dsi_link_clks, hs_clks);
	mngr = container_of(l_clks, struct dsi_clk_mngr, link_clks[index]);

	/*
	 * In an ideal world, cont_splash_enabled should not be required inside
	 * the clock manager. But, in the current driver cont_splash_enabled
	 * flag is set inside mdp driver and there is no interface event
	 * associated with this flag setting.
	 */
	if (mngr->is_cont_splash_enabled)
		return 0;

	if (mngr->phy_pll_bypass)
		return 0;

	rc = clk_set_rate(link_hs_clks->byte_clk,
		l_clks->freq.byte_clk_rate);
	if (rc) {
		DSI_ERR("clk_set_rate failed for byte_clk rc = %d\n", rc);
		goto error;
	}

	rc = clk_set_rate(link_hs_clks->pixel_clk,
		l_clks->freq.pix_clk_rate);
	if (rc) {
		DSI_ERR("clk_set_rate failed for pixel_clk rc = %d\n", rc);
		goto error;
	}

	/*
	 * If byte_intf_clk is present, set rate for that too.
	 */
	if (link_hs_clks->byte_intf_clk) {
		rc = clk_set_rate(link_hs_clks->byte_intf_clk,
			l_clks->freq.byte_intf_clk_rate);
		if (rc) {
			DSI_ERR("set_rate failed for byte_intf_clk rc = %d\n",
				rc);
			goto error;
		}
	}
error:
	return rc;
}

/*
 * dsi_link_hs_clk_prepare() - clk_prepare() byte, pixel and (optional)
 * byte_intf clocks; unwinds already-prepared clocks on failure.
 */
static int dsi_link_hs_clk_prepare(struct dsi_link_hs_clk_info *link_hs_clks)
{
	int rc = 0;

	rc = clk_prepare(link_hs_clks->byte_clk);
	if (rc) {
		DSI_ERR("Failed to prepare dsi byte clk, rc=%d\n", rc);
		goto byte_clk_err;
	}

	rc = clk_prepare(link_hs_clks->pixel_clk);
	if (rc) {
		DSI_ERR("Failed to prepare dsi pixel clk, rc=%d\n", rc);
		goto pixel_clk_err;
	}

	if (link_hs_clks->byte_intf_clk) {
		rc = clk_prepare(link_hs_clks->byte_intf_clk);
		if (rc) {
			DSI_ERR("Failed to prepare dsi byte intf clk, rc=%d\n",
				rc);
			goto byte_intf_clk_err;
		}
	}

	return rc;

byte_intf_clk_err:
	clk_unprepare(link_hs_clks->pixel_clk);
pixel_clk_err:
	clk_unprepare(link_hs_clks->byte_clk);
byte_clk_err:
	return rc;
}

/* Reverse of dsi_link_hs_clk_prepare(). */
static void dsi_link_hs_clk_unprepare(struct dsi_link_hs_clk_info *link_hs_clks)
{
	if (link_hs_clks->byte_intf_clk)
		clk_unprepare(link_hs_clks->byte_intf_clk);
	clk_unprepare(link_hs_clks->pixel_clk);
	clk_unprepare(link_hs_clks->byte_clk);
}

/*
 * dsi_link_hs_clk_enable() - clk_enable() byte, pixel and (optional)
 * byte_intf clocks; unwinds already-enabled clocks on failure.
 */
static int dsi_link_hs_clk_enable(struct dsi_link_hs_clk_info *link_hs_clks)
{
	int rc = 0;

	rc = clk_enable(link_hs_clks->byte_clk);
	if (rc) {
		DSI_ERR("Failed to enable dsi byte clk, rc=%d\n", rc);
		goto byte_clk_err;
	}

	rc = clk_enable(link_hs_clks->pixel_clk);
	if (rc) {
		DSI_ERR("Failed to enable dsi pixel clk, rc=%d\n", rc);
		goto pixel_clk_err;
	}

	if (link_hs_clks->byte_intf_clk) {
		rc = clk_enable(link_hs_clks->byte_intf_clk);
		if (rc) {
			DSI_ERR("Failed to enable dsi byte intf clk, rc=%d\n",
				rc);
			goto byte_intf_clk_err;
		}
	}

	return rc;

byte_intf_clk_err:
	clk_disable(link_hs_clks->pixel_clk);
pixel_clk_err:
	clk_disable(link_hs_clks->byte_clk);
byte_clk_err:
	return rc;
}

/* Reverse of dsi_link_hs_clk_enable(). */
static void dsi_link_hs_clk_disable(struct dsi_link_hs_clk_info *link_hs_clks)
{
	if (link_hs_clks->byte_intf_clk)
		clk_disable(link_hs_clks->byte_intf_clk);
	clk_disable(link_hs_clks->pixel_clk);
	clk_disable(link_hs_clks->byte_clk);
}

/**
 * dsi_link_clk_start() - enable dsi link clocks
 *
 * @op_type is a bitmask selecting which steps to perform:
 * DSI_LINK_CLK_SET_RATE, DSI_LINK_CLK_PREPARE, DSI_LINK_CLK_ENABLE.
 * On enable failure the prepare step is unwound.
 */
static int dsi_link_hs_clk_start(struct dsi_link_hs_clk_info *link_hs_clks,
	enum dsi_link_clk_op_type op_type, int index)
{
	int rc = 0;

	if (index >= MAX_DSI_CTRL) {
		DSI_ERR("Invalid DSI ctrl index\n");
		return -EINVAL;
	}

	if (op_type & DSI_LINK_CLK_SET_RATE) {
		rc = dsi_link_hs_clk_set_rate(link_hs_clks, index);
		if (rc) {
			DSI_ERR("failed to set HS clk rates, rc = %d\n", rc);
			goto error;
		}
	}

	if (op_type & DSI_LINK_CLK_PREPARE) {
		rc = dsi_link_hs_clk_prepare(link_hs_clks);
		if (rc) {
			DSI_ERR("failed to prepare link HS clks, rc = %d\n",
				rc);
			goto error;
		}
	}

	if (op_type & DSI_LINK_CLK_ENABLE) {
		rc = dsi_link_hs_clk_enable(link_hs_clks);
		if (rc) {
			DSI_ERR("failed to enable link HS clks, rc = %d\n", rc);
			goto error_unprepare;
		}
	}

	DSI_DEBUG("HS Link clocks are enabled\n");
	return rc;
error_unprepare:
	dsi_link_hs_clk_unprepare(link_hs_clks);
error:
	return rc;
}

/**
 * dsi_link_clk_stop() - Stop DSI link clocks.
+ */ +static int dsi_link_hs_clk_stop(struct dsi_link_hs_clk_info *link_hs_clks) +{ + dsi_link_hs_clk_disable(link_hs_clks); + dsi_link_hs_clk_unprepare(link_hs_clks); + + DSI_DEBUG("HS Link clocks disabled\n"); + + return 0; +} + +static int dsi_link_lp_clk_start(struct dsi_link_lp_clk_info *link_lp_clks, + int index) +{ + int rc = 0; + struct dsi_clk_mngr *mngr; + struct dsi_link_clks *l_clks; + + if (index >= MAX_DSI_CTRL) { + DSI_ERR("Invalid DSI ctrl index\n"); + return -EINVAL; + } + + l_clks = container_of(link_lp_clks, struct dsi_link_clks, lp_clks); + + mngr = container_of(l_clks, struct dsi_clk_mngr, link_clks[index]); + if (!mngr) + return -EINVAL; + + /* + * In an ideal world, cont_splash_enabled should not be required inside + * the clock manager. But, in the current driver cont_splash_enabled + * flag is set inside mdp driver and there is no interface event + * associated with this flag setting. Also, set rate for clock need not + * be called for every enable call. It should be done only once when + * coming out of suspend. 
+ */ + if (mngr->is_cont_splash_enabled) + goto prepare; + + rc = clk_set_rate(link_lp_clks->esc_clk, l_clks->freq.esc_clk_rate); + if (rc) { + DSI_ERR("clk_set_rate failed for esc_clk rc = %d\n", rc); + goto error; + } + +prepare: + rc = clk_prepare_enable(link_lp_clks->esc_clk); + if (rc) { + DSI_ERR("Failed to enable dsi esc clk\n"); + clk_unprepare(l_clks->lp_clks.esc_clk); + } +error: + DSI_DEBUG("LP Link clocks are enabled\n"); + return rc; +} + +static int dsi_link_lp_clk_stop( + struct dsi_link_lp_clk_info *link_lp_clks) +{ + struct dsi_link_clks *l_clks; + + l_clks = container_of(link_lp_clks, struct dsi_link_clks, lp_clks); + + clk_disable_unprepare(l_clks->lp_clks.esc_clk); + + DSI_DEBUG("LP Link clocks are disabled\n"); + return 0; +} + +static int dsi_display_core_clk_enable(struct dsi_core_clks *clks, + u32 ctrl_count, u32 master_ndx) +{ + int rc = 0; + int i; + struct dsi_core_clks *clk, *m_clks; + + /* + * In case of split DSI usecases, the clock for master controller should + * be enabled before the other controller. Master controller in the + * clock context refers to the controller that sources the clock. 
+ */ + + m_clks = &clks[master_ndx]; + + rc = dsi_core_clk_start(m_clks); + if (rc) { + DSI_ERR("failed to turn on master clocks, rc=%d\n", rc); + goto error; + } + + /* Turn on rest of the core clocks */ + for (i = 0; i < ctrl_count; i++) { + clk = &clks[i]; + if (!clk || (clk == m_clks)) + continue; + + rc = dsi_core_clk_start(clk); + if (rc) { + DSI_ERR("failed to turn on clocks, rc=%d\n", rc); + goto error_disable_master; + } + } + return rc; +error_disable_master: + (void)dsi_core_clk_stop(m_clks); + +error: + return rc; +} + +static int dsi_display_link_clk_enable(struct dsi_link_clks *clks, + enum dsi_lclk_type l_type, u32 ctrl_count, u32 master_ndx) +{ + int rc = 0; + int i; + struct dsi_link_clks *clk, *m_clks; + struct dsi_clk_mngr *mngr; + + mngr = container_of(clks, struct dsi_clk_mngr, link_clks[master_ndx]); + + /* + * In case of split DSI usecases, the clock for master controller should + * be enabled before the other controller. Master controller in the + * clock context refers to the controller that sources the clock. 
+ */ + + m_clks = &clks[master_ndx]; + + if (l_type & DSI_LINK_LP_CLK) { + rc = dsi_link_lp_clk_start(&m_clks->lp_clks, master_ndx); + if (rc) { + DSI_ERR("failed to turn on master lp link clocks, rc=%d\n", + rc); + goto error; + } + } + + if (l_type & DSI_LINK_HS_CLK) { + if (!mngr->is_cont_splash_enabled) { + mngr->phy_config_cb(mngr->priv_data, true); + mngr->phy_pll_toggle_cb(mngr->priv_data, true); + } + rc = dsi_link_hs_clk_start(&m_clks->hs_clks, + DSI_LINK_CLK_START, master_ndx); + if (rc) { + DSI_ERR("failed to turn on master hs link clocks, rc=%d\n", + rc); + goto error; + } + } + + for (i = 0; i < ctrl_count; i++) { + clk = &clks[i]; + if (!clk || (clk == m_clks)) + continue; + + if (l_type & DSI_LINK_LP_CLK) { + rc = dsi_link_lp_clk_start(&clk->lp_clks, i); + if (rc) { + DSI_ERR("failed to turn on lp link clocks, rc=%d\n", + rc); + goto error_disable_master; + } + } + + if (l_type & DSI_LINK_HS_CLK) { + rc = dsi_link_hs_clk_start(&clk->hs_clks, + DSI_LINK_CLK_START, i); + if (rc) { + DSI_ERR("failed to turn on hs link clocks, rc=%d\n", + rc); + goto error_disable_master; + } + } + } + return rc; + +error_disable_master: + if (l_type == DSI_LINK_LP_CLK) + (void)dsi_link_lp_clk_stop(&m_clks->lp_clks); + else if (l_type == DSI_LINK_HS_CLK) + (void)dsi_link_hs_clk_stop(&m_clks->hs_clks); +error: + return rc; +} + +static int dsi_display_core_clk_disable(struct dsi_core_clks *clks, + u32 ctrl_count, u32 master_ndx) +{ + int rc = 0; + int i; + struct dsi_core_clks *clk, *m_clks; + + /* + * In case of split DSI usecases, clock for slave DSI controllers should + * be disabled first before disabling clock for master controller. Slave + * controllers in the clock context refer to controller which source + * clock from another controller. 
+ */ + + m_clks = &clks[master_ndx]; + + /* Turn off non-master core clocks */ + for (i = 0; i < ctrl_count; i++) { + clk = &clks[i]; + if (!clk || (clk == m_clks)) + continue; + + rc = dsi_core_clk_stop(clk); + if (rc) { + DSI_DEBUG("failed to turn off clocks, rc=%d\n", rc); + goto error; + } + } + + rc = dsi_core_clk_stop(m_clks); + if (rc) { + DSI_ERR("failed to turn off master clocks, rc=%d\n", rc); + goto error; + } + +error: + return rc; +} + +static int dsi_display_link_clk_disable(struct dsi_link_clks *clks, + enum dsi_lclk_type l_type, u32 ctrl_count, u32 master_ndx) +{ + int rc = 0; + int i; + struct dsi_link_clks *clk, *m_clks; + struct dsi_clk_mngr *mngr; + + mngr = container_of(clks, struct dsi_clk_mngr, link_clks[master_ndx]); + + /* + * In case of split DSI usecases, clock for slave DSI controllers should + * be disabled first before disabling clock for master controller. Slave + * controllers in the clock context refer to controller which source + * clock from another controller. 
+ */ + + m_clks = &clks[master_ndx]; + + /* Turn off non-master link clocks */ + for (i = 0; i < ctrl_count; i++) { + clk = &clks[i]; + if (!clk || (clk == m_clks)) + continue; + + if (l_type & DSI_LINK_LP_CLK) { + rc = dsi_link_lp_clk_stop(&clk->lp_clks); + if (rc) + DSI_ERR("failed to turn off lp link clocks, rc=%d\n", + rc); + } + + if (l_type & DSI_LINK_HS_CLK) { + rc = dsi_link_hs_clk_stop(&clk->hs_clks); + if (rc) + DSI_ERR("failed to turn off hs link clocks, rc=%d\n", + rc); + } + } + + if (l_type & DSI_LINK_LP_CLK) { + rc = dsi_link_lp_clk_stop(&m_clks->lp_clks); + if (rc) + DSI_ERR("failed to turn off master lp link clocks, rc=%d\n", + rc); + } + + if (l_type & DSI_LINK_HS_CLK) { + rc = dsi_link_hs_clk_stop(&m_clks->hs_clks); + if (rc) + DSI_ERR("failed to turn off master hs link clocks, rc=%d\n", + rc); + mngr->phy_pll_toggle_cb(mngr->priv_data, false); + } + + return rc; +} + +static int dsi_clk_update_link_clk_state(struct dsi_clk_mngr *mngr, + struct dsi_link_clks *l_clks, enum dsi_lclk_type l_type, u32 l_state, + bool enable) +{ + int rc = 0; + + if (!mngr) + return -EINVAL; + + if (enable) { + if (mngr->pre_clkon_cb) { + rc = mngr->pre_clkon_cb(mngr->priv_data, DSI_LINK_CLK, + l_type, l_state); + if (rc) { + DSI_ERR("pre link clk on cb failed for type %d\n", + l_type); + goto error; + } + } + rc = dsi_display_link_clk_enable(l_clks, l_type, + mngr->dsi_ctrl_count, mngr->master_ndx); + if (rc) { + DSI_ERR("failed to start link clk type %d rc=%d\n", + l_type, rc); + goto error; + } + + if (mngr->post_clkon_cb) { + rc = mngr->post_clkon_cb(mngr->priv_data, DSI_LINK_CLK, + l_type, l_state); + if (rc) { + DSI_ERR("post link clk on cb failed for type %d\n", + l_type); + goto error; + } + } + } else { + if (mngr->pre_clkoff_cb) { + rc = mngr->pre_clkoff_cb(mngr->priv_data, + DSI_LINK_CLK, l_type, l_state); + if (rc) + DSI_ERR("pre link clk off cb failed\n"); + } + + rc = dsi_display_link_clk_disable(l_clks, l_type, + mngr->dsi_ctrl_count, mngr->master_ndx); 
+ if (rc) { + DSI_ERR("failed to stop link clk type %d, rc = %d\n", + l_type, rc); + goto error; + } + + if (mngr->post_clkoff_cb) { + rc = mngr->post_clkoff_cb(mngr->priv_data, + DSI_LINK_CLK, l_type, l_state); + if (rc) + DSI_ERR("post link clk off cb failed\n"); + } + } + +error: + return rc; +} + +static int dsi_update_core_clks(struct dsi_clk_mngr *mngr, + struct dsi_core_clks *c_clks) +{ + int rc = 0; + + if (mngr->core_clk_state == DSI_CLK_OFF) { + rc = mngr->pre_clkon_cb(mngr->priv_data, + DSI_CORE_CLK, + DSI_LINK_NONE, + DSI_CLK_ON); + if (rc) { + DSI_ERR("failed to turn on MDP FS rc= %d\n", rc); + goto error; + } + } + rc = dsi_display_core_clk_enable(c_clks, mngr->dsi_ctrl_count, + mngr->master_ndx); + if (rc) { + DSI_ERR("failed to turn on core clks rc = %d\n", rc); + goto error; + } + + if (mngr->post_clkon_cb) { + rc = mngr->post_clkon_cb(mngr->priv_data, + DSI_CORE_CLK, + DSI_LINK_NONE, + DSI_CLK_ON); + if (rc) + DSI_ERR("post clk on cb failed, rc = %d\n", rc); + } + mngr->core_clk_state = DSI_CLK_ON; +error: + return rc; +} + +static int dsi_update_clk_state(struct dsi_clk_mngr *mngr, + struct dsi_core_clks *c_clks, u32 c_state, + struct dsi_link_clks *l_clks, u32 l_state) +{ + int rc = 0; + bool l_c_on = false; + + if (!mngr) + return -EINVAL; + + DSI_DEBUG("c_state = %d, l_state = %d\n", + c_clks ? c_state : -1, l_clks ? l_state : -1); + /* + * Below is the sequence to toggle DSI clocks: + * 1. For ON sequence, Core clocks before link clocks + * 2. For OFF sequence, Link clocks before core clocks. 
+ */ + if (c_clks && (c_state == DSI_CLK_ON)) + rc = dsi_update_core_clks(mngr, c_clks); + + if (rc) + goto error; + + if (l_clks) { + if (l_state == DSI_CLK_ON) { + rc = dsi_clk_update_link_clk_state(mngr, l_clks, + DSI_LINK_LP_CLK, l_state, true); + if (rc) + goto error; + + rc = dsi_clk_update_link_clk_state(mngr, l_clks, + DSI_LINK_HS_CLK, l_state, true); + if (rc) + goto error; + } else { + /* + * Two conditions that need to be checked for Link + * clocks: + * 1. Link clocks need core clocks to be on when + * transitioning from EARLY_GATE to OFF state. + * 2. ULPS mode might have to be enabled in case of OFF + * state. For ULPS, Link clocks should be turned ON + * first before they are turned off again. + * + * If Link is going from EARLY_GATE to OFF state AND + * Core clock is already in EARLY_GATE or OFF state, + * turn on Core clocks and link clocks. + * + * ULPS state is managed as part of the pre_clkoff_cb. + */ + if ((l_state == DSI_CLK_OFF) && + (mngr->link_clk_state == + DSI_CLK_EARLY_GATE) && + (mngr->core_clk_state != + DSI_CLK_ON)) { + rc = dsi_display_core_clk_enable( + mngr->core_clks, mngr->dsi_ctrl_count, + mngr->master_ndx); + if (rc) { + DSI_ERR("core clks did not start\n"); + goto error; + } + + rc = dsi_display_link_clk_enable(l_clks, + (DSI_LINK_LP_CLK & DSI_LINK_HS_CLK), + mngr->dsi_ctrl_count, mngr->master_ndx); + if (rc) { + DSI_ERR("LP Link clks did not start\n"); + goto error; + } + l_c_on = true; + DSI_DEBUG("ECG: core and Link_on\n"); + } + + rc = dsi_clk_update_link_clk_state(mngr, l_clks, + DSI_LINK_HS_CLK, l_state, false); + if (rc) + goto error; + + rc = dsi_clk_update_link_clk_state(mngr, l_clks, + DSI_LINK_LP_CLK, l_state, false); + if (rc) + goto error; + + /* + * This check is to save unnecessary clock state + * change when going from EARLY_GATE to OFF. In the + * case where the request happens for both Core and Link + * clocks in the same call, core clocks need to be + * turned on first before OFF state can be entered. 
+ * + * Core clocks are turned on here for Link clocks to go + * to OFF state. If core clock request is also present, + * then core clocks can be turned off Core clocks are + * transitioned to OFF state. + */ + if (l_c_on && (!(c_clks && (c_state == DSI_CLK_OFF) + && (mngr->core_clk_state == + DSI_CLK_EARLY_GATE)))) { + rc = dsi_display_core_clk_disable( + mngr->core_clks, mngr->dsi_ctrl_count, + mngr->master_ndx); + if (rc) { + DSI_ERR("core clks did not stop\n"); + goto error; + } + + l_c_on = false; + DSI_DEBUG("ECG: core off\n"); + } else + DSI_DEBUG("ECG: core off skip\n"); + } + + mngr->link_clk_state = l_state; + } + + if (c_clks && (c_state != DSI_CLK_ON)) { + /* + * When going to OFF state from EARLY GATE state, Core clocks + * should be turned on first so that the IOs can be clamped. + * l_c_on flag is set, then the core clocks were turned before + * to the Link clocks go to OFF state. So Core clocks are + * already ON and this step can be skipped. + * + * IOs are clamped in pre_clkoff_cb callback. 
+ */ + if ((c_state == DSI_CLK_OFF) && + (mngr->core_clk_state == + DSI_CLK_EARLY_GATE) && !l_c_on) { + rc = dsi_display_core_clk_enable(mngr->core_clks, + mngr->dsi_ctrl_count, mngr->master_ndx); + if (rc) { + DSI_ERR("core clks did not start\n"); + goto error; + } + DSI_DEBUG("ECG: core on\n"); + } else + DSI_DEBUG("ECG: core on skip\n"); + + if (mngr->pre_clkoff_cb) { + rc = mngr->pre_clkoff_cb(mngr->priv_data, + DSI_CORE_CLK, + DSI_LINK_NONE, + c_state); + if (rc) + DSI_ERR("pre core clk off cb failed\n"); + } + + rc = dsi_display_core_clk_disable(c_clks, mngr->dsi_ctrl_count, + mngr->master_ndx); + if (rc) { + DSI_ERR("failed to turn off core clks rc = %d\n", rc); + goto error; + } + + if (c_state == DSI_CLK_OFF) { + if (mngr->post_clkoff_cb) { + rc = mngr->post_clkoff_cb(mngr->priv_data, + DSI_CORE_CLK, + DSI_LINK_NONE, + DSI_CLK_OFF); + if (rc) + DSI_ERR("post clkoff cb fail, rc = %d\n", + rc); + } + } + mngr->core_clk_state = c_state; + } + +error: + return rc; +} + +static int dsi_recheck_clk_state(struct dsi_clk_mngr *mngr) +{ + int rc = 0; + struct list_head *pos = NULL; + struct dsi_clk_client_info *c; + u32 new_core_clk_state = DSI_CLK_OFF; + u32 new_link_clk_state = DSI_CLK_OFF; + u32 old_c_clk_state = DSI_CLK_OFF; + u32 old_l_clk_state = DSI_CLK_OFF; + struct dsi_core_clks *c_clks = NULL; + struct dsi_link_clks *l_clks = NULL; + + /* + * Conditions to maintain DSI manager clock state based on + * clock states of various clients: + * 1. If any client has clock in ON state, DSI manager clock state + * should be ON. + * 2. If any client is in ECG state with rest of them turned OFF, + * go to Early gate state. + * 3. If all clients have clocks as OFF, then go to OFF state. 
+ */ + list_for_each(pos, &mngr->client_list) { + c = list_entry(pos, struct dsi_clk_client_info, list); + if (c->core_clk_state == DSI_CLK_ON) { + new_core_clk_state = DSI_CLK_ON; + break; + } else if (c->core_clk_state == DSI_CLK_EARLY_GATE) { + new_core_clk_state = DSI_CLK_EARLY_GATE; + } + } + + list_for_each(pos, &mngr->client_list) { + c = list_entry(pos, struct dsi_clk_client_info, list); + if (c->link_clk_state == DSI_CLK_ON) { + new_link_clk_state = DSI_CLK_ON; + break; + } else if (c->link_clk_state == DSI_CLK_EARLY_GATE) { + new_link_clk_state = DSI_CLK_EARLY_GATE; + } + } + + if (new_core_clk_state != mngr->core_clk_state) + c_clks = mngr->core_clks; + + if (new_link_clk_state != mngr->link_clk_state) + l_clks = mngr->link_clks; + + old_c_clk_state = mngr->core_clk_state; + old_l_clk_state = mngr->link_clk_state; + + DSI_DEBUG("c_clk_state (%d -> %d)\n", old_c_clk_state, + new_core_clk_state); + DSI_DEBUG("l_clk_state (%d -> %d)\n", old_l_clk_state, + new_link_clk_state); + + if (c_clks || l_clks) { + rc = dsi_update_clk_state(mngr, c_clks, new_core_clk_state, + l_clks, new_link_clk_state); + if (rc) { + DSI_ERR("failed to update clock state, rc = %d\n", rc); + goto error; + } + } + +error: + return rc; +} + +int dsi_clk_req_state(void *client, enum dsi_clk_type clk, + enum dsi_clk_state state) +{ + int rc = 0; + struct dsi_clk_client_info *c = client; + struct dsi_clk_mngr *mngr; + bool changed = false; + + if (!client || !clk || clk > (DSI_CORE_CLK | DSI_LINK_CLK) || + state > DSI_CLK_EARLY_GATE) { + DSI_ERR("Invalid params, client = %pK, clk = 0x%x, state = %d\n", + client, clk, state); + return -EINVAL; + } + + mngr = c->mngr; + mutex_lock(&mngr->clk_mutex); + + DSI_DEBUG("[%s]%s: CLK=%d, new_state=%d, core=%d, linkl=%d\n", + mngr->name, c->name, clk, state, c->core_clk_state, + c->link_clk_state); + + /* + * Clock refcount handling as below: + * i. Increment refcount whenever ON is called. + * ii. 
Decrement refcount when transitioning from ON state to + * either OFF or EARLY_GATE. + * iii. Do not decrement refcount when changing from + * EARLY_GATE to OFF. + */ + if (state == DSI_CLK_ON) { + if (clk & DSI_CORE_CLK) { + c->core_refcount++; + if (c->core_clk_state != DSI_CLK_ON) { + c->core_clk_state = DSI_CLK_ON; + changed = true; + } + } + if (clk & DSI_LINK_CLK) { + c->link_refcount++; + if (c->link_clk_state != DSI_CLK_ON) { + c->link_clk_state = DSI_CLK_ON; + changed = true; + } + } + } else if ((state == DSI_CLK_EARLY_GATE) || + (state == DSI_CLK_OFF)) { + if (clk & DSI_CORE_CLK) { + if (c->core_refcount == 0) { + if ((c->core_clk_state == + DSI_CLK_EARLY_GATE) && + (state == DSI_CLK_OFF)) { + changed = true; + c->core_clk_state = DSI_CLK_OFF; + } else { + DSI_WARN("Core refcount is zero for %s\n", + c->name); + } + } else { + c->core_refcount--; + if (c->core_refcount == 0) { + c->core_clk_state = state; + changed = true; + } + } + } + if (clk & DSI_LINK_CLK) { + if (c->link_refcount == 0) { + if ((c->link_clk_state == + DSI_CLK_EARLY_GATE) && + (state == DSI_CLK_OFF)) { + changed = true; + c->link_clk_state = DSI_CLK_OFF; + } else { + DSI_WARN("Link refcount is zero for %s\n", + c->name); + } + } else { + c->link_refcount--; + if (c->link_refcount == 0) { + c->link_clk_state = state; + changed = true; + } + } + } + } + DSI_DEBUG("[%s]%s: change=%d, Core (ref=%d, state=%d), Link (ref=%d, state=%d)\n", + mngr->name, c->name, changed, c->core_refcount, + c->core_clk_state, c->link_refcount, c->link_clk_state); + + if (changed) { + rc = dsi_recheck_clk_state(mngr); + if (rc) + DSI_ERR("Failed to adjust clock state rc = %d\n", rc); + } + + mutex_unlock(&mngr->clk_mutex); + return rc; +} + +DEFINE_MUTEX(dsi_mngr_clk_mutex); + +static int dsi_display_link_clk_force_update(void *client) +{ + int rc = 0; + struct dsi_clk_client_info *c = client; + struct dsi_clk_mngr *mngr; + struct dsi_link_clks *l_clks; + + mngr = c->mngr; + mutex_lock(&mngr->clk_mutex); + + 
l_clks = mngr->link_clks; + + /* + * When link_clk_state is DSI_CLK_OFF, don't change DSI clock rate + * since it is possible to be overwritten, and return -EAGAIN to + * dynamic DSI writing interface to defer the reenabling to the next + * drm commit. + */ + if (mngr->link_clk_state == DSI_CLK_OFF) { + rc = -EAGAIN; + goto error; + } + + rc = dsi_clk_update_link_clk_state(mngr, l_clks, (DSI_LINK_LP_CLK | + DSI_LINK_HS_CLK), DSI_CLK_OFF, false); + if (rc) + goto error; + + rc = dsi_clk_update_link_clk_state(mngr, l_clks, (DSI_LINK_LP_CLK | + DSI_LINK_HS_CLK), DSI_CLK_ON, true); + if (rc) + goto error; + +error: + mutex_unlock(&mngr->clk_mutex); + return rc; + +} + +int dsi_display_link_clk_force_update_ctrl(void *handle) +{ + int rc = 0; + + if (!handle) { + DSI_ERR("Invalid arg\n"); + return -EINVAL; + } + + mutex_lock(&dsi_mngr_clk_mutex); + + rc = dsi_display_link_clk_force_update(handle); + + mutex_unlock(&dsi_mngr_clk_mutex); + + return rc; +} + +int dsi_display_clk_ctrl(void *handle, + u32 clk_type, u32 clk_state) +{ + int rc = 0; + + if ((!handle) || (clk_type > DSI_ALL_CLKS) || + (clk_state > DSI_CLK_EARLY_GATE)) { + DSI_ERR("Invalid arg\n"); + return -EINVAL; + } + + mutex_lock(&dsi_mngr_clk_mutex); + rc = dsi_clk_req_state(handle, clk_type, clk_state); + if (rc) + DSI_ERR("failed set clk state, rc = %d\n", rc); + mutex_unlock(&dsi_mngr_clk_mutex); + + return rc; +} + +void *dsi_register_clk_handle(void *clk_mngr, char *client) +{ + void *handle = NULL; + struct dsi_clk_mngr *mngr = clk_mngr; + struct dsi_clk_client_info *c; + + if (!mngr) { + DSI_ERR("bad params\n"); + return ERR_PTR(-EINVAL); + } + + mutex_lock(&mngr->clk_mutex); + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) { + handle = ERR_PTR(-ENOMEM); + goto error; + } + + strlcpy(c->name, client, MAX_STRING_LEN); + c->mngr = mngr; + + list_add(&c->list, &mngr->client_list); + + DSI_DEBUG("[%s]: Added new client (%s)\n", mngr->name, c->name); + handle = c; +error: + 
mutex_unlock(&mngr->clk_mutex); + return handle; +} + +int dsi_deregister_clk_handle(void *client) +{ + int rc = 0; + struct dsi_clk_client_info *c = client; + struct dsi_clk_mngr *mngr; + struct list_head *pos = NULL; + struct list_head *tmp = NULL; + struct dsi_clk_client_info *node = NULL; + + if (!client) { + DSI_ERR("Invalid params\n"); + return -EINVAL; + } + + mngr = c->mngr; + DSI_DEBUG("%s: ENTER\n", mngr->name); + mutex_lock(&mngr->clk_mutex); + c->core_clk_state = DSI_CLK_OFF; + c->link_clk_state = DSI_CLK_OFF; + + rc = dsi_recheck_clk_state(mngr); + if (rc) { + DSI_ERR("clock state recheck failed rc = %d\n", rc); + goto error; + } + + list_for_each_safe(pos, tmp, &mngr->client_list) { + node = list_entry(pos, struct dsi_clk_client_info, + list); + if (node == c) { + list_del(&node->list); + DSI_DEBUG("Removed device (%s)\n", node->name); + kfree(node); + break; + } + } + +error: + mutex_unlock(&mngr->clk_mutex); + DSI_DEBUG("%s: EXIT, rc = %d\n", mngr->name, rc); + return rc; +} + +void dsi_display_clk_mngr_update_splash_status(void *clk_mgr, bool status) +{ + struct dsi_clk_mngr *mngr; + + if (!clk_mgr) { + DSI_ERR("Invalid params\n"); + return; + } + + mngr = (struct dsi_clk_mngr *)clk_mgr; + mngr->is_cont_splash_enabled = status; +} + +int dsi_display_dump_clk_handle_state(void *client) +{ + struct dsi_clk_mngr *mngr; + struct dsi_clk_client_info *c = client; + + if (!c || !c->mngr) { + DSI_ERR("Invalid params\n"); + return -EINVAL; + } + + mngr = c->mngr; + mutex_lock(&mngr->clk_mutex); + DSI_INFO("[%s]%s: Core (ref=%d, state=%d), Link (ref=%d, state=%d)\n", + mngr->name, c->name, c->core_refcount, + c->core_clk_state, c->link_refcount, + c->link_clk_state); + mutex_unlock(&mngr->clk_mutex); + + return 0; +} + +void *dsi_display_clk_mngr_register(struct dsi_clk_info *info) +{ + struct dsi_clk_mngr *mngr; + int i = 0; + + if (!info) { + DSI_ERR("Invalid params\n"); + return ERR_PTR(-EINVAL); + } + + mngr = kzalloc(sizeof(*mngr), GFP_KERNEL); + if 
(!mngr) { + mngr = ERR_PTR(-ENOMEM); + goto error; + } + + mutex_init(&mngr->clk_mutex); + mngr->dsi_ctrl_count = info->dsi_ctrl_count; + mngr->master_ndx = info->master_ndx; + + if (mngr->dsi_ctrl_count > MAX_DSI_CTRL) { + kfree(mngr); + return ERR_PTR(-EINVAL); + } + + for (i = 0; i < mngr->dsi_ctrl_count; i++) { + memcpy(&mngr->core_clks[i].clks, &info->c_clks[i], + sizeof(struct dsi_core_clk_info)); + memcpy(&mngr->link_clks[i].hs_clks, &info->l_hs_clks[i], + sizeof(struct dsi_link_hs_clk_info)); + memcpy(&mngr->link_clks[i].lp_clks, &info->l_lp_clks[i], + sizeof(struct dsi_link_lp_clk_info)); + mngr->ctrl_index[i] = info->ctrl_index[i]; + } + + INIT_LIST_HEAD(&mngr->client_list); + mngr->pre_clkon_cb = info->pre_clkon_cb; + mngr->post_clkon_cb = info->post_clkon_cb; + mngr->pre_clkoff_cb = info->pre_clkoff_cb; + mngr->post_clkoff_cb = info->post_clkoff_cb; + mngr->phy_config_cb = info->phy_config_cb; + mngr->phy_pll_toggle_cb = info->phy_pll_toggle_cb; + mngr->priv_data = info->priv_data; + mngr->phy_pll_bypass = info->phy_pll_bypass; + memcpy(mngr->name, info->name, MAX_STRING_LEN); + +error: + DSI_DEBUG("EXIT, rc = %ld\n", PTR_ERR(mngr)); + return mngr; +} + +int dsi_display_clk_mngr_deregister(void *clk_mngr) +{ + int rc = 0; + struct dsi_clk_mngr *mngr = clk_mngr; + struct list_head *position = NULL; + struct list_head *tmp = NULL; + struct dsi_clk_client_info *node = NULL; + + if (!mngr) { + DSI_ERR("Invalid params\n"); + return -EINVAL; + } + + DSI_DEBUG("%s: ENTER\n", mngr->name); + mutex_lock(&mngr->clk_mutex); + + list_for_each_safe(position, tmp, &mngr->client_list) { + node = list_entry(position, struct dsi_clk_client_info, + list); + list_del(&node->list); + DSI_DEBUG("Removed device (%s)\n", node->name); + kfree(node); + } + + rc = dsi_recheck_clk_state(mngr); + if (rc) + DSI_ERR("failed to disable all clocks\n"); + + mutex_unlock(&mngr->clk_mutex); + DSI_DEBUG("%s: EXIT, rc = %d\n", mngr->name, rc); + kfree(mngr); + return rc; +} + +/** + * 
dsi_clk_acquire_mngr_lock() - acquire clk manager mutex lock + * @client: DSI clock client pointer. + */ +void dsi_clk_acquire_mngr_lock(void *client) +{ + struct dsi_clk_mngr *mngr; + struct dsi_clk_client_info *c = client; + + mngr = c->mngr; + mutex_lock(&mngr->clk_mutex); +} + +/** + * dsi_clk_release_mngr_lock() - release clk manager mutex lock + * @client: DSI clock client pointer. + */ +void dsi_clk_release_mngr_lock(void *client) +{ + struct dsi_clk_mngr *mngr; + struct dsi_clk_client_info *c = client; + + mngr = c->mngr; + mutex_unlock(&mngr->clk_mutex); +} diff --git a/msm/dsi/dsi_ctrl.c b/msm/dsi/dsi_ctrl.c new file mode 100644 index 000000000..436f4cbd5 --- /dev/null +++ b/msm/dsi/dsi_ctrl.c @@ -0,0 +1,4284 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021-2024, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include