Diffstat (limited to 'drivers/gpu/drm/apple')
-rw-r--r--  drivers/gpu/drm/apple/.gitignore        1
-rw-r--r--  drivers/gpu/drm/apple/Kconfig          11
-rw-r--r--  drivers/gpu/drm/apple/Makefile         29
-rw-r--r--  drivers/gpu/drm/apple/apple_drv.c     501
-rw-r--r--  drivers/gpu/drm/apple/dcp-internal.h  150
-rw-r--r--  drivers/gpu/drm/apple/dcp.c           427
-rw-r--r--  drivers/gpu/drm/apple/dcp.h            59
-rw-r--r--  drivers/gpu/drm/apple/dummy-piodma.c   31
-rw-r--r--  drivers/gpu/drm/apple/iomfb.c        1689
-rw-r--r--  drivers/gpu/drm/apple/iomfb.h         406
-rw-r--r--  drivers/gpu/drm/apple/parser.c        459
-rw-r--r--  drivers/gpu/drm/apple/parser.h         32
-rw-r--r--  drivers/gpu/drm/apple/trace.c           9
-rw-r--r--  drivers/gpu/drm/apple/trace.h         166
14 files changed, 3970 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/apple/.gitignore b/drivers/gpu/drm/apple/.gitignore
new file mode 100644
index 000000000000..d9a77f3b59b2
--- /dev/null
+++ b/drivers/gpu/drm/apple/.gitignore
@@ -0,0 +1 @@
+*.hdrtest
diff --git a/drivers/gpu/drm/apple/Kconfig b/drivers/gpu/drm/apple/Kconfig
new file mode 100644
index 000000000000..9b9bcb7b5433
--- /dev/null
+++ b/drivers/gpu/drm/apple/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+config DRM_APPLE
+ tristate "DRM Support for Apple display controllers"
+ depends on DRM && OF && ARM64
+ depends on ARCH_APPLE || COMPILE_TEST
+ select DRM_KMS_HELPER
+ select DRM_KMS_DMA_HELPER
+ select DRM_GEM_DMA_HELPER
+ select VIDEOMODE_HELPERS
+ help
+ Say Y if you have an Apple Silicon chipset.
diff --git a/drivers/gpu/drm/apple/Makefile b/drivers/gpu/drm/apple/Makefile
new file mode 100644
index 000000000000..2502f781a5dc
--- /dev/null
+++ b/drivers/gpu/drm/apple/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+CFLAGS_trace.o = -I$(src)
+
+appledrm-y := apple_drv.o
+
+apple_dcp-y := dcp.o iomfb.o parser.o
+apple_dcp-$(CONFIG_TRACING) += trace.o
+
+apple_piodma-y := dummy-piodma.o
+
+obj-$(CONFIG_DRM_APPLE) += appledrm.o
+obj-$(CONFIG_DRM_APPLE) += apple_dcp.o
+obj-$(CONFIG_DRM_APPLE) += apple_piodma.o
+
+# header test
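+# (each header is compiled stand-alone, via -include against an empty
+# translation unit, to check that it is self-contained)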
+
+# exclude some broken headers from the test coverage
+no-header-test := \
+
+always-y += \
+ $(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \
+ $(shell cd $(srctree)/$(src) && find * -name '*.h')))
+
+quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
+ cmd_hdrtest = $(CC) $(filter-out $(CFLAGS_GCOV), $(c_flags)) -S -o /dev/null -x c /dev/null -include $<; touch $@
+
+$(obj)/%.hdrtest: $(src)/%.h FORCE
+ $(call if_changed_dep,hdrtest)
diff --git a/drivers/gpu/drm/apple/apple_drv.c b/drivers/gpu/drm/apple/apple_drv.c
new file mode 100644
index 000000000000..c6483f3011a9
--- /dev/null
+++ b/drivers/gpu/drm/apple/apple_drv.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+/* Based on meson driver which is
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ * Copyright (C) 2014 Endless Mobile
+ */
+
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+
+#include <drm/drm_aperture.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_fixed.h>
+
+#include "dcp.h"
+
+#define DRIVER_NAME "apple"
+#define DRIVER_DESC "Apple display controller DRM driver"
+
+#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
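+
+/*
+ * For example, FRAC_16_16(1, 4) == 0x4000 (0.25 in 16.16 fixed point) and
+ * FRAC_16_16(2, 1) == 0x20000 (2.0); these are the minimum and maximum
+ * scaling factors passed to drm_atomic_helper_check_plane_state() below.
+ */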
+
+#define MAX_COPROCESSORS 2
+
+struct apple_drm_private {
+ struct drm_device drm;
+};
+
+DEFINE_DRM_GEM_DMA_FOPS(apple_fops);
+
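+/*
+ * Dumb buffers use a pitch aligned to 64 bytes: for example, a 1920-wide
+ * XRGB8888 (32 bpp) buffer gets pitch = ALIGN(1920 * 32 / 8, 64) = 7680
+ * bytes.
+ */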
+static int apple_drm_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
+ args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
+ args->size = args->pitch * args->height;
+
+ return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
+}
+
+static const struct drm_driver apple_drm_driver = {
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(apple_drm_gem_dumb_create),
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = "20210901",
+ .major = 1,
+ .minor = 0,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+ .fops = &apple_fops,
+};
+
+static int apple_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state;
+ struct drm_crtc_state *crtc_state;
+
+ new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+
+ if (!new_plane_state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ /*
+ * DCP limits downscaling to 2x and upscaling to 4x. Attempting to
+ * scale outside these bounds errors out when swapping.
+ *
+ * This function also takes care of clipping the src/dest rectangles,
+ * which is required for correct operation; otherwise partially
+ * off-screen surfaces may appear corrupted.
+ *
+ * DCP does not distinguish plane types in the hardware, so we set
+ * can_position. If the primary plane does not fill the screen, the
+ * hardware will fill in zeroes (black).
+ */
+ return drm_atomic_helper_check_plane_state(new_plane_state,
+ crtc_state,
+ FRAC_16_16(1, 4),
+ FRAC_16_16(2, 1),
+ true, true);
+}
+
+static void apple_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ /* Handled in atomic_flush */
+}
+
+static const struct drm_plane_helper_funcs apple_plane_helper_funcs = {
+ .atomic_check = apple_plane_atomic_check,
+ .atomic_update = apple_plane_atomic_update,
+};
+
+static const struct drm_plane_funcs apple_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+/*
+ * Table of supported formats, mapping from DRM fourccs to DCP fourccs.
+ *
+ * For future work, DCP supports more formats not listed, including YUV
+ * formats, an extra RGBA format, and a biplanar RGB10_A8 format (fourcc b3a8)
+ * used for HDR.
+ *
+ * Note: we don't have non-alpha formats, but userspace breaks without XRGB.
+ * It doesn't matter for the primary plane, but cursors/overlays must not
+ * advertise formats without alpha.
+ */
+static const u32 dcp_formats[] = {
+ // DRM_FORMAT_XRGB2101010,
+ // DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+};
+
+static const u64 apple_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static struct drm_plane *apple_plane_init(struct drm_device *dev,
+ unsigned long possible_crtcs,
+ enum drm_plane_type type)
+{
+ int ret;
+ struct drm_plane *plane;
+
+ plane = devm_kzalloc(dev->dev, sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_universal_plane_init(dev, plane, possible_crtcs,
+ &apple_plane_funcs,
+ dcp_formats, ARRAY_SIZE(dcp_formats),
+ apple_format_modifiers, type, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_plane_helper_add(plane, &apple_plane_helper_funcs);
+
+ return plane;
+}
+
+static int apple_enable_vblank(struct drm_crtc *crtc)
+{
+ to_apple_crtc(crtc)->vsync_disabled = false;
+
+ return 0;
+}
+
+static void apple_disable_vblank(struct drm_crtc *crtc)
+{
+ to_apple_crtc(crtc)->vsync_disabled = true;
+}
+
+static enum drm_connector_status
+apple_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct apple_connector *apple_connector = to_apple_connector(connector);
+
+ return apple_connector->connected ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+static void apple_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ if (crtc_state->active_changed && crtc_state->active) {
+ struct apple_crtc *apple_crtc = to_apple_crtc(crtc);
+ dev_dbg(&apple_crtc->dcp->dev, "%s", __func__);
+ dcp_poweron(apple_crtc->dcp);
+ dev_dbg(&apple_crtc->dcp->dev, "%s finished", __func__);
+ }
+ drm_crtc_vblank_on(crtc);
+}
+
+static void apple_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ drm_crtc_vblank_off(crtc);
+
+ if (crtc_state->active_changed && !crtc_state->active) {
+ struct apple_crtc *apple_crtc = to_apple_crtc(crtc);
+ dev_dbg(&apple_crtc->dcp->dev, "%s", __func__);
+ dcp_poweroff(apple_crtc->dcp);
+ dev_dbg(&apple_crtc->dcp->dev, "%s finished", __func__);
+ }
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ crtc->state->event = NULL;
+ }
+}
+
+static void apple_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct apple_crtc *apple_crtc = to_apple_crtc(crtc);
+ unsigned long flags;
+
+ if (crtc->state->event) {
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ apple_crtc->event = crtc->state->event;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ crtc->state->event = NULL;
+ }
+}
+
+static void dcp_atomic_commit_tail(struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = old_state->dev;
+
+ drm_atomic_helper_commit_modeset_disables(dev, old_state);
+
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
+
+ drm_atomic_helper_commit_planes(dev, old_state,
+ DRM_PLANE_COMMIT_ACTIVE_ONLY);
+
+ drm_atomic_helper_fake_vblank(old_state);
+
+ drm_atomic_helper_commit_hw_done(old_state);
+
+ drm_atomic_helper_wait_for_flip_done(dev, old_state);
+
+ drm_atomic_helper_cleanup_planes(dev, old_state);
+}
+
+
+static const struct drm_crtc_funcs apple_crtc_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .destroy = drm_crtc_cleanup,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .set_config = drm_atomic_helper_set_config,
+ .enable_vblank = apple_enable_vblank,
+ .disable_vblank = apple_disable_vblank,
+};
+
+static const struct drm_mode_config_funcs apple_mode_config_funcs = {
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+ .fb_create = drm_gem_fb_create,
+};
+
+static const struct drm_mode_config_helper_funcs apple_mode_config_helpers = {
+ .atomic_commit_tail = dcp_atomic_commit_tail,
+};
+
+static const struct drm_connector_funcs apple_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .detect = apple_connector_detect,
+};
+
+static const struct drm_connector_helper_funcs apple_connector_helper_funcs = {
+ .get_modes = dcp_get_modes,
+ .mode_valid = dcp_mode_valid,
+};
+
+static const struct drm_crtc_helper_funcs apple_crtc_helper_funcs = {
+ .atomic_begin = apple_crtc_atomic_begin,
+ .atomic_check = dcp_crtc_atomic_check,
+ .atomic_flush = dcp_flush,
+ .atomic_enable = apple_crtc_atomic_enable,
+ .atomic_disable = apple_crtc_atomic_disable,
+};
+
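+/*
+ * The connector type comes from the "apple,connector-type" property of the
+ * DCP's device tree node, e.g. (illustrative fragment):
+ *
+ *   dcp {
+ *       apple,connector-type = "HDMI-A";
+ *   };
+ *
+ * "eDP", "HDMI-A" and "USB-C" are recognized; anything else is reported as
+ * an unknown connector.
+ */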
+static int apple_probe_per_dcp(struct device *dev,
+ struct drm_device *drm,
+ struct platform_device *dcp,
+ int num)
+{
+ struct apple_crtc *crtc;
+ struct apple_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_plane *primary;
+ int con_type;
+ int ret;
+
+ primary = apple_plane_init(drm, 1U << num, DRM_PLANE_TYPE_PRIMARY);
+
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
+
+ crtc = devm_kzalloc(dev, sizeof(*crtc), GFP_KERNEL);
+ if (!crtc)
+ return -ENOMEM;
+
+ ret = drm_crtc_init_with_planes(drm, &crtc->base, primary, NULL,
+ &apple_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
+ drm_crtc_helper_add(&crtc->base, &apple_crtc_helper_funcs);
+
+ encoder = devm_kzalloc(dev, sizeof(*encoder), GFP_KERNEL);
+ if (!encoder)
+ return -ENOMEM;
+
+ encoder->possible_crtcs = drm_crtc_mask(&crtc->base);
+ ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
+ if (ret)
+ return ret;
+
+ connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return -ENOMEM;
+
+ drm_connector_helper_add(&connector->base,
+ &apple_connector_helper_funcs);
+
+ if (of_property_match_string(dcp->dev.of_node, "apple,connector-type", "eDP") >= 0)
+ con_type = DRM_MODE_CONNECTOR_eDP;
+ else if (of_property_match_string(dcp->dev.of_node, "apple,connector-type", "HDMI-A") >= 0)
+ con_type = DRM_MODE_CONNECTOR_HDMIA;
+ else if (of_property_match_string(dcp->dev.of_node, "apple,connector-type", "USB-C") >= 0)
+ con_type = DRM_MODE_CONNECTOR_USB;
+ else
+ con_type = DRM_MODE_CONNECTOR_Unknown;
+
+ ret = drm_connector_init(drm, &connector->base, &apple_connector_funcs,
+ con_type);
+ if (ret)
+ return ret;
+
+ connector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ connector->connected = false;
+ connector->dcp = dcp;
+
+ INIT_WORK(&connector->hotplug_wq, dcp_hotplug);
+
+ crtc->dcp = dcp;
+ dcp_link(dcp, crtc, connector);
+
+ return drm_connector_attach_encoder(&connector->base, encoder);
+}
+
+static int apple_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct apple_drm_private *apple;
+ struct platform_device *dcp[MAX_COPROCESSORS];
+ int ret, nr_dcp, i;
+
+ for (nr_dcp = 0; nr_dcp < MAX_COPROCESSORS; ++nr_dcp) {
+ struct device_node *np;
+ struct device_link *dcp_link;
+
+ np = of_parse_phandle(dev->of_node, "apple,coprocessors",
+ nr_dcp);
+
+ if (!np)
+ break;
+
+ dcp[nr_dcp] = of_find_device_by_node(np);
+
+ if (!dcp[nr_dcp])
+ return -ENODEV;
+
+ dcp_link = device_link_add(dev, &dcp[nr_dcp]->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!dcp_link) {
+ dev_err(dev, "Failed to link to DCP %d device", nr_dcp);
+ return -EINVAL;
+ }
+
+ if (dcp_link->supplier->links.status != DL_DEV_DRIVER_BOUND)
+ return -EPROBE_DEFER;
+ }
+
+ /* Need at least 1 DCP for a display subsystem */
+ if (nr_dcp < 1)
+ return -ENODEV;
+
+ // remove conflicting framebuffers before registering our DRM device
+ ret = drm_aperture_remove_framebuffers(false, &apple_drm_driver);
+ if (ret)
+ return ret;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ apple = devm_drm_dev_alloc(dev, &apple_drm_driver,
+ struct apple_drm_private, drm);
+ if (IS_ERR(apple))
+ return PTR_ERR(apple);
+
+ platform_set_drvdata(pdev, &apple->drm);
+
+ ret = drm_vblank_init(&apple->drm, nr_dcp);
+ if (ret)
+ return ret;
+
+ ret = drmm_mode_config_init(&apple->drm);
+ if (ret)
+ goto err_unload;
+
+ /*
+ * IOMFB::UPPipeDCP_H13P::verify_surfaces produces the error "plane
+ * requires a minimum of 32x32 for the source buffer" if smaller
+ */
+ apple->drm.mode_config.min_width = 32;
+ apple->drm.mode_config.min_height = 32;
+
+ /* Unknown maximum; use the iMac (24-inch, 2021) display resolution as
+ * the maximum for now.
+ * TODO: this is the max framebuffer size, not the maximal supported
+ * output resolution. DCP reports the maximal framebuffer size; take it
+ * from there.
+ */
+ apple->drm.mode_config.max_width = 4480;
+ apple->drm.mode_config.max_height = 2520;
+
+ apple->drm.mode_config.funcs = &apple_mode_config_funcs;
+ apple->drm.mode_config.helper_private = &apple_mode_config_helpers;
+
+ for (i = 0; i < nr_dcp; ++i) {
+ ret = apple_probe_per_dcp(dev, &apple->drm, dcp[i], i);
+
+ if (ret)
+ goto err_unload;
+
+ ret = dcp_start(dcp[i]);
+
+ if (ret)
+ goto err_unload;
+ }
+
+ drm_mode_config_reset(&apple->drm);
+
+ ret = drm_dev_register(&apple->drm, 0);
+ if (ret)
+ goto err_unload;
+
+ drm_fbdev_generic_setup(&apple->drm, 32);
+
+ return 0;
+
+err_unload:
+ /* the drm device is devm-managed; no explicit drm_dev_put() here */
+ return ret;
+}
+
+static int apple_platform_remove(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ drm_dev_unregister(drm);
+
+ return 0;
+}
+
+static const struct of_device_id of_match[] = {
+ { .compatible = "apple,display-subsystem" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_match);
+
+static struct platform_driver apple_platform_driver = {
+ .driver = {
+ .name = "apple-drm",
+ .of_match_table = of_match,
+ },
+ .probe = apple_platform_probe,
+ .remove = apple_platform_remove,
+};
+
+module_platform_driver(apple_platform_driver);
+
+MODULE_AUTHOR("Alyssa Rosenzweig <alyssa@rosenzweig.io>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/gpu/drm/apple/dcp-internal.h b/drivers/gpu/drm/apple/dcp-internal.h
new file mode 100644
index 000000000000..6624672109c3
--- /dev/null
+++ b/drivers/gpu/drm/apple/dcp-internal.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#ifndef __APPLE_DCP_INTERNAL_H__
+#define __APPLE_DCP_INTERNAL_H__
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#include "iomfb.h"
+
+#define DCP_MAX_PLANES 2
+
+struct apple_dcp;
+
+enum {
+ SYSTEM_ENDPOINT = 0x20,
+ TEST_ENDPOINT = 0x21,
+ DCP_EXPERT_ENDPOINT = 0x22,
+ DISP0_ENDPOINT = 0x23,
+ DPTX_ENDPOINT = 0x2a,
+ HDCP_ENDPOINT = 0x2b,
+ REMOTE_ALLOC_ENDPOINT = 0x2d,
+ IOMFB_ENDPOINT = 0x37,
+};
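+
+/*
+ * Of these endpoints, only IOMFB_ENDPOINT is handled by this driver so far
+ * (see dcp_recv_msg() in dcp.c); the others are listed for reference.
+ */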
+
+/* Temporary backing for a chunked transfer via setDCPAVPropStart/Chunk/End */
+struct dcp_chunks {
+ size_t length;
+ void *data;
+};
+
+#define DCP_MAX_MAPPINGS (128) /* should be enough */
+#define MAX_DISP_REGISTERS (7)
+
+struct dcp_mem_descriptor {
+ size_t size;
+ void *buf;
+ dma_addr_t dva;
+ struct sg_table map;
+ u64 reg;
+};
+
+/* Limit on call stack depth (arbitrary). Some nesting is required */
+#define DCP_MAX_CALL_DEPTH 8
+
+typedef void (*dcp_callback_t)(struct apple_dcp *, void *, void *);
+
+struct dcp_call_channel {
+ dcp_callback_t callbacks[DCP_MAX_CALL_DEPTH];
+ void *cookies[DCP_MAX_CALL_DEPTH];
+ void *output[DCP_MAX_CALL_DEPTH];
+ u16 end[DCP_MAX_CALL_DEPTH];
+
+ /* Current depth of the call stack. Less than DCP_MAX_CALL_DEPTH */
+ u8 depth;
+};
+
+struct dcp_cb_channel {
+ u8 depth;
+ void *output[DCP_MAX_CALL_DEPTH];
+};
+
+struct dcp_fb_reference {
+ struct list_head head;
+ struct drm_framebuffer *fb;
+};
+
+#define MAX_NOTCH_HEIGHT 160
+
+/* TODO: move IOMFB members to its own struct */
+struct apple_dcp {
+ struct device *dev;
+ struct platform_device *piodma;
+ struct device_link *piodma_link;
+ struct apple_rtkit *rtk;
+ struct apple_crtc *crtc;
+ struct apple_connector *connector;
+
+ /* Coprocessor control register */
+ void __iomem *coproc_reg;
+
+ /* mask for DCP IO virtual addresses shared over rtkit */
+ u64 asc_dram_mask;
+
+ /* DCP has crashed */
+ bool crashed;
+
+ /************* IOMFB **************************************************
+ * Everything below is mostly used inside IOMFB, but it could make *
+ * sense to keep some of the members in apple_dcp. *
+ **********************************************************************/
+
+ /* clock rate request by dcp */
+ struct clk *clk;
+
+ /* DCP shared memory */
+ void *shmem;
+
+ /* Display registers mappable to the DCP */
+ struct resource *disp_registers[MAX_DISP_REGISTERS];
+ unsigned int nr_disp_registers;
+
+ /* Bitmap of memory descriptors used for mappings made by the DCP */
+ DECLARE_BITMAP(memdesc_map, DCP_MAX_MAPPINGS);
+
+ /* Indexed table of memory descriptors */
+ struct dcp_mem_descriptor memdesc[DCP_MAX_MAPPINGS];
+
+ struct dcp_call_channel ch_cmd, ch_oobcmd;
+ struct dcp_cb_channel ch_cb, ch_oobcb, ch_async;
+
+ /* Active chunked transfer. There can only be one at a time. */
+ struct dcp_chunks chunks;
+
+ /* Queued swap. Owned by the DCP to avoid per-swap memory allocation */
+ struct dcp_swap_submit_req swap;
+
+ /* Current display mode */
+ bool valid_mode;
+ struct dcp_set_digital_out_mode_req mode;
+
+ /* Is the DCP booted? */
+ bool active;
+
+ /* eDP display without DP-HDMI conversion */
+ bool main_display;
+
+ bool ignore_swap_complete;
+
+ /* Modes valid for the connected display */
+ struct dcp_display_mode *modes;
+ unsigned int nr_modes;
+
+ /* Attributes of the connected display */
+ int width_mm, height_mm;
+
+ unsigned int notch_height;
+
+ /* Work item for sending vblank events when a dcp swap is not possible */
+ struct work_struct vblank_wq;
+
+ /* List of referenced drm_framebuffers which can be unreferenced
+ * on the next successfully completed swap.
+ */
+ struct list_head swapped_out_fbs;
+};
+
+#endif /* __APPLE_DCP_INTERNAL_H__ */
diff --git a/drivers/gpu/drm/apple/dcp.c b/drivers/gpu/drm/apple/dcp.c
new file mode 100644
index 000000000000..c333ea61c49b
--- /dev/null
+++ b/drivers/gpu/drm/apple/dcp.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/align.h>
+#include <linux/apple-mailbox.h>
+#include <linux/soc/apple/rtkit.h>
+#include <linux/completion.h>
+
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "dcp.h"
+#include "dcp-internal.h"
+#include "parser.h"
+#include "trace.h"
+
+#define APPLE_DCP_COPROC_CPU_CONTROL 0x44
+#define APPLE_DCP_COPROC_CPU_CONTROL_RUN BIT(4)
+
+#define DCP_BOOT_TIMEOUT msecs_to_jiffies(1000)
+
+/* HACK: moved here to avoid circular dependency between apple_drv and dcp */
+void dcp_drm_crtc_vblank(struct apple_crtc *crtc)
+{
+ unsigned long flags;
+
+ if (crtc->vsync_disabled)
+ return;
+
+ drm_crtc_handle_vblank(&crtc->base);
+
+ spin_lock_irqsave(&crtc->base.dev->event_lock, flags);
+ if (crtc->event) {
+ drm_crtc_send_vblank_event(&crtc->base, crtc->event);
+ drm_crtc_vblank_put(&crtc->base);
+ crtc->event = NULL;
+ }
+ spin_unlock_irqrestore(&crtc->base.dev->event_lock, flags);
+}
+
+void dcp_set_dimensions(struct apple_dcp *dcp)
+{
+ int i;
+
+ /* Set the connector info */
+ if (dcp->connector) {
+ struct drm_connector *connector = &dcp->connector->base;
+
+ mutex_lock(&connector->dev->mode_config.mutex);
+ connector->display_info.width_mm = dcp->width_mm;
+ connector->display_info.height_mm = dcp->height_mm;
+ mutex_unlock(&connector->dev->mode_config.mutex);
+ }
+
+ /*
+ * Fix up any probed modes. Modes are created when parsing
+ * TimingElements, dimensions are calculated when parsing
+ * DisplayAttributes, and TimingElements may be sent first
+ */
+ for (i = 0; i < dcp->nr_modes; ++i) {
+ dcp->modes[i].mode.width_mm = dcp->width_mm;
+ dcp->modes[i].mode.height_mm = dcp->height_mm;
+ }
+}
+
+/*
+ * Helper to send a DRM vblank event. We do not know how to call
+ * swap_submit_dcp without surfaces. To avoid timeouts in
+ * drm_atomic_helper_wait_for_vblanks, send a vblank event via a workqueue.
+ */
+static void dcp_delayed_vblank(struct work_struct *work)
+{
+ struct apple_dcp *dcp;
+
+ dcp = container_of(work, struct apple_dcp, vblank_wq);
+ mdelay(5);
+ dcp_drm_crtc_vblank(dcp->crtc);
+}
+
+static void dcp_recv_msg(void *cookie, u8 endpoint, u64 message)
+{
+ struct apple_dcp *dcp = cookie;
+
+ trace_dcp_recv_msg(dcp, endpoint, message);
+
+ switch (endpoint) {
+ case IOMFB_ENDPOINT:
+ return iomfb_recv_msg(dcp, message);
+ default:
+ WARN(endpoint, "unknown DCP endpoint %hhu", endpoint);
+ }
+}
+
+static void dcp_rtk_crashed(void *cookie)
+{
+ struct apple_dcp *dcp = cookie;
+
+ dcp->crashed = true;
+ dev_err(dcp->dev, "DCP has crashed");
+ if (dcp->connector) {
+ dcp->connector->connected = 0;
+ schedule_work(&dcp->connector->hotplug_wq);
+ }
+}
+
+static int dcp_rtk_shmem_setup(void *cookie, struct apple_rtkit_shmem *bfr)
+{
+ struct apple_dcp *dcp = cookie;
+
+ if (bfr->iova) {
+ struct iommu_domain *domain =
+ iommu_get_domain_for_dev(dcp->dev);
+ phys_addr_t phy_addr;
+
+ if (!domain)
+ return -ENOMEM;
+
+ // TODO: get map from device-tree
+ phy_addr = iommu_iova_to_phys(domain,
+ bfr->iova & ~dcp->asc_dram_mask);
+ if (!phy_addr)
+ return -ENOMEM;
+
+ // TODO: verify phy_addr, cache attribute
+ bfr->buffer = memremap(phy_addr, bfr->size, MEMREMAP_WB);
+ if (!bfr->buffer)
+ return -ENOMEM;
+
+ bfr->is_mapped = true;
+ dev_info(dcp->dev,
+ "shmem_setup: iova: %lx -> pa: %lx -> iomem: %lx",
+ (uintptr_t)bfr->iova, (uintptr_t)phy_addr,
+ (uintptr_t)bfr->buffer);
+ } else {
+ bfr->buffer = dma_alloc_coherent(dcp->dev, bfr->size,
+ &bfr->iova, GFP_KERNEL);
+ if (!bfr->buffer)
+ return -ENOMEM;
+
+ bfr->iova |= dcp->asc_dram_mask;
+
+ dev_info(dcp->dev, "shmem_setup: iova: %lx, buffer: %lx",
+ (uintptr_t)bfr->iova, (uintptr_t)bfr->buffer);
+ }
+
+ return 0;
+}
+
+static void dcp_rtk_shmem_destroy(void *cookie, struct apple_rtkit_shmem *bfr)
+{
+ struct apple_dcp *dcp = cookie;
+
+ if (bfr->is_mapped)
+ memunmap(bfr->buffer);
+ else
+ dma_free_coherent(dcp->dev, bfr->size, bfr->buffer,
+ bfr->iova & ~dcp->asc_dram_mask);
+}
+
+static const struct apple_rtkit_ops rtkit_ops = {
+ .crashed = dcp_rtk_crashed,
+ .recv_message = dcp_recv_msg,
+ .shmem_setup = dcp_rtk_shmem_setup,
+ .shmem_destroy = dcp_rtk_shmem_destroy,
+};
+
+void dcp_send_message(struct apple_dcp *dcp, u8 endpoint, u64 message)
+{
+ trace_dcp_send_msg(dcp, endpoint, message);
+ apple_rtkit_send_message(dcp->rtk, endpoint, message, NULL, false);
+}
+
+int dcp_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct platform_device *pdev = to_apple_crtc(crtc)->dcp;
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+ struct drm_plane_state *new_state, *old_state;
+ struct drm_plane *plane;
+ struct drm_crtc_state *crtc_state;
+ int plane_idx, plane_count = 0;
+ bool needs_modeset;
+
+ if (dcp->crashed)
+ return -EINVAL;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ needs_modeset = drm_atomic_crtc_needs_modeset(crtc_state) || !dcp->valid_mode;
+ if (!needs_modeset && !dcp->connector->connected) {
+ dev_err(dcp->dev, "crtc_atomic_check: disconnected but no modeset");
+ return -EINVAL;
+ }
+
+ for_each_oldnew_plane_in_state(state, plane, old_state, new_state, plane_idx) {
+ /* skip planes not for this crtc */
+ if (new_state->crtc != crtc)
+ continue;
+
+ plane_count += 1;
+ }
+
+ if (plane_count > DCP_MAX_PLANES) {
+ dev_err(dcp->dev, "crtc_atomic_check: Blend supports only 2 layers!");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dcp_crtc_atomic_check);
+
+void dcp_link(struct platform_device *pdev, struct apple_crtc *crtc,
+ struct apple_connector *connector)
+{
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+ dcp->crtc = crtc;
+ dcp->connector = connector;
+}
+EXPORT_SYMBOL_GPL(dcp_link);
+
+int dcp_start(struct platform_device *pdev)
+{
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+ int ret;
+
+ /* start RTKit endpoints */
+ ret = iomfb_start_rtkit(dcp);
+ if (ret) {
+ dev_err(dcp->dev, "Failed to start IOMFB endpoint: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(dcp_start);
+
+static struct platform_device *dcp_get_dev(struct device *dev, const char *name)
+{
+ struct platform_device *pdev;
+ struct device_node *node = of_parse_phandle(dev->of_node, name, 0);
+
+ if (!node)
+ return NULL;
+
+ pdev = of_find_device_by_node(node);
+ of_node_put(node);
+ return pdev;
+}
+
+static int dcp_get_disp_regs(struct apple_dcp *dcp)
+{
+ struct platform_device *pdev = to_platform_device(dcp->dev);
+ int count = pdev->num_resources - 1;
+ int i;
+
+ if (count <= 0 || count > MAX_DISP_REGISTERS)
+ return -EINVAL;
+
+ for (i = 0; i < count; ++i) {
+ dcp->disp_registers[i] =
+ platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
+ }
+
+ dcp->nr_disp_registers = count;
+ return 0;
+}
+
+static int dcp_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct apple_dcp *dcp;
+ u32 cpu_ctrl;
+ int ret;
+
+ dcp = devm_kzalloc(dev, sizeof(*dcp), GFP_KERNEL);
+ if (!dcp)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, dcp);
+ dcp->dev = dev;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ dcp->coproc_reg = devm_platform_ioremap_resource_byname(pdev, "coproc");
+ if (IS_ERR(dcp->coproc_reg))
+ return PTR_ERR(dcp->coproc_reg);
+
+ of_platform_default_populate(dev->of_node, NULL, dev);
+
+ dcp->piodma = dcp_get_dev(dev, "apple,piodma-mapper");
+ if (!dcp->piodma) {
+ dev_err(dev, "failed to find piodma\n");
+ return -ENODEV;
+ }
+
+ dcp->piodma_link = device_link_add(dev, &dcp->piodma->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!dcp->piodma_link) {
+ dev_err(dev, "Failed to link to piodma device");
+ return -EINVAL;
+ }
+
+ if (dcp->piodma_link->supplier->links.status != DL_DEV_DRIVER_BOUND)
+ return -EPROBE_DEFER;
+
+ ret = dcp_get_disp_regs(dcp);
+ if (ret) {
+ dev_err(dev, "failed to find display registers\n");
+ return ret;
+ }
+
+ dcp->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(dcp->clk))
+ return dev_err_probe(dev, PTR_ERR(dcp->clk),
+ "Unable to find clock\n");
+
+ ret = of_property_read_u64(dev->of_node, "apple,asc-dram-mask",
+ &dcp->asc_dram_mask);
+ if (ret)
+ dev_warn(dev, "failed read 'apple,asc-dram-mask': %d\n", ret);
+ dev_dbg(dev, "'apple,asc-dram-mask': 0x%011llx\n", dcp->asc_dram_mask);
+
+ ret = of_property_read_u32(dev->of_node, "apple,notch-height",
+ &dcp->notch_height);
+ if (dcp->notch_height > MAX_NOTCH_HEIGHT)
+ dcp->notch_height = MAX_NOTCH_HEIGHT;
+ if (dcp->notch_height > 0)
+ dev_info(dev, "Detected display with notch of %u pixel\n", dcp->notch_height);
+
+ bitmap_zero(dcp->memdesc_map, DCP_MAX_MAPPINGS);
+ // TODO: mem_desc IDs start at 1, for simplicity just skip the '0' entry
+ set_bit(0, dcp->memdesc_map);
+
+ INIT_WORK(&dcp->vblank_wq, dcp_delayed_vblank);
+
+ INIT_LIST_HEAD(&dcp->swapped_out_fbs);
+
+ cpu_ctrl =
+ readl_relaxed(dcp->coproc_reg + APPLE_DCP_COPROC_CPU_CONTROL);
+ writel_relaxed(cpu_ctrl | APPLE_DCP_COPROC_CPU_CONTROL_RUN,
+ dcp->coproc_reg + APPLE_DCP_COPROC_CPU_CONTROL);
+
+ dcp->rtk = devm_apple_rtkit_init(dev, dcp, "mbox", 0, &rtkit_ops);
+ if (IS_ERR(dcp->rtk))
+ return dev_err_probe(dev, PTR_ERR(dcp->rtk),
+ "Failed to intialize RTKit");
+
+ ret = apple_rtkit_wake(dcp->rtk);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to boot RTKit: %d", ret);
+
+ return ret;
+}
+
+/*
+ * We need to shutdown DCP before tearing down the display subsystem. Otherwise
+ * the DCP will crash and briefly flash a green screen of death.
+ */
+static void dcp_platform_shutdown(struct platform_device *pdev)
+{
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+ iomfb_shutdown(dcp);
+}
+
+static const struct of_device_id of_match[] = {
+ { .compatible = "apple,dcp" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_match);
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * We don't hold any useful persistent state, so for suspend/resume it suffices
+ * to power off/on the entire DCP. The firmware will sort out the details for
+ * us.
+ */
+static int dcp_suspend(struct device *dev)
+{
+ dcp_poweroff(to_platform_device(dev));
+ return 0;
+}
+
+static int dcp_resume(struct device *dev)
+{
+ dcp_poweron(to_platform_device(dev));
+ return 0;
+}
+
+static const struct dev_pm_ops dcp_pm_ops = {
+ .suspend = dcp_suspend,
+ .resume = dcp_resume,
+};
+#endif
+
+static struct platform_driver apple_platform_driver = {
+ .probe = dcp_platform_probe,
+ .shutdown = dcp_platform_shutdown,
+ .driver = {
+ .name = "apple-dcp",
+ .of_match_table = of_match,
+#ifdef CONFIG_PM_SLEEP
+ .pm = &dcp_pm_ops,
+#endif
+ },
+};
+
+module_platform_driver(apple_platform_driver);
+
+MODULE_AUTHOR("Alyssa Rosenzweig <alyssa@rosenzweig.io>");
+MODULE_DESCRIPTION("Apple Display Controller DRM driver");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/gpu/drm/apple/dcp.h b/drivers/gpu/drm/apple/dcp.h
new file mode 100644
index 000000000000..60e9bcfa4714
--- /dev/null
+++ b/drivers/gpu/drm/apple/dcp.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#ifndef __APPLE_DCP_H__
+#define __APPLE_DCP_H__
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_fourcc.h>
+
+#include "dcp-internal.h"
+#include "parser.h"
+
+struct apple_crtc {
+ struct drm_crtc base;
+ struct drm_pending_vblank_event *event;
+ bool vsync_disabled;
+
+ /* Reference to the DCP device owning this CRTC */
+ struct platform_device *dcp;
+};
+
+#define to_apple_crtc(x) container_of(x, struct apple_crtc, base)
+
+void dcp_hotplug(struct work_struct *work);
+
+struct apple_connector {
+ struct drm_connector base;
+ bool connected;
+
+ struct platform_device *dcp;
+
+ /* Work item for sending hotplug events to the associated device */
+ struct work_struct hotplug_wq;
+};
+
+#define to_apple_connector(x) container_of(x, struct apple_connector, base)
+
+void dcp_poweroff(struct platform_device *pdev);
+void dcp_poweron(struct platform_device *pdev);
+int dcp_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state);
+void dcp_link(struct platform_device *pdev, struct apple_crtc *apple,
+ struct apple_connector *connector);
+int dcp_start(struct platform_device *pdev);
+void dcp_flush(struct drm_crtc *crtc, struct drm_atomic_state *state);
+bool dcp_is_initialized(struct platform_device *pdev);
+void apple_crtc_vblank(struct apple_crtc *apple);
+void dcp_drm_crtc_vblank(struct apple_crtc *crtc);
+int dcp_get_modes(struct drm_connector *connector);
+int dcp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+void dcp_set_dimensions(struct apple_dcp *dcp);
+void dcp_send_message(struct apple_dcp *dcp, u8 endpoint, u64 message);
+
+int iomfb_start_rtkit(struct apple_dcp *dcp);
+void iomfb_shutdown(struct apple_dcp *dcp);
+/* rtkit message handler for IOMFB messages */
+void iomfb_recv_msg(struct apple_dcp *dcp, u64 message);
+
+#endif /* __APPLE_DCP_H__ */
diff --git a/drivers/gpu/drm/apple/dummy-piodma.c b/drivers/gpu/drm/apple/dummy-piodma.c
new file mode 100644
index 000000000000..05d3e6130bf1
--- /dev/null
+++ b/drivers/gpu/drm/apple/dummy-piodma.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+
+static int dcp_piodma_probe(struct platform_device *pdev)
+{
+ return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+}
+
+static const struct of_device_id of_match[] = {
+ { .compatible = "apple,dcp-piodma" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_match);
+
+static struct platform_driver dcp_piodma_platform_driver = {
+ .probe = dcp_piodma_probe,
+ .driver = {
+ .name = "apple,dcp-piodma",
+ .of_match_table = of_match,
+ },
+};
+
+module_platform_driver(dcp_piodma_platform_driver);
+
+MODULE_AUTHOR("Alyssa Rosenzweig <alyssa@rosenzweig.io>");
+MODULE_DESCRIPTION("[HACK] Apple DCP PIODMA shim");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/gpu/drm/apple/iomfb.c b/drivers/gpu/drm/apple/iomfb.c
new file mode 100644
index 000000000000..79e96070c45f
--- /dev/null
+++ b/drivers/gpu/drm/apple/iomfb.c
@@ -0,0 +1,1689 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/kref.h>
+#include <linux/align.h>
+#include <linux/apple-mailbox.h>
+#include <linux/soc/apple/rtkit.h>
+#include <linux/completion.h>
+
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "dcp.h"
+#include "dcp-internal.h"
+#include "iomfb.h"
+#include "parser.h"
+#include "trace.h"
+
+/* Register defines used in bandwidth setup structure */
+#define REG_SCRATCH (0x14)
+#define REG_SCRATCH_T600X (0x988)
+#define REG_DOORBELL (0x0)
+#define REG_DOORBELL_BIT (2)
+
+struct dcp_wait_cookie {
+ struct kref refcount;
+ struct completion done;
+};
+
+static void release_wait_cookie(struct kref *ref)
+{
+ struct dcp_wait_cookie *cookie;
+ cookie = container_of(ref, struct dcp_wait_cookie, refcount);
+
+ kfree(cookie);
+}
+
+static int dcp_tx_offset(enum dcp_context_id id)
+{
+ switch (id) {
+ case DCP_CONTEXT_CB:
+ case DCP_CONTEXT_CMD:
+ return 0x00000;
+ case DCP_CONTEXT_OOBCB:
+ case DCP_CONTEXT_OOBCMD:
+ return 0x08000;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int dcp_channel_offset(enum dcp_context_id id)
+{
+ switch (id) {
+ case DCP_CONTEXT_ASYNC:
+ return 0x40000;
+ case DCP_CONTEXT_CB:
+ return 0x60000;
+ case DCP_CONTEXT_OOBCB:
+ return 0x68000;
+ default:
+ return dcp_tx_offset(id);
+ }
+}
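+
+/*
+ * Resulting shared-memory layout, as offsets from the shmem base (derived
+ * from the two helpers above):
+ *
+ *   0x00000  TX for the in-band pair (CMD/CB)
+ *   0x08000  TX for the out-of-band pair (OOBCMD/OOBCB)
+ *   0x40000  RX for async callbacks
+ *   0x60000  RX for in-band callbacks
+ *   0x68000  RX for out-of-band callbacks
+ */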
+
+static inline u64 dcpep_set_shmem(u64 dart_va)
+{
+ return (DCPEP_TYPE_SET_SHMEM << DCPEP_TYPE_SHIFT) |
+ (DCPEP_FLAG_VALUE << DCPEP_FLAG_SHIFT) |
+ (dart_va << DCPEP_DVA_SHIFT);
+}
+
+static inline u64 dcpep_msg(enum dcp_context_id id, u32 length, u16 offset)
+{
+ return (DCPEP_TYPE_MESSAGE << DCPEP_TYPE_SHIFT) |
+ ((u64)id << DCPEP_CONTEXT_SHIFT) |
+ ((u64)offset << DCPEP_OFFSET_SHIFT) |
+ ((u64)length << DCPEP_LENGTH_SHIFT);
+}
+
+static inline u64 dcpep_ack(enum dcp_context_id id)
+{
+ return dcpep_msg(id, 0, 0) | DCPEP_ACK;
+}
+
+/*
+ * A channel is busy if we have sent a message that has yet to be
+ * acked. The driver must not send a message to a busy channel.
+ */
+static bool dcp_channel_busy(struct dcp_call_channel *ch)
+{
+ return (ch->depth != 0);
+}
+
+/* Get a call channel for a context */
+static struct dcp_call_channel *
+dcp_get_call_channel(struct apple_dcp *dcp, enum dcp_context_id context)
+{
+ switch (context) {
+ case DCP_CONTEXT_CMD:
+ case DCP_CONTEXT_CB:
+ return &dcp->ch_cmd;
+ case DCP_CONTEXT_OOBCMD:
+ case DCP_CONTEXT_OOBCB:
+ return &dcp->ch_oobcmd;
+ default:
+ return NULL;
+ }
+}
+
+/*
+ * Get the context ID passed to the DCP for a command we push. The rule is
+ * simple: callback contexts are used when replying to the DCP, command
+ * contexts are used otherwise. That corresponds to a nonzero or zero call
+ * stack depth, respectively. This rule frees the caller from tracking the
+ * call context manually.
+ */
+static enum dcp_context_id dcp_call_context(struct apple_dcp *dcp, bool oob)
+{
+ u8 depth = oob ? dcp->ch_oobcmd.depth : dcp->ch_cmd.depth;
+
+ if (depth)
+ return oob ? DCP_CONTEXT_OOBCB : DCP_CONTEXT_CB;
+ else
+ return oob ? DCP_CONTEXT_OOBCMD : DCP_CONTEXT_CMD;
+}
+
+/* Get a callback channel for a context */
+static struct dcp_cb_channel *dcp_get_cb_channel(struct apple_dcp *dcp,
+ enum dcp_context_id context)
+{
+ switch (context) {
+ case DCP_CONTEXT_CB:
+ return &dcp->ch_cb;
+ case DCP_CONTEXT_OOBCB:
+ return &dcp->ch_oobcb;
+ case DCP_CONTEXT_ASYNC:
+ return &dcp->ch_async;
+ default:
+ return NULL;
+ }
+}
+
+/* Get the start of a packet: after the end of the previous packet */
+static u16 dcp_packet_start(struct dcp_call_channel *ch, u8 depth)
+{
+ if (depth > 0)
+ return ch->end[depth - 1];
+ else
+ return 0;
+}
+
+/* Pushes and pops the depth of the call stack with safety checks */
+static u8 dcp_push_depth(u8 *depth)
+{
+ u8 ret = (*depth)++;
+
+ WARN_ON(ret >= DCP_MAX_CALL_DEPTH);
+ return ret;
+}
+
+static u8 dcp_pop_depth(u8 *depth)
+{
+ WARN_ON((*depth) == 0);
+
+ return --(*depth);
+}
+
+#define DCP_METHOD(tag, name) [name] = { #name, tag }
+
+const struct dcp_method_entry dcp_methods[dcpep_num_methods] = {
+ DCP_METHOD("A000", dcpep_late_init_signal),
+ DCP_METHOD("A029", dcpep_setup_video_limits),
+ DCP_METHOD("A034", dcpep_update_notify_clients_dcp),
+ DCP_METHOD("A357", dcpep_set_create_dfb),
+ DCP_METHOD("A401", dcpep_start_signal),
+ DCP_METHOD("A407", dcpep_swap_start),
+ DCP_METHOD("A408", dcpep_swap_submit),
+ DCP_METHOD("A410", dcpep_set_display_device),
+ DCP_METHOD("A411", dcpep_is_main_display),
+ DCP_METHOD("A412", dcpep_set_digital_out_mode),
+ DCP_METHOD("A439", dcpep_set_parameter_dcp),
+ DCP_METHOD("A443", dcpep_create_default_fb),
+ DCP_METHOD("A447", dcpep_enable_disable_video_power_savings),
+ DCP_METHOD("A454", dcpep_first_client_open),
+ DCP_METHOD("A460", dcpep_set_display_refresh_properties),
+ DCP_METHOD("A463", dcpep_flush_supports_power),
+ DCP_METHOD("A468", dcpep_set_power_state),
+};
+
+/* Call a DCP function given by a tag */
+static void dcp_push(struct apple_dcp *dcp, bool oob, enum dcpep_method method,
+ u32 in_len, u32 out_len, void *data, dcp_callback_t cb,
+ void *cookie)
+{
+ struct dcp_call_channel *ch = oob ? &dcp->ch_oobcmd : &dcp->ch_cmd;
+ enum dcp_context_id context = dcp_call_context(dcp, oob);
+
+ struct dcp_packet_header header = {
+ .in_len = in_len,
+ .out_len = out_len,
+
+ /* Tag is reversed due to endianness of the fourcc */
+ .tag[0] = dcp_methods[method].tag[3],
+ .tag[1] = dcp_methods[method].tag[2],
+ .tag[2] = dcp_methods[method].tag[1],
+ .tag[3] = dcp_methods[method].tag[0],
+ };
+
+ u8 depth = dcp_push_depth(&ch->depth);
+ u16 offset = dcp_packet_start(ch, depth);
+
+ void *out = dcp->shmem + dcp_tx_offset(context) + offset;
+ void *out_data = out + sizeof(header);
+ size_t data_len = sizeof(header) + in_len + out_len;
+
+ memcpy(out, &header, sizeof(header));
+
+ if (in_len > 0)
+ memcpy(out_data, data, in_len);
+
+ trace_iomfb_push(dcp, &dcp_methods[method], context, offset, depth);
+
+ ch->callbacks[depth] = cb;
+ ch->cookies[depth] = cookie;
+ ch->output[depth] = out + sizeof(header) + in_len;
+ ch->end[depth] = offset + ALIGN(data_len, DCP_PACKET_ALIGNMENT);
+
+ dcp_send_message(dcp, IOMFB_ENDPOINT,
+ dcpep_msg(context, data_len, offset));
+}
+
+#define DCP_THUNK_VOID(func, handle) \
+ static void func(struct apple_dcp *dcp, bool oob, dcp_callback_t cb, \
+ void *cookie) \
+ { \
+ dcp_push(dcp, oob, handle, 0, 0, NULL, cb, cookie); \
+ }
+
+#define DCP_THUNK_OUT(func, handle, T) \
+ static void func(struct apple_dcp *dcp, bool oob, dcp_callback_t cb, \
+ void *cookie) \
+ { \
+ dcp_push(dcp, oob, handle, 0, sizeof(T), NULL, cb, cookie); \
+ }
+
+#define DCP_THUNK_IN(func, handle, T) \
+ static void func(struct apple_dcp *dcp, bool oob, T *data, \
+ dcp_callback_t cb, void *cookie) \
+ { \
+ dcp_push(dcp, oob, handle, sizeof(T), 0, data, cb, cookie); \
+ }
+
+#define DCP_THUNK_INOUT(func, handle, T_in, T_out) \
+ static void func(struct apple_dcp *dcp, bool oob, T_in *data, \
+ dcp_callback_t cb, void *cookie) \
+ { \
+ dcp_push(dcp, oob, handle, sizeof(T_in), sizeof(T_out), data, \
+ cb, cookie); \
+ }
+
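+/*
+ * For instance, dcp_swap_submit() below expands to a function that pushes a
+ * struct dcp_swap_submit_req to the DCP and expects a struct
+ * dcp_swap_submit_resp back, delivered through the completion callback's
+ * out pointer.
+ */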
+DCP_THUNK_INOUT(dcp_swap_submit, dcpep_swap_submit, struct dcp_swap_submit_req,
+ struct dcp_swap_submit_resp);
+
+DCP_THUNK_INOUT(dcp_swap_start, dcpep_swap_start, struct dcp_swap_start_req,
+ struct dcp_swap_start_resp);
+
+DCP_THUNK_INOUT(dcp_set_power_state, dcpep_set_power_state,
+ struct dcp_set_power_state_req,
+ struct dcp_set_power_state_resp);
+
+DCP_THUNK_INOUT(dcp_set_digital_out_mode, dcpep_set_digital_out_mode,
+ struct dcp_set_digital_out_mode_req, u32);
+
+DCP_THUNK_INOUT(dcp_set_display_device, dcpep_set_display_device, u32, u32);
+
+DCP_THUNK_OUT(dcp_set_display_refresh_properties,
+ dcpep_set_display_refresh_properties, u32);
+
+DCP_THUNK_OUT(dcp_late_init_signal, dcpep_late_init_signal, u32);
+DCP_THUNK_IN(dcp_flush_supports_power, dcpep_flush_supports_power, u32);
+DCP_THUNK_OUT(dcp_create_default_fb, dcpep_create_default_fb, u32);
+DCP_THUNK_OUT(dcp_start_signal, dcpep_start_signal, u32);
+DCP_THUNK_VOID(dcp_setup_video_limits, dcpep_setup_video_limits);
+DCP_THUNK_VOID(dcp_set_create_dfb, dcpep_set_create_dfb);
+DCP_THUNK_VOID(dcp_first_client_open, dcpep_first_client_open);
+
+__maybe_unused
+DCP_THUNK_IN(dcp_update_notify_clients_dcp, dcpep_update_notify_clients_dcp,
+ struct dcp_update_notify_clients_dcp);
+
+DCP_THUNK_INOUT(dcp_set_parameter_dcp, dcpep_set_parameter_dcp,
+ struct dcp_set_parameter_dcp, u32);
+
+DCP_THUNK_INOUT(dcp_enable_disable_video_power_savings,
+ dcpep_enable_disable_video_power_savings, u32, int);
+
+DCP_THUNK_OUT(dcp_is_main_display, dcpep_is_main_display, u32);
+
+/*
+ * Parse a callback tag "D123" into the ID 123. Returns -EINVAL on failure.
+ * Tags are stored byte-reversed (cf. dcp_push), so "D123" sits in memory as
+ * "321D", hence the check for 'D' at index 3.
+ */
+static int dcp_parse_tag(char tag[4])
+{
+ u32 d[3];
+ int i;
+
+ if (tag[3] != 'D')
+ return -EINVAL;
+
+ for (i = 0; i < 3; ++i) {
+ d[i] = (u32)(tag[i] - '0');
+
+ if (d[i] > 9)
+ return -EINVAL;
+ }
+
+ return d[0] + (d[1] * 10) + (d[2] * 100);
+}
+
+/* Ack a callback from the DCP */
+static void dcp_ack(struct apple_dcp *dcp, enum dcp_context_id context)
+{
+ struct dcp_cb_channel *ch = dcp_get_cb_channel(dcp, context);
+
+ dcp_pop_depth(&ch->depth);
+ dcp_send_message(dcp, IOMFB_ENDPOINT,
+ dcpep_ack(context));
+}
+
+/* DCP callback handlers */
+static void dcpep_cb_nop(struct apple_dcp *dcp)
+{
+ /* No operation */
+}
+
+static u8 dcpep_cb_true(struct apple_dcp *dcp)
+{
+ return true;
+}
+
+static u8 dcpep_cb_false(struct apple_dcp *dcp)
+{
+ return false;
+}
+
+static u32 dcpep_cb_zero(struct apple_dcp *dcp)
+{
+ return 0;
+}
+
+static void dcpep_cb_swap_complete(struct apple_dcp *dcp,
+ struct dc_swap_complete_resp *resp)
+{
+ trace_iomfb_swap_complete(dcp, resp->swap_id);
+
+ if (!dcp->ignore_swap_complete)
+ dcp_drm_crtc_vblank(dcp->crtc);
+}
+
+static struct dcp_get_uint_prop_resp
+dcpep_cb_get_uint_prop(struct apple_dcp *dcp, struct dcp_get_uint_prop_req *req)
+{
+ /* unimplemented for now */
+ return (struct dcp_get_uint_prop_resp){ .value = 0 };
+}
+
+/*
+ * Callback to map a buffer allocated with allocate_buf for PIODMA usage.
+ * PIODMA is separate from the main DCP and uses its own IOVA space on a
+ * dedicated stream of the display DART, rather than the expected DCP DART.
+ *
+ * XXX: This relies on dma_get_sgtable in concert with dma_map_sgtable, which
+ * is a "fundamentally unsafe" operation according to the docs. And yet
+ * everyone does it...
+ */
+static struct dcp_map_buf_resp dcpep_cb_map_piodma(struct apple_dcp *dcp,
+ struct dcp_map_buf_req *req)
+{
+ struct sg_table *map;
+ int ret;
+
+ if (req->buffer >= ARRAY_SIZE(dcp->memdesc))
+ goto reject;
+
+ map = &dcp->memdesc[req->buffer].map;
+
+ if (!map->sgl)
+ goto reject;
+
+ /* Use PIODMA device instead of DCP to map against the right IOMMU. */
+ ret = dma_map_sgtable(&dcp->piodma->dev, map, DMA_BIDIRECTIONAL, 0);
+
+ if (ret)
+ goto reject;
+
+ return (struct dcp_map_buf_resp){ .dva = sg_dma_address(map->sgl) };
+
+reject:
+ dev_err(dcp->dev, "denying map of invalid buffer %llx for pidoma\n",
+ req->buffer);
+ return (struct dcp_map_buf_resp){ .ret = EINVAL };
+}
+
+static void dcpep_cb_unmap_piodma(struct apple_dcp *dcp,
+ struct dcp_unmap_buf_resp *resp)
+{
+ struct sg_table *map;
+ dma_addr_t dma_addr;
+
+ if (resp->buffer >= ARRAY_SIZE(dcp->memdesc)) {
+ dev_warn(dcp->dev, "unmap request for out of range buffer %llu",
+ resp->buffer);
+ return;
+ }
+
+ map = &dcp->memdesc[resp->buffer].map;
+
+ if (!map->sgl) {
+ dev_warn(dcp->dev,
+ "unmap for non-mapped buffer %llu iova:0x%08llx",
+ resp->buffer, resp->dva);
+ return;
+ }
+
+ dma_addr = sg_dma_address(map->sgl);
+ if (dma_addr != resp->dva) {
+ dev_warn(dcp->dev, "unmap buffer %llu address mismatch dma_addr:%llx dva:%llx",
+ resp->buffer, dma_addr, resp->dva);
+ return;
+ }
+
+ /* Use PIODMA device instead of DCP to unmap from the right IOMMU. */
+ dma_unmap_sgtable(&dcp->piodma->dev, map, DMA_BIDIRECTIONAL, 0);
+}
+
+/*
+ * Allocate an IOVA contiguous buffer mapped to the DCP. The buffer need not
+ * be physically contiguous, but we save the sgtable in case the buffer
+ * later needs to be mapped for PIODMA.
+ */
+static struct dcp_allocate_buffer_resp
+dcpep_cb_allocate_buffer(struct apple_dcp *dcp,
+ struct dcp_allocate_buffer_req *req)
+{
+ struct dcp_allocate_buffer_resp resp = { 0 };
+ struct dcp_mem_descriptor *memdesc;
+ u32 id;
+
+ resp.dva_size = ALIGN(req->size, 4096);
+ resp.mem_desc_id =
+ find_first_zero_bit(dcp->memdesc_map, DCP_MAX_MAPPINGS);
+
+ if (resp.mem_desc_id >= DCP_MAX_MAPPINGS) {
+ dev_warn(dcp->dev, "DCP overflowed mapping table, ignoring");
+ resp.dva_size = 0;
+ resp.mem_desc_id = 0;
+ return resp;
+ }
+ id = resp.mem_desc_id;
+ set_bit(id, dcp->memdesc_map);
+
+ memdesc = &dcp->memdesc[id];
+
+ memdesc->size = resp.dva_size;
+ memdesc->buf = dma_alloc_coherent(dcp->dev, memdesc->size,
+ &memdesc->dva, GFP_KERNEL);
+ if (!memdesc->buf) {
+ clear_bit(id, dcp->memdesc_map);
+ resp.dva_size = 0;
+ resp.mem_desc_id = 0;
+ return resp;
+ }
+
+ dma_get_sgtable(dcp->dev, &memdesc->map, memdesc->buf, memdesc->dva,
+ memdesc->size);
+ resp.dva = memdesc->dva;
+
+ return resp;
+}
+
+static u8 dcpep_cb_release_mem_desc(struct apple_dcp *dcp, u32 *mem_desc_id)
+{
+ struct dcp_mem_descriptor *memdesc;
+ u32 id = *mem_desc_id;
+
+ if (id >= DCP_MAX_MAPPINGS) {
+ dev_warn(dcp->dev,
+ "unmap request for out of range mem_desc_id %u", id);
+ return 0;
+ }
+
+ if (!test_and_clear_bit(id, dcp->memdesc_map)) {
+ dev_warn(dcp->dev, "unmap request for unused mem_desc_id %u",
+ id);
+ return 0;
+ }
+
+ memdesc = &dcp->memdesc[id];
+ if (memdesc->buf) {
+ dma_free_coherent(dcp->dev, memdesc->size, memdesc->buf,
+ memdesc->dva);
+
+ memdesc->buf = NULL;
+ memset(&memdesc->map, 0, sizeof(memdesc->map));
+ } else {
+ memdesc->reg = 0;
+ }
+
+ memdesc->size = 0;
+
+ return 1;
+}
+
+/* Validate that the specified region is a display register */
+static bool is_disp_register(struct apple_dcp *dcp, u64 start, u64 end)
+{
+ int i;
+
+ for (i = 0; i < dcp->nr_disp_registers; ++i) {
+ struct resource *r = dcp->disp_registers[i];
+
+ if ((start >= r->start) && (end <= r->end))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Map contiguous physical memory into the DCP's address space. The firmware
+ * uses this to map the display registers we advertise in
+ * sr_map_device_memory_with_index, so we bounds check against that list to
+ * guard against a malicious coprocessor.
+ */
+static struct dcp_map_physical_resp
+dcpep_cb_map_physical(struct apple_dcp *dcp, struct dcp_map_physical_req *req)
+{
+ int size = ALIGN(req->size, 4096);
+ u32 id;
+
+ if (!is_disp_register(dcp, req->paddr, req->paddr + size - 1)) {
+ dev_err(dcp->dev, "refusing to map phys address %llx size %llx",
+ req->paddr, req->size);
+ return (struct dcp_map_physical_resp){};
+ }
+
+ id = find_first_zero_bit(dcp->memdesc_map, DCP_MAX_MAPPINGS);
+ if (id >= DCP_MAX_MAPPINGS) {
+ dev_warn(dcp->dev, "DCP overflowed mapping table, ignoring");
+ return (struct dcp_map_physical_resp){};
+ }
+ set_bit(id, dcp->memdesc_map);
+ dcp->memdesc[id].size = size;
+ dcp->memdesc[id].reg = req->paddr;
+
+ return (struct dcp_map_physical_resp){
+ .dva_size = size,
+ .mem_desc_id = id,
+ .dva = dma_map_resource(dcp->dev, req->paddr, size,
+ DMA_BIDIRECTIONAL, 0),
+ };
+}
+
+static u64 dcpep_cb_get_frequency(struct apple_dcp *dcp)
+{
+ return clk_get_rate(dcp->clk);
+}
+
+static struct dcp_map_reg_resp dcpep_cb_map_reg(struct apple_dcp *dcp,
+ struct dcp_map_reg_req *req)
+{
+ if (req->index >= dcp->nr_disp_registers) {
+ dev_warn(dcp->dev, "attempted to read invalid reg index %u",
+ req->index);
+
+ return (struct dcp_map_reg_resp){ .ret = 1 };
+ } else {
+ struct resource *rsrc = dcp->disp_registers[req->index];
+
+ return (struct dcp_map_reg_resp){
+ .addr = rsrc->start, .length = resource_size(rsrc)
+ };
+ }
+}
+
+static struct dcp_read_edt_data_resp
+dcpep_cb_read_edt_data(struct apple_dcp *dcp, struct dcp_read_edt_data_req *req)
+{
+ return (struct dcp_read_edt_data_resp){
+ .value[0] = req->value[0],
+ .ret = 0,
+ };
+}
+
+/* Chunked data transfer for property dictionaries */
+static u8 dcpep_cb_prop_start(struct apple_dcp *dcp, u32 *length)
+{
+ if (dcp->chunks.data != NULL) {
+ dev_warn(dcp->dev, "ignoring spurious transfer start\n");
+ return false;
+ }
+
+ dcp->chunks.length = *length;
+ dcp->chunks.data = devm_kzalloc(dcp->dev, *length, GFP_KERNEL);
+
+ if (!dcp->chunks.data) {
+ dev_warn(dcp->dev, "failed to allocate chunks\n");
+ return false;
+ }
+
+ return true;
+}
+
+static u8 dcpep_cb_prop_chunk(struct apple_dcp *dcp,
+ struct dcp_set_dcpav_prop_chunk_req *req)
+{
+ if (!dcp->chunks.data) {
+ dev_warn(dcp->dev, "ignoring spurious chunk\n");
+ return false;
+ }
+
+ if (req->offset + req->length > dcp->chunks.length) {
+ dev_warn(dcp->dev, "ignoring overflowing chunk\n");
+ return false;
+ }
+
+ memcpy(dcp->chunks.data + req->offset, req->data, req->length);
+ return true;
+}
+
+static bool dcpep_process_chunks(struct apple_dcp *dcp,
+ struct dcp_set_dcpav_prop_end_req *req)
+{
+ struct dcp_parse_ctx ctx;
+ int ret;
+
+ if (!dcp->chunks.data) {
+ dev_warn(dcp->dev, "ignoring spurious end\n");
+ return false;
+ }
+
+ ret = parse(dcp->chunks.data, dcp->chunks.length, &ctx);
+
+ if (ret) {
+ dev_warn(dcp->dev, "bad header on dcpav props\n");
+ return false;
+ }
+
+ if (!strcmp(req->key, "TimingElements")) {
+ dcp->modes = enumerate_modes(&ctx, &dcp->nr_modes,
+ dcp->width_mm, dcp->height_mm,
+ dcp->notch_height);
+
+ if (IS_ERR(dcp->modes)) {
+ dev_warn(dcp->dev, "failed to parse modes\n");
+ dcp->modes = NULL;
+ dcp->nr_modes = 0;
+ return false;
+ }
+ } else if (!strcmp(req->key, "DisplayAttributes")) {
+ ret = parse_display_attributes(&ctx, &dcp->width_mm,
+ &dcp->height_mm);
+
+ if (ret) {
+ dev_warn(dcp->dev, "failed to parse display attribs\n");
+ return false;
+ }
+
+ dcp_set_dimensions(dcp);
+ }
+
+ return true;
+}
+
+static u8 dcpep_cb_prop_end(struct apple_dcp *dcp,
+ struct dcp_set_dcpav_prop_end_req *req)
+{
+ u8 resp = dcpep_process_chunks(dcp, req);
+
+ /* Reset for the next transfer */
+ devm_kfree(dcp->dev, dcp->chunks.data);
+ dcp->chunks.data = NULL;
+
+ return resp;
+}
+
+/* Boot sequence */
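+/*
+ * The handlers below are defined in reverse call order: dcpep_cb_boot_1
+ * kicks off set_create_dfb, and each completion callback chains the next
+ * step (create_default_fb, setup_video_limits, flush_supports_power,
+ * late_init_signal, set_display_refresh_properties) until boot_done acks
+ * the original callback.
+ */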
+static void boot_done(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ struct dcp_cb_channel *ch = &dcp->ch_cb;
+ u8 *succ = ch->output[ch->depth - 1];
+ dev_dbg(dcp->dev, "boot done");
+
+ *succ = true;
+ dcp_ack(dcp, DCP_CONTEXT_CB);
+}
+
+static void boot_5(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ dcp_set_display_refresh_properties(dcp, false, boot_done, NULL);
+}
+
+static void boot_4(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ dcp_late_init_signal(dcp, false, boot_5, NULL);
+}
+
+static void boot_3(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ u32 v_true = true;
+
+ dcp_flush_supports_power(dcp, false, &v_true, boot_4, NULL);
+}
+
+static void boot_2(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ dcp_setup_video_limits(dcp, false, boot_3, NULL);
+}
+
+static void boot_1_5(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ dcp_create_default_fb(dcp, false, boot_2, NULL);
+}
+
+/* Use special function signature to defer the ACK */
+static bool dcpep_cb_boot_1(struct apple_dcp *dcp, int tag, void *out, void *in)
+{
+ trace_iomfb_callback(dcp, tag, __func__);
+ dcp_set_create_dfb(dcp, false, boot_1_5, NULL);
+ return false;
+}
+
+static struct dcp_rt_bandwidth dcpep_cb_rt_bandwidth(struct apple_dcp *dcp)
+{
+ if (dcp->disp_registers[5] && dcp->disp_registers[6])
+ return (struct dcp_rt_bandwidth){
+ .reg_scratch =
+ dcp->disp_registers[5]->start + REG_SCRATCH,
+ .reg_doorbell =
+ dcp->disp_registers[6]->start + REG_DOORBELL,
+ .doorbell_bit = REG_DOORBELL_BIT,
+
+ .padding[3] = 0x4, // XXX: required by 11.x firmware
+ };
+ else if (dcp->disp_registers[4])
+ return (struct dcp_rt_bandwidth){
+ .reg_scratch = dcp->disp_registers[4]->start +
+ REG_SCRATCH_T600X,
+ .reg_doorbell = 0,
+ .doorbell_bit = 0,
+ };
+ else
+ return (struct dcp_rt_bandwidth){
+ .reg_scratch = 0,
+ .reg_doorbell = 0,
+ .doorbell_bit = 0,
+ };
+}
+
+/* Callback to get the current time as milliseconds since the UNIX epoch */
+static u64 dcpep_cb_get_time(struct apple_dcp *dcp)
+{
+ return ktime_to_ms(ktime_get_real());
+}
+
+struct dcp_swap_cookie {
+ struct kref refcount;
+ struct completion done;
+ u32 swap_id;
+};
+
+static void release_swap_cookie(struct kref *ref)
+{
+ struct dcp_swap_cookie *cookie;
+ cookie = container_of(ref, struct dcp_swap_cookie, refcount);
+
+ kfree(cookie);
+}
+
+static void dcp_swap_cleared(struct apple_dcp *dcp, void *data, void *cookie)
+{
+ struct dcp_swap_submit_resp *resp = data;
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ if (cookie) {
+ struct dcp_swap_cookie *info = cookie;
+ complete(&info->done);
+ kref_put(&info->refcount, release_swap_cookie);
+ }
+
+ if (resp->ret) {
+ dev_err(dcp->dev, "swap_clear failed! status %u\n", resp->ret);
+ dcp_drm_crtc_vblank(dcp->crtc);
+ return;
+ }
+
+ while (!list_empty(&dcp->swapped_out_fbs)) {
+ struct dcp_fb_reference *entry;
+ entry = list_first_entry(&dcp->swapped_out_fbs,
+ struct dcp_fb_reference, head);
+ if (entry->fb)
+ drm_framebuffer_put(entry->fb);
+ list_del(&entry->head);
+ kfree(entry);
+ }
+}
+
+static void dcp_swap_clear_started(struct apple_dcp *dcp, void *data,
+ void *cookie)
+{
+ struct dcp_swap_start_resp *resp = data;
+ dev_dbg(dcp->dev, "%s swap_id: %u", __func__, resp->swap_id);
+ dcp->swap.swap.swap_id = resp->swap_id;
+
+ if (cookie) {
+ struct dcp_swap_cookie *info = cookie;
+ info->swap_id = resp->swap_id;
+ }
+
+ dcp_swap_submit(dcp, false, &dcp->swap, dcp_swap_cleared, cookie);
+}
+
+static void dcp_on_final(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ struct dcp_wait_cookie *wait = cookie;
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ if (wait) {
+ complete(&wait->done);
+ kref_put(&wait->refcount, release_wait_cookie);
+ }
+}
+
+static void dcp_on_set_parameter(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ struct dcp_set_parameter_dcp param = {
+ .param = 14,
+ .value = { 0 },
+ .count = 1,
+ };
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ dcp_set_parameter_dcp(dcp, false, &param, dcp_on_final, cookie);
+}
+
+void dcp_poweron(struct platform_device *pdev)
+{
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+ struct dcp_wait_cookie *cookie;
+ struct dcp_set_power_state_req req = {
+ .unklong = 1,
+ };
+ int ret;
+ u32 handle;
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+ if (!cookie)
+ return;
+
+ init_completion(&cookie->done);
+ kref_init(&cookie->refcount);
+ /* increase refcount to ensure the receiver has a reference */
+ kref_get(&cookie->refcount);
+
+ if (dcp->main_display) {
+ handle = 0;
+ dcp_set_display_device(dcp, false, &handle, dcp_on_final,
+ cookie);
+ } else {
+ handle = 2;
+ dcp_set_display_device(dcp, false, &handle,
+ dcp_on_set_parameter, cookie);
+ }
+ dcp_set_power_state(dcp, true, &req, NULL, NULL);
+
+ ret = wait_for_completion_timeout(&cookie->done, msecs_to_jiffies(500));
+
+ if (ret == 0)
+ dev_warn(dcp->dev, "wait for power timed out");
+
+	kref_put(&cookie->refcount, release_wait_cookie);
+}
+EXPORT_SYMBOL(dcp_poweron);
+
+static void complete_set_powerstate(struct apple_dcp *dcp, void *out,
+ void *cookie)
+{
+ struct dcp_wait_cookie *wait = cookie;
+
+ if (wait) {
+ complete(&wait->done);
+ kref_put(&wait->refcount, release_wait_cookie);
+ }
+}
+
+void dcp_poweroff(struct platform_device *pdev)
+{
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+ int ret, swap_id;
+ struct dcp_set_power_state_req power_req = {
+ .unklong = 0,
+ };
+ struct dcp_swap_cookie *cookie;
+ struct dcp_wait_cookie *poff_cookie;
+ struct dcp_swap_start_req swap_req = { 0 };
+
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+ if (!cookie)
+ return;
+ init_completion(&cookie->done);
+ kref_init(&cookie->refcount);
+ /* increase refcount to ensure the receiver has a reference */
+ kref_get(&cookie->refcount);
+
+ // clear surfaces
+ memset(&dcp->swap, 0, sizeof(dcp->swap));
+
+ dcp->swap.swap.swap_enabled = DCP_REMOVE_LAYERS | 0x7;
+ dcp->swap.swap.swap_completed = DCP_REMOVE_LAYERS | 0x7;
+ dcp->swap.swap.unk_10c = 0xFF000000;
+
+ for (int l = 0; l < SWAP_SURFACES; l++)
+ dcp->swap.surf_null[l] = true;
+
+ dcp_swap_start(dcp, false, &swap_req, dcp_swap_clear_started, cookie);
+
+ ret = wait_for_completion_timeout(&cookie->done, msecs_to_jiffies(50));
+ swap_id = cookie->swap_id;
+ kref_put(&cookie->refcount, release_swap_cookie);
+ if (ret <= 0) {
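+		/* no swap_cleared callback within 50 ms; assume the DCP crashed */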
+ dcp->crashed = true;
+ return;
+ }
+
+ dev_dbg(dcp->dev, "%s: clear swap submitted: %u", __func__, swap_id);
+
+ poff_cookie = kzalloc(sizeof(*poff_cookie), GFP_KERNEL);
+ if (!poff_cookie)
+ return;
+ init_completion(&poff_cookie->done);
+ kref_init(&poff_cookie->refcount);
+ /* increase refcount to ensure the receiver has a reference */
+ kref_get(&poff_cookie->refcount);
+
+ dcp_set_power_state(dcp, false, &power_req, complete_set_powerstate,
+ poff_cookie);
+ ret = wait_for_completion_timeout(&poff_cookie->done,
+ msecs_to_jiffies(1000));
+
+ if (ret == 0)
+ dev_warn(dcp->dev, "setPowerState(0) timeout %u ms", 1000);
+ else if (ret > 0)
+ dev_dbg(dcp->dev,
+ "setPowerState(0) finished with %d ms to spare",
+ jiffies_to_msecs(ret));
+
+ kref_put(&poff_cookie->refcount, release_wait_cookie);
+ dev_dbg(dcp->dev, "%s: setPowerState(0) done", __func__);
+}
+EXPORT_SYMBOL(dcp_poweroff);
+
+/*
+ * Helper to send a DRM hotplug event. The DCP is accessed from a single
+ * (RTKit) thread. To handle hotplug callbacks, we need to call
+ * drm_kms_helper_hotplug_event, which does an atomic commit (via DCP) and
+ * waits for vblank (a DCP callback). That means we deadlock if we call from
+ * the RTKit thread! Instead, move the call to another thread via a workqueue.
+ */
+void dcp_hotplug(struct work_struct *work)
+{
+ struct apple_connector *connector;
+ struct drm_device *dev;
+ struct apple_dcp *dcp;
+
+ connector = container_of(work, struct apple_connector, hotplug_wq);
+ dev = connector->base.dev;
+
+ dcp = platform_get_drvdata(connector->dcp);
+ dev_info(dcp->dev, "%s: connected: %d", __func__, connector->connected);
+
+ /*
+ * DCP defers link training until we set a display mode. But we set
+ * display modes from atomic_flush, so userspace needs to trigger a
+ * flush, or the CRTC gets no signal.
+ */
+ if (!dcp->valid_mode && connector->connected) {
+ drm_connector_set_link_status_property(
+ &connector->base, DRM_MODE_LINK_STATUS_BAD);
+ }
+
+ if (dev && dev->registered)
+ drm_kms_helper_hotplug_event(dev);
+}
+EXPORT_SYMBOL_GPL(dcp_hotplug);
+
+static void dcpep_cb_hotplug(struct apple_dcp *dcp, u64 *connected)
+{
+ struct apple_connector *connector = dcp->connector;
+
+ /* DCP issues hotplug_gated callbacks after SetPowerState() calls on
+ * devices with display (macbooks, imacs). This must not result in
+ * connector state changes on DRM side. Some applications won't enable
+ * a CRTC with a connector in disconnected state. Weston after DPMS off
+ * is one example. dcp_is_main_display() returns true on devices with
+ * integrated display. Ignore the hotplug_gated() callbacks there.
+ */
+ if (dcp->main_display)
+ return;
+
+ /* Hotplug invalidates mode. DRM doesn't always handle this. */
+ if (!(*connected)) {
+ dcp->valid_mode = false;
+ /* after unplug swap will not complete until the next
+ * set_digital_out_mode */
+ schedule_work(&dcp->vblank_wq);
+ }
+
+ if (connector && connector->connected != !!(*connected)) {
+ connector->connected = !!(*connected);
+ dcp->valid_mode = false;
+ schedule_work(&connector->hotplug_wq);
+ }
+}
+
+static void
+dcpep_cb_swap_complete_intent_gated(struct apple_dcp *dcp,
+ struct dcp_swap_complete_intent_gated *info)
+{
+ trace_iomfb_swap_complete_intent_gated(dcp, info->swap_id,
+ info->width, info->height);
+}
+
+#define DCPEP_MAX_CB (1000)
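+
+/*
+ * Handler indices correspond to the numeric part of the four-character
+ * callback tag, e.g. [401] handles tag "D401". The tag bytes arrive
+ * reversed on the wire (hence the reversed print in the unknown-callback
+ * warning below); dcp_parse_tag() recovers the integer index.
+ */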
+
+/*
+ * Define type-safe trampolines. Define typedefs to enforce type-safety on the
+ * input data (so if the types don't match, gcc errors out).
+ */
+
+#define TRAMPOLINE_VOID(func, handler) \
+ static bool func(struct apple_dcp *dcp, int tag, void *out, void *in) \
+ { \
+ trace_iomfb_callback(dcp, tag, #handler); \
+ handler(dcp); \
+ return true; \
+ }
+
+#define TRAMPOLINE_IN(func, handler, T_in) \
+ typedef void (*callback_##handler)(struct apple_dcp *, T_in *); \
+ \
+ static bool func(struct apple_dcp *dcp, int tag, void *out, void *in) \
+ { \
+ callback_##handler cb = handler; \
+ \
+ trace_iomfb_callback(dcp, tag, #handler); \
+ cb(dcp, in); \
+ return true; \
+ }
+
+#define TRAMPOLINE_INOUT(func, handler, T_in, T_out) \
+ typedef T_out (*callback_##handler)(struct apple_dcp *, T_in *); \
+ \
+ static bool func(struct apple_dcp *dcp, int tag, void *out, void *in) \
+ { \
+ T_out *typed_out = out; \
+ callback_##handler cb = handler; \
+ \
+ trace_iomfb_callback(dcp, tag, #handler); \
+ *typed_out = cb(dcp, in); \
+ return true; \
+ }
+
+#define TRAMPOLINE_OUT(func, handler, T_out) \
+ static bool func(struct apple_dcp *dcp, int tag, void *out, void *in) \
+ { \
+ T_out *typed_out = out; \
+ \
+ trace_iomfb_callback(dcp, tag, #handler); \
+ *typed_out = handler(dcp); \
+ return true; \
+ }
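+
+/*
+ * For illustration, TRAMPOLINE_IN(trampoline_hotplug, dcpep_cb_hotplug, u64)
+ * expands to roughly:
+ *
+ *   typedef void (*callback_dcpep_cb_hotplug)(struct apple_dcp *, u64 *);
+ *
+ *   static bool trampoline_hotplug(struct apple_dcp *dcp, int tag,
+ *                                  void *out, void *in)
+ *   {
+ *       callback_dcpep_cb_hotplug cb = dcpep_cb_hotplug;
+ *
+ *       trace_iomfb_callback(dcp, tag, "dcpep_cb_hotplug");
+ *       cb(dcp, in);
+ *       return true;
+ *   }
+ *
+ * Assigning the handler to the typedef'd function pointer is what forces
+ * the compiler to check the argument types.
+ */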
+
+TRAMPOLINE_VOID(trampoline_nop, dcpep_cb_nop);
+TRAMPOLINE_OUT(trampoline_true, dcpep_cb_true, u8);
+TRAMPOLINE_OUT(trampoline_false, dcpep_cb_false, u8);
+TRAMPOLINE_OUT(trampoline_zero, dcpep_cb_zero, u32);
+TRAMPOLINE_IN(trampoline_swap_complete, dcpep_cb_swap_complete,
+ struct dc_swap_complete_resp);
+TRAMPOLINE_INOUT(trampoline_get_uint_prop, dcpep_cb_get_uint_prop,
+ struct dcp_get_uint_prop_req, struct dcp_get_uint_prop_resp);
+TRAMPOLINE_INOUT(trampoline_map_piodma, dcpep_cb_map_piodma,
+ struct dcp_map_buf_req, struct dcp_map_buf_resp);
+TRAMPOLINE_IN(trampoline_unmap_piodma, dcpep_cb_unmap_piodma,
+ struct dcp_unmap_buf_resp);
+TRAMPOLINE_INOUT(trampoline_allocate_buffer, dcpep_cb_allocate_buffer,
+ struct dcp_allocate_buffer_req,
+ struct dcp_allocate_buffer_resp);
+TRAMPOLINE_INOUT(trampoline_map_physical, dcpep_cb_map_physical,
+ struct dcp_map_physical_req, struct dcp_map_physical_resp);
+TRAMPOLINE_INOUT(trampoline_release_mem_desc, dcpep_cb_release_mem_desc, u32,
+ u8);
+TRAMPOLINE_INOUT(trampoline_map_reg, dcpep_cb_map_reg, struct dcp_map_reg_req,
+ struct dcp_map_reg_resp);
+TRAMPOLINE_INOUT(trampoline_read_edt_data, dcpep_cb_read_edt_data,
+ struct dcp_read_edt_data_req, struct dcp_read_edt_data_resp);
+TRAMPOLINE_INOUT(trampoline_prop_start, dcpep_cb_prop_start, u32, u8);
+TRAMPOLINE_INOUT(trampoline_prop_chunk, dcpep_cb_prop_chunk,
+ struct dcp_set_dcpav_prop_chunk_req, u8);
+TRAMPOLINE_INOUT(trampoline_prop_end, dcpep_cb_prop_end,
+ struct dcp_set_dcpav_prop_end_req, u8);
+TRAMPOLINE_OUT(trampoline_rt_bandwidth, dcpep_cb_rt_bandwidth,
+ struct dcp_rt_bandwidth);
+TRAMPOLINE_OUT(trampoline_get_frequency, dcpep_cb_get_frequency, u64);
+TRAMPOLINE_OUT(trampoline_get_time, dcpep_cb_get_time, u64);
+TRAMPOLINE_IN(trampoline_hotplug, dcpep_cb_hotplug, u64);
+TRAMPOLINE_IN(trampoline_swap_complete_intent_gated,
+ dcpep_cb_swap_complete_intent_gated,
+ struct dcp_swap_complete_intent_gated);
+
+bool (*const dcpep_cb_handlers[DCPEP_MAX_CB])(struct apple_dcp *, int, void *,
+ void *) = {
+ [0] = trampoline_true, /* did_boot_signal */
+ [1] = trampoline_true, /* did_power_on_signal */
+ [2] = trampoline_nop, /* will_power_off_signal */
+ [3] = trampoline_rt_bandwidth,
+ [100] = trampoline_nop, /* match_pmu_service */
+ [101] = trampoline_zero, /* get_display_default_stride */
+ [103] = trampoline_nop, /* set_boolean_property */
+ [106] = trampoline_nop, /* remove_property */
+ [107] = trampoline_true, /* create_provider_service */
+ [108] = trampoline_true, /* create_product_service */
+ [109] = trampoline_true, /* create_pmu_service */
+ [110] = trampoline_true, /* create_iomfb_service */
+ [111] = trampoline_false, /* create_backlight_service */
+ [116] = dcpep_cb_boot_1,
+ [117] = trampoline_false, /* is_dark_boot */
+	[118] = trampoline_false, /* is_dark_boot / is_waking_from_hibernate */
+ [120] = trampoline_read_edt_data,
+ [122] = trampoline_prop_start,
+ [123] = trampoline_prop_chunk,
+ [124] = trampoline_prop_end,
+ [201] = trampoline_map_piodma,
+ [202] = trampoline_unmap_piodma,
+ [206] = trampoline_true, /* match_pmu_service_2 */
+ [207] = trampoline_true, /* match_backlight_service */
+ [208] = trampoline_get_time,
+ [211] = trampoline_nop, /* update_backlight_factor_prop */
+ [300] = trampoline_nop, /* pr_publish */
+ [401] = trampoline_get_uint_prop,
+ [404] = trampoline_nop, /* sr_set_uint_prop */
+ [406] = trampoline_nop, /* set_fx_prop */
+ [408] = trampoline_get_frequency,
+ [411] = trampoline_map_reg,
+ [413] = trampoline_true, /* sr_set_property_dict */
+ [414] = trampoline_true, /* sr_set_property_int */
+ [415] = trampoline_true, /* sr_set_property_bool */
+ [451] = trampoline_allocate_buffer,
+ [452] = trampoline_map_physical,
+ [456] = trampoline_release_mem_desc,
+ [552] = trampoline_true, /* set_property_dict_0 */
+ [561] = trampoline_true, /* set_property_dict */
+ [563] = trampoline_true, /* set_property_int */
+ [565] = trampoline_true, /* set_property_bool */
+ [567] = trampoline_true, /* set_property_str */
+ [574] = trampoline_zero, /* power_up_dart */
+ [576] = trampoline_hotplug,
+ [577] = trampoline_nop, /* powerstate_notify */
+ [582] = trampoline_true, /* create_default_fb_surface */
+ [589] = trampoline_swap_complete,
+ [591] = trampoline_swap_complete_intent_gated,
+ [593] = trampoline_nop, /* enable_backlight_message_ap_gated */
+ [598] = trampoline_nop, /* find_swap_function_gated */
+};
+
+static void dcpep_handle_cb(struct apple_dcp *dcp, enum dcp_context_id context,
+ void *data, u32 length)
+{
+ struct device *dev = dcp->dev;
+ struct dcp_packet_header *hdr = data;
+ void *in, *out;
+ int tag = dcp_parse_tag(hdr->tag);
+ struct dcp_cb_channel *ch = dcp_get_cb_channel(dcp, context);
+ u8 depth;
+
+ if (tag < 0 || tag >= DCPEP_MAX_CB || !dcpep_cb_handlers[tag]) {
+ dev_warn(dev, "received unknown callback %c%c%c%c\n",
+ hdr->tag[3], hdr->tag[2], hdr->tag[1], hdr->tag[0]);
+ return;
+ }
+
+ in = data + sizeof(*hdr);
+ out = in + hdr->in_len;
+
+	// TODO: verify that in_len and out_len match our prototypes
+	// for now just clear the out data to have at least consistent results
+ if (hdr->out_len)
+ memset(out, 0, hdr->out_len);
+
+ depth = dcp_push_depth(&ch->depth);
+ ch->output[depth] = out;
+
+ if (dcpep_cb_handlers[tag](dcp, tag, out, in))
+ dcp_ack(dcp, context);
+}
+
+static void dcpep_handle_ack(struct apple_dcp *dcp, enum dcp_context_id context,
+ void *data, u32 length)
+{
+ struct dcp_packet_header *header = data;
+ struct dcp_call_channel *ch = dcp_get_call_channel(dcp, context);
+ void *cookie;
+ dcp_callback_t cb;
+
+ if (!ch) {
+ dev_warn(dcp->dev, "ignoring ack on context %X\n", context);
+ return;
+ }
+
+ dcp_pop_depth(&ch->depth);
+
+ cb = ch->callbacks[ch->depth];
+ cookie = ch->cookies[ch->depth];
+
+ ch->callbacks[ch->depth] = NULL;
+ ch->cookies[ch->depth] = NULL;
+
+ if (cb)
+ cb(dcp, data + sizeof(*header) + header->in_len, cookie);
+}
+
+static void dcpep_got_msg(struct apple_dcp *dcp, u64 message)
+{
+ enum dcp_context_id ctx_id;
+ u16 offset;
+ u32 length;
+ int channel_offset;
+ void *data;
+
+ ctx_id = (message & DCPEP_CONTEXT_MASK) >> DCPEP_CONTEXT_SHIFT;
+ offset = (message & DCPEP_OFFSET_MASK) >> DCPEP_OFFSET_SHIFT;
+ length = (message >> DCPEP_LENGTH_SHIFT);
+
+ channel_offset = dcp_channel_offset(ctx_id);
+
+ if (channel_offset < 0) {
+ dev_warn(dcp->dev, "invalid context received %u", ctx_id);
+ return;
+ }
+
+ data = dcp->shmem + channel_offset + offset;
+
+ if (message & DCPEP_ACK)
+ dcpep_handle_ack(dcp, ctx_id, data, length);
+ else
+ dcpep_handle_cb(dcp, ctx_id, data, length);
+}
+
+/*
+ * Callback for swap requests. If a swap failed, we'll never get a swap
+ * complete event so we need to fake a vblank event early to avoid a hang.
+ */
+
+static void dcp_swapped(struct apple_dcp *dcp, void *data, void *cookie)
+{
+ struct dcp_swap_submit_resp *resp = data;
+
+ if (resp->ret) {
+ dev_err(dcp->dev, "swap failed! status %u\n", resp->ret);
+ dcp_drm_crtc_vblank(dcp->crtc);
+ return;
+ }
+
+ while (!list_empty(&dcp->swapped_out_fbs)) {
+ struct dcp_fb_reference *entry;
+ entry = list_first_entry(&dcp->swapped_out_fbs,
+ struct dcp_fb_reference, head);
+ if (entry->fb)
+ drm_framebuffer_put(entry->fb);
+ list_del(&entry->head);
+ kfree(entry);
+ }
+}
+
+static void dcp_swap_started(struct apple_dcp *dcp, void *data, void *cookie)
+{
+ struct dcp_swap_start_resp *resp = data;
+
+ dcp->swap.swap.swap_id = resp->swap_id;
+
+ trace_iomfb_swap_submit(dcp, resp->swap_id);
+ dcp_swap_submit(dcp, false, &dcp->swap, dcp_swapped, NULL);
+}
+
+/*
+ * DRM specifies rectangles as start and end coordinates. DCP specifies
+ * rectangles as a start coordinate and a width/height. Convert a DRM rectangle
+ * to a DCP rectangle.
+ */
+static struct dcp_rect drm_to_dcp_rect(struct drm_rect *rect)
+{
+ return (struct dcp_rect){ .x = rect->x1,
+ .y = rect->y1,
+ .w = drm_rect_width(rect),
+ .h = drm_rect_height(rect) };
+}
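+
+/*
+ * For example, the DRM rect (x1 = 0, y1 = 0, x2 = 1920, y2 = 1080)
+ * becomes the DCP rect (x = 0, y = 0, w = 1920, h = 1080).
+ */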
+
+static u32 drm_format_to_dcp(u32 drm)
+{
+ switch (drm) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ return fourcc_code('A', 'R', 'G', 'B');
+
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return fourcc_code('A', 'B', 'G', 'R');
+
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_XRGB2101010:
+ return fourcc_code('r', '0', '3', 'w');
+ }
+
+ pr_warn("DRM format %X not supported in DCP\n", drm);
+ return 0;
+}
+
+static u8 drm_format_to_colorspace(u32 drm)
+{
+ switch (drm) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return 1;
+
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_XRGB2101010:
+ return 2;
+ }
+
+ return 1;
+}
+
+int dcp_get_modes(struct drm_connector *connector)
+{
+ struct apple_connector *apple_connector = to_apple_connector(connector);
+ struct platform_device *pdev = apple_connector->dcp;
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode;
+ int i;
+
+ for (i = 0; i < dcp->nr_modes; ++i) {
+ mode = drm_mode_duplicate(dev, &dcp->modes[i].mode);
+
+ if (!mode) {
+ dev_err(dev->dev, "Failed to duplicate display mode\n");
+ return 0;
+ }
+
+ drm_mode_probed_add(connector, mode);
+ }
+
+ return dcp->nr_modes;
+}
+EXPORT_SYMBOL_GPL(dcp_get_modes);
+
+/* The user may own drm_display_mode, so we need to search for our copy */
+static struct dcp_display_mode *lookup_mode(struct apple_dcp *dcp,
+ struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < dcp->nr_modes; ++i) {
+ if (drm_mode_match(mode, &dcp->modes[i].mode,
+ DRM_MODE_MATCH_TIMINGS |
+ DRM_MODE_MATCH_CLOCK))
+ return &dcp->modes[i];
+ }
+
+ return NULL;
+}
+
+int dcp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct apple_connector *apple_connector = to_apple_connector(connector);
+ struct platform_device *pdev = apple_connector->dcp;
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+ return lookup_mode(dcp, mode) ? MODE_OK : MODE_BAD;
+}
+EXPORT_SYMBOL_GPL(dcp_mode_valid);
+
+/* Helpers to modeset and swap, used to flush */
+static void do_swap(struct apple_dcp *dcp, void *data, void *cookie)
+{
+ struct dcp_swap_start_req start_req = { 0 };
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ if (dcp->connector && dcp->connector->connected)
+ dcp_swap_start(dcp, false, &start_req, dcp_swap_started, NULL);
+ else
+ dcp_drm_crtc_vblank(dcp->crtc);
+}
+
+static void complete_set_digital_out_mode(struct apple_dcp *dcp, void *data,
+ void *cookie)
+{
+ struct dcp_wait_cookie *wait = cookie;
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ dcp->ignore_swap_complete = false;
+
+ if (wait) {
+ complete(&wait->done);
+ kref_put(&wait->refcount, release_wait_cookie);
+ }
+}
+
+void dcp_flush(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct platform_device *pdev = to_apple_crtc(crtc)->dcp;
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+ struct drm_plane *plane;
+ struct drm_plane_state *new_state, *old_state;
+ struct drm_crtc_state *crtc_state;
+ struct dcp_swap_submit_req *req = &dcp->swap;
+ int plane_idx, l;
+ int has_surface = 0;
+ bool modeset;
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ modeset = drm_atomic_crtc_needs_modeset(crtc_state) || !dcp->valid_mode;
+
+	if (dcp_channel_busy(&dcp->ch_cmd)) {
+ dev_err(dcp->dev, "unexpected busy command channel");
+ /* HACK: issue a delayed vblank event to avoid timeouts in
+ * drm_atomic_helper_wait_for_vblanks().
+ */
+ schedule_work(&dcp->vblank_wq);
+ return;
+ }
+
+ /* Reset to defaults */
+ memset(req, 0, sizeof(*req));
+ for (l = 0; l < SWAP_SURFACES; l++)
+ req->surf_null[l] = true;
+
+ l = 0;
+ for_each_oldnew_plane_in_state(state, plane, old_state, new_state, plane_idx) {
+ struct drm_framebuffer *fb = new_state->fb;
+ struct drm_rect src_rect;
+ bool opaque = false;
+
+ /* skip planes not for this crtc */
+ if (old_state->crtc != crtc && new_state->crtc != crtc)
+ continue;
+
+ WARN_ON(l >= SWAP_SURFACES);
+
+ req->swap.swap_enabled |= BIT(l);
+
+ if (old_state->fb && fb != old_state->fb) {
+ /*
+ * Race condition between a framebuffer unbind getting
+ * swapped out and GEM unreferencing a framebuffer. If
+ * we lose the race, the display gets IOVA faults and
+ * the DCP crashes. We need to extend the lifetime of
+ * the drm_framebuffer (and hence the GEM object) until
+ * after we get a swap complete for the swap unbinding
+ * it.
+ */
+ struct dcp_fb_reference *entry =
+ kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (entry) {
+ entry->fb = old_state->fb;
+ list_add_tail(&entry->head,
+ &dcp->swapped_out_fbs);
+ }
+ drm_framebuffer_get(old_state->fb);
+ }
+
+ if (!new_state->fb) {
+ if (old_state->fb)
+ req->swap.swap_enabled |= DCP_REMOVE_LAYERS;
+
+ l += 1;
+ continue;
+ }
+ req->surf_null[l] = false;
+ has_surface = 1;
+
+ if (fb->format->has_alpha ||
+ new_state->plane->type == DRM_PLANE_TYPE_PRIMARY)
+ opaque = true;
+ drm_rect_fp_to_int(&src_rect, &new_state->src);
+
+ req->swap.src_rect[l] = drm_to_dcp_rect(&src_rect);
+ req->swap.dst_rect[l] = drm_to_dcp_rect(&new_state->dst);
+
+ if (dcp->notch_height > 0)
+ req->swap.dst_rect[l].y += dcp->notch_height;
+
+ req->surf_iova[l] = drm_fb_dma_get_gem_addr(fb, new_state, 0);
+
+ req->surf[l] = (struct dcp_surface){
+ .opaque = opaque,
+ .format = drm_format_to_dcp(fb->format->format),
+ .xfer_func = 13,
+ .colorspace = drm_format_to_colorspace(fb->format->format),
+ .stride = fb->pitches[0],
+ .width = fb->width,
+ .height = fb->height,
+ .buf_size = fb->height * fb->pitches[0],
+ .surface_id = req->swap.surf_ids[l],
+
+ /* Only used for compressed or multiplanar surfaces */
+ .pix_size = 1,
+ .pel_w = 1,
+ .pel_h = 1,
+ .has_comp = 1,
+ .has_planes = 1,
+ };
+
+ l += 1;
+ }
+
+ /* These fields should be set together */
+ req->swap.swap_completed = req->swap.swap_enabled;
+
+ if (modeset) {
+ struct dcp_display_mode *mode;
+ struct dcp_wait_cookie *cookie;
+ int ret;
+
+ mode = lookup_mode(dcp, &crtc_state->mode);
+ if (!mode) {
+ dev_warn(dcp->dev, "no match for " DRM_MODE_FMT,
+ DRM_MODE_ARG(&crtc_state->mode));
+ schedule_work(&dcp->vblank_wq);
+ return;
+ }
+
+ dev_info(dcp->dev, "set_digital_out_mode(color:%d timing:%d)",
+ mode->color_mode_id, mode->timing_mode_id);
+ dcp->mode = (struct dcp_set_digital_out_mode_req){
+ .color_mode_id = mode->color_mode_id,
+ .timing_mode_id = mode->timing_mode_id
+ };
+
+ cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+ if (!cookie) {
+ schedule_work(&dcp->vblank_wq);
+ return;
+ }
+
+ init_completion(&cookie->done);
+ kref_init(&cookie->refcount);
+ /* increase refcount to ensure the receiver has a reference */
+ kref_get(&cookie->refcount);
+
+ dcp_set_digital_out_mode(dcp, false, &dcp->mode,
+ complete_set_digital_out_mode, cookie);
+
+ dev_dbg(dcp->dev, "%s - wait for modeset", __func__);
+ ret = wait_for_completion_timeout(&cookie->done,
+ msecs_to_jiffies(500));
+
+ kref_put(&cookie->refcount, release_wait_cookie);
+
+ if (ret == 0) {
+			dev_dbg(dcp->dev, "set_digital_out_mode timed out after 500 ms");
+ schedule_work(&dcp->vblank_wq);
+ return;
+ } else if (ret > 0) {
+ dev_dbg(dcp->dev,
+				"set_digital_out_mode finished with %d ms to spare",
+ jiffies_to_msecs(ret));
+ }
+
+ dcp->valid_mode = true;
+ }
+
+ if (!has_surface) {
+ if (crtc_state->enable && crtc_state->active &&
+ !crtc_state->planes_changed) {
+ schedule_work(&dcp->vblank_wq);
+ return;
+ }
+
+ req->clear = 1;
+ }
+ do_swap(dcp, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(dcp_flush);
+
+bool dcp_is_initialized(struct platform_device *pdev)
+{
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+ return dcp->active;
+}
+EXPORT_SYMBOL_GPL(dcp_is_initialized);
+
+static void res_is_main_display(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ struct apple_connector *connector;
+ int result = *(int *)out;
+ dev_info(dcp->dev, "DCP is_main_display: %d\n", result);
+
+ dcp->main_display = result != 0;
+
+ dcp->active = true;
+
+ connector = dcp->connector;
+ if (connector) {
+ connector->connected = dcp->nr_modes > 0;
+ schedule_work(&connector->hotplug_wq);
+ }
+}
+
+static void init_3(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ dcp_is_main_display(dcp, false, res_is_main_display, NULL);
+}
+
+static void init_2(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ dcp_first_client_open(dcp, false, init_3, NULL);
+}
+
+static void init_1(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ u32 val = 0;
+ dcp_enable_disable_video_power_savings(dcp, false, &val, init_2, NULL);
+}
+
+static void dcp_started(struct apple_dcp *dcp, void *data, void *cookie)
+{
+ dev_info(dcp->dev, "DCP booted\n");
+
+ init_1(dcp, data, cookie);
+}
+
+void iomfb_recv_msg(struct apple_dcp *dcp, u64 message)
+{
+ enum dcpep_type type = (message >> DCPEP_TYPE_SHIFT) & DCPEP_TYPE_MASK;
+
+ if (type == DCPEP_TYPE_INITIALIZED)
+ dcp_start_signal(dcp, false, dcp_started, NULL);
+ else if (type == DCPEP_TYPE_MESSAGE)
+ dcpep_got_msg(dcp, message);
+ else
+ dev_warn(dcp->dev, "Ignoring unknown message %llx\n", message);
+}
+
+int iomfb_start_rtkit(struct apple_dcp *dcp)
+{
+ dma_addr_t shmem_iova;
+ apple_rtkit_start_ep(dcp->rtk, IOMFB_ENDPOINT);
+
+	dcp->shmem = dma_alloc_coherent(dcp->dev, DCP_SHMEM_SIZE, &shmem_iova,
+					GFP_KERNEL);
+	if (!dcp->shmem)
+		return -ENOMEM;
+
+	shmem_iova |= dcp->asc_dram_mask;
+ dcp_send_message(dcp, IOMFB_ENDPOINT, dcpep_set_shmem(shmem_iova));
+
+ return 0;
+}
+
+void iomfb_shutdown(struct apple_dcp *dcp)
+{
+ struct dcp_set_power_state_req req = {
+ /* defaults are ok */
+ };
+
+ /* We're going down */
+ dcp->active = false;
+ dcp->valid_mode = false;
+
+ dcp_set_power_state(dcp, false, &req, NULL, NULL);
+}
diff --git a/drivers/gpu/drm/apple/iomfb.h b/drivers/gpu/drm/apple/iomfb.h
new file mode 100644
index 000000000000..f9ead84c21f2
--- /dev/null
+++ b/drivers/gpu/drm/apple/iomfb.h
@@ -0,0 +1,406 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#ifndef __APPLE_DCPEP_H__
+#define __APPLE_DCPEP_H__
+
+#include <linux/types.h>
+
+/* Fixed size of shared memory between DCP and AP */
+#define DCP_SHMEM_SIZE 0x100000
+
+/* DCP message contexts */
+enum dcp_context_id {
+ /* Callback */
+ DCP_CONTEXT_CB = 0,
+
+ /* Command */
+ DCP_CONTEXT_CMD = 2,
+
+ /* Asynchronous */
+ DCP_CONTEXT_ASYNC = 3,
+
+ /* Out-of-band callback */
+ DCP_CONTEXT_OOBCB = 4,
+
+ /* Out-of-band command */
+ DCP_CONTEXT_OOBCMD = 6,
+
+ DCP_NUM_CONTEXTS
+};
+
+/* RTKit endpoint message types */
+enum dcpep_type {
+ /* Set shared memory */
+ DCPEP_TYPE_SET_SHMEM = 0,
+
+ /* DCP is initialized */
+ DCPEP_TYPE_INITIALIZED = 1,
+
+ /* Remote procedure call */
+ DCPEP_TYPE_MESSAGE = 2,
+};
+
+/* Message */
+#define DCPEP_TYPE_SHIFT (0)
+#define DCPEP_TYPE_MASK GENMASK(1, 0)
+#define DCPEP_ACK BIT_ULL(6)
+#define DCPEP_CONTEXT_SHIFT (8)
+#define DCPEP_CONTEXT_MASK GENMASK(11, 8)
+#define DCPEP_OFFSET_SHIFT (16)
+#define DCPEP_OFFSET_MASK GENMASK(31, 16)
+#define DCPEP_LENGTH_SHIFT (32)
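+
+/*
+ * Layout of a message word, as implied by the masks above:
+ *
+ *   bits  1:0   type
+ *   bit      6  ack flag
+ *   bits 11:8   context id
+ *   bits 31:16  offset into the context's shmem region
+ *   bits 63:32  payload length
+ */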
+
+/* Set shmem */
+#define DCPEP_DVA_SHIFT (16)
+#define DCPEP_FLAG_SHIFT (4)
+#define DCPEP_FLAG_VALUE (4)
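+
+/*
+ * The set_shmem message is presumably assembled from these as
+ * DCPEP_TYPE_SET_SHMEM | (DCPEP_FLAG_VALUE << DCPEP_FLAG_SHIFT) |
+ * (shmem_dva << DCPEP_DVA_SHIFT); see the dcpep_set_shmem() call in
+ * iomfb_start_rtkit().
+ */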
+
+struct dcp_packet_header {
+ char tag[4];
+ u32 in_len;
+ u32 out_len;
+} __packed;
+
+#define DCP_IS_NULL(ptr) ((ptr) ? 1 : 0) /* note: 1 when ptr is non-NULL */
+#define DCP_PACKET_ALIGNMENT (0x40)
+
+/* Structures used in v12.0 firmware */
+
+#define SWAP_SURFACES 4
+#define MAX_PLANES 3
+
+struct dcp_iouserclient {
+ /* Handle for the IOUserClient. macOS sets this to a kernel VA. */
+ u64 handle;
+ u32 unk;
+ u8 flag1;
+ u8 flag2;
+ u8 padding[2];
+} __packed;
+
+struct dcp_rect {
+ u32 x;
+ u32 y;
+ u32 w;
+ u32 h;
+} __packed;
+
+/*
+ * Set in the swap_{enabled,completed} field to remove missing
+ * layers. Without this flag, the DCP will assume missing layers have
+ * not changed since the previous frame and will preserve their
+ * content.
+ */
+#define DCP_REMOVE_LAYERS BIT(31)
+
+struct dcp_swap {
+ u64 ts1;
+ u64 ts2;
+ u64 unk_10[6];
+ u64 flags1;
+ u64 flags2;
+
+ u32 swap_id;
+
+ u32 surf_ids[SWAP_SURFACES];
+ struct dcp_rect src_rect[SWAP_SURFACES];
+ u32 surf_flags[SWAP_SURFACES];
+ u32 surf_unk[SWAP_SURFACES];
+ struct dcp_rect dst_rect[SWAP_SURFACES];
+ u32 swap_enabled;
+ u32 swap_completed;
+
+ u32 unk_10c;
+ u8 unk_110[0x1b8];
+ u32 unk_2c8;
+ u8 unk_2cc[0x14];
+ u32 unk_2e0;
+ u8 unk_2e4[0x3c];
+} __packed;
+
+/* Information describing a plane of a planar compressed surface */
+struct dcp_plane_info {
+ u32 width;
+ u32 height;
+ u32 base;
+ u32 offset;
+ u32 stride;
+ u32 size;
+ u16 tile_size;
+ u8 tile_w;
+ u8 tile_h;
+ u32 unk[13];
+} __packed;
+
+struct dcp_component_types {
+ u8 count;
+ u8 types[7];
+} __packed;
+
+/* Information describing a surface */
+struct dcp_surface {
+ u8 is_tiled;
+ u8 unk_1;
+	u8 opaque; /* ignore alpha; also required for YUV overlays */
+ u32 plane_cnt;
+ u32 plane_cnt2;
+ u32 format; /* DCP fourcc */
+ u32 unk_f;
+ u8 xfer_func;
+ u8 colorspace;
+ u32 stride;
+ u16 pix_size;
+ u8 pel_w;
+ u8 pel_h;
+ u32 offset;
+ u32 width;
+ u32 height;
+ u32 buf_size;
+ u32 unk_2d;
+ u32 unk_31;
+ u32 surface_id;
+ struct dcp_component_types comp_types[MAX_PLANES];
+ u64 has_comp;
+ struct dcp_plane_info planes[MAX_PLANES];
+ u64 has_planes;
+ u32 compression_info[MAX_PLANES][13];
+ u64 has_compr_info;
+ u64 unk_1f5;
+ u8 padding[7];
+} __packed;
+
+struct dcp_rt_bandwidth {
+ u64 unk1;
+ u64 reg_scratch;
+ u64 reg_doorbell;
+ u32 unk2;
+ u32 doorbell_bit;
+ u32 padding[7];
+} __packed;
+
+/* Method calls */
+
+enum dcpep_method {
+ dcpep_late_init_signal,
+ dcpep_setup_video_limits,
+ dcpep_set_create_dfb,
+ dcpep_start_signal,
+ dcpep_swap_start,
+ dcpep_swap_submit,
+ dcpep_set_display_device,
+ dcpep_set_digital_out_mode,
+ dcpep_create_default_fb,
+ dcpep_set_display_refresh_properties,
+ dcpep_flush_supports_power,
+ dcpep_set_power_state,
+ dcpep_first_client_open,
+ dcpep_update_notify_clients_dcp,
+ dcpep_set_parameter_dcp,
+ dcpep_enable_disable_video_power_savings,
+ dcpep_is_main_display,
+ dcpep_num_methods
+};
+
+struct dcp_method_entry {
+ const char *name;
+ char tag[4];
+};
+
+/* Prototypes */
+
+struct dcp_set_digital_out_mode_req {
+ u32 color_mode_id;
+ u32 timing_mode_id;
+} __packed;
+
+struct dcp_map_buf_req {
+ u64 buffer;
+ u8 unk;
+ u8 buf_null;
+ u8 vaddr_null;
+ u8 dva_null;
+} __packed;
+
+struct dcp_map_buf_resp {
+ u64 vaddr;
+ u64 dva;
+ u32 ret;
+} __packed;
+
+struct dcp_unmap_buf_resp {
+ u64 buffer;
+ u64 vaddr;
+ u64 dva;
+ u8 unk;
+ u8 buf_null;
+} __packed;
+
+struct dcp_allocate_buffer_req {
+ u32 unk0;
+ u64 size;
+ u32 unk2;
+ u8 paddr_null;
+ u8 dva_null;
+ u8 dva_size_null;
+ u8 padding;
+} __packed;
+
+struct dcp_allocate_buffer_resp {
+ u64 paddr;
+ u64 dva;
+ u64 dva_size;
+ u32 mem_desc_id;
+} __packed;
+
+struct dcp_map_physical_req {
+ u64 paddr;
+ u64 size;
+ u32 flags;
+ u8 dva_null;
+ u8 dva_size_null;
+ u8 padding[2];
+} __packed;
+
+struct dcp_map_physical_resp {
+ u64 dva;
+ u64 dva_size;
+ u32 mem_desc_id;
+} __packed;
+
+struct dcp_map_reg_req {
+ char obj[4];
+ u32 index;
+ u32 flags;
+ u8 addr_null;
+ u8 length_null;
+ u8 padding[2];
+} __packed;
+
+struct dcp_map_reg_resp {
+ u64 addr;
+ u64 length;
+ u32 ret;
+} __packed;
+
+struct dcp_swap_start_req {
+ u32 swap_id;
+ struct dcp_iouserclient client;
+ u8 swap_id_null;
+ u8 client_null;
+ u8 padding[2];
+} __packed;
+
+struct dcp_swap_start_resp {
+ u32 swap_id;
+ struct dcp_iouserclient client;
+ u32 ret;
+} __packed;
+
+struct dcp_swap_submit_req {
+ struct dcp_swap swap;
+ struct dcp_surface surf[SWAP_SURFACES];
+ u64 surf_iova[SWAP_SURFACES];
+ u8 unkbool;
+ u64 unkdouble;
+ u32 clear; // or maybe switch to default fb?
+ u8 swap_null;
+ u8 surf_null[SWAP_SURFACES];
+ u8 unkoutbool_null;
+ u8 padding[1];
+} __packed;
+
+struct dcp_swap_submit_resp {
+ u8 unkoutbool;
+ u32 ret;
+ u8 padding[3];
+} __packed;
+
+struct dc_swap_complete_resp {
+ u32 swap_id;
+ u8 unkbool;
+ u64 swap_data;
+ u8 swap_info[0x6c4];
+ u32 unkint;
+ u8 swap_info_null;
+} __packed;
+
+struct dcp_get_uint_prop_req {
+ char obj[4];
+ char key[0x40];
+ u64 value;
+ u8 value_null;
+ u8 padding[3];
+} __packed;
+
+struct dcp_get_uint_prop_resp {
+ u64 value;
+ u8 ret;
+ u8 padding[3];
+} __packed;
+
+struct dcp_set_power_state_req {
+ u64 unklong;
+ u8 unkbool;
+ u8 unkint_null;
+ u8 padding[2];
+} __packed;
+
+struct dcp_set_power_state_resp {
+ u32 unkint;
+ u32 ret;
+} __packed;
+
+struct dcp_set_dcpav_prop_chunk_req {
+ char data[0x1000];
+ u32 offset;
+ u32 length;
+} __packed;
+
+struct dcp_set_dcpav_prop_end_req {
+ char key[0x40];
+} __packed;
+
+struct dcp_update_notify_clients_dcp {
+ u32 client_0;
+ u32 client_1;
+ u32 client_2;
+ u32 client_3;
+ u32 client_4;
+ u32 client_5;
+ u32 client_6;
+ u32 client_7;
+ u32 client_8;
+ u32 client_9;
+ u32 client_a;
+ u32 client_b;
+ u32 client_c;
+ u32 client_d;
+} __packed;
+
+struct dcp_set_parameter_dcp {
+ u32 param;
+ u32 value[8];
+ u32 count;
+} __packed;
+
+struct dcp_swap_complete_intent_gated {
+ u32 swap_id;
+ u8 unkBool;
+ u32 unkInt;
+ u32 width;
+ u32 height;
+} __packed;
+
+struct dcp_read_edt_data_req {
+ char key[0x40];
+ u32 count;
+ u32 value[8];
+} __packed;
+
+struct dcp_read_edt_data_resp {
+ u32 value[8];
+ u8 ret;
+} __packed;
+
+#endif
diff --git a/drivers/gpu/drm/apple/parser.c b/drivers/gpu/drm/apple/parser.c
new file mode 100644
index 000000000000..910b0e57a35a
--- /dev/null
+++ b/drivers/gpu/drm/apple/parser.c
@@ -0,0 +1,459 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/math.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include "parser.h"
+
+#define DCP_PARSE_HEADER 0xd3
+
+enum dcp_parse_type {
+ DCP_TYPE_DICTIONARY = 1,
+ DCP_TYPE_ARRAY = 2,
+ DCP_TYPE_INT64 = 4,
+ DCP_TYPE_STRING = 9,
+ DCP_TYPE_BLOB = 10,
+ DCP_TYPE_BOOL = 11
+};
+
+struct dcp_parse_tag {
+ unsigned int size : 24;
+ enum dcp_parse_type type : 5;
+ unsigned int padding : 2;
+ bool last : 1;
+} __packed;
+
+static void *parse_bytes(struct dcp_parse_ctx *ctx, size_t count)
+{
+ void *ptr = ctx->blob + ctx->pos;
+
+ if (ctx->pos + count > ctx->len)
+ return ERR_PTR(-EINVAL);
+
+ ctx->pos += count;
+ return ptr;
+}
+
+static u32 *parse_u32(struct dcp_parse_ctx *ctx)
+{
+ return parse_bytes(ctx, sizeof(u32));
+}
+
+static struct dcp_parse_tag *parse_tag(struct dcp_parse_ctx *ctx)
+{
+ struct dcp_parse_tag *tag;
+
+ /* Align to 32-bits */
+ ctx->pos = round_up(ctx->pos, 4);
+
+ tag = parse_bytes(ctx, sizeof(struct dcp_parse_tag));
+
+ if (IS_ERR(tag))
+ return tag;
+
+ if (tag->padding)
+ return ERR_PTR(-EINVAL);
+
+ return tag;
+}
+
+static struct dcp_parse_tag *parse_tag_of_type(struct dcp_parse_ctx *ctx,
+ enum dcp_parse_type type)
+{
+ struct dcp_parse_tag *tag = parse_tag(ctx);
+
+ if (IS_ERR(tag))
+ return tag;
+
+ if (tag->type != type)
+ return ERR_PTR(-EINVAL);
+
+ return tag;
+}
+
+static int skip(struct dcp_parse_ctx *handle)
+{
+ struct dcp_parse_tag *tag = parse_tag(handle);
+ int ret = 0;
+ int i;
+
+ if (IS_ERR(tag))
+ return PTR_ERR(tag);
+
+ switch (tag->type) {
+ case DCP_TYPE_DICTIONARY:
+ for (i = 0; i < tag->size; ++i) {
+ ret |= skip(handle); /* key */
+ ret |= skip(handle); /* value */
+ }
+
+ return ret;
+
+ case DCP_TYPE_ARRAY:
+ for (i = 0; i < tag->size; ++i)
+ ret |= skip(handle);
+
+ return ret;
+
+ case DCP_TYPE_INT64:
+ handle->pos += sizeof(s64);
+ return 0;
+
+ case DCP_TYPE_STRING:
+ case DCP_TYPE_BLOB:
+ handle->pos += tag->size;
+ return 0;
+
+ case DCP_TYPE_BOOL:
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Caller must free the result */
+static char *parse_string(struct dcp_parse_ctx *handle)
+{
+ struct dcp_parse_tag *tag = parse_tag_of_type(handle, DCP_TYPE_STRING);
+ const char *in;
+ char *out;
+
+ if (IS_ERR(tag))
+ return (void *)tag;
+
+ in = parse_bytes(handle, tag->size);
+ if (IS_ERR(in))
+ return (void *)in;
+
+	out = kmalloc(tag->size + 1, GFP_KERNEL);
+	if (!out)
+		return ERR_PTR(-ENOMEM);
+
+	memcpy(out, in, tag->size);
+ out[tag->size] = '\0';
+ return out;
+}
+
+static int parse_int(struct dcp_parse_ctx *handle, s64 *value)
+{
+ void *tag = parse_tag_of_type(handle, DCP_TYPE_INT64);
+ s64 *in;
+
+ if (IS_ERR(tag))
+ return PTR_ERR(tag);
+
+ in = parse_bytes(handle, sizeof(s64));
+
+ if (IS_ERR(in))
+ return PTR_ERR(in);
+
+ memcpy(value, in, sizeof(*value));
+ return 0;
+}
+
+static int parse_bool(struct dcp_parse_ctx *handle, bool *b)
+{
+ struct dcp_parse_tag *tag = parse_tag_of_type(handle, DCP_TYPE_BOOL);
+
+ if (IS_ERR(tag))
+ return PTR_ERR(tag);
+
+ *b = !!tag->size;
+ return 0;
+}
+
+struct iterator {
+ struct dcp_parse_ctx *handle;
+ u32 idx, len;
+};
+
+static int iterator_begin(struct dcp_parse_ctx *handle, struct iterator *it,
+			  bool dict)
+{
+ struct dcp_parse_tag *tag;
+ enum dcp_parse_type type = dict ? DCP_TYPE_DICTIONARY : DCP_TYPE_ARRAY;
+
+ *it = (struct iterator) {
+ .handle = handle,
+ .idx = 0
+ };
+
+ tag = parse_tag_of_type(it->handle, type);
+ if (IS_ERR(tag))
+ return PTR_ERR(tag);
+
+ it->len = tag->size;
+ return 0;
+}
+
+#define dcp_parse_foreach_in_array(handle, it) \
+ for (iterator_begin(handle, &it, false); it.idx < it.len; ++it.idx)
+#define dcp_parse_foreach_in_dict(handle, it) \
+ for (iterator_begin(handle, &it, true); it.idx < it.len; ++it.idx)
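+
+/*
+ * If iterator_begin() fails, it.len stays zero (the iterator is
+ * zero-initialized before parsing the tag), so these loops simply run
+ * zero iterations and the error is silently skipped.
+ */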
+
+int parse(void *blob, size_t size, struct dcp_parse_ctx *ctx)
+{
+ u32 *header;
+
+ *ctx = (struct dcp_parse_ctx) {
+ .blob = blob,
+ .len = size,
+ .pos = 0,
+ };
+
+ header = parse_u32(ctx);
+ if (IS_ERR(header))
+ return PTR_ERR(header);
+
+ if (*header != DCP_PARSE_HEADER)
+ return -EINVAL;
+
+ return 0;
+}
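+
+/*
+ * Typical usage (a sketch; see the TimingElements handling in iomfb.c):
+ *
+ *   struct dcp_parse_ctx ctx;
+ *
+ *   if (parse(blob, size, &ctx))
+ *       return -EINVAL;
+ *   modes = enumerate_modes(&ctx, &nr_modes, width_mm, height_mm, notch);
+ */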
+
+struct dimension {
+ s64 total, front_porch, sync_width, active;
+ s64 precise_sync_rate;
+};
+
+static int parse_dimension(struct dcp_parse_ctx *handle, struct dimension *dim)
+{
+ struct iterator it;
+ int ret = 0;
+
+ dcp_parse_foreach_in_dict(handle, it) {
+ char *key = parse_string(it.handle);
+
+ if (IS_ERR(key))
+			ret = PTR_ERR(key);
+ else if (!strcmp(key, "Active"))
+ ret = parse_int(it.handle, &dim->active);
+ else if (!strcmp(key, "Total"))
+ ret = parse_int(it.handle, &dim->total);
+ else if (!strcmp(key, "FrontPorch"))
+ ret = parse_int(it.handle, &dim->front_porch);
+ else if (!strcmp(key, "SyncWidth"))
+ ret = parse_int(it.handle, &dim->sync_width);
+ else if (!strcmp(key, "PreciseSyncRate"))
+ ret = parse_int(it.handle, &dim->precise_sync_rate);
+ else
+ skip(it.handle);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int parse_color_modes(struct dcp_parse_ctx *handle, s64 *best_id)
+{
+ struct iterator outer_it;
+ int ret = 0;
+ s64 best_score = -1;
+
+ *best_id = -1;
+
+ dcp_parse_foreach_in_array(handle, outer_it) {
+ struct iterator it;
+ s64 score = -1, id = -1;
+
+ dcp_parse_foreach_in_dict(handle, it) {
+ char *key = parse_string(it.handle);
+
+ if (IS_ERR(key))
+ ret = PTR_ERR(key);
+ else if (!strcmp(key, "Score"))
+ ret = parse_int(it.handle, &score);
+ else if (!strcmp(key, "ID"))
+ ret = parse_int(it.handle, &id);
+ else
+ skip(it.handle);
+
+ if (ret)
+ return ret;
+ }
+
+ /* Skip partial entries */
+ if (score < 0 || id < 0)
+ continue;
+
+ if (score > best_score) {
+ best_score = score;
+ *best_id = id;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Calculate the pixel clock for a mode given the 16:16 fixed-point refresh
+ * rate. The pixel clock is the refresh rate times the pixel count. DRM
+ * specifies the clock in kHz. The intermediate result may overflow a u32, so
+ * use a u64 where required.
+ */
+static u32 calculate_clock(struct dimension *horiz, struct dimension *vert)
+{
+ u32 pixels = horiz->total * vert->total;
+ u64 clock = mul_u32_u32(pixels, vert->precise_sync_rate);
+
+ return DIV_ROUND_CLOSEST_ULL(clock >> 16, 1000);
+}
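+
+/*
+ * Worked example: for the standard 1920x1080 at 60 Hz CEA timing,
+ * htotal = 2200 and vtotal = 1125, so pixels = 2,475,000. With
+ * precise_sync_rate = 60 << 16 = 3,932,160, the result is
+ * ((2,475,000 * 3,932,160) >> 16) / 1000 = 148,500 kHz, i.e. the
+ * expected 148.5 MHz pixel clock.
+ */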
+
+static int parse_mode(struct dcp_parse_ctx *handle,
+ struct dcp_display_mode *out, s64 *score, int width_mm,
+ int height_mm, unsigned notch_height)
+{
+ int ret = 0;
+ struct iterator it;
+	struct dimension horiz = { 0 }, vert = { 0 };
+ s64 id = -1;
+ s64 best_color_mode = -1;
+ bool is_virtual = false;
+ struct drm_display_mode *mode = &out->mode;
+
+ dcp_parse_foreach_in_dict(handle, it) {
+ char *key = parse_string(it.handle);
+
+ if (IS_ERR(key))
+ ret = PTR_ERR(key);
+ else if (!strcmp(key, "HorizontalAttributes"))
+ ret = parse_dimension(it.handle, &horiz);
+ else if (!strcmp(key, "VerticalAttributes"))
+ ret = parse_dimension(it.handle, &vert);
+ else if (!strcmp(key, "ColorModes"))
+ ret = parse_color_modes(it.handle, &best_color_mode);
+ else if (!strcmp(key, "ID"))
+ ret = parse_int(it.handle, &id);
+ else if (!strcmp(key, "IsVirtual"))
+ ret = parse_bool(it.handle, &is_virtual);
+ else if (!strcmp(key, "Score"))
+ ret = parse_int(it.handle, score);
+ else
+ skip(it.handle);
+
+ if (ret)
+ return ret;
+ }
+
+	/* Reject modes without a valid color mode. */
+ if (best_color_mode < 0)
+ return -EINVAL;
+
+ /*
+ * We need to skip virtual modes. In some cases, virtual modes are "too
+ * big" for the monitor and can cause breakage. It is unclear why the
+ * DCP reports these modes at all. Treat as a recoverable error.
+ */
+ if (is_virtual)
+ return -EINVAL;
+
+ vert.active -= notch_height;
+ vert.sync_width += notch_height;
+
+ /* From here we must succeed. Start filling out the mode. */
+ *mode = (struct drm_display_mode) {
+ .type = DRM_MODE_TYPE_DRIVER,
+ .clock = calculate_clock(&horiz, &vert),
+
+ .vdisplay = vert.active,
+ .vsync_start = vert.active + vert.front_porch,
+ .vsync_end = vert.active + vert.front_porch + vert.sync_width,
+ .vtotal = vert.total,
+
+ .hdisplay = horiz.active,
+ .hsync_start = horiz.active + horiz.front_porch,
+ .hsync_end = horiz.active + horiz.front_porch +
+ horiz.sync_width,
+ .htotal = horiz.total,
+
+ .width_mm = width_mm,
+ .height_mm = height_mm,
+ };
+
+ drm_mode_set_name(mode);
+
+ out->timing_mode_id = id;
+ out->color_mode_id = best_color_mode;
+
+ return 0;
+}
+
+struct dcp_display_mode *enumerate_modes(struct dcp_parse_ctx *handle,
+ unsigned int *count, int width_mm,
+ int height_mm, unsigned notch_height)
+{
+ struct iterator it;
+ int ret;
+ struct dcp_display_mode *mode, *modes;
+ struct dcp_display_mode *best_mode = NULL;
+ s64 score, best_score = -1;
+
+ ret = iterator_begin(handle, &it, false);
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* Start with a worst case allocation */
+ modes = kmalloc_array(it.len, sizeof(*modes), GFP_KERNEL);
+ *count = 0;
+
+ if (!modes)
+ return ERR_PTR(-ENOMEM);
+
+ for (; it.idx < it.len; ++it.idx) {
+ mode = &modes[*count];
+ ret = parse_mode(it.handle, mode, &score, width_mm, height_mm, notch_height);
+
+ /* Errors for a single mode are recoverable -- just skip it. */
+ if (ret)
+ continue;
+
+ /* Process a successful mode */
+ (*count)++;
+
+ if (score > best_score) {
+ best_score = score;
+ best_mode = mode;
+ }
+ }
+
+ if (best_mode != NULL)
+ best_mode->mode.type |= DRM_MODE_TYPE_PREFERRED;
+
+ return modes;
+}
+
+int parse_display_attributes(struct dcp_parse_ctx *handle, int *width_mm,
+ int *height_mm)
+{
+ int ret = 0;
+ struct iterator it;
+ s64 width_cm = 0, height_cm = 0;
+
+ dcp_parse_foreach_in_dict(handle, it) {
+ char *key = parse_string(it.handle);
+
+ if (IS_ERR(key))
+ ret = PTR_ERR(key);
+ else if (!strcmp(key, "MaxHorizontalImageSize"))
+ ret = parse_int(it.handle, &width_cm);
+ else if (!strcmp(key, "MaxVerticalImageSize"))
+ ret = parse_int(it.handle, &height_cm);
+ else
+ skip(it.handle);
+
+ if (ret)
+ return ret;
+ }
+
+ /* 1cm = 10mm */
+ *width_mm = 10 * width_cm;
+ *height_mm = 10 * height_cm;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/apple/parser.h b/drivers/gpu/drm/apple/parser.h
new file mode 100644
index 000000000000..a2d479258ed0
--- /dev/null
+++ b/drivers/gpu/drm/apple/parser.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#ifndef __APPLE_DCP_PARSER_H__
+#define __APPLE_DCP_PARSER_H__
+
+/* For mode parsing */
+#include <drm/drm_modes.h>
+
+struct dcp_parse_ctx {
+ void *blob;
+ u32 pos, len;
+};
+
+/*
+ * Represents a single display mode. These mode objects are populated at
+ * runtime based on the TimingElements dictionary sent by the DCP.
+ */
+struct dcp_display_mode {
+ struct drm_display_mode mode;
+ u32 color_mode_id;
+ u32 timing_mode_id;
+};
+
+int parse(void *blob, size_t size, struct dcp_parse_ctx *ctx);
+struct dcp_display_mode *enumerate_modes(struct dcp_parse_ctx *handle,
+ unsigned int *count, int width_mm,
+ int height_mm, unsigned notch_height);
+int parse_display_attributes(struct dcp_parse_ctx *handle, int *width_mm,
+ int *height_mm);
+
+#endif
diff --git a/drivers/gpu/drm/apple/trace.c b/drivers/gpu/drm/apple/trace.c
new file mode 100644
index 000000000000..6f40d5a583df
--- /dev/null
+++ b/drivers/gpu/drm/apple/trace.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Tracepoints for Apple DCP driver
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/gpu/drm/apple/trace.h b/drivers/gpu/drm/apple/trace.h
new file mode 100644
index 000000000000..d6a4742fcf47
--- /dev/null
+++ b/drivers/gpu/drm/apple/trace.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (C) The Asahi Linux Contributors */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dcp
+
+#if !defined(_TRACE_DCP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DCP_H
+
+#include "dcp-internal.h"
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#define show_dcp_endpoint(ep) \
+ __print_symbolic(ep, { SYSTEM_ENDPOINT, "system" }, \
+ { TEST_ENDPOINT, "test" }, \
+ { DCP_EXPERT_ENDPOINT, "dcpexpert" }, \
+ { DISP0_ENDPOINT, "disp0" }, \
+ { DPTX_ENDPOINT, "dptxport" }, \
+ { HDCP_ENDPOINT, "hdcp" }, \
+ { REMOTE_ALLOC_ENDPOINT, "remotealloc" }, \
+ { IOMFB_ENDPOINT, "iomfb" })
+
+TRACE_EVENT(dcp_recv_msg,
+ TP_PROTO(struct apple_dcp *dcp, u8 endpoint, u64 message),
+ TP_ARGS(dcp, endpoint, message),
+
+ TP_STRUCT__entry(__string(devname, dev_name(dcp->dev))
+ __field(u8, endpoint)
+ __field(u64, message)),
+
+ TP_fast_assign(__assign_str(devname, dev_name(dcp->dev));
+ __entry->endpoint = endpoint;
+ __entry->message = message;),
+
+ TP_printk("%s: endpoint 0x%x (%s): received message 0x%016llx",
+ __get_str(devname), __entry->endpoint,
+ show_dcp_endpoint(__entry->endpoint), __entry->message));
+
+TRACE_EVENT(dcp_send_msg,
+ TP_PROTO(struct apple_dcp *dcp, u8 endpoint, u64 message),
+ TP_ARGS(dcp, endpoint, message),
+
+ TP_STRUCT__entry(__string(devname, dev_name(dcp->dev))
+ __field(u8, endpoint)
+ __field(u64, message)),
+
+ TP_fast_assign(__assign_str(devname, dev_name(dcp->dev));
+ __entry->endpoint = endpoint;
+ __entry->message = message;),
+
+ TP_printk("%s: endpoint 0x%x (%s): will send message 0x%016llx",
+ __get_str(devname), __entry->endpoint,
+ show_dcp_endpoint(__entry->endpoint), __entry->message));
+
+TRACE_EVENT(iomfb_callback,
+ TP_PROTO(struct apple_dcp *dcp, int tag, const char *name),
+ TP_ARGS(dcp, tag, name),
+
+ TP_STRUCT__entry(
+ __string(devname, dev_name(dcp->dev))
+ __field(int, tag)
+ __field(const char *, name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(devname, dev_name(dcp->dev));
+ __entry->tag = tag; __entry->name = name;
+ ),
+
+ TP_printk("%s: Callback D%03d %s", __get_str(devname), __entry->tag,
+ __entry->name));
+
+TRACE_EVENT(iomfb_push,
+ TP_PROTO(struct apple_dcp *dcp,
+ const struct dcp_method_entry *method, int context,
+ int offset, int depth),
+ TP_ARGS(dcp, method, context, offset, depth),
+
+ TP_STRUCT__entry(
+ __string(devname, dev_name(dcp->dev))
+ __string(name, method->name)
+ __field(int, context)
+ __field(int, offset)
+ __field(int, depth)),
+
+ TP_fast_assign(
+ __assign_str(devname, dev_name(dcp->dev));
+ __assign_str(name, method->name);
+ __entry->context = context; __entry->offset = offset;
+ __entry->depth = depth;
+ ),
+
+ TP_printk("%s: Method %s: context %u, offset %u, depth %u",
+ __get_str(devname), __get_str(name), __entry->context,
+ __entry->offset, __entry->depth));
+
+TRACE_EVENT(iomfb_swap_submit,
+ TP_PROTO(struct apple_dcp *dcp, u32 swap_id),
+ TP_ARGS(dcp, swap_id),
+ TP_STRUCT__entry(
+ __field(u64, dcp)
+ __field(u32, swap_id)
+ ),
+ TP_fast_assign(
+ __entry->dcp = (u64)dcp;
+ __entry->swap_id = swap_id;
+ ),
+ TP_printk("dcp=%llx, swap_id=%d",
+ __entry->dcp,
+ __entry->swap_id)
+);
+
+TRACE_EVENT(iomfb_swap_complete,
+ TP_PROTO(struct apple_dcp *dcp, u32 swap_id),
+ TP_ARGS(dcp, swap_id),
+ TP_STRUCT__entry(
+ __field(u64, dcp)
+ __field(u32, swap_id)
+ ),
+ TP_fast_assign(
+ __entry->dcp = (u64)dcp;
+ __entry->swap_id = swap_id;
+ ),
+ TP_printk("dcp=%llx, swap_id=%d",
+ __entry->dcp,
+ __entry->swap_id
+ )
+);
+
+TRACE_EVENT(iomfb_swap_complete_intent_gated,
+ TP_PROTO(struct apple_dcp *dcp, u32 swap_id, u32 width, u32 height),
+ TP_ARGS(dcp, swap_id, width, height),
+ TP_STRUCT__entry(
+ __field(u64, dcp)
+ __field(u32, swap_id)
+ __field(u32, width)
+ __field(u32, height)
+ ),
+ TP_fast_assign(
+ __entry->dcp = (u64)dcp;
+ __entry->swap_id = swap_id;
+ __entry->height = height;
+ __entry->width = width;
+ ),
+ TP_printk("dcp=%llx, swap_id=%u %ux%u",
+ __entry->dcp,
+ __entry->swap_id,
+ __entry->width,
+ __entry->height
+ )
+);
+
+#endif /* _TRACE_DCP_H */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#include <trace/define_trace.h>