Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/x86/apple.c | 11
-rw-r--r-- drivers/android/Kconfig | 6
-rw-r--r-- drivers/android/Makefile | 2
-rw-r--r-- drivers/android/allocation.rs | 266
-rw-r--r-- drivers/android/context.rs | 80
-rw-r--r-- drivers/android/defs.rs | 99
-rw-r--r-- drivers/android/node.rs | 476
-rw-r--r-- drivers/android/process.rs | 961
-rw-r--r-- drivers/android/range_alloc.rs | 189
-rw-r--r-- drivers/android/rust_binder.rs | 106
-rw-r--r-- drivers/android/thread.rs | 871
-rw-r--r-- drivers/android/transaction.rs | 326
-rw-r--r-- drivers/base/firmware_loader/main.c | 2
-rw-r--r-- drivers/bluetooth/Kconfig | 12
-rw-r--r-- drivers/bluetooth/Makefile | 1
-rw-r--r-- drivers/bluetooth/hci_bcm4377.c | 2513
-rw-r--r-- drivers/char/hw_random/Kconfig | 13
-rw-r--r-- drivers/char/hw_random/Makefile | 1
-rw-r--r-- drivers/char/hw_random/bcm2835_rng_rust.rs | 62
-rw-r--r-- drivers/cpufreq/Kconfig.arm | 9
-rw-r--r-- drivers/cpufreq/Makefile | 1
-rw-r--r-- drivers/cpufreq/apple-soc-cpufreq.c | 352
-rw-r--r-- drivers/cpufreq/cpufreq-dt-platdev.c | 2
-rw-r--r-- drivers/cpufreq/mediatek-cpufreq-hw.c | 14
-rw-r--r-- drivers/dma/apple-admac.c | 103
-rw-r--r-- drivers/gpio/Kconfig | 19
-rw-r--r-- drivers/gpio/Makefile | 2
-rw-r--r-- drivers/gpio/gpio-macsmc.c | 388
-rw-r--r-- drivers/gpio/gpio_pl061_rust.rs | 367
-rw-r--r-- drivers/gpu/drm/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/Makefile | 1
-rw-r--r-- drivers/gpu/drm/apple/.gitignore | 1
-rw-r--r-- drivers/gpu/drm/apple/Kconfig | 11
-rw-r--r-- drivers/gpu/drm/apple/Makefile | 29
-rw-r--r-- drivers/gpu/drm/apple/apple_drv.c | 501
-rw-r--r-- drivers/gpu/drm/apple/dcp-internal.h | 150
-rw-r--r-- drivers/gpu/drm/apple/dcp.c | 427
-rw-r--r-- drivers/gpu/drm/apple/dcp.h | 59
-rw-r--r-- drivers/gpu/drm/apple/dummy-piodma.c | 31
-rw-r--r-- drivers/gpu/drm/apple/iomfb.c | 1689
-rw-r--r-- drivers/gpu/drm/apple/iomfb.h | 406
-rw-r--r-- drivers/gpu/drm/apple/parser.c | 459
-rw-r--r-- drivers/gpu/drm/apple/parser.h | 32
-rw-r--r-- drivers/gpu/drm/apple/trace.c | 9
-rw-r--r-- drivers/gpu/drm/apple/trace.h | 166
-rw-r--r-- drivers/gpu/drm/tiny/Kconfig | 8
-rw-r--r-- drivers/gpu/drm/tiny/simpledrm.c | 40
-rw-r--r-- drivers/hid/Kconfig | 12
-rw-r--r-- drivers/hid/Makefile | 4
-rw-r--r-- drivers/hid/dockchannel-hid/Kconfig | 14
-rw-r--r-- drivers/hid/dockchannel-hid/Makefile | 6
-rw-r--r-- drivers/hid/dockchannel-hid/dockchannel-hid.c | 1152
-rw-r--r-- drivers/hid/hid-apple.c | 64
-rw-r--r-- drivers/hid/hid-core.c | 6
-rw-r--r-- drivers/hid/hid-ids.h | 8
-rw-r--r-- drivers/hid/hid-magicmouse.c | 334
-rw-r--r-- drivers/hid/spi-hid/Kconfig | 26
-rw-r--r-- drivers/hid/spi-hid/Makefile | 10
-rw-r--r-- drivers/hid/spi-hid/spi-hid-apple-core.c | 1029
-rw-r--r-- drivers/hid/spi-hid/spi-hid-apple-of.c | 138
-rw-r--r-- drivers/hid/spi-hid/spi-hid-apple.h | 31
-rw-r--r-- drivers/i2c/busses/i2c-pasemi-core.c | 131
-rw-r--r-- drivers/i2c/busses/i2c-pasemi-core.h | 5
-rw-r--r-- drivers/i2c/busses/i2c-pasemi-platform.c | 6
-rw-r--r-- drivers/input/misc/Kconfig | 12
-rw-r--r-- drivers/input/misc/Makefile | 1
-rw-r--r-- drivers/input/misc/macsmc-hid.c | 196
-rw-r--r-- drivers/iommu/Kconfig | 1
-rw-r--r-- drivers/iommu/apple-dart.c | 672
-rw-r--r-- drivers/iommu/dma-iommu.c | 12
-rw-r--r-- drivers/iommu/io-pgtable-dart.c | 65
-rw-r--r-- drivers/iommu/io-pgtable-dart.h | 15
-rw-r--r-- drivers/iommu/iommu.c | 18
-rw-r--r-- drivers/iommu/of_iommu.c | 116
-rw-r--r-- drivers/mailbox/apple-mailbox.c | 42
-rw-r--r-- drivers/mailbox/mailbox.c | 20
-rw-r--r-- drivers/mfd/Kconfig | 28
-rw-r--r-- drivers/mfd/Makefile | 1
-rw-r--r-- drivers/mfd/simple-mfd-spmi.c | 49
-rw-r--r-- drivers/mmc/host/sdhci-pci-core.c | 17
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/acpi.c | 52
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h | 11
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 218
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c | 33
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c | 122
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h | 11
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c | 49
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h | 6
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h | 159
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c | 85
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h | 1
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c | 7
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 284
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h | 5
-rw-r--r-- drivers/nvme/host/apple.c | 70
-rw-r--r-- drivers/nvmem/Kconfig | 13
-rw-r--r-- drivers/nvmem/Makefile | 2
-rw-r--r-- drivers/nvmem/spmi-mfd-nvmem.c | 99
-rw-r--r-- drivers/of/address.c | 2
-rw-r--r-- drivers/pci/controller/Kconfig | 1
-rw-r--r-- drivers/pci/controller/pcie-apple.c | 62
-rw-r--r-- drivers/perf/apple_m1_cpu_pmu.c | 14
-rw-r--r-- drivers/platform/Kconfig | 2
-rw-r--r-- drivers/platform/Makefile | 1
-rw-r--r-- drivers/platform/apple/Kconfig | 49
-rw-r--r-- drivers/platform/apple/Makefile | 11
-rw-r--r-- drivers/platform/apple/smc.h | 28
-rw-r--r-- drivers/platform/apple/smc_core.c | 249
-rw-r--r-- drivers/platform/apple/smc_rtkit.c | 452
-rw-r--r-- drivers/power/reset/Kconfig | 12
-rw-r--r-- drivers/power/reset/Makefile | 1
-rw-r--r-- drivers/power/reset/macsmc-reboot.c | 336
-rw-r--r-- drivers/power/supply/Kconfig | 7
-rw-r--r-- drivers/power/supply/Makefile | 1
-rw-r--r-- drivers/power/supply/macsmc_power.c | 516
-rw-r--r-- drivers/pwm/Kconfig | 12
-rw-r--r-- drivers/pwm/Makefile | 1
-rw-r--r-- drivers/pwm/pwm-apple.c | 127
-rw-r--r-- drivers/rtc/Kconfig | 13
-rw-r--r-- drivers/rtc/Makefile | 1
-rw-r--r-- drivers/rtc/rtc-macsmc.c | 130
-rw-r--r-- drivers/soc/apple/Kconfig | 24
-rw-r--r-- drivers/soc/apple/Makefile | 6
-rw-r--r-- drivers/soc/apple/dockchannel.c | 407
-rw-r--r-- drivers/soc/apple/rtkit-helper.c | 147
-rw-r--r-- drivers/soc/apple/rtkit.c | 86
-rw-r--r-- drivers/spi/Kconfig | 8
-rw-r--r-- drivers/spi/Makefile | 1
-rw-r--r-- drivers/spi/spi-apple.c | 544
-rw-r--r-- drivers/spi/spi.c | 21
-rw-r--r-- drivers/spmi/Kconfig | 8
-rw-r--r-- drivers/spmi/Makefile | 1
-rw-r--r-- drivers/spmi/spmi-apple-controller.c | 223
-rw-r--r-- drivers/tty/serial/samsung_tty.c | 92
-rw-r--r-- drivers/usb/dwc3/core.c | 41
-rw-r--r-- drivers/usb/dwc3/core.h | 6
-rw-r--r-- drivers/usb/dwc3/drd.c | 7
-rw-r--r-- drivers/usb/host/Kconfig | 13
-rw-r--r-- drivers/usb/host/Makefile | 4
-rw-r--r-- drivers/usb/host/xhci-pci-asmedia.c | 272
-rw-r--r-- drivers/usb/host/xhci-pci-core.c (renamed from drivers/usb/host/xhci-pci.c) | 24
-rw-r--r-- drivers/usb/host/xhci-pci-renesas.c | 4
-rw-r--r-- drivers/usb/host/xhci-pci.h | 15
-rw-r--r-- drivers/usb/host/xhci.h | 1
-rw-r--r-- drivers/usb/typec/tipd/core.c | 34
146 files changed, 20321 insertions, 441 deletions
diff --git a/drivers/acpi/x86/apple.c b/drivers/acpi/x86/apple.c
index 8812ecd03d55..45d0f16f374f 100644
--- a/drivers/acpi/x86/apple.c
+++ b/drivers/acpi/x86/apple.c
@@ -71,13 +71,16 @@ void acpi_extract_apple_properties(struct acpi_device *adev)
if (key->type != ACPI_TYPE_STRING ||
(val->type != ACPI_TYPE_INTEGER &&
- val->type != ACPI_TYPE_BUFFER))
+ val->type != ACPI_TYPE_BUFFER &&
+ val->type != ACPI_TYPE_STRING))
continue; /* skip invalid properties */
__set_bit(i, valid);
newsize += key->string.length + 1;
if (val->type == ACPI_TYPE_BUFFER)
newsize += val->buffer.length;
+ else if (val->type == ACPI_TYPE_STRING)
+ newsize += val->string.length + 1;
}
numvalid = bitmap_weight(valid, numprops);
@@ -119,6 +122,12 @@ void acpi_extract_apple_properties(struct acpi_device *adev)
newprops[v].type = val->type;
if (val->type == ACPI_TYPE_INTEGER) {
newprops[v].integer.value = val->integer.value;
+ } else if (val->type == ACPI_TYPE_STRING) {
+ newprops[v].string.length = val->string.length;
+ newprops[v].string.pointer = free_space;
+ memcpy(free_space, val->string.pointer,
+ val->string.length);
+ free_space += val->string.length + 1;
} else {
newprops[v].buffer.length = val->buffer.length;
newprops[v].buffer.pointer = free_space;
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 07aa8ae0a058..37f7080807c5 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -13,6 +13,12 @@ config ANDROID_BINDER_IPC
Android process, using Binder to identify, invoke and pass arguments
between said processes.
+config ANDROID_BINDER_IPC_RUST
+ bool "Android Binder IPC Driver in Rust"
+ depends on MMU && RUST
+ help
+ Implementation of the Binder IPC in Rust.
+
config ANDROID_BINDERFS
bool "Android Binderfs filesystem"
depends on ANDROID_BINDER_IPC
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index c9d3d0c99c25..c428f2ce2f05 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -4,3 +4,5 @@ ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_ANDROID_BINDERFS) += binderfs.o
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
+
+obj-$(CONFIG_ANDROID_BINDER_IPC_RUST) += rust_binder.o
diff --git a/drivers/android/allocation.rs b/drivers/android/allocation.rs
new file mode 100644
index 000000000000..5c3261a69f44
--- /dev/null
+++ b/drivers/android/allocation.rs
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::mem::{replace, size_of, MaybeUninit};
+use kernel::{
+ bindings, linked_list::List, pages::Pages, prelude::*, sync::Arc, user_ptr::UserSlicePtrReader,
+};
+
+use crate::{
+ defs::*,
+ node::NodeRef,
+ process::{AllocationInfo, Process},
+ thread::{BinderError, BinderResult},
+ transaction::FileInfo,
+};
+
+pub(crate) struct Allocation<'a> {
+ pub(crate) offset: usize,
+ size: usize,
+ pub(crate) ptr: usize,
+ pages: Arc<[Pages<0>]>,
+ pub(crate) process: &'a Process,
+ allocation_info: Option<AllocationInfo>,
+ free_on_drop: bool,
+ file_list: List<Box<FileInfo>>,
+}
+
+impl<'a> Allocation<'a> {
+ pub(crate) fn new(
+ process: &'a Process,
+ offset: usize,
+ size: usize,
+ ptr: usize,
+ pages: Arc<[Pages<0>]>,
+ ) -> Self {
+ Self {
+ process,
+ offset,
+ size,
+ ptr,
+ pages,
+ allocation_info: None,
+ free_on_drop: true,
+ file_list: List::new(),
+ }
+ }
+
+ pub(crate) fn take_file_list(&mut self) -> List<Box<FileInfo>> {
+ replace(&mut self.file_list, List::new())
+ }
+
+ pub(crate) fn add_file_info(&mut self, file: Box<FileInfo>) {
+ self.file_list.push_back(file);
+ }
+
+ fn iterate<T>(&self, mut offset: usize, mut size: usize, mut cb: T) -> Result
+ where
+ T: FnMut(&Pages<0>, usize, usize) -> Result,
+ {
+ // Check that the request is within the buffer.
+ if offset.checked_add(size).ok_or(EINVAL)? > self.size {
+ return Err(EINVAL);
+ }
+ offset += self.offset;
+ let mut page_index = offset >> bindings::PAGE_SHIFT;
+ offset &= (1 << bindings::PAGE_SHIFT) - 1;
+ while size > 0 {
+ let available = core::cmp::min(size, (1 << bindings::PAGE_SHIFT) as usize - offset);
+ cb(&self.pages[page_index], offset, available)?;
+ size -= available;
+ page_index += 1;
+ offset = 0;
+ }
+ Ok(())
+ }
+
+ pub(crate) fn copy_into(
+ &self,
+ reader: &mut UserSlicePtrReader,
+ offset: usize,
+ size: usize,
+ ) -> Result {
+ self.iterate(offset, size, |page, offset, to_copy| {
+ page.copy_into_page(reader, offset, to_copy)
+ })
+ }
+
+ pub(crate) fn read<T>(&self, offset: usize) -> Result<T> {
+ let mut out = MaybeUninit::<T>::uninit();
+ let mut out_offset = 0;
+ self.iterate(offset, size_of::<T>(), |page, offset, to_copy| {
+ // SAFETY: Data buffer is allocated on the stack.
+ unsafe {
+ page.read(
+ (out.as_mut_ptr() as *mut u8).add(out_offset),
+ offset,
+ to_copy,
+ )
+ }?;
+ out_offset += to_copy;
+ Ok(())
+ })?;
+ // SAFETY: We just initialised the data.
+ Ok(unsafe { out.assume_init() })
+ }
+
+ pub(crate) fn write<T>(&self, offset: usize, obj: &T) -> Result {
+ let mut obj_offset = 0;
+ self.iterate(offset, size_of::<T>(), |page, offset, to_copy| {
+ // SAFETY: The sum of `offset` and `to_copy` is bounded by the size of T.
+ let obj_ptr = unsafe { (obj as *const T as *const u8).add(obj_offset) };
+ // SAFETY: We have a reference to the object, so the pointer is valid.
+ unsafe { page.write(obj_ptr, offset, to_copy) }?;
+ obj_offset += to_copy;
+ Ok(())
+ })
+ }
+
+ pub(crate) fn keep_alive(mut self) {
+ self.process
+ .buffer_make_freeable(self.offset, self.allocation_info.take());
+ self.free_on_drop = false;
+ }
+
+ pub(crate) fn set_info(&mut self, info: AllocationInfo) {
+ self.allocation_info = Some(info);
+ }
+}
+
+impl Drop for Allocation<'_> {
+ fn drop(&mut self) {
+ if !self.free_on_drop {
+ return;
+ }
+
+ if let Some(info) = &self.allocation_info {
+ let offsets = info.offsets.clone();
+ let view = AllocationView::new(self, offsets.start);
+ for i in offsets.step_by(size_of::<usize>()) {
+ if view.cleanup_object(i).is_err() {
+ pr_warn!("Error cleaning up object at offset {}\n", i)
+ }
+ }
+ }
+
+ self.process.buffer_raw_free(self.ptr);
+ }
+}
+
+pub(crate) struct AllocationView<'a, 'b> {
+ pub(crate) alloc: &'a mut Allocation<'b>,
+ limit: usize,
+}
+
+impl<'a, 'b> AllocationView<'a, 'b> {
+ pub(crate) fn new(alloc: &'a mut Allocation<'b>, limit: usize) -> Self {
+ AllocationView { alloc, limit }
+ }
+
+ pub(crate) fn read<T>(&self, offset: usize) -> Result<T> {
+ if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
+ return Err(EINVAL);
+ }
+ self.alloc.read(offset)
+ }
+
+ pub(crate) fn write<T>(&self, offset: usize, obj: &T) -> Result {
+ if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
+ return Err(EINVAL);
+ }
+ self.alloc.write(offset, obj)
+ }
+
+ pub(crate) fn transfer_binder_object<T>(
+ &self,
+ offset: usize,
+ strong: bool,
+ get_node: T,
+ ) -> BinderResult
+ where
+ T: FnOnce(&bindings::flat_binder_object) -> BinderResult<NodeRef>,
+ {
+ // TODO: Do we want this function to take a &mut self?
+ let obj = self.read::<bindings::flat_binder_object>(offset)?;
+ let node_ref = get_node(&obj)?;
+
+ if core::ptr::eq(&*node_ref.node.owner, self.alloc.process) {
+ // The receiving process is the owner of the node, so send it a binder object (instead
+ // of a handle).
+ let (ptr, cookie) = node_ref.node.get_id();
+ let newobj = bindings::flat_binder_object {
+ hdr: bindings::binder_object_header {
+ type_: if strong {
+ BINDER_TYPE_BINDER
+ } else {
+ BINDER_TYPE_WEAK_BINDER
+ },
+ },
+ flags: obj.flags,
+ __bindgen_anon_1: bindings::flat_binder_object__bindgen_ty_1 { binder: ptr as _ },
+ cookie: cookie as _,
+ };
+ self.write(offset, &newobj)?;
+
+ // Increment the user ref count on the node. It will be decremented as part of the
+ // destruction of the buffer, when we see a binder or weak-binder object.
+ node_ref.node.update_refcount(true, strong);
+ } else {
+ // The receiving process is different from the owner, so we need to insert a handle to
+ // the binder object.
+ let handle = self
+ .alloc
+ .process
+ .insert_or_update_handle(node_ref, false)?;
+
+ let newobj = bindings::flat_binder_object {
+ hdr: bindings::binder_object_header {
+ type_: if strong {
+ BINDER_TYPE_HANDLE
+ } else {
+ BINDER_TYPE_WEAK_HANDLE
+ },
+ },
+ flags: obj.flags,
+ // TODO: To avoid padding, we write to `binder` instead of `handle` here. We need a
+ // better solution though.
+ __bindgen_anon_1: bindings::flat_binder_object__bindgen_ty_1 {
+ binder: handle as _,
+ },
+ ..bindings::flat_binder_object::default()
+ };
+ if self.write(offset, &newobj).is_err() {
+ // Decrement ref count on the handle we just created.
+ let _ = self.alloc.process.update_ref(handle, false, strong);
+ return Err(BinderError::new_failed());
+ }
+ }
+ Ok(())
+ }
+
+ fn cleanup_object(&self, index_offset: usize) -> Result {
+ let offset = self.alloc.read(index_offset)?;
+ let header = self.read::<bindings::binder_object_header>(offset)?;
+ // TODO: Handle other types.
+ match header.type_ {
+ BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => {
+ let obj = self.read::<bindings::flat_binder_object>(offset)?;
+ let strong = header.type_ == BINDER_TYPE_BINDER;
+ // SAFETY: The type is `BINDER_TYPE_{WEAK_}BINDER`, so the `binder` field is
+ // populated.
+ let ptr = unsafe { obj.__bindgen_anon_1.binder } as usize;
+ let cookie = obj.cookie as usize;
+ self.alloc.process.update_node(ptr, cookie, strong, false);
+ Ok(())
+ }
+ BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => {
+ let obj = self.read::<bindings::flat_binder_object>(offset)?;
+ let strong = header.type_ == BINDER_TYPE_HANDLE;
+ // SAFETY: The type is `BINDER_TYPE_{WEAK_}HANDLE`, so the `handle` field is
+ // populated.
+ let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
+ self.alloc.process.update_ref(handle, false, strong)
+ }
+ _ => Ok(()),
+ }
+ }
+}
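
An aside on `iterate` above: it turns an (offset, size) request into per-page chunks, and `read`, `write`, and `copy_into` stitch the chunks back together. Here is a standalone model of that arithmetic, a sketch in plain userspace Rust assuming 4 KiB pages; the `chunks` name is invented for illustration and is not part of the patch:

    const PAGE_SHIFT: usize = 12; // assumed 4 KiB pages
    const PAGE_SIZE: usize = 1 << PAGE_SHIFT;

    /// Splits (offset, size) into (page_index, in_page_offset, len) chunks,
    /// mirroring the loop in `Allocation::iterate`.
    fn chunks(mut offset: usize, mut size: usize) -> Vec<(usize, usize, usize)> {
        let mut out = Vec::new();
        let mut page_index = offset >> PAGE_SHIFT;
        offset &= PAGE_SIZE - 1;
        while size > 0 {
            let available = core::cmp::min(size, PAGE_SIZE - offset);
            out.push((page_index, offset, available));
            size -= available;
            page_index += 1;
            offset = 0; // every page after the first is used from its start
        }
        out
    }

For example, `chunks(4090, 16)` yields `[(0, 4090, 6), (1, 0, 10)]`: six bytes at the tail of page 0, the remaining ten at the head of page 1.
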
diff --git a/drivers/android/context.rs b/drivers/android/context.rs
new file mode 100644
index 000000000000..ecb48a6bcb44
--- /dev/null
+++ b/drivers/android/context.rs
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::{
+ bindings,
+ prelude::*,
+ security,
+ sync::{Arc, Mutex, UniqueArc},
+};
+
+use crate::{
+ node::NodeRef,
+ thread::{BinderError, BinderResult},
+};
+
+struct Manager {
+ node: Option<NodeRef>,
+ uid: Option<bindings::kuid_t>,
+}
+
+pub(crate) struct Context {
+ manager: Mutex<Manager>,
+}
+
+#[allow(clippy::non_send_fields_in_send_ty)]
+unsafe impl Send for Context {}
+unsafe impl Sync for Context {}
+
+impl Context {
+ pub(crate) fn new() -> Result<Arc<Self>> {
+ let mut ctx = Pin::from(UniqueArc::try_new(Self {
+ // SAFETY: Init is called below.
+ manager: unsafe {
+ Mutex::new(Manager {
+ node: None,
+ uid: None,
+ })
+ },
+ })?);
+
+ // SAFETY: `manager` is also pinned when `ctx` is.
+ let manager = unsafe { ctx.as_mut().map_unchecked_mut(|c| &mut c.manager) };
+ kernel::mutex_init!(manager, "Context::manager");
+
+ Ok(ctx.into())
+ }
+
+ pub(crate) fn set_manager_node(&self, node_ref: NodeRef) -> Result {
+ let mut manager = self.manager.lock();
+ if manager.node.is_some() {
+ return Err(EBUSY);
+ }
+ security::binder_set_context_mgr(&node_ref.node.owner.cred)?;
+
+ // TODO: Get the actual caller id.
+ let caller_uid = bindings::kuid_t::default();
+ if let Some(ref uid) = manager.uid {
+ if uid.val != caller_uid.val {
+ return Err(EPERM);
+ }
+ }
+
+ manager.node = Some(node_ref);
+ manager.uid = Some(caller_uid);
+ Ok(())
+ }
+
+ pub(crate) fn unset_manager_node(&self) {
+ let node_ref = self.manager.lock().node.take();
+ drop(node_ref);
+ }
+
+ pub(crate) fn get_manager_node(&self, strong: bool) -> BinderResult<NodeRef> {
+ self.manager
+ .lock()
+ .node
+ .as_ref()
+ .ok_or_else(BinderError::new_dead)?
+ .clone(strong)
+ }
+}
diff --git a/drivers/android/defs.rs b/drivers/android/defs.rs
new file mode 100644
index 000000000000..925e751a2564
--- /dev/null
+++ b/drivers/android/defs.rs
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::ops::{Deref, DerefMut};
+use kernel::{
+ bindings,
+ bindings::*,
+ io_buffer::{ReadableFromBytes, WritableToBytes},
+};
+
+macro_rules! pub_no_prefix {
+ ($prefix:ident, $($newname:ident),+) => {
+ $(pub(crate) const $newname: u32 = kernel::macros::concat_idents!($prefix, $newname);)+
+ };
+}
+
+pub_no_prefix!(
+ binder_driver_return_protocol_,
+ BR_OK,
+ BR_ERROR,
+ BR_TRANSACTION,
+ BR_REPLY,
+ BR_DEAD_REPLY,
+ BR_TRANSACTION_COMPLETE,
+ BR_INCREFS,
+ BR_ACQUIRE,
+ BR_RELEASE,
+ BR_DECREFS,
+ BR_NOOP,
+ BR_SPAWN_LOOPER,
+ BR_DEAD_BINDER,
+ BR_CLEAR_DEATH_NOTIFICATION_DONE,
+ BR_FAILED_REPLY
+);
+
+pub_no_prefix!(
+ binder_driver_command_protocol_,
+ BC_TRANSACTION,
+ BC_REPLY,
+ BC_FREE_BUFFER,
+ BC_INCREFS,
+ BC_ACQUIRE,
+ BC_RELEASE,
+ BC_DECREFS,
+ BC_INCREFS_DONE,
+ BC_ACQUIRE_DONE,
+ BC_REGISTER_LOOPER,
+ BC_ENTER_LOOPER,
+ BC_EXIT_LOOPER,
+ BC_REQUEST_DEATH_NOTIFICATION,
+ BC_CLEAR_DEATH_NOTIFICATION,
+ BC_DEAD_BINDER_DONE
+);
+
+pub_no_prefix!(transaction_flags_, TF_ONE_WAY, TF_ACCEPT_FDS);
+
+pub(crate) use bindings::{
+ BINDER_TYPE_BINDER, BINDER_TYPE_FD, BINDER_TYPE_HANDLE, BINDER_TYPE_WEAK_BINDER,
+ BINDER_TYPE_WEAK_HANDLE, FLAT_BINDER_FLAG_ACCEPTS_FDS,
+};
+
+macro_rules! decl_wrapper {
+ ($newname:ident, $wrapped:ty) => {
+ #[derive(Copy, Clone, Default)]
+ pub(crate) struct $newname($wrapped);
+
+ // TODO: This must be justified by inspecting the type, so should live outside the macro or
+ // the macro should be somehow marked unsafe.
+ unsafe impl ReadableFromBytes for $newname {}
+ unsafe impl WritableToBytes for $newname {}
+
+ impl Deref for $newname {
+ type Target = $wrapped;
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+ }
+
+ impl DerefMut for $newname {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.0
+ }
+ }
+ };
+}
+
+decl_wrapper!(BinderNodeDebugInfo, bindings::binder_node_debug_info);
+decl_wrapper!(BinderNodeInfoForRef, bindings::binder_node_info_for_ref);
+decl_wrapper!(FlatBinderObject, bindings::flat_binder_object);
+decl_wrapper!(BinderTransactionData, bindings::binder_transaction_data);
+decl_wrapper!(BinderWriteRead, bindings::binder_write_read);
+decl_wrapper!(BinderVersion, bindings::binder_version);
+
+impl BinderVersion {
+ pub(crate) fn current() -> Self {
+ Self(bindings::binder_version {
+ protocol_version: bindings::BINDER_CURRENT_PROTOCOL_VERSION as _,
+ })
+ }
+}
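
For readers unfamiliar with the macro above, here is a hand-expanded form of `decl_wrapper!(BinderVersion, bindings::binder_version)`; this is only an illustration of what the macro emits, not extra code in the patch:

    #[derive(Copy, Clone, Default)]
    pub(crate) struct BinderVersion(bindings::binder_version);

    // The macro marks every wrapper as plain bytes; as its TODO notes, this
    // really ought to be justified per type.
    unsafe impl ReadableFromBytes for BinderVersion {}
    unsafe impl WritableToBytes for BinderVersion {}

    impl Deref for BinderVersion {
        type Target = bindings::binder_version;
        fn deref(&self) -> &Self::Target {
            &self.0
        }
    }

    impl DerefMut for BinderVersion {
        fn deref_mut(&mut self) -> &mut Self::Target {
            &mut self.0
        }
    }

The `Deref`/`DerefMut` impls are what let the driver treat the wrapper interchangeably with the raw `bindings::binder_version` struct when moving it through userspace buffers.
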
diff --git a/drivers/android/node.rs b/drivers/android/node.rs
new file mode 100644
index 000000000000..ce4367a78436
--- /dev/null
+++ b/drivers/android/node.rs
@@ -0,0 +1,476 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::sync::atomic::{AtomicU64, Ordering};
+use kernel::{
+ io_buffer::IoBufferWriter,
+ linked_list::{GetLinks, Links, List},
+ prelude::*,
+ sync::{Arc, Guard, LockedBy, Mutex, SpinLock},
+ user_ptr::UserSlicePtrWriter,
+};
+
+use crate::{
+ defs::*,
+ process::{Process, ProcessInner},
+ thread::{BinderError, BinderResult, Thread},
+ DeliverToRead,
+};
+
+struct CountState {
+ count: usize,
+ has_count: bool,
+ is_biased: bool,
+}
+
+impl CountState {
+ fn new() -> Self {
+ Self {
+ count: 0,
+ has_count: false,
+ is_biased: false,
+ }
+ }
+
+ fn add_bias(&mut self) {
+ self.count += 1;
+ self.is_biased = true;
+ }
+}
+
+struct NodeInner {
+ strong: CountState,
+ weak: CountState,
+ death_list: List<Arc<NodeDeath>>,
+}
+
+struct NodeDeathInner {
+ dead: bool,
+ cleared: bool,
+ notification_done: bool,
+
+    /// Indicates whether the normal flow was interrupted by removing the handle. In this case,
+    /// we need to behave as if the death notification didn't exist (i.e., we don't deliver
+    /// anything to the user).
+ aborted: bool,
+}
+
+pub(crate) struct NodeDeath {
+ node: Arc<Node>,
+ process: Arc<Process>,
+ // TODO: Make this private.
+ pub(crate) cookie: usize,
+ work_links: Links<dyn DeliverToRead>,
+    // TODO: At the moment we're using this for two lists, which isn't safe because we want to
+    // remove from the list without knowing the list it's in. We need to separate this out.
+ death_links: Links<NodeDeath>,
+ inner: SpinLock<NodeDeathInner>,
+}
+
+impl NodeDeath {
+ /// Constructs a new node death notification object.
+ ///
+ /// # Safety
+ ///
+ /// The caller must call `NodeDeath::init` before using the notification object.
+ pub(crate) unsafe fn new(node: Arc<Node>, process: Arc<Process>, cookie: usize) -> Self {
+ Self {
+ node,
+ process,
+ cookie,
+ work_links: Links::new(),
+ death_links: Links::new(),
+ inner: unsafe {
+ SpinLock::new(NodeDeathInner {
+ dead: false,
+ cleared: false,
+ notification_done: false,
+ aborted: false,
+ })
+ },
+ }
+ }
+
+ pub(crate) fn init(self: Pin<&mut Self>) {
+ // SAFETY: `inner` is pinned when `self` is.
+ let inner = unsafe { self.map_unchecked_mut(|n| &mut n.inner) };
+ kernel::spinlock_init!(inner, "NodeDeath::inner");
+ }
+
+ /// Sets the cleared flag to `true`.
+ ///
+ /// It removes `self` from the node's death notification list if needed. It must only be called
+ /// once.
+ ///
+ /// Returns whether it needs to be queued.
+ pub(crate) fn set_cleared(self: &Arc<Self>, abort: bool) -> bool {
+ let (needs_removal, needs_queueing) = {
+ // Update state and determine if we need to queue a work item. We only need to do it
+ // when the node is not dead or if the user already completed the death notification.
+ let mut inner = self.inner.lock();
+ inner.cleared = true;
+ if abort {
+ inner.aborted = true;
+ }
+ (!inner.dead, !inner.dead || inner.notification_done)
+ };
+
+ // Remove death notification from node.
+ if needs_removal {
+ let mut owner_inner = self.node.owner.inner.lock();
+ let node_inner = self.node.inner.access_mut(&mut owner_inner);
+ unsafe { node_inner.death_list.remove(self) };
+ }
+
+ needs_queueing
+ }
+
+    /// Sets the 'notification done' flag to `true`, queueing the work item on the thread if the
+    /// notification has already been cleared.
+ pub(crate) fn set_notification_done(self: Arc<Self>, thread: &Thread) {
+ let needs_queueing = {
+ let mut inner = self.inner.lock();
+ inner.notification_done = true;
+ inner.cleared
+ };
+
+ if needs_queueing {
+ let _ = thread.push_work_if_looper(self);
+ }
+ }
+
+    /// Sets the 'dead' flag to `true` and queues the work item if needed.
+ pub(crate) fn set_dead(self: Arc<Self>) {
+ let needs_queueing = {
+ let mut inner = self.inner.lock();
+ if inner.cleared {
+ false
+ } else {
+ inner.dead = true;
+ true
+ }
+ };
+
+ if needs_queueing {
+ // Push the death notification to the target process. There is nothing else to do if
+ // it's already dead.
+ let process = self.process.clone();
+ let _ = process.push_work(self);
+ }
+ }
+}
+
+impl GetLinks for NodeDeath {
+ type EntryType = NodeDeath;
+ fn get_links(data: &NodeDeath) -> &Links<NodeDeath> {
+ &data.death_links
+ }
+}
+
+impl DeliverToRead for NodeDeath {
+ fn do_work(self: Arc<Self>, _thread: &Thread, writer: &mut UserSlicePtrWriter) -> Result<bool> {
+ let done = {
+ let inner = self.inner.lock();
+ if inner.aborted {
+ return Ok(true);
+ }
+ inner.cleared && (!inner.dead || inner.notification_done)
+ };
+
+ let cookie = self.cookie;
+ let cmd = if done {
+ BR_CLEAR_DEATH_NOTIFICATION_DONE
+ } else {
+ let process = self.process.clone();
+ let mut process_inner = process.inner.lock();
+ let inner = self.inner.lock();
+ if inner.aborted {
+ return Ok(true);
+ }
+ // We're still holding the inner lock, so it cannot be aborted while we insert it into
+ // the delivered list.
+ process_inner.death_delivered(self.clone());
+ BR_DEAD_BINDER
+ };
+
+ writer.write(&cmd)?;
+ writer.write(&cookie)?;
+
+ // Mimic the original code: we stop processing work items when we get to a death
+ // notification.
+ Ok(cmd != BR_DEAD_BINDER)
+ }
+
+ fn get_links(&self) -> &Links<dyn DeliverToRead> {
+ &self.work_links
+ }
+}
+
+pub(crate) struct Node {
+ pub(crate) global_id: u64,
+ ptr: usize,
+ cookie: usize,
+ pub(crate) flags: u32,
+ pub(crate) owner: Arc<Process>,
+ inner: LockedBy<NodeInner, Mutex<ProcessInner>>,
+ links: Links<dyn DeliverToRead>,
+}
+
+impl Node {
+ pub(crate) fn new(ptr: usize, cookie: usize, flags: u32, owner: Arc<Process>) -> Self {
+ static NEXT_ID: AtomicU64 = AtomicU64::new(1);
+ let inner = LockedBy::new(
+ &owner.inner,
+ NodeInner {
+ strong: CountState::new(),
+ weak: CountState::new(),
+ death_list: List::new(),
+ },
+ );
+ Self {
+ global_id: NEXT_ID.fetch_add(1, Ordering::Relaxed),
+ ptr,
+ cookie,
+ flags,
+ owner,
+ inner,
+ links: Links::new(),
+ }
+ }
+
+ pub(crate) fn get_id(&self) -> (usize, usize) {
+ (self.ptr, self.cookie)
+ }
+
+ pub(crate) fn next_death(
+ &self,
+ guard: &mut Guard<'_, Mutex<ProcessInner>>,
+ ) -> Option<Arc<NodeDeath>> {
+ self.inner.access_mut(guard).death_list.pop_front()
+ }
+
+ pub(crate) fn add_death(
+ &self,
+ death: Arc<NodeDeath>,
+ guard: &mut Guard<'_, Mutex<ProcessInner>>,
+ ) {
+ self.inner.access_mut(guard).death_list.push_back(death);
+ }
+
+ pub(crate) fn update_refcount_locked(
+ &self,
+ inc: bool,
+ strong: bool,
+ biased: bool,
+ owner_inner: &mut ProcessInner,
+ ) -> bool {
+ let inner = self.inner.access_from_mut(owner_inner);
+
+ // Get a reference to the state we'll update.
+ let state = if strong {
+ &mut inner.strong
+ } else {
+ &mut inner.weak
+ };
+
+ // Update biased state: if the count is not biased, there is nothing to do; otherwise,
+ // we're removing the bias, so mark the state as such.
+ if biased {
+ if !state.is_biased {
+ return false;
+ }
+
+ state.is_biased = false;
+ }
+
+ // Update the count and determine whether we need to push work.
+ // TODO: Here we may want to check the weak count being zero but the strong count being 1,
+ // because in such cases, we won't deliver anything to userspace, so we shouldn't queue
+ // either.
+ if inc {
+ state.count += 1;
+ !state.has_count
+ } else {
+ state.count -= 1;
+ state.count == 0 && state.has_count
+ }
+ }
+
+ pub(crate) fn update_refcount(self: &Arc<Self>, inc: bool, strong: bool) {
+ self.owner
+ .inner
+ .lock()
+ .update_node_refcount(self, inc, strong, false, None);
+ }
+
+ pub(crate) fn populate_counts(
+ &self,
+ out: &mut BinderNodeInfoForRef,
+ guard: &Guard<'_, Mutex<ProcessInner>>,
+ ) {
+ let inner = self.inner.access(guard);
+ out.strong_count = inner.strong.count as _;
+ out.weak_count = inner.weak.count as _;
+ }
+
+ pub(crate) fn populate_debug_info(
+ &self,
+ out: &mut BinderNodeDebugInfo,
+ guard: &Guard<'_, Mutex<ProcessInner>>,
+ ) {
+ out.ptr = self.ptr as _;
+ out.cookie = self.cookie as _;
+ let inner = self.inner.access(guard);
+ if inner.strong.has_count {
+ out.has_strong_ref = 1;
+ }
+ if inner.weak.has_count {
+ out.has_weak_ref = 1;
+ }
+ }
+
+ pub(crate) fn force_has_count(&self, guard: &mut Guard<'_, Mutex<ProcessInner>>) {
+ let inner = self.inner.access_mut(guard);
+ inner.strong.has_count = true;
+ inner.weak.has_count = true;
+ }
+
+ fn write(&self, writer: &mut UserSlicePtrWriter, code: u32) -> Result {
+ writer.write(&code)?;
+ writer.write(&self.ptr)?;
+ writer.write(&self.cookie)?;
+ Ok(())
+ }
+}
+
+impl DeliverToRead for Node {
+ fn do_work(self: Arc<Self>, _thread: &Thread, writer: &mut UserSlicePtrWriter) -> Result<bool> {
+ let mut owner_inner = self.owner.inner.lock();
+ let inner = self.inner.access_mut(&mut owner_inner);
+ let strong = inner.strong.count > 0;
+ let has_strong = inner.strong.has_count;
+ let weak = strong || inner.weak.count > 0;
+ let has_weak = inner.weak.has_count;
+ inner.weak.has_count = weak;
+ inner.strong.has_count = strong;
+
+ if !weak {
+ // Remove the node if there are no references to it.
+ owner_inner.remove_node(self.ptr);
+ } else {
+ if !has_weak {
+ inner.weak.add_bias();
+ }
+
+ if !has_strong && strong {
+ inner.strong.add_bias();
+ }
+ }
+
+ drop(owner_inner);
+
+        // This could be done more compactly but we write out all the possibilities for
+        // compatibility with the original implementation wrt the order of events.
+ if weak && !has_weak {
+ self.write(writer, BR_INCREFS)?;
+ }
+
+ if strong && !has_strong {
+ self.write(writer, BR_ACQUIRE)?;
+ }
+
+ if !strong && has_strong {
+ self.write(writer, BR_RELEASE)?;
+ }
+
+ if !weak && has_weak {
+ self.write(writer, BR_DECREFS)?;
+ }
+
+ Ok(true)
+ }
+
+ fn get_links(&self) -> &Links<dyn DeliverToRead> {
+ &self.links
+ }
+}
+
+pub(crate) struct NodeRef {
+ pub(crate) node: Arc<Node>,
+ strong_count: usize,
+ weak_count: usize,
+}
+
+impl NodeRef {
+ pub(crate) fn new(node: Arc<Node>, strong_count: usize, weak_count: usize) -> Self {
+ Self {
+ node,
+ strong_count,
+ weak_count,
+ }
+ }
+
+ pub(crate) fn absorb(&mut self, mut other: Self) {
+ self.strong_count += other.strong_count;
+ self.weak_count += other.weak_count;
+ other.strong_count = 0;
+ other.weak_count = 0;
+ }
+
+ pub(crate) fn clone(&self, strong: bool) -> BinderResult<NodeRef> {
+ if strong && self.strong_count == 0 {
+ return Err(BinderError::new_failed());
+ }
+
+ Ok(self
+ .node
+ .owner
+ .inner
+ .lock()
+ .new_node_ref(self.node.clone(), strong, None))
+ }
+
+ /// Updates (increments or decrements) the number of references held against the node. If the
+ /// count being updated transitions from 0 to 1 or from 1 to 0, the node is notified by having
+ /// its `update_refcount` function called.
+ ///
+ /// Returns whether `self` should be removed (when both counts are zero).
+ pub(crate) fn update(&mut self, inc: bool, strong: bool) -> bool {
+ if strong && self.strong_count == 0 {
+ return false;
+ }
+
+ let (count, other_count) = if strong {
+ (&mut self.strong_count, self.weak_count)
+ } else {
+ (&mut self.weak_count, self.strong_count)
+ };
+
+ if inc {
+ if *count == 0 {
+ self.node.update_refcount(true, strong);
+ }
+ *count += 1;
+ } else {
+ *count -= 1;
+ if *count == 0 {
+ self.node.update_refcount(false, strong);
+ return other_count == 0;
+ }
+ }
+
+ false
+ }
+}
+
+impl Drop for NodeRef {
+ fn drop(&mut self) {
+ if self.strong_count > 0 {
+ self.node.update_refcount(false, true);
+ }
+
+ if self.weak_count > 0 {
+ self.node.update_refcount(false, false);
+ }
+ }
+}
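
A note on `NodeRef::update` above: the node owner is only told about local 0-to-1 and 1-to-0 transitions, so repeated increments from the same process collapse into a single notification. As a simplified userspace model of the strong-count half (a sketch; `MockRef` and `update_strong` are names invented here):

    struct MockRef {
        strong_count: usize,
    }

    impl MockRef {
        /// Returns Some(inc) when the owner would be notified, None otherwise.
        /// Callers must not decrement below zero; the driver guards this via
        /// its NodeRef bookkeeping.
        fn update_strong(&mut self, inc: bool) -> Option<bool> {
            if inc {
                self.strong_count += 1;
                (self.strong_count == 1).then_some(true) // 0 -> 1: notify
            } else {
                self.strong_count -= 1;
                (self.strong_count == 0).then_some(false) // 1 -> 0: notify
            }
        }
    }

Three increments followed by three decrements therefore produce exactly one notification in each direction, which keeps the BR_INCREFS/BR_ACQUIRE traffic to the owning process from scaling with the number of references a client takes.
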
diff --git a/drivers/android/process.rs b/drivers/android/process.rs
new file mode 100644
index 000000000000..37898342fced
--- /dev/null
+++ b/drivers/android/process.rs
@@ -0,0 +1,961 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::{convert::TryFrom, mem::take, ops::Range};
+use kernel::{
+ bindings,
+ cred::Credential,
+ file::{self, File, IoctlCommand, IoctlHandler, PollTable},
+ io_buffer::{IoBufferReader, IoBufferWriter},
+ linked_list::List,
+ mm,
+ pages::Pages,
+ prelude::*,
+ rbtree::RBTree,
+ sync::{Arc, ArcBorrow, Guard, Mutex, UniqueArc},
+ task::Task,
+ user_ptr::{UserSlicePtr, UserSlicePtrReader},
+ Either,
+};
+
+use crate::{
+ allocation::Allocation,
+ context::Context,
+ defs::*,
+ node::{Node, NodeDeath, NodeRef},
+ range_alloc::RangeAllocator,
+ thread::{BinderError, BinderResult, Thread},
+ DeliverToRead, DeliverToReadListAdapter,
+};
+
+// TODO: Review this:
+// Lock order: Process::node_refs -> Process::inner -> Thread::inner
+
+pub(crate) struct AllocationInfo {
+ /// Range within the allocation where we can find the offsets to the object descriptors.
+ pub(crate) offsets: Range<usize>,
+}
+
+struct Mapping {
+ address: usize,
+ alloc: RangeAllocator<AllocationInfo>,
+ pages: Arc<[Pages<0>]>,
+}
+
+impl Mapping {
+ fn new(address: usize, size: usize, pages: Arc<[Pages<0>]>) -> Result<Self> {
+ let alloc = RangeAllocator::new(size)?;
+ Ok(Self {
+ address,
+ alloc,
+ pages,
+ })
+ }
+}
+
+// TODO: Make this private.
+pub(crate) struct ProcessInner {
+ is_manager: bool,
+ is_dead: bool,
+ threads: RBTree<i32, Arc<Thread>>,
+ ready_threads: List<Arc<Thread>>,
+ work: List<DeliverToReadListAdapter>,
+ mapping: Option<Mapping>,
+ nodes: RBTree<usize, Arc<Node>>,
+
+ delivered_deaths: List<Arc<NodeDeath>>,
+
+ /// The number of requested threads that haven't registered yet.
+ requested_thread_count: u32,
+
+ /// The maximum number of threads used by the process thread pool.
+ max_threads: u32,
+
+    /// The number of threads that have started and registered with the thread pool.
+ started_thread_count: u32,
+}
+
+impl ProcessInner {
+ fn new() -> Self {
+ Self {
+ is_manager: false,
+ is_dead: false,
+ threads: RBTree::new(),
+ ready_threads: List::new(),
+ work: List::new(),
+ mapping: None,
+ nodes: RBTree::new(),
+ requested_thread_count: 0,
+ max_threads: 0,
+ started_thread_count: 0,
+ delivered_deaths: List::new(),
+ }
+ }
+
+ fn push_work(&mut self, work: Arc<dyn DeliverToRead>) -> BinderResult {
+ // Try to find a ready thread to which to push the work.
+ if let Some(thread) = self.ready_threads.pop_front() {
+ // Push to thread while holding state lock. This prevents the thread from giving up
+ // (for example, because of a signal) when we're about to deliver work.
+ thread.push_work(work)
+ } else if self.is_dead {
+ Err(BinderError::new_dead())
+ } else {
+ // There are no ready threads. Push work to process queue.
+ self.work.push_back(work);
+
+ // Wake up polling threads, if any.
+ for thread in self.threads.values() {
+ thread.notify_if_poll_ready();
+ }
+ Ok(())
+ }
+ }
+
+ // TODO: Should this be private?
+ pub(crate) fn remove_node(&mut self, ptr: usize) {
+ self.nodes.remove(&ptr);
+ }
+
+ /// Updates the reference count on the given node.
+ // TODO: Decide if this should be private.
+ pub(crate) fn update_node_refcount(
+ &mut self,
+ node: &Arc<Node>,
+ inc: bool,
+ strong: bool,
+ biased: bool,
+ othread: Option<&Thread>,
+ ) {
+ let push = node.update_refcount_locked(inc, strong, biased, self);
+
+ // If we decided that we need to push work, push either to the process or to a thread if
+ // one is specified.
+ if push {
+ if let Some(thread) = othread {
+ thread.push_work_deferred(node.clone());
+ } else {
+ let _ = self.push_work(node.clone());
+ // Nothing to do: `push_work` may fail if the process is dead, but that's ok as in
+ // that case, it doesn't care about the notification.
+ }
+ }
+ }
+
+ // TODO: Make this private.
+ pub(crate) fn new_node_ref(
+ &mut self,
+ node: Arc<Node>,
+ strong: bool,
+ thread: Option<&Thread>,
+ ) -> NodeRef {
+ self.update_node_refcount(&node, true, strong, false, thread);
+ let strong_count = if strong { 1 } else { 0 };
+ NodeRef::new(node, strong_count, 1 - strong_count)
+ }
+
+ /// Returns an existing node with the given pointer and cookie, if one exists.
+ ///
+ /// Returns an error if a node with the given pointer but a different cookie exists.
+ fn get_existing_node(&self, ptr: usize, cookie: usize) -> Result<Option<Arc<Node>>> {
+ match self.nodes.get(&ptr) {
+ None => Ok(None),
+ Some(node) => {
+ let (_, node_cookie) = node.get_id();
+ if node_cookie == cookie {
+ Ok(Some(node.clone()))
+ } else {
+ Err(EINVAL)
+ }
+ }
+ }
+ }
+
+ /// Returns a reference to an existing node with the given pointer and cookie. It requires a
+ /// mutable reference because it needs to increment the ref count on the node, which may
+ /// require pushing work to the work queue (to notify userspace of 0 to 1 transitions).
+ fn get_existing_node_ref(
+ &mut self,
+ ptr: usize,
+ cookie: usize,
+ strong: bool,
+ thread: Option<&Thread>,
+ ) -> Result<Option<NodeRef>> {
+ Ok(self
+ .get_existing_node(ptr, cookie)?
+ .map(|node| self.new_node_ref(node, strong, thread)))
+ }
+
+ fn register_thread(&mut self) -> bool {
+ if self.requested_thread_count == 0 {
+ return false;
+ }
+
+ self.requested_thread_count -= 1;
+ self.started_thread_count += 1;
+ true
+ }
+
+    /// Finds a delivered death notification with the given cookie, removes it from the process's
+    /// delivered list, and returns it.
+ fn pull_delivered_death(&mut self, cookie: usize) -> Option<Arc<NodeDeath>> {
+ let mut cursor = self.delivered_deaths.cursor_front_mut();
+ while let Some(death) = cursor.current() {
+ if death.cookie == cookie {
+ return cursor.remove_current();
+ }
+ cursor.move_next();
+ }
+ None
+ }
+
+ pub(crate) fn death_delivered(&mut self, death: Arc<NodeDeath>) {
+ self.delivered_deaths.push_back(death);
+ }
+}
+
+struct NodeRefInfo {
+ node_ref: NodeRef,
+ death: Option<Arc<NodeDeath>>,
+}
+
+impl NodeRefInfo {
+ fn new(node_ref: NodeRef) -> Self {
+ Self {
+ node_ref,
+ death: None,
+ }
+ }
+}
+
+struct ProcessNodeRefs {
+ by_handle: RBTree<u32, NodeRefInfo>,
+ by_global_id: RBTree<u64, u32>,
+}
+
+impl ProcessNodeRefs {
+ fn new() -> Self {
+ Self {
+ by_handle: RBTree::new(),
+ by_global_id: RBTree::new(),
+ }
+ }
+}
+
+pub(crate) struct Process {
+ ctx: Arc<Context>,
+
+ // The task leader (process).
+ pub(crate) task: ARef<Task>,
+
+ // Credential associated with file when `Process` is created.
+ pub(crate) cred: ARef<Credential>,
+
+    // TODO: For now this is a mutex because we have allocations in RangeAllocator while holding
+    // the lock. We may want to split up the process state at some point to use a spin lock for
+    // the other fields.
+ // TODO: Make this private again.
+ pub(crate) inner: Mutex<ProcessInner>,
+
+ // References are in a different mutex to avoid recursive acquisition when
+ // incrementing/decrementing a node in another process.
+ node_refs: Mutex<ProcessNodeRefs>,
+}
+
+#[allow(clippy::non_send_fields_in_send_ty)]
+unsafe impl Send for Process {}
+unsafe impl Sync for Process {}
+
+impl Process {
+ fn new(ctx: Arc<Context>, cred: ARef<Credential>) -> Result<Arc<Self>> {
+ let mut process = Pin::from(UniqueArc::try_new(Self {
+ ctx,
+ cred,
+ task: Task::current().group_leader().into(),
+ // SAFETY: `inner` is initialised in the call to `mutex_init` below.
+ inner: unsafe { Mutex::new(ProcessInner::new()) },
+ // SAFETY: `node_refs` is initialised in the call to `mutex_init` below.
+ node_refs: unsafe { Mutex::new(ProcessNodeRefs::new()) },
+ })?);
+
+ // SAFETY: `inner` is pinned when `Process` is.
+ let pinned = unsafe { process.as_mut().map_unchecked_mut(|p| &mut p.inner) };
+ kernel::mutex_init!(pinned, "Process::inner");
+
+ // SAFETY: `node_refs` is pinned when `Process` is.
+ let pinned = unsafe { process.as_mut().map_unchecked_mut(|p| &mut p.node_refs) };
+ kernel::mutex_init!(pinned, "Process::node_refs");
+
+ Ok(process.into())
+ }
+
+ /// Attempts to fetch a work item from the process queue.
+ pub(crate) fn get_work(&self) -> Option<Arc<dyn DeliverToRead>> {
+ self.inner.lock().work.pop_front()
+ }
+
+ /// Attempts to fetch a work item from the process queue. If none is available, it registers the
+ /// given thread as ready to receive work directly.
+ ///
+ /// This must only be called when the thread is not participating in a transaction chain; when
+ /// it is, work will always be delivered directly to the thread (and not through the process
+ /// queue).
+ pub(crate) fn get_work_or_register<'a>(
+ &'a self,
+ thread: &'a Arc<Thread>,
+ ) -> Either<Arc<dyn DeliverToRead>, Registration<'a>> {
+ let mut inner = self.inner.lock();
+
+ // Try to get work from the process queue.
+ if let Some(work) = inner.work.pop_front() {
+ return Either::Left(work);
+ }
+
+ // Register the thread as ready.
+ Either::Right(Registration::new(self, thread, &mut inner))
+ }
+
+ fn get_thread(self: ArcBorrow<'_, Self>, id: i32) -> Result<Arc<Thread>> {
+ // TODO: Consider using read/write locks here instead.
+ {
+ let inner = self.inner.lock();
+ if let Some(thread) = inner.threads.get(&id) {
+ return Ok(thread.clone());
+ }
+ }
+
+ // Allocate a new `Thread` without holding any locks.
+ let ta = Thread::new(id, self.into())?;
+ let node = RBTree::try_allocate_node(id, ta.clone())?;
+
+ let mut inner = self.inner.lock();
+
+        // Recheck. It's possible the thread was created while we were not holding the lock.
+ if let Some(thread) = inner.threads.get(&id) {
+ return Ok(thread.clone());
+ }
+
+ inner.threads.insert(node);
+ Ok(ta)
+ }
+
+ pub(crate) fn push_work(&self, work: Arc<dyn DeliverToRead>) -> BinderResult {
+ self.inner.lock().push_work(work)
+ }
+
+ fn set_as_manager(
+ self: ArcBorrow<'_, Self>,
+ info: Option<FlatBinderObject>,
+ thread: &Thread,
+ ) -> Result {
+ let (ptr, cookie, flags) = if let Some(obj) = info {
+ (
+ // SAFETY: The object type for this ioctl is implicitly `BINDER_TYPE_BINDER`, so it
+ // is safe to access the `binder` field.
+ unsafe { obj.__bindgen_anon_1.binder },
+ obj.cookie,
+ obj.flags,
+ )
+ } else {
+ (0, 0, 0)
+ };
+ let node_ref = self.get_node(ptr as _, cookie as _, flags as _, true, Some(thread))?;
+ let node = node_ref.node.clone();
+ self.ctx.set_manager_node(node_ref)?;
+ self.inner.lock().is_manager = true;
+
+ // Force the state of the node to prevent the delivery of acquire/increfs.
+ let mut owner_inner = node.owner.inner.lock();
+ node.force_has_count(&mut owner_inner);
+ Ok(())
+ }
+
+ pub(crate) fn get_node(
+ self: ArcBorrow<'_, Self>,
+ ptr: usize,
+ cookie: usize,
+ flags: u32,
+ strong: bool,
+ thread: Option<&Thread>,
+ ) -> Result<NodeRef> {
+ // Try to find an existing node.
+ {
+ let mut inner = self.inner.lock();
+ if let Some(node) = inner.get_existing_node_ref(ptr, cookie, strong, thread)? {
+ return Ok(node);
+ }
+ }
+
+ // Allocate the node before reacquiring the lock.
+ let node = Arc::try_new(Node::new(ptr, cookie, flags, self.into()))?;
+ let rbnode = RBTree::try_allocate_node(ptr, node.clone())?;
+
+ let mut inner = self.inner.lock();
+ if let Some(node) = inner.get_existing_node_ref(ptr, cookie, strong, thread)? {
+ return Ok(node);
+ }
+
+ inner.nodes.insert(rbnode);
+ Ok(inner.new_node_ref(node, strong, thread))
+ }
+
+ pub(crate) fn insert_or_update_handle(
+ &self,
+ node_ref: NodeRef,
+        is_manager: bool,
+ ) -> Result<u32> {
+ {
+ let mut refs = self.node_refs.lock();
+
+ // Do a lookup before inserting.
+ if let Some(handle_ref) = refs.by_global_id.get(&node_ref.node.global_id) {
+ let handle = *handle_ref;
+ let info = refs.by_handle.get_mut(&handle).unwrap();
+ info.node_ref.absorb(node_ref);
+ return Ok(handle);
+ }
+ }
+
+ // Reserve memory for tree nodes.
+ let reserve1 = RBTree::try_reserve_node()?;
+ let reserve2 = RBTree::try_reserve_node()?;
+
+ let mut refs = self.node_refs.lock();
+
+ // Do a lookup again as node may have been inserted before the lock was reacquired.
+ if let Some(handle_ref) = refs.by_global_id.get(&node_ref.node.global_id) {
+ let handle = *handle_ref;
+ let info = refs.by_handle.get_mut(&handle).unwrap();
+ info.node_ref.absorb(node_ref);
+ return Ok(handle);
+ }
+
+ // Find id.
+        let mut target = if is_manager { 0 } else { 1 };
+ for handle in refs.by_handle.keys() {
+ if *handle > target {
+ break;
+ }
+ if *handle == target {
+ target = target.checked_add(1).ok_or(ENOMEM)?;
+ }
+ }
+
+ // Ensure the process is still alive while we insert a new reference.
+ let inner = self.inner.lock();
+ if inner.is_dead {
+ return Err(ESRCH);
+ }
+ refs.by_global_id
+ .insert(reserve1.into_node(node_ref.node.global_id, target));
+ refs.by_handle
+ .insert(reserve2.into_node(target, NodeRefInfo::new(node_ref)));
+ Ok(target)
+ }
+
+ pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
+ // When handle is zero, try to get the context manager.
+ if handle == 0 {
+ self.ctx.get_manager_node(true)
+ } else {
+ self.get_node_from_handle(handle, true)
+ }
+ }
+
+ pub(crate) fn get_node_from_handle(&self, handle: u32, strong: bool) -> BinderResult<NodeRef> {
+ self.node_refs
+ .lock()
+ .by_handle
+ .get(&handle)
+ .ok_or(ENOENT)?
+ .node_ref
+ .clone(strong)
+ }
+
+ pub(crate) fn remove_from_delivered_deaths(&self, death: &Arc<NodeDeath>) {
+ let mut inner = self.inner.lock();
+ let removed = unsafe { inner.delivered_deaths.remove(death) };
+ drop(inner);
+ drop(removed);
+ }
+
+ pub(crate) fn update_ref(&self, handle: u32, inc: bool, strong: bool) -> Result {
+ if inc && handle == 0 {
+ if let Ok(node_ref) = self.ctx.get_manager_node(strong) {
+ if core::ptr::eq(self, &*node_ref.node.owner) {
+ return Err(EINVAL);
+ }
+ let _ = self.insert_or_update_handle(node_ref, true);
+ return Ok(());
+ }
+ }
+
+ // To preserve original binder behaviour, we only fail requests where the manager tries to
+ // increment references on itself.
+ let mut refs = self.node_refs.lock();
+ if let Some(info) = refs.by_handle.get_mut(&handle) {
+ if info.node_ref.update(inc, strong) {
+ // Clean up death if there is one attached to this node reference.
+ if let Some(death) = info.death.take() {
+ death.set_cleared(true);
+ self.remove_from_delivered_deaths(&death);
+ }
+
+ // Remove reference from process tables.
+ let id = info.node_ref.node.global_id;
+ refs.by_handle.remove(&handle);
+ refs.by_global_id.remove(&id);
+ }
+ }
+ Ok(())
+ }
+
+ /// Decrements the refcount of the given node, if one exists.
+ pub(crate) fn update_node(&self, ptr: usize, cookie: usize, strong: bool, biased: bool) {
+ let mut inner = self.inner.lock();
+ if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
+ inner.update_node_refcount(&node, false, strong, biased, None);
+ }
+ }
+
+ pub(crate) fn inc_ref_done(&self, reader: &mut UserSlicePtrReader, strong: bool) -> Result {
+ let ptr = reader.read::<usize>()?;
+ let cookie = reader.read::<usize>()?;
+ self.update_node(ptr, cookie, strong, true);
+ Ok(())
+ }
+
+ pub(crate) fn buffer_alloc(&self, size: usize) -> BinderResult<Allocation<'_>> {
+ let mut inner = self.inner.lock();
+ let mapping = inner.mapping.as_mut().ok_or_else(BinderError::new_dead)?;
+
+ let offset = mapping.alloc.reserve_new(size)?;
+ Ok(Allocation::new(
+ self,
+ offset,
+ size,
+ mapping.address + offset,
+ mapping.pages.clone(),
+ ))
+ }
+
+ // TODO: Review if we want an Option or a Result.
+ pub(crate) fn buffer_get(&self, ptr: usize) -> Option<Allocation<'_>> {
+ let mut inner = self.inner.lock();
+ let mapping = inner.mapping.as_mut()?;
+ let offset = ptr.checked_sub(mapping.address)?;
+ let (size, odata) = mapping.alloc.reserve_existing(offset).ok()?;
+ let mut alloc = Allocation::new(self, offset, size, ptr, mapping.pages.clone());
+ if let Some(data) = odata {
+ alloc.set_info(data);
+ }
+ Some(alloc)
+ }
+
+ pub(crate) fn buffer_raw_free(&self, ptr: usize) {
+ let mut inner = self.inner.lock();
+ if let Some(ref mut mapping) = &mut inner.mapping {
+ if ptr < mapping.address
+ || mapping
+ .alloc
+ .reservation_abort(ptr - mapping.address)
+ .is_err()
+ {
+ pr_warn!(
+ "Pointer {:x} failed to free, base = {:x}\n",
+ ptr,
+ mapping.address
+ );
+ }
+ }
+ }
+
+ pub(crate) fn buffer_make_freeable(&self, offset: usize, data: Option<AllocationInfo>) {
+ let mut inner = self.inner.lock();
+ if let Some(ref mut mapping) = &mut inner.mapping {
+ if mapping.alloc.reservation_commit(offset, data).is_err() {
+ pr_warn!("Offset {} failed to be marked freeable\n", offset);
+ }
+ }
+ }
+
+ fn create_mapping(&self, vma: &mut mm::virt::Area) -> Result {
+ let size = core::cmp::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
+ let page_count = size / kernel::PAGE_SIZE;
+
+ // Allocate and map all pages.
+ //
+ // N.B. If we fail halfway through mapping these pages, the kernel will unmap them.
+ let mut pages = Vec::new();
+ pages.try_reserve_exact(page_count)?;
+ let mut address = vma.start();
+ for _ in 0..page_count {
+ let page = Pages::<0>::new()?;
+ vma.insert_page(address, &page)?;
+ pages.try_push(page)?;
+ address += kernel::PAGE_SIZE;
+ }
+
+ let ref_pages = Arc::try_from(pages)?;
+
+ // Save pages for later.
+ let mut inner = self.inner.lock();
+ match &inner.mapping {
+ None => inner.mapping = Some(Mapping::new(vma.start(), size, ref_pages)?),
+ Some(_) => return Err(EBUSY),
+ }
+ Ok(())
+ }
+
+ fn version(&self, data: UserSlicePtr) -> Result {
+ data.writer().write(&BinderVersion::current())
+ }
+
+ pub(crate) fn register_thread(&self) -> bool {
+ self.inner.lock().register_thread()
+ }
+
+ fn remove_thread(&self, thread: Arc<Thread>) {
+ self.inner.lock().threads.remove(&thread.id);
+ thread.release();
+ }
+
+ fn set_max_threads(&self, max: u32) {
+ self.inner.lock().max_threads = max;
+ }
+
+ fn get_node_debug_info(&self, data: UserSlicePtr) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+
+ // Read the starting point.
+ let ptr = reader.read::<BinderNodeDebugInfo>()?.ptr as usize;
+ let mut out = BinderNodeDebugInfo::default();
+
+ {
+ let inner = self.inner.lock();
+ for (node_ptr, node) in &inner.nodes {
+ if *node_ptr > ptr {
+ node.populate_debug_info(&mut out, &inner);
+ break;
+ }
+ }
+ }
+
+ writer.write(&out)
+ }
+
+ fn get_node_info_from_ref(&self, data: UserSlicePtr) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+ let mut out = reader.read::<BinderNodeInfoForRef>()?;
+
+ if out.strong_count != 0
+ || out.weak_count != 0
+ || out.reserved1 != 0
+ || out.reserved2 != 0
+ || out.reserved3 != 0
+ {
+ return Err(EINVAL);
+ }
+
+ // Only the context manager is allowed to use this ioctl.
+ if !self.inner.lock().is_manager {
+ return Err(EPERM);
+ }
+
+ let node_ref = self
+ .get_node_from_handle(out.handle, true)
+ .or(Err(EINVAL))?;
+
+ // Get the counts from the node.
+ {
+ let owner_inner = node_ref.node.owner.inner.lock();
+ node_ref.node.populate_counts(&mut out, &owner_inner);
+ }
+
+ // Write the result back.
+ writer.write(&out)
+ }
+
+ pub(crate) fn needs_thread(&self) -> bool {
+ let mut inner = self.inner.lock();
+ let ret = inner.requested_thread_count == 0
+ && inner.ready_threads.is_empty()
+ && inner.started_thread_count < inner.max_threads;
+ if ret {
+ inner.requested_thread_count += 1
+ };
+ ret
+ }
+
+ pub(crate) fn request_death(
+ self: &Arc<Self>,
+ reader: &mut UserSlicePtrReader,
+ thread: &Thread,
+ ) -> Result {
+ let handle: u32 = reader.read()?;
+ let cookie: usize = reader.read()?;
+
+ // TODO: First two should result in error, but not the others.
+
+ // TODO: Do we care about the context manager dying?
+
+ // Queue BR_ERROR if we can't allocate memory for the death notification.
+ let death = UniqueArc::try_new_uninit().map_err(|err| {
+ thread.push_return_work(BR_ERROR);
+ err
+ })?;
+
+ let mut refs = self.node_refs.lock();
+ let info = refs.by_handle.get_mut(&handle).ok_or(EINVAL)?;
+
+ // Nothing to do if there is already a death notification request for this handle.
+ if info.death.is_some() {
+ return Ok(());
+ }
+
+ let death = {
+ let mut pinned = Pin::from(death.write(
+ // SAFETY: `init` is called below.
+ unsafe { NodeDeath::new(info.node_ref.node.clone(), self.clone(), cookie) },
+ ));
+ pinned.as_mut().init();
+ Arc::<NodeDeath>::from(pinned)
+ };
+
+ info.death = Some(death.clone());
+
+ // Register the death notification.
+ {
+ let mut owner_inner = info.node_ref.node.owner.inner.lock();
+ if owner_inner.is_dead {
+ drop(owner_inner);
+ let _ = self.push_work(death);
+ } else {
+ info.node_ref.node.add_death(death, &mut owner_inner);
+ }
+ }
+ Ok(())
+ }
+
+ pub(crate) fn clear_death(&self, reader: &mut UserSlicePtrReader, thread: &Thread) -> Result {
+ let handle: u32 = reader.read()?;
+ let cookie: usize = reader.read()?;
+
+ let mut refs = self.node_refs.lock();
+ let info = refs.by_handle.get_mut(&handle).ok_or(EINVAL)?;
+
+ let death = info.death.take().ok_or(EINVAL)?;
+ if death.cookie != cookie {
+ info.death = Some(death);
+ return Err(EINVAL);
+ }
+
+ // Update state and determine if we need to queue a work item. We only need to do it when
+ // the node is not dead or if the user already completed the death notification.
+ if death.set_cleared(false) {
+ let _ = thread.push_work_if_looper(death);
+ }
+
+ Ok(())
+ }
+
+ pub(crate) fn dead_binder_done(&self, cookie: usize, thread: &Thread) {
+ if let Some(death) = self.inner.lock().pull_delivered_death(cookie) {
+ death.set_notification_done(thread);
+ }
+ }
+}
+
+impl IoctlHandler for Process {
+ type Target<'a> = ArcBorrow<'a, Process>;
+
+ fn write(
+ this: ArcBorrow<'_, Process>,
+ _file: &File,
+ cmd: u32,
+ reader: &mut UserSlicePtrReader,
+ ) -> Result<i32> {
+ let thread = this.get_thread(Task::current().pid())?;
+ match cmd {
+ bindings::BINDER_SET_MAX_THREADS => this.set_max_threads(reader.read()?),
+ bindings::BINDER_SET_CONTEXT_MGR => this.set_as_manager(None, &thread)?,
+ bindings::BINDER_THREAD_EXIT => this.remove_thread(thread),
+ bindings::BINDER_SET_CONTEXT_MGR_EXT => {
+ this.set_as_manager(Some(reader.read()?), &thread)?
+ }
+ _ => return Err(EINVAL),
+ }
+ Ok(0)
+ }
+
+ fn read_write(
+ this: ArcBorrow<'_, Process>,
+ file: &File,
+ cmd: u32,
+ data: UserSlicePtr,
+ ) -> Result<i32> {
+ let thread = this.get_thread(Task::current().pid())?;
+ let blocking = (file.flags() & file::flags::O_NONBLOCK) == 0;
+ match cmd {
+ bindings::BINDER_WRITE_READ => thread.write_read(data, blocking)?,
+ bindings::BINDER_GET_NODE_DEBUG_INFO => this.get_node_debug_info(data)?,
+ bindings::BINDER_GET_NODE_INFO_FOR_REF => this.get_node_info_from_ref(data)?,
+ bindings::BINDER_VERSION => this.version(data)?,
+ _ => return Err(EINVAL),
+ }
+ Ok(0)
+ }
+}
+
+#[vtable]
+impl file::Operations for Process {
+ type Data = Arc<Self>;
+ type OpenData = Arc<Context>;
+
+ fn open(ctx: &Arc<Context>, file: &File) -> Result<Self::Data> {
+ Self::new(ctx.clone(), file.cred().into())
+ }
+
+ fn release(obj: Self::Data, _file: &File) {
+ // Mark this process as dead. We'll do the same for the threads later.
+ obj.inner.lock().is_dead = true;
+
+ // If this process is the manager, unset it.
+ if obj.inner.lock().is_manager {
+ obj.ctx.unset_manager_node();
+ }
+
+ // TODO: Do this in a worker?
+
+ // Cancel all pending work items.
+ while let Some(work) = obj.get_work() {
+ work.cancel();
+ }
+
+ // Free any resources kept alive by allocated buffers.
+ let omapping = obj.inner.lock().mapping.take();
+ if let Some(mut mapping) = omapping {
+ let address = mapping.address;
+ let pages = mapping.pages.clone();
+ mapping.alloc.for_each(|offset, size, odata| {
+ let ptr = offset + address;
+ let mut alloc = Allocation::new(&obj, offset, size, ptr, pages.clone());
+ if let Some(data) = odata {
+ alloc.set_info(data);
+ }
+ drop(alloc)
+ });
+ }
+
+        // Drop all references. We do this dance with `take` to avoid destroying the references
+        // while holding the lock.
+ let mut refs = obj.node_refs.lock();
+ let mut node_refs = take(&mut refs.by_handle);
+ drop(refs);
+
+ // Remove all death notifications from the nodes (that belong to a different process).
+ for info in node_refs.values_mut() {
+ let death = if let Some(existing) = info.death.take() {
+ existing
+ } else {
+ continue;
+ };
+
+ death.set_cleared(false);
+ }
+
+        // Do a similar dance for the state lock.
+ let mut inner = obj.inner.lock();
+ let threads = take(&mut inner.threads);
+ let nodes = take(&mut inner.nodes);
+ drop(inner);
+
+ // Release all threads.
+ for thread in threads.values() {
+ thread.release();
+ }
+
+ // Deliver death notifications.
+ for node in nodes.values() {
+ loop {
+ let death = {
+ let mut inner = obj.inner.lock();
+ if let Some(death) = node.next_death(&mut inner) {
+ death
+ } else {
+ break;
+ }
+ };
+
+ death.set_dead();
+ }
+ }
+ }
+
+ fn ioctl(this: ArcBorrow<'_, Process>, file: &File, cmd: &mut IoctlCommand) -> Result<i32> {
+ cmd.dispatch::<Self>(this, file)
+ }
+
+ fn compat_ioctl(
+ this: ArcBorrow<'_, Process>,
+ file: &File,
+ cmd: &mut IoctlCommand,
+ ) -> Result<i32> {
+ cmd.dispatch::<Self>(this, file)
+ }
+
+ fn mmap(this: ArcBorrow<'_, Process>, _file: &File, vma: &mut mm::virt::Area) -> Result {
+ // We don't allow mmap to be used in a different process.
+ if !core::ptr::eq(Task::current().group_leader(), &*this.task) {
+ return Err(EINVAL);
+ }
+
+ if vma.start() == 0 {
+ return Err(EINVAL);
+ }
+
+ let mut flags = vma.flags();
+ use mm::virt::flags::*;
+ if flags & WRITE != 0 {
+ return Err(EPERM);
+ }
+
+ flags |= DONTCOPY | MIXEDMAP;
+ flags &= !MAYWRITE;
+ vma.set_flags(flags);
+
+ // TODO: Set ops. We need to learn when the user unmaps so that we can stop using it.
+ this.create_mapping(vma)
+ }
+
+ fn poll(this: ArcBorrow<'_, Process>, file: &File, table: &PollTable) -> Result<u32> {
+ let thread = this.get_thread(Task::current().pid())?;
+ let (from_proc, mut mask) = thread.poll(file, table);
+ if mask == 0 && from_proc && !this.inner.lock().work.is_empty() {
+ mask |= bindings::POLLIN;
+ }
+ Ok(mask)
+ }
+}
+
+pub(crate) struct Registration<'a> {
+ process: &'a Process,
+ thread: &'a Arc<Thread>,
+}
+
+impl<'a> Registration<'a> {
+ fn new(
+ process: &'a Process,
+ thread: &'a Arc<Thread>,
+ guard: &mut Guard<'_, Mutex<ProcessInner>>,
+ ) -> Self {
+ guard.ready_threads.push_back(thread.clone());
+ Self { process, thread }
+ }
+}
+
+impl Drop for Registration<'_> {
+ fn drop(&mut self) {
+ let mut inner = self.process.inner.lock();
+ unsafe { inner.ready_threads.remove(self.thread) };
+ }
+}
diff --git a/drivers/android/range_alloc.rs b/drivers/android/range_alloc.rs
new file mode 100644
index 000000000000..7b149048879b
--- /dev/null
+++ b/drivers/android/range_alloc.rs
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::ptr::NonNull;
+use kernel::{
+ linked_list::{CursorMut, GetLinks, Links, List},
+ prelude::*,
+};
+
+pub(crate) struct RangeAllocator<T> {
+ list: List<Box<Descriptor<T>>>,
+}
+
+#[derive(Debug, PartialEq, Eq)]
+enum DescriptorState {
+ Free,
+ Reserved,
+ Allocated,
+}
+
+impl<T> RangeAllocator<T> {
+ pub(crate) fn new(size: usize) -> Result<Self> {
+ let desc = Box::try_new(Descriptor::new(0, size))?;
+ let mut list = List::new();
+ list.push_back(desc);
+ Ok(Self { list })
+ }
+
+ fn find_best_match(&self, size: usize) -> Option<NonNull<Descriptor<T>>> {
+ // TODO: Use a binary tree instead of list for this lookup.
+ let mut best = None;
+ let mut best_size = usize::MAX;
+ let mut cursor = self.list.cursor_front();
+ while let Some(desc) = cursor.current() {
+ if desc.state == DescriptorState::Free {
+ if size == desc.size {
+ return Some(NonNull::from(desc));
+ }
+
+ if size < desc.size && desc.size < best_size {
+ best = Some(NonNull::from(desc));
+ best_size = desc.size;
+ }
+ }
+
+ cursor.move_next();
+ }
+ best
+ }
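+
+ // Illustrative example (not part of this patch): with free descriptors of
+ // 64 and 256 bytes, `find_best_match(100)` skips the 64-byte block and
+ // returns the 256-byte one, while `find_best_match(64)` returns the exact
+ // match as soon as it is seen.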
+
+ pub(crate) fn reserve_new(&mut self, size: usize) -> Result<usize> {
+ let desc_ptr = match self.find_best_match(size) {
+ None => return Err(ENOMEM),
+ Some(found) => found,
+ };
+
+ // SAFETY: We hold the only mutable reference to list, so it cannot have changed.
+ let desc = unsafe { &mut *desc_ptr.as_ptr() };
+ if desc.size == size {
+ desc.state = DescriptorState::Reserved;
+ return Ok(desc.offset);
+ }
+
+ // We need to break up the descriptor.
+ let new = Box::try_new(Descriptor::new(desc.offset + size, desc.size - size))?;
+ unsafe { self.list.insert_after(desc_ptr, new) };
+ desc.state = DescriptorState::Reserved;
+ desc.size = size;
+ Ok(desc.offset)
+ }
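+
+ // Worked example (illustrative only): reserving 64 bytes from a free
+ // descriptor covering [0, 256) splits it into Reserved { offset: 0, size: 64 }
+ // followed by a new Free { offset: 64, size: 192 } inserted right after it.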
+
+ fn free_with_cursor(cursor: &mut CursorMut<'_, Box<Descriptor<T>>>) -> Result {
+ let mut size = match cursor.current() {
+ None => return Err(EINVAL),
+ Some(ref mut entry) => {
+ match entry.state {
+ DescriptorState::Free => return Err(EINVAL),
+ DescriptorState::Allocated => return Err(EPERM),
+ DescriptorState::Reserved => {}
+ }
+ entry.state = DescriptorState::Free;
+ entry.size
+ }
+ };
+
+ // Try to merge with the next entry.
+ if let Some(next) = cursor.peek_next() {
+ if next.state == DescriptorState::Free {
+ next.offset -= size;
+ next.size += size;
+ size = next.size;
+ cursor.remove_current();
+ }
+ }
+
+ // Try to merge with the previous entry.
+ if let Some(prev) = cursor.peek_prev() {
+ if prev.state == DescriptorState::Free {
+ prev.size += size;
+ cursor.remove_current();
+ }
+ }
+
+ Ok(())
+ }
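+
+ // Worked example (illustrative only): freeing the reserved range [64, 128)
+ // when both neighbours [0, 64) and [128, 256) are free first absorbs the
+ // next entry and then folds into the previous one, leaving a single
+ // Free { offset: 0, size: 256 } descriptor.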
+
+ fn find_at_offset(&mut self, offset: usize) -> Option<CursorMut<'_, Box<Descriptor<T>>>> {
+ let mut cursor = self.list.cursor_front_mut();
+ while let Some(desc) = cursor.current() {
+ if desc.offset == offset {
+ return Some(cursor);
+ }
+
+ if desc.offset > offset {
+ return None;
+ }
+
+ cursor.move_next();
+ }
+ None
+ }
+
+ pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result {
+ // TODO: The force case is currently O(n), but could be made O(1) with unsafe.
+ let mut cursor = self.find_at_offset(offset).ok_or(EINVAL)?;
+ Self::free_with_cursor(&mut cursor)
+ }
+
+ pub(crate) fn reservation_commit(&mut self, offset: usize, data: Option<T>) -> Result {
+ // TODO: This is currently O(n), make it O(1).
+ let mut cursor = self.find_at_offset(offset).ok_or(ENOENT)?;
+ let desc = cursor.current().unwrap();
+ desc.state = DescriptorState::Allocated;
+ desc.data = data;
+ Ok(())
+ }
+
+ /// Takes an entry at the given offset from [`DescriptorState::Allocated`] to
+ /// [`DescriptorState::Reserved`].
+ ///
+ /// Returns the size of the existing entry and the data associated with it.
+ pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, Option<T>)> {
+ // TODO: This is currently O(n), make it O(log n).
+ let mut cursor = self.find_at_offset(offset).ok_or(ENOENT)?;
+ let desc = cursor.current().unwrap();
+ if desc.state != DescriptorState::Allocated {
+ return Err(ENOENT);
+ }
+ desc.state = DescriptorState::Reserved;
+ Ok((desc.size, desc.data.take()))
+ }
+
+ pub(crate) fn for_each<F: Fn(usize, usize, Option<T>)>(&mut self, callback: F) {
+ let mut cursor = self.list.cursor_front_mut();
+ while let Some(desc) = cursor.current() {
+ if desc.state == DescriptorState::Allocated {
+ callback(desc.offset, desc.size, desc.data.take());
+ }
+
+ cursor.move_next();
+ }
+ }
+}
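+
+// Usage sketch (illustrative only, not part of this patch); error handling is
+// elided and the `u32` payload type is an arbitrary stand-in:
+//
+// let mut ranges = RangeAllocator::<u32>::new(1024)?;
+// let offset = ranges.reserve_new(256)?; // Free -> Reserved
+// ranges.reservation_commit(offset, Some(7))?; // Reserved -> Allocated
+// let (size, data) = ranges.reserve_existing(offset)?; // Allocated -> Reserved
+// ranges.reservation_abort(offset)?; // Reserved -> Free, merging neighbours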
+
+struct Descriptor<T> {
+ state: DescriptorState,
+ size: usize,
+ offset: usize,
+ links: Links<Descriptor<T>>,
+ data: Option<T>,
+}
+
+impl<T> Descriptor<T> {
+ fn new(offset: usize, size: usize) -> Self {
+ Self {
+ size,
+ offset,
+ state: DescriptorState::Free,
+ links: Links::new(),
+ data: None,
+ }
+ }
+}
+
+impl<T> GetLinks for Descriptor<T> {
+ type EntryType = Self;
+ fn get_links(desc: &Self) -> &Links<Self> {
+ &desc.links
+ }
+}
diff --git a/drivers/android/rust_binder.rs b/drivers/android/rust_binder.rs
new file mode 100644
index 000000000000..f6432cac0f0a
--- /dev/null
+++ b/drivers/android/rust_binder.rs
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Binder -- the Android IPC mechanism.
+//!
+//! TODO: This module is a work in progress.
+
+use kernel::{
+ io_buffer::IoBufferWriter,
+ linked_list::{GetLinks, GetLinksWrapped, Links},
+ miscdev::Registration,
+ prelude::*,
+ str::CStr,
+ sync::Arc,
+ user_ptr::UserSlicePtrWriter,
+};
+
+mod allocation;
+mod context;
+mod defs;
+mod node;
+mod process;
+mod range_alloc;
+mod thread;
+mod transaction;
+
+use {context::Context, thread::Thread};
+
+module! {
+ type: BinderModule,
+ name: "rust_binder",
+ author: "Wedson Almeida Filho",
+ description: "Android Binder",
+ license: "GPL",
+}
+
+trait DeliverToRead {
+ /// Performs work. Returns `true` if the remaining work items in the queue should be
+ /// processed immediately, or `false` if it should return to the caller before processing
+ /// additional work items.
+ fn do_work(self: Arc<Self>, thread: &Thread, writer: &mut UserSlicePtrWriter) -> Result<bool>;
+
+ /// Cancels the given work item. This is called instead of [`DeliverToRead::do_work`] when work
+ /// won't be delivered.
+ fn cancel(self: Arc<Self>) {}
+
+ /// Returns the linked list links for the work item.
+ fn get_links(&self) -> &Links<dyn DeliverToRead>;
+}
+
+struct DeliverToReadListAdapter {}
+
+impl GetLinks for DeliverToReadListAdapter {
+ type EntryType = dyn DeliverToRead;
+
+ fn get_links(data: &Self::EntryType) -> &Links<Self::EntryType> {
+ data.get_links()
+ }
+}
+
+impl GetLinksWrapped for DeliverToReadListAdapter {
+ type Wrapped = Arc<dyn DeliverToRead>;
+}
+
+struct DeliverCode {
+ code: u32,
+ links: Links<dyn DeliverToRead>,
+}
+
+impl DeliverCode {
+ fn new(code: u32) -> Self {
+ Self {
+ code,
+ links: Links::new(),
+ }
+ }
+}
+
+impl DeliverToRead for DeliverCode {
+ fn do_work(self: Arc<Self>, _thread: &Thread, writer: &mut UserSlicePtrWriter) -> Result<bool> {
+ writer.write(&self.code)?;
+ Ok(true)
+ }
+
+ fn get_links(&self) -> &Links<dyn DeliverToRead> {
+ &self.links
+ }
+}
+
+const fn ptr_align(value: usize) -> usize {
+ let mask = core::mem::size_of::<usize>() - 1;
+ (value + mask) & !mask
+}
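+
+// Worked example (illustrative only): on a 64-bit target `mask` is 7, so
+// `ptr_align` rounds up to the next multiple of 8: ptr_align(0) == 0,
+// ptr_align(1) == 8, and ptr_align(8) == 8.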
+
+unsafe impl Sync for BinderModule {}
+
+struct BinderModule {
+ _reg: Pin<Box<Registration<process::Process>>>,
+}
+
+impl kernel::Module for BinderModule {
+ fn init(name: &'static CStr, _module: &'static kernel::ThisModule) -> Result<Self> {
+ let ctx = Context::new()?;
+ let reg = Registration::new_pinned(fmt!("{name}"), ctx)?;
+ Ok(Self { _reg: reg })
+ }
+}
diff --git a/drivers/android/thread.rs b/drivers/android/thread.rs
new file mode 100644
index 000000000000..0ab96ec03ab9
--- /dev/null
+++ b/drivers/android/thread.rs
@@ -0,0 +1,871 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::{
+ alloc::AllocError,
+ mem::size_of,
+ sync::atomic::{AtomicU32, Ordering},
+};
+use kernel::{
+ bindings,
+ file::{File, PollTable},
+ io_buffer::{IoBufferReader, IoBufferWriter},
+ linked_list::{GetLinks, Links, List},
+ prelude::*,
+ security,
+ sync::{Arc, CondVar, SpinLock, UniqueArc},
+ user_ptr::{UserSlicePtr, UserSlicePtrWriter},
+ Either,
+};
+
+use crate::{
+ allocation::{Allocation, AllocationView},
+ defs::*,
+ process::{AllocationInfo, Process},
+ ptr_align,
+ transaction::{FileInfo, Transaction},
+ DeliverCode, DeliverToRead, DeliverToReadListAdapter,
+};
+
+pub(crate) type BinderResult<T = ()> = core::result::Result<T, BinderError>;
+
+pub(crate) struct BinderError {
+ pub(crate) reply: u32,
+}
+
+impl BinderError {
+ pub(crate) fn new_failed() -> Self {
+ Self {
+ reply: BR_FAILED_REPLY,
+ }
+ }
+
+ pub(crate) fn new_dead() -> Self {
+ Self {
+ reply: BR_DEAD_REPLY,
+ }
+ }
+}
+
+impl From<Error> for BinderError {
+ fn from(_: Error) -> Self {
+ Self::new_failed()
+ }
+}
+
+impl From<AllocError> for BinderError {
+ fn from(_: AllocError) -> Self {
+ Self::new_failed()
+ }
+}
+
+const LOOPER_REGISTERED: u32 = 0x01;
+const LOOPER_ENTERED: u32 = 0x02;
+const LOOPER_EXITED: u32 = 0x04;
+const LOOPER_INVALID: u32 = 0x08;
+const LOOPER_WAITING: u32 = 0x10;
+const LOOPER_POLL: u32 = 0x20;
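+
+// Illustrative state walk-through (not part of this patch): a thread that
+// issues BC_ENTER_LOOPER gets LOOPER_ENTERED set; if the same thread later
+// issues BC_REGISTER_LOOPER, `looper_register` sees LOOPER_ENTERED and also
+// sets LOOPER_INVALID, because a thread may join the pool in only one way.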
+
+struct InnerThread {
+ /// Determines the looper state of the thread. It is a bit-wise combination of the constants
+ /// prefixed with `LOOPER_`.
+ looper_flags: u32,
+
+ /// Determines if thread is dead.
+ is_dead: bool,
+
+ /// Work item used to deliver error codes to the thread that started a transaction. When set to
+ /// `Some(x)`, it will hold the only reference to the object so that it can update the error
+ /// code to be delivered before queuing it.
+ reply_work: Option<Arc<ThreadError>>,
+
+ /// Work item used to deliver error codes to the current thread. When set to `Some(x)`, it will
+ /// hold the only reference to the object so that it can update the error code to be delivered
+ /// before queuing.
+ return_work: Option<Arc<ThreadError>>,
+
+ /// Determines whether the work list below should be processed. When set to false, `work_list`
+ /// is treated as if it were empty.
+ process_work_list: bool,
+ work_list: List<DeliverToReadListAdapter>,
+ current_transaction: Option<Arc<Transaction>>,
+}
+
+impl InnerThread {
+ fn new() -> Self {
+ Self {
+ looper_flags: 0,
+ is_dead: false,
+ process_work_list: false,
+ work_list: List::new(),
+ current_transaction: None,
+ return_work: None,
+ reply_work: None,
+ }
+ }
+
+ fn set_reply_work(&mut self, reply_work: Arc<ThreadError>) {
+ self.reply_work = Some(reply_work);
+ }
+
+ fn push_reply_work(&mut self, code: u32) {
+ let work = self.reply_work.take();
+ self.push_existing_work(work, code);
+ }
+
+ fn set_return_work(&mut self, return_work: Arc<ThreadError>) {
+ self.return_work = Some(return_work);
+ }
+
+ fn push_return_work(&mut self, code: u32) {
+ let work = self.return_work.take();
+ self.push_existing_work(work, code);
+ }
+
+ fn push_existing_work(&mut self, owork: Option<Arc<ThreadError>>, code: u32) {
+ // TODO: Write some warning when the following fails. It should not happen, and
+ // if it does, there is likely something wrong.
+ if let Some(work) = owork {
+ // `error_code` is written to with relaxed semantics because the queue onto which it is
+ // being inserted is protected by a lock. The release barrier when the lock is released
+ // by the caller matches with the acquire barrier of the future reader to guarantee
+ // that `error_code` is visible.
+ work.error_code.store(code, Ordering::Relaxed);
+ self.push_work(work);
+ }
+ }
+
+ fn pop_work(&mut self) -> Option<Arc<dyn DeliverToRead>> {
+ if !self.process_work_list {
+ return None;
+ }
+
+ let ret = self.work_list.pop_front();
+ // Once the queue is drained, we stop processing it until a non-deferred item is pushed
+ // again onto it.
+ self.process_work_list = !self.work_list.is_empty();
+ ret
+ }
+
+ fn push_work_deferred(&mut self, work: Arc<dyn DeliverToRead>) {
+ self.work_list.push_back(work);
+ }
+
+ fn push_work(&mut self, work: Arc<dyn DeliverToRead>) {
+ self.push_work_deferred(work);
+ self.process_work_list = true;
+ }
+
+ fn has_work(&self) -> bool {
+ self.process_work_list && !self.work_list.is_empty()
+ }
+
+ /// Fetches the transaction the thread can reply to. If the thread has a pending transaction
+ /// (that it could respond to) but it has also issued a transaction, it must first wait for the
+ /// previously-issued transaction to complete.
+ fn pop_transaction_to_reply(&mut self, thread: &Thread) -> Result<Arc<Transaction>> {
+ let transaction = self.current_transaction.take().ok_or(EINVAL)?;
+
+ if core::ptr::eq(thread, transaction.from.as_ref()) {
+ self.current_transaction = Some(transaction);
+ return Err(EINVAL);
+ }
+
+ // Find a new current transaction for this thread.
+ self.current_transaction = transaction.find_from(thread);
+ Ok(transaction)
+ }
+
+ fn pop_transaction_replied(&mut self, transaction: &Arc<Transaction>) -> bool {
+ match self.current_transaction.take() {
+ None => false,
+ Some(old) => {
+ if !Arc::ptr_eq(transaction, &old) {
+ self.current_transaction = Some(old);
+ return false;
+ }
+ self.current_transaction = old.clone_next();
+ true
+ }
+ }
+ }
+
+ fn looper_enter(&mut self) {
+ self.looper_flags |= LOOPER_ENTERED;
+ if self.looper_flags & LOOPER_REGISTERED != 0 {
+ self.looper_flags |= LOOPER_INVALID;
+ }
+ }
+
+ fn looper_register(&mut self, valid: bool) {
+ self.looper_flags |= LOOPER_REGISTERED;
+ if !valid || self.looper_flags & LOOPER_ENTERED != 0 {
+ self.looper_flags |= LOOPER_INVALID;
+ }
+ }
+
+ fn looper_exit(&mut self) {
+ self.looper_flags |= LOOPER_EXITED;
+ }
+
+ /// Determines whether the thread is part of a pool, i.e., if it is a looper.
+ fn is_looper(&self) -> bool {
+ self.looper_flags & (LOOPER_ENTERED | LOOPER_REGISTERED) != 0
+ }
+
+ /// Determines whether the thread should attempt to fetch work items from the process queue
+ /// (when its own queue is empty). This is the case when the thread is not part of a
+ /// transaction stack and is registered as a looper.
+ fn should_use_process_work_queue(&self) -> bool {
+ self.current_transaction.is_none() && self.is_looper()
+ }
+
+ fn poll(&mut self) -> u32 {
+ self.looper_flags |= LOOPER_POLL;
+ if self.has_work() {
+ bindings::POLLIN
+ } else {
+ 0
+ }
+ }
+}
+
+pub(crate) struct Thread {
+ pub(crate) id: i32,
+ pub(crate) process: Arc<Process>,
+ inner: SpinLock<InnerThread>,
+ work_condvar: CondVar,
+ links: Links<Thread>,
+}
+
+impl Thread {
+ pub(crate) fn new(id: i32, process: Arc<Process>) -> Result<Arc<Self>> {
+ let return_work = Arc::try_new(ThreadError::new(InnerThread::set_return_work))?;
+ let reply_work = Arc::try_new(ThreadError::new(InnerThread::set_reply_work))?;
+ let mut thread = Pin::from(UniqueArc::try_new(Self {
+ id,
+ process,
+ // SAFETY: `inner` is initialised in the call to `spinlock_init` below.
+ inner: unsafe { SpinLock::new(InnerThread::new()) },
+ // SAFETY: `work_condvar` is initialised in the call to `condvar_init` below.
+ work_condvar: unsafe { CondVar::new() },
+ links: Links::new(),
+ })?);
+
+ // SAFETY: `inner` is pinned when `thread` is.
+ let inner = unsafe { thread.as_mut().map_unchecked_mut(|t| &mut t.inner) };
+ kernel::spinlock_init!(inner, "Thread::inner");
+
+ // SAFETY: `work_condvar` is pinned when `thread` is.
+ let condvar = unsafe { thread.as_mut().map_unchecked_mut(|t| &mut t.work_condvar) };
+ kernel::condvar_init!(condvar, "Thread::work_condvar");
+
+ {
+ let mut inner = thread.inner.lock();
+ inner.set_reply_work(reply_work);
+ inner.set_return_work(return_work);
+ }
+
+ Ok(thread.into())
+ }
+
+ pub(crate) fn set_current_transaction(&self, transaction: Arc<Transaction>) {
+ self.inner.lock().current_transaction = Some(transaction);
+ }
+
+ /// Attempts to fetch a work item from the thread-local queue. The behaviour if the queue is
+ /// empty depends on `wait`: if it is true, the function waits for some work to be queued (or a
+ /// signal); otherwise it returns indicating that none is available.
+ fn get_work_local(self: &Arc<Self>, wait: bool) -> Result<Arc<dyn DeliverToRead>> {
+ // Try once if the caller does not want to wait.
+ if !wait {
+ return self.inner.lock().pop_work().ok_or(EAGAIN);
+ }
+
+ // Loop waiting only on the local queue (i.e., not registering with the process queue).
+ let mut inner = self.inner.lock();
+ loop {
+ if let Some(work) = inner.pop_work() {
+ return Ok(work);
+ }
+
+ inner.looper_flags |= LOOPER_WAITING;
+ let signal_pending = self.work_condvar.wait(&mut inner);
+ inner.looper_flags &= !LOOPER_WAITING;
+
+ if signal_pending {
+ return Err(ERESTARTSYS);
+ }
+ }
+ }
+
+ /// Attempts to fetch a work item from the thread-local queue, falling back to the process-wide
+ /// queue if none is available locally.
+ ///
+ /// This must only be called when the thread is not participating in a transaction chain. If it
+ /// is, the local version (`get_work_local`) should be used instead.
+ fn get_work(self: &Arc<Self>, wait: bool) -> Result<Arc<dyn DeliverToRead>> {
+ // Try to get work from the thread's work queue, using only a local lock.
+ {
+ let mut inner = self.inner.lock();
+ if let Some(work) = inner.pop_work() {
+ return Ok(work);
+ }
+ }
+
+ // If the caller doesn't want to wait, try to grab work from the process queue.
+ //
+ // We know nothing will have been queued directly to the thread queue because it is not in
+ // a transaction and it is not in the process' ready list.
+ if !wait {
+ return self.process.get_work().ok_or(EAGAIN);
+ }
+
+ // Get work from the process queue. If none is available, atomically register as ready.
+ let reg = match self.process.get_work_or_register(self) {
+ Either::Left(work) => return Ok(work),
+ Either::Right(reg) => reg,
+ };
+
+ let mut inner = self.inner.lock();
+ loop {
+ if let Some(work) = inner.pop_work() {
+ return Ok(work);
+ }
+
+ inner.looper_flags |= LOOPER_WAITING;
+ let signal_pending = self.work_condvar.wait(&mut inner);
+ inner.looper_flags &= !LOOPER_WAITING;
+
+ if signal_pending {
+ // A signal is pending. We need to pull the thread off the list, then check the
+ // state again after it's off the list to ensure that something was not queued in
+ // the meantime. If something has been queued, we just return it (instead of the
+ // error).
+ drop(inner);
+ drop(reg);
+ return self.inner.lock().pop_work().ok_or(ERESTARTSYS);
+ }
+ }
+ }
+
+ pub(crate) fn push_work(&self, work: Arc<dyn DeliverToRead>) -> BinderResult {
+ {
+ let mut inner = self.inner.lock();
+ if inner.is_dead {
+ return Err(BinderError::new_dead());
+ }
+ inner.push_work(work);
+ }
+ self.work_condvar.notify_one();
+ Ok(())
+ }
+
+ /// Attempts to push the given work item to the thread if it's a looper thread (i.e., if
+ /// it's part of a thread pool) and is alive. Otherwise, pushes the work item to the
+ /// process instead.
+ pub(crate) fn push_work_if_looper(&self, work: Arc<dyn DeliverToRead>) -> BinderResult {
+ let mut inner = self.inner.lock();
+ if inner.is_looper() && !inner.is_dead {
+ inner.push_work(work);
+ Ok(())
+ } else {
+ drop(inner);
+ self.process.push_work(work)
+ }
+ }
+
+ pub(crate) fn push_work_deferred(&self, work: Arc<dyn DeliverToRead>) {
+ self.inner.lock().push_work_deferred(work);
+ }
+
+ fn translate_object(
+ &self,
+ index_offset: usize,
+ view: &mut AllocationView<'_, '_>,
+ allow_fds: bool,
+ ) -> BinderResult {
+ let offset = view.alloc.read(index_offset)?;
+ let header = view.read::<bindings::binder_object_header>(offset)?;
+ // TODO: Handle other types.
+ match header.type_ {
+ BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => {
+ let strong = header.type_ == BINDER_TYPE_BINDER;
+ view.transfer_binder_object(offset, strong, |obj| {
+ // SAFETY: `binder` is a `binder_uintptr_t`; any bit pattern is a valid
+ // representation.
+ let ptr = unsafe { obj.__bindgen_anon_1.binder } as _;
+ let cookie = obj.cookie as _;
+ let flags = obj.flags as _;
+ let node = self.process.as_arc_borrow().get_node(
+ ptr,
+ cookie,
+ flags,
+ strong,
+ Some(self),
+ )?;
+ security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
+ Ok(node)
+ })?;
+ }
+ BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => {
+ let strong = header.type_ == BINDER_TYPE_HANDLE;
+ view.transfer_binder_object(offset, strong, |obj| {
+ // SAFETY: `handle` is a `u32`; any bit pattern is a valid representation.
+ let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
+ let node = self.process.get_node_from_handle(handle, strong)?;
+ security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
+ Ok(node)
+ })?;
+ }
+ BINDER_TYPE_FD => {
+ if !allow_fds {
+ return Err(BinderError::new_failed());
+ }
+
+ let obj = view.read::<bindings::binder_fd_object>(offset)?;
+ // SAFETY: `fd` is a `u32`; any bit pattern is a valid representation.
+ let fd = unsafe { obj.__bindgen_anon_1.fd };
+ let file = File::from_fd(fd)?;
+ security::binder_transfer_file(
+ &self.process.cred,
+ &view.alloc.process.cred,
+ &file,
+ )?;
+ let field_offset =
+ kernel::offset_of!(bindings::binder_fd_object, __bindgen_anon_1.fd) as usize;
+ let file_info = Box::try_new(FileInfo::new(file, offset + field_offset))?;
+ view.alloc.add_file_info(file_info);
+ }
+ _ => pr_warn!("Unsupported binder object type: {:x}\n", header.type_),
+ }
+ Ok(())
+ }
+
+ fn translate_objects(
+ &self,
+ alloc: &mut Allocation<'_>,
+ start: usize,
+ end: usize,
+ allow_fds: bool,
+ ) -> BinderResult {
+ let mut view = AllocationView::new(alloc, start);
+ for i in (start..end).step_by(size_of::<usize>()) {
+ if let Err(err) = self.translate_object(i, &mut view, allow_fds) {
+ alloc.set_info(AllocationInfo { offsets: start..i });
+ return Err(err);
+ }
+ }
+ alloc.set_info(AllocationInfo {
+ offsets: start..end,
+ });
+ Ok(())
+ }
+
+ pub(crate) fn copy_transaction_data<'a>(
+ &self,
+ to_process: &'a Process,
+ tr: &BinderTransactionData,
+ allow_fds: bool,
+ ) -> BinderResult<Allocation<'a>> {
+ let data_size = tr.data_size as _;
+ let adata_size = ptr_align(data_size);
+ let offsets_size = tr.offsets_size as _;
+ let aoffsets_size = ptr_align(offsets_size);
+
+ // This guarantees that at least `size_of::<usize>()` bytes will be allocated.
+ let len = core::cmp::max(
+ adata_size.checked_add(aoffsets_size).ok_or(ENOMEM)?,
+ size_of::<usize>(),
+ );
+ let mut alloc = to_process.buffer_alloc(len)?;
+
+ // Copy raw data.
+ let mut reader = unsafe { UserSlicePtr::new(tr.data.ptr.buffer as _, data_size) }.reader();
+ alloc.copy_into(&mut reader, 0, data_size)?;
+
+ // Copy offsets if there are any.
+ if offsets_size > 0 {
+ let mut reader =
+ unsafe { UserSlicePtr::new(tr.data.ptr.offsets as _, offsets_size) }.reader();
+ alloc.copy_into(&mut reader, adata_size, offsets_size)?;
+
+ // Traverse the objects specified.
+ self.translate_objects(
+ &mut alloc,
+ adata_size,
+ adata_size + aoffsets_size,
+ allow_fds,
+ )?;
+ }
+
+ Ok(alloc)
+ }
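+
+ // Worked example (illustrative only): for data_size == 13 and
+ // offsets_size == 8 on a 64-bit target, adata_size == 16 and the 24-byte
+ // buffer is laid out as [13 bytes of data | 3 bytes of padding | 8 bytes of
+ // offsets], so the offsets start at the aligned offset 16.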
+
+ fn unwind_transaction_stack(self: &Arc<Self>) {
+ let mut thread = self.clone();
+ while let Ok(transaction) = {
+ let mut inner = thread.inner.lock();
+ inner.pop_transaction_to_reply(thread.as_ref())
+ } {
+ let reply = Either::Right(BR_DEAD_REPLY);
+ if !transaction.from.deliver_single_reply(reply, &transaction) {
+ break;
+ }
+
+ thread = transaction.from.clone();
+ }
+ }
+
+ pub(crate) fn deliver_reply(
+ &self,
+ reply: Either<Arc<Transaction>, u32>,
+ transaction: &Arc<Transaction>,
+ ) {
+ if self.deliver_single_reply(reply, transaction) {
+ transaction.from.unwind_transaction_stack();
+ }
+ }
+
+ /// Delivers a reply to the thread that started a transaction. The reply can either be a
+ /// reply-transaction or an error code to be delivered instead.
+ ///
+ /// Returns whether the thread is dead. If it is, the caller is expected to unwind the
+ /// transaction stack by completing transactions for threads that are dead.
+ fn deliver_single_reply(
+ &self,
+ reply: Either<Arc<Transaction>, u32>,
+ transaction: &Arc<Transaction>,
+ ) -> bool {
+ {
+ let mut inner = self.inner.lock();
+ if !inner.pop_transaction_replied(transaction) {
+ return false;
+ }
+
+ if inner.is_dead {
+ return true;
+ }
+
+ match reply {
+ Either::Left(work) => inner.push_work(work),
+ Either::Right(code) => inner.push_reply_work(code),
+ }
+ }
+
+ // Notify the thread now that we've released the inner lock.
+ self.work_condvar.notify_one();
+ false
+ }
+
+ /// Determines if the given transaction is the current transaction for this thread.
+ fn is_current_transaction(&self, transaction: &Arc<Transaction>) -> bool {
+ let inner = self.inner.lock();
+ match &inner.current_transaction {
+ None => false,
+ Some(current) => Arc::ptr_eq(current, transaction),
+ }
+ }
+
+ fn transaction<T>(self: &Arc<Self>, tr: &BinderTransactionData, inner: T)
+ where
+ T: FnOnce(&Arc<Self>, &BinderTransactionData) -> BinderResult,
+ {
+ if let Err(err) = inner(self, tr) {
+ self.inner.lock().push_return_work(err.reply);
+ }
+ }
+
+ fn reply_inner(self: &Arc<Self>, tr: &BinderTransactionData) -> BinderResult {
+ let orig = self.inner.lock().pop_transaction_to_reply(self)?;
+ if !orig.from.is_current_transaction(&orig) {
+ return Err(BinderError::new_failed());
+ }
+
+ // We need to complete the transaction even if we cannot complete building the reply.
+ (|| -> BinderResult<_> {
+ let completion = Arc::try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
+ let process = orig.from.process.clone();
+ let allow_fds = orig.flags & TF_ACCEPT_FDS != 0;
+ let reply = Transaction::new_reply(self, process, tr, allow_fds)?;
+ self.inner.lock().push_work(completion);
+ orig.from.deliver_reply(Either::Left(reply), &orig);
+ Ok(())
+ })()
+ .map_err(|mut err| {
+ // At this point we only return `BR_TRANSACTION_COMPLETE` to the caller, and we must let
+ // the sender know that the transaction has completed (with an error in this case).
+ let reply = Either::Right(BR_FAILED_REPLY);
+ orig.from.deliver_reply(reply, &orig);
+ err.reply = BR_TRANSACTION_COMPLETE;
+ err
+ })
+ }
+
+ /// Determines the current top of the transaction stack. It fails if the top belongs to
+ /// another thread (i.e., this thread belongs to a stack but has called into another
+ /// thread). The top is [`None`] if the thread is not currently participating in a
+ /// transaction stack.
+ fn top_of_transaction_stack(&self) -> Result<Option<Arc<Transaction>>> {
+ let inner = self.inner.lock();
+ Ok(if let Some(cur) = &inner.current_transaction {
+ if core::ptr::eq(self, cur.from.as_ref()) {
+ return Err(EINVAL);
+ }
+ Some(cur.clone())
+ } else {
+ None
+ })
+ }
+
+ fn oneway_transaction_inner(self: &Arc<Self>, tr: &BinderTransactionData) -> BinderResult {
+ let handle = unsafe { tr.target.handle };
+ let node_ref = self.process.get_transaction_node(handle)?;
+ security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
+ let completion = Arc::try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
+ let transaction = Transaction::new(node_ref, None, self, tr)?;
+ self.inner.lock().push_work(completion);
+ // TODO: Remove the completion on error?
+ transaction.submit()?;
+ Ok(())
+ }
+
+ fn transaction_inner(self: &Arc<Self>, tr: &BinderTransactionData) -> BinderResult {
+ let handle = unsafe { tr.target.handle };
+ let node_ref = self.process.get_transaction_node(handle)?;
+ security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
+ // TODO: We need to ensure that there isn't a pending transaction in the work queue. How
+ // could this happen?
+ let top = self.top_of_transaction_stack()?;
+ let completion = Arc::try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
+ let transaction = Transaction::new(node_ref, top, self, tr)?;
+
+ // Check that the transaction stack hasn't changed while the lock was released, then update
+ // it with the new transaction.
+ {
+ let mut inner = self.inner.lock();
+ if !transaction.is_stacked_on(&inner.current_transaction) {
+ return Err(BinderError::new_failed());
+ }
+ inner.current_transaction = Some(transaction.clone());
+ }
+
+ // We push the completion as deferred work so that we wait for the reply before returning
+ // to userland.
+ self.push_work_deferred(completion);
+ // TODO: Remove completion if submission fails?
+ transaction.submit()?;
+ Ok(())
+ }
+
+ fn write(self: &Arc<Self>, req: &mut BinderWriteRead) -> Result {
+ let write_start = req.write_buffer.wrapping_add(req.write_consumed);
+ let write_len = req.write_size - req.write_consumed;
+ let mut reader = unsafe { UserSlicePtr::new(write_start as _, write_len as _).reader() };
+
+ while reader.len() >= size_of::<u32>() && self.inner.lock().return_work.is_some() {
+ let before = reader.len();
+ match reader.read::<u32>()? {
+ BC_TRANSACTION => {
+ let tr = reader.read::<BinderTransactionData>()?;
+ if tr.flags & TF_ONE_WAY != 0 {
+ self.transaction(&tr, Self::oneway_transaction_inner)
+ } else {
+ self.transaction(&tr, Self::transaction_inner)
+ }
+ }
+ BC_REPLY => self.transaction(&reader.read()?, Self::reply_inner),
+ BC_FREE_BUFFER => drop(self.process.buffer_get(reader.read()?)),
+ BC_INCREFS => self.process.update_ref(reader.read()?, true, false)?,
+ BC_ACQUIRE => self.process.update_ref(reader.read()?, true, true)?,
+ BC_RELEASE => self.process.update_ref(reader.read()?, false, true)?,
+ BC_DECREFS => self.process.update_ref(reader.read()?, false, false)?,
+ BC_INCREFS_DONE => self.process.inc_ref_done(&mut reader, false)?,
+ BC_ACQUIRE_DONE => self.process.inc_ref_done(&mut reader, true)?,
+ BC_REQUEST_DEATH_NOTIFICATION => self.process.request_death(&mut reader, self)?,
+ BC_CLEAR_DEATH_NOTIFICATION => self.process.clear_death(&mut reader, self)?,
+ BC_DEAD_BINDER_DONE => self.process.dead_binder_done(reader.read()?, self),
+ BC_REGISTER_LOOPER => {
+ let valid = self.process.register_thread();
+ self.inner.lock().looper_register(valid);
+ }
+ BC_ENTER_LOOPER => self.inner.lock().looper_enter(),
+ BC_EXIT_LOOPER => self.inner.lock().looper_exit(),
+
+ // TODO: Add support for BC_TRANSACTION_SG and BC_REPLY_SG.
+ // BC_ATTEMPT_ACQUIRE and BC_ACQUIRE_RESULT are no longer supported.
+ _ => return Err(EINVAL),
+ }
+
+ // Update the number of write bytes consumed.
+ req.write_consumed += (before - reader.len()) as u64;
+ }
+ Ok(())
+ }
+
+ fn read(self: &Arc<Self>, req: &mut BinderWriteRead, wait: bool) -> Result {
+ let read_start = req.read_buffer.wrapping_add(req.read_consumed);
+ let read_len = req.read_size - req.read_consumed;
+ let mut writer = unsafe { UserSlicePtr::new(read_start as _, read_len as _) }.writer();
+ let (in_pool, getter) = {
+ let inner = self.inner.lock();
+ (
+ inner.is_looper(),
+ if inner.should_use_process_work_queue() {
+ Self::get_work
+ } else {
+ Self::get_work_local
+ },
+ )
+ };
+
+ // Reserve some room at the beginning of the read buffer so that we can send a
+ // BR_SPAWN_LOOPER if we need to.
+ if req.read_consumed == 0 {
+ writer.write(&BR_NOOP)?;
+ }
+
+ // Loop doing work while there is room in the buffer.
+ let initial_len = writer.len();
+ while writer.len() >= size_of::<u32>() {
+ match getter(self, wait && initial_len == writer.len()) {
+ Ok(work) => {
+ if !work.do_work(self, &mut writer)? {
+ break;
+ }
+ }
+ Err(err) => {
+ // Propagate the error if we haven't written anything else.
+ if initial_len == writer.len() {
+ return Err(err);
+ } else {
+ break;
+ }
+ }
+ }
+ }
+
+ req.read_consumed += read_len - writer.len() as u64;
+
+ // Write BR_SPAWN_LOOPER if the process needs more threads for its pool.
+ if in_pool && self.process.needs_thread() {
+ let mut writer =
+ unsafe { UserSlicePtr::new(req.read_buffer as _, req.read_size as _) }.writer();
+ writer.write(&BR_SPAWN_LOOPER)?;
+ }
+
+ Ok(())
+ }
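+
+ // Read buffer sketch (illustrative only): a fresh read produces
+ //
+ // [BR_NOOP][work item 1][work item 2]...
+ //
+ // and if the process needs another looper thread, the leading BR_NOOP is
+ // overwritten with BR_SPAWN_LOOPER just before returning, which is why that
+ // first word is reserved up front.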
+
+ pub(crate) fn write_read(self: &Arc<Self>, data: UserSlicePtr, wait: bool) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+ let mut req = reader.read::<BinderWriteRead>()?;
+
+ // TODO: `write(&req)` happens in all exit paths from here on. Find a better way to encode
+ // it.
+
+ // Go through the write buffer.
+ if req.write_size > 0 {
+ if let Err(err) = self.write(&mut req) {
+ req.read_consumed = 0;
+ writer.write(&req)?;
+ return Err(err);
+ }
+ }
+
+ // Go through the work queue.
+ let mut ret = Ok(());
+ if req.read_size > 0 {
+ ret = self.read(&mut req, wait);
+ }
+
+ // Write the request back so that the consumed fields are visible to the caller.
+ writer.write(&req)?;
+ ret
+ }
+
+ pub(crate) fn poll(&self, file: &File, table: &PollTable) -> (bool, u32) {
+ // SAFETY: `free_waiters` is called on release.
+ unsafe { table.register_wait(file, &self.work_condvar) };
+ let mut inner = self.inner.lock();
+ (inner.should_use_process_work_queue(), inner.poll())
+ }
+
+ pub(crate) fn notify_if_poll_ready(&self) {
+ // Determine if we need to notify. This requires the lock.
+ let inner = self.inner.lock();
+ let notify = inner.looper_flags & LOOPER_POLL != 0
+ && inner.should_use_process_work_queue()
+ && !inner.has_work();
+ drop(inner);
+
+ // Now that the lock is no longer held, notify the waiters if we have to.
+ if notify {
+ self.work_condvar.notify_one();
+ }
+ }
+
+ pub(crate) fn push_return_work(&self, code: u32) {
+ self.inner.lock().push_return_work(code)
+ }
+
+ pub(crate) fn release(self: &Arc<Self>) {
+ // Mark the thread as dead.
+ self.inner.lock().is_dead = true;
+
+ // Cancel all pending work items.
+ while let Ok(work) = self.get_work_local(false) {
+ work.cancel();
+ }
+
+ // Complete the transaction stack as far as we can.
+ self.unwind_transaction_stack();
+
+ // Remove epoll items if polling was ever used on the thread.
+ let poller = self.inner.lock().looper_flags & LOOPER_POLL != 0;
+ if poller {
+ self.work_condvar.free_waiters();
+
+ unsafe { bindings::synchronize_rcu() };
+ }
+ }
+}
+
+impl GetLinks for Thread {
+ type EntryType = Thread;
+ fn get_links(data: &Thread) -> &Links<Thread> {
+ &data.links
+ }
+}
+
+struct ThreadError {
+ error_code: AtomicU32,
+ return_fn: fn(&mut InnerThread, Arc<ThreadError>),
+ links: Links<dyn DeliverToRead>,
+}
+
+impl ThreadError {
+ fn new(return_fn: fn(&mut InnerThread, Arc<ThreadError>)) -> Self {
+ Self {
+ error_code: AtomicU32::new(BR_OK),
+ return_fn,
+ links: Links::new(),
+ }
+ }
+}
+
+impl DeliverToRead for ThreadError {
+ fn do_work(self: Arc<Self>, thread: &Thread, writer: &mut UserSlicePtrWriter) -> Result<bool> {
+ // See `InnerThread::push_existing_work` for the reason why `error_code` is up to date even
+ // though we use relaxed semantics.
+ let code = self.error_code.load(Ordering::Relaxed);
+
+ // Return the `ThreadError` to the thread.
+ (self.return_fn)(&mut *thread.inner.lock(), self);
+
+ // Deliver the error code to userspace.
+ writer.write(&code)?;
+ Ok(true)
+ }
+
+ fn get_links(&self) -> &Links<dyn DeliverToRead> {
+ &self.links
+ }
+}
diff --git a/drivers/android/transaction.rs b/drivers/android/transaction.rs
new file mode 100644
index 000000000000..34f325d608cb
--- /dev/null
+++ b/drivers/android/transaction.rs
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::sync::atomic::{AtomicBool, Ordering};
+use kernel::{
+ bindings,
+ file::{File, FileDescriptorReservation},
+ io_buffer::IoBufferWriter,
+ linked_list::List,
+ linked_list::{GetLinks, Links},
+ prelude::*,
+ sync::{Arc, SpinLock, UniqueArc},
+ user_ptr::UserSlicePtrWriter,
+ Either, ScopeGuard,
+};
+
+use crate::{
+ defs::*,
+ node::NodeRef,
+ process::Process,
+ ptr_align,
+ thread::{BinderResult, Thread},
+ DeliverToRead,
+};
+
+struct TransactionInner {
+ file_list: List<Box<FileInfo>>,
+}
+
+pub(crate) struct Transaction {
+ inner: SpinLock<TransactionInner>,
+ // TODO: Node should be released when the buffer is released.
+ node_ref: Option<NodeRef>,
+ stack_next: Option<Arc<Transaction>>,
+ pub(crate) from: Arc<Thread>,
+ to: Arc<Process>,
+ free_allocation: AtomicBool,
+ code: u32,
+ pub(crate) flags: u32,
+ data_size: usize,
+ offsets_size: usize,
+ data_address: usize,
+ links: Links<dyn DeliverToRead>,
+}
+
+impl Transaction {
+ pub(crate) fn new(
+ node_ref: NodeRef,
+ stack_next: Option<Arc<Transaction>>,
+ from: &Arc<Thread>,
+ tr: &BinderTransactionData,
+ ) -> BinderResult<Arc<Self>> {
+ let allow_fds = node_ref.node.flags & FLAT_BINDER_FLAG_ACCEPTS_FDS != 0;
+ let to = node_ref.node.owner.clone();
+ let mut alloc = from.copy_transaction_data(&to, tr, allow_fds)?;
+ let data_address = alloc.ptr;
+ let file_list = alloc.take_file_list();
+ alloc.keep_alive();
+ let mut tr = Pin::from(UniqueArc::try_new(Self {
+ // SAFETY: `spinlock_init` is called below.
+ inner: unsafe { SpinLock::new(TransactionInner { file_list }) },
+ node_ref: Some(node_ref),
+ stack_next,
+ from: from.clone(),
+ to,
+ code: tr.code,
+ flags: tr.flags,
+ data_size: tr.data_size as _,
+ data_address,
+ offsets_size: tr.offsets_size as _,
+ links: Links::new(),
+ free_allocation: AtomicBool::new(true),
+ })?);
+
+ // SAFETY: `inner` is pinned when `tr` is.
+ let pinned = unsafe { tr.as_mut().map_unchecked_mut(|t| &mut t.inner) };
+ kernel::spinlock_init!(pinned, "Transaction::inner");
+
+ Ok(tr.into())
+ }
+
+ pub(crate) fn new_reply(
+ from: &Arc<Thread>,
+ to: Arc<Process>,
+ tr: &BinderTransactionData,
+ allow_fds: bool,
+ ) -> BinderResult<Arc<Self>> {
+ let mut alloc = from.copy_transaction_data(&to, tr, allow_fds)?;
+ let data_address = alloc.ptr;
+ let file_list = alloc.take_file_list();
+ alloc.keep_alive();
+ let mut tr = Pin::from(UniqueArc::try_new(Self {
+ // SAFETY: `spinlock_init` is called below.
+ inner: unsafe { SpinLock::new(TransactionInner { file_list }) },
+ node_ref: None,
+ stack_next: None,
+ from: from.clone(),
+ to,
+ code: tr.code,
+ flags: tr.flags,
+ data_size: tr.data_size as _,
+ data_address,
+ offsets_size: tr.offsets_size as _,
+ links: Links::new(),
+ free_allocation: AtomicBool::new(true),
+ })?);
+
+ // SAFETY: `inner` is pinned when `tr` is.
+ let pinned = unsafe { tr.as_mut().map_unchecked_mut(|t| &mut t.inner) };
+ kernel::spinlock_init!(pinned, "Transaction::inner");
+
+ Ok(tr.into())
+ }
+
+ /// Determines if the transaction is stacked on top of the given transaction.
+ pub(crate) fn is_stacked_on(&self, onext: &Option<Arc<Self>>) -> bool {
+ match (&self.stack_next, onext) {
+ (None, None) => true,
+ (Some(stack_next), Some(next)) => Arc::ptr_eq(stack_next, next),
+ _ => false,
+ }
+ }
+
+ /// Returns a pointer to the next transaction on the transaction stack, if there is one.
+ pub(crate) fn clone_next(&self) -> Option<Arc<Self>> {
+ let next = self.stack_next.as_ref()?;
+ Some(next.clone())
+ }
+
+ /// Searches in the transaction stack for a thread that belongs to the target process. This is
+ /// useful when finding a target for a new transaction: if the node belongs to a process that
+ /// is already part of the transaction stack, we reuse the thread.
+ fn find_target_thread(&self) -> Option<Arc<Thread>> {
+ let process = &self.node_ref.as_ref()?.node.owner;
+
+ let mut it = &self.stack_next;
+ while let Some(transaction) = it {
+ if Arc::ptr_eq(&transaction.from.process, process) {
+ return Some(transaction.from.clone());
+ }
+ it = &transaction.stack_next;
+ }
+ None
+ }
+
+ /// Searches in the transaction stack for a transaction originating at the given thread.
+ pub(crate) fn find_from(&self, thread: &Thread) -> Option<Arc<Transaction>> {
+ let mut it = &self.stack_next;
+ while let Some(transaction) = it {
+ if core::ptr::eq(thread, transaction.from.as_ref()) {
+ return Some(transaction.clone());
+ }
+
+ it = &transaction.stack_next;
+ }
+ None
+ }
+
+ /// Submits the transaction to a work queue. Uses a thread from the transaction stack if
+ /// there is one, otherwise uses the destination process.
+ pub(crate) fn submit(self: Arc<Self>) -> BinderResult {
+ if let Some(thread) = self.find_target_thread() {
+ thread.push_work(self)
+ } else {
+ let process = self.to.clone();
+ process.push_work(self)
+ }
+ }
+
+ /// Prepares the file list for delivery to the caller.
+ fn prepare_file_list(&self) -> Result<List<Box<FileInfo>>> {
+ // Get list of files that are being transferred as part of the transaction.
+ let mut file_list = core::mem::replace(&mut self.inner.lock().file_list, List::new());
+
+ // If the list is non-empty, prepare the buffer.
+ if !file_list.is_empty() {
+ let alloc = self.to.buffer_get(self.data_address).ok_or(ESRCH)?;
+ let cleanup = ScopeGuard::new(|| {
+ self.free_allocation.store(false, Ordering::Relaxed);
+ });
+
+ let mut it = file_list.cursor_front_mut();
+ while let Some(file_info) = it.current() {
+ let reservation = FileDescriptorReservation::new(bindings::O_CLOEXEC)?;
+ alloc.write(file_info.buffer_offset, &reservation.reserved_fd())?;
+ file_info.reservation = Some(reservation);
+ it.move_next();
+ }
+
+ alloc.keep_alive();
+ cleanup.dismiss();
+ }
+
+ Ok(file_list)
+ }
+}
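+
+// Sketch of the two-phase file-descriptor install used above (illustrative
+// only; `offset` and `file` are stand-in names): `prepare_file_list` reserves
+// an fd number and patches it into the recipient's buffer, while `do_work`
+// commits the `struct file` only once no failure path remains:
+//
+// let reservation = FileDescriptorReservation::new(bindings::O_CLOEXEC)?;
+// alloc.write(offset, &reservation.reserved_fd())?; // recipient sees the number
+// // ... later, after all fallible steps have succeeded ...
+// reservation.commit(file); // the fd becomes usable; this cannot fail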
+
+impl DeliverToRead for Transaction {
+ fn do_work(self: Arc<Self>, thread: &Thread, writer: &mut UserSlicePtrWriter) -> Result<bool> {
+ // TODO: Initialise the following fields from `tr`:
+ // - `pub sender_pid: pid_t`.
+ // - `pub sender_euid: uid_t`.
+
+ let send_failed_reply = ScopeGuard::new(|| {
+ if self.node_ref.is_some() && self.flags & TF_ONE_WAY == 0 {
+ let reply = Either::Right(BR_FAILED_REPLY);
+ self.from.deliver_reply(reply, &self);
+ }
+ });
+ let mut file_list = if let Ok(list) = self.prepare_file_list() {
+ list
+ } else {
+ // On failure to process the list, we send a reply back to the sender and ignore the
+ // transaction on the recipient.
+ return Ok(true);
+ };
+
+ let mut tr = BinderTransactionData::default();
+
+ if let Some(nref) = &self.node_ref {
+ let (ptr, cookie) = nref.node.get_id();
+ tr.target.ptr = ptr as _;
+ tr.cookie = cookie as _;
+ };
+
+ tr.code = self.code;
+ tr.flags = self.flags;
+ tr.data_size = self.data_size as _;
+ tr.data.ptr.buffer = self.data_address as _;
+ tr.offsets_size = self.offsets_size as _;
+ if tr.offsets_size > 0 {
+ tr.data.ptr.offsets = (self.data_address + ptr_align(self.data_size)) as _;
+ }
+
+ let code = if self.node_ref.is_none() {
+ BR_REPLY
+ } else {
+ BR_TRANSACTION
+ };
+
+ // Write the transaction code and data to the user buffer.
+ writer.write(&code)?;
+ writer.write(&tr)?;
+
+ // Dismiss the guard that would complete the transaction with a failure. No failure paths
+ // are allowed from here on out.
+ send_failed_reply.dismiss();
+
+ // Commit all files.
+ {
+ let mut it = file_list.cursor_front_mut();
+ while let Some(file_info) = it.current() {
+ if let Some(reservation) = file_info.reservation.take() {
+ if let Some(file) = file_info.file.take() {
+ reservation.commit(file);
+ }
+ }
+
+ it.move_next();
+ }
+ }
+
+ // When `drop` is called, we don't want the allocation to be freed because it is now the
+ // user's responsibility to free it.
+ //
+ // `drop` is guaranteed to see this relaxed store because `Arc` guarantees that everything
+ // that happens when an object is referenced happens-before the eventual `drop`.
+ self.free_allocation.store(false, Ordering::Relaxed);
+
+ // When this is not a reply and not an async transaction, update `current_transaction`. If
+ // it's a reply, `current_transaction` has already been updated appropriately.
+ if self.node_ref.is_some() && tr.flags & TF_ONE_WAY == 0 {
+ thread.set_current_transaction(self);
+ }
+
+ Ok(false)
+ }
+
+ fn cancel(self: Arc<Self>) {
+ let reply = Either::Right(BR_DEAD_REPLY);
+ self.from.deliver_reply(reply, &self);
+ }
+
+ fn get_links(&self) -> &Links<dyn DeliverToRead> {
+ &self.links
+ }
+}
+
+impl Drop for Transaction {
+ fn drop(&mut self) {
+ if self.free_allocation.load(Ordering::Relaxed) {
+ self.to.buffer_get(self.data_address);
+ }
+ }
+}
+
+pub(crate) struct FileInfo {
+ links: Links<FileInfo>,
+
+ /// The file for which a descriptor will be created in the recipient process.
+ file: Option<ARef<File>>,
+
+ /// The file descriptor reservation on the recipient process.
+ reservation: Option<FileDescriptorReservation>,
+
+ /// The offset in the buffer where the file descriptor is stored.
+ buffer_offset: usize,
+}
+
+impl FileInfo {
+ pub(crate) fn new(file: ARef<File>, buffer_offset: usize) -> Self {
+ Self {
+ file: Some(file),
+ reservation: None,
+ buffer_offset,
+ links: Links::new(),
+ }
+ }
+}
+
+impl GetLinks for FileInfo {
+ type EntryType = Self;
+
+ fn get_links(data: &Self::EntryType) -> &Links<Self::EntryType> {
+ &data.links
+ }
+}
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 7c3590fd97c2..fc1f22fdfc3e 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -470,6 +470,8 @@ static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv,
static char fw_path_para[256];
static const char * const fw_path[] = {
fw_path_para,
+ "/lib/firmware/vendor/" UTS_RELEASE,
+ "/lib/firmware/vendor",
"/lib/firmware/updates/" UTS_RELEASE,
"/lib/firmware/updates",
"/lib/firmware/" UTS_RELEASE,
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index e30707405455..71732e51516f 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -274,6 +274,18 @@ config BT_HCIBCM203X
Say Y here to compile support for HCI BCM203x devices into the
kernel or say M to compile it as module (bcm203x).
+
+config BT_HCIBCM4377
+ tristate "HCI BCM4377/4378/4387 PCIe driver"
+ depends on PCI
+ select FW_LOADER
+ help
+ Support for Broadcom BCM4377/4378/4387 Bluetooth chipsets attached via
+ PCIe. These are usually found in Apple machines.
+
+ Say Y here to compile support for HCI BCM4377 family devices into the
+ kernel or say M to compile it as module (hci_bcm4377).
+
config BT_HCIBPA10X
tristate "HCI BPA10x USB driver"
depends on USB
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 3321a8aea4a0..e0b261f24fc9 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -6,6 +6,7 @@
obj-$(CONFIG_BT_HCIVHCI) += hci_vhci.o
obj-$(CONFIG_BT_HCIUART) += hci_uart.o
obj-$(CONFIG_BT_HCIBCM203X) += bcm203x.o
+obj-$(CONFIG_BT_HCIBCM4377) += hci_bcm4377.o
obj-$(CONFIG_BT_HCIBPA10X) += bpa10x.o
obj-$(CONFIG_BT_HCIBFUSB) += bfusb.o
obj-$(CONFIG_BT_HCIDTL1) += dtl1_cs.o
diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c
new file mode 100644
index 000000000000..8dd564642aef
--- /dev/null
+++ b/drivers/bluetooth/hci_bcm4377.c
@@ -0,0 +1,2513 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Bluetooth HCI driver for Broadcom 4377/4378/4387 devices attached via PCIe
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include <linux/async.h>
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmi.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/printk.h>
+
+#include <asm/unaligned.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+enum bcm4377_chip {
+ BCM4377 = 0,
+ BCM4378,
+ BCM4387,
+};
+
+#define BCM4377_DEVICE_ID 0x5fa0
+#define BCM4378_DEVICE_ID 0x5f69
+#define BCM4387_DEVICE_ID 0x5f71
+
+#define BCM4377_TIMEOUT 1000
+
+/*
+ * These devices only support DMA transactions inside a 32-bit window
+ * (possibly to avoid 64-bit arithmetic). The window size cannot exceed
+ * 0xffffffff but is always aligned down to the previous 0x200 byte boundary
+ * which effectively limits the window to [start, start+0xfffffe00].
+ * We just limit the DMA window to [0, 0xfffffe00] to make sure we don't
+ * run into this limitation.
+ */
+#define BCM4377_DMA_MASK 0xfffffe00
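+
+/*
+ * Sketch (illustrative, not taken from this driver): the probe path would be
+ * expected to apply the mask so that every DMA address stays at or below
+ * BCM4377_DMA_MASK, e.g.:
+ *
+ * dma_set_mask_and_coherent(&pdev->dev, BCM4377_DMA_MASK);
+ */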
+
+#define BCM4377_PCIECFG_BAR0_WINDOW1 0x80
+#define BCM4377_PCIECFG_BAR0_WINDOW2 0x70
+#define BCM4377_PCIECFG_BAR0_CORE2_WINDOW1 0x74
+#define BCM4377_PCIECFG_BAR0_CORE2_WINDOW2 0x78
+#define BCM4377_PCIECFG_BAR2_WINDOW 0x84
+
+#define BCM4377_PCIECFG_BAR0_CORE2_WINDOW1_DEFAULT 0x18011000
+#define BCM4377_PCIECFG_BAR2_WINDOW_DEFAULT 0x19000000
+
+#define BCM4377_PCIECFG_SUBSYSTEM_CTRL 0x88
+
+#define BCM4377_BAR0_FW_DOORBELL 0x140
+#define BCM4377_BAR0_RTI_CONTROL 0x144
+
+#define BCM4377_BAR0_SLEEP_CONTROL 0x150
+#define BCM4377_BAR0_SLEEP_CONTROL_UNQUIESCE 0
+#define BCM4377_BAR0_SLEEP_CONTROL_AWAKE 2
+#define BCM4377_BAR0_SLEEP_CONTROL_QUIESCE 3
+
+#define BCM4377_BAR0_DOORBELL 0x174
+#define BCM4377_BAR0_DOORBELL_VALUE GENMASK(31, 16)
+#define BCM4377_BAR0_DOORBELL_IDX GENMASK(15, 8)
+#define BCM4377_BAR0_DOORBELL_RING BIT(5)
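+
+/*
+ * Sketch of ringing a doorbell with the fields above (illustrative only;
+ * `val`, `idx` and `bar0` are stand-in names):
+ *
+ * u32 db = FIELD_PREP(BCM4377_BAR0_DOORBELL_VALUE, val) |
+ * FIELD_PREP(BCM4377_BAR0_DOORBELL_IDX, idx) |
+ * BCM4377_BAR0_DOORBELL_RING;
+ * iowrite32(db, bar0 + BCM4377_BAR0_DOORBELL);
+ */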
+
+#define BCM4377_BAR0_HOST_WINDOW_LO 0x590
+#define BCM4377_BAR0_HOST_WINDOW_HI 0x594
+#define BCM4377_BAR0_HOST_WINDOW_SIZE 0x598
+
+#define BCM4377_BAR2_BOOTSTAGE 0x200454
+
+#define BCM4377_BAR2_FW_LO 0x200478
+#define BCM4377_BAR2_FW_HI 0x20047c
+#define BCM4377_BAR2_FW_SIZE 0x200480
+
+#define BCM4377_BAR2_CONTEXT_ADDR_LO 0x20048c
+#define BCM4377_BAR2_CONTEXT_ADDR_HI 0x200450
+
+#define BCM4377_BAR2_RTI_STATUS 0x20045c
+#define BCM4377_BAR2_RTI_WINDOW_LO 0x200494
+#define BCM4377_BAR2_RTI_WINDOW_HI 0x200498
+#define BCM4377_BAR2_RTI_WINDOW_SIZE 0x20049c
+
+#define BCM4377_OTP_SIZE 0xe0
+#define BCM4377_OTP_SYS_VENDOR 0x15
+#define BCM4377_OTP_CIS 0x80
+#define BCM4377_OTP_VENDOR_HDR 0x00000008
+#define BCM4377_OTP_MAX_PARAM_LEN 16
+
+#define BCM4377_N_TRANSFER_RINGS 9
+#define BCM4377_N_COMPLETION_RINGS 6
+
+#define BCM4377_MAX_RING_SIZE 256
+
+#define BCM4377_MSGID_GENERATION GENMASK(15, 8)
+#define BCM4377_MSGID_ID GENMASK(7, 0)
+
+#define BCM4377_RING_N_ENTRIES 128
+
+#define BCM4377_CONTROL_MSG_SIZE 0x34
+#define BCM4377_XFER_RING_MAX_INPLACE_PAYLOAD_SIZE (4 * 0xff)
+
+#define MAX_ACL_PAYLOAD_SIZE (HCI_MAX_FRAME_SIZE + HCI_ACL_HDR_SIZE)
+#define MAX_SCO_PAYLOAD_SIZE (HCI_MAX_SCO_SIZE + HCI_SCO_HDR_SIZE)
+#define MAX_EVENT_PAYLOAD_SIZE (HCI_MAX_EVENT_SIZE + HCI_EVENT_HDR_SIZE)
+
+enum bcm4377_otp_params_type {
+ BCM4377_OTP_BOARD_PARAMS,
+ BCM4377_OTP_CHIP_PARAMS
+};
+
+enum bcm4377_transfer_ring_id {
+ BCM4377_XFER_RING_CONTROL = 0,
+ BCM4377_XFER_RING_HCI_H2D = 1,
+ BCM4377_XFER_RING_HCI_D2H = 2,
+ BCM4377_XFER_RING_SCO_H2D = 3,
+ BCM4377_XFER_RING_SCO_D2H = 4,
+ BCM4377_XFER_RING_ACL_H2D = 5,
+ BCM4377_XFER_RING_ACL_D2H = 6,
+};
+
+enum bcm4377_completion_ring_id {
+ BCM4377_ACK_RING_CONTROL = 0,
+ BCM4377_ACK_RING_HCI_ACL = 1,
+ BCM4377_EVENT_RING_HCI_ACL = 2,
+ BCM4377_ACK_RING_SCO = 3,
+ BCM4377_EVENT_RING_SCO = 4,
+};
+
+enum bcm4377_doorbell {
+ BCM4377_DOORBELL_CONTROL = 0,
+ BCM4377_DOORBELL_HCI_H2D = 1,
+ BCM4377_DOORBELL_HCI_D2H = 2,
+ BCM4377_DOORBELL_ACL_H2D = 3,
+ BCM4377_DOORBELL_ACL_D2H = 4,
+ BCM4377_DOORBELL_SCO = 6,
+};
+
+/*
+ * Transfer ring entry
+ *
+ * flags: Flags to indicate if the payload is appended or mapped
+ * len: Payload length
+ * payload: Optional payload DMA address
+ * id: Message id to recognize the answer in the completion ring entry
+ */
+struct bcm4377_xfer_ring_entry {
+#define BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED BIT(0)
+#define BCM4377_XFER_RING_FLAG_PAYLOAD_IN_FOOTER BIT(1)
+ u8 flags;
+ __le16 len;
+ u8 _unk0;
+ __le64 payload;
+ __le16 id;
+ u8 _unk1[2];
+} __packed;
+static_assert(sizeof(struct bcm4377_xfer_ring_entry) == 0x10);
+
+/*
+ * Completion ring entry
+ *
+ * flags: Flags to indicate if the payload is appended or mapped. If the payload
+ * is mapped it can be found in the buffer of the corresponding transfer
+ * ring message.
+ * ring_id: Transfer ring ID which requested this message
+ * msg_id: Message ID specified in transfer ring entry
+ * len: Payload length
+ */
+struct bcm4377_completion_ring_entry {
+ u8 flags;
+ u8 _unk0;
+ __le16 ring_id;
+ __le16 msg_id;
+ __le32 len;
+ u8 _unk1[6];
+} __packed;
+static_assert(sizeof(struct bcm4377_completion_ring_entry) == 0x10);
+
+enum bcm4377_control_message_type {
+ BCM4377_CONTROL_MSG_CREATE_XFER_RING = 1,
+ BCM4377_CONTROL_MSG_CREATE_COMPLETION_RING = 2,
+ BCM4377_CONTROL_MSG_DESTROY_XFER_RING = 3,
+ BCM4377_CONTROL_MSG_DESTROY_COMPLETION_RING = 4,
+};
+
+/*
+ * Control message used to create a completion ring
+ *
+ * msg_type: Must be BCM4377_CONTROL_MSG_CREATE_COMPLETION_RING
+ * header_size: Unknown, but probably reserved space in front of the entry
+ * footer_size: Number of 32 bit words reserved for payloads after the entry
+ * id/id_again: Completion ring index
+ * ring_iova: DMA address of the ring buffer
+ * n_elements: Number of elements inside the ring buffer
+ * msi: MSI index; this doesn't work for all rings and should be zero
+ * intmod_delay: Unknown delay
+ * intmod_bytes: Unknown
+ */
+struct bcm4377_create_completion_ring_msg {
+ u8 msg_type;
+ u8 header_size;
+ u8 footer_size;
+ u8 _unk0;
+ __le16 id;
+ __le16 id_again;
+ __le64 ring_iova;
+ __le16 n_elements;
+ __le32 unk;
+ u8 _unk1[6];
+ __le16 msi;
+ __le16 intmod_delay;
+ __le32 intmod_bytes;
+ __le16 _unk2;
+ __le32 _unk3;
+ u8 _unk4[10];
+} __packed;
+static_assert(sizeof(struct bcm4377_create_completion_ring_msg) ==
+ BCM4377_CONTROL_MSG_SIZE);
+
+/*
+ * Control ring message used to destroy a completion ring
+ *
+ * msg_type: Must be BCM4377_CONTROL_MSG_DESTROY_COMPLETION_RING
+ * ring_id: Completion ring to be destroyed
+ */
+struct bcm4377_destroy_completion_ring_msg {
+ u8 msg_type;
+ u8 _pad0;
+ __le16 ring_id;
+ u8 _pad1[48];
+} __packed;
+static_assert(sizeof(struct bcm4377_destroy_completion_ring_msg) ==
+ BCM4377_CONTROL_MSG_SIZE);
+
+/*
+ * Control message used to create a transfer ring
+ *
+ * msg_type: Must be BCM4377_CONTROL_MSG_CREATE_XFER_RING
+ * header_size: Number of 32 bit words reserved for unknown content before the
+ * entry
+ * footer_size: Number of 32 bit words reserved for payloads after the entry
+ * ring_id/ring_id_again: Transfer ring index
+ * ring_iova: DMA address of the ring buffer
+ * n_elements: Number of elements inside the ring buffer
+ * completion_ring_id: Completion ring index for acknowledgements and events
+ * doorbell: Doorbell index used to notify device of new entries
+ * flags: Transfer ring flags
+ * - virtual: set if there is no associated shared memory and only the
+ * corresponding completion ring is used
+ * - sync: only set for the SCO rings
+ */
+struct bcm4377_create_transfer_ring_msg {
+ u8 msg_type;
+ u8 header_size;
+ u8 footer_size;
+ u8 _unk0;
+ __le16 ring_id;
+ __le16 ring_id_again;
+ __le64 ring_iova;
+ u8 _unk1[8];
+ __le16 n_elements;
+ __le16 completion_ring_id;
+ __le16 doorbell;
+#define BCM4377_XFER_RING_FLAG_VIRTUAL BIT(7)
+#define BCM4377_XFER_RING_FLAG_SYNC BIT(8)
+ __le16 flags;
+ u8 _unk2[20];
+} __packed;
+static_assert(sizeof(struct bcm4377_create_transfer_ring_msg) ==
+ BCM4377_CONTROL_MSG_SIZE);
+
+/*
+ * Control ring message used to destroy a transfer ring
+ *
+ * msg_type: Must be BCM4377_CONTROL_MSG_DESTROY_XFER_RING
+ * ring_id: Transfer ring to be destroyed
+ */
+struct bcm4377_destroy_transfer_ring_msg {
+ u8 msg_type;
+ u8 _pad0;
+ __le16 ring_id;
+ u8 _pad1[48];
+} __packed;
+static_assert(sizeof(struct bcm4377_destroy_transfer_ring_msg) ==
+ BCM4377_CONTROL_MSG_SIZE);
+
+/*
+ * "Converged IPC" context struct used to make the device aware of all other
+ * shared memory structures. A pointer to this structure is configured inside a
+ * MMIO register.
+ *
+ * version: Protocol version, must be 2.
+ * size: Size of this structure, must be 0x68.
+ * enabled_caps: Enabled capabilities. Unknown bitfield but should be 2.
+ * peripheral_info_addr: DMA address for a 0x20 buffer to which the device will
+ * write unknown contents
+ * {completion,xfer}_ring_{tails,heads}_addr: DMA pointers to ring heads/tails
+ * n_completion_rings: Number of completion rings, the firmware only works if
+ * this is set to BCM4377_N_COMPLETION_RINGS.
+ * n_xfer_rings: Number of transfer rings, the firmware only works if
+ * this is set to BCM4377_N_TRANSFER_RINGS.
+ * control_completion_ring_addr: Control completion ring buffer DMA address
+ * control_xfer_ring_addr: Control transfer ring buffer DMA address
+ * control_xfer_ring_n_entries: Number of control transfer ring entries
+ * control_completion_ring_n_entries: Number of control completion ring entries
+ * control_xfer_ring_doorbell: Control transfer ring doorbell
+ * control_completion_ring_doorbell: Control completion ring doorbell,
+ * must be set to 0xffff
+ * control_xfer_ring_msi: Control transfer ring MSI index, must be 0
+ * control_completion_ring_msi: Control completion ring MSI index, must be 0
+ * control_xfer_ring_header_size: Number of 32 bit words reserved in front of
+ * every control transfer ring entry
+ * control_xfer_ring_footer_size: Number of 32 bit words reserved after every
+ * control transfer ring entry
+ * control_completion_ring_header_size: Number of 32 bit words reserved in front
+ * of every control completion ring entry
+ * control_completion_ring_footer_size: Number of 32 bit words reserved after
+ * every control completion ring entry
+ * scratch_pad: Optional scratch pad DMA address
+ * scratch_pad_size: Scratch pad size
+ */
+struct bcm4377_context {
+ __le16 version;
+ __le16 size;
+ __le32 enabled_caps;
+
+ __le64 peripheral_info_addr;
+
+ /* ring heads and tails */
+ __le64 completion_ring_heads_addr;
+ __le64 xfer_ring_tails_addr;
+ __le64 completion_ring_tails_addr;
+ __le64 xfer_ring_heads_addr;
+ __le16 n_completion_rings;
+ __le16 n_xfer_rings;
+
+ /* control ring configuration */
+ __le64 control_completion_ring_addr;
+ __le64 control_xfer_ring_addr;
+ __le16 control_xfer_ring_n_entries;
+ __le16 control_completion_ring_n_entries;
+ __le16 control_xfer_ring_doorbell;
+ __le16 control_completion_ring_doorbell;
+ __le16 control_xfer_ring_msi;
+ __le16 control_completion_ring_msi;
+ u8 control_xfer_ring_header_size;
+ u8 control_xfer_ring_footer_size;
+ u8 control_completion_ring_header_size;
+ u8 control_completion_ring_footer_size;
+
+ __le16 _unk0;
+ __le16 _unk1;
+
+ __le64 scratch_pad;
+ __le32 scratch_pad_size;
+
+ __le32 _unk3;
+} __packed;
+static_assert(sizeof(struct bcm4377_context) == 0x68);
+
+#define BCM4378_CALIBRATION_CHUNK_SIZE 0xe6
+struct bcm4378_hci_send_calibration_cmd {
+ u8 unk;
+ __le16 blocks_left;
+ u8 data[BCM4378_CALIBRATION_CHUNK_SIZE];
+} __packed;
+
+#define BCM4378_PTB_CHUNK_SIZE 0xcf
+struct bcm4378_hci_send_ptb_cmd {
+ __le16 blocks_left;
+ u8 data[BCM4378_PTB_CHUNK_SIZE];
+} __packed;
+
+/*
+ * Shared memory structure used to store the ring head and tail pointers.
+ */
+struct bcm4377_ring_state {
+ __le16 completion_ring_head[BCM4377_N_COMPLETION_RINGS];
+ __le16 completion_ring_tail[BCM4377_N_COMPLETION_RINGS];
+ __le16 xfer_ring_head[BCM4377_N_TRANSFER_RINGS];
+ __le16 xfer_ring_tail[BCM4377_N_TRANSFER_RINGS];
+};
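+
+/*
+ * Throughout this driver the producer advances a ring's head and the
+ * consumer advances its tail, both modulo n_entries: the host produces
+ * entries for transfer rings and consumes them for completion rings, and the
+ * device does the opposite. head == tail means the ring is empty; e.g. for a
+ * 16-entry ring with tail == 5 the host may advance head up to 4 before
+ * bcm4377_enqueue() considers the ring full.
+ */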
+
+/*
+ * A transfer ring can be used in two configurations:
+ * 1) Send control or HCI messages to the device which are then acknowledged
+ * in the corresponding completion ring
+ * 2) Receiving HCI frames from the devices. In this case the transfer ring
+ * itself contains empty messages that are acknowledged once data is
+ * available from the device. If the payloads fit inside the footers
+ * of the completion ring the transfer ring can be configured to be
+ * virtual such that it has no ring buffer.
+ *
+ * ring_id: ring index hardcoded in the firmware
+ * doorbell: doorbell index to notify device of new entries
+ * payload_size: optional in-place payload size
+ * mapped_payload_size: optional out-of-place payload size
+ * completion_ring: index of corresponding completion ring
+ * n_entries: number of entries inside this ring
+ * generation: ring generation; incremented on hci_open to detect stale messages
+ * sync: set to true for SCO rings
+ * virtual: set to true if this ring has no entries and is only needed to
+ * set up a corresponding completion ring for device->host messages
+ * d2h_buffers_only: set to true if this ring is only used to provide large
+ * buffers used by device->host messages in the completion
+ * ring
+ * allow_wait: allow waiting for messages to be acknowledged
+ * enabled: true once the ring has been created and can be used
+ * ring: ring buffer for entries (struct bcm4377_xfer_ring_entry)
+ * ring_dma: DMA address for ring entry buffer
+ * payloads: payload buffer for mapped_payload_size payloads
+ * payloads_dma: DMA address for payload buffer
+ * events: pointer to array of completions if waiting is allowed
+ * msgids: bitmap to keep track of used message ids
+ * lock: Spinlock to protect access to ring structures used in the irq handler
+ */
+struct bcm4377_transfer_ring {
+ enum bcm4377_transfer_ring_id ring_id;
+ enum bcm4377_doorbell doorbell;
+ size_t payload_size;
+ size_t mapped_payload_size;
+ u8 completion_ring;
+ u16 n_entries;
+ u8 generation;
+
+ bool sync;
+ bool virtual;
+ bool d2h_buffers_only;
+ bool allow_wait;
+ bool enabled;
+
+ void *ring;
+ dma_addr_t ring_dma;
+
+ void *payloads;
+ dma_addr_t payloads_dma;
+
+ struct completion **events;
+ DECLARE_BITMAP(msgids, BCM4377_MAX_RING_SIZE);
+ spinlock_t lock;
+};
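+
+/*
+ * Sketch of the two configurations, as implemented below: for host->device
+ * rings bcm4377_enqueue() fills an entry, advances the head and rings the
+ * doorbell, and the device acknowledges the entry in the corresponding
+ * completion ring. For device->host rings the host only pre-queues empty
+ * (or, for virtual rings, no) entries and the device "acknowledges" one
+ * whenever it has data, delivering the payload through the completion ring
+ * (see bcm4377_handle_event()).
+ */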
+
+/*
+ * A completion ring can be used either to acknowledge messages sent in
+ * the corresponding transfer ring or to receive messages associated with the
+ * transfer ring. When used to receive messages the transfer ring either
+ * has no ring buffer and is only advanced ("virtual transfer ring") or it
+ * only contains empty DMA buffers to be used for the payloads.
+ *
+ * ring_id: completion ring id, hardcoded in firmware
+ * payload_size: optional payload size after each entry
+ * delay: unknown delay
+ * n_entries: number of entries in this ring
+ * enabled: true once the ring has been created and can be used
+ * ring: ring buffer for entries (struct bcm4377_completion_ring_entry)
+ * ring_dma: DMA address of ring buffer
+ * transfer_rings: bitmap of corresponding transfer ring ids
+ */
+struct bcm4377_completion_ring {
+ enum bcm4377_completion_ring_id ring_id;
+ u16 payload_size;
+ u16 delay;
+ u16 n_entries;
+ bool enabled;
+
+ void *ring;
+ dma_addr_t ring_dma;
+
+ unsigned long transfer_rings;
+};
+
+struct bcm4377_data;
+
+/*
+ * Chip-specific configuration struct
+ *
+ * id: Chip id (e.g. 0x4377 for BCM4377)
+ * otp_offset: Offset to the start of the OTP inside BAR0
+ * bar0_window1: Backplane address mapped to the first window in BAR0
+ * bar0_window2: Backplane address mapped to the second window in BAR0
+ * bar0_core2_window2: Optional backplane address mapped to the second core's
+ * second window in BAR0
+ * has_bar0_core2_window2: Set to true if this chip requires the second core's
+ * second window to be configured
+ * clear_pciecfg_subsystem_ctrl_bit19: Set to true if bit 19 in the
+ * vendor-specific subsystem control
+ * register has to be cleared
+ * disable_aspm: Set to true if ASPM must be disabled due to hardware errata
+ * broken_ext_scan: Set to true if the chip erroneously claims to support
+ * extended scanning
+ * broken_mws_transport_config: Set to true if the chip erroneously claims to
+ * support MWS Transport Configuration
+ * send_calibration: Optional callback to send calibration data
+ * send_ptb: Callback to send "PTB" regulatory/calibration data
+ */
+struct bcm4377_hw {
+ unsigned int id;
+
+ u32 otp_offset;
+
+ u32 bar0_window1;
+ u32 bar0_window2;
+ u32 bar0_core2_window2;
+
+ unsigned long has_bar0_core2_window2 : 1;
+ unsigned long clear_pciecfg_subsystem_ctrl_bit19 : 1;
+ unsigned long disable_aspm : 1;
+ unsigned long broken_ext_scan : 1;
+ unsigned long broken_mws_transport_config : 1;
+
+ int (*send_calibration)(struct bcm4377_data *bcm4377);
+ int (*send_ptb)(struct bcm4377_data *bcm4377,
+ const struct firmware *fw);
+};
+
+static const struct bcm4377_hw bcm4377_hw_variants[];
+static const struct dmi_system_id bcm4377_dmi_board_table[];
+
+/*
+ * Private struct associated with each device containing global state
+ *
+ * pdev: Pointer to associated struct pci_dev
+ * hdev: Pointer to associated struct hci_dev
+ * bar0: iomem pointing to BAR0
+ * bar2: iomem pointing to BAR2
+ * bootstage: Current value of the bootstage
+ * rti_status: Current "RTI" status value
+ * hw: Pointer to chip-specific struct bcm4377_hw
+ * taurus_cal_blob: "Taurus" calibration blob used for some chips
+ * taurus_cal_size: "Taurus" calibration blob size
+ * taurus_beamforming_cal_blob: "Taurus" beamforming calibration blob used for
+ * some chips
+ * taurus_beamforming_cal_size: "Taurus" beamforming calibration blob size
+ * stepping: Chip stepping read from OTP; used for firmware selection
+ * vendor: Antenna vendor read from OTP; used for firmware selection
+ * board_type: Board type from FDT or DMI match; used for firmware selection
+ * event: Event for changed bootstage or rti_status; used for booting firmware
+ * ctx: "Converged IPC" context
+ * ctx_dma: "Converged IPC" context DMA address
+ * ring_state: Shared memory buffer containing ring head and tail indexes
+ * ring_state_dma: DMA address for ring_state
+ * {control,hci_acl,sco}_ack_ring: Completion rings used to acknowledge messages
+ * {hci_acl,sco}_event_ring: Completion rings used for device->host messages
+ * control_h2d_ring: Transfer ring used for control messages
+ * {hci,sco,acl}_h2d_ring: Transfer ring used to transfer HCI frames
+ * {hci,sco,acl}_d2h_ring: Transfer ring used to receive HCI frames in the
+ * corresponding completion ring
+ */
+struct bcm4377_data {
+ struct pci_dev *pdev;
+ struct hci_dev *hdev;
+
+ void __iomem *bar0;
+ void __iomem *bar2;
+
+ u32 bootstage;
+ u32 rti_status;
+
+ const struct bcm4377_hw *hw;
+
+ const void *taurus_cal_blob;
+ int taurus_cal_size;
+ const void *taurus_beamforming_cal_blob;
+ int taurus_beamforming_cal_size;
+
+ char stepping[BCM4377_OTP_MAX_PARAM_LEN];
+ char vendor[BCM4377_OTP_MAX_PARAM_LEN];
+ const char *board_type;
+
+ struct completion event;
+
+ struct bcm4377_context *ctx;
+ dma_addr_t ctx_dma;
+
+ struct bcm4377_ring_state *ring_state;
+ dma_addr_t ring_state_dma;
+
+ /*
+ * The HCI and ACL rings have to be merged because this structure is
+ * hardcoded in the firmware.
+ */
+ struct bcm4377_completion_ring control_ack_ring;
+ struct bcm4377_completion_ring hci_acl_ack_ring;
+ struct bcm4377_completion_ring hci_acl_event_ring;
+ struct bcm4377_completion_ring sco_ack_ring;
+ struct bcm4377_completion_ring sco_event_ring;
+
+ struct bcm4377_transfer_ring control_h2d_ring;
+ struct bcm4377_transfer_ring hci_h2d_ring;
+ struct bcm4377_transfer_ring hci_d2h_ring;
+ struct bcm4377_transfer_ring sco_h2d_ring;
+ struct bcm4377_transfer_ring sco_d2h_ring;
+ struct bcm4377_transfer_ring acl_h2d_ring;
+ struct bcm4377_transfer_ring acl_d2h_ring;
+};
+
+static void bcm4377_ring_doorbell(struct bcm4377_data *bcm4377, u8 doorbell,
+ u16 val)
+{
+ u32 db = 0;
+
+ db |= FIELD_PREP(BCM4377_BAR0_DOORBELL_VALUE, val);
+ db |= FIELD_PREP(BCM4377_BAR0_DOORBELL_IDX, doorbell);
+ db |= BCM4377_BAR0_DOORBELL_RING;
+
+ dev_dbg(&bcm4377->pdev->dev, "write %d to doorbell #%d (0x%x)\n", val,
+ doorbell, db);
+ iowrite32(db, bcm4377->bar0 + BCM4377_BAR0_DOORBELL);
+}
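+
+/*
+ * Typical usage (see bcm4377_enqueue() below): after advancing a transfer
+ * ring's head index the new head value is written to the ring's doorbell,
+ * e.g. bcm4377_ring_doorbell(bcm4377, ring->doorbell, new_head), which
+ * appears to tell the device how far it may consume the ring.
+ */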
+
+static int bcm4377_extract_msgid(struct bcm4377_data *bcm4377,
+ struct bcm4377_transfer_ring *ring,
+ u16 raw_msgid, u8 *msgid)
+{
+ u8 generation = FIELD_GET(BCM4377_MSGID_GENERATION, raw_msgid);
+ *msgid = FIELD_GET(BCM4377_MSGID_ID, raw_msgid);
+
+ if (generation != ring->generation) {
+ dev_warn(
+ &bcm4377->pdev->dev,
+ "invalid message generation %d should be %d in entry for ring %d\n",
+ generation, ring->generation, ring->ring_id);
+ return -EINVAL;
+ }
+
+ if (*msgid >= ring->n_entries) {
+ dev_warn(&bcm4377->pdev->dev,
+ "invalid message id in entry for ring %d: %d > %d\n",
+ ring->ring_id, *msgid, ring->n_entries);
+ return -EINVAL;
+ }
+
+ return 0;
+}
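+
+/*
+ * The raw 16-bit message id carries two disjoint bit fields
+ * (BCM4377_MSGID_GENERATION and BCM4377_MSGID_ID, defined earlier in this
+ * file): the ring's generation counter and the ring-local message id. Since
+ * bcm4377_create_transfer_ring() bumps ring->generation, ids from a ring
+ * that has since been re-created no longer match and are rejected as stale.
+ */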
+
+static void bcm4377_handle_event(struct bcm4377_data *bcm4377,
+ struct bcm4377_transfer_ring *ring,
+ u16 raw_msgid, u8 entry_flags, u8 type,
+ void *payload, size_t len)
+{
+ struct sk_buff *skb;
+ u16 head;
+ u8 msgid;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ if (!ring->enabled) {
+ dev_warn(&bcm4377->pdev->dev,
+ "event for disabled transfer ring %d\n",
+ ring->ring_id);
+ goto out;
+ }
+
+ if (ring->d2h_buffers_only &&
+ entry_flags & BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED) {
+ if (bcm4377_extract_msgid(bcm4377, ring, raw_msgid, &msgid))
+ goto out;
+
+ if (len > ring->mapped_payload_size) {
+ dev_warn(
+ &bcm4377->pdev->dev,
+ "invalid payload len in event for ring %d: %zu > %zu\n",
+ ring->ring_id, len, ring->mapped_payload_size);
+ goto out;
+ }
+
+ payload = ring->payloads + msgid * ring->mapped_payload_size;
+ }
+
+ skb = bt_skb_alloc(len, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
+ memcpy(skb_put(skb, len), payload, len);
+ hci_skb_pkt_type(skb) = type;
+ hci_recv_frame(bcm4377->hdev, skb);
+
+out:
+ head = le16_to_cpu(bcm4377->ring_state->xfer_ring_head[ring->ring_id]);
+ head = (head + 1) % ring->n_entries;
+ bcm4377->ring_state->xfer_ring_head[ring->ring_id] = cpu_to_le16(head);
+
+ bcm4377_ring_doorbell(bcm4377, ring->doorbell, head);
+
+ spin_unlock_irqrestore(&ring->lock, flags);
+}
+
+static void bcm4377_handle_ack(struct bcm4377_data *bcm4377,
+ struct bcm4377_transfer_ring *ring,
+ u16 raw_msgid)
+{
+ unsigned long flags;
+ u8 msgid;
+
+ spin_lock_irqsave(&ring->lock, flags);
+
+ if (bcm4377_extract_msgid(bcm4377, ring, raw_msgid, &msgid))
+ goto unlock;
+
+ if (!test_bit(msgid, ring->msgids)) {
+ dev_warn(
+ &bcm4377->pdev->dev,
+ "invalid message id in ack for ring %d: %d is not used\n",
+ ring->ring_id, msgid);
+ goto unlock;
+ }
+
+ if (ring->allow_wait && ring->events[msgid]) {
+ complete(ring->events[msgid]);
+ ring->events[msgid] = NULL;
+ }
+
+ bitmap_release_region(ring->msgids, msgid, 0);
+
+unlock:
+ spin_unlock_irqrestore(&ring->lock, flags);
+}
+
+static void bcm4377_handle_completion(struct bcm4377_data *bcm4377,
+ struct bcm4377_completion_ring *ring,
+ u16 pos)
+{
+ struct bcm4377_completion_ring_entry *entry;
+ u16 msg_id, transfer_ring;
+ size_t entry_size, data_len;
+ void *data;
+
+ if (pos >= ring->n_entries) {
+ dev_warn(&bcm4377->pdev->dev,
+ "invalid offset %d for completion ring %d\n", pos,
+ ring->ring_id);
+ return;
+ }
+
+ entry_size = sizeof(*entry) + ring->payload_size;
+ entry = ring->ring + pos * entry_size;
+ data = ring->ring + pos * entry_size + sizeof(*entry);
+ data_len = le32_to_cpu(entry->len);
+ msg_id = le16_to_cpu(entry->msg_id);
+ transfer_ring = le16_to_cpu(entry->ring_id);
+
+ if ((ring->transfer_rings & BIT(transfer_ring)) == 0) {
+ dev_warn(
+ &bcm4377->pdev->dev,
+ "invalid entry at offset %d for transfer ring %d in completion ring %d\n",
+ pos, transfer_ring, ring->ring_id);
+ return;
+ }
+
+ dev_dbg(&bcm4377->pdev->dev,
+ "entry in completion ring %d for transfer ring %d with msg_id %d\n",
+ ring->ring_id, transfer_ring, msg_id);
+
+ switch (transfer_ring) {
+ case BCM4377_XFER_RING_CONTROL:
+ bcm4377_handle_ack(bcm4377, &bcm4377->control_h2d_ring, msg_id);
+ break;
+ case BCM4377_XFER_RING_HCI_H2D:
+ bcm4377_handle_ack(bcm4377, &bcm4377->hci_h2d_ring, msg_id);
+ break;
+ case BCM4377_XFER_RING_SCO_H2D:
+ bcm4377_handle_ack(bcm4377, &bcm4377->sco_h2d_ring, msg_id);
+ break;
+ case BCM4377_XFER_RING_ACL_H2D:
+ bcm4377_handle_ack(bcm4377, &bcm4377->acl_h2d_ring, msg_id);
+ break;
+
+ case BCM4377_XFER_RING_HCI_D2H:
+ bcm4377_handle_event(bcm4377, &bcm4377->hci_d2h_ring, msg_id,
+ entry->flags, HCI_EVENT_PKT, data,
+ data_len);
+ break;
+ case BCM4377_XFER_RING_SCO_D2H:
+ bcm4377_handle_event(bcm4377, &bcm4377->sco_d2h_ring, msg_id,
+ entry->flags, HCI_SCODATA_PKT, data,
+ data_len);
+ break;
+ case BCM4377_XFER_RING_ACL_D2H:
+ bcm4377_handle_event(bcm4377, &bcm4377->acl_d2h_ring, msg_id,
+ entry->flags, HCI_ACLDATA_PKT, data,
+ data_len);
+ break;
+
+ default:
+ dev_warn(
+ &bcm4377->pdev->dev,
+ "entry in completion ring %d for unknown transfer ring %d with msg_id %d\n",
+ ring->ring_id, transfer_ring, msg_id);
+ }
+}
+
+static void bcm4377_poll_completion_ring(struct bcm4377_data *bcm4377,
+ struct bcm4377_completion_ring *ring)
+{
+ u16 tail;
+ __le16 *heads = bcm4377->ring_state->completion_ring_head;
+ __le16 *tails = bcm4377->ring_state->completion_ring_tail;
+
+ if (!ring->enabled)
+ return;
+
+ tail = le16_to_cpu(tails[ring->ring_id]);
+ dev_dbg(&bcm4377->pdev->dev,
+ "completion ring #%d: head: %d, tail: %d\n", ring->ring_id,
+ le16_to_cpu(heads[ring->ring_id]), tail);
+
+ while (tail != le16_to_cpu(READ_ONCE(heads[ring->ring_id]))) {
+ /*
+ * Ensure the CPU doesn't speculate through the comparison.
+ * Otherwise it might already read the (empty) queue entry
+ * before the updated head has been loaded and checked.
+ */
+ dma_rmb();
+
+ bcm4377_handle_completion(bcm4377, ring, tail);
+
+ tail = (tail + 1) % ring->n_entries;
+ tails[ring->ring_id] = cpu_to_le16(tail);
+ }
+}
+
+static irqreturn_t bcm4377_irq(int irq, void *data)
+{
+ struct bcm4377_data *bcm4377 = data;
+ u32 bootstage, rti_status;
+
+ bootstage = ioread32(bcm4377->bar2 + BCM4377_BAR2_BOOTSTAGE);
+ rti_status = ioread32(bcm4377->bar2 + BCM4377_BAR2_RTI_STATUS);
+
+ if (bootstage != bcm4377->bootstage ||
+ rti_status != bcm4377->rti_status) {
+ dev_dbg(&bcm4377->pdev->dev,
+ "bootstage = %d -> %d, rti state = %d -> %d\n",
+ bcm4377->bootstage, bootstage, bcm4377->rti_status,
+ rti_status);
+ complete(&bcm4377->event);
+ bcm4377->bootstage = bootstage;
+ bcm4377->rti_status = rti_status;
+ }
+
+ if (rti_status > 2)
+ dev_err(&bcm4377->pdev->dev, "RTI status is %d\n", rti_status);
+
+ bcm4377_poll_completion_ring(bcm4377, &bcm4377->control_ack_ring);
+ bcm4377_poll_completion_ring(bcm4377, &bcm4377->hci_acl_event_ring);
+ bcm4377_poll_completion_ring(bcm4377, &bcm4377->hci_acl_ack_ring);
+ bcm4377_poll_completion_ring(bcm4377, &bcm4377->sco_ack_ring);
+ bcm4377_poll_completion_ring(bcm4377, &bcm4377->sco_event_ring);
+
+ return IRQ_HANDLED;
+}
+
+static int bcm4377_enqueue(struct bcm4377_data *bcm4377,
+ struct bcm4377_transfer_ring *ring, void *data,
+ size_t len, bool wait)
+{
+ unsigned long flags;
+ struct bcm4377_xfer_ring_entry *entry;
+ void *payload;
+ size_t offset;
+ u16 head, tail, new_head;
+ u16 raw_msgid;
+ int ret, msgid;
+ DECLARE_COMPLETION_ONSTACK(event);
+
+ if (len > ring->payload_size && len > ring->mapped_payload_size) {
+ dev_warn(
+ &bcm4377->pdev->dev,
+ "payload len %zu is too large for ring %d (max is %zu or %zu)\n",
+ len, ring->ring_id, ring->payload_size,
+ ring->mapped_payload_size);
+ return -EINVAL;
+ }
+ if (wait && !ring->allow_wait)
+ return -EINVAL;
+ if (ring->virtual)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ring->lock, flags);
+
+ head = le16_to_cpu(bcm4377->ring_state->xfer_ring_head[ring->ring_id]);
+ tail = le16_to_cpu(bcm4377->ring_state->xfer_ring_tail[ring->ring_id]);
+
+ new_head = (head + 1) % ring->n_entries;
+
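+ /*
+ * One slot is intentionally left unused: if advancing the head would
+ * make it equal to the tail the ring is treated as full, which keeps
+ * the full and empty (head == tail) states distinguishable.
+ */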
+ if (new_head == tail) {
+ dev_warn(&bcm4377->pdev->dev,
+ "can't send message because ring %d is full\n",
+ ring->ring_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ msgid = bitmap_find_free_region(ring->msgids, ring->n_entries, 0);
+ if (msgid < 0) {
+ dev_warn(&bcm4377->pdev->dev,
+ "can't find message id for ring %d\n", ring->ring_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ raw_msgid = FIELD_PREP(BCM4377_MSGID_GENERATION, ring->generation);
+ raw_msgid |= FIELD_PREP(BCM4377_MSGID_ID, msgid);
+
+ offset = head * (sizeof(*entry) + ring->payload_size);
+ entry = ring->ring + offset;
+
+ memset(entry, 0, sizeof(*entry));
+ entry->id = cpu_to_le16(raw_msgid);
+ entry->len = cpu_to_le16(len);
+
+ if (len <= ring->payload_size) {
+ entry->flags = BCM4377_XFER_RING_FLAG_PAYLOAD_IN_FOOTER;
+ payload = ring->ring + offset + sizeof(*entry);
+ } else {
+ entry->flags = BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED;
+ entry->payload = cpu_to_le64(ring->payloads_dma +
+ msgid * ring->mapped_payload_size);
+ payload = ring->payloads + msgid * ring->mapped_payload_size;
+ }
+
+ memcpy(payload, data, len);
+
+ if (wait)
+ ring->events[msgid] = &event;
+
+ /*
+ * The 4377 chips stop responding to any commands as soon as they
+ * have been idle for a while. Poking the sleep control register here
+ * makes them come alive again.
+ */
+ iowrite32(BCM4377_BAR0_SLEEP_CONTROL_AWAKE,
+ bcm4377->bar0 + BCM4377_BAR0_SLEEP_CONTROL);
+
+ dev_dbg(&bcm4377->pdev->dev,
+ "updating head for transfer queue #%d to %d\n", ring->ring_id,
+ new_head);
+ bcm4377->ring_state->xfer_ring_head[ring->ring_id] =
+ cpu_to_le16(new_head);
+
+ if (!ring->sync)
+ bcm4377_ring_doorbell(bcm4377, ring->doorbell, new_head);
+ ret = 0;
+
+out:
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ if (ret == 0 && wait) {
+ ret = wait_for_completion_interruptible_timeout(
+ &event, BCM4377_TIMEOUT);
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+ else if (ret > 0)
+ ret = 0;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ ring->events[msgid] = NULL;
+ spin_unlock_irqrestore(&ring->lock, flags);
+ }
+
+ return ret;
+}
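+
+/*
+ * Example (see bcm4377_create_completion_ring() below): control messages are
+ * sent synchronously,
+ *
+ *	ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg,
+ *			      sizeof(msg), true);
+ *
+ * while bcm4377_hci_send_frame() enqueues HCI traffic with wait == false.
+ */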
+
+static int bcm4377_create_completion_ring(struct bcm4377_data *bcm4377,
+ struct bcm4377_completion_ring *ring)
+{
+ struct bcm4377_create_completion_ring_msg msg;
+ int ret;
+
+ if (ring->enabled) {
+ dev_warn(&bcm4377->pdev->dev,
+ "completion ring %d already enabled\n", ring->ring_id);
+ return 0;
+ }
+
+ memset(ring->ring, 0,
+ ring->n_entries * (sizeof(struct bcm4377_completion_ring_entry) +
+ ring->payload_size));
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_type = BCM4377_CONTROL_MSG_CREATE_COMPLETION_RING;
+ msg.id = cpu_to_le16(ring->ring_id);
+ msg.id_again = cpu_to_le16(ring->ring_id);
+ msg.ring_iova = cpu_to_le64(ring->ring_dma);
+ msg.n_elements = cpu_to_le16(ring->n_entries);
+ msg.intmod_bytes = cpu_to_le32(0xffffffff);
+ msg.unk = cpu_to_le32(0xffffffff);
+ msg.intmod_delay = cpu_to_le16(ring->delay);
+ msg.footer_size = ring->payload_size / 4;
+
+ ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg,
+ sizeof(msg), true);
+ if (!ret)
+ ring->enabled = true;
+
+ return ret;
+}
+
+static int bcm4377_destroy_completion_ring(struct bcm4377_data *bcm4377,
+ struct bcm4377_completion_ring *ring)
+{
+ struct bcm4377_destroy_completion_ring_msg msg;
+ int ret;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_type = BCM4377_CONTROL_MSG_DESTROY_COMPLETION_RING;
+ msg.ring_id = cpu_to_le16(ring->ring_id);
+
+ ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg,
+ sizeof(msg), true);
+ if (ret)
+ dev_warn(&bcm4377->pdev->dev,
+ "failed to destroy completion ring %d\n",
+ ring->ring_id);
+
+ ring->enabled = false;
+ return ret;
+}
+
+static int bcm4377_create_transfer_ring(struct bcm4377_data *bcm4377,
+ struct bcm4377_transfer_ring *ring)
+{
+ struct bcm4377_create_transfer_ring_msg msg;
+ u16 flags = 0;
+ int ret, i;
+ unsigned long spinlock_flags;
+
+ if (ring->virtual)
+ flags |= BCM4377_XFER_RING_FLAG_VIRTUAL;
+ if (ring->sync)
+ flags |= BCM4377_XFER_RING_FLAG_SYNC;
+
+ spin_lock_irqsave(&ring->lock, spinlock_flags);
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_type = BCM4377_CONTROL_MSG_CREATE_XFER_RING;
+ msg.ring_id = cpu_to_le16(ring->ring_id);
+ msg.ring_id_again = cpu_to_le16(ring->ring_id);
+ msg.ring_iova = cpu_to_le64(ring->ring_dma);
+ msg.n_elements = cpu_to_le16(ring->n_entries);
+ msg.completion_ring_id = cpu_to_le16(ring->completion_ring);
+ msg.doorbell = cpu_to_le16(ring->doorbell);
+ msg.flags = cpu_to_le16(flags);
+ msg.footer_size = ring->payload_size / 4;
+
+ bcm4377->ring_state->xfer_ring_head[ring->ring_id] = 0;
+ bcm4377->ring_state->xfer_ring_tail[ring->ring_id] = 0;
+ ring->generation++;
+ spin_unlock_irqrestore(&ring->lock, spinlock_flags);
+
+ ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg,
+ sizeof(msg), true);
+
+ spin_lock_irqsave(&ring->lock, spinlock_flags);
+
+ if (ring->d2h_buffers_only) {
+ for (i = 0; i < ring->n_entries; ++i) {
+ struct bcm4377_xfer_ring_entry *entry =
+ ring->ring + i * sizeof(*entry);
+ u16 raw_msgid = FIELD_PREP(BCM4377_MSGID_GENERATION,
+ ring->generation);
+ raw_msgid |= FIELD_PREP(BCM4377_MSGID_ID, i);
+
+ memset(entry, 0, sizeof(*entry));
+ entry->id = cpu_to_le16(raw_msgid);
+ entry->len = cpu_to_le16(ring->mapped_payload_size);
+ entry->flags = BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED;
+ entry->payload =
+ cpu_to_le64(ring->payloads_dma +
+ i * ring->mapped_payload_size);
+ }
+ }
+
+ /*
+ * send some messages if this is a device->host ring to allow the device
+ * to reply by acknowledging them in the completion ring
+ */
+ if (ring->virtual || ring->d2h_buffers_only) {
+ bcm4377->ring_state->xfer_ring_head[ring->ring_id] =
+ cpu_to_le16(0xf);
+ bcm4377_ring_doorbell(bcm4377, ring->doorbell, 0xf);
+ }
+
+ ring->enabled = true;
+ spin_unlock_irqrestore(&ring->lock, spinlock_flags);
+
+ return ret;
+}
+
+static int bcm4377_destroy_transfer_ring(struct bcm4377_data *bcm4377,
+ struct bcm4377_transfer_ring *ring)
+{
+ struct bcm4377_destroy_transfer_ring_msg msg;
+ int ret;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_type = BCM4377_CONTROL_MSG_DESTROY_XFER_RING;
+ msg.ring_id = cpu_to_le16(ring->ring_id);
+
+ ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg,
+ sizeof(msg), true);
+ if (ret)
+ dev_warn(&bcm4377->pdev->dev,
+ "failed to destroy transfer ring %d\n", ring->ring_id);
+
+ ring->enabled = false;
+ return ret;
+}
+
+static int __bcm4378_send_calibration_chunk(struct bcm4377_data *bcm4377,
+ const void *data, size_t data_len,
+ u16 blocks_left)
+{
+ struct bcm4378_hci_send_calibration_cmd cmd;
+ struct sk_buff *skb;
+
+ if (data_len > sizeof(cmd.data))
+ return -EINVAL;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.unk = 0x03;
+ cmd.blocks_left = cpu_to_le16(blocks_left);
+ memcpy(cmd.data, data, data_len);
+
+ skb = __hci_cmd_sync(bcm4377->hdev, 0xfd97, sizeof(cmd), &cmd,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static int __bcm4378_send_calibration(struct bcm4377_data *bcm4377,
+ const void *data, size_t data_size)
+{
+ int ret;
+ size_t i, left, transfer_len;
+ size_t blocks =
+ DIV_ROUND_UP(data_size, (size_t)BCM4378_CALIBRATION_CHUNK_SIZE);
+
+ if (!data) {
+ dev_err(&bcm4377->pdev->dev,
+ "no calibration data available.\n");
+ return -ENOENT;
+ }
+
+ for (i = 0, left = data_size; i < blocks; ++i, left -= transfer_len) {
+ transfer_len =
+ min_t(size_t, left, BCM4378_CALIBRATION_CHUNK_SIZE);
+
+ ret = __bcm4378_send_calibration_chunk(
+ bcm4377, data + i * BCM4378_CALIBRATION_CHUNK_SIZE,
+ transfer_len, blocks - i - 1);
+ if (ret) {
+ dev_err(&bcm4377->pdev->dev,
+ "send calibration chunk failed with %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
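+
+/*
+ * Worked example of the chunking above: for a (hypothetical) 500 byte
+ * calibration blob and BCM4378_CALIBRATION_CHUNK_SIZE == 0xe6 == 230,
+ * blocks = DIV_ROUND_UP(500, 230) = 3 and the chunks are sent with lengths
+ * 230, 230 and 40 and blocks_left values 2, 1 and 0.
+ */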
+
+static int bcm4378_send_calibration(struct bcm4377_data *bcm4377)
+{
+ if ((strcmp(bcm4377->stepping, "b1") == 0) ||
+ strcmp(bcm4377->stepping, "b3") == 0)
+ return __bcm4378_send_calibration(
+ bcm4377, bcm4377->taurus_beamforming_cal_blob,
+ bcm4377->taurus_beamforming_cal_size);
+ else
+ return __bcm4378_send_calibration(bcm4377,
+ bcm4377->taurus_cal_blob,
+ bcm4377->taurus_cal_size);
+}
+
+static int bcm4387_send_calibration(struct bcm4377_data *bcm4377)
+{
+ if (strcmp(bcm4377->stepping, "c2") == 0)
+ return __bcm4378_send_calibration(
+ bcm4377, bcm4377->taurus_beamforming_cal_blob,
+ bcm4377->taurus_beamforming_cal_size);
+ else
+ return __bcm4378_send_calibration(bcm4377,
+ bcm4377->taurus_cal_blob,
+ bcm4377->taurus_cal_size);
+}
+
+static const struct firmware *bcm4377_request_blob(struct bcm4377_data *bcm4377,
+ const char *suffix)
+{
+ const struct firmware *fw;
+ char name0[64], name1[64];
+ int ret;
+
+ snprintf(name0, sizeof(name0), "brcm/brcmbt%04x%s-%s-%s.%s",
+ bcm4377->hw->id, bcm4377->stepping, bcm4377->board_type,
+ bcm4377->vendor, suffix);
+ snprintf(name1, sizeof(name1), "brcm/brcmbt%04x%s-%s.%s",
+ bcm4377->hw->id, bcm4377->stepping, bcm4377->board_type,
+ suffix);
+ dev_dbg(&bcm4377->pdev->dev, "Trying to load firmware: '%s' or '%s'\n",
+ name0, name1);
+
+ ret = firmware_request_nowarn(&fw, name0, &bcm4377->pdev->dev);
+ if (!ret)
+ return fw;
+ ret = firmware_request_nowarn(&fw, name1, &bcm4377->pdev->dev);
+ if (!ret)
+ return fw;
+
+ dev_err(&bcm4377->pdev->dev,
+ "Unable to load firmware; tried '%s' and '%s'\n", name0, name1);
+ return NULL;
+}
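+
+/*
+ * For illustration (hypothetical values): a BCM4378 with stepping "b1",
+ * board type "apple,j274" and vendor "m" would first try
+ * "brcm/brcmbt4378b1-apple,j274-m.bin" and then fall back to
+ * "brcm/brcmbt4378b1-apple,j274.bin".
+ */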
+
+static int bcm4377_send_ptb(struct bcm4377_data *bcm4377,
+ const struct firmware *fw)
+{
+ struct sk_buff *skb;
+ int ret = 0;
+
+ skb = __hci_cmd_sync(bcm4377->hdev, 0xfd98, fw->size, fw->data,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ dev_err(&bcm4377->pdev->dev, "sending ptb failed (%d)", ret);
+ return ret;
+ }
+
+ kfree_skb(skb);
+ return ret;
+}
+
+static int bcm4378_send_ptb_chunk(struct bcm4377_data *bcm4377,
+ const void *data, size_t data_len,
+ u16 blocks_left)
+{
+ struct bcm4378_hci_send_ptb_cmd cmd;
+ struct sk_buff *skb;
+
+ if (data_len > BCM4378_PTB_CHUNK_SIZE)
+ return -EINVAL;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.blocks_left = cpu_to_le16(blocks_left);
+ memcpy(cmd.data, data, data_len);
+
+ skb = __hci_cmd_sync(bcm4377->hdev, 0xfe0d, sizeof(cmd), &cmd,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static int bcm4378_send_ptb(struct bcm4377_data *bcm4377,
+ const struct firmware *fw)
+{
+ size_t chunks = DIV_ROUND_UP(fw->size, (size_t)BCM4378_PTB_CHUNK_SIZE);
+ size_t i, left, transfer_len;
+ int ret;
+
+ for (i = 0, left = fw->size; i < chunks; ++i, left -= transfer_len) {
+ transfer_len = min_t(size_t, left, BCM4378_PTB_CHUNK_SIZE);
+
+ dev_dbg(&bcm4377->pdev->dev, "sending ptb chunk %zu/%zu\n",
+ i + 1, chunks);
+ ret = bcm4378_send_ptb_chunk(
+ bcm4377, fw->data + i * BCM4378_PTB_CHUNK_SIZE,
+ transfer_len, chunks - i - 1);
+ if (ret) {
+ dev_err(&bcm4377->pdev->dev,
+ "sending ptb chunk %zu failed (%d)", i, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
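+
+/*
+ * The chunking mirrors __bcm4378_send_calibration() above: the firmware blob
+ * is split into BCM4378_PTB_CHUNK_SIZE (0xcf == 207) byte chunks, with
+ * blocks_left counting down to 0 for the final chunk.
+ */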
+
+static int bcm4377_hci_open(struct hci_dev *hdev)
+{
+ struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev);
+ int ret;
+
+ dev_dbg(&bcm4377->pdev->dev, "creating rings\n");
+
+ ret = bcm4377_create_completion_ring(bcm4377,
+ &bcm4377->hci_acl_ack_ring);
+ if (ret)
+ return ret;
+ ret = bcm4377_create_completion_ring(bcm4377,
+ &bcm4377->hci_acl_event_ring);
+ if (ret)
+ goto destroy_hci_acl_ack;
+ ret = bcm4377_create_completion_ring(bcm4377, &bcm4377->sco_ack_ring);
+ if (ret)
+ goto destroy_hci_acl_event;
+ ret = bcm4377_create_completion_ring(bcm4377, &bcm4377->sco_event_ring);
+ if (ret)
+ goto destroy_sco_ack;
+ dev_dbg(&bcm4377->pdev->dev,
+ "all completion rings successfully created!\n");
+
+ ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring);
+ if (ret)
+ goto destroy_sco_event;
+ ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring);
+ if (ret)
+ goto destroy_hci_h2d;
+ ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring);
+ if (ret)
+ goto destroy_hci_d2h;
+ ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring);
+ if (ret)
+ goto destroy_sco_h2d;
+ ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring);
+ if (ret)
+ goto destroy_sco_d2h;
+ ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->acl_d2h_ring);
+ if (ret)
+ goto destroy_acl_h2d;
+ dev_dbg(&bcm4377->pdev->dev,
+ "all transfer rings successfully created!\n");
+
+ return 0;
+
+destroy_acl_h2d:
+ bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring);
+destroy_sco_d2h:
+ bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring);
+destroy_sco_h2d:
+ bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring);
+destroy_hci_d2h:
+ bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring);
+destroy_hci_h2d:
+ bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring);
+destroy_sco_event:
+ bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_event_ring);
+destroy_sco_ack:
+ bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_ack_ring);
+destroy_hci_acl_event:
+ bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_event_ring);
+destroy_hci_acl_ack:
+ bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_ack_ring);
+
+ dev_err(&bcm4377->pdev->dev, "Creating rings failed with %d\n", ret);
+ return ret;
+}
+
+static int bcm4377_hci_close(struct hci_dev *hdev)
+{
+ struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev);
+
+ dev_dbg(&bcm4377->pdev->dev, "destroying rings in hci_close\n");
+
+ bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->acl_d2h_ring);
+ bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring);
+ bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring);
+ bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring);
+ bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring);
+ bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring);
+
+ bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_event_ring);
+ bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_ack_ring);
+ bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_event_ring);
+ bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_ack_ring);
+
+ return 0;
+}
+
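+/*
+ * The chips appear to report a default/unprogrammed address matching
+ * b[0..2] == 93:76:00 with the chip id split across b[4] (low byte) and
+ * b[5] (high byte); exactly that pattern is rejected here so that
+ * HCI_QUIRK_INVALID_BDADDR gets set in bcm4377_check_bdaddr() below.
+ */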
+static bool bcm4377_is_valid_bdaddr(struct bcm4377_data *bcm4377,
+ bdaddr_t *addr)
+{
+ if (addr->b[0] != 0x93)
+ return true;
+ if (addr->b[1] != 0x76)
+ return true;
+ if (addr->b[2] != 0x00)
+ return true;
+ if (addr->b[4] != (bcm4377->hw->id & 0xff))
+ return true;
+ if (addr->b[5] != (bcm4377->hw->id >> 8))
+ return true;
+ return false;
+}
+
+static int bcm4377_check_bdaddr(struct bcm4377_data *bcm4377)
+{
+ struct hci_rp_read_bd_addr *bda;
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(bcm4377->hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ int err = PTR_ERR(skb);
+
+ dev_err(&bcm4377->pdev->dev, "HCI_OP_READ_BD_ADDR failed (%d)",
+ err);
+ return err;
+ }
+
+ if (skb->len != sizeof(*bda)) {
+ dev_err(&bcm4377->pdev->dev,
+ "HCI_OP_READ_BD_ADDR reply length invalid");
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+ bda = (struct hci_rp_read_bd_addr *)skb->data;
+ if (!bcm4377_is_valid_bdaddr(bcm4377, &bda->bdaddr))
+ set_bit(HCI_QUIRK_INVALID_BDADDR, &bcm4377->hdev->quirks);
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static int bcm4377_hci_setup(struct hci_dev *hdev)
+{
+ struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev);
+ const struct firmware *fw;
+ int ret;
+
+ if (bcm4377->hw->send_calibration) {
+ ret = bcm4377->hw->send_calibration(bcm4377);
+ if (ret)
+ return ret;
+ }
+
+ fw = bcm4377_request_blob(bcm4377, "ptb");
+ if (!fw) {
+ dev_err(&bcm4377->pdev->dev, "failed to load PTB data");
+ return -ENOENT;
+ }
+
+ ret = bcm4377->hw->send_ptb(bcm4377, fw);
+ release_firmware(fw);
+ if (ret)
+ return ret;
+
+ return bcm4377_check_bdaddr(bcm4377);
+}
+
+static int bcm4377_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev);
+ struct bcm4377_transfer_ring *ring;
+ int ret;
+
+ switch (hci_skb_pkt_type(skb)) {
+ case HCI_COMMAND_PKT:
+ hdev->stat.cmd_tx++;
+ ring = &bcm4377->hci_h2d_ring;
+ break;
+
+ case HCI_ACLDATA_PKT:
+ hdev->stat.acl_tx++;
+ ring = &bcm4377->acl_h2d_ring;
+ break;
+
+ case HCI_SCODATA_PKT:
+ hdev->stat.sco_tx++;
+ ring = &bcm4377->sco_h2d_ring;
+ break;
+
+ default:
+ return -EILSEQ;
+ }
+
+ ret = bcm4377_enqueue(bcm4377, ring, skb->data, skb->len, false);
+ if (ret < 0) {
+ hdev->stat.err_tx++;
+ return ret;
+ }
+
+ hdev->stat.byte_tx += skb->len;
+ kfree_skb(skb);
+ return ret;
+}
+
+static int bcm4377_hci_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev);
+ struct sk_buff *skb;
+ int err;
+
+ skb = __hci_cmd_sync(hdev, 0xfc01, 6, bdaddr, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ dev_err(&bcm4377->pdev->dev,
+ "Change address command failed (%d)", err);
+ return err;
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static int bcm4377_alloc_transfer_ring(struct bcm4377_data *bcm4377,
+ struct bcm4377_transfer_ring *ring)
+{
+ size_t entry_size;
+
+ spin_lock_init(&ring->lock);
+ ring->payload_size = ALIGN(ring->payload_size, 4);
+ ring->mapped_payload_size = ALIGN(ring->mapped_payload_size, 4);
+
+ if (ring->payload_size > BCM4377_XFER_RING_MAX_INPLACE_PAYLOAD_SIZE)
+ return -EINVAL;
+ if (ring->n_entries > BCM4377_MAX_RING_SIZE)
+ return -EINVAL;
+ if (ring->virtual && ring->allow_wait)
+ return -EINVAL;
+
+ if (ring->d2h_buffers_only) {
+ if (ring->virtual)
+ return -EINVAL;
+ if (ring->payload_size)
+ return -EINVAL;
+ if (!ring->mapped_payload_size)
+ return -EINVAL;
+ }
+ if (ring->virtual)
+ return 0;
+
+ entry_size =
+ ring->payload_size + sizeof(struct bcm4377_xfer_ring_entry);
+ ring->ring = dmam_alloc_coherent(&bcm4377->pdev->dev,
+ ring->n_entries * entry_size,
+ &ring->ring_dma, GFP_KERNEL);
+ if (!ring->ring)
+ return -ENOMEM;
+
+ if (ring->allow_wait) {
+ ring->events = devm_kcalloc(&bcm4377->pdev->dev,
+ ring->n_entries,
+ sizeof(*ring->events), GFP_KERNEL);
+ if (!ring->events)
+ return -ENOMEM;
+ }
+
+ if (ring->mapped_payload_size) {
+ ring->payloads = dmam_alloc_coherent(
+ &bcm4377->pdev->dev,
+ ring->n_entries * ring->mapped_payload_size,
+ &ring->payloads_dma, GFP_KERNEL);
+ if (!ring->payloads)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int bcm4377_alloc_completion_ring(struct bcm4377_data *bcm4377,
+ struct bcm4377_completion_ring *ring)
+{
+ size_t entry_size;
+
+ ring->payload_size = ALIGN(ring->payload_size, 4);
+ if (ring->payload_size > BCM4377_XFER_RING_MAX_INPLACE_PAYLOAD_SIZE)
+ return -EINVAL;
+ if (ring->n_entries > BCM4377_MAX_RING_SIZE)
+ return -EINVAL;
+
+ entry_size = ring->payload_size +
+ sizeof(struct bcm4377_completion_ring_entry);
+
+ ring->ring = dmam_alloc_coherent(&bcm4377->pdev->dev,
+ ring->n_entries * entry_size,
+ &ring->ring_dma, GFP_KERNEL);
+ if (!ring->ring)
+ return -ENOMEM;
+ return 0;
+}
+
+static int bcm4377_init_context(struct bcm4377_data *bcm4377)
+{
+ struct device *dev = &bcm4377->pdev->dev;
+ dma_addr_t peripheral_info_dma;
+
+ bcm4377->ctx = dmam_alloc_coherent(dev, sizeof(*bcm4377->ctx),
+ &bcm4377->ctx_dma, GFP_KERNEL);
+ if (!bcm4377->ctx)
+ return -ENOMEM;
+ memset(bcm4377->ctx, 0, sizeof(*bcm4377->ctx));
+
+ bcm4377->ring_state =
+ dmam_alloc_coherent(dev, sizeof(*bcm4377->ring_state),
+ &bcm4377->ring_state_dma, GFP_KERNEL);
+ if (!bcm4377->ring_state)
+ return -ENOMEM;
+ memset(bcm4377->ring_state, 0, sizeof(*bcm4377->ring_state));
+
+ bcm4377->ctx->version = cpu_to_le16(1);
+ bcm4377->ctx->size = cpu_to_le16(sizeof(*bcm4377->ctx));
+ bcm4377->ctx->enabled_caps = cpu_to_le32(2);
+
+ /*
+ * The BT device will write 0x20 bytes of data to this buffer but
+ * the exact contents are unknown. The buffer only needs to exist for
+ * the device to work, so we can simply allocate it and then ignore it.
+ */
+ if (!dmam_alloc_coherent(&bcm4377->pdev->dev, 0x20,
+ &peripheral_info_dma, GFP_KERNEL))
+ return -ENOMEM;
+ bcm4377->ctx->peripheral_info_addr = cpu_to_le64(peripheral_info_dma);
+
+ bcm4377->ctx->xfer_ring_heads_addr = cpu_to_le64(
+ bcm4377->ring_state_dma +
+ offsetof(struct bcm4377_ring_state, xfer_ring_head));
+ bcm4377->ctx->xfer_ring_tails_addr = cpu_to_le64(
+ bcm4377->ring_state_dma +
+ offsetof(struct bcm4377_ring_state, xfer_ring_tail));
+ bcm4377->ctx->completion_ring_heads_addr = cpu_to_le64(
+ bcm4377->ring_state_dma +
+ offsetof(struct bcm4377_ring_state, completion_ring_head));
+ bcm4377->ctx->completion_ring_tails_addr = cpu_to_le64(
+ bcm4377->ring_state_dma +
+ offsetof(struct bcm4377_ring_state, completion_ring_tail));
+
+ bcm4377->ctx->n_completion_rings =
+ cpu_to_le16(BCM4377_N_COMPLETION_RINGS);
+ bcm4377->ctx->n_xfer_rings = cpu_to_le16(BCM4377_N_TRANSFER_RINGS);
+
+ bcm4377->ctx->control_completion_ring_addr =
+ cpu_to_le64(bcm4377->control_ack_ring.ring_dma);
+ bcm4377->ctx->control_completion_ring_n_entries =
+ cpu_to_le16(bcm4377->control_ack_ring.n_entries);
+ bcm4377->ctx->control_completion_ring_doorbell = cpu_to_le16(0xffff);
+ bcm4377->ctx->control_completion_ring_msi = 0;
+ bcm4377->ctx->control_completion_ring_header_size = 0;
+ bcm4377->ctx->control_completion_ring_footer_size = 0;
+
+ bcm4377->ctx->control_xfer_ring_addr =
+ cpu_to_le64(bcm4377->control_h2d_ring.ring_dma);
+ bcm4377->ctx->control_xfer_ring_n_entries =
+ cpu_to_le16(bcm4377->control_h2d_ring.n_entries);
+ bcm4377->ctx->control_xfer_ring_doorbell =
+ cpu_to_le16(bcm4377->control_h2d_ring.doorbell);
+ bcm4377->ctx->control_xfer_ring_msi = 0;
+ bcm4377->ctx->control_xfer_ring_header_size = 0;
+ bcm4377->ctx->control_xfer_ring_footer_size =
+ bcm4377->control_h2d_ring.payload_size / 4;
+
+ dev_dbg(&bcm4377->pdev->dev, "context initialized at IOVA %pad",
+ &bcm4377->ctx_dma);
+
+ return 0;
+}
+
+static int bcm4377_prepare_rings(struct bcm4377_data *bcm4377)
+{
+ int ret;
+
+ /*
+ * Even though many of these settings appear to be configurable
+ * when sending the "create ring" messages, most of them are
+ * actually hardcoded in some (and quite possibly all) firmware versions
+ * and changing them on the host has no effect.
+ * Specifically, this applies to at least the doorbells, the transfer
+ * and completion ring ids and their mapping (e.g. both HCI and ACL
+ * entries will always be queued in completion rings 1 and 2 no matter
+ * what we configure here).
+ */
+ bcm4377->control_ack_ring.ring_id = BCM4377_ACK_RING_CONTROL;
+ bcm4377->control_ack_ring.n_entries = 32;
+ bcm4377->control_ack_ring.transfer_rings =
+ BIT(BCM4377_XFER_RING_CONTROL);
+
+ bcm4377->hci_acl_ack_ring.ring_id = BCM4377_ACK_RING_HCI_ACL;
+ bcm4377->hci_acl_ack_ring.n_entries = 2 * BCM4377_RING_N_ENTRIES;
+ bcm4377->hci_acl_ack_ring.transfer_rings =
+ BIT(BCM4377_XFER_RING_HCI_H2D) | BIT(BCM4377_XFER_RING_ACL_H2D);
+ bcm4377->hci_acl_ack_ring.delay = 1000;
+
+ /*
+ * A payload size of MAX_EVENT_PAYLOAD_SIZE is enough here since large
+ * ACL packets will be transmitted inside buffers mapped via
+ * acl_d2h_ring anyway.
+ */
+ bcm4377->hci_acl_event_ring.ring_id = BCM4377_EVENT_RING_HCI_ACL;
+ bcm4377->hci_acl_event_ring.payload_size = MAX_EVENT_PAYLOAD_SIZE;
+ bcm4377->hci_acl_event_ring.n_entries = 2 * BCM4377_RING_N_ENTRIES;
+ bcm4377->hci_acl_event_ring.transfer_rings =
+ BIT(BCM4377_XFER_RING_HCI_D2H) | BIT(BCM4377_XFER_RING_ACL_D2H);
+ bcm4377->hci_acl_event_ring.delay = 1000;
+
+ bcm4377->sco_ack_ring.ring_id = BCM4377_ACK_RING_SCO;
+ bcm4377->sco_ack_ring.n_entries = BCM4377_RING_N_ENTRIES;
+ bcm4377->sco_ack_ring.transfer_rings = BIT(BCM4377_XFER_RING_SCO_H2D);
+
+ bcm4377->sco_event_ring.ring_id = BCM4377_EVENT_RING_SCO;
+ bcm4377->sco_event_ring.payload_size = MAX_SCO_PAYLOAD_SIZE;
+ bcm4377->sco_event_ring.n_entries = BCM4377_RING_N_ENTRIES;
+ bcm4377->sco_event_ring.transfer_rings = BIT(BCM4377_XFER_RING_SCO_D2H);
+
+ bcm4377->control_h2d_ring.ring_id = BCM4377_XFER_RING_CONTROL;
+ bcm4377->control_h2d_ring.doorbell = BCM4377_DOORBELL_CONTROL;
+ bcm4377->control_h2d_ring.payload_size = BCM4377_CONTROL_MSG_SIZE;
+ bcm4377->control_h2d_ring.completion_ring = BCM4377_ACK_RING_CONTROL;
+ bcm4377->control_h2d_ring.allow_wait = true;
+ bcm4377->control_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES;
+
+ bcm4377->hci_h2d_ring.ring_id = BCM4377_XFER_RING_HCI_H2D;
+ bcm4377->hci_h2d_ring.doorbell = BCM4377_DOORBELL_HCI_H2D;
+ bcm4377->hci_h2d_ring.payload_size = MAX_EVENT_PAYLOAD_SIZE;
+ bcm4377->hci_h2d_ring.completion_ring = BCM4377_ACK_RING_HCI_ACL;
+ bcm4377->hci_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES;
+
+ bcm4377->hci_d2h_ring.ring_id = BCM4377_XFER_RING_HCI_D2H;
+ bcm4377->hci_d2h_ring.doorbell = BCM4377_DOORBELL_HCI_D2H;
+ bcm4377->hci_d2h_ring.completion_ring = BCM4377_EVENT_RING_HCI_ACL;
+ bcm4377->hci_d2h_ring.virtual = true;
+ bcm4377->hci_d2h_ring.n_entries = BCM4377_RING_N_ENTRIES;
+
+ bcm4377->sco_h2d_ring.ring_id = BCM4377_XFER_RING_SCO_H2D;
+ bcm4377->sco_h2d_ring.doorbell = BCM4377_DOORBELL_SCO;
+ bcm4377->sco_h2d_ring.payload_size = MAX_SCO_PAYLOAD_SIZE;
+ bcm4377->sco_h2d_ring.completion_ring = BCM4377_ACK_RING_SCO;
+ bcm4377->sco_h2d_ring.sync = true;
+ bcm4377->sco_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES;
+
+ bcm4377->sco_d2h_ring.ring_id = BCM4377_XFER_RING_SCO_D2H;
+ bcm4377->sco_d2h_ring.doorbell = BCM4377_DOORBELL_SCO;
+ bcm4377->sco_d2h_ring.completion_ring = BCM4377_EVENT_RING_SCO;
+ bcm4377->sco_d2h_ring.virtual = true;
+ bcm4377->sco_d2h_ring.sync = true;
+ bcm4377->sco_d2h_ring.n_entries = BCM4377_RING_N_ENTRIES;
+
+ /*
+ * This ring has to use mapped_payload_size because the largest ACL
+ * packet doesn't fit inside the largest possible footer
+ */
+ bcm4377->acl_h2d_ring.ring_id = BCM4377_XFER_RING_ACL_H2D;
+ bcm4377->acl_h2d_ring.doorbell = BCM4377_DOORBELL_ACL_H2D;
+ bcm4377->acl_h2d_ring.mapped_payload_size = MAX_ACL_PAYLOAD_SIZE;
+ bcm4377->acl_h2d_ring.completion_ring = BCM4377_ACK_RING_HCI_ACL;
+ bcm4377->acl_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES;
+
+ /*
+ * This ring only contains empty buffers to be used by incoming
+ * ACL packets that do not fit inside the footer of hci_acl_event_ring
+ */
+ bcm4377->acl_d2h_ring.ring_id = BCM4377_XFER_RING_ACL_D2H;
+ bcm4377->acl_d2h_ring.doorbell = BCM4377_DOORBELL_ACL_D2H;
+ bcm4377->acl_d2h_ring.completion_ring = BCM4377_EVENT_RING_HCI_ACL;
+ bcm4377->acl_d2h_ring.d2h_buffers_only = true;
+ bcm4377->acl_d2h_ring.mapped_payload_size = MAX_ACL_PAYLOAD_SIZE;
+ bcm4377->acl_d2h_ring.n_entries = BCM4377_RING_N_ENTRIES;
+
+ /*
+ * no need for any cleanup since this is only called from _probe
+ * and only devres-managed allocations are used
+ */
+ ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->control_h2d_ring);
+ if (ret)
+ return ret;
+ ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring);
+ if (ret)
+ return ret;
+ ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring);
+ if (ret)
+ return ret;
+ ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring);
+ if (ret)
+ return ret;
+ ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring);
+ if (ret)
+ return ret;
+ ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring);
+ if (ret)
+ return ret;
+ ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->acl_d2h_ring);
+ if (ret)
+ return ret;
+
+ ret = bcm4377_alloc_completion_ring(bcm4377,
+ &bcm4377->control_ack_ring);
+ if (ret)
+ return ret;
+ ret = bcm4377_alloc_completion_ring(bcm4377,
+ &bcm4377->hci_acl_ack_ring);
+ if (ret)
+ return ret;
+ ret = bcm4377_alloc_completion_ring(bcm4377,
+ &bcm4377->hci_acl_event_ring);
+ if (ret)
+ return ret;
+ ret = bcm4377_alloc_completion_ring(bcm4377, &bcm4377->sco_ack_ring);
+ if (ret)
+ return ret;
+ ret = bcm4377_alloc_completion_ring(bcm4377, &bcm4377->sco_event_ring);
+ if (ret)
+ return ret;
+
+ dev_dbg(&bcm4377->pdev->dev, "all rings allocated and prepared\n");
+
+ return 0;
+}
+
+static int bcm4377_boot(struct bcm4377_data *bcm4377)
+{
+ const struct firmware *fw;
+ void *bfr;
+ dma_addr_t fw_dma;
+ int ret = 0;
+ u32 bootstage, rti_status;
+
+ bootstage = ioread32(bcm4377->bar2 + BCM4377_BAR2_BOOTSTAGE);
+ rti_status = ioread32(bcm4377->bar2 + BCM4377_BAR2_RTI_STATUS);
+
+ if (bootstage != 0) {
+ dev_err(&bcm4377->pdev->dev, "bootstage is %d and not 0\n",
+ bootstage);
+ return -EINVAL;
+ }
+
+ if (rti_status != 0) {
+ dev_err(&bcm4377->pdev->dev, "RTI status is %d and not 0\n",
+ rti_status);
+ return -EINVAL;
+ }
+
+ fw = bcm4377_request_blob(bcm4377, "bin");
+ if (!fw) {
+ dev_err(&bcm4377->pdev->dev, "Failed to load firmware\n");
+ return -ENOENT;
+ }
+
+ bfr = dma_alloc_coherent(&bcm4377->pdev->dev, fw->size, &fw_dma,
+ GFP_KERNEL);
+ if (!bfr) {
+ ret = -ENOMEM;
+ goto out_release_fw;
+ }
+
+ memcpy(bfr, fw->data, fw->size);
+
+ iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_LO);
+ iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_HI);
+ iowrite32(BCM4377_DMA_MASK,
+ bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_SIZE);
+
+ iowrite32(lower_32_bits(fw_dma), bcm4377->bar2 + BCM4377_BAR2_FW_LO);
+ iowrite32(upper_32_bits(fw_dma), bcm4377->bar2 + BCM4377_BAR2_FW_HI);
+ iowrite32(fw->size, bcm4377->bar2 + BCM4377_BAR2_FW_SIZE);
+ iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_FW_DOORBELL);
+
+ dev_dbg(&bcm4377->pdev->dev, "waiting for firmware to boot\n");
+
+ ret = wait_for_completion_interruptible_timeout(&bcm4377->event,
+ BCM4377_TIMEOUT);
+ if (ret == 0) {
+ ret = -ETIMEDOUT;
+ goto out_dma_free;
+ } else if (ret < 0) {
+ goto out_dma_free;
+ }
+
+ if (bcm4377->bootstage != 2) {
+ dev_err(&bcm4377->pdev->dev, "boostage %d != 2\n",
+ bcm4377->bootstage);
+ ret = -ENXIO;
+ goto out_dma_free;
+ }
+
+ dev_dbg(&bcm4377->pdev->dev, "firmware has booted (stage = %x)\n",
+ bcm4377->bootstage);
+ ret = 0;
+
+out_dma_free:
+ dma_free_coherent(&bcm4377->pdev->dev, fw->size, bfr, fw_dma);
+out_release_fw:
+ release_firmware(fw);
+ return ret;
+}
+
+static int bcm4377_setup_rti(struct bcm4377_data *bcm4377)
+{
+ int ret;
+
+ dev_dbg(&bcm4377->pdev->dev, "starting RTI\n");
+ iowrite32(1, bcm4377->bar0 + BCM4377_BAR0_RTI_CONTROL);
+
+ ret = wait_for_completion_interruptible_timeout(&bcm4377->event,
+ BCM4377_TIMEOUT);
+ if (ret == 0) {
+ dev_err(&bcm4377->pdev->dev,
+ "timed out while waiting for RTI to transition to state 1");
+ return -ETIMEDOUT;
+ } else if (ret < 0) {
+ return ret;
+ }
+
+ if (bcm4377->rti_status != 1) {
+ dev_err(&bcm4377->pdev->dev, "RTI did not ack state 1 (%d)\n",
+ bcm4377->rti_status);
+ return -ENODEV;
+ }
+ dev_dbg(&bcm4377->pdev->dev, "RTI is in state 1\n");
+
+ /* allow access to the entire IOVA space again */
+ iowrite32(0, bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_LO);
+ iowrite32(0, bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_HI);
+ iowrite32(BCM4377_DMA_MASK,
+ bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_SIZE);
+
+ /* setup "Converged IPC" context */
+ iowrite32(lower_32_bits(bcm4377->ctx_dma),
+ bcm4377->bar2 + BCM4377_BAR2_CONTEXT_ADDR_LO);
+ iowrite32(upper_32_bits(bcm4377->ctx_dma),
+ bcm4377->bar2 + BCM4377_BAR2_CONTEXT_ADDR_HI);
+ iowrite32(2, bcm4377->bar0 + BCM4377_BAR0_RTI_CONTROL);
+
+ ret = wait_for_completion_interruptible_timeout(&bcm4377->event,
+ BCM4377_TIMEOUT);
+ if (ret == 0) {
+ dev_err(&bcm4377->pdev->dev,
+ "timed out while waiting for RTI to transition to state 2");
+ return -ETIMEDOUT;
+ } else if (ret < 0) {
+ return ret;
+ }
+
+ if (bcm4377->rti_status != 2) {
+ dev_err(&bcm4377->pdev->dev, "RTI did not ack state 2 (%d)\n",
+ bcm4377->rti_status);
+ return -ENODEV;
+ }
+
+ dev_dbg(&bcm4377->pdev->dev,
+ "RTI is in state 2; control ring is ready\n");
+ bcm4377->control_ack_ring.enabled = true;
+
+ return 0;
+}
+
+static int bcm4377_parse_otp_board_params(struct bcm4377_data *bcm4377,
+ char tag, const char *val, size_t len)
+{
+ if (tag != 'V')
+ return 0;
+ if (len >= sizeof(bcm4377->vendor))
+ return -EINVAL;
+
+ strscpy(bcm4377->vendor, val, len + 1);
+ return 0;
+}
+
+static int bcm4377_parse_otp_chip_params(struct bcm4377_data *bcm4377, char tag,
+ const char *val, size_t len)
+{
+ size_t idx = 0;
+
+ if (tag != 's')
+ return 0;
+ if (len >= sizeof(bcm4377->stepping))
+ return -EINVAL;
+
+ while (len != 0) {
+ bcm4377->stepping[idx] = tolower(val[idx]);
+ if (val[idx] == '\0')
+ return 0;
+
+ idx++;
+ len--;
+ }
+
+ bcm4377->stepping[idx] = '\0';
+ return 0;
+}
+
+static int bcm4377_parse_otp_str(struct bcm4377_data *bcm4377, const u8 *str,
+ enum bcm4377_otp_params_type type)
+{
+ const char *p;
+ int ret;
+
+ p = skip_spaces(str);
+ while (*p) {
+ char tag = *p++;
+ const char *end;
+ size_t len;
+
+ if (*p++ != '=') /* implicit NUL check */
+ return -EINVAL;
+
+ /* *p might be NUL here, if so end == p and len == 0 */
+ end = strchrnul(p, ' ');
+ len = end - p;
+
+ /* leave 1 byte for NUL in destination string */
+ if (len > (BCM4377_OTP_MAX_PARAM_LEN - 1))
+ return -EINVAL;
+
+ switch (type) {
+ case BCM4377_OTP_BOARD_PARAMS:
+ ret = bcm4377_parse_otp_board_params(bcm4377, tag, p,
+ len);
+ break;
+ case BCM4377_OTP_CHIP_PARAMS:
+ ret = bcm4377_parse_otp_chip_params(bcm4377, tag, p,
+ len);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Skip to next arg, if any */
+ p = skip_spaces(end);
+ }
+
+ return 0;
+}
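+
+/*
+ * The OTP parameter strings are space-separated "tag=value" pairs. As a
+ * (hypothetical) example, a chip parameter string "s=B3" ends up lowercased
+ * in bcm4377->stepping as "b3", and a board parameter string "V=m" ends up
+ * in bcm4377->vendor as "m".
+ */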
+
+static int bcm4377_parse_otp_sys_vendor(struct bcm4377_data *bcm4377, u8 *otp,
+ size_t size)
+{
+ int idx = 4;
+ const char *chip_params;
+ const char *board_params;
+ int ret;
+
+ /* 4-byte header and two empty strings */
+ if (size < 6)
+ return -EINVAL;
+
+ if (get_unaligned_le32(otp) != BCM4377_OTP_VENDOR_HDR)
+ return -EINVAL;
+
+ chip_params = &otp[idx];
+
+ /* Skip first string, including terminator */
+ idx += strnlen(chip_params, size - idx) + 1;
+ if (idx >= size)
+ return -EINVAL;
+
+ board_params = &otp[idx];
+
+ /* Skip to terminator of second string */
+ idx += strnlen(board_params, size - idx);
+ if (idx >= size)
+ return -EINVAL;
+
+ /* At this point both strings are guaranteed NUL-terminated */
+ dev_dbg(&bcm4377->pdev->dev,
+ "OTP: chip_params='%s' board_params='%s'\n", chip_params,
+ board_params);
+
+ ret = bcm4377_parse_otp_str(bcm4377, chip_params,
+ BCM4377_OTP_CHIP_PARAMS);
+ if (ret)
+ return ret;
+
+ ret = bcm4377_parse_otp_str(bcm4377, board_params,
+ BCM4377_OTP_BOARD_PARAMS);
+ if (ret)
+ return ret;
+
+ if (!bcm4377->stepping[0] || !bcm4377->vendor[0])
+ return -EINVAL;
+
+ dev_dbg(&bcm4377->pdev->dev, "OTP: stepping=%s, vendor=%s\n",
+ bcm4377->stepping, bcm4377->vendor);
+ return 0;
+}
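+
+/*
+ * A SYS_VENDOR OTP entry thus consists of the 4-byte BCM4377_OTP_VENDOR_HDR
+ * magic followed by two NUL-terminated strings, e.g. (hypothetical)
+ * "s=b1\0" with the chip parameters and "V=m\0" with the board parameters.
+ */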
+
+static int bcm4377_parse_otp(struct bcm4377_data *bcm4377)
+{
+ u8 *otp;
+ int i;
+ int ret = -ENOENT;
+
+ otp = kzalloc(BCM4377_OTP_SIZE, GFP_KERNEL);
+ if (!otp)
+ return -ENOMEM;
+
+ for (i = 0; i < BCM4377_OTP_SIZE; ++i)
+ otp[i] = ioread8(bcm4377->bar0 + bcm4377->hw->otp_offset + i);
+
+ i = 0;
+ while (i < (BCM4377_OTP_SIZE - 1)) {
+ u8 type = otp[i];
+ u8 length = otp[i + 1];
+
+ if (type == 0)
+ break;
+
+ if ((i + 2 + length) > BCM4377_OTP_SIZE)
+ break;
+
+ switch (type) {
+ case BCM4377_OTP_SYS_VENDOR:
+ dev_dbg(&bcm4377->pdev->dev,
+ "OTP @ 0x%x (%d): SYS_VENDOR", i, length);
+ ret = bcm4377_parse_otp_sys_vendor(bcm4377, &otp[i + 2],
+ length);
+ break;
+ case BCM4377_OTP_CIS:
+ dev_dbg(&bcm4377->pdev->dev, "OTP @ 0x%x (%d): CIS", i,
+ length);
+ break;
+ default:
+ dev_dbg(&bcm4377->pdev->dev, "OTP @ 0x%x (%d): unknown",
+ i, length);
+ break;
+ }
+
+ i += 2 + length;
+ }
+
+ kfree(otp);
+ return ret;
+}
+
+static int bcm4377_init_cfg(struct bcm4377_data *bcm4377)
+{
+ int ret;
+ u32 ctrl;
+
+ ret = pci_write_config_dword(bcm4377->pdev,
+ BCM4377_PCIECFG_BAR0_WINDOW1,
+ bcm4377->hw->bar0_window1);
+ if (ret)
+ return ret;
+
+ ret = pci_write_config_dword(bcm4377->pdev,
+ BCM4377_PCIECFG_BAR0_WINDOW2,
+ bcm4377->hw->bar0_window2);
+ if (ret)
+ return ret;
+
+ ret = pci_write_config_dword(
+ bcm4377->pdev, BCM4377_PCIECFG_BAR0_CORE2_WINDOW1,
+ BCM4377_PCIECFG_BAR0_CORE2_WINDOW1_DEFAULT);
+ if (ret)
+ return ret;
+
+ if (bcm4377->hw->has_bar0_core2_window2) {
+ ret = pci_write_config_dword(bcm4377->pdev,
+ BCM4377_PCIECFG_BAR0_CORE2_WINDOW2,
+ bcm4377->hw->bar0_core2_window2);
+ if (ret)
+ return ret;
+ }
+
+ ret = pci_write_config_dword(bcm4377->pdev, BCM4377_PCIECFG_BAR2_WINDOW,
+ BCM4377_PCIECFG_BAR2_WINDOW_DEFAULT);
+ if (ret)
+ return ret;
+
+ ret = pci_read_config_dword(bcm4377->pdev,
+ BCM4377_PCIECFG_SUBSYSTEM_CTRL, &ctrl);
+ if (ret)
+ return ret;
+
+ if (bcm4377->hw->clear_pciecfg_subsystem_ctrl_bit19)
+ ctrl &= ~BIT(19);
+ ctrl |= BIT(16);
+
+ return pci_write_config_dword(bcm4377->pdev,
+ BCM4377_PCIECFG_SUBSYSTEM_CTRL, ctrl);
+}
+
+static int bcm4377_probe_dmi(struct bcm4377_data *bcm4377)
+{
+ const struct dmi_system_id *board_type_dmi_id;
+
+ board_type_dmi_id = dmi_first_match(bcm4377_dmi_board_table);
+ if (board_type_dmi_id && board_type_dmi_id->driver_data) {
+ bcm4377->board_type = board_type_dmi_id->driver_data;
+ dev_dbg(&bcm4377->pdev->dev,
+ "found board type via DMI match: %s\n",
+ bcm4377->board_type);
+ }
+
+ return 0;
+}
+
+static int bcm4377_probe_of(struct bcm4377_data *bcm4377)
+{
+ struct device_node *np = bcm4377->pdev->dev.of_node;
+ int ret;
+
+ if (!np)
+ return 0;
+
+ ret = of_property_read_string(np, "brcm,board-type",
+ &bcm4377->board_type);
+ if (ret) {
+ dev_err(&bcm4377->pdev->dev, "no brcm,board-type property\n");
+ return ret;
+ }
+
+ bcm4377->taurus_beamforming_cal_blob =
+ of_get_property(np, "brcm,taurus-bf-cal-blob",
+ &bcm4377->taurus_beamforming_cal_size);
+ if (!bcm4377->taurus_beamforming_cal_blob) {
+ dev_err(&bcm4377->pdev->dev,
+ "no brcm,taurus-bf-cal-blob property\n");
+ return -ENOENT;
+ }
+ bcm4377->taurus_cal_blob = of_get_property(np, "brcm,taurus-cal-blob",
+ &bcm4377->taurus_cal_size);
+ if (!bcm4377->taurus_cal_blob) {
+ dev_err(&bcm4377->pdev->dev,
+ "no brcm,taurus-cal-blob property\n");
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static void bcm4377_disable_aspm(struct bcm4377_data *bcm4377)
+{
+ pci_disable_link_state(bcm4377->pdev,
+ PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+
+ /*
+ * pci_disable_link_state can fail if either CONFIG_PCIEASPM is disabled
+ * or if the BIOS hasn't handed over control to us. We must *always*
+ * disable ASPM for this device due to hardware errata though.
+ */
+ pcie_capability_clear_word(bcm4377->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC);
+}
+
+static void bcm4377_pci_free_irq_vectors(void *data)
+{
+ pci_free_irq_vectors(data);
+}
+
+static void bcm4377_hci_free_dev(void *data)
+{
+ hci_free_dev(data);
+}
+
+static void bcm4377_hci_unregister_dev(void *data)
+{
+ hci_unregister_dev(data);
+}
+
+static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct bcm4377_data *bcm4377;
+ struct hci_dev *hdev;
+ int ret, irq;
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, BCM4377_DMA_MASK);
+ if (ret)
+ return ret;
+
+ bcm4377 = devm_kzalloc(&pdev->dev, sizeof(*bcm4377), GFP_KERNEL);
+ if (!bcm4377)
+ return -ENOMEM;
+
+ bcm4377->pdev = pdev;
+ bcm4377->hw = &bcm4377_hw_variants[id->driver_data];
+ init_completion(&bcm4377->event);
+
+ ret = bcm4377_prepare_rings(bcm4377);
+ if (ret)
+ return ret;
+
+ ret = bcm4377_init_context(bcm4377);
+ if (ret)
+ return ret;
+
+ ret = bcm4377_probe_dmi(bcm4377);
+ if (ret)
+ return ret;
+ ret = bcm4377_probe_of(bcm4377);
+ if (ret)
+ return ret;
+ if (!bcm4377->board_type) {
+ dev_err(&pdev->dev, "unable to determine board type\n");
+ return -ENODEV;
+ }
+
+ if (bcm4377->hw->disable_aspm)
+ bcm4377_disable_aspm(bcm4377);
+
+ ret = pci_reset_function_locked(pdev);
+ if (ret)
+ dev_warn(
+ &pdev->dev,
+ "function level reset failed with %d; trying to continue anyway\n",
+ ret);
+
+	/*
+	 * If this delay is too short and any BAR is accessed too early, the
+	 * device will crash. Experiments have shown that approximately
+	 * 50 msec is the minimum amount we have to wait. Let's double that
+	 * to be safe.
+	 */
+ msleep(100);
+
+	ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+ pci_set_master(pdev);
+
+ ret = bcm4377_init_cfg(bcm4377);
+ if (ret)
+ return ret;
+
+ bcm4377->bar0 = pcim_iomap(pdev, 0, 0);
+ if (!bcm4377->bar0)
+ return -EBUSY;
+ bcm4377->bar2 = pcim_iomap(pdev, 2, 0);
+ if (!bcm4377->bar2)
+ return -EBUSY;
+
+ ret = bcm4377_parse_otp(bcm4377);
+ if (ret) {
+ dev_err(&pdev->dev, "Reading OTP failed with %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Legacy interrupts result in an IRQ storm because we don't know where
+ * the interrupt mask and status registers for these chips are.
+ * MSIs are acked automatically instead.
+ */
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (ret < 0)
+ return -ENODEV;
+ ret = devm_add_action_or_reset(&pdev->dev, bcm4377_pci_free_irq_vectors,
+ pdev);
+ if (ret)
+ return ret;
+
+ irq = pci_irq_vector(pdev, 0);
+ if (irq <= 0)
+ return -ENODEV;
+
+ ret = devm_request_irq(&pdev->dev, irq, bcm4377_irq, 0, "bcm4377",
+ bcm4377);
+ if (ret)
+ return ret;
+
+ hdev = hci_alloc_dev();
+ if (!hdev)
+ return -ENOMEM;
+ ret = devm_add_action_or_reset(&pdev->dev, bcm4377_hci_free_dev, hdev);
+ if (ret)
+ return ret;
+
+ bcm4377->hdev = hdev;
+
+ hdev->bus = HCI_PCI;
+ hdev->dev_type = HCI_PRIMARY;
+ hdev->open = bcm4377_hci_open;
+ hdev->close = bcm4377_hci_close;
+ hdev->send = bcm4377_hci_send_frame;
+ hdev->set_bdaddr = bcm4377_hci_set_bdaddr;
+ hdev->setup = bcm4377_hci_setup;
+
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+ if (bcm4377->hw->broken_mws_transport_config)
+ set_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &hdev->quirks);
+ if (bcm4377->hw->broken_ext_scan)
+ set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
+
+ pci_set_drvdata(pdev, bcm4377);
+ hci_set_drvdata(hdev, bcm4377);
+ SET_HCIDEV_DEV(hdev, &pdev->dev);
+
+ ret = bcm4377_boot(bcm4377);
+ if (ret)
+ return ret;
+
+ ret = bcm4377_setup_rti(bcm4377);
+ if (ret)
+ return ret;
+
+ ret = hci_register_dev(hdev);
+ if (ret)
+ return ret;
+ return devm_add_action_or_reset(&pdev->dev, bcm4377_hci_unregister_dev,
+ hdev);
+}
+
+static int bcm4377_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct bcm4377_data *bcm4377 = pci_get_drvdata(pdev);
+ int ret;
+
+ ret = hci_suspend_dev(bcm4377->hdev);
+ if (ret)
+ return ret;
+
+ iowrite32(BCM4377_BAR0_SLEEP_CONTROL_QUIESCE,
+ bcm4377->bar0 + BCM4377_BAR0_SLEEP_CONTROL);
+
+ return 0;
+}
+
+static int bcm4377_resume(struct pci_dev *pdev)
+{
+ struct bcm4377_data *bcm4377 = pci_get_drvdata(pdev);
+
+ iowrite32(BCM4377_BAR0_SLEEP_CONTROL_UNQUIESCE,
+ bcm4377->bar0 + BCM4377_BAR0_SLEEP_CONTROL);
+
+ return hci_resume_dev(bcm4377->hdev);
+}
+
+static const struct dmi_system_id bcm4377_dmi_board_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir9,1"),
+ },
+ .driver_data = "apple,formosa",
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro15,4"),
+ },
+ .driver_data = "apple,formosa",
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,3"),
+ },
+ .driver_data = "apple,formosa",
+ },
+ {}
+};
+
+static const struct bcm4377_hw bcm4377_hw_variants[] = {
+ [BCM4377] = {
+ .id = 0x4377,
+ .otp_offset = 0x4120,
+ .bar0_window1 = 0x1800b000,
+ .bar0_window2 = 0x1810c000,
+ .disable_aspm = true,
+ .broken_ext_scan = true,
+ .send_ptb = bcm4377_send_ptb,
+ },
+
+ [BCM4378] = {
+ .id = 0x4378,
+ .otp_offset = 0x4120,
+ .bar0_window1 = 0x18002000,
+ .bar0_window2 = 0x1810a000,
+ .bar0_core2_window2 = 0x18107000,
+ .has_bar0_core2_window2 = true,
+ .broken_mws_transport_config = true,
+ .send_calibration = bcm4378_send_calibration,
+ .send_ptb = bcm4378_send_ptb,
+ },
+
+ [BCM4387] = {
+ .id = 0x4387,
+ .otp_offset = 0x413c,
+ .bar0_window1 = 0x18002000,
+ .bar0_window2 = 0x18109000,
+ .bar0_core2_window2 = 0x18106000,
+ .has_bar0_core2_window2 = true,
+ .clear_pciecfg_subsystem_ctrl_bit19 = true,
+ .broken_mws_transport_config = true,
+ .send_calibration = bcm4387_send_calibration,
+ .send_ptb = bcm4378_send_ptb,
+ },
+};
+
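+/*
+ * For illustration: BCM4377_DEVID_ENTRY(4377) expands to a match on
+ * PCI_VENDOR_ID_BROADCOM / BCM4377_DEVICE_ID with the network class code,
+ * and stores BCM4377 as driver_data so that probe() can index
+ * bcm4377_hw_variants.
+ */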
+#define BCM4377_DEVID_ENTRY(id) \
+ { \
+ PCI_VENDOR_ID_BROADCOM, BCM##id##_DEVICE_ID, PCI_ANY_ID, \
+ PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, \
+ BCM##id \
+ }
+
+static const struct pci_device_id bcm4377_devid_table[] = {
+ BCM4377_DEVID_ENTRY(4377),
+ BCM4377_DEVID_ENTRY(4378),
+ BCM4377_DEVID_ENTRY(4387),
+ {},
+};
+MODULE_DEVICE_TABLE(pci, bcm4377_devid_table);
+
+static struct pci_driver bcm4377_pci_driver = {
+ .name = "hci_bcm4377",
+ .id_table = bcm4377_devid_table,
+ .probe = bcm4377_probe,
+ .suspend = bcm4377_suspend,
+ .resume = bcm4377_resume,
+};
+module_pci_driver(bcm4377_pci_driver);
+
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Bluetooth support for Broadcom 4377/4378/4387 devices");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_FIRMWARE("brcm/brcmbt4377*.bin");
+MODULE_FIRMWARE("brcm/brcmbt4377*.ptb");
+MODULE_FIRMWARE("brcm/brcmbt4378*.bin");
+MODULE_FIRMWARE("brcm/brcmbt4378*.ptb");
+MODULE_FIRMWARE("brcm/brcmbt4387*.bin");
+MODULE_FIRMWARE("brcm/brcmbt4387*.ptb");
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 3da8e85f8aae..bb23d57758cf 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -98,6 +98,19 @@ config HW_RANDOM_BCM2835
If unsure, say Y.
+config HW_RANDOM_BCM2835_RUST
+ tristate "Rust implementation of Broadcom BCM2835 Random Number Generator"
+ depends on RUST && ARCH_BCM2835
+ help
+ This driver provides alternative Rust-based kernel-side support
+ for the Random Number Generator hardware found on the Broadcom
+ BCM2835 SoC.
+
+ To compile this driver as a module, choose M here: the
+	  module will be called bcm2835_rng_rust.
+
+ If unsure, say N.
+
config HW_RANDOM_IPROC_RNG200
tristate "Broadcom iProc/STB RNG200 support"
depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 3e948cf04476..ab1a84c28efc 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
+obj-$(CONFIG_HW_RANDOM_BCM2835_RUST) += bcm2835_rng_rust.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
diff --git a/drivers/char/hw_random/bcm2835_rng_rust.rs b/drivers/char/hw_random/bcm2835_rng_rust.rs
new file mode 100644
index 000000000000..661ec362a0f0
--- /dev/null
+++ b/drivers/char/hw_random/bcm2835_rng_rust.rs
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Broadcom BCM2835 Random Number Generator support.
+
+use kernel::{
+ device, file, file::File, io_buffer::IoBufferWriter, miscdev, module_platform_driver, of,
+ platform, prelude::*, sync::Arc,
+};
+
+module_platform_driver! {
+ type: RngDriver,
+ name: "bcm2835_rng_rust",
+ author: "Rust for Linux Contributors",
+ description: "BCM2835 Random Number Generator (RNG) driver",
+ license: "GPL v2",
+}
+
+struct RngDevice;
+
+#[vtable]
+impl file::Operations for RngDevice {
+ fn open(_open_data: &(), _file: &File) -> Result {
+ Ok(())
+ }
+
+ fn read(_: (), _: &File, data: &mut impl IoBufferWriter, offset: u64) -> Result<usize> {
+        // Return zero bytes if the caller doesn't provide a buffer or is not reading from the start.
+ if data.is_empty() || offset != 0 {
+ return Ok(0);
+ }
+
+ data.write(&0_u32)?;
+ Ok(4)
+ }
+}
+
+type DeviceData = device::Data<miscdev::Registration<RngDevice>, (), ()>;
+
+struct RngDriver;
+impl platform::Driver for RngDriver {
+ type Data = Arc<DeviceData>;
+
+ kernel::define_of_id_table! {(), [
+ (of::DeviceId::Compatible(b"brcm,bcm2835-rng"), None),
+ ]}
+
+ fn probe(dev: &mut platform::Device, _id_info: Option<&Self::IdInfo>) -> Result<Self::Data> {
+ pr_info!("probing discovered hwrng with id {}\n", dev.id());
+ let data = kernel::new_device_data!(
+ miscdev::Registration::new(),
+ (),
+ (),
+ "BCM2835::Registrations"
+ )?;
+
+ data.registrations()
+ .ok_or(ENXIO)?
+ .as_pinned_mut()
+ .register(fmt!("rust_hwrng"), ())?;
+ Ok(data.into())
+ }
+}
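
A quick userspace smoke test of the device registered above, assuming the misc
device node shows up as /dev/rust_hwrng (the name passed to register()), and
bearing in mind that the read handler currently returns a placeholder zero
word rather than hardware entropy:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		uint32_t val;
		int fd = open("/dev/rust_hwrng", O_RDONLY);

		if (fd < 0)
			return 1;
		/* The driver writes one u32 and reports 4 bytes read. */
		if (read(fd, &val, sizeof(val)) == (ssize_t)sizeof(val))
			printf("read 0x%08x\n", (unsigned int)val);
		close(fd);
		return 0;
	}
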
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 82e5de1f6f8c..29969f84008a 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -41,6 +41,15 @@ config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
To compile this driver as a module, choose M here: the
module will be called sun50i-cpufreq-nvmem.
+config ARM_APPLE_SOC_CPUFREQ
+ tristate "Apple Silicon SoC CPUFreq support"
+ depends on ARCH_APPLE || (COMPILE_TEST && 64BIT)
+ select PM_OPP
+ default ARCH_APPLE
+ help
+ This adds the CPUFreq driver for Apple Silicon machines
+ (e.g. Apple M1).
+
config ARM_ARMADA_37XX_CPUFREQ
tristate "Armada 37xx CPUFreq support"
depends on ARCH_MVEBU && CPUFREQ_DT
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 49b98c62c5af..32a7029e25ed 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o
##################################################################################
# ARM SoC drivers
+obj-$(CONFIG_ARM_APPLE_SOC_CPUFREQ) += apple-soc-cpufreq.o
obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ) += armada-8k-cpufreq.o
obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o
diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
new file mode 100644
index 000000000000..9f067ab5794a
--- /dev/null
+++ b/drivers/cpufreq/apple-soc-cpufreq.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Apple SoC CPU cluster performance state driver
+ *
+ * Copyright The Asahi Linux Contributors
+ *
+ * Based on scpi-cpufreq.c
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+
+#define APPLE_DVFS_CMD 0x20
+#define APPLE_DVFS_CMD_BUSY BIT(31)
+#define APPLE_DVFS_CMD_SET BIT(25)
+#define APPLE_DVFS_CMD_PS2 GENMASK(16, 12)
+#define APPLE_DVFS_CMD_PS1 GENMASK(4, 0)
+
+/* Same timebase as CPU counter (24MHz) */
+#define APPLE_DVFS_LAST_CHG_TIME 0x38
+
+/*
+ * Apple ran out of bits and had to shift this in T8112...
+ */
+#define APPLE_DVFS_STATUS 0x50
+#define APPLE_DVFS_STATUS_CUR_PS_T8103 GENMASK(7, 4)
+#define APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8103 4
+#define APPLE_DVFS_STATUS_TGT_PS_T8103 GENMASK(3, 0)
+#define APPLE_DVFS_STATUS_CUR_PS_T8112 GENMASK(9, 5)
+#define APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8112 5
+#define APPLE_DVFS_STATUS_TGT_PS_T8112 GENMASK(4, 0)
+
+/*
+ * The PLL output is the base clock (12MHz on existing SoCs) * MULT /
+ * (DIV + 1). These registers are noted here for documentation purposes
+ * only; the driver reads frequencies from the OPP table instead.
+ */
+#define APPLE_DVFS_PLL_STATUS 0xc0
+#define APPLE_DVFS_PLL_FACTOR 0xc8
+#define APPLE_DVFS_PLL_FACTOR_MULT GENMASK(31, 16)
+#define APPLE_DVFS_PLL_FACTOR_DIV GENMASK(15, 0)
+
+#define APPLE_DVFS_TRANSITION_TIMEOUT 100
+
+struct apple_soc_cpufreq_info {
+ u64 max_pstate;
+ u64 cur_pstate_mask;
+ u64 cur_pstate_shift;
+};
+
+struct apple_cpu_priv {
+ struct device *cpu_dev;
+ void __iomem *reg_base;
+ const struct apple_soc_cpufreq_info *info;
+};
+
+static struct cpufreq_driver apple_soc_cpufreq_driver;
+
+static const struct apple_soc_cpufreq_info soc_t8103_info = {
+ .max_pstate = 15,
+ .cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_T8103,
+ .cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8103,
+};
+
+static const struct apple_soc_cpufreq_info soc_t8112_info = {
+ .max_pstate = 31,
+ .cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_T8112,
+ .cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8112,
+};
+
+static const struct apple_soc_cpufreq_info soc_default_info = {
+ .max_pstate = 15,
+ .cur_pstate_mask = 0, /* fallback */
+};
+
+static const struct of_device_id apple_soc_cpufreq_of_match[] = {
+ {
+ .compatible = "apple,t8103-cluster-cpufreq",
+ .data = &soc_t8103_info,
+ },
+ {
+ .compatible = "apple,t8112-cluster-cpufreq",
+ .data = &soc_t8112_info,
+ },
+ {
+ .compatible = "apple,cluster-cpufreq",
+ .data = &soc_default_info,
+ },
+ {}
+};
+
+static unsigned int apple_soc_cpufreq_get_rate(unsigned int cpu)
+{
+	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+	struct apple_cpu_priv *priv;
+	struct cpufreq_frequency_table *p;
+	unsigned int pstate;
+
+	if (!policy)
+		return 0;
+
+	priv = policy->driver_data;
+
+ if (priv->info->cur_pstate_mask) {
+ u64 reg = readq_relaxed(priv->reg_base + APPLE_DVFS_STATUS);
+
+ pstate = (reg & priv->info->cur_pstate_mask) >> priv->info->cur_pstate_shift;
+ } else {
+ /*
+ * For the fallback case we might not know the layout of DVFS_STATUS,
+ * so just use the command register value (which ignores boost limitations).
+ */
+ u64 reg = readq_relaxed(priv->reg_base + APPLE_DVFS_CMD);
+
+ pstate = FIELD_GET(APPLE_DVFS_CMD_PS1, reg);
+ }
+
+ cpufreq_for_each_valid_entry(p, policy->freq_table)
+ if (p->driver_data == pstate)
+ return p->frequency;
+
+ dev_err(priv->cpu_dev, "could not find frequency for pstate %d\n",
+ pstate);
+ return 0;
+}
+
+static int apple_soc_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+	struct apple_cpu_priv *priv = policy->driver_data;
+	unsigned int pstate = policy->freq_table[index].driver_data;
+	u64 reg;
+
+	/* Fallback for newer SoCs: clamp to the highest p-state we know about */
+	if (pstate > priv->info->max_pstate)
+		pstate = priv->info->max_pstate;
+
+ if (readq_poll_timeout_atomic(priv->reg_base + APPLE_DVFS_CMD, reg,
+ !(reg & APPLE_DVFS_CMD_BUSY), 2,
+ APPLE_DVFS_TRANSITION_TIMEOUT)) {
+ return -EIO;
+ }
+
+ reg &= ~(APPLE_DVFS_CMD_PS1 | APPLE_DVFS_CMD_PS2);
+ reg |= FIELD_PREP(APPLE_DVFS_CMD_PS1, pstate);
+ reg |= FIELD_PREP(APPLE_DVFS_CMD_PS2, pstate);
+ reg |= APPLE_DVFS_CMD_SET;
+
+ writeq_relaxed(reg, priv->reg_base + APPLE_DVFS_CMD);
+
+ return 0;
+}
+
+static unsigned int apple_soc_cpufreq_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ if (apple_soc_cpufreq_set_target(policy, policy->cached_resolved_idx) < 0)
+ return 0;
+
+ return policy->freq_table[policy->cached_resolved_idx].frequency;
+}
+
+static int apple_soc_cpufreq_find_cluster(struct cpufreq_policy *policy,
+ void __iomem **reg_base,
+ const struct apple_soc_cpufreq_info **info)
+{
+ struct of_phandle_args args;
+ const struct of_device_id *match;
+ int ret = 0;
+
+ ret = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains",
+ "#performance-domain-cells",
+ policy->cpus, &args);
+ if (ret < 0)
+ return ret;
+
+	match = of_match_node(apple_soc_cpufreq_of_match, args.np);
+	if (!match) {
+		of_node_put(args.np);
+		return -ENODEV;
+	}
+
+	*info = match->data;
+
+	*reg_base = of_iomap(args.np, 0);
+	of_node_put(args.np);
+	if (!*reg_base)
+		return -ENOMEM;
+
+ return 0;
+}
+
+static struct freq_attr *apple_soc_cpufreq_hw_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL, /* Filled in below if boost is enabled */
+ NULL,
+};
+
+static int apple_soc_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int ret, i;
+ unsigned int transition_latency;
+ void __iomem *reg_base;
+ struct device *cpu_dev;
+ struct apple_cpu_priv *priv;
+ const struct apple_soc_cpufreq_info *info;
+ struct cpufreq_frequency_table *freq_table;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("failed to get cpu%d device\n", policy->cpu);
+ return -ENODEV;
+ }
+
+ ret = dev_pm_opp_of_add_table(cpu_dev);
+ if (ret < 0) {
+ dev_err(cpu_dev, "%s: failed to add OPP table: %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = apple_soc_cpufreq_find_cluster(policy, &reg_base, &info);
+	if (ret) {
+		dev_err(cpu_dev, "%s: failed to get cluster info: %d\n", __func__, ret);
+		dev_pm_opp_remove_all_dynamic(cpu_dev);
+		return ret;
+	}
+
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
+ if (ret) {
+ dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n", __func__, ret);
+		goto out_free_opp;
+ }
+
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret <= 0) {
+ dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
+ ret = -EPROBE_DEFER;
+ goto out_free_opp;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_free_opp;
+ }
+
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+ goto out_free_priv;
+ }
+
+ /* Get OPP levels (p-state indexes) and stash them in driver_data */
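+	/*
+	 * freq_table frequencies are in kHz while OPP rates are in Hz; the
+	 * +999 below makes the floor lookup immune to the truncation that
+	 * occurred when the Hz rate was converted to kHz.
+	 */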
+ for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ unsigned long rate = freq_table[i].frequency * 1000 + 999;
+ struct dev_pm_opp *opp = dev_pm_opp_find_freq_floor(cpu_dev, &rate);
+
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ goto out_free_cpufreq_table;
+ }
+ freq_table[i].driver_data = dev_pm_opp_get_level(opp);
+ dev_pm_opp_put(opp);
+ }
+
+ priv->cpu_dev = cpu_dev;
+ priv->reg_base = reg_base;
+ priv->info = info;
+ policy->driver_data = priv;
+ policy->freq_table = freq_table;
+
+ transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
+ if (!transition_latency)
+ transition_latency = CPUFREQ_ETERNAL;
+
+ policy->cpuinfo.transition_latency = transition_latency;
+ policy->dvfs_possible_from_any_cpu = true;
+ policy->fast_switch_possible = true;
+
+ if (policy_has_boost_freq(policy)) {
+ ret = cpufreq_enable_boost_support();
+ if (ret) {
+ dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
+ } else {
+ apple_soc_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
+ apple_soc_cpufreq_driver.boost_enabled = true;
+ }
+ }
+
+ return 0;
+
+out_free_cpufreq_table:
+	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_free_priv:
+	kfree(priv);
+out_free_opp:
+	dev_pm_opp_remove_all_dynamic(cpu_dev);
+	iounmap(reg_base);
+	return ret;
+}
+
+static int apple_soc_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ struct apple_cpu_priv *priv = policy->driver_data;
+
+ dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
+ dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
+ iounmap(priv->reg_base);
+ kfree(priv);
+
+ return 0;
+}
+
+static struct cpufreq_driver apple_soc_cpufreq_driver = {
+ .name = "apple-cpufreq",
+ .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_IS_COOLING_DEV,
+	.verify = cpufreq_generic_frequency_table_verify,
+ .get = apple_soc_cpufreq_get_rate,
+ .init = apple_soc_cpufreq_init,
+ .exit = apple_soc_cpufreq_exit,
+ .target_index = apple_soc_cpufreq_set_target,
+ .fast_switch = apple_soc_cpufreq_fast_switch,
+ .register_em = cpufreq_register_em_with_opp,
+ .attr = apple_soc_cpufreq_hw_attr,
+};
+
+static int __init apple_soc_cpufreq_module_init(void)
+{
+ if (!of_machine_is_compatible("apple,arm-platform"))
+ return -ENODEV;
+
+ return cpufreq_register_driver(&apple_soc_cpufreq_driver);
+}
+module_init(apple_soc_cpufreq_module_init);
+
+static void __exit apple_soc_cpufreq_module_exit(void)
+{
+ cpufreq_unregister_driver(&apple_soc_cpufreq_driver);
+}
+module_exit(apple_soc_cpufreq_module_exit);
+
+MODULE_DEVICE_TABLE(of, apple_soc_cpufreq_of_match);
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_DESCRIPTION("Apple SoC CPU cluster DVFS driver");
+MODULE_LICENSE("GPL");
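
To summarize the register protocol implemented above: a p-state switch waits
for APPLE_DVFS_CMD_BUSY to clear, programs the same target state into both
the PS1 and PS2 fields, and latches the request with the SET bit. A
hypothetical helper mirroring apple_soc_cpufreq_set_target(), with the BUSY
polling and error handling elided:

	static void apple_dvfs_request_pstate(void __iomem *reg_base,
					      unsigned int pstate)
	{
		u64 reg = readq_relaxed(reg_base + APPLE_DVFS_CMD);

		reg &= ~(APPLE_DVFS_CMD_PS1 | APPLE_DVFS_CMD_PS2);
		reg |= FIELD_PREP(APPLE_DVFS_CMD_PS1, pstate);
		reg |= FIELD_PREP(APPLE_DVFS_CMD_PS2, pstate);
		reg |= APPLE_DVFS_CMD_SET;	/* latch the request */
		writeq_relaxed(reg, reg_base + APPLE_DVFS_CMD);
	}
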
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 6ac3800db450..a108b9796770 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -103,6 +103,8 @@ static const struct of_device_id allowlist[] __initconst = {
static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "allwinner,sun50i-h6", },
+ { .compatible = "apple,arm-platform", },
+
{ .compatible = "arm,vexpress", },
{ .compatible = "calxeda,highbank", },
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index f0e0a35c7f21..f80339779084 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -160,6 +160,7 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
struct mtk_cpufreq_data *data;
struct device *dev = &pdev->dev;
struct resource *res;
+ struct of_phandle_args args;
void __iomem *base;
int ret, i;
int index;
@@ -168,11 +169,14 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
if (!data)
return -ENOMEM;
- index = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains",
- "#performance-domain-cells",
- policy->cpus);
- if (index < 0)
- return index;
+ ret = of_perf_domain_get_sharing_cpumask(policy->cpu, "performance-domains",
+ "#performance-domain-cells",
+ policy->cpus, &args);
+ if (ret < 0)
+ return ret;
+
+ index = args.args[0];
+ of_node_put(args.np);
res = platform_get_resource(pdev, IORESOURCE_MEM, index);
if (!res) {
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
index a2cc520225d3..68cdbb6d3d05 100644
--- a/drivers/dma/apple-admac.c
+++ b/drivers/dma/apple-admac.c
@@ -21,6 +21,12 @@
#define NCHANNELS_MAX 64
#define IRQ_NOUTPUTS 4
+/*
+ * For allocation purposes we split the cache
+ * memory into blocks of fixed size (given in bytes).
+ */
+#define SRAM_BLOCK 2048
+
#define RING_WRITE_SLOT GENMASK(1, 0)
#define RING_READ_SLOT GENMASK(5, 4)
#define RING_FULL BIT(9)
@@ -36,6 +42,9 @@
#define REG_TX_STOP 0x0004
#define REG_RX_START 0x0008
#define REG_RX_STOP 0x000c
+#define REG_IMPRINT 0x0090
+#define REG_TX_SRAM_SIZE 0x0094
+#define REG_RX_SRAM_SIZE 0x0098
#define REG_CHAN_CTL(ch) (0x8000 + (ch) * 0x200)
#define REG_CHAN_CTL_RST_RINGS BIT(0)
@@ -53,7 +62,9 @@
#define BUS_WIDTH_FRAME_2_WORDS 0x10
#define BUS_WIDTH_FRAME_4_WORDS 0x20
-#define CHAN_BUFSIZE 0x8000
+#define REG_CHAN_SRAM_CARVEOUT(ch) (0x8050 + (ch) * 0x200)
+#define CHAN_SRAM_CARVEOUT_SIZE GENMASK(31, 16)
+#define CHAN_SRAM_CARVEOUT_BASE GENMASK(15, 0)
#define REG_CHAN_FIFOCTL(ch) (0x8054 + (ch) * 0x200)
#define CHAN_FIFOCTL_LIMIT GENMASK(31, 16)
@@ -76,6 +87,8 @@ struct admac_chan {
struct dma_chan chan;
struct tasklet_struct tasklet;
+ u32 carveout;
+
spinlock_t lock;
struct admac_tx *current_tx;
int nperiod_acks;
@@ -92,12 +105,24 @@ struct admac_chan {
struct list_head to_free;
};
+struct admac_sram {
+ u32 size;
+ /*
+ * SRAM_CARVEOUT has 16-bit fields, so the SRAM cannot be larger than
+ * 64K and a 32-bit bitfield over 2K blocks covers it.
+ */
+ u32 alloced;
+};
+
struct admac_data {
struct dma_device dma;
struct device *dev;
__iomem void *base;
struct reset_control *rstc;
+ struct mutex cache_alloc_lock;
+ struct admac_sram txcache, rxcache;
+
int irq;
int irq_index;
int nchannels;
@@ -118,6 +143,58 @@ struct admac_tx {
struct list_head node;
};
+static int admac_alloc_sram_carveout(struct admac_data *ad,
+ enum dma_transfer_direction dir, u32 *out)
+{
+ struct admac_sram *sram;
+ int i, ret = 0, nblocks;
+
+ if (dir == DMA_MEM_TO_DEV)
+ sram = &ad->txcache;
+ else
+ sram = &ad->rxcache;
+
+ mutex_lock(&ad->cache_alloc_lock);
+
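+	/* Find the first free SRAM_BLOCK-sized slot in the shadow bitmap. */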
+ nblocks = sram->size / SRAM_BLOCK;
+ for (i = 0; i < nblocks; i++)
+ if (!(sram->alloced & BIT(i)))
+ break;
+
+ if (i < nblocks) {
+ *out = FIELD_PREP(CHAN_SRAM_CARVEOUT_BASE, i * SRAM_BLOCK) |
+ FIELD_PREP(CHAN_SRAM_CARVEOUT_SIZE, SRAM_BLOCK);
+ sram->alloced |= BIT(i);
+ } else {
+ ret = -EBUSY;
+ }
+
+ mutex_unlock(&ad->cache_alloc_lock);
+
+ return ret;
+}
+
+static void admac_free_sram_carveout(struct admac_data *ad,
+ enum dma_transfer_direction dir, u32 carveout)
+{
+ struct admac_sram *sram;
+ u32 base = FIELD_GET(CHAN_SRAM_CARVEOUT_BASE, carveout);
+ int i;
+
+ if (dir == DMA_MEM_TO_DEV)
+ sram = &ad->txcache;
+ else
+ sram = &ad->rxcache;
+
+ if (WARN_ON(base >= sram->size))
+ return;
+
+ mutex_lock(&ad->cache_alloc_lock);
+ i = base / SRAM_BLOCK;
+ sram->alloced &= ~BIT(i);
+ mutex_unlock(&ad->cache_alloc_lock);
+}
+
static void admac_modify(struct admac_data *ad, int reg, u32 mask, u32 val)
{
void __iomem *addr = ad->base + reg;
@@ -466,15 +543,28 @@ static void admac_synchronize(struct dma_chan *chan)
static int admac_alloc_chan_resources(struct dma_chan *chan)
{
struct admac_chan *adchan = to_admac_chan(chan);
+ struct admac_data *ad = adchan->host;
+ int ret;
dma_cookie_init(&adchan->chan);
+ ret = admac_alloc_sram_carveout(ad, admac_chan_direction(adchan->no),
+ &adchan->carveout);
+ if (ret < 0)
+ return ret;
+
+ writel_relaxed(adchan->carveout,
+ ad->base + REG_CHAN_SRAM_CARVEOUT(adchan->no));
return 0;
}
static void admac_free_chan_resources(struct dma_chan *chan)
{
+ struct admac_chan *adchan = to_admac_chan(chan);
+
admac_terminate_all(chan);
admac_synchronize(chan);
+ admac_free_sram_carveout(adchan->host, admac_chan_direction(adchan->no),
+ adchan->carveout);
}
static struct dma_chan *admac_dma_of_xlate(struct of_phandle_args *dma_spec,
@@ -712,6 +802,7 @@ static int admac_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ad);
ad->dev = &pdev->dev;
ad->nchannels = nchannels;
+ mutex_init(&ad->cache_alloc_lock);
/*
* The controller has 4 IRQ outputs. Try them all until
@@ -757,6 +848,9 @@ static int admac_probe(struct platform_device *pdev)
dma->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
@@ -801,6 +895,13 @@ static int admac_probe(struct platform_device *pdev)
goto free_irq;
}
+ ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE);
+ ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE);
+
+ dev_info(&pdev->dev, "Audio DMA Controller\n");
+ dev_info(&pdev->dev, "imprint %x TX cache %u RX cache %u\n",
+ readl_relaxed(ad->base + REG_IMPRINT), ad->txcache.size, ad->rxcache.size);
+
return 0;
free_irq:
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index a01af1180616..306cded64c98 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -486,6 +486,14 @@ config GPIO_PL061
help
Say yes here to support the PrimeCell PL061 GPIO device.
+config GPIO_PL061_RUST
+ tristate "PrimeCell PL061 GPIO support written in Rust"
+ depends on ARM_AMBA && RUST
+ select IRQ_DOMAIN
+ select GPIOLIB_IRQCHIP
+ help
+	  Say yes here to support the PrimeCell PL061 GPIO device.
+
config GPIO_PMIC_EIC_SPRD
tristate "Spreadtrum PMIC EIC support"
depends on MFD_SC27XX_PMIC || COMPILE_TEST
@@ -1283,6 +1291,17 @@ config GPIO_LP87565
This driver can also be built as a module. If so, the module will be
called gpio-lp87565.
+config GPIO_MACSMC
+ tristate "Apple Mac SMC GPIO"
+ depends on APPLE_SMC
+ default ARCH_APPLE
+ help
+ Support for GPIOs controlled by the SMC microcontroller on Apple Mac
+ systems.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-macsmc.
+
config GPIO_MADERA
tristate "Cirrus Logic Madera class codecs"
depends on PINCTRL_MADERA
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 29e3beb6548c..c06040c204f9 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -83,6 +83,7 @@ obj-$(CONFIG_GPIO_LP873X) += gpio-lp873x.o
obj-$(CONFIG_GPIO_LP87565) += gpio-lp87565.o
obj-$(CONFIG_GPIO_LPC18XX) += gpio-lpc18xx.o
obj-$(CONFIG_GPIO_LPC32XX) += gpio-lpc32xx.o
+obj-$(CONFIG_GPIO_MACSMC) += gpio-macsmc.o
obj-$(CONFIG_GPIO_MADERA) += gpio-madera.o
obj-$(CONFIG_GPIO_MAX3191X) += gpio-max3191x.o
obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
@@ -119,6 +120,7 @@ obj-$(CONFIG_GPIO_PCIE_IDIO_24) += gpio-pcie-idio-24.o
obj-$(CONFIG_GPIO_PCI_IDIO_16) += gpio-pci-idio-16.o
obj-$(CONFIG_GPIO_PISOSR) += gpio-pisosr.o
obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
+obj-$(CONFIG_GPIO_PL061_RUST) += gpio_pl061_rust.o
obj-$(CONFIG_GPIO_PMIC_EIC_SPRD) += gpio-pmic-eic-sprd.o
obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
obj-$(CONFIG_GPIO_RASPBERRYPI_EXP) += gpio-raspberrypi-exp.o
diff --git a/drivers/gpio/gpio-macsmc.c b/drivers/gpio/gpio-macsmc.c
new file mode 100644
index 000000000000..98fc74af69d4
--- /dev/null
+++ b/drivers/gpio/gpio-macsmc.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SMC GPIO driver
+ * Copyright The Asahi Linux Contributors
+ *
+ * This driver implements basic SMC PMU GPIO support that can read inputs
+ * and write outputs. Mode changes and IRQ config are not yet implemented.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/macsmc.h>
+
+#define MAX_GPIO 64
+
+/*
+ * Commands 0-6 are, presumably, the intended API.
+ * Command 0xff lets you get/set the pin configuration in detail directly,
+ * but the bit meanings seem not to be stable between devices/PMU hardware
+ * versions.
+ *
+ * We're going to try to make do with the low commands for now.
+ * We don't implement pin mode changes at this time.
+ */
+
+#define CMD_ACTION (0 << 24)
+#define CMD_OUTPUT (1 << 24)
+#define CMD_INPUT (2 << 24)
+#define CMD_PINMODE (3 << 24)
+#define CMD_IRQ_ENABLE (4 << 24)
+#define CMD_IRQ_ACK (5 << 24)
+#define CMD_IRQ_MODE (6 << 24)
+#define CMD_CONFIG (0xff << 24)
+
+#define MODE_INPUT 0
+#define MODE_OUTPUT 1
+#define MODE_VALUE_0 0
+#define MODE_VALUE_1 2
+
+#define IRQ_MODE_HIGH 0
+#define IRQ_MODE_LOW 1
+#define IRQ_MODE_RISING 2
+#define IRQ_MODE_FALLING 3
+#define IRQ_MODE_BOTH 4
+
+#define CONFIG_MASK GENMASK(23, 16)
+#define CONFIG_VAL GENMASK(7, 0)
+
+#define CONFIG_OUTMODE GENMASK(7, 6)
+#define CONFIG_IRQMODE GENMASK(5, 3)
+#define CONFIG_PULLDOWN BIT(2)
+#define CONFIG_PULLUP BIT(1)
+#define CONFIG_OUTVAL BIT(0)
+
+/*
+ * output modes seem to differ depending on the PMU in use... ?
+ * j274 / M1 (Sera PMU):
+ * 0 = input
+ * 1 = output
+ * 2 = open drain
+ * 3 = disable
+ * j314 / M1Pro (Maverick PMU):
+ * 0 = input
+ * 1 = open drain
+ * 2 = output
+ * 3 = ?
+ */
+
+#define SMC_EV_GPIO 0x7202
+
+struct macsmc_gpio {
+ struct device *dev;
+ struct apple_smc *smc;
+ struct gpio_chip gc;
+ struct irq_chip ic;
+ struct notifier_block nb;
+
+ struct mutex irq_mutex;
+ DECLARE_BITMAP(irq_supported, MAX_GPIO);
+ DECLARE_BITMAP(irq_enable_shadow, MAX_GPIO);
+ DECLARE_BITMAP(irq_enable, MAX_GPIO);
+ u32 irq_mode_shadow[MAX_GPIO];
+ u32 irq_mode[MAX_GPIO];
+
+ int first_index;
+};
+
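+/*
+ * Each GPIO is represented by an SMC key of the form "gPNN", where NN is
+ * the pin number in hex (e.g. pin 0x12 is key "gP12"). These helpers
+ * convert between the two representations.
+ */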
+static int macsmc_gpio_nr(smc_key key)
+{
+ int low = hex_to_bin(key & 0xff);
+ int high = hex_to_bin((key >> 8) & 0xff);
+
+ if (low < 0 || high < 0)
+ return -1;
+
+ return low | (high << 4);
+}
+
+static int macsmc_gpio_key(unsigned int offset)
+{
+ return _SMC_KEY("gP\0\0") | (hex_asc_hi(offset) << 8) | hex_asc_lo(offset);
+}
+
+static int macsmc_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+ smc_key key = macsmc_gpio_key(offset);
+ u32 val;
+ int ret;
+
+ /* First try reading the explicit pin mode register */
+ ret = apple_smc_rw_u32(smcgp->smc, key, CMD_PINMODE, &val);
+ if (!ret)
+ return (val & MODE_OUTPUT) ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+
+ /*
+ * Less common IRQ configs cause CMD_PINMODE to fail, and so does open drain mode.
+ * Fall back to reading IRQ mode, which will only succeed for inputs.
+ */
+ ret = apple_smc_rw_u32(smcgp->smc, key, CMD_IRQ_MODE, &val);
+ return (!ret) ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
+}
+
+static int macsmc_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+ smc_key key = macsmc_gpio_key(offset);
+ u32 val;
+ int ret;
+
+ ret = macsmc_gpio_get_direction(gc, offset);
+ if (ret < 0)
+ return ret;
+
+ if (ret == GPIO_LINE_DIRECTION_OUT)
+ ret = apple_smc_rw_u32(smcgp->smc, key, CMD_OUTPUT, &val);
+ else
+ ret = apple_smc_rw_u32(smcgp->smc, key, CMD_INPUT, &val);
+
+ if (ret < 0)
+ return ret;
+
+ return val ? 1 : 0;
+}
+
+static void macsmc_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+ smc_key key = macsmc_gpio_key(offset);
+ int ret;
+
+	ret = apple_smc_write_u32(smcgp->smc, key, CMD_OUTPUT | value);
+ if (ret < 0)
+ dev_err(smcgp->dev, "GPIO set failed %p4ch = 0x%x\n", &key, value);
+}
+
+static int macsmc_gpio_init_valid_mask(struct gpio_chip *gc,
+ unsigned long *valid_mask, unsigned int ngpios)
+{
+ struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+ int count = apple_smc_get_key_count(smcgp->smc) - smcgp->first_index;
+ int i;
+
+ if (count > MAX_GPIO)
+ count = MAX_GPIO;
+
+ bitmap_zero(valid_mask, ngpios);
+
+ for (i = 0; i < count; i++) {
+ smc_key key;
+ int gpio_nr;
+ u32 val;
+ int ret = apple_smc_get_key_by_index(smcgp->smc, smcgp->first_index + i, &key);
+
+ if (ret < 0)
+ return ret;
+
+ if (key > SMC_KEY(gPff))
+ break;
+
+ gpio_nr = macsmc_gpio_nr(key);
+		if (gpio_nr < 0 || gpio_nr >= MAX_GPIO) {
+ dev_err(smcgp->dev, "Bad GPIO key %p4ch\n", &key);
+ continue;
+ }
+
+ set_bit(gpio_nr, valid_mask);
+
+ /* Check for IRQ support */
+ ret = apple_smc_rw_u32(smcgp->smc, key, CMD_IRQ_MODE, &val);
+ if (!ret)
+ set_bit(gpio_nr, smcgp->irq_supported);
+ }
+
+ return 0;
+}
+
+static int macsmc_gpio_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct macsmc_gpio *smcgp = container_of(nb, struct macsmc_gpio, nb);
+ u16 type = event >> 16;
+ u8 offset = (event >> 8) & 0xff;
+ smc_key key = macsmc_gpio_key(offset);
+ unsigned long flags;
+
+ if (type != SMC_EV_GPIO)
+ return NOTIFY_DONE;
+
+	if (offset >= MAX_GPIO) {
+ dev_err(smcgp->dev, "GPIO event index %d out of range\n", offset);
+ return NOTIFY_BAD;
+ }
+
+ local_irq_save(flags);
+ generic_handle_irq_desc(irq_resolve_mapping(smcgp->gc.irq.domain, offset));
+ local_irq_restore(flags);
+
+ if (apple_smc_write_u32(smcgp->smc, key, CMD_IRQ_ACK | 1) < 0)
+ dev_err(smcgp->dev, "GPIO IRQ ack failed for %p4ch\n", &key);
+
+ return NOTIFY_OK;
+}
+
+static void macsmc_gpio_irq_enable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+
+ set_bit(irqd_to_hwirq(d), smcgp->irq_enable_shadow);
+}
+
+static void macsmc_gpio_irq_disable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+
+ clear_bit(irqd_to_hwirq(d), smcgp->irq_enable_shadow);
+}
+
+static int macsmc_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+ int offset = irqd_to_hwirq(d);
+ u32 mode;
+
+ if (!test_bit(offset, smcgp->irq_supported))
+ return -EINVAL;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ mode = IRQ_MODE_HIGH;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ mode = IRQ_MODE_LOW;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ mode = IRQ_MODE_RISING;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ mode = IRQ_MODE_FALLING;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ mode = IRQ_MODE_BOTH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ smcgp->irq_mode_shadow[offset] = mode;
+ return 0;
+}
+
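+/*
+ * SMC transactions sleep, so they cannot be issued from the atomic
+ * irq_chip callbacks above. Changes are instead staged in the shadow
+ * bitmaps/arrays and only flushed to the SMC from irq_bus_sync_unlock(),
+ * which runs in a sleepable context under irq_mutex.
+ */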
+static void macsmc_gpio_irq_bus_lock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+
+ mutex_lock(&smcgp->irq_mutex);
+}
+
+static void macsmc_gpio_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+ smc_key key = macsmc_gpio_key(irqd_to_hwirq(d));
+ int offset = irqd_to_hwirq(d);
+ bool val;
+
+ if (smcgp->irq_mode_shadow[offset] != smcgp->irq_mode[offset]) {
+ u32 cmd = CMD_IRQ_MODE | smcgp->irq_mode_shadow[offset];
+ if (apple_smc_write_u32(smcgp->smc, key, cmd) < 0)
+ dev_err(smcgp->dev, "GPIO IRQ config failed for %p4ch = 0x%x\n", &key, cmd);
+ else
+ smcgp->irq_mode_shadow[offset] = smcgp->irq_mode[offset];
+ }
+
+ val = test_bit(offset, smcgp->irq_enable_shadow);
+ if (test_bit(offset, smcgp->irq_enable) != val) {
+ if (apple_smc_write_u32(smcgp->smc, key, CMD_IRQ_ENABLE | val) < 0)
+ dev_err(smcgp->dev, "GPIO IRQ en/disable failed for %p4ch\n", &key);
+ else
+ change_bit(offset, smcgp->irq_enable);
+ }
+
+ mutex_unlock(&smcgp->irq_mutex);
+}
+
+static int macsmc_gpio_probe(struct platform_device *pdev)
+{
+ struct macsmc_gpio *smcgp;
+ struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent);
+ smc_key key;
+ int ret;
+
+ smcgp = devm_kzalloc(&pdev->dev, sizeof(*smcgp), GFP_KERNEL);
+ if (!smcgp)
+ return -ENOMEM;
+
+ pdev->dev.of_node = of_get_child_by_name(pdev->dev.parent->of_node, "gpio");
+
+ smcgp->dev = &pdev->dev;
+ smcgp->smc = smc;
+ smcgp->first_index = apple_smc_find_first_key_index(smc, SMC_KEY(gP00));
+
+ if (smcgp->first_index >= apple_smc_get_key_count(smc))
+ return -ENODEV;
+
+ ret = apple_smc_get_key_by_index(smc, smcgp->first_index, &key);
+ if (ret < 0)
+ return ret;
+
+ if (key > macsmc_gpio_key(MAX_GPIO - 1))
+ return -ENODEV;
+
+ dev_info(smcgp->dev, "First GPIO key: %p4ch\n", &key);
+
+ smcgp->gc.label = "macsmc-pmu-gpio";
+ smcgp->gc.owner = THIS_MODULE;
+ smcgp->gc.get = macsmc_gpio_get;
+ smcgp->gc.set = macsmc_gpio_set;
+ smcgp->gc.get_direction = macsmc_gpio_get_direction;
+ smcgp->gc.init_valid_mask = macsmc_gpio_init_valid_mask;
+ smcgp->gc.can_sleep = true;
+ smcgp->gc.ngpio = MAX_GPIO;
+ smcgp->gc.base = -1;
+ smcgp->gc.parent = &pdev->dev;
+
+ smcgp->ic.name = "macsmc-pmu-gpio";
+ smcgp->ic.irq_mask = macsmc_gpio_irq_disable;
+ smcgp->ic.irq_unmask = macsmc_gpio_irq_enable;
+ smcgp->ic.irq_set_type = macsmc_gpio_irq_set_type;
+ smcgp->ic.irq_bus_lock = macsmc_gpio_irq_bus_lock;
+ smcgp->ic.irq_bus_sync_unlock = macsmc_gpio_irq_bus_sync_unlock;
+ smcgp->ic.flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
+
+ smcgp->gc.irq.chip = &smcgp->ic;
+ smcgp->gc.irq.parent_handler = NULL;
+ smcgp->gc.irq.num_parents = 0;
+ smcgp->gc.irq.parents = NULL;
+ smcgp->gc.irq.default_type = IRQ_TYPE_NONE;
+ smcgp->gc.irq.handler = handle_simple_irq;
+
+ mutex_init(&smcgp->irq_mutex);
+
+ smcgp->nb.notifier_call = macsmc_gpio_event;
+ apple_smc_register_notifier(smc, &smcgp->nb);
+
+ return devm_gpiochip_add_data(&pdev->dev, &smcgp->gc, smcgp);
+}
+
+static struct platform_driver macsmc_gpio_driver = {
+ .driver = {
+ .name = "macsmc-gpio",
+ },
+ .probe = macsmc_gpio_probe,
+};
+module_platform_driver(macsmc_gpio_driver);
+
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple SMC GPIO driver");
+MODULE_ALIAS("platform:macsmc-gpio");
diff --git a/drivers/gpio/gpio_pl061_rust.rs b/drivers/gpio/gpio_pl061_rust.rs
new file mode 100644
index 000000000000..1457d763497e
--- /dev/null
+++ b/drivers/gpio/gpio_pl061_rust.rs
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Driver for the ARM PrimeCell(tm) General Purpose Input/Output (PL061).
+//!
+//! Based on the C driver written by Baruch Siach <baruch@tkos.co.il>.
+
+use kernel::{
+ amba, bit, bits_iter, define_amba_id_table, device, gpio,
+ io_mem::IoMem,
+ irq::{self, ExtraResult, IrqData, LockedIrqData},
+ power,
+ prelude::*,
+ sync::{Arc, ArcBorrow, RawSpinLock},
+};
+
+const GPIODIR: usize = 0x400;
+const GPIOIS: usize = 0x404;
+const GPIOIBE: usize = 0x408;
+const GPIOIEV: usize = 0x40C;
+const GPIOIE: usize = 0x410;
+const GPIOMIS: usize = 0x418;
+const GPIOIC: usize = 0x41C;
+const GPIO_SIZE: usize = 0x1000;
+
+const PL061_GPIO_NR: u16 = 8;
+
+#[derive(Default)]
+struct ContextSaveRegs {
+ gpio_data: u8,
+ gpio_dir: u8,
+ gpio_is: u8,
+ gpio_ibe: u8,
+ gpio_iev: u8,
+ gpio_ie: u8,
+}
+
+#[derive(Default)]
+struct PL061DataInner {
+ csave_regs: ContextSaveRegs,
+}
+
+struct PL061Data {
+ dev: device::Device,
+ inner: RawSpinLock<PL061DataInner>,
+}
+
+struct PL061Resources {
+ base: IoMem<GPIO_SIZE>,
+ parent_irq: u32,
+}
+
+type PL061Registrations = gpio::RegistrationWithIrqChip<PL061Device>;
+
+type DeviceData = device::Data<PL061Registrations, PL061Resources, PL061Data>;
+
+struct PL061Device;
+
+#[vtable]
+impl gpio::Chip for PL061Device {
+ type Data = Arc<DeviceData>;
+
+ fn get_direction(data: ArcBorrow<'_, DeviceData>, offset: u32) -> Result<gpio::LineDirection> {
+ let pl061 = data.resources().ok_or(ENXIO)?;
+ Ok(if pl061.base.readb(GPIODIR) & bit(offset) != 0 {
+ gpio::LineDirection::Out
+ } else {
+ gpio::LineDirection::In
+ })
+ }
+
+ fn direction_input(data: ArcBorrow<'_, DeviceData>, offset: u32) -> Result {
+ let _guard = data.inner.lock_irqdisable();
+ let pl061 = data.resources().ok_or(ENXIO)?;
+ let mut gpiodir = pl061.base.readb(GPIODIR);
+ gpiodir &= !bit(offset);
+ pl061.base.writeb(gpiodir, GPIODIR);
+ Ok(())
+ }
+
+ fn direction_output(data: ArcBorrow<'_, DeviceData>, offset: u32, value: bool) -> Result {
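+        // PL061 trick: address bits [9:2] act as a per-bit mask of the data
+        // register, so the byte at offset BIT(offset + 2) accesses only this line.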
+ let woffset = bit(offset + 2).into();
+ let _guard = data.inner.lock_irqdisable();
+ let pl061 = data.resources().ok_or(ENXIO)?;
+ pl061.base.try_writeb((value as u8) << offset, woffset)?;
+ let mut gpiodir = pl061.base.readb(GPIODIR);
+ gpiodir |= bit(offset);
+ pl061.base.writeb(gpiodir, GPIODIR);
+
+        // The value is set again because the PL061 doesn't allow setting the
+        // value of a GPIO pin before it has been configured in OUT mode.
+ pl061.base.try_writeb((value as u8) << offset, woffset)?;
+ Ok(())
+ }
+
+ fn get(data: ArcBorrow<'_, DeviceData>, offset: u32) -> Result<bool> {
+ let pl061 = data.resources().ok_or(ENXIO)?;
+ Ok(pl061.base.try_readb(bit(offset + 2).into())? != 0)
+ }
+
+ fn set(data: ArcBorrow<'_, DeviceData>, offset: u32, value: bool) {
+ if let Some(pl061) = data.resources() {
+ let woffset = bit(offset + 2).into();
+ let _ = pl061.base.try_writeb((value as u8) << offset, woffset);
+ }
+ }
+}
+
+impl gpio::ChipWithIrqChip for PL061Device {
+ fn handle_irq_flow(
+ data: ArcBorrow<'_, DeviceData>,
+ desc: &irq::Descriptor,
+ domain: &irq::Domain,
+ ) {
+ let chained = desc.enter_chained();
+
+ if let Some(pl061) = data.resources() {
+ let pending = pl061.base.readb(GPIOMIS);
+ for offset in bits_iter(pending) {
+ domain.generic_handle_chained(offset, &chained);
+ }
+ }
+ }
+}
+
+#[vtable]
+impl irq::Chip for PL061Device {
+ type Data = Arc<DeviceData>;
+
+ fn set_type(
+ data: ArcBorrow<'_, DeviceData>,
+ irq_data: &mut LockedIrqData,
+ trigger: u32,
+ ) -> Result<ExtraResult> {
+        let offset = irq_data.hwirq();
+
+        if offset >= PL061_GPIO_NR.into() {
+            return Err(EINVAL);
+        }
+
+        let bit = bit(offset);
+
+ if trigger & (irq::Type::LEVEL_HIGH | irq::Type::LEVEL_LOW) != 0
+ && trigger & (irq::Type::EDGE_RISING | irq::Type::EDGE_FALLING) != 0
+ {
+ dev_err!(
+ data.dev,
+ "trying to configure line {} for both level and edge detection, choose one!\n",
+ offset
+ );
+ return Err(EINVAL);
+ }
+
+ let _guard = data.inner.lock_irqdisable();
+ let pl061 = data.resources().ok_or(ENXIO)?;
+
+ let mut gpioiev = pl061.base.readb(GPIOIEV);
+ let mut gpiois = pl061.base.readb(GPIOIS);
+ let mut gpioibe = pl061.base.readb(GPIOIBE);
+
+ if trigger & (irq::Type::LEVEL_HIGH | irq::Type::LEVEL_LOW) != 0 {
+ let polarity = trigger & irq::Type::LEVEL_HIGH != 0;
+
+ // Disable edge detection.
+ gpioibe &= !bit;
+ // Enable level detection.
+ gpiois |= bit;
+ // Select polarity.
+ if polarity {
+ gpioiev |= bit;
+ } else {
+ gpioiev &= !bit;
+ }
+ irq_data.set_level_handler();
+ dev_dbg!(
+ data.dev,
+ "line {}: IRQ on {} level\n",
+ offset,
+ if polarity { "HIGH" } else { "LOW" }
+ );
+ } else if (trigger & irq::Type::EDGE_BOTH) == irq::Type::EDGE_BOTH {
+ // Disable level detection.
+ gpiois &= !bit;
+            // Select both edges; setting this makes GPIOIEV be ignored.
+ gpioibe |= bit;
+ irq_data.set_edge_handler();
+ dev_dbg!(data.dev, "line {}: IRQ on both edges\n", offset);
+ } else if trigger & (irq::Type::EDGE_RISING | irq::Type::EDGE_FALLING) != 0 {
+ let rising = trigger & irq::Type::EDGE_RISING != 0;
+
+ // Disable level detection.
+ gpiois &= !bit;
+ // Clear detection on both edges.
+ gpioibe &= !bit;
+ // Select edge.
+ if rising {
+ gpioiev |= bit;
+ } else {
+ gpioiev &= !bit;
+ }
+ irq_data.set_edge_handler();
+ dev_dbg!(
+ data.dev,
+ "line {}: IRQ on {} edge\n",
+ offset,
+                if rising { "RISING" } else { "FALLING" }
+ );
+ } else {
+ // No trigger: disable everything.
+ gpiois &= !bit;
+ gpioibe &= !bit;
+ gpioiev &= !bit;
+ irq_data.set_bad_handler();
+ dev_warn!(data.dev, "no trigger selected for line {}\n", offset);
+ }
+
+ pl061.base.writeb(gpiois, GPIOIS);
+ pl061.base.writeb(gpioibe, GPIOIBE);
+ pl061.base.writeb(gpioiev, GPIOIEV);
+
+ Ok(ExtraResult::None)
+ }
+
+ fn mask(data: ArcBorrow<'_, DeviceData>, irq_data: &IrqData) {
+ let mask = bit(irq_data.hwirq() % irq::HwNumber::from(PL061_GPIO_NR));
+ let _guard = data.inner.lock();
+ if let Some(pl061) = data.resources() {
+ let gpioie = pl061.base.readb(GPIOIE) & !mask;
+ pl061.base.writeb(gpioie, GPIOIE);
+ }
+ }
+
+ fn unmask(data: ArcBorrow<'_, DeviceData>, irq_data: &IrqData) {
+ let mask = bit(irq_data.hwirq() % irq::HwNumber::from(PL061_GPIO_NR));
+ let _guard = data.inner.lock();
+ if let Some(pl061) = data.resources() {
+ let gpioie = pl061.base.readb(GPIOIE) | mask;
+ pl061.base.writeb(gpioie, GPIOIE);
+ }
+ }
+
+ // This gets called from the edge IRQ handler to ACK the edge IRQ in the GPIOIC
+ // (interrupt-clear) register. For level IRQs this is not needed: these go away when the level
+ // signal goes away.
+ fn ack(data: ArcBorrow<'_, DeviceData>, irq_data: &IrqData) {
+ let mask = bit(irq_data.hwirq() % irq::HwNumber::from(PL061_GPIO_NR));
+ let _guard = data.inner.lock();
+ if let Some(pl061) = data.resources() {
+ pl061.base.writeb(mask.into(), GPIOIC);
+ }
+ }
+
+ fn set_wake(data: ArcBorrow<'_, DeviceData>, _irq_data: &IrqData, on: bool) -> Result {
+ let pl061 = data.resources().ok_or(ENXIO)?;
+ irq::set_wake(pl061.parent_irq, on)
+ }
+}
+
+impl amba::Driver for PL061Device {
+ type Data = Arc<DeviceData>;
+ type PowerOps = Self;
+
+ define_amba_id_table! {(), [
+ ({id: 0x00041061, mask: 0x000fffff}, None),
+ ]}
+
+ fn probe(dev: &mut amba::Device, _data: Option<&Self::IdInfo>) -> Result<Arc<DeviceData>> {
+ let res = dev.take_resource().ok_or(ENXIO)?;
+ let irq = dev.irq(0).ok_or(ENXIO)?;
+
+ let mut data = kernel::new_device_data!(
+ gpio::RegistrationWithIrqChip::new(),
+ PL061Resources {
+ // SAFETY: This device doesn't support DMA.
+ base: unsafe { IoMem::try_new(res)? },
+ parent_irq: irq,
+ },
+ PL061Data {
+ dev: device::Device::from_dev(dev),
+ // SAFETY: We call `rawspinlock_init` below.
+ inner: unsafe { RawSpinLock::new(PL061DataInner::default()) },
+ },
+ "PL061::Registrations"
+ )?;
+
+ // SAFETY: General part of the data is pinned when `data` is.
+ let gen_inner = unsafe { data.as_mut().map_unchecked_mut(|d| &mut (**d).inner) };
+ kernel::rawspinlock_init!(gen_inner, "PL061Data::inner");
+
+ let data = Arc::<DeviceData>::from(data);
+
+ data.resources().ok_or(ENXIO)?.base.writeb(0, GPIOIE); // disable irqs
+
+ kernel::gpio_irq_chip_register!(
+ data.registrations().ok_or(ENXIO)?.as_pinned_mut(),
+ Self,
+ PL061_GPIO_NR,
+ None,
+ dev,
+ data.clone(),
+ irq
+ )?;
+
+ dev_info!(data.dev, "PL061 GPIO chip registered\n");
+
+ Ok(data)
+ }
+}
+
+impl power::Operations for PL061Device {
+ type Data = Arc<DeviceData>;
+
+ fn suspend(data: ArcBorrow<'_, DeviceData>) -> Result {
+ let mut inner = data.inner.lock();
+ let pl061 = data.resources().ok_or(ENXIO)?;
+ inner.csave_regs.gpio_data = 0;
+ inner.csave_regs.gpio_dir = pl061.base.readb(GPIODIR);
+ inner.csave_regs.gpio_is = pl061.base.readb(GPIOIS);
+ inner.csave_regs.gpio_ibe = pl061.base.readb(GPIOIBE);
+ inner.csave_regs.gpio_iev = pl061.base.readb(GPIOIEV);
+ inner.csave_regs.gpio_ie = pl061.base.readb(GPIOIE);
+
+ for offset in 0..PL061_GPIO_NR {
+ if inner.csave_regs.gpio_dir & bit(offset) != 0 {
+ if let Ok(v) = <Self as gpio::Chip>::get(data, offset.into()) {
+ inner.csave_regs.gpio_data |= (v as u8) << offset;
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ fn resume(data: ArcBorrow<'_, DeviceData>) -> Result {
+ let inner = data.inner.lock();
+ let pl061 = data.resources().ok_or(ENXIO)?;
+
+ for offset in 0..PL061_GPIO_NR {
+ if inner.csave_regs.gpio_dir & bit(offset) != 0 {
+ let value = inner.csave_regs.gpio_data & bit(offset) != 0;
+ let _ = <Self as gpio::Chip>::direction_output(data, offset.into(), value);
+ } else {
+ let _ = <Self as gpio::Chip>::direction_input(data, offset.into());
+ }
+ }
+
+ pl061.base.writeb(inner.csave_regs.gpio_is, GPIOIS);
+ pl061.base.writeb(inner.csave_regs.gpio_ibe, GPIOIBE);
+ pl061.base.writeb(inner.csave_regs.gpio_iev, GPIOIEV);
+ pl061.base.writeb(inner.csave_regs.gpio_ie, GPIOIE);
+
+ Ok(())
+ }
+
+ fn freeze(data: ArcBorrow<'_, DeviceData>) -> Result {
+ Self::suspend(data)
+ }
+
+ fn restore(data: ArcBorrow<'_, DeviceData>) -> Result {
+ Self::resume(data)
+ }
+}
+
+module_amba_driver! {
+ type: PL061Device,
+ name: "pl061_gpio",
+ author: "Wedson Almeida Filho",
+ license: "GPL",
+}
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 34f5a092c99e..5aa8222f085c 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -416,6 +416,8 @@ source "drivers/gpu/drm/solomon/Kconfig"
source "drivers/gpu/drm/sprd/Kconfig"
+source "drivers/gpu/drm/apple/Kconfig"
+
config DRM_HYPERV
tristate "DRM Support for Hyper-V synthetic video device"
depends on DRM && PCI && MMU && HYPERV
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 0b283e46f28b..e547c9c0bf73 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -140,6 +140,7 @@ obj-$(CONFIG_DRM_XEN) += xen/
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
obj-$(CONFIG_DRM_LIMA) += lima/
obj-$(CONFIG_DRM_PANFROST) += panfrost/
+obj-$(CONFIG_DRM_APPLE) += apple/
obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
obj-$(CONFIG_DRM_MCDE) += mcde/
obj-$(CONFIG_DRM_TIDSS) += tidss/
diff --git a/drivers/gpu/drm/apple/.gitignore b/drivers/gpu/drm/apple/.gitignore
new file mode 100644
index 000000000000..d9a77f3b59b2
--- /dev/null
+++ b/drivers/gpu/drm/apple/.gitignore
@@ -0,0 +1 @@
+*.hdrtest
diff --git a/drivers/gpu/drm/apple/Kconfig b/drivers/gpu/drm/apple/Kconfig
new file mode 100644
index 000000000000..9b9bcb7b5433
--- /dev/null
+++ b/drivers/gpu/drm/apple/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+config DRM_APPLE
+ tristate "DRM Support for Apple display controllers"
+ depends on DRM && OF && ARM64
+ depends on ARCH_APPLE || COMPILE_TEST
+ select DRM_KMS_HELPER
+ select DRM_KMS_DMA_HELPER
+ select DRM_GEM_DMA_HELPER
+ select VIDEOMODE_HELPERS
+ help
+ Say Y if you have an Apple Silicon chipset.
diff --git a/drivers/gpu/drm/apple/Makefile b/drivers/gpu/drm/apple/Makefile
new file mode 100644
index 000000000000..2502f781a5dc
--- /dev/null
+++ b/drivers/gpu/drm/apple/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+
+CFLAGS_trace.o = -I$(src)
+
+appledrm-y := apple_drv.o
+
+apple_dcp-y := dcp.o iomfb.o parser.o
+apple_dcp-$(CONFIG_TRACING) += trace.o
+
+apple_piodma-y := dummy-piodma.o
+
+obj-$(CONFIG_DRM_APPLE) += appledrm.o
+obj-$(CONFIG_DRM_APPLE) += apple_dcp.o
+obj-$(CONFIG_DRM_APPLE) += apple_piodma.o
+
+# header test
+
+# exclude some broken headers from the test coverage
+no-header-test := \
+
+always-y += \
+ $(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \
+ $(shell cd $(srctree)/$(src) && find * -name '*.h')))
+
+quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
+ cmd_hdrtest = $(CC) $(filter-out $(CFLAGS_GCOV), $(c_flags)) -S -o /dev/null -x c /dev/null -include $<; touch $@
+
+$(obj)/%.hdrtest: $(src)/%.h FORCE
+ $(call if_changed_dep,hdrtest)
diff --git a/drivers/gpu/drm/apple/apple_drv.c b/drivers/gpu/drm/apple/apple_drv.c
new file mode 100644
index 000000000000..c6483f3011a9
--- /dev/null
+++ b/drivers/gpu/drm/apple/apple_drv.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+/* Based on meson driver which is
+ * Copyright (C) 2016 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ * Copyright (C) 2014 Endless Mobile
+ */
+
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+
+#include <drm/drm_aperture.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_fixed.h>
+
+#include "dcp.h"
+
+#define DRIVER_NAME "apple"
+#define DRIVER_DESC "Apple display controller DRM driver"
+
+#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
+
+#define MAX_COPROCESSORS 2
+
+struct apple_drm_private {
+ struct drm_device drm;
+};
+
+DEFINE_DRM_GEM_DMA_FOPS(apple_fops);
+
+static int apple_drm_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
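+	/* Rows are padded to a 64-byte-aligned pitch before sizing the buffer. */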
+ args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
+ args->size = args->pitch * args->height;
+
+ return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
+}
+
+static const struct drm_driver apple_drm_driver = {
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(apple_drm_gem_dumb_create),
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = "20210901",
+ .major = 1,
+ .minor = 0,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+ .fops = &apple_fops,
+};
+
+static int apple_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state;
+ struct drm_crtc_state *crtc_state;
+
+ new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+
+ if (!new_plane_state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ /*
+ * DCP limits downscaling to 2x and upscaling to 4x. Attempting to
+ * scale outside these bounds errors out when swapping.
+ *
+ * This function also takes care of clipping the src/dest rectangles,
+ * which is required for correct operation. Partially off-screen
+ * surfaces may appear corrupted.
+ *
+ * DCP does not distinguish plane types in the hardware, so we set
+ * can_position. If the primary plane does not fill the screen, the
+ * hardware will fill in zeroes (black).
+ */
+ return drm_atomic_helper_check_plane_state(new_plane_state,
+ crtc_state,
+ FRAC_16_16(1, 4),
+ FRAC_16_16(2, 1),
+ true, true);
+}
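+
+/*
+ * The FRAC_16_16() bounds passed above are 16.16 fixed-point scaling
+ * factors: FRAC_16_16(1, 4) = 0x4000 is a minimum scale of 0.25 (up to 4x
+ * upscaling) and FRAC_16_16(2, 1) = 0x20000 is a maximum scale of 2.0 (up
+ * to 2x downscaling), matching the DCP limits described in the comment.
+ */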
+
+static void apple_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ /* Handled in atomic_flush */
+}
+
+static const struct drm_plane_helper_funcs apple_plane_helper_funcs = {
+ .atomic_check = apple_plane_atomic_check,
+ .atomic_update = apple_plane_atomic_update,
+};
+
+static const struct drm_plane_funcs apple_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+/*
+ * Table of supported formats, mapping from DRM fourccs to DCP fourccs.
+ *
+ * For future work, DCP supports more formats not listed, including YUV
+ * formats, an extra RGBA format, and a biplanar RGB10_A8 format (fourcc b3a8)
+ * used for HDR.
+ *
+ * Note: we don't have non-alpha formats but userspace breaks without XRGB. It
+ * doesn't matter for the primary plane, but cursors/overlays must not
+ * advertise formats without alpha.
+ */
+static const u32 dcp_formats[] = {
+ // DRM_FORMAT_XRGB2101010,
+ // DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+};
+
+static const u64 apple_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static struct drm_plane *apple_plane_init(struct drm_device *dev,
+ unsigned long possible_crtcs,
+ enum drm_plane_type type)
+{
+ int ret;
+ struct drm_plane *plane;
+
+ plane = devm_kzalloc(dev->dev, sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_universal_plane_init(dev, plane, possible_crtcs,
+ &apple_plane_funcs,
+ dcp_formats, ARRAY_SIZE(dcp_formats),
+ apple_format_modifiers, type, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_plane_helper_add(plane, &apple_plane_helper_funcs);
+
+ return plane;
+}
+
+static int apple_enable_vblank(struct drm_crtc *crtc)
+{
+ to_apple_crtc(crtc)->vsync_disabled = false;
+
+ return 0;
+}
+
+static void apple_disable_vblank(struct drm_crtc *crtc)
+{
+ to_apple_crtc(crtc)->vsync_disabled = true;
+}
+
+static enum drm_connector_status
+apple_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct apple_connector *apple_connector = to_apple_connector(connector);
+
+ return apple_connector->connected ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+static void apple_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ if (crtc_state->active_changed && crtc_state->active) {
+ struct apple_crtc *apple_crtc = to_apple_crtc(crtc);
+ dev_dbg(&apple_crtc->dcp->dev, "%s", __func__);
+ dcp_poweron(apple_crtc->dcp);
+ dev_dbg(&apple_crtc->dcp->dev, "%s finished", __func__);
+ }
+ drm_crtc_vblank_on(crtc);
+}
+
+static void apple_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ drm_crtc_vblank_off(crtc);
+
+ if (crtc_state->active_changed && !crtc_state->active) {
+ struct apple_crtc *apple_crtc = to_apple_crtc(crtc);
+ dev_dbg(&apple_crtc->dcp->dev, "%s", __func__);
+ dcp_poweroff(apple_crtc->dcp);
+ dev_dbg(&apple_crtc->dcp->dev, "%s finished", __func__);
+ }
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ crtc->state->event = NULL;
+ }
+}
+
+static void apple_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct apple_crtc *apple_crtc = to_apple_crtc(crtc);
+ unsigned long flags;
+
+ if (crtc->state->event) {
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ apple_crtc->event = crtc->state->event;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ crtc->state->event = NULL;
+ }
+}
+
+static void dcp_atomic_commit_tail(struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = old_state->dev;
+
+ drm_atomic_helper_commit_modeset_disables(dev, old_state);
+
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
+
+ drm_atomic_helper_commit_planes(dev, old_state,
+ DRM_PLANE_COMMIT_ACTIVE_ONLY);
+
+ drm_atomic_helper_fake_vblank(old_state);
+
+ drm_atomic_helper_commit_hw_done(old_state);
+
+ drm_atomic_helper_wait_for_flip_done(dev, old_state);
+
+ drm_atomic_helper_cleanup_planes(dev, old_state);
+}
+
+
+static const struct drm_crtc_funcs apple_crtc_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .destroy = drm_crtc_cleanup,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .set_config = drm_atomic_helper_set_config,
+ .enable_vblank = apple_enable_vblank,
+ .disable_vblank = apple_disable_vblank,
+};
+
+static const struct drm_mode_config_funcs apple_mode_config_funcs = {
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+ .fb_create = drm_gem_fb_create,
+};
+
+static const struct drm_mode_config_helper_funcs apple_mode_config_helpers = {
+ .atomic_commit_tail = dcp_atomic_commit_tail,
+};
+
+static const struct drm_connector_funcs apple_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .detect = apple_connector_detect,
+};
+
+static const struct drm_connector_helper_funcs apple_connector_helper_funcs = {
+ .get_modes = dcp_get_modes,
+ .mode_valid = dcp_mode_valid,
+};
+
+static const struct drm_crtc_helper_funcs apple_crtc_helper_funcs = {
+ .atomic_begin = apple_crtc_atomic_begin,
+ .atomic_check = dcp_crtc_atomic_check,
+ .atomic_flush = dcp_flush,
+ .atomic_enable = apple_crtc_atomic_enable,
+ .atomic_disable = apple_crtc_atomic_disable,
+};
+
+static int apple_probe_per_dcp(struct device *dev,
+ struct drm_device *drm,
+ struct platform_device *dcp,
+ int num)
+{
+ struct apple_crtc *crtc;
+ struct apple_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_plane *primary;
+ int con_type;
+ int ret;
+
+ primary = apple_plane_init(drm, 1U << num, DRM_PLANE_TYPE_PRIMARY);
+
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
+
+ crtc = devm_kzalloc(dev, sizeof(*crtc), GFP_KERNEL);
+ if (!crtc)
+ return -ENOMEM;
+
+ ret = drm_crtc_init_with_planes(drm, &crtc->base, primary, NULL,
+ &apple_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
+ drm_crtc_helper_add(&crtc->base, &apple_crtc_helper_funcs);
+
+ encoder = devm_kzalloc(dev, sizeof(*encoder), GFP_KERNEL);
+ if (!encoder)
+ return -ENOMEM;
+ encoder->possible_crtcs = drm_crtc_mask(&crtc->base);
+ ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
+ if (ret)
+ return ret;
+
+ connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return -ENOMEM;
+
+ drm_connector_helper_add(&connector->base,
+ &apple_connector_helper_funcs);
+
+ if (of_property_match_string(dcp->dev.of_node, "apple,connector-type", "eDP") >= 0)
+ con_type = DRM_MODE_CONNECTOR_eDP;
+ else if (of_property_match_string(dcp->dev.of_node, "apple,connector-type", "HDMI-A") >= 0)
+ con_type = DRM_MODE_CONNECTOR_HDMIA;
+ else if (of_property_match_string(dcp->dev.of_node, "apple,connector-type", "USB-C") >= 0)
+ con_type = DRM_MODE_CONNECTOR_USB;
+ else
+ con_type = DRM_MODE_CONNECTOR_Unknown;
+
+ ret = drm_connector_init(drm, &connector->base, &apple_connector_funcs,
+ con_type);
+ if (ret)
+ return ret;
+
+ connector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ connector->connected = false;
+ connector->dcp = dcp;
+
+ INIT_WORK(&connector->hotplug_wq, dcp_hotplug);
+
+ crtc->dcp = dcp;
+ dcp_link(dcp, crtc, connector);
+
+ return drm_connector_attach_encoder(&connector->base, encoder);
+}
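+
+/*
+ * A hypothetical device tree fragment wiring up the properties consumed
+ * here and in apple_platform_probe() below (the label and unit address are
+ * illustrative only):
+ *
+ *	dcp: dcp@0 {
+ *		compatible = "apple,dcp";
+ *		apple,connector-type = "HDMI-A";
+ *	};
+ *
+ *	display-subsystem {
+ *		compatible = "apple,display-subsystem";
+ *		apple,coprocessors = <&dcp>;
+ *	};
+ */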
+
+static int apple_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct apple_drm_private *apple;
+ struct platform_device *dcp[MAX_COPROCESSORS];
+ int ret, nr_dcp, i;
+
+ for (nr_dcp = 0; nr_dcp < MAX_COPROCESSORS; ++nr_dcp) {
+ struct device_node *np;
+ struct device_link *dcp_link;
+
+ np = of_parse_phandle(dev->of_node, "apple,coprocessors",
+ nr_dcp);
+
+ if (!np)
+ break;
+
+ dcp[nr_dcp] = of_find_device_by_node(np);
+
+ if (!dcp[nr_dcp])
+ return -ENODEV;
+
+ dcp_link = device_link_add(dev, &dcp[nr_dcp]->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!dcp_link) {
+ dev_err(dev, "Failed to link to DCP %d device", nr_dcp);
+ return -EINVAL;
+ }
+
+ if (dcp_link->supplier->links.status != DL_DEV_DRIVER_BOUND)
+ return -EPROBE_DEFER;
+ }
+
+ /* Need at least 1 DCP for a display subsystem */
+ if (nr_dcp < 1)
+ return -ENODEV;
+
+ /* Remove conflicting framebuffers before registering our DRM device */
+ ret = drm_aperture_remove_framebuffers(false, &apple_drm_driver);
+ if (ret)
+ return ret;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ apple = devm_drm_dev_alloc(dev, &apple_drm_driver,
+ struct apple_drm_private, drm);
+ if (IS_ERR(apple))
+ return PTR_ERR(apple);
+
+ ret = drm_vblank_init(&apple->drm, nr_dcp);
+ if (ret)
+ return ret;
+
+ ret = drmm_mode_config_init(&apple->drm);
+ if (ret)
+ goto err_unload;
+
+ /*
+ * IOMFB::UPPipeDCP_H13P::verify_surfaces produces the error "plane
+ * requires a minimum of 32x32 for the source buffer" if smaller
+ */
+ apple->drm.mode_config.min_width = 32;
+ apple->drm.mode_config.min_height = 32;
+
+ /*
+ * Unknown maximum; use the iMac (24-inch, 2021) display resolution as
+ * the maximum for now.
+ * TODO: this is the maximum framebuffer size, not the maximum supported
+ * output resolution. DCP reports the maximum framebuffer size; take it
+ * from there.
+ */
+ apple->drm.mode_config.max_width = 4480;
+ apple->drm.mode_config.max_height = 2520;
+
+ apple->drm.mode_config.funcs = &apple_mode_config_funcs;
+ apple->drm.mode_config.helper_private = &apple_mode_config_helpers;
+
+ for (i = 0; i < nr_dcp; ++i) {
+ ret = apple_probe_per_dcp(dev, &apple->drm, dcp[i], i);
+
+ if (ret)
+ goto err_unload;
+
+ ret = dcp_start(dcp[i]);
+
+ if (ret)
+ goto err_unload;
+ }
+
+ drm_mode_config_reset(&apple->drm);
+
+ ret = drm_dev_register(&apple->drm, 0);
+ if (ret)
+ goto err_unload;
+
+ drm_fbdev_generic_setup(&apple->drm, 32);
+
+ return 0;
+
+err_unload:
+ drm_dev_put(&apple->drm);
+ return ret;
+}
+
+static int apple_platform_remove(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ drm_dev_unregister(drm);
+
+ return 0;
+}
+
+static const struct of_device_id of_match[] = {
+ { .compatible = "apple,display-subsystem" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_match);
+
+static struct platform_driver apple_platform_driver = {
+ .driver = {
+ .name = "apple-drm",
+ .of_match_table = of_match,
+ },
+ .probe = apple_platform_probe,
+ .remove = apple_platform_remove,
+};
+
+module_platform_driver(apple_platform_driver);
+
+MODULE_AUTHOR("Alyssa Rosenzweig <alyssa@rosenzweig.io>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/gpu/drm/apple/dcp-internal.h b/drivers/gpu/drm/apple/dcp-internal.h
new file mode 100644
index 000000000000..6624672109c3
--- /dev/null
+++ b/drivers/gpu/drm/apple/dcp-internal.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#ifndef __APPLE_DCP_INTERNAL_H__
+#define __APPLE_DCP_INTERNAL_H__
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#include "iomfb.h"
+
+#define DCP_MAX_PLANES 2
+
+struct apple_dcp;
+
+enum {
+ SYSTEM_ENDPOINT = 0x20,
+ TEST_ENDPOINT = 0x21,
+ DCP_EXPERT_ENDPOINT = 0x22,
+ DISP0_ENDPOINT = 0x23,
+ DPTX_ENDPOINT = 0x2a,
+ HDCP_ENDPOINT = 0x2b,
+ REMOTE_ALLOC_ENDPOINT = 0x2d,
+ IOMFB_ENDPOINT = 0x37,
+};
+
+/* Temporary backing for a chunked transfer via setDCPAVPropStart/Chunk/End */
+struct dcp_chunks {
+ size_t length;
+ void *data;
+};
+
+#define DCP_MAX_MAPPINGS (128) /* should be enough */
+#define MAX_DISP_REGISTERS (7)
+
+struct dcp_mem_descriptor {
+ size_t size;
+ void *buf;
+ dma_addr_t dva;
+ struct sg_table map;
+ u64 reg;
+};
+
+/* Limit on call stack depth (arbitrary). Some nesting is required */
+#define DCP_MAX_CALL_DEPTH 8
+
+typedef void (*dcp_callback_t)(struct apple_dcp *, void *, void *);
+
+struct dcp_call_channel {
+ dcp_callback_t callbacks[DCP_MAX_CALL_DEPTH];
+ void *cookies[DCP_MAX_CALL_DEPTH];
+ void *output[DCP_MAX_CALL_DEPTH];
+ u16 end[DCP_MAX_CALL_DEPTH];
+
+ /* Current depth of the call stack. Less than DCP_MAX_CALL_DEPTH */
+ u8 depth;
+};
+
+struct dcp_cb_channel {
+ u8 depth;
+ void *output[DCP_MAX_CALL_DEPTH];
+};
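+
+/*
+ * How these channels line up with the shared-memory contexts handled in
+ * iomfb.c: ch_cmd carries DCP_CONTEXT_CMD/CB traffic, ch_oobcmd carries
+ * DCP_CONTEXT_OOBCMD/OOBCB, while ch_cb, ch_oobcb and ch_async receive
+ * callbacks for the CB, OOBCB and ASYNC contexts respectively. Each nested
+ * call occupies one stack slot in the owning channel.
+ */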
+
+struct dcp_fb_reference {
+ struct list_head head;
+ struct drm_framebuffer *fb;
+};
+
+#define MAX_NOTCH_HEIGHT 160
+
+/* TODO: move IOMFB members to its own struct */
+struct apple_dcp {
+ struct device *dev;
+ struct platform_device *piodma;
+ struct device_link *piodma_link;
+ struct apple_rtkit *rtk;
+ struct apple_crtc *crtc;
+ struct apple_connector *connector;
+
+ /* Coprocessor control register */
+ void __iomem *coproc_reg;
+
+ /* mask for DCP IO virtual addresses shared over rtkit */
+ u64 asc_dram_mask;
+
+ /* DCP has crashed */
+ bool crashed;
+
+ /************* IOMFB **************************************************
+ * everything below is mostly used inside IOMFB but it could make *
+ * sense to keep some of the members in apple_dcp. *
+ **********************************************************************/
+
+ /* clock rate requested by the DCP */
+ struct clk *clk;
+
+ /* DCP shared memory */
+ void *shmem;
+
+ /* Display registers mappable to the DCP */
+ struct resource *disp_registers[MAX_DISP_REGISTERS];
+ unsigned int nr_disp_registers;
+
+ /* Bitmap of memory descriptors used for mappings made by the DCP */
+ DECLARE_BITMAP(memdesc_map, DCP_MAX_MAPPINGS);
+
+ /* Indexed table of memory descriptors */
+ struct dcp_mem_descriptor memdesc[DCP_MAX_MAPPINGS];
+
+ struct dcp_call_channel ch_cmd, ch_oobcmd;
+ struct dcp_cb_channel ch_cb, ch_oobcb, ch_async;
+
+ /* Active chunked transfer. There can only be one at a time. */
+ struct dcp_chunks chunks;
+
+ /* Queued swap. Owned by the DCP to avoid per-swap memory allocation */
+ struct dcp_swap_submit_req swap;
+
+ /* Current display mode */
+ bool valid_mode;
+ struct dcp_set_digital_out_mode_req mode;
+
+ /* Is the DCP booted? */
+ bool active;
+
+ /* eDP display without DP-HDMI conversion */
+ bool main_display;
+
+ bool ignore_swap_complete;
+
+ /* Modes valid for the connected display */
+ struct dcp_display_mode *modes;
+ unsigned int nr_modes;
+
+ /* Attributes of the connected display */
+ int width_mm, height_mm;
+
+ unsigned int notch_height;
+
+ /* Work item for sending vblank events when a DCP swap is not possible */
+ struct work_struct vblank_wq;
+
+ /* List of referenced drm_framebuffers which can be unreferenced
+ * on the next successfully completed swap.
+ */
+ struct list_head swapped_out_fbs;
+};
+
+#endif /* __APPLE_DCP_INTERNAL_H__ */
diff --git a/drivers/gpu/drm/apple/dcp.c b/drivers/gpu/drm/apple/dcp.c
new file mode 100644
index 000000000000..c333ea61c49b
--- /dev/null
+++ b/drivers/gpu/drm/apple/dcp.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/align.h>
+#include <linux/apple-mailbox.h>
+#include <linux/soc/apple/rtkit.h>
+#include <linux/completion.h>
+
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "dcp.h"
+#include "dcp-internal.h"
+#include "parser.h"
+#include "trace.h"
+
+#define APPLE_DCP_COPROC_CPU_CONTROL 0x44
+#define APPLE_DCP_COPROC_CPU_CONTROL_RUN BIT(4)
+
+#define DCP_BOOT_TIMEOUT msecs_to_jiffies(1000)
+
+/* HACK: moved here to avoid circular dependency between apple_drv and dcp */
+void dcp_drm_crtc_vblank(struct apple_crtc *crtc)
+{
+ unsigned long flags;
+
+ if (crtc->vsync_disabled)
+ return;
+
+ drm_crtc_handle_vblank(&crtc->base);
+
+ spin_lock_irqsave(&crtc->base.dev->event_lock, flags);
+ if (crtc->event) {
+ drm_crtc_send_vblank_event(&crtc->base, crtc->event);
+ drm_crtc_vblank_put(&crtc->base);
+ crtc->event = NULL;
+ }
+ spin_unlock_irqrestore(&crtc->base.dev->event_lock, flags);
+}
+
+void dcp_set_dimensions(struct apple_dcp *dcp)
+{
+ int i;
+
+ /* Set the connector info */
+ if (dcp->connector) {
+ struct drm_connector *connector = &dcp->connector->base;
+
+ mutex_lock(&connector->dev->mode_config.mutex);
+ connector->display_info.width_mm = dcp->width_mm;
+ connector->display_info.height_mm = dcp->height_mm;
+ mutex_unlock(&connector->dev->mode_config.mutex);
+ }
+
+ /*
+ * Fix up any probed modes. Modes are created when parsing
+ * TimingElements, dimensions are calculated when parsing
+ * DisplayAttributes, and TimingElements may be sent first
+ */
+ for (i = 0; i < dcp->nr_modes; ++i) {
+ dcp->modes[i].mode.width_mm = dcp->width_mm;
+ dcp->modes[i].mode.height_mm = dcp->height_mm;
+ }
+}
+
+/*
+ * Helper to send a DRM vblank event. We do not know how to call
+ * swap_submit_dcp without surfaces. To avoid timeouts in
+ * drm_atomic_helper_wait_for_vblanks(), send a vblank event via a
+ * workqueue.
+ */
+static void dcp_delayed_vblank(struct work_struct *work)
+{
+ struct apple_dcp *dcp;
+
+ dcp = container_of(work, struct apple_dcp, vblank_wq);
+ mdelay(5);
+ dcp_drm_crtc_vblank(dcp->crtc);
+}
+
+static void dcp_recv_msg(void *cookie, u8 endpoint, u64 message)
+{
+ struct apple_dcp *dcp = cookie;
+
+ trace_dcp_recv_msg(dcp, endpoint, message);
+
+ switch (endpoint) {
+ case IOMFB_ENDPOINT:
+ return iomfb_recv_msg(dcp, message);
+ default:
+ WARN(endpoint, "unknown DCP endpoint %hhu", endpoint);
+ }
+}
+
+static void dcp_rtk_crashed(void *cookie)
+{
+ struct apple_dcp *dcp = cookie;
+
+ dcp->crashed = true;
+ dev_err(dcp->dev, "DCP has crashed");
+ if (dcp->connector) {
+ dcp->connector->connected = 0;
+ schedule_work(&dcp->connector->hotplug_wq);
+ }
+}
+
+static int dcp_rtk_shmem_setup(void *cookie, struct apple_rtkit_shmem *bfr)
+{
+ struct apple_dcp *dcp = cookie;
+
+ if (bfr->iova) {
+ struct iommu_domain *domain =
+ iommu_get_domain_for_dev(dcp->dev);
+ phys_addr_t phy_addr;
+
+ if (!domain)
+ return -ENOMEM;
+
+ // TODO: get map from device-tree
+ phy_addr = iommu_iova_to_phys(domain,
+ bfr->iova & ~dcp->asc_dram_mask);
+ if (!phy_addr)
+ return -ENOMEM;
+
+ // TODO: verify phy_addr, cache attribute
+ bfr->buffer = memremap(phy_addr, bfr->size, MEMREMAP_WB);
+ if (!bfr->buffer)
+ return -ENOMEM;
+
+ bfr->is_mapped = true;
+ dev_info(dcp->dev,
+ "shmem_setup: iova: %lx -> pa: %lx -> iomem: %lx",
+ (uintptr_t)bfr->iova, (uintptr_t)phy_addr,
+ (uintptr_t)bfr->buffer);
+ } else {
+ bfr->buffer = dma_alloc_coherent(dcp->dev, bfr->size,
+ &bfr->iova, GFP_KERNEL);
+ if (!bfr->buffer)
+ return -ENOMEM;
+
+ bfr->iova |= dcp->asc_dram_mask;
+
+ dev_info(dcp->dev, "shmem_setup: iova: %lx, buffer: %lx",
+ (uintptr_t)bfr->iova, (uintptr_t)bfr->buffer);
+ }
+
+ return 0;
+}
+
+static void dcp_rtk_shmem_destroy(void *cookie, struct apple_rtkit_shmem *bfr)
+{
+ struct apple_dcp *dcp = cookie;
+
+ if (bfr->is_mapped)
+ memunmap(bfr->buffer);
+ else
+ dma_free_coherent(dcp->dev, bfr->size, bfr->buffer,
+ bfr->iova & ~dcp->asc_dram_mask);
+}
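+
+/*
+ * asc_dram_mask tags DRAM addresses in the coprocessor's view: shmem_setup
+ * above ORs it into IOVAs handed to RTKit and masks it off again before
+ * IOMMU/DMA lookups. With a hypothetical mask of 0x100000000, for example,
+ * iova 0x4000 would be exposed to the DCP as 0x100004000.
+ */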
+
+static const struct apple_rtkit_ops rtkit_ops = {
+ .crashed = dcp_rtk_crashed,
+ .recv_message = dcp_recv_msg,
+ .shmem_setup = dcp_rtk_shmem_setup,
+ .shmem_destroy = dcp_rtk_shmem_destroy,
+};
+
+void dcp_send_message(struct apple_dcp *dcp, u8 endpoint, u64 message)
+{
+ trace_dcp_send_msg(dcp, endpoint, message);
+ apple_rtkit_send_message(dcp->rtk, endpoint, message, NULL,
+ false);
+}
+
+int dcp_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct platform_device *pdev = to_apple_crtc(crtc)->dcp;
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+ struct drm_plane_state *new_state, *old_state;
+ struct drm_plane *plane;
+ struct drm_crtc_state *crtc_state;
+ int plane_idx, plane_count = 0;
+ bool needs_modeset;
+
+ if (dcp->crashed)
+ return -EINVAL;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ needs_modeset = drm_atomic_crtc_needs_modeset(crtc_state) || !dcp->valid_mode;
+ if (!needs_modeset && !dcp->connector->connected) {
+ dev_err(dcp->dev, "crtc_atomic_check: disconnected but no modeset");
+ return -EINVAL;
+ }
+
+ for_each_oldnew_plane_in_state(state, plane, old_state, new_state, plane_idx) {
+ /* skip planes not for this crtc */
+ if (new_state->crtc != crtc)
+ continue;
+
+ plane_count += 1;
+ }
+
+ if (plane_count > DCP_MAX_PLANES) {
+ dev_err(dcp->dev, "crtc_atomic_check: Blend supports only 2 layers!");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dcp_crtc_atomic_check);
+
+void dcp_link(struct platform_device *pdev, struct apple_crtc *crtc,
+ struct apple_connector *connector)
+{
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+ dcp->crtc = crtc;
+ dcp->connector = connector;
+}
+EXPORT_SYMBOL_GPL(dcp_link);
+
+int dcp_start(struct platform_device *pdev)
+{
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+ int ret;
+
+ /* start RTKit endpoints */
+ ret = iomfb_start_rtkit(dcp);
+ if (ret)
+ dev_err(dcp->dev, "Failed to start IOMFB endpoint: %d", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(dcp_start);
+
+static struct platform_device *dcp_get_dev(struct device *dev, const char *name)
+{
+ struct platform_device *pdev;
+ struct device_node *node = of_parse_phandle(dev->of_node, name, 0);
+
+ if (!node)
+ return NULL;
+
+ pdev = of_find_device_by_node(node);
+ of_node_put(node);
+ return pdev;
+}
+
+static int dcp_get_disp_regs(struct apple_dcp *dcp)
+{
+ struct platform_device *pdev = to_platform_device(dcp->dev);
+ int count = pdev->num_resources - 1;
+ int i;
+
+ if (count <= 0 || count > MAX_DISP_REGISTERS)
+ return -EINVAL;
+
+ for (i = 0; i < count; ++i) {
+ dcp->disp_registers[i] =
+ platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
+ }
+
+ dcp->nr_disp_registers = count;
+ return 0;
+}
+
+static int dcp_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct apple_dcp *dcp;
+ u32 cpu_ctrl;
+ int ret;
+
+ dcp = devm_kzalloc(dev, sizeof(*dcp), GFP_KERNEL);
+ if (!dcp)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, dcp);
+ dcp->dev = dev;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ dcp->coproc_reg = devm_platform_ioremap_resource_byname(pdev, "coproc");
+ if (IS_ERR(dcp->coproc_reg))
+ return PTR_ERR(dcp->coproc_reg);
+
+ of_platform_default_populate(dev->of_node, NULL, dev);
+
+ dcp->piodma = dcp_get_dev(dev, "apple,piodma-mapper");
+ if (!dcp->piodma) {
+ dev_err(dev, "failed to find piodma\n");
+ return -ENODEV;
+ }
+
+ dcp->piodma_link = device_link_add(dev, &dcp->piodma->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!dcp->piodma_link) {
+ dev_err(dev, "Failed to link to piodma device");
+ return -EINVAL;
+ }
+
+ if (dcp->piodma_link->supplier->links.status != DL_DEV_DRIVER_BOUND)
+ return -EPROBE_DEFER;
+
+ ret = dcp_get_disp_regs(dcp);
+ if (ret) {
+ dev_err(dev, "failed to find display registers\n");
+ return ret;
+ }
+
+ dcp->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(dcp->clk))
+ return dev_err_probe(dev, PTR_ERR(dcp->clk),
+ "Unable to find clock\n");
+
+ ret = of_property_read_u64(dev->of_node, "apple,asc-dram-mask",
+ &dcp->asc_dram_mask);
+ if (ret)
+ dev_warn(dev, "failed read 'apple,asc-dram-mask': %d\n", ret);
+ dev_dbg(dev, "'apple,asc-dram-mask': 0x%011llx\n", dcp->asc_dram_mask);
+
+ ret = of_property_read_u32(dev->of_node, "apple,notch-height",
+ &dcp->notch_height);
+ if (dcp->notch_height > MAX_NOTCH_HEIGHT)
+ dcp->notch_height = MAX_NOTCH_HEIGHT;
+ if (dcp->notch_height > 0)
+ dev_info(dev, "Detected display with notch of %u pixel\n", dcp->notch_height);
+
+ bitmap_zero(dcp->memdesc_map, DCP_MAX_MAPPINGS);
+ // TODO: mem_desc IDs start at 1; for simplicity just skip the '0' entry
+ set_bit(0, dcp->memdesc_map);
+
+ INIT_WORK(&dcp->vblank_wq, dcp_delayed_vblank);
+
+ dcp->swapped_out_fbs =
+ (struct list_head)LIST_HEAD_INIT(dcp->swapped_out_fbs);
+
+ cpu_ctrl =
+ readl_relaxed(dcp->coproc_reg + APPLE_DCP_COPROC_CPU_CONTROL);
+ writel_relaxed(cpu_ctrl | APPLE_DCP_COPROC_CPU_CONTROL_RUN,
+ dcp->coproc_reg + APPLE_DCP_COPROC_CPU_CONTROL);
+
+ dcp->rtk = devm_apple_rtkit_init(dev, dcp, "mbox", 0, &rtkit_ops);
+ if (IS_ERR(dcp->rtk))
+ return dev_err_probe(dev, PTR_ERR(dcp->rtk),
+ "Failed to intialize RTKit");
+
+ ret = apple_rtkit_wake(dcp->rtk);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to boot RTKit: %d", ret);
+
+ return ret;
+}
+
+/*
+ * We need to shutdown DCP before tearing down the display subsystem. Otherwise
+ * the DCP will crash and briefly flash a green screen of death.
+ */
+static void dcp_platform_shutdown(struct platform_device *pdev)
+{
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+ iomfb_shutdown(dcp);
+}
+
+static const struct of_device_id of_match[] = {
+ { .compatible = "apple,dcp" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_match);
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * We don't hold any useful persistent state, so for suspend/resume it suffices
+ * to power off/on the entire DCP. The firmware will sort out the details for
+ * us.
+ */
+static int dcp_suspend(struct device *dev)
+{
+ dcp_poweroff(to_platform_device(dev));
+ return 0;
+}
+
+static int dcp_resume(struct device *dev)
+{
+ dcp_poweron(to_platform_device(dev));
+ return 0;
+}
+
+static const struct dev_pm_ops dcp_pm_ops = {
+ .suspend = dcp_suspend,
+ .resume = dcp_resume,
+};
+#endif
+
+static struct platform_driver apple_platform_driver = {
+ .probe = dcp_platform_probe,
+ .shutdown = dcp_platform_shutdown,
+ .driver = {
+ .name = "apple-dcp",
+ .of_match_table = of_match,
+#ifdef CONFIG_PM_SLEEP
+ .pm = &dcp_pm_ops,
+#endif
+ },
+};
+
+module_platform_driver(apple_platform_driver);
+
+MODULE_AUTHOR("Alyssa Rosenzweig <alyssa@rosenzweig.io>");
+MODULE_DESCRIPTION("Apple Display Controller DRM driver");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/gpu/drm/apple/dcp.h b/drivers/gpu/drm/apple/dcp.h
new file mode 100644
index 000000000000..60e9bcfa4714
--- /dev/null
+++ b/drivers/gpu/drm/apple/dcp.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#ifndef __APPLE_DCP_H__
+#define __APPLE_DCP_H__
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_fourcc.h>
+
+#include "dcp-internal.h"
+#include "parser.h"
+
+struct apple_crtc {
+ struct drm_crtc base;
+ struct drm_pending_vblank_event *event;
+ bool vsync_disabled;
+
+ /* Reference to the DCP device owning this CRTC */
+ struct platform_device *dcp;
+};
+
+#define to_apple_crtc(x) container_of(x, struct apple_crtc, base)
+
+void dcp_hotplug(struct work_struct *work);
+
+struct apple_connector {
+ struct drm_connector base;
+ bool connected;
+
+ struct platform_device *dcp;
+
+ /* Work item for sending hotplug events to the associated device */
+ struct work_struct hotplug_wq;
+};
+
+#define to_apple_connector(x) container_of(x, struct apple_connector, base)
+
+void dcp_poweroff(struct platform_device *pdev);
+void dcp_poweron(struct platform_device *pdev);
+int dcp_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state);
+void dcp_link(struct platform_device *pdev, struct apple_crtc *apple,
+ struct apple_connector *connector);
+int dcp_start(struct platform_device *pdev);
+void dcp_flush(struct drm_crtc *crtc, struct drm_atomic_state *state);
+bool dcp_is_initialized(struct platform_device *pdev);
+void apple_crtc_vblank(struct apple_crtc *apple);
+void dcp_drm_crtc_vblank(struct apple_crtc *crtc);
+int dcp_get_modes(struct drm_connector *connector);
+int dcp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+void dcp_set_dimensions(struct apple_dcp *dcp);
+void dcp_send_message(struct apple_dcp *dcp, u8 endpoint, u64 message);
+
+int iomfb_start_rtkit(struct apple_dcp *dcp);
+void iomfb_shutdown(struct apple_dcp *dcp);
+/* rtkit message handler for IOMFB messages */
+void iomfb_recv_msg(struct apple_dcp *dcp, u64 message);
+
+#endif
diff --git a/drivers/gpu/drm/apple/dummy-piodma.c b/drivers/gpu/drm/apple/dummy-piodma.c
new file mode 100644
index 000000000000..05d3e6130bf1
--- /dev/null
+++ b/drivers/gpu/drm/apple/dummy-piodma.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+
+static int dcp_piodma_probe(struct platform_device *pdev)
+{
+ return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+}
+
+static const struct of_device_id of_match[] = {
+ { .compatible = "apple,dcp-piodma" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_match);
+
+static struct platform_driver dcp_piodma_platform_driver = {
+ .probe = dcp_piodma_probe,
+ .driver = {
+ .name = "apple,dcp-piodma",
+ .of_match_table = of_match,
+ },
+};
+
+module_platform_driver(dcp_piodma_platform_driver);
+
+MODULE_AUTHOR("Alyssa Rosenzweig <alyssa@rosenzweig.io>");
+MODULE_DESCRIPTION("[HACK] Apple DCP PIODMA shim");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/gpu/drm/apple/iomfb.c b/drivers/gpu/drm/apple/iomfb.c
new file mode 100644
index 000000000000..79e96070c45f
--- /dev/null
+++ b/drivers/gpu/drm/apple/iomfb.c
@@ -0,0 +1,1689 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/kref.h>
+#include <linux/align.h>
+#include <linux/apple-mailbox.h>
+#include <linux/soc/apple/rtkit.h>
+#include <linux/completion.h>
+
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "dcp.h"
+#include "dcp-internal.h"
+#include "iomfb.h"
+#include "parser.h"
+#include "trace.h"
+
+/* Register defines used in bandwidth setup structure */
+#define REG_SCRATCH (0x14)
+#define REG_SCRATCH_T600X (0x988)
+#define REG_DOORBELL (0x0)
+#define REG_DOORBELL_BIT (2)
+
+struct dcp_wait_cookie {
+ struct kref refcount;
+ struct completion done;
+};
+
+static void release_wait_cookie(struct kref *ref)
+{
+ struct dcp_wait_cookie *cookie;
+ cookie = container_of(ref, struct dcp_wait_cookie, refcount);
+
+ kfree(cookie);
+}
+
+static int dcp_tx_offset(enum dcp_context_id id)
+{
+ switch (id) {
+ case DCP_CONTEXT_CB:
+ case DCP_CONTEXT_CMD:
+ return 0x00000;
+ case DCP_CONTEXT_OOBCB:
+ case DCP_CONTEXT_OOBCMD:
+ return 0x08000;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int dcp_channel_offset(enum dcp_context_id id)
+{
+ switch (id) {
+ case DCP_CONTEXT_ASYNC:
+ return 0x40000;
+ case DCP_CONTEXT_CB:
+ return 0x60000;
+ case DCP_CONTEXT_OOBCB:
+ return 0x68000;
+ default:
+ return dcp_tx_offset(id);
+ }
+}
+
+static inline u64 dcpep_set_shmem(u64 dart_va)
+{
+ return (DCPEP_TYPE_SET_SHMEM << DCPEP_TYPE_SHIFT) |
+ (DCPEP_FLAG_VALUE << DCPEP_FLAG_SHIFT) |
+ (dart_va << DCPEP_DVA_SHIFT);
+}
+
+static inline u64 dcpep_msg(enum dcp_context_id id, u32 length, u16 offset)
+{
+ return (DCPEP_TYPE_MESSAGE << DCPEP_TYPE_SHIFT) |
+ ((u64)id << DCPEP_CONTEXT_SHIFT) |
+ ((u64)offset << DCPEP_OFFSET_SHIFT) |
+ ((u64)length << DCPEP_LENGTH_SHIFT);
+}
+
+static inline u64 dcpep_ack(enum dcp_context_id id)
+{
+ return dcpep_msg(id, 0, 0) | DCPEP_ACK;
+}
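+
+/*
+ * Sketch of the resulting doorbell word (field positions come from the
+ * DCPEP_*_SHIFT constants in iomfb.h): dcpep_msg() packs type, context id,
+ * packet offset and length into a single u64, and dcpep_ack() is the same
+ * encoding with a zero length/offset plus the DCPEP_ACK flag set.
+ */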
+
+/*
+ * A channel is busy if we have sent a message that has yet to be
+ * acked. The driver must not send a message to a busy channel.
+ */
+static bool dcp_channel_busy(struct dcp_call_channel *ch)
+{
+ return (ch->depth != 0);
+}
+
+/* Get a call channel for a context */
+static struct dcp_call_channel *
+dcp_get_call_channel(struct apple_dcp *dcp, enum dcp_context_id context)
+{
+ switch (context) {
+ case DCP_CONTEXT_CMD:
+ case DCP_CONTEXT_CB:
+ return &dcp->ch_cmd;
+ case DCP_CONTEXT_OOBCMD:
+ case DCP_CONTEXT_OOBCB:
+ return &dcp->ch_oobcmd;
+ default:
+ return NULL;
+ }
+}
+
+/*
+ * Get the context ID passed to the DCP for a command we push. The rule is
+ * simple: callback contexts are used when replying to the DCP, command
+ * contexts are used otherwise. That corresponds to a non/zero call stack
+ * depth. This rule frees the caller from tracking the call context manually.
+ */
+static enum dcp_context_id dcp_call_context(struct apple_dcp *dcp, bool oob)
+{
+ u8 depth = oob ? dcp->ch_oobcmd.depth : dcp->ch_cmd.depth;
+
+ if (depth)
+ return oob ? DCP_CONTEXT_OOBCB : DCP_CONTEXT_CB;
+ else
+ return oob ? DCP_CONTEXT_OOBCMD : DCP_CONTEXT_CMD;
+}
+
+/* Get a callback channel for a context */
+static struct dcp_cb_channel *dcp_get_cb_channel(struct apple_dcp *dcp,
+ enum dcp_context_id context)
+{
+ switch (context) {
+ case DCP_CONTEXT_CB:
+ return &dcp->ch_cb;
+ case DCP_CONTEXT_OOBCB:
+ return &dcp->ch_oobcb;
+ case DCP_CONTEXT_ASYNC:
+ return &dcp->ch_async;
+ default:
+ return NULL;
+ }
+}
+
+/* Get the start of a packet: after the end of the previous packet */
+static u16 dcp_packet_start(struct dcp_call_channel *ch, u8 depth)
+{
+ if (depth > 0)
+ return ch->end[depth - 1];
+ else
+ return 0;
+}
+
+/* Pushes and pops the depth of the call stack with safety checks */
+static u8 dcp_push_depth(u8 *depth)
+{
+ u8 ret = (*depth)++;
+
+ WARN_ON(ret >= DCP_MAX_CALL_DEPTH);
+ return ret;
+}
+
+static u8 dcp_pop_depth(u8 *depth)
+{
+ WARN_ON((*depth) == 0);
+
+ return --(*depth);
+}
+
+#define DCP_METHOD(tag, name) [name] = { #name, tag }
+
+const struct dcp_method_entry dcp_methods[dcpep_num_methods] = {
+ DCP_METHOD("A000", dcpep_late_init_signal),
+ DCP_METHOD("A029", dcpep_setup_video_limits),
+ DCP_METHOD("A034", dcpep_update_notify_clients_dcp),
+ DCP_METHOD("A357", dcpep_set_create_dfb),
+ DCP_METHOD("A401", dcpep_start_signal),
+ DCP_METHOD("A407", dcpep_swap_start),
+ DCP_METHOD("A408", dcpep_swap_submit),
+ DCP_METHOD("A410", dcpep_set_display_device),
+ DCP_METHOD("A411", dcpep_is_main_display),
+ DCP_METHOD("A412", dcpep_set_digital_out_mode),
+ DCP_METHOD("A439", dcpep_set_parameter_dcp),
+ DCP_METHOD("A443", dcpep_create_default_fb),
+ DCP_METHOD("A447", dcpep_enable_disable_video_power_savings),
+ DCP_METHOD("A454", dcpep_first_client_open),
+ DCP_METHOD("A460", dcpep_set_display_refresh_properties),
+ DCP_METHOD("A463", dcpep_flush_supports_power),
+ DCP_METHOD("A468", dcpep_set_power_state),
+};
+
+/* Call a DCP function given by a tag */
+static void dcp_push(struct apple_dcp *dcp, bool oob, enum dcpep_method method,
+ u32 in_len, u32 out_len, void *data, dcp_callback_t cb,
+ void *cookie)
+{
+ struct dcp_call_channel *ch = oob ? &dcp->ch_oobcmd : &dcp->ch_cmd;
+ enum dcp_context_id context = dcp_call_context(dcp, oob);
+
+ struct dcp_packet_header header = {
+ .in_len = in_len,
+ .out_len = out_len,
+
+ /* Tag is reversed due to endianness of the fourcc */
+ .tag[0] = dcp_methods[method].tag[3],
+ .tag[1] = dcp_methods[method].tag[2],
+ .tag[2] = dcp_methods[method].tag[1],
+ .tag[3] = dcp_methods[method].tag[0],
+ };
+
+ u8 depth = dcp_push_depth(&ch->depth);
+ u16 offset = dcp_packet_start(ch, depth);
+
+ void *out = dcp->shmem + dcp_tx_offset(context) + offset;
+ void *out_data = out + sizeof(header);
+ size_t data_len = sizeof(header) + in_len + out_len;
+
+ memcpy(out, &header, sizeof(header));
+
+ if (in_len > 0)
+ memcpy(out_data, data, in_len);
+
+ trace_iomfb_push(dcp, &dcp_methods[method], context, offset, depth);
+
+ ch->callbacks[depth] = cb;
+ ch->cookies[depth] = cookie;
+ ch->output[depth] = out + sizeof(header) + in_len;
+ ch->end[depth] = offset + ALIGN(data_len, DCP_PACKET_ALIGNMENT);
+
+ dcp_send_message(dcp, IOMFB_ENDPOINT,
+ dcpep_msg(context, data_len, offset));
+}
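+
+/*
+ * Rough shared-memory layout of one packet as assembled above (each
+ * context's region starts at dcp_tx_offset()):
+ *
+ *	offset:  dcp_packet_header (tag, in_len, out_len)
+ *	         in_len bytes of marshalled input
+ *	         out_len bytes reserved for the response
+ *	next packet at ALIGN(offset + header + in + out, DCP_PACKET_ALIGNMENT)
+ */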
+
+#define DCP_THUNK_VOID(func, handle) \
+ static void func(struct apple_dcp *dcp, bool oob, dcp_callback_t cb, \
+ void *cookie) \
+ { \
+ dcp_push(dcp, oob, handle, 0, 0, NULL, cb, cookie); \
+ }
+
+#define DCP_THUNK_OUT(func, handle, T) \
+ static void func(struct apple_dcp *dcp, bool oob, dcp_callback_t cb, \
+ void *cookie) \
+ { \
+ dcp_push(dcp, oob, handle, 0, sizeof(T), NULL, cb, cookie); \
+ }
+
+#define DCP_THUNK_IN(func, handle, T) \
+ static void func(struct apple_dcp *dcp, bool oob, T *data, \
+ dcp_callback_t cb, void *cookie) \
+ { \
+ dcp_push(dcp, oob, handle, sizeof(T), 0, data, cb, cookie); \
+ }
+
+#define DCP_THUNK_INOUT(func, handle, T_in, T_out) \
+ static void func(struct apple_dcp *dcp, bool oob, T_in *data, \
+ dcp_callback_t cb, void *cookie) \
+ { \
+ dcp_push(dcp, oob, handle, sizeof(T_in), sizeof(T_out), data, \
+ cb, cookie); \
+ }
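+
+/*
+ * For instance, the DCP_THUNK_INOUT() invocation just below expands to
+ * roughly:
+ *
+ *	static void dcp_swap_submit(struct apple_dcp *dcp, bool oob,
+ *				    struct dcp_swap_submit_req *data,
+ *				    dcp_callback_t cb, void *cookie)
+ *	{
+ *		dcp_push(dcp, oob, dcpep_swap_submit,
+ *			 sizeof(struct dcp_swap_submit_req),
+ *			 sizeof(struct dcp_swap_submit_resp), data, cb, cookie);
+ *	}
+ */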
+
+DCP_THUNK_INOUT(dcp_swap_submit, dcpep_swap_submit, struct dcp_swap_submit_req,
+ struct dcp_swap_submit_resp);
+
+DCP_THUNK_INOUT(dcp_swap_start, dcpep_swap_start, struct dcp_swap_start_req,
+ struct dcp_swap_start_resp);
+
+DCP_THUNK_INOUT(dcp_set_power_state, dcpep_set_power_state,
+ struct dcp_set_power_state_req,
+ struct dcp_set_power_state_resp);
+
+DCP_THUNK_INOUT(dcp_set_digital_out_mode, dcpep_set_digital_out_mode,
+ struct dcp_set_digital_out_mode_req, u32);
+
+DCP_THUNK_INOUT(dcp_set_display_device, dcpep_set_display_device, u32, u32);
+
+DCP_THUNK_OUT(dcp_set_display_refresh_properties,
+ dcpep_set_display_refresh_properties, u32);
+
+DCP_THUNK_OUT(dcp_late_init_signal, dcpep_late_init_signal, u32);
+DCP_THUNK_IN(dcp_flush_supports_power, dcpep_flush_supports_power, u32);
+DCP_THUNK_OUT(dcp_create_default_fb, dcpep_create_default_fb, u32);
+DCP_THUNK_OUT(dcp_start_signal, dcpep_start_signal, u32);
+DCP_THUNK_VOID(dcp_setup_video_limits, dcpep_setup_video_limits);
+DCP_THUNK_VOID(dcp_set_create_dfb, dcpep_set_create_dfb);
+DCP_THUNK_VOID(dcp_first_client_open, dcpep_first_client_open);
+
+__attribute__((unused))
+DCP_THUNK_IN(dcp_update_notify_clients_dcp, dcpep_update_notify_clients_dcp,
+ struct dcp_update_notify_clients_dcp);
+
+DCP_THUNK_INOUT(dcp_set_parameter_dcp, dcpep_set_parameter_dcp,
+ struct dcp_set_parameter_dcp, u32);
+
+DCP_THUNK_INOUT(dcp_enable_disable_video_power_savings,
+ dcpep_enable_disable_video_power_savings, u32, int);
+
+DCP_THUNK_OUT(dcp_is_main_display, dcpep_is_main_display, u32);
+
+/* Parse a callback tag "D123" into the ID 123. Returns -EINVAL on failure. */
+static int dcp_parse_tag(char tag[4])
+{
+ u32 d[3];
+ int i;
+
+ if (tag[3] != 'D')
+ return -EINVAL;
+
+ for (i = 0; i < 3; ++i) {
+ d[i] = (u32)(tag[i] - '0');
+
+ if (d[i] > 9)
+ return -EINVAL;
+ }
+
+ return d[0] + (d[1] * 10) + (d[2] * 100);
+}
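+
+/*
+ * Worked example: because dcp_push() writes the fourcc tag byte-reversed,
+ * the callback tag arrives in memory as { '3', '2', '1', 'D' } for the
+ * logical tag "D123", and the loop above computes 3 + 2 * 10 + 1 * 100 = 123.
+ */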
+
+/* Ack a callback from the DCP */
+static void dcp_ack(struct apple_dcp *dcp, enum dcp_context_id context)
+{
+ struct dcp_cb_channel *ch = dcp_get_cb_channel(dcp, context);
+
+ dcp_pop_depth(&ch->depth);
+ dcp_send_message(dcp, IOMFB_ENDPOINT,
+ dcpep_ack(context));
+}
+
+/* DCP callback handlers */
+static void dcpep_cb_nop(struct apple_dcp *dcp)
+{
+ /* No operation */
+}
+
+static u8 dcpep_cb_true(struct apple_dcp *dcp)
+{
+ return true;
+}
+
+static u8 dcpep_cb_false(struct apple_dcp *dcp)
+{
+ return false;
+}
+
+static u32 dcpep_cb_zero(struct apple_dcp *dcp)
+{
+ return 0;
+}
+
+static void dcpep_cb_swap_complete(struct apple_dcp *dcp,
+ struct dc_swap_complete_resp *resp)
+{
+ trace_iomfb_swap_complete(dcp, resp->swap_id);
+
+ if (!dcp->ignore_swap_complete)
+ dcp_drm_crtc_vblank(dcp->crtc);
+}
+
+static struct dcp_get_uint_prop_resp
+dcpep_cb_get_uint_prop(struct apple_dcp *dcp, struct dcp_get_uint_prop_req *req)
+{
+ /* unimplemented for now */
+ return (struct dcp_get_uint_prop_resp){ .value = 0 };
+}
+
+/*
+ * Callback to map a buffer allocated with allocate_buf for PIODMA usage.
+ * PIODMA is separate from the main DCP and uses its own IOVA space on a
+ * dedicated stream of the display DART, rather than the expected DCP DART.
+ *
+ * XXX: This relies on dma_get_sgtable in concert with dma_map_sgtable, which
+ * is a "fundamentally unsafe" operation according to the docs. And yet
+ * everyone does it...
+ */
+static struct dcp_map_buf_resp dcpep_cb_map_piodma(struct apple_dcp *dcp,
+ struct dcp_map_buf_req *req)
+{
+ struct sg_table *map;
+ int ret;
+
+ if (req->buffer >= ARRAY_SIZE(dcp->memdesc))
+ goto reject;
+
+ map = &dcp->memdesc[req->buffer].map;
+
+ if (!map->sgl)
+ goto reject;
+
+ /* Use PIODMA device instead of DCP to map against the right IOMMU. */
+ ret = dma_map_sgtable(&dcp->piodma->dev, map, DMA_BIDIRECTIONAL, 0);
+
+ if (ret)
+ goto reject;
+
+ return (struct dcp_map_buf_resp){ .dva = sg_dma_address(map->sgl) };
+
+reject:
+ dev_err(dcp->dev, "denying map of invalid buffer %llx for pidoma\n",
+ req->buffer);
+ return (struct dcp_map_buf_resp){ .ret = EINVAL };
+}
+
+static void dcpep_cb_unmap_piodma(struct apple_dcp *dcp,
+ struct dcp_unmap_buf_resp *resp)
+{
+ struct sg_table *map;
+ dma_addr_t dma_addr;
+
+ if (resp->buffer >= ARRAY_SIZE(dcp->memdesc)) {
+ dev_warn(dcp->dev, "unmap request for out of range buffer %llu",
+ resp->buffer);
+ return;
+ }
+
+ map = &dcp->memdesc[resp->buffer].map;
+
+ if (!map->sgl) {
+ dev_warn(dcp->dev,
+ "unmap for non-mapped buffer %llu iova:0x%08llx",
+ resp->buffer, resp->dva);
+ return;
+ }
+
+ dma_addr = sg_dma_address(map->sgl);
+ if (dma_addr != resp->dva) {
+ dev_warn(dcp->dev, "unmap buffer %llu address mismatch dma_addr:%llx dva:%llx",
+ resp->buffer, dma_addr, resp->dva);
+ return;
+ }
+
+ /* Use PIODMA device instead of DCP to unmap from the right IOMMU. */
+ dma_unmap_sgtable(&dcp->piodma->dev, map, DMA_BIDIRECTIONAL, 0);
+}
+
+/*
+ * Allocate an IOVA contiguous buffer mapped to the DCP. The buffer need not be
+ * physically contiguous, but we save the sgtable in case the buffer needs
+ * to be mapped for PIODMA later.
+ */
+static struct dcp_allocate_buffer_resp
+dcpep_cb_allocate_buffer(struct apple_dcp *dcp,
+ struct dcp_allocate_buffer_req *req)
+{
+ struct dcp_allocate_buffer_resp resp = { 0 };
+ struct dcp_mem_descriptor *memdesc;
+ u32 id;
+
+ resp.dva_size = ALIGN(req->size, 4096);
+ resp.mem_desc_id =
+ find_first_zero_bit(dcp->memdesc_map, DCP_MAX_MAPPINGS);
+
+ if (resp.mem_desc_id >= DCP_MAX_MAPPINGS) {
+ dev_warn(dcp->dev, "DCP overflowed mapping table, ignoring");
+ resp.dva_size = 0;
+ resp.mem_desc_id = 0;
+ return resp;
+ }
+ id = resp.mem_desc_id;
+ set_bit(id, dcp->memdesc_map);
+
+ memdesc = &dcp->memdesc[id];
+
+ memdesc->size = resp.dva_size;
+ memdesc->buf = dma_alloc_coherent(dcp->dev, memdesc->size,
+ &memdesc->dva, GFP_KERNEL);
+ if (!memdesc->buf) {
+ clear_bit(id, dcp->memdesc_map);
+ return (struct dcp_allocate_buffer_resp){ 0 };
+ }
+
+ dma_get_sgtable(dcp->dev, &memdesc->map, memdesc->buf, memdesc->dva,
+ memdesc->size);
+ resp.dva = memdesc->dva;
+
+ return resp;
+}
+
+static u8 dcpep_cb_release_mem_desc(struct apple_dcp *dcp, u32 *mem_desc_id)
+{
+ struct dcp_mem_descriptor *memdesc;
+ u32 id = *mem_desc_id;
+
+ if (id >= DCP_MAX_MAPPINGS) {
+ dev_warn(dcp->dev,
+ "unmap request for out of range mem_desc_id %u", id);
+ return 0;
+ }
+
+ if (!test_and_clear_bit(id, dcp->memdesc_map)) {
+ dev_warn(dcp->dev, "unmap request for unused mem_desc_id %u",
+ id);
+ return 0;
+ }
+
+ memdesc = &dcp->memdesc[id];
+ if (memdesc->buf) {
+ dma_free_coherent(dcp->dev, memdesc->size, memdesc->buf,
+ memdesc->dva);
+
+ memdesc->buf = NULL;
+ memset(&memdesc->map, 0, sizeof(memdesc->map));
+ } else {
+ memdesc->reg = 0;
+ }
+
+ memdesc->size = 0;
+
+ return 1;
+}
+
+/* Validate that the specified region is a display register */
+static bool is_disp_register(struct apple_dcp *dcp, u64 start, u64 end)
+{
+ int i;
+
+ for (i = 0; i < dcp->nr_disp_registers; ++i) {
+ struct resource *r = dcp->disp_registers[i];
+
+ if ((start >= r->start) && (end <= r->end))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Map contiguous physical memory into the DCP's address space. The firmware
+ * uses this to map the display registers we advertise in
+ * sr_map_device_memory_with_index, so we bounds-check against those to
+ * guard against a malicious coprocessor.
+ */
+static struct dcp_map_physical_resp
+dcpep_cb_map_physical(struct apple_dcp *dcp, struct dcp_map_physical_req *req)
+{
+ int size = ALIGN(req->size, 4096);
+ u32 id;
+
+ if (!is_disp_register(dcp, req->paddr, req->paddr + size - 1)) {
+ dev_err(dcp->dev, "refusing to map phys address %llx size %llx",
+ req->paddr, req->size);
+ return (struct dcp_map_physical_resp){};
+ }
+
+ id = find_first_zero_bit(dcp->memdesc_map, DCP_MAX_MAPPINGS);
+ if (id >= DCP_MAX_MAPPINGS) {
+ dev_warn(dcp->dev, "DCP overflowed mapping table, ignoring");
+ return (struct dcp_map_physical_resp){};
+ }
+ set_bit(id, dcp->memdesc_map);
+ dcp->memdesc[id].size = size;
+ dcp->memdesc[id].reg = req->paddr;
+
+ return (struct dcp_map_physical_resp){
+ .dva_size = size,
+ .mem_desc_id = id,
+ .dva = dma_map_resource(dcp->dev, req->paddr, size,
+ DMA_BIDIRECTIONAL, 0),
+ };
+}
+
+static u64 dcpep_cb_get_frequency(struct apple_dcp *dcp)
+{
+ return clk_get_rate(dcp->clk);
+}
+
+static struct dcp_map_reg_resp dcpep_cb_map_reg(struct apple_dcp *dcp,
+ struct dcp_map_reg_req *req)
+{
+ if (req->index >= dcp->nr_disp_registers) {
+ dev_warn(dcp->dev, "attempted to read invalid reg index %u",
+ req->index);
+
+ return (struct dcp_map_reg_resp){ .ret = 1 };
+ } else {
+ struct resource *rsrc = dcp->disp_registers[req->index];
+
+ return (struct dcp_map_reg_resp){
+ .addr = rsrc->start, .length = resource_size(rsrc)
+ };
+ }
+}
+
+static struct dcp_read_edt_data_resp
+dcpep_cb_read_edt_data(struct apple_dcp *dcp, struct dcp_read_edt_data_req *req)
+{
+ return (struct dcp_read_edt_data_resp){
+ .value[0] = req->value[0],
+ .ret = 0,
+ };
+}
+
+/* Chunked data transfer for property dictionaries */
+static u8 dcpep_cb_prop_start(struct apple_dcp *dcp, u32 *length)
+{
+ if (dcp->chunks.data != NULL) {
+ dev_warn(dcp->dev, "ignoring spurious transfer start\n");
+ return false;
+ }
+
+ dcp->chunks.length = *length;
+ dcp->chunks.data = devm_kzalloc(dcp->dev, *length, GFP_KERNEL);
+
+ if (!dcp->chunks.data) {
+ dev_warn(dcp->dev, "failed to allocate chunks\n");
+ return false;
+ }
+
+ return true;
+}
+
+static u8 dcpep_cb_prop_chunk(struct apple_dcp *dcp,
+ struct dcp_set_dcpav_prop_chunk_req *req)
+{
+ if (!dcp->chunks.data) {
+ dev_warn(dcp->dev, "ignoring spurious chunk\n");
+ return false;
+ }
+
+ if (req->offset + req->length > dcp->chunks.length) {
+ dev_warn(dcp->dev, "ignoring overflowing chunk\n");
+ return false;
+ }
+
+ memcpy(dcp->chunks.data + req->offset, req->data, req->length);
+ return true;
+}
+
+static bool dcpep_process_chunks(struct apple_dcp *dcp,
+ struct dcp_set_dcpav_prop_end_req *req)
+{
+ struct dcp_parse_ctx ctx;
+ int ret;
+
+ if (!dcp->chunks.data) {
+ dev_warn(dcp->dev, "ignoring spurious end\n");
+ return false;
+ }
+
+ ret = parse(dcp->chunks.data, dcp->chunks.length, &ctx);
+
+ if (ret) {
+ dev_warn(dcp->dev, "bad header on dcpav props\n");
+ return false;
+ }
+
+ if (!strcmp(req->key, "TimingElements")) {
+ dcp->modes = enumerate_modes(&ctx, &dcp->nr_modes,
+ dcp->width_mm, dcp->height_mm,
+ dcp->notch_height);
+
+ if (IS_ERR(dcp->modes)) {
+ dev_warn(dcp->dev, "failed to parse modes\n");
+ dcp->modes = NULL;
+ dcp->nr_modes = 0;
+ return false;
+ }
+ } else if (!strcmp(req->key, "DisplayAttributes")) {
+ ret = parse_display_attributes(&ctx, &dcp->width_mm,
+ &dcp->height_mm);
+
+ if (ret) {
+ dev_warn(dcp->dev, "failed to parse display attribs\n");
+ return false;
+ }
+
+ dcp_set_dimensions(dcp);
+ }
+
+ return true;
+}
+
+static u8 dcpep_cb_prop_end(struct apple_dcp *dcp,
+ struct dcp_set_dcpav_prop_end_req *req)
+{
+ u8 resp = dcpep_process_chunks(dcp, req);
+
+ /* Reset for the next transfer */
+ devm_kfree(dcp->dev, dcp->chunks.data);
+ dcp->chunks.data = NULL;
+
+ return resp;
+}
+
+/* Boot sequence */
+static void boot_done(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ struct dcp_cb_channel *ch = &dcp->ch_cb;
+ u8 *succ = ch->output[ch->depth - 1];
+ dev_dbg(dcp->dev, "boot done");
+
+ *succ = true;
+ dcp_ack(dcp, DCP_CONTEXT_CB);
+}
+
+static void boot_5(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ dcp_set_display_refresh_properties(dcp, false, boot_done, NULL);
+}
+
+static void boot_4(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ dcp_late_init_signal(dcp, false, boot_5, NULL);
+}
+
+static void boot_3(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ u32 v_true = true;
+
+ dcp_flush_supports_power(dcp, false, &v_true, boot_4, NULL);
+}
+
+static void boot_2(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ dcp_setup_video_limits(dcp, false, boot_3, NULL);
+}
+
+static void boot_1_5(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ dcp_create_default_fb(dcp, false, boot_2, NULL);
+}
+
+/* Use special function signature to defer the ACK */
+static bool dcpep_cb_boot_1(struct apple_dcp *dcp, int tag, void *out, void *in)
+{
+ trace_iomfb_callback(dcp, tag, __func__);
+ dcp_set_create_dfb(dcp, false, boot_1_5, NULL);
+ return false;
+}
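+
+/*
+ * The callbacks above chain the boot sequence one DCP call per step:
+ * dcpep_cb_boot_1 -> set_create_dfb -> create_default_fb ->
+ * setup_video_limits -> flush_supports_power(true) -> late_init_signal ->
+ * set_display_refresh_properties -> boot_done, which writes the deferred
+ * response and acks the callback that started the chain.
+ */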
+
+static struct dcp_rt_bandwidth dcpep_cb_rt_bandwidth(struct apple_dcp *dcp)
+{
+ if (dcp->disp_registers[5] && dcp->disp_registers[6])
+ return (struct dcp_rt_bandwidth){
+ .reg_scratch =
+ dcp->disp_registers[5]->start + REG_SCRATCH,
+ .reg_doorbell =
+ dcp->disp_registers[6]->start + REG_DOORBELL,
+ .doorbell_bit = REG_DOORBELL_BIT,
+
+ .padding[3] = 0x4, // XXX: required by 11.x firmware
+ };
+ else if (dcp->disp_registers[4])
+ return (struct dcp_rt_bandwidth){
+ .reg_scratch = dcp->disp_registers[4]->start +
+ REG_SCRATCH_T600X,
+ .reg_doorbell = 0,
+ .doorbell_bit = 0,
+ };
+ else
+ return (struct dcp_rt_bandwidth){
+ .reg_scratch = 0,
+ .reg_doorbell = 0,
+ .doorbell_bit = 0,
+ };
+}
+
+/* Callback to get the current time as milliseconds since the UNIX epoch */
+static u64 dcpep_cb_get_time(struct apple_dcp *dcp)
+{
+ return ktime_to_ms(ktime_get_real());
+}
+
+struct dcp_swap_cookie {
+ struct kref refcount;
+ struct completion done;
+ u32 swap_id;
+};
+
+static void release_swap_cookie(struct kref *ref)
+{
+ struct dcp_swap_cookie *cookie;
+ cookie = container_of(ref, struct dcp_swap_cookie, refcount);
+
+ kfree(cookie);
+}
+
+static void dcp_swap_cleared(struct apple_dcp *dcp, void *data, void *cookie)
+{
+ struct dcp_swap_submit_resp *resp = data;
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ if (cookie) {
+ struct dcp_swap_cookie *info = cookie;
+ complete(&info->done);
+ kref_put(&info->refcount, release_swap_cookie);
+ }
+
+ if (resp->ret) {
+ dev_err(dcp->dev, "swap_clear failed! status %u\n", resp->ret);
+ dcp_drm_crtc_vblank(dcp->crtc);
+ return;
+ }
+
+ while (!list_empty(&dcp->swapped_out_fbs)) {
+ struct dcp_fb_reference *entry;
+ entry = list_first_entry(&dcp->swapped_out_fbs,
+ struct dcp_fb_reference, head);
+ if (entry->fb)
+ drm_framebuffer_put(entry->fb);
+ list_del(&entry->head);
+ kfree(entry);
+ }
+}
+
+static void dcp_swap_clear_started(struct apple_dcp *dcp, void *data,
+ void *cookie)
+{
+ struct dcp_swap_start_resp *resp = data;
+ dev_dbg(dcp->dev, "%s swap_id: %u", __func__, resp->swap_id);
+ dcp->swap.swap.swap_id = resp->swap_id;
+
+ if (cookie) {
+ struct dcp_swap_cookie *info = cookie;
+ info->swap_id = resp->swap_id;
+ }
+
+ dcp_swap_submit(dcp, false, &dcp->swap, dcp_swap_cleared, cookie);
+}
+
+static void dcp_on_final(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ struct dcp_wait_cookie *wait = cookie;
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ if (wait) {
+ complete(&wait->done);
+ kref_put(&wait->refcount, release_wait_cookie);
+ }
+}
+
+static void dcp_on_set_parameter(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ struct dcp_set_parameter_dcp param = {
+ .param = 14,
+ .value = { 0 },
+ .count = 1,
+ };
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ dcp_set_parameter_dcp(dcp, false, &param, dcp_on_final, cookie);
+}
+
+void dcp_poweron(struct platform_device *pdev)
+{
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+ struct dcp_wait_cookie *cookie;
+ struct dcp_set_power_state_req req = {
+ .unklong = 1,
+ };
+ int ret;
+ u32 handle;
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+ if (!cookie)
+ return;
+
+ init_completion(&cookie->done);
+ kref_init(&cookie->refcount);
+ /* increase refcount to ensure the receiver has a reference */
+ kref_get(&cookie->refcount);
+
+ if (dcp->main_display) {
+ handle = 0;
+ dcp_set_display_device(dcp, false, &handle, dcp_on_final,
+ cookie);
+ } else {
+ handle = 2;
+ dcp_set_display_device(dcp, false, &handle,
+ dcp_on_set_parameter, cookie);
+ }
+ dcp_set_power_state(dcp, true, &req, NULL, NULL);
+
+ ret = wait_for_completion_timeout(&cookie->done, msecs_to_jiffies(500));
+
+ if (ret == 0)
+ dev_warn(dcp->dev, "wait for power timed out");
+
+ kref_put(&cookie->refcount, release_wait_cookie);
+}
+EXPORT_SYMBOL(dcp_poweron);
+
+static void complete_set_powerstate(struct apple_dcp *dcp, void *out,
+ void *cookie)
+{
+ struct dcp_wait_cookie *wait = cookie;
+
+ if (wait) {
+ complete(&wait->done);
+ kref_put(&wait->refcount, release_wait_cookie);
+ }
+}
+
+void dcp_poweroff(struct platform_device *pdev)
+{
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+ int ret, swap_id;
+ struct dcp_set_power_state_req power_req = {
+ .unklong = 0,
+ };
+ struct dcp_swap_cookie *cookie;
+ struct dcp_wait_cookie *poff_cookie;
+ struct dcp_swap_start_req swap_req = { 0 };
+
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+ if (!cookie)
+ return;
+ init_completion(&cookie->done);
+ kref_init(&cookie->refcount);
+ /* increase refcount to ensure the receiver has a reference */
+ kref_get(&cookie->refcount);
+
+ // clear surfaces
+ memset(&dcp->swap, 0, sizeof(dcp->swap));
+
+ dcp->swap.swap.swap_enabled = DCP_REMOVE_LAYERS | 0x7;
+ dcp->swap.swap.swap_completed = DCP_REMOVE_LAYERS | 0x7;
+ dcp->swap.swap.unk_10c = 0xFF000000;
+
+ for (int l = 0; l < SWAP_SURFACES; l++)
+ dcp->swap.surf_null[l] = true;
+
+ dcp_swap_start(dcp, false, &swap_req, dcp_swap_clear_started, cookie);
+
+ ret = wait_for_completion_timeout(&cookie->done, msecs_to_jiffies(50));
+ swap_id = cookie->swap_id;
+ kref_put(&cookie->refcount, release_swap_cookie);
+ if (ret <= 0) {
+ dcp->crashed = true;
+ return;
+ }
+
+ dev_dbg(dcp->dev, "%s: clear swap submitted: %u", __func__, swap_id);
+
+ poff_cookie = kzalloc(sizeof(*poff_cookie), GFP_KERNEL);
+ if (!poff_cookie)
+ return;
+ init_completion(&poff_cookie->done);
+ kref_init(&poff_cookie->refcount);
+ /* increase refcount to ensure the receiver has a reference */
+ kref_get(&poff_cookie->refcount);
+
+ dcp_set_power_state(dcp, false, &power_req, complete_set_powerstate,
+ poff_cookie);
+ ret = wait_for_completion_timeout(&poff_cookie->done,
+ msecs_to_jiffies(1000));
+
+ if (ret == 0)
+ dev_warn(dcp->dev, "setPowerState(0) timeout %u ms", 1000);
+ else if (ret > 0)
+ dev_dbg(dcp->dev,
+ "setPowerState(0) finished with %d ms to spare",
+ jiffies_to_msecs(ret));
+
+ kref_put(&poff_cookie->refcount, release_wait_cookie);
+ dev_dbg(dcp->dev, "%s: setPowerState(0) done", __func__);
+}
+EXPORT_SYMBOL(dcp_poweroff);
+
+/*
+ * Helper to send a DRM hotplug event. The DCP is accessed from a single
+ * (RTKit) thread. To handle hotplug callbacks, we need to call
+ * drm_kms_helper_hotplug_event, which does an atomic commit (via DCP) and
+ * waits for vblank (a DCP callback). That means we deadlock if we call from
+ * the RTKit thread! Instead, move the call to another thread via a workqueue.
+ */
+void dcp_hotplug(struct work_struct *work)
+{
+ struct apple_connector *connector;
+ struct drm_device *dev;
+ struct apple_dcp *dcp;
+
+ connector = container_of(work, struct apple_connector, hotplug_wq);
+ dev = connector->base.dev;
+
+ dcp = platform_get_drvdata(connector->dcp);
+ dev_info(dcp->dev, "%s: connected: %d", __func__, connector->connected);
+
+ /*
+ * DCP defers link training until we set a display mode. But we set
+ * display modes from atomic_flush, so userspace needs to trigger a
+ * flush, or the CRTC gets no signal.
+ */
+ if (!dcp->valid_mode && connector->connected) {
+ drm_connector_set_link_status_property(
+ &connector->base, DRM_MODE_LINK_STATUS_BAD);
+ }
+
+ if (dev && dev->registered)
+ drm_kms_helper_hotplug_event(dev);
+}
+EXPORT_SYMBOL_GPL(dcp_hotplug);
+
+static void dcpep_cb_hotplug(struct apple_dcp *dcp, u64 *connected)
+{
+ struct apple_connector *connector = dcp->connector;
+
+ /* DCP issues hotplug_gated callbacks after SetPowerState() calls on
+ * devices with a built-in display (MacBooks, iMacs). These must not
+ * result in connector state changes on the DRM side since some
+ * applications won't enable a CRTC whose connector is in the
+ * disconnected state; Weston after DPMS off is one example.
+ * dcp->main_display is true on devices with an integrated display,
+ * so ignore the hotplug_gated() callbacks there.
+ */
+ if (dcp->main_display)
+ return;
+
+ /* Hotplug invalidates mode. DRM doesn't always handle this. */
+ if (!(*connected)) {
+ dcp->valid_mode = false;
+ /* after an unplug, swaps will not complete until the next
+ * set_digital_out_mode */
+ schedule_work(&dcp->vblank_wq);
+ }
+
+ if (connector && connector->connected != !!(*connected)) {
+ connector->connected = !!(*connected);
+ dcp->valid_mode = false;
+ schedule_work(&connector->hotplug_wq);
+ }
+}
+
+static void
+dcpep_cb_swap_complete_intent_gated(struct apple_dcp *dcp,
+ struct dcp_swap_complete_intent_gated *info)
+{
+ trace_iomfb_swap_complete_intent_gated(dcp, info->swap_id,
+ info->width, info->height);
+}
+
+#define DCPEP_MAX_CB (1000)
+
+/*
+ * Define type-safe trampolines. Define typedefs to enforce type-safety on the
+ * input data (so if the types don't match, gcc errors out).
+ */
+
+#define TRAMPOLINE_VOID(func, handler) \
+ static bool func(struct apple_dcp *dcp, int tag, void *out, void *in) \
+ { \
+ trace_iomfb_callback(dcp, tag, #handler); \
+ handler(dcp); \
+ return true; \
+ }
+
+#define TRAMPOLINE_IN(func, handler, T_in) \
+ typedef void (*callback_##handler)(struct apple_dcp *, T_in *); \
+ \
+ static bool func(struct apple_dcp *dcp, int tag, void *out, void *in) \
+ { \
+ callback_##handler cb = handler; \
+ \
+ trace_iomfb_callback(dcp, tag, #handler); \
+ cb(dcp, in); \
+ return true; \
+ }
+
+#define TRAMPOLINE_INOUT(func, handler, T_in, T_out) \
+ typedef T_out (*callback_##handler)(struct apple_dcp *, T_in *); \
+ \
+ static bool func(struct apple_dcp *dcp, int tag, void *out, void *in) \
+ { \
+ T_out *typed_out = out; \
+ callback_##handler cb = handler; \
+ \
+ trace_iomfb_callback(dcp, tag, #handler); \
+ *typed_out = cb(dcp, in); \
+ return true; \
+ }
+
+#define TRAMPOLINE_OUT(func, handler, T_out) \
+ static bool func(struct apple_dcp *dcp, int tag, void *out, void *in) \
+ { \
+ T_out *typed_out = out; \
+ \
+ trace_iomfb_callback(dcp, tag, #handler); \
+ *typed_out = handler(dcp); \
+ return true; \
+ }
+
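+/*
+ * As an illustration, TRAMPOLINE_IN(trampoline_hotplug, dcpep_cb_hotplug,
+ * u64) below expands to a callback_dcpep_cb_hotplug typedef plus:
+ *
+ *   static bool trampoline_hotplug(struct apple_dcp *dcp, int tag,
+ *                                  void *out, void *in)
+ *   {
+ *           callback_dcpep_cb_hotplug cb = dcpep_cb_hotplug;
+ *
+ *           trace_iomfb_callback(dcp, tag, "dcpep_cb_hotplug");
+ *           cb(dcp, in);
+ *           return true;
+ *   }
+ *
+ * so a handler with a mismatched prototype fails to compile.
+ */
+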
+TRAMPOLINE_VOID(trampoline_nop, dcpep_cb_nop);
+TRAMPOLINE_OUT(trampoline_true, dcpep_cb_true, u8);
+TRAMPOLINE_OUT(trampoline_false, dcpep_cb_false, u8);
+TRAMPOLINE_OUT(trampoline_zero, dcpep_cb_zero, u32);
+TRAMPOLINE_IN(trampoline_swap_complete, dcpep_cb_swap_complete,
+ struct dc_swap_complete_resp);
+TRAMPOLINE_INOUT(trampoline_get_uint_prop, dcpep_cb_get_uint_prop,
+ struct dcp_get_uint_prop_req, struct dcp_get_uint_prop_resp);
+TRAMPOLINE_INOUT(trampoline_map_piodma, dcpep_cb_map_piodma,
+ struct dcp_map_buf_req, struct dcp_map_buf_resp);
+TRAMPOLINE_IN(trampoline_unmap_piodma, dcpep_cb_unmap_piodma,
+ struct dcp_unmap_buf_resp);
+TRAMPOLINE_INOUT(trampoline_allocate_buffer, dcpep_cb_allocate_buffer,
+ struct dcp_allocate_buffer_req,
+ struct dcp_allocate_buffer_resp);
+TRAMPOLINE_INOUT(trampoline_map_physical, dcpep_cb_map_physical,
+ struct dcp_map_physical_req, struct dcp_map_physical_resp);
+TRAMPOLINE_INOUT(trampoline_release_mem_desc, dcpep_cb_release_mem_desc, u32,
+ u8);
+TRAMPOLINE_INOUT(trampoline_map_reg, dcpep_cb_map_reg, struct dcp_map_reg_req,
+ struct dcp_map_reg_resp);
+TRAMPOLINE_INOUT(trampoline_read_edt_data, dcpep_cb_read_edt_data,
+ struct dcp_read_edt_data_req, struct dcp_read_edt_data_resp);
+TRAMPOLINE_INOUT(trampoline_prop_start, dcpep_cb_prop_start, u32, u8);
+TRAMPOLINE_INOUT(trampoline_prop_chunk, dcpep_cb_prop_chunk,
+ struct dcp_set_dcpav_prop_chunk_req, u8);
+TRAMPOLINE_INOUT(trampoline_prop_end, dcpep_cb_prop_end,
+ struct dcp_set_dcpav_prop_end_req, u8);
+TRAMPOLINE_OUT(trampoline_rt_bandwidth, dcpep_cb_rt_bandwidth,
+ struct dcp_rt_bandwidth);
+TRAMPOLINE_OUT(trampoline_get_frequency, dcpep_cb_get_frequency, u64);
+TRAMPOLINE_OUT(trampoline_get_time, dcpep_cb_get_time, u64);
+TRAMPOLINE_IN(trampoline_hotplug, dcpep_cb_hotplug, u64);
+TRAMPOLINE_IN(trampoline_swap_complete_intent_gated,
+ dcpep_cb_swap_complete_intent_gated,
+ struct dcp_swap_complete_intent_gated);
+
+bool (*const dcpep_cb_handlers[DCPEP_MAX_CB])(struct apple_dcp *, int, void *,
+ void *) = {
+ [0] = trampoline_true, /* did_boot_signal */
+ [1] = trampoline_true, /* did_power_on_signal */
+ [2] = trampoline_nop, /* will_power_off_signal */
+ [3] = trampoline_rt_bandwidth,
+ [100] = trampoline_nop, /* match_pmu_service */
+ [101] = trampoline_zero, /* get_display_default_stride */
+ [103] = trampoline_nop, /* set_boolean_property */
+ [106] = trampoline_nop, /* remove_property */
+ [107] = trampoline_true, /* create_provider_service */
+ [108] = trampoline_true, /* create_product_service */
+ [109] = trampoline_true, /* create_pmu_service */
+ [110] = trampoline_true, /* create_iomfb_service */
+ [111] = trampoline_false, /* create_backlight_service */
+ [116] = dcpep_cb_boot_1,
+ [117] = trampoline_false, /* is_dark_boot */
+ [118] = trampoline_false, /* is_dark_boot / is_waking_from_hibernate*/
+ [120] = trampoline_read_edt_data,
+ [122] = trampoline_prop_start,
+ [123] = trampoline_prop_chunk,
+ [124] = trampoline_prop_end,
+ [201] = trampoline_map_piodma,
+ [202] = trampoline_unmap_piodma,
+ [206] = trampoline_true, /* match_pmu_service_2 */
+ [207] = trampoline_true, /* match_backlight_service */
+ [208] = trampoline_get_time,
+ [211] = trampoline_nop, /* update_backlight_factor_prop */
+ [300] = trampoline_nop, /* pr_publish */
+ [401] = trampoline_get_uint_prop,
+ [404] = trampoline_nop, /* sr_set_uint_prop */
+ [406] = trampoline_nop, /* set_fx_prop */
+ [408] = trampoline_get_frequency,
+ [411] = trampoline_map_reg,
+ [413] = trampoline_true, /* sr_set_property_dict */
+ [414] = trampoline_true, /* sr_set_property_int */
+ [415] = trampoline_true, /* sr_set_property_bool */
+ [451] = trampoline_allocate_buffer,
+ [452] = trampoline_map_physical,
+ [456] = trampoline_release_mem_desc,
+ [552] = trampoline_true, /* set_property_dict_0 */
+ [561] = trampoline_true, /* set_property_dict */
+ [563] = trampoline_true, /* set_property_int */
+ [565] = trampoline_true, /* set_property_bool */
+ [567] = trampoline_true, /* set_property_str */
+ [574] = trampoline_zero, /* power_up_dart */
+ [576] = trampoline_hotplug,
+ [577] = trampoline_nop, /* powerstate_notify */
+ [582] = trampoline_true, /* create_default_fb_surface */
+ [589] = trampoline_swap_complete,
+ [591] = trampoline_swap_complete_intent_gated,
+ [593] = trampoline_nop, /* enable_backlight_message_ap_gated */
+ [598] = trampoline_nop, /* find_swap_function_gated */
+};
+
+static void dcpep_handle_cb(struct apple_dcp *dcp, enum dcp_context_id context,
+ void *data, u32 length)
+{
+ struct device *dev = dcp->dev;
+ struct dcp_packet_header *hdr = data;
+ void *in, *out;
+ int tag = dcp_parse_tag(hdr->tag);
+ struct dcp_cb_channel *ch = dcp_get_cb_channel(dcp, context);
+ u8 depth;
+
+ if (tag < 0 || tag >= DCPEP_MAX_CB || !dcpep_cb_handlers[tag]) {
+ dev_warn(dev, "received unknown callback %c%c%c%c\n",
+ hdr->tag[3], hdr->tag[2], hdr->tag[1], hdr->tag[0]);
+ return;
+ }
+
+ in = data + sizeof(*hdr);
+ out = in + hdr->in_len;
+
+ // TODO: verify that in_len and out_len match our prototypes
+ // for now just clear the out data to have at least consistent results
+ if (hdr->out_len)
+ memset(out, 0, hdr->out_len);
+
+ depth = dcp_push_depth(&ch->depth);
+ ch->output[depth] = out;
+
+ if (dcpep_cb_handlers[tag](dcp, tag, out, in))
+ dcp_ack(dcp, context);
+}
+
+static void dcpep_handle_ack(struct apple_dcp *dcp, enum dcp_context_id context,
+ void *data, u32 length)
+{
+ struct dcp_packet_header *header = data;
+ struct dcp_call_channel *ch = dcp_get_call_channel(dcp, context);
+ void *cookie;
+ dcp_callback_t cb;
+
+ if (!ch) {
+ dev_warn(dcp->dev, "ignoring ack on context %X\n", context);
+ return;
+ }
+
+ dcp_pop_depth(&ch->depth);
+
+ cb = ch->callbacks[ch->depth];
+ cookie = ch->cookies[ch->depth];
+
+ ch->callbacks[ch->depth] = NULL;
+ ch->cookies[ch->depth] = NULL;
+
+ if (cb)
+ cb(dcp, data + sizeof(*header) + header->in_len, cookie);
+}
+
+static void dcpep_got_msg(struct apple_dcp *dcp, u64 message)
+{
+ enum dcp_context_id ctx_id;
+ u16 offset;
+ u32 length;
+ int channel_offset;
+ void *data;
+
+ ctx_id = (message & DCPEP_CONTEXT_MASK) >> DCPEP_CONTEXT_SHIFT;
+ offset = (message & DCPEP_OFFSET_MASK) >> DCPEP_OFFSET_SHIFT;
+ length = (message >> DCPEP_LENGTH_SHIFT);
+
+ channel_offset = dcp_channel_offset(ctx_id);
+
+ if (channel_offset < 0) {
+ dev_warn(dcp->dev, "invalid context received %u", ctx_id);
+ return;
+ }
+
+ data = dcp->shmem + channel_offset + offset;
+
+ if (message & DCPEP_ACK)
+ dcpep_handle_ack(dcp, ctx_id, data, length);
+ else
+ dcpep_handle_cb(dcp, ctx_id, data, length);
+}
+
+/*
+ * Callback for swap requests. If a swap failed, we'll never get a swap
+ * complete event so we need to fake a vblank event early to avoid a hang.
+ */
+
+static void dcp_swapped(struct apple_dcp *dcp, void *data, void *cookie)
+{
+ struct dcp_swap_submit_resp *resp = data;
+
+ if (resp->ret) {
+ dev_err(dcp->dev, "swap failed! status %u\n", resp->ret);
+ dcp_drm_crtc_vblank(dcp->crtc);
+ return;
+ }
+
+ while (!list_empty(&dcp->swapped_out_fbs)) {
+ struct dcp_fb_reference *entry;
+ entry = list_first_entry(&dcp->swapped_out_fbs,
+ struct dcp_fb_reference, head);
+ if (entry->fb)
+ drm_framebuffer_put(entry->fb);
+ list_del(&entry->head);
+ kfree(entry);
+ }
+}
+
+static void dcp_swap_started(struct apple_dcp *dcp, void *data, void *cookie)
+{
+ struct dcp_swap_start_resp *resp = data;
+
+ dcp->swap.swap.swap_id = resp->swap_id;
+
+ trace_iomfb_swap_submit(dcp, resp->swap_id);
+ dcp_swap_submit(dcp, false, &dcp->swap, dcp_swapped, NULL);
+}
+
+/*
+ * DRM specifies rectangles as start and end coordinates. DCP specifies
+ * rectangles as a start coordinate and a width/height. Convert a DRM rectangle
+ * to a DCP rectangle.
+ */
+static struct dcp_rect drm_to_dcp_rect(struct drm_rect *rect)
+{
+ return (struct dcp_rect){ .x = rect->x1,
+ .y = rect->y1,
+ .w = drm_rect_width(rect),
+ .h = drm_rect_height(rect) };
+}
+
+static u32 drm_format_to_dcp(u32 drm)
+{
+ switch (drm) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ return fourcc_code('A', 'R', 'G', 'B');
+
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return fourcc_code('A', 'B', 'G', 'R');
+
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_XRGB2101010:
+ return fourcc_code('r', '0', '3', 'w');
+ }
+
+ pr_warn("DRM format %X not supported in DCP\n", drm);
+ return 0;
+}
+
+static u8 drm_format_to_colorspace(u32 drm)
+{
+ switch (drm) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return 1;
+
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_XRGB2101010:
+ return 2;
+ }
+
+ return 1;
+}
+
+int dcp_get_modes(struct drm_connector *connector)
+{
+ struct apple_connector *apple_connector = to_apple_connector(connector);
+ struct platform_device *pdev = apple_connector->dcp;
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode;
+ int i;
+
+ for (i = 0; i < dcp->nr_modes; ++i) {
+ mode = drm_mode_duplicate(dev, &dcp->modes[i].mode);
+
+ if (!mode) {
+ dev_err(dev->dev, "Failed to duplicate display mode\n");
+ return 0;
+ }
+
+ drm_mode_probed_add(connector, mode);
+ }
+
+ return dcp->nr_modes;
+}
+EXPORT_SYMBOL_GPL(dcp_get_modes);
+
+/* The user may own drm_display_mode, so we need to search for our copy */
+static struct dcp_display_mode *lookup_mode(struct apple_dcp *dcp,
+ struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < dcp->nr_modes; ++i) {
+ if (drm_mode_match(mode, &dcp->modes[i].mode,
+ DRM_MODE_MATCH_TIMINGS |
+ DRM_MODE_MATCH_CLOCK))
+ return &dcp->modes[i];
+ }
+
+ return NULL;
+}
+
+int dcp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct apple_connector *apple_connector = to_apple_connector(connector);
+ struct platform_device *pdev = apple_connector->dcp;
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+ return lookup_mode(dcp, mode) ? MODE_OK : MODE_BAD;
+}
+EXPORT_SYMBOL_GPL(dcp_mode_valid);
+
+/* Helpers to modeset and swap, used to flush */
+static void do_swap(struct apple_dcp *dcp, void *data, void *cookie)
+{
+ struct dcp_swap_start_req start_req = { 0 };
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ if (dcp->connector && dcp->connector->connected)
+ dcp_swap_start(dcp, false, &start_req, dcp_swap_started, NULL);
+ else
+ dcp_drm_crtc_vblank(dcp->crtc);
+}
+
+static void complete_set_digital_out_mode(struct apple_dcp *dcp, void *data,
+ void *cookie)
+{
+ struct dcp_wait_cookie *wait = cookie;
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ dcp->ignore_swap_complete = false;
+
+ if (wait) {
+ complete(&wait->done);
+ kref_put(&wait->refcount, release_wait_cookie);
+ }
+}
+
+void dcp_flush(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct platform_device *pdev = to_apple_crtc(crtc)->dcp;
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+ struct drm_plane *plane;
+ struct drm_plane_state *new_state, *old_state;
+ struct drm_crtc_state *crtc_state;
+ struct dcp_swap_submit_req *req = &dcp->swap;
+ int plane_idx, l;
+ int has_surface = 0;
+ bool modeset;
+ dev_dbg(dcp->dev, "%s", __func__);
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ modeset = drm_atomic_crtc_needs_modeset(crtc_state) || !dcp->valid_mode;
+
+ if (dcp_channel_busy(&dcp->ch_cmd)) {
+ dev_err(dcp->dev, "unexpected busy command channel");
+ /* HACK: issue a delayed vblank event to avoid timeouts in
+ * drm_atomic_helper_wait_for_vblanks().
+ */
+ schedule_work(&dcp->vblank_wq);
+ return;
+ }
+
+ /* Reset to defaults */
+ memset(req, 0, sizeof(*req));
+ for (l = 0; l < SWAP_SURFACES; l++)
+ req->surf_null[l] = true;
+
+ l = 0;
+ for_each_oldnew_plane_in_state(state, plane, old_state, new_state, plane_idx) {
+ struct drm_framebuffer *fb = new_state->fb;
+ struct drm_rect src_rect;
+ bool opaque = false;
+
+ /* skip planes not for this crtc */
+ if (old_state->crtc != crtc && new_state->crtc != crtc)
+ continue;
+
+ WARN_ON(l >= SWAP_SURFACES);
+
+ req->swap.swap_enabled |= BIT(l);
+
+ if (old_state->fb && fb != old_state->fb) {
+ /*
+ * Race condition between a framebuffer unbind getting
+ * swapped out and GEM unreferencing a framebuffer. If
+ * we lose the race, the display gets IOVA faults and
+ * the DCP crashes. We need to extend the lifetime of
+ * the drm_framebuffer (and hence the GEM object) until
+ * after we get a swap complete for the swap unbinding
+ * it.
+ */
+ struct dcp_fb_reference *entry =
+ kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (entry) {
+ entry->fb = old_state->fb;
+ list_add_tail(&entry->head,
+ &dcp->swapped_out_fbs);
+ }
+ drm_framebuffer_get(old_state->fb);
+ }
+
+ if (!new_state->fb) {
+ if (old_state->fb)
+ req->swap.swap_enabled |= DCP_REMOVE_LAYERS;
+
+ l += 1;
+ continue;
+ }
+ req->surf_null[l] = false;
+ has_surface = 1;
+
+ if (!fb->format->has_alpha ||
+ new_state->plane->type == DRM_PLANE_TYPE_PRIMARY)
+ opaque = true;
+ drm_rect_fp_to_int(&src_rect, &new_state->src);
+
+ req->swap.src_rect[l] = drm_to_dcp_rect(&src_rect);
+ req->swap.dst_rect[l] = drm_to_dcp_rect(&new_state->dst);
+
+ if (dcp->notch_height > 0)
+ req->swap.dst_rect[l].y += dcp->notch_height;
+
+ req->surf_iova[l] = drm_fb_dma_get_gem_addr(fb, new_state, 0);
+
+ req->surf[l] = (struct dcp_surface){
+ .opaque = opaque,
+ .format = drm_format_to_dcp(fb->format->format),
+ .xfer_func = 13,
+ .colorspace = drm_format_to_colorspace(fb->format->format),
+ .stride = fb->pitches[0],
+ .width = fb->width,
+ .height = fb->height,
+ .buf_size = fb->height * fb->pitches[0],
+ .surface_id = req->swap.surf_ids[l],
+
+ /* Only used for compressed or multiplanar surfaces */
+ .pix_size = 1,
+ .pel_w = 1,
+ .pel_h = 1,
+ .has_comp = 1,
+ .has_planes = 1,
+ };
+
+ l += 1;
+ }
+
+ /* These fields should be set together */
+ req->swap.swap_completed = req->swap.swap_enabled;
+
+ if (modeset) {
+ struct dcp_display_mode *mode;
+ struct dcp_wait_cookie *cookie;
+ int ret;
+
+ mode = lookup_mode(dcp, &crtc_state->mode);
+ if (!mode) {
+ dev_warn(dcp->dev, "no match for " DRM_MODE_FMT,
+ DRM_MODE_ARG(&crtc_state->mode));
+ schedule_work(&dcp->vblank_wq);
+ return;
+ }
+
+ dev_info(dcp->dev, "set_digital_out_mode(color:%d timing:%d)",
+ mode->color_mode_id, mode->timing_mode_id);
+ dcp->mode = (struct dcp_set_digital_out_mode_req){
+ .color_mode_id = mode->color_mode_id,
+ .timing_mode_id = mode->timing_mode_id
+ };
+
+ cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+ if (!cookie) {
+ schedule_work(&dcp->vblank_wq);
+ return;
+ }
+
+ init_completion(&cookie->done);
+ kref_init(&cookie->refcount);
+ /* increase refcount to ensure the receiver has a reference */
+ kref_get(&cookie->refcount);
+
+ dcp_set_digital_out_mode(dcp, false, &dcp->mode,
+ complete_set_digital_out_mode, cookie);
+
+ dev_dbg(dcp->dev, "%s - wait for modeset", __func__);
+ ret = wait_for_completion_timeout(&cookie->done,
+ msecs_to_jiffies(500));
+
+ kref_put(&cookie->refcount, release_wait_cookie);
+
+ if (ret == 0) {
+ dev_dbg(dcp->dev, "set_digital_out_mode 200 ms");
+ schedule_work(&dcp->vblank_wq);
+ return;
+ } else if (ret > 0) {
+ dev_dbg(dcp->dev,
+ "set_digital_out_mode finished with %d to spare",
+ jiffies_to_msecs(ret));
+ }
+
+ dcp->valid_mode = true;
+ }
+
+ if (!has_surface) {
+ if (crtc_state->enable && crtc_state->active &&
+ !crtc_state->planes_changed) {
+ schedule_work(&dcp->vblank_wq);
+ return;
+ }
+
+ req->clear = 1;
+ }
+ do_swap(dcp, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(dcp_flush);
+
+bool dcp_is_initialized(struct platform_device *pdev)
+{
+ struct apple_dcp *dcp = platform_get_drvdata(pdev);
+
+ return dcp->active;
+}
+EXPORT_SYMBOL_GPL(dcp_is_initialized);
+
+static void res_is_main_display(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ struct apple_connector *connector;
+ int result = *(int *)out;
+ dev_info(dcp->dev, "DCP is_main_display: %d\n", result);
+
+ dcp->main_display = result != 0;
+
+ dcp->active = true;
+
+ connector = dcp->connector;
+ if (connector) {
+ connector->connected = dcp->nr_modes > 0;
+ schedule_work(&connector->hotplug_wq);
+ }
+}
+
+static void init_3(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ dcp_is_main_display(dcp, false, res_is_main_display, NULL);
+}
+
+static void init_2(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ dcp_first_client_open(dcp, false, init_3, NULL);
+}
+
+static void init_1(struct apple_dcp *dcp, void *out, void *cookie)
+{
+ u32 val = 0;
+ dcp_enable_disable_video_power_savings(dcp, false, &val, init_2, NULL);
+}
+
+static void dcp_started(struct apple_dcp *dcp, void *data, void *cookie)
+{
+ dev_info(dcp->dev, "DCP booted\n");
+
+ init_1(dcp, data, cookie);
+}
+
+void iomfb_recv_msg(struct apple_dcp *dcp, u64 message)
+{
+ enum dcpep_type type = (message >> DCPEP_TYPE_SHIFT) & DCPEP_TYPE_MASK;
+
+ if (type == DCPEP_TYPE_INITIALIZED)
+ dcp_start_signal(dcp, false, dcp_started, NULL);
+ else if (type == DCPEP_TYPE_MESSAGE)
+ dcpep_got_msg(dcp, message);
+ else
+ dev_warn(dcp->dev, "Ignoring unknown message %llx\n", message);
+}
+
+int iomfb_start_rtkit(struct apple_dcp *dcp)
+{
+ dma_addr_t shmem_iova;
+ apple_rtkit_start_ep(dcp->rtk, IOMFB_ENDPOINT);
+
+ dcp->shmem = dma_alloc_coherent(dcp->dev, DCP_SHMEM_SIZE, &shmem_iova,
+ GFP_KERNEL);
+ if (!dcp->shmem)
+ return -ENOMEM;
+
+ shmem_iova |= dcp->asc_dram_mask;
+ dcp_send_message(dcp, IOMFB_ENDPOINT, dcpep_set_shmem(shmem_iova));
+
+ return 0;
+}
+
+void iomfb_shutdown(struct apple_dcp *dcp)
+{
+ struct dcp_set_power_state_req req = {
+ /* defaults are ok */
+ };
+
+ /* We're going down */
+ dcp->active = false;
+ dcp->valid_mode = false;
+
+ dcp_set_power_state(dcp, false, &req, NULL, NULL);
+}
diff --git a/drivers/gpu/drm/apple/iomfb.h b/drivers/gpu/drm/apple/iomfb.h
new file mode 100644
index 000000000000..f9ead84c21f2
--- /dev/null
+++ b/drivers/gpu/drm/apple/iomfb.h
@@ -0,0 +1,406 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#ifndef __APPLE_DCPEP_H__
+#define __APPLE_DCPEP_H__
+
+#include <linux/types.h>
+
+/* Fixed size of shared memory between DCP and AP */
+#define DCP_SHMEM_SIZE 0x100000
+
+/* DCP message contexts */
+enum dcp_context_id {
+ /* Callback */
+ DCP_CONTEXT_CB = 0,
+
+ /* Command */
+ DCP_CONTEXT_CMD = 2,
+
+ /* Asynchronous */
+ DCP_CONTEXT_ASYNC = 3,
+
+ /* Out-of-band callback */
+ DCP_CONTEXT_OOBCB = 4,
+
+ /* Out-of-band command */
+ DCP_CONTEXT_OOBCMD = 6,
+
+ DCP_NUM_CONTEXTS
+};
+
+/* RTKit endpoint message types */
+enum dcpep_type {
+ /* Set shared memory */
+ DCPEP_TYPE_SET_SHMEM = 0,
+
+ /* DCP is initialized */
+ DCPEP_TYPE_INITIALIZED = 1,
+
+ /* Remote procedure call */
+ DCPEP_TYPE_MESSAGE = 2,
+};
+
+/* Message */
+#define DCPEP_TYPE_SHIFT (0)
+#define DCPEP_TYPE_MASK GENMASK(1, 0)
+#define DCPEP_ACK BIT_ULL(6)
+#define DCPEP_CONTEXT_SHIFT (8)
+#define DCPEP_CONTEXT_MASK GENMASK(11, 8)
+#define DCPEP_OFFSET_SHIFT (16)
+#define DCPEP_OFFSET_MASK GENMASK(31, 16)
+#define DCPEP_LENGTH_SHIFT (32)
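+
+/*
+ * Putting the fields above together, a message word decodes as:
+ *   bits  1:0   type
+ *   bit   6     ack flag
+ *   bits 11:8   context id
+ *   bits 31:16  offset within the context's shmem channel
+ *   bits 63:32  length
+ */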
+
+/* Set shmem */
+#define DCPEP_DVA_SHIFT (16)
+#define DCPEP_FLAG_SHIFT (4)
+#define DCPEP_FLAG_VALUE (4)
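+/*
+ * NOTE: the encoding here is an inference from these shifts; presumably
+ * dcpep_set_shmem() packs (dva << DCPEP_DVA_SHIFT) |
+ * (DCPEP_FLAG_VALUE << DCPEP_FLAG_SHIFT) into a DCPEP_TYPE_SET_SHMEM
+ * message. The meaning of the flag value is unknown.
+ */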
+
+struct dcp_packet_header {
+ char tag[4];
+ u32 in_len;
+ u32 out_len;
+} __packed;
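+
+/* A packet is the header immediately followed by in_len bytes of input
+ * data and then out_len bytes of output data; dcpep_handle_cb() locates
+ * the two payloads this way.
+ */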
+
+#define DCP_IS_NULL(ptr) ((ptr) ? 1 : 0)
+#define DCP_PACKET_ALIGNMENT (0x40)
+
+/* Structures used in v12.0 firmware */
+
+#define SWAP_SURFACES 4
+#define MAX_PLANES 3
+
+struct dcp_iouserclient {
+ /* Handle for the IOUserClient. macOS sets this to a kernel VA. */
+ u64 handle;
+ u32 unk;
+ u8 flag1;
+ u8 flag2;
+ u8 padding[2];
+} __packed;
+
+struct dcp_rect {
+ u32 x;
+ u32 y;
+ u32 w;
+ u32 h;
+} __packed;
+
+/*
+ * Set in the swap_{enabled,completed} field to remove missing
+ * layers. Without this flag, the DCP will assume missing layers have
+ * not changed since the previous frame and will preserve their
+ * content.
+ */
+#define DCP_REMOVE_LAYERS BIT(31)
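+
+/*
+ * dcp_poweroff() in iomfb.c, for instance, ORs DCP_REMOVE_LAYERS into both
+ * swap_enabled and swap_completed while marking every surface null, so the
+ * clearing swap drops all layers before power is cut.
+ */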
+
+struct dcp_swap {
+ u64 ts1;
+ u64 ts2;
+ u64 unk_10[6];
+ u64 flags1;
+ u64 flags2;
+
+ u32 swap_id;
+
+ u32 surf_ids[SWAP_SURFACES];
+ struct dcp_rect src_rect[SWAP_SURFACES];
+ u32 surf_flags[SWAP_SURFACES];
+ u32 surf_unk[SWAP_SURFACES];
+ struct dcp_rect dst_rect[SWAP_SURFACES];
+ u32 swap_enabled;
+ u32 swap_completed;
+
+ u32 unk_10c;
+ u8 unk_110[0x1b8];
+ u32 unk_2c8;
+ u8 unk_2cc[0x14];
+ u32 unk_2e0;
+ u8 unk_2e4[0x3c];
+} __packed;
+
+/* Information describing a plane of a planar compressed surface */
+struct dcp_plane_info {
+ u32 width;
+ u32 height;
+ u32 base;
+ u32 offset;
+ u32 stride;
+ u32 size;
+ u16 tile_size;
+ u8 tile_w;
+ u8 tile_h;
+ u32 unk[13];
+} __packed;
+
+struct dcp_component_types {
+ u8 count;
+ u8 types[7];
+} __packed;
+
+/* Information describing a surface */
+struct dcp_surface {
+ u8 is_tiled;
+ u8 unk_1;
+ u8 opaque; /* ignore alpha; also required for YUV overlays */
+ u32 plane_cnt;
+ u32 plane_cnt2;
+ u32 format; /* DCP fourcc */
+ u32 unk_f;
+ u8 xfer_func;
+ u8 colorspace;
+ u32 stride;
+ u16 pix_size;
+ u8 pel_w;
+ u8 pel_h;
+ u32 offset;
+ u32 width;
+ u32 height;
+ u32 buf_size;
+ u32 unk_2d;
+ u32 unk_31;
+ u32 surface_id;
+ struct dcp_component_types comp_types[MAX_PLANES];
+ u64 has_comp;
+ struct dcp_plane_info planes[MAX_PLANES];
+ u64 has_planes;
+ u32 compression_info[MAX_PLANES][13];
+ u64 has_compr_info;
+ u64 unk_1f5;
+ u8 padding[7];
+} __packed;
+
+struct dcp_rt_bandwidth {
+ u64 unk1;
+ u64 reg_scratch;
+ u64 reg_doorbell;
+ u32 unk2;
+ u32 doorbell_bit;
+ u32 padding[7];
+} __packed;
+
+/* Method calls */
+
+enum dcpep_method {
+ dcpep_late_init_signal,
+ dcpep_setup_video_limits,
+ dcpep_set_create_dfb,
+ dcpep_start_signal,
+ dcpep_swap_start,
+ dcpep_swap_submit,
+ dcpep_set_display_device,
+ dcpep_set_digital_out_mode,
+ dcpep_create_default_fb,
+ dcpep_set_display_refresh_properties,
+ dcpep_flush_supports_power,
+ dcpep_set_power_state,
+ dcpep_first_client_open,
+ dcpep_update_notify_clients_dcp,
+ dcpep_set_parameter_dcp,
+ dcpep_enable_disable_video_power_savings,
+ dcpep_is_main_display,
+ dcpep_num_methods
+};
+
+struct dcp_method_entry {
+ const char *name;
+ char tag[4];
+};
+
+/* Prototypes */
+
+struct dcp_set_digital_out_mode_req {
+ u32 color_mode_id;
+ u32 timing_mode_id;
+} __packed;
+
+struct dcp_map_buf_req {
+ u64 buffer;
+ u8 unk;
+ u8 buf_null;
+ u8 vaddr_null;
+ u8 dva_null;
+} __packed;
+
+struct dcp_map_buf_resp {
+ u64 vaddr;
+ u64 dva;
+ u32 ret;
+} __packed;
+
+struct dcp_unmap_buf_resp {
+ u64 buffer;
+ u64 vaddr;
+ u64 dva;
+ u8 unk;
+ u8 buf_null;
+} __packed;
+
+struct dcp_allocate_buffer_req {
+ u32 unk0;
+ u64 size;
+ u32 unk2;
+ u8 paddr_null;
+ u8 dva_null;
+ u8 dva_size_null;
+ u8 padding;
+} __packed;
+
+struct dcp_allocate_buffer_resp {
+ u64 paddr;
+ u64 dva;
+ u64 dva_size;
+ u32 mem_desc_id;
+} __packed;
+
+struct dcp_map_physical_req {
+ u64 paddr;
+ u64 size;
+ u32 flags;
+ u8 dva_null;
+ u8 dva_size_null;
+ u8 padding[2];
+} __packed;
+
+struct dcp_map_physical_resp {
+ u64 dva;
+ u64 dva_size;
+ u32 mem_desc_id;
+} __packed;
+
+struct dcp_map_reg_req {
+ char obj[4];
+ u32 index;
+ u32 flags;
+ u8 addr_null;
+ u8 length_null;
+ u8 padding[2];
+} __packed;
+
+struct dcp_map_reg_resp {
+ u64 addr;
+ u64 length;
+ u32 ret;
+} __packed;
+
+struct dcp_swap_start_req {
+ u32 swap_id;
+ struct dcp_iouserclient client;
+ u8 swap_id_null;
+ u8 client_null;
+ u8 padding[2];
+} __packed;
+
+struct dcp_swap_start_resp {
+ u32 swap_id;
+ struct dcp_iouserclient client;
+ u32 ret;
+} __packed;
+
+struct dcp_swap_submit_req {
+ struct dcp_swap swap;
+ struct dcp_surface surf[SWAP_SURFACES];
+ u64 surf_iova[SWAP_SURFACES];
+ u8 unkbool;
+ u64 unkdouble;
+ u32 clear; // or maybe switch to default fb?
+ u8 swap_null;
+ u8 surf_null[SWAP_SURFACES];
+ u8 unkoutbool_null;
+ u8 padding[1];
+} __packed;
+
+struct dcp_swap_submit_resp {
+ u8 unkoutbool;
+ u32 ret;
+ u8 padding[3];
+} __packed;
+
+struct dc_swap_complete_resp {
+ u32 swap_id;
+ u8 unkbool;
+ u64 swap_data;
+ u8 swap_info[0x6c4];
+ u32 unkint;
+ u8 swap_info_null;
+} __packed;
+
+struct dcp_get_uint_prop_req {
+ char obj[4];
+ char key[0x40];
+ u64 value;
+ u8 value_null;
+ u8 padding[3];
+} __packed;
+
+struct dcp_get_uint_prop_resp {
+ u64 value;
+ u8 ret;
+ u8 padding[3];
+} __packed;
+
+struct dcp_set_power_state_req {
+ u64 unklong;
+ u8 unkbool;
+ u8 unkint_null;
+ u8 padding[2];
+} __packed;
+
+struct dcp_set_power_state_resp {
+ u32 unkint;
+ u32 ret;
+} __packed;
+
+struct dcp_set_dcpav_prop_chunk_req {
+ char data[0x1000];
+ u32 offset;
+ u32 length;
+} __packed;
+
+struct dcp_set_dcpav_prop_end_req {
+ char key[0x40];
+} __packed;
+
+struct dcp_update_notify_clients_dcp {
+ u32 client_0;
+ u32 client_1;
+ u32 client_2;
+ u32 client_3;
+ u32 client_4;
+ u32 client_5;
+ u32 client_6;
+ u32 client_7;
+ u32 client_8;
+ u32 client_9;
+ u32 client_a;
+ u32 client_b;
+ u32 client_c;
+ u32 client_d;
+} __packed;
+
+struct dcp_set_parameter_dcp {
+ u32 param;
+ u32 value[8];
+ u32 count;
+} __packed;
+
+struct dcp_swap_complete_intent_gated {
+ u32 swap_id;
+ u8 unkBool;
+ u32 unkInt;
+ u32 width;
+ u32 height;
+} __packed;
+
+struct dcp_read_edt_data_req {
+ char key[0x40];
+ u32 count;
+ u32 value[8];
+} __packed;
+
+struct dcp_read_edt_data_resp {
+ u32 value[8];
+ u8 ret;
+} __packed;
+
+#endif
diff --git a/drivers/gpu/drm/apple/parser.c b/drivers/gpu/drm/apple/parser.c
new file mode 100644
index 000000000000..910b0e57a35a
--- /dev/null
+++ b/drivers/gpu/drm/apple/parser.c
@@ -0,0 +1,459 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/math.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include "parser.h"
+
+#define DCP_PARSE_HEADER 0xd3
+
+enum dcp_parse_type {
+ DCP_TYPE_DICTIONARY = 1,
+ DCP_TYPE_ARRAY = 2,
+ DCP_TYPE_INT64 = 4,
+ DCP_TYPE_STRING = 9,
+ DCP_TYPE_BLOB = 10,
+ DCP_TYPE_BOOL = 11
+};
+
+struct dcp_parse_tag {
+ unsigned int size : 24;
+ enum dcp_parse_type type : 5;
+ unsigned int padding : 2;
+ bool last : 1;
+} __packed;
+
+static void *parse_bytes(struct dcp_parse_ctx *ctx, size_t count)
+{
+ void *ptr = ctx->blob + ctx->pos;
+
+ if (ctx->pos + count > ctx->len)
+ return ERR_PTR(-EINVAL);
+
+ ctx->pos += count;
+ return ptr;
+}
+
+static u32 *parse_u32(struct dcp_parse_ctx *ctx)
+{
+ return parse_bytes(ctx, sizeof(u32));
+}
+
+static struct dcp_parse_tag *parse_tag(struct dcp_parse_ctx *ctx)
+{
+ struct dcp_parse_tag *tag;
+
+ /* Align to 32-bits */
+ ctx->pos = round_up(ctx->pos, 4);
+
+ tag = parse_bytes(ctx, sizeof(struct dcp_parse_tag));
+
+ if (IS_ERR(tag))
+ return tag;
+
+ if (tag->padding)
+ return ERR_PTR(-EINVAL);
+
+ return tag;
+}
+
+static struct dcp_parse_tag *parse_tag_of_type(struct dcp_parse_ctx *ctx,
+ enum dcp_parse_type type)
+{
+ struct dcp_parse_tag *tag = parse_tag(ctx);
+
+ if (IS_ERR(tag))
+ return tag;
+
+ if (tag->type != type)
+ return ERR_PTR(-EINVAL);
+
+ return tag;
+}
+
+static int skip(struct dcp_parse_ctx *handle)
+{
+ struct dcp_parse_tag *tag = parse_tag(handle);
+ int ret = 0;
+ int i;
+
+ if (IS_ERR(tag))
+ return PTR_ERR(tag);
+
+ switch (tag->type) {
+ case DCP_TYPE_DICTIONARY:
+ for (i = 0; i < tag->size; ++i) {
+ ret |= skip(handle); /* key */
+ ret |= skip(handle); /* value */
+ }
+
+ return ret;
+
+ case DCP_TYPE_ARRAY:
+ for (i = 0; i < tag->size; ++i)
+ ret |= skip(handle);
+
+ return ret;
+
+ case DCP_TYPE_INT64:
+ handle->pos += sizeof(s64);
+ return 0;
+
+ case DCP_TYPE_STRING:
+ case DCP_TYPE_BLOB:
+ handle->pos += tag->size;
+ return 0;
+
+ case DCP_TYPE_BOOL:
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Caller must free the result */
+static char *parse_string(struct dcp_parse_ctx *handle)
+{
+ struct dcp_parse_tag *tag = parse_tag_of_type(handle, DCP_TYPE_STRING);
+ const char *in;
+ char *out;
+
+ if (IS_ERR(tag))
+ return (void *)tag;
+
+ in = parse_bytes(handle, tag->size);
+ if (IS_ERR(in))
+ return (void *)in;
+
+ out = kmalloc(tag->size + 1, GFP_KERNEL);
+ if (!out)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(out, in, tag->size);
+ out[tag->size] = '\0';
+ return out;
+}
+
+static int parse_int(struct dcp_parse_ctx *handle, s64 *value)
+{
+ void *tag = parse_tag_of_type(handle, DCP_TYPE_INT64);
+ s64 *in;
+
+ if (IS_ERR(tag))
+ return PTR_ERR(tag);
+
+ in = parse_bytes(handle, sizeof(s64));
+
+ if (IS_ERR(in))
+ return PTR_ERR(in);
+
+ memcpy(value, in, sizeof(*value));
+ return 0;
+}
+
+static int parse_bool(struct dcp_parse_ctx *handle, bool *b)
+{
+ struct dcp_parse_tag *tag = parse_tag_of_type(handle, DCP_TYPE_BOOL);
+
+ if (IS_ERR(tag))
+ return PTR_ERR(tag);
+
+ *b = !!tag->size;
+ return 0;
+}
+
+struct iterator {
+ struct dcp_parse_ctx *handle;
+ u32 idx, len;
+};
+
+int iterator_begin(struct dcp_parse_ctx *handle, struct iterator *it, bool dict)
+{
+ struct dcp_parse_tag *tag;
+ enum dcp_parse_type type = dict ? DCP_TYPE_DICTIONARY : DCP_TYPE_ARRAY;
+
+ *it = (struct iterator) {
+ .handle = handle,
+ .idx = 0
+ };
+
+ tag = parse_tag_of_type(it->handle, type);
+ if (IS_ERR(tag))
+ return PTR_ERR(tag);
+
+ it->len = tag->size;
+ return 0;
+}
+
+#define dcp_parse_foreach_in_array(handle, it) \
+ for (iterator_begin(handle, &it, false); it.idx < it.len; ++it.idx)
+#define dcp_parse_foreach_in_dict(handle, it) \
+ for (iterator_begin(handle, &it, true); it.idx < it.len; ++it.idx)
+
+int parse(void *blob, size_t size, struct dcp_parse_ctx *ctx)
+{
+ u32 *header;
+
+ *ctx = (struct dcp_parse_ctx) {
+ .blob = blob,
+ .len = size,
+ .pos = 0,
+ };
+
+ header = parse_u32(ctx);
+ if (IS_ERR(header))
+ return PTR_ERR(header);
+
+ if (*header != DCP_PARSE_HEADER)
+ return -EINVAL;
+
+ return 0;
+}
+
+struct dimension {
+ s64 total, front_porch, sync_width, active;
+ s64 precise_sync_rate;
+};
+
+static int parse_dimension(struct dcp_parse_ctx *handle, struct dimension *dim)
+{
+ struct iterator it;
+ int ret = 0;
+
+ dcp_parse_foreach_in_dict(handle, it) {
+ char *key = parse_string(it.handle);
+
+ if (IS_ERR(key))
+ ret = PTR_ERR(key);
+ else if (!strcmp(key, "Active"))
+ ret = parse_int(it.handle, &dim->active);
+ else if (!strcmp(key, "Total"))
+ ret = parse_int(it.handle, &dim->total);
+ else if (!strcmp(key, "FrontPorch"))
+ ret = parse_int(it.handle, &dim->front_porch);
+ else if (!strcmp(key, "SyncWidth"))
+ ret = parse_int(it.handle, &dim->sync_width);
+ else if (!strcmp(key, "PreciseSyncRate"))
+ ret = parse_int(it.handle, &dim->precise_sync_rate);
+ else
+ skip(it.handle);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int parse_color_modes(struct dcp_parse_ctx *handle, s64 *best_id)
+{
+ struct iterator outer_it;
+ int ret = 0;
+ s64 best_score = -1;
+
+ *best_id = -1;
+
+ dcp_parse_foreach_in_array(handle, outer_it) {
+ struct iterator it;
+ s64 score = -1, id = -1;
+
+ dcp_parse_foreach_in_dict(handle, it) {
+ char *key = parse_string(it.handle);
+
+ if (IS_ERR(key))
+ ret = PTR_ERR(key);
+ else if (!strcmp(key, "Score"))
+ ret = parse_int(it.handle, &score);
+ else if (!strcmp(key, "ID"))
+ ret = parse_int(it.handle, &id);
+ else
+ skip(it.handle);
+
+ if (ret)
+ return ret;
+ }
+
+ /* Skip partial entries */
+ if (score < 0 || id < 0)
+ continue;
+
+ if (score > best_score) {
+ best_score = score;
+ *best_id = id;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Calculate the pixel clock for a mode given the 16:16 fixed-point refresh
+ * rate. The pixel clock is the refresh rate times the pixel count. DRM
+ * specifies the clock in kHz. The intermediate result may overflow a u32, so
+ * use a u64 where required.
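+ *
+ * For example, a standard 4K60 timing (htotal = 4400, vtotal = 2250,
+ * precise_sync_rate = 60 << 16) gives
+ * (4400 * 2250 * (60 << 16)) >> 16 = 594000000 Hz, returned as 594000 kHz.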
+ */
+static u32 calculate_clock(struct dimension *horiz, struct dimension *vert)
+{
+ u32 pixels = horiz->total * vert->total;
+ u64 clock = mul_u32_u32(pixels, vert->precise_sync_rate);
+
+ return DIV_ROUND_CLOSEST_ULL(clock >> 16, 1000);
+}
+
+static int parse_mode(struct dcp_parse_ctx *handle,
+ struct dcp_display_mode *out, s64 *score, int width_mm,
+ int height_mm, unsigned notch_height)
+{
+ int ret = 0;
+ struct iterator it;
+ struct dimension horiz, vert;
+ s64 id = -1;
+ s64 best_color_mode = -1;
+ bool is_virtual = false;
+ struct drm_display_mode *mode = &out->mode;
+
+ dcp_parse_foreach_in_dict(handle, it) {
+ char *key = parse_string(it.handle);
+
+ if (IS_ERR(key))
+ ret = PTR_ERR(key);
+ else if (!strcmp(key, "HorizontalAttributes"))
+ ret = parse_dimension(it.handle, &horiz);
+ else if (!strcmp(key, "VerticalAttributes"))
+ ret = parse_dimension(it.handle, &vert);
+ else if (!strcmp(key, "ColorModes"))
+ ret = parse_color_modes(it.handle, &best_color_mode);
+ else if (!strcmp(key, "ID"))
+ ret = parse_int(it.handle, &id);
+ else if (!strcmp(key, "IsVirtual"))
+ ret = parse_bool(it.handle, &is_virtual);
+ else if (!strcmp(key, "Score"))
+ ret = parse_int(it.handle, score);
+ else
+ skip(it.handle);
+
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Reject modes without valid color mode.
+ */
+ if (best_color_mode < 0)
+ return -EINVAL;
+
+ /*
+ * We need to skip virtual modes. In some cases, virtual modes are "too
+ * big" for the monitor and can cause breakage. It is unclear why the
+ * DCP reports these modes at all. Treat as a recoverable error.
+ */
+ if (is_virtual)
+ return -EINVAL;
+
+ vert.active -= notch_height;
+ vert.sync_width += notch_height;
+
+ /* From here we must succeed. Start filling out the mode. */
+ *mode = (struct drm_display_mode) {
+ .type = DRM_MODE_TYPE_DRIVER,
+ .clock = calculate_clock(&horiz, &vert),
+
+ .vdisplay = vert.active,
+ .vsync_start = vert.active + vert.front_porch,
+ .vsync_end = vert.active + vert.front_porch + vert.sync_width,
+ .vtotal = vert.total,
+
+ .hdisplay = horiz.active,
+ .hsync_start = horiz.active + horiz.front_porch,
+ .hsync_end = horiz.active + horiz.front_porch +
+ horiz.sync_width,
+ .htotal = horiz.total,
+
+ .width_mm = width_mm,
+ .height_mm = height_mm,
+ };
+
+ drm_mode_set_name(mode);
+
+ out->timing_mode_id = id;
+ out->color_mode_id = best_color_mode;
+
+ return 0;
+}
+
+struct dcp_display_mode *enumerate_modes(struct dcp_parse_ctx *handle,
+ unsigned int *count, int width_mm,
+ int height_mm, unsigned notch_height)
+{
+ struct iterator it;
+ int ret;
+ struct dcp_display_mode *mode, *modes;
+ struct dcp_display_mode *best_mode = NULL;
+ s64 score, best_score = -1;
+
+ ret = iterator_begin(handle, &it, false);
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* Start with a worst case allocation */
+ modes = kmalloc_array(it.len, sizeof(*modes), GFP_KERNEL);
+ *count = 0;
+
+ if (!modes)
+ return ERR_PTR(-ENOMEM);
+
+ for (; it.idx < it.len; ++it.idx) {
+ mode = &modes[*count];
+ ret = parse_mode(it.handle, mode, &score, width_mm, height_mm, notch_height);
+
+ /* Errors for a single mode are recoverable -- just skip it. */
+ if (ret)
+ continue;
+
+ /* Process a successful mode */
+ (*count)++;
+
+ if (score > best_score) {
+ best_score = score;
+ best_mode = mode;
+ }
+ }
+
+ if (best_mode != NULL)
+ best_mode->mode.type |= DRM_MODE_TYPE_PREFERRED;
+
+ return modes;
+}
+
+int parse_display_attributes(struct dcp_parse_ctx *handle, int *width_mm,
+ int *height_mm)
+{
+ int ret = 0;
+ struct iterator it;
+ s64 width_cm = 0, height_cm = 0;
+
+ dcp_parse_foreach_in_dict(handle, it) {
+ char *key = parse_string(it.handle);
+
+ if (IS_ERR(key))
+ ret = PTR_ERR(key);
+ else if (!strcmp(key, "MaxHorizontalImageSize"))
+ ret = parse_int(it.handle, &width_cm);
+ else if (!strcmp(key, "MaxVerticalImageSize"))
+ ret = parse_int(it.handle, &height_cm);
+ else
+ skip(it.handle);
+
+ if (ret)
+ return ret;
+ }
+
+ /* 1cm = 10mm */
+ *width_mm = 10 * width_cm;
+ *height_mm = 10 * height_cm;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/apple/parser.h b/drivers/gpu/drm/apple/parser.h
new file mode 100644
index 000000000000..a2d479258ed0
--- /dev/null
+++ b/drivers/gpu/drm/apple/parser.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io> */
+
+#ifndef __APPLE_DCP_PARSER_H__
+#define __APPLE_DCP_PARSER_H__
+
+/* For mode parsing */
+#include <drm/drm_modes.h>
+
+struct dcp_parse_ctx {
+ void *blob;
+ u32 pos, len;
+};
+
+/*
+ * Represents a single display mode. These mode objects are populated at
+ * runtime based on the TimingElements dictionary sent by the DCP.
+ */
+struct dcp_display_mode {
+ struct drm_display_mode mode;
+ u32 color_mode_id;
+ u32 timing_mode_id;
+};
+
+int parse(void *blob, size_t size, struct dcp_parse_ctx *ctx);
+struct dcp_display_mode *enumerate_modes(struct dcp_parse_ctx *handle,
+ unsigned int *count, int width_mm,
+ int height_mm, unsigned notch_height);
+int parse_display_attributes(struct dcp_parse_ctx *handle, int *width_mm,
+ int *height_mm);
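+
+/*
+ * Typical usage, as a sketch (error handling elided; the blob is the
+ * TimingElements dictionary mentioned above):
+ *
+ *   struct dcp_parse_ctx ctx;
+ *   unsigned int count;
+ *   struct dcp_display_mode *modes;
+ *
+ *   if (!parse(blob, size, &ctx))
+ *           modes = enumerate_modes(&ctx, &count, width_mm, height_mm,
+ *                                   notch_height);
+ */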
+
+#endif
diff --git a/drivers/gpu/drm/apple/trace.c b/drivers/gpu/drm/apple/trace.c
new file mode 100644
index 000000000000..6f40d5a583df
--- /dev/null
+++ b/drivers/gpu/drm/apple/trace.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Tracepoints for Apple DCP driver
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/gpu/drm/apple/trace.h b/drivers/gpu/drm/apple/trace.h
new file mode 100644
index 000000000000..d6a4742fcf47
--- /dev/null
+++ b/drivers/gpu/drm/apple/trace.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (C) The Asahi Linux Contributors */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dcp
+
+#if !defined(_TRACE_DCP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DCP_H
+
+#include "dcp-internal.h"
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#define show_dcp_endpoint(ep) \
+ __print_symbolic(ep, { SYSTEM_ENDPOINT, "system" }, \
+ { TEST_ENDPOINT, "test" }, \
+ { DCP_EXPERT_ENDPOINT, "dcpexpert" }, \
+ { DISP0_ENDPOINT, "disp0" }, \
+ { DPTX_ENDPOINT, "dptxport" }, \
+ { HDCP_ENDPOINT, "hdcp" }, \
+ { REMOTE_ALLOC_ENDPOINT, "remotealloc" }, \
+ { IOMFB_ENDPOINT, "iomfb" })
+
+TRACE_EVENT(dcp_recv_msg,
+ TP_PROTO(struct apple_dcp *dcp, u8 endpoint, u64 message),
+ TP_ARGS(dcp, endpoint, message),
+
+ TP_STRUCT__entry(__string(devname, dev_name(dcp->dev))
+ __field(u8, endpoint)
+ __field(u64, message)),
+
+ TP_fast_assign(__assign_str(devname, dev_name(dcp->dev));
+ __entry->endpoint = endpoint;
+ __entry->message = message;),
+
+ TP_printk("%s: endpoint 0x%x (%s): received message 0x%016llx",
+ __get_str(devname), __entry->endpoint,
+ show_dcp_endpoint(__entry->endpoint), __entry->message));
+
+TRACE_EVENT(dcp_send_msg,
+ TP_PROTO(struct apple_dcp *dcp, u8 endpoint, u64 message),
+ TP_ARGS(dcp, endpoint, message),
+
+ TP_STRUCT__entry(__string(devname, dev_name(dcp->dev))
+ __field(u8, endpoint)
+ __field(u64, message)),
+
+ TP_fast_assign(__assign_str(devname, dev_name(dcp->dev));
+ __entry->endpoint = endpoint;
+ __entry->message = message;),
+
+ TP_printk("%s: endpoint 0x%x (%s): will send message 0x%016llx",
+ __get_str(devname), __entry->endpoint,
+ show_dcp_endpoint(__entry->endpoint), __entry->message));
+
+TRACE_EVENT(iomfb_callback,
+ TP_PROTO(struct apple_dcp *dcp, int tag, const char *name),
+ TP_ARGS(dcp, tag, name),
+
+ TP_STRUCT__entry(
+ __string(devname, dev_name(dcp->dev))
+ __field(int, tag)
+ __field(const char *, name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(devname, dev_name(dcp->dev));
+ __entry->tag = tag; __entry->name = name;
+ ),
+
+ TP_printk("%s: Callback D%03d %s", __get_str(devname), __entry->tag,
+ __entry->name));
+
+TRACE_EVENT(iomfb_push,
+ TP_PROTO(struct apple_dcp *dcp,
+ const struct dcp_method_entry *method, int context,
+ int offset, int depth),
+ TP_ARGS(dcp, method, context, offset, depth),
+
+ TP_STRUCT__entry(
+ __string(devname, dev_name(dcp->dev))
+ __string(name, method->name)
+ __field(int, context)
+ __field(int, offset)
+ __field(int, depth)),
+
+ TP_fast_assign(
+ __assign_str(devname, dev_name(dcp->dev));
+ __assign_str(name, method->name);
+ __entry->context = context; __entry->offset = offset;
+ __entry->depth = depth;
+ ),
+
+ TP_printk("%s: Method %s: context %u, offset %u, depth %u",
+ __get_str(devname), __get_str(name), __entry->context,
+ __entry->offset, __entry->depth));
+
+TRACE_EVENT(iomfb_swap_submit,
+ TP_PROTO(struct apple_dcp *dcp, u32 swap_id),
+ TP_ARGS(dcp, swap_id),
+ TP_STRUCT__entry(
+ __field(u64, dcp)
+ __field(u32, swap_id)
+ ),
+ TP_fast_assign(
+ __entry->dcp = (u64)dcp;
+ __entry->swap_id = swap_id;
+ ),
+ TP_printk("dcp=%llx, swap_id=%d",
+ __entry->dcp,
+ __entry->swap_id)
+);
+
+TRACE_EVENT(iomfb_swap_complete,
+ TP_PROTO(struct apple_dcp *dcp, u32 swap_id),
+ TP_ARGS(dcp, swap_id),
+ TP_STRUCT__entry(
+ __field(u64, dcp)
+ __field(u32, swap_id)
+ ),
+ TP_fast_assign(
+ __entry->dcp = (u64)dcp;
+ __entry->swap_id = swap_id;
+ ),
+ TP_printk("dcp=%llx, swap_id=%d",
+ __entry->dcp,
+ __entry->swap_id
+ )
+);
+
+TRACE_EVENT(iomfb_swap_complete_intent_gated,
+ TP_PROTO(struct apple_dcp *dcp, u32 swap_id, u32 width, u32 height),
+ TP_ARGS(dcp, swap_id, width, height),
+ TP_STRUCT__entry(
+ __field(u64, dcp)
+ __field(u32, swap_id)
+ __field(u32, width)
+ __field(u32, height)
+ ),
+ TP_fast_assign(
+ __entry->dcp = (u64)dcp;
+ __entry->swap_id = swap_id;
+ __entry->height = height;
+ __entry->width = width;
+ ),
+ TP_printk("dcp=%llx, swap_id=%u %ux%u",
+ __entry->dcp,
+ __entry->swap_id,
+ __entry->width,
+ __entry->height
+ )
+);
+
+#endif /* _TRACE_DCP_H */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 565957264875..f64b0aa0095f 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -72,6 +72,7 @@ config DRM_SIMPLEDRM
select APERTURE_HELPERS
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
+ select BACKLIGHT_CLASS_DEVICE
help
DRM driver for simple platform-provided framebuffers.
@@ -83,6 +84,13 @@ config DRM_SIMPLEDRM
On x86 BIOS or UEFI systems, you should also select SYSFB_SIMPLEFB
to use UEFI and VESA framebuffers.
+config DRM_SIMPLEDRM_BACKLIGHT
+ tristate "Backlight support for simpledrm"
+ depends on DRM_SIMPLEDRM && !DRM_APPLE
+ select BACKLIGHT_CLASS_DEVICE
+ help
+ Enable backlight support for simpledrm.
+
config TINYDRM_HX8357D
tristate "DRM support for HX8357D display panels"
depends on DRM && SPI
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index 18489779fb8a..cc0d9d31a5f1 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
+#if defined CONFIG_DRM_SIMPLEDRM_BACKLIGHT
+#include <linux/backlight.h>
+#endif
#include <linux/clk.h>
#include <linux/of_clk.h>
#include <linux/minmax.h>
@@ -216,6 +219,10 @@ struct simpledrm_device {
struct drm_crtc crtc;
struct drm_encoder encoder;
struct drm_connector connector;
+#if defined CONFIG_DRM_SIMPLEDRM_BACKLIGHT
+ /* backlight */
+ struct backlight_device *backlight;
+#endif
};
static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev)
@@ -460,8 +467,6 @@ static const uint32_t simpledrm_primary_plane_formats[] = {
//DRM_FORMAT_XRGB1555,
//DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGB888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_ARGB2101010,
};
static const uint64_t simpledrm_primary_plane_format_modifiers[] = {
@@ -558,6 +563,28 @@ static int simpledrm_crtc_helper_atomic_check(struct drm_crtc *crtc,
return drm_atomic_add_affected_planes(new_state, crtc);
}
+#if defined CONFIG_DRM_SIMPLEDRM_BACKLIGHT
+static void simpledrm_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
+
+ if (sdev->backlight)
+ backlight_enable(sdev->backlight);
+}
+
+static void simpledrm_crtc_helper_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
+
+ if (sdev->backlight)
+ backlight_disable(sdev->backlight);
+}
+#endif
+
/*
* The CRTC is always enabled. Screen updates are performed by
* the primary plane's atomic_update function. Disabling clears
@@ -566,6 +593,10 @@ static int simpledrm_crtc_helper_atomic_check(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs simpledrm_crtc_helper_funcs = {
.mode_valid = simpledrm_crtc_helper_mode_valid,
.atomic_check = simpledrm_crtc_helper_atomic_check,
+#if defined CONFIG_DRM_SIMPLEDRM_BACKLIGHT
+ .atomic_enable = simpledrm_crtc_helper_atomic_enable,
+ .atomic_disable = simpledrm_crtc_helper_atomic_disable,
+#endif
};
static const struct drm_crtc_funcs simpledrm_crtc_funcs = {
@@ -655,6 +686,11 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
* Hardware settings
*/
+#if defined CONFIG_DRM_SIMPLEDRM_BACKLIGHT
+ sdev->backlight = devm_of_find_backlight(&pdev->dev);
+ if (IS_ERR(sdev->backlight))
+ sdev->backlight = NULL;
+#endif
ret = simpledrm_device_init_clocks(sdev);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 185a077d59cd..2197006033c4 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -122,7 +122,7 @@ config HID_APPLE
tristate "Apple {i,Power,Mac}Books"
depends on LEDS_CLASS
depends on NEW_LEDS
- default !EXPERT
+ default !EXPERT || SPI_HID_APPLE
help
Support for some Apple devices which less or more break
HID specification.
@@ -648,11 +648,13 @@ config LOGIWHEELS_FF
config HID_MAGICMOUSE
tristate "Apple Magic Mouse/Trackpad multi-touch support"
+ default SPI_HID_APPLE
help
Support for the Apple Magic Mouse/Trackpad multi-touch.
Say Y here if you want support for the multi-touch features of the
- Apple Wireless "Magic" Mouse and the Apple Wireless "Magic" Trackpad.
+ Apple Wireless "Magic" Mouse, the Apple Wireless "Magic" Trackpad and
+ the Force Touch trackpads found in MacBooks from 2015 onwards.
config HID_MALTRON
tristate "Maltron L90 keyboard"
@@ -1005,7 +1007,7 @@ config HID_SONY
* Guitar Hero PS3 and PC guitar dongles
config SONY_FF
- bool "Sony PS2/3/4 accessories force feedback support"
+ bool "Sony PS2/3/4 accessories force feedback support"
depends on HID_SONY
select INPUT_FF_MEMLESS
help
@@ -1290,4 +1292,8 @@ source "drivers/hid/amd-sfh-hid/Kconfig"
source "drivers/hid/surface-hid/Kconfig"
+source "drivers/hid/spi-hid/Kconfig"
+
+source "drivers/hid/dockchannel-hid/Kconfig"
+
endmenu
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index e8014c1a2f8b..2fb1a40657ae 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -164,3 +164,7 @@ obj-$(INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ish-hid/
obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/
obj-$(CONFIG_SURFACE_HID_CORE) += surface-hid/
+
+obj-$(CONFIG_SPI_HID_APPLE_CORE) += spi-hid/
+
+obj-$(CONFIG_HID_DOCKCHANNEL) += dockchannel-hid/
diff --git a/drivers/hid/dockchannel-hid/Kconfig b/drivers/hid/dockchannel-hid/Kconfig
new file mode 100644
index 000000000000..8a81d551a83d
--- /dev/null
+++ b/drivers/hid/dockchannel-hid/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+menu "DockChannel HID support"
+ depends on APPLE_DOCKCHANNEL
+
+config HID_DOCKCHANNEL
+ tristate "HID over DockChannel transport layer for Apple Silicon SoCs"
+ default ARCH_APPLE
+ depends on APPLE_DOCKCHANNEL && INPUT && OF && HID
+ help
+ Say Y here if you use an M2 or later Apple Silicon based laptop.
+ The keyboard and touchpad are HID based devices connected via the
+ proprietary DockChannel interface.
+
+endmenu
diff --git a/drivers/hid/dockchannel-hid/Makefile b/drivers/hid/dockchannel-hid/Makefile
new file mode 100644
index 000000000000..7dba766b047f
--- /dev/null
+++ b/drivers/hid/dockchannel-hid/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+#
+# Makefile for DockChannel HID transport drivers
+#
+
+obj-$(CONFIG_HID_DOCKCHANNEL) += dockchannel-hid.o
diff --git a/drivers/hid/dockchannel-hid/dockchannel-hid.c b/drivers/hid/dockchannel-hid/dockchannel-hid.c
new file mode 100644
index 000000000000..d9cec1276001
--- /dev/null
+++ b/drivers/hid/dockchannel-hid/dockchannel-hid.c
@@ -0,0 +1,1152 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Apple DockChannel HID transport driver
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+#include <asm/unaligned.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/gpio/consumer.h>
+#include <linux/hid.h>
+#include <linux/slab.h>
+#include <linux/soc/apple/dockchannel.h>
+#include <linux/of.h>
+#include "../hid-ids.h"
+
+#define COMMAND_TIMEOUT_MS 1000
+#define START_TIMEOUT_MS 2000
+
+#define MAX_INTERFACES 16
+
+/* Data + checksum */
+#define MAX_PKT_SIZE (0xffff + 4)
+
+#define DCHID_CHANNEL_CMD 0x11
+#define DCHID_CHANNEL_REPORT 0x12
+
+struct dchid_hdr {
+ u8 hdr_len;
+ u8 channel;
+ __le16 length;
+ u8 seq;
+ u8 iface;
+ __le16 pad;
+} __packed;
+
+#define IFACE_COMM 0
+
+#define FLAGS_GROUP GENMASK(7, 6)
+#define FLAGS_REQ GENMASK(5, 0)
+
+#define GROUP_INPUT 0
+#define GROUP_OUTPUT 1
+#define GROUP_CMD 2
+
+#define REQ_SET_REPORT 0
+#define REQ_GET_REPORT 1
+
+struct dchid_subhdr {
+ u8 flags;
+ u8 unk;
+ __le16 length;
+ __le32 retcode;
+} __packed;
+
+#define EVENT_GPIO_CMD 0xa0
+#define EVENT_INIT 0xf0
+#define EVENT_READY 0xf1
+
+struct dchid_init_hdr {
+ u8 type;
+ u8 unk1;
+ u8 unk2;
+ u8 iface;
+ char name[16];
+} __packed;
+
+#define INIT_HID_DESCRIPTOR 0
+#define INIT_GPIO_REQUEST 1
+#define INIT_TERMINATOR 2
+
+#define CMD_RESET_INTERFACE 0x40
+#define CMD_SEND_FIRMWARE 0x95
+#define CMD_ENABLE_INTERFACE 0xb4
+#define CMD_ACK_GPIO_CMD 0xa1
+
+struct dchid_init_block_hdr {
+ __le16 type;
+ __le16 subtype;
+ __le16 length;
+} __packed;
+
+#define MAX_GPIO_NAME 32
+
+struct dchid_gpio_request {
+ __le16 unk;
+ __le16 id;
+ char name[MAX_GPIO_NAME];
+} __packed;
+
+struct dchid_gpio_cmd {
+ u8 type;
+ u8 iface;
+ u8 gpio;
+ u8 unk;
+ u8 cmd;
+} __packed;
+
+struct dchid_gpio_ack {
+ u8 type;
+ __le32 retcode;
+ u8 cmd[];
+} __packed;
+
+#define STM_REPORT_ID 0x10
+#define STM_REPORT_SERIAL 0x11
+#define STM_REPORT_KEYBTYPE 0x14
+
+#define KEYBOARD_TYPE_ANSI 0
+#define KEYBOARD_TYPE_ISO 1
+#define KEYBOARD_TYPE_JIS 2
+
+struct dchid_stm_id {
+ u8 unk;
+ __le16 vendor_id;
+ __le16 product_id;
+ __le16 version_number;
+ u8 unk2;
+ u8 unk3;
+ u8 keyboard_type;
+ u8 serial_length;
+ /* Serial follows, but we grab it with a different report. */
+} __packed;
+
+#define FW_MAGIC 0x46444948
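+/* FW_MAGIC is "HIDF" when its bytes are read as little-endian ASCII */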
+#define FW_VER 1
+
+struct fw_header {
+ __le32 magic;
+ __le32 version;
+ __le32 hdr_length;
+ __le32 data_length;
+ __le32 iface_offset;
+} __packed;
+
+struct dchid_work {
+ struct work_struct work;
+ struct dchid_iface *iface;
+
+ struct dchid_hdr hdr;
+ u8 data[];
+};
+
+struct dchid_iface {
+ struct dockchannel_hid *dchid;
+ struct hid_device *hid;
+ struct workqueue_struct *wq;
+
+ bool creating;
+ struct work_struct create_work;
+
+ int index;
+ const char *name;
+ const struct device_node *of_node;
+
+ uint8_t tx_seq;
+ bool deferred;
+ bool starting;
+ bool open;
+ struct completion ready;
+
+ void *hid_desc;
+ size_t hid_desc_len;
+
+ struct gpio_desc *gpio;
+ int gpio_id;
+
+ struct mutex out_mutex;
+ u32 out_flags;
+ int out_report;
+ u32 retcode;
+ void *resp_buf;
+ size_t resp_size;
+ struct completion out_complete;
+};
+
+struct dockchannel_hid {
+ struct device *dev;
+ struct dockchannel *dc;
+ struct device_link *helper_link;
+
+ bool id_ready;
+ struct dchid_stm_id device_id;
+ char serial[64];
+
+ struct dchid_iface *comm;
+ struct dchid_iface *ifaces[MAX_INTERFACES];
+
+ u8 pkt_buf[MAX_PKT_SIZE];
+
+ /* Workqueue to asynchronously create HID devices */
+ struct workqueue_struct *new_iface_wq;
+};
+
+static struct dchid_iface *
+dchid_get_interface(struct dockchannel_hid *dchid, int index, const char *name)
+{
+ struct dchid_iface *iface;
+
+ if (index >= MAX_INTERFACES) {
+ dev_err(dchid->dev, "Interface index %d out of range\n", index);
+ return NULL;
+ }
+
+ if (dchid->ifaces[index])
+ return dchid->ifaces[index];
+
+ iface = devm_kzalloc(dchid->dev, sizeof(struct dchid_iface), GFP_KERNEL);
+ if (!iface)
+ return NULL;
+
+ iface->index = index;
+ iface->name = devm_kstrdup(dchid->dev, name, GFP_KERNEL);
+ iface->dchid = dchid;
+ iface->out_report = -1;
+ init_completion(&iface->out_complete);
+ init_completion(&iface->ready);
+ mutex_init(&iface->out_mutex);
+ iface->wq = alloc_ordered_workqueue("dchid-%s", WQ_MEM_RECLAIM, iface->name);
+ if (!iface->wq)
+ return NULL;
+
+ /* Comm is not a HID subdevice */
+ if (!strcmp(name, "comm")) {
+ dchid->ifaces[index] = iface;
+ return iface;
+ }
+
+ iface->of_node = of_get_child_by_name(dchid->dev->of_node, name);
+ if (!iface->of_node) {
+ dev_warn(dchid->dev, "No OF node for subdevice %s, ignoring.", name);
+ return NULL;
+ }
+
+ dchid->ifaces[index] = iface;
+ return iface;
+}
+
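+/*
+ * Packets carry a trailing 32-bit checksum chosen so that the little-endian
+ * 32-bit words of the whole packet, checksum included, sum to 0xffffffff;
+ * dchid_send() below builds it by subtracting each chunk's word sum from
+ * 0xffffffff.
+ */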
+static u32 dchid_checksum(void *p, size_t length)
+{
+ u32 sum = 0;
+
+ while (length >= 4) {
+ sum += get_unaligned_le32(p);
+ p += 4;
+ length -= 4;
+ }
+
+ WARN_ON_ONCE(length);
+ return sum;
+}
+
+static int dchid_send(struct dchid_iface *iface, u32 flags, void *msg, size_t size)
+{
+ u32 checksum = 0xffffffff;
+ size_t wsize = round_down(size, 4);
+ size_t tsize = size - wsize;
+ int ret;
+ struct {
+ struct dchid_hdr hdr;
+ struct dchid_subhdr sub;
+ } __packed h;
+
+ memset(&h, 0, sizeof(h));
+ h.hdr.hdr_len = sizeof(h.hdr);
+ h.hdr.channel = DCHID_CHANNEL_CMD;
+ h.hdr.length = round_up(size, 4) + sizeof(h.sub);
+ h.hdr.seq = iface->tx_seq;
+ h.hdr.iface = iface->index;
+ h.sub.flags = flags;
+ h.sub.length = size;
+
+ ret = dockchannel_send(iface->dchid->dc, &h, sizeof(h));
+ if (ret < 0)
+ return ret;
+ checksum -= dchid_checksum(&h, sizeof(h));
+
+ ret = dockchannel_send(iface->dchid->dc, msg, wsize);
+ if (ret < 0)
+ return ret;
+ checksum -= dchid_checksum(msg, wsize);
+
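+ /* Zero-pad an unaligned tail to a full word so it can be checksummed. */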
+ if (tsize) {
+ u8 tail[4] = {0, 0, 0, 0};
+
+ memcpy(tail, msg + wsize, tsize);
+ ret = dockchannel_send(iface->dchid->dc, tail, sizeof(tail));
+ if (ret < 0)
+ return ret;
+ checksum -= dchid_checksum(tail, sizeof(tail));
+ }
+
+ ret = dockchannel_send(iface->dchid->dc, &checksum, sizeof(checksum));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
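+/*
+ * Send a command and wait for its ACK on the command channel. Only one
+ * command may be in flight per interface (out_mutex); dchid_handle_ack()
+ * matches the reply against tx_seq and the report ID before completing.
+ */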
+static int dchid_cmd(struct dchid_iface *iface, u32 type, u32 req,
+ void *data, size_t size, void *resp_buf, size_t resp_size)
+{
+ int ret;
+ int report_id = *(u8*)data;
+
+ mutex_lock(&iface->out_mutex);
+
+ WARN_ON(iface->out_report != -1);
+ iface->out_report = report_id;
+ iface->out_flags = FIELD_PREP(FLAGS_GROUP, type) | FIELD_PREP(FLAGS_REQ, req);
+ iface->resp_buf = resp_buf;
+ iface->resp_size = resp_size;
+ reinit_completion(&iface->out_complete);
+
+ ret = dchid_send(iface, iface->out_flags, data, size);
+ if (ret < 0)
+ goto done;
+
+ if (!wait_for_completion_timeout(&iface->out_complete, msecs_to_jiffies(COMMAND_TIMEOUT_MS))) {
+ dev_err(iface->dchid->dev, "output report 0x%x to iface %d (%s) timed out\n",
+ report_id, iface->index, iface->name);
+ ret = -ETIMEDOUT;
+ goto done;
+ }
+
+ ret = iface->resp_size;
+ if (iface->retcode) {
+ dev_err(iface->dchid->dev,
+ "output report 0x%x to iface %d (%s) failed with err 0x%x\n",
+ report_id, iface->index, iface->name, iface->retcode);
+ ret = -EIO;
+ }
+
+done:
+ iface->tx_seq++;
+ iface->out_report = -1;
+ iface->out_flags = 0;
+ iface->resp_buf = NULL;
+ iface->resp_size = 0;
+ mutex_unlock(&iface->out_mutex);
+ return ret;
+}
+
+static int dchid_comm_cmd(struct dockchannel_hid *dchid, void *cmd, size_t size)
+{
+ return dchid_cmd(dchid->comm, GROUP_CMD, REQ_SET_REPORT, cmd, size, NULL, 0);
+}
+
+static int dchid_enable_interface(struct dchid_iface *iface)
+{
+ u8 msg[] = { CMD_ENABLE_INTERFACE, iface->index };
+
+ return dchid_comm_cmd(iface->dchid, msg, sizeof(msg));
+}
+
+static int dchid_reset_interface(struct dchid_iface *iface, int state)
+{
+ u8 msg[] = { CMD_RESET_INTERFACE, 1, iface->index, state };
+
+ return dchid_comm_cmd(iface->dchid, msg, sizeof(msg));
+}
+
+static int dchid_send_firmware(struct dchid_iface *iface, void *firmware, size_t size)
+{
+ struct {
+ u8 cmd;
+ u8 unk1;
+ u8 unk2;
+ u8 iface;
+ u64 addr;
+ u32 size;
+ } __packed msg = {
+ .cmd = CMD_SEND_FIRMWARE,
+ .unk1 = 2,
+ .unk2 = 0,
+ .iface = iface->index,
+ .size = size,
+ };
+ dma_addr_t addr;
+ void *buf = dmam_alloc_coherent(iface->dchid->dev, size, &addr, GFP_KERNEL);
+
+ if (IS_ERR_OR_NULL(buf))
+ return buf ? PTR_ERR(buf) : -ENOMEM;
+
+ msg.addr = addr;
+ memcpy(buf, firmware, size);
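+ /* Make sure the firmware is in memory before the coprocessor is pointed at it. */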
+ wmb();
+
+ return dchid_comm_cmd(iface->dchid, &msg, sizeof(msg));
+}
+
+static int dchid_get_firmware(struct dchid_iface *iface, void **firmware, size_t *size)
+{
+ int ret;
+ const char *fw_name;
+ const struct firmware *fw;
+ struct fw_header *hdr;
+ u8 *fw_data;
+
+ ret = of_property_read_string(iface->of_node, "firmware-name", &fw_name);
+ if (ret) {
+ /* Firmware is only for some devices */
+ *firmware = NULL;
+ *size = 0;
+ return 0;
+ }
+
+ ret = request_firmware(&fw, fw_name, iface->dchid->dev);
+ if (ret)
+ return ret;
+
+ hdr = (struct fw_header *)fw->data;
+
+ if (hdr->magic != FW_MAGIC || hdr->version != FW_VER ||
+ hdr->hdr_length < sizeof(*hdr) || hdr->hdr_length > fw->size ||
+ (hdr->hdr_length + (size_t)hdr->data_length) > fw->size ||
+ hdr->iface_offset >= hdr->data_length) {
+ dev_warn(iface->dchid->dev, "%s: invalid firmware header\n",
+ fw_name);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ fw_data = devm_kmemdup(iface->dchid->dev, fw->data + hdr->hdr_length,
+ hdr->data_length, GFP_KERNEL);
+ if (!fw_data) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ if (hdr->iface_offset)
+ fw_data[hdr->iface_offset] = iface->index;
+
+ *firmware = fw_data;
+ *size = hdr->data_length;
+
+done:
+ release_firmware(fw);
+ return ret;
+}
+
+static int dchid_start_interface(struct dchid_iface *iface)
+{
+ void *fw;
+ size_t size;
+ int ret;
+
+ if (iface->starting) {
+ dev_warn(iface->dchid->dev, "Interface %s is already starting\n", iface->name);
+ return -EINPROGRESS;
+ }
+
+ dev_info(iface->dchid->dev, "Starting interface %s\n", iface->name);
+
+ iface->starting = true;
+
+ /* Look to see if we need firmware */
+ ret = dchid_get_firmware(iface, &fw, &size);
+ if (ret < 0)
+ goto err;
+
+ /* Only multi-touch has firmware */
+ if (fw && size) {
+ /* Send firmware to the device */
+ dev_info(iface->dchid->dev, "Sending firmware for %s\n", iface->name);
+ ret = dchid_send_firmware(iface, fw, size);
+ if (ret < 0) {
+ dev_err(iface->dchid->dev, "Failed to send %s firmware\n", iface->name);
+ goto err;
+ }
+
+ /* After loading firmware, multi-touch needs a reset */
+ dev_info(iface->dchid->dev, "Resetting %s\n", iface->name);
+ dchid_reset_interface(iface, 0);
+ dchid_reset_interface(iface, 2);
+ }
+
+ return 0;
+
+err:
+ iface->starting = false;
+ return ret;
+}
+
+static int dchid_start(struct hid_device *hdev)
+{
+ return 0;
+}
+
+static void dchid_stop(struct hid_device *hdev)
+{
+ /* no-op, we don't know what the shutdown commands are, if any */
+}
+
+static int dchid_open(struct hid_device *hdev)
+{
+ struct dchid_iface *iface = hdev->driver_data;
+ int ret;
+
+ if (!completion_done(&iface->ready)) {
+ ret = dchid_start_interface(iface);
+ if (ret < 0)
+ return ret;
+
+ if (!wait_for_completion_timeout(&iface->ready, msecs_to_jiffies(START_TIMEOUT_MS))) {
+ dev_err(iface->dchid->dev, "iface %s start timed out\n", iface->name);
+ return -ETIMEDOUT;
+ }
+ }
+
+ iface->open = true;
+ return 0;
+}
+
+static void dchid_close(struct hid_device *hdev)
+{
+ struct dchid_iface *iface = hdev->driver_data;
+
+ iface->open = false;
+}
+
+static int dchid_parse(struct hid_device *hdev)
+{
+ struct dchid_iface *iface = hdev->driver_data;
+
+ return hid_parse_report(hdev, iface->hid_desc, iface->hid_desc_len);
+}
+
+/* Note: buf excludes report number! For ease of fetching strings/etc. */
+static int dchid_get_report_cmd(struct dchid_iface *iface, u8 reportnum, void *buf, size_t len)
+{
+ int ret = dchid_cmd(iface, GROUP_CMD, REQ_GET_REPORT, &reportnum, 1, buf, len);
+
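+ /* The response length includes the report ID byte; strip it. */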
+ return ret <= 0 ? ret : ret - 1;
+}
+
+/* Note: buf includes report number! */
+static int dchid_set_report(struct dchid_iface *iface, void *buf, size_t len)
+{
+ return dchid_cmd(iface, GROUP_OUTPUT, REQ_SET_REPORT, buf, len, NULL, 0);
+}
+
+static int dchid_raw_request(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf, size_t len,
+ unsigned char rtype, int reqtype)
+{
+ struct dchid_iface *iface = hdev->driver_data;
+
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ buf[0] = reportnum;
+ return dchid_cmd(iface, GROUP_OUTPUT, REQ_GET_REPORT, &reportnum, 1, buf + 1, len - 1);
+ case HID_REQ_SET_REPORT:
+ return dchid_set_report(iface, buf, len);
+ default:
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static struct hid_ll_driver dchid_ll = {
+ .start = &dchid_start,
+ .stop = &dchid_stop,
+ .open = &dchid_open,
+ .close = &dchid_close,
+ .parse = &dchid_parse,
+ .raw_request = &dchid_raw_request,
+};
+
+static void dchid_create_interface_work(struct work_struct *ws)
+{
+ struct dchid_iface *iface = container_of(ws, struct dchid_iface, create_work);
+ struct dockchannel_hid *dchid = iface->dchid;
+ struct hid_device *hid;
+ int ret;
+
+ if (iface->hid) {
+ dev_warn(dchid->dev, "Interface %s already created!\n",
+ iface->name);
+ return;
+ }
+
+ dev_info(dchid->dev, "New interface %s\n", iface->name);
+
+ /*
+ * Start the interface. This is not the entire init process; firmware
+ * is loaded later, on device open.
+ */
+ ret = dchid_enable_interface(iface);
+ if (ret < 0) {
+ dev_warn(dchid->dev, "Failed to enable %s: %d\n", iface->name, ret);
+ return;
+ }
+
+ iface->deferred = false;
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid))
+ return;
+
+ snprintf(hid->name, sizeof(hid->name), "Apple MTP %s", iface->name);
+ snprintf(hid->phys, sizeof(hid->phys), "%s.%d (%s)",
+ dev_name(dchid->dev), iface->index, iface->name);
+ strscpy(hid->uniq, dchid->serial, sizeof(hid->uniq));
+
+ hid->ll_driver = &dchid_ll;
+ hid->bus = BUS_HOST;
+ hid->vendor = dchid->device_id.vendor_id;
+ hid->product = dchid->device_id.product_id;
+ hid->version = dchid->device_id.version_number;
+ hid->type = HID_TYPE_OTHER;
+ if (!strcmp(iface->name, "multi-touch")) {
+ hid->type = HID_TYPE_SPI_MOUSE;
+ } else if (!strcmp(iface->name, "keyboard")) {
+ hid->type = HID_TYPE_SPI_KEYBOARD;
+
+ /*
+ * These country codes match what earlier Apple HID keyboards did.
+ * Apple seems to allocate keyboard IDs in groups of 3 (for the 3
+ * layout groups), hence the % 3.
+ */
+ switch (dchid->device_id.keyboard_type % 3) {
+ case KEYBOARD_TYPE_ANSI:
+ hid->country = 33; // US-English
+ break;
+
+ case KEYBOARD_TYPE_ISO:
+ hid->country = 13; // ISO
+ break;
+
+ case KEYBOARD_TYPE_JIS:
+ hid->country = 15; // Japan
+ break;
+ }
+ }
+
+ hid->dev.parent = iface->dchid->dev;
+ hid->driver_data = iface;
+
+ iface->hid = hid;
+
+ ret = hid_add_device(hid);
+ if (ret < 0) {
+ iface->hid = NULL;
+ hid_destroy_device(hid);
+ dev_warn(iface->dchid->dev, "Failed to register hid device %s", iface->name);
+ }
+}
+
+static int dchid_create_interface(struct dchid_iface *iface)
+{
+ if (iface->creating)
+ return -EBUSY;
+
+ iface->creating = true;
+ INIT_WORK(&iface->create_work, dchid_create_interface_work);
+ return queue_work(iface->dchid->new_iface_wq, &iface->create_work);
+}
+
+static void dchid_handle_descriptor(struct dchid_iface *iface, void *hid_desc, size_t desc_len)
+{
+ if (iface->hid) {
+ dev_warn(iface->dchid->dev, "Tried to initialize already started interface %s!\n",
+ iface->name);
+ return;
+ }
+
+ iface->hid_desc = devm_kmemdup(iface->dchid->dev, hid_desc, desc_len, GFP_KERNEL);
+ if (!iface->hid_desc)
+ return;
+
+ iface->hid_desc_len = desc_len;
+
+ /* We need to enable STM first, since it'll give us the device IDs */
+ if (iface->dchid->id_ready || !strcmp(iface->name, "stm")) {
+ dchid_create_interface(iface);
+ } else {
+ iface->deferred = true;
+ }
+}
+
+static void dchid_handle_ready(struct dockchannel_hid *dchid, void *data, size_t length)
+{
+ struct dchid_iface *iface;
+ u8 *pkt = data;
+ u8 index;
+ int i, ret;
+
+ if (length < 2) {
+ dev_err(dchid->dev, "Bad length for ready message: %zu\n", length);
+ return;
+ }
+
+ index = pkt[1];
+
+ if (index >= MAX_INTERFACES) {
+ dev_err(dchid->dev, "Got ready notification for bad iface %d\n", index);
+ return;
+ }
+
+ iface = dchid->ifaces[index];
+ if (!iface) {
+ dev_err(dchid->dev, "Got ready notification for unknown iface %d\n", index);
+ return;
+ }
+
+ dev_info(dchid->dev, "Interface %s is now ready\n", iface->name);
+ complete_all(&iface->ready);
+
+ /* When STM is ready, grab global device info */
+ if (!strcmp(iface->name, "stm")) {
+ ret = dchid_get_report_cmd(iface, STM_REPORT_ID, &dchid->device_id,
+ sizeof(dchid->device_id));
+ if (ret < sizeof(dchid->device_id)) {
+ dev_warn(iface->dchid->dev, "Failed to get device ID from STM!\n");
+ /* Fake it and keep going. Things might still work... */
+ memset(&dchid->device_id, 0, sizeof(dchid->device_id));
+ dchid->device_id.vendor_id = HOST_VENDOR_ID_APPLE;
+ }
+ ret = dchid_get_report_cmd(iface, STM_REPORT_SERIAL, dchid->serial,
+ sizeof(dchid->serial) - 1);
+ if (ret < 0) {
+ dev_warn(iface->dchid->dev, "Failed to get serial from STM!\n");
+ dchid->serial[0] = 0;
+ }
+
+ dchid->id_ready = true;
+ for (i = 0; i < MAX_INTERFACES; i++) {
+ if (!dchid->ifaces[i] || !dchid->ifaces[i]->deferred)
+ continue;
+ dchid_create_interface(dchid->ifaces[i]);
+ }
+ }
+}
+
+static void dchid_request_gpio(struct dchid_iface *iface, int id, const char *name)
+{
+ char prop_name[MAX_GPIO_NAME + 16];
+
+ dev_info(iface->dchid->dev, "Requesting GPIO %s#%d: %s\n", iface->name, id, name);
+
+ if (iface->gpio) {
+ dev_err(iface->dchid->dev, "Cannot request more than one GPIO per interface!\n");
+ return;
+ }
+
+ snprintf(prop_name, sizeof(prop_name), "apple,%s-gpios", name);
+
+ iface->gpio = devm_gpiod_get_from_of_node(iface->dchid->dev,
+ iface->of_node, prop_name, 0,
+ GPIOD_OUT_LOW, name);
+
+ if (IS_ERR_OR_NULL(iface->gpio)) {
+ dev_err(iface->dchid->dev, "Failed to request GPIO %s\n", prop_name);
+ iface->gpio = NULL;
+ return;
+ }
+
+ iface->gpio_id = id;
+}
+
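+/*
+ * The init event carries a type/length block sequence: HID descriptors,
+ * GPIO requests, and a terminator. Walk the blocks until the terminator
+ * or until the remaining length is exhausted.
+ */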
+static void dchid_handle_init(struct dockchannel_hid *dchid, void *data, size_t length)
+{
+ struct dchid_init_hdr *hdr = data;
+ struct dchid_iface *iface;
+ struct dchid_init_block_hdr *blk;
+
+ if (length < sizeof(*hdr))
+ return;
+
+ iface = dchid_get_interface(dchid, hdr->iface, hdr->name);
+ if (!iface)
+ return;
+
+ data += sizeof(*hdr);
+ length -= sizeof(*hdr);
+
+ while (length > sizeof(*blk)) {
+ blk = data;
+ data += sizeof(*blk);
+ length -= sizeof(*blk);
+
+ if (blk->length > length)
+ return;
+ switch (blk->type) {
+ case INIT_HID_DESCRIPTOR:
+ dchid_handle_descriptor(iface, data, blk->length);
+ break;
+
+ case INIT_GPIO_REQUEST: {
+ struct dchid_gpio_request *req = data;
+
+ if (sizeof(*req) > length)
+ return;
+ dchid_request_gpio(iface, req->id, req->name);
+ break;
+ }
+
+ case INIT_TERMINATOR:
+ return;
+ }
+
+ data += blk->length + sizeof(*blk);
+ length -= blk->length + sizeof(*blk);
+ }
+}
+
+static void dchid_handle_gpio(struct dockchannel_hid *dchid, void *data, size_t length)
+{
+ struct dchid_gpio_cmd *cmd = data;
+ struct dchid_iface *iface;
+ u32 retcode = 0xe000f00d; /* Give it a random Apple-style error code */
+ struct dchid_gpio_ack *ack;
+
+ if (length < sizeof(*cmd))
+ return;
+
+ if (cmd->iface >= MAX_INTERFACES || !(iface = dchid->ifaces[cmd->iface])) {
+ dev_err(dchid->dev, "Got GPIO command for bad interface %d\n", cmd->iface);
+ goto err;
+ }
+
+ if (!iface->gpio || cmd->gpio != iface->gpio_id) {
+ dev_err(dchid->dev, "Got GPIO command for bad GPIO %s#%d\n",
+ iface->name, cmd->gpio);
+ goto err;
+ }
+
+ dev_info(dchid->dev, "GPIO command: %s#%d: %d\n", iface->name, cmd->gpio, cmd->cmd);
+
+ switch (cmd->cmd) {
+ case 3:
+ /* Pulse. */
+ gpiod_set_value_cansleep(iface->gpio, 1);
+ msleep(10); /* Random guess... */
+ gpiod_set_value_cansleep(iface->gpio, 0);
+ retcode = 0;
+ break;
+ default:
+ dev_err(dchid->dev, "Unknown GPIO command %d\n", cmd->cmd);
+ break;
+ }
+
+err:
+ /* Ack it */
+ ack = kzalloc(sizeof(*ack) + length, GFP_KERNEL);
+ if (!ack)
+ return;
+
+ ack->type = CMD_ACK_GPIO_CMD;
+ ack->retcode = retcode;
+ memcpy(ack->cmd, data, length);
+
+ if (dchid_comm_cmd(dchid, ack, sizeof(*ack) + length) < 0)
+ dev_err(dchid->dev, "Failed to ACK GPIO command\n");
+
+ kfree(ack);
+}
+
+static void dchid_handle_event(struct dockchannel_hid *dchid, void *data, size_t length)
+{
+ u8 *p = data;
+ switch (*p) {
+ case EVENT_INIT:
+ dchid_handle_init(dchid, data, length);
+ break;
+ case EVENT_READY:
+ dchid_handle_ready(dchid, data, length);
+ break;
+ case EVENT_GPIO_CMD:
+ dchid_handle_gpio(dchid, data, length);
+ break;
+ }
+}
+
+static void dchid_handle_report(struct dchid_iface *iface, void *data, size_t length)
+{
+ struct dockchannel_hid *dchid = iface->dchid;
+
+ if (!iface->hid) {
+ dev_warn(dchid->dev, "Report received but %s is not initialized!\n", iface->name);
+ return;
+ }
+
+ if (!iface->open)
+ return;
+
+ hid_input_report(iface->hid, HID_INPUT_REPORT, data, length, 1);
+}
+
+static void dchid_packet_work(struct work_struct *ws)
+{
+ struct dchid_work *work = container_of(ws, struct dchid_work, work);
+ struct dchid_subhdr *shdr = (void *)work->data;
+ struct dockchannel_hid *dchid = work->iface->dchid;
+ int type = FIELD_GET(FLAGS_GROUP, shdr->flags);
+ u8 *payload = work->data + sizeof(*shdr);
+
+ if (shdr->length + sizeof(*shdr) > work->hdr.length) {
+ dev_err(dchid->dev, "Bad sub header length (%d > %zu)\n",
+ shdr->length, work->hdr.length - sizeof(*shdr));
+ return;
+ }
+
+ switch (type) {
+ case GROUP_INPUT:
+ if (work->hdr.iface == IFACE_COMM)
+ dchid_handle_event(dchid, payload, shdr->length);
+ else
+ dchid_handle_report(work->iface, payload, shdr->length);
+ break;
+ default:
+ dev_err(dchid->dev, "Received unknown packet type %d\n", type);
+ break;
+ }
+
+ kfree(work);
+}
+
+static void dchid_handle_ack(struct dchid_iface *iface, struct dchid_hdr *hdr, void *data)
+{
+ struct dchid_subhdr *shdr = (void *)data;
+ u8 *payload = data + sizeof(*shdr);
+
+ if (shdr->length + sizeof(*shdr) > hdr->length) {
+ dev_err(iface->dchid->dev, "Bad sub header length (%d > %zu)\n",
+ shdr->length, hdr->length - sizeof(*shdr));
+ return;
+ }
+ if (shdr->flags != iface->out_flags) {
+ dev_err(iface->dchid->dev,
+ "Received unexpected flags 0x%x on ACK channel (expected 0x%x)\n",
+ shdr->flags, iface->out_flags);
+ return;
+ }
+
+ if (shdr->length < 1) {
+ dev_err(iface->dchid->dev, "Received length 0 output report ack\n");
+ return;
+ }
+ if (iface->tx_seq != hdr->seq) {
+ dev_err(iface->dchid->dev, "Received ACK with bad seq (expected %d, got %d)\n",
+ iface->tx_seq, hdr->seq);
+ return;
+ }
+ if (iface->out_report != payload[0]) {
+ dev_err(iface->dchid->dev, "Received ACK with bad report (expected %d, got %d)\n",
+ iface->out_report, payload[0]);
+ return;
+ }
+
+ if (iface->resp_buf && iface->resp_size)
+ memcpy(iface->resp_buf, payload + 1, min((size_t)shdr->length - 1, iface->resp_size));
+
+ iface->resp_size = shdr->length;
+ iface->out_report = -1;
+ iface->retcode = shdr->retcode;
+ complete(&iface->out_complete);
+}
+
+static void dchid_handle_packet(void *cookie, size_t avail)
+{
+ struct dockchannel_hid *dchid = cookie;
+ struct dchid_hdr hdr;
+ struct dchid_work *work;
+ struct dchid_iface *iface;
+ u32 checksum;
+
+ if (dockchannel_recv(dchid->dc, &hdr, sizeof(hdr)) != sizeof(hdr)) {
+ dev_err(dchid->dev, "Read failed (header)\n");
+ return;
+ }
+
+ if (hdr.hdr_len != sizeof(hdr)) {
+ dev_err(dchid->dev, "Bad header length %d\n", hdr.hdr_len);
+ goto done;
+ }
+
+ if (dockchannel_recv(dchid->dc, dchid->pkt_buf, hdr.length + 4) != (hdr.length + 4)) {
+ dev_err(dchid->dev, "Read failed (body)\n");
+ goto done;
+ }
+
+ checksum = dchid_checksum(&hdr, sizeof(hdr));
+ checksum += dchid_checksum(dchid->pkt_buf, hdr.length + 4);
+
+ if (checksum != 0xffffffff) {
+ dev_err(dchid->dev, "Checksum mismatch (iface %d): 0x%08x != 0xffffffff\n",
+ hdr.iface, checksum);
+ goto done;
+ }
+
+ if (hdr.iface >= MAX_INTERFACES) {
+ dev_err(dchid->dev, "Bad iface %d\n", hdr.iface);
+ goto done;
+ }
+
+ iface = dchid->ifaces[hdr.iface];
+
+ if (!iface) {
+ dev_err(dchid->dev, "Received packet for uninitialized iface %d\n", hdr.iface);
+ goto done;
+ }
+
+ switch (hdr.channel) {
+ case DCHID_CHANNEL_CMD:
+ dchid_handle_ack(iface, &hdr, dchid->pkt_buf);
+ goto done;
+ case DCHID_CHANNEL_REPORT:
+ break;
+ default:
+ dev_warn(dchid->dev, "Unknown channel 0x%x, treating as report...\n",
+ hdr.channel);
+ break;
+ }
+
+ work = kzalloc(sizeof(*work) + hdr.length, GFP_KERNEL);
+ if (!work)
+ return;
+
+ work->hdr = hdr;
+ work->iface = iface;
+ memcpy(work->data, dchid->pkt_buf, hdr.length);
+ INIT_WORK(&work->work, dchid_packet_work);
+
+ queue_work(iface->wq, &work->work);
+
+done:
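+ /* Re-arm the DockChannel read for the next packet header. */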
+ dockchannel_await(dchid->dc, dchid_handle_packet, dchid, sizeof(struct dchid_hdr));
+}
+
+static int dockchannel_hid_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dockchannel_hid *dchid;
+ struct device_node *child, *helper;
+ struct platform_device *helper_pdev;
+ struct property *prop;
+
+ dchid = devm_kzalloc(dev, sizeof(*dchid), GFP_KERNEL);
+ if (!dchid) {
+ return -ENOMEM;
+ }
+
+ dchid->dev = dev;
+
+ /*
+ * First make sure all the GPIOs are available, in case we need to defer.
+ * This is necessary because MTP will request them by name later, and by then
+ * it's too late to defer the probe.
+ */
+
+ for_each_child_of_node(dev->of_node, child) {
+ for_each_property_of_node(child, prop) {
+ size_t len = strlen(prop->name);
+ struct gpio_desc *gpio;
+
+ if (len < 12 || strncmp("apple,", prop->name, 6) ||
+ strcmp("-gpios", prop->name + len - 6))
+ continue;
+
+ gpio = gpiod_get_from_of_node(child, prop->name, 0, GPIOD_ASIS,
+ prop->name);
+ if (IS_ERR_OR_NULL(gpio)) {
+ if (PTR_ERR(gpio) == -EPROBE_DEFER) {
+ of_node_put(child);
+ return -EPROBE_DEFER;
+ }
+ } else {
+ gpiod_put(gpio);
+ }
+ }
+ }
+
+ /*
+ * Make sure we also have the MTP coprocessor available, and
+ * defer probe if the helper hasn't probed yet.
+ */
+ helper = of_parse_phandle(dev->of_node, "apple,helper-cpu", 0);
+ if (!helper) {
+ dev_err(dev, "Missing apple,helper-cpu property");
+ return -EINVAL;
+ }
+
+ helper_pdev = of_find_device_by_node(helper);
+ of_node_put(helper);
+ if (!helper_pdev) {
+ dev_err(dev, "Failed to find helper device");
+ return -EINVAL;
+ }
+
+ dchid->helper_link = device_link_add(dev, &helper_pdev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ put_device(&helper_pdev->dev);
+ if (!dchid->helper_link) {
+ dev_err(dev, "Failed to link to helper device");
+ return -EINVAL;
+ }
+
+ if (dchid->helper_link->supplier->links.status != DL_DEV_DRIVER_BOUND)
+ return -EPROBE_DEFER;
+
+ /* Now it is safe to begin initializing */
+ dchid->dc = dockchannel_init(pdev);
+ if (IS_ERR_OR_NULL(dchid->dc)) {
+ return PTR_ERR(dchid->dc);
+ }
+ dchid->new_iface_wq = alloc_workqueue("dchid-new", WQ_MEM_RECLAIM, 0);
+ if (!dchid->new_iface_wq)
+ return -ENOMEM;
+
+ dchid->comm = dchid_get_interface(dchid, IFACE_COMM, "comm");
+ if (!dchid->comm) {
+ dev_err(dchid->dev, "Failed to initialize comm interface");
+ return -EIO;
+ }
+
+ dev_info(dchid->dev, "Initialized, awaiting packets\n");
+ dockchannel_await(dchid->dc, dchid_handle_packet, dchid, sizeof(struct dchid_hdr));
+
+ return 0;
+}
+
+static int dockchannel_hid_remove(struct platform_device *pdev)
+{
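+ /*
+ * Unloading is not supported; as with dchid_stop(), the shutdown
+ * commands (if any) are unknown.
+ */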
+ BUG_ON(1);
+ return 0;
+}
+
+static const struct of_device_id dockchannel_hid_of_match[] = {
+ { .compatible = "apple,dockchannel-hid" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dockchannel_hid_of_match);
+MODULE_FIRMWARE("apple/tpmtfw-*.bin");
+
+static struct platform_driver dockchannel_hid_driver = {
+ .driver = {
+ .name = "dockchannel-hid",
+ .of_match_table = dockchannel_hid_of_match,
+ },
+ .probe = dockchannel_hid_probe,
+ .remove = dockchannel_hid_remove,
+};
+module_platform_driver(dockchannel_hid_driver);
+
+MODULE_DESCRIPTION("Apple DockChannel HID transport driver");
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 6970797cdc56..266e7704523d 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -256,6 +256,50 @@ static const struct apple_key_translation apple_fn_keys[] = {
{ }
};
+static const struct apple_key_translation apple_fn_keys_spi[] = {
+ { KEY_BACKSPACE, KEY_DELETE },
+ { KEY_ENTER, KEY_INSERT },
+ { KEY_F1, KEY_BRIGHTNESSDOWN, APPLE_FLAG_FKEY },
+ { KEY_F2, KEY_BRIGHTNESSUP, APPLE_FLAG_FKEY },
+ { KEY_F3, KEY_SCALE, APPLE_FLAG_FKEY },
+ { KEY_F4, KEY_SEARCH, APPLE_FLAG_FKEY },
+ { KEY_F5, KEY_RECORD, APPLE_FLAG_FKEY },
+ { KEY_F6, KEY_SLEEP, APPLE_FLAG_FKEY },
+ { KEY_F7, KEY_PREVIOUSSONG, APPLE_FLAG_FKEY },
+ { KEY_F8, KEY_PLAYPAUSE, APPLE_FLAG_FKEY },
+ { KEY_F9, KEY_NEXTSONG, APPLE_FLAG_FKEY },
+ { KEY_F10, KEY_MUTE, APPLE_FLAG_FKEY },
+ { KEY_F11, KEY_VOLUMEDOWN, APPLE_FLAG_FKEY },
+ { KEY_F12, KEY_VOLUMEUP, APPLE_FLAG_FKEY },
+ { KEY_UP, KEY_PAGEUP },
+ { KEY_DOWN, KEY_PAGEDOWN },
+ { KEY_LEFT, KEY_HOME },
+ { KEY_RIGHT, KEY_END },
+ { }
+};
+
+static const struct apple_key_translation apple_fn_keys_mbp13[] = {
+ { KEY_BACKSPACE, KEY_DELETE },
+ { KEY_ENTER, KEY_INSERT },
+ { KEY_UP, KEY_PAGEUP },
+ { KEY_DOWN, KEY_PAGEDOWN },
+ { KEY_LEFT, KEY_HOME },
+ { KEY_RIGHT, KEY_END },
+ { KEY_1, KEY_F1 },
+ { KEY_2, KEY_F2 },
+ { KEY_3, KEY_F3 },
+ { KEY_4, KEY_F4 },
+ { KEY_5, KEY_F5 },
+ { KEY_6, KEY_F6 },
+ { KEY_7, KEY_F7 },
+ { KEY_8, KEY_F8 },
+ { KEY_9, KEY_F9 },
+ { KEY_0, KEY_F10 },
+ { KEY_MINUS, KEY_F11 },
+ { KEY_EQUAL, KEY_F12 },
+ { }
+};
+
static const struct apple_key_translation powerbook_fn_keys[] = {
{ KEY_BACKSPACE, KEY_DELETE },
{ KEY_F1, KEY_BRIGHTNESSDOWN, APPLE_FLAG_FKEY },
@@ -425,6 +469,16 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
else if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
table = macbookair_fn_keys;
+ else if (hid->bus == BUS_HOST || hid->bus == BUS_SPI)
+ switch (hid->product) {
+ case SPI_DEVICE_ID_APPLE_MACBOOK_PRO13_2020:
+ case HOST_DEVICE_ID_APPLE_MACBOOK_PRO13_2022:
+ table = apple_fn_keys_mbp13;
+ break;
+ default:
+ table = apple_fn_keys_spi;
+ break;
+ }
else if (hid->product < 0x21d || hid->product >= 0x300)
table = powerbook_fn_keys;
else
@@ -632,6 +686,8 @@ static void apple_setup_input(struct input_dev *input)
/* Enable all needed keys */
apple_setup_key_translation(input, apple_fn_keys);
+ apple_setup_key_translation(input, apple_fn_keys_spi);
+ apple_setup_key_translation(input, apple_fn_keys_mbp13);
apple_setup_key_translation(input, powerbook_fn_keys);
apple_setup_key_translation(input, powerbook_numlock_keys);
apple_setup_key_translation(input, apple_iso_keyboard);
@@ -808,6 +864,10 @@ static int apple_probe(struct hid_device *hdev,
struct apple_sc *asc;
int ret;
+ if ((id->bus == BUS_SPI || id->bus == BUS_HOST) && id->vendor == SPI_VENDOR_ID_APPLE &&
+ hdev->type != HID_TYPE_SPI_KEYBOARD)
+ return -ENODEV;
+
asc = devm_kzalloc(&hdev->dev, sizeof(*asc), GFP_KERNEL);
if (asc == NULL) {
hid_err(hdev, "can't alloc apple descriptor\n");
@@ -1049,6 +1109,10 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021),
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ { HID_SPI_DEVICE(SPI_VENDOR_ID_APPLE, HID_ANY_ID),
+ .driver_data = APPLE_HAS_FN },
+ { HID_DEVICE(BUS_HOST, HID_GROUP_ANY, HOST_VENDOR_ID_APPLE,
+ HID_ANY_ID), .driver_data = APPLE_HAS_FN },
{ }
};
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 9c1d31f63f85..2029480cc88d 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2221,6 +2221,12 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
case BUS_I2C:
bus = "I2C";
break;
+ case BUS_SPI:
+ bus = "SPI";
+ break;
+ case BUS_HOST:
+ bus = "HOST";
+ break;
case BUS_VIRTUAL:
bus = "VIRTUAL";
break;
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index dad953f66996..d317aaa8bb38 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -89,6 +89,8 @@
#define USB_VENDOR_ID_APPLE 0x05ac
#define BT_VENDOR_ID_APPLE 0x004c
+#define SPI_VENDOR_ID_APPLE 0x05ac
+#define HOST_VENDOR_ID_APPLE 0x05ac
#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
#define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
#define USB_DEVICE_ID_APPLE_MAGICMOUSE2 0x0269
@@ -187,6 +189,12 @@
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021 0x029f
#define USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT 0x8102
#define USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY 0x8302
+#define SPI_DEVICE_ID_APPLE_MACBOOK_AIR_2020 0x0281
+#define SPI_DEVICE_ID_APPLE_MACBOOK_PRO13_2020 0x0341
+#define SPI_DEVICE_ID_APPLE_MACBOOK_PRO14_2021 0x0342
+#define SPI_DEVICE_ID_APPLE_MACBOOK_PRO16_2021 0x0343
+#define HOST_DEVICE_ID_APPLE_MACBOOK_AIR13_2022 0x0351
+#define HOST_DEVICE_ID_APPLE_MACBOOK_PRO13_2022 0x0354
#define USB_VENDOR_ID_ASUS 0x0486
#define USB_DEVICE_ID_ASUS_T91MT 0x0185
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index c9c968d4b36a..b22ab0f320c0 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -59,8 +59,12 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
#define MOUSE_REPORT_ID 0x29
#define MOUSE2_REPORT_ID 0x12
#define DOUBLE_REPORT_ID 0xf7
+#define SPI_REPORT_ID 0x02
+#define MTP_REPORT_ID 0x75
#define USB_BATTERY_TIMEOUT_MS 60000
+#define MAX_CONTACTS 16
+
/* These definitions are not precise, but they're close enough. (Bits
* 0x03 seem to indicate the aspect ratio of the touch, bits 0x70 seem
* to be some kind of bit mask -- 0x20 may be a near-field reading,
@@ -111,6 +115,25 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
#define TRACKPAD2_RES_Y \
((TRACKPAD2_MAX_Y - TRACKPAD2_MIN_Y) / (TRACKPAD2_DIMENSION_Y / 100))
+#define J314_TP_DIMENSION_X (float)13000
+#define J314_TP_MIN_X -5900
+#define J314_TP_MAX_X 6500
+#define J314_TP_RES_X \
+ ((J314_TP_MAX_X - J314_TP_MIN_X) / (J314_TP_DIMENSION_X / 100))
+#define J314_TP_DIMENSION_Y (float)8100
+#define J314_TP_MIN_Y -200
+#define J314_TP_MAX_Y 7400
+#define J314_TP_RES_Y \
+ ((J314_TP_MAX_Y - J314_TP_MIN_Y) / (J314_TP_DIMENSION_Y / 100))
+
+#define J314_TP_MAX_FINGER_ORIENTATION 16384
+
+struct magicmouse_input_ops {
+ int (*raw_event)(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size);
+ int (*setup_input)(struct input_dev *input, struct hid_device *hdev);
+};
+
/**
* struct magicmouse_sc - Tracks Magic Mouse-specific data.
* @input: Input device through which we report events.
@@ -129,9 +152,8 @@ struct magicmouse_sc {
int scroll_accel;
unsigned long scroll_jiffies;
+ struct input_mt_pos pos[MAX_CONTACTS];
struct {
- short x;
- short y;
short scroll_x;
short scroll_y;
short scroll_x_hr;
@@ -139,12 +161,13 @@ struct magicmouse_sc {
u8 size;
bool scroll_x_active;
bool scroll_y_active;
- } touches[16];
- int tracking_ids[16];
+ } touches[MAX_CONTACTS];
+ int tracking_ids[MAX_CONTACTS];
struct hid_device *hdev;
struct delayed_work work;
struct timer_list battery_timer;
+ struct magicmouse_input_ops input_ops;
};
static int magicmouse_firm_touch(struct magicmouse_sc *msc)
@@ -188,7 +211,7 @@ static void magicmouse_emit_buttons(struct magicmouse_sc *msc, int state)
} else if (last_state != 0) {
state = last_state;
} else if ((id = magicmouse_firm_touch(msc)) >= 0) {
- int x = msc->touches[id].x;
+ int x = msc->pos[id].x;
if (x < middle_button_start)
state = 1;
else if (x > middle_button_stop)
@@ -249,8 +272,8 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
/* Store tracking ID and other fields. */
msc->tracking_ids[raw_id] = id;
- msc->touches[id].x = x;
- msc->touches[id].y = y;
+ msc->pos[id].x = x;
+ msc->pos[id].y = y;
msc->touches[id].size = size;
/* If requested, emulate a scroll wheel by detecting small
@@ -374,6 +397,14 @@ static int magicmouse_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data, int size)
{
struct magicmouse_sc *msc = hid_get_drvdata(hdev);
+
+ return msc->input_ops.raw_event(hdev, report, data, size);
+}
+
+static int magicmouse_raw_event_usb(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ struct magicmouse_sc *msc = hid_get_drvdata(hdev);
struct input_dev *input = msc->input;
int x = 0, y = 0, ii, clicks = 0, npoints;
@@ -502,6 +533,175 @@ static int magicmouse_raw_event(struct hid_device *hdev,
return 1;
}
+/**
+ * struct tp_finger - single trackpad finger structure, le16-aligned
+ *
+ * @unknown1: unknown
+ * @unknown2: unknown
+ * @abs_x: absolute x coordinate
+ * @abs_y: absolute y coordinate
+ * @rel_x: relative x coordinate
+ * @rel_y: relative y coordinate
+ * @tool_major: tool area, major axis
+ * @tool_minor: tool area, minor axis
+ * @orientation: 16384 when point, else 15 bit angle
+ * @touch_major: touch area, major axis
+ * @touch_minor: touch area, minor axis
+ * @unused: zeros
+ * @pressure: pressure on forcetouch touchpad
+ * @multi: one finger: varies, more fingers: constant
+ * @crc16: on last finger: crc over the whole message struct
+ * (i.e. message header + this struct) minus the last
+ * @crc16 field; unknown on all other fingers.
+ */
+struct tp_finger {
+ __le16 unknown1;
+ __le16 unknown2;
+ __le16 abs_x;
+ __le16 abs_y;
+ __le16 rel_x;
+ __le16 rel_y;
+ __le16 tool_major;
+ __le16 tool_minor;
+ __le16 orientation;
+ __le16 touch_major;
+ __le16 touch_minor;
+ __le16 unused[2];
+ __le16 pressure;
+ __le16 multi;
+} __attribute__((packed, aligned(2)));
+
+/**
+ * vendor trackpad report
+ *
+ * @num_fingers: the number of fingers being reported in @fingers
+ * @buttons: same as HID buttons
+ */
+struct tp_header {
+ // HID vendor part, up to 1751 bytes
+ u8 unknown[22];
+ u8 num_fingers;
+ u8 buttons;
+ u8 unknown3[14];
+};
+
+/**
+ * standard HID mouse report
+ *
+ * @report_id: reportid
+ * @buttons: HID Usage Buttons 3 1-bit reports
+ */
+struct tp_mouse_report {
+ // HID mouse report
+ u8 report_id;
+ u8 buttons;
+ u8 rel_x;
+ u8 rel_y;
+ u8 padding[4];
+};
+
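+/* Touch coordinates are signed little-endian; convert with sign extension. */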
+static inline int le16_to_int(__le16 x)
+{
+ return (signed short)le16_to_cpu(x);
+}
+
+static void report_finger_data(struct input_dev *input, int slot,
+ const struct input_mt_pos *pos,
+ const struct tp_finger *f)
+{
+ input_mt_slot(input, slot);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR,
+ le16_to_int(f->touch_major) << 1);
+ input_report_abs(input, ABS_MT_TOUCH_MINOR,
+ le16_to_int(f->touch_minor) << 1);
+ input_report_abs(input, ABS_MT_WIDTH_MAJOR,
+ le16_to_int(f->tool_major) << 1);
+ input_report_abs(input, ABS_MT_WIDTH_MINOR,
+ le16_to_int(f->tool_minor) << 1);
+ input_report_abs(input, ABS_MT_ORIENTATION,
+ J314_TP_MAX_FINGER_ORIENTATION - le16_to_int(f->orientation));
+ input_report_abs(input, ABS_MT_PRESSURE, le16_to_int(f->pressure));
+ input_report_abs(input, ABS_MT_POSITION_X, pos->x);
+ input_report_abs(input, ABS_MT_POSITION_Y, pos->y);
+}
+
+static int magicmouse_raw_event_mtp(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ struct magicmouse_sc *msc = hid_get_drvdata(hdev);
+ struct input_dev *input = msc->input;
+ struct tp_header *tp_hdr;
+ struct tp_finger *f;
+ int i, n;
+ u32 npoints;
+ const size_t hdr_sz = sizeof(struct tp_header);
+ const size_t touch_sz = sizeof(struct tp_finger);
+ u8 map_contacts[MAX_CONTACTS];
+
+ // hid_warn(hdev, "%s\n", __func__);
+ // print_hex_dump_debug("appleft ev: ", DUMP_PREFIX_OFFSET, 16, 1, data,
+ // size, false);
+
+ /* Expect a tp_header prefix, then N * sizeof(tp_finger) bytes of touch data. */
+ if (size < hdr_sz || ((size - hdr_sz) % touch_sz) != 0)
+ return 0;
+
+ tp_hdr = (struct tp_header *)data;
+
+ npoints = (size - hdr_sz) / touch_sz;
+ if (npoints < tp_hdr->num_fingers || npoints > MAX_CONTACTS) {
+ hid_warn(hdev,
+ "unexpected number of touches (%u) for "
+ "report\n",
+ npoints);
+ return 0;
+ }
+
+ n = 0;
+ for (i = 0; i < tp_hdr->num_fingers; i++) {
+ f = (struct tp_finger *)(data + hdr_sz + i * touch_sz);
+ if (le16_to_int(f->touch_major) == 0)
+ continue;
+
+ hid_dbg(hdev, "ev x:%04x y:%04x\n", le16_to_int(f->abs_x),
+ le16_to_int(f->abs_y));
+ msc->pos[n].x = le16_to_int(f->abs_x);
+ msc->pos[n].y = -le16_to_int(f->abs_y);
+ map_contacts[n] = i;
+ n++;
+ }
+
+ input_mt_assign_slots(input, msc->tracking_ids, msc->pos, n, 0);
+
+ for (i = 0; i < n; i++) {
+ int idx = map_contacts[i];
+ f = (struct tp_finger *)(data + hdr_sz + idx * touch_sz);
+ report_finger_data(input, msc->tracking_ids[i], &msc->pos[i], f);
+ }
+
+ input_mt_sync_frame(input);
+ input_report_key(input, BTN_MOUSE, tp_hdr->buttons & 1);
+
+ input_sync(input);
+ return 1;
+}
+
+static int magicmouse_raw_event_spi(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ const size_t hdr_sz = sizeof(struct tp_mouse_report);
+
+ if (size < hdr_sz)
+ return 0;
+
+ if (data[0] != TRACKPAD2_USB_REPORT_ID)
+ return 0;
+
+ return magicmouse_raw_event_mtp(hdev, report, data + hdr_sz, size - hdr_sz);
+}
+
static int magicmouse_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
@@ -519,7 +719,17 @@ static int magicmouse_event(struct hid_device *hdev, struct hid_field *field,
return 0;
}
-static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev)
+
+static int magicmouse_setup_input(struct input_dev *input,
+ struct hid_device *hdev)
+{
+ struct magicmouse_sc *msc = hid_get_drvdata(hdev);
+
+ return msc->input_ops.setup_input(input, hdev);
+}
+
+static int magicmouse_setup_input_usb(struct input_dev *input,
+ struct hid_device *hdev)
{
int error;
int mt_flags = 0;
@@ -592,7 +802,7 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__set_bit(EV_ABS, input->evbit);
- error = input_mt_init_slots(input, 16, mt_flags);
+ error = input_mt_init_slots(input, MAX_CONTACTS, mt_flags);
if (error)
return error;
input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255 << 2,
@@ -671,6 +881,79 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
return 0;
}
+static int magicmouse_setup_input_spi(struct input_dev *input,
+ struct hid_device *hdev)
+{
+ int error;
+ int mt_flags = 0;
+
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+ __clear_bit(BTN_0, input->keybit);
+ __clear_bit(BTN_RIGHT, input->keybit);
+ __clear_bit(BTN_MIDDLE, input->keybit);
+ __clear_bit(EV_REL, input->evbit);
+ __clear_bit(REL_X, input->relbit);
+ __clear_bit(REL_Y, input->relbit);
+
+ mt_flags = INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED | INPUT_MT_TRACK;
+
+ /* finger touch area */
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 5000, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 5000, 0, 0);
+
+ /* finger approach area */
+ input_set_abs_params(input, ABS_MT_WIDTH_MAJOR, 0, 5000, 0, 0);
+ input_set_abs_params(input, ABS_MT_WIDTH_MINOR, 0, 5000, 0, 0);
+
+ /* Note: Touch Y position from the device is inverted relative
+ * to how pointer motion is reported (and relative to how USB
+ * HID recommends the coordinates work). This driver keeps
+ * the origin at the same position, and just uses the additive
+ * inverse of the reported Y.
+ */
+
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 6000, 0, 0);
+
+ /*
+ * This makes libinput recognize this as a PressurePad and
+ * stop trying to use pressure for touch size. Pressure unit
+ * seems to be ~grams on these touchpads.
+ */
+ input_abs_set_res(input, ABS_MT_PRESSURE, 1);
+
+ /* finger orientation */
+ input_set_abs_params(input, ABS_MT_ORIENTATION, -J314_TP_MAX_FINGER_ORIENTATION,
+ J314_TP_MAX_FINGER_ORIENTATION, 0, 0);
+
+ /* finger position */
+ input_set_abs_params(input, ABS_MT_POSITION_X, J314_TP_MIN_X, J314_TP_MAX_X,
+ 0, 0);
+ /* Y axis is inverted */
+ input_set_abs_params(input, ABS_MT_POSITION_Y, -J314_TP_MAX_Y, -J314_TP_MIN_Y,
+ 0, 0);
+
+ /* X/Y resolution */
+ input_abs_set_res(input, ABS_MT_POSITION_X, J314_TP_RES_X);
+ input_abs_set_res(input, ABS_MT_POSITION_Y, J314_TP_RES_Y);
+
+ input_set_events_per_packet(input, 60);
+
+ /* touchpad button */
+ input_set_capability(input, EV_KEY, BTN_MOUSE);
+
+ /*
+ * hid-input may mark device as using autorepeat, but the trackpad does
+ * not actually want it.
+ */
+ __clear_bit(EV_REP, input->evbit);
+
+ error = input_mt_init_slots(input, MAX_CONTACTS, mt_flags);
+ if (error)
+ return error;
+
+ return 0;
+}
+
static int magicmouse_input_mapping(struct hid_device *hdev,
struct hid_input *hi, struct hid_field *field,
struct hid_usage *usage, unsigned long **bit, int *max)
@@ -726,6 +1009,9 @@ static int magicmouse_enable_multitouch(struct hid_device *hdev)
feature_size = sizeof(feature_mt_trackpad2_usb);
feature = feature_mt_trackpad2_usb;
}
+ } else if (hdev->vendor == SPI_VENDOR_ID_APPLE) {
+ feature_size = sizeof(feature_mt_trackpad2_usb);
+ feature = feature_mt_trackpad2_usb;
} else if (hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
feature_size = sizeof(feature_mt_mouse2);
feature = feature_mt_mouse2;
@@ -800,12 +1086,30 @@ static int magicmouse_probe(struct hid_device *hdev,
struct hid_report *report;
int ret;
+ if ((id->bus == BUS_SPI || id->bus == BUS_HOST) && id->vendor == SPI_VENDOR_ID_APPLE &&
+ hdev->type != HID_TYPE_SPI_MOUSE)
+ return -ENODEV;
+
msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
if (msc == NULL) {
hid_err(hdev, "can't alloc magicmouse descriptor\n");
return -ENOMEM;
}
+ // The internal trackpad uses a different data format; use separate
+ // input ops to avoid conflicts over the report ID.
+ if (id->bus == BUS_HOST) {
+ msc->input_ops.raw_event = magicmouse_raw_event_mtp;
+ msc->input_ops.setup_input = magicmouse_setup_input_spi;
+ } else if (id->bus == BUS_SPI) {
+ msc->input_ops.raw_event = magicmouse_raw_event_spi;
+ msc->input_ops.setup_input = magicmouse_setup_input_spi;
+
+ } else {
+ msc->input_ops.raw_event = magicmouse_raw_event_usb;
+ msc->input_ops.setup_input = magicmouse_setup_input_usb;
+ }
+
msc->scroll_accel = SCROLL_ACCEL_DEFAULT;
msc->hdev = hdev;
INIT_DEFERRABLE_WORK(&msc->work, magicmouse_enable_mt_work);
@@ -854,6 +1158,10 @@ static int magicmouse_probe(struct hid_device *hdev,
else /* USB_VENDOR_ID_APPLE */
report = hid_register_report(hdev, HID_INPUT_REPORT,
TRACKPAD2_USB_REPORT_ID, 0);
+ } else if (id->bus == BUS_SPI) {
+ report = hid_register_report(hdev, HID_INPUT_REPORT, SPI_REPORT_ID, 0);
+ } else if (id->bus == BUS_HOST) {
+ report = hid_register_report(hdev, HID_INPUT_REPORT, MTP_REPORT_ID, 0);
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
report = hid_register_report(hdev, HID_INPUT_REPORT,
TRACKPAD_REPORT_ID, 0);
@@ -868,6 +1176,10 @@ static int magicmouse_probe(struct hid_device *hdev,
}
report->size = 6;
+ /* MTP devices do not need the MT enable, this is handled by the MTP driver */
+ if (id->bus == BUS_HOST)
+ return 0;
+
/*
* Some devices repond with 'invalid report id' when feature
* report switching it into multitouch mode is sent to it.
@@ -948,6 +1260,10 @@ static const struct hid_device_id magic_mice[] = {
USB_DEVICE_ID_APPLE_MAGICTRACKPAD2), .driver_data = 0 },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICTRACKPAD2), .driver_data = 0 },
+ { HID_SPI_DEVICE(SPI_VENDOR_ID_APPLE, HID_ANY_ID),
+ .driver_data = 0 },
+ { HID_DEVICE(BUS_HOST, HID_GROUP_ANY, HOST_VENDOR_ID_APPLE,
+ HID_ANY_ID), .driver_data = 0 },
{ }
};
MODULE_DEVICE_TABLE(hid, magic_mice);
diff --git a/drivers/hid/spi-hid/Kconfig b/drivers/hid/spi-hid/Kconfig
new file mode 100644
index 000000000000..8e37f0fec28a
--- /dev/null
+++ b/drivers/hid/spi-hid/Kconfig
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "SPI HID support"
+ depends on SPI
+
+config SPI_HID_APPLE_OF
+ tristate "HID over SPI transport layer for Apple Silicon SoCs"
+ default ARCH_APPLE
+ depends on SPI && INPUT && OF
+ help
+ Say Y here if you use an Apple Silicon based laptop. The keyboard and
+ touchpad are HID based devices connected via SPI.
+
+ If unsure, say N.
+
+ This support is also available as a module. If built as a module, it
+ will be called spi-hid-apple-of, and it will also build/depend on the
+ module spi-hid-apple.
+
+config SPI_HID_APPLE_CORE
+ tristate
+ default y if SPI_HID_APPLE_OF=y
+ default m if SPI_HID_APPLE_OF=m
+ select HID
+ select CRC16
+
+endmenu
diff --git a/drivers/hid/spi-hid/Makefile b/drivers/hid/spi-hid/Makefile
new file mode 100644
index 000000000000..f276ee12cb94
--- /dev/null
+++ b/drivers/hid/spi-hid/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for SPI HID transport drivers
+#
+
+obj-$(CONFIG_SPI_HID_APPLE_CORE) += spi-hid-apple.o
+
+spi-hid-apple-objs = spi-hid-apple-core.o
+
+obj-$(CONFIG_SPI_HID_APPLE_OF) += spi-hid-apple-of.o
diff --git a/drivers/hid/spi-hid/spi-hid-apple-core.c b/drivers/hid/spi-hid/spi-hid-apple-core.c
new file mode 100644
index 000000000000..3b0bb65e357a
--- /dev/null
+++ b/drivers/hid/spi-hid/spi-hid-apple-core.c
@@ -0,0 +1,1029 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Apple SPI HID transport driver
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ *
+ * Based on: drivers/input/applespi.c
+ *
+ * MacBook (Pro) SPI keyboard and touchpad driver
+ *
+ * Copyright (c) 2015-2018 Federico Lorenzi
+ * Copyright (c) 2017-2018 Ronald Tschalär
+ *
+ */
+
+//#define DEBUG 2
+
+#include <asm/unaligned.h>
+#include <linux/crc16.h>
+#include <linux/delay.h>
+#include <linux/device/driver.h>
+#include <linux/hid.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/printk.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/wait.h>
+
+#include "spi-hid-apple.h"
+
+#define SPIHID_DEF_WAIT msecs_to_jiffies(100)
+
+#define SPIHID_MAX_INPUT_REPORT_SIZE 0x800
+
+/* support only keyboard, trackpad and management dev for now */
+#define SPIHID_MAX_DEVICES 3
+
+#define SPIHID_DEVICE_ID_MNGT 0x0
+#define SPIHID_DEVICE_ID_KBD 0x1
+#define SPIHID_DEVICE_ID_TP 0x2
+#define SPIHID_DEVICE_ID_INFO 0xd0
+
+#define SPIHID_READ_PACKET 0x20
+#define SPIHID_WRITE_PACKET 0x40
+
+#define SPIHID_DESC_MAX 512
+
+#define SPIHID_SET_LEDS 0x0151 /* caps lock */
+
+#define SPI_RW_CHG_DELAY_US 200 /* 'Inter Stage Us'? */
+
+static const u8 spi_hid_apple_booted[4] = { 0xa0, 0x80, 0x00, 0x00 };
+static const u8 spi_hid_apple_status_ok[4] = { 0xac, 0x27, 0x68, 0xd5 };
+
+struct spihid_interface {
+ struct hid_device *hid;
+ u8 hid_desc[SPIHID_DESC_MAX];
+ u32 hid_desc_len;
+ u32 id;
+ unsigned country;
+ u32 max_control_report_len;
+ u32 max_input_report_len;
+ u32 max_output_report_len;
+ u8 name[32];
+ bool ready;
+};
+
+struct spihid_input_report {
+ u8 *buf;
+ u32 length;
+ u32 offset;
+ u8 device;
+ u8 flags;
+};
+
+struct spihid_apple {
+ struct spi_device *spidev;
+
+ struct spihid_apple_ops *ops;
+
+ struct spihid_interface mngt;
+ struct spihid_interface kbd;
+ struct spihid_interface tp;
+
+ wait_queue_head_t wait;
+ struct mutex tx_lock; //< protects against concurrent SPI writes
+
+ struct spi_message rx_msg;
+ struct spi_message tx_msg;
+ struct spi_transfer rx_transfer;
+ struct spi_transfer tx_transfer;
+ struct spi_transfer status_transfer;
+
+ u8 *rx_buf;
+ u8 *tx_buf;
+ u8 *status_buf;
+
+ u8 vendor[32];
+ u8 product[64];
+ u8 serial[32];
+
+ u32 num_devices;
+
+ u32 vendor_id;
+ u32 product_id;
+ u32 version_number;
+
+ u8 msg_id;
+
+ /* fragmented HID report */
+ struct spihid_input_report report;
+
+ /* state tracking flags */
+ bool status_booted;
+};
+
+/**
+ * struct spihid_msg_hdr - common header of protocol messages.
+ *
+ * Each message begins with fixed header, followed by a message-type specific
+ * payload, and ends with a 16-bit crc. Because of the varying lengths of the
+ * payload, the crc is defined at the end of each payload struct, rather than
+ * in this struct.
+ *
+ * @unknown0: request type? output, input (0x10), feature, protocol
+ * @unknown1: maybe report id?
+ * @unknown2: mostly zero, in info request maybe device num
+ * @id: incremented on each message, rolls over after 255; there is a
+ * separate counter for each message type.
+ * @rsplen: response length (the exact nature of this field is quite
+ * speculative). On a request/write this is often the same as
+ * @length, though in some cases it has been seen to be much larger
+ * (e.g. 0x400); on a response/read this is the same as on the
+ * request; for reads that are not responses it is 0.
+ * @length: length of the remainder of the data in the whole message
+ * structure (after re-assembly in case of being split over
+ * multiple spi-packets), minus the trailing crc. The total size
+ * of a message is therefore @length + 10.
+ */
+
+struct spihid_msg_hdr {
+ u8 unknown0;
+ u8 unknown1;
+ u8 unknown2;
+ u8 id;
+ __le16 rsplen;
+ __le16 length;
+};
+
+/**
+ * struct spihid_transfer_packet - a complete spi packet; always 256 bytes. This carries
+ * the (parts of the) message in the data. But note that this does not
+ * necessarily contain a complete message, as in some cases (e.g. many
+ * fingers pressed) the message is split over multiple packets (see the
+ * @offset, @remain, and @length fields). In general the data parts in
+ * spihid_transfer_packet's are concatenated until @remain is 0, and the
+ * result is a message.
+ *
+ * @flags: 0x40 = write (to device), 0x20 = read (from device); note that
+ * the response to a write still has 0x40.
+ * @device: 1 = keyboard, 2 = touchpad
+ * @offset: specifies the offset of this packet's data in the complete
+ * message; i.e. > 0 indicates this is a continuation packet (in
+ * the second packet for a message split over multiple packets
+ * this would then be the same as the @length in the first packet)
+ * @remain: number of message bytes remaining in subsequents packets (in
+ * the first packet of a message split over two packets this would
+ * then be the same as the @length in the second packet)
+ * @length: length of the valid data in the @data in this packet
+ * @data: all or part of a message
+ * @crc16: crc over this whole structure minus this @crc16 field. This
+ * covers just this packet, even on multi-packet messages (in
+ * contrast to the crc in the message).
+ */
+struct spihid_transfer_packet {
+ u8 flags;
+ u8 device;
+ __le16 offset;
+ __le16 remain;
+ __le16 length;
+ u8 data[246];
+ __le16 crc16;
+};
+
+/*
+ * How HID is mapped onto the protocol is not fully clear. These are the
+ * known reports/requests:
+ *
+ * pkt.flags pkt.dev? msg.u0 msg.u1 msg.u2
+ * info 0x40 0xd0 0x20 0x01 0xd0
+ *
+ * info mngt: 0x40 0xd0 0x20 0x10 0x00
+ * info kbd: 0x40 0xd0 0x20 0x10 0x01
+ * info tp: 0x40 0xd0 0x20 0x10 0x02
+ *
+ * desc kbd: 0x40 0xd0 0x20 0x10 0x01
+ * desc trackpad: 0x40 0xd0 0x20 0x10 0x02
+ *
+ * mt mode: 0x40 0x02 0x52 0x02 0x00 set protocol?
+ * capslock led 0x40 0x01 0x51 0x01 0x00 output report
+ *
+ * report kbd: 0x20 0x01 0x10 0x01 0x00 input report
+ * report tp: 0x20 0x02 0x10 0x02 0x00 input report
+ *
+ */
+
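+/*
+ * Build and synchronously send a single write packet: the message header,
+ * payload and message crc16 are packed into pkt->data, and the whole
+ * 256-byte packet is then covered by its own crc16.
+ */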
+static int spihid_apple_request(struct spihid_apple *spihid, u8 target, u8 unk0,
+ u8 unk1, u8 unk2, u16 resp_len, u8 *buf,
+ size_t len)
+{
+ struct spihid_transfer_packet *pkt;
+ struct spihid_msg_hdr *hdr;
+ u16 crc;
+ int err;
+
+ /* known reports are small enough to fit in a single packet */
+ if (len > sizeof(pkt->data) - sizeof(*hdr) - sizeof(__le16))
+ return -EINVAL;
+
+ err = mutex_lock_interruptible(&spihid->tx_lock);
+ if (err < 0)
+ return err;
+
+ pkt = (struct spihid_transfer_packet *)spihid->tx_buf;
+
+ memset(pkt, 0, sizeof(*pkt));
+ pkt->flags = SPIHID_WRITE_PACKET;
+ pkt->device = target;
+ pkt->length = cpu_to_le16(sizeof(*hdr) + len + sizeof(__le16));
+
+ hdr = (struct spihid_msg_hdr *)&pkt->data[0];
+ hdr->unknown0 = unk0;
+ hdr->unknown1 = unk1;
+ hdr->unknown2 = unk2;
+ hdr->id = spihid->msg_id++;
+ hdr->rsplen = cpu_to_le16(resp_len);
+ hdr->length = cpu_to_le16(len);
+
+ if (len)
+ memcpy(pkt->data + sizeof(*hdr), buf, len);
+ crc = crc16(0, &pkt->data[0], sizeof(*hdr) + len);
+ put_unaligned_le16(crc, pkt->data + sizeof(*hdr) + len);
+
+ pkt->crc16 = cpu_to_le16(crc16(0, spihid->tx_buf,
+ offsetof(struct spihid_transfer_packet, crc16)));
+
+ err = spi_sync(spihid->spidev, &spihid->tx_msg);
+ mutex_unlock(&spihid->tx_lock);
+ if (err < 0)
+ return err;
+
+ return (int)len;
+}
+
+static struct spihid_apple *spihid_get_data(struct spihid_interface *idev)
+{
+ switch (idev->id) {
+ case SPIHID_DEVICE_ID_KBD:
+ return container_of(idev, struct spihid_apple, kbd);
+ case SPIHID_DEVICE_ID_TP:
+ return container_of(idev, struct spihid_apple, tp);
+ default:
+ return NULL;
+ }
+}
+
+static int apple_ll_start(struct hid_device *hdev)
+{
+ /* no-op, the SPI transport is already set up */
+ return 0;
+}
+
+static void apple_ll_stop(struct hid_device *hdev)
+{
+ /* no-op, devices will be destroyed on driver destruction */
+}
+
+static int apple_ll_open(struct hid_device *hdev)
+{
+ struct spihid_apple *spihid;
+ struct spihid_interface *idev = hdev->driver_data;
+
+ if (idev->hid_desc_len == 0) {
+ spihid = spihid_get_data(idev);
+ dev_warn(&spihid->spidev->dev,
+ "HID descriptor missing for dev %u", idev->id);
+ } else
+ idev->ready = true;
+
+ return 0;
+}
+
+static void apple_ll_close(struct hid_device *hdev)
+{
+ struct spihid_interface *idev = hdev->driver_data;
+ idev->ready = false;
+}
+
+static int apple_ll_parse(struct hid_device *hdev)
+{
+ struct spihid_interface *idev = hdev->driver_data;
+
+ return hid_parse_report(hdev, idev->hid_desc, idev->hid_desc_len);
+}
+
+static int apple_ll_raw_request(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf, size_t len,
+ unsigned char rtype, int reqtype)
+{
+ struct spihid_interface *idev = hdev->driver_data;
+ struct spihid_apple *spihid = spihid_get_data(idev);
+
+ dev_dbg(&spihid->spidev->dev,
+ "apple_ll_raw_request: device:%u reportnum:%hhu rtype:%hhu",
+ idev->id, reportnum, rtype);
+
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ return -EINVAL; // spihid_get_raw_report();
+ case HID_REQ_SET_REPORT:
+ if (buf[0] != reportnum)
+ return -EINVAL;
+ if (reportnum != idev->id) {
+ dev_warn(&spihid->spidev->dev,
+ "device:%u reportnum:"
+ "%hhu mismatch",
+ idev->id, reportnum);
+ return -EINVAL;
+ }
+ return spihid_apple_request(spihid, idev->id, 0x52, reportnum, 0x00, 2, buf, len);
+ default:
+ return -EIO;
+ }
+}
+
+static int apple_ll_output_report(struct hid_device *hdev, __u8 *buf,
+ size_t len)
+{
+ struct spihid_interface *idev = hdev->driver_data;
+ struct spihid_apple *spihid = spihid_get_data(idev);
+ if (!spihid)
+ return -ENODEV;
+
+ dev_dbg(&spihid->spidev->dev,
+ "apple_ll_output_report: device:%u len:%zu:",
+ idev->id, len);
+ // second idev->id should maybe be buf[0]?
+ return spihid_apple_request(spihid, idev->id, 0x51, idev->id, 0x00, 0, buf, len);
+}
+
+static struct hid_ll_driver apple_hid_ll = {
+ .start = &apple_ll_start,
+ .stop = &apple_ll_stop,
+ .open = &apple_ll_open,
+ .close = &apple_ll_close,
+ .parse = &apple_ll_parse,
+ .raw_request = &apple_ll_raw_request,
+ .output_report = &apple_ll_output_report,
+};
+
+static struct spihid_interface *spihid_get_iface(struct spihid_apple *spihid,
+ u32 iface)
+{
+ switch (iface) {
+ case SPIHID_DEVICE_ID_MNGT:
+ return &spihid->mngt;
+ case SPIHID_DEVICE_ID_KBD:
+ return &spihid->kbd;
+ case SPIHID_DEVICE_ID_TP:
+ return &spihid->tp;
+ default:
+ return NULL;
+ }
+}
+
+static int spihid_verify_msg(struct spihid_apple *spihid, u8 *buf, size_t len)
+{
+ u16 msg_crc, crc;
+ struct device *dev = &spihid->spidev->dev;
+
+ crc = crc16(0, buf, len - sizeof(__le16));
+ msg_crc = get_unaligned_le16(buf + len - sizeof(__le16));
+ if (crc != msg_crc) {
+ dev_warn_ratelimited(dev, "Read message crc mismatch\n");
+ return 0;
+ }
+ return 1;
+}
+
+static bool spihid_status_report(struct spihid_apple *spihid, u8 *pl,
+ size_t len)
+{
+ struct device *dev = &spihid->spidev->dev;
+ dev_dbg(dev, "%s: len: %zu", __func__, len);
+ if (len == 5 && pl[0] == 0xe0)
+ return true;
+
+ return false;
+}
+
+static bool spihid_process_input_report(struct spihid_apple *spihid, u32 device,
+ struct spihid_msg_hdr *hdr, u8 *payload,
+ size_t len)
+{
+ //dev_dbg(&spihid->spidev->dev, "input report: req:%hx iface:%u ", hdr->unknown0, device);
+ if (hdr->unknown0 != 0x10)
+ return false;
+
+ /* HID device as well but Vendor usage only, handle it internally for now */
+ if (device == 0) {
+ if (hdr->unknown1 == 0xe0) {
+ return spihid_status_report(spihid, payload, len);
+ }
+ } else if (device < SPIHID_MAX_DEVICES) {
+ struct spihid_interface *iface =
+ spihid_get_iface(spihid, device);
+ if (iface && iface->hid && iface->ready) {
+ hid_input_report(iface->hid, HID_INPUT_REPORT, payload,
+ len, 1);
+ return true;
+ }
+ } else
+ dev_dbg(&spihid->spidev->dev,
+ "unexpected iface:%u for input report", device);
+
+ return false;
+}
+
+struct spihid_device_info {
+ __le16 u0[2];
+ __le16 num_devices;
+ __le16 vendor_id;
+ __le16 product_id;
+ __le16 version_number;
+ __le16 vendor_str[2]; //< offset and string length
+ __le16 product_str[2]; //< offset and string length
+ __le16 serial_str[2]; //< offset and string length
+};
+
+static bool spihid_process_device_info(struct spihid_apple *spihid, u32 iface,
+ u8 *payload, size_t len)
+{
+ struct device *dev = &spihid->spidev->dev;
+
+ if (iface != SPIHID_DEVICE_ID_INFO)
+ return false;
+
+ if (spihid->vendor_id == 0 &&
+ len >= sizeof(struct spihid_device_info)) {
+ struct spihid_device_info *info =
+ (struct spihid_device_info *)payload;
+ u16 voff, vlen, poff, plen, soff, slen;
+ u32 num_devices;
+
+ num_devices = __le16_to_cpu(info->num_devices);
+
+ if (num_devices < SPIHID_MAX_DEVICES) {
+ dev_err(dev,
+ "Device info reports %u devices, expecting at least 3",
+ num_devices);
+ return false;
+ }
+ spihid->num_devices = num_devices;
+
+ if (spihid->num_devices > SPIHID_MAX_DEVICES) {
+ dev_info(
+ dev,
+ "limiting the number of devices to mngt, kbd and mouse");
+ spihid->num_devices = SPIHID_MAX_DEVICES;
+ }
+
+ spihid->vendor_id = __le16_to_cpu(info->vendor_id);
+ spihid->product_id = __le16_to_cpu(info->product_id);
+ spihid->version_number = __le16_to_cpu(info->version_number);
+
+ voff = __le16_to_cpu(info->vendor_str[0]);
+ vlen = __le16_to_cpu(info->vendor_str[1]);
+
+ if (voff < len && vlen <= len - voff &&
+ vlen < sizeof(spihid->vendor)) {
+ memcpy(spihid->vendor, payload + voff, vlen);
+ spihid->vendor[vlen] = '\0';
+ }
+
+ poff = __le16_to_cpu(info->product_str[0]);
+ plen = __le16_to_cpu(info->product_str[1]);
+
+ if (poff < len && plen <= len - poff &&
+ plen < sizeof(spihid->product)) {
+ memcpy(spihid->product, payload + poff, plen);
+ spihid->product[plen] = '\0';
+ }
+
+ soff = __le16_to_cpu(info->serial_str[0]);
+ slen = __le16_to_cpu(info->serial_str[1]);
+
+ if (soff < len && slen <= len - soff &&
+ slen < sizeof(spihid->serial)) {
+ memcpy(spihid->serial, payload + soff, slen);
+ spihid->serial[slen] = '\0';
+ }
+
+ wake_up_interruptible(&spihid->wait);
+ }
+ return true;
+}
+
+struct spihid_iface_info {
+ u8 u_0;
+ u8 interface_num;
+ u8 u_2;
+ u8 u_3;
+ u8 u_4;
+ u8 country_code;
+ __le16 max_input_report_len;
+ __le16 max_output_report_len;
+ __le16 max_control_report_len;
+ __le16 name_offset;
+ __le16 name_length;
+};
+
+static bool spihid_process_iface_info(struct spihid_apple *spihid, u32 num,
+ u8 *payload, size_t len)
+{
+ struct spihid_iface_info *info;
+ struct spihid_interface *iface = spihid_get_iface(spihid, num);
+ u32 name_off, name_len;
+
+ if (!iface)
+ return false;
+
+ if (!iface->max_input_report_len) {
+ if (len < sizeof(*info))
+ return false;
+
+ info = (struct spihid_iface_info *)payload;
+
+ iface->max_input_report_len =
+ le16_to_cpu(info->max_input_report_len);
+ iface->max_output_report_len =
+ le16_to_cpu(info->max_output_report_len);
+ iface->max_control_report_len =
+ le16_to_cpu(info->max_control_report_len);
+ iface->country = info->country_code;
+
+ name_off = le16_to_cpu(info->name_offset);
+ name_len = le16_to_cpu(info->name_length);
+
+ if (name_off < len && name_len <= len - name_off &&
+ name_len < sizeof(iface->name)) {
+ memcpy(iface->name, payload + name_off, name_len);
+ iface->name[name_len] = '\0';
+ }
+
+ dev_dbg(&spihid->spidev->dev, "Info for %s, country code: 0x%x",
+ iface->name, iface->country);
+
+ wake_up_interruptible(&spihid->wait);
+ }
+
+ return true;
+}
+
+static int spihid_register_hid_device(struct spihid_apple *spihid,
+ struct spihid_interface *idev, u8 device);
+
+static bool spihid_process_iface_hid_report_desc(struct spihid_apple *spihid,
+ u32 num, u8 *payload,
+ size_t len)
+{
+ struct spihid_interface *iface = spihid_get_iface(spihid, num);
+
+ if (!iface)
+ return false;
+
+ if (iface->hid_desc_len == 0) {
+ if (len > SPIHID_DESC_MAX)
+ return false;
+ memcpy(iface->hid_desc, payload, len);
+ iface->hid_desc_len = len;
+
+ /* do not register the mngt iface as HID device */
+ if (num > 0)
+ spihid_register_hid_device(spihid, iface, num);
+
+ wake_up_interruptible(&spihid->wait);
+ }
+ return true;
+}
+
+static bool spihid_process_response(struct spihid_apple *spihid,
+ struct spihid_msg_hdr *hdr, u8 *payload,
+ size_t len)
+{
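+ /*
+ * 0x20 in unknown0 marks a command response; unknown1 selects the
+ * payload type: device info (0x01), interface info (0x02) or a HID
+ * report descriptor (0x10).
+ */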
+ if (hdr->unknown0 == 0x20) {
+ switch (hdr->unknown1) {
+ case 0x01:
+ return spihid_process_device_info(spihid, hdr->unknown2,
+ payload, len);
+ case 0x02:
+ return spihid_process_iface_info(spihid, hdr->unknown2,
+ payload, len);
+ case 0x10:
+ return spihid_process_iface_hid_report_desc(
+ spihid, hdr->unknown2, payload, len);
+ default:
+ break;
+ }
+ }
+
+ return false;
+}
+
+static void spihid_process_message(struct spihid_apple *spihid, u8 *data,
+ size_t length, u8 device, u8 flags)
+{
+ struct device *dev = &spihid->spidev->dev;
+ struct spihid_msg_hdr *hdr;
+ bool handled = false;
+ u8 *payload;
+
+ if (!spihid_verify_msg(spihid, data, length))
+ return;
+
+ hdr = (struct spihid_msg_hdr *)data;
+
+ if (hdr->length == 0)
+ return;
+
+ payload = data + sizeof(struct spihid_msg_hdr);
+
+ switch (flags) {
+ case SPIHID_READ_PACKET:
+ handled = spihid_process_input_report(spihid, device, hdr,
+ payload, le16_to_cpu(hdr->length));
+ break;
+ case SPIHID_WRITE_PACKET:
+ handled = spihid_process_response(spihid, hdr, payload,
+ le16_to_cpu(hdr->length));
+ break;
+ default:
+ break;
+ }
+
+#if defined(DEBUG) && DEBUG > 1
+ {
+ dev_dbg(dev,
+ "R msg: req:%02hhx rep:%02hhx dev:%02hhx id:%hu len:%hu\n",
+ hdr->unknown0, hdr->unknown1, hdr->unknown2, hdr->id,
+ hdr->length);
+ print_hex_dump_debug("spihid msg: ", DUMP_PREFIX_OFFSET, 16, 1,
+ payload, le16_to_cpu(hdr->length)), true);
+ }
+#else
+ if (!handled) {
+ dev_dbg(dev,
+ "R unhandled msg: req:%02hhx rep:%02hhx dev:%02hhx id:%hu len:%hu\n",
+ hdr->unknown0, hdr->unknown1, hdr->unknown2, hdr->id,
+ hdr->length);
+ print_hex_dump_debug("spihid msg: ", DUMP_PREFIX_OFFSET, 16, 1,
+ payload, le16_to_cpu(hdr->length), true);
+ }
+#endif
+}
+
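+/*
+ * Reassemble a message that spans several SPI transfer packets. Each packet
+ * carries the offset of its payload within the message and the number of
+ * bytes still outstanding; a message starts at offset 0 and is complete
+ * once the accumulated offset reaches its total length.
+ */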
+static void spihid_assemble_message(struct spihid_apple *spihid,
+ struct spihid_transfer_packet *pkt)
+{
+ size_t length, offset, remain;
+ struct device *dev = &spihid->spidev->dev;
+ struct spihid_input_report *rep = &spihid->report;
+
+ length = le16_to_cpu(pkt->length);
+ remain = le16_to_cpu(pkt->remain);
+ offset = le16_to_cpu(pkt->offset);
+
+ if (offset + length + remain > U16_MAX)
+ return;
+
+ if (pkt->device != rep->device || pkt->flags != rep->flags ||
+ offset != rep->offset) {
+ rep->device = 0;
+ rep->flags = 0;
+ rep->offset = 0;
+ rep->length = 0;
+ }
+
+ if (offset == 0) {
+ if (rep->offset != 0) {
+ dev_warn(dev, "incomplete report off:%u len:%u",
+ rep->offset, rep->length);
+ }
+ memcpy(rep->buf, pkt->data, length);
+ rep->offset = length;
+ rep->length = length + remain;
+ rep->device = pkt->device;
+ rep->flags = pkt->flags;
+ } else if (offset == rep->offset) {
+ if (offset + length + remain != rep->length) {
+ dev_warn(dev, "incomplete report off:%u len:%u",
+ rep->offset, rep->length);
+ return;
+ }
+ memcpy(rep->buf + offset, pkt->data, length);
+ rep->offset += length;
+
+ if (rep->offset == rep->length) {
+ spihid_process_message(spihid, rep->buf, rep->length,
+ rep->device, rep->flags);
+ rep->device = 0;
+ rep->flags = 0;
+ rep->offset = 0;
+ rep->length = 0;
+ }
+ }
+}
+
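+/*
+ * Validate and dispatch one received transfer packet: check the packet CRC,
+ * recognise the firmware boot notification, then hand single-packet messages
+ * to spihid_process_message() and fragments to spihid_assemble_message().
+ */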
+static void spihid_process_read(struct spihid_apple *spihid)
+{
+ u16 crc;
+ size_t length;
+ struct device *dev = &spihid->spidev->dev;
+ struct spihid_transfer_packet *pkt;
+
+ pkt = (struct spihid_transfer_packet *)spihid->rx_buf;
+
+ /* check transfer packet crc */
+ crc = crc16(0, spihid->rx_buf,
+ offsetof(struct spihid_transfer_packet, crc16));
+ if (crc != le16_to_cpu(pkt->crc16)) {
+ dev_warn_ratelimited(dev, "Read package crc mismatch\n");
+ return;
+ }
+
+ length = le16_to_cpu(pkt->length);
+
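+ /* a valid message is at least the message header plus the trailing crc16 */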
+ if (length < sizeof(struct spihid_msg_hdr) + 2) {
+ if (length == sizeof(spi_hid_apple_booted) &&
+ !memcmp(pkt->data, spi_hid_apple_booted, length)) {
+ if (!spihid->status_booted) {
+ spihid->status_booted = true;
+ wake_up_interruptible(&spihid->wait);
+ }
+ } else {
+ dev_info(dev, "R short packet: len:%zu\n", length);
+ print_hex_dump_debug("spihid pkt:", DUMP_PREFIX_OFFSET, 16, 1,
+ pkt->data, length, false);
+ }
+ return;
+ }
+
+#if defined(DEBUG) && DEBUG > 1
+ dev_dbg(dev,
+ "R pkt: flags:%02hhx dev:%02hhx off:%hu remain:%hu, len:%zu\n",
+ pkt->flags, pkt->device, pkt->offset, pkt->remain, length);
+#if defined(DEBUG) && DEBUG > 2
+ print_hex_dump_debug("spihid pkt: ", DUMP_PREFIX_OFFSET, 16, 1,
+ spihid->rx_buf,
+ sizeof(struct spihid_transfer_packet), true);
+#endif
+#endif
+
+ if (length > sizeof(pkt->data)) {
+ dev_warn_ratelimited(dev, "Invalid pkt len:%zu", length);
+ return;
+ }
+
+ /* short message */
+ if (pkt->offset == 0 && pkt->remain == 0) {
+ spihid_process_message(spihid, pkt->data, length, pkt->device,
+ pkt->flags);
+ } else {
+ spihid_assemble_message(spihid, pkt);
+ }
+}
+
+static void spihid_read_packet_sync(struct spihid_apple *spihid)
+{
+ int err;
+
+ err = spi_sync(spihid->spidev, &spihid->rx_msg);
+ if (!err) {
+ spihid_process_read(spihid);
+ } else {
+ dev_warn(&spihid->spidev->dev, "RX failed: %d\n", err);
+ }
+}
+
+irqreturn_t spihid_apple_core_irq(int irq, void *data)
+{
+ struct spi_device *spi = data;
+ struct spihid_apple *spihid = spi_get_drvdata(spi);
+
+ spihid_read_packet_sync(spihid);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(spihid_apple_core_irq);
+
+static void spihid_apple_setup_spi_msgs(struct spihid_apple *spihid)
+{
+ memset(&spihid->rx_transfer, 0, sizeof(spihid->rx_transfer));
+
+ spihid->rx_transfer.rx_buf = spihid->rx_buf;
+ spihid->rx_transfer.len = sizeof(struct spihid_transfer_packet);
+
+ spi_message_init(&spihid->rx_msg);
+ spi_message_add_tail(&spihid->rx_transfer, &spihid->rx_msg);
+
+ memset(&spihid->tx_transfer, 0, sizeof(spihid->tx_transfer));
+ memset(&spihid->status_transfer, 0, sizeof(spihid->status_transfer));
+
+ spihid->tx_transfer.tx_buf = spihid->tx_buf;
+ spihid->tx_transfer.len = sizeof(struct spihid_transfer_packet);
+ spihid->tx_transfer.delay.unit = SPI_DELAY_UNIT_USECS;
+ spihid->tx_transfer.delay.value = SPI_RW_CHG_DELAY_US;
+
+ spihid->status_transfer.rx_buf = spihid->status_buf;
+ spihid->status_transfer.len = sizeof(spi_hid_apple_status_ok);
+
+ spi_message_init(&spihid->tx_msg);
+ spi_message_add_tail(&spihid->tx_transfer, &spihid->tx_msg);
+ spi_message_add_tail(&spihid->status_transfer, &spihid->tx_msg);
+}
+
+static int spihid_apple_setup_spi(struct spihid_apple *spihid)
+{
+ spihid_apple_setup_spi_msgs(spihid);
+
+ return spihid->ops->power_on(spihid->ops);
+}
+
+static int spihid_register_hid_device(struct spihid_apple *spihid,
+ struct spihid_interface *iface, u8 device)
+{
+ int ret;
+ struct hid_device *hid;
+
+ iface->id = device;
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid))
+ return PTR_ERR(hid);
+
+ strscpy(hid->name, spihid->product, sizeof(hid->name));
+ snprintf(hid->phys, sizeof(hid->phys), "%s (%hhx)",
+ dev_name(&spihid->spidev->dev), device);
+ strscpy(hid->uniq, spihid->serial, sizeof(hid->uniq));
+
+ hid->ll_driver = &apple_hid_ll;
+ hid->bus = BUS_SPI;
+ hid->vendor = spihid->vendor_id;
+ hid->product = spihid->product_id;
+ hid->version = spihid->version_number;
+
+ if (device == SPIHID_DEVICE_ID_KBD)
+ hid->type = HID_TYPE_SPI_KEYBOARD;
+ else if (device == SPIHID_DEVICE_ID_TP)
+ hid->type = HID_TYPE_SPI_MOUSE;
+
+ hid->country = iface->country;
+ hid->dev.parent = &spihid->spidev->dev;
+ hid->driver_data = iface;
+
+ ret = hid_add_device(hid);
+ if (ret < 0) {
+ hid_destroy_device(hid);
+ dev_warn(&spihid->spidev->dev,
+ "Failed to register hid device %hhu", device);
+ return ret;
+ }
+
+ iface->hid = hid;
+
+ return 0;
+}
+
+static void spihid_destroy_hid_device(struct spihid_interface *iface)
+{
+ if (iface->hid) {
+ hid_destroy_device(iface->hid);
+ iface->hid = NULL;
+ }
+ iface->ready = false;
+}
+
+int spihid_apple_core_probe(struct spi_device *spi, struct spihid_apple_ops *ops)
+{
+ struct device *dev = &spi->dev;
+ struct spihid_apple *spihid;
+ int err, i;
+
+ if (!ops || !ops->power_on || !ops->power_off || !ops->enable_irq || !ops->disable_irq)
+ return -EINVAL;
+
+ spihid = devm_kzalloc(dev, sizeof(*spihid), GFP_KERNEL);
+ if (!spihid)
+ return -ENOMEM;
+
+ spihid->ops = ops;
+ spihid->spidev = spi;
+
+ /* init spi */
+ spi_set_drvdata(spi, spihid);
+
+ /* allocate SPI buffers */
+ spihid->rx_buf = devm_kmalloc(
+ &spi->dev, sizeof(struct spihid_transfer_packet), GFP_KERNEL);
+ spihid->tx_buf = devm_kmalloc(
+ &spi->dev, sizeof(struct spihid_transfer_packet), GFP_KERNEL);
+ spihid->status_buf = devm_kmalloc(
+ &spi->dev, sizeof(spi_hid_apple_status_ok), GFP_KERNEL);
+
+ if (!spihid->rx_buf || !spihid->tx_buf || !spihid->status_buf)
+ return -ENOMEM;
+
+ spihid->report.buf =
+ devm_kmalloc(dev, SPIHID_MAX_INPUT_REPORT_SIZE, GFP_KERNEL);
+
+ spihid->kbd.hid_desc = devm_kmalloc(dev, SPIHID_DESC_MAX, GFP_KERNEL);
+ spihid->tp.hid_desc = devm_kmalloc(dev, SPIHID_DESC_MAX, GFP_KERNEL);
+
+ if (!spihid->report.buf || !spihid->kbd.hid_desc ||
+ !spihid->tp.hid_desc)
+ return -ENOMEM;
+
+ init_waitqueue_head(&spihid->wait);
+
+ mutex_init(&spihid->tx_lock);
+
+ /* Init spi transfer buffers and power device on */
+ err = spihid_apple_setup_spi(spihid);
+ if (err < 0)
+ goto error;
+
+ /* enable HID irq */
+ spihid->ops->enable_irq(spihid->ops);
+
+ /* wait for boot message */
+ err = wait_event_interruptible_timeout(spihid->wait,
+ spihid->status_booted,
+ msecs_to_jiffies(1000));
+ if (err == 0)
+ err = -ENODEV;
+ if (err < 0) {
+ dev_err(dev, "waiting for device boot failed: %d", err);
+ goto error;
+ }
+
+ /* request device information */
+ dev_dbg(dev, "request device info");
+ spihid_apple_request(spihid, 0xd0, 0x20, 0x01, 0xd0, 0, NULL, 0);
+ err = wait_event_interruptible_timeout(spihid->wait, spihid->vendor_id,
+ SPIHID_DEF_WAIT);
+ if (err == 0)
+ err = -ENODEV;
+ if (err < 0) {
+ dev_err(dev, "waiting for device info failed: %d", err);
+ goto error;
+ }
+
+ /* request interface information */
+ for (i = 0; i < spihid->num_devices; i++) {
+ struct spihid_interface *iface = spihid_get_iface(spihid, i);
+ if (!iface)
+ continue;
+ dev_dbg(dev, "request interface info 0x%02x", i);
+ spihid_apple_request(spihid, 0xd0, 0x20, 0x02, i,
+ SPIHID_DESC_MAX, NULL, 0);
+ err = wait_event_interruptible_timeout(
+ spihid->wait, iface->max_input_report_len,
+ SPIHID_DEF_WAIT);
+ }
+
+ /* request HID report descriptors */
+ for (i = 1; i < spihid->num_devices; i++) {
+ struct spihid_interface *iface = spihid_get_iface(spihid, i);
+ if (!iface)
+ continue;
+ dev_dbg(dev, "request hid report desc 0x%02x", i);
+ spihid_apple_request(spihid, 0xd0, 0x20, 0x10, i,
+ SPIHID_DESC_MAX, NULL, 0);
+ wait_event_interruptible_timeout(
+ spihid->wait, iface->hid_desc_len, SPIHID_DEF_WAIT);
+ }
+
+ return 0;
+error:
+ return err;
+}
+EXPORT_SYMBOL_GPL(spihid_apple_core_probe);
+
+void spihid_apple_core_remove(struct spi_device *spi)
+{
+ struct spihid_apple *spihid = spi_get_drvdata(spi);
+
+ /* destroy input devices */
+
+ spihid_destroy_hid_device(&spihid->tp);
+ spihid_destroy_hid_device(&spihid->kbd);
+
+ /* disable irq */
+ spihid->ops->disable_irq(spihid->ops);
+
+ /* power SPI device down */
+ spihid->ops->power_off(spihid->ops);
+}
+EXPORT_SYMBOL_GPL(spihid_apple_core_remove);
+
+void spihid_apple_core_shutdown(struct spi_device *spi)
+{
+ struct spihid_apple *spihid = spi_get_drvdata(spi);
+
+ /* disable irq */
+ spihid->ops->disable_irq(spihid->ops);
+
+ /* power SPI device down */
+ spihid->ops->power_off(spihid->ops);
+}
+EXPORT_SYMBOL_GPL(spihid_apple_core_shutdown);
+
+MODULE_DESCRIPTION("Apple SPI HID transport driver");
+MODULE_AUTHOR("Janne Grunau <j@jannau.net>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/spi-hid/spi-hid-apple-of.c b/drivers/hid/spi-hid/spi-hid-apple-of.c
new file mode 100644
index 000000000000..db76774eea7e
--- /dev/null
+++ b/drivers/hid/spi-hid/spi-hid-apple-of.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Apple SPI HID transport driver - Open Firmware
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+
+#include "spi-hid-apple.h"
+
+
+struct spihid_apple_of {
+ struct spihid_apple_ops ops;
+
+ struct gpio_desc *enable_gpio;
+ int irq;
+};
+
+static int spihid_apple_of_power_on(struct spihid_apple_ops *ops)
+{
+ struct spihid_apple_of *sh_of = container_of(ops, struct spihid_apple_of, ops);
+
+ /* reset the controller on boot */
+ gpiod_direction_output(sh_of->enable_gpio, 1);
+ msleep(5);
+ gpiod_direction_output(sh_of->enable_gpio, 0);
+ msleep(5);
+ /* turn SPI device on */
+ gpiod_direction_output(sh_of->enable_gpio, 1);
+ msleep(50);
+
+ return 0;
+}
+
+static int spihid_apple_of_power_off(struct spihid_apple_ops *ops)
+{
+ struct spihid_apple_of *sh_of = container_of(ops, struct spihid_apple_of, ops);
+
+ /* turn SPI device off */
+ gpiod_direction_output(sh_of->enable_gpio, 0);
+
+ return 0;
+}
+
+static int spihid_apple_of_enable_irq(struct spihid_apple_ops *ops)
+{
+ struct spihid_apple_of *sh_of = container_of(ops, struct spihid_apple_of, ops);
+
+ enable_irq(sh_of->irq);
+
+ return 0;
+}
+
+static int spihid_apple_of_disable_irq(struct spihid_apple_ops *ops)
+{
+ struct spihid_apple_of *sh_of = container_of(ops, struct spihid_apple_of, ops);
+
+ disable_irq(sh_of->irq);
+
+ return 0;
+}
+
+static int spihid_apple_of_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct spihid_apple_of *spihid_of;
+ int err;
+
+ dev_warn(dev, "%s:%d", __func__, __LINE__);
+
+ spihid_of = devm_kzalloc(dev, sizeof(*spihid_of), GFP_KERNEL);
+ if (!spihid_of)
+ return -ENOMEM;
+
+ spihid_of->ops.power_on = spihid_apple_of_power_on;
+ spihid_of->ops.power_off = spihid_apple_of_power_off;
+ spihid_of->ops.enable_irq = spihid_apple_of_enable_irq;
+ spihid_of->ops.disable_irq = spihid_apple_of_disable_irq;
+
+ spihid_of->enable_gpio = devm_gpiod_get_index(dev, "spien", 0, GPIOD_ASIS);
+ if (IS_ERR(spihid_of->enable_gpio)) {
+ err = PTR_ERR(spihid_of->enable_gpio);
+ dev_err(dev, "failed to get 'spien' gpio pin: %d", err);
+ return err;
+ }
+
+ spihid_of->irq = of_irq_get(dev->of_node, 0);
+ if (spihid_of->irq < 0) {
+ err = spihid_of->irq;
+ dev_err(dev, "failed to get 'extended-irq': %d", err);
+ return err;
+ }
+ err = devm_request_threaded_irq(dev, spihid_of->irq, NULL,
+ spihid_apple_core_irq, IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ "spi-hid-apple-irq", spi);
+ if (err < 0) {
+ dev_err(dev, "failed to request extended-irq %d: %d",
+ spihid_of->irq, err);
+ return err;
+ }
+
+ return spihid_apple_core_probe(spi, &spihid_of->ops);
+}
+
+static const struct of_device_id spihid_apple_of_match[] = {
+ { .compatible = "apple,spi-hid-transport" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, spihid_apple_of_match);
+
+static const struct spi_device_id spihid_apple_of_id[] = {
+ { "spi-hid-transport", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, spihid_apple_of_id);
+
+static struct spi_driver spihid_apple_of_driver = {
+ .driver = {
+ .name = "spi-hid-apple-of",
+ //.pm = &spi_hid_apple_of_pm,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(spihid_apple_of_match),
+ },
+
+ .id_table = spihid_apple_of_id,
+ .probe = spihid_apple_of_probe,
+ .remove = spihid_apple_core_remove,
+ .shutdown = spihid_apple_core_shutdown,
+};
+
+module_spi_driver(spihid_apple_of_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/spi-hid/spi-hid-apple.h b/drivers/hid/spi-hid/spi-hid-apple.h
new file mode 100644
index 000000000000..2d9554e8a5f8
--- /dev/null
+++ b/drivers/hid/spi-hid/spi-hid-apple.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+
+#ifndef SPI_HID_APPLE_H
+#define SPI_HID_APPLE_H
+
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+
+/**
+ * struct spihid_apple_ops - Ops to control the device from the core driver.
+ *
+ * @power_on: reset and power the device on.
+ * @power_off: power the device off.
+ * @enable_irq: enable irq or ACPI gpe.
+ * @disable_irq: disable irq or ACPI gpe.
+ */
+
+struct spihid_apple_ops {
+ int (*power_on)(struct spihid_apple_ops *ops);
+ int (*power_off)(struct spihid_apple_ops *ops);
+ int (*enable_irq)(struct spihid_apple_ops *ops);
+ int (*disable_irq)(struct spihid_apple_ops *ops);
+};
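+
+/*
+ * Transport glue drivers embed this struct in their private state and
+ * recover it in the callbacks via container_of(); a minimal sketch, with
+ * hypothetical names (see spi-hid-apple-of.c for a real implementation):
+ *
+ * struct my_glue {
+ * struct spihid_apple_ops ops;
+ * };
+ *
+ * static int my_power_on(struct spihid_apple_ops *ops)
+ * {
+ * struct my_glue *glue = container_of(ops, struct my_glue, ops);
+ * ...
+ * }
+ */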
+
+irqreturn_t spihid_apple_core_irq(int irq, void *data);
+
+int spihid_apple_core_probe(struct spi_device *spi, struct spihid_apple_ops *ops);
+void spihid_apple_core_remove(struct spi_device *spi);
+void spihid_apple_core_shutdown(struct spi_device *spi);
+
+#endif /* SPI_HID_APPLE_H */
diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c
index 9028ffb58cc0..457a40957568 100644
--- a/drivers/i2c/busses/i2c-pasemi-core.c
+++ b/drivers/i2c/busses/i2c-pasemi-core.c
@@ -5,6 +5,7 @@
* SMBus host driver for PA Semi PWRficient
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
@@ -21,25 +22,36 @@
#define REG_MTXFIFO 0x00
#define REG_MRXFIFO 0x04
#define REG_SMSTA 0x14
+#define REG_IMASK 0x18
#define REG_CTL 0x1c
#define REG_REV 0x28
/* Register defs */
-#define MTXFIFO_READ 0x00000400
-#define MTXFIFO_STOP 0x00000200
-#define MTXFIFO_START 0x00000100
-#define MTXFIFO_DATA_M 0x000000ff
-
-#define MRXFIFO_EMPTY 0x00000100
-#define MRXFIFO_DATA_M 0x000000ff
-
-#define SMSTA_XEN 0x08000000
-#define SMSTA_MTN 0x00200000
-
-#define CTL_MRR 0x00000400
-#define CTL_MTR 0x00000200
-#define CTL_EN 0x00000800
-#define CTL_CLK_M 0x000000ff
+#define MTXFIFO_READ BIT(10)
+#define MTXFIFO_STOP BIT(9)
+#define MTXFIFO_START BIT(8)
+#define MTXFIFO_DATA_M GENMASK(7, 0)
+
+#define MRXFIFO_EMPTY BIT(8)
+#define MRXFIFO_DATA_M GENMASK(7, 0)
+
+#define SMSTA_XIP BIT(28)
+#define SMSTA_XEN BIT(27)
+#define SMSTA_JMD BIT(25)
+#define SMSTA_JAM BIT(24)
+#define SMSTA_MTO BIT(23)
+#define SMSTA_MTA BIT(22)
+#define SMSTA_MTN BIT(21)
+#define SMSTA_MRNE BIT(19)
+#define SMSTA_MTE BIT(16)
+#define SMSTA_TOM BIT(6)
+
+#define CTL_EN BIT(11)
+#define CTL_MRR BIT(10)
+#define CTL_MTR BIT(9)
+#define CTL_CLK_M GENMASK(7, 0)
+
+#define TRANSFER_TIMEOUT_MS 100
static inline void reg_write(struct pasemi_smbus *smbus, int reg, int val)
{
@@ -66,38 +78,84 @@ static void pasemi_reset(struct pasemi_smbus *smbus)
val |= CTL_EN;
reg_write(smbus, REG_CTL, val);
+ reinit_completion(&smbus->irq_completion);
}
-static void pasemi_smb_clear(struct pasemi_smbus *smbus)
+static int pasemi_smb_clear(struct pasemi_smbus *smbus)
{
unsigned int status;
+ int timeout = TRANSFER_TIMEOUT_MS;
status = reg_read(smbus, REG_SMSTA);
+
+ /* First wait for the bus to go idle */
+ while ((status & (SMSTA_XIP | SMSTA_JAM)) && timeout--) {
+ msleep(1);
+ status = reg_read(smbus, REG_SMSTA);
+ }
+
+ if (timeout < 0) {
+ dev_warn(smbus->dev, "Bus is still stuck (status 0x%08x)\n", status);
+ return -EIO;
+ }
+
+ /* If any badness happened or there is data in the FIFOs, reset the FIFOs */
+ if ((status & (SMSTA_MRNE | SMSTA_JMD | SMSTA_MTO | SMSTA_TOM | SMSTA_MTN | SMSTA_MTA)) ||
+ !(status & SMSTA_MTE))
+ pasemi_reset(smbus);
+
+ /* Clear the flags */
reg_write(smbus, REG_SMSTA, status);
+
+ return 0;
}
static int pasemi_smb_waitready(struct pasemi_smbus *smbus)
{
- int timeout = 10;
+ int timeout = TRANSFER_TIMEOUT_MS;
unsigned int status;
- status = reg_read(smbus, REG_SMSTA);
-
- while (!(status & SMSTA_XEN) && timeout--) {
- msleep(1);
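+ /* prefer the interrupt-driven completion; fall back to polling SMSTA */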
+ if (smbus->use_irq) {
+ reinit_completion(&smbus->irq_completion);
+ /* XEN should be set when a transaction terminates, whether due to error or not */
+ reg_write(smbus, REG_IMASK, SMSTA_XEN);
+ wait_for_completion_timeout(&smbus->irq_completion, msecs_to_jiffies(timeout));
+ reg_write(smbus, REG_IMASK, 0);
+ status = reg_read(smbus, REG_SMSTA);
+ } else {
status = reg_read(smbus, REG_SMSTA);
+ while (!(status & SMSTA_XEN) && timeout--) {
+ msleep(1);
+ status = reg_read(smbus, REG_SMSTA);
+ }
}
- /* Got NACK? */
- if (status & SMSTA_MTN)
- return -ENXIO;
+ /* Controller timeout? */
+ if (status & SMSTA_TOM) {
+ dev_warn(smbus->dev, "Controller timeout, status 0x%08x\n", status);
+ return -EIO;
+ }
- if (timeout < 0) {
- dev_warn(smbus->dev, "Timeout, status 0x%08x\n", status);
- reg_write(smbus, REG_SMSTA, status);
+ /* Peripheral timeout? */
+ if (status & SMSTA_MTO) {
+ dev_warn(smbus->dev, "Peripheral timeout, status 0x%08x\n", status);
return -ETIME;
}
+ /* Still stuck in a transaction? */
+ if (status & SMSTA_XIP) {
+ dev_warn(smbus->dev, "Bus stuck, status 0x%08x\n", status);
+ return -EIO;
+ }
+
+ /* Arbitration loss? */
+ if (status & SMSTA_MTA)
+ return -EBUSY;
+
+ /* Got NACK? */
+ if (status & SMSTA_MTN)
+ return -ENXIO;
+
/* Clear XEN */
reg_write(smbus, REG_SMSTA, SMSTA_XEN);
@@ -158,7 +216,8 @@ static int pasemi_i2c_xfer(struct i2c_adapter *adapter,
struct pasemi_smbus *smbus = adapter->algo_data;
int ret, i;
- pasemi_smb_clear(smbus);
+ if (pasemi_smb_clear(smbus))
+ return -EIO;
ret = 0;
@@ -181,7 +240,8 @@ static int pasemi_smb_xfer(struct i2c_adapter *adapter,
addr <<= 1;
read_flag = read_write == I2C_SMBUS_READ;
- pasemi_smb_clear(smbus);
+ if (pasemi_smb_clear(smbus))
+ return -EIO;
switch (size) {
case I2C_SMBUS_QUICK:
@@ -344,10 +404,14 @@ int pasemi_i2c_common_probe(struct pasemi_smbus *smbus)
/* set up the sysfs linkage to our parent device */
smbus->adapter.dev.parent = smbus->dev;
+ smbus->use_irq = 0;
+ init_completion(&smbus->irq_completion);
if (smbus->hw_rev != PASEMI_HW_REV_PCI)
smbus->hw_rev = reg_read(smbus, REG_REV);
+ reg_write(smbus, REG_IMASK, 0);
+
pasemi_reset(smbus);
error = devm_i2c_add_adapter(smbus->dev, &smbus->adapter);
@@ -356,3 +420,12 @@ int pasemi_i2c_common_probe(struct pasemi_smbus *smbus)
return 0;
}
+
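+/*
+ * Fires once a transaction terminates (IMASK is armed with SMSTA_XEN in
+ * pasemi_smb_waitready()); mask further interrupts and wake the waiter.
+ */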
+irqreturn_t pasemi_irq_handler(int irq, void *dev_id)
+{
+ struct pasemi_smbus *smbus = dev_id;
+
+ reg_write(smbus, REG_IMASK, 0);
+ complete(&smbus->irq_completion);
+ return IRQ_HANDLED;
+}
diff --git a/drivers/i2c/busses/i2c-pasemi-core.h b/drivers/i2c/busses/i2c-pasemi-core.h
index 4655124a37f3..88821f4e8a9f 100644
--- a/drivers/i2c/busses/i2c-pasemi-core.h
+++ b/drivers/i2c/busses/i2c-pasemi-core.h
@@ -7,6 +7,7 @@
#include <linux/i2c-smbus.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/completion.h>
#define PASEMI_HW_REV_PCI -1
@@ -16,6 +17,10 @@ struct pasemi_smbus {
void __iomem *ioaddr;
unsigned int clk_div;
int hw_rev;
+ int use_irq;
+ struct completion irq_completion;
};
int pasemi_i2c_common_probe(struct pasemi_smbus *smbus);
+
+irqreturn_t pasemi_irq_handler(int irq, void *dev_id);
diff --git a/drivers/i2c/busses/i2c-pasemi-platform.c b/drivers/i2c/busses/i2c-pasemi-platform.c
index 88a54aaf7e3c..e35945a91dbe 100644
--- a/drivers/i2c/busses/i2c-pasemi-platform.c
+++ b/drivers/i2c/busses/i2c-pasemi-platform.c
@@ -49,6 +49,7 @@ static int pasemi_platform_i2c_probe(struct platform_device *pdev)
struct pasemi_smbus *smbus;
u32 frequency;
int error;
+ int irq_num;
data = devm_kzalloc(dev, sizeof(struct pasemi_platform_i2c_data),
GFP_KERNEL);
@@ -82,6 +83,11 @@ static int pasemi_platform_i2c_probe(struct platform_device *pdev)
if (error)
goto out_clk_disable;
+ irq_num = platform_get_irq(pdev, 0);
+ error = devm_request_irq(smbus->dev, irq_num, pasemi_irq_handler,
+ 0, "pasemi_apple_i2c", smbus);
+
+ if (!error)
+ smbus->use_irq = 1;
platform_set_drvdata(pdev, data);
return 0;
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 9f088900f863..e8fa44d641b4 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -929,4 +929,16 @@ config INPUT_STPMIC1_ONKEY
To compile this driver as a module, choose M here: the
module will be called stpmic1_onkey.
+config INPUT_MACSMC_HID
+ tristate "Apple Mac SMC lid/buttons"
+ depends on APPLE_SMC
+ default ARCH_APPLE
+ help
+ Say Y here if you want to use the input events delivered via the
+ SMC controller on Apple Mac machines using the macsmc driver.
+ This includes lid open/close and the power button.
+
+ To compile this driver as a module, choose M here: the
+ module will be called macsmc-hid.
+
endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 6abefc41037b..daa4b3a64f61 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -49,6 +49,7 @@ obj-$(CONFIG_INPUT_IQS7222) += iqs7222.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
+obj-$(CONFIG_INPUT_MACSMC_HID) += macsmc-hid.o
obj-$(CONFIG_INPUT_MAX77650_ONKEY) += max77650-onkey.o
obj-$(CONFIG_INPUT_MAX77693_HAPTIC) += max77693-haptic.o
obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
diff --git a/drivers/input/misc/macsmc-hid.c b/drivers/input/misc/macsmc-hid.c
new file mode 100644
index 000000000000..49296cbb70cc
--- /dev/null
+++ b/drivers/input/misc/macsmc-hid.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SMC input event driver
+ * Copyright The Asahi Linux Contributors
+ *
+ * This driver exposes HID events from the SMC as an input device.
+ * This includes the lid open/close and power button notifications.
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/macsmc.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeup.h>
+#include <linux/reboot.h>
+
+struct macsmc_hid {
+ struct device *dev;
+ struct apple_smc *smc;
+ struct input_dev *input;
+ struct notifier_block nb;
+ bool wakeup_mode;
+};
+
+#define SMC_EV_BTN 0x7201
+#define SMC_EV_LID 0x7203
+
+#define BTN_POWER 0x01
+#define BTN_TOUCHID 0x06
+#define BTN_POWER_HELD1 0xfe
+#define BTN_POWER_HELD2 0x00
+
+static int macsmc_hid_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct macsmc_hid *smchid = container_of(nb, struct macsmc_hid, nb);
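+ /* SMC events pack a 16-bit event type and two data bytes into one word */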
+ u16 type = event >> 16;
+ u8 d1 = (event >> 8) & 0xff;
+ u8 d2 = event & 0xff;
+
+ switch (type) {
+ case SMC_EV_BTN:
+ switch (d1) {
+ case BTN_POWER:
+ case BTN_TOUCHID:
+ if (smchid->wakeup_mode) {
+ if (d2) {
+ dev_info(smchid->dev, "Button wakeup\n");
+ pm_wakeup_hard_event(smchid->dev);
+ }
+ } else {
+ input_report_key(smchid->input, KEY_POWER, d2);
+ input_sync(smchid->input);
+ }
+ break;
+ case BTN_POWER_HELD1:
+ /*
+ * TODO: is this pre-warning useful?
+ */
+ if (d2)
+ dev_warn(smchid->dev, "Power button held down\n");
+ break;
+ case BTN_POWER_HELD2:
+ /*
+ * If we get here, we have about 4 seconds before forced shutdown.
+ * Try to do an emergency shutdown to make sure the NVMe cache is
+ * flushed. macOS actually does this by panicing (!)...
+ */
+ if (d2) {
+ dev_crit(smchid->dev, "Triggering forced shutdown!\n");
+ if (kernel_can_power_off())
+ kernel_power_off();
+ else /* Missing macsmc-reboot driver? */
+ kernel_restart("SMC power button triggered restart");
+ }
+ break;
+ default:
+ dev_info(smchid->dev, "Unknown SMC button event: %02x %02x\n", d1, d2);
+ break;
+ }
+ return NOTIFY_OK;
+ case SMC_EV_LID:
+ if (smchid->wakeup_mode && !d1) {
+ dev_info(smchid->dev, "Lid wakeup\n");
+ pm_wakeup_hard_event(smchid->dev);
+ }
+ input_report_switch(smchid->input, SW_LID, d1);
+ input_sync(smchid->input);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int macsmc_hid_probe(struct platform_device *pdev)
+{
+ struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent);
+ struct macsmc_hid *smchid;
+ bool have_lid, have_power;
+ int ret;
+
+ have_lid = apple_smc_key_exists(smc, SMC_KEY(MSLD));
+ have_power = apple_smc_key_exists(smc, SMC_KEY(bHLD));
+
+ if (!have_lid && !have_power)
+ return -ENODEV;
+
+ smchid = devm_kzalloc(&pdev->dev, sizeof(*smchid), GFP_KERNEL);
+ if (!smchid)
+ return -ENOMEM;
+
+ smchid->dev = &pdev->dev;
+ smchid->smc = smc;
+ platform_set_drvdata(pdev, smchid);
+
+ smchid->input = devm_input_allocate_device(&pdev->dev);
+ if (!smchid->input)
+ return -ENOMEM;
+
+ smchid->input->phys = "macsmc-hid (0)";
+ smchid->input->name = "Apple SMC power/lid events";
+
+ if (have_lid)
+ input_set_capability(smchid->input, EV_SW, SW_LID);
+ if (have_power)
+ input_set_capability(smchid->input, EV_KEY, KEY_POWER);
+
+ ret = input_register_device(smchid->input);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register input device: %d\n", ret);
+ return ret;
+ }
+
+ if (have_lid) {
+ u8 val;
+
+ ret = apple_smc_read_u8(smc, SMC_KEY(MSLD), &val);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to read initial lid state\n");
+ } else {
+ input_report_switch(smchid->input, SW_LID, val);
+ }
+ }
+ if (have_power) {
+ u32 val;
+
+ ret = apple_smc_read_u32(smc, SMC_KEY(bHLD), &val);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to read initial power button state\n");
+ } else {
+ input_report_key(smchid->input, KEY_POWER, val & 1);
+ }
+ }
+
+ input_sync(smchid->input);
+
+ smchid->nb.notifier_call = macsmc_hid_event;
+ apple_smc_register_notifier(smc, &smchid->nb);
+
+ device_init_wakeup(&pdev->dev, 1);
+
+ return 0;
+}
+
+static int macsmc_hid_pm_prepare(struct device *dev)
+{
+ struct macsmc_hid *smchid = dev_get_drvdata(dev);
+
+ smchid->wakeup_mode = true;
+ return 0;
+}
+
+static void macsmc_hid_pm_complete(struct device *dev)
+{
+ struct macsmc_hid *smchid = dev_get_drvdata(dev);
+
+ smchid->wakeup_mode = false;
+}
+
+static const struct dev_pm_ops macsmc_hid_pm_ops = {
+ .prepare = macsmc_hid_pm_prepare,
+ .complete = macsmc_hid_pm_complete,
+};
+
+static struct platform_driver macsmc_hid_driver = {
+ .driver = {
+ .name = "macsmc-hid",
+ .owner = THIS_MODULE,
+ .pm = &macsmc_hid_pm_ops,
+ },
+ .probe = macsmc_hid_probe,
+};
+module_platform_driver(macsmc_hid_driver);
+
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple SMC GPIO driver");
+MODULE_ALIAS("platform:macsmc-hid");
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index dc5f7a156ff5..b4fd4a651f1c 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -306,6 +306,7 @@ config APPLE_DART
depends on ARCH_APPLE || (COMPILE_TEST && !GENERIC_ATOMIC64)
select IOMMU_API
select IOMMU_IO_PGTABLE_DART
+ select OF_IOMMU
default ARCH_APPLE
help
Support for Apple DART (Device Address Resolution Table) IOMMUs
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 4f4a323be0d0..f8648dcea646 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -33,58 +33,157 @@
#include <linux/types.h>
#include "dma-iommu.h"
+#include "io-pgtable-dart.h"
-#define DART_MAX_STREAMS 16
+#define DART_MAX_STREAMS 256
#define DART_MAX_TTBR 4
#define MAX_DARTS_PER_DEVICE 2
-#define DART_STREAM_ALL 0xffff
+/* Common registers */
#define DART_PARAMS1 0x00
-#define DART_PARAMS_PAGE_SHIFT GENMASK(27, 24)
+#define DART_PARAMS1_PAGE_SHIFT GENMASK(27, 24)
#define DART_PARAMS2 0x04
-#define DART_PARAMS_BYPASS_SUPPORT BIT(0)
+#define DART_PARAMS2_BYPASS_SUPPORT BIT(0)
-#define DART_STREAM_COMMAND 0x20
-#define DART_STREAM_COMMAND_BUSY BIT(2)
-#define DART_STREAM_COMMAND_INVALIDATE BIT(20)
+/* T8020/T6000 registers */
-#define DART_STREAM_SELECT 0x34
+#define DART_T8020_STREAM_COMMAND 0x20
+#define DART_T8020_STREAM_COMMAND_BUSY BIT(2)
+#define DART_T8020_STREAM_COMMAND_INVALIDATE BIT(20)
-#define DART_ERROR 0x40
-#define DART_ERROR_STREAM GENMASK(27, 24)
-#define DART_ERROR_CODE GENMASK(11, 0)
-#define DART_ERROR_FLAG BIT(31)
+#define DART_T8020_STREAM_SELECT 0x34
-#define DART_ERROR_READ_FAULT BIT(4)
-#define DART_ERROR_WRITE_FAULT BIT(3)
-#define DART_ERROR_NO_PTE BIT(2)
-#define DART_ERROR_NO_PMD BIT(1)
-#define DART_ERROR_NO_TTBR BIT(0)
+#define DART_T8020_ERROR 0x40
+#define DART_T8020_ERROR_STREAM GENMASK(27, 24)
+#define DART_T8020_ERROR_CODE GENMASK(11, 0)
+#define DART_T8020_ERROR_FLAG BIT(31)
-#define DART_CONFIG 0x60
-#define DART_CONFIG_LOCK BIT(15)
+#define DART_T8020_ERROR_READ_FAULT BIT(4)
+#define DART_T8020_ERROR_WRITE_FAULT BIT(3)
+#define DART_T8020_ERROR_NO_PTE BIT(2)
+#define DART_T8020_ERROR_NO_PMD BIT(1)
+#define DART_T8020_ERROR_NO_TTBR BIT(0)
-#define DART_STREAM_COMMAND_BUSY_TIMEOUT 100
-
-#define DART_ERROR_ADDR_HI 0x54
-#define DART_ERROR_ADDR_LO 0x50
-
-#define DART_STREAMS_ENABLE 0xfc
+#define DART_T8020_CONFIG 0x60
+#define DART_T8020_CONFIG_LOCK BIT(15)
-#define DART_TCR(sid) (0x100 + 4 * (sid))
-#define DART_TCR_TRANSLATE_ENABLE BIT(7)
-#define DART_TCR_BYPASS0_ENABLE BIT(8)
-#define DART_TCR_BYPASS1_ENABLE BIT(12)
+#define DART_STREAM_COMMAND_BUSY_TIMEOUT 100
-#define DART_TTBR(sid, idx) (0x200 + 16 * (sid) + 4 * (idx))
-#define DART_TTBR_VALID BIT(31)
-#define DART_TTBR_SHIFT 12
+#define DART_T8020_ERROR_ADDR_HI 0x54
+#define DART_T8020_ERROR_ADDR_LO 0x50
+
+#define DART_T8020_STREAMS_ENABLE 0xfc
+
+#define DART_T8020_TCR 0x100
+#define DART_T8020_TCR_TRANSLATE_ENABLE BIT(7)
+#define DART_T8020_TCR_BYPASS_DART BIT(8)
+#define DART_T8020_TCR_BYPASS_DAPF BIT(12)
+
+#define DART_T8020_TTBR 0x200
+#define DART_T8020_TTBR_VALID BIT(31)
+#define DART_T8020_TTBR_ADDR_OFF 0
+#define DART_T8020_TTBR_SHIFT 12
+
+/* T8110 registers */
+
+#define DART_T8110_PARAMS3 0x08
+#define DART_T8110_PARAMS3_PA_WIDTH GENMASK(29, 24)
+#define DART_T8110_PARAMS3_VA_WIDTH GENMASK(21, 16)
+#define DART_T8110_PARAMS3_VER_MAJ GENMASK(15, 8)
+#define DART_T8110_PARAMS3_VER_MIN GENMASK(7, 0)
+
+#define DART_T8110_PARAMS4 0x0c
+#define DART_T8110_PARAMS4_NUM_CLIENTS GENMASK(24, 16)
+#define DART_T8110_PARAMS4_NUM_SIDS GENMASK(8, 0)
+
+#define DART_T8110_TLB_CMD 0x80
+#define DART_T8110_TLB_CMD_BUSY BIT(31)
+#define DART_T8110_TLB_CMD_OP GENMASK(10, 8)
+#define DART_T8110_TLB_CMD_OP_FLUSH_ALL 0
+#define DART_T8110_TLB_CMD_OP_FLUSH_SID 1
+#define DART_T8110_TLB_CMD_STREAM GENMASK(7, 0)
+
+#define DART_T8110_ERROR 0x100
+#define DART_T8110_ERROR_STREAM GENMASK(27, 20)
+#define DART_T8110_ERROR_CODE GENMASK(14, 0)
+#define DART_T8110_ERROR_FLAG BIT(31)
+
+#define DART_T8110_ERROR_MASK 0x104
+
+#define DART_T8110_ERROR_READ_FAULT BIT(5)
+#define DART_T8110_ERROR_WRITE_FAULT BIT(4)
+#define DART_T8110_ERROR_NO_PTE BIT(3)
+#define DART_T8110_ERROR_NO_PMD BIT(2)
+#define DART_T8110_ERROR_NO_PGD BIT(1)
+#define DART_T8110_ERROR_NO_TTBR BIT(0)
+
+#define DART_T8110_ERROR_ADDR_LO 0x170
+#define DART_T8110_ERROR_ADDR_HI 0x174
+
+#define DART_T8110_PROTECT 0x200
+#define DART_T8110_UNPROTECT 0x204
+#define DART_T8110_PROTECT_LOCK 0x208
+#define DART_T8110_PROTECT_TTBR_TCR BIT(0)
+
+#define DART_T8110_ENABLE_STREAMS 0xc00
+#define DART_T8110_DISABLE_STREAMS 0xc20
+
+#define DART_T8110_TCR 0x1000
+#define DART_T8110_TCR_REMAP GENMASK(11, 8)
+#define DART_T8110_TCR_REMAP_EN BIT(7)
+#define DART_T8110_TCR_BYPASS_DAPF BIT(2)
+#define DART_T8110_TCR_BYPASS_DART BIT(1)
+#define DART_T8110_TCR_TRANSLATE_ENABLE BIT(0)
+
+#define DART_T8110_TTBR 0x1400
+#define DART_T8110_TTBR_VALID BIT(0)
+#define DART_T8110_TTBR_ADDR_OFF 2
+#define DART_T8110_TTBR_SHIFT 14
+
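+/* per-stream TCR/TTBR registers are arrays of 32-bit words indexed by SID */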
+#define DART_TCR(dart, sid) ((dart)->hw->tcr + ((sid) << 2))
+
+#define DART_TTBR(dart, sid, idx) ((dart)->hw->ttbr + \
+ (((dart)->hw->ttbr_count * (sid)) << 2) + \
+ ((idx) << 2))
+
+struct apple_dart_stream_map;
+
+enum dart_type {
+ DART_T8020,
+ DART_T6000,
+ DART_T8110,
+};
struct apple_dart_hw {
+ enum dart_type type;
+ irqreturn_t (*irq_handler)(int irq, void *dev);
+ int (*invalidate_tlb)(struct apple_dart_stream_map *stream_map);
+
u32 oas;
enum io_pgtable_fmt fmt;
+
+ int max_sid_count;
+
+ u64 lock;
+ u64 lock_bit;
+
+ u64 error;
+
+ u64 enable_streams;
+ u64 disable_streams;
+
+ u64 tcr;
+ u64 tcr_enabled;
+ u64 tcr_disabled;
+ u64 tcr_bypass;
+
+ u64 ttbr;
+ u64 ttbr_valid;
+ u64 ttbr_addr_off;
+ u64 ttbr_shift;
+ int ttbr_count;
};
/*
@@ -100,6 +199,7 @@ struct apple_dart_hw {
* @pgsize: pagesize supported by this DART
* @supports_bypass: indicates if this DART supports bypass mode
* @force_bypass: force bypass mode due to pagesize mismatch?
+ * @locked: indicates if this DART is locked
* @sid2group: maps stream ids to iommu_groups
* @iommu: iommu core device
*/
@@ -115,12 +215,18 @@ struct apple_dart {
spinlock_t lock;
+ u32 oas;
u32 pgsize;
+ u32 num_streams;
u32 supports_bypass : 1;
u32 force_bypass : 1;
+ u32 locked : 1;
struct iommu_group *sid2group[DART_MAX_STREAMS];
struct iommu_device iommu;
+
+ u32 save_tcr[DART_MAX_STREAMS];
+ u32 save_ttbr[DART_MAX_STREAMS][DART_MAX_TTBR];
};
/*
@@ -140,11 +246,11 @@ struct apple_dart {
*/
struct apple_dart_stream_map {
struct apple_dart *dart;
- unsigned long sidmap;
+ DECLARE_BITMAP(sidmap, DART_MAX_STREAMS);
};
struct apple_dart_atomic_stream_map {
struct apple_dart *dart;
- atomic64_t sidmap;
+ atomic_long_t sidmap[BITS_TO_LONGS(DART_MAX_STREAMS)];
};
/*
@@ -202,50 +308,60 @@ static struct apple_dart_domain *to_dart_domain(struct iommu_domain *dom)
static void
apple_dart_hw_enable_translation(struct apple_dart_stream_map *stream_map)
{
+ struct apple_dart *dart = stream_map->dart;
int sid;
- for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
- writel(DART_TCR_TRANSLATE_ENABLE,
- stream_map->dart->regs + DART_TCR(sid));
+ WARN_ON(stream_map->dart->locked);
+ for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
+ writel(dart->hw->tcr_enabled, dart->regs + DART_TCR(dart, sid));
}
static void apple_dart_hw_disable_dma(struct apple_dart_stream_map *stream_map)
{
+ struct apple_dart *dart = stream_map->dart;
int sid;
- for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
- writel(0, stream_map->dart->regs + DART_TCR(sid));
+ WARN_ON(stream_map->dart->locked);
+ for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
+ writel(dart->hw->tcr_disabled, dart->regs + DART_TCR(dart, sid));
}
static void
apple_dart_hw_enable_bypass(struct apple_dart_stream_map *stream_map)
{
+ struct apple_dart *dart = stream_map->dart;
int sid;
+ WARN_ON(stream_map->dart->locked);
WARN_ON(!stream_map->dart->supports_bypass);
- for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
- writel(DART_TCR_BYPASS0_ENABLE | DART_TCR_BYPASS1_ENABLE,
- stream_map->dart->regs + DART_TCR(sid));
+ for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
+ writel(dart->hw->tcr_bypass,
+ dart->regs + DART_TCR(dart, sid));
}
static void apple_dart_hw_set_ttbr(struct apple_dart_stream_map *stream_map,
u8 idx, phys_addr_t paddr)
{
+ struct apple_dart *dart = stream_map->dart;
int sid;
- WARN_ON(paddr & ((1 << DART_TTBR_SHIFT) - 1));
- for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
- writel(DART_TTBR_VALID | (paddr >> DART_TTBR_SHIFT),
- stream_map->dart->regs + DART_TTBR(sid, idx));
+ WARN_ON(stream_map->dart->locked);
+ WARN_ON(paddr & ((1 << dart->hw->ttbr_shift) - 1));
+ for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
+ writel(dart->hw->ttbr_valid |
+ (paddr >> dart->hw->ttbr_shift) << dart->hw->ttbr_addr_off,
+ dart->regs + DART_TTBR(dart, sid, idx));
}
static void apple_dart_hw_clear_ttbr(struct apple_dart_stream_map *stream_map,
u8 idx)
{
+ struct apple_dart *dart = stream_map->dart;
int sid;
- for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
- writel(0, stream_map->dart->regs + DART_TTBR(sid, idx));
+ WARN_ON(stream_map->dart->locked);
+ for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
+ writel(0, dart->regs + DART_TTBR(dart, sid, idx));
}
static void
@@ -253,12 +369,12 @@ apple_dart_hw_clear_all_ttbrs(struct apple_dart_stream_map *stream_map)
{
int i;
- for (i = 0; i < DART_MAX_TTBR; ++i)
+ for (i = 0; i < stream_map->dart->hw->ttbr_count; ++i)
apple_dart_hw_clear_ttbr(stream_map, i);
}
static int
-apple_dart_hw_stream_command(struct apple_dart_stream_map *stream_map,
+apple_dart_t8020_hw_stream_command(struct apple_dart_stream_map *stream_map,
u32 command)
{
unsigned long flags;
@@ -267,12 +383,12 @@ apple_dart_hw_stream_command(struct apple_dart_stream_map *stream_map,
spin_lock_irqsave(&stream_map->dart->lock, flags);
- writel(stream_map->sidmap, stream_map->dart->regs + DART_STREAM_SELECT);
- writel(command, stream_map->dart->regs + DART_STREAM_COMMAND);
+ writel(stream_map->sidmap[0], stream_map->dart->regs + DART_T8020_STREAM_SELECT);
+ writel(command, stream_map->dart->regs + DART_T8020_STREAM_COMMAND);
ret = readl_poll_timeout_atomic(
- stream_map->dart->regs + DART_STREAM_COMMAND, command_reg,
- !(command_reg & DART_STREAM_COMMAND_BUSY), 1,
+ stream_map->dart->regs + DART_T8020_STREAM_COMMAND, command_reg,
+ !(command_reg & DART_T8020_STREAM_COMMAND_BUSY), 1,
DART_STREAM_COMMAND_BUSY_TIMEOUT);
spin_unlock_irqrestore(&stream_map->dart->lock, flags);
@@ -280,7 +396,45 @@ apple_dart_hw_stream_command(struct apple_dart_stream_map *stream_map,
if (ret) {
dev_err(stream_map->dart->dev,
"busy bit did not clear after command %x for streams %lx\n",
- command, stream_map->sidmap);
+ command, stream_map->sidmap[0]);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+apple_dart_t8110_hw_tlb_command(struct apple_dart_stream_map *stream_map,
+ u32 command)
+{
+ struct apple_dart *dart = stream_map->dart;
+ unsigned long flags;
+ int ret = 0;
+ int sid;
+
+ spin_lock_irqsave(&dart->lock, flags);
+
+ for_each_set_bit(sid, stream_map->sidmap, dart->num_streams) {
+ u32 val = FIELD_PREP(DART_T8110_TLB_CMD_OP, command) |
+ FIELD_PREP(DART_T8110_TLB_CMD_STREAM, sid);
+ writel(val, dart->regs + DART_T8110_TLB_CMD);
+
+ ret = readl_poll_timeout_atomic(
+ dart->regs + DART_T8110_TLB_CMD, val,
+ !(val & DART_T8110_TLB_CMD_BUSY), 1,
+ DART_STREAM_COMMAND_BUSY_TIMEOUT);
+
+ if (ret)
+ break;
+
+ }
+
+ spin_unlock_irqrestore(&dart->lock, flags);
+
+ if (ret) {
+ dev_err(stream_map->dart->dev,
+ "busy bit did not clear after command %x for stream %d\n",
+ command, sid);
return ret;
}
@@ -288,48 +442,56 @@ apple_dart_hw_stream_command(struct apple_dart_stream_map *stream_map,
}
static int
-apple_dart_hw_invalidate_tlb(struct apple_dart_stream_map *stream_map)
+apple_dart_t8020_hw_invalidate_tlb(struct apple_dart_stream_map *stream_map)
{
- return apple_dart_hw_stream_command(stream_map,
- DART_STREAM_COMMAND_INVALIDATE);
+ return apple_dart_t8020_hw_stream_command(
+ stream_map, DART_T8020_STREAM_COMMAND_INVALIDATE);
+}
+
+static int
+apple_dart_t8110_hw_invalidate_tlb(struct apple_dart_stream_map *stream_map)
+{
+ return apple_dart_t8110_hw_tlb_command(
+ stream_map, DART_T8110_TLB_CMD_OP_FLUSH_SID);
}
static int apple_dart_hw_reset(struct apple_dart *dart)
{
- u32 config;
struct apple_dart_stream_map stream_map;
-
- config = readl(dart->regs + DART_CONFIG);
- if (config & DART_CONFIG_LOCK) {
- dev_err(dart->dev, "DART is locked down until reboot: %08x\n",
- config);
- return -EINVAL;
- }
+ int i;
stream_map.dart = dart;
- stream_map.sidmap = DART_STREAM_ALL;
+ bitmap_zero(stream_map.sidmap, DART_MAX_STREAMS);
+ bitmap_set(stream_map.sidmap, 0, dart->num_streams);
apple_dart_hw_disable_dma(&stream_map);
apple_dart_hw_clear_all_ttbrs(&stream_map);
/* enable all streams globally since TCR is used to control isolation */
- writel(DART_STREAM_ALL, dart->regs + DART_STREAMS_ENABLE);
+ for (i = 0; i < BITS_TO_U32(dart->num_streams); i++)
+ writel(U32_MAX, dart->regs + dart->hw->enable_streams + 4 * i);
/* clear any pending errors before the interrupt is unmasked */
- writel(readl(dart->regs + DART_ERROR), dart->regs + DART_ERROR);
+ writel(readl(dart->regs + dart->hw->error), dart->regs + dart->hw->error);
- return apple_dart_hw_invalidate_tlb(&stream_map);
+ if (dart->hw->type == DART_T8110)
+ writel(0, dart->regs + DART_T8110_ERROR_MASK);
+
+ return dart->hw->invalidate_tlb(&stream_map);
}
static void apple_dart_domain_flush_tlb(struct apple_dart_domain *domain)
{
- int i;
+ int i, j;
struct apple_dart_atomic_stream_map *domain_stream_map;
struct apple_dart_stream_map stream_map;
for_each_stream_map(i, domain, domain_stream_map) {
stream_map.dart = domain_stream_map->dart;
- stream_map.sidmap = atomic64_read(&domain_stream_map->sidmap);
- apple_dart_hw_invalidate_tlb(&stream_map);
+
+ for (j = 0; j < BITS_TO_LONGS(stream_map.dart->num_streams); j++)
+ stream_map.sidmap[j] = atomic_long_read(&domain_stream_map->sidmap[j]);
+
+ stream_map.dart->hw->invalidate_tlb(&stream_map);
}
}
@@ -396,24 +558,62 @@ apple_dart_setup_translation(struct apple_dart_domain *domain,
struct io_pgtable_cfg *pgtbl_cfg =
&io_pgtable_ops_to_pgtable(domain->pgtbl_ops)->cfg;
- for (i = 0; i < pgtbl_cfg->apple_dart_cfg.n_ttbrs; ++i)
- apple_dart_hw_set_ttbr(stream_map, i,
- pgtbl_cfg->apple_dart_cfg.ttbr[i]);
- for (; i < DART_MAX_TTBR; ++i)
- apple_dart_hw_clear_ttbr(stream_map, i);
+ /* Locked DARTs are set up by the bootloader. */
+ if (!stream_map->dart->locked) {
+ for (i = 0; i < pgtbl_cfg->apple_dart_cfg.n_ttbrs; ++i)
+ apple_dart_hw_set_ttbr(stream_map, i,
+ pgtbl_cfg->apple_dart_cfg.ttbr[i]);
+ for (; i < stream_map->dart->hw->ttbr_count; ++i)
+ apple_dart_hw_clear_ttbr(stream_map, i);
- apple_dart_hw_enable_translation(stream_map);
- apple_dart_hw_invalidate_tlb(stream_map);
+ apple_dart_hw_enable_translation(stream_map);
+ }
+ stream_map->dart->hw->invalidate_tlb(stream_map);
+}
+
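+/*
+ * Replay the device tree's translated reserved regions into a locked DART's
+ * domain so that DMA set up by the bootloader keeps working.
+ */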
+static int apple_dart_setup_resv_locked(struct iommu_domain *domain,
+ struct device *dev, size_t pgsize)
+{
+ struct iommu_resv_region *region;
+ LIST_HEAD(resv_regions);
+ int ret = 0;
+
+ of_iommu_get_resv_regions(dev, &resv_regions);
+ list_for_each_entry(region, &resv_regions, list) {
+ size_t mapped = 0;
+
+ /* Only map translated reserved regions */
+ if (region->type != IOMMU_RESV_TRANSLATED)
+ continue;
+
+ while (mapped < region->length) {
+ phys_addr_t paddr = region->start + mapped;
+ unsigned long iova = region->dva + mapped;
+ size_t length = region->length - mapped;
+ size_t pgcount = length / pgsize;
+
+ ret = apple_dart_map_pages(domain, iova,
+ paddr, pgsize, pgcount,
+ region->prot, GFP_KERNEL, &mapped);
+
+ if (ret)
+ goto end_put;
+ }
+ }
+end_put:
+ iommu_put_resv_regions(dev, &resv_regions);
+ return ret;
}
static int apple_dart_finalize_domain(struct iommu_domain *domain,
+ struct device *dev,
struct apple_dart_master_cfg *cfg)
{
struct apple_dart_domain *dart_domain = to_dart_domain(domain);
struct apple_dart *dart = cfg->stream_maps[0].dart;
struct io_pgtable_cfg pgtbl_cfg;
int ret = 0;
- int i;
+ int i, j;
mutex_lock(&dart_domain->init_lock);
@@ -422,18 +622,40 @@ static int apple_dart_finalize_domain(struct iommu_domain *domain,
for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
dart_domain->stream_maps[i].dart = cfg->stream_maps[i].dart;
- atomic64_set(&dart_domain->stream_maps[i].sidmap,
- cfg->stream_maps[i].sidmap);
+ for (j = 0; j < BITS_TO_LONGS(dart->num_streams); j++)
+ atomic_long_set(&dart_domain->stream_maps[i].sidmap[j],
+ cfg->stream_maps[i].sidmap[j]);
}
pgtbl_cfg = (struct io_pgtable_cfg){
.pgsize_bitmap = dart->pgsize,
.ias = 32,
- .oas = dart->hw->oas,
+ .oas = dart->oas,
.coherent_walk = 1,
.iommu_dev = dart->dev,
};
+ if (dart->locked) {
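+ /*
+ * The bootloader set up and locked this DART; read back the live
+ * TTBR so the domain inherits its page table instead of installing
+ * a new one.
+ */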
+ unsigned long *sidmap;
+ int sid;
+ phys_addr_t phys;
+ u32 ttbr;
+
+ /* Locked DARTs can only have a single stream bound */
+ sidmap = cfg->stream_maps[0].sidmap;
+ sid = find_first_bit(sidmap, dart->num_streams);
+
+ WARN_ON(sid >= dart->num_streams || bitmap_weight(sidmap, dart->num_streams) > 1);
+ ttbr = readl(dart->regs + DART_TTBR(dart, sid, 0));
+
+ WARN_ON(!(ttbr & dart->hw->ttbr_valid));
+ ttbr &= ~dart->hw->ttbr_valid;
+
+ phys = ((phys_addr_t) ttbr) << dart->hw->ttbr_shift;
+ pgtbl_cfg.apple_dart_cfg.ttbr[0] = phys;
+ pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_APPLE_LOCKED;
+ }
+
dart_domain->pgtbl_ops =
alloc_io_pgtable_ops(dart->hw->fmt, &pgtbl_cfg, domain);
if (!dart_domain->pgtbl_ops) {
@@ -448,6 +670,11 @@ static int apple_dart_finalize_domain(struct iommu_domain *domain,
dart_domain->finalized = true;
+ if (dart->locked) {
+ /* TODO: error handling */
+ ret = apple_dart_setup_resv_locked(domain, dev, dart->pgsize);
+ io_pgtable_dart_setup_locked(dart_domain->pgtbl_ops);
+ }
done:
mutex_unlock(&dart_domain->init_lock);
return ret;
@@ -458,7 +685,7 @@ apple_dart_mod_streams(struct apple_dart_atomic_stream_map *domain_maps,
struct apple_dart_stream_map *master_maps,
bool add_streams)
{
- int i;
+ int i, j;
for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
if (domain_maps[i].dart != master_maps[i].dart)
@@ -468,12 +695,14 @@ apple_dart_mod_streams(struct apple_dart_atomic_stream_map *domain_maps,
for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
if (!domain_maps[i].dart)
break;
- if (add_streams)
- atomic64_or(master_maps[i].sidmap,
- &domain_maps[i].sidmap);
- else
- atomic64_and(~master_maps[i].sidmap,
- &domain_maps[i].sidmap);
+ for (j = 0; j < BITS_TO_LONGS(domain_maps[i].dart->num_streams); j++) {
+ if (add_streams)
+ atomic_long_or(master_maps[i].sidmap[j],
+ &domain_maps[i].sidmap[j]);
+ else
+ atomic_long_and(~master_maps[i].sidmap[j],
+ &domain_maps[i].sidmap[j]);
+ }
}
return 0;
@@ -500,15 +729,16 @@ static int apple_dart_attach_dev(struct iommu_domain *domain,
struct apple_dart_stream_map *stream_map;
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
struct apple_dart_domain *dart_domain = to_dart_domain(domain);
+ struct apple_dart *dart0 = cfg->stream_maps[0].dart;
- if (cfg->stream_maps[0].dart->force_bypass &&
- domain->type != IOMMU_DOMAIN_IDENTITY)
+ if (dart0->force_bypass && domain->type != IOMMU_DOMAIN_IDENTITY)
return -EINVAL;
- if (!cfg->stream_maps[0].dart->supports_bypass &&
- domain->type == IOMMU_DOMAIN_IDENTITY)
+ if (!dart0->supports_bypass && domain->type == IOMMU_DOMAIN_IDENTITY)
+ return -EINVAL;
+ if (dart0->locked && domain->type != IOMMU_DOMAIN_DMA)
return -EINVAL;
- ret = apple_dart_finalize_domain(domain, cfg);
+ ret = apple_dart_finalize_domain(domain, dev, cfg);
if (ret)
return ret;
@@ -637,14 +867,14 @@ static int apple_dart_of_xlate(struct device *dev, struct of_phandle_args *args)
for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
if (cfg->stream_maps[i].dart == dart) {
- cfg->stream_maps[i].sidmap |= 1 << sid;
+ set_bit(sid, cfg->stream_maps[i].sidmap);
return 0;
}
}
for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
if (!cfg->stream_maps[i].dart) {
cfg->stream_maps[i].dart = dart;
- cfg->stream_maps[i].sidmap = 1 << sid;
+ set_bit(sid, cfg->stream_maps[i].sidmap);
return 0;
}
}
@@ -663,7 +893,7 @@ static void apple_dart_release_group(void *iommu_data)
mutex_lock(&apple_dart_groups_lock);
for_each_stream_map(i, group_master_cfg, stream_map)
- for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
+ for_each_set_bit(sid, stream_map->sidmap, stream_map->dart->num_streams)
stream_map->dart->sid2group[sid] = NULL;
kfree(iommu_data);
@@ -682,7 +912,7 @@ static struct iommu_group *apple_dart_device_group(struct device *dev)
mutex_lock(&apple_dart_groups_lock);
for_each_stream_map(i, cfg, stream_map) {
- for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) {
+ for_each_set_bit(sid, stream_map->sidmap, stream_map->dart->num_streams) {
struct iommu_group *stream_group =
stream_map->dart->sid2group[sid];
@@ -721,7 +951,7 @@ static struct iommu_group *apple_dart_device_group(struct device *dev)
apple_dart_release_group);
for_each_stream_map(i, cfg, stream_map)
- for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
+ for_each_set_bit(sid, stream_map->sidmap, stream_map->dart->num_streams)
stream_map->dart->sid2group[sid] = group;
res = group;
@@ -734,10 +964,15 @@ out:
static int apple_dart_def_domain_type(struct device *dev)
{
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
+ struct apple_dart *dart = cfg->stream_maps[0].dart;
- if (cfg->stream_maps[0].dart->force_bypass)
+ WARN_ON(dart->force_bypass && dart->locked);
+
+ if (dart->force_bypass)
return IOMMU_DOMAIN_IDENTITY;
- if (!cfg->stream_maps[0].dart->supports_bypass)
+ if (dart->locked)
+ return IOMMU_DOMAIN_DMA;
+ if (!dart->supports_bypass)
return IOMMU_DOMAIN_DMA;
return 0;
@@ -791,30 +1026,30 @@ static const struct iommu_ops apple_dart_iommu_ops = {
}
};
-static irqreturn_t apple_dart_irq(int irq, void *dev)
+static irqreturn_t apple_dart_t8020_irq(int irq, void *dev)
{
struct apple_dart *dart = dev;
const char *fault_name = NULL;
- u32 error = readl(dart->regs + DART_ERROR);
- u32 error_code = FIELD_GET(DART_ERROR_CODE, error);
- u32 addr_lo = readl(dart->regs + DART_ERROR_ADDR_LO);
- u32 addr_hi = readl(dart->regs + DART_ERROR_ADDR_HI);
+ u32 error = readl(dart->regs + DART_T8020_ERROR);
+ u32 error_code = FIELD_GET(DART_T8020_ERROR_CODE, error);
+ u32 addr_lo = readl(dart->regs + DART_T8020_ERROR_ADDR_LO);
+ u32 addr_hi = readl(dart->regs + DART_T8020_ERROR_ADDR_HI);
u64 addr = addr_lo | (((u64)addr_hi) << 32);
- u8 stream_idx = FIELD_GET(DART_ERROR_STREAM, error);
+ u8 stream_idx = FIELD_GET(DART_T8020_ERROR_STREAM, error);
- if (!(error & DART_ERROR_FLAG))
+ if (!(error & DART_T8020_ERROR_FLAG))
return IRQ_NONE;
/* there should only be a single bit set but let's use == to be sure */
- if (error_code == DART_ERROR_READ_FAULT)
+ if (error_code == DART_T8020_ERROR_READ_FAULT)
fault_name = "READ FAULT";
- else if (error_code == DART_ERROR_WRITE_FAULT)
+ else if (error_code == DART_T8020_ERROR_WRITE_FAULT)
fault_name = "WRITE FAULT";
- else if (error_code == DART_ERROR_NO_PTE)
+ else if (error_code == DART_T8020_ERROR_NO_PTE)
fault_name = "NO PTE FOR IOVA";
- else if (error_code == DART_ERROR_NO_PMD)
+ else if (error_code == DART_T8020_ERROR_NO_PMD)
fault_name = "NO PMD FOR IOVA";
- else if (error_code == DART_ERROR_NO_TTBR)
+ else if (error_code == DART_T8020_ERROR_NO_TTBR)
fault_name = "NO TTBR FOR IOVA";
else
fault_name = "unknown";
@@ -824,14 +1059,58 @@ static irqreturn_t apple_dart_irq(int irq, void *dev)
"translation fault: status:0x%x stream:%d code:0x%x (%s) at 0x%llx",
error, stream_idx, error_code, fault_name, addr);
- writel(error, dart->regs + DART_ERROR);
+ writel(error, dart->regs + DART_T8020_ERROR);
return IRQ_HANDLED;
}
+static irqreturn_t apple_dart_t8110_irq(int irq, void *dev)
+{
+ struct apple_dart *dart = dev;
+ const char *fault_name = NULL;
+ u32 error = readl(dart->regs + DART_T8110_ERROR);
+ u32 error_code = FIELD_GET(DART_T8110_ERROR_CODE, error);
+ u32 addr_lo = readl(dart->regs + DART_T8110_ERROR_ADDR_LO);
+ u32 addr_hi = readl(dart->regs + DART_T8110_ERROR_ADDR_HI);
+ u64 addr = addr_lo | (((u64)addr_hi) << 32);
+ u8 stream_idx = FIELD_GET(DART_T8110_ERROR_STREAM, error);
+
+ if (!(error & DART_T8110_ERROR_FLAG))
+ return IRQ_NONE;
+
+ /* there should only be a single bit set but let's use == to be sure */
+ if (error_code == DART_T8110_ERROR_READ_FAULT)
+ fault_name = "READ FAULT";
+ else if (error_code == DART_T8110_ERROR_WRITE_FAULT)
+ fault_name = "WRITE FAULT";
+ else if (error_code == DART_T8110_ERROR_NO_PTE)
+ fault_name = "NO PTE FOR IOVA";
+ else if (error_code == DART_T8110_ERROR_NO_PMD)
+ fault_name = "NO PMD FOR IOVA";
+ else if (error_code == DART_T8110_ERROR_NO_PGD)
+ fault_name = "NO PGD FOR IOVA";
+ else if (error_code == DART_T8110_ERROR_NO_TTBR)
+ fault_name = "NO TTBR FOR IOVA";
+ else
+ fault_name = "unknown";
+
+ dev_err_ratelimited(
+ dart->dev,
+ "translation fault: status:0x%x stream:%d code:0x%x (%s) at 0x%llx",
+ error, stream_idx, error_code, fault_name, addr);
+
+ writel(error, dart->regs + DART_T8110_ERROR);
+ return IRQ_HANDLED;
+}
+
+static bool apple_dart_is_locked(struct apple_dart *dart)
+{
+ return !!(readl(dart->regs + dart->hw->lock) & dart->hw->lock_bit);
+}
+
static int apple_dart_probe(struct platform_device *pdev)
{
int ret;
- u32 dart_params[2];
+ u32 dart_params[4];
struct resource *res;
struct apple_dart *dart;
struct device *dev = &pdev->dev;
@@ -866,17 +1145,42 @@ static int apple_dart_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = apple_dart_hw_reset(dart);
- if (ret)
- goto err_clk_disable;
-
dart_params[0] = readl(dart->regs + DART_PARAMS1);
dart_params[1] = readl(dart->regs + DART_PARAMS2);
- dart->pgsize = 1 << FIELD_GET(DART_PARAMS_PAGE_SHIFT, dart_params[0]);
- dart->supports_bypass = dart_params[1] & DART_PARAMS_BYPASS_SUPPORT;
+ dart->pgsize = 1 << FIELD_GET(DART_PARAMS1_PAGE_SHIFT, dart_params[0]);
+ dart->supports_bypass = dart_params[1] & DART_PARAMS2_BYPASS_SUPPORT;
+
+ switch (dart->hw->type) {
+ case DART_T8020:
+ case DART_T6000:
+ dart->oas = dart->hw->oas;
+ dart->num_streams = dart->hw->max_sid_count;
+ break;
+
+ case DART_T8110:
+ dart_params[2] = readl(dart->regs + DART_T8110_PARAMS3);
+ dart_params[3] = readl(dart->regs + DART_T8110_PARAMS4);
+ dart->oas = FIELD_GET(DART_T8110_PARAMS3_PA_WIDTH, dart_params[2]);
+ dart->num_streams = FIELD_GET(DART_T8110_PARAMS4_NUM_SIDS, dart_params[3]);
+ break;
+ }
+
+ if (dart->num_streams > DART_MAX_STREAMS) {
+ dev_err(&pdev->dev, "Too many streams (%d > %d)\n",
+ dart->num_streams, DART_MAX_STREAMS);
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+
dart->force_bypass = dart->pgsize > PAGE_SIZE;
- ret = request_irq(dart->irq, apple_dart_irq, IRQF_SHARED,
+ dart->locked = apple_dart_is_locked(dart);
+ if (!dart->locked) {
+ ret = apple_dart_hw_reset(dart);
+ if (ret)
+ goto err_clk_disable;
+ }
+
+ ret = request_irq(dart->irq, dart->hw->irq_handler, IRQF_SHARED,
"apple-dart fault handler", dart);
if (ret)
goto err_clk_disable;
@@ -894,8 +1198,8 @@ static int apple_dart_probe(struct platform_device *pdev)
dev_info(
&pdev->dev,
- "DART [pagesize %x, bypass support: %d, bypass forced: %d] initialized\n",
- dart->pgsize, dart->supports_bypass, dart->force_bypass);
+ "DART [pagesize %x, %d streams, bypass support: %d, bypass forced: %d, locked: %d] initialized\n",
+ dart->pgsize, dart->num_streams, dart->supports_bypass, dart->force_bypass, dart->locked);
return 0;
err_sysfs_remove:
@@ -924,15 +1228,128 @@ static int apple_dart_remove(struct platform_device *pdev)
}
static const struct apple_dart_hw apple_dart_hw_t8103 = {
+ .type = DART_T8020,
+ .irq_handler = apple_dart_t8020_irq,
+ .invalidate_tlb = apple_dart_t8020_hw_invalidate_tlb,
.oas = 36,
.fmt = APPLE_DART,
+ .max_sid_count = 16,
+
+ .enable_streams = DART_T8020_STREAMS_ENABLE,
+ .lock = DART_T8020_CONFIG,
+ .lock_bit = DART_T8020_CONFIG_LOCK,
+
+ .error = DART_T8020_ERROR,
+
+ .tcr = DART_T8020_TCR,
+ .tcr_enabled = DART_T8020_TCR_TRANSLATE_ENABLE,
+ .tcr_disabled = 0,
+ .tcr_bypass = DART_T8020_TCR_BYPASS_DAPF | DART_T8020_TCR_BYPASS_DART,
+
+ .ttbr = DART_T8020_TTBR,
+ .ttbr_valid = DART_T8020_TTBR_VALID,
+ .ttbr_addr_off = DART_T8020_TTBR_ADDR_OFF,
+ .ttbr_shift = DART_T8020_TTBR_SHIFT,
+ .ttbr_count = 4,
};
static const struct apple_dart_hw apple_dart_hw_t6000 = {
+ .type = DART_T6000,
+ .irq_handler = apple_dart_t8020_irq,
+ .invalidate_tlb = apple_dart_t8020_hw_invalidate_tlb,
.oas = 42,
.fmt = APPLE_DART2,
+ .max_sid_count = 16,
+
+ .enable_streams = DART_T8020_STREAMS_ENABLE,
+ .lock = DART_T8020_CONFIG,
+ .lock_bit = DART_T8020_CONFIG_LOCK,
+
+ .error = DART_T8020_ERROR,
+
+ .tcr = DART_T8020_TCR,
+ .tcr_enabled = DART_T8020_TCR_TRANSLATE_ENABLE,
+ .tcr_disabled = 0,
+ .tcr_bypass = DART_T8020_TCR_BYPASS_DAPF | DART_T8020_TCR_BYPASS_DART,
+
+ .ttbr = DART_T8020_TTBR,
+ .ttbr_valid = DART_T8020_TTBR_VALID,
+ .ttbr_addr_off = DART_T8020_TTBR_ADDR_OFF,
+ .ttbr_shift = DART_T8020_TTBR_SHIFT,
+ .ttbr_count = 4,
+};
+
+static const struct apple_dart_hw apple_dart_hw_t8110 = {
+ .type = DART_T8110,
+ .irq_handler = apple_dart_t8110_irq,
+ .invalidate_tlb = apple_dart_t8110_hw_invalidate_tlb,
+ .fmt = APPLE_DART2,
+ .max_sid_count = 256,
+
+ .enable_streams = DART_T8110_ENABLE_STREAMS,
+ .disable_streams = DART_T8110_DISABLE_STREAMS,
+ .lock = DART_T8110_PROTECT,
+ .lock_bit = DART_T8110_PROTECT_TTBR_TCR,
+
+ .error = DART_T8110_ERROR,
+
+ .tcr = DART_T8110_TCR,
+ .tcr_enabled = DART_T8110_TCR_TRANSLATE_ENABLE,
+ .tcr_disabled = 0,
+ .tcr_bypass = DART_T8110_TCR_BYPASS_DAPF | DART_T8110_TCR_BYPASS_DART,
+
+ .ttbr = DART_T8110_TTBR,
+ .ttbr_valid = DART_T8110_TTBR_VALID,
+ .ttbr_addr_off = DART_T8110_TTBR_ADDR_OFF,
+ .ttbr_shift = DART_T8110_TTBR_SHIFT,
+ .ttbr_count = 1,
};
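
The suspend/resume code below indexes per-stream registers through DART_TCR() and DART_TTBR() helpers whose definitions fall outside this excerpt. A plausible shape, shown purely as an assumption to make the register arithmetic readable:

    /* hypothetical sketch -- the real macros live in an elided hunk */
    #define DART_TCR(dart, sid)  ((dart)->hw->tcr + 4 * (sid))
    #define DART_TTBR(dart, sid, idx) \
            ((dart)->hw->ttbr + 4 * (dart)->hw->ttbr_count * (sid) + 4 * (idx))
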
+#ifdef CONFIG_PM_SLEEP
+static int apple_dart_suspend(struct device *dev)
+{
+ struct apple_dart *dart = dev_get_drvdata(dev);
+ unsigned int sid, idx;
+
+ for (sid = 0; sid < dart->num_streams; sid++) {
+ dart->save_tcr[sid] = readl_relaxed(dart->regs + DART_TCR(dart, sid));
+ for (idx = 0; idx < dart->hw->ttbr_count; idx++)
+ dart->save_ttbr[sid][idx] =
+ readl_relaxed(dart->regs + DART_TTBR(dart, sid, idx));
+ }
+
+ return 0;
+}
+
+static int apple_dart_resume(struct device *dev)
+{
+ struct apple_dart *dart = dev_get_drvdata(dev);
+ unsigned int sid, idx;
+ int ret;
+
+ ret = apple_dart_hw_reset(dart);
+ if (ret) {
+ dev_err(dev, "Failed to reset DART on resume\n");
+ return ret;
+ }
+
+ for (sid = 0; sid < dart->num_streams; sid++) {
+ for (idx = 0; idx < dart->hw->ttbr_count; idx++)
+ writel_relaxed(dart->save_ttbr[sid][idx],
+ dart->regs + DART_TTBR(dart, sid, idx));
+ writel_relaxed(dart->save_tcr[sid], dart->regs + DART_TCR(dart, sid));
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops apple_dart_pm_ops = {
+ .suspend = apple_dart_suspend,
+ .resume = apple_dart_resume,
+};
+#endif
+
static const struct of_device_id apple_dart_of_match[] = {
+ { .compatible = "apple,t8110-dart", .data = &apple_dart_hw_t8110 },
{ .compatible = "apple,t8103-dart", .data = &apple_dart_hw_t8103 },
{ .compatible = "apple,t6000-dart", .data = &apple_dart_hw_t6000 },
{},
@@ -944,6 +1361,9 @@ static struct platform_driver apple_dart_driver = {
.name = "apple-dart",
.of_match_table = apple_dart_of_match,
.suppress_bind_attrs = true,
+#ifdef CONFIG_PM_SLEEP
+ .pm = &apple_dart_pm_ops,
+#endif
},
.probe = apple_dart_probe,
.remove = apple_dart_remove,
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 9297b741f5e8..b940be5e80ce 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -23,6 +23,7 @@
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mutex.h>
+#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
@@ -391,6 +392,8 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
iort_iommu_get_resv_regions(dev, list);
+ if (dev->of_node)
+ of_iommu_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
@@ -497,8 +500,13 @@ static int iova_reserve_iommu_regions(struct device *dev,
if (region->type == IOMMU_RESV_SW_MSI)
continue;
- lo = iova_pfn(iovad, region->start);
- hi = iova_pfn(iovad, region->start + region->length - 1);
+ if (region->type == IOMMU_RESV_TRANSLATED) {
+ lo = iova_pfn(iovad, region->dva);
+ hi = iova_pfn(iovad, region->dva + region->length - 1);
+ } else {
+ lo = iova_pfn(iovad, region->start);
+ hi = iova_pfn(iovad, region->start + region->length - 1);
+ }
reserve_iova(iovad, lo, hi);
if (region->type == IOMMU_RESV_MSI)
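
For an IOMMU_RESV_TRANSLATED region the device-visible address (region->dva) is what has to be carved out of the IOVA space; reserving region->start, the physical side, would protect the wrong range. A worked example of the reservation arithmetic, assuming a 16 KiB IOVA granule and made-up addresses:

    /* dva 0xbeef0000..0xbeefffff, 16 KiB granule (assumed) */
    lo = iova_pfn(iovad, 0xbeef0000);               /* 0xbeef0000 >> 14 = 0x2fbbc */
    hi = iova_pfn(iovad, 0xbeef0000 + SZ_64K - 1);  /* 0xbeefffff >> 14 = 0x2fbbf */
    reserve_iova(iovad, lo, hi);
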
diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c
index 74b1ef2b96be..8459772bfefe 100644
--- a/drivers/iommu/io-pgtable-dart.c
+++ b/drivers/iommu/io-pgtable-dart.c
@@ -16,6 +16,8 @@
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include "linux/export.h"
+#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
@@ -24,6 +26,8 @@
#include <asm/barrier.h>
+#include "io-pgtable-dart.h"
+
#define DART1_MAX_ADDR_BITS 36
#define DART_MAX_TABLES 4
@@ -106,8 +110,7 @@ static phys_addr_t iopte_to_paddr(dart_iopte pte,
return paddr;
}
-static void *__dart_alloc_pages(size_t size, gfp_t gfp,
- struct io_pgtable_cfg *cfg)
+static void *__dart_alloc_pages(size_t size, gfp_t gfp)
{
int order = get_order(size);
struct page *p;
@@ -262,7 +265,7 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
/* no L2 table present */
if (!pte) {
- cptep = __dart_alloc_pages(tblsz, gfp, cfg);
+ cptep = __dart_alloc_pages(tblsz, gfp);
if (!cptep)
return -ENOMEM;
@@ -363,6 +366,32 @@ static phys_addr_t dart_iova_to_phys(struct io_pgtable_ops *ops,
return 0;
}
+int io_pgtable_dart_setup_locked(struct io_pgtable_ops *ops)
+{
+ void *l1tbl;
+ struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+ size_t size;
+
+ if (!(cfg->quirks & IO_PGTABLE_QUIRK_APPLE_LOCKED))
+ return 0;
+
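+ /*
+ * DART exposes a single page size, so pgsize_bitmap equals the
+ * granule and hence the size of the locked L1 table remapped below.
+ */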
+ size = cfg->pgsize_bitmap;
+ l1tbl = devm_memremap(cfg->iommu_dev, cfg->apple_dart_cfg.ttbr[0], size,
+ MEMREMAP_WB);
+ if (IS_ERR(l1tbl))
+ return PTR_ERR(l1tbl);
+
+ for (int entry = 0; entry < DART_PTES_PER_TABLE(data); entry++)
+ ((dart_iopte *)l1tbl)[entry] = ((dart_iopte *)data->pgd[0])[entry];
+
+ free_pages((unsigned long)data->pgd[0], get_order(DART_GRANULE(data)));
+ data->pgd[0] = l1tbl;
+
+ return 0;
+}
+EXPORT_SYMBOL(io_pgtable_dart_setup_locked);
+
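
The sequence for a locked DART, pieced together from this patch: allocate a shadow pgd (see apple_dart_alloc_pgtable() below), map the reserved regions into it, then call io_pgtable_dart_setup_locked() to splice the shadow entries into the live, hardware-owned L1 table. A condensed sketch of a caller, with every name except io_pgtable_dart_setup_locked() illustrative:

    /* sketch: bootstrap order for a locked DART domain (assumed caller) */
    static int locked_dart_bootstrap(struct io_pgtable_ops *ops,
                                     struct list_head *resv)
    {
            struct iommu_resv_region *r;

            list_for_each_entry(r, resv, list)
                    if (r->type == IOMMU_RESV_TRANSLATED)
                            ;  /* map r->start at r->dva via ops->map_pages() */

            /* now copy the shadow L1 into the locked table */
            return io_pgtable_dart_setup_locked(ops);
    }
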
static struct dart_io_pgtable *
dart_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
@@ -418,30 +447,52 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
cfg->apple_dart_cfg.n_ttbrs = 1 << data->tbl_bits;
+ /*
+ * Locked DARTs cannot modify their TTBR registers. Allocate a shadow
+ * page table first so that locked DARTs (disp0, dcp, dcpext*) can map
+ * their reserved memory regions; io_pgtable_dart_setup_locked() later
+ * copies those mappings into the locked L1 table.
+ */
+ if (cfg->quirks & IO_PGTABLE_QUIRK_APPLE_LOCKED) {
+ if (cfg->apple_dart_cfg.n_ttbrs > 1)
+ goto out_free_data;
+
+ data->pgd[0] = __dart_alloc_pages(DART_GRANULE(data), GFP_KERNEL);
+ if (!data->pgd[0])
+ goto out_free_data;
+
+ return &data->iop;
+ }
+
for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) {
- data->pgd[i] = __dart_alloc_pages(DART_GRANULE(data), GFP_KERNEL,
- cfg);
+ data->pgd[i] = __dart_alloc_pages(DART_GRANULE(data), GFP_KERNEL);
if (!data->pgd[i])
- goto out_free_data;
+ goto out_free_pages;
cfg->apple_dart_cfg.ttbr[i] = virt_to_phys(data->pgd[i]);
}
return &data->iop;
-out_free_data:
+out_free_pages:
while (--i >= 0)
free_pages((unsigned long)data->pgd[i],
get_order(DART_GRANULE(data)));
+out_free_data:
kfree(data);
return NULL;
}
static void apple_dart_free_pgtable(struct io_pgtable *iop)
{
+ struct io_pgtable_cfg *cfg = &iop->cfg;
struct dart_io_pgtable *data = io_pgtable_to_data(iop);
dart_iopte *ptep, *end;
int i;
+ if (cfg->quirks & IO_PGTABLE_QUIRK_APPLE_LOCKED) {
+ kfree(data);
+ return;
+ }
+
for (i = 0; i < (1 << data->tbl_bits) && data->pgd[i]; ++i) {
ptep = data->pgd[i];
end = (void *)ptep + DART_GRANULE(data);
diff --git a/drivers/iommu/io-pgtable-dart.h b/drivers/iommu/io-pgtable-dart.h
new file mode 100644
index 000000000000..90fd1035e9f1
--- /dev/null
+++ b/drivers/iommu/io-pgtable-dart.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Apple DART page table allocator.
+ *
+ * Copyright (C) 2022 The Asahi Linux Contributors
+ */
+
+/* This will go away on the next iteration of locked DART handling */
+
+#ifndef IO_PGTABLE_DART_H_
+#define IO_PGTABLE_DART_H_
+
+int io_pgtable_dart_setup_locked(struct io_pgtable_ops *ops);
+
+#endif /* IO_PGTABLE_DART_H_ */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 65a3b3d886dc..f36e140ea02b 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -74,6 +74,7 @@ static const char * const iommu_group_resv_type_string[] = {
[IOMMU_RESV_RESERVED] = "reserved",
[IOMMU_RESV_MSI] = "msi",
[IOMMU_RESV_SW_MSI] = "msi",
+ [IOMMU_RESV_TRANSLATED] = "translated",
};
#define IOMMU_CMD_LINE_DMA_API BIT(0)
@@ -2582,6 +2583,19 @@ struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
enum iommu_resv_type type,
gfp_t gfp)
{
+ if (type == IOMMU_RESV_TRANSLATED)
+ return NULL;
+
+ return iommu_alloc_resv_region_tr(start, 0, length, prot, type, gfp);
+}
+EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
+
+struct iommu_resv_region *iommu_alloc_resv_region_tr(phys_addr_t start,
+ dma_addr_t dva_start,
+ size_t length, int prot,
+ enum iommu_resv_type type,
+ gfp_t gfp)
+{
struct iommu_resv_region *region;
region = kzalloc(sizeof(*region), gfp);
@@ -2590,12 +2604,14 @@ struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
INIT_LIST_HEAD(&region->list);
region->start = start;
+ if (type == IOMMU_RESV_TRANSLATED)
+ region->dva = dva_start;
region->length = length;
region->prot = prot;
region->type = type;
return region;
}
-EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
+EXPORT_SYMBOL_GPL(iommu_alloc_resv_region_tr);
void iommu_set_default_passthrough(bool cmd_line)
{
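
A minimal sketch of the new allocator variant from a driver's perspective (addresses made up):

    struct iommu_resv_region *rgn;

    rgn = iommu_alloc_resv_region_tr(0x9f0000000ULL,   /* physical start */
                                     0x10000000,       /* device-visible dva */
                                     SZ_16M, IOMMU_READ | IOMMU_WRITE,
                                     IOMMU_RESV_TRANSLATED, GFP_KERNEL);
    if (rgn)
            list_add_tail(&rgn->list, head);

Passing IOMMU_RESV_TRANSLATED to the old iommu_alloc_resv_region() now fails by design, since that API has no way to carry the dva.
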
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 5696314ae69e..522622e3f7df 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
@@ -172,3 +173,118 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
return ops;
}
+
+static inline bool check_direct_mapping(struct device *dev, struct resource *phys,
+ phys_addr_t start, phys_addr_t end)
+{
+ if (start != phys->start || end != phys->end)
+ return false;
+
+ return true;
+}
+
+static inline bool check_translated_mapping(struct device *dev, struct resource *phys,
+ phys_addr_t start, phys_addr_t end)
+{
+ if (end - start != phys->end - phys->start) {
+ dev_warn(dev, "treating non-overlapping mapping [%pr] -> [%pap-%pap] as reservation\n",
+ &phys, &start, &end);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * of_iommu_get_resv_regions - reserved region driver helper for device tree
+ * @dev: device for which to get reserved regions
+ * @list: reserved region list
+ *
+ * IOMMU drivers can use this to implement their .get_resv_regions() callback
+ * for memory regions attached to a device tree node. See the reserved-memory
+ * device tree bindings on how to use these:
+ *
+ * Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
+ */
+void of_iommu_get_resv_regions(struct device *dev, struct list_head *list)
+{
+#if IS_ENABLED(CONFIG_OF_ADDRESS)
+ struct of_phandle_iterator it;
+ int err;
+
+ of_for_each_phandle(&it, err, dev->of_node, "memory-region", NULL, 0) {
+ const __be32 *maps, *end;
+ struct resource res;
+ int size;
+
+ memset(&res, 0, sizeof(res));
+
+ /*
+ * The "reg" property is optional and can be omitted by reserved-memory regions
+ * that represent reservations in the IOVA space, which are regions that should
+ * not be mapped.
+ */
+ if (of_find_property(it.node, "reg", NULL)) {
+ err = of_address_to_resource(it.node, 0, &res);
+ if (err < 0) {
+ dev_err(dev, "failed to parse memory region %pOF: %d\n",
+ it.node, err);
+ continue;
+ }
+ }
+
+ maps = of_get_property(it.node, "iommu-addresses", &size);
+ if (!maps)
+ continue;
+
+ end = maps + size / sizeof(__be32);
+
+ while (maps < end) {
+ struct device_node *np;
+ u32 phandle;
+ int na, ns;
+
+ phandle = be32_to_cpup(maps++);
+ np = of_find_node_by_phandle(phandle);
+ na = of_n_addr_cells(np);
+ ns = of_n_size_cells(np);
+
+ if (np == dev->of_node) {
+ int prot = IOMMU_READ | IOMMU_WRITE;
+ struct iommu_resv_region *region;
+ enum iommu_resv_type type;
+ phys_addr_t start;
+ size_t length;
+
+ start = of_translate_dma_address(np, maps);
+ length = of_read_number(maps + na, ns);
+
+ /*
+ * IOMMU regions without an associated physical region cannot be
+ * mapped and are simply reservations.
+ */
+ if (res.end > res.start) {
+ phys_addr_t end = start + length - 1;
+
+ if (check_direct_mapping(dev, &res, start, end))
+ type = IOMMU_RESV_DIRECT_RELAXABLE;
+ else if (check_translated_mapping(dev, &res, start, end))
+ type = IOMMU_RESV_TRANSLATED;
+ else
+ type = IOMMU_RESV_RESERVED;
+ } else {
+ type = IOMMU_RESV_RESERVED;
+ }
+
+ region = iommu_alloc_resv_region_tr(res.start, start, length, prot, type,
+ GFP_KERNEL);
+ if (region)
+ list_add_tail(&region->list, list);
+ }
+
+ of_node_put(np);
+ maps += na + ns;
+ }
+ }
+#endif
+}
+EXPORT_SYMBOL(of_iommu_get_resv_regions);
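
An IOMMU driver consumes this helper from its get_resv_regions callback; a minimal sketch (callback name hypothetical):

    static void example_get_resv_regions(struct device *dev,
                                         struct list_head *head)
    {
            of_iommu_get_resv_regions(dev, head);
    }

With the dma-iommu hunk above, drivers that already route through iommu_dma_get_resv_regions() pick the OF regions up automatically for OF-described devices.
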
diff --git a/drivers/mailbox/apple-mailbox.c b/drivers/mailbox/apple-mailbox.c
index 2a3e8d8ff8b5..2f631c19a4c2 100644
--- a/drivers/mailbox/apple-mailbox.c
+++ b/drivers/mailbox/apple-mailbox.c
@@ -103,6 +103,8 @@ struct apple_mbox {
struct device *dev;
struct mbox_controller controller;
spinlock_t rx_lock;
+ spinlock_t tx_lock;
+ bool tx_pending;
};
static const struct of_device_id apple_mbox_of_match[];
@@ -166,11 +168,15 @@ static int apple_mbox_chan_send_data(struct mbox_chan *chan, void *data)
{
struct apple_mbox *apple_mbox = chan->con_priv;
struct apple_mbox_msg *msg = data;
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&apple_mbox->tx_lock, flags);
+ WARN_ON(apple_mbox->tx_pending);
+
ret = apple_mbox_hw_send(apple_mbox, msg);
if (ret)
- return ret;
+ goto err_unlock;
/*
* The interrupt is level triggered and will keep firing as long as the
@@ -185,9 +191,13 @@ static int apple_mbox_chan_send_data(struct mbox_chan *chan, void *data)
writel_relaxed(apple_mbox->hw->irq_bit_send_empty,
apple_mbox->regs + apple_mbox->hw->irq_ack);
}
+ apple_mbox->tx_pending = true;
enable_irq(apple_mbox->irq_send_empty);
- return 0;
+err_unlock:
+ spin_unlock_irqrestore(&apple_mbox->tx_lock, flags);
+
+ return ret;
}
static irqreturn_t apple_mbox_send_empty_irq(int irq, void *data)
@@ -202,7 +212,14 @@ static irqreturn_t apple_mbox_send_empty_irq(int irq, void *data)
* it at the main controller again.
*/
disable_irq_nosync(apple_mbox->irq_send_empty);
- mbox_chan_txdone(&apple_mbox->chan, 0);
+ spin_lock(&apple_mbox->tx_lock);
+ if (apple_mbox->tx_pending) {
+ apple_mbox->tx_pending = false;
+ spin_unlock(&apple_mbox->tx_lock);
+ mbox_chan_txdone(&apple_mbox->chan, 0);
+ } else {
+ spin_unlock(&apple_mbox->tx_lock);
+ }
return IRQ_HANDLED;
}
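
tx_pending closes a race between this interrupt and apple_mbox_chan_flush() below: both can observe the FIFO as empty, and without the flag each would report completion, handing the core a spurious second txdone. The invariant, restated as a sketch:

    /* sketch: exactly one observer may consume a pending TX completion */
    spin_lock(&apple_mbox->tx_lock);
    done = apple_mbox->tx_pending;    /* true only for the first observer */
    apple_mbox->tx_pending = false;
    spin_unlock(&apple_mbox->tx_lock);
    if (done)
            mbox_chan_txdone(&apple_mbox->chan, 0);
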
@@ -260,10 +277,17 @@ static int apple_mbox_chan_flush(struct mbox_chan *chan, unsigned long timeout)
{
struct apple_mbox *apple_mbox = chan->con_priv;
unsigned long deadline = jiffies + msecs_to_jiffies(timeout);
+ unsigned long flags;
while (time_before(jiffies, deadline)) {
if (apple_mbox_hw_send_empty(apple_mbox)) {
- mbox_chan_txdone(&apple_mbox->chan, 0);
+ spin_lock_irqsave(&apple_mbox->tx_lock, flags);
+ if (apple_mbox->tx_pending) {
+ apple_mbox->tx_pending = false;
+ disable_irq_nosync(apple_mbox->irq_send_empty);
+ }
+ /* Mailbox subsystem will call txdone for us */
+ spin_unlock_irqrestore(&apple_mbox->tx_lock, flags);
return 0;
}
@@ -361,6 +385,7 @@ static int apple_mbox_probe(struct platform_device *pdev)
mbox->controller.of_xlate = apple_mbox_of_xlate;
mbox->chan.con_priv = mbox;
spin_lock_init(&mbox->rx_lock);
+ spin_lock_init(&mbox->tx_lock);
irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-recv", dev_name(dev));
if (!irqname)
@@ -368,8 +393,8 @@ static int apple_mbox_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(dev, mbox->irq_recv_not_empty, NULL,
apple_mbox_recv_irq,
- IRQF_NO_AUTOEN | IRQF_ONESHOT, irqname,
- mbox);
+ IRQF_NO_AUTOEN | IRQF_ONESHOT | IRQF_NO_SUSPEND,
+ irqname, mbox);
if (ret)
return ret;
@@ -377,9 +402,8 @@ static int apple_mbox_probe(struct platform_device *pdev)
if (!irqname)
return -ENOMEM;
- ret = devm_request_irq(dev, mbox->irq_send_empty,
- apple_mbox_send_empty_irq, IRQF_NO_AUTOEN,
- irqname, mbox);
+ ret = devm_request_irq(dev, mbox->irq_send_empty, apple_mbox_send_empty_irq,
+ IRQF_NO_AUTOEN | IRQF_NO_SUSPEND, irqname, mbox);
if (ret)
return ret;
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 4229b9b5da98..d6b16dcead1c 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -310,7 +310,7 @@ int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
return -ENOTSUPP;
ret = chan->mbox->ops->flush(chan, timeout);
- if (ret < 0)
+ if (ret >= 0)
tx_tick(chan, ret);
return ret;
@@ -390,7 +390,7 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
spin_unlock_irqrestore(&chan->lock, flags);
- if (chan->mbox->ops->startup) {
+ if (!cl->defer_startup && chan->mbox->ops->startup) {
ret = chan->mbox->ops->startup(chan);
if (ret) {
@@ -405,6 +405,22 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
}
EXPORT_SYMBOL_GPL(mbox_request_channel);
+int mbox_start_channel(struct mbox_chan *chan)
+{
+ if (chan->mbox->ops->startup) {
+ int ret = chan->mbox->ops->startup(chan);
+
+ if (ret) {
+ dev_err(chan->cl->dev,
+ "Unable to startup the chan (%d)\n", ret);
+ }
+ return ret;
+ } else {
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(mbox_start_channel);
+
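
Paired with the defer_startup client flag checked in mbox_request_channel() above, this lets a client bind a channel early but postpone ops->startup() (and any IRQ enablement it implies) until it is ready; a client-side sketch:

    cl->defer_startup = true;
    chan = mbox_request_channel(cl, 0);
    if (IS_ERR(chan))
            return PTR_ERR(chan);

    /* ... allocate buffers, install callbacks ... */

    ret = mbox_start_channel(chan);   /* runs ops->startup() now */
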
struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
const char *name)
{
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 8b93856de432..10424dede797 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -51,6 +51,21 @@ config MFD_ACT8945A
linear regulators, along with a complete ActivePath battery
charger.
+config MFD_APPLE_SPMI_PMU
+ tristate "Apple SPMI PMUs"
+ depends on SPMI
+ depends on ARCH_APPLE || COMPILE_TEST
+ default ARCH_APPLE
+ select MFD_SIMPLE_MFD_SPMI
+ help
+ Say yes here to enable support for Apple PMUs attached via the
+ SPMI bus. These can be found on Apple devices such as Apple
+ Silicon Macs.
+
+ This driver itself only attaches to the core device, and relies
+ on subsystem drivers for individual device functions. You must
+ enable those for it to be useful.
+
config MFD_SUN4I_GPADC
tristate "Allwinner sunxi platforms' GPADC MFD driver"
select MFD_CORE
@@ -1274,6 +1289,19 @@ config MFD_SIMPLE_MFD_I2C
sub-devices represented by child nodes in Device Tree will be
subsequently registered.
+config MFD_SIMPLE_MFD_SPMI
+ tristate
+ depends on SPMI
+ select MFD_CORE
+ select REGMAP_SPMI
+ help
+ This driver creates a single register map with the intention for it
+ to be shared by all sub-devices.
+
+ Once the register map has been successfully initialised, any
+ sub-devices represented by child nodes in Device Tree will be
+ subsequently registered.
+
config MFD_SL28CPLD
tristate "Kontron sl28cpld Board Management Controller"
depends on I2C
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 7ed3ef4a698c..70af6d3cb98b 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -271,6 +271,7 @@ obj-$(CONFIG_MFD_QCOM_PM8008) += qcom-pm8008.o
obj-$(CONFIG_SGI_MFD_IOC3) += ioc3.o
obj-$(CONFIG_MFD_SIMPLE_MFD_I2C) += simple-mfd-i2c.o
+obj-$(CONFIG_MFD_SIMPLE_MFD_SPMI) += simple-mfd-spmi.o
obj-$(CONFIG_MFD_INTEL_M10_BMC) += intel-m10-bmc.o
obj-$(CONFIG_MFD_ATC260X) += atc260x-core.o
diff --git a/drivers/mfd/simple-mfd-spmi.c b/drivers/mfd/simple-mfd-spmi.c
new file mode 100644
index 000000000000..99f25751000a
--- /dev/null
+++ b/drivers/mfd/simple-mfd-spmi.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Simple MFD - SPMI
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spmi.h>
+#include <linux/of_platform.h>
+
+static const struct regmap_config spmi_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = 0xffff,
+};
+
+static int simple_spmi_probe(struct spmi_device *sdev)
+{
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_spmi_ext(sdev, &spmi_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return devm_of_platform_populate(&sdev->dev);
+}
+
+static const struct of_device_id simple_spmi_id_table[] = {
+ { .compatible = "apple,spmi-pmu" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, simple_spmi_id_table);
+
+static struct spmi_driver pmic_spmi_driver = {
+ .probe = simple_spmi_probe,
+ .driver = {
+ .name = "simple-mfd-spmi",
+ .owner = THIS_MODULE,
+ .of_match_table = simple_spmi_id_table,
+ },
+};
+module_spmi_driver(pmic_spmi_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Simple MFD - SPMI driver");
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 34ea1acbb3cc..4a8fe5877112 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -2039,6 +2039,7 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
struct sdhci_host *host;
int ret, bar = first_bar + slotno;
size_t priv_size = chip->fixes ? chip->fixes->priv_size : 0;
+ u32 cd_debounce_delay_ms;
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);
@@ -2105,6 +2106,10 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
if (host->mmc->caps & MMC_CAP_CD_WAKE)
device_init_wakeup(&pdev->dev, true);
+ if (device_property_read_u32(&pdev->dev, "cd-debounce-delay-ms",
+ &cd_debounce_delay_ms))
+ cd_debounce_delay_ms = 200;
+
if (slot->cd_idx >= 0) {
ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
slot->cd_override_level, 0);
@@ -2112,7 +2117,7 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
ret = mmc_gpiod_request_cd(host->mmc, NULL,
slot->cd_idx,
slot->cd_override_level,
- 0);
+ cd_debounce_delay_ms * 1000);
if (ret == -EPROBE_DEFER)
goto remove;
@@ -2120,6 +2125,16 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
dev_warn(&pdev->dev, "failed to setup card detect gpio\n");
slot->cd_idx = -1;
}
+ } else if (is_of_node(pdev->dev.fwnode)) {
+ /* Allow all OF systems to use a CD GPIO if provided */
+
+ ret = mmc_gpiod_request_cd(host->mmc, "cd", 0,
+ slot->cd_override_level,
+ cd_debounce_delay_ms * 1000);
+ if (ret == -EPROBE_DEFER)
+ goto remove;
+ else if (ret == 0)
+ slot->cd_idx = 0;
}
if (chip->fixes && chip->fixes->add_host)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
index 13c13504a6e8..19009eb9db93 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
@@ -47,3 +47,5 @@ brcmfmac-$(CONFIG_OF) += \
of.o
brcmfmac-$(CONFIG_DMI) += \
dmi.o
+brcmfmac-$(CONFIG_ACPI) += \
+ acpi.o
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/acpi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/acpi.c
new file mode 100644
index 000000000000..dec6a83d13b1
--- /dev/null
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/acpi.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/acpi.h>
+#include "debug.h"
+#include "core.h"
+#include "common.h"
+
+void brcmf_acpi_probe(struct device *dev, enum brcmf_bus_type bus_type,
+ struct brcmf_mp_device *settings)
+{
+ acpi_status status;
+ const union acpi_object *o;
+ struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ if (!adev)
+ return;
+
+ if (!ACPI_FAILURE(acpi_dev_get_property(adev, "module-instance",
+ ACPI_TYPE_STRING, &o))) {
+ brcmf_dbg(INFO, "ACPI module-instance=%s\n", o->string.pointer);
+ settings->board_type = devm_kasprintf(dev, GFP_KERNEL,
+ "apple,%s",
+ o->string.pointer);
+ } else {
+ brcmf_dbg(INFO, "No ACPI module-instance\n");
+ }
+
+ status = acpi_evaluate_object(adev->handle, "RWCV", NULL, &buf);
+ o = buf.pointer;
+ if (!ACPI_FAILURE(status) && o && o->type == ACPI_TYPE_BUFFER &&
+ o->buffer.length >= 2) {
+ char *antenna_sku = devm_kzalloc(dev, 3, GFP_KERNEL);
+
+ if (!antenna_sku) {
+ brcmf_err("Failed to allocate antenna-sku");
+ } else {
+ memcpy(antenna_sku, o->buffer.pointer, 2);
+ brcmf_dbg(INFO, "ACPI RWCV data=%*phN antenna-sku=%s\n",
+ (int)o->buffer.length, o->buffer.pointer,
+ antenna_sku);
+ settings->antenna_sku = antenna_sku;
+ }
+ } else {
+ brcmf_dbg(INFO, "No ACPI antenna-sku\n");
+ }
+
+ /* buf.pointer stays NULL on failure, so an unconditional kfree() is safe */
+ kfree(buf.pointer);
+}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index 2208ab3aa795..c350af827003 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -39,6 +39,7 @@ enum brcmf_bus_protocol_type {
/* Firmware blobs that may be available */
enum brcmf_blob_type {
BRCMF_BLOB_CLM,
+ BRCMF_BLOB_TXCAP,
};
struct brcmf_mp_device;
@@ -88,6 +89,7 @@ struct brcmf_bus_ops {
enum brcmf_blob_type type);
void (*debugfs_create)(struct device *dev);
int (*reset)(struct device *dev);
+ void (*d2h_mb_rx)(struct device *dev, u32 data);
};
@@ -251,6 +253,15 @@ int brcmf_bus_reset(struct brcmf_bus *bus)
return bus->ops->reset(bus->dev);
}
+static inline
+void brcmf_bus_d2h_mb_rx(struct brcmf_bus *bus, u32 data)
+{
+ if (!bus->ops->d2h_mb_rx)
+ return;
+
+ return bus->ops->d2h_mb_rx(bus->dev, data);
+}
+
/*
* interface functions from common layer
*/
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index dfcfb3333369..3bd0407ba883 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -770,12 +770,50 @@ void brcmf_set_mpc(struct brcmf_if *ifp, int mpc)
}
}
+static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_scan_params_v2_le *params_le,
+ struct cfg80211_scan_request *request);
+
+static void brcmf_scan_params_v2_to_v1(struct brcmf_scan_params_v2_le *params_v2_le,
+ struct brcmf_scan_params_le *params_le)
+{
+ size_t params_size;
+ u32 ch;
+ int n_channels, n_ssids;
+
+ memcpy(&params_le->ssid_le, &params_v2_le->ssid_le,
+ sizeof(params_le->ssid_le));
+ memcpy(&params_le->bssid, &params_v2_le->bssid,
+ sizeof(params_le->bssid));
+
+ params_le->bss_type = params_v2_le->bss_type;
+ params_le->scan_type = le32_to_cpu(params_v2_le->scan_type);
+ params_le->nprobes = params_v2_le->nprobes;
+ params_le->active_time = params_v2_le->active_time;
+ params_le->passive_time = params_v2_le->passive_time;
+ params_le->home_time = params_v2_le->home_time;
+ params_le->channel_num = params_v2_le->channel_num;
+
+ ch = le32_to_cpu(params_v2_le->channel_num);
+ n_channels = ch & BRCMF_SCAN_PARAMS_COUNT_MASK;
+ n_ssids = ch >> BRCMF_SCAN_PARAMS_NSSID_SHIFT;
+
+ params_size = sizeof(u16) * n_channels;
+ if (n_ssids > 0) {
+ params_size = roundup(params_size, sizeof(u32));
+ params_size += sizeof(struct brcmf_ssid_le) * n_ssids;
+ }
+
+ memcpy(&params_le->channel_list[0],
+ &params_v2_le->channel_list[0], params_size);
+}
+
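
channel_num packs both counts into one word: the channel count in the low 16 bits and the SSID count above BRCMF_SCAN_PARAMS_NSSID_SHIFT, with the channel list padded to u32 alignment before the SSID array. A worked example of the unpacking above, assuming the shift is 16:

    /* 3 channels + 2 SSIDs -> channel_num = 0x00020003 (assumed shift 16) */
    u32 ch = 0x00020003;
    int n_channels = ch & BRCMF_SCAN_PARAMS_COUNT_MASK;         /* 3 */
    int n_ssids    = ch >> BRCMF_SCAN_PARAMS_NSSID_SHIFT;       /* 2 */
    size_t sz = roundup(sizeof(u16) * n_channels, sizeof(u32)); /* 6 -> 8 */
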
s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
struct brcmf_if *ifp, bool aborted,
bool fw_abort)
{
struct brcmf_pub *drvr = cfg->pub;
- struct brcmf_scan_params_le params_le;
+ struct brcmf_scan_params_v2_le params_v2_le;
struct cfg80211_scan_request *scan_request;
u64 reqid;
u32 bucket;
@@ -794,20 +832,23 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
if (fw_abort) {
/* Do a scan abort to stop the driver's scan engine */
brcmf_dbg(SCAN, "ABORT scan in firmware\n");
- memset(&params_le, 0, sizeof(params_le));
- eth_broadcast_addr(params_le.bssid);
- params_le.bss_type = DOT11_BSSTYPE_ANY;
- params_le.scan_type = 0;
- params_le.channel_num = cpu_to_le32(1);
- params_le.nprobes = cpu_to_le32(1);
- params_le.active_time = cpu_to_le32(-1);
- params_le.passive_time = cpu_to_le32(-1);
- params_le.home_time = cpu_to_le32(-1);
- /* Scan is aborted by setting channel_list[0] to -1 */
- params_le.channel_list[0] = cpu_to_le16(-1);
+
+ brcmf_escan_prep(cfg, &params_v2_le, NULL);
+
/* E-Scan (or anyother type) can be aborted by SCAN */
- err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN,
- &params_le, sizeof(params_le));
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_SCAN_V2)) {
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN,
+ &params_v2_le,
+ sizeof(params_v2_le));
+ } else {
+ struct brcmf_scan_params_le params_le;
+
+ brcmf_scan_params_v2_to_v1(&params_v2_le, &params_le);
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN,
+ &params_le,
+ sizeof(params_le));
+ }
+
if (err)
bphy_err(drvr, "Scan abort failed\n");
}
@@ -1027,7 +1068,7 @@ done:
}
static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
- struct brcmf_scan_params_le *params_le,
+ struct brcmf_scan_params_v2_le *params_le,
struct cfg80211_scan_request *request)
{
u32 n_ssids;
@@ -1036,9 +1077,14 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
s32 offset;
u16 chanspec;
char *ptr;
+ int length;
struct brcmf_ssid_le ssid_le;
eth_broadcast_addr(params_le->bssid);
+
+ length = BRCMF_SCAN_PARAMS_V2_FIXED_SIZE;
+
+ params_le->version = cpu_to_le16(BRCMF_SCAN_PARAMS_VERSION_V2);
params_le->bss_type = DOT11_BSSTYPE_ANY;
params_le->scan_type = BRCMF_SCANTYPE_ACTIVE;
params_le->channel_num = 0;
@@ -1048,6 +1094,15 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
params_le->home_time = cpu_to_le32(-1);
memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le));
+ /* Scan abort */
+ if (!request) {
+ length += sizeof(u16);
+ params_le->channel_num = cpu_to_le32(1);
+ params_le->channel_list[0] = cpu_to_le16(-1);
+ params_le->length = cpu_to_le16(length);
+ return;
+ }
+
n_ssids = request->n_ssids;
n_channels = request->n_channels;
@@ -1055,6 +1110,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n",
n_channels);
if (n_channels > 0) {
+ length += roundup(sizeof(u16) * n_channels, sizeof(u32));
for (i = 0; i < n_channels; i++) {
chanspec = channel_to_chanspec(&cfg->d11inf,
request->channels[i]);
@@ -1065,12 +1121,14 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
} else {
brcmf_dbg(SCAN, "Scanning all channels\n");
}
+
/* Copy ssid array if applicable */
brcmf_dbg(SCAN, "### List of SSIDs to scan ### %d\n", n_ssids);
if (n_ssids > 0) {
- offset = offsetof(struct brcmf_scan_params_le, channel_list) +
+ offset = offsetof(struct brcmf_scan_params_v2_le, channel_list) +
n_channels * sizeof(u16);
offset = roundup(offset, sizeof(u32));
+ length += sizeof(ssid_le) * n_ssids;
ptr = (char *)params_le + offset;
for (i = 0; i < n_ssids; i++) {
memset(&ssid_le, 0, sizeof(ssid_le));
@@ -1088,8 +1146,9 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
}
} else {
brcmf_dbg(SCAN, "Performing passive scan\n");
- params_le->scan_type = BRCMF_SCANTYPE_PASSIVE;
+ params_le->scan_type = cpu_to_le32(BRCMF_SCANTYPE_PASSIVE);
}
+ params_le->length = cpu_to_le16(length);
/* Adding mask to channel numbers */
params_le->channel_num =
cpu_to_le32((n_ssids << BRCMF_SCAN_PARAMS_NSSID_SHIFT) |
@@ -1101,8 +1160,8 @@ brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp,
struct cfg80211_scan_request *request)
{
struct brcmf_pub *drvr = cfg->pub;
- s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE +
- offsetof(struct brcmf_escan_params_le, params_le);
+ s32 params_size = BRCMF_SCAN_PARAMS_V2_FIXED_SIZE +
+ offsetof(struct brcmf_escan_params_le, params_v2_le);
struct brcmf_escan_params_le *params;
s32 err = 0;
@@ -1122,8 +1181,22 @@ brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp,
goto exit;
}
BUG_ON(params_size + sizeof("escan") >= BRCMF_DCMD_MEDLEN);
- brcmf_escan_prep(cfg, &params->params_le, request);
- params->version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION);
+ brcmf_escan_prep(cfg, &params->params_v2_le, request);
+
+ params->version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION_V2);
+
+ if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_SCAN_V2)) {
+ struct brcmf_escan_params_le *params_v1;
+
+ params_size -= BRCMF_SCAN_PARAMS_V2_FIXED_SIZE;
+ params_size += BRCMF_SCAN_PARAMS_FIXED_SIZE;
+ params_v1 = kzalloc(params_size, GFP_KERNEL);
+ if (!params_v1) {
+ err = -ENOMEM;
+ kfree(params);
+ goto exit;
+ }
+ params_v1->version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION);
+ brcmf_scan_params_v2_to_v1(&params->params_v2_le, &params_v1->params_le);
+ kfree(params);
+ params = params_v1;
+ }
+
params->action = cpu_to_le16(WL_ESCAN_ACTION_START);
params->sync_id = cpu_to_le16(0x1234);
@@ -1344,51 +1417,44 @@ static u16 brcmf_map_fw_linkdown_reason(const struct brcmf_event_msg *e)
return reason;
}
-static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
+static int brcmf_set_wsec(struct brcmf_if *ifp, const u8 *key, u16 key_len, u16 flags)
{
struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_wsec_pmk_le pmk;
- int i, err;
+ int err;
+
+ if (key_len > sizeof(pmk.key)) {
+ bphy_err(drvr, "key must be less than %zu bytes\n",
+ sizeof(pmk.key));
+ return -EINVAL;
+ }
+
+ memset(&pmk, 0, sizeof(pmk));
- /* convert to firmware key format */
- pmk.key_len = cpu_to_le16(pmk_len << 1);
- pmk.flags = cpu_to_le16(BRCMF_WSEC_PASSPHRASE);
- for (i = 0; i < pmk_len; i++)
- snprintf(&pmk.key[2 * i], 3, "%02x", pmk_data[i]);
+ /* pass key material directly */
+ pmk.key_len = cpu_to_le16(key_len);
+ pmk.flags = cpu_to_le16(flags);
+ memcpy(pmk.key, key, key_len);
- /* store psk in firmware */
+ /* store key material in firmware */
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_WSEC_PMK,
&pmk, sizeof(pmk));
if (err < 0)
bphy_err(drvr, "failed to change PSK in firmware (len=%u)\n",
- pmk_len);
+ key_len);
return err;
}
+static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
+{
+ return brcmf_set_wsec(ifp, pmk_data, pmk_len, 0);
+}
+
static int brcmf_set_sae_password(struct brcmf_if *ifp, const u8 *pwd_data,
u16 pwd_len)
{
- struct brcmf_pub *drvr = ifp->drvr;
- struct brcmf_wsec_sae_pwd_le sae_pwd;
- int err;
-
- if (pwd_len > BRCMF_WSEC_MAX_SAE_PASSWORD_LEN) {
- bphy_err(drvr, "sae_password must be less than %d\n",
- BRCMF_WSEC_MAX_SAE_PASSWORD_LEN);
- return -EINVAL;
- }
-
- sae_pwd.key_len = cpu_to_le16(pwd_len);
- memcpy(sae_pwd.key, pwd_data, pwd_len);
-
- err = brcmf_fil_iovar_data_set(ifp, "sae_password", &sae_pwd,
- sizeof(sae_pwd));
- if (err < 0)
- bphy_err(drvr, "failed to set SAE password in firmware (len=%u)\n",
- pwd_len);
-
- return err;
+ return brcmf_set_wsec(ifp, pwd_data, pwd_len, BRCMF_WSEC_PASSPHRASE);
}
static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason,
@@ -3958,6 +4024,37 @@ exit:
return 0;
}
+static s32
+brcmf_pmksa_v3_op(struct brcmf_if *ifp, struct cfg80211_pmksa *pmksa,
+ bool alive)
+{
+ struct brcmf_pmk_op_v3_le *pmk_op;
+ int length = offsetof(struct brcmf_pmk_op_v3_le, pmk);
+ int ret;
+
+ pmk_op = kzalloc(sizeof(*pmk_op), GFP_KERNEL);
+ if (!pmk_op)
+ return -ENOMEM;
+
+ pmk_op->version = cpu_to_le16(BRCMF_PMKSA_VER_3);
+
+ if (!pmksa) {
+ /* Flush operation, operate on entire list */
+ pmk_op->count = cpu_to_le16(0);
+ } else {
+ /* Single PMK operation */
+ pmk_op->count = cpu_to_le16(1);
+ length += sizeof(struct brcmf_pmksa_v3);
+ memcpy(pmk_op->pmk[0].bssid, pmksa->bssid, ETH_ALEN);
+ memcpy(pmk_op->pmk[0].pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
+ pmk_op->pmk[0].pmkid_len = WLAN_PMKID_LEN;
+ pmk_op->pmk[0].time_left = cpu_to_le32(alive ? BRCMF_PMKSA_NO_EXPIRY : 0);
+ }
+
+ pmk_op->length = cpu_to_le16(length);
+
+ ret = brcmf_fil_iovar_data_set(ifp, "pmkid_info", pmk_op, sizeof(*pmk_op));
+ kfree(pmk_op);
+ return ret;
+}
+
static __used s32
brcmf_update_pmklist(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp)
{
@@ -3991,6 +4088,14 @@ brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
if (!check_vif_up(ifp->vif))
return -EIO;
+ brcmf_dbg(CONN, "set_pmksa - PMK bssid: %pM =\n", pmksa->bssid);
+ brcmf_dbg(CONN, "%*ph\n", WLAN_PMKID_LEN, pmksa->pmkid);
+
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PMKID_V3))
+ return brcmf_pmksa_v3_op(ifp, pmksa, true);
+
+ /* TODO: implement PMKID_V2 */
+
npmk = le32_to_cpu(cfg->pmk_list.npmk);
for (i = 0; i < npmk; i++)
if (!memcmp(pmksa->bssid, pmk[i].bssid, ETH_ALEN))
@@ -4007,9 +4112,6 @@ brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
return -EINVAL;
}
- brcmf_dbg(CONN, "set_pmksa - PMK bssid: %pM =\n", pmk[npmk].bssid);
- brcmf_dbg(CONN, "%*ph\n", WLAN_PMKID_LEN, pmk[npmk].pmkid);
-
err = brcmf_update_pmklist(cfg, ifp);
brcmf_dbg(TRACE, "Exit\n");
@@ -4033,6 +4135,11 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
brcmf_dbg(CONN, "del_pmksa - PMK bssid = %pM\n", pmksa->bssid);
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PMKID_V3))
+ return brcmf_pmksa_v3_op(ifp, pmksa, false);
+
+ /* TODO: implement PMKID_V2 */
+
npmk = le32_to_cpu(cfg->pmk_list.npmk);
for (i = 0; i < npmk; i++)
if (!memcmp(pmksa->bssid, pmk[i].bssid, ETH_ALEN))
@@ -4069,6 +4176,11 @@ brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev)
if (!check_vif_up(ifp->vif))
return -EIO;
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PMKID_V3))
+ return brcmf_pmksa_v3_op(ifp, NULL, false);
+
+ /* TODO: implement PMKID_V2 */
+
memset(&cfg->pmk_list, 0, sizeof(cfg->pmk_list));
err = brcmf_update_pmklist(cfg, ifp);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 121893bbaa1d..ef887fcd6b60 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -212,8 +212,8 @@ struct sbsocramregs {
#define ARMCR4_TCBANB_MASK 0xf
#define ARMCR4_TCBANB_SHIFT 0
-#define ARMCR4_BSZ_MASK 0x3f
-#define ARMCR4_BSZ_MULT 8192
+#define ARMCR4_BSZ_MASK 0x7f
+#define ARMCR4_BLK_1K_MASK 0x200
struct brcmf_core_priv {
struct brcmf_core pub;
@@ -676,7 +676,8 @@ static u32 brcmf_chip_sysmem_ramsize(struct brcmf_core_priv *sysmem)
}
/** Return the TCM-RAM size of the ARMCR4 core. */
-static u32 brcmf_chip_tcm_ramsize(struct brcmf_core_priv *cr4)
+static u32 brcmf_chip_tcm_ramsize(struct brcmf_chip_priv *ci,
+ struct brcmf_core_priv *cr4)
{
u32 corecap;
u32 memsize = 0;
@@ -684,6 +685,7 @@ static u32 brcmf_chip_tcm_ramsize(struct brcmf_core_priv *cr4)
u32 nbb;
u32 totb;
u32 bxinfo;
+ u32 blksize;
u32 idx;
corecap = brcmf_chip_core_read32(cr4, ARMCR4_CAP);
@@ -695,7 +697,12 @@ static u32 brcmf_chip_tcm_ramsize(struct brcmf_core_priv *cr4)
for (idx = 0; idx < totb; idx++) {
brcmf_chip_core_write32(cr4, ARMCR4_BANKIDX, idx);
bxinfo = brcmf_chip_core_read32(cr4, ARMCR4_BANKINFO);
- memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT;
+ if (bxinfo & ARMCR4_BLK_1K_MASK)
+ blksize = 1024;
+ else
+ blksize = 8192;
+
+ memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * blksize;
}
return memsize;
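
Bank size is now ((bxinfo & ARMCR4_BSZ_MASK) + 1) blocks of 8 KiB, or of 1 KiB when the new ARMCR4_BLK_1K_MASK bit is set. Decoding a hypothetical register value:

    u32 bxinfo  = 0x247;                                        /* made-up value */
    u32 blksize = (bxinfo & ARMCR4_BLK_1K_MASK) ? 1024 : 8192;  /* 0x200 set -> 1024 */
    u32 banksz  = ((bxinfo & ARMCR4_BSZ_MASK) + 1) * blksize;   /* (0x47 + 1) * 1024 = 73728 */
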
@@ -732,9 +739,12 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
case CY_CC_4373_CHIP_ID:
return 0x160000;
case CY_CC_43752_CHIP_ID:
+ case BRCM_CC_4377_CHIP_ID:
return 0x170000;
case BRCM_CC_4378_CHIP_ID:
return 0x352000;
+ case BRCM_CC_4387_CHIP_ID:
+ return 0x740000;
case CY_CC_89459_CHIP_ID:
return ((ci->pub.chiprev < 9) ? 0x180000 : 0x160000);
default:
@@ -754,7 +764,7 @@ int brcmf_chip_get_raminfo(struct brcmf_chip *pub)
mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_ARM_CR4);
if (mem) {
mem_core = container_of(mem, struct brcmf_core_priv, pub);
- ci->pub.ramsize = brcmf_chip_tcm_ramsize(mem_core);
+ ci->pub.ramsize = brcmf_chip_tcm_ramsize(ci, mem_core);
ci->pub.rambase = brcmf_chip_tcm_rambase(ci);
if (ci->pub.rambase == INVALID_RAMBASE) {
brcmf_err("RAM base not provided with ARM CR4 core\n");
@@ -1292,15 +1302,18 @@ static bool brcmf_chip_cm3_set_active(struct brcmf_chip_priv *chip)
static inline void
brcmf_chip_cr4_set_passive(struct brcmf_chip_priv *chip)
{
+ int i;
struct brcmf_core *core;
brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
- core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
- brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
- D11_BCMA_IOCTL_PHYCLOCKEN,
- D11_BCMA_IOCTL_PHYCLOCKEN,
- D11_BCMA_IOCTL_PHYCLOCKEN);
+ /* Disable the cores only and let the firmware enable them.
+ * Releasing reset ourselves breaks BCM4387 in weird ways.
+ */
+ for (i = 0; (core = brcmf_chip_get_d11core(&chip->pub, i)); i++)
+ brcmf_chip_coredisable(core, D11_BCMA_IOCTL_PHYRESET |
+ D11_BCMA_IOCTL_PHYCLOCKEN,
+ D11_BCMA_IOCTL_PHYCLOCKEN);
}
static bool brcmf_chip_cr4_set_active(struct brcmf_chip_priv *chip, u32 rstvec)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 74020fa10065..1b522b66d3af 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -101,7 +101,7 @@ void brcmf_c_set_joinpref_default(struct brcmf_if *ifp)
static int brcmf_c_download(struct brcmf_if *ifp, u16 flag,
struct brcmf_dload_data_le *dload_buf,
- u32 len)
+ u32 len, const char *var)
{
s32 err;
@@ -112,17 +112,17 @@ static int brcmf_c_download(struct brcmf_if *ifp, u16 flag,
dload_buf->crc = cpu_to_le32(0);
len = sizeof(*dload_buf) + len - 1;
- err = brcmf_fil_iovar_data_set(ifp, "clmload", dload_buf, len);
+ err = brcmf_fil_iovar_data_set(ifp, var, dload_buf, len);
return err;
}
-static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
+static int brcmf_c_download_blob(struct brcmf_if *ifp,
+ const void *data, size_t size,
+ const char *loadvar, const char *statvar)
{
struct brcmf_pub *drvr = ifp->drvr;
- struct brcmf_bus *bus = drvr->bus_if;
struct brcmf_dload_data_le *chunk_buf;
- const struct firmware *clm = NULL;
u32 chunk_len;
u32 datalen;
u32 cumulative_len;
@@ -132,20 +132,11 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
brcmf_dbg(TRACE, "Enter\n");
- err = brcmf_bus_get_blob(bus, &clm, BRCMF_BLOB_CLM);
- if (err || !clm) {
- brcmf_info("no clm_blob available (err=%d), device may have limited channels available\n",
- err);
- return 0;
- }
-
chunk_buf = kzalloc(sizeof(*chunk_buf) + MAX_CHUNK_LEN - 1, GFP_KERNEL);
- if (!chunk_buf) {
- err = -ENOMEM;
- goto done;
- }
+ if (!chunk_buf)
+ return -ENOMEM;
- datalen = clm->size;
+ datalen = size;
cumulative_len = 0;
do {
if (datalen > MAX_CHUNK_LEN) {
@@ -154,9 +145,10 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
chunk_len = datalen;
dl_flag |= DL_END;
}
- memcpy(chunk_buf->data, clm->data + cumulative_len, chunk_len);
+ memcpy(chunk_buf->data, data + cumulative_len, chunk_len);
- err = brcmf_c_download(ifp, dl_flag, chunk_buf, chunk_len);
+ err = brcmf_c_download(ifp, dl_flag, chunk_buf, chunk_len,
+ loadvar);
dl_flag &= ~DL_BEGIN;
@@ -165,20 +157,64 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
} while ((datalen > 0) && (err == 0));
if (err) {
- bphy_err(drvr, "clmload (%zu byte file) failed (%d)\n",
- clm->size, err);
- /* Retrieve clmload_status and print */
- err = brcmf_fil_iovar_int_get(ifp, "clmload_status", &status);
+ bphy_err(drvr, "%s (%zu byte file) failed (%d)\n",
+ loadvar, size, err);
+ /* Retrieve status and print */
+ err = brcmf_fil_iovar_int_get(ifp, statvar, &status);
if (err)
- bphy_err(drvr, "get clmload_status failed (%d)\n", err);
+ bphy_err(drvr, "get %s failed (%d)\n", statvar, err);
else
- brcmf_dbg(INFO, "clmload_status=%d\n", status);
+ brcmf_dbg(INFO, "%s=%d\n", statvar, status);
err = -EIO;
}
kfree(chunk_buf);
-done:
- release_firmware(clm);
+ return err;
+}
+
+static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_bus *bus = drvr->bus_if;
+ const struct firmware *fw = NULL;
+ s32 err;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ err = brcmf_bus_get_blob(bus, &fw, BRCMF_BLOB_CLM);
+ if (err || !fw) {
+ brcmf_info("no clm_blob available (err=%d), device may have limited channels available\n",
+ err);
+ return 0;
+ }
+
+ err = brcmf_c_download_blob(ifp, fw->data, fw->size,
+ "clmload", "clmload_status");
+
+ release_firmware(fw);
+ return err;
+}
+
+static int brcmf_c_process_txcap_blob(struct brcmf_if *ifp)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_bus *bus = drvr->bus_if;
+ const struct firmware *fw = NULL;
+ s32 err;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ err = brcmf_bus_get_blob(bus, &fw, BRCMF_BLOB_TXCAP);
+ if (err || !fw) {
+ brcmf_info("no txcap_blob available (err=%d)\n", err);
+ return 0;
+ }
+
+ brcmf_info("TxCap blob found, loading\n");
+ err = brcmf_c_download_blob(ifp, fw->data, fw->size,
+ "txcapload", "txcapload_status");
+
+ release_firmware(fw);
return err;
}
@@ -207,6 +243,23 @@ static const u8 brcmf_default_mac_address[ETH_ALEN] = {
0x00, 0x90, 0x4c, 0xc5, 0x12, 0x38
};
+static int brcmf_c_process_cal_blob(struct brcmf_if *ifp)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_mp_device *settings = drvr->settings;
+ s32 err;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (!settings->cal_blob || !settings->cal_size)
+ return 0;
+
+ brcmf_info("Calibration blob provided by platform, loading\n");
+ err = brcmf_c_download_blob(ifp, settings->cal_blob, settings->cal_size,
+ "calload", "calload_status");
+ return err;
+}
+
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
{
struct brcmf_pub *drvr = ifp->drvr;
@@ -290,6 +343,20 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
goto done;
}
+ /* Do TxCap downloading, if needed */
+ err = brcmf_c_process_txcap_blob(ifp);
+ if (err < 0) {
+ bphy_err(drvr, "download TxCap blob file failed, %d\n", err);
+ goto done;
+ }
+
+ /* Download external calibration blob, if available */
+ err = brcmf_c_process_cal_blob(ifp);
+ if (err < 0) {
+ bphy_err(drvr, "download calibration blob file failed, %d\n", err);
+ goto done;
+ }
+
/* query for 'ver' to get version info from firmware */
memset(buf, 0, sizeof(buf));
err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf));
@@ -479,6 +546,7 @@ struct brcmf_mp_device *brcmf_get_module_param(struct device *dev,
/* No platform data for this device, try OF and DMI data */
brcmf_dmi_probe(settings, chip, chiprev);
brcmf_of_probe(dev, bus_type, settings);
+ brcmf_acpi_probe(dev, bus_type, settings);
}
return settings;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
index aa25abffcc7d..2be2986d2110 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
@@ -54,6 +54,8 @@ struct brcmf_mp_device {
const char *board_type;
unsigned char mac[ETH_ALEN];
const char *antenna_sku;
+ const void *cal_blob;
+ int cal_size;
union {
struct brcmfmac_sdio_pd sdio;
} bus;
@@ -77,6 +79,15 @@ static inline void
brcmf_dmi_probe(struct brcmf_mp_device *settings, u32 chip, u32 chiprev) {}
#endif
+#ifdef CONFIG_ACPI
+void brcmf_acpi_probe(struct device *dev, enum brcmf_bus_type bus_type,
+ struct brcmf_mp_device *settings);
+#else
+static inline void brcmf_acpi_probe(struct device *dev,
+ enum brcmf_bus_type bus_type,
+ struct brcmf_mp_device *settings) {}
+#endif
+
u8 brcmf_map_prio_to_prec(void *cfg, u8 prio);
u8 brcmf_map_prio_to_aci(void *cfg, u8 prio);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 2c2f3e026c13..9f52019ffe47 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -126,6 +126,53 @@ static void brcmf_feat_firmware_overrides(struct brcmf_pub *drv)
drv->feat_flags |= feat_flags;
}
+struct brcmf_feat_wlcfeat {
+ u16 min_ver_major;
+ u16 min_ver_minor;
+ u32 feat_flags;
+};
+
+static const struct brcmf_feat_wlcfeat brcmf_feat_wlcfeat_map[] = {
+ { 12, 0, BIT(BRCMF_FEAT_PMKID_V2) },
+ { 13, 0, BIT(BRCMF_FEAT_PMKID_V3) },
+};
+
+static void brcmf_feat_wlc_version_overrides(struct brcmf_pub *drv)
+{
+ struct brcmf_if *ifp = brcmf_get_ifp(drv, 0);
+ const struct brcmf_feat_wlcfeat *e;
+ struct brcmf_wlc_version_le ver;
+ u32 feat_flags = 0;
+ int i, err, major, minor;
+
+ err = brcmf_fil_iovar_data_get(ifp, "wlc_ver", &ver, sizeof(ver));
+ if (err)
+ return;
+
+ major = le16_to_cpu(ver.wlc_ver_major);
+ minor = le16_to_cpu(ver.wlc_ver_minor);
+
+ brcmf_dbg(INFO, "WLC version: %d.%d\n", major, minor);
+
+ for (i = 0; i < ARRAY_SIZE(brcmf_feat_wlcfeat_map); i++) {
+ e = &brcmf_feat_wlcfeat_map[i];
+ if (major > e->min_ver_major ||
+ (major == e->min_ver_major &&
+ minor >= e->min_ver_minor)) {
+ feat_flags |= e->feat_flags;
+ }
+ }
+
+ if (!feat_flags)
+ return;
+
+ for (i = 0; i < BRCMF_FEAT_LAST; i++)
+ if (feat_flags & BIT(i))
+ brcmf_dbg(INFO, "enabling firmware feature: %s\n",
+ brcmf_feat_names[i]);
+ drv->feat_flags |= feat_flags;
+}
+
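
The table is cumulative: every entry whose minimum version is met contributes its flags, so firmware reporting wlc_ver 13.0 gains both PMKID_V2 (>= 12.0) and PMKID_V3 (>= 13.0), while 12.5 gains only PMKID_V2. The comparison, factored out as a sketch:

    /* sketch of the version test above */
    static bool wlc_ver_ge(int maj, int min, int e_maj, int e_min)
    {
            return maj > e_maj || (maj == e_maj && min >= e_min);
    }
    /* wlc_ver_ge(13, 0, 12, 0) && wlc_ver_ge(13, 0, 13, 0) -> V2 | V3 */
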
/**
* brcmf_feat_iovar_int_get() - determine feature through iovar query.
*
@@ -289,6 +336,7 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
ifp->drvr->feat_flags |= BIT(BRCMF_FEAT_SCAN_RANDOM_MAC);
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
+ brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_SCAN_V2, "scan_ver");
if (drvr->settings->feature_disable) {
brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n",
@@ -297,6 +345,7 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
ifp->drvr->feat_flags &= ~drvr->settings->feature_disable;
}
+ brcmf_feat_wlc_version_overrides(drvr);
brcmf_feat_firmware_overrides(drvr);
/* set chip related quirks */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index d1f4257af696..becbcc50d57a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -29,6 +29,7 @@
* DOT11H: firmware supports 802.11h
* SAE: simultaneous authentication of equals
* FWAUTH: Firmware authenticator
+ * SCAN_V2: Version 2 scan params
*/
#define BRCMF_FEAT_LIST \
BRCMF_FEAT_DEF(MBSS) \
@@ -51,7 +52,10 @@
BRCMF_FEAT_DEF(MONITOR_FMT_HW_RX_HDR) \
BRCMF_FEAT_DEF(DOT11H) \
BRCMF_FEAT_DEF(SAE) \
- BRCMF_FEAT_DEF(FWAUTH)
+ BRCMF_FEAT_DEF(FWAUTH) \
+ BRCMF_FEAT_DEF(SCAN_V2) \
+ BRCMF_FEAT_DEF(PMKID_V2) \
+ BRCMF_FEAT_DEF(PMKID_V3)
/*
* Quirks:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index f518e025d6e4..d28427ce27c4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -48,6 +48,10 @@
/* size of brcmf_scan_params not including variable length array */
#define BRCMF_SCAN_PARAMS_FIXED_SIZE 64
+#define BRCMF_SCAN_PARAMS_V2_FIXED_SIZE 72
+
+/* version of brcmf_scan_params structure */
+#define BRCMF_SCAN_PARAMS_VERSION_V2 2
/* masks for channel and ssid count */
#define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff
@@ -67,6 +71,7 @@
#define BRCMF_PRIMARY_KEY (1 << 1)
#define DOT11_BSSTYPE_ANY 2
#define BRCMF_ESCAN_REQ_VERSION 1
+#define BRCMF_ESCAN_REQ_VERSION_V2 2
#define BRCMF_MAXRATES_IN_SET 16 /* max # of rates in rateset */
@@ -169,6 +174,10 @@
#define BRCMF_HE_CAP_MCS_MAP_NSS_MAX 8
+#define BRCMF_PMKSA_VER_2 2
+#define BRCMF_PMKSA_VER_3 3
+#define BRCMF_PMKSA_NO_EXPIRY 0xffffffff
+
/* MAX_CHUNK_LEN is the maximum length for data passing to firmware in each
* ioctl. It is relatively small because firmware has small maximum size input
* payload restriction for ioctls.
@@ -350,6 +359,12 @@ struct brcmf_ssid_le {
unsigned char SSID[IEEE80211_MAX_SSID_LEN];
};
+/* Alternate SSID structure with an 8-bit length, used by the v2/v3 PMKSA structures */
+struct brcmf_ssid8_le {
+ u8 SSID_len;
+ unsigned char SSID[IEEE80211_MAX_SSID_LEN];
+};
+
struct brcmf_scan_params_le {
struct brcmf_ssid_le ssid_le; /* default: {0, ""} */
u8 bssid[ETH_ALEN]; /* default: bcast */
@@ -386,6 +401,45 @@ struct brcmf_scan_params_le {
__le16 channel_list[1]; /* list of chanspecs */
};
+struct brcmf_scan_params_v2_le {
+ __le16 version; /* structure version */
+ __le16 length; /* structure length */
+ struct brcmf_ssid_le ssid_le; /* default: {0, ""} */
+ u8 bssid[ETH_ALEN]; /* default: bcast */
+ s8 bss_type; /* default: any,
+ * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
+ */
+ u8 pad;
+ __le32 scan_type; /* flags, 0 use default */
+ __le32 nprobes; /* -1 use default, number of probes per channel */
+ __le32 active_time; /* -1 use default, dwell time per channel for
+ * active scanning
+ */
+ __le32 passive_time; /* -1 use default, dwell time per channel
+ * for passive scanning
+ */
+ __le32 home_time; /* -1 use default, dwell time for the
+ * home channel between channel scans
+ */
+ __le32 channel_num; /* count of channels and ssids that follow
+ *
+ * low half is count of channels in
+ * channel_list, 0 means default (use all
+ * available channels)
+ *
+ * high half is entries in struct brcmf_ssid
+ * array that follows channel_list, aligned for
+ * s32 (4 bytes) meaning an odd channel count
+ * implies a 2-byte pad between end of
+ * channel_list and first ssid
+ *
+ * if ssid count is zero, single ssid in the
+ * fixed parameter portion is assumed, otherwise
+ * ssid in the fixed portion is ignored
+ */
+ __le16 channel_list[1]; /* list of chanspecs */
+};
+
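A sketch (not part of this patch) of the channel_num packing described in the comment above: the low half carries the channel count and the high half the SSID count, using the BRCMF_SCAN_PARAMS_COUNT_MASK defined earlier in this header and the 16-bit shift written out literally:

	static inline __le32 brcmf_scan_params_pack_counts(u32 n_channels, u32 n_ssids)
	{
		return cpu_to_le32((n_channels & BRCMF_SCAN_PARAMS_COUNT_MASK) |
				   (n_ssids << 16));
	}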
struct brcmf_scan_results {
u32 buflen;
u32 version;
@@ -397,7 +451,10 @@ struct brcmf_escan_params_le {
__le32 version;
__le16 action;
__le16 sync_id;
- struct brcmf_scan_params_le params_le;
+ union {
+ struct brcmf_scan_params_le params_le;
+ struct brcmf_scan_params_v2_le params_v2_le;
+ };
};
struct brcmf_escan_result_le {
@@ -517,7 +574,7 @@ struct brcmf_wsec_key_le {
struct brcmf_wsec_pmk_le {
__le16 key_len;
__le16 flags;
- u8 key[2 * BRCMF_WSEC_MAX_PSK_LEN + 1];
+ u8 key[BRCMF_WSEC_MAX_SAE_PASSWORD_LEN];
};
/**
@@ -742,6 +799,31 @@ struct brcmf_rev_info_le {
};
/**
+ * struct brcmf_wlc_version_le - firmware revision info.
+ *
+ * @version: structure version.
+ * @length: structure length.
+ * @epi_ver_major: EPI major version
+ * @epi_ver_minor: EPI minor version
+ * @epi_ver_rc: EPI rc version
+ * @epi_ver_incr: EPI increment version
+ * @wlc_ver_major: WLC major version
+ * @wlc_ver_minor: WLC minor version
+ */
+struct brcmf_wlc_version_le {
+ __le16 version;
+ __le16 length;
+
+ __le16 epi_ver_major;
+ __le16 epi_ver_minor;
+ __le16 epi_ver_rc;
+ __le16 epi_ver_incr;
+
+ __le16 wlc_ver_major;
+ __le16 wlc_ver_minor;
+};
+
+/**
* struct brcmf_assoclist_le - request assoc list.
*
* @count: indicates number of stations.
@@ -804,6 +886,51 @@ struct brcmf_pmksa {
};
/**
+ * struct brcmf_pmksa_v2 - PMK Security Association
+ *
+ * @length: Length of the structure.
+ * @bssid: The AP's BSSID.
+ * @pmkid: The PMK ID.
+ * @pmk: PMK material for FILS key derivation.
+ * @pmk_len: Length of PMK data.
+ * @ssid: The AP's SSID.
+ * @fils_cache_id: FILS cache identifier
+ */
+struct brcmf_pmksa_v2 {
+ __le16 length;
+ u8 bssid[ETH_ALEN];
+ u8 pmkid[WLAN_PMKID_LEN];
+ u8 pmk[WLAN_PMK_LEN_SUITE_B_192];
+ __le16 pmk_len;
+ struct brcmf_ssid8_le ssid;
+ u16 fils_cache_id;
+};
+
+/**
+ * struct brcmf_pmksa_v3 - PMK Security Association
+ *
+ * @bssid: The AP's BSSID.
+ * @pmkid: The PMK ID.
+ * @pmkid_len: The length of the PMK ID.
+ * @pmk: PMK material for FILS key derivation.
+ * @pmk_len: Length of PMK data.
+ * @fils_cache_id: FILS cache identifier
+ * @pad: Padding.
+ * @ssid: The AP's SSID.
+ * @time_left: Remaining time until expiry. 0 = expired, ~0 = no expiry.
+ */
+struct brcmf_pmksa_v3 {
+ u8 bssid[ETH_ALEN];
+ u8 pmkid[WLAN_PMKID_LEN];
+ u8 pmkid_len;
+ u8 pmk[WLAN_PMK_LEN_SUITE_B_192];
+ u8 pmk_len;
+ __le16 fils_cache_id;
+ u8 pad;
+ struct brcmf_ssid8_le ssid;
+ __le32 time_left;
+};
+
+/**
* struct brcmf_pmk_list_le - List of pmksa's.
*
* @npmk: Number of pmksa's.
@@ -815,6 +942,34 @@ struct brcmf_pmk_list_le {
};
/**
+ * struct brcmf_pmk_list_v2_le - List of pmksa's.
+ *
+ * @version: Request version.
+ * @length: Length of this structure.
+ * @pmk: PMK SA information.
+ */
+struct brcmf_pmk_list_v2_le {
+ __le16 version;
+ __le16 length;
+ struct brcmf_pmksa_v2 pmk[BRCMF_MAXPMKID];
+};
+
+/**
+ * struct brcmf_pmk_op_v3_le - Operation on PMKSA list.
+ *
+ * @version: Request version.
+ * @length: Length of this structure.
+ * @count: Number of PMKSA entries in @pmk.
+ * @pad: Padding.
+ * @pmk: PMK SA information.
+ */
+struct brcmf_pmk_op_v3_le {
+ __le16 version;
+ __le16 length;
+ __le16 count;
+ __le16 pad;
+ struct brcmf_pmksa_v3 pmk[BRCMF_MAXPMKID];
+};
+
+/**
* struct brcmf_pno_param_le - PNO scan configuration parameters
*
* @version: PNO parameters version.
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index cec53f934940..e737db3f5774 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -47,6 +47,32 @@
#define MSGBUF_TYPE_RX_CMPLT 0x12
#define MSGBUF_TYPE_LPBK_DMAXFER 0x13
#define MSGBUF_TYPE_LPBK_DMAXFER_CMPLT 0x14
+#define MSGBUF_TYPE_FLOW_RING_RESUME 0x15
+#define MSGBUF_TYPE_FLOW_RING_RESUME_CMPLT 0x16
+#define MSGBUF_TYPE_FLOW_RING_SUSPEND 0x17
+#define MSGBUF_TYPE_FLOW_RING_SUSPEND_CMPLT 0x18
+#define MSGBUF_TYPE_INFO_BUF_POST 0x19
+#define MSGBUF_TYPE_INFO_BUF_CMPLT 0x1A
+#define MSGBUF_TYPE_H2D_RING_CREATE 0x1B
+#define MSGBUF_TYPE_D2H_RING_CREATE 0x1C
+#define MSGBUF_TYPE_H2D_RING_CREATE_CMPLT 0x1D
+#define MSGBUF_TYPE_D2H_RING_CREATE_CMPLT 0x1E
+#define MSGBUF_TYPE_H2D_RING_CONFIG 0x1F
+#define MSGBUF_TYPE_D2H_RING_CONFIG 0x20
+#define MSGBUF_TYPE_H2D_RING_CONFIG_CMPLT 0x21
+#define MSGBUF_TYPE_D2H_RING_CONFIG_CMPLT 0x22
+#define MSGBUF_TYPE_H2D_MAILBOX_DATA 0x23
+#define MSGBUF_TYPE_D2H_MAILBOX_DATA 0x24
+#define MSGBUF_TYPE_TIMSTAMP_BUFPOST 0x25
+#define MSGBUF_TYPE_HOSTTIMSTAMP 0x26
+#define MSGBUF_TYPE_HOSTTIMSTAMP_CMPLT 0x27
+#define MSGBUF_TYPE_FIRMWARE_TIMESTAMP 0x28
+#define MSGBUF_TYPE_SNAPSHOT_UPLOAD 0x29
+#define MSGBUF_TYPE_SNAPSHOT_CMPLT 0x2A
+#define MSGBUF_TYPE_H2D_RING_DELETE 0x2B
+#define MSGBUF_TYPE_D2H_RING_DELETE 0x2C
+#define MSGBUF_TYPE_H2D_RING_DELETE_CMPLT 0x2D
+#define MSGBUF_TYPE_D2H_RING_DELETE_CMPLT 0x2E
#define NR_TX_PKTIDS 2048
#define NR_RX_PKTIDS 1024
@@ -218,6 +244,19 @@ struct msgbuf_flowring_flush_resp {
__le32 rsvd0[3];
};
+struct msgbuf_h2d_mailbox_data {
+ struct msgbuf_common_hdr msg;
+ __le32 data;
+ __le32 rsvd0[7];
+};
+
+struct msgbuf_d2h_mailbox_data {
+ struct msgbuf_common_hdr msg;
+ struct msgbuf_completion_hdr compl_hdr;
+ __le32 data;
+ __le32 rsvd0[2];
+};
+
struct brcmf_msgbuf_work_item {
struct list_head queue;
u32 flowid;
@@ -1282,6 +1321,16 @@ brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf,
}
+static void brcmf_msgbuf_process_d2h_mailbox_data(struct brcmf_msgbuf *msgbuf,
+ void *buf)
+{
+ struct msgbuf_d2h_mailbox_data *d2h_mb_data = buf;
+ struct brcmf_pub *drvr = msgbuf->drvr;
+
+ brcmf_bus_d2h_mb_rx(drvr->bus_if, le32_to_cpu(d2h_mb_data->data));
+}
+
static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf)
{
struct brcmf_pub *drvr = msgbuf->drvr;
@@ -1324,6 +1373,10 @@ static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf)
brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RX_CMPLT\n");
brcmf_msgbuf_process_rx_complete(msgbuf, buf);
break;
+ case MSGBUF_TYPE_D2H_MAILBOX_DATA:
+ brcmf_dbg(MSGBUF, "MSGBUF_TYPE_D2H_MAILBOX_DATA\n");
+ brcmf_msgbuf_process_d2h_mailbox_data(msgbuf, buf);
+ break;
default:
bphy_err(drvr, "Unsupported msgtype %d\n", msg->msgtype);
break;
@@ -1462,6 +1515,38 @@ void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
}
}
+
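+/* Send host-to-device mailbox data as a message on the control submit ring.
+ * Used by the PCIe bus layer when the firmware requests mailbox-over-msgbuf
+ * operation instead of the TCM mailbox.
+ */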
+int brcmf_msgbuf_h2d_mb_write(struct brcmf_pub *drvr, u32 data)
+{
+ struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+ struct brcmf_commonring *commonring;
+ struct msgbuf_h2d_mailbox_data *request;
+ void *ret_ptr;
+ int err;
+
+ commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
+ brcmf_commonring_lock(commonring);
+ ret_ptr = brcmf_commonring_reserve_for_write(commonring);
+ if (!ret_ptr) {
+ bphy_err(drvr, "Failed to reserve space in commonring\n");
+ brcmf_commonring_unlock(commonring);
+ return -ENOMEM;
+ }
+
+ request = (struct msgbuf_h2d_mailbox_data *)ret_ptr;
+ request->msg.msgtype = MSGBUF_TYPE_H2D_MAILBOX_DATA;
+ request->msg.ifidx = -1;
+ request->msg.flags = 0;
+ request->msg.request_id = 0;
+ request->data = data;
+
+ err = brcmf_commonring_write_complete(commonring);
+ brcmf_commonring_unlock(commonring);
+
+ return err;
+}
+
#ifdef DEBUG
static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
{
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
index 6a849f4a94dd..89b6b7f9ddb7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
@@ -32,6 +32,7 @@ int brcmf_proto_msgbuf_rx_trigger(struct device *dev);
void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid);
int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr);
void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr);
+int brcmf_msgbuf_h2d_mb_write(struct brcmf_pub *drvr, u32 data);
#else
static inline int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
{
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index a83699de01ec..d295b9f3a4fb 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -85,6 +85,13 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
if (!of_property_read_string(np, "apple,antenna-sku", &prop))
settings->antenna_sku = prop;
+ /* The WLAN calibration blob is normally stored in SROM, but Apple
+ * ARM64 platforms pass it via the DT instead.
+ */
+ prop = of_get_property(np, "brcm,cal-blob", &settings->cal_size);
+ if (prop && settings->cal_size)
+ settings->cal_blob = prop;
+
/* Set board-type to the first string of the machine compatible prop */
root = of_find_node_by_path("/");
if (root && !settings->board_type) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 80083f9ea311..a3c66b808bc1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -13,6 +13,7 @@
#include <linux/bcma/bcma.h>
#include <linux/sched.h>
#include <linux/io.h>
+#include <linux/random.h>
#include <asm/unaligned.h>
#include <soc.h>
@@ -53,13 +54,17 @@ BRCMF_FW_CLM_DEF(4356, "brcmfmac4356-pcie");
BRCMF_FW_CLM_DEF(43570, "brcmfmac43570-pcie");
BRCMF_FW_DEF(4358, "brcmfmac4358-pcie");
BRCMF_FW_DEF(4359, "brcmfmac4359-pcie");
-BRCMF_FW_DEF(4364, "brcmfmac4364-pcie");
+BRCMF_FW_CLM_DEF(4364B2, "brcmfmac4364b2-pcie");
+BRCMF_FW_CLM_DEF(4364B3, "brcmfmac4364b3-pcie");
BRCMF_FW_DEF(4365B, "brcmfmac4365b-pcie");
BRCMF_FW_DEF(4365C, "brcmfmac4365c-pcie");
BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie");
BRCMF_FW_DEF(4371, "brcmfmac4371-pcie");
+BRCMF_FW_CLM_DEF(4377B3, "brcmfmac4377b3-pcie");
BRCMF_FW_CLM_DEF(4378B1, "brcmfmac4378b1-pcie");
+BRCMF_FW_CLM_DEF(4378B3, "brcmfmac4378b3-pcie");
+BRCMF_FW_CLM_DEF(4387C2, "brcmfmac4387c2-pcie");
BRCMF_FW_DEF(4355, "brcmfmac89459-pcie");
/* firmware config files */
@@ -69,6 +74,7 @@ MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txt");
/* per-board firmware binaries */
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.bin");
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.clm_blob");
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txcap_blob");
static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
@@ -82,7 +88,8 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
BRCMF_FW_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
- BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0xFFFFFFFF, 4364),
+ BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0x0000000F, 4364B2), /* revision ID 3 */
+ BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0xFFFFFFF0, 4364B3), /* revision ID 4 */
BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
@@ -90,7 +97,10 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C),
BRCMF_FW_ENTRY(BRCM_CC_43666_CHIP_ID, 0xFFFFFFF0, 4366C),
BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
- BRCMF_FW_ENTRY(BRCM_CC_4378_CHIP_ID, 0xFFFFFFFF, 4378B1), /* revision ID 3 */
+ BRCMF_FW_ENTRY(BRCM_CC_4377_CHIP_ID, 0xFFFFFFFF, 4377B3), /* revision ID 4 */
+ BRCMF_FW_ENTRY(BRCM_CC_4378_CHIP_ID, 0x0000000F, 4378B1), /* revision ID 3 */
+ BRCMF_FW_ENTRY(BRCM_CC_4378_CHIP_ID, 0xFFFFFFE0, 4378B3), /* revision ID 5 */
+ BRCMF_FW_ENTRY(BRCM_CC_4387_CHIP_ID, 0xFFFFFFFF, 4387C2), /* revision ID 7 */
BRCMF_FW_ENTRY(CY_CC_89459_CHIP_ID, 0xFFFFFFFF, 4355),
};
@@ -199,11 +209,64 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
#define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF
#define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000
#define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000
+#define BRCMF_PCIE_SHARED_USE_MAILBOX 0x2000000
+#define BRCMF_PCIE_SHARED_TIMESTAMP_DB0 0x8000000
#define BRCMF_PCIE_SHARED_HOSTRDY_DB1 0x10000000
+#define BRCMF_PCIE_SHARED_NO_OOB_DW 0x20000000
+#define BRCMF_PCIE_SHARED_INBAND_DS 0x40000000
+#define BRCMF_PCIE_SHARED_DAR 0x80000000
+
+#define BRCMF_PCIE_SHARED2_EXTENDED_TRAP_DATA 0x1
+#define BRCMF_PCIE_SHARED2_TXSTATUS_METADATA 0x2
+#define BRCMF_PCIE_SHARED2_BT_LOGGING 0x4
+#define BRCMF_PCIE_SHARED2_SNAPSHOT_UPLOAD 0x8
+#define BRCMF_PCIE_SHARED2_SUBMIT_COUNT_WAR 0x10
+#define BRCMF_PCIE_SHARED2_FAST_DELETE_RING 0x20
+#define BRCMF_PCIE_SHARED2_EVTBUF_MAX_MASK 0xC0
+#define BRCMF_PCIE_SHARED2_PKT_TX_STATUS 0x100
+#define BRCMF_PCIE_SHARED2_FW_SMALL_MEMDUMP 0x200
+#define BRCMF_PCIE_SHARED2_FW_HC_ON_TRAP 0x400
+#define BRCMF_PCIE_SHARED2_HSCB 0x800
+#define BRCMF_PCIE_SHARED2_EDL_RING 0x1000
+#define BRCMF_PCIE_SHARED2_DEBUG_BUF_DEST 0x2000
+#define BRCMF_PCIE_SHARED2_PCIE_ENUM_RESET_FLR 0x4000
+#define BRCMF_PCIE_SHARED2_PKT_TIMESTAMP 0x8000
+#define BRCMF_PCIE_SHARED2_HP2P 0x10000
+#define BRCMF_PCIE_SHARED2_HWA 0x20000
+#define BRCMF_PCIE_SHARED2_TRAP_ON_HOST_DB7 0x40000
+#define BRCMF_PCIE_SHARED2_DURATION_SCALE 0x100000
+#define BRCMF_PCIE_SHARED2_D2H_D11_TX_STATUS 0x40000000
+#define BRCMF_PCIE_SHARED2_H2D_D11_TX_STATUS 0x80000000
#define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000
#define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000
+#define BRCMF_HOSTCAP_PCIEAPI_VERSION_MASK 0x000000FF
+#define BRCMF_HOSTCAP_H2D_VALID_PHASE 0x00000100
+#define BRCMF_HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE 0x00000200
+#define BRCMF_HOSTCAP_H2D_ENABLE_HOSTRDY 0x400
+#define BRCMF_HOSTCAP_DB0_TIMESTAMP 0x800
+#define BRCMF_HOSTCAP_DS_NO_OOB_DW 0x1000
+#define BRCMF_HOSTCAP_DS_INBAND_DW 0x2000
+#define BRCMF_HOSTCAP_H2D_IDMA 0x4000
+#define BRCMF_HOSTCAP_H2D_IFRM 0x8000
+#define BRCMF_HOSTCAP_H2D_DAR 0x10000
+#define BRCMF_HOSTCAP_EXTENDED_TRAP_DATA 0x20000
+#define BRCMF_HOSTCAP_TXSTATUS_METADATA 0x40000
+#define BRCMF_HOSTCAP_BT_LOGGING 0x80000
+#define BRCMF_HOSTCAP_SNAPSHOT_UPLOAD 0x100000
+#define BRCMF_HOSTCAP_FAST_DELETE_RING 0x200000
+#define BRCMF_HOSTCAP_PKT_TXSTATUS 0x400000
+#define BRCMF_HOSTCAP_UR_FW_NO_TRAP 0x800000
+#define BRCMF_HOSTCAP_HSCB 0x2000000
+#define BRCMF_HOSTCAP_EXT_TRAP_DBGBUF 0x4000000
+#define BRCMF_HOSTCAP_EDL_RING 0x10000000
+#define BRCMF_HOSTCAP_PKT_TIMESTAMP 0x20000000
+#define BRCMF_HOSTCAP_PKT_HP2P 0x40000000
+#define BRCMF_HOSTCAP_HWA 0x80000000
+#define BRCMF_HOSTCAP2_DURATION_SCALE_MASK 0x3F
+
+#define BRCMF_SHARED_FLAGS_OFFSET 0
#define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET 34
#define BRCMF_SHARED_RING_BASE_OFFSET 52
#define BRCMF_SHARED_RX_DATAOFFSET_OFFSET 36
@@ -215,6 +278,11 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
#define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET 56
#define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET 64
#define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET 68
+#define BRCMF_SHARED_FLAGS2_OFFSET 80
+#define BRCMF_SHARED_HOST_CAP_OFFSET 84
+#define BRCMF_SHARED_FLAGS3_OFFSET 108
+#define BRCMF_SHARED_HOST_CAP2_OFFSET 112
+#define BRCMF_SHARED_HOST_CAP3_OFFSET 116
#define BRCMF_RING_H2D_RING_COUNT_OFFSET 0
#define BRCMF_RING_D2H_RING_COUNT_OFFSET 1
@@ -279,6 +347,8 @@ struct brcmf_pcie_console {
struct brcmf_pcie_shared_info {
u32 tcm_base_address;
u32 flags;
+ u32 flags2;
+ u32 flags3;
struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
struct brcmf_pcie_ringbuf *flowrings;
u16 max_rxbufpost;
@@ -295,6 +365,7 @@ struct brcmf_pcie_shared_info {
void *ringupd;
dma_addr_t ringupd_dmahandle;
u8 version;
+ bool mb_via_ctl;
};
struct brcmf_pcie_core_info {
@@ -318,7 +389,9 @@ struct brcmf_pciedev_info {
char fw_name[BRCMF_FW_NAME_LEN];
char nvram_name[BRCMF_FW_NAME_LEN];
char clm_name[BRCMF_FW_NAME_LEN];
+ char txcap_name[BRCMF_FW_NAME_LEN];
const struct firmware *clm_fw;
+ const struct firmware *txcap_fw;
const struct brcmf_pcie_reginfo *reginfo;
void __iomem *regs;
void __iomem *tcm;
@@ -330,6 +403,7 @@ struct brcmf_pciedev_info {
wait_queue_head_t mbdata_resp_wait;
bool mbdata_completed;
bool irq_allocated;
+ bool have_msi;
bool wowl_enabled;
u8 dma_idx_sz;
void *idxbuf;
@@ -410,8 +484,6 @@ struct brcmf_pcie_reginfo {
u32 intmask;
u32 mailboxint;
u32 mailboxmask;
- u32 h2d_mailbox_0;
- u32 h2d_mailbox_1;
u32 int_d2h_db;
u32 int_fn0;
};
@@ -420,8 +492,6 @@ static const struct brcmf_pcie_reginfo brcmf_reginfo_default = {
.intmask = BRCMF_PCIE_PCIE2REG_INTMASK,
.mailboxint = BRCMF_PCIE_PCIE2REG_MAILBOXINT,
.mailboxmask = BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
- .h2d_mailbox_0 = BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0,
- .h2d_mailbox_1 = BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1,
.int_d2h_db = BRCMF_PCIE_MB_INT_D2H_DB,
.int_fn0 = BRCMF_PCIE_MB_INT_FN0,
};
@@ -430,8 +500,6 @@ static const struct brcmf_pcie_reginfo brcmf_reginfo_64 = {
.intmask = BRCMF_PCIE_64_PCIE2REG_INTMASK,
.mailboxint = BRCMF_PCIE_64_PCIE2REG_MAILBOXINT,
.mailboxmask = BRCMF_PCIE_64_PCIE2REG_MAILBOXMASK,
- .h2d_mailbox_0 = BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_0,
- .h2d_mailbox_1 = BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_1,
.int_d2h_db = BRCMF_PCIE_64_MB_INT_D2H_DB,
.int_fn0 = 0,
};
@@ -741,6 +809,19 @@ brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
u32 i;
shared = &devinfo->shared;
+
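+ /* Protocol revisions that clear BRCMF_PCIE_SHARED_USE_MAILBOX expect
+ * host-to-device mailbox data to be sent over the msgbuf control ring
+ * rather than through the TCM mailbox area.
+ */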
+ if (shared->mb_via_ctl) {
+ struct pci_dev *pdev = devinfo->pdev;
+ struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
+ int ret;
+
+ ret = brcmf_msgbuf_h2d_mb_write(bus->drvr, htod_mb_data);
+ if (ret < 0)
+ brcmf_err(bus, "Failed to send H2D mailbox data (%d)\n",
+ ret);
+ return ret;
+ }
+
addr = shared->htod_mb_data_addr;
cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
@@ -768,8 +849,29 @@ brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
return 0;
}
+static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo, u32 data)
+{
+ brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", data);
+ if (data & BRCMF_D2H_DEV_DS_ENTER_REQ) {
+ brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
+ brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
+ brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
+ }
+ if (data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
+ brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
+ if (data & BRCMF_D2H_DEV_D3_ACK) {
+ brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
+ devinfo->mbdata_completed = true;
+ wake_up(&devinfo->mbdata_resp_wait);
+ }
+ if (data & BRCMF_D2H_DEV_FWHALT) {
+ brcmf_dbg(PCIE, "D2H_MB_DATA: FW HALT\n");
+ brcmf_fw_crashed(&devinfo->pdev->dev);
+ }
+}
+
-static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
+static void brcmf_pcie_poll_mb_data(struct brcmf_pciedev_info *devinfo)
{
struct brcmf_pcie_shared_info *shared;
u32 addr;
@@ -784,23 +886,16 @@ static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
brcmf_pcie_write_tcm32(devinfo, addr, 0);
- brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data);
- if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ) {
- brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
- brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
- brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
- }
- if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
- brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
- if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) {
- brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
- devinfo->mbdata_completed = true;
- wake_up(&devinfo->mbdata_resp_wait);
- }
- if (dtoh_mb_data & BRCMF_D2H_DEV_FWHALT) {
- brcmf_dbg(PCIE, "D2H_MB_DATA: FW HALT\n");
- brcmf_fw_crashed(&devinfo->pdev->dev);
- }
+ brcmf_pcie_handle_mb_data(devinfo, dtoh_mb_data);
+}
+
+static void brcmf_pcie_d2h_mb_rx(struct device *dev, u32 data)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+
+ brcmf_pcie_handle_mb_data(buspub->devinfo, data);
}
@@ -892,9 +987,12 @@ static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
static void brcmf_pcie_hostready(struct brcmf_pciedev_info *devinfo)
{
- if (devinfo->shared.flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1)
- brcmf_pcie_write_reg32(devinfo,
- devinfo->reginfo->h2d_mailbox_1, 1);
+ if (devinfo->shared.flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1) {
+ if (devinfo->shared.flags & BRCMF_PCIE_SHARED_DAR)
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_1, 1);
+ else
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1, 1);
+ }
}
static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg)
@@ -906,6 +1004,11 @@ static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg)
brcmf_dbg(PCIE, "Enter\n");
return IRQ_WAKE_THREAD;
}
+
+ /* mailboxint is cleared by the firmware in MSI mode */
+ if (devinfo->have_msi)
+ return IRQ_WAKE_THREAD;
+
return IRQ_NONE;
}
@@ -922,13 +1025,13 @@ static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg)
brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxint,
status);
if (status & devinfo->reginfo->int_fn0)
- brcmf_pcie_handle_mb_data(devinfo);
- if (status & devinfo->reginfo->int_d2h_db) {
- if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
- brcmf_proto_msgbuf_rx_trigger(
- &devinfo->pdev->dev);
- }
+ brcmf_pcie_poll_mb_data(devinfo);
+ }
+ if (devinfo->have_msi || status & devinfo->reginfo->int_d2h_db) {
+ if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
+ brcmf_proto_msgbuf_rx_trigger(&devinfo->pdev->dev);
}
+
brcmf_pcie_bus_console_read(devinfo, false);
if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
brcmf_pcie_intr_enable(devinfo);
@@ -946,7 +1049,10 @@ static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
brcmf_dbg(PCIE, "Enter\n");
- pci_enable_msi(pdev);
+ devinfo->have_msi = pci_enable_msi(pdev) >= 0;
+ if (devinfo->have_msi)
+ brcmf_dbg(PCIE, "MSI enabled\n");
+
if (request_threaded_irq(pdev->irq, brcmf_pcie_quick_check_isr,
brcmf_pcie_isr_thread, IRQF_SHARED,
"brcmf_pcie_intr", devinfo)) {
@@ -1035,7 +1141,10 @@ static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
brcmf_dbg(PCIE, "RING !\n");
/* Any arbitrary value will do, lets use 1 */
- brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->h2d_mailbox_0, 1);
+ if (devinfo->shared.flags & BRCMF_PCIE_SHARED_DAR)
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_0, 1);
+ else
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0, 1);
return 0;
}
@@ -1492,6 +1601,10 @@ static int brcmf_pcie_get_blob(struct device *dev, const struct firmware **fw,
*fw = devinfo->clm_fw;
devinfo->clm_fw = NULL;
break;
+ case BRCMF_BLOB_TXCAP:
+ *fw = devinfo->txcap_fw;
+ devinfo->txcap_fw = NULL;
+ break;
default:
return -ENOENT;
}
@@ -1547,6 +1660,7 @@ static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
.get_memdump = brcmf_pcie_get_memdump,
.get_blob = brcmf_pcie_get_blob,
.reset = brcmf_pcie_reset,
+ .d2h_mb_rx = brcmf_pcie_d2h_mb_rx,
};
@@ -1578,12 +1692,16 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
{
struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
struct brcmf_pcie_shared_info *shared;
+ u32 host_cap;
+ u32 host_cap2;
u32 addr;
shared = &devinfo->shared;
shared->tcm_base_address = sharedram_addr;
- shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
+ shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr +
+ BRCMF_SHARED_FLAGS_OFFSET);
+
shared->version = (u8)(shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK);
brcmf_dbg(PCIE, "PCIe protocol version %d\n", shared->version);
if ((shared->version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
@@ -1624,9 +1742,47 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
brcmf_pcie_bus_console_init(devinfo);
brcmf_pcie_bus_console_read(devinfo, false);
+ /* Features added in revision 6 follow */
+ if (shared->version < 6)
+ return 0;
+
+ shared->flags2 = brcmf_pcie_read_tcm32(devinfo, sharedram_addr +
+ BRCMF_SHARED_FLAGS2_OFFSET);
+ shared->flags3 = brcmf_pcie_read_tcm32(devinfo, sharedram_addr +
+ BRCMF_SHARED_FLAGS3_OFFSET);
+
+ /* Check which mailbox mechanism to use */
+ if (!(shared->flags & BRCMF_PCIE_SHARED_USE_MAILBOX))
+ shared->mb_via_ctl = true;
+
+ /* Update host support flags */
+ host_cap = shared->version;
+ host_cap2 = 0;
+
+ if (shared->flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1)
+ host_cap |= BRCMF_HOSTCAP_H2D_ENABLE_HOSTRDY;
+
+ if (shared->flags & BRCMF_PCIE_SHARED_DAR)
+ host_cap |= BRCMF_HOSTCAP_H2D_DAR;
+
+ /* Disable DS: this is not currently properly supported */
+ host_cap |= BRCMF_HOSTCAP_DS_NO_OOB_DW;
+
+ brcmf_pcie_write_tcm32(devinfo, sharedram_addr +
+ BRCMF_SHARED_HOST_CAP_OFFSET, host_cap);
+ brcmf_pcie_write_tcm32(devinfo, sharedram_addr +
+ BRCMF_SHARED_HOST_CAP2_OFFSET, host_cap2);
+
return 0;
}
+struct brcmf_random_seed_footer {
+ __le32 length;
+ __le32 magic;
+};
+
+#define BRCMF_RANDOM_SEED_MAGIC 0xfeedc0de
+#define BRCMF_RANDOM_SEED_LENGTH 0x100
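+
+/*
+ * brcmf_pcie_download_fw_nvram() below lays these out at the top of device
+ * RAM, in ascending address order:
+ *
+ *   ... | random seed (BRCMF_RANDOM_SEED_LENGTH bytes) | footer | NVRAM
+ *
+ * so the firmware finds the footer immediately below the NVRAM image.
+ */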
static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
const struct firmware *fw, void *nvram,
@@ -1658,11 +1814,32 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0);
if (nvram) {
+ size_t rand_len = BRCMF_RANDOM_SEED_LENGTH;
+ struct brcmf_random_seed_footer footer = {
+ .length = cpu_to_le32(rand_len),
+ .magic = cpu_to_le32(BRCMF_RANDOM_SEED_MAGIC),
+ };
+ void *randbuf;
+
brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
address = devinfo->ci->rambase + devinfo->ci->ramsize -
nvram_len;
memcpy_toio(devinfo->tcm + address, nvram, nvram_len);
brcmf_fw_nvram_free(nvram);
+
+ /* Some Apple chips/firmwares expect a buffer of random data
+ * to be present before NVRAM
+ */
+ brcmf_dbg(PCIE, "Download random seed\n");
+
+ address -= sizeof(footer);
+ memcpy_toio(devinfo->tcm + address, &footer, sizeof(footer));
+
+ address -= rand_len;
+ randbuf = kzalloc(rand_len, GFP_KERNEL);
+ if (!randbuf)
+ return -ENOMEM;
+ get_random_bytes(randbuf, rand_len);
+ memcpy_toio(devinfo->tcm + address, randbuf, rand_len);
+ kfree(randbuf);
} else {
brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
devinfo->nvram_name);
@@ -1974,11 +2151,27 @@ static int brcmf_pcie_read_otp(struct brcmf_pciedev_info *devinfo)
int ret;
switch (devinfo->ci->chip) {
+ case CY_CC_89459_CHIP_ID:
+ coreid = BCMA_CORE_CHIPCOMMON;
+ base = 0x8c0;
+ words = 0xb2;
+ break;
+ case BRCM_CC_4364_CHIP_ID:
+ coreid = BCMA_CORE_CHIPCOMMON;
+ base = 0x8c0;
+ words = 0x1a0;
+ break;
+ case BRCM_CC_4377_CHIP_ID:
case BRCM_CC_4378_CHIP_ID:
coreid = BCMA_CORE_GCI;
base = 0x1120;
words = 0x170;
break;
+ case BRCM_CC_4387_CHIP_ID:
+ coreid = BCMA_CORE_GCI;
+ base = 0x113c;
+ words = 0x170;
+ break;
default:
/* OTP not supported on this chip */
return 0;
@@ -2036,6 +2229,7 @@ static int brcmf_pcie_read_otp(struct brcmf_pciedev_info *devinfo)
#define BRCMF_PCIE_FW_CODE 0
#define BRCMF_PCIE_FW_NVRAM 1
#define BRCMF_PCIE_FW_CLM 2
+#define BRCMF_PCIE_FW_TXCAP 3
static void brcmf_pcie_setup(struct device *dev, int ret,
struct brcmf_fw_request *fwreq)
@@ -2061,6 +2255,7 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
nvram = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.data;
nvram_len = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.len;
devinfo->clm_fw = fwreq->items[BRCMF_PCIE_FW_CLM].binary;
+ devinfo->txcap_fw = fwreq->items[BRCMF_PCIE_FW_TXCAP].binary;
kfree(fwreq);
ret = brcmf_chip_get_raminfo(devinfo->ci);
@@ -2137,6 +2332,7 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
{ ".bin", devinfo->fw_name },
{ ".txt", devinfo->nvram_name },
{ ".clm_blob", devinfo->clm_name },
+ { ".txcap_blob", devinfo->txcap_name },
};
fwreq = brcmf_fw_alloc_request(devinfo->ci->chip, devinfo->ci->chiprev,
@@ -2151,6 +2347,8 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
fwreq->items[BRCMF_PCIE_FW_CLM].type = BRCMF_FW_TYPE_BINARY;
fwreq->items[BRCMF_PCIE_FW_CLM].flags = BRCMF_FW_REQF_OPTIONAL;
+ fwreq->items[BRCMF_PCIE_FW_TXCAP].type = BRCMF_FW_TYPE_BINARY;
+ fwreq->items[BRCMF_PCIE_FW_TXCAP].flags = BRCMF_FW_REQF_OPTIONAL;
/* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */
fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
fwreq->bus_nr = devinfo->pdev->bus->number;
@@ -2342,6 +2540,7 @@ brcmf_pcie_remove(struct pci_dev *pdev)
brcmf_pcie_reset_device(devinfo);
brcmf_pcie_release_resource(devinfo);
release_firmware(devinfo->clm_fw);
+ release_firmware(devinfo->txcap_fw);
if (devinfo->ci)
brcmf_chip_detach(devinfo->ci);
@@ -2401,10 +2600,11 @@ static int brcmf_pcie_pm_leave_D3(struct device *dev)
/* Check if device is still up and running, if so we are ready */
if (brcmf_pcie_read_reg32(devinfo, devinfo->reginfo->intmask) != 0) {
brcmf_dbg(PCIE, "Try to wakeup device....\n");
+ /* Set the device up, so we can write the MB data message in ring mode */
+ devinfo->state = BRCMFMAC_PCIE_STATE_UP;
if (brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM))
goto cleanup;
brcmf_dbg(PCIE, "Hot resume, continue....\n");
- devinfo->state = BRCMFMAC_PCIE_STATE_UP;
brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
brcmf_bus_change_state(bus, BRCMF_BUS_UP);
brcmf_pcie_intr_enable(devinfo);
@@ -2413,6 +2613,7 @@ static int brcmf_pcie_pm_leave_D3(struct device *dev)
}
cleanup:
+ devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
brcmf_chip_detach(devinfo->ci);
devinfo->ci = NULL;
pdev = devinfo->pdev;
@@ -2447,6 +2648,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_RAW_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4355_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
@@ -2466,7 +2668,9 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4377_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4378_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4387_DEVICE_ID),
BRCMF_PCIE_DEVICE(CY_PCIE_89459_DEVICE_ID),
BRCMF_PCIE_DEVICE(CY_PCIE_89459_RAW_DEVICE_ID),
{ /* end: all zeroes */ }
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index f4939cf62767..782c55bb2655 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -51,7 +51,9 @@
#define BRCM_CC_43664_CHIP_ID 43664
#define BRCM_CC_43666_CHIP_ID 43666
#define BRCM_CC_4371_CHIP_ID 0x4371
+#define BRCM_CC_4377_CHIP_ID 0x4377
#define BRCM_CC_4378_CHIP_ID 0x4378
+#define BRCM_CC_4387_CHIP_ID 0x4387
#define CY_CC_4373_CHIP_ID 0x4373
#define CY_CC_43012_CHIP_ID 43012
#define CY_CC_43439_CHIP_ID 43439
@@ -72,6 +74,7 @@
#define BRCM_PCIE_4350_DEVICE_ID 0x43a3
#define BRCM_PCIE_4354_DEVICE_ID 0x43df
#define BRCM_PCIE_4354_RAW_DEVICE_ID 0x4354
+#define BRCM_PCIE_4355_DEVICE_ID 0x43dc
#define BRCM_PCIE_4356_DEVICE_ID 0x43ec
#define BRCM_PCIE_43567_DEVICE_ID 0x43d3
#define BRCM_PCIE_43570_DEVICE_ID 0x43d9
@@ -90,7 +93,9 @@
#define BRCM_PCIE_4366_2G_DEVICE_ID 0x43c4
#define BRCM_PCIE_4366_5G_DEVICE_ID 0x43c5
#define BRCM_PCIE_4371_DEVICE_ID 0x440d
+#define BRCM_PCIE_4377_DEVICE_ID 0x4488
#define BRCM_PCIE_4378_DEVICE_ID 0x4425
+#define BRCM_PCIE_4387_DEVICE_ID 0x4433
#define CY_PCIE_89459_DEVICE_ID 0x4415
#define CY_PCIE_89459_RAW_DEVICE_ID 0x4355
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index ff8b083dc5c6..4481ffc97993 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -195,8 +195,20 @@ struct apple_nvme {
int irq;
spinlock_t lock;
+
+ /*
+ * Delayed cache flush handling state
+ */
+ struct nvme_ns *flush_ns;
+ unsigned long flush_interval;
+ unsigned long last_flush;
+ struct delayed_work flush_dwork;
};
+static unsigned int flush_interval = 1000;
+module_param(flush_interval, uint, 0644);
+MODULE_PARM_DESC(flush_interval, "Grace period in msecs between flushes");
+
static_assert(sizeof(struct nvme_command) == 64);
static_assert(sizeof(struct apple_nvmmu_tcb) == 128);
@@ -729,6 +741,26 @@ static int apple_nvme_remove_sq(struct apple_nvme *anv)
return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
}
+static bool apple_nvme_delayed_flush(struct apple_nvme *anv, struct nvme_ns *ns,
+ struct request *req)
+{
+ if (!anv->flush_interval || req_op(req) != REQ_OP_FLUSH)
+ return false;
+ if (delayed_work_pending(&anv->flush_dwork))
+ return true;
+ if (time_before(jiffies, anv->last_flush + anv->flush_interval)) {
+ kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &anv->flush_dwork,
+ anv->flush_interval);
+ if (WARN_ON_ONCE(anv->flush_ns && anv->flush_ns != ns))
+ goto out;
+ anv->flush_ns = ns;
+ return true;
+ }
+out:
+ anv->last_flush = jiffies;
+ return false;
+}
+
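A sketch of how apple_nvme_delayed_flush() above coalesces flush requests, assuming the default flush_interval of 1000 ms (times illustrative):

	t=0      flush arrives -> issued to hardware, last_flush = t
	t=300    flush arrives -> completed immediately, flush_dwork queued for t=1300
	t=600    flush arrives -> completed immediately (work already pending)
	t=1300   flush_dwork   -> issues one real NVMe flush covering the deferred ones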
static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -764,6 +796,12 @@ static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
}
blk_mq_start_request(req);
+
+ if (apple_nvme_delayed_flush(anv, ns, req)) {
+ blk_mq_complete_request(req);
+ return BLK_STS_OK;
+ }
+
apple_nvme_submit_cmd(q, cmnd);
return BLK_STS_OK;
@@ -1373,6 +1411,28 @@ static void devm_apple_nvme_mempool_destroy(void *data)
mempool_destroy(data);
}
+static void apple_nvme_flush_work(struct work_struct *work)
+{
+ struct nvme_command c = { };
+ struct apple_nvme *anv;
+ struct nvme_ns *ns;
+ int err;
+
+ anv = container_of(work, struct apple_nvme, flush_dwork.work);
+ ns = anv->flush_ns;
+ if (WARN_ON_ONCE(!ns))
+ return;
+
+ c.common.opcode = nvme_cmd_flush;
+ c.common.nsid = cpu_to_le32(anv->flush_ns->head->ns_id);
+ err = nvme_submit_sync_cmd(ns->queue, &c, NULL, 0);
+ if (err) {
+ dev_err(anv->dev, "Deferred flush failed: %d\n", err);
+ } else {
+ anv->last_flush = jiffies;
+ }
+}
+
static int apple_nvme_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1515,12 +1575,21 @@ static int apple_nvme_probe(struct platform_device *pdev)
goto put_dev;
}
+ if (flush_interval) {
+ anv->flush_interval = msecs_to_jiffies(flush_interval);
+ anv->flush_ns = NULL;
+ anv->last_flush = jiffies - anv->flush_interval;
+ }
+
+ INIT_DELAYED_WORK(&anv->flush_dwork, apple_nvme_flush_work);
+
nvme_reset_ctrl(&anv->ctrl);
async_schedule(apple_nvme_async_probe, anv);
return 0;
put_dev:
+ apple_nvme_detach_genpd(anv);
put_device(anv->dev);
return ret;
}
@@ -1548,6 +1617,7 @@ static void apple_nvme_shutdown(struct platform_device *pdev)
{
struct apple_nvme *anv = platform_get_drvdata(pdev);
+ flush_delayed_work(&anv->flush_dwork);
apple_nvme_disable(anv, true);
if (apple_rtkit_is_running(anv->rtk))
apple_rtkit_shutdown(anv->rtk);
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index ec8a49c04003..4728b5b3d0f7 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -290,6 +290,19 @@ config NVMEM_SPRD_EFUSE
This driver can also be built as a module. If so, the module
will be called nvmem-sprd-efuse.
+config NVMEM_SPMI_MFD
+ tristate "Generic SPMI MFD NVMEM"
+ depends on MFD_SIMPLE_MFD_SPMI || COMPILE_TEST
+ default ARCH_APPLE
+ help
+ Say y here to build a generic driver to expose an SPMI MFD device
+ as a NVMEM provider. This can be used for PMIC/PMU devices which
+ are used to store power and RTC-related settings on certain
+ platforms, such as Apple Silicon Macs.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-spmi-mfd.
+
config NVMEM_STM32_ROMEM
tristate "STMicroelectronics STM32 factory-programmed memory support"
depends on ARCH_STM32 || COMPILE_TEST
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index fa80fe17e567..9ac4b013ebbd 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -55,6 +55,8 @@ obj-$(CONFIG_NVMEM_SC27XX_EFUSE) += nvmem-sc27xx-efuse.o
nvmem-sc27xx-efuse-y := sc27xx-efuse.o
obj-$(CONFIG_NVMEM_SNVS_LPGPR) += nvmem_snvs_lpgpr.o
nvmem_snvs_lpgpr-y := snvs_lpgpr.o
+obj-$(CONFIG_NVMEM_SPMI_MFD) += nvmem_spmi_mfd.o
+nvmem_spmi_mfd-y := spmi-mfd-nvmem.o
obj-$(CONFIG_NVMEM_SPMI_SDAM) += nvmem_qcom-spmi-sdam.o
nvmem_qcom-spmi-sdam-y += qcom-spmi-sdam.o
obj-$(CONFIG_NVMEM_SPRD_EFUSE) += nvmem_sprd_efuse.o
diff --git a/drivers/nvmem/spmi-mfd-nvmem.c b/drivers/nvmem/spmi-mfd-nvmem.c
new file mode 100644
index 000000000000..284c93be2e18
--- /dev/null
+++ b/drivers/nvmem/spmi-mfd-nvmem.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Generic SPMI MFD NVMEM driver
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+struct spmi_mfd_nvmem {
+ struct regmap *regmap;
+ unsigned int base;
+};
+
+static int spmi_mfd_nvmem_read(void *priv, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct spmi_mfd_nvmem *nvmem = priv;
+
+ return regmap_bulk_read(nvmem->regmap, nvmem->base + offset, val, bytes);
+}
+
+static int spmi_mfd_nvmem_write(void *priv, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct spmi_mfd_nvmem *nvmem = priv;
+
+ return regmap_bulk_write(nvmem->regmap, nvmem->base + offset, val, bytes);
+}
+
+static int spmi_mfd_nvmem_probe(struct platform_device *pdev)
+{
+ struct spmi_mfd_nvmem *nvmem;
+ const __be32 *addr;
+ int len;
+ struct nvmem_config nvmem_cfg = {
+ .dev = &pdev->dev,
+ .name = "spmi_mfd_nvmem",
+ .id = NVMEM_DEVID_AUTO,
+ .word_size = 1,
+ .stride = 1,
+ .reg_read = spmi_mfd_nvmem_read,
+ .reg_write = spmi_mfd_nvmem_write,
+ };
+
+ nvmem = devm_kzalloc(&pdev->dev, sizeof(*nvmem), GFP_KERNEL);
+ if (!nvmem)
+ return -ENOMEM;
+
+ nvmem_cfg.priv = nvmem;
+
+ nvmem->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!nvmem->regmap) {
+ dev_err(&pdev->dev, "Parent regmap unavailable.\n");
+ return -ENXIO;
+ }
+
+ addr = of_get_property(pdev->dev.of_node, "reg", &len);
+ if (!addr) {
+ dev_err(&pdev->dev, "no reg property\n");
+ return -EINVAL;
+ }
+ if (len != 2 * sizeof(u32)) {
+ dev_err(&pdev->dev, "invalid reg property\n");
+ return -EINVAL;
+ }
+
+ nvmem->base = be32_to_cpup(&addr[0]);
+ nvmem_cfg.size = be32_to_cpup(&addr[1]);
+
+ return PTR_ERR_OR_ZERO(devm_nvmem_register(&pdev->dev, &nvmem_cfg));
+}
+
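For reference, the probe above expects a device-tree node of roughly this shape, as a child of a simple-mfd SPMI device so that dev_get_regmap() on the parent succeeds (values illustrative):

	nvmem@8000 {
		compatible = "apple,spmi-pmu-nvmem";
		reg = <0x8000 0x100>;	/* <base size> within the parent regmap */
	};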
+static const struct of_device_id spmi_mfd_nvmem_id_table[] = {
+ { .compatible = "apple,spmi-pmu-nvmem" },
+ { .compatible = "spmi-mfd-nvmem" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, spmi_mfd_nvmem_id_table);
+
+static struct platform_driver spmi_mfd_nvmem_driver = {
+ .probe = spmi_mfd_nvmem_probe,
+ .driver = {
+ .name = "spmi-mfd-nvmem",
+ .owner = THIS_MODULE,
+ .of_match_table = spmi_mfd_nvmem_id_table,
+ },
+};
+
+module_platform_driver(spmi_mfd_nvmem_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_DESCRIPTION("SPMI MFD NVMEM driver");
diff --git a/drivers/of/address.c b/drivers/of/address.c
index c34ac33b7338..f73021a0c245 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -538,7 +538,7 @@ static u64 __of_translate_address(struct device_node *dev,
pbus = of_match_bus(parent);
pbus->count_cells(dev, &pna, &pns);
if (!OF_CHECK_COUNTS(pna, pns)) {
- pr_err("Bad cell count for %pOF\n", dev);
+ pr_debug("Bad cell count for %pOF\n", dev);
break;
}
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index bfd9bac37e24..1f9f9fbf42ba 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -327,6 +327,7 @@ config PCIE_APPLE
depends on ARCH_APPLE || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
+ depends on ARM64_PAGE_SHIFT = 14 || COMPILE_TEST
select PCI_HOST_COMMON
help
Say Y here if you want to enable PCIe controller support on Apple
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index 66f37e403a09..8c33f6d05243 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -507,12 +507,36 @@ static u32 apple_pcie_rid2sid_write(struct apple_pcie_port *port,
return readl_relaxed(port->base + PORT_RID2SID(idx));
}
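+/*
+ * Acquire and immediately release each port's GPIOs once up front, so that
+ * a GPIO controller that has not probed yet surfaces as -EPROBE_DEFER before
+ * any port is partially brought up. The PWREN GPIO is optional, so -ENOENT
+ * is tolerated for it.
+ */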
+static int apple_pcie_probe_port(struct device_node *np)
+{
+ struct gpio_desc *gd;
+
+ gd = gpiod_get_from_of_node(np, "reset-gpios", 0,
+ GPIOD_OUT_LOW, "PERST#");
+ if (IS_ERR(gd))
+ return PTR_ERR(gd);
+
+ gpiod_put(gd);
+
+ gd = gpiod_get_from_of_node(np, "pwren-gpios", 0,
+ GPIOD_OUT_LOW, "PWREN");
+ if (IS_ERR(gd)) {
+ if (PTR_ERR(gd) != -ENOENT)
+ return PTR_ERR(gd);
+ } else {
+ gpiod_put(gd);
+ }
+
+ return 0;
+}
+
static int apple_pcie_setup_port(struct apple_pcie *pcie,
struct device_node *np)
{
struct platform_device *platform = to_platform_device(pcie->dev);
struct apple_pcie_port *port;
- struct gpio_desc *reset;
+ struct gpio_desc *reset, *pwren = NULL;
u32 stat, idx;
int ret, i;
@@ -521,6 +545,15 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
if (IS_ERR(reset))
return PTR_ERR(reset);
+ pwren = devm_gpiod_get_from_of_node(pcie->dev, np, "pwren-gpios", 0,
+ GPIOD_OUT_LOW, "PWREN");
+ if (IS_ERR(pwren)) {
+ if (PTR_ERR(pwren) == -ENOENT)
+ pwren = NULL;
+ else
+ return PTR_ERR(pwren);
+ }
+
port = devm_kzalloc(pcie->dev, sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
@@ -541,18 +574,27 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);
/* Assert PERST# before setting up the clock */
- gpiod_set_value(reset, 1);
+ gpiod_set_value_cansleep(reset, 1);
+
+ /* Power on the device if required */
+ gpiod_set_value_cansleep(pwren, 1);
ret = apple_pcie_setup_refclk(pcie, port);
if (ret < 0)
return ret;
- /* The minimal Tperst-clk value is 100us (PCIe CEM r5.0, 2.9.2) */
- usleep_range(100, 200);
+ /*
+ * The minimal Tperst-clk value is 100us (PCIe CEM r5.0, 2.9.2)
+ * If powering up, the minimal Tpvperl is 100ms
+ */
+ if (pwren)
+ msleep(100);
+ else
+ usleep_range(100, 200);
/* Deassert PERST# */
rmw_set(PORT_PERST_OFF, port->base + PORT_PERST);
- gpiod_set_value(reset, 0);
+ gpiod_set_value_cansleep(reset, 0);
/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
msleep(100);
@@ -797,8 +839,18 @@ static int apple_pcie_init(struct pci_config_window *cfg)
static int apple_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *of_port;
int ret;
+ /* Check for probe dependencies for all ports first */
+ for_each_child_of_node(dev->of_node, of_port) {
+ ret = apple_pcie_probe_port(of_port);
+ if (ret) {
+ dev_err_probe(dev, ret, "Port %pOF probe failed\n", of_port);
+ of_node_put(of_port);
+ return ret;
+ }
+ }
+
ret = bus_register_notifier(&pci_bus_type, &apple_pcie_nb);
if (ret)
return ret;
diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c
index 979a7c2b4f56..03e1e81b94b9 100644
--- a/drivers/perf/apple_m1_cpu_pmu.c
+++ b/drivers/perf/apple_m1_cpu_pmu.c
@@ -559,9 +559,23 @@ static int m1_pmu_fire_init(struct arm_pmu *cpu_pmu)
return m1_pmu_init(cpu_pmu);
}
+static int m2_pmu_blizzard_init(struct arm_pmu *cpu_pmu)
+{
+ cpu_pmu->name = "apple_blizzard_pmu";
+ return m1_pmu_init(cpu_pmu);
+}
+
+static int m2_pmu_avalanche_init(struct arm_pmu *cpu_pmu)
+{
+ cpu_pmu->name = "apple_avalanche_pmu";
+ return m1_pmu_init(cpu_pmu);
+}
+
static const struct of_device_id m1_pmu_of_device_ids[] = {
{ .compatible = "apple,icestorm-pmu", .data = m1_pmu_ice_init, },
{ .compatible = "apple,firestorm-pmu", .data = m1_pmu_fire_init, },
+ { .compatible = "apple,blizzard-pmu", .data = m2_pmu_blizzard_init, },
+ { .compatible = "apple,avalanche-pmu", .data = m2_pmu_avalanche_init, },
{ },
};
MODULE_DEVICE_TABLE(of, m1_pmu_of_device_ids);
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index dbd327712205..bde284b78a7f 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -16,3 +16,5 @@ source "drivers/platform/olpc/Kconfig"
source "drivers/platform/surface/Kconfig"
source "drivers/platform/x86/Kconfig"
+
+source "drivers/platform/apple/Kconfig"
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index 41640172975a..d2baa4eb4f13 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_OLPC_EC) += olpc/
obj-$(CONFIG_GOLDFISH) += goldfish/
obj-$(CONFIG_CHROME_PLATFORMS) += chrome/
obj-$(CONFIG_SURFACE_PLATFORMS) += surface/
+obj-$(CONFIG_APPLE_PLATFORMS) += apple/
diff --git a/drivers/platform/apple/Kconfig b/drivers/platform/apple/Kconfig
new file mode 100644
index 000000000000..5bcadd349493
--- /dev/null
+++ b/drivers/platform/apple/Kconfig
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Apple Platform-Specific Drivers
+#
+
+menuconfig APPLE_PLATFORMS
+ bool "Apple Mac Platform-Specific Device Drivers"
+ default y
+ help
+ Say Y here to get to see options for platform-specific device drivers
+ for Apple devices. This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if APPLE_PLATFORMS
+
+config APPLE_SMC
+ tristate "Apple SMC Driver"
+ depends on ARCH_APPLE || (COMPILE_TEST && 64BIT)
+ default ARCH_APPLE
+ select MFD_CORE
+ help
+ Build support for the Apple System Management Controller present in
+ Apple Macs. This driver currently supports the SMC in Apple Silicon
+ Macs. For x86 Macs, see the applesmc driver (SENSORS_APPLESMC).
+
+ Say Y here if you have an Apple Silicon Mac.
+
+ To compile this driver as a module, choose M here: the module will
+ be called macsmc.
+
+if APPLE_SMC
+
+config APPLE_SMC_RTKIT
+ tristate "RTKit (Apple Silicon) backend"
+ depends on ARCH_APPLE || (COMPILE_TEST && 64BIT)
+ depends on APPLE_RTKIT
+ default ARCH_APPLE
+ help
+ Build support for SMC communications via the RTKit backend. This is
+ required for Apple Silicon Macs.
+
+ Say Y here if you have an Apple Silicon Mac.
+
+ To compile this driver as a module, choose M here: the module will
+ be called macsmc-rtkit.
+
+endif
+endif
diff --git a/drivers/platform/apple/Makefile b/drivers/platform/apple/Makefile
new file mode 100644
index 000000000000..79fac195398b
--- /dev/null
+++ b/drivers/platform/apple/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/apple
+# Apple Platform-Specific Drivers
+#
+
+macsmc-y += smc_core.o
+macsmc-rtkit-y += smc_rtkit.o
+
+obj-$(CONFIG_APPLE_SMC) += macsmc.o
+obj-$(CONFIG_APPLE_SMC_RTKIT) += macsmc-rtkit.o
diff --git a/drivers/platform/apple/smc.h b/drivers/platform/apple/smc.h
new file mode 100644
index 000000000000..8ae51887b2c5
--- /dev/null
+++ b/drivers/platform/apple/smc.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple SMC internal core definitions
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#ifndef _SMC_H
+#define _SMC_H
+
+#include <linux/mfd/macsmc.h>
+
+struct apple_smc_backend_ops {
+ int (*read_key)(void *cookie, smc_key key, void *buf, size_t size);
+ int (*write_key)(void *cookie, smc_key key, void *buf, size_t size);
+ int (*write_key_atomic)(void *cookie, smc_key key, void *buf, size_t size);
+ int (*rw_key)(void *cookie, smc_key key, void *wbuf, size_t wsize,
+ void *rbuf, size_t rsize);
+ int (*get_key_by_index)(void *cookie, int index, smc_key *key);
+ int (*get_key_info)(void *cookie, smc_key key, struct apple_smc_key_info *info);
+};
+
+struct apple_smc *apple_smc_probe(struct device *dev, const struct apple_smc_backend_ops *ops,
+ void *cookie);
+void *apple_smc_get_cookie(struct apple_smc *smc);
+int apple_smc_remove(struct apple_smc *smc);
+void apple_smc_event_received(struct apple_smc *smc, uint32_t event);
+
+#endif
diff --git a/drivers/platform/apple/smc_core.c b/drivers/platform/apple/smc_core.c
new file mode 100644
index 000000000000..daf029cd072f
--- /dev/null
+++ b/drivers/platform/apple/smc_core.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SMC core framework
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/device.h>
+#include <linux/mfd/core.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include "smc.h"
+
+struct apple_smc {
+ struct device *dev;
+
+ void *be_cookie;
+ const struct apple_smc_backend_ops *be;
+
+ struct mutex mutex;
+
+ u32 key_count;
+ smc_key first_key;
+ smc_key last_key;
+
+ struct blocking_notifier_head event_handlers;
+};
+
+static const struct mfd_cell apple_smc_devs[] = {
+ {
+ .name = "macsmc-gpio",
+ },
+ {
+ .name = "macsmc-hid",
+ },
+ {
+ .name = "macsmc-power",
+ },
+ {
+ .name = "macsmc-reboot",
+ },
+ {
+ .name = "macsmc-rtc",
+ },
+};
+
+int apple_smc_read(struct apple_smc *smc, smc_key key, void *buf, size_t size)
+{
+ int ret;
+
+ mutex_lock(&smc->mutex);
+ ret = smc->be->read_key(smc->be_cookie, key, buf, size);
+ mutex_unlock(&smc->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(apple_smc_read);
+
+int apple_smc_write(struct apple_smc *smc, smc_key key, void *buf, size_t size)
+{
+ int ret;
+
+ mutex_lock(&smc->mutex);
+ ret = smc->be->write_key(smc->be_cookie, key, buf, size);
+ mutex_unlock(&smc->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(apple_smc_write);
+
+int apple_smc_write_atomic(struct apple_smc *smc, smc_key key, void *buf, size_t size)
+{
+ int ret;
+
+ /*
+ * Will fail if SMC is busy. This is only used by SMC reboot/poweroff
+ * final calls, so it doesn't really matter at that point.
+ */
+ if (!mutex_trylock(&smc->mutex))
+ return -EBUSY;
+
+ ret = smc->be->write_key_atomic(smc->be_cookie, key, buf, size);
+ mutex_unlock(&smc->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(apple_smc_write_atomic);
+
+int apple_smc_rw(struct apple_smc *smc, smc_key key, void *wbuf, size_t wsize,
+ void *rbuf, size_t rsize)
+{
+ int ret;
+
+ mutex_lock(&smc->mutex);
+ ret = smc->be->rw_key(smc->be_cookie, key, wbuf, wsize, rbuf, rsize);
+ mutex_unlock(&smc->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(apple_smc_rw);
+
+int apple_smc_get_key_by_index(struct apple_smc *smc, int index, smc_key *key)
+{
+ int ret;
+
+ mutex_lock(&smc->mutex);
+ ret = smc->be->get_key_by_index(smc->be_cookie, index, key);
+ mutex_unlock(&smc->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(apple_smc_get_key_by_index);
+
+int apple_smc_get_key_info(struct apple_smc *smc, smc_key key, struct apple_smc_key_info *info)
+{
+ int ret;
+
+ mutex_lock(&smc->mutex);
+ ret = smc->be->get_key_info(smc->be_cookie, key, info);
+ mutex_unlock(&smc->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(apple_smc_get_key_info);
+
+int apple_smc_find_first_key_index(struct apple_smc *smc, smc_key key)
+{
+ int start = 0, count = smc->key_count;
+ int ret;
+
+ if (key <= smc->first_key)
+ return 0;
+ if (key > smc->last_key)
+ return smc->key_count;
+
+ while (count > 1) {
+ int pivot = start + ((count - 1) >> 1);
+ smc_key pkey;
+
+ ret = apple_smc_get_key_by_index(smc, pivot, &pkey);
+ if (ret < 0)
+ return ret;
+
+ if (pkey == key)
+ return pivot;
+
+ pivot++;
+
+ if (pkey < key) {
+ count -= pivot - start;
+ start = pivot;
+ } else {
+ count = pivot - start;
+ }
+ }
+
+ return start;
+}
+EXPORT_SYMBOL(apple_smc_find_first_key_index);
+
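An illustrative consumer of the lookup helpers above (not part of this patch): walk every key in an inclusive FourCC range.

	static int smc_for_each_key_in_range(struct apple_smc *smc,
					     smc_key first, smc_key last)
	{
		int i = apple_smc_find_first_key_index(smc, first);

		if (i < 0)
			return i;

		for (; i < apple_smc_get_key_count(smc); i++) {
			smc_key key;
			int ret = apple_smc_get_key_by_index(smc, i, &key);

			if (ret < 0)
				return ret;
			if (key > last)
				break;
			/* ... use key ... */
		}
		return 0;
	}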
+int apple_smc_get_key_count(struct apple_smc *smc)
+{
+ return smc->key_count;
+}
+EXPORT_SYMBOL(apple_smc_get_key_count);
+
+void apple_smc_event_received(struct apple_smc *smc, uint32_t event)
+{
+ dev_dbg(smc->dev, "Event: 0x%08x\n", event);
+ blocking_notifier_call_chain(&smc->event_handlers, event, NULL);
+}
+EXPORT_SYMBOL(apple_smc_event_received);
+
+int apple_smc_register_notifier(struct apple_smc *smc, struct notifier_block *n)
+{
+ return blocking_notifier_chain_register(&smc->event_handlers, n);
+}
+EXPORT_SYMBOL(apple_smc_register_notifier);
+
+int apple_smc_unregister_notifier(struct apple_smc *smc, struct notifier_block *n)
+{
+ return blocking_notifier_chain_unregister(&smc->event_handlers, n);
+}
+EXPORT_SYMBOL(apple_smc_unregister_notifier);
+
+void *apple_smc_get_cookie(struct apple_smc *smc)
+{
+ return smc->be_cookie;
+}
+EXPORT_SYMBOL(apple_smc_get_cookie);
+
+struct apple_smc *apple_smc_probe(struct device *dev, const struct apple_smc_backend_ops *ops, void *cookie)
+{
+ struct apple_smc *smc;
+ u32 count;
+ int ret;
+
+ smc = devm_kzalloc(dev, sizeof(*smc), GFP_KERNEL);
+ if (!smc)
+ return ERR_PTR(-ENOMEM);
+
+ smc->dev = dev;
+ smc->be_cookie = cookie;
+ smc->be = ops;
+ mutex_init(&smc->mutex);
+ BLOCKING_INIT_NOTIFIER_HEAD(&smc->event_handlers);
+
+ ret = apple_smc_read_u32(smc, SMC_KEY(#KEY), &count);
+ if (ret)
+ return ERR_PTR(dev_err_probe(dev, ret, "Failed to get key count"));
+ smc->key_count = be32_to_cpu(count);
+
+ ret = apple_smc_get_key_by_index(smc, 0, &smc->first_key);
+ if (ret)
+ return ERR_PTR(dev_err_probe(dev, ret, "Failed to get first key"));
+
+ ret = apple_smc_get_key_by_index(smc, smc->key_count - 1, &smc->last_key);
+ if (ret)
+ return ERR_PTR(dev_err_probe(dev, ret, "Failed to get last key"));
+
+ /* Enable notifications */
+ apple_smc_write_flag(smc, SMC_KEY(NTAP), 1);
+
+ dev_info(dev, "Initialized (%d keys %p4ch..%p4ch)\n",
+ smc->key_count, &smc->first_key, &smc->last_key);
+
+ dev_set_drvdata(dev, smc);
+
+ ret = mfd_add_devices(dev, -1, apple_smc_devs, ARRAY_SIZE(apple_smc_devs), NULL, 0, NULL);
+ if (ret)
+ return ERR_PTR(dev_err_probe(dev, ret, "Subdevice initialization failed"));
+
+ return smc;
+}
+EXPORT_SYMBOL(apple_smc_probe);
+
+int apple_smc_remove(struct apple_smc *smc)
+{
+ mfd_remove_devices(smc->dev);
+
+ /* Disable notifications */
+ apple_smc_write_flag(smc, SMC_KEY(NTAP), 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(apple_smc_remove);
+
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple SMC core");
diff --git a/drivers/platform/apple/smc_rtkit.c b/drivers/platform/apple/smc_rtkit.c
new file mode 100644
index 000000000000..e84e2e369e6a
--- /dev/null
+++ b/drivers/platform/apple/smc_rtkit.c
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SMC RTKit backend
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <asm/unaligned.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/soc/apple/rtkit.h>
+#include "smc.h"
+
+#define SMC_ENDPOINT 0x20
+
+/* Guess */
+#define SMC_SHMEM_SIZE 0x1000
+
+#define SMC_MSG_READ_KEY 0x10
+#define SMC_MSG_WRITE_KEY 0x11
+#define SMC_MSG_GET_KEY_BY_INDEX 0x12
+#define SMC_MSG_GET_KEY_INFO 0x13
+#define SMC_MSG_INITIALIZE 0x17
+#define SMC_MSG_NOTIFICATION 0x18
+#define SMC_MSG_RW_KEY 0x20
+
+#define SMC_DATA GENMASK(63, 32)
+#define SMC_WSIZE GENMASK(31, 24)
+#define SMC_SIZE GENMASK(23, 16)
+#define SMC_ID GENMASK(15, 12)
+#define SMC_MSG GENMASK(7, 0)
+#define SMC_RESULT SMC_MSG
+
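+/*
+ * 64-bit message layout implied by the field masks above:
+ *
+ *   [63:32] DATA | [31:24] WSIZE | [23:16] SIZE | [15:12] ID | [7:0] MSG
+ *
+ * Replies reuse bits [7:0] as SMC_RESULT; bits [11:8] are unused.
+ */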
+#define SMC_RECV_TIMEOUT 500
+
+struct apple_smc_rtkit {
+ struct device *dev;
+ struct apple_smc *core;
+ struct apple_rtkit *rtk;
+
+ struct completion init_done;
+ bool initialized;
+ bool alive;
+
+ struct resource *sram;
+ void __iomem *sram_base;
+ struct apple_rtkit_shmem shmem;
+
+ unsigned int msg_id;
+
+ bool atomic_pending;
+ struct completion cmd_done;
+ u64 cmd_ret;
+};
+
+static int apple_smc_rtkit_write_key_atomic(void *cookie, smc_key key, void *buf, size_t size)
+{
+ struct apple_smc_rtkit *smc = cookie;
+ int ret;
+ u64 msg;
+ u8 result;
+
+ if (size > SMC_SHMEM_SIZE || size == 0)
+ return -EINVAL;
+
+ if (!smc->alive)
+ return -EIO;
+
+ memcpy_toio(smc->shmem.iomem, buf, size);
+ smc->msg_id = (smc->msg_id + 1) & 0xf;
+ msg = (FIELD_PREP(SMC_MSG, SMC_MSG_WRITE_KEY) |
+ FIELD_PREP(SMC_SIZE, size) |
+ FIELD_PREP(SMC_ID, smc->msg_id) |
+ FIELD_PREP(SMC_DATA, key));
+ smc->atomic_pending = true;
+
+ ret = apple_rtkit_send_message(smc->rtk, SMC_ENDPOINT, msg, NULL, true);
+ if (ret < 0) {
+ dev_err(smc->dev, "Failed to send command (%d)\n", ret);
+ return ret;
+ }
+
+ while (smc->atomic_pending) {
+ ret = apple_rtkit_poll(smc->rtk);
+ if (ret < 0) {
+ dev_err(smc->dev, "RTKit poll failed (%llx)", msg);
+ return ret;
+ }
+ udelay(100);
+ }
+
+ if (FIELD_GET(SMC_ID, smc->cmd_ret) != smc->msg_id) {
+ dev_err(smc->dev, "Command sequence mismatch (expected %d, got %d)\n",
+ smc->msg_id, (unsigned int)FIELD_GET(SMC_ID, smc->cmd_ret));
+ return -EIO;
+ }
+
+ result = FIELD_GET(SMC_RESULT, smc->cmd_ret);
+ if (result != 0)
+ return -result;
+
+ return FIELD_GET(SMC_SIZE, smc->cmd_ret);
+}
+
+static int apple_smc_cmd(struct apple_smc_rtkit *smc, u64 cmd, u64 arg,
+ u64 size, u64 wsize, u32 *ret_data)
+{
+ int ret;
+ u64 msg;
+ u8 result;
+
+ if (!smc->alive)
+ return -EIO;
+
+ reinit_completion(&smc->cmd_done);
+
+ smc->msg_id = (smc->msg_id + 1) & 0xf;
+ msg = (FIELD_PREP(SMC_MSG, cmd) |
+ FIELD_PREP(SMC_SIZE, size) |
+ FIELD_PREP(SMC_WSIZE, wsize) |
+ FIELD_PREP(SMC_ID, smc->msg_id) |
+ FIELD_PREP(SMC_DATA, arg));
+
+ ret = apple_rtkit_send_message(smc->rtk, SMC_ENDPOINT, msg, NULL, false);
+ if (ret < 0) {
+ dev_err(smc->dev, "Failed to send command\n");
+ return ret;
+ }
+
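+	/* Wait for the reply, logging and skipping any stale out-of-sequence replies */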
+ do {
+ if (wait_for_completion_timeout(&smc->cmd_done,
+ msecs_to_jiffies(SMC_RECV_TIMEOUT)) == 0) {
+ dev_err(smc->dev, "Command timed out (%llx)", msg);
+ return -ETIMEDOUT;
+ }
+ if (FIELD_GET(SMC_ID, smc->cmd_ret) == smc->msg_id)
+ break;
+ dev_err(smc->dev, "Command sequence mismatch (expected %d, got %d)\n",
+ smc->msg_id, (unsigned int)FIELD_GET(SMC_ID, smc->cmd_ret));
+	} while (1);
+
+ result = FIELD_GET(SMC_RESULT, smc->cmd_ret);
+ if (result != 0)
+ return -result;
+
+ if (ret_data)
+ *ret_data = FIELD_GET(SMC_DATA, smc->cmd_ret);
+
+ return FIELD_GET(SMC_SIZE, smc->cmd_ret);
+}
+
+static int _apple_smc_rtkit_read_key(struct apple_smc_rtkit *smc, smc_key key,
+ void *buf, size_t size, size_t wsize)
+{
+ int ret;
+ u32 rdata;
+ u64 cmd;
+
+ if (size > SMC_SHMEM_SIZE || size == 0)
+ return -EINVAL;
+
+ cmd = wsize ? SMC_MSG_RW_KEY : SMC_MSG_READ_KEY;
+
+ ret = apple_smc_cmd(smc, cmd, key, size, wsize, &rdata);
+ if (ret < 0)
+ return ret;
+
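+	/* Replies up to 4 bytes are returned inline in the message; larger ones via shared memory */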
+ if (size <= 4)
+ memcpy(buf, &rdata, size);
+ else
+ memcpy_fromio(buf, smc->shmem.iomem, size);
+
+ return ret;
+}
+
+static int apple_smc_rtkit_read_key(void *cookie, smc_key key, void *buf, size_t size)
+{
+ return _apple_smc_rtkit_read_key(cookie, key, buf, size, 0);
+}
+
+static int apple_smc_rtkit_write_key(void *cookie, smc_key key, void *buf, size_t size)
+{
+ struct apple_smc_rtkit *smc = cookie;
+
+ if (size > SMC_SHMEM_SIZE || size == 0)
+ return -EINVAL;
+
+ memcpy_toio(smc->shmem.iomem, buf, size);
+ return apple_smc_cmd(smc, SMC_MSG_WRITE_KEY, key, size, 0, NULL);
+}
+
+static int apple_smc_rtkit_rw_key(void *cookie, smc_key key,
+ void *wbuf, size_t wsize, void *rbuf, size_t rsize)
+{
+ struct apple_smc_rtkit *smc = cookie;
+
+ if (wsize > SMC_SHMEM_SIZE || wsize == 0)
+ return -EINVAL;
+
+ memcpy_toio(smc->shmem.iomem, wbuf, wsize);
+ return _apple_smc_rtkit_read_key(smc, key, rbuf, rsize, wsize);
+}
+
+static int apple_smc_rtkit_get_key_by_index(void *cookie, int index, smc_key *key)
+{
+ struct apple_smc_rtkit *smc = cookie;
+ int ret;
+
+	ret = apple_smc_cmd(smc, SMC_MSG_GET_KEY_BY_INDEX, index, 0, 0, key);
+	if (ret < 0)
+		return ret;
+
+	*key = swab32(*key);
+	return ret;
+}
+
+static int apple_smc_rtkit_get_key_info(void *cookie, smc_key key, struct apple_smc_key_info *info)
+{
+ struct apple_smc_rtkit *smc = cookie;
+ u8 key_info[6];
+ int ret;
+
+	ret = apple_smc_cmd(smc, SMC_MSG_GET_KEY_INFO, key, 0, 0, NULL);
+	if (ret >= 0 && info) {
+		memcpy_fromio(key_info, smc->shmem.iomem, sizeof(key_info));
+		info->size = key_info[0];
+		info->type_code = get_unaligned_be32(&key_info[1]);
+		info->flags = key_info[5];
+	}
+ return ret;
+}
+
+static const struct apple_smc_backend_ops apple_smc_rtkit_be_ops = {
+ .read_key = apple_smc_rtkit_read_key,
+ .write_key = apple_smc_rtkit_write_key,
+ .write_key_atomic = apple_smc_rtkit_write_key_atomic,
+ .rw_key = apple_smc_rtkit_rw_key,
+ .get_key_by_index = apple_smc_rtkit_get_key_by_index,
+ .get_key_info = apple_smc_rtkit_get_key_info,
+};
+
+static void apple_smc_rtkit_crashed(void *cookie)
+{
+ struct apple_smc_rtkit *smc = cookie;
+
+ dev_err(smc->dev, "SMC crashed! Your system will reboot in a few seconds...\n");
+ smc->alive = false;
+}
+
+static int apple_smc_rtkit_shmem_setup(void *cookie, struct apple_rtkit_shmem *bfr)
+{
+ struct apple_smc_rtkit *smc = cookie;
+ struct resource res = {
+ .start = bfr->iova,
+ .end = bfr->iova + bfr->size - 1,
+ .name = "rtkit_map",
+ .flags = smc->sram->flags,
+ };
+
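+	/* The SMC always maps its buffers from the SRAM window; a DMA buffer request is unexpected */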
+ if (!bfr->iova) {
+ dev_err(smc->dev, "RTKit wants a RAM buffer\n");
+ return -EIO;
+ }
+
+ if (res.end < res.start || !resource_contains(smc->sram, &res)) {
+ dev_err(smc->dev,
+ "RTKit buffer request outside SRAM region: %pR", &res);
+ return -EFAULT;
+ }
+
+ bfr->iomem = smc->sram_base + (res.start - smc->sram->start);
+ bfr->is_mapped = true;
+
+ return 0;
+}
+
+static void apple_smc_rtkit_shmem_destroy(void *cookie, struct apple_rtkit_shmem *bfr)
+{
+	/* no-op */
+}
+
+static bool apple_smc_rtkit_recv_early(void *cookie, u8 endpoint, u64 message)
+{
+ struct apple_smc_rtkit *smc = cookie;
+
+ if (endpoint != SMC_ENDPOINT) {
+ dev_err(smc->dev, "Received message for unknown endpoint 0x%x\n", endpoint);
+ return false;
+ }
+
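+	/* The first message is the reply to SMC_MSG_INITIALIZE, carrying the shared memory IOVA */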
+ if (!smc->initialized) {
+ int ret;
+
+ smc->shmem.iova = message;
+ smc->shmem.size = SMC_SHMEM_SIZE;
+ ret = apple_smc_rtkit_shmem_setup(smc, &smc->shmem);
+ if (ret < 0)
+ dev_err(smc->dev, "Failed to initialize shared memory\n");
+ else
+ smc->alive = true;
+ smc->initialized = true;
+ complete(&smc->init_done);
+ } else if (FIELD_GET(SMC_MSG, message) == SMC_MSG_NOTIFICATION) {
+ /* Handle these in the RTKit worker thread */
+ return false;
+ } else {
+ smc->cmd_ret = message;
+ if (smc->atomic_pending) {
+ smc->atomic_pending = false;
+ } else {
+ complete(&smc->cmd_done);
+ }
+ }
+
+ return true;
+}
+
+static void apple_smc_rtkit_recv(void *cookie, u8 endpoint, u64 message)
+{
+ struct apple_smc_rtkit *smc = cookie;
+
+ if (endpoint != SMC_ENDPOINT) {
+ dev_err(smc->dev, "Received message for unknown endpoint 0x%x\n", endpoint);
+ return;
+ }
+
+ if (FIELD_GET(SMC_MSG, message) != SMC_MSG_NOTIFICATION) {
+ dev_err(smc->dev, "Received unknown message from worker: 0x%llx\n", message);
+ return;
+ }
+
+ apple_smc_event_received(smc->core, FIELD_GET(SMC_DATA, message));
+}
+
+static const struct apple_rtkit_ops apple_smc_rtkit_ops = {
+ .crashed = apple_smc_rtkit_crashed,
+ .recv_message = apple_smc_rtkit_recv,
+ .recv_message_early = apple_smc_rtkit_recv_early,
+ .shmem_setup = apple_smc_rtkit_shmem_setup,
+ .shmem_destroy = apple_smc_rtkit_shmem_destroy,
+};
+
+static int apple_smc_rtkit_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct apple_smc_rtkit *smc;
+ int ret;
+
+ smc = devm_kzalloc(dev, sizeof(*smc), GFP_KERNEL);
+ if (!smc)
+ return -ENOMEM;
+
+ smc->dev = dev;
+
+ smc->sram = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+ if (!smc->sram)
+		return dev_err_probe(dev, -EIO, "No SRAM region");
+
+ smc->sram_base = devm_ioremap_resource(dev, smc->sram);
+ if (IS_ERR(smc->sram_base))
+ return dev_err_probe(dev, PTR_ERR(smc->sram_base),
+ "Failed to map SRAM region");
+
+ smc->rtk =
+ devm_apple_rtkit_init(dev, smc, NULL, 0, &apple_smc_rtkit_ops);
+ if (IS_ERR(smc->rtk))
+ return dev_err_probe(dev, PTR_ERR(smc->rtk),
+ "Failed to intialize RTKit");
+
+ ret = apple_rtkit_wake(smc->rtk);
+ if (ret != 0)
+ return dev_err_probe(dev, ret,
+ "Failed to wake up SMC");
+
+ ret = apple_rtkit_start_ep(smc->rtk, SMC_ENDPOINT);
+ if (ret != 0) {
+ dev_err(dev, "Failed to start endpoint");
+ goto cleanup;
+ }
+
+ init_completion(&smc->init_done);
+ init_completion(&smc->cmd_done);
+
+ ret = apple_rtkit_send_message(smc->rtk, SMC_ENDPOINT,
+ FIELD_PREP(SMC_MSG, SMC_MSG_INITIALIZE), NULL, false);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to send init message");
+
+ if (wait_for_completion_timeout(&smc->init_done,
+ msecs_to_jiffies(SMC_RECV_TIMEOUT)) == 0) {
+ ret = -ETIMEDOUT;
+ dev_err(dev, "Timed out initializing SMC");
+ goto cleanup;
+ }
+
+ if (!smc->alive) {
+ ret = -EIO;
+ goto cleanup;
+ }
+
+ smc->core = apple_smc_probe(dev, &apple_smc_rtkit_be_ops, smc);
+ if (IS_ERR(smc->core)) {
+ ret = PTR_ERR(smc->core);
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ /* Try to shut down RTKit, if it's not completely wedged */
+ if (apple_rtkit_is_running(smc->rtk))
+ apple_rtkit_quiesce(smc->rtk);
+
+ return ret;
+}
+
+static int apple_smc_rtkit_remove(struct platform_device *pdev)
+{
+ struct apple_smc *core = platform_get_drvdata(pdev);
+ struct apple_smc_rtkit *smc = apple_smc_get_cookie(core);
+
+ apple_smc_remove(core);
+
+ if (apple_rtkit_is_running(smc->rtk))
+ apple_rtkit_quiesce(smc->rtk);
+
+ return 0;
+}
+
+static const struct of_device_id apple_smc_rtkit_of_match[] = {
+ { .compatible = "apple,smc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, apple_smc_rtkit_of_match);
+
+static struct platform_driver apple_smc_rtkit_driver = {
+ .driver = {
+ .name = "macsmc-rtkit",
+ .owner = THIS_MODULE,
+ .of_match_table = apple_smc_rtkit_of_match,
+ },
+ .probe = apple_smc_rtkit_probe,
+ .remove = apple_smc_rtkit_remove,
+};
+module_platform_driver(apple_smc_rtkit_driver);
+
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple SMC RTKit backend driver");
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index a8c46ba5878f..b4c2b7171672 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -117,6 +117,18 @@ config POWER_RESET_LINKSTATION
Say Y here if you have a Buffalo LinkStation LS421D/E.
+config POWER_RESET_MACSMC
+ tristate "Apple SMC reset/power-off driver"
+ depends on ARCH_APPLE || COMPILE_TEST
+ depends on APPLE_SMC
+ depends on OF
+ default ARCH_APPLE
+ help
+ This driver supports reset and power-off on Apple Mac machines
+ that implement this functionality via the SMC.
+
+ Say Y here if you have an Apple Silicon Mac.
+
config POWER_RESET_MSM
bool "Qualcomm MSM power-off driver"
depends on ARCH_QCOM
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 0a39424fc558..5838768e106f 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o
obj-$(CONFIG_POWER_RESET_LINKSTATION) += linkstation-poweroff.o
+obj-$(CONFIG_POWER_RESET_MACSMC) += macsmc-reboot.o
obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
obj-$(CONFIG_POWER_RESET_MT6323) += mt6323-poweroff.o
obj-$(CONFIG_POWER_RESET_OXNAS) += oxnas-restart.o
diff --git a/drivers/power/reset/macsmc-reboot.c b/drivers/power/reset/macsmc-reboot.c
new file mode 100644
index 000000000000..c33ba2a7852d
--- /dev/null
+++ b/drivers/power/reset/macsmc-reboot.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SMC Reboot/Poweroff Handler
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/delay.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/macsmc.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+
+struct macsmc_reboot_nvmem {
+ struct nvmem_cell *shutdown_flag;
+ struct nvmem_cell *pm_setting;
+ struct nvmem_cell *boot_stage;
+ struct nvmem_cell *boot_error_count;
+ struct nvmem_cell *panic_count;
+};
+
+static const char *nvmem_names[] = {
+ "shutdown_flag",
+ "pm_setting",
+ "boot_stage",
+ "boot_error_count",
+ "panic_count",
+};
+
+enum boot_stage {
+ BOOT_STAGE_SHUTDOWN = 0x00, /* Clean shutdown */
+ BOOT_STAGE_IBOOT_DONE = 0x2f, /* Last stage of bootloader */
+ BOOT_STAGE_KERNEL_STARTED = 0x30, /* Normal OS booting */
+};
+
+enum pm_setting {
+ PM_SETTING_AC_POWER_RESTORE = 0x02,
+ PM_SETTING_AC_POWER_OFF = 0x03,
+};
+
+static const char *ac_power_modes[] = { "off", "restore" };
+
+static int ac_power_mode_map[] = {
+ PM_SETTING_AC_POWER_OFF,
+ PM_SETTING_AC_POWER_RESTORE,
+};
+
+struct macsmc_reboot {
+ struct device *dev;
+ struct apple_smc *smc;
+ struct notifier_block reboot_notify;
+
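+	/*
+	 * Overlay the named cells with an array so probe can look them up by
+	 * index; the field order must match nvmem_names[] above.
+	 */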
+ union {
+ struct macsmc_reboot_nvmem nvm;
+ struct nvmem_cell *nvm_cells[ARRAY_SIZE(nvmem_names)];
+ };
+};
+
+/* Helpers to read/write a u8 given a struct nvmem_cell */
+static int nvmem_cell_get_u8(struct nvmem_cell *cell)
+{
+ size_t len;
+ u8 val;
+ void *ret = nvmem_cell_read(cell, &len);
+
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ if (len < 1) {
+ kfree(ret);
+ return -EINVAL;
+ }
+
+ val = *(u8 *)ret;
+ kfree(ret);
+ return val;
+}
+
+static int nvmem_cell_set_u8(struct nvmem_cell *cell, u8 val)
+{
+ return nvmem_cell_write(cell, &val, sizeof(val));
+}
+
+static ssize_t macsmc_ac_power_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct macsmc_reboot *reboot = dev_get_drvdata(dev);
+ int mode;
+ int ret;
+
+ mode = sysfs_match_string(ac_power_modes, buf);
+ if (mode < 0)
+ return mode;
+
+ ret = nvmem_cell_set_u8(reboot->nvm.pm_setting, ac_power_mode_map[mode]);
+ if (ret < 0)
+ return ret;
+
+ return n;
+}
+
+static ssize_t macsmc_ac_power_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct macsmc_reboot *reboot = dev_get_drvdata(dev);
+ int len = 0;
+ int i;
+ int mode = nvmem_cell_get_u8(reboot->nvm.pm_setting);
+
+ if (mode < 0)
+ return mode;
+
+ for (i = 0; i < ARRAY_SIZE(ac_power_mode_map); i++)
+ if (mode == ac_power_mode_map[i])
+ len += scnprintf(buf+len, PAGE_SIZE-len,
+ "[%s] ", ac_power_modes[i]);
+ else
+ len += scnprintf(buf+len, PAGE_SIZE-len,
+ "%s ", ac_power_modes[i]);
+ buf[len-1] = '\n';
+ return len;
+}
+static DEVICE_ATTR(ac_power_mode, 0644, macsmc_ac_power_mode_show,
+ macsmc_ac_power_mode_store);
+
+/*
+ * SMC 'MBSE' key actions:
+ *
+ * 'offw' - shutdown warning
+ * 'slpw' - sleep warning
+ * 'rest' - restart warning
+ * 'off1' - shutdown (needs PMU bit set to stay on)
+ * 'susp' - suspend
+ * 'phra' - restart ("PE Halt Restart Action"?)
+ * 'panb' - panic beginning
+ * 'pane' - panic end
+ */
+
+static int macsmc_power_off(struct sys_off_data *data)
+{
+ struct macsmc_reboot *reboot = data->cb_data;
+
+ dev_info(reboot->dev, "Issuing power off (off1)\n");
+
+ if (apple_smc_write_u32_atomic(reboot->smc, SMC_KEY(MBSE), SMC_KEY(off1)) < 0) {
+ dev_err(reboot->dev, "Failed to issue MBSE = off1 (power_off)\n");
+ } else {
+ mdelay(100);
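+		/* If we are still running 100ms later, the SMC did not act on the request */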
+ WARN_ON(1);
+ }
+
+ return NOTIFY_OK;
+}
+
+static int macsmc_restart(struct sys_off_data *data)
+{
+ struct macsmc_reboot *reboot = data->cb_data;
+
+ dev_info(reboot->dev, "Issuing restart (phra)\n");
+
+ if (apple_smc_write_u32_atomic(reboot->smc, SMC_KEY(MBSE), SMC_KEY(phra)) < 0) {
+ dev_err(reboot->dev, "Failed to issue MBSE = phra (restart)\n");
+ } else {
+ mdelay(100);
+ WARN_ON(1);
+ }
+
+ return NOTIFY_OK;
+}
+
+static int macsmc_reboot_notify(struct notifier_block *this, unsigned long action, void *data)
+{
+ struct macsmc_reboot *reboot = container_of(this, struct macsmc_reboot, reboot_notify);
+ u32 val;
+ u8 shutdown_flag;
+
+ switch (action) {
+ case SYS_RESTART:
+ val = SMC_KEY(rest);
+ shutdown_flag = 0;
+ break;
+ case SYS_POWER_OFF:
+ val = SMC_KEY(offw);
+ shutdown_flag = 1;
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ dev_info(reboot->dev, "Preparing for reboot (%p4ch)\n", &val);
+
+ /* On the Mac Mini, this will turn off the LED for power off */
+ if (apple_smc_write_u32(reboot->smc, SMC_KEY(MBSE), val) < 0)
+ dev_err(reboot->dev, "Failed to issue MBSE = %p4ch (reboot_prepare)\n", &val);
+
+ /* Set the boot_stage to 0, which means we're doing a clean shutdown/reboot. */
+ if (reboot->nvm.boot_stage &&
+ nvmem_cell_set_u8(reboot->nvm.boot_stage, BOOT_STAGE_SHUTDOWN) < 0)
+ dev_err(reboot->dev, "Failed to write boot_stage\n");
+
+ /*
+ * Set the PMU flag to actually reboot into the off state.
+ * Without this, the device will just reboot. We make it optional in case it is no longer
+ * necessary on newer hardware.
+ */
+ if (reboot->nvm.shutdown_flag &&
+ nvmem_cell_set_u8(reboot->nvm.shutdown_flag, shutdown_flag) < 0)
+ dev_err(reboot->dev, "Failed to write shutdown_flag\n");
+
+ return NOTIFY_OK;
+}
+
+static void macsmc_power_init_error_counts(struct macsmc_reboot *reboot)
+{
+ int boot_error_count, panic_count;
+
+ if (!reboot->nvm.boot_error_count || !reboot->nvm.panic_count)
+ return;
+
+ boot_error_count = nvmem_cell_get_u8(reboot->nvm.boot_error_count);
+ if (boot_error_count < 0) {
+ dev_err(reboot->dev, "Failed to read boot_error_count (%d)\n", boot_error_count);
+ return;
+ }
+
+ panic_count = nvmem_cell_get_u8(reboot->nvm.panic_count);
+ if (panic_count < 0) {
+ dev_err(reboot->dev, "Failed to read panic_count (%d)\n", panic_count);
+ return;
+ }
+
+ if (!boot_error_count && !panic_count)
+ return;
+
+ dev_warn(reboot->dev, "PMU logged %d boot error(s) and %d panic(s)\n",
+ boot_error_count, panic_count);
+
+ if (nvmem_cell_set_u8(reboot->nvm.panic_count, 0) < 0)
+ dev_err(reboot->dev, "Failed to reset panic_count\n");
+ if (nvmem_cell_set_u8(reboot->nvm.boot_error_count, 0) < 0)
+ dev_err(reboot->dev, "Failed to reset boot_error_count\n");
+}
+
+static int macsmc_reboot_probe(struct platform_device *pdev)
+{
+ struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent);
+ struct macsmc_reboot *reboot;
+ int ret, i;
+
+ /* Ignore devices without this functionality */
+ if (!apple_smc_key_exists(smc, SMC_KEY(MBSE)))
+ return -ENODEV;
+
+ reboot = devm_kzalloc(&pdev->dev, sizeof(*reboot), GFP_KERNEL);
+ if (!reboot)
+ return -ENOMEM;
+
+ reboot->dev = &pdev->dev;
+ reboot->smc = smc;
+
+ platform_set_drvdata(pdev, reboot);
+
+ pdev->dev.of_node = of_get_child_by_name(pdev->dev.parent->of_node, "reboot");
+
+ for (i = 0; i < ARRAY_SIZE(nvmem_names); i++) {
+		struct nvmem_cell *cell;
+
+		cell = devm_nvmem_cell_get(&pdev->dev, nvmem_names[i]);
+ if (IS_ERR(cell)) {
+ if (PTR_ERR(cell) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_warn(&pdev->dev, "Missing NVMEM cell %s (%ld)\n",
+ nvmem_names[i], PTR_ERR(cell));
+ /* Non fatal, we'll deal with it */
+ cell = NULL;
+ }
+ reboot->nvm_cells[i] = cell;
+ }
+
+ /* Set the boot_stage to indicate we're running the OS kernel */
+ if (reboot->nvm.boot_stage &&
+ nvmem_cell_set_u8(reboot->nvm.boot_stage, BOOT_STAGE_KERNEL_STARTED) < 0)
+ dev_err(reboot->dev, "Failed to write boot_stage\n");
+
+ /* Display and clear the error counts */
+ macsmc_power_init_error_counts(reboot);
+
+ reboot->reboot_notify.notifier_call = macsmc_reboot_notify;
+
+ ret = devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_HIGH,
+ macsmc_power_off, reboot);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to register power-off handler\n");
+
+ ret = devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_RESTART, SYS_OFF_PRIO_HIGH,
+ macsmc_restart, reboot);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to register restart handler\n");
+
+ ret = devm_register_reboot_notifier(&pdev->dev, &reboot->reboot_notify);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to register reboot notifier\n");
+
+ dev_info(&pdev->dev, "Handling reboot and poweroff requests via SMC\n");
+
+ if (device_create_file(&pdev->dev, &dev_attr_ac_power_mode))
+ dev_warn(&pdev->dev, "could not create sysfs file\n");
+
+ return 0;
+}
+
+static int macsmc_reboot_remove(struct platform_device *pdev)
+{
+ device_remove_file(&pdev->dev, &dev_attr_ac_power_mode);
+
+ return 0;
+}
+
+static struct platform_driver macsmc_reboot_driver = {
+ .driver = {
+ .name = "macsmc-reboot",
+ .owner = THIS_MODULE,
+ },
+ .probe = macsmc_reboot_probe,
+ .remove = macsmc_reboot_remove,
+};
+module_platform_driver(macsmc_reboot_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple SMC reboot/poweroff driver");
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_ALIAS("platform:macsmc-reboot");
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 0bbfe6a7ce4d..129e7e951590 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -918,4 +918,11 @@ config BATTERY_UG3105
device is off or suspended, the functionality of this driver is
limited to reporting capacity only.
+config CHARGER_MACSMC
+ tristate "Apple SMC Charger / Battery support"
+ depends on APPLE_SMC
+ help
+ Say Y here to enable support for the charger and battery controls on
+ Apple SMC controllers, as used on Apple Silicon Macs.
+
endif # POWER_SUPPLY
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index 0ee8653e882e..19fbf25b2869 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -110,3 +110,4 @@ obj-$(CONFIG_BATTERY_ACER_A500) += acer_a500_battery.o
obj-$(CONFIG_BATTERY_SURFACE) += surface_battery.o
obj-$(CONFIG_CHARGER_SURFACE) += surface_charger.o
obj-$(CONFIG_BATTERY_UG3105) += ug3105_battery.o
+obj-$(CONFIG_CHARGER_MACSMC) += macsmc_power.o
diff --git a/drivers/power/supply/macsmc_power.c b/drivers/power/supply/macsmc_power.c
new file mode 100644
index 000000000000..dee48ee8b5a4
--- /dev/null
+++ b/drivers/power/supply/macsmc_power.c
@@ -0,0 +1,516 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SMC Power/Battery Management
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/macsmc.h>
+#include <linux/power_supply.h>
+
+#define MAX_STRING_LENGTH 256
+
+struct macsmc_power {
+ struct device *dev;
+ struct apple_smc *smc;
+
+ struct power_supply *batt;
+ char model_name[MAX_STRING_LENGTH];
+ char serial_number[MAX_STRING_LENGTH];
+ char mfg_date[MAX_STRING_LENGTH];
+
+ struct power_supply *ac;
+
+ struct notifier_block nb;
+};
+
+#define CHNC_BATTERY_FULL BIT(0)
+#define CHNC_NO_CHARGER BIT(7)
+#define CHNC_NOCHG_CH0C BIT(14)
+#define CHNC_NOCHG_CH0B_CH0K BIT(15)
+#define CHNC_BATTERY_FULL_2 BIT(18)
+#define CHNC_BMS_BUSY BIT(23)
+#define CHNC_NOAC_CH0J BIT(53)
+#define CHNC_NOAC_CH0I BIT(54)
+
+#define CH0R_LOWER_FLAGS GENMASK(15, 0)
+#define CH0R_NOAC_CH0I BIT(0)
+#define CH0R_NOAC_CH0J BIT(5)
+#define CH0R_BMS_BUSY BIT(8)
+#define CH0R_NOAC_CH0K BIT(9)
+
+#define CH0X_CH0C BIT(0)
+#define CH0X_CH0B BIT(1)
+
+static int macsmc_battery_get_status(struct macsmc_power *power)
+{
+ u64 nocharge_flags;
+ u32 nopower_flags;
+ u16 ac_current;
+ int ret;
+
+ /*
+ * Note: there are fallbacks in case some of these SMC keys disappear in the future
+ * or are not present on some machines. We treat the absence of the CHCE/CHCC/BSFC/CHSC
+ * flags as an error, since they are quite fundamental and simple booleans.
+ */
+
+ /*
+ * If power input is inhibited, we are definitely discharging.
+ * However, if the only reason is the BMS is doing a balancing cycle,
+ * go ahead and ignore that one to avoid spooking users.
+ */
+ ret = apple_smc_read_u32(power->smc, SMC_KEY(CH0R), &nopower_flags);
+ if (!ret && (nopower_flags & CH0R_LOWER_FLAGS & ~CH0R_BMS_BUSY))
+ return POWER_SUPPLY_STATUS_DISCHARGING;
+
+ /* If no charger is present, we are definitely discharging. */
+ ret = apple_smc_read_flag(power->smc, SMC_KEY(CHCE));
+ if (ret < 0)
+ return ret;
+ else if (!ret)
+ return POWER_SUPPLY_STATUS_DISCHARGING;
+
+ /* If AC is not charge capable, we are definitely discharging. */
+ ret = apple_smc_read_flag(power->smc, SMC_KEY(CHCC));
+ if (ret < 0)
+ return ret;
+ else if (!ret)
+ return POWER_SUPPLY_STATUS_DISCHARGING;
+
+ /*
+ * If the AC input current limit is tiny or 0, we are discharging no matter
+ * how much the BMS believes it can charge.
+ */
+ ret = apple_smc_read_u16(power->smc, SMC_KEY(AC-i), &ac_current);
+ if (!ret && ac_current < 100)
+ return POWER_SUPPLY_STATUS_DISCHARGING;
+
+ /* If the battery is full, report it as such. */
+ ret = apple_smc_read_flag(power->smc, SMC_KEY(BSFC));
+ if (ret < 0)
+ return ret;
+ else if (ret)
+ return POWER_SUPPLY_STATUS_FULL;
+
+ /* If there are reasons we aren't charging... */
+ ret = apple_smc_read_u64(power->smc, SMC_KEY(CHNC), &nocharge_flags);
+ if (!ret) {
+ /* Perhaps the battery is full after all */
+ if (nocharge_flags & CHNC_BATTERY_FULL)
+ return POWER_SUPPLY_STATUS_FULL;
+ /* Or maybe the BMS is just busy doing something, if so call it charging anyway */
+ else if (nocharge_flags == CHNC_BMS_BUSY)
+ return POWER_SUPPLY_STATUS_CHARGING;
+ /* If we have other reasons we aren't charging, say we aren't */
+ else if (nocharge_flags)
+ return POWER_SUPPLY_STATUS_NOT_CHARGING;
+ /* Else we're either charging or about to charge */
+ else
+ return POWER_SUPPLY_STATUS_CHARGING;
+ }
+
+ /* As a fallback, use the system charging flag. */
+ ret = apple_smc_read_flag(power->smc, SMC_KEY(CHSC));
+ if (ret < 0)
+ return ret;
+ if (!ret)
+ return POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else
+ return POWER_SUPPLY_STATUS_CHARGING;
+}
+
+static int macsmc_battery_get_charge_behaviour(struct macsmc_power *power)
+{
+ int ret;
+ u8 val;
+
+ /* CH0I returns a bitmask like the low byte of CH0R */
+ ret = apple_smc_read_u8(power->smc, SMC_KEY(CH0I), &val);
+ if (ret)
+ return ret;
+ if (val & CH0R_NOAC_CH0I)
+ return POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE;
+
+ /* CH0C returns a bitmask containing CH0B/CH0C flags */
+ ret = apple_smc_read_u8(power->smc, SMC_KEY(CH0C), &val);
+ if (ret)
+ return ret;
+ if (val & CH0X_CH0C)
+ return POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE;
+ else
+ return POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO;
+}
+
+static int macsmc_battery_set_charge_behaviour(struct macsmc_power *power, int val)
+{
+ u8 ch0i, ch0c;
+ int ret;
+
+ /*
+ * CH0I/CH0C are "hard" controls that will allow the battery to run down to 0.
+ * CH0K/CH0B are "soft" controls that are reset to 0 when SOC drops below 50%;
+ * we don't expose these yet.
+ */
+
+ switch (val) {
+ case POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO:
+ ch0i = ch0c = 0;
+ break;
+ case POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE:
+ ch0i = 0;
+ ch0c = 1;
+ break;
+ case POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE:
+ ch0i = 1;
+ ch0c = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = apple_smc_write_u8(power->smc, SMC_KEY(CH0I), ch0i);
+ if (ret)
+ return ret;
+ return apple_smc_write_u8(power->smc, SMC_KEY(CH0C), ch0c);
+}
+
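+/* BMDT is an ASCII date of the form YYMMDD; parse one two-digit field */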
+static int macsmc_battery_get_date(const char *s, int *out)
+{
+ if (!isdigit(s[0]) || !isdigit(s[1]))
+ return -ENOTSUPP;
+
+ *out = (s[0] - '0') * 10 + s[1] - '0';
+ return 0;
+}
+
+static int macsmc_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct macsmc_power *power = power_supply_get_drvdata(psy);
+ int ret = 0;
+ u8 vu8;
+ u16 vu16;
+ u32 vu32;
+ s16 vs16;
+ s32 vs32;
+ s64 vs64;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = macsmc_battery_get_status(power);
+ ret = val->intval < 0 ? val->intval : 0;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
+ val->intval = macsmc_battery_get_charge_behaviour(power);
+ ret = val->intval < 0 ? val->intval : 0;
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
+ ret = apple_smc_read_u16(power->smc, SMC_KEY(B0TE), &vu16);
+ val->intval = vu16 == 0xffff ? 0 : vu16 * 60;
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
+ ret = apple_smc_read_u16(power->smc, SMC_KEY(B0TF), &vu16);
+ val->intval = vu16 == 0xffff ? 0 : vu16 * 60;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ ret = apple_smc_read_u8(power->smc, SMC_KEY(BUIC), &vu8);
+ val->intval = vu8;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = apple_smc_read_u16(power->smc, SMC_KEY(B0AV), &vu16);
+ val->intval = vu16 * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ ret = apple_smc_read_s16(power->smc, SMC_KEY(B0AC), &vs16);
+ val->intval = vs16 * 1000;
+ break;
+ case POWER_SUPPLY_PROP_POWER_NOW:
+ ret = apple_smc_read_s32(power->smc, SMC_KEY(B0AP), &vs32);
+ val->intval = vs32 * 1000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ ret = apple_smc_read_u16(power->smc, SMC_KEY(BITV), &vu16);
+ val->intval = vu16 * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ ret = apple_smc_read_u16(power->smc, SMC_KEY(B0RC), &vu16);
+ val->intval = vu16 * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ ret = apple_smc_read_u32(power->smc, SMC_KEY(CSIL), &vu32);
+ val->intval = vu32 * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ ret = apple_smc_read_u16(power->smc, SMC_KEY(B0RI), &vu16);
+ val->intval = vu16 * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ ret = apple_smc_read_u16(power->smc, SMC_KEY(B0RV), &vu16);
+ val->intval = vu16 * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ ret = apple_smc_read_u16(power->smc, SMC_KEY(B0DC), &vu16);
+ val->intval = vu16 * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ ret = apple_smc_read_u16(power->smc, SMC_KEY(B0FC), &vu16);
+ val->intval = vu16 * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ ret = apple_smc_read_u16(power->smc, SMC_KEY(B0RM), &vu16);
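+		/* B0RM is byte-swapped relative to the other u16 battery keys */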
+ val->intval = swab16(vu16) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ ret = apple_smc_read_u16(power->smc, SMC_KEY(B0AT), &vu16);
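+		/* B0AT is in deci-Kelvin; the power supply core expects tenths of a degree Celsius */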
+ val->intval = vu16 - 2732;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ ret = apple_smc_read_s64(power->smc, SMC_KEY(BAAC), &vs64);
+ val->intval = vs64;
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
+ ret = apple_smc_read_u16(power->smc, SMC_KEY(B0CT), &vu16);
+ val->intval = vu16;
+ break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = apple_smc_read_flag(power->smc, SMC_KEY(BBAD));
+ val->intval = ret == 1 ? POWER_SUPPLY_HEALTH_DEAD : POWER_SUPPLY_HEALTH_GOOD;
+ ret = ret < 0 ? ret : 0;
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = power->model_name;
+ break;
+ case POWER_SUPPLY_PROP_SERIAL_NUMBER:
+ val->strval = power->serial_number;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURE_YEAR:
+ ret = macsmc_battery_get_date(&power->mfg_date[0], &val->intval);
+ val->intval += 2000 - 8; /* -8 is a fixup for a firmware bug... */
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURE_MONTH:
+ ret = macsmc_battery_get_date(&power->mfg_date[2], &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURE_DAY:
+ ret = macsmc_battery_get_date(&power->mfg_date[4], &val->intval);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int macsmc_battery_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct macsmc_power *power = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
+ return macsmc_battery_set_charge_behaviour(power, val->intval);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int macsmc_battery_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static enum power_supply_property macsmc_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_POWER_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_SCOPE,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+ POWER_SUPPLY_PROP_MANUFACTURE_YEAR,
+ POWER_SUPPLY_PROP_MANUFACTURE_MONTH,
+ POWER_SUPPLY_PROP_MANUFACTURE_DAY,
+};
+
+static const struct power_supply_desc macsmc_battery_desc = {
+ .name = "macsmc-battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .get_property = macsmc_battery_get_property,
+ .set_property = macsmc_battery_set_property,
+ .property_is_writeable = macsmc_battery_property_is_writeable,
+ .properties = macsmc_battery_props,
+ .num_properties = ARRAY_SIZE(macsmc_battery_props),
+};
+
+static int macsmc_ac_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct macsmc_power *power = power_supply_get_drvdata(psy);
+ int ret = 0;
+ u16 vu16;
+ u32 vu32;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ ret = apple_smc_read_u32(power->smc, SMC_KEY(CHIS), &vu32);
+ val->intval = !!vu32;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = apple_smc_read_u16(power->smc, SMC_KEY(AC-n), &vu16);
+ val->intval = vu16 * 1000;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = apple_smc_read_u16(power->smc, SMC_KEY(AC-i), &vu16);
+ val->intval = vu16 * 1000;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_POWER_LIMIT:
+ ret = apple_smc_read_u32(power->smc, SMC_KEY(ACPW), &vu32);
+ val->intval = vu32 * 1000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static enum power_supply_property macsmc_ac_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ POWER_SUPPLY_PROP_INPUT_POWER_LIMIT,
+};
+
+static const struct power_supply_desc macsmc_ac_desc = {
+ .name = "macsmc-ac",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .get_property = macsmc_ac_get_property,
+ .properties = macsmc_ac_props,
+ .num_properties = ARRAY_SIZE(macsmc_ac_props),
+};
+
+static int macsmc_power_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct macsmc_power *power = container_of(nb, struct macsmc_power, nb);
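+	/* Events of class 0x710101xx signal charger attach/detach; the low byte is the new state */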
+
+ if ((event & 0xffffff00) == 0x71010100) {
+ bool charging = (event & 0xff) != 0;
+
+ dev_info(power->dev, "Charging: %d\n", charging);
+ power_supply_changed(power->batt);
+ power_supply_changed(power->ac);
+
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int macsmc_power_probe(struct platform_device *pdev)
+{
+ struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent);
+ struct power_supply_config psy_cfg = {};
+ struct macsmc_power *power;
+ int ret;
+
+ power = devm_kzalloc(&pdev->dev, sizeof(*power), GFP_KERNEL);
+ if (!power)
+ return -ENOMEM;
+
+ power->dev = &pdev->dev;
+ power->smc = smc;
+ dev_set_drvdata(&pdev->dev, power);
+
+ /* Ignore devices without a charger/battery */
+ if (macsmc_battery_get_status(power) <= POWER_SUPPLY_STATUS_UNKNOWN)
+ return -ENODEV;
+
+ /* Fetch string properties */
+ apple_smc_read(smc, SMC_KEY(BMDN), power->model_name, sizeof(power->model_name) - 1);
+ apple_smc_read(smc, SMC_KEY(BMSN), power->serial_number, sizeof(power->serial_number) - 1);
+ apple_smc_read(smc, SMC_KEY(BMDT), power->mfg_date, sizeof(power->mfg_date) - 1);
+
+ /* Turn off the "optimized battery charging" flags, in case macOS left them on */
+ apple_smc_write_u8(power->smc, SMC_KEY(CH0K), 0);
+ apple_smc_write_u8(power->smc, SMC_KEY(CH0B), 0);
+
+ psy_cfg.drv_data = power;
+ power->batt = devm_power_supply_register(&pdev->dev, &macsmc_battery_desc, &psy_cfg);
+ if (IS_ERR(power->batt)) {
+ dev_err(&pdev->dev, "Failed to register battery\n");
+ ret = PTR_ERR(power->batt);
+ return ret;
+ }
+
+ power->ac = devm_power_supply_register(&pdev->dev, &macsmc_ac_desc, &psy_cfg);
+ if (IS_ERR(power->ac)) {
+ dev_err(&pdev->dev, "Failed to register AC adapter\n");
+ ret = PTR_ERR(power->ac);
+ return ret;
+ }
+
+ power->nb.notifier_call = macsmc_power_event;
+ apple_smc_register_notifier(power->smc, &power->nb);
+
+ return 0;
+}
+
+static int macsmc_power_remove(struct platform_device *pdev)
+{
+ struct macsmc_power *power = dev_get_drvdata(&pdev->dev);
+
+ apple_smc_unregister_notifier(power->smc, &power->nb);
+
+ return 0;
+}
+
+static struct platform_driver macsmc_power_driver = {
+ .driver = {
+ .name = "macsmc-power",
+ .owner = THIS_MODULE,
+ },
+ .probe = macsmc_power_probe,
+ .remove = macsmc_power_remove,
+};
+module_platform_driver(macsmc_power_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple SMC battery and power management driver");
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_ALIAS("platform:macsmc-power");
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 60d13a949bc5..c3be11468414 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -51,6 +51,18 @@ config PWM_AB8500
To compile this driver as a module, choose M here: the module
will be called pwm-ab8500.
+config PWM_APPLE
+ tristate "Apple SoC PWM support"
+ depends on ARCH_APPLE || COMPILE_TEST
+ help
+	  Generic PWM framework driver for the PWM controller present on
+	  Apple SoCs.
+
+	  Say Y here if you have an ARM Apple laptop, otherwise say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-apple.
+
config PWM_ATMEL
tristate "Atmel PWM support"
depends on ARCH_AT91 || COMPILE_TEST
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 7bf1a29f02b8..19899b912e00 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_PWM) += core.o
obj-$(CONFIG_PWM_SYSFS) += sysfs.o
obj-$(CONFIG_PWM_AB8500) += pwm-ab8500.o
+obj-$(CONFIG_PWM_APPLE) += pwm-apple.o
obj-$(CONFIG_PWM_ATMEL) += pwm-atmel.o
obj-$(CONFIG_PWM_ATMEL_HLCDC_PWM) += pwm-atmel-hlcdc.o
obj-$(CONFIG_PWM_ATMEL_TCB) += pwm-atmel-tcb.o
diff --git a/drivers/pwm/pwm-apple.c b/drivers/pwm/pwm-apple.c
new file mode 100644
index 000000000000..7b2936346f4e
--- /dev/null
+++ b/drivers/pwm/pwm-apple.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Driver for the Apple SoC PWM controller
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/math64.h>
+
+#define PWM_CONTROL 0x00
+#define PWM_ON_CYCLES 0x1c
+#define PWM_OFF_CYCLES 0x18
+
+#define CTRL_ENABLE BIT(0)
+#define CTRL_MODE BIT(2)
+#define CTRL_UPDATE BIT(5)
+#define CTRL_TRIGGER BIT(9)
+#define CTRL_INVERT BIT(10)
+#define CTRL_OUTPUT_ENABLE BIT(14)
+
+struct apple_pwm {
+ struct pwm_chip chip;
+ void __iomem *base;
+ u64 clkrate;
+};
+
+static int apple_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct apple_pwm *fpwm;
+ u64 on_cycles, off_cycles;
+
+ fpwm = container_of(chip, struct apple_pwm, chip);
+ if (state->enabled) {
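+		/* cycles = clkrate * time_ns / NSEC_PER_SEC, with a 128-bit intermediate */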
+ on_cycles = mul_u64_u64_div_u64(fpwm->clkrate,
+ state->duty_cycle, NSEC_PER_SEC);
+ off_cycles = mul_u64_u64_div_u64(fpwm->clkrate,
+ state->period, NSEC_PER_SEC) - on_cycles;
+ writel(on_cycles, fpwm->base + PWM_ON_CYCLES);
+ writel(off_cycles, fpwm->base + PWM_OFF_CYCLES);
+ writel(CTRL_ENABLE | CTRL_OUTPUT_ENABLE | CTRL_UPDATE,
+ fpwm->base + PWM_CONTROL);
+ } else {
+ writel(0, fpwm->base + PWM_CONTROL);
+ }
+ return 0;
+}
+
+static void apple_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct apple_pwm *fpwm;
+ u32 on_cycles, off_cycles, ctrl;
+
+ fpwm = container_of(chip, struct apple_pwm, chip);
+
+ ctrl = readl(fpwm->base + PWM_CONTROL);
+ on_cycles = readl(fpwm->base + PWM_ON_CYCLES);
+ off_cycles = readl(fpwm->base + PWM_OFF_CYCLES);
+
+ state->enabled = (ctrl & CTRL_ENABLE) && (ctrl & CTRL_OUTPUT_ENABLE);
+ state->polarity = PWM_POLARITY_NORMAL;
+	/* Convert cycles back to ns; multiply before dividing to avoid truncating to zero */
+	state->duty_cycle = mul_u64_u64_div_u64(on_cycles, NSEC_PER_SEC, fpwm->clkrate);
+	state->period = mul_u64_u64_div_u64((u64)on_cycles + off_cycles, NSEC_PER_SEC, fpwm->clkrate);
+}
+
+static const struct pwm_ops apple_pwm_ops = {
+ .apply = apple_pwm_apply,
+ .get_state = apple_pwm_get_state,
+ .owner = THIS_MODULE,
+};
+
+static int apple_pwm_probe(struct platform_device *pdev)
+{
+ struct apple_pwm *pwm;
+	struct clk *clk;
+
+ pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
+ if (!pwm)
+ return -ENOMEM;
+
+ pwm->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pwm->base))
+ return PTR_ERR(pwm->base);
+
+ platform_set_drvdata(pdev, pwm);
+
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ pwm->clkrate = clk_get_rate(clk);
+ pwm->chip.dev = &pdev->dev;
+ pwm->chip.npwm = 1;
+ pwm->chip.ops = &apple_pwm_ops;
+
+	return devm_pwmchip_add(&pdev->dev, &pwm->chip);
+}
+
+static const struct of_device_id apple_pwm_of_match[] = {
+ { .compatible = "apple,s5l-fpwm" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, apple_pwm_of_match);
+
+static struct platform_driver apple_pwm_driver = {
+ .probe = apple_pwm_probe,
+ .driver = {
+ .name = "apple-pwm",
+ .owner = THIS_MODULE,
+ .of_match_table = apple_pwm_of_match,
+ },
+};
+module_platform_driver(apple_pwm_driver);
+
+MODULE_DESCRIPTION("Apple SoC PWM driver");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index bb63edb507da..d4545aaee56d 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1995,4 +1995,17 @@ config RTC_DRV_POLARFIRE_SOC
This driver can also be built as a module, if so, the module
will be called "rtc-mpfs".
+config RTC_DRV_MACSMC
+ tristate "Apple Mac SMC RTC"
+ depends on ARCH_APPLE || COMPILE_TEST
+ depends on APPLE_SMC
+ depends on OF
+ default ARCH_APPLE
+ help
+	  If you say yes here you get support for the RTC on Apple Silicon
+	  Macs, which is exposed through the SMC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rtc-macsmc.
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index aab22bc63432..cd4badbba8b8 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -89,6 +89,7 @@ obj-$(CONFIG_RTC_DRV_M41T94) += rtc-m41t94.o
obj-$(CONFIG_RTC_DRV_M48T35) += rtc-m48t35.o
obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o
obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
+obj-$(CONFIG_RTC_DRV_MACSMC) += rtc-macsmc.o
obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
obj-$(CONFIG_RTC_DRV_MAX6916) += rtc-max6916.o
diff --git a/drivers/rtc/rtc-macsmc.c b/drivers/rtc/rtc-macsmc.c
new file mode 100644
index 000000000000..34730c925248
--- /dev/null
+++ b/drivers/rtc/rtc-macsmc.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SMC RTC driver
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/bitops.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/macsmc.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+
+/* 48-bit RTC */
+#define RTC_BYTES 6
+#define RTC_BITS (8 * RTC_BYTES)
+
+/* 32768 Hz clock */
+#define RTC_SEC_SHIFT 15
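+/* seconds = counter >> RTC_SEC_SHIFT */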
+
+struct macsmc_rtc {
+ struct device *dev;
+ struct apple_smc *smc;
+ struct rtc_device *rtc_dev;
+ struct nvmem_cell *rtc_offset;
+};
+
+static int macsmc_rtc_get_time(struct device *dev, struct rtc_time *tm)
+{
+ struct macsmc_rtc *rtc = dev_get_drvdata(dev);
+ u64 ctr = 0, off = 0;
+ time64_t now;
+ void *p_off;
+ size_t len;
+ int ret;
+
+ ret = apple_smc_read(rtc->smc, SMC_KEY(CLKM), &ctr, RTC_BYTES);
+ if (ret != RTC_BYTES)
+ return ret < 0 ? ret : -EIO;
+
+ p_off = nvmem_cell_read(rtc->rtc_offset, &len);
+ if (IS_ERR(p_off))
+ return PTR_ERR(p_off);
+ if (len < RTC_BYTES) {
+ kfree(p_off);
+ return -EIO;
+ }
+
+ memcpy(&off, p_off, RTC_BYTES);
+ kfree(p_off);
+
+ /* Sign extend from 48 to 64 bits, then arithmetic shift right 15 bits to get seconds */
+ now = sign_extend64(ctr + off, RTC_BITS - 1) >> RTC_SEC_SHIFT;
+ rtc_time64_to_tm(now, tm);
+
+	return 0;
+}
+
+static int macsmc_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct macsmc_rtc *rtc = dev_get_drvdata(dev);
+ u64 ctr = 0, off = 0;
+ int ret;
+
+ ret = apple_smc_read(rtc->smc, SMC_KEY(CLKM), &ctr, RTC_BYTES);
+ if (ret != RTC_BYTES)
+ return ret < 0 ? ret : -EIO;
+
+ /* This sets the offset such that the set second begins now */
+ off = (rtc_tm_to_time64(tm) << RTC_SEC_SHIFT) - ctr;
+ return nvmem_cell_write(rtc->rtc_offset, &off, RTC_BYTES);
+}
+
+static const struct rtc_class_ops macsmc_rtc_ops = {
+ .read_time = macsmc_rtc_get_time,
+ .set_time = macsmc_rtc_set_time,
+};
+
+static int macsmc_rtc_probe(struct platform_device *pdev)
+{
+ struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent);
+ struct macsmc_rtc *rtc;
+
+ /* Ignore devices without this functionality */
+ if (!apple_smc_key_exists(smc, SMC_KEY(CLKM)))
+ return -ENODEV;
+
+ rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ rtc->dev = &pdev->dev;
+ rtc->smc = smc;
+
+ pdev->dev.of_node = of_get_child_by_name(pdev->dev.parent->of_node, "rtc");
+
+ rtc->rtc_offset = devm_nvmem_cell_get(&pdev->dev, "rtc_offset");
+ if (IS_ERR(rtc->rtc_offset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rtc->rtc_offset),
+ "Failed to get rtc_offset NVMEM cell\n");
+
+ rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc->rtc_dev))
+ return PTR_ERR(rtc->rtc_dev);
+
+ rtc->rtc_dev->ops = &macsmc_rtc_ops;
+ rtc->rtc_dev->range_min = S64_MIN >> (RTC_SEC_SHIFT + (64 - RTC_BITS));
+ rtc->rtc_dev->range_max = S64_MAX >> (RTC_SEC_SHIFT + (64 - RTC_BITS));
+
+ platform_set_drvdata(pdev, rtc);
+
+ return devm_rtc_register_device(rtc->rtc_dev);
+}
+
+static struct platform_driver macsmc_rtc_driver = {
+ .driver = {
+ .name = "macsmc-rtc",
+ .owner = THIS_MODULE,
+ },
+ .probe = macsmc_rtc_probe,
+};
+module_platform_driver(macsmc_rtc_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple SMC RTC driver");
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_ALIAS("platform:macsmc-rtc");
diff --git a/drivers/soc/apple/Kconfig b/drivers/soc/apple/Kconfig
index a1596fefacff..694a44968f76 100644
--- a/drivers/soc/apple/Kconfig
+++ b/drivers/soc/apple/Kconfig
@@ -30,6 +30,20 @@ config APPLE_RTKIT
Say 'y' here if you have an Apple SoC.
+config APPLE_RTKIT_HELPER
+ tristate "Apple Generic RTKit helper co-processor"
+ depends on APPLE_RTKIT
+ depends on ARCH_APPLE || COMPILE_TEST
+ default ARCH_APPLE
+ help
+ Apple SoCs such as the M1 come with various co-processors running
+ their proprietary RTKit operating system. This option enables support
+ for a generic co-processor that does not implement any additional
+ in-band communications. It can be used for testing purposes, or for
+ coprocessors such as MTP that communicate over a different interface.
+
+ Say 'y' here if you have an Apple SoC.
+
config APPLE_SART
tristate "Apple SART DMA address filter"
depends on ARCH_APPLE || COMPILE_TEST
@@ -41,6 +55,16 @@ config APPLE_SART
Say 'y' here if you have an Apple SoC.
+config APPLE_DOCKCHANNEL
+ tristate "Apple DockChannel FIFO"
+ depends on ARCH_APPLE || COMPILE_TEST
+ default ARCH_APPLE
+ help
+ DockChannel is a simple FIFO used on Apple SoCs for debug and inter-processor
+ communications.
+
+ Say 'y' here if you have an Apple SoC.
+
endmenu
endif
diff --git a/drivers/soc/apple/Makefile b/drivers/soc/apple/Makefile
index e293770cf66d..07a95a883d3d 100644
--- a/drivers/soc/apple/Makefile
+++ b/drivers/soc/apple/Makefile
@@ -4,5 +4,11 @@ obj-$(CONFIG_APPLE_PMGR_PWRSTATE) += apple-pmgr-pwrstate.o
obj-$(CONFIG_APPLE_RTKIT) += apple-rtkit.o
apple-rtkit-y = rtkit.o rtkit-crashlog.o
+obj-$(CONFIG_APPLE_RTKIT_HELPER) += apple-rtkit-helper.o
+apple-rtkit-helper-y = rtkit-helper.o
+
obj-$(CONFIG_APPLE_SART) += apple-sart.o
apple-sart-y = sart.o
+
+obj-$(CONFIG_APPLE_DOCKCHANNEL) += apple-dockchannel.o
+apple-dockchannel-y = dockchannel.o
diff --git a/drivers/soc/apple/dockchannel.c b/drivers/soc/apple/dockchannel.c
new file mode 100644
index 000000000000..b4d793bf2102
--- /dev/null
+++ b/drivers/soc/apple/dockchannel.c
@@ -0,0 +1,407 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple DockChannel FIFO driver
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <asm/unaligned.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/soc/apple/dockchannel.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#define DOCKCHANNEL_MAX_IRQ 32
+
+#define DOCKCHANNEL_TX_TIMEOUT_MS 1000
+#define DOCKCHANNEL_RX_TIMEOUT_MS 1000
+
+#define IRQ_MASK 0x0
+#define IRQ_FLAG 0x4
+
+#define IRQ_TX BIT(0)
+#define IRQ_RX BIT(1)
+
+#define CONFIG_TX_THRESH 0x0
+#define CONFIG_RX_THRESH 0x4
+
+#define DATA_TX8 0x4
+#define DATA_TX16 0x8
+#define DATA_TX24 0xc
+#define DATA_TX32 0x10
+#define DATA_TX_FREE 0x14
+#define DATA_RX8 0x1c
+#define DATA_RX16 0x20
+#define DATA_RX24 0x24
+#define DATA_RX32 0x28
+#define DATA_RX_COUNT 0x2c
+
+struct dockchannel {
+ struct device *dev;
+ int tx_irq;
+ int rx_irq;
+
+ void __iomem *config_base;
+ void __iomem *data_base;
+
+ u32 fifo_size;
+ bool awaiting;
+ struct completion tx_comp;
+ struct completion rx_comp;
+
+ void *cookie;
+ void (*data_available)(void *cookie, size_t avail);
+};
+
+struct dockchannel_common {
+ struct device *dev;
+ struct irq_domain *domain;
+ int irq;
+
+ void __iomem *irq_base;
+};
+
+/* Dockchannel FIFO functions */
+
+static irqreturn_t dockchannel_tx_irq(int irq, void *data)
+{
+ struct dockchannel *dockchannel = data;
+
+ disable_irq_nosync(irq);
+ complete(&dockchannel->tx_comp);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dockchannel_rx_irq(int irq, void *data)
+{
+ struct dockchannel *dockchannel = data;
+
+ disable_irq_nosync(irq);
+
+ if (dockchannel->awaiting) {
+ return IRQ_WAKE_THREAD;
+ } else {
+ complete(&dockchannel->rx_comp);
+ return IRQ_HANDLED;
+ }
+}
+
+static irqreturn_t dockchannel_rx_irq_thread(int irq, void *data)
+{
+ struct dockchannel *dockchannel = data;
+ size_t avail = readl_relaxed(dockchannel->data_base + DATA_RX_COUNT);
+
+ dockchannel->awaiting = false;
+ dockchannel->data_available(dockchannel->cookie, avail);
+
+ return IRQ_HANDLED;
+}
+
+int dockchannel_send(struct dockchannel *dockchannel, const void *buf, size_t count)
+{
+ size_t left = count;
+ const u8 *p = buf;
+
+ while (left > 0) {
+ size_t avail = readl_relaxed(dockchannel->data_base + DATA_TX_FREE);
+ size_t block = min(left, avail);
+
+ if (avail == 0) {
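+			/* FIFO full: arm a free-space threshold IRQ (half the FIFO, or what is left) and wait */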
+ size_t threshold = min((size_t)(dockchannel->fifo_size / 2), left);
+
+ writel_relaxed(threshold, dockchannel->config_base + CONFIG_TX_THRESH);
+ reinit_completion(&dockchannel->tx_comp);
+ enable_irq(dockchannel->tx_irq);
+
+ if (!wait_for_completion_timeout(&dockchannel->tx_comp,
+ msecs_to_jiffies(DOCKCHANNEL_TX_TIMEOUT_MS))) {
+ disable_irq(dockchannel->tx_irq);
+ return -ETIMEDOUT;
+ }
+
+ continue;
+ }
+
+ while (block >= 4) {
+ writel_relaxed(get_unaligned_le32(p), dockchannel->data_base + DATA_TX32);
+ p += 4;
+ left -= 4;
+ block -= 4;
+ }
+ while (block > 0) {
+ writeb_relaxed(*p++, dockchannel->data_base + DATA_TX8);
+ left--;
+ block--;
+ }
+ }
+
+ return count;
+}
+EXPORT_SYMBOL(dockchannel_send);
+
+int dockchannel_recv(struct dockchannel *dockchannel, void *buf, size_t count)
+{
+ size_t left = count;
+ u8 *p = buf;
+
+ while (left > 0) {
+ size_t avail = readl_relaxed(dockchannel->data_base + DATA_RX_COUNT);
+ size_t block = min(left, avail);
+
+ if (avail == 0) {
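+			/* FIFO empty: arm a fill threshold IRQ and wait for data to arrive */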
+ size_t threshold = min((size_t)(dockchannel->fifo_size / 2), left);
+
+ writel_relaxed(threshold, dockchannel->config_base + CONFIG_RX_THRESH);
+ reinit_completion(&dockchannel->rx_comp);
+ enable_irq(dockchannel->rx_irq);
+
+ if (!wait_for_completion_timeout(&dockchannel->rx_comp,
+ msecs_to_jiffies(DOCKCHANNEL_RX_TIMEOUT_MS))) {
+ disable_irq(dockchannel->rx_irq);
+ return -ETIMEDOUT;
+ }
+
+ continue;
+ }
+
+ while (block >= 4) {
+ put_unaligned_le32(readl_relaxed(dockchannel->data_base + DATA_RX32), p);
+ p += 4;
+ left -= 4;
+ block -= 4;
+ }
+ while (block > 0) {
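+			/* The data byte is returned in bits 15:8 of DATA_RX8 */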
+ *p++ = readl_relaxed(dockchannel->data_base + DATA_RX8) >> 8;
+ left--;
+ block--;
+ }
+ }
+
+ return count;
+}
+EXPORT_SYMBOL(dockchannel_recv);
+
+int dockchannel_await(struct dockchannel *dockchannel,
+ void (*callback)(void *cookie, size_t avail),
+ void *cookie, size_t count)
+{
+ size_t threshold = min((size_t)dockchannel->fifo_size, count);
+
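+	/* A zero count cancels a pending await and disables the RX interrupt */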
+ if (!count) {
+ dockchannel->awaiting = false;
+ disable_irq(dockchannel->rx_irq);
+ return 0;
+ }
+
+ dockchannel->data_available = callback;
+ dockchannel->cookie = cookie;
+ dockchannel->awaiting = true;
+ writel_relaxed(threshold, dockchannel->config_base + CONFIG_RX_THRESH);
+ enable_irq(dockchannel->rx_irq);
+
+ return threshold;
+}
+EXPORT_SYMBOL(dockchannel_await);
+
+struct dockchannel *dockchannel_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dockchannel *dockchannel;
+ int ret;
+
+ dockchannel = devm_kzalloc(dev, sizeof(*dockchannel), GFP_KERNEL);
+ if (!dockchannel)
+ return ERR_PTR(-ENOMEM);
+
+ dockchannel->dev = dev;
+ dockchannel->config_base = devm_platform_ioremap_resource_byname(pdev, "config");
+ if (IS_ERR(dockchannel->config_base))
+ return (__force void *)dockchannel->config_base;
+
+ dockchannel->data_base = devm_platform_ioremap_resource_byname(pdev, "data");
+ if (IS_ERR(dockchannel->data_base))
+ return (__force void *)dockchannel->data_base;
+
+ ret = of_property_read_u32(dev->of_node, "apple,fifo-size", &dockchannel->fifo_size);
+ if (ret)
+ return ERR_PTR(dev_err_probe(dev, ret, "Missing apple,fifo-size property"));
+
+ init_completion(&dockchannel->tx_comp);
+ init_completion(&dockchannel->rx_comp);
+
+ dockchannel->tx_irq = platform_get_irq_byname(pdev, "tx");
+ if (dockchannel->tx_irq <= 0) {
+ return ERR_PTR(dev_err_probe(dev, dockchannel->tx_irq,
+ "Failed to get TX IRQ"));
+ }
+
+ dockchannel->rx_irq = platform_get_irq_byname(pdev, "rx");
+ if (dockchannel->rx_irq <= 0) {
+ return ERR_PTR(dev_err_probe(dev, dockchannel->rx_irq,
+ "Failed to get RX IRQ"));
+ }
+
+ ret = devm_request_irq(dev, dockchannel->tx_irq, dockchannel_tx_irq, IRQF_NO_AUTOEN,
+ "apple-dockchannel-tx", dockchannel);
+ if (ret)
+ return ERR_PTR(dev_err_probe(dev, ret, "Failed to request TX IRQ"));
+
+ ret = devm_request_threaded_irq(dev, dockchannel->rx_irq, dockchannel_rx_irq,
+ dockchannel_rx_irq_thread, IRQF_NO_AUTOEN,
+ "apple-dockchannel-rx", dockchannel);
+ if (ret)
+ return ERR_PTR(dev_err_probe(dev, ret, "Failed to request RX IRQ"));
+
+ return dockchannel;
+}
+EXPORT_SYMBOL(dockchannel_init);
+
+
+/* Dockchannel IRQchip */
+
+static void dockchannel_irq(struct irq_desc *desc)
+{
+ unsigned int irq = irq_desc_get_irq(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct dockchannel_common *dcc = irq_get_handler_data(irq);
+ unsigned long flags = readl_relaxed(dcc->irq_base + IRQ_FLAG);
+ int bit;
+
+ chained_irq_enter(chip, desc);
+
+ for_each_set_bit(bit, &flags, DOCKCHANNEL_MAX_IRQ)
+ generic_handle_domain_irq(dcc->domain, bit);
+
+ chained_irq_exit(chip, desc);
+}
+
+static void dockchannel_irq_ack(struct irq_data *data)
+{
+ struct dockchannel_common *dcc = irq_data_get_irq_chip_data(data);
+ unsigned int hwirq = data->hwirq;
+
+ writel_relaxed(BIT(hwirq), dcc->irq_base + IRQ_FLAG);
+}
+
+static void dockchannel_irq_mask(struct irq_data *data)
+{
+ struct dockchannel_common *dcc = irq_data_get_irq_chip_data(data);
+ unsigned int hwirq = data->hwirq;
+ u32 val = readl_relaxed(dcc->irq_base + IRQ_MASK);
+
+ writel_relaxed(val & ~BIT(hwirq), dcc->irq_base + IRQ_MASK);
+}
+
+static void dockchannel_irq_unmask(struct irq_data *data)
+{
+ struct dockchannel_common *dcc = irq_data_get_irq_chip_data(data);
+ unsigned int hwirq = data->hwirq;
+ u32 val = readl_relaxed(dcc->irq_base + IRQ_MASK);
+
+ writel_relaxed(val | BIT(hwirq), dcc->irq_base + IRQ_MASK);
+}
+
+static const struct irq_chip dockchannel_irqchip = {
+ .name = "dockchannel-irqc",
+ .irq_ack = dockchannel_irq_ack,
+ .irq_mask = dockchannel_irq_mask,
+ .irq_unmask = dockchannel_irq_unmask,
+};
+
+static int dockchannel_irq_domain_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_data(virq, d->host_data);
+ irq_set_chip_and_handler(virq, &dockchannel_irqchip, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops dockchannel_irq_domain_ops = {
+ .xlate = irq_domain_xlate_twocell,
+ .map = dockchannel_irq_domain_map,
+};
+
+static int dockchannel_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dockchannel_common *dcc;
+ struct device_node *child;
+
+ dcc = devm_kzalloc(dev, sizeof(*dcc), GFP_KERNEL);
+ if (!dcc)
+ return -ENOMEM;
+
+ dcc->dev = dev;
+ platform_set_drvdata(pdev, dcc);
+
+ dcc->irq_base = devm_platform_ioremap_resource_byname(pdev, "irq");
+ if (IS_ERR(dcc->irq_base))
+ return PTR_ERR(dcc->irq_base);
+
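+	/* Mask all sources and ack any stale flags before registering the domain */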
+ writel_relaxed(0, dcc->irq_base + IRQ_MASK);
+ writel_relaxed(~0, dcc->irq_base + IRQ_FLAG);
+
+ dcc->domain = irq_domain_add_linear(dev->of_node, DOCKCHANNEL_MAX_IRQ,
+ &dockchannel_irq_domain_ops, dcc);
+ if (!dcc->domain)
+ return -ENOMEM;
+
+ dcc->irq = platform_get_irq(pdev, 0);
+ if (dcc->irq <= 0)
+ return dev_err_probe(dev, dcc->irq, "Failed to get IRQ");
+
+ irq_set_handler_data(dcc->irq, dcc);
+ irq_set_chained_handler(dcc->irq, dockchannel_irq);
+
+ for_each_child_of_node(dev->of_node, child)
+ of_platform_device_create(child, NULL, dev);
+
+ return 0;
+}
+
+static int dockchannel_remove(struct platform_device *pdev)
+{
+ struct dockchannel_common *dcc = platform_get_drvdata(pdev);
+ int hwirq;
+
+ device_for_each_child(&pdev->dev, NULL, of_platform_device_destroy);
+
+ irq_set_chained_handler_and_data(dcc->irq, NULL, NULL);
+
+ for (hwirq = 0; hwirq < DOCKCHANNEL_MAX_IRQ; hwirq++)
+ irq_dispose_mapping(irq_find_mapping(dcc->domain, hwirq));
+
+ irq_domain_remove(dcc->domain);
+
+ writel_relaxed(0, dcc->irq_base + IRQ_MASK);
+ writel_relaxed(~0, dcc->irq_base + IRQ_FLAG);
+
+ return 0;
+}
+
+static const struct of_device_id dockchannel_of_match[] = {
+ { .compatible = "apple,dockchannel" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dockchannel_of_match);
+
+static struct platform_driver dockchannel_driver = {
+ .driver = {
+ .name = "dockchannel",
+ .of_match_table = dockchannel_of_match,
+ },
+ .probe = dockchannel_probe,
+ .remove = dockchannel_remove,
+};
+module_platform_driver(dockchannel_driver);
+
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple DockChannel driver");
diff --git a/drivers/soc/apple/rtkit-helper.c b/drivers/soc/apple/rtkit-helper.c
new file mode 100644
index 000000000000..f971af503f0b
--- /dev/null
+++ b/drivers/soc/apple/rtkit-helper.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple Generic RTKit helper coprocessor
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/soc/apple/rtkit.h>
+
+#define APPLE_ASC_CPU_CONTROL 0x44
+#define APPLE_ASC_CPU_CONTROL_RUN BIT(4)
+
+struct apple_rtkit_helper {
+ struct device *dev;
+ struct apple_rtkit *rtk;
+
+ void __iomem *asc_base;
+
+ struct resource *sram;
+ void __iomem *sram_base;
+};
+
+static int apple_rtkit_helper_shmem_setup(void *cookie, struct apple_rtkit_shmem *bfr)
+{
+ struct apple_rtkit_helper *helper = cookie;
+ struct resource res = {
+ .start = bfr->iova,
+ .end = bfr->iova + bfr->size - 1,
+ .name = "rtkit_map",
+ };
+
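+	/*
+	 * A zero IOVA means the coprocessor asks the AP to allocate the
+	 * buffer; any other request must fall inside the SRAM region.
+	 */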
+ if (!bfr->iova) {
+ bfr->buffer = dma_alloc_coherent(helper->dev, bfr->size,
+ &bfr->iova, GFP_KERNEL);
+ if (!bfr->buffer)
+ return -ENOMEM;
+ return 0;
+ }
+
+ if (!helper->sram) {
+ dev_err(helper->dev,
+			"RTKit buffer request with no SRAM region: %pR\n", &res);
+ return -EFAULT;
+ }
+
+ res.flags = helper->sram->flags;
+
+ if (res.end < res.start || !resource_contains(helper->sram, &res)) {
+ dev_err(helper->dev,
+			"RTKit buffer request outside SRAM region: %pR\n", &res);
+ return -EFAULT;
+ }
+
+ bfr->iomem = helper->sram_base + (res.start - helper->sram->start);
+ bfr->is_mapped = true;
+
+ return 0;
+}
+
+static void apple_rtkit_helper_shmem_destroy(void *cookie, struct apple_rtkit_shmem *bfr)
+{
+	/* no-op */
+}
+
+static const struct apple_rtkit_ops apple_rtkit_helper_ops = {
+ .shmem_setup = apple_rtkit_helper_shmem_setup,
+ .shmem_destroy = apple_rtkit_helper_shmem_destroy,
+};
+
+static int apple_rtkit_helper_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct apple_rtkit_helper *helper;
+ int ret;
+
+ helper = devm_kzalloc(dev, sizeof(*helper), GFP_KERNEL);
+ if (!helper)
+ return -ENOMEM;
+
+ helper->dev = dev;
+ platform_set_drvdata(pdev, helper);
+
+ helper->asc_base = devm_platform_ioremap_resource_byname(pdev, "asc");
+ if (IS_ERR(helper->asc_base))
+ return PTR_ERR(helper->asc_base);
+
+ helper->sram = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+ if (helper->sram) {
+ helper->sram_base = devm_ioremap_resource(dev, helper->sram);
+ if (IS_ERR(helper->sram_base))
+ return dev_err_probe(dev, PTR_ERR(helper->sram_base),
+ "Failed to map SRAM region");
+ }
+
+ helper->rtk =
+ devm_apple_rtkit_init(dev, helper, NULL, 0, &apple_rtkit_helper_ops);
+ if (IS_ERR(helper->rtk))
+ return dev_err_probe(dev, PTR_ERR(helper->rtk),
+				     "Failed to initialize RTKit");
+
+ writel_relaxed(APPLE_ASC_CPU_CONTROL_RUN,
+ helper->asc_base + APPLE_ASC_CPU_CONTROL);
+
+ /* Works for both wake and boot */
+ ret = apple_rtkit_wake(helper->rtk);
+ if (ret != 0)
+ return dev_err_probe(dev, ret, "Failed to wake up coprocessor");
+
+ return 0;
+}
+
+static int apple_rtkit_helper_remove(struct platform_device *pdev)
+{
+ struct apple_rtkit_helper *helper = platform_get_drvdata(pdev);
+
+ if (apple_rtkit_is_running(helper->rtk))
+ apple_rtkit_quiesce(helper->rtk);
+
+ writel_relaxed(0, helper->asc_base + APPLE_ASC_CPU_CONTROL);
+
+ return 0;
+}
+
+static const struct of_device_id apple_rtkit_helper_of_match[] = {
+ { .compatible = "apple,rtk-helper-asc4" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, apple_rtkit_helper_of_match);
+
+static struct platform_driver apple_rtkit_helper_driver = {
+ .driver = {
+ .name = "rtkit-helper",
+ .of_match_table = apple_rtkit_helper_of_match,
+ },
+ .probe = apple_rtkit_helper_probe,
+ .remove = apple_rtkit_helper_remove,
+};
+module_platform_driver(apple_rtkit_helper_driver);
+
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple RTKit helper driver");
diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c
index 031ec4aa06d5..ecc1a818b23d 100644
--- a/drivers/soc/apple/rtkit.c
+++ b/drivers/soc/apple/rtkit.c
@@ -11,6 +11,7 @@ enum {
APPLE_RTKIT_PWR_STATE_SLEEP = 0x01, /* sleeping, can be restarted */
APPLE_RTKIT_PWR_STATE_QUIESCED = 0x10, /* running but no communication */
APPLE_RTKIT_PWR_STATE_ON = 0x20, /* normal operating state */
+ APPLE_RTKIT_PWR_STATE_INIT = 0x220, /* init after starting the coproc */
};
enum {
@@ -54,7 +55,7 @@ enum {
#define APPLE_RTKIT_BUFFER_REQUEST 1
#define APPLE_RTKIT_BUFFER_REQUEST_SIZE GENMASK_ULL(51, 44)
-#define APPLE_RTKIT_BUFFER_REQUEST_IOVA GENMASK_ULL(41, 0)
+#define APPLE_RTKIT_BUFFER_REQUEST_IOVA GENMASK_ULL(43, 0)
#define APPLE_RTKIT_SYSLOG_TYPE GENMASK_ULL(59, 52)
@@ -101,12 +102,20 @@ bool apple_rtkit_is_crashed(struct apple_rtkit *rtk)
}
EXPORT_SYMBOL_GPL(apple_rtkit_is_crashed);
-static void apple_rtkit_management_send(struct apple_rtkit *rtk, u8 type,
+static int apple_rtkit_management_send(struct apple_rtkit *rtk, u8 type,
u64 msg)
{
+ int ret;
+
msg &= ~APPLE_RTKIT_MGMT_TYPE;
msg |= FIELD_PREP(APPLE_RTKIT_MGMT_TYPE, type);
- apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_MGMT, msg, NULL, false);
+ ret = apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_MGMT, msg, NULL, false);
+
+	if (ret)
+		dev_err(rtk->dev, "RTKit: Failed to send management message: %d\n", ret);
+
+ return ret;
}
static void apple_rtkit_management_rx_hello(struct apple_rtkit *rtk, u64 msg)
@@ -299,6 +308,9 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
return 0;
error:
+ dev_err(rtk->dev, "RTKit: failed buffer request for 0x%zx bytes (%d)\n",
+ buffer->size, err);
+
buffer->buffer = NULL;
buffer->iomem = NULL;
buffer->iova = 0;
@@ -592,11 +604,18 @@ int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
int ret;
gfp_t flags;
- if (rtk->crashed)
+ if (rtk->crashed) {
+ dev_warn(rtk->dev,
+ "RTKit: Device is crashed, cannot send message\n");
return -EINVAL;
+ }
+
if (ep >= APPLE_RTKIT_APP_ENDPOINT_START &&
- !apple_rtkit_is_running(rtk))
+ !apple_rtkit_is_running(rtk)) {
+ dev_warn(rtk->dev,
+ "RTKit: Endpoint 0x%02x is not running, cannot send message\n", ep);
return -EINVAL;
+ }
if (atomic)
flags = GFP_ATOMIC;
@@ -628,38 +647,6 @@ int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
}
EXPORT_SYMBOL_GPL(apple_rtkit_send_message);
-int apple_rtkit_send_message_wait(struct apple_rtkit *rtk, u8 ep, u64 message,
- unsigned long timeout, bool atomic)
-{
- DECLARE_COMPLETION_ONSTACK(completion);
- int ret;
- long t;
-
- ret = apple_rtkit_send_message(rtk, ep, message, &completion, atomic);
- if (ret < 0)
- return ret;
-
- if (atomic) {
- ret = mbox_flush(rtk->mbox_chan, timeout);
- if (ret < 0)
- return ret;
-
- if (try_wait_for_completion(&completion))
- return 0;
-
- return -ETIME;
- } else {
- t = wait_for_completion_interruptible_timeout(
- &completion, msecs_to_jiffies(timeout));
- if (t < 0)
- return t;
- else if (t == 0)
- return -ETIME;
- return 0;
- }
-}
-EXPORT_SYMBOL_GPL(apple_rtkit_send_message_wait);
-
int apple_rtkit_poll(struct apple_rtkit *rtk)
{
return mbox_client_peek_data(rtk->mbox_chan);
@@ -695,7 +682,8 @@ static int apple_rtkit_request_mbox_chan(struct apple_rtkit *rtk)
if (IS_ERR(rtk->mbox_chan))
return PTR_ERR(rtk->mbox_chan);
- return 0;
+
+ return mbox_start_channel(rtk->mbox_chan);
}
static struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
@@ -728,6 +716,7 @@ static struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
rtk->mbox_cl.dev = dev;
rtk->mbox_cl.tx_block = false;
rtk->mbox_cl.knows_txdone = false;
+ rtk->mbox_cl.defer_startup = true;
rtk->mbox_cl.rx_callback = &apple_rtkit_rx;
rtk->mbox_cl.tx_done = &apple_rtkit_tx_done;
@@ -805,8 +794,10 @@ static int apple_rtkit_set_ap_power_state(struct apple_rtkit *rtk,
reinit_completion(&rtk->ap_pwr_ack_completion);
msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
- apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_AP_PWR_STATE,
- msg);
+ ret = apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_AP_PWR_STATE,
+ msg);
+ if (ret)
+ return ret;
ret = apple_rtkit_wait_for_completion(&rtk->ap_pwr_ack_completion);
if (ret)
@@ -826,8 +817,10 @@ static int apple_rtkit_set_iop_power_state(struct apple_rtkit *rtk,
reinit_completion(&rtk->iop_pwr_ack_completion);
msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
- apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
- msg);
+ ret = apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
+ msg);
+ if (ret)
+ return ret;
ret = apple_rtkit_wait_for_completion(&rtk->iop_pwr_ack_completion);
if (ret)
@@ -908,6 +901,7 @@ EXPORT_SYMBOL_GPL(apple_rtkit_quiesce);
int apple_rtkit_wake(struct apple_rtkit *rtk)
{
u64 msg;
+ int ret;
if (apple_rtkit_is_running(rtk))
return -EINVAL;
@@ -918,9 +912,11 @@ int apple_rtkit_wake(struct apple_rtkit *rtk)
* Use open-coded apple_rtkit_set_iop_power_state since apple_rtkit_boot
* will wait for the completion anyway.
*/
- msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, APPLE_RTKIT_PWR_STATE_ON);
- apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
- msg);
+ msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, APPLE_RTKIT_PWR_STATE_INIT);
+ ret = apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
+ msg);
+ if (ret)
+ return ret;
return apple_rtkit_boot(rtk);
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index d1bb62f7368b..67fa0af23a51 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -79,6 +79,14 @@ config SPI_ALTERA_DFL
Altera SPI master controller. The SPI master is connected
to a SPI slave to Avalon bridge in a Intel MAX BMC.
+config SPI_APPLE
+ tristate "Apple SoC SPI Controller platform driver"
+ depends on ARCH_APPLE || COMPILE_TEST
+ help
+ This enables support for the SPI controller present on
+ many Apple SoCs, including the t8103 (M1) and t600x
+ (M1 Pro/Max).
+
config SPI_AR934X
tristate "Qualcomm Atheros AR934X/QCA95XX SPI controller driver"
depends on ATH79 || COMPILE_TEST
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 4b34e855c841..fcdf201a2190 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o
obj-$(CONFIG_SPI_ALTERA) += spi-altera-platform.o
obj-$(CONFIG_SPI_ALTERA_CORE) += spi-altera-core.o
obj-$(CONFIG_SPI_ALTERA_DFL) += spi-altera-dfl.o
+obj-$(CONFIG_SPI_APPLE) += spi-apple.o
obj-$(CONFIG_SPI_AR934X) += spi-ar934x.o
obj-$(CONFIG_SPI_ARMADA_3700) += spi-armada-3700.o
obj-$(CONFIG_SPI_ASPEED_SMC) += spi-aspeed-smc.o
diff --git a/drivers/spi/spi-apple.c b/drivers/spi/spi-apple.c
new file mode 100644
index 000000000000..c483ad3f69ef
--- /dev/null
+++ b/drivers/spi/spi-apple.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Apple SoC SPI device driver
+ *
+ * Copyright The Asahi Linux Contributors
+ *
+ * Based on spi-sifive.c, Copyright 2018 SiFive, Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+
+#define APPLE_SPI_CTRL 0x000
+#define APPLE_SPI_CTRL_RUN BIT(0)
+#define APPLE_SPI_CTRL_TX_RESET BIT(2)
+#define APPLE_SPI_CTRL_RX_RESET BIT(3)
+
+#define APPLE_SPI_CFG 0x004
+#define APPLE_SPI_CFG_CPHA BIT(1)
+#define APPLE_SPI_CFG_CPOL BIT(2)
+#define APPLE_SPI_CFG_MODE GENMASK(6, 5)
+#define APPLE_SPI_CFG_MODE_POLLED 0
+#define APPLE_SPI_CFG_MODE_IRQ 1
+#define APPLE_SPI_CFG_MODE_DMA 2
+#define APPLE_SPI_CFG_IE_RXCOMPLETE BIT(7)
+#define APPLE_SPI_CFG_IE_TXRXTHRESH BIT(8)
+#define APPLE_SPI_CFG_LSB_FIRST BIT(13)
+#define APPLE_SPI_CFG_WORD_SIZE GENMASK(16, 15)
+#define APPLE_SPI_CFG_WORD_SIZE_8B 0
+#define APPLE_SPI_CFG_WORD_SIZE_16B 1
+#define APPLE_SPI_CFG_WORD_SIZE_32B 2
+#define APPLE_SPI_CFG_FIFO_THRESH GENMASK(18, 17)
+#define APPLE_SPI_CFG_FIFO_THRESH_8B 0
+#define APPLE_SPI_CFG_FIFO_THRESH_4B 1
+#define APPLE_SPI_CFG_FIFO_THRESH_1B 2
+#define APPLE_SPI_CFG_IE_TXCOMPLETE BIT(21)
+
+#define APPLE_SPI_STATUS 0x008
+#define APPLE_SPI_STATUS_RXCOMPLETE BIT(0)
+#define APPLE_SPI_STATUS_TXRXTHRESH BIT(1)
+#define APPLE_SPI_STATUS_TXCOMPLETE BIT(2)
+
+#define APPLE_SPI_PIN 0x00c
+#define APPLE_SPI_PIN_KEEP_MOSI BIT(0)
+#define APPLE_SPI_PIN_CS BIT(1)
+
+#define APPLE_SPI_TXDATA 0x010
+#define APPLE_SPI_RXDATA 0x020
+#define APPLE_SPI_CLKDIV 0x030
+#define APPLE_SPI_CLKDIV_MAX 0x7ff
+#define APPLE_SPI_RXCNT 0x034
+#define APPLE_SPI_WORD_DELAY 0x038
+#define APPLE_SPI_TXCNT 0x04c
+
+#define APPLE_SPI_FIFOSTAT 0x10c
+#define APPLE_SPI_FIFOSTAT_TXFULL BIT(4)
+#define APPLE_SPI_FIFOSTAT_LEVEL_TX GENMASK(15, 8)
+#define APPLE_SPI_FIFOSTAT_RXEMPTY BIT(20)
+#define APPLE_SPI_FIFOSTAT_LEVEL_RX GENMASK(31, 24)
+
+#define APPLE_SPI_IE_XFER 0x130
+#define APPLE_SPI_IF_XFER 0x134
+#define APPLE_SPI_XFER_RXCOMPLETE BIT(0)
+#define APPLE_SPI_XFER_TXCOMPLETE BIT(1)
+
+#define APPLE_SPI_IE_FIFO 0x138
+#define APPLE_SPI_IF_FIFO 0x13c
+#define APPLE_SPI_FIFO_RXTHRESH BIT(4)
+#define APPLE_SPI_FIFO_TXTHRESH BIT(5)
+#define APPLE_SPI_FIFO_RXFULL BIT(8)
+#define APPLE_SPI_FIFO_TXEMPTY BIT(9)
+#define APPLE_SPI_FIFO_RXUNDERRUN BIT(16)
+#define APPLE_SPI_FIFO_TXOVERFLOW BIT(17)
+
+#define APPLE_SPI_SHIFTCFG 0x150
+#define APPLE_SPI_SHIFTCFG_CLK_ENABLE BIT(0)
+#define APPLE_SPI_SHIFTCFG_CS_ENABLE BIT(1)
+#define APPLE_SPI_SHIFTCFG_AND_CLK_DATA BIT(8)
+#define APPLE_SPI_SHIFTCFG_CS_AS_DATA BIT(9)
+#define APPLE_SPI_SHIFTCFG_TX_ENABLE BIT(10)
+#define APPLE_SPI_SHIFTCFG_RX_ENABLE BIT(11)
+#define APPLE_SPI_SHIFTCFG_BITS GENMASK(21, 16)
+#define APPLE_SPI_SHIFTCFG_OVERRIDE_CS BIT(24)
+
+#define APPLE_SPI_PINCFG 0x154
+#define APPLE_SPI_PINCFG_KEEP_CLK BIT(0)
+#define APPLE_SPI_PINCFG_KEEP_CS BIT(1)
+#define APPLE_SPI_PINCFG_KEEP_MOSI BIT(2)
+#define APPLE_SPI_PINCFG_CLK_IDLE_VAL BIT(8)
+#define APPLE_SPI_PINCFG_CS_IDLE_VAL BIT(9)
+#define APPLE_SPI_PINCFG_MOSI_IDLE_VAL BIT(10)
+
+#define APPLE_SPI_DELAY_PRE 0x160
+#define APPLE_SPI_DELAY_POST 0x168
+#define APPLE_SPI_DELAY_ENABLE BIT(0)
+#define APPLE_SPI_DELAY_NO_INTERBYTE BIT(1)
+#define APPLE_SPI_DELAY_SET_SCK BIT(4)
+#define APPLE_SPI_DELAY_SET_MOSI BIT(6)
+#define APPLE_SPI_DELAY_SCK_VAL BIT(8)
+#define APPLE_SPI_DELAY_MOSI_VAL BIT(12)
+
+#define APPLE_SPI_FIFO_DEPTH 16
+
+/*
+ * The slowest refclock available is 24MHz, the highest divider is 0x7ff,
+ * the largest word size is 32 bits, the FIFO depth is 16, the maximum
+ * intra-word delay is 0xffff refclocks. So the maximum time a transfer
+ * cycle can take is:
+ *
+ * (0x7ff * 32 + 0xffff) * 16 / 24e6 Hz ~= 87ms
+ *
+ * Double it and round it up to 200ms for good measure.
+ */
+#define APPLE_SPI_TIMEOUT_MS 200
+
+struct apple_spi {
+ void __iomem *regs; /* MMIO register address */
+ struct clk *clk; /* bus clock */
+ struct completion done; /* wake-up from interrupt */
+};
+
+static inline void reg_write(struct apple_spi *spi, int offset, u32 value)
+{
+ writel_relaxed(value, spi->regs + offset);
+}
+
+static inline u32 reg_read(struct apple_spi *spi, int offset)
+{
+ return readl_relaxed(spi->regs + offset);
+}
+
+static inline void reg_mask(struct apple_spi *spi, int offset, u32 clear, u32 set)
+{
+ u32 val = reg_read(spi, offset);
+
+ val &= ~clear;
+ val |= set;
+ reg_write(spi, offset, val);
+}
+
+static void apple_spi_init(struct apple_spi *spi)
+{
+ /* Set CS high (inactive) and disable override and auto-CS */
+ reg_write(spi, APPLE_SPI_PIN, APPLE_SPI_PIN_CS);
+ reg_mask(spi, APPLE_SPI_SHIFTCFG, APPLE_SPI_SHIFTCFG_OVERRIDE_CS, 0);
+ reg_mask(spi, APPLE_SPI_PINCFG, APPLE_SPI_PINCFG_CS_IDLE_VAL, APPLE_SPI_PINCFG_KEEP_CS);
+
+ /* Reset FIFOs */
+ reg_write(spi, APPLE_SPI_CTRL, APPLE_SPI_CTRL_RX_RESET | APPLE_SPI_CTRL_TX_RESET);
+
+ /* Configure defaults */
+ reg_write(spi, APPLE_SPI_CFG,
+ FIELD_PREP(APPLE_SPI_CFG_FIFO_THRESH, APPLE_SPI_CFG_FIFO_THRESH_8B) |
+ FIELD_PREP(APPLE_SPI_CFG_MODE, APPLE_SPI_CFG_MODE_IRQ) |
+ FIELD_PREP(APPLE_SPI_CFG_WORD_SIZE, APPLE_SPI_CFG_WORD_SIZE_8B));
+
+ /* Disable IRQs */
+ reg_write(spi, APPLE_SPI_IE_FIFO, 0);
+ reg_write(spi, APPLE_SPI_IE_XFER, 0);
+
+ /* Disable delays */
+ reg_write(spi, APPLE_SPI_DELAY_PRE, 0);
+ reg_write(spi, APPLE_SPI_DELAY_POST, 0);
+}
+
+static int apple_spi_prepare_message(struct spi_controller *ctlr, struct spi_message *msg)
+{
+ struct apple_spi *spi = spi_controller_get_devdata(ctlr);
+ struct spi_device *device = msg->spi;
+
+ u32 cfg = ((device->mode & SPI_CPHA ? APPLE_SPI_CFG_CPHA : 0) |
+ (device->mode & SPI_CPOL ? APPLE_SPI_CFG_CPOL : 0) |
+ (device->mode & SPI_LSB_FIRST ? APPLE_SPI_CFG_LSB_FIRST : 0));
+
+ /* Update core config */
+ reg_mask(spi, APPLE_SPI_CFG,
+ APPLE_SPI_CFG_CPHA | APPLE_SPI_CFG_CPOL | APPLE_SPI_CFG_LSB_FIRST, cfg);
+
+ return 0;
+}
+
+static void apple_spi_set_cs(struct spi_device *device, bool is_high)
+{
+ struct apple_spi *spi = spi_controller_get_devdata(device->controller);
+
+ reg_mask(spi, APPLE_SPI_PIN, APPLE_SPI_PIN_CS, is_high ? APPLE_SPI_PIN_CS : 0);
+}
+
+static bool apple_spi_prep_transfer(struct apple_spi *spi, struct spi_transfer *t)
+{
+ u32 cr, fifo_threshold;
+
+ /* Calculate and program the clock rate */
+ cr = DIV_ROUND_UP(clk_get_rate(spi->clk), t->speed_hz);
+ reg_write(spi, APPLE_SPI_CLKDIV, min_t(u32, cr, APPLE_SPI_CLKDIV_MAX));
+
+ /* Update bits per word */
+ reg_mask(spi, APPLE_SPI_SHIFTCFG, APPLE_SPI_SHIFTCFG_BITS,
+ FIELD_PREP(APPLE_SPI_SHIFTCFG_BITS, t->bits_per_word));
+
+	/*
+	 * We will want to poll if the time we need to wait is less than the
+	 * context-switching time. Let's call that threshold 5us. The
+	 * operation will take:
+	 *   bits_per_word * fifo_threshold / hz <= 5 * 10^-6
+	 *   200000 * bits_per_word * fifo_threshold <= hz
+	 */
+ fifo_threshold = APPLE_SPI_FIFO_DEPTH / 2;
+ return (200000 * t->bits_per_word * fifo_threshold) <= t->speed_hz;
+}
+
+static irqreturn_t apple_spi_irq(int irq, void *dev_id)
+{
+ struct apple_spi *spi = dev_id;
+ u32 fifo = reg_read(spi, APPLE_SPI_IF_FIFO) & reg_read(spi, APPLE_SPI_IE_FIFO);
+ u32 xfer = reg_read(spi, APPLE_SPI_IF_XFER) & reg_read(spi, APPLE_SPI_IE_XFER);
+
+ if (fifo || xfer) {
+ /* Disable interrupts until next transfer */
+ reg_write(spi, APPLE_SPI_IE_XFER, 0);
+ reg_write(spi, APPLE_SPI_IE_FIFO, 0);
+ complete(&spi->done);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int apple_spi_wait(struct apple_spi *spi, u32 fifo_bit, u32 xfer_bit, int poll)
+{
+ int ret = 0;
+
+ if (poll) {
+ u32 fifo, xfer;
+ unsigned long timeout = jiffies + APPLE_SPI_TIMEOUT_MS * HZ / 1000;
+
+ do {
+ fifo = reg_read(spi, APPLE_SPI_IF_FIFO);
+ xfer = reg_read(spi, APPLE_SPI_IF_XFER);
+ if (time_after(jiffies, timeout)) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+ } while (!((fifo & fifo_bit) || (xfer & xfer_bit)));
+ } else {
+ reinit_completion(&spi->done);
+ reg_write(spi, APPLE_SPI_IE_XFER, xfer_bit);
+ reg_write(spi, APPLE_SPI_IE_FIFO, fifo_bit);
+
+ if (!wait_for_completion_timeout(&spi->done,
+ msecs_to_jiffies(APPLE_SPI_TIMEOUT_MS)))
+ ret = -ETIMEDOUT;
+
+ reg_write(spi, APPLE_SPI_IE_XFER, 0);
+ reg_write(spi, APPLE_SPI_IE_FIFO, 0);
+ }
+
+ return ret;
+}
+
+static void apple_spi_tx(struct apple_spi *spi, const void **tx_ptr, u32 *left,
+ unsigned int bytes_per_word)
+{
+ u32 inuse, words, wrote;
+
+ if (!*tx_ptr)
+ return;
+
+ inuse = FIELD_GET(APPLE_SPI_FIFOSTAT_LEVEL_TX, reg_read(spi, APPLE_SPI_FIFOSTAT));
+ words = wrote = min_t(u32, *left, APPLE_SPI_FIFO_DEPTH - inuse);
+
+ if (!words)
+ return;
+
+ *left -= words;
+
+ switch (bytes_per_word) {
+ case 1: {
+ const u8 *p = *tx_ptr;
+
+ while (words--)
+ reg_write(spi, APPLE_SPI_TXDATA, *p++);
+ break;
+ }
+ case 2: {
+ const u16 *p = *tx_ptr;
+
+ while (words--)
+ reg_write(spi, APPLE_SPI_TXDATA, *p++);
+ break;
+ }
+ case 4: {
+ const u32 *p = *tx_ptr;
+
+ while (words--)
+ reg_write(spi, APPLE_SPI_TXDATA, *p++);
+ break;
+ }
+ default:
+ WARN_ON(1);
+ }
+
+ *tx_ptr = ((u8 *)*tx_ptr) + bytes_per_word * wrote;
+}
+
+static void apple_spi_rx(struct apple_spi *spi, void **rx_ptr, u32 *left,
+ unsigned int bytes_per_word)
+{
+ u32 words, read;
+
+ if (!*rx_ptr)
+ return;
+
+ words = read = FIELD_GET(APPLE_SPI_FIFOSTAT_LEVEL_RX, reg_read(spi, APPLE_SPI_FIFOSTAT));
+ WARN_ON(words > *left);
+
+ if (!words)
+ return;
+
+ *left -= min_t(u32, *left, words);
+
+ switch (bytes_per_word) {
+ case 1: {
+ u8 *p = *rx_ptr;
+
+ while (words--)
+ *p++ = reg_read(spi, APPLE_SPI_RXDATA);
+ break;
+ }
+ case 2: {
+ u16 *p = *rx_ptr;
+
+ while (words--)
+ *p++ = reg_read(spi, APPLE_SPI_RXDATA);
+ break;
+ }
+ case 4: {
+ u32 *p = *rx_ptr;
+
+ while (words--)
+ *p++ = reg_read(spi, APPLE_SPI_RXDATA);
+ break;
+ }
+ default:
+ WARN_ON(1);
+ }
+
+ *rx_ptr = ((u8 *)*rx_ptr) + bytes_per_word * read;
+}
+
+static int apple_spi_transfer_one(struct spi_controller *ctlr, struct spi_device *device,
+ struct spi_transfer *t)
+{
+ struct apple_spi *spi = spi_controller_get_devdata(ctlr);
+ bool poll = apple_spi_prep_transfer(spi, t);
+ const void *tx_ptr = t->tx_buf;
+ void *rx_ptr = t->rx_buf;
+ unsigned int bytes_per_word;
+ u32 words, remaining_tx, remaining_rx;
+ u32 xfer_flags = 0;
+ u32 fifo_flags;
+ int retries = 100;
+ int ret = 0;
+
+ if (t->bits_per_word > 16)
+ bytes_per_word = 4;
+ else if (t->bits_per_word > 8)
+ bytes_per_word = 2;
+ else
+ bytes_per_word = 1;
+
+ words = t->len / bytes_per_word;
+ remaining_tx = tx_ptr ? words : 0;
+ remaining_rx = rx_ptr ? words : 0;
+
+ /* Reset FIFOs */
+ reg_write(spi, APPLE_SPI_CTRL, APPLE_SPI_CTRL_RX_RESET | APPLE_SPI_CTRL_TX_RESET);
+
+ /* Clear IRQ flags */
+ reg_write(spi, APPLE_SPI_IF_XFER, ~0);
+ reg_write(spi, APPLE_SPI_IF_FIFO, ~0);
+
+ /* Determine transfer completion flags we wait for */
+ if (tx_ptr)
+ xfer_flags |= APPLE_SPI_XFER_TXCOMPLETE;
+ if (rx_ptr)
+ xfer_flags |= APPLE_SPI_XFER_RXCOMPLETE;
+
+ /* Set transfer length */
+ reg_write(spi, APPLE_SPI_TXCNT, remaining_tx);
+ reg_write(spi, APPLE_SPI_RXCNT, remaining_rx);
+
+ /* Prime transmit FIFO */
+ apple_spi_tx(spi, &tx_ptr, &remaining_tx, bytes_per_word);
+
+ /* Start transfer */
+ reg_write(spi, APPLE_SPI_CTRL, APPLE_SPI_CTRL_RUN);
+
+ /* TX again since a few words get popped off immediately */
+ apple_spi_tx(spi, &tx_ptr, &remaining_tx, bytes_per_word);
+
+ while (xfer_flags) {
+ fifo_flags = 0;
+
+ if (remaining_tx)
+ fifo_flags |= APPLE_SPI_FIFO_TXTHRESH;
+ if (remaining_rx)
+ fifo_flags |= APPLE_SPI_FIFO_RXTHRESH;
+
+ /* Wait for anything to happen */
+ ret = apple_spi_wait(spi, fifo_flags, xfer_flags, poll);
+ if (ret) {
+ dev_err(&ctlr->dev, "transfer timed out (remaining %d tx, %d rx)\n",
+ remaining_tx, remaining_rx);
+ goto err;
+ }
+
+ /* Stop waiting on transfer halves once they complete */
+ xfer_flags &= ~reg_read(spi, APPLE_SPI_IF_XFER);
+
+ /* Transmit and receive everything we can */
+ apple_spi_tx(spi, &tx_ptr, &remaining_tx, bytes_per_word);
+ apple_spi_rx(spi, &rx_ptr, &remaining_rx, bytes_per_word);
+ }
+
+ /*
+ * Sometimes the transfer completes before the last word is in the RX FIFO.
+ * Normally one retry is all it takes to get the last word out.
+ */
+ while (remaining_rx && retries--)
+ apple_spi_rx(spi, &rx_ptr, &remaining_rx, bytes_per_word);
+
+ if (remaining_tx)
+ dev_err(&ctlr->dev, "transfer completed with %d words left to transmit\n",
+ remaining_tx);
+ if (remaining_rx)
+ dev_err(&ctlr->dev, "transfer completed with %d words left to receive\n",
+ remaining_rx);
+
+err:
+ fifo_flags = reg_read(spi, APPLE_SPI_IF_FIFO);
+ WARN_ON(fifo_flags & APPLE_SPI_FIFO_TXOVERFLOW);
+ WARN_ON(fifo_flags & APPLE_SPI_FIFO_RXUNDERRUN);
+
+ /* Stop transfer */
+ reg_write(spi, APPLE_SPI_CTRL, 0);
+
+ return ret;
+}
+
+static void apple_spi_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static int apple_spi_probe(struct platform_device *pdev)
+{
+ struct apple_spi *spi;
+ int ret, irq;
+ struct spi_controller *ctlr;
+
+ ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct apple_spi));
+ if (!ctlr)
+ return dev_err_probe(&pdev->dev, -ENOMEM, "out of memory\n");
+
+ spi = spi_controller_get_devdata(ctlr);
+ init_completion(&spi->done);
+ platform_set_drvdata(pdev, ctlr);
+
+ spi->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(spi->regs))
+ return PTR_ERR(spi->regs);
+
+ spi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(spi->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(spi->clk), "Unable to find bus clock\n");
+
+ irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, apple_spi_irq, 0,
+ dev_name(&pdev->dev), spi);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Unable to bind to interrupt\n");
+
+ ret = clk_prepare_enable(spi->clk);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Unable to enable bus clock\n");
+
+ ret = devm_add_action_or_reset(&pdev->dev, apple_spi_clk_disable_unprepare, spi->clk);
+ if (ret)
+ return ret;
+
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->bus_num = pdev->id;
+ ctlr->num_chipselect = 1;
+ ctlr->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST;
+ ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
+ ctlr->flags = 0;
+ ctlr->prepare_message = apple_spi_prepare_message;
+ ctlr->set_cs = apple_spi_set_cs;
+ ctlr->transfer_one = apple_spi_transfer_one;
+ ctlr->auto_runtime_pm = true;
+
+ pm_runtime_set_active(&pdev->dev);
+ devm_pm_runtime_enable(&pdev->dev);
+
+ apple_spi_init(spi);
+
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "devm_spi_register_controller failed\n");
+
+ return 0;
+}
+
+static const struct of_device_id apple_spi_of_match[] = {
+ { .compatible = "apple,spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, apple_spi_of_match);
+
+static struct platform_driver apple_spi_driver = {
+ .probe = apple_spi_probe,
+ .driver = {
+ .name = "apple-spi",
+ .of_match_table = apple_spi_of_match,
+ },
+};
+module_platform_driver(apple_spi_driver);
+
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_DESCRIPTION("Apple SoC SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 5f9aedd1f0b6..ba186d3285f1 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2208,6 +2208,22 @@ void spi_flush_queue(struct spi_controller *ctlr)
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_OF)
+static void of_spi_parse_dt_cs_delay(struct device_node *nc,
+ struct spi_delay *delay, const char *prop)
+{
+ u32 value;
+
+ if (!of_property_read_u32(nc, prop, &value)) {
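+		/* spi_delay.value is a u16, so carry larger delays as microseconds */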
+ if (value > U16_MAX) {
+ delay->value = DIV_ROUND_UP(value, 1000);
+ delay->unit = SPI_DELAY_UNIT_USECS;
+ } else {
+ delay->value = value;
+ delay->unit = SPI_DELAY_UNIT_NSECS;
+ }
+ }
+}
+
static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
struct device_node *nc)
{
@@ -2297,6 +2313,11 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
if (!of_property_read_u32(nc, "spi-max-frequency", &value))
spi->max_speed_hz = value;
+ /* Device CS delays */
+ of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
+ of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
+ of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");
+
return 0;
}
diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig
index 737802046314..96c73c5b5720 100644
--- a/drivers/spmi/Kconfig
+++ b/drivers/spmi/Kconfig
@@ -45,4 +45,12 @@ config SPMI_MTK_PMIF
This is required for communicating with Mediatek PMICs and
other devices that have the SPMI interface.
+config SPMI_APPLE
+ tristate "Apple SoC SPMI Controller platform driver"
+ depends on ARCH_APPLE || COMPILE_TEST
+ help
+ This enables basic support for the SPMI controller present on
+ many Apple SoCs, including the t8103 (M1) and t600x
+ (M1 Pro/Max).
+
endif
diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile
index 9d974424c8c1..989b84bbca60 100644
--- a/drivers/spmi/Makefile
+++ b/drivers/spmi/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_SPMI) += spmi.o
obj-$(CONFIG_SPMI_HISI3670) += hisi-spmi-controller.o
obj-$(CONFIG_SPMI_MSM_PMIC_ARB) += spmi-pmic-arb.o
obj-$(CONFIG_SPMI_MTK_PMIF) += spmi-mtk-pmif.o
+obj-$(CONFIG_SPMI_APPLE) += spmi-apple-controller.o
diff --git a/drivers/spmi/spmi-apple-controller.c b/drivers/spmi/spmi-apple-controller.c
new file mode 100644
index 000000000000..c14c4874654d
--- /dev/null
+++ b/drivers/spmi/spmi-apple-controller.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Apple SoC SPMI device driver
+ *
+ * Copyright The Asahi Linux Contributors
+ *
+ * Inspired by:
+ * OpenBSD support Copyright (c) 2021 Mark Kettenis <kettenis@openbsd.org>
+ * Corellium support Copyright (C) 2021 Corellium LLC
+ * hisi-spmi-controller.c
+ * spmi-pmic-arb.c Copyright (c) 2021, The Linux Foundation.
+ */
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/spmi.h>
+
+/* SPMI Controller Registers */
+#define SPMI_STATUS_REG 0
+#define SPMI_CMD_REG 0x4
+#define SPMI_RSP_REG 0x8
+
+#define SPMI_RX_FIFO_EMPTY BIT(24)
+#define SPMI_TX_FIFO_EMPTY BIT(8)
+
+/* Apple SPMI controller */
+struct apple_spmi {
+ void __iomem *regs;
+ struct spmi_controller *ctrl;
+};
+
+static inline u32 read_reg(struct apple_spmi *spmi, int offset)
+{
+	return readl_relaxed(spmi->regs + offset);
+}
+
+static inline void write_reg(u32 value, struct apple_spmi *spmi, int offset)
+{
+ writel_relaxed(value, spmi->regs + offset);
+}
+
+static int spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 slave_id,
+ u16 slave_addr, u8 *__buf, size_t bc)
+{
+ struct apple_spmi *spmi;
+ u32 spmi_cmd = opc | slave_id << 8 | slave_addr << 16 | (bc - 1) |
+ (1 << 15);
+ u32 rsp;
+	u32 status;
+ size_t len_to_read;
+ u8 i;
+
+ spmi = spmi_controller_get_drvdata(ctrl);
+
+ write_reg(spmi_cmd, spmi, SPMI_CMD_REG);
+
+ /* Wait for Rx FIFO to have something */
+ /* Quite ugly msleep, need to find a better way to do it */
+ i = 0;
+ do {
+ status = read_reg(spmi, SPMI_STATUS_REG);
+ msleep(10);
+ i += 1;
+ } while ((status & SPMI_RX_FIFO_EMPTY) && i < 5);
+
+ if (i >= 5) {
+		dev_err(&ctrl->dev,
+			"spmi_read_cmd: took too long to get the status\n");
+		return -ETIMEDOUT;
+ }
+
+ /* Read SPMI reply status */
+ rsp = read_reg(spmi, SPMI_RSP_REG);
+
+ len_to_read = 0;
+ /* Read SPMI data reply */
+ while (!(status & SPMI_RX_FIFO_EMPTY) && (len_to_read < bc)) {
+ rsp = read_reg(spmi, SPMI_RSP_REG);
+ i = 0;
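+		/* Unpack up to four bytes, LSB first, from each 32-bit reply word */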
+ while ((len_to_read < bc) && (i < 4)) {
+ __buf[len_to_read++] = ((0xff << (8 * i)) & rsp) >>
+ (8 * i);
+ i += 1;
+ }
+ }
+
+ return 0;
+}
+
+static int spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 slave_id,
+ u16 slave_addr, const u8 *__buf, size_t bc)
+{
+ struct apple_spmi *spmi;
+ u32 spmi_cmd = opc | slave_id << 8 | slave_addr << 16 | (bc - 1) |
+ (1 << 15);
+	u32 rsp;
+	u32 status;
+ size_t i = 0, j;
+
+ spmi = spmi_controller_get_drvdata(ctrl);
+
+ write_reg(spmi_cmd, spmi, SPMI_CMD_REG);
+
+ while (i < bc) {
+ j = 0;
+ spmi_cmd = 0;
+		while ((j < 4) && (i < bc)) {
+ spmi_cmd |= __buf[i++] << (j++ * 8);
+ }
+ write_reg(spmi_cmd, spmi, SPMI_CMD_REG);
+ }
+
+ /* Wait for Rx FIFO to have something */
+ /* Quite ugly msleep, need to find a better way to do it */
+ i = 0;
+ do {
+ status = read_reg(spmi, SPMI_STATUS_REG);
+ msleep(10);
+ i += 1;
+ } while ((status & SPMI_RX_FIFO_EMPTY) && i < 5);
+
+ if (i >= 5) {
+		dev_err(&ctrl->dev,
+			"spmi_write_cmd: took too long to get the status\n");
+		return -ETIMEDOUT;
+ }
+
+ rsp = read_reg(spmi, SPMI_RSP_REG);
+	(void)rsp; /* TODO: check the reply status here */
+
+ return 0;
+}
+
+static int spmi_controller_probe(struct platform_device *pdev)
+{
+ struct apple_spmi *spmi;
+ struct spmi_controller *ctrl;
+ int ret;
+
+ ctrl = spmi_controller_alloc(&pdev->dev, sizeof(struct apple_spmi));
+	if (!ctrl)
+		return dev_err_probe(&pdev->dev, -ENOMEM,
+				     "Can't allocate spmi_controller data\n");
+
+ spmi = spmi_controller_get_drvdata(ctrl);
+ spmi->ctrl = ctrl;
+ platform_set_drvdata(pdev, ctrl);
+
+ spmi->regs = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(spmi->regs)) {
+		ret = dev_err_probe(&pdev->dev, PTR_ERR(spmi->regs),
+				    "Can't get ioremap regs\n");
+		goto err_put_controller;
+	}
+
+ ctrl->dev.of_node = of_node_get(pdev->dev.of_node);
+
+ /* Callbacks */
+ ctrl->read_cmd = spmi_read_cmd;
+ ctrl->write_cmd = spmi_write_cmd;
+
+ ret = spmi_controller_add(ctrl);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "spmi_controller_add failed with error %d!\n", ret);
+ goto err_put_controller;
+ }
+
+ /* Let's look for other nodes in device tree like the rtc */
+ ret = devm_of_platform_populate(&pdev->dev);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"devm_of_platform_populate failed with error %d!\n",
+			ret);
+		spmi_controller_remove(ctrl);
+		goto err_put_controller;
+	}
+
+	return 0;
+
+err_put_controller:
+	spmi_controller_put(ctrl);
+	return ret;
+}
+
+static int spmi_del_controller(struct platform_device *pdev)
+{
+ struct spmi_controller *ctrl = platform_get_drvdata(pdev);
+
+ spmi_controller_remove(ctrl);
+ spmi_controller_put(ctrl);
+ return 0;
+}
+
+static const struct of_device_id spmi_controller_match_table[] = {
+ {
+ .compatible = "apple,spmi",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spmi_controller_match_table);
+
+static struct platform_driver spmi_controller_driver = {
+ .probe = spmi_controller_probe,
+ .remove = spmi_del_controller,
+ .driver = {
+ .name = "apple-spmi",
+ .of_match_table = spmi_controller_match_table,
+ },
+};
+module_platform_driver(spmi_controller_driver);
+
+MODULE_AUTHOR("Jean-Francois Bortolotti <jeff@borto.fr>");
+MODULE_DESCRIPTION("Apple SoC SPMI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index 77d1363029f5..489f366c77a9 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -40,6 +40,7 @@
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/of.h>
+#include <linux/pm_runtime.h>
#include <asm/irq.h>
/* UART name and device definitions */
@@ -1368,30 +1369,49 @@ static int apple_s5l_serial_startup(struct uart_port *port)
/* power power management control */
+static int __maybe_unused s3c24xx_serial_runtime_suspend(struct device *dev)
+{
+ struct uart_port *port = dev_get_drvdata(dev);
+ struct s3c24xx_uart_port *ourport = to_ourport(port);
+ int timeout = 10000;
+
+ while (--timeout && !s3c24xx_serial_txempty_nofifo(port))
+ udelay(100);
+
+ if (!IS_ERR(ourport->baudclk))
+ clk_disable_unprepare(ourport->baudclk);
+
+ clk_disable_unprepare(ourport->clk);
+ return 0;
+}
+
+static int __maybe_unused s3c24xx_serial_runtime_resume(struct device *dev)
+{
+ struct uart_port *port = dev_get_drvdata(dev);
+ struct s3c24xx_uart_port *ourport = to_ourport(port);
+
+ clk_prepare_enable(ourport->clk);
+
+ if (!IS_ERR(ourport->baudclk))
+ clk_prepare_enable(ourport->baudclk);
+ return 0;
+}
+
static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
unsigned int old)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
- int timeout = 10000;
ourport->pm_level = level;
switch (level) {
- case 3:
- while (--timeout && !s3c24xx_serial_txempty_nofifo(port))
- udelay(100);
-
- if (!IS_ERR(ourport->baudclk))
- clk_disable_unprepare(ourport->baudclk);
-
- clk_disable_unprepare(ourport->clk);
+ case UART_PM_STATE_OFF:
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_sync(port->dev);
break;
- case 0:
- clk_prepare_enable(ourport->clk);
-
- if (!IS_ERR(ourport->baudclk))
- clk_prepare_enable(ourport->baudclk);
+ case UART_PM_STATE_ON:
+ pm_runtime_get_sync(port->dev);
break;
default:
dev_err(port->dev, "s3c24xx_serial: unknown pm %d\n", level);
@@ -2224,18 +2244,15 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
}
}
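+	/*
+	 * The clocks enabled during init are handed over to runtime PM:
+	 * mark the device active and hold a usage reference until the
+	 * port has been registered below.
+	 */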
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
dev_dbg(&pdev->dev, "%s: adding port\n", __func__);
uart_add_one_port(&s3c24xx_uart_drv, &ourport->port);
platform_set_drvdata(pdev, &ourport->port);
- /*
- * Deactivate the clock enabled in s3c24xx_serial_init_port here,
- * so that a potential re-enablement through the pm-callback overlaps
- * and keeps the clock enabled in this case.
- */
- clk_disable_unprepare(ourport->clk);
- if (!IS_ERR(ourport->baudclk))
- clk_disable_unprepare(ourport->baudclk);
+ pm_runtime_put_sync(&pdev->dev);
ret = s3c24xx_serial_cpufreq_register(ourport);
if (ret < 0)
@@ -2249,10 +2266,21 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
static int s3c24xx_serial_remove(struct platform_device *dev)
{
struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
+ struct s3c24xx_uart_port *ourport = to_ourport(port);
if (port) {
+ pm_runtime_get_sync(&dev->dev);
+
s3c24xx_serial_cpufreq_deregister(to_ourport(port));
uart_remove_one_port(&s3c24xx_uart_drv, port);
+
+ clk_disable_unprepare(ourport->clk);
+ if (!IS_ERR(ourport->baudclk))
+ clk_disable_unprepare(ourport->baudclk);
+
+ pm_runtime_disable(&dev->dev);
+ pm_runtime_set_suspended(&dev->dev);
+ pm_runtime_put_noidle(&dev->dev);
}
uart_unregister_driver(&s3c24xx_uart_drv);
@@ -2261,8 +2289,8 @@ static int s3c24xx_serial_remove(struct platform_device *dev)
}
/* UART power management code */
-#ifdef CONFIG_PM_SLEEP
-static int s3c24xx_serial_suspend(struct device *dev)
+
+static int __maybe_unused s3c24xx_serial_suspend(struct device *dev)
{
struct uart_port *port = s3c24xx_dev_to_port(dev);
@@ -2272,7 +2300,7 @@ static int s3c24xx_serial_suspend(struct device *dev)
return 0;
}
-static int s3c24xx_serial_resume(struct device *dev)
+static int __maybe_unused s3c24xx_serial_resume(struct device *dev)
{
struct uart_port *port = s3c24xx_dev_to_port(dev);
struct s3c24xx_uart_port *ourport = to_ourport(port);
@@ -2292,7 +2320,7 @@ static int s3c24xx_serial_resume(struct device *dev)
return 0;
}
-static int s3c24xx_serial_resume_noirq(struct device *dev)
+static int __maybe_unused s3c24xx_serial_resume_noirq(struct device *dev)
{
struct uart_port *port = s3c24xx_dev_to_port(dev);
struct s3c24xx_uart_port *ourport = to_ourport(port);
@@ -2362,16 +2390,14 @@ static int s3c24xx_serial_resume_noirq(struct device *dev)
}
static const struct dev_pm_ops s3c24xx_serial_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
.suspend = s3c24xx_serial_suspend,
.resume = s3c24xx_serial_resume,
.resume_noirq = s3c24xx_serial_resume_noirq,
+#endif
+ SET_RUNTIME_PM_OPS(s3c24xx_serial_runtime_suspend,
+ s3c24xx_serial_runtime_resume, NULL)
};
-#define SERIAL_SAMSUNG_PM_OPS (&s3c24xx_serial_pm_ops)
-
-#else /* !CONFIG_PM_SLEEP */
-
-#define SERIAL_SAMSUNG_PM_OPS NULL
-#endif /* CONFIG_PM_SLEEP */
/* Console code */
@@ -2912,7 +2938,7 @@ static struct platform_driver samsung_serial_driver = {
.id_table = s3c24xx_serial_driver_ids,
.driver = {
.name = "samsung-uart",
- .pm = SERIAL_SAMSUNG_PM_OPS,
+ .pm = &s3c24xx_serial_pm_ops,
.of_match_table = of_match_ptr(s3c24xx_uart_dt_match),
},
};
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index c0e7c76dc5c8..232aaa7790d5 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -116,6 +116,9 @@ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
dwc->current_dr_role = mode;
}
+static void dwc3_core_exit(struct dwc3 *dwc);
+static int dwc3_core_init_for_resume(struct dwc3 *dwc);
+
static void __dwc3_set_mode(struct work_struct *work)
{
struct dwc3 *dwc = work_to_dwc(work);
@@ -130,10 +133,11 @@ static void __dwc3_set_mode(struct work_struct *work)
if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
dwc3_otg_update(dwc, 0);
- if (!dwc->desired_dr_role)
+ if (!dwc->desired_dr_role && !dwc->role_switch_reset_quirk)
goto out;
- if (dwc->desired_dr_role == dwc->current_dr_role)
+ if (dwc->desired_dr_role == dwc->current_dr_role &&
+ !dwc->role_switch_reset_quirk)
goto out;
if (dwc->desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev)
@@ -158,6 +162,34 @@ static void __dwc3_set_mode(struct work_struct *work)
break;
}
+ if (dwc->role_switch_reset_quirk) {
+ if (dwc->current_dr_role) {
+ dwc->current_dr_role = 0;
+ dwc3_core_exit(dwc);
+ }
+
+ if (dwc->desired_dr_role) {
+ /*
+			 * The first call to __dwc3_set_mode comes from
+			 * dwc3_drd_init. In that case dwc3_core_init has
+			 * already been called, but dwc->current_dr_role is
+			 * zero, so the core must not be reinitialized here.
+ */
+ if (dwc->role_switch_reset_quirk_initialized) {
+ ret = dwc3_core_init_for_resume(dwc);
+ if (ret) {
+ dev_err(dwc->dev,
+ "failed to reinitialize core\n");
+ goto out;
+ }
+ }
+
+ dwc->role_switch_reset_quirk_initialized = 1;
+ } else {
+ goto out;
+ }
+ }
+
/*
* When current_dr_role is not set, there's no role switching.
* Only perform GCTL.CoreSoftReset when there's DRD role switching.
@@ -1835,6 +1867,9 @@ static int dwc3_probe(struct platform_device *pdev)
goto put_usb_psy;
}
}
+
+ if (of_device_is_compatible(dev->of_node, "apple,dwc3"))
+ dwc->role_switch_reset_quirk = true;
}
ret = reset_control_deassert(dwc->reset);
@@ -1977,7 +2012,6 @@ static int dwc3_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
static int dwc3_core_init_for_resume(struct dwc3 *dwc)
{
int ret;
@@ -2004,6 +2038,7 @@ assert_reset:
return ret;
}
+#ifdef CONFIG_PM
static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
{
unsigned long flags;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 8f9959ba9fd4..e9ebdac66fb3 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1110,6 +1110,9 @@ struct dwc3_scratchpad_array {
* 3 - Reserved
* @dis_metastability_quirk: set to disable metastability quirk.
* @dis_split_quirk: set to disable split boundary.
+ * @role_switch_reset_quirk: set to force reinitialization after any role switch
+ * @role_switch_reset_quirk_initialized: set to true after the first role switch
+ * which is triggered from dwc3_drd_init directly
* @imod_interval: set the interrupt moderation interval in 250ns
* increments or 0 to disable.
* @max_cfg_eps: current max number of IN eps used across all USB configs.
@@ -1327,6 +1330,9 @@ struct dwc3 {
unsigned dis_split_quirk:1;
unsigned async_callbacks:1;
+ unsigned role_switch_reset_quirk:1;
+ unsigned role_switch_reset_quirk_initialized:1;
+
u16 imod_interval;
int max_cfg_eps;
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index 039bf241769a..4579505cac1f 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -461,6 +461,9 @@ static int dwc3_usb_role_switch_set(struct usb_role_switch *sw,
break;
}
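+	/* With the reset quirk, USB_ROLE_NONE tears the core down entirely (mode 0) */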
+ if (dwc->role_switch_reset_quirk && role == USB_ROLE_NONE)
+ mode = 0;
+
dwc3_set_mode(dwc, mode);
return 0;
}
@@ -489,6 +492,10 @@ static enum usb_role dwc3_usb_role_switch_get(struct usb_role_switch *sw)
role = USB_ROLE_DEVICE;
break;
}
+
+ if (dwc->role_switch_reset_quirk && !dwc->current_dr_role)
+ role = USB_ROLE_NONE;
+
spin_unlock_irqrestore(&dwc->lock, flags);
return role;
}
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 247568bc17a2..75824d66c3bd 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -40,17 +40,26 @@ config USB_XHCI_DBGCAP
config USB_XHCI_PCI
tristate
depends on USB_PCI
- depends on USB_XHCI_PCI_RENESAS || !USB_XHCI_PCI_RENESAS
default y
config USB_XHCI_PCI_RENESAS
- tristate "Support for additional Renesas xHCI controller with firmware"
+ bool "Support for Renesas xHCI controllers with firmware"
+ depends on USB_XHCI_PCI
help
Say 'Y' to enable the support for the Renesas xHCI controller with
firmware. Make sure you have the firwmare for the device and
installed on your system for this device to work.
If unsure, say 'N'.
+config USB_XHCI_PCI_ASMEDIA
+ bool "Support for ASMedia xHCI controller with firmware"
+ default ARCH_APPLE
+ depends on USB_XHCI_PCI
+ help
+ Say 'Y' to enable support for ASMedia xHCI controllers with
+ host-supplied firmware. These are usually present on Apple devices.
+ If unsure, say 'N'.
+
config USB_XHCI_PLATFORM
tristate "Generic xHCI driver for a platform device"
select USB_XHCI_RCAR if ARCH_RENESAS
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 2c8a61be7e46..584aa8fe4448 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -69,7 +69,9 @@ obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
obj-$(CONFIG_USB_FHCI_HCD) += fhci.o
obj-$(CONFIG_USB_XHCI_HCD) += xhci-hcd.o
obj-$(CONFIG_USB_XHCI_PCI) += xhci-pci.o
-obj-$(CONFIG_USB_XHCI_PCI_RENESAS) += xhci-pci-renesas.o
+xhci-pci-y += xhci-pci-core.o
+xhci-pci-$(CONFIG_USB_XHCI_PCI_RENESAS) += xhci-pci-renesas.o
+xhci-pci-$(CONFIG_USB_XHCI_PCI_ASMEDIA) += xhci-pci-asmedia.o
obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o
obj-$(CONFIG_USB_XHCI_HISTB) += xhci-histb.o
obj-$(CONFIG_USB_XHCI_MTK) += xhci-mtk-hcd.o
diff --git a/drivers/usb/host/xhci-pci-asmedia.c b/drivers/usb/host/xhci-pci-asmedia.c
new file mode 100644
index 000000000000..a4d342a7b216
--- /dev/null
+++ b/drivers/usb/host/xhci-pci-asmedia.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * ASMedia xHCI firmware loader
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include <linux/acpi.h>
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#include "xhci.h"
+#include "xhci-trace.h"
+#include "xhci-pci.h"
+
+/* Configuration space registers */
+#define ASMT_CFG_CONTROL 0xe0
+#define ASMT_CFG_CONTROL_WRITE BIT(1)
+#define ASMT_CFG_CONTROL_READ BIT(0)
+
+#define ASMT_CFG_SRAM_ADDR 0xe2
+
+#define ASMT_CFG_SRAM_ACCESS 0xef
+#define ASMT_CFG_SRAM_ACCESS_READ BIT(6)
+#define ASMT_CFG_SRAM_ACCESS_ENABLE BIT(7)
+
+#define ASMT_CFG_DATA_READ0 0xf0
+#define ASMT_CFG_DATA_READ1 0xf4
+
+#define ASMT_CFG_DATA_WRITE0 0xf8
+#define ASMT_CFG_DATA_WRITE1 0xfc
+
+#define ASMT_CMD_GET_FWVER 0x8000060840
+#define ASMT_FWVER_ROM 0x010250090816
+
+/* BAR0 registers */
+#define ASMT_REG_CTL 0x3000
+
+#define ASMT_REG_RESET 0x3004
+#define ASMT_REG_RESET_ONCE BIT(0)
+#define ASMT_REG_RESET_HOLD BIT(1)
+
+#define ASMT_REG_FW_STATUS 0x3009
+
+#define ASMT_REG_WDATA 0x3010
+#define ASMT_REG_RDATA 0x3018
+
+#define TIMEOUT_USEC 10000
+#define RESET_TIMEOUT_USEC 300000
+
+static int asmedia_mbox_tx(struct pci_dev *pdev, u64 data)
+{
+ u8 op;
+ int i;
+
+ for (i = 0; i < TIMEOUT_USEC; i++) {
+ pci_read_config_byte(pdev, ASMT_CFG_CONTROL, &op);
+ if (!(op & ASMT_CFG_CONTROL_WRITE))
+ break;
+ udelay(1);
+ }
+
+ if (op & ASMT_CFG_CONTROL_WRITE) {
+ dev_err(&pdev->dev,
+ "Timed out on mailbox tx: 0x%llx\n",
+ data);
+ return -ETIMEDOUT;
+ }
+
+ pci_write_config_dword(pdev, ASMT_CFG_DATA_WRITE0, data);
+ pci_write_config_dword(pdev, ASMT_CFG_DATA_WRITE1, data >> 32);
+ pci_write_config_byte(pdev, ASMT_CFG_CONTROL,
+ ASMT_CFG_CONTROL_WRITE);
+
+ return 0;
+}
+
+static int asmedia_mbox_rx(struct pci_dev *pdev, u64 *data)
+{
+ u8 op;
+ u32 low, high;
+ int i;
+
+ for (i = 0; i < TIMEOUT_USEC; i++) {
+ pci_read_config_byte(pdev, ASMT_CFG_CONTROL, &op);
+ if (op & ASMT_CFG_CONTROL_READ)
+ break;
+ udelay(1);
+ }
+
+ if (!(op & ASMT_CFG_CONTROL_READ)) {
+ dev_err(&pdev->dev, "Timed out on mailbox rx\n");
+ return -ETIMEDOUT;
+ }
+
+ pci_read_config_dword(pdev, ASMT_CFG_DATA_READ0, &low);
+ pci_read_config_dword(pdev, ASMT_CFG_DATA_READ1, &high);
+ pci_write_config_byte(pdev, ASMT_CFG_CONTROL,
+ ASMT_CFG_CONTROL_READ);
+
+ *data = ((u64)high << 32) | low;
+ return 0;
+}
+
+static int asmedia_get_fw_version(struct pci_dev *pdev, u64 *version)
+{
+ int err = 0;
+ u64 cmd;
+
+ err = asmedia_mbox_tx(pdev, ASMT_CMD_GET_FWVER);
+ if (err)
+ return err;
+ err = asmedia_mbox_tx(pdev, 0);
+ if (err)
+ return err;
+
+ err = asmedia_mbox_rx(pdev, &cmd);
+ if (err)
+ return err;
+ err = asmedia_mbox_rx(pdev, version);
+ if (err)
+ return err;
+
+ if (cmd != ASMT_CMD_GET_FWVER) {
+ dev_err(&pdev->dev, "Unexpected reply command 0x%llx\n", cmd);
+ return -EIO;
+ }
+
+ return 0;
+}
+
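+/*
+ * Returns a negative errno on a mailbox error, 0 if the controller is still
+ * running its ROM firmware (an upload is needed), or 1 if an uploaded
+ * firmware is already running.
+ */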
+static int asmedia_check_firmware(struct pci_dev *pdev)
+{
+ u64 fwver;
+ int ret;
+
+ ret = asmedia_get_fw_version(pdev, &fwver);
+ if (ret)
+ return ret;
+
+ dev_info(&pdev->dev, "Firmware version: 0x%llx\n", fwver);
+
+ return fwver != ASMT_FWVER_ROM;
+}
+
+static int asmedia_wait_reset(struct usb_hcd *hcd)
+{
+ struct xhci_cap_regs __iomem *cap = hcd->regs;
+ struct xhci_op_regs __iomem *op;
+ u32 val;
+ int ret;
+
+ op = hcd->regs + HC_LENGTH(readl(&cap->hc_capbase));
+
+ ret = readl_poll_timeout(&op->command,
+ val, !(val & CMD_RESET),
+ 1000, RESET_TIMEOUT_USEC);
+
+ if (ret)
+ dev_err(hcd->self.controller, "Reset timed out\n");
+
+ return ret;
+}
+
+static int asmedia_load_fw(struct pci_dev *pdev, const struct firmware *fw)
+{
+ struct usb_hcd *hcd;
+ void __iomem *regs;
+ const u16 *fw_data = (const u16 *)fw->data;
+ u32 data;
+ size_t index = 0, addr = 0;
+ size_t words = fw->size >> 1;
+ int ret;
+
+ hcd = dev_get_drvdata(&pdev->dev);
+ regs = hcd->regs;
+
+ writew_relaxed(0x5040, regs + ASMT_REG_CTL);
+ writeb_relaxed(ASMT_REG_RESET_HOLD, regs + ASMT_REG_RESET);
+ writew_relaxed(0x5042, regs + ASMT_REG_CTL);
+ writeb_relaxed(ASMT_REG_RESET_ONCE, regs + ASMT_REG_RESET);
+
+ ret = asmedia_wait_reset(hcd);
+ if (ret)
+ return ret;
+
+ writew_relaxed(0x500e, regs + ASMT_REG_CTL);
+ writeb_relaxed(ASMT_REG_RESET_ONCE, regs + ASMT_REG_RESET);
+
+ pci_write_config_byte(pdev, ASMT_CFG_SRAM_ACCESS,
+ ASMT_CFG_SRAM_ACCESS_ENABLE);
+
+ /* The firmware upload is interleaved in 0x4000 word blocks */
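+	/*
+	 * Each 32-bit write pairs word i from the low block with word
+	 * i + 0x4000 from the high block; once the index crosses a 0x4000
+	 * boundary, that high block has already been consumed, so skip
+	 * ahead to the next pair of blocks.
+	 */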
+ addr = index = 0;
+ while (index < words) {
+ data = fw_data[index];
+ if ((index | 0x4000) < words)
+ data |= fw_data[index | 0x4000] << 16;
+
+ pci_write_config_word(pdev, ASMT_CFG_SRAM_ADDR,
+ addr);
+
+ writel_relaxed(data, regs + ASMT_REG_WDATA);
+
+ if (++index & 0x4000)
+ index += 0x4000;
+ addr += 2;
+ }
+
+ writew_relaxed(0x5040, regs + ASMT_REG_CTL);
+ writeb_relaxed(ASMT_REG_RESET_HOLD | ASMT_REG_RESET_ONCE,
+ regs + ASMT_REG_RESET);
+
+ pci_write_config_byte(pdev, ASMT_CFG_SRAM_ACCESS, 0);
+
+ writew_relaxed(0x500e, regs + ASMT_REG_CTL);
+ writeb_relaxed(0, regs + ASMT_REG_RESET);
+
+ ret = asmedia_wait_reset(hcd);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int asmedia_xhci_check_request_fw(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct xhci_driver_data *driver_data =
+ (struct xhci_driver_data *)id->driver_data;
+ const char *fw_name = driver_data->firmware;
+ const struct firmware *fw;
+ int ret;
+
+ /* Check if device has firmware, if so skip everything */
+ ret = asmedia_check_firmware(pdev);
+ if (ret < 0)
+ return ret;
+ else if (ret == 1)
+ return 0;
+
+ pci_dev_get(pdev);
+ ret = request_firmware(&fw, fw_name, &pdev->dev);
+ pci_dev_put(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not load firmware %s: %d\n",
+ fw_name, ret);
+ return ret;
+ }
+
+ ret = asmedia_load_fw(pdev, fw);
+ if (ret) {
+ dev_err(&pdev->dev, "Firmware upload failed: %d\n", ret);
+ goto err;
+ }
+
+ ret = asmedia_check_firmware(pdev);
+ if (ret < 0) {
+ goto err;
+ } else if (ret != 1) {
+ dev_err(&pdev->dev, "Firmware version is too old after upload\n");
+ ret = -EIO;
+ } else {
+ ret = 0;
+ }
+
+err:
+ release_firmware(fw);
+ return ret;
+}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci-core.c
index 7bccbe50bab1..2c8d108ab79c 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci-core.c
@@ -360,6 +360,18 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
struct xhci_hcd *xhci;
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int retval;
+ struct xhci_driver_data *driver_data;
+ const struct pci_device_id *id;
+
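+	/*
+	 * Controllers flagged with a firmware quirk need their firmware
+	 * loaded before the primary HCD is set up.
+	 */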
+ id = pci_match_id(to_pci_driver(pdev->dev.driver)->id_table, pdev);
+ if (id && id->driver_data && usb_hcd_is_primary_hcd(hcd)) {
+ driver_data = (struct xhci_driver_data *)id->driver_data;
+ if (driver_data->quirks & XHCI_ASMEDIA_FW_QUIRK) {
+ retval = asmedia_xhci_check_request_fw(pdev, id);
+ if (retval < 0)
+ return retval;
+ }
+ }
xhci = hcd_to_xhci(hcd);
if (!xhci->sbrn)
@@ -640,6 +652,11 @@ static const struct xhci_driver_data reneses_data = {
.firmware = "renesas_usb_fw.mem",
};
+static const struct xhci_driver_data asmedia_data = {
+ .quirks = XHCI_ASMEDIA_FW_QUIRK,
+ .firmware = "asmedia/asm2214a-apple.bin",
+};
+
/* PCI driver selection metadata; PCI hotplugging uses this */
static const struct pci_device_id pci_ids[] = {
{ PCI_DEVICE(0x1912, 0x0014),
@@ -648,6 +665,9 @@ static const struct pci_device_id pci_ids[] = {
{ PCI_DEVICE(0x1912, 0x0015),
.driver_data = (unsigned long)&reneses_data,
},
+ { PCI_DEVICE(0x1b21, 0x2142),
+ .driver_data = (unsigned long)&asmedia_data,
+ },
/* handle any USB 3.0 xHCI controller */
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_XHCI, ~0),
},
@@ -663,6 +683,10 @@ MODULE_DEVICE_TABLE(pci, pci_ids);
MODULE_FIRMWARE("renesas_usb_fw.mem");
#endif
+#if IS_ENABLED(CONFIG_USB_XHCI_PCI_ASMEDIA)
+MODULE_FIRMWARE("asmedia/asm2214a-apple.bin");
+#endif
+
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver xhci_pci_driver = {
.name = hcd_name,
diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c
index 93f8b355bc70..dddd35e50a2c 100644
--- a/drivers/usb/host/xhci-pci-renesas.c
+++ b/drivers/usb/host/xhci-pci-renesas.c
@@ -3,7 +3,6 @@
#include <linux/acpi.h>
#include <linux/firmware.h>
-#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
@@ -625,6 +624,5 @@ exit:
release_firmware(fw);
return err;
}
-EXPORT_SYMBOL_GPL(renesas_xhci_check_request_fw);
-MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE("renesas_usb_fw.mem");
diff --git a/drivers/usb/host/xhci-pci.h b/drivers/usb/host/xhci-pci.h
index cb9a8f331a44..279c95acc43f 100644
--- a/drivers/usb/host/xhci-pci.h
+++ b/drivers/usb/host/xhci-pci.h
@@ -9,7 +9,20 @@ int renesas_xhci_check_request_fw(struct pci_dev *dev,
const struct pci_device_id *id);
#else
-static int renesas_xhci_check_request_fw(struct pci_dev *dev,
+static inline int renesas_xhci_check_request_fw(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ return 0;
+}
+
+#endif
+
+#if IS_ENABLED(CONFIG_USB_XHCI_PCI_ASMEDIA)
+int asmedia_xhci_check_request_fw(struct pci_dev *dev,
+ const struct pci_device_id *id);
+
+#else
+static inline int asmedia_xhci_check_request_fw(struct pci_dev *dev,
const struct pci_device_id *id)
{
return 0;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index cc084d9505cd..b93deeff06d7 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1898,6 +1898,7 @@ struct xhci_hcd {
#define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42)
#define XHCI_SUSPEND_RESUME_CLKS BIT_ULL(43)
#define XHCI_RESET_TO_DEFAULT BIT_ULL(44)
+#define XHCI_ASMEDIA_FW_QUIRK BIT_ULL(45)
unsigned int num_active_eps;
unsigned int limit_active_eps;
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index b637e8b378b3..a037f7ecf625 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -128,11 +128,15 @@ tps6598x_block_read(struct tps6598x *tps, u8 reg, void *val, size_t len)
return regmap_raw_read(tps->regmap, reg, val, len);
ret = regmap_raw_read(tps->regmap, reg, data, len + 1);
- if (ret)
+ if (ret) {
+ dev_err(tps->dev, "regmap_raw_read returned %d\n", ret);
return ret;
+ }
- if (data[0] < len)
+ if (data[0] < len) {
+ dev_err(tps->dev, "expected %zu bytes, got %d\n", len, data[0]);
return -EIO;
+ }
memcpy(val, &data[1], len);
return 0;
@@ -419,7 +423,7 @@ static bool tps6598x_read_status(struct tps6598x *tps, u32 *status)
ret = tps6598x_read32(tps, TPS_REG_STATUS, status);
if (ret) {
- dev_err(tps->dev, "%s: failed to read status\n", __func__);
+ dev_err(tps->dev, "%s: failed to read status: %d\n", __func__, ret);
return false;
}
trace_tps6598x_status(*status);
@@ -476,12 +480,11 @@ static irqreturn_t cd321x_interrupt(int irq, void *data)
struct tps6598x *tps = data;
u64 event;
u32 status;
- int ret;
+ int ret = IRQ_NONE;
mutex_lock(&tps->lock);
- ret = tps6598x_read64(tps, TPS_REG_INT_EVENT1, &event);
- if (ret) {
+ if (tps6598x_read64(tps, TPS_REG_INT_EVENT1, &event)) {
dev_err(tps->dev, "%s: failed to read events\n", __func__);
goto err_unlock;
}
@@ -490,6 +493,8 @@ static irqreturn_t cd321x_interrupt(int irq, void *data)
if (!event)
goto err_unlock;
+ ret = IRQ_HANDLED;
+
if (!tps6598x_read_status(tps, &status))
goto err_clear_ints;
@@ -511,9 +516,7 @@ err_clear_ints:
err_unlock:
mutex_unlock(&tps->lock);
- if (event)
- return IRQ_HANDLED;
- return IRQ_NONE;
+ return ret;
}
static irqreturn_t tps6598x_interrupt(int irq, void *data)
@@ -522,13 +525,12 @@ static irqreturn_t tps6598x_interrupt(int irq, void *data)
u64 event1;
u64 event2;
u32 status;
- int ret;
+ int ret = IRQ_NONE;
mutex_lock(&tps->lock);
- ret = tps6598x_read64(tps, TPS_REG_INT_EVENT1, &event1);
- ret |= tps6598x_read64(tps, TPS_REG_INT_EVENT2, &event2);
- if (ret) {
+ if (tps6598x_read64(tps, TPS_REG_INT_EVENT1, &event1) ||
+ tps6598x_read64(tps, TPS_REG_INT_EVENT2, &event2)) {
dev_err(tps->dev, "%s: failed to read events\n", __func__);
goto err_unlock;
}
@@ -537,6 +539,8 @@ static irqreturn_t tps6598x_interrupt(int irq, void *data)
if (!(event1 | event2))
goto err_unlock;
+ ret = IRQ_HANDLED;
+
if (!tps6598x_read_status(tps, &status))
goto err_clear_ints;
@@ -559,9 +563,7 @@ err_clear_ints:
err_unlock:
mutex_unlock(&tps->lock);
- if (event1 | event2)
- return IRQ_HANDLED;
- return IRQ_NONE;
+ return ret;
}
static int tps6598x_check_mode(struct tps6598x *tps)