Diffstat (limited to 'drivers')
103 files changed, 12592 insertions, 488 deletions
diff --git a/drivers/acpi/x86/apple.c b/drivers/acpi/x86/apple.c index c285c91a5e9c..71b8f103ab0f 100644 --- a/drivers/acpi/x86/apple.c +++ b/drivers/acpi/x86/apple.c @@ -70,13 +70,16 @@ void acpi_extract_apple_properties(struct acpi_device *adev) if ( key->type != ACPI_TYPE_STRING || (val->type != ACPI_TYPE_INTEGER && - val->type != ACPI_TYPE_BUFFER)) + val->type != ACPI_TYPE_BUFFER && + val->type != ACPI_TYPE_STRING)) continue; /* skip invalid properties */ __set_bit(i, valid); newsize += key->string.length + 1; if ( val->type == ACPI_TYPE_BUFFER) newsize += val->buffer.length; + else if (val->type == ACPI_TYPE_STRING) + newsize += val->string.length + 1; } numvalid = bitmap_weight(valid, numprops); @@ -118,6 +121,12 @@ void acpi_extract_apple_properties(struct acpi_device *adev) newprops[v].type = val->type; if (val->type == ACPI_TYPE_INTEGER) { newprops[v].integer.value = val->integer.value; + } else if (val->type == ACPI_TYPE_STRING) { + newprops[v].string.length = val->string.length; + newprops[v].string.pointer = free_space; + memcpy(free_space, val->string.pointer, + val->string.length); + free_space += val->string.length + 1; } else { newprops[v].buffer.length = val->buffer.length; newprops[v].buffer.pointer = free_space; diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index e30707405455..2f5ca7bc7c14 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -274,6 +274,18 @@ config BT_HCIBCM203X Say Y here to compile support for HCI BCM203x devices into the kernel or say M to compile it as module (bcm203x). + +config BT_HCIBCM43XX + tristate "HCI BCM43xx PCI driver" + depends on PCI + default ARCH_APPLE + help + Support for Broadcom BCM43xx bluetooth chipsets attached over PCI, + which are usually found in Apple machines. + + Say Y here to compile support for HCI BCM43xx devices into the + kernel or say M to compile it as module (bcm43xx). + config BT_HCIBPA10X tristate "HCI BPA10x USB driver" depends on USB diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile index 3321a8aea4a0..2be16bd3c32c 100644 --- a/drivers/bluetooth/Makefile +++ b/drivers/bluetooth/Makefile @@ -33,6 +33,8 @@ obj-$(CONFIG_BT_HCIUART_NOKIA) += hci_nokia.o obj-$(CONFIG_BT_HCIRSI) += btrsi.o +obj-$(CONFIG_BT_HCIBCM43XX) += hci_bcm43xx.o + btmrvl-y := btmrvl_main.o btmrvl-$(CONFIG_DEBUG_FS) += btmrvl_debugfs.o diff --git a/drivers/bluetooth/hci_bcm43xx.c b/drivers/bluetooth/hci_bcm43xx.c new file mode 100644 index 000000000000..f5f4efc1ba48 --- /dev/null +++ b/drivers/bluetooth/hci_bcm43xx.c @@ -0,0 +1,2016 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Bluetooth HCI driver for Broadcom 43XX devices attached via PCI + * + * Copyright (C) The Asahi Linux Contributors + * + * Broadcom BCM43xx PCI devices are combined Wireless LAN/Bluetooth devices + * which expose WiFi and Bluetooth as separate PCI functions. + * + * The Bluetooth function uses a simple IPC protocol based on DMA addressable + * shared memory called "Converged IPC" in order to tunnel HCI frames. + * Communication between the host and the devices happens over "transfer rings" + * and "completion rings" for the separate transport types: Control, HCI, + * ACL and SCO. + * + * The (official) terms "completion ring" and "transfer ring" are a bit + * misleading: + * For transfers from the host to the device an entry is enqueued in the + * transfer ring and the device will acknowledge it by enqueueing an entry + * in the corresponding completion ring.
+ * For transfers initiated from the device, however, an entry will be enqueued + * inside the completion ring (which has no corresponding entry in the transfer + * ring). The transfer ring for this direction has no memory associated with it, + * just a head and tail pointer. The message from the device is acknowledged + * by simply advancing the head of the transfer ring and ringing a doorbell. + * + */ + +//#define DEBUG + +#include <linux/async.h> +#include <linux/bitfield.h> +#include <linux/completion.h> +#include <linux/dma-mapping.h> +#include <linux/dmi.h> +#include <linux/firmware.h> +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/of.h> +#include <linux/pci.h> +#include <linux/printk.h> + +#include <asm/unaligned.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> + +enum bcm43xx_chip { + BCM4377 = 0, + BCM4378, + BCM4387, +}; + +#define BCM4377_DEVICE_ID 0x5fa0 +#define BCM4378_DEVICE_ID 0x5f69 +#define BCM4387_DEVICE_ID 0x5f71 + +#define BCM43XX_TIMEOUT 1000 + +/* + * These devices only support DMA transactions inside a 32bit window + * (possibly to avoid 64 bit arithmetic). The window size cannot exceed + * 0xffffffff but is always aligned down to the previous 0x200 byte boundary + * which effectively limits the window to [start, start+0xfffffe00]. + * We just limit the DMA window to [0, 0xfffffe00] to make sure we don't + * run into this limitation. + */ +#define BCM43XX_DMA_MASK 0xfffffe00 + +/* vendor-specific config space registers */ +#define BCM43XX_PCIECFG_BAR0_WINDOW0 0x80 +#define BCM43XX_PCIECFG_BAR0_WINDOW1 0x70 +#define BCM43XX_PCIECFG_BAR0_WINDOW4 0x74 +#define BCM43XX_PCIECFG_BAR0_WINDOW5 0x78 +#define BCM43XX_PCIECFG_BAR2_WINDOW 0x84 + +#define BCM43XX_PCIECFG_BAR0_WINDOW4_DEFAULT 0x18011000 +#define BCM43XX_PCIECFG_BAR2_WINDOW_DEFAULT 0x19000000 + +#define BCM43XX_PCIECFG_UNK_CTRL 0x88 + +/* BAR0 */ +#define BCM43XX_MAX_OTP_SIZE 0x100 +#define BCM43XX_OTP_SYS_VENDOR 0x15 +#define BCM43XX_OTP_CIS 0x80 +#define BCM43XX_OTP_VENDOR_HDR 0x00000008 +#define BCM43XX_OTP_MAX_PARAM_LEN 16 + +#define BCM43XX_BAR0_FW_DOORBELL 0x140 +#define BCM43XX_BAR0_RTI_CONTROL 0x144 + +#define BCM43XX_BAR0_DOORBELL 0x174 +#define BCM43XX_BAR0_DOORBELL_VALUE GENMASK(31, 16) +#define BCM43XX_BAR0_DOORBELL_IDX GENMASK(15, 8) +#define BCM43XX_BAR0_DOORBELL_RING BIT(5) + +#define BCM43XX_BAR0_MSI_ADDR_LO 0x580 +#define BCM43XX_BAR0_MSI_ADDR_HI 0x584 + +#define BCM43XX_BAR0_HOST_WINDOW_LO 0x590 +#define BCM43XX_BAR0_HOST_WINDOW_HI 0x594 +#define BCM43XX_BAR0_HOST_WINDOW_SIZE 0x598 + +/* BAR2 */ +#define BCM43XX_BAR2_BOOTSTAGE 0x200454 + +#define BCM43XX_BAR2_FW_LO 0x200478 +#define BCM43XX_BAR2_FW_HI 0x20047c +#define BCM43XX_BAR2_FW_SIZE 0x200480 + +#define BCM43XX_BAR2_RTI_MSI_ADDR_LO 0x2004f8 +#define BCM43XX_BAR2_RTI_MSI_ADDR_HI 0x2004fc +#define BCM43XX_BAR2_RTI_MSI_DATA 0x200500 + +#define BCM43XX_BAR2_CONTEXT_ADDR_LO 0x20048c +#define BCM43XX_BAR2_CONTEXT_ADDR_HI 0x200450 + +#define BCM43XX_BAR2_RTI_STATUS 0x20045c +#define BCM43XX_BAR2_RTI_WINDOW_LO 0x200494 +#define BCM43XX_BAR2_RTI_WINDOW_HI 0x200498 +#define BCM43XX_BAR2_RTI_WINDOW_SIZE 0x20049c + +#define BCM43XX_N_TRANSFER_RINGS 9 +#define BCM43XX_N_COMPLETION_RINGS 6 + +#define BCM43XX_CONTROL_MSG_SIZE 0x34 + +#define BCM43XX_MAX_RING_SIZE 256 + +#define BCM43XX_MSGID_GENERATION GENMASK(15, 8) +#define BCM43XX_MSGID_ID GENMASK(7, 0) + +enum bcm43xx_transfer_ring_id { + BCM43XX_XFER_RING_CONTROL = 0, + BCM43XX_XFER_RING_HCI_H2D = 1, + BCM43XX_XFER_RING_HCI_D2H = 2, + 
BCM43XX_XFER_RING_SCO_H2D = 3, + BCM43XX_XFER_RING_SCO_D2H = 4, + BCM43XX_XFER_RING_ACL_H2D = 5, + BCM43XX_XFER_RING_ACL_D2H = 6, +}; + +enum bcm43xx_completion_ring_id { + BCM43XX_ACK_RING_CONTROL = 0, + BCM43XX_ACK_RING_HCI_ACL = 1, + BCM43XX_EVENT_RING_HCI_ACL = 2, + BCM43XX_ACK_RING_SCO = 3, + BCM43XX_EVENT_RING_SCO = 4, +}; + +enum bcm43xx_doorbell { + BCM43XX_DOORBELL_CONTROL = 0, + BCM43XX_DOORBELL_HCI_H2D = 1, + BCM43XX_DOORBELL_HCI_D2H = 2, + BCM43XX_DOORBELL_ACL_H2D = 3, + BCM43XX_DOORBELL_ACL_D2H = 4, + BCM43XX_DOORBELL_SCO = 6, +}; + +#define BCM43XX_XFER_RING_MAX_INPLACE_PAYLOAD_SIZE (4 * 0xff) + +struct bcm43xx_xfer_ring_entry { +#define BCM43XX_XFER_RING_FLAG_PAYLOAD_MAPPED BIT(0) +#define BCM43XX_XFER_RING_FLAG_PAYLOAD_IN_FOOTER BIT(1) + u8 flags; + __le16 len; + u8 _unk0; + __le64 payload; + __le16 id; + u8 _unk1[2]; +} __packed; +static_assert(sizeof(struct bcm43xx_xfer_ring_entry) == 0x10); + +struct bcm43xx_completion_ring_entry { + u8 flags; + u8 _unk0; + __le16 ring_id; + __le16 msg_id; + __le32 len; + u8 _unk1[6]; +} __packed; +static_assert(sizeof(struct bcm43xx_completion_ring_entry) == 0x10); + +enum bcm43xx_control_message_type { + BCM43XX_CONTROL_MSG_CREATE_XFER_RING = 1, + BCM43XX_CONTROL_MSG_CREATE_COMPLETION_RING = 2, + BCM43XX_CONTROL_MSG_DESTROY_XFER_RING = 3, + BCM43XX_CONTROL_MSG_DESTROY_COMPLETION_RING = 4, + // BCM43XX_CONTROL_MSG_ABORT_CMDQ = 5, +}; + +struct bcm43xx_create_completion_ring_msg { + u8 msg_type; + u8 header_size; + u8 footer_size; + u8 _unk0; + __le16 id; + __le16 id_again; + __le64 ring_iova; + __le16 n_elements; + __le32 unk; + u8 _unk1[6]; + __le16 msi; + __le16 intmod_delay; + __le32 intmod_bytes; + __le16 accum_delay; + __le32 accum_bytes; + u8 _unk2[10]; +} __packed; +static_assert(sizeof(struct bcm43xx_create_completion_ring_msg) == + BCM43XX_CONTROL_MSG_SIZE); + +struct bcm43xx_destroy_completion_ring_msg { + u8 msg_type; + u8 _pad0; + __le16 ring_id; + u8 _pad1[48]; +} __packed; +static_assert(sizeof(struct bcm43xx_destroy_completion_ring_msg) == + BCM43XX_CONTROL_MSG_SIZE); + +struct bcm43xx_create_transfer_ring_msg { + u8 msg_type; + u8 header_size; + u8 footer_size; + u8 _unk0; + __le16 ring_id; + __le16 ring_id_again; + __le64 ring_iova; + u8 _unk1[8]; + __le16 n_elements; + __le16 completion_ring_id; + __le16 doorbell; +#define BCM43XX_XFER_RING_FLAG_VIRTUAL BIT(7) +#define BCM43XX_XFER_RING_FLAG_SYNC BIT(8) + __le16 flags; + u8 _unk2[20]; +} __packed; +static_assert(sizeof(struct bcm43xx_create_transfer_ring_msg) == + BCM43XX_CONTROL_MSG_SIZE); + +struct bcm43xx_destroy_transfer_ring_msg { + u8 msg_type; + u8 _pad0; + __le16 ring_id; + u8 _pad1[48]; +} __packed; +static_assert(sizeof(struct bcm43xx_destroy_transfer_ring_msg) == + BCM43XX_CONTROL_MSG_SIZE); + +struct bcm43xx_context { + __le16 version; + __le16 size; + __le32 enabled_caps; + + __le64 per_info_addr; + + /* ring heads and tails */ + __le64 completion_ring_heads_addr; + __le64 xfer_ring_tails_addr; + __le64 completion_ring_tails_addr; + __le64 xfer_ring_heads_addr; + __le16 n_completion_rings; + __le16 n_xfer_rings; + + /* control ring configuration */ + __le64 control_completion_ring_addr; + __le64 control_xfer_ring_addr; + __le16 control_xfer_ring_n_entries; + __le16 control_completion_ring_n_entries; + __le16 control_xfer_ring_doorbell; + __le16 control_completion_ring_doorbell; + __le16 control_xfer_ring_msi; + __le16 control_completion_ring_msi; + u8 control_xfer_ring_header_size; + u8 control_xfer_ring_footer_size; + u8 
control_completion_ring_header_size; + u8 control_completion_ring_footer_size; + + __le16 _unk0; // inPlaceComp and oOOComp + __le16 _unk1; // piMsi -> interrupt for new perInfo data? + + __le64 scratch_pad; + __le32 scratch_pad_size; + + __le32 res; +} __packed; +static_assert(sizeof(struct bcm43xx_context) == 0x68); + +struct bcm43xx_hci_send_calibration_cmd { + u8 unk; + __le16 blocks_left; + u8 data[0xe6]; +} __packed; + +struct bcm43xx_hci_send_ptb_cmd { + __le16 blocks_left; + u8 data[0xcf]; +} __packed; + +struct bcm43xx_ring_state { + __le16 completion_ring_head[BCM43XX_N_COMPLETION_RINGS]; + __le16 completion_ring_tail[BCM43XX_N_COMPLETION_RINGS]; + __le16 xfer_ring_head[BCM43XX_N_TRANSFER_RINGS]; + __le16 xfer_ring_tail[BCM43XX_N_TRANSFER_RINGS]; +}; + +struct bcm43xx_transfer_ring { + enum bcm43xx_transfer_ring_id ring_id; + enum bcm43xx_doorbell doorbell; + u16 payload_size; + size_t mapped_payload_size; + u8 completion_ring; + u16 n_entries; + u8 generation; + + bool sync; + bool virtual; + bool d2h_buffers_only; + bool allow_wait; + bool enabled; + + void *ring; + dma_addr_t ring_dma; + + void *payloads; + dma_addr_t payloads_dma; + + struct completion **events; + DECLARE_BITMAP(msgids, BCM43XX_MAX_RING_SIZE); + spinlock_t lock; +}; + +struct bcm43xx_completion_ring { + enum bcm43xx_completion_ring_id ring_id; + u16 payload_size; + u16 delay; + u16 n_entries; + bool enabled; + + u16 head; + u16 tail; + + void *ring; + dma_addr_t ring_dma; + + unsigned long transfer_rings; +}; + +struct bcm43xx_data; + +struct bcm43xx_hw { + const char *name; + + u32 bar0_window0; + u32 bar0_window1; + u32 bar0_window5; + + u32 otp_offset; + u32 otp_size; + + bool has_bar0_window5; + bool m2m_reset_on_ss_reset_disabled; + + const char *default_board_type; + + int (*send_calibration)(struct bcm43xx_data *bcm43xx); +}; + +struct bcm43xx_data { + struct pci_dev *pdev; + struct hci_dev *hdev; + + void __iomem *bar0; + void __iomem *bar2; + + const struct bcm43xx_hw *hw; + + const void *taurus_cal_blob; + int taurus_cal_size; + const void *taurus_beamforming_cal_blob; + int taurus_beamforming_cal_size; + + char stepping[BCM43XX_OTP_MAX_PARAM_LEN]; + char vendor[BCM43XX_OTP_MAX_PARAM_LEN]; + const char *board_type; + + struct completion event; + + int irq; + + struct bcm43xx_context *ctx; + dma_addr_t ctx_dma; + + struct bcm43xx_ring_state *ring_state; + dma_addr_t ring_state_dma; + + /* + * The HCI and ACL rings have to be merged because this structure is + * hardcoded in the firmware. 
+ */ + struct bcm43xx_completion_ring control_ack_ring; + struct bcm43xx_completion_ring hci_acl_ack_ring; + struct bcm43xx_completion_ring hci_acl_event_ring; + struct bcm43xx_completion_ring sco_ack_ring; + struct bcm43xx_completion_ring sco_event_ring; + + struct bcm43xx_transfer_ring control_h2d_ring; + struct bcm43xx_transfer_ring hci_h2d_ring; + struct bcm43xx_transfer_ring hci_d2h_ring; + struct bcm43xx_transfer_ring sco_h2d_ring; + struct bcm43xx_transfer_ring sco_d2h_ring; + struct bcm43xx_transfer_ring acl_h2d_ring; + struct bcm43xx_transfer_ring acl_d2h_ring; +}; + +static void bcm43xx_ring_doorbell(struct bcm43xx_data *bcm43xx, u8 doorbell, + u16 val) +{ + u32 db = 0; + + db |= FIELD_PREP(BCM43XX_BAR0_DOORBELL_VALUE, val); + db |= FIELD_PREP(BCM43XX_BAR0_DOORBELL_IDX, doorbell); + db |= BCM43XX_BAR0_DOORBELL_RING; + + dev_dbg(&bcm43xx->pdev->dev, "write %d to doorbell #%d (0x%x)\n", val, + doorbell, db); + iowrite32(db, bcm43xx->bar0 + BCM43XX_BAR0_DOORBELL); +} + +static void bcm43xx_handle_event(struct bcm43xx_data *bcm43xx, + struct bcm43xx_transfer_ring *ring, u8 type, + void *payload, size_t len) +{ + struct sk_buff *skb; + u16 head; + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + if (!ring->enabled) { + dev_warn(&bcm43xx->pdev->dev, + "event for disabled transfer ring %d\n", + ring->ring_id); + goto unlock; + } + + skb = bt_skb_alloc(len, GFP_ATOMIC); + if (!skb) + goto unlock; + + memcpy(skb_put(skb, len), payload, len); + hci_skb_pkt_type(skb) = type; + hci_recv_frame(bcm43xx->hdev, skb); + + head = le16_to_cpu(bcm43xx->ring_state->xfer_ring_head[ring->ring_id]); + head = (head + 1) % ring->n_entries; + bcm43xx->ring_state->xfer_ring_head[ring->ring_id] = cpu_to_le16(head); + + bcm43xx_ring_doorbell(bcm43xx, ring->doorbell, head); + +unlock: + spin_unlock_irqrestore(&ring->lock, flags); +} + +static void bcm43xx_handle_ack(struct bcm43xx_data *bcm43xx, + struct bcm43xx_transfer_ring *ring, + u16 raw_msgid) +{ + unsigned long flags; + u8 generation = FIELD_GET(BCM43XX_MSGID_GENERATION, raw_msgid); + u8 msgid = FIELD_GET(BCM43XX_MSGID_ID, raw_msgid); + + spin_lock_irqsave(&ring->lock, flags); + + if (generation != ring->generation) { + dev_warn( + &bcm43xx->pdev->dev, + "invalid message generation %d should be %d in ack for ring %d\n", + generation, ring->generation, ring->ring_id); + goto unlock; + } + + if (msgid > ring->n_entries) { + dev_warn(&bcm43xx->pdev->dev, + "invalid message id in ack for ring %d: %d > %d\n", + ring->ring_id, msgid, ring->n_entries); + goto unlock; + } + + if (!test_bit(msgid, ring->msgids)) { + dev_warn( + &bcm43xx->pdev->dev, + "invalid message id in ack for ring %d: %d is not used\n", + ring->ring_id, msgid); + goto unlock; + } + + if (ring->allow_wait && ring->events[msgid]) { + complete(ring->events[msgid]); + ring->events[msgid] = NULL; + } + + bitmap_release_region(ring->msgids, msgid, ring->n_entries); + +unlock: + spin_unlock_irqrestore(&ring->lock, flags); +} + +static void bcm43xx_handle_completion(struct bcm43xx_data *bcm43xx, + struct bcm43xx_completion_ring *ring, + u16 pos) +{ + struct bcm43xx_completion_ring_entry *entry; + u16 msg_id, transfer_ring; + size_t entry_size, data_len; + void *data; + + if (pos >= ring->n_entries) { + dev_warn(&bcm43xx->pdev->dev, "invalid pos: %d\n", pos); + return; + } + + entry_size = sizeof(*entry) + ring->payload_size; + entry = ring->ring + pos * entry_size; + data = ring->ring + pos * entry_size + sizeof(*entry); + data_len = le32_to_cpu(entry->len); + msg_id = 
le16_to_cpu(entry->msg_id); + transfer_ring = le16_to_cpu(entry->ring_id); + + if ((ring->transfer_rings & BIT(transfer_ring)) == 0) { + dev_warn( + &bcm43xx->pdev->dev, + "invalid entry at offset %d for transfer ring %d in completion ring %d\n", + pos, transfer_ring, ring->ring_id); + return; + } + + dev_dbg(&bcm43xx->pdev->dev, + "entry in completion ring %d for transfer ring %d with msg_id %d\n", + ring->ring_id, transfer_ring, msg_id); + + switch (transfer_ring) { + case BCM43XX_XFER_RING_CONTROL: + bcm43xx_handle_ack(bcm43xx, &bcm43xx->control_h2d_ring, msg_id); + break; + case BCM43XX_XFER_RING_HCI_H2D: + bcm43xx_handle_ack(bcm43xx, &bcm43xx->hci_h2d_ring, msg_id); + break; + case BCM43XX_XFER_RING_HCI_D2H: + bcm43xx_handle_event(bcm43xx, &bcm43xx->hci_d2h_ring, + HCI_EVENT_PKT, data, data_len); + break; + case BCM43XX_XFER_RING_SCO_H2D: + bcm43xx_handle_ack(bcm43xx, &bcm43xx->sco_h2d_ring, msg_id); + break; + case BCM43XX_XFER_RING_SCO_D2H: + bcm43xx_handle_event(bcm43xx, &bcm43xx->sco_d2h_ring, + HCI_SCODATA_PKT, data, data_len); + break; + case BCM43XX_XFER_RING_ACL_H2D: + bcm43xx_handle_ack(bcm43xx, &bcm43xx->acl_h2d_ring, msg_id); + break; + case BCM43XX_XFER_RING_ACL_D2H: + // TODO: cleanup + if (entry->flags & BCM43XX_XFER_RING_FLAG_PAYLOAD_MAPPED) + data = bcm43xx->acl_d2h_ring.payloads + + (msg_id & 0xff) * + bcm43xx->acl_d2h_ring.mapped_payload_size; + + bcm43xx_handle_event(bcm43xx, &bcm43xx->acl_d2h_ring, + HCI_ACLDATA_PKT, data, data_len); + break; + default: + dev_err(&bcm43xx->pdev->dev, + "entry in completion ring %d for unknown transfer ring %d with msg_id %d\n", + ring->ring_id, transfer_ring, msg_id); + } +} + +static void bcm43xx_poll_completion_ring(struct bcm43xx_data *bcm43xx, + struct bcm43xx_completion_ring *ring) +{ + u16 tail; + __le16 *heads = bcm43xx->ring_state->completion_ring_head; + __le16 *tails = bcm43xx->ring_state->completion_ring_tail; + + if (!ring->enabled) + return; + + tail = le16_to_cpu(tails[ring->ring_id]); + dev_dbg(&bcm43xx->pdev->dev, + "completion ring #%d: head: %d, tail: %d\n", ring->ring_id, + le16_to_cpu(heads[ring->ring_id]), tail); + + while (tail != le16_to_cpu(READ_ONCE(heads[ring->ring_id]))) { + /* + * ensure the CPU doesn't speculate through the comparison. + * otherwise it might already read the (empty) queue entry + * before the updated head has been loaded and checked. 
+ */ + dma_rmb(); + + bcm43xx_handle_completion(bcm43xx, ring, tail); + + tail = (tail + 1) % ring->n_entries; + tails[ring->ring_id] = cpu_to_le16(tail); + } +} + +static irqreturn_t bcm43xx_irq(int irq, void *data) +{ + struct bcm43xx_data *bcm43xx = data; + u32 bootstage, rti_status; + + bootstage = ioread32(bcm43xx->bar2 + BCM43XX_BAR2_BOOTSTAGE); + rti_status = ioread32(bcm43xx->bar2 + BCM43XX_BAR2_RTI_STATUS); + dev_dbg(&bcm43xx->pdev->dev, + "interrupt; bootstage = %d, rti state = %d\n", bootstage, + rti_status); + + complete(&bcm43xx->event); + + bcm43xx_poll_completion_ring(bcm43xx, &bcm43xx->control_ack_ring); + bcm43xx_poll_completion_ring(bcm43xx, &bcm43xx->hci_acl_event_ring); + bcm43xx_poll_completion_ring(bcm43xx, &bcm43xx->hci_acl_ack_ring); + bcm43xx_poll_completion_ring(bcm43xx, &bcm43xx->sco_ack_ring); + bcm43xx_poll_completion_ring(bcm43xx, &bcm43xx->sco_event_ring); + + return IRQ_HANDLED; +} + +static int bcm43xx_enqueue(struct bcm43xx_data *bcm43xx, + struct bcm43xx_transfer_ring *ring, void *data, + size_t len, bool wait) +{ + unsigned long flags; + struct bcm43xx_xfer_ring_entry *entry; + void *payload; + size_t offset; + u16 head, tail, new_head; + u16 raw_msgid; + int ret, msgid; + DECLARE_COMPLETION_ONSTACK(event); + + if (len > ring->payload_size && len > ring->mapped_payload_size) + return -EINVAL; + if (wait && !ring->allow_wait) + return -EINVAL; + if (ring->virtual) + return -EINVAL; + + spin_lock_irqsave(&ring->lock, flags); + + head = le16_to_cpu(bcm43xx->ring_state->xfer_ring_head[ring->ring_id]); + + /* tail is changed using DMA; prevent stale reads */ + dma_rmb(); + tail = le16_to_cpu(bcm43xx->ring_state->xfer_ring_tail[ring->ring_id]); + + new_head = (head + 1) % ring->n_entries; + + if (new_head == tail) { + ret = -EINVAL; + goto out; + } + + msgid = bitmap_find_free_region(ring->msgids, ring->n_entries, 0); + if (msgid < 0) { + ret = -EINVAL; + goto out; + } + + raw_msgid = FIELD_PREP(BCM43XX_MSGID_GENERATION, ring->generation); + raw_msgid |= FIELD_PREP(BCM43XX_MSGID_ID, msgid); + + offset = head * (sizeof(*entry) + ring->payload_size); + entry = ring->ring + offset; + + memset(entry, 0, sizeof(*entry)); + entry->id = cpu_to_le16(raw_msgid); + entry->len = cpu_to_le16(len); + + if (len <= ring->payload_size) { + entry->flags = BCM43XX_XFER_RING_FLAG_PAYLOAD_IN_FOOTER; + payload = ring->ring + offset + sizeof(*entry); + } else { + entry->flags = BCM43XX_XFER_RING_FLAG_PAYLOAD_MAPPED; + entry->payload = cpu_to_le64(ring->payloads_dma + + msgid * ring->mapped_payload_size); + payload = ring->payloads + msgid * ring->mapped_payload_size; + } + + memcpy(payload, data, len); + + if (wait) + ring->events[msgid] = &event; + + dev_dbg(&bcm43xx->pdev->dev, + "updating head for transfer queue #%d to %d\n", ring->ring_id, + new_head); + bcm43xx->ring_state->xfer_ring_head[ring->ring_id] = + cpu_to_le16(new_head); + + // TODO: check if this is actually correct for sync rings + if (!ring->sync) + bcm43xx_ring_doorbell(bcm43xx, ring->doorbell, new_head); + ret = 0; + +out: + spin_unlock_irqrestore(&ring->lock, flags); + + if (ret == 0 && wait) { + ret = wait_for_completion_interruptible_timeout( + &event, BCM43XX_TIMEOUT); + if (ret == 0) + ret = -ETIMEDOUT; + else if (ret > 0) + ret = 0; + + spin_lock_irqsave(&ring->lock, flags); + ring->events[msgid] = NULL; + spin_unlock_irqrestore(&ring->lock, flags); + } + + return ret; +} + +static int bcm43xx_create_completion_ring(struct bcm43xx_data *bcm43xx, + struct bcm43xx_completion_ring *ring) +{ + struct 
bcm43xx_create_completion_ring_msg msg; + int ret; + + if (ring->enabled) { + dev_warn(&bcm43xx->pdev->dev, "ring already enabled\n"); + return 0; + } + + memset(ring->ring, 0, + ring->n_entries * (sizeof(struct bcm43xx_completion_ring_entry) + + ring->payload_size)); + memset(&msg, 0, sizeof(msg)); + msg.msg_type = BCM43XX_CONTROL_MSG_CREATE_COMPLETION_RING; + msg.id = cpu_to_le16(ring->ring_id); + msg.id_again = cpu_to_le16(ring->ring_id); + msg.ring_iova = cpu_to_le64(ring->ring_dma); + msg.n_elements = cpu_to_le16(ring->n_entries); + msg.intmod_bytes = cpu_to_le32(0xffffffff); + msg.unk = cpu_to_le32(0xffffffff); + msg.intmod_delay = cpu_to_le16(ring->delay); + msg.footer_size = ring->payload_size / 4; + + ret = bcm43xx_enqueue(bcm43xx, &bcm43xx->control_h2d_ring, &msg, + sizeof(msg), true); + if (!ret) + ring->enabled = true; + + return ret; +} + +static int bcm43xx_destroy_completion_ring(struct bcm43xx_data *bcm43xx, + struct bcm43xx_completion_ring *ring) +{ + struct bcm43xx_destroy_completion_ring_msg msg; + int ret; + + memset(&msg, 0, sizeof(msg)); + msg.msg_type = BCM43XX_CONTROL_MSG_DESTROY_COMPLETION_RING; + msg.ring_id = cpu_to_le16(ring->ring_id); + + ret = bcm43xx_enqueue(bcm43xx, &bcm43xx->control_h2d_ring, &msg, + sizeof(msg), true); + if (ret) + dev_warn(&bcm43xx->pdev->dev, + "failed to destroy completion ring %d\n", + ring->ring_id); + + ring->enabled = false; + return ret; +} + +static int bcm43xx_create_transfer_ring(struct bcm43xx_data *bcm43xx, + struct bcm43xx_transfer_ring *ring) +{ + struct bcm43xx_create_transfer_ring_msg msg; + u16 flags = 0; + int ret, i; + unsigned long spinlock_flags; + + if (ring->virtual) + flags |= BCM43XX_XFER_RING_FLAG_VIRTUAL; + if (ring->sync) + flags |= BCM43XX_XFER_RING_FLAG_SYNC; + + spin_lock_irqsave(&ring->lock, spinlock_flags); + memset(&msg, 0, sizeof(msg)); + msg.msg_type = BCM43XX_CONTROL_MSG_CREATE_XFER_RING; + msg.ring_id = cpu_to_le16(ring->ring_id); + msg.ring_id_again = cpu_to_le16(ring->ring_id); + msg.ring_iova = cpu_to_le64(ring->ring_dma); + msg.n_elements = cpu_to_le16(ring->n_entries); + msg.completion_ring_id = cpu_to_le16(ring->completion_ring); + msg.doorbell = cpu_to_le16(ring->doorbell); + msg.flags = cpu_to_le16(flags); + msg.footer_size = ring->payload_size / 4; + + bcm43xx->ring_state->xfer_ring_head[ring->ring_id] = 0; + bcm43xx->ring_state->xfer_ring_tail[ring->ring_id] = 0; + ring->generation++; + spin_unlock_irqrestore(&ring->lock, spinlock_flags); + + ret = bcm43xx_enqueue(bcm43xx, &bcm43xx->control_h2d_ring, &msg, + sizeof(msg), true); + + spin_lock_irqsave(&ring->lock, spinlock_flags); + + if (ring->d2h_buffers_only) { + for (i = 0; i < ring->n_entries; ++i) { + struct bcm43xx_xfer_ring_entry *entry = + ring->ring + i * sizeof(*entry); + u16 raw_msgid = FIELD_PREP(BCM43XX_MSGID_GENERATION, + ring->generation); + raw_msgid |= FIELD_PREP(BCM43XX_MSGID_ID, i); + + memset(entry, 0, sizeof(*entry)); + entry->id = cpu_to_le16(raw_msgid); + entry->len = cpu_to_le16(ring->mapped_payload_size); + entry->flags = BCM43XX_XFER_RING_FLAG_PAYLOAD_MAPPED; + entry->payload = + cpu_to_le64(ring->payloads_dma + + i * ring->mapped_payload_size); + } + } + + /* this primes the device->host side */ + if (ring->virtual || ring->d2h_buffers_only) { + bcm43xx->ring_state->xfer_ring_head[ring->ring_id] = 0xf; + bcm43xx_ring_doorbell(bcm43xx, ring->doorbell, 0xf); + } + + ring->enabled = true; + spin_unlock_irqrestore(&ring->lock, spinlock_flags); + + return ret; +} + +static int bcm43xx_destroy_transfer_ring(struct 
bcm43xx_data *bcm43xx, + struct bcm43xx_transfer_ring *ring) +{ + struct bcm43xx_destroy_transfer_ring_msg msg; + int ret; + + memset(&msg, 0, sizeof(msg)); + msg.msg_type = BCM43XX_CONTROL_MSG_DESTROY_XFER_RING; + msg.ring_id = cpu_to_le16(ring->ring_id); + + ret = bcm43xx_enqueue(bcm43xx, &bcm43xx->control_h2d_ring, &msg, + sizeof(msg), true); + if (ret) + dev_warn(&bcm43xx->pdev->dev, + "failed to destroy transfer ring %d\n", ring->ring_id); + + ring->enabled = false; + return ret; +} + +static int bcm43xx_send_calibration(struct bcm43xx_data *bcm43xx, + const void *cal_blob, size_t cal_blob_size) +{ + struct bcm43xx_hci_send_calibration_cmd cmd; + struct sk_buff *skb; + off_t done = 0; + size_t left = cal_blob_size; + u16 blocks_left; + int ret; + + if (!cal_blob) { + dev_err(&bcm43xx->pdev->dev, + "no calibration data available.\n"); + return -ENOENT; + } + + blocks_left = DIV_ROUND_UP(left, sizeof(cmd.data)) - 1; + + while (left) { + size_t transfer_len = min(left, sizeof(cmd.data)); + + memset(&cmd, 0, sizeof(cmd)); + cmd.unk = 0x03; + cmd.blocks_left = cpu_to_le16(blocks_left); + memcpy(cmd.data, cal_blob + done, transfer_len); + + dev_dbg(&bcm43xx->pdev->dev, + "btbcmpci: sending calibration chunk; left (chunks): %d, left(bytes): %zu\n", + cmd.blocks_left, left); + + skb = __hci_cmd_sync(bcm43xx->hdev, 0xfd97, sizeof(cmd), &cmd, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + ret = PTR_ERR(skb); + dev_err(&bcm43xx->pdev->dev, + "btbcmpci: send calibration failed (%d)", ret); + return ret; + } + kfree_skb(skb); + + blocks_left--; + left -= transfer_len; + done += transfer_len; + } + + return 0; +} + +static int bcm4378_send_calibration(struct bcm43xx_data *bcm43xx) +{ + if (strcmp(bcm43xx->stepping, "b1") == 0) + return bcm43xx_send_calibration( + bcm43xx, bcm43xx->taurus_beamforming_cal_blob, + bcm43xx->taurus_beamforming_cal_size); + else + return bcm43xx_send_calibration(bcm43xx, + bcm43xx->taurus_cal_blob, + bcm43xx->taurus_cal_size); +} + +static int bcm4387_send_calibration(struct bcm43xx_data *bcm43xx) +{ + if (strcmp(bcm43xx->stepping, "c2") == 0) + return bcm43xx_send_calibration( + bcm43xx, bcm43xx->taurus_beamforming_cal_blob, + bcm43xx->taurus_beamforming_cal_size); + else + return bcm43xx_send_calibration(bcm43xx, + bcm43xx->taurus_cal_blob, + bcm43xx->taurus_cal_size); +} + +static const struct firmware *bcm43xx_request_blob(struct bcm43xx_data *bcm43xx, + const char *suffix) +{ + const struct firmware *fw; + char name[256]; + int ret; + + snprintf(name, sizeof(name), "brcm/brcmbt%s%s-%s-%s.%s", + bcm43xx->hw->name, bcm43xx->stepping, bcm43xx->board_type, + bcm43xx->vendor, suffix); + dev_info(&bcm43xx->pdev->dev, "Trying to load '%s'", name); + + ret = request_firmware(&fw, name, &bcm43xx->pdev->dev); + if (!ret) + return fw; + + snprintf(name, sizeof(name), "brcm/brcmbt%s%s-%s.%s", bcm43xx->hw->name, + bcm43xx->stepping, bcm43xx->board_type, suffix); + ret = request_firmware(&fw, name, &bcm43xx->pdev->dev); + dev_info(&bcm43xx->pdev->dev, "Trying to load '%s'", name); + if (!ret) + return fw; + + dev_err(&bcm43xx->pdev->dev, "Unable to load '%s' blob", suffix); + return NULL; +} + +static int bcm43xx_send_ptb(struct bcm43xx_data *bcm43xx) +{ + const struct firmware *fw; + struct bcm43xx_hci_send_ptb_cmd cmd; + struct sk_buff *skb; + off_t done = 0; + size_t left; + u16 blocks_left; + int ret = 0; + + fw = bcm43xx_request_blob(bcm43xx, "ptb"); + if (!fw) { + dev_err(&bcm43xx->pdev->dev, + "btbcmpci: failed to load PTB data"); + return -ENOENT; + } + + left = fw->size; + 
blocks_left = DIV_ROUND_UP(left, sizeof(cmd.data)) - 1; + + while (left) { + size_t transfer_len = min(left, sizeof(cmd.data)); + + memset(&cmd, 0, sizeof(cmd)); + cmd.blocks_left = cpu_to_le16(blocks_left); + memcpy(cmd.data, fw->data + done, transfer_len); + + dev_dbg(&bcm43xx->pdev->dev, + "btbcmpci: sending ptb chunk; left: %zu\n", left); + + skb = __hci_cmd_sync(bcm43xx->hdev, 0xfe0d, sizeof(cmd), &cmd, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + ret = PTR_ERR(skb); + dev_err(&bcm43xx->pdev->dev, + "btbcmpci: sending ptb failed (%d)", ret); + goto out; + } + kfree_skb(skb); + + blocks_left--; + left -= transfer_len; + done += transfer_len; + } + +out: + release_firmware(fw); + return ret; +} + +static int bcm43xx_hci_open(struct hci_dev *hdev) +{ + return 0; +} + +static int bcm43xx_open_rings(struct bcm43xx_data *bcm43xx) +{ + int ret; + + ret = bcm43xx_create_completion_ring(bcm43xx, + &bcm43xx->hci_acl_ack_ring); + if (ret) + return ret; + ret = bcm43xx_create_completion_ring(bcm43xx, + &bcm43xx->hci_acl_event_ring); + if (ret) + goto destroy_hci_acl_ack; + ret = bcm43xx_create_completion_ring(bcm43xx, &bcm43xx->sco_ack_ring); + if (ret) + goto destroy_hci_acl_event; + ret = bcm43xx_create_completion_ring(bcm43xx, &bcm43xx->sco_event_ring); + if (ret) + goto destroy_sco_ack; + dev_dbg(&bcm43xx->pdev->dev, + "all completion rings successfully created!\n"); + + ret = bcm43xx_create_transfer_ring(bcm43xx, &bcm43xx->hci_h2d_ring); + if (ret) + goto destroy_sco_event; + ret = bcm43xx_create_transfer_ring(bcm43xx, &bcm43xx->hci_d2h_ring); + if (ret) + goto destroy_hci_h2d; + ret = bcm43xx_create_transfer_ring(bcm43xx, &bcm43xx->sco_h2d_ring); + if (ret) + goto destroy_hci_d2h; + ret = bcm43xx_create_transfer_ring(bcm43xx, &bcm43xx->sco_d2h_ring); + if (ret) + goto destroy_sco_h2d; + ret = bcm43xx_create_transfer_ring(bcm43xx, &bcm43xx->acl_h2d_ring); + if (ret) + goto destroy_sco_d2h; + ret = bcm43xx_create_transfer_ring(bcm43xx, &bcm43xx->acl_d2h_ring); + if (ret) + goto destroy_acl_h2d; + dev_dbg(&bcm43xx->pdev->dev, + "all transfer rings successfully created!\n"); + + return 0; + +destroy_acl_h2d: + bcm43xx_destroy_transfer_ring(bcm43xx, &bcm43xx->acl_h2d_ring); +destroy_sco_d2h: + bcm43xx_destroy_transfer_ring(bcm43xx, &bcm43xx->sco_d2h_ring); +destroy_sco_h2d: + bcm43xx_destroy_transfer_ring(bcm43xx, &bcm43xx->sco_h2d_ring); +destroy_hci_d2h: + bcm43xx_destroy_transfer_ring(bcm43xx, &bcm43xx->hci_h2d_ring); +destroy_hci_h2d: + bcm43xx_destroy_transfer_ring(bcm43xx, &bcm43xx->hci_d2h_ring); +destroy_sco_event: + bcm43xx_destroy_completion_ring(bcm43xx, &bcm43xx->sco_event_ring); +destroy_sco_ack: + bcm43xx_destroy_completion_ring(bcm43xx, &bcm43xx->sco_ack_ring); +destroy_hci_acl_event: + bcm43xx_destroy_completion_ring(bcm43xx, &bcm43xx->hci_acl_event_ring); +destroy_hci_acl_ack: + bcm43xx_destroy_completion_ring(bcm43xx, &bcm43xx->hci_acl_ack_ring); + + dev_warn(&bcm43xx->pdev->dev, "Creating rings failed with %d\n", ret); + return ret; +} + +static int bcm43xx_hci_close(struct hci_dev *hdev) +{ + struct bcm43xx_data *bcm43xx = hci_get_drvdata(hdev); + +#if 0 + bcm43xx_destroy_transfer_ring(bcm43xx, &bcm43xx->acl_d2h_ring); + bcm43xx_destroy_transfer_ring(bcm43xx, &bcm43xx->acl_h2d_ring); + bcm43xx_destroy_transfer_ring(bcm43xx, &bcm43xx->sco_d2h_ring); + bcm43xx_destroy_transfer_ring(bcm43xx, &bcm43xx->sco_h2d_ring); + bcm43xx_destroy_transfer_ring(bcm43xx, &bcm43xx->hci_d2h_ring); + bcm43xx_destroy_transfer_ring(bcm43xx, &bcm43xx->hci_h2d_ring); + + 
bcm43xx_destroy_completion_ring(bcm43xx, &bcm43xx->sco_event_ring); + bcm43xx_destroy_completion_ring(bcm43xx, &bcm43xx->sco_ack_ring); + bcm43xx_destroy_completion_ring(bcm43xx, &bcm43xx->hci_acl_event_ring); + bcm43xx_destroy_completion_ring(bcm43xx, &bcm43xx->hci_acl_ack_ring); +#endif + + return 0; +} + +static int bcm43xx_hci_setup(struct hci_dev *hdev) +{ + struct bcm43xx_data *bcm43xx = hci_get_drvdata(hdev); + int ret; + + if (bcm43xx->hw->send_calibration) { + ret = bcm43xx->hw->send_calibration(bcm43xx); + if (ret) + return ret; + } + + ret = bcm43xx_send_ptb(bcm43xx); + if (ret) + return ret; + + return 0; +} + +static int bcm43xx_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct bcm43xx_data *bcm43xx = hci_get_drvdata(hdev); + struct bcm43xx_transfer_ring *ring; + int ret; + + switch (hci_skb_pkt_type(skb)) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + ring = &bcm43xx->hci_h2d_ring; + break; + + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + ring = &bcm43xx->acl_h2d_ring; + break; + + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + ring = &bcm43xx->sco_h2d_ring; + break; + + default: + return -EILSEQ; + } + + ret = bcm43xx_enqueue(bcm43xx, ring, skb->data, skb->len, false); + if (ret < 0) { + hdev->stat.err_tx++; + return ret; + } + + hdev->stat.byte_tx += skb->len; + kfree_skb(skb); + return ret; +} + +static int bcm43xx_hci_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + struct sk_buff *skb; + int err; + + skb = __hci_cmd_sync(hdev, 0xfc01, 6, bdaddr, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, + "hci_bcm43xx: Change address command failed (%d)", + err); + return err; + } + kfree_skb(skb); + + return 0; +} + +static int bcm43xx_alloc_transfer_ring(struct bcm43xx_data *bcm43xx, + struct bcm43xx_transfer_ring *ring) +{ + size_t entry_size; + + spin_lock_init(&ring->lock); + ring->payload_size = ALIGN(ring->payload_size, 4); + ring->mapped_payload_size = ALIGN(ring->mapped_payload_size, 4); + + if (ring->payload_size > BCM43XX_XFER_RING_MAX_INPLACE_PAYLOAD_SIZE) + return -EINVAL; + if (ring->n_entries > BCM43XX_MAX_RING_SIZE) + return -EINVAL; + if (ring->virtual && ring->allow_wait) + return -EINVAL; + + if (ring->d2h_buffers_only) { + if (ring->virtual) + return -EINVAL; + if (ring->payload_size) + return -EINVAL; + if (!ring->mapped_payload_size) + return -EINVAL; + } + if (ring->virtual) + return 0; + + entry_size = + ring->payload_size + sizeof(struct bcm43xx_xfer_ring_entry); + ring->ring = dmam_alloc_coherent(&bcm43xx->pdev->dev, + ring->n_entries * entry_size, + &ring->ring_dma, GFP_KERNEL); + if (!ring->ring) + return -ENOMEM; + + if (ring->allow_wait) { + ring->events = devm_kcalloc(&bcm43xx->pdev->dev, + ring->n_entries, + sizeof(*ring->events), GFP_KERNEL); + if (!ring->events) + return -ENOMEM; + } + + if (ring->mapped_payload_size) { + ring->payloads = dmam_alloc_coherent( + &bcm43xx->pdev->dev, + ring->n_entries * ring->mapped_payload_size, + &ring->payloads_dma, GFP_KERNEL); + if (!ring->payloads) + return -ENOMEM; + } + + return 0; +} + +static int bcm43xx_alloc_completion_ring(struct bcm43xx_data *bcm43xx, + struct bcm43xx_completion_ring *ring) +{ + size_t entry_size; + + ring->payload_size = ALIGN(ring->payload_size, 4); + if (ring->payload_size > BCM43XX_XFER_RING_MAX_INPLACE_PAYLOAD_SIZE) + return -EINVAL; + if (ring->n_entries > BCM43XX_MAX_RING_SIZE) + return -EINVAL; + + entry_size = ring->payload_size + + sizeof(struct bcm43xx_completion_ring_entry); + + ring->ring = 
dmam_alloc_coherent(&bcm43xx->pdev->dev, + ring->n_entries * entry_size, + &ring->ring_dma, GFP_KERNEL); + if (!ring->ring) + return -ENOMEM; + return 0; +} + +static int bcm43xx_init_context(struct bcm43xx_data *bcm43xx) +{ + struct device *dev = &bcm43xx->pdev->dev; + + bcm43xx->ctx = dmam_alloc_coherent(dev, sizeof(*bcm43xx->ctx), + &bcm43xx->ctx_dma, GFP_KERNEL); + if (!bcm43xx->ctx) + return -ENOMEM; + memset(bcm43xx->ctx, 0, sizeof(*bcm43xx->ctx)); + + bcm43xx->ring_state = + dmam_alloc_coherent(dev, sizeof(*bcm43xx->ring_state), + &bcm43xx->ring_state_dma, GFP_KERNEL); + if (!bcm43xx->ring_state) + return -ENOMEM; + memset(bcm43xx->ring_state, 0, sizeof(*bcm43xx->ring_state)); + + bcm43xx->ctx->version = cpu_to_le16(1); + bcm43xx->ctx->size = cpu_to_le16(sizeof(*bcm43xx->ctx)); + bcm43xx->ctx->enabled_caps = cpu_to_le16(2); + + // TODO: do we actually care about the contents here? + dmam_alloc_coherent(&bcm43xx->pdev->dev, PAGE_SIZE, + &bcm43xx->ctx->per_info_addr, GFP_KERNEL); + + bcm43xx->ctx->xfer_ring_heads_addr = + bcm43xx->ring_state_dma + + offsetof(struct bcm43xx_ring_state, xfer_ring_head); + bcm43xx->ctx->xfer_ring_tails_addr = + bcm43xx->ring_state_dma + + offsetof(struct bcm43xx_ring_state, xfer_ring_tail); + bcm43xx->ctx->completion_ring_heads_addr = + bcm43xx->ring_state_dma + + offsetof(struct bcm43xx_ring_state, completion_ring_head); + bcm43xx->ctx->completion_ring_tails_addr = + bcm43xx->ring_state_dma + + offsetof(struct bcm43xx_ring_state, completion_ring_tail); + + bcm43xx->ctx->n_completion_rings = + cpu_to_le16(BCM43XX_N_COMPLETION_RINGS); + bcm43xx->ctx->n_xfer_rings = cpu_to_le16(BCM43XX_N_TRANSFER_RINGS); + + bcm43xx->ctx->control_completion_ring_addr = + cpu_to_le64(bcm43xx->control_ack_ring.ring_dma); + bcm43xx->ctx->control_completion_ring_n_entries = + cpu_to_le16(bcm43xx->control_ack_ring.n_entries); + bcm43xx->ctx->control_completion_ring_doorbell = cpu_to_le16(0xffff); + bcm43xx->ctx->control_completion_ring_msi = 0; + bcm43xx->ctx->control_completion_ring_header_size = 0; + bcm43xx->ctx->control_completion_ring_footer_size = 0; + + bcm43xx->ctx->control_xfer_ring_addr = + cpu_to_le64(bcm43xx->control_h2d_ring.ring_dma); + bcm43xx->ctx->control_xfer_ring_n_entries = + cpu_to_le16(bcm43xx->control_h2d_ring.n_entries); + bcm43xx->ctx->control_xfer_ring_doorbell = + cpu_to_le16(bcm43xx->control_h2d_ring.doorbell); + bcm43xx->ctx->control_xfer_ring_msi = 0; + bcm43xx->ctx->control_xfer_ring_header_size = 0; + bcm43xx->ctx->control_xfer_ring_footer_size = + bcm43xx->control_h2d_ring.payload_size / 4; + + return 0; +} + +static int bcm43xx_prepare_rings(struct bcm43xx_data *bcm43xx) +{ + int ret; + + /* + * Even though many of these settings appear to be configurable + * when sending the "create ring" messages most of these are + * actually hardcoded in some (and quite possibly all) firmware versions + * and changing them on the host has no effect. + * Specifically, this applies to at least the doorbells, the transfer + * and completion ring ids and their mapping (e.g. both HCI and ACL + * entries will always be queued in completion rings 1 and 2 no matter + * what we configure here). 
+ */ + bcm43xx->control_ack_ring.ring_id = BCM43XX_ACK_RING_CONTROL; + bcm43xx->control_ack_ring.n_entries = 32; + bcm43xx->control_ack_ring.transfer_rings = + BIT(BCM43XX_XFER_RING_CONTROL); + + bcm43xx->hci_acl_ack_ring.ring_id = BCM43XX_ACK_RING_HCI_ACL; + bcm43xx->hci_acl_ack_ring.n_entries = 256; + bcm43xx->hci_acl_ack_ring.transfer_rings = + BIT(BCM43XX_XFER_RING_HCI_H2D) | BIT(BCM43XX_XFER_RING_ACL_H2D); + bcm43xx->hci_acl_ack_ring.delay = 1000; + + bcm43xx->hci_acl_event_ring.ring_id = BCM43XX_EVENT_RING_HCI_ACL; + // TODO: this is just a max-sized HCI frame, but small for ACL? + bcm43xx->hci_acl_event_ring.payload_size = HCI_MAX_EVENT_SIZE; + bcm43xx->hci_acl_event_ring.n_entries = 256; + bcm43xx->hci_acl_event_ring.transfer_rings = + BIT(BCM43XX_XFER_RING_HCI_D2H) | BIT(BCM43XX_XFER_RING_ACL_D2H); + bcm43xx->hci_acl_event_ring.delay = 1000; + + bcm43xx->sco_ack_ring.ring_id = BCM43XX_ACK_RING_SCO; + bcm43xx->sco_ack_ring.n_entries = 128; + bcm43xx->sco_ack_ring.transfer_rings = BIT(BCM43XX_XFER_RING_SCO_H2D); + + bcm43xx->sco_event_ring.ring_id = BCM43XX_EVENT_RING_SCO; + bcm43xx->sco_event_ring.payload_size = HCI_MAX_SCO_SIZE; + bcm43xx->sco_event_ring.n_entries = 128; + bcm43xx->sco_event_ring.transfer_rings = BIT(BCM43XX_XFER_RING_SCO_D2H); + + bcm43xx->control_h2d_ring.ring_id = BCM43XX_XFER_RING_CONTROL; + bcm43xx->control_h2d_ring.doorbell = BCM43XX_DOORBELL_CONTROL; + bcm43xx->control_h2d_ring.payload_size = BCM43XX_CONTROL_MSG_SIZE; + bcm43xx->control_h2d_ring.completion_ring = BCM43XX_ACK_RING_CONTROL; + bcm43xx->control_h2d_ring.allow_wait = true; + bcm43xx->control_h2d_ring.n_entries = 128; + + bcm43xx->hci_h2d_ring.ring_id = BCM43XX_XFER_RING_HCI_H2D; + bcm43xx->hci_h2d_ring.doorbell = BCM43XX_DOORBELL_HCI_H2D; + bcm43xx->hci_h2d_ring.payload_size = HCI_MAX_EVENT_SIZE; + bcm43xx->hci_h2d_ring.completion_ring = BCM43XX_ACK_RING_HCI_ACL; + bcm43xx->hci_h2d_ring.n_entries = 128; + + bcm43xx->hci_d2h_ring.ring_id = BCM43XX_XFER_RING_HCI_D2H; + bcm43xx->hci_d2h_ring.doorbell = BCM43XX_DOORBELL_HCI_D2H; + bcm43xx->hci_d2h_ring.completion_ring = BCM43XX_EVENT_RING_HCI_ACL; + bcm43xx->hci_d2h_ring.virtual = true; + bcm43xx->hci_d2h_ring.n_entries = 128; + + bcm43xx->sco_h2d_ring.ring_id = BCM43XX_XFER_RING_SCO_H2D; + bcm43xx->sco_h2d_ring.doorbell = BCM43XX_DOORBELL_SCO; + bcm43xx->sco_h2d_ring.payload_size = HCI_MAX_SCO_SIZE; + bcm43xx->sco_h2d_ring.completion_ring = BCM43XX_ACK_RING_SCO; + bcm43xx->sco_h2d_ring.sync = true; + bcm43xx->sco_h2d_ring.n_entries = 128; + + bcm43xx->sco_d2h_ring.ring_id = BCM43XX_XFER_RING_SCO_D2H; + bcm43xx->sco_d2h_ring.doorbell = BCM43XX_DOORBELL_SCO; + bcm43xx->sco_d2h_ring.completion_ring = BCM43XX_EVENT_RING_SCO; + bcm43xx->sco_d2h_ring.virtual = true; + bcm43xx->sco_d2h_ring.sync = true; + bcm43xx->sco_d2h_ring.n_entries = 128; + + bcm43xx->acl_h2d_ring.ring_id = BCM43XX_XFER_RING_ACL_H2D; + bcm43xx->acl_h2d_ring.doorbell = BCM43XX_DOORBELL_ACL_H2D; + /* + * mapped_payload_size because the largest ACL packet doesn't + * fit inside the largest possible footer + */ + bcm43xx->acl_h2d_ring.mapped_payload_size = HCI_MAX_FRAME_SIZE; + bcm43xx->acl_h2d_ring.completion_ring = BCM43XX_ACK_RING_HCI_ACL; + bcm43xx->acl_h2d_ring.n_entries = 128; + + bcm43xx->acl_d2h_ring.ring_id = BCM43XX_XFER_RING_ACL_D2H; + bcm43xx->acl_d2h_ring.doorbell = BCM43XX_DOORBELL_ACL_D2H; + bcm43xx->acl_d2h_ring.completion_ring = BCM43XX_EVENT_RING_HCI_ACL; + bcm43xx->acl_d2h_ring.d2h_buffers_only = true; + bcm43xx->acl_d2h_ring.mapped_payload_size = HCI_MAX_FRAME_SIZE; + 
bcm43xx->acl_d2h_ring.n_entries = 128; + + /* + * no need for any cleanup since this is only called from _probe + * and only devres-managed allocations are used + */ + ret = bcm43xx_alloc_transfer_ring(bcm43xx, &bcm43xx->control_h2d_ring); + if (ret) + return ret; + ret = bcm43xx_alloc_transfer_ring(bcm43xx, &bcm43xx->hci_h2d_ring); + if (ret) + return ret; + ret = bcm43xx_alloc_transfer_ring(bcm43xx, &bcm43xx->hci_d2h_ring); + if (ret) + return ret; + ret = bcm43xx_alloc_transfer_ring(bcm43xx, &bcm43xx->sco_h2d_ring); + if (ret) + return ret; + ret = bcm43xx_alloc_transfer_ring(bcm43xx, &bcm43xx->sco_d2h_ring); + if (ret) + return ret; + ret = bcm43xx_alloc_transfer_ring(bcm43xx, &bcm43xx->acl_h2d_ring); + if (ret) + return ret; + ret = bcm43xx_alloc_transfer_ring(bcm43xx, &bcm43xx->acl_d2h_ring); + if (ret) + return ret; + + ret = bcm43xx_alloc_completion_ring(bcm43xx, + &bcm43xx->control_ack_ring); + if (ret) + return ret; + ret = bcm43xx_alloc_completion_ring(bcm43xx, + &bcm43xx->hci_acl_ack_ring); + if (ret) + return ret; + ret = bcm43xx_alloc_completion_ring(bcm43xx, + &bcm43xx->hci_acl_event_ring); + if (ret) + return ret; + ret = bcm43xx_alloc_completion_ring(bcm43xx, &bcm43xx->sco_ack_ring); + if (ret) + return ret; + ret = bcm43xx_alloc_completion_ring(bcm43xx, &bcm43xx->sco_event_ring); + if (ret) + return ret; + + return 0; +} + +static int bcm43xx_boot(struct bcm43xx_data *bcm43xx) +{ + u32 bootstage; + const struct firmware *fw; + void *bfr; + dma_addr_t fw_dma; + int ret = 0; + + fw = bcm43xx_request_blob(bcm43xx, "bin"); + if (!fw) + return -ENOENT; + + bfr = dma_alloc_coherent(&bcm43xx->pdev->dev, fw->size, &fw_dma, + GFP_KERNEL); + if (!bfr) { + ret = -ENOMEM; + goto out_release_fw; + } + + memcpy(bfr, fw->data, fw->size); + + iowrite32(0, bcm43xx->bar0 + BCM43XX_BAR0_HOST_WINDOW_LO); + iowrite32(0, bcm43xx->bar0 + BCM43XX_BAR0_HOST_WINDOW_HI); + iowrite32(BCM43XX_DMA_MASK, + bcm43xx->bar0 + BCM43XX_BAR0_HOST_WINDOW_SIZE); + + iowrite32(lower_32_bits(fw_dma), bcm43xx->bar2 + BCM43XX_BAR2_FW_LO); + iowrite32(upper_32_bits(fw_dma), bcm43xx->bar2 + BCM43XX_BAR2_FW_HI); + iowrite32(fw->size, bcm43xx->bar2 + BCM43XX_BAR2_FW_SIZE); + iowrite32(0, bcm43xx->bar0 + BCM43XX_BAR0_FW_DOORBELL); + + ret = wait_for_completion_interruptible_timeout(&bcm43xx->event, + BCM43XX_TIMEOUT); + if (ret == 0) { + ret = -ETIMEDOUT; + goto out_dma_free; + } else if (ret < 0) { + goto out_dma_free; + } + + bootstage = ioread32(bcm43xx->bar2 + BCM43XX_BAR2_BOOTSTAGE); + if (bootstage != 2) { + dev_err(&bcm43xx->pdev->dev, "boostage %d != 2\n", bootstage); + ret = -ENXIO; + goto out_dma_free; + } + + dev_dbg(&bcm43xx->pdev->dev, "firmware has booted (stage = %x)\n", + bootstage); + ret = 0; + +out_dma_free: + dma_free_coherent(&bcm43xx->pdev->dev, fw->size, bfr, fw_dma); +out_release_fw: + release_firmware(fw); + return ret; +} + +static int bcm43xx_setup_rti(struct bcm43xx_data *bcm43xx) +{ + u32 rti_status; + int ret; + + /* start RTI */ + iowrite32(1, bcm43xx->bar0 + BCM43XX_BAR0_RTI_CONTROL); + + ret = wait_for_completion_interruptible_timeout(&bcm43xx->event, + BCM43XX_TIMEOUT); + if (ret == 0) { + dev_err(&bcm43xx->pdev->dev, + "timed out while waiting for RTI to transition to state 1"); + return -ETIMEDOUT; + } else if (ret < 0) { + return ret; + } + + rti_status = ioread32(bcm43xx->bar2 + BCM43XX_BAR2_RTI_STATUS); + if (rti_status != 1) { + dev_err(&bcm43xx->pdev->dev, "RTI did not ack state 1 (%d)\n", + rti_status); + return -ENODEV; + } + dev_dbg(&bcm43xx->pdev->dev, "RTI is in state 1\n"); 
+ + /* allow access to the entire IOVA space again */ + iowrite32(0, bcm43xx->bar2 + BCM43XX_BAR2_RTI_WINDOW_LO); + iowrite32(0, bcm43xx->bar2 + BCM43XX_BAR2_RTI_WINDOW_HI); + iowrite32(BCM43XX_DMA_MASK, + bcm43xx->bar2 + BCM43XX_BAR2_RTI_WINDOW_SIZE); + + /* setup "Converged IPC" context */ + iowrite32(lower_32_bits(bcm43xx->ctx_dma), + bcm43xx->bar2 + BCM43XX_BAR2_CONTEXT_ADDR_LO); + iowrite32(upper_32_bits(bcm43xx->ctx_dma), + bcm43xx->bar2 + BCM43XX_BAR2_CONTEXT_ADDR_HI); + iowrite32(2, bcm43xx->bar0 + BCM43XX_BAR0_RTI_CONTROL); + + ret = wait_for_completion_interruptible_timeout(&bcm43xx->event, + BCM43XX_TIMEOUT); + if (ret == 0) { + dev_err(&bcm43xx->pdev->dev, + "timed out while waiting for RTI to transition to state 2"); + return -ETIMEDOUT; + } else if (ret < 0) { + return ret; + } + + rti_status = ioread32(bcm43xx->bar2 + BCM43XX_BAR2_RTI_STATUS); + if (rti_status != 2) { + dev_err(&bcm43xx->pdev->dev, "RTI did not ack state 2 (%d)\n", + rti_status); + return -ENODEV; + } + + dev_dbg(&bcm43xx->pdev->dev, + "RTI is in state 2; control ring is ready\n"); + bcm43xx->control_ack_ring.enabled = true; + + return 0; +} + +static int bcm43xx_parse_otp_board_params(struct bcm43xx_data *bcm43xx, + char tag, const char *val, size_t len) +{ + if (tag != 'V') + return 0; + + strscpy(bcm43xx->vendor, val, len + 1); + return 0; +} + +static int bcm43xx_parse_otp_chip_params(struct bcm43xx_data *bcm43xx, char tag, + const char *val, size_t len) +{ + size_t idx = 0; + + if (tag != 's') + return 0; + + /* + * this won't write out of bounds since len < BCM43XX_OTP_MAX_PARAM_LEN + * and sizeof(bcm43xx->stepping) = BCM43XX_OTP_MAX_PARAM_LEN + */ + while (len != 0) { + bcm43xx->stepping[idx] = tolower(val[idx]); + if (val[idx] == '\0') + return 0; + + idx++; + len--; + } + + bcm43xx->stepping[idx] = '\0'; + return 0; +} + +static int bcm43xx_parse_opt_str(struct bcm43xx_data *bcm43xx, const u8 *str, + int (*parse_arg)(struct bcm43xx_data *, char, + const char *, size_t)) +{ + const char *p; + int ret; + + p = skip_spaces(str); + while (*p) { + char tag = *p++; + const char *end; + size_t len; + + if (*p++ != '=') /* implicit NUL check */ + return -EINVAL; + + /* *p might be NUL here, if so end == p and len == 0 */ + end = strchrnul(p, ' '); + len = end - p; + + /* leave 1 byte for NUL in destination string */ + if (len > (BCM43XX_OTP_MAX_PARAM_LEN - 1)) + return -EINVAL; + + /* Copy len characters plus a NUL terminator */ + ret = parse_arg(bcm43xx, tag, p, len); + if (ret) + return ret; + + /* Skip to next arg, if any */ + p = skip_spaces(end); + } + + return 0; +} + +static int bcm43xx_parse_otp_sys_vendor(struct bcm43xx_data *bcm43xx, u8 *otp, + size_t size) +{ + int idx = 4; + const char *chip_params; + const char *board_params; + int ret; + + /* 4-byte header and two empty strings */ + if (size < 6) + return -EINVAL; + + if (get_unaligned_le32(otp) != BCM43XX_OTP_VENDOR_HDR) + return -EINVAL; + + chip_params = &otp[idx]; + + /* Skip first string, including terminator */ + idx += strnlen(chip_params, size - idx) + 1; + if (idx >= size) + return -EINVAL; + + board_params = &otp[idx]; + + /* Skip to terminator of second string */ + idx += strnlen(board_params, size - idx); + if (idx >= size) + return -EINVAL; + + /* At this point both strings are guaranteed NUL-terminated */ + dev_dbg(&bcm43xx->pdev->dev, + "OTP: chip_params='%s' board_params='%s'\n", chip_params, + board_params); + + ret = bcm43xx_parse_opt_str(bcm43xx, chip_params, + bcm43xx_parse_otp_chip_params); + if (ret) + return ret; + + ret 
= bcm43xx_parse_opt_str(bcm43xx, board_params, + bcm43xx_parse_otp_board_params); + if (ret) + return ret; + + dev_dbg(&bcm43xx->pdev->dev, "OTP: stepping=%s, vendor=%s\n", + bcm43xx->stepping, bcm43xx->vendor); + + if (!bcm43xx->stepping[0] || !bcm43xx->vendor[0]) + return -EINVAL; + + return 0; +} + +static int bcm43xx_read_otp(struct bcm43xx_data *bcm43xx) +{ + u8 otp[BCM43XX_MAX_OTP_SIZE]; + int i; + int ret = -ENOENT; + + for (i = 0; i < bcm43xx->hw->otp_size; ++i) + otp[i] = ioread8(bcm43xx->bar0 + bcm43xx->hw->otp_offset + i); + + i = 0; + while (i < (bcm43xx->hw->otp_size - 1)) { + u8 type = otp[i]; + u8 length = otp[i + 1]; + + if (type == 0) + break; + + if ((i + 2 + length) > bcm43xx->hw->otp_size) + break; + + switch (type) { + case BCM43XX_OTP_SYS_VENDOR: + dev_dbg(&bcm43xx->pdev->dev, + "OTP @ 0x%x (%d): SYS_VENDOR", i, length); + ret = bcm43xx_parse_otp_sys_vendor(bcm43xx, &otp[i + 2], + length); + break; + case BCM43XX_OTP_CIS: + dev_dbg(&bcm43xx->pdev->dev, + "OTP @ 0x%x (%d): BCM43XX_CIS", i, length); + break; + default: + dev_dbg(&bcm43xx->pdev->dev, "OTP @ 0x%x (%d): unknown", + i, length); + break; + } + + i += 2 + length; + } + + return ret; +} + +static int bcm43xx_init_cfg(struct bcm43xx_data *bcm43xx) +{ + int ret; + u32 ctrl; + + ret = pci_write_config_dword(bcm43xx->pdev, + BCM43XX_PCIECFG_BAR0_WINDOW0, + bcm43xx->hw->bar0_window0); + if (ret) + return ret; + + ret = pci_write_config_dword(bcm43xx->pdev, + BCM43XX_PCIECFG_BAR0_WINDOW1, + bcm43xx->hw->bar0_window1); + if (ret) + return ret; + + ret = pci_write_config_dword(bcm43xx->pdev, + BCM43XX_PCIECFG_BAR0_WINDOW4, + BCM43XX_PCIECFG_BAR0_WINDOW4_DEFAULT); + if (ret) + return ret; + + if (bcm43xx->hw->has_bar0_window5) { + ret = pci_write_config_dword(bcm43xx->pdev, + BCM43XX_PCIECFG_BAR0_WINDOW5, + bcm43xx->hw->bar0_window5); + if (ret) + return ret; + } + + ret = pci_write_config_dword(bcm43xx->pdev, BCM43XX_PCIECFG_BAR2_WINDOW, + BCM43XX_PCIECFG_BAR2_WINDOW_DEFAULT); + if (ret) + return ret; + + ret = pci_read_config_dword(bcm43xx->pdev, BCM43XX_PCIECFG_UNK_CTRL, + &ctrl); + if (ret) + return ret; + + // TODO: 19 and 16 are probably M2M and SS reset + if (bcm43xx->hw->m2m_reset_on_ss_reset_disabled) + ctrl &= ~BIT(19); // BIT(19) = M2M reset? 
+ ctrl |= BIT(16); + + return pci_write_config_dword(bcm43xx->pdev, BCM43XX_PCIECFG_UNK_CTRL, + ctrl); +} + +static int bcm43xx_probe_of(struct bcm43xx_data *bcm43xx) +{ + struct device_node *np = bcm43xx->pdev->dev.of_node; + int ret; + + if (!np) + return 0; + + ret = of_property_read_string(np, "brcm,board-type", + &bcm43xx->board_type); + if (ret) { + dev_err(&bcm43xx->pdev->dev, "no brcm,board-type property\n"); + return ret; + } + + bcm43xx->taurus_beamforming_cal_blob = + of_get_property(np, "brcm,taurus-bf-cal-blob", + &bcm43xx->taurus_beamforming_cal_size); + if (!bcm43xx->taurus_beamforming_cal_blob) { + dev_err(&bcm43xx->pdev->dev, + "no brcm,taurus-bf-cal-blob property\n"); + return -ENOENT; + } + bcm43xx->taurus_cal_blob = of_get_property(np, "brcm,taurus-cal-blob", + &bcm43xx->taurus_cal_size); + if (!bcm43xx->taurus_cal_blob) { + dev_err(&bcm43xx->pdev->dev, + "no brcm,taurus-cal-blob property\n"); + return -ENOENT; + } + + return 0; +} + +static const struct bcm43xx_hw bcm43xx_hw_variants[]; + +static int bcm43xx_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct bcm43xx_data *bcm43xx; + struct hci_dev *hdev; + int ret; + + ret = dma_set_mask_and_coherent(&pdev->dev, BCM43XX_DMA_MASK); + if (ret) + return ret; + + bcm43xx = devm_kzalloc(&pdev->dev, sizeof(*bcm43xx), GFP_KERNEL); + if (!bcm43xx) + return -ENOMEM; + + bcm43xx->pdev = pdev; + bcm43xx->hw = &bcm43xx_hw_variants[id->driver_data]; + init_completion(&bcm43xx->event); + + ret = bcm43xx_prepare_rings(bcm43xx); + if (ret) + return ret; + + ret = bcm43xx_init_context(bcm43xx); + if (ret) + return ret; + + bcm43xx->board_type = bcm43xx->hw->default_board_type; + ret = bcm43xx_probe_of(bcm43xx); + if (ret) + return ret; + if (!bcm43xx->board_type) { + dev_err(&pdev->dev, "unable to determine board type\n"); + return ret; + } + + ret = pci_enable_device(pdev); + if (ret) + return ret; + pci_set_master(pdev); + + ret = bcm43xx_init_cfg(bcm43xx); + if (ret) + return ret; + + bcm43xx->bar0 = pcim_iomap(pdev, 0, 0); + if (!bcm43xx->bar0) + return -EBUSY; + bcm43xx->bar2 = pcim_iomap(pdev, 2, 0); + if (!bcm43xx->bar2) + return -EBUSY; + + ret = bcm43xx_read_otp(bcm43xx); + if (ret) { + dev_err(&pdev->dev, "Reading OTP failed with %d\n", ret); + return ret; + } + + // TODO: check if legacy irqs also work + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); + if (ret < 0) + return -ENODEV; + + bcm43xx->irq = pci_irq_vector(pdev, 0); + if (bcm43xx->irq <= 0) + return -ENODEV; + + ret = devm_request_irq(&pdev->dev, bcm43xx->irq, bcm43xx_irq, 0, + "bcm43xx", bcm43xx); + if (ret) + return ret; + + hdev = hci_alloc_dev(); + if (!hdev) + return -ENOMEM; + ret = devm_add_action_or_reset(&pdev->dev, + (void (*)(void *))hci_free_dev, hdev); + if (ret) + return ret; + + bcm43xx->hdev = hdev; + + hdev->bus = HCI_PCI; + hdev->dev_type = HCI_PRIMARY; + hdev->open = bcm43xx_hci_open; + hdev->close = bcm43xx_hci_close; + hdev->send = bcm43xx_hci_send_frame; + hdev->set_bdaddr = bcm43xx_hci_set_bdaddr; + hdev->setup = bcm43xx_hci_setup; + + /* non-DT devices have the address stored inside a ROM */ + if (pdev->dev.of_node) + set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); + + pci_set_drvdata(pdev, bcm43xx); + hci_set_drvdata(hdev, bcm43xx); + SET_HCIDEV_DEV(hdev, &pdev->dev); + + ret = bcm43xx_boot(bcm43xx); + if (ret) + return ret; + + ret = bcm43xx_setup_rti(bcm43xx); + if (ret) + return ret; + + // TODO: figure out to do a full reset or make sure to close the rings + // on remove + ret = 
bcm43xx_open_rings(bcm43xx); + if (ret) + return ret; + + ret = hci_register_dev(hdev); + if (ret) + return ret; + return devm_add_action_or_reset( + &pdev->dev, (void (*)(void *))hci_unregister_dev, hdev); +} + +static const struct bcm43xx_hw bcm43xx_hw_variants[] = { + [BCM4377] = { + .name = "4377", + .bar0_window0 = 0x1800b000, + .bar0_window1 = 0x1810c000, + .otp_offset = 0x4120, + .otp_size = 0xe0, + .default_board_type = "formosa", + }, + + [BCM4378] = { + .name = "4378", + .bar0_window0 = 0x18002000, + .bar0_window1 = 0x1810a000, + .bar0_window5 = 0x18107000, + .otp_offset = 0x4120, + .otp_size = 0xe0, + .has_bar0_window5 = true, + .send_calibration = bcm4378_send_calibration, + }, + + [BCM4387]= { + .name = "4387", + .bar0_window0 = 0x18002000, + .bar0_window1 = 0x18109000, + .bar0_window5 = 0x18106000, + .otp_offset = 0x413c, + .otp_size = 0xe0, + .has_bar0_window5 = true, + .m2m_reset_on_ss_reset_disabled = true, + .send_calibration = bcm4387_send_calibration, + }, +}; + +#define BCM43XX_DEVID_ENTRY(id) \ + { \ + PCI_VENDOR_ID_BROADCOM, BCM##id##_DEVICE_ID, PCI_ANY_ID, \ + PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, \ + BCM##id \ + } + +static const struct pci_device_id bcm43xx_devid_table[] = { + BCM43XX_DEVID_ENTRY(4377), + BCM43XX_DEVID_ENTRY(4378), + BCM43XX_DEVID_ENTRY(4387), + {}, +}; +MODULE_DEVICE_TABLE(pci, bcm43xx_devid_table); + +static struct pci_driver bcm43xx_pci_driver = { + .name = "hci_bcm43xx", + .id_table = bcm43xx_devid_table, + .probe = bcm43xx_probe, +}; +module_pci_driver(bcm43xx_pci_driver); + +MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>"); +MODULE_DESCRIPTION("Bluetooth support for Broadcom 43XX PCIe devices"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("brcm/brcmbt43*.bin"); +MODULE_FIRMWARE("brcm/brcmbt43*.ptb"); diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 954749afb5fe..9a020a0bd9c4 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -41,6 +41,15 @@ config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM To compile this driver as a module, choose M here: the module will be called sun50i-cpufreq-nvmem. +config ARM_APPLE_SOC_CPUFREQ + tristate "Apple Silicon SoC CPUFreq support" + depends on ARCH_APPLE || COMPILE_TEST + select PM_OPP + default ARCH_APPLE + help + This adds the CPUFreq driver for Apple Silicon machines + (e.g. Apple M1). 
+ config ARM_ARMADA_37XX_CPUFREQ tristate "Armada 37xx CPUFreq support" depends on ARCH_MVEBU && CPUFREQ_DT diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 285de70af877..c0da6f2f6cd5 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -51,6 +51,7 @@ obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o ################################################################################## # ARM SoC drivers +obj-$(CONFIG_ARM_APPLE_SOC_CPUFREQ) += apple-soc-cpufreq.o obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ) += armada-8k-cpufreq.o obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c new file mode 100644 index 000000000000..a92f7e7c57b5 --- /dev/null +++ b/drivers/cpufreq/apple-soc-cpufreq.c @@ -0,0 +1,393 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Apple SoC CPU cluster performance state driver + * + * Copyright The Asahi Linux Contributors + * + * Based on scpi-cpufreq.c + */ + +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/cpu.h> +#include <linux/cpufreq.h> +#include <linux/cpumask.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/pm_opp.h> +#include <linux/slab.h> + +#define APPLE_DVFS_PSTATE_BITS_T8103 4 +#define APPLE_DVFS_PSTATE_BITS_T8112 5 + +#define APPLE_DVFS_CMD 0x20 +#define APPLE_DVFS_CMD_BUSY BIT(31) +#define APPLE_DVFS_CMD_SET BIT(25) +#define APPLE_DVFS_CMD_PS2 GENMASK(16, 12) +#define APPLE_DVFS_CMD_PS1 GENMASK(4, 0) + +/* Same timebase as CPU counter (24MHz) */ +#define APPLE_DVFS_LAST_CHG_TIME 0x38 + +/* + * Apple ran out of bits and had to shift this in T8112... + */ +#define APPLE_DVFS_STATUS 0x50 +#define APPLE_DVFS_STATUS_CUR_PS_T8103 GENMASK(7, 4) +#define APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8103 4 +#define APPLE_DVFS_STATUS_TGT_PS_T8103 GENMASK(3, 0) +#define APPLE_DVFS_STATUS_CUR_PS_T8112 GENMASK(9, 5) +#define APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8112 5 +#define APPLE_DVFS_STATUS_TGT_PS_T8112 GENMASK(4, 0) +/* + * Div is +1, base clock is 12MHz on existing SoCs. + * For documentation purposes. We use the OPP table to + * get the frequency. + */ +#define APPLE_DVFS_PLL_STATUS 0xc0 +#define APPLE_DVFS_PLL_FACTOR 0xc8 +#define APPLE_DVFS_PLL_FACTOR_MULT GENMASK(31, 16) +#define APPLE_DVFS_PLL_FACTOR_DIV GENMASK(15, 0) + +struct apple_soc_cpufreq_info { + u64 max_pstate; + u64 cur_pstate_mask; + u64 cur_pstate_shift; +}; + +struct apple_cpu_priv { + struct device *cpu_dev; + void __iomem *reg_base; + const struct apple_soc_cpufreq_info *info; +}; + +#define APPLE_DVFS_TRANSITION_TIMEOUT 100 + +static struct cpufreq_driver apple_soc_cpufreq_driver; + +static unsigned int apple_soc_cpufreq_get_rate(unsigned int cpu) +{ + struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); + struct apple_cpu_priv *priv = policy->driver_data; + unsigned int pstate; + unsigned int i; + + if (priv->info->cur_pstate_mask) { + u64 reg = readq_relaxed(priv->reg_base + APPLE_DVFS_STATUS); + pstate = (reg & priv->info->cur_pstate_mask) >> priv->info->cur_pstate_shift; + } else { + /* + * For the fallback case we might not know the layout of DVFS_STATUS, + * so just use the command register value (which ignores boost limitations). 
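+		 * Note that APPLE_DVFS_CMD_PS1 holds the most recently
+		 * requested p-state, which is not necessarily the p-state
+		 * the cluster is actually running at.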
+ */ + u64 reg = readq_relaxed(priv->reg_base + APPLE_DVFS_CMD); + pstate = FIELD_GET(APPLE_DVFS_CMD_PS1, reg); + } + + for (i = 0; policy->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) + if (policy->freq_table[i].driver_data == pstate) + return policy->freq_table[i].frequency; + + dev_err(priv->cpu_dev, "could not find frequency for pstate %d\n", + pstate); + return 0; +} + +static int apple_soc_cpufreq_set_target(struct cpufreq_policy *policy, + unsigned int index) +{ + struct apple_cpu_priv *priv = policy->driver_data; + unsigned int pstate = policy->freq_table[index].driver_data; + u64 reg; + + /* Fallback for newer SoCs */ + if (index > priv->info->max_pstate) + index = priv->info->max_pstate; + + if (readq_poll_timeout_atomic(priv->reg_base + APPLE_DVFS_CMD, reg, + !(reg & APPLE_DVFS_CMD_BUSY), 2, + APPLE_DVFS_TRANSITION_TIMEOUT)) { + return -EIO; + } + + reg &= ~(APPLE_DVFS_CMD_PS1 | APPLE_DVFS_CMD_PS2); + reg |= FIELD_PREP(APPLE_DVFS_CMD_PS1, pstate); + reg |= FIELD_PREP(APPLE_DVFS_CMD_PS2, pstate); + reg |= APPLE_DVFS_CMD_SET; + + writeq_relaxed(reg, priv->reg_base + APPLE_DVFS_CMD); + + return 0; +} + +static unsigned int apple_soc_cpufreq_fast_switch(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + if (apple_soc_cpufreq_set_target(policy, policy->cached_resolved_idx) < 0) + return 0; + + return policy->freq_table[policy->cached_resolved_idx].frequency; +} + +static int apple_soc_cpufreq_find_cluster(struct cpufreq_policy *policy, + void __iomem **reg_base) +{ + struct of_phandle_args args; + struct device_node *cpu_np; + char name[32]; + int cpu, ret; + int index; + + cpu_np = of_cpu_device_node_get(policy->cpu); + if (!cpu_np) + return -EINVAL; + + ret = of_parse_phandle_with_args(cpu_np, "apple,freq-domain", + "#freq-domain-cells", 0, &args); + of_node_put(cpu_np); + if (ret) + return ret; + + index = args.args[0]; + + snprintf(name, sizeof(name), "cluster%d", index); + ret = of_property_match_string(args.np, "reg-names", name); + if (ret < 0) + return ret; + + *reg_base = of_iomap(args.np, ret); + if (IS_ERR(*reg_base)) + return PTR_ERR(*reg_base); + + for_each_possible_cpu(cpu) { + cpu_np = of_cpu_device_node_get(cpu); + if (!cpu_np) + continue; + + ret = of_parse_phandle_with_args(cpu_np, "apple,freq-domain", + "#freq-domain-cells", 0, &args); + of_node_put(cpu_np); + if (ret < 0) + continue; + + if (index == args.args[0]) + cpumask_set_cpu(cpu, policy->cpus); + } + + return 0; +} + +static struct freq_attr *apple_soc_cpufreq_hw_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, + NULL, +}; + +static int apple_soc_cpufreq_init(struct cpufreq_policy *policy) +{ + int ret, i; + unsigned int transition_latency; + void __iomem *reg_base; + struct device *cpu_dev; + struct apple_cpu_priv *priv; + struct cpufreq_frequency_table *freq_table; + struct platform_device *pdev = cpufreq_get_driver_data(); + + cpu_dev = get_cpu_device(policy->cpu); + if (!cpu_dev) { + pr_err("failed to get cpu%d device\n", policy->cpu); + return -ENODEV; + } + + ret = dev_pm_opp_of_add_table(cpu_dev); + if (ret < 0) { + dev_err(cpu_dev, "%s: failed to add OPP table: %d\n", __func__, ret); + return ret; + } + + ret = apple_soc_cpufreq_find_cluster(policy, ®_base); + if (ret) { + dev_err(cpu_dev, "%s: failed to get cluster info: %d\n", __func__, ret); + return ret; + } + + ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus); + if (ret) { + dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n", __func__, ret); + goto out_iounmap; + } + + ret = 
dev_pm_opp_get_opp_count(cpu_dev); + if (ret <= 0) { + dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n"); + ret = -EPROBE_DEFER; + goto out_free_opp; + } + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto out_free_opp; + } + + priv->info = of_device_get_match_data(&pdev->dev); + if (!priv->info) { + ret = -ENODEV; + goto out_free_opp; + } + + ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); + if (ret) { + dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); + goto out_free_priv; + } + + /* Get OPP levels (p-state indexes) and stash them in driver_data */ + for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { + unsigned long rate = freq_table[i].frequency * 1000; + struct dev_pm_opp *opp = dev_pm_opp_find_freq_floor(cpu_dev, &rate); + + if (IS_ERR(opp)) { + ret = PTR_ERR(opp); + goto out_free_cpufreq_table; + } + freq_table[i].driver_data = dev_pm_opp_get_level(opp); + dev_pm_opp_put(opp); + } + + priv->cpu_dev = cpu_dev; + priv->reg_base = reg_base; + policy->driver_data = priv; + policy->freq_table = freq_table; + + transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev); + if (!transition_latency) + transition_latency = CPUFREQ_ETERNAL; + + policy->cpuinfo.transition_latency = transition_latency; + policy->dvfs_possible_from_any_cpu = true; + policy->fast_switch_possible = true; + + if (policy_has_boost_freq(policy)) { + ret = cpufreq_enable_boost_support(); + if (ret) { + dev_warn(cpu_dev, "failed to enable boost: %d\n", ret); + } else { + apple_soc_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs; + apple_soc_cpufreq_driver.boost_enabled = true; + } + } + + return 0; + +out_free_cpufreq_table: + dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); +out_free_priv: + kfree(priv); +out_free_opp: + dev_pm_opp_remove_all_dynamic(cpu_dev); +out_iounmap: + iounmap(reg_base); + return ret; +} + +static int apple_soc_cpufreq_exit(struct cpufreq_policy *policy) +{ + struct apple_cpu_priv *priv = policy->driver_data; + + dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); + dev_pm_opp_remove_all_dynamic(priv->cpu_dev); + iounmap(priv->reg_base); + kfree(priv); + + return 0; +} + +static struct cpufreq_driver apple_soc_cpufreq_driver = { + .name = "apple-cpufreq", + .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY | + CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_IS_COOLING_DEV, + .verify = cpufreq_generic_frequency_table_verify, + .attr = cpufreq_generic_attr, + .get = apple_soc_cpufreq_get_rate, + .init = apple_soc_cpufreq_init, + .exit = apple_soc_cpufreq_exit, + .target_index = apple_soc_cpufreq_set_target, + .fast_switch = apple_soc_cpufreq_fast_switch, + .register_em = cpufreq_register_em_with_opp, + .attr = apple_soc_cpufreq_hw_attr, +}; + +static int apple_soc_cpufreq_probe(struct platform_device *pdev) +{ + int ret; + + apple_soc_cpufreq_driver.driver_data = pdev; + + ret = cpufreq_register_driver(&apple_soc_cpufreq_driver); + if (ret) + return dev_err_probe(&pdev->dev, ret, "registering cpufreq failed\n"); + + return 0; +} + +static int apple_soc_cpufreq_remove(struct platform_device *pdev) +{ + cpufreq_unregister_driver(&apple_soc_cpufreq_driver); + + return 0; +} + +const struct apple_soc_cpufreq_info soc_t8103_info = { + .max_pstate = 15, + .cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_T8103, + .cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8103, +}; + +const struct apple_soc_cpufreq_info soc_t8112_info = { + .max_pstate = 31, + .cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_T8112, 
+ .cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8112, +}; + +const struct apple_soc_cpufreq_info soc_default_info = { + .max_pstate = 15, + .cur_pstate_mask = 0, /* fallback */ +}; + +static const struct of_device_id apple_soc_cpufreq_of_match[] = { + { + .compatible = "apple,t8103-soc-cpufreq", + .data = &soc_t8103_info, + }, + { + .compatible = "apple,t8112-cpufreq", + .data = &soc_t8112_info, + }, + { + .compatible = "apple,soc-cpufreq", + .data = &soc_default_info, + }, + {} +}; +MODULE_DEVICE_TABLE(of, apple_soc_cpufreq_of_match); + +static struct platform_driver apple_soc_cpufreq_plat_driver = { + .probe = apple_soc_cpufreq_probe, + .remove = apple_soc_cpufreq_remove, + .driver = { + .name = "apple-soc-cpufreq", + .of_match_table = apple_soc_cpufreq_of_match, + }, +}; +module_platform_driver(apple_soc_cpufreq_plat_driver); + +MODULE_AUTHOR("Hector Martin <marcan@marcan.st>"); +MODULE_DESCRIPTION("Apple SoC CPU cluster DVFS driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 2c96de3f2d83..12b203d67779 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -103,6 +103,8 @@ static const struct of_device_id allowlist[] __initconst = { static const struct of_device_id blocklist[] __initconst = { { .compatible = "allwinner,sun50i-h6", }, + { .compatible = "apple,arm-platform", }, + { .compatible = "arm,vexpress", }, { .compatible = "calxeda,highbank", }, diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 487ed4ddc3be..bda78b8172d4 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -85,6 +85,14 @@ config AMCC_PPC440SPE_ADMA help Enable support for the AMCC PPC440SPe RAID engines. +config APPLE_ADMAC + tristate "Apple ADMAC support" + depends on ARCH_APPLE || COMPILE_TEST + select DMA_ENGINE + default ARCH_APPLE + help + Enable support for Audio DMA Controller found on Apple Silicon chips. 
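The apple-admac.c driver added below plugs into the generic dmaengine framework, so an audio client (for example an I2S controller driver) consumes it through the usual slave-DMA calls. A minimal sketch of such a client follows, assuming a device whose DT maps a "tx" entry in dma-names to an even (memory-to-device) ADMAC channel, 32-bit samples, and an already DMA-mapped buffer; none of these names are part of this patch.

	#include <linux/dmaengine.h>
	#include <linux/device.h>
	#include <linux/err.h>

	/* Hedged sketch of an ADMAC client starting cyclic playback. */
	static int example_start_playback(struct device *dev, dma_addr_t buf,
					  size_t buf_len, size_t period_len)
	{
		struct dma_slave_config cfg = {
			/*
			 * ADMAC only looks at the bus width; the FIFO is
			 * implied by the channel, so no dst_addr is needed.
			 */
			.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		};
		struct dma_async_tx_descriptor *tx;
		struct dma_chan *chan;
		int ret;

		chan = dma_request_chan(dev, "tx");	/* name is an assumption */
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		ret = dmaengine_slave_config(chan, &cfg);
		if (ret)
			goto release;

		/* The driver only implements device_prep_dma_cyclic. */
		tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
		if (!tx) {
			ret = -EINVAL;
			goto release;
		}

		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
		return 0;

	release:
		dma_release_channel(chan);
		return ret;
	}

Period completions reach the client through the descriptor callback, which the driver invokes once per acknowledged period from its per-channel tasklet.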
+ config AT_HDMAC tristate "Atmel AHB DMA support" depends on ARCH_AT91 diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 2f1b87ffd7ab..10f7d4241001 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ obj-$(CONFIG_AMD_PTDMA) += ptdma/ +obj-$(CONFIG_APPLE_ADMAC) += apple-admac.o obj-$(CONFIG_AT_HDMAC) += at_hdmac.o obj-$(CONFIG_AT_XDMAC) += at_xdmac.o obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c new file mode 100644 index 000000000000..aa4c07bcb72a --- /dev/null +++ b/drivers/dma/apple-admac.c @@ -0,0 +1,752 @@ +#include <linux/bits.h> +#include <linux/bitfield.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_dma.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/pm_runtime.h> + +#include "dmaengine.h" + +#define NCHANNELS_MAX 64 + +#define RING_WRITE_SLOT GENMASK(1, 0) +#define RING_READ_SLOT GENMASK(5, 4) +#define RING_FULL BIT(9) +#define RING_EMPTY BIT(8) +#define RING_ERR BIT(10) + +#define STATUS_DESC_DONE BIT(0) +#define STATUS_ERR BIT(6) + +#define FLAG_DESC_NOTIFY BIT(16) + +#define REG_TX_START 0x0000 +#define REG_TX_STOP 0x0004 +#define REG_RX_START 0x0008 +#define REG_RX_STOP 0x000c + +#define REG_CHAN_CTL(ch) (0x8000 + (ch)*0x200) +#define REG_CHAN_CTL_RST_RINGS BIT(0) + +#define REG_DESC_RING(ch) (0x8070 + (ch)*0x200) +#define REG_REPORT_RING(ch) (0x8074 + (ch)*0x200) + +#define REG_RESIDUE(ch) (0x8064 + (ch)*0x200) + +#define REG_BUS_WIDTH(ch) (0x8040 + (ch)*0x200) + +#define BUS_WIDTH_8BIT 0x00 +#define BUS_WIDTH_16BIT 0x01 +#define BUS_WIDTH_32BIT 0x02 +#define BUS_WIDTH_FRAME_2_WORDS 0x10 +#define BUS_WIDTH_FRAME_4_WORDS 0x20 + +#define REG_CHAN_BURSTSIZE(ch) (0x8054 + (ch)*0x200) + +#define REG_DESC_WRITE(ch) (0x10000 + (ch / 2) * 0x4 + (ch & 1) * 0x4000) +#define REG_REPORT_READ(ch) (0x10100 + (ch / 2) * 0x4 + (ch & 1) * 0x4000) + +#define IRQ_INDEX_MAX 3 + +#define REG_TX_INTSTATE(idx) (0x0030 + (idx) * 4) +#define REG_RX_INTSTATE(idx) (0x0040 + (idx) * 4) +#define REG_CHAN_INTSTATUS(ch,idx) (0x8010 + (ch) * 0x200 + (idx) * 4) +#define REG_CHAN_INTMASK(ch,idx) (0x8020 + (ch) * 0x200 + (idx) * 4) + +struct admac_data; +struct admac_tx; + +struct admac_chan { + int no; + struct admac_data *host; + struct dma_chan chan; + struct tasklet_struct tasklet; + + spinlock_t lock; + struct admac_tx *current_tx; + int nperiod_acks; + + struct list_head submitted; + struct list_head issued; +}; + +struct admac_data { + struct dma_device dma; + struct device *dev; + __iomem void *base; + + int irq_index; + int nchannels; + struct admac_chan channels[]; +}; + +struct admac_tx { + struct dma_async_tx_descriptor tx; + bool cyclic; + dma_addr_t buf_addr; + dma_addr_t buf_end; + size_t buf_len; + size_t period_len; + + size_t submitted_pos; + size_t reclaimed_pos; + + struct list_head node; +}; + +static void admac_poke(struct admac_data *ad, int reg, u32 val) +{ + writel_relaxed(val, ad->base + reg); +} + +static u32 admac_peek(struct admac_data *ad, int reg) +{ + return readl_relaxed(ad->base + reg); +} + +static void admac_modify(struct admac_data *ad, int reg, u32 mask, u32 val) +{ + void __iomem *addr = ad->base + reg; + u32 curr = readl_relaxed(addr); + + writel_relaxed((curr & ~mask) | (val & mask), addr); +} + +static struct admac_chan 
*to_admac_chan(struct dma_chan *chan) +{ + return container_of(chan, struct admac_chan, chan); +} + +static struct admac_tx *to_admac_tx(struct dma_async_tx_descriptor *tx) +{ + return container_of(tx, struct admac_tx, tx); +} + +static enum dma_transfer_direction admac_chan_direction(int channo) +{ + return (channo & 1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV; +} + +static dma_cookie_t admac_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct admac_tx *adtx = to_admac_tx(tx); + struct admac_chan *adchan = to_admac_chan(tx->chan); + unsigned long flags; + dma_cookie_t cookie; + + spin_lock_irqsave(&adchan->lock, flags); + cookie = dma_cookie_assign(tx); + list_add_tail(&adtx->node, &adchan->submitted); + spin_unlock_irqrestore(&adchan->lock, flags); + + return cookie; +} + +static int admac_desc_free(struct dma_async_tx_descriptor *tx) +{ + struct admac_tx *adtx = to_admac_tx(tx); + devm_kfree(to_admac_chan(tx->chan)->host->dev, adtx); + return 0; +} + +static struct dma_async_tx_descriptor *admac_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction direction, + unsigned long flags) +{ + struct admac_chan *adchan = container_of(chan, struct admac_chan, chan); + struct admac_tx *adtx; + + if (direction != admac_chan_direction(adchan->no)) + return NULL; + + adtx = devm_kzalloc(adchan->host->dev, sizeof(*adtx), GFP_NOWAIT); + if (!adtx) + return NULL; + + adtx->cyclic = true; + + adtx->buf_addr = buf_addr; + adtx->buf_len = buf_len; + adtx->buf_end = buf_addr + buf_len; + adtx->period_len = period_len; + + adtx->submitted_pos = 0; + adtx->reclaimed_pos = 0; + + dma_async_tx_descriptor_init(&adtx->tx, chan); + adtx->tx.tx_submit = admac_tx_submit; + adtx->tx.desc_free = admac_desc_free; + + return &adtx->tx; +} + +/* + * Write one hardware descriptor for a dmaegine cyclic transaction. + */ +static void admac_cyclic_write_one_desc(struct admac_data *ad, int channo, + struct admac_tx *tx) +{ + dma_addr_t addr; + + if (WARN_ON(!tx->cyclic)) + return; + + addr = tx->buf_addr + (tx->submitted_pos % tx->buf_len); + WARN_ON(addr + tx->period_len > tx->buf_end); + + dev_dbg(ad->dev, "ch%d descriptor: addr=0x%llx len=0x%x flags=0x%lx\n", + channo, addr, (u32) tx->period_len, FLAG_DESC_NOTIFY); + + admac_poke(ad, REG_DESC_WRITE(channo), addr); + admac_poke(ad, REG_DESC_WRITE(channo), addr >> 32); + admac_poke(ad, REG_DESC_WRITE(channo), tx->period_len); + admac_poke(ad, REG_DESC_WRITE(channo), FLAG_DESC_NOTIFY); + + tx->submitted_pos += tx->period_len; + tx->submitted_pos %= 2 * tx->buf_len; +} + +/* + * Write all the hardware descriptors for a cyclic transaction + * there is space for. + */ +static void admac_cyclic_write_desc(struct admac_data *ad, int channo, + struct admac_tx *tx) +{ + int i; + + for (i = 0; i < 4; i++) { + if (admac_peek(ad, REG_DESC_RING(channo)) & RING_FULL) + break; + admac_cyclic_write_one_desc(ad, channo, tx); + } +} + +static int admac_alloc_chan_resources(struct dma_chan *chan) +{ + return 0; +} + +static void admac_free_chan_resources(struct dma_chan *chan) +{ + // TODO +} + +static int admac_ring_noccupied_slots(int ringval) +{ + int wrslot = FIELD_GET(RING_WRITE_SLOT, ringval); + int rdslot = FIELD_GET(RING_READ_SLOT, ringval); + + if (wrslot != rdslot) { + return (wrslot + 4 - rdslot) % 4; + } else { + WARN_ON((ringval & (RING_FULL | RING_EMPTY)) == 0); + + if (ringval & RING_FULL) + return 4; + else + return 0; + } +} + +/* + * Read from hardware the residue of a cyclic dmaengine transaction. 
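+ * The report ring state and the residue register are sampled twice: if
+ * the second residue read is larger than the first, the engine loaded a
+ * new descriptor between the two reads and the first ring snapshot (plus
+ * one extra report) is used; otherwise the second snapshot is trusted.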
+ */ +u32 admac_cyclic_read_residue(struct admac_data *ad, int channo, struct admac_tx *adtx) +{ + u32 ring1, ring2; + u32 residue1, residue2; + int nreports; + size_t pos; + + ring1 = admac_peek(ad, REG_REPORT_RING(channo)); + residue1 = admac_peek(ad, REG_RESIDUE(channo)); + ring2 = admac_peek(ad, REG_REPORT_RING(channo)); + residue2 = admac_peek(ad, REG_RESIDUE(channo)); + + if (residue2 > residue1) { + // engine must have loaded next descriptor between the two residue reads + nreports = admac_ring_noccupied_slots(ring1) + 1; + } else { + // no descriptor load between the two reads, ring2 is safe to use + nreports = admac_ring_noccupied_slots(ring2); + } + + pos = adtx->reclaimed_pos + adtx->period_len * (nreports + 1) \ + - residue2; + + return adtx->buf_len - pos % adtx->buf_len; +} + +static enum dma_status admac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct admac_chan *adchan = to_admac_chan(chan); + struct admac_data *ad = adchan->host; + struct admac_tx *adtx; + + enum dma_status ret; + size_t residue; + unsigned long flags; + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret == DMA_COMPLETE || !txstate) + return ret; + + spin_lock_irqsave(&adchan->lock, flags); + adtx = adchan->current_tx; + + if (adtx && adtx->tx.cookie == cookie) { + ret = DMA_IN_PROGRESS; + residue = admac_cyclic_read_residue(ad, adchan->no, adtx); + } else { + ret = DMA_IN_PROGRESS; + residue = 0; + list_for_each_entry(adtx, &adchan->issued, node) { + if (adtx->tx.cookie == cookie) { + residue = adtx->buf_len; + break; + } + } + } + spin_unlock_irqrestore(&adchan->lock, flags); + + dma_set_residue(txstate, residue); + return ret; +} + +static void admac_start_chan(struct admac_chan *adchan) +{ + u32 startbit = 1 << (adchan->no / 2); + switch (admac_chan_direction(adchan->no)) { + case DMA_MEM_TO_DEV: + admac_poke(adchan->host, REG_TX_START, startbit); + break; + case DMA_DEV_TO_MEM: + admac_poke(adchan->host, REG_RX_START, startbit); + break; + default: + break; + } + dev_dbg(adchan->host->dev, "ch%d start\n", adchan->no); +} + +static void admac_stop_chan(struct admac_chan *adchan) +{ + u32 stopbit = 1 << (adchan->no / 2); + switch (admac_chan_direction(adchan->no)) { + case DMA_MEM_TO_DEV: + admac_poke(adchan->host, REG_TX_STOP, stopbit); + break; + case DMA_DEV_TO_MEM: + admac_poke(adchan->host, REG_RX_STOP, stopbit); + break; + default: + break; + } + dev_dbg(adchan->host->dev, "ch%d stop\n", adchan->no); +} + +static void admac_start_current_tx(struct admac_chan *adchan) +{ + struct admac_data *ad = adchan->host; + int ch = adchan->no; + + admac_poke(ad, REG_CHAN_INTSTATUS(ch, ad->irq_index), + STATUS_DESC_DONE | STATUS_ERR); + admac_poke(ad, REG_CHAN_CTL(ch), REG_CHAN_CTL_RST_RINGS); + admac_poke(ad, REG_CHAN_CTL(ch), 0); + + admac_cyclic_write_one_desc(ad, ch, adchan->current_tx); + admac_start_chan(adchan); + admac_cyclic_write_desc(ad, ch, adchan->current_tx); +} + +static void admac_issue_pending(struct dma_chan *chan) +{ + struct admac_chan *adchan = to_admac_chan(chan); + struct admac_tx *tx; + unsigned long flags; + + spin_lock_irqsave(&adchan->lock, flags); + list_splice_tail_init(&adchan->submitted, &adchan->issued); + if (!list_empty(&adchan->issued) && !adchan->current_tx) { + tx = list_first_entry(&adchan->issued, struct admac_tx, node); + list_del(&tx->node); + + adchan->current_tx = tx; + adchan->nperiod_acks = 0; + admac_start_current_tx(adchan); + } + spin_unlock_irqrestore(&adchan->lock, flags); +} + +static int 
admac_pause(struct dma_chan *chan) +{ + struct admac_chan *adchan = to_admac_chan(chan); + + admac_stop_chan(adchan); + + return 0; +} + +static int admac_resume(struct dma_chan *chan) +{ + struct admac_chan *adchan = to_admac_chan(chan); + + admac_start_chan(adchan); + + return 0; +} + +static int admac_terminate_all(struct dma_chan *chan) +{ + struct admac_chan *adchan = to_admac_chan(chan); + struct admac_tx *adtx, *_adtx; + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&adchan->lock, flags); + admac_stop_chan(adchan); + adchan->current_tx = NULL; + list_splice_tail_init(&adchan->submitted, &head); + list_splice_tail_init(&adchan->issued, &head); + spin_unlock_irqrestore(&adchan->lock, flags); + + list_for_each_entry_safe(adtx, _adtx, &head, node) { + list_del(&adtx->node); + admac_desc_free(&adtx->tx); + } + + return 0; +} + +static struct dma_chan *admac_dma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct admac_data *ad = (struct admac_data*) ofdma->of_dma_data; + unsigned int index; + + if (dma_spec->args_count != 1) + return NULL; + + index = dma_spec->args[0]; + + if (index >= ad->nchannels) { + dev_err(ad->dev, "channel index %u out of bounds\n", index); + return NULL; + } + + return &ad->channels[index].chan; +} + +static int admac_drain_reports(struct admac_data *ad, int channo) +{ + int count; + + for (count = 0; count < 4; count++) { + u32 countval_hi, countval_lo, unk1, flags; + + if (admac_peek(ad, REG_REPORT_RING(channo)) & RING_EMPTY) + break; + + countval_lo = admac_peek(ad, REG_REPORT_READ(channo)); + countval_hi = admac_peek(ad, REG_REPORT_READ(channo)); + unk1 = admac_peek(ad, REG_REPORT_READ(channo)); + flags = admac_peek(ad, REG_REPORT_READ(channo)); + + dev_dbg(ad->dev, "ch%d report: countval=0x%llx unk1=0x%x flags=0x%x\n", + channo, ((u64) countval_hi) << 32 | countval_lo, unk1, flags); + } + + return count; +} + +static void admac_handle_status_err(struct admac_data *ad, int channo) +{ + bool handled = false; + + if (admac_peek(ad, REG_DESC_RING(channo) & RING_ERR)) { + admac_poke(ad, REG_DESC_RING(channo), RING_ERR); + dev_err(ad->dev, "ch%d descriptor ring error\n", channo); + handled = true; + } + + if (admac_peek(ad, REG_REPORT_RING(channo)) & RING_ERR) { + admac_poke(ad, REG_REPORT_RING(channo), RING_ERR); + dev_err(ad->dev, "ch%d report ring error\n", channo); + handled = true; + } + + if (unlikely(!handled)) { + dev_err(ad->dev, "ch%d unknown error, masking future error interrupts\n", channo); + admac_modify(ad, REG_CHAN_INTMASK(channo, ad->irq_index), STATUS_ERR, 0); + } +} + +static void admac_handle_status_desc_done(struct admac_data *ad, int channo) +{ + struct admac_chan *adchan = &ad->channels[channo]; + unsigned long flags; + int nreports; + + admac_poke(ad, REG_CHAN_INTSTATUS(channo, ad->irq_index), + STATUS_DESC_DONE); + + spin_lock_irqsave(&adchan->lock, flags); + nreports = admac_drain_reports(ad, channo); + + if (adchan->current_tx) { + struct admac_tx *tx = adchan->current_tx; + + adchan->nperiod_acks += nreports; + tx->reclaimed_pos += nreports * tx->period_len; + tx->reclaimed_pos %= 2 * tx->buf_len; + + admac_cyclic_write_desc(ad, channo, tx); + tasklet_schedule(&adchan->tasklet); + } + spin_unlock_irqrestore(&adchan->lock, flags); +} + +static void admac_handle_chan_int(struct admac_data *ad, int no) +{ + u32 cause = admac_peek(ad, REG_CHAN_INTSTATUS(no, ad->irq_index)); + + if (cause & STATUS_ERR) + admac_handle_status_err(ad, no); + + if (cause & STATUS_DESC_DONE) + 
admac_handle_status_desc_done(ad, no); +} + +static irqreturn_t admac_interrupt(int irq, void *devid) +{ + struct admac_data *ad = devid; + u32 rx_intstate, tx_intstate; + int i; + + rx_intstate = admac_peek(ad, REG_RX_INTSTATE(ad->irq_index)); + tx_intstate = admac_peek(ad, REG_TX_INTSTATE(ad->irq_index)); + + for (i = 0; i < ad->nchannels; i += 2) + if (tx_intstate & (1 << (i / 2))) + admac_handle_chan_int(ad, i); + + for (i = 1; i < ad->nchannels; i += 2) + if (rx_intstate & (1 << (i / 2))) + admac_handle_chan_int(ad, i); + + return (tx_intstate | rx_intstate) != 0 ? IRQ_HANDLED : IRQ_NONE; +} + +static void admac_chan_tasklet(struct tasklet_struct *t) +{ + struct admac_chan *adchan = from_tasklet(adchan, t, tasklet); + struct admac_tx *adtx; + struct dmaengine_desc_callback cb; + struct dmaengine_result tx_result; + int nacks; + + spin_lock_irq(&adchan->lock); + adtx = adchan->current_tx; + nacks = adchan->nperiod_acks; + adchan->nperiod_acks = 0; + spin_unlock_irq(&adchan->lock); + + if (!adtx || !nacks) + return; + + tx_result.result = DMA_TRANS_NOERROR; + tx_result.residue = 0; + + dmaengine_desc_get_callback(&adtx->tx, &cb); + while (nacks--) + dmaengine_desc_callback_invoke(&cb, &tx_result); +} + +static int admac_device_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + struct admac_chan *adchan = to_admac_chan(chan); + struct admac_data *ad = adchan->host; + bool is_tx = admac_chan_direction(adchan->no) == DMA_MEM_TO_DEV; + u32 bus_width = 0; + + + switch (is_tx ? config->dst_addr_width : config->src_addr_width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + bus_width |= BUS_WIDTH_8BIT; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + bus_width |= BUS_WIDTH_16BIT; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + bus_width |= BUS_WIDTH_32BIT; + break; + default: + return -EINVAL; + } + + switch (is_tx ? config->dst_port_window_size : config->src_port_window_size) { + case 0 ... 
1: + break; + case 2: + bus_width |= BUS_WIDTH_FRAME_2_WORDS; + break; + case 4: + bus_width |= BUS_WIDTH_FRAME_4_WORDS; + break; + default: + return -EINVAL; + } + + admac_poke(adchan->host, REG_BUS_WIDTH(adchan->no), bus_width); + + /* TODO: burst size */ + + admac_poke(adchan->host, REG_CHAN_BURSTSIZE(adchan->no), 0xc00060); + + /* TODO: this belongs elsewhere */ + admac_poke(adchan->host, REG_CHAN_INTSTATUS(adchan->no, ad->irq_index), + STATUS_DESC_DONE | STATUS_ERR); + admac_poke(adchan->host, REG_CHAN_INTMASK(adchan->no, ad->irq_index), + STATUS_DESC_DONE | STATUS_ERR); + return 0; +} + +static int admac_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct admac_data *ad; + struct dma_device *dma; + int nchannels; + int err, irq, i; + + err = of_property_read_u32(np, "dma-channels", &nchannels); + if (err || nchannels > NCHANNELS_MAX) { + dev_err(&pdev->dev, "missing or invalid dma-channels property\n"); + return -EINVAL; + } + + ad = devm_kzalloc(&pdev->dev, struct_size(ad, channels, nchannels), GFP_KERNEL); + if (!ad) + return -ENOMEM; + + platform_set_drvdata(pdev, ad); + ad->dev = &pdev->dev; + ad->nchannels = nchannels; + + err = of_property_read_u32(np, "apple,internal-irq-destination", &ad->irq_index); + if (err || ad->irq_index > IRQ_INDEX_MAX) { + dev_err(&pdev->dev, "missing or invalid apple,internal-irq-destination property\n"); + return -EINVAL; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "unable to obtain interrupt resource\n"); + return irq; + } + + err = devm_request_irq(&pdev->dev, irq, admac_interrupt, + 0, dev_name(&pdev->dev), ad); + if (err) { + dev_err(&pdev->dev, "unable to register interrupt: %d\n", err); + return err; + } + + ad->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ad->base)) { + dev_err(&pdev->dev, "unable to obtain MMIO resource\n"); + return PTR_ERR(ad->base); + } + + dma = &ad->dma; + + dma_cap_set(DMA_PRIVATE, dma->cap_mask); + dma_cap_set(DMA_CYCLIC, dma->cap_mask); + + dma->dev = &pdev->dev; + dma->device_alloc_chan_resources = admac_alloc_chan_resources; + dma->device_free_chan_resources = admac_free_chan_resources; + dma->device_tx_status = admac_tx_status; + dma->device_issue_pending = admac_issue_pending; + dma->device_terminate_all = admac_terminate_all; + dma->device_prep_dma_cyclic = admac_prep_dma_cyclic; + dma->device_config = admac_device_config; + dma->device_pause = admac_pause; + dma->device_resume = admac_resume; + + dma->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); + dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + + INIT_LIST_HEAD(&dma->channels); + for (i = 0; i < nchannels; i++) { + struct admac_chan *adchan = &ad->channels[i]; + adchan->host = ad; + adchan->no = i; + adchan->chan.device = &ad->dma; + dma_cookie_init(&adchan->chan); + spin_lock_init(&adchan->lock); + INIT_LIST_HEAD(&adchan->submitted); + INIT_LIST_HEAD(&adchan->issued); + list_add_tail(&adchan->chan.device_node, &dma->channels); + tasklet_setup(&adchan->tasklet, admac_chan_tasklet); + } + + err = dma_async_device_register(&ad->dma); + if (err) { + dev_err(&pdev->dev, "failed to register DMA device: %d\n", err); + return err; + } + + err = of_dma_controller_register(pdev->dev.of_node, admac_dma_of_xlate, ad); + if (err) { + dev_err(&pdev->dev, "failed to register with OF: %d\n", err); + 
dma_async_device_unregister(&ad->dma); + return err; + } + + dev_dbg(&pdev->dev, "all good, ready to go!\n"); + + return 0; +} + +static int admac_remove(struct platform_device *pdev) +{ + struct admac_data *ad = platform_get_drvdata(pdev); + + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&ad->dma); + + return 0; +} + +static const struct of_device_id admac_of_match[] = { + { .compatible = "apple,admac", }, + { } +}; +MODULE_DEVICE_TABLE(of, admac_of_match); + +static struct platform_driver apple_admac_driver = { + .driver = { + .name = "apple-admac", + .of_match_table = admac_of_match, + }, + .probe = admac_probe, + .remove = admac_remove, +}; +module_platform_driver(apple_admac_driver); + +MODULE_AUTHOR("Martin PoviÅ¡er <povik+lin@cutebit.org>"); +MODULE_DESCRIPTION("Driver for Audio DMA Controller (ADMAC) in Apple SoCs"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index b01961999ced..fc35a7ca31dd 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -1282,6 +1282,17 @@ config GPIO_LP87565 This driver can also be built as a module. If so, the module will be called gpio-lp87565. +config GPIO_MACSMC + tristate "Apple Mac SMC GPIO" + depends on APPLE_SMC + default ARCH_APPLE + help + Support for GPIOs controlled by the SMC microcontroller on Apple Mac + systems. + + This driver can also be built as a module. If so, the module will be + called gpio-macsmc. + config GPIO_MADERA tristate "Cirrus Logic Madera class codecs" depends on PINCTRL_MADERA diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 14352f6dfe8e..5f7ec8fc6cdd 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -82,6 +82,7 @@ obj-$(CONFIG_GPIO_LP873X) += gpio-lp873x.o obj-$(CONFIG_GPIO_LP87565) += gpio-lp87565.o obj-$(CONFIG_GPIO_LPC18XX) += gpio-lpc18xx.o obj-$(CONFIG_GPIO_LPC32XX) += gpio-lpc32xx.o +obj-$(CONFIG_GPIO_MACSMC) += gpio-macsmc.o obj-$(CONFIG_GPIO_MADERA) += gpio-madera.o obj-$(CONFIG_GPIO_MAX3191X) += gpio-max3191x.o obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o diff --git a/drivers/gpio/gpio-macsmc.c b/drivers/gpio/gpio-macsmc.c new file mode 100644 index 000000000000..2f1f07ea3891 --- /dev/null +++ b/drivers/gpio/gpio-macsmc.c @@ -0,0 +1,389 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Apple SMC GPIO driver + * Copyright The Asahi Linux Contributors + * + * This driver implements basic SMC PMU GPIO support that can read inputs + * and write outputs. Mode changes and IRQ config are not yet implemented. + */ + +#include <linux/bitmap.h> +#include <linux/device.h> +#include <linux/gpio/driver.h> +#include <linux/irq.h> +#include <linux/mfd/core.h> +#include <linux/mfd/macsmc.h> + +#define MAX_GPIO 64 + +/* + * Commands 0-6 are, presumably, the intended API. + * Command 0xff lets you get/set the pin configuration in detail directly, + * but the bit meanings seem not to be stable between devices/PMU hardware + * versions. + * + * We're going to try to make do with the low commands for now. + * We don't implement pin mode changes at this time. 
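+ *
+ * Commands are issued by writing (CMD_xxx | argument) to the per-pin
+ * gPxx SMC key; for example (CMD_OUTPUT | 1) drives a pin high and
+ * (CMD_IRQ_ACK | 1) acknowledges a pending IRQ event.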
+ */ + +#define CMD_ACTION (0 << 24) +#define CMD_OUTPUT (1 << 24) +#define CMD_INPUT (2 << 24) +#define CMD_PINMODE (3 << 24) +#define CMD_IRQ_ENABLE (4 << 24) +#define CMD_IRQ_ACK (5 << 24) +#define CMD_IRQ_MODE (6 << 24) +#define CMD_CONFIG (0xff << 24) + +#define MODE_INPUT 0 +#define MODE_OUTPUT 1 +#define MODE_VALUE_0 0 +#define MODE_VALUE_1 2 + +#define IRQ_MODE_HIGH 0 +#define IRQ_MODE_LOW 1 +#define IRQ_MODE_RISING 2 +#define IRQ_MODE_FALLING 3 +#define IRQ_MODE_BOTH 4 + +#define CONFIG_MASK GENMASK(23, 16) +#define CONFIG_VAL GENMASK(7, 0) + +#define CONFIG_OUTMODE GENMASK(7, 6) +#define CONFIG_IRQMODE GENMASK(5, 3) +#define CONFIG_PULLDOWN BIT(2) +#define CONFIG_PULLUP BIT(1) +#define CONFIG_OUTVAL BIT(0) + +/* + * output modes seem to differ depending on the PMU in use... ? + * j274 / M1 (Sera PMU): + * 0 = input + * 1 = output + * 2 = open drain + * 3 = disable + * j314 / M1Pro (Maverick PMU): + * 0 = input + * 1 = open drain + * 2 = output + * 3 = ? + */ + +#define SMC_EV_GPIO 0x7202 + +struct macsmc_gpio { + struct device *dev; + struct apple_smc *smc; + struct gpio_chip gc; + struct irq_chip ic; + struct notifier_block nb; + + struct mutex irq_mutex; + DECLARE_BITMAP(irq_supported, MAX_GPIO); + DECLARE_BITMAP(irq_enable_shadow, MAX_GPIO); + DECLARE_BITMAP(irq_enable, MAX_GPIO); + u32 irq_mode_shadow[MAX_GPIO]; + u32 irq_mode[MAX_GPIO]; + + int first_index; +}; + +static int macsmc_gpio_nr(smc_key key) +{ + int low = hex_to_bin(key & 0xff); + int high = hex_to_bin((key >> 8) & 0xff); + + if (low < 0 || high < 0) + return -1; + + return low | (high << 4); +} + +static int macsmc_gpio_key(unsigned int offset) +{ + return _SMC_KEY("gP\0\0") | (hex_asc_hi(offset) << 8) | hex_asc_lo(offset); +} + +static int macsmc_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) +{ + struct macsmc_gpio *smcgp = gpiochip_get_data(gc); + smc_key key = macsmc_gpio_key(offset); + u32 val; + int ret; + + /* First try reading the explicit pin mode register */ + ret = apple_smc_rw_u32(smcgp->smc, key, CMD_PINMODE, &val); + if (!ret) + return (val & MODE_OUTPUT) ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN; + + /* + * Less common IRQ configs cause CMD_PINMODE to fail, and so does open drain mode. + * Fall back to reading IRQ mode, which will only succeed for inputs. + */ + ret = apple_smc_rw_u32(smcgp->smc, key, CMD_IRQ_MODE, &val); + return (!ret) ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT; +} + +static int macsmc_gpio_get(struct gpio_chip *gc, unsigned int offset) +{ + struct macsmc_gpio *smcgp = gpiochip_get_data(gc); + smc_key key = macsmc_gpio_key(offset); + u32 val; + int ret; + + ret = macsmc_gpio_get_direction(gc, offset); + if (ret < 0) + return ret; + + if (ret == GPIO_LINE_DIRECTION_OUT) + ret = apple_smc_rw_u32(smcgp->smc, key, CMD_OUTPUT, &val); + else + ret = apple_smc_rw_u32(smcgp->smc, key, CMD_INPUT, &val); + + if (ret < 0) + return ret; + + return val ? 
1 : 0; +} + +static void macsmc_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) +{ + struct macsmc_gpio *smcgp = gpiochip_get_data(gc); + smc_key key = macsmc_gpio_key(offset); + int ret; + + value |= CMD_OUTPUT; + ret = apple_smc_write_u32(smcgp->smc, key, CMD_OUTPUT | value); + if (ret < 0) + dev_err(smcgp->dev, "GPIO set failed %p4ch = 0x%x\n", &key, value); +} + +static int macsmc_gpio_init_valid_mask(struct gpio_chip *gc, + unsigned long *valid_mask, unsigned int ngpios) +{ + struct macsmc_gpio *smcgp = gpiochip_get_data(gc); + int count = apple_smc_get_key_count(smcgp->smc) - smcgp->first_index; + int i; + + if (count > MAX_GPIO) + count = MAX_GPIO; + + bitmap_zero(valid_mask, ngpios); + + for (i = 0; i < count; i++) { + smc_key key; + int gpio_nr; + u32 val; + int ret = apple_smc_get_key_by_index(smcgp->smc, smcgp->first_index + i, &key); + + if (ret < 0) + return ret; + + if (key > SMC_KEY(gPff)) + break; + + gpio_nr = macsmc_gpio_nr(key); + if (gpio_nr < 0 || gpio_nr > MAX_GPIO) { + dev_err(smcgp->dev, "Bad GPIO key %p4ch\n", &key); + continue; + } + + set_bit(gpio_nr, valid_mask); + + /* Check for IRQ support */ + ret = apple_smc_rw_u32(smcgp->smc, key, CMD_IRQ_MODE, &val); + if (!ret) + set_bit(gpio_nr, smcgp->irq_supported); + } + + return 0; +} + +static int macsmc_gpio_event(struct notifier_block *nb, unsigned long event, void *data) +{ + struct macsmc_gpio *smcgp = container_of(nb, struct macsmc_gpio, nb); + u16 type = event >> 16; + u8 offset = (event >> 8) & 0xff; + smc_key key = macsmc_gpio_key(offset); + unsigned long flags; + int ret; + + if (type != SMC_EV_GPIO) + return NOTIFY_DONE; + + if (offset > MAX_GPIO) { + dev_err(smcgp->dev, "GPIO event index %d out of range\n", offset); + return NOTIFY_BAD; + } + + local_irq_save(flags); + ret = handle_irq_desc(irq_resolve_mapping(smcgp->gc.irq.domain, offset)); + local_irq_restore(flags); + + if (apple_smc_write_u32(smcgp->smc, key, CMD_IRQ_ACK | 1) < 0) + dev_err(smcgp->dev, "GPIO IRQ ack failed for %p4ch\n", &key); + + return (ret == 0) ? 
NOTIFY_OK : NOTIFY_DONE; +} + +static void macsmc_gpio_irq_enable(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct macsmc_gpio *smcgp = gpiochip_get_data(gc); + + set_bit(irqd_to_hwirq(d), smcgp->irq_enable_shadow); +} + +static void macsmc_gpio_irq_disable(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct macsmc_gpio *smcgp = gpiochip_get_data(gc); + + clear_bit(irqd_to_hwirq(d), smcgp->irq_enable_shadow); +} + +static int macsmc_gpio_irq_set_type(struct irq_data *d, unsigned int type) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct macsmc_gpio *smcgp = gpiochip_get_data(gc); + int offset = irqd_to_hwirq(d); + u32 mode; + + if (!test_bit(offset, smcgp->irq_supported)) + return -EINVAL; + + switch (type & IRQ_TYPE_SENSE_MASK) { + case IRQ_TYPE_LEVEL_HIGH: + mode = IRQ_MODE_HIGH; + break; + case IRQ_TYPE_LEVEL_LOW: + mode = IRQ_MODE_LOW; + break; + case IRQ_TYPE_EDGE_RISING: + mode = IRQ_MODE_RISING; + break; + case IRQ_TYPE_EDGE_FALLING: + mode = IRQ_MODE_FALLING; + break; + case IRQ_TYPE_EDGE_BOTH: + mode = IRQ_MODE_BOTH; + break; + default: + return -EINVAL; + } + + smcgp->irq_mode_shadow[offset] = mode; + return 0; +} + +static void macsmc_gpio_irq_bus_lock(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct macsmc_gpio *smcgp = gpiochip_get_data(gc); + + mutex_lock(&smcgp->irq_mutex); +} + +static void macsmc_gpio_irq_bus_sync_unlock(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct macsmc_gpio *smcgp = gpiochip_get_data(gc); + smc_key key = macsmc_gpio_key(irqd_to_hwirq(d)); + int offset = irqd_to_hwirq(d); + bool val; + + if (smcgp->irq_mode_shadow[offset] != smcgp->irq_mode[offset]) { + u32 cmd = CMD_IRQ_MODE | smcgp->irq_mode_shadow[offset]; + if (apple_smc_write_u32(smcgp->smc, key, cmd) < 0) + dev_err(smcgp->dev, "GPIO IRQ config failed for %p4ch = 0x%x\n", &key, cmd); + else + smcgp->irq_mode_shadow[offset] = smcgp->irq_mode[offset]; + } + + val = test_bit(offset, smcgp->irq_enable_shadow); + if (test_bit(offset, smcgp->irq_enable) != val) { + if (apple_smc_write_u32(smcgp->smc, key, CMD_IRQ_ENABLE | val) < 0) + dev_err(smcgp->dev, "GPIO IRQ en/disable failed for %p4ch\n", &key); + else + change_bit(offset, smcgp->irq_enable); + } + + mutex_unlock(&smcgp->irq_mutex); +} + +static int macsmc_gpio_probe(struct platform_device *pdev) +{ + struct macsmc_gpio *smcgp; + struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent); + smc_key key; + int ret; + + smcgp = devm_kzalloc(&pdev->dev, sizeof(*smcgp), GFP_KERNEL); + if (!smcgp) + return -ENOMEM; + + pdev->dev.of_node = of_get_child_by_name(pdev->dev.parent->of_node, "gpio"); + + smcgp->dev = &pdev->dev; + smcgp->smc = smc; + smcgp->first_index = apple_smc_find_first_key_index(smc, SMC_KEY(gP00)); + + if (smcgp->first_index >= apple_smc_get_key_count(smc)) + return -ENODEV; + + ret = apple_smc_get_key_by_index(smc, smcgp->first_index, &key); + if (ret < 0) + return ret; + + if (key > macsmc_gpio_key(MAX_GPIO - 1)) + return -ENODEV; + + dev_info(smcgp->dev, "First GPIO key: %p4ch\n", &key); + + smcgp->gc.label = "macsmc-pmu-gpio"; + smcgp->gc.owner = THIS_MODULE; + smcgp->gc.get = macsmc_gpio_get; + smcgp->gc.set = macsmc_gpio_set; + smcgp->gc.get_direction = macsmc_gpio_get_direction; + smcgp->gc.init_valid_mask = macsmc_gpio_init_valid_mask; + smcgp->gc.can_sleep = true; + smcgp->gc.ngpio = MAX_GPIO; + smcgp->gc.base = -1; + smcgp->gc.parent = &pdev->dev; + + 
smcgp->ic.name = "macsmc-pmu-gpio"; + smcgp->ic.irq_mask = macsmc_gpio_irq_disable; + smcgp->ic.irq_unmask = macsmc_gpio_irq_enable; + smcgp->ic.irq_set_type = macsmc_gpio_irq_set_type; + smcgp->ic.irq_bus_lock = macsmc_gpio_irq_bus_lock; + smcgp->ic.irq_bus_sync_unlock = macsmc_gpio_irq_bus_sync_unlock; + smcgp->ic.irq_set_type = macsmc_gpio_irq_set_type; + smcgp->ic.flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND; + + smcgp->gc.irq.chip = &smcgp->ic; + smcgp->gc.irq.parent_handler = NULL; + smcgp->gc.irq.num_parents = 0; + smcgp->gc.irq.parents = NULL; + smcgp->gc.irq.default_type = IRQ_TYPE_NONE; + smcgp->gc.irq.handler = handle_simple_irq; + + mutex_init(&smcgp->irq_mutex); + + smcgp->nb.notifier_call = macsmc_gpio_event; + apple_smc_register_notifier(smc, &smcgp->nb); + + return devm_gpiochip_add_data(&pdev->dev, &smcgp->gc, smcgp); +} + +static struct platform_driver macsmc_gpio_driver = { + .driver = { + .name = "macsmc-gpio", + }, + .probe = macsmc_gpio_probe, +}; +module_platform_driver(macsmc_gpio_driver); + +MODULE_AUTHOR("Hector Martin <marcan@marcan.st>"); +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_DESCRIPTION("Apple SMC GPIO driver"); +MODULE_ALIAS("platform:macsmc-gpio"); diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig index 627d637a1e7e..20c78bb1365d 100644 --- a/drivers/gpu/drm/tiny/Kconfig +++ b/drivers/gpu/drm/tiny/Kconfig @@ -71,6 +71,7 @@ config DRM_SIMPLEDRM depends on DRM && MMU select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER + select BACKLIGHT_CLASS_DEVICE help DRM driver for simple platform-provided framebuffers. diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c index 768242a78e2b..d681f0c3b840 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only +#include <linux/backlight.h> #include <linux/clk.h> #include <linux/of_clk.h> #include <linux/minmax.h> @@ -225,6 +226,9 @@ struct simpledrm_device { size_t nformats; struct drm_connector connector; struct drm_simple_display_pipe pipe; + + /* backlight */ + struct backlight_device *backlight; }; static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev) @@ -673,6 +677,9 @@ simpledrm_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe, dst += drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip); drm_fb_blit_toio(dst, sdev->pitch, sdev->format->format, vmap, fb, &src_clip); + if (sdev->backlight) + backlight_enable(sdev->backlight); + drm_dev_exit(idx); } @@ -686,6 +693,9 @@ simpledrm_simple_display_pipe_disable(struct drm_simple_display_pipe *pipe) if (!drm_dev_enter(dev, &idx)) return; + if (sdev->backlight) + backlight_disable(sdev->backlight); + /* Clear screen to black if disabled */ memset_io(sdev->screen_base, 0, sdev->pitch * sdev->mode.vdisplay); @@ -845,6 +855,10 @@ simpledrm_device_create(struct drm_driver *drv, struct platform_device *pdev) sdev->pdev = pdev; platform_set_drvdata(pdev, sdev); + sdev->backlight = devm_of_find_backlight(&pdev->dev); + if (IS_ERR(sdev->backlight)) + sdev->backlight = NULL; + ret = simpledrm_device_init_clocks(sdev); if (ret) return ERR_PTR(ret); diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 70da5931082f..bd47caba1413 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -130,7 +130,7 @@ config HID_APPLE depends on HID depends on LEDS_CLASS depends on NEW_LEDS - default !EXPERT + default !EXPERT || SPI_HID_APPLE help Support for some Apple devices which less or more break 
HID specification. @@ -676,11 +676,13 @@ config LOGIWHEELS_FF config HID_MAGICMOUSE tristate "Apple Magic Mouse/Trackpad multi-touch support" depends on HID + default SPI_HID_APPLE help Support for the Apple Magic Mouse/Trackpad multi-touch. Say Y here if you want support for the multi-touch features of the - Apple Wireless "Magic" Mouse and the Apple Wireless "Magic" Trackpad. + Apple Wireless "Magic" Mouse, the Apple Wireless "Magic" Trackpad and + fource touch Trackpads in Macbooks starting from 2015. config HID_MALTRON tristate "Maltron L90 keyboard" @@ -1041,7 +1043,7 @@ config HID_SONY * Guitar Hero PS3 and PC guitar dongles config SONY_FF - bool "Sony PS2/3/4 accessories force feedback support" + bool "Sony PS2/3/4 accessories force feedback support" depends on HID_SONY select INPUT_FF_MEMLESS help @@ -1320,4 +1322,8 @@ source "drivers/hid/amd-sfh-hid/Kconfig" source "drivers/hid/surface-hid/Kconfig" +source "drivers/hid/spi-hid/Kconfig" + +source "drivers/hid/dockchannel-hid/Kconfig" + endmenu diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index cac2cbe26d11..2c90409d515c 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -156,3 +156,7 @@ obj-$(INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ish-hid/ obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/ obj-$(CONFIG_SURFACE_HID_CORE) += surface-hid/ + +obj-$(CONFIG_SPI_HID_APPLE_CORE) += spi-hid/ + +obj-$(CONFIG_HID_DOCKCHANNEL) += dockchannel-hid/ diff --git a/drivers/hid/dockchannel-hid/Kconfig b/drivers/hid/dockchannel-hid/Kconfig new file mode 100644 index 000000000000..8a81d551a83d --- /dev/null +++ b/drivers/hid/dockchannel-hid/Kconfig @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0-only OR MIT +menu "DockChannel HID support" + depends on APPLE_DOCKCHANNEL + +config HID_DOCKCHANNEL + tristate "HID over DockChannel transport layer for Apple Silicon SoCs" + default ARCH_APPLE + depends on APPLE_DOCKCHANNEL && INPUT && OF && HID + help + Say Y here if you use an M2 or later Apple Silicon based laptop. + The keyboard and touchpad are HID based devices connected via the + proprietary DockChannel interface. 
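The dockchannel-hid.c transport added below frames each message as a header, a sub-header, a payload padded to a multiple of four bytes, and a trailing 32-bit value that dchid_send() computes so the little-endian 32-bit words of everything it emits, checksum included, sum to 0xffffffff. A minimal receive-side check of that invariant is sketched here; the helper is hypothetical and not part of the patch.

	#include <asm/unaligned.h>
	#include <linux/types.h>

	/*
	 * Returns true if the frame's 32-bit LE words, trailing checksum
	 * included, sum to 0xffffffff, the invariant dchid_send() establishes.
	 */
	static bool dchid_frame_checksum_ok(const void *frame, size_t len)
	{
		u32 sum = 0;

		if (len % 4)
			return false;

		while (len) {
			sum += get_unaligned_le32(frame);
			frame += 4;
			len -= 4;
		}

		return sum == 0xffffffff;
	}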
+ +endmenu diff --git a/drivers/hid/dockchannel-hid/Makefile b/drivers/hid/dockchannel-hid/Makefile new file mode 100644 index 000000000000..7dba766b047f --- /dev/null +++ b/drivers/hid/dockchannel-hid/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only OR MIT +# +# Makefile for DockChannel HID transport drivers +# + +obj-$(CONFIG_HID_DOCKCHANNEL) += dockchannel-hid.o diff --git a/drivers/hid/dockchannel-hid/dockchannel-hid.c b/drivers/hid/dockchannel-hid/dockchannel-hid.c new file mode 100644 index 000000000000..76eab8b35dfe --- /dev/null +++ b/drivers/hid/dockchannel-hid/dockchannel-hid.c @@ -0,0 +1,1058 @@ +/* + * SPDX-License-Identifier: GPL-2.0 OR MIT + * + * Apple DockChannel HID transport driver + * + * Copyright (C) The Asahi Linux Contributors + */ +#include <asm/unaligned.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/firmware.h> +#include <linux/gpio/consumer.h> +#include <linux/hid.h> +#include <linux/slab.h> +#include <linux/soc/apple/dockchannel.h> +#include <linux/of.h> +#include "../hid-ids.h" + +#define COMMAND_TIMEOUT 1000 + +#define MAX_INTERFACES 16 + +/* Data + checksum */ +#define MAX_PKT_SIZE (0xffff + 4) + +#define DCHID_CHANNEL_CMD 0x11 +#define DCHID_CHANNEL_REPORT 0x12 + +struct dchid_hdr { + u8 hdr_len; + u8 channel; + __le16 length; + u8 seq; + u8 iface; + __le16 pad; +} __packed; + +#define IFACE_COMM 0 + +#define FLAGS_GROUP GENMASK(7, 6) +#define FLAGS_REQ GENMASK(5, 0) + +#define GROUP_INPUT 0 +#define GROUP_OUTPUT 1 +#define GROUP_CMD 2 + +#define REQ_SET_REPORT 0 +#define REQ_GET_REPORT 1 + +struct dchid_subhdr { + u8 flags; + u8 unk; + __le16 length; + __le32 retcode; +} __packed; + +#define EVENT_GPIO_CMD 0xa0 +#define EVENT_INIT 0xf0 +#define EVENT_READY 0xf1 + +struct dchid_init_hdr { + u8 type; + u8 unk1; + u8 unk2; + u8 iface; + char name[16]; +} __packed; + +#define INIT_HID_DESCRIPTOR 0 +#define INIT_GPIO_REQUEST 1 +#define INIT_TERMINATOR 2 + +#define CMD_RESET_INTERFACE 0x40 +#define CMD_SEND_FIRMWARE 0x95 +#define CMD_ENABLE_INTERFACE 0xb4 +#define CMD_ACK_GPIO_CMD 0xa1 + +struct dchid_init_block_hdr { + __le16 type; + __le16 subtype; + __le16 length; +} __packed; + +#define MAX_GPIO_NAME 32 + +struct dchid_gpio_request { + __le16 unk; + __le16 id; + char name[MAX_GPIO_NAME]; +} __packed; + +struct dchid_gpio_cmd { + u8 type; + u8 iface; + u8 gpio; + u8 unk; + u8 cmd; +} __packed; + +struct dchid_gpio_ack { + u8 type; + __le32 retcode; + u8 cmd[]; +} __packed; + +#define STM_REPORT_ID 0x10 +#define STM_REPORT_SERIAL 0x11 +#define STM_REPORT_KEYBTYPE 0x14 + +#define KEYBOARD_TYPE_ANSI 0 +#define KEYBOARD_TYPE_ISO 1 +#define KEYBOARD_TYPE_JIS 2 + +struct dchid_stm_id { + u8 unk; + __le16 vendor_id; + __le16 product_id; + __le16 version_number; + u8 unk2; + u8 unk3; + u8 keyboard_type; + u8 serial_length; + /* Serial follows, but we grab it with a different report. 
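+	 * (presumably via STM_REPORT_SERIAL, defined above)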
*/ +} __packed; + +#define FW_MAGIC 0x46444948 +#define FW_VER 1 + +struct fw_header { + __le32 magic; + __le32 version; + __le32 hdr_length; + __le32 data_length; + __le32 iface_offset; +} __packed; + +struct dchid_work { + struct work_struct work; + struct dchid_iface *iface; + + struct dchid_hdr hdr; + u8 data[]; +}; + +struct dchid_iface { + struct dockchannel_hid *dchid; + struct hid_device *hid; + + int index; + const char *name; + const struct device_node *of_node; + + uint8_t tx_seq; + uint8_t rx_seq; + bool deferred; + bool open; + + void *hid_desc; + size_t hid_desc_len; + + struct gpio_desc *gpio; + int gpio_id; + + struct mutex out_mutex; + u32 out_flags; + int out_report; + u32 retcode; + void *resp_buf; + size_t resp_size; + struct completion out_complete; +}; + +struct dockchannel_hid { + struct device *dev; + struct dockchannel *dc; + + bool id_ready; + struct dchid_stm_id device_id; + char serial[64]; + u8 keyboard_type; + + struct dchid_iface *comm; + struct dchid_iface *ifaces[MAX_INTERFACES]; + + u8 pkt_buf[MAX_PKT_SIZE]; + + struct workqueue_struct *wq; +}; + +static struct dchid_iface * +dchid_get_interface(struct dockchannel_hid *dchid, int index, const char *name) +{ + struct dchid_iface *iface; + + if (index >= MAX_INTERFACES) { + dev_err(dchid->dev, "Interface index %d out of range\n", index); + return NULL; + } + + if (dchid->ifaces[index]) + return dchid->ifaces[index]; + + iface = devm_kzalloc(dchid->dev, sizeof(struct dchid_iface), GFP_KERNEL); + if (!iface) + return NULL; + + iface->index = index; + iface->name = devm_kstrdup(dchid->dev, name, GFP_KERNEL); + iface->dchid = dchid; + iface->out_report= -1; + init_completion(&iface->out_complete); + mutex_init(&iface->out_mutex); + + dev_info(dchid->dev, "Initializing interface %s\n", iface->name); + + /* Comm is not a HID subdevice */ + if (!strcmp(name, "comm")) { + dchid->ifaces[index] = iface; + return iface; + } + + iface->of_node = of_get_child_by_name(dchid->dev->of_node, name); + if (!iface->of_node) { + dev_warn(dchid->dev, "No OF node for subdevice %s, ignoring.", name); + return NULL; + } + + dchid->ifaces[index] = iface; + return iface; +} + +static u32 dchid_checksum(void *p, size_t length) +{ + u32 sum = 0; + + while (length >= 4) { + sum += get_unaligned_le32(p); + p += 4; + length -= 4; + } + + WARN_ON_ONCE(length); + return sum; +} + +static int dchid_send(struct dchid_iface *iface, u32 flags, void *msg, size_t size) +{ + u32 checksum = 0xffffffff; + size_t wsize = round_down(size, 4); + size_t tsize = size - wsize; + int ret; + struct { + struct dchid_hdr hdr; + struct dchid_subhdr sub; + } __packed h; + + memset(&h, 0, sizeof(h)); + h.hdr.hdr_len = sizeof(h.hdr); + h.hdr.channel = DCHID_CHANNEL_CMD; + h.hdr.length = round_up(size, 4) + sizeof(h.sub); + h.hdr.seq = iface->tx_seq; + h.hdr.iface = iface->index; + h.sub.flags = flags; + h.sub.length = size; + + ret = dockchannel_send(iface->dchid->dc, &h, sizeof(h)); + if (ret < 0) + return ret; + checksum -= dchid_checksum(&h, sizeof(h)); + + ret = dockchannel_send(iface->dchid->dc, msg, wsize); + if (ret < 0) + return ret; + checksum -= dchid_checksum(msg, wsize); + + if (tsize) { + u8 tail[4] = {0, 0, 0, 0}; + + memcpy(tail, msg + wsize, tsize); + ret = dockchannel_send(iface->dchid->dc, tail, sizeof(tail)); + if (ret < 0) + return ret; + checksum -= dchid_checksum(tail, sizeof(tail)); + } + + ret = dockchannel_send(iface->dchid->dc, &checksum, sizeof(checksum)); + if (ret < 0) + return ret; + + return 0; +} + +static int dchid_cmd(struct 
dchid_iface *iface, u32 type, u32 req, + void *data, size_t size, void *resp_buf, size_t resp_size) +{ + int ret; + int report_id = *(u8*)data; + + mutex_lock(&iface->out_mutex); + + WARN_ON(iface->out_report != -1); + iface->out_report = report_id; + iface->out_flags = FIELD_PREP(FLAGS_GROUP, type) | FIELD_PREP(FLAGS_REQ, req); + iface->resp_buf = resp_buf; + iface->resp_size = resp_size; + reinit_completion(&iface->out_complete); + + ret = dchid_send(iface, iface->out_flags, data, size); + if (ret < 0) + goto done; + + if (!wait_for_completion_timeout(&iface->out_complete, msecs_to_jiffies(1000))) { + dev_err(iface->dchid->dev, "output report 0x%x to iface %d (%s) timed out\n", + report_id, iface->index, iface->name); + ret = -ETIMEDOUT; + goto done; + } + + ret = iface->resp_size; + if (iface->retcode) { + dev_err(iface->dchid->dev, + "output report 0x%x to iface %d (%s) failed with err 0x%x\n", + report_id, iface->index, iface->name, iface->retcode); + ret = -EIO; + } + +done: + iface->tx_seq++; + iface->out_report = -1; + iface->out_flags = 0; + iface->resp_buf = NULL; + iface->resp_size = 0; + mutex_unlock(&iface->out_mutex); + return ret; +} + +static int dchid_comm_cmd(struct dockchannel_hid *dchid, void *cmd, size_t size) +{ + return dchid_cmd(dchid->comm, GROUP_CMD, REQ_SET_REPORT, cmd, size, NULL, 0); +} + +static int dchid_enable_interface(struct dchid_iface *iface) +{ + u8 msg[] = { CMD_ENABLE_INTERFACE, iface->index }; + + return dchid_comm_cmd(iface->dchid, msg, sizeof(msg)); +} + +static int dchid_reset_interface(struct dchid_iface *iface, int state) +{ + u8 msg[] = { CMD_RESET_INTERFACE, 1, iface->index, state }; + + return dchid_comm_cmd(iface->dchid, msg, sizeof(msg)); +} + +static int dchid_send_firmware(struct dchid_iface *iface, void *firmware, size_t size) +{ + struct { + u8 cmd; + u8 unk1; + u8 unk2; + u8 iface; + u64 addr; + u32 size; + } __packed msg = { + .cmd = CMD_SEND_FIRMWARE, + .unk1 = 2, + .unk2 = 0, + .iface = iface->index, + .size = size, + }; + dma_addr_t addr; + void *buf = dmam_alloc_coherent(iface->dchid->dev, size, &addr, GFP_KERNEL); + + if (IS_ERR_OR_NULL(buf)) + return buf ? 
PTR_ERR(buf) : -ENOMEM; + + msg.addr = addr; + memcpy(buf, firmware, size); + wmb(); + + return dchid_comm_cmd(iface->dchid, &msg, sizeof(msg)); +} + +static int dchid_get_firmware(struct dchid_iface *iface, void **firmware, size_t *size) +{ + int ret; + const char *fw_name; + const struct firmware *fw; + struct fw_header *hdr; + u8 *fw_data; + + ret = of_property_read_string(iface->of_node, "firmware-name", &fw_name); + if (ret) { + /* Firmware is only for some devices */ + *firmware = NULL; + *size = 0; + return 0; + } + + ret = request_firmware(&fw, fw_name, iface->dchid->dev); + if (ret) + return ret; + + hdr = (struct fw_header *)fw->data; + + if (hdr->magic != FW_MAGIC || hdr->version != FW_VER || + hdr->hdr_length < sizeof(*hdr) || hdr->hdr_length > fw->size || + (hdr->hdr_length + (size_t)hdr->data_length) > fw->size || + hdr->iface_offset >= hdr->data_length) { + dev_warn(iface->dchid->dev, "%s: invalid firmware header\n", + fw_name); + ret = -EINVAL; + goto done; + } + + fw_data = devm_kmemdup(iface->dchid->dev, fw->data + hdr->hdr_length, + hdr->data_length, GFP_KERNEL); + if (!fw_data) { + ret = -ENOMEM; + goto done; + } + + if (hdr->iface_offset) + fw_data[hdr->iface_offset] = iface->index; + + *firmware = fw_data; + *size = hdr->data_length; + +done: + release_firmware(fw); + return ret; +} + + +static int dchid_start(struct hid_device *hdev) +{ + return 0; +}; + +static void dchid_stop(struct hid_device *hdev) +{ + /* no-op, we don't know what the shutdown commands are, if any */ +} + +static int dchid_open(struct hid_device *hdev) +{ + struct dchid_iface *iface = hdev->driver_data; + + iface->open = true; + return 0; +} + +static void dchid_close(struct hid_device *hdev) +{ + struct dchid_iface *iface = hdev->driver_data; + + iface->open = false; +} + +static int dchid_parse(struct hid_device *hdev) +{ + struct dchid_iface *iface = hdev->driver_data; + + return hid_parse_report(hdev, iface->hid_desc, iface->hid_desc_len); +} + +/* Note: buf excludes report number! For ease of fetching strings/etc. */ +static int dchid_get_report_cmd(struct dchid_iface *iface, u8 reportnum, void *buf, size_t len) +{ + int ret = dchid_cmd(iface, GROUP_CMD, REQ_GET_REPORT, &reportnum, 1, buf, len); + + return ret <= 0 ? ret : ret - 1; +} + +/* Note: buf includes report number! 
*/ +static int dchid_set_report(struct dchid_iface *iface, void *buf, size_t len) +{ + return dchid_cmd(iface, GROUP_OUTPUT, REQ_SET_REPORT, buf, len, NULL, 0); +} + +static int dchid_raw_request(struct hid_device *hdev, + unsigned char reportnum, __u8 *buf, size_t len, + unsigned char rtype, int reqtype) +{ + struct dchid_iface *iface = hdev->driver_data; + + switch (reqtype) { + case HID_REQ_GET_REPORT: + buf[0] = reportnum; + return dchid_cmd(iface, GROUP_OUTPUT, REQ_GET_REPORT, &reportnum, 1, buf + 1, len - 1); + case HID_REQ_SET_REPORT: + return dchid_set_report(iface, buf, len); + default: + return -EIO; + } + + return 0; +} + +static struct hid_ll_driver dchid_ll = { + .start = &dchid_start, + .stop = &dchid_stop, + .open = &dchid_open, + .close = &dchid_close, + .parse = &dchid_parse, + .raw_request = &dchid_raw_request, +}; + +static void dchid_init_interface(struct dchid_iface *iface) +{ + void *fw; + size_t size; + + iface->deferred = false; + + /* Enable interface (general) */ + if (dchid_enable_interface(iface) < 0) + return; + + /* Look to see if we need firmware */ + if (dchid_get_firmware(iface, &fw, &size) < 0) + return; + + /* Only multi-touch has firmware */ + if (!fw || !size) + return; + + /* Send it to the device */ + if (dchid_send_firmware(iface, fw, size) < 0) + return; + + /* After loading firmware, multi-touch needs a reset */ + dchid_reset_interface(iface, 0); + dchid_reset_interface(iface, 2); +} + +static void dchid_handle_descriptor(struct dchid_iface *iface, void *hid_desc, size_t desc_len) +{ + if (iface->hid) { + dev_warn(iface->dchid->dev, "Tried to initialize already started interface %s!\n", + iface->name); + return; + } + + iface->hid_desc = devm_kmemdup(iface->dchid->dev, hid_desc, desc_len, GFP_KERNEL); + if (!iface->hid_desc) + return; + + iface->hid_desc_len = desc_len; + + /* We need to enable STM first, since it'll give us the device IDs */ + if (iface->dchid->id_ready || !strcmp(iface->name, "stm")) + dchid_init_interface(iface); + else + iface->deferred = true; +} + +static void dchid_handle_ready(struct dockchannel_hid *dchid, void *data, size_t length) +{ + struct hid_device *hid; + struct dchid_iface *iface; + int ret; + u8 *pkt = data; + u8 index; + int i; + + if (length < 2) { + dev_err(dchid->dev, "Bad length for ready message: %ld\n", length); + return; + } + + index = pkt[1]; + + if (index >= MAX_INTERFACES) { + dev_err(dchid->dev, "Got ready notification for bad iface %d\n", index); + return; + } + + iface = dchid->ifaces[index]; + if (!iface) { + dev_err(dchid->dev, "Got ready notification for unknown iface %d\n", index); + return; + } + + if (iface->hid) { + dev_warn(iface->dchid->dev, "Interface %s already ready!\n", + iface->name); + return; + } + + /* When STM is ready, grab global device info */ + if (!strcmp(iface->name, "stm")) { + ret = dchid_get_report_cmd(iface, STM_REPORT_ID, &dchid->device_id, + sizeof(dchid->device_id)); + if (ret < sizeof(dchid->device_id)) { + dev_warn(iface->dchid->dev, "Failed to get device ID from STM!\n"); + /* Fake it and keep going. Things might still work... 
*/ + memset(&dchid->device_id, 0, sizeof(dchid->device_id)); + dchid->device_id.vendor_id = HOST_VENDOR_ID_APPLE; + } + ret = dchid_get_report_cmd(iface, STM_REPORT_SERIAL, dchid->serial, + sizeof(dchid->serial) - 1); + if (ret < 0) { + dev_warn(iface->dchid->dev, "Failed to get serial from STM!\n"); + dchid->serial[0] = 0; + } + + dchid->id_ready = true; + for (i = 0; i < MAX_INTERFACES; i++) + if (dchid->ifaces[i] && dchid->ifaces[i]->deferred) + dchid_init_interface(dchid->ifaces[i]); + + } + + hid = hid_allocate_device(); + if (IS_ERR(hid)) + return; + + snprintf(hid->name, sizeof(hid->name), "Apple MTP %s", iface->name); + snprintf(hid->phys, sizeof(hid->phys), "%s.%d (%s)", + dev_name(iface->dchid->dev), iface->index, iface->name); + strscpy(hid->uniq, dchid->serial, sizeof(hid->uniq)); + + hid->ll_driver = &dchid_ll; + hid->bus = BUS_HOST; + hid->vendor = dchid->device_id.vendor_id; + hid->product = dchid->device_id.product_id; + hid->version = dchid->device_id.version_number; + hid->type = HID_TYPE_OTHER; + if (!strcmp(iface->name, "multi-touch")) { + hid->type = HID_TYPE_SPI_MOUSE; + } else if (!strcmp(iface->name, "keyboard")) { + hid->type = HID_TYPE_SPI_KEYBOARD; + + /* These country codes match what earlier Apple HID keyboards did */ + switch (dchid->keyboard_type) { + case KEYBOARD_TYPE_ANSI: + hid->country = 33; // US-English + break; + + case KEYBOARD_TYPE_ISO: + hid->country = 13; // ISO + break; + + case KEYBOARD_TYPE_JIS: + hid->country = 15; // Japan + break; + } + } + + hid->dev.parent = iface->dchid->dev; + hid->driver_data = iface; + + ret = hid_add_device(hid); + if (ret < 0) { + hid_destroy_device(hid); + dev_warn(iface->dchid->dev, "Failed to register hid device %s", iface->name); + return; + } + + iface->hid = hid; +} + +static void dchid_request_gpio(struct dchid_iface *iface, int id, const char *name) +{ + char prop_name[MAX_GPIO_NAME + 16]; + + dev_info(iface->dchid->dev, "Requesting GPIO %s#%d: %s\n", iface->name, id, name); + + if (iface->gpio) { + dev_err(iface->dchid->dev, "Cannot request more than one GPIO per interface!\n"); + return; + } + + snprintf(prop_name, sizeof(prop_name), "apple,%s-gpios", name); + + iface->gpio = devm_gpiod_get_from_of_node(iface->dchid->dev, + iface->of_node, prop_name, 0, + GPIOD_OUT_LOW, name); + + if (IS_ERR_OR_NULL(iface->gpio)) { + dev_err(iface->dchid->dev, "Failed to request GPIO %s\n", prop_name); + iface->gpio = NULL; + return; + } + + iface->gpio_id = id; +} + +static void dchid_handle_init(struct dockchannel_hid *dchid, void *data, size_t length) +{ + struct dchid_init_hdr *hdr = data; + struct dchid_iface *iface; + struct dchid_init_block_hdr *blk; + + if (length < sizeof(*hdr)) + return; + + iface = dchid_get_interface(dchid, hdr->iface, hdr->name); + if (!iface) + return; + + data += sizeof(*hdr); + length -= sizeof(*hdr); + + while (length > sizeof(*blk)) { + blk = data; + data += sizeof(*blk); + length -= sizeof(*blk); + + if (blk->length > length) + return; + switch (blk->type) { + case INIT_HID_DESCRIPTOR: + dchid_handle_descriptor(iface, data, blk->length); + break; + + case INIT_GPIO_REQUEST: { + struct dchid_gpio_request *req = data; + + if (sizeof(*req) > length) + return; + dchid_request_gpio(iface, req->id, req->name); + break; + } + + case INIT_TERMINATOR: + return; + } + + data += blk->length + sizeof(*blk); + length -= blk->length + sizeof(*blk); + } +} + +static void dchid_handle_gpio(struct dockchannel_hid *dchid, void *data, size_t length) +{ + struct dchid_gpio_cmd *cmd = data; + struct 
dchid_iface *iface; + u32 retcode = 0xe000f00d; /* Give it a random Apple-style error code */ + struct dchid_gpio_ack *ack; + + if (length < sizeof(*cmd)) + return; + + if (cmd->iface >= MAX_INTERFACES || !(iface = dchid->ifaces[cmd->iface])) { + dev_err(dchid->dev, "Got GPIO command for bad inteface %d\n", cmd->iface); + goto err; + } + + if (!iface->gpio || cmd->gpio != iface->gpio_id) { + dev_err(dchid->dev, "Got GPIO command for bad GPIO %s#%d\n", + iface->name, cmd->gpio); + goto err; + } + + dev_info(dchid->dev, "GPIO command: %s#%d: %d\n", iface->name, cmd->gpio, cmd->cmd); + + switch (cmd->cmd) { + case 3: + /* Pulse. */ + gpiod_set_value_cansleep(iface->gpio, 1); + msleep(10); /* Random guess... */ + gpiod_set_value_cansleep(iface->gpio, 0); + retcode = 0; + break; + default: + dev_err(dchid->dev, "Unknown GPIO command %d\n", cmd->cmd ); + break; + } + +err: + /* Ack it */ + ack = kzalloc(sizeof(*ack) + length, GFP_KERNEL); + if (!ack) + return; + + ack->type = CMD_ACK_GPIO_CMD; + ack->retcode = retcode; + memcpy(ack->cmd, data, length); + + if (dchid_comm_cmd(dchid, ack, sizeof(*ack) + length) < 0) + dev_err(dchid->dev, "Failed to ACK GPIO command\n"); + + kfree(ack); +} + +static void dchid_handle_event(struct dockchannel_hid *dchid, void *data, size_t length) +{ + u8 *p = data; + switch (*p) { + case EVENT_INIT: + dchid_handle_init(dchid, data, length); + break; + case EVENT_READY: + dchid_handle_ready(dchid, data, length); + break; + case EVENT_GPIO_CMD: + dchid_handle_gpio(dchid, data, length); + break; + } +} + +static void dchid_handle_report(struct dchid_iface *iface, void *data, size_t length) +{ + struct dockchannel_hid *dchid = iface->dchid; + + if (!iface->hid) { + dev_warn(dchid->dev, "Report received but %s is not initialized!\n", iface->name); + return; + } + + if (!iface->open) + return; + + hid_input_report(iface->hid, HID_INPUT_REPORT, data, length, 1); +} + +static void dchid_packet_work(struct work_struct *ws) +{ + struct dchid_work *work = container_of(ws, struct dchid_work, work); + struct dchid_subhdr *shdr = (void *)work->data; + struct dockchannel_hid *dchid = work->iface->dchid; + int type = FIELD_GET(FLAGS_GROUP, shdr->flags); + u8 *payload = work->data + sizeof(*shdr); + + if (shdr->length + sizeof(*shdr) > work->hdr.length) { + dev_err(dchid->dev, "Bad sub header length (%d > %ld)\n", + shdr->length, work->hdr.length - sizeof(*shdr)); + return; + } + + switch (type) { + case GROUP_INPUT: + if (work->hdr.iface == IFACE_COMM) + dchid_handle_event(dchid, payload, shdr->length); + else + dchid_handle_report(work->iface, payload, shdr->length); + break; + default: + dev_err(dchid->dev, "Received unknown packet type %d\n", type); + break; + } + + kfree(work); +} + +static void dchid_handle_ack(struct dchid_iface *iface, struct dchid_hdr *hdr, void *data) +{ + struct dchid_subhdr *shdr = (void *)data; + u8 *payload = data + sizeof(*shdr); + + if (shdr->length + sizeof(*shdr) > hdr->length) { + dev_err(iface->dchid->dev, "Bad sub header length (%d > %ld)\n", + shdr->length, hdr->length - sizeof(*shdr)); + return; + } + if (shdr->flags != iface->out_flags) { + dev_err(iface->dchid->dev, + "Received unexpected flags 0x%x on ACK channel (expected 0x%x)\n", + shdr->flags, iface->out_flags); + return; + } + + if (shdr->length < 1) { + dev_err(iface->dchid->dev, "Received length 0 output report ack\n"); + return; + } + if (iface->tx_seq != hdr->seq) { + dev_err(iface->dchid->dev, "Received ACK with bad seq (expected %d, got %d)\n", + iface->rx_seq, hdr->seq); + 
return; + } + if (iface->out_report != payload[0]) { + dev_err(iface->dchid->dev, "Received ACK with bad report (expected %d, got %d\n", + iface->out_report, payload[0]); + return; + } + + if (iface->resp_buf && iface->resp_size) + memcpy(iface->resp_buf, payload + 1, min((size_t)shdr->length - 1, iface->resp_size)); + + iface->resp_size = shdr->length; + iface->out_report = -1; + iface->retcode = shdr->retcode; + complete(&iface->out_complete); +} + +static void dchid_handle_packet(void *cookie, size_t avail) +{ + struct dockchannel_hid *dchid = cookie; + struct dchid_hdr hdr; + struct dchid_work *work; + struct dchid_iface *iface; + u32 checksum; + + if (dockchannel_recv(dchid->dc, &hdr, sizeof(hdr)) != sizeof(hdr)) { + dev_err(dchid->dev, "Read failed (header)\n"); + return; + } + + if (hdr.hdr_len != sizeof(hdr)) { + dev_err(dchid->dev, "Bad header length %d\n", hdr.hdr_len); + goto done; + } + + if (dockchannel_recv(dchid->dc, dchid->pkt_buf, hdr.length + 4) != (hdr.length + 4)) { + dev_err(dchid->dev, "Read failed (body)\n"); + goto done; + } + + checksum = dchid_checksum(&hdr, sizeof(hdr)); + checksum += dchid_checksum(dchid->pkt_buf, hdr.length + 4); + + if (checksum != 0xffffffff) { + dev_err(dchid->dev, "Checksum mismatch (iface %d): 0x%08x != 0xffffffff\n", + hdr.iface, checksum); + goto done; + } + + + if (hdr.iface >= MAX_INTERFACES) { + dev_err(dchid->dev, "Bad iface %d\n", hdr.iface); + } + + iface = dchid->ifaces[hdr.iface]; + + if (!iface) { + dev_err(dchid->dev, "Received packet for uninitialized iface %d\n", hdr.iface); + goto done; + } + + switch (hdr.channel) { + case DCHID_CHANNEL_CMD: + dchid_handle_ack(iface, &hdr, dchid->pkt_buf); + goto done; + case DCHID_CHANNEL_REPORT: + break; + default: + dev_warn(dchid->dev, "Unknown channel 0x%x, treating as report...\n", + hdr.channel); + break; + } + + if (hdr.seq != iface->rx_seq) { + dev_err(dchid->dev, "Received packet out of sequence (expected %d, got %d)\n", + iface->rx_seq, hdr.seq); + goto done; + } + + iface->rx_seq++; + + work = kzalloc(sizeof(*work) + hdr.length, GFP_KERNEL); + if (!work) + return; + + work->hdr = hdr; + work->iface = iface; + memcpy(work->data, dchid->pkt_buf, hdr.length); + INIT_WORK(&work->work, dchid_packet_work); + + queue_work(dchid->wq, &work->work); + +done: + dockchannel_await(dchid->dc, dchid_handle_packet, dchid, sizeof(struct dchid_hdr)); +} + +static int dockchannel_hid_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dockchannel_hid *dchid; + struct device_node *child; + struct property *prop; + bool defer = false; + + /* + * First make sure all the GPIOs are available, in cased we need to defer. + * This is necessary because MTP will request them by name later, and by then + * it's too late to defer the probe. 
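+	 * The loop below therefore walks every child node, tentatively claims
+	 * each apple,<name>-gpios property once, and defers the whole probe if
+	 * any of those GPIOs is not available yet; otherwise the descriptor is
+	 * released again right away and re-requested later in
+	 * dchid_request_gpio().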
+ */ + + for_each_child_of_node(dev->of_node, child) { + for_each_property_of_node(child, prop) { + size_t len = strlen(prop->name); + struct gpio_desc *gpio; + + if (len < 12 || strncmp("apple,", prop->name, 6) || + strcmp("-gpios", prop->name + len - 6)) + continue; + + gpio = gpiod_get_from_of_node(child, prop->name, 0, GPIOD_ASIS, + prop->name); + if (IS_ERR_OR_NULL(gpio)) { + if (PTR_ERR(gpio) == EPROBE_DEFER) { + defer = true; + of_node_put(child); + return -EPROBE_DEFER; + } + } else { + gpiod_put(gpio); + } + } + } + + dchid = devm_kzalloc(dev, sizeof(*dchid), GFP_KERNEL); + if (!dchid) + return -ENOMEM; + + + dchid->dev = &pdev->dev; + dchid->dc = dockchannel_init(pdev); + if (IS_ERR_OR_NULL(dchid->dc)) { + return -PTR_ERR(dchid->dc); + } + + dchid->comm = dchid_get_interface(dchid, IFACE_COMM, "comm"); + if (!dchid->comm) { + dev_err(dchid->dev, "Failed to initialize comm interface"); + return -EIO; + } + + dchid->wq = alloc_ordered_workqueue("dockchannel-hid-report", WQ_MEM_RECLAIM); + if (!dchid->wq) + return -ENOMEM; + + dev_info(dchid->dev, "initialized, awaiting packets\n"); + dockchannel_await(dchid->dc, dchid_handle_packet, dchid, sizeof(struct dchid_hdr)); + + return 0; +} + +static int dockchannel_hid_remove(struct platform_device *pdev) +{ + BUG_ON(1); + return 0; +} + +static const struct of_device_id dockchannel_hid_of_match[] = { + { .compatible = "apple,dockchannel-hid" }, + {}, +}; +MODULE_DEVICE_TABLE(of, dockchannel_hid_of_match); + +static struct platform_driver dockchannel_hid_driver = { + .driver = { + .name = "dockchannel-hid", + .of_match_table = dockchannel_hid_of_match, + }, + .probe = dockchannel_hid_probe, + .remove = dockchannel_hid_remove, +}; +module_platform_driver(dockchannel_hid_driver); + +MODULE_DESCRIPTION("Apple DockChannel HID transport driver"); +MODULE_AUTHOR("Hector Martin <marcan@marcan.st>"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 42a568902f49..f4da7d60313f 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -252,6 +252,50 @@ static const struct apple_key_translation apple_fn_keys[] = { { } }; +static const struct apple_key_translation apple_fn_keys_spi[] = { + { KEY_BACKSPACE, KEY_DELETE }, + { KEY_ENTER, KEY_INSERT }, + { KEY_F1, KEY_BRIGHTNESSDOWN, APPLE_FLAG_FKEY }, + { KEY_F2, KEY_BRIGHTNESSUP, APPLE_FLAG_FKEY }, + { KEY_F3, KEY_SCALE, APPLE_FLAG_FKEY }, + { KEY_F4, KEY_SEARCH, APPLE_FLAG_FKEY }, + { KEY_F5, KEY_RECORD, APPLE_FLAG_FKEY }, + { KEY_F6, KEY_SLEEP, APPLE_FLAG_FKEY }, + { KEY_F7, KEY_PREVIOUSSONG, APPLE_FLAG_FKEY }, + { KEY_F8, KEY_PLAYPAUSE, APPLE_FLAG_FKEY }, + { KEY_F9, KEY_NEXTSONG, APPLE_FLAG_FKEY }, + { KEY_F10, KEY_MUTE, APPLE_FLAG_FKEY }, + { KEY_F11, KEY_VOLUMEDOWN, APPLE_FLAG_FKEY }, + { KEY_F12, KEY_VOLUMEUP, APPLE_FLAG_FKEY }, + { KEY_UP, KEY_PAGEUP }, + { KEY_DOWN, KEY_PAGEDOWN }, + { KEY_LEFT, KEY_HOME }, + { KEY_RIGHT, KEY_END }, + { } +}; + +static const struct apple_key_translation apple_fn_keys_mbp13[] = { + { KEY_BACKSPACE, KEY_DELETE }, + { KEY_ENTER, KEY_INSERT }, + { KEY_UP, KEY_PAGEUP }, + { KEY_DOWN, KEY_PAGEDOWN }, + { KEY_LEFT, KEY_HOME }, + { KEY_RIGHT, KEY_END }, + { KEY_1, KEY_F1 }, + { KEY_2, KEY_F2 }, + { KEY_3, KEY_F3 }, + { KEY_4, KEY_F4 }, + { KEY_5, KEY_F5 }, + { KEY_6, KEY_F6 }, + { KEY_7, KEY_F7 }, + { KEY_8, KEY_F8 }, + { KEY_9, KEY_F9 }, + { KEY_0, KEY_F10 }, + { KEY_MINUS, KEY_F11 }, + { KEY_EQUAL, KEY_F12 }, + { } +}; + static const struct apple_key_translation powerbook_fn_keys[] = { { KEY_BACKSPACE, 
KEY_DELETE }, { KEY_F1, KEY_BRIGHTNESSDOWN, APPLE_FLAG_FKEY }, @@ -400,6 +444,11 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input, else if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI && hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) table = macbookair_fn_keys; + else if (hid->vendor == SPI_VENDOR_ID_APPLE && + hid->product == SPI_DEVICE_ID_APPLE_MACBOOK_PRO13_2020) + table = apple_fn_keys_mbp13; + else if (hid->vendor == SPI_VENDOR_ID_APPLE) + table = apple_fn_keys_spi; else if (hid->product < 0x21d || hid->product >= 0x300) table = powerbook_fn_keys; else @@ -607,6 +656,8 @@ static void apple_setup_input(struct input_dev *input) /* Enable all needed keys */ apple_setup_key_translation(input, apple_fn_keys); + apple_setup_key_translation(input, apple_fn_keys_spi); + apple_setup_key_translation(input, apple_fn_keys_mbp13); apple_setup_key_translation(input, powerbook_fn_keys); apple_setup_key_translation(input, powerbook_numlock_keys); apple_setup_key_translation(input, apple_iso_keyboard); @@ -783,6 +834,10 @@ static int apple_probe(struct hid_device *hdev, struct apple_sc *asc; int ret; + if ((id->bus == BUS_SPI || id->bus == BUS_HOST) && id->vendor == SPI_VENDOR_ID_APPLE && + hdev->type != HID_TYPE_SPI_KEYBOARD) + return -ENODEV; + asc = devm_kzalloc(&hdev->dev, sizeof(*asc), GFP_KERNEL); if (asc == NULL) { hid_err(hdev, "can't alloc apple descriptor\n"); @@ -1024,6 +1079,10 @@ static const struct hid_device_id apple_devices[] = { .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY }, { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021), .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, + { HID_SPI_DEVICE(SPI_VENDOR_ID_APPLE, HID_ANY_ID), + .driver_data = APPLE_HAS_FN }, + { HID_DEVICE(BUS_HOST, HID_GROUP_ANY, HOST_VENDOR_ID_APPLE, + HID_ANY_ID), .driver_data = 0 }, { } }; diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 00154a1cd2d8..048f13b487b3 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -2219,6 +2219,12 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask) case BUS_I2C: bus = "I2C"; break; + case BUS_SPI: + bus = "SPI"; + break; + case BUS_HOST: + bus = "HOST"; + break; case BUS_VIRTUAL: bus = "VIRTUAL"; break; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index d9eb676abe96..547609a9aec0 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -89,6 +89,8 @@ #define USB_VENDOR_ID_APPLE 0x05ac #define BT_VENDOR_ID_APPLE 0x004c +#define SPI_VENDOR_ID_APPLE 0x05ac +#define HOST_VENDOR_ID_APPLE 0x05ac #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304 #define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d #define USB_DEVICE_ID_APPLE_MAGICMOUSE2 0x0269 @@ -185,6 +187,11 @@ #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021 0x029c #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021 0x029a #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021 0x029f +#define SPI_DEVICE_ID_APPLE_MACBOOK_AIR_2020 0x0281 +#define SPI_DEVICE_ID_APPLE_MACBOOK_PRO13_2020 0x0341 +#define SPI_DEVICE_ID_APPLE_MACBOOK_PRO14_2021 0x0342 +#define SPI_DEVICE_ID_APPLE_MACBOOK_PRO16_2021 0x0343 +#define HOST_DEVICE_ID_APPLE_MACBOOK_PRO13_2022 0x0354 #define USB_VENDOR_ID_ASUS 0x0486 #define USB_DEVICE_ID_ASUS_T91MT 0x0185 diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 664a624a363d..a951e8a8f085 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -59,8 +59,12 @@ 
MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie #define MOUSE_REPORT_ID 0x29 #define MOUSE2_REPORT_ID 0x12 #define DOUBLE_REPORT_ID 0xf7 +#define SPI_REPORT_ID 0x02 +#define MTP_REPORT_ID 0x75 #define USB_BATTERY_TIMEOUT_MS 60000 +#define MAX_CONTACTS 16 + /* These definitions are not precise, but they're close enough. (Bits * 0x03 seem to indicate the aspect ratio of the touch, bits 0x70 seem * to be some kind of bit mask -- 0x20 may be a near-field reading, @@ -111,6 +115,25 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie #define TRACKPAD2_RES_Y \ ((TRACKPAD2_MAX_Y - TRACKPAD2_MIN_Y) / (TRACKPAD2_DIMENSION_Y / 100)) +#define J314_TP_DIMENSION_X (float)13000 +#define J314_TP_MIN_X -5900 +#define J314_TP_MAX_X 6500 +#define J314_TP_RES_X \ + ((J314_TP_MAX_X - J314_TP_MIN_X) / (J314_TP_DIMENSION_X / 100)) +#define J314_TP_DIMENSION_Y (float)8100 +#define J314_TP_MIN_Y -200 +#define J314_TP_MAX_Y 7400 +#define J314_TP_RES_Y \ + ((J314_TP_MAX_Y - J314_TP_MIN_Y) / (J314_TP_DIMENSION_Y / 100)) + +#define J314_TP_MAX_FINGER_ORIENTATION 16384 + +struct magicmouse_input_ops { + int (*raw_event)(struct hid_device *hdev, + struct hid_report *report, u8 *data, int size); + int (*setup_input)(struct input_dev *input, struct hid_device *hdev); +}; + /** * struct magicmouse_sc - Tracks Magic Mouse-specific data. * @input: Input device through which we report events. @@ -129,9 +152,8 @@ struct magicmouse_sc { int scroll_accel; unsigned long scroll_jiffies; + struct input_mt_pos pos[MAX_CONTACTS]; struct { - short x; - short y; short scroll_x; short scroll_y; short scroll_x_hr; @@ -139,12 +161,13 @@ struct magicmouse_sc { u8 size; bool scroll_x_active; bool scroll_y_active; - } touches[16]; - int tracking_ids[16]; + } touches[MAX_CONTACTS]; + int tracking_ids[MAX_CONTACTS]; struct hid_device *hdev; struct delayed_work work; struct timer_list battery_timer; + struct magicmouse_input_ops input_ops; }; static int magicmouse_firm_touch(struct magicmouse_sc *msc) @@ -188,7 +211,7 @@ static void magicmouse_emit_buttons(struct magicmouse_sc *msc, int state) } else if (last_state != 0) { state = last_state; } else if ((id = magicmouse_firm_touch(msc)) >= 0) { - int x = msc->touches[id].x; + int x = msc->pos[id].x; if (x < middle_button_start) state = 1; else if (x > middle_button_stop) @@ -249,8 +272,8 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda /* Store tracking ID and other fields. 
*/ msc->tracking_ids[raw_id] = id; - msc->touches[id].x = x; - msc->touches[id].y = y; + msc->pos[id].x = x; + msc->pos[id].y = y; msc->touches[id].size = size; /* If requested, emulate a scroll wheel by detecting small @@ -374,6 +397,14 @@ static int magicmouse_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct magicmouse_sc *msc = hid_get_drvdata(hdev); + + return msc->input_ops.raw_event(hdev, report, data, size); +} + +static int magicmouse_raw_event_usb(struct hid_device *hdev, + struct hid_report *report, u8 *data, int size) +{ + struct magicmouse_sc *msc = hid_get_drvdata(hdev); struct input_dev *input = msc->input; int x = 0, y = 0, ii, clicks = 0, npoints; @@ -502,6 +533,175 @@ static int magicmouse_raw_event(struct hid_device *hdev, return 1; } +/** + * struct tp_finger - single trackpad finger structure, le16-aligned + * + * @unknown1: unknown + * @unknown2: unknown + * @abs_x: absolute x coordinate + * @abs_y: absolute y coordinate + * @rel_x: relative x coordinate + * @rel_y: relative y coordinate + * @tool_major: tool area, major axis + * @tool_minor: tool area, minor axis + * @orientation: 16384 when point, else 15 bit angle + * @touch_major: touch area, major axis + * @touch_minor: touch area, minor axis + * @unused: zeros + * @pressure: pressure on forcetouch touchpad + * @multi: one finger: varies, more fingers: constant + * @crc16: on last finger: crc over the whole message struct + * (i.e. message header + this struct) minus the last + * @crc16 field; unknown on all other fingers. + */ +struct tp_finger { + __le16 unknown1; + __le16 unknown2; + __le16 abs_x; + __le16 abs_y; + __le16 rel_x; + __le16 rel_y; + __le16 tool_major; + __le16 tool_minor; + __le16 orientation; + __le16 touch_major; + __le16 touch_minor; + __le16 unused[2]; + __le16 pressure; + __le16 multi; +} __attribute__((packed, aligned(2))); + +/** + * vendor trackpad report + * + * @num_fingers: the number of fingers being reported in @fingers + * @buttons: same as HID buttons + */ +struct tp_header { + // HID vendor part, up to 1751 bytes + u8 unknown[22]; + u8 num_fingers; + u8 buttons; + u8 unknown3[14]; +}; + +/** + * standard HID mouse report + * + * @report_id: reportid + * @buttons: HID Usage Buttons 3 1-bit reports + */ +struct tp_mouse_report { + // HID mouse report + u8 report_id; + u8 buttons; + u8 rel_x; + u8 rel_y; + u8 padding[4]; +}; + +static inline int le16_to_int(__le16 x) +{ + return (signed short)le16_to_cpu(x); +} + +static void report_finger_data(struct input_dev *input, int slot, + const struct input_mt_pos *pos, + const struct tp_finger *f) +{ + input_mt_slot(input, slot); + input_mt_report_slot_state(input, MT_TOOL_FINGER, true); + + input_report_abs(input, ABS_MT_TOUCH_MAJOR, + le16_to_int(f->touch_major) << 1); + input_report_abs(input, ABS_MT_TOUCH_MINOR, + le16_to_int(f->touch_minor) << 1); + input_report_abs(input, ABS_MT_WIDTH_MAJOR, + le16_to_int(f->tool_major) << 1); + input_report_abs(input, ABS_MT_WIDTH_MINOR, + le16_to_int(f->tool_minor) << 1); + input_report_abs(input, ABS_MT_ORIENTATION, + J314_TP_MAX_FINGER_ORIENTATION - le16_to_int(f->orientation)); + input_report_abs(input, ABS_MT_PRESSURE, le16_to_int(f->pressure)); + input_report_abs(input, ABS_MT_POSITION_X, pos->x); + input_report_abs(input, ABS_MT_POSITION_Y, pos->y); +} + +static int magicmouse_raw_event_mtp(struct hid_device *hdev, + struct hid_report *report, u8 *data, int size) +{ + struct magicmouse_sc *msc = hid_get_drvdata(hdev); + struct input_dev *input = 
msc->input; + struct tp_header *tp_hdr; + struct tp_finger *f; + int i, n; + u32 npoints; + const size_t hdr_sz = sizeof(struct tp_header); + const size_t touch_sz = sizeof(struct tp_finger); + u8 map_contacs[MAX_CONTACTS]; + + // hid_warn(hdev, "%s\n", __func__); + // print_hex_dump_debug("appleft ev: ", DUMP_PREFIX_OFFSET, 16, 1, data, + // size, false); + + /* Expect 46 bytes of prefix, and N * 30 bytes of touch data. */ + if (size < hdr_sz || ((size - hdr_sz) % touch_sz) != 0) + return 0; + + tp_hdr = (struct tp_header *)data; + + npoints = (size - hdr_sz) / touch_sz; + if (npoints < tp_hdr->num_fingers || npoints > MAX_CONTACTS) { + hid_warn(hdev, + "unexpected number of touches (%u) for " + "report\n", + npoints); + return 0; + } + + n = 0; + for (i = 0; i < tp_hdr->num_fingers; i++) { + f = (struct tp_finger *)(data + hdr_sz + i * touch_sz); + if (le16_to_int(f->touch_major) == 0) + continue; + + hid_dbg(hdev, "ev x:%04hx y:%04hx\n", le16_to_int(f->abs_x), + le16_to_int(f->abs_y)); + msc->pos[n].x = le16_to_int(f->abs_x); + msc->pos[n].y = -le16_to_int(f->abs_y); + map_contacs[n] = i; + n++; + } + + input_mt_assign_slots(input, msc->tracking_ids, msc->pos, n, 0); + + for (i = 0; i < n; i++) { + int idx = map_contacs[i]; + f = (struct tp_finger *)(data + hdr_sz + idx * touch_sz); + report_finger_data(input, msc->tracking_ids[i], &msc->pos[i], f); + } + + input_mt_sync_frame(input); + input_report_key(input, BTN_MOUSE, tp_hdr->buttons & 1); + + input_sync(input); + return 1; +} + +static int magicmouse_raw_event_spi(struct hid_device *hdev, + struct hid_report *report, u8 *data, int size) +{ + const size_t hdr_sz = sizeof(struct tp_mouse_report); + + if (size < hdr_sz) + return 0; + + if (data[0] != TRACKPAD2_USB_REPORT_ID) + return 0; + + return magicmouse_raw_event_mtp(hdev, report, data + hdr_sz, size - hdr_sz); +} + static int magicmouse_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { @@ -519,7 +719,17 @@ static int magicmouse_event(struct hid_device *hdev, struct hid_field *field, return 0; } -static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev) + +static int magicmouse_setup_input(struct input_dev *input, + struct hid_device *hdev) +{ + struct magicmouse_sc *msc = hid_get_drvdata(hdev); + + return msc->input_ops.setup_input(input, hdev); +} + +static int magicmouse_setup_input_usb(struct input_dev *input, + struct hid_device *hdev) { int error; int mt_flags = 0; @@ -592,7 +802,7 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd __set_bit(EV_ABS, input->evbit); - error = input_mt_init_slots(input, 16, mt_flags); + error = input_mt_init_slots(input, MAX_CONTACTS, mt_flags); if (error) return error; input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255 << 2, @@ -671,6 +881,79 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd return 0; } +static int magicmouse_setup_input_spi(struct input_dev *input, + struct hid_device *hdev) +{ + int error; + int mt_flags = 0; + + __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); + __clear_bit(BTN_0, input->keybit); + __clear_bit(BTN_RIGHT, input->keybit); + __clear_bit(BTN_MIDDLE, input->keybit); + __clear_bit(EV_REL, input->evbit); + __clear_bit(REL_X, input->relbit); + __clear_bit(REL_Y, input->relbit); + + mt_flags = INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED | INPUT_MT_TRACK; + + /* finger touch area */ + input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 5000, 0, 0); + input_set_abs_params(input, 
ABS_MT_TOUCH_MINOR, 0, 5000, 0, 0); + + /* finger approach area */ + input_set_abs_params(input, ABS_MT_WIDTH_MAJOR, 0, 5000, 0, 0); + input_set_abs_params(input, ABS_MT_WIDTH_MINOR, 0, 5000, 0, 0); + + /* Note: Touch Y position from the device is inverted relative + * to how pointer motion is reported (and relative to how USB + * HID recommends the coordinates work). This driver keeps + * the origin at the same position, and just uses the additive + * inverse of the reported Y. + */ + + input_set_abs_params(input, ABS_MT_PRESSURE, 0, 6000, 0, 0); + + /* + * This makes libinput recognize this as a PressurePad and + * stop trying to use pressure for touch size. Pressure unit + * seems to be ~grams on these touchpads. + */ + input_abs_set_res(input, ABS_MT_PRESSURE, 1); + + /* finger orientation */ + input_set_abs_params(input, ABS_MT_ORIENTATION, -J314_TP_MAX_FINGER_ORIENTATION, + J314_TP_MAX_FINGER_ORIENTATION, 0, 0); + + /* finger position */ + input_set_abs_params(input, ABS_MT_POSITION_X, J314_TP_MIN_X, J314_TP_MAX_X, + 0, 0); + /* Y axis is inverted */ + input_set_abs_params(input, ABS_MT_POSITION_Y, -J314_TP_MAX_Y, -J314_TP_MIN_Y, + 0, 0); + + /* X/Y resolution */ + input_abs_set_res(input, ABS_MT_POSITION_X, J314_TP_RES_X); + input_abs_set_res(input, ABS_MT_POSITION_Y, J314_TP_RES_Y); + + input_set_events_per_packet(input, 60); + + /* touchpad button */ + input_set_capability(input, EV_KEY, BTN_MOUSE); + + /* + * hid-input may mark device as using autorepeat, but the trackpad does + * not actually want it. + */ + __clear_bit(EV_REP, input->evbit); + + error = input_mt_init_slots(input, MAX_CONTACTS, mt_flags); + if (error) + return error; + + return 0; +} + static int magicmouse_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) @@ -726,6 +1009,9 @@ static int magicmouse_enable_multitouch(struct hid_device *hdev) feature_size = sizeof(feature_mt_trackpad2_usb); feature = feature_mt_trackpad2_usb; } + } else if (hdev->vendor == SPI_VENDOR_ID_APPLE) { + feature_size = sizeof(feature_mt_trackpad2_usb); + feature = feature_mt_trackpad2_usb; } else if (hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) { feature_size = sizeof(feature_mt_mouse2); feature = feature_mt_mouse2; @@ -800,12 +1086,30 @@ static int magicmouse_probe(struct hid_device *hdev, struct hid_report *report; int ret; + if ((id->bus == BUS_SPI || id->bus == BUS_HOST) && id->vendor == SPI_VENDOR_ID_APPLE && + hdev->type != HID_TYPE_SPI_MOUSE) + return -ENODEV; + msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL); if (msc == NULL) { hid_err(hdev, "can't alloc magicmouse descriptor\n"); return -ENOMEM; } + // internal trackpad use a data format use input ops to avoid + // conflicts with the report ID. 
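+	// BUS_HOST (MTP): bare multi-touch packets, parsed by
+	// magicmouse_raw_event_mtp directly.
+	// BUS_SPI: the same touch payload preceded by a plain HID mouse report,
+	// which magicmouse_raw_event_spi strips before handing it to the MTP
+	// parser.
+	// Any other bus keeps the original USB/Bluetooth report handling.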
+ if (id->bus == BUS_HOST) { + msc->input_ops.raw_event = magicmouse_raw_event_mtp; + msc->input_ops.setup_input = magicmouse_setup_input_spi; + } else if (id->bus == BUS_SPI) { + msc->input_ops.raw_event = magicmouse_raw_event_spi; + msc->input_ops.setup_input = magicmouse_setup_input_spi; + + } else { + msc->input_ops.raw_event = magicmouse_raw_event_usb; + msc->input_ops.setup_input = magicmouse_setup_input_usb; + } + msc->scroll_accel = SCROLL_ACCEL_DEFAULT; msc->hdev = hdev; INIT_DEFERRABLE_WORK(&msc->work, magicmouse_enable_mt_work); @@ -854,6 +1158,10 @@ static int magicmouse_probe(struct hid_device *hdev, else /* USB_VENDOR_ID_APPLE */ report = hid_register_report(hdev, HID_INPUT_REPORT, TRACKPAD2_USB_REPORT_ID, 0); + } else if (id->bus == BUS_SPI) { + report = hid_register_report(hdev, HID_INPUT_REPORT, SPI_REPORT_ID, 0); + } else if (id->bus == BUS_HOST) { + report = hid_register_report(hdev, HID_INPUT_REPORT, MTP_REPORT_ID, 0); } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ report = hid_register_report(hdev, HID_INPUT_REPORT, TRACKPAD_REPORT_ID, 0); @@ -868,6 +1176,10 @@ static int magicmouse_probe(struct hid_device *hdev, } report->size = 6; + /* MTP devices do not need the MT enable, this is handled by the MTP driver */ + if (id->bus == BUS_HOST) + return 0; + /* * Some devices repond with 'invalid report id' when feature * report switching it into multitouch mode is sent to it. @@ -948,6 +1260,10 @@ static const struct hid_device_id magic_mice[] = { USB_DEVICE_ID_APPLE_MAGICTRACKPAD2), .driver_data = 0 }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD2), .driver_data = 0 }, + { HID_SPI_DEVICE(SPI_VENDOR_ID_APPLE, HID_ANY_ID), + .driver_data = 0 }, + { HID_DEVICE(BUS_HOST, HID_GROUP_ANY, HOST_VENDOR_ID_APPLE, + HID_ANY_ID), .driver_data = 0 }, { } }; MODULE_DEVICE_TABLE(hid, magic_mice); diff --git a/drivers/hid/spi-hid/Kconfig b/drivers/hid/spi-hid/Kconfig new file mode 100644 index 000000000000..8e37f0fec28a --- /dev/null +++ b/drivers/hid/spi-hid/Kconfig @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: GPL-2.0-only +menu "SPI HID support" + depends on SPI + +config SPI_HID_APPLE_OF + tristate "HID over SPI transport layer for Apple Silicon SoCs" + default ARCH_APPLE + depends on SPI && INPUT && OF + help + Say Y here if you use Apple Silicon based laptop. The keyboard and + touchpad are HID based devices connected via SPI. + + If unsure, say N. + + This support is also available as a module. If so, the module + will be called spi-hid-apple-of. It will also build/depend on the + module spi-hid-apple. 
+ +endmenu + +config SPI_HID_APPLE_CORE + tristate + default y if SPI_HID_APPLE_OF=y + default m if SPI_HID_APPLE_OF=m + select HID + select CRC16 diff --git a/drivers/hid/spi-hid/Makefile b/drivers/hid/spi-hid/Makefile new file mode 100644 index 000000000000..f276ee12cb94 --- /dev/null +++ b/drivers/hid/spi-hid/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for SPI HID tarnsport drivers +# + +obj-$(CONFIG_SPI_HID_APPLE_CORE) += spi-hid-apple.o + +spi-hid-apple-objs = spi-hid-apple-core.o + +obj-$(CONFIG_SPI_HID_APPLE_OF) += spi-hid-apple-of.o diff --git a/drivers/hid/spi-hid/spi-hid-apple-core.c b/drivers/hid/spi-hid/spi-hid-apple-core.c new file mode 100644 index 000000000000..56941430befa --- /dev/null +++ b/drivers/hid/spi-hid/spi-hid-apple-core.c @@ -0,0 +1,1029 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Apple SPI HID transport driver + * + * Copyright (C) The Asahi Linux Contributors + * + * Based on: drivers/input/applespi.c + * + * MacBook (Pro) SPI keyboard and touchpad driver + * + * Copyright (c) 2015-2018 Federico Lorenzi + * Copyright (c) 2017-2018 Ronald Tschalär + * + */ + +//#define DEBUG 2 + +#include <asm/unaligned.h> +#include <linux/crc16.h> +#include <linux/delay.h> +#include <linux/device/driver.h> +#include <linux/hid.h> +#include <linux/jiffies.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/printk.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> +#include <linux/wait.h> + +#include "spi-hid-apple.h" + +#define SPIHID_DEF_WAIT msecs_to_jiffies(100) + +#define SPIHID_MAX_INPUT_REPORT_SIZE 0x800 + +/* support only keyboard, trackpad and management dev for now */ +#define SPIHID_MAX_DEVICES 3 + +#define SPIHID_DEVICE_ID_MNGT 0x0 +#define SPIHID_DEVICE_ID_KBD 0x1 +#define SPIHID_DEVICE_ID_TP 0x2 +#define SPIHID_DEVICE_ID_INFO 0xd0 + +#define SPIHID_READ_PACKET 0x20 +#define SPIHID_WRITE_PACKET 0x40 + +#define SPIHID_DESC_MAX 512 + +#define SPIHID_SET_LEDS 0x0151 /* caps lock */ + +#define SPI_RW_CHG_DELAY_US 200 /* 'Inter Stage Us'? */ + +static const u8 spi_hid_apple_booted[4] = { 0xa0, 0x80, 0x00, 0x00 }; +static const u8 spi_hid_apple_status_ok[4] = { 0xac, 0x27, 0x68, 0xd5 }; + +struct spihid_interface { + struct hid_device *hid; + u8 *hid_desc; + u32 hid_desc_len; + u32 id; + unsigned country; + u32 max_control_report_len; + u32 max_input_report_len; + u32 max_output_report_len; + u8 name[32]; + bool ready; +}; + +struct spihid_input_report { + u8 *buf; + u32 length; + u32 offset; + u8 device; + u8 flags; +}; + +struct spihid_apple { + struct spi_device *spidev; + + struct spihid_apple_ops *ops; + + struct spihid_interface mngt; + struct spihid_interface kbd; + struct spihid_interface tp; + + wait_queue_head_t wait; + struct mutex tx_lock; //< protects against concurrent SPI writes + + struct spi_message rx_msg; + struct spi_message tx_msg; + struct spi_transfer rx_transfer; + struct spi_transfer tx_transfer; + struct spi_transfer status_transfer; + + u8 *rx_buf; + u8 *tx_buf; + u8 *status_buf; + + u8 vendor[32]; + u8 product[64]; + u8 serial[32]; + + u32 num_devices; + + u32 vendor_id; + u32 product_id; + u32 version_number; + + u8 msg_id; + + /* fragmented HID report */ + struct spihid_input_report report; + + /* state tracking flags */ + bool status_booted; +}; + +/** + * struct spihid_msg_hdr - common header of protocol messages. + * + * Each message begins with fixed header, followed by a message-type specific + * payload, and ends with a 16-bit crc. 
Because of the varying lengths of the
+ * payload, the crc is defined at the end of each payload struct, rather than
+ * in this struct.
+ *
+ * @unknown0: request type? output, input (0x10), feature, protocol
+ * @unknown1: maybe report id?
+ * @unknown2: mostly zero, in info request maybe device num
+ * @id:       incremented on each message, rolls over after 255; there is a
+ *            separate counter for each message type.
+ * @rsplen:   response length (the exact nature of this field is quite
+ *            speculative). On a request/write this is often the same as
+ *            @length, though in some cases it has been seen to be much larger
+ *            (e.g. 0x400); on a response/read it is the same as on the
+ *            request; for reads that are not responses it is 0.
+ * @length:   length of the remainder of the data in the whole message
+ *            structure (after re-assembly in case of being split over
+ *            multiple spi-packets), minus the trailing crc. The total size
+ *            of a message is therefore @length + 10.
+ */
+
+struct spihid_msg_hdr {
+	u8 unknown0;
+	u8 unknown1;
+	u8 unknown2;
+	u8 id;
+	__le16 rsplen;
+	__le16 length;
+};
+
+/**
+ * struct spihid_transfer_packet - a complete spi packet; always 256 bytes. This carries
+ * the (parts of the) message in the data. But note that this does not
+ * necessarily contain a complete message, as in some cases (e.g. many
+ * fingers pressed) the message is split over multiple packets (see the
+ * @offset, @remain, and @length fields). In general the data parts in
+ * spihid_transfer_packets are concatenated until @remain is 0, and the
+ * result is a message.
+ *
+ * @flags:  0x40 = write (to device), 0x20 = read (from device); note that
+ *          the response to a write still has 0x40.
+ * @device: 1 = keyboard, 2 = touchpad
+ * @offset: specifies the offset of this packet's data in the complete
+ *          message; i.e. > 0 indicates this is a continuation packet (in
+ *          the second packet for a message split over multiple packets
+ *          this would then be the same as the @length in the first packet)
+ * @remain: number of message bytes remaining in subsequent packets (in
+ *          the first packet of a message split over two packets this would
+ *          then be the same as the @length in the second packet)
+ * @length: length of the valid data in the @data in this packet
+ * @data:   all or part of a message
+ * @crc16:  crc over this whole structure minus this @crc16 field. This
+ *          covers just this packet, even on multi-packet messages (in
+ *          contrast to the crc in the message).
+ */
+struct spihid_transfer_packet {
+	u8 flags;
+	u8 device;
+	__le16 offset;
+	__le16 remain;
+	__le16 length;
+	u8 data[246];
+	__le16 crc16;
+};
+
+/*
+ * How HID is mapped onto the protocol is not fully clear. These are the known
+ * reports/requests:
+ *
+ *                 pkt.flags  pkt.dev?  msg.u0  msg.u1  msg.u2
+ * info            0x40       0xd0      0x20    0x01    0xd0
+ *
+ * info mngt:      0x40       0xd0      0x20    0x10    0x00
+ * info kbd:       0x40       0xd0      0x20    0x10    0x01
+ * info tp:        0x40       0xd0      0x20    0x10    0x02
+ *
+ * desc kbd:       0x40       0xd0      0x20    0x10    0x01
+ * desc trackpad:  0x40       0xd0      0x20    0x10    0x02
+ *
+ * mt mode:        0x40       0x02      0x52    0x02    0x00    set protocol?
+ * capslock led 0x40 0x01 0x51 0x01 0x00 output report + * + * report kbd: 0x20 0x01 0x10 0x01 0x00 input report + * report tp: 0x20 0x02 0x10 0x02 0x00 input report + * + */ + + +static int spihid_apple_request(struct spihid_apple *spihid, u8 target, u8 unk0, + u8 unk1, u8 unk2, u16 resp_len, u8 *buf, + size_t len) +{ + struct spihid_transfer_packet *pkt; + struct spihid_msg_hdr *hdr; + u16 crc; + int err; + + /* know reports are small enoug to fit in a single packet */ + if (len > sizeof(pkt->data) - sizeof(*hdr) - sizeof(__le16)) + return -EINVAL; + + err = mutex_lock_interruptible(&spihid->tx_lock); + if (err < 0) + return err; + + pkt = (struct spihid_transfer_packet *)spihid->tx_buf; + + memset(pkt, 0, sizeof(*pkt)); + pkt->flags = SPIHID_WRITE_PACKET; + pkt->device = target; + pkt->length = sizeof(*hdr) + len + sizeof(__le16); + + hdr = (struct spihid_msg_hdr *)&pkt->data[0]; + hdr->unknown0 = unk0; + hdr->unknown1 = unk1; + hdr->unknown2 = unk2; + hdr->id = spihid->msg_id++; + hdr->rsplen = cpu_to_le16(resp_len); + hdr->length = cpu_to_le16(len); + + if (len) + memcpy(pkt->data + sizeof(*hdr), buf, len); + crc = crc16(0, &pkt->data[0], sizeof(*hdr) + len); + put_unaligned_le16(crc, pkt->data + sizeof(*hdr) + len); + + pkt->crc16 = crc16(0, spihid->tx_buf, + offsetof(struct spihid_transfer_packet, crc16)); + + err = spi_sync(spihid->spidev, &spihid->tx_msg); + mutex_unlock(&spihid->tx_lock); + if (err < 0) + return err; + + return (int)len; +} + +struct spihid_apple *spihid_get_data(struct spihid_interface *idev) +{ + switch (idev->id) { + case SPIHID_DEVICE_ID_KBD: + return container_of(idev, struct spihid_apple, kbd); + case SPIHID_DEVICE_ID_TP: + return container_of(idev, struct spihid_apple, tp); + default: + return NULL; + } +} + +static int apple_ll_start(struct hid_device *hdev) +{ + /* no-op SPI transport is already setup */ + return 0; +}; + +static void apple_ll_stop(struct hid_device *hdev) +{ + /* no-op, devices will be desstroyed on driver destruction */ +} + +static int apple_ll_open(struct hid_device *hdev) +{ + struct spihid_apple *spihid; + struct spihid_interface *idev = hdev->driver_data; + + if (idev->hid_desc_len == 0) { + spihid = spihid_get_data(idev); + dev_warn(&spihid->spidev->dev, + "HID descriptor missing for dev %u", idev->id); + } else + idev->ready = true; + + return 0; +} + +static void apple_ll_close(struct hid_device *hdev) +{ + struct spihid_interface *idev = hdev->driver_data; + idev->ready = false; +} + +static int apple_ll_parse(struct hid_device *hdev) +{ + struct spihid_interface *idev = hdev->driver_data; + + return hid_parse_report(hdev, idev->hid_desc, idev->hid_desc_len); +} + +static int apple_ll_raw_request(struct hid_device *hdev, + unsigned char reportnum, __u8 *buf, size_t len, + unsigned char rtype, int reqtype) +{ + struct spihid_interface *idev = hdev->driver_data; + struct spihid_apple *spihid = spihid_get_data(idev); + + dev_dbg(&spihid->spidev->dev, + "apple_ll_raw_request: device:%u reportnum:%hhu rtype:%hhu", + idev->id, reportnum, rtype); + + switch (reqtype) { + case HID_REQ_GET_REPORT: + return -EINVAL; // spihid_get_raw_report(); + case HID_REQ_SET_REPORT: + if (buf[0] != reportnum) + return -EINVAL; + if (reportnum != idev->id) { + dev_warn(&spihid->spidev->dev, + "device:%u reportnum:" + "%hhu mismatch", + idev->id, reportnum); + return -EINVAL; + } + return spihid_apple_request(spihid, idev->id, 0x52, reportnum, 0x00, 2, buf, len); + default: + return -EIO; + } +} + +static int apple_ll_output_report(struct hid_device 
*hdev, __u8 *buf, + size_t len) +{ + struct spihid_interface *idev = hdev->driver_data; + struct spihid_apple *spihid = spihid_get_data(idev); + if (!spihid) + return -1; + + dev_dbg(&spihid->spidev->dev, + "apple_ll_output_report: device:%u len:%zu:", + idev->id, len); + // second idev->id should maybe be buf[0]? + return spihid_apple_request(spihid, idev->id, 0x51, idev->id, 0x00, 0, buf, len); +} + +static struct hid_ll_driver apple_hid_ll = { + .start = &apple_ll_start, + .stop = &apple_ll_stop, + .open = &apple_ll_open, + .close = &apple_ll_close, + .parse = &apple_ll_parse, + .raw_request = &apple_ll_raw_request, + .output_report = &apple_ll_output_report, +}; + +static struct spihid_interface *spihid_get_iface(struct spihid_apple *spihid, + u32 iface) +{ + switch (iface) { + case SPIHID_DEVICE_ID_MNGT: + return &spihid->mngt; + case SPIHID_DEVICE_ID_KBD: + return &spihid->kbd; + case SPIHID_DEVICE_ID_TP: + return &spihid->tp; + default: + return NULL; + } +} + +static int spihid_verify_msg(struct spihid_apple *spihid, u8 *buf, size_t len) +{ + u16 msg_crc, crc; + struct device *dev = &spihid->spidev->dev; + + crc = crc16(0, buf, len - sizeof(__le16)); + msg_crc = get_unaligned_le16(buf + len - sizeof(__le16)); + if (crc != msg_crc) { + dev_warn_ratelimited(dev, "Read message crc mismatch\n"); + return 0; + } + return 1; +} + +static bool spihid_status_report(struct spihid_apple *spihid, u8 *pl, + size_t len) +{ + struct device *dev = &spihid->spidev->dev; + dev_dbg(dev, "%s: len: %zu", __func__, len); + if (len == 5 && pl[0] == 0xe0) + return true; + + return false; +} + +static bool spihid_process_input_report(struct spihid_apple *spihid, u32 device, + struct spihid_msg_hdr *hdr, u8 *payload, + size_t len) +{ + //dev_dbg(&spihid>spidev->dev, "input report: req:%hx iface:%u ", hdr->unknown0, device); + if (hdr->unknown0 != 0x10) + return false; + + /* HID device as well but Vendor usage only, handle it internally for now */ + if (device == 0) { + if (hdr->unknown1 == 0xe0) { + return spihid_status_report(spihid, payload, len); + } + } else if (device < SPIHID_MAX_DEVICES) { + struct spihid_interface *iface = + spihid_get_iface(spihid, device); + if (iface && iface->hid && iface->ready) { + hid_input_report(iface->hid, HID_INPUT_REPORT, payload, + len, 1); + return true; + } + } else + dev_dbg(&spihid->spidev->dev, + "unexpected iface:%u for input report", device); + + return false; +} + +struct spihid_device_info { + __le16 u0[2]; + __le16 num_devices; + __le16 vendor_id; + __le16 product_id; + __le16 version_number; + __le16 vendor_str[2]; //< offset and string length + __le16 product_str[2]; //< offset and string length + __le16 serial_str[2]; //< offset and string length +}; + +static bool spihid_process_device_info(struct spihid_apple *spihid, u32 iface, + u8 *payload, size_t len) +{ + struct device *dev = &spihid->spidev->dev; + + if (iface != SPIHID_DEVICE_ID_INFO) + return false; + + if (spihid->vendor_id == 0 && + len >= sizeof(struct spihid_device_info)) { + struct spihid_device_info *info = + (struct spihid_device_info *)payload; + u16 voff, vlen, poff, plen, soff, slen; + u32 num_devices; + + num_devices = __le16_to_cpu(info->num_devices); + + if (num_devices < SPIHID_MAX_DEVICES) { + dev_err(dev, + "Device info reports %u devices, expecting at least 3", + num_devices); + return false; + } + spihid->num_devices = num_devices; + + if (spihid->num_devices > SPIHID_MAX_DEVICES) { + dev_info( + dev, + "limiting the number of devices to mngt, kbd and mouse"); + 
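+			/* Any interface beyond mngt, kbd and tp is simply ignored. */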
spihid->num_devices = SPIHID_MAX_DEVICES; + } + + spihid->vendor_id = __le16_to_cpu(info->vendor_id); + spihid->product_id = __le16_to_cpu(info->product_id); + spihid->version_number = __le16_to_cpu(info->version_number); + + voff = __le16_to_cpu(info->vendor_str[0]); + vlen = __le16_to_cpu(info->vendor_str[1]); + + if (voff < len && vlen <= len - voff && + vlen < sizeof(spihid->vendor)) { + memcpy(spihid->vendor, payload + voff, vlen); + spihid->vendor[vlen] = '\0'; + } + + poff = __le16_to_cpu(info->product_str[0]); + plen = __le16_to_cpu(info->product_str[1]); + + if (poff < len && plen <= len - poff && + plen < sizeof(spihid->product)) { + memcpy(spihid->product, payload + poff, plen); + spihid->product[plen] = '\0'; + } + + soff = __le16_to_cpu(info->serial_str[0]); + slen = __le16_to_cpu(info->serial_str[1]); + + if (soff < len && slen <= len - soff && + slen < sizeof(spihid->serial)) { + memcpy(spihid->vendor, payload + soff, slen); + spihid->serial[slen] = '\0'; + } + + wake_up_interruptible(&spihid->wait); + } + return true; +} + +struct spihid_iface_info { + u8 u_0; + u8 interface_num; + u8 u_2; + u8 u_3; + u8 u_4; + u8 country_code; + __le16 max_input_report_len; + __le16 max_output_report_len; + __le16 max_control_report_len; + __le16 name_offset; + __le16 name_length; +}; + +static bool spihid_process_iface_info(struct spihid_apple *spihid, u32 num, + u8 *payload, size_t len) +{ + struct spihid_iface_info *info; + struct spihid_interface *iface = spihid_get_iface(spihid, num); + u32 name_off, name_len; + + if (!iface) + return false; + + if (!iface->max_input_report_len) { + if (len < sizeof(*info)) + return false; + + info = (struct spihid_iface_info *)payload; + + iface->max_input_report_len = + le16_to_cpu(info->max_input_report_len); + iface->max_output_report_len = + le16_to_cpu(info->max_output_report_len); + iface->max_control_report_len = + le16_to_cpu(info->max_control_report_len); + iface->country = info->country_code; + + name_off = le16_to_cpu(info->name_offset); + name_len = le16_to_cpu(info->name_length); + + if (name_off < len && name_len <= len - name_off && + name_len < sizeof(iface->name)) { + memcpy(iface->name, payload + name_off, name_len); + iface->name[name_len] = '\0'; + } + + dev_dbg(&spihid->spidev->dev, "Info for %s, country code: 0x%x", + iface->name, iface->country); + + wake_up_interruptible(&spihid->wait); + } + + return true; +} + +static int spihid_register_hid_device(struct spihid_apple *spihid, + struct spihid_interface *idev, u8 device); + +static bool spihid_process_iface_hid_report_desc(struct spihid_apple *spihid, + u32 num, u8 *payload, + size_t len) +{ + struct spihid_interface *iface = spihid_get_iface(spihid, num); + + if (!iface) + return false; + + if (iface->hid_desc_len == 0) { + if (len > SPIHID_DESC_MAX) + return false; + memcpy(iface->hid_desc, payload, len); + iface->hid_desc_len = len; + + /* do not register the mngt iface as HID device */ + if (num > 0) + spihid_register_hid_device(spihid, iface, num); + + wake_up_interruptible(&spihid->wait); + } + return true; +} + +static bool spihid_process_response(struct spihid_apple *spihid, + struct spihid_msg_hdr *hdr, u8 *payload, + size_t len) +{ + if (hdr->unknown0 == 0x20) { + switch (hdr->unknown1) { + case 0x01: + return spihid_process_device_info(spihid, hdr->unknown2, + payload, len); + case 0x02: + return spihid_process_iface_info(spihid, hdr->unknown2, + payload, len); + case 0x10: + return spihid_process_iface_hid_report_desc( + spihid, hdr->unknown2, payload, len); + 
default: + break; + } + } + + return false; +} + +static void spihid_process_message(struct spihid_apple *spihid, u8 *data, + size_t length, u8 device, u8 flags) +{ + struct device *dev = &spihid->spidev->dev; + struct spihid_msg_hdr *hdr; + bool handled = false; + u8 *payload; + + if (!spihid_verify_msg(spihid, data, length)) + return; + + hdr = (struct spihid_msg_hdr *)data; + + if (hdr->length == 0) + return; + + payload = data + sizeof(struct spihid_msg_hdr); + + switch (flags) { + case SPIHID_READ_PACKET: + handled = spihid_process_input_report(spihid, device, hdr, + payload, hdr->length); + break; + case SPIHID_WRITE_PACKET: + handled = spihid_process_response(spihid, hdr, payload, + hdr->length); + break; + default: + break; + } + +#if defined(DEBUG) && DEBUG > 1 + { + dev_dbg(dev, + "R msg: req:%02hhx rep:%02hhx dev:%02hhx id:%hu len:%hu\n", + hdr->unknown0, hdr->unknown1, hdr->unknown2, hdr->id, + hdr->length); + print_hex_dump_debug("spihid msg: ", DUMP_PREFIX_OFFSET, 16, 1, + payload, hdr->length, true); + } +#else + if (!handled) { + dev_dbg(dev, + "R unhandled msg: req:%02hhx rep:%02hhx dev:%02hhx id:%hu len:%hu\n", + hdr->unknown0, hdr->unknown1, hdr->unknown2, hdr->id, + hdr->length); + print_hex_dump_debug("spihid msg: ", DUMP_PREFIX_OFFSET, 16, 1, + payload, hdr->length, true); + } +#endif +} + +static void spihid_assemble_meesage(struct spihid_apple *spihid, + struct spihid_transfer_packet *pkt) +{ + size_t length, offset, remain; + struct device *dev = &spihid->spidev->dev; + struct spihid_input_report *rep = &spihid->report; + + length = le16_to_cpu(pkt->length); + remain = le16_to_cpu(pkt->remain); + offset = le16_to_cpu(pkt->offset); + + if (offset + length + remain > U16_MAX) { + return; + } + + if (pkt->device != rep->device || pkt->flags != rep->flags || + pkt->offset != rep->offset) { + rep->device = 0; + rep->flags = 0; + rep->offset = 0; + rep->length = 0; + } + + if (pkt->offset == 0) { + if (rep->offset != 0) { + dev_warn(dev, "incomplete report off:%u len:%u", + rep->offset, rep->length); + } + memcpy(rep->buf, pkt->data, length); + rep->offset = length; + rep->length = length + pkt->remain; + rep->device = pkt->device; + rep->flags = pkt->flags; + } else if (pkt->offset == rep->offset) { + if (pkt->offset + length + pkt->remain != rep->length) { + dev_warn(dev, "incomplete report off:%u len:%u", + rep->offset, rep->length); + return; + } + memcpy(rep->buf + pkt->offset, pkt->data, pkt->length); + rep->offset += pkt->length; + + if (rep->offset == rep->length) { + spihid_process_message(spihid, rep->buf, rep->length, + rep->device, rep->flags); + rep->device = 0; + rep->flags = 0; + rep->offset = 0; + rep->length = 0; + } + } +} + +static void spihid_process_read(struct spihid_apple *spihid) +{ + u16 crc; + size_t length; + struct device *dev = &spihid->spidev->dev; + struct spihid_transfer_packet *pkt; + + pkt = (struct spihid_transfer_packet *)spihid->rx_buf; + + /* check transfer packet crc */ + crc = crc16(0, spihid->rx_buf, + offsetof(struct spihid_transfer_packet, crc16)); + if (crc != pkt->crc16) { + dev_warn_ratelimited(dev, "Read package crc mismatch\n"); + return; + } + + length = le16_to_cpu(pkt->length); + + if (length < sizeof(struct spihid_msg_hdr) + 2) { + if (length == sizeof(spi_hid_apple_booted) && + !memcmp(pkt->data, spi_hid_apple_booted, length)) { + if (!spihid->status_booted) { + spihid->status_booted = true; + wake_up_interruptible(&spihid->wait); + } + } else { + dev_info(dev, "R short packet: len:%zu\n", length); + 
print_hex_dump_debug("spihid pkt:", DUMP_PREFIX_OFFSET, 16, 1, + pkt->data, length, false); + } + return; + } + +#if defined(DEBUG) && DEBUG > 1 + dev_dbg(dev, + "R pkt: flags:%02hhx dev:%02hhx off:%hu remain:%hu, len:%zu\n", + pkt->flags, pkt->device, pkt->offset, pkt->remain, length); +#if defined(DEBUG) && DEBUG > 2 + print_hex_dump_debug("spihid pkt: ", DUMP_PREFIX_OFFSET, 16, 1, + spihid->rx_buf, + sizeof(struct spihid_transfer_packet), true); +#endif +#endif + + if (length > sizeof(pkt->data)) { + dev_warn_ratelimited(dev, "Invalid pkt len:%zu", length); + return; + } + + /* short message */ + if (pkt->offset == 0 && pkt->remain == 0) { + spihid_process_message(spihid, pkt->data, length, pkt->device, + pkt->flags); + } else { + spihid_assemble_meesage(spihid, pkt); + } +} + +static void spihid_read_packet_sync(struct spihid_apple *spihid) +{ + int err; + + err = spi_sync(spihid->spidev, &spihid->rx_msg); + if (!err) { + spihid_process_read(spihid); + } else { + dev_warn(&spihid->spidev->dev, "RX failed: %d\n", err); + } +} + +irqreturn_t spihid_apple_core_irq(int irq, void *data) +{ + struct spi_device *spi = data; + struct spihid_apple *spihid = spi_get_drvdata(spi); + + spihid_read_packet_sync(spihid); + + return IRQ_HANDLED; +} +EXPORT_SYMBOL_GPL(spihid_apple_core_irq); + +static void spihid_apple_setup_spi_msgs(struct spihid_apple *spihid) +{ + memset(&spihid->rx_transfer, 0, sizeof(spihid->rx_transfer)); + + spihid->rx_transfer.rx_buf = spihid->rx_buf; + spihid->rx_transfer.len = sizeof(struct spihid_transfer_packet); + + spi_message_init(&spihid->rx_msg); + spi_message_add_tail(&spihid->rx_transfer, &spihid->rx_msg); + + memset(&spihid->tx_transfer, 0, sizeof(spihid->rx_transfer)); + memset(&spihid->status_transfer, 0, sizeof(spihid->status_transfer)); + + spihid->tx_transfer.tx_buf = spihid->tx_buf; + spihid->tx_transfer.len = sizeof(struct spihid_transfer_packet); + spihid->tx_transfer.delay.unit = SPI_DELAY_UNIT_USECS; + spihid->tx_transfer.delay.value = SPI_RW_CHG_DELAY_US; + + spihid->status_transfer.rx_buf = spihid->status_buf; + spihid->status_transfer.len = sizeof(spi_hid_apple_status_ok); + + spi_message_init(&spihid->tx_msg); + spi_message_add_tail(&spihid->tx_transfer, &spihid->tx_msg); + spi_message_add_tail(&spihid->status_transfer, &spihid->tx_msg); +} + +static int spihid_apple_setup_spi(struct spihid_apple *spihid) +{ + spihid_apple_setup_spi_msgs(spihid); + + return spihid->ops->power_on(spihid->ops); +} + +static int spihid_register_hid_device(struct spihid_apple *spihid, + struct spihid_interface *iface, u8 device) +{ + int ret; + struct hid_device *hid; + + iface->id = device; + + hid = hid_allocate_device(); + if (IS_ERR(hid)) + return PTR_ERR(hid); + + strscpy(hid->name, spihid->product, sizeof(hid->name)); + snprintf(hid->phys, sizeof(hid->phys), "%s (%hhx)", + dev_name(&spihid->spidev->dev), device); + strscpy(hid->uniq, spihid->serial, sizeof(hid->uniq)); + + hid->ll_driver = &apple_hid_ll; + hid->bus = BUS_SPI; + hid->vendor = spihid->vendor_id; + hid->product = spihid->product_id; + hid->version = spihid->version_number; + + if (device == SPIHID_DEVICE_ID_KBD) + hid->type = HID_TYPE_SPI_KEYBOARD; + else if (device == SPIHID_DEVICE_ID_TP) + hid->type = HID_TYPE_SPI_MOUSE; + + hid->country = iface->country; + hid->dev.parent = &spihid->spidev->dev; + hid->driver_data = iface; + + ret = hid_add_device(hid); + if (ret < 0) { + hid_destroy_device(hid); + dev_warn(&spihid->spidev->dev, + "Failed to register hid device %hhu", device); + return ret; + } + + 
iface->hid = hid; + + return 0; +} + +static void spihid_destroy_hid_device(struct spihid_interface *iface) +{ + if (iface->hid) { + hid_destroy_device(iface->hid); + iface->hid = NULL; + } + iface->ready = false; +} + +int spihid_apple_core_probe(struct spi_device *spi, struct spihid_apple_ops *ops) +{ + struct device *dev = &spi->dev; + struct spihid_apple *spihid; + int err, i; + + if (!ops || !ops->power_on || !ops->power_off || !ops->enable_irq || !ops->disable_irq) + return -EINVAL; + + spihid = devm_kzalloc(dev, sizeof(*spihid), GFP_KERNEL); + if (!spihid) + return -ENOMEM; + + spihid->ops = ops; + spihid->spidev = spi; + + // init spi + spi_set_drvdata(spi, spihid); + + /* allocate SPI buffers */ + spihid->rx_buf = devm_kmalloc( + &spi->dev, sizeof(struct spihid_transfer_packet), GFP_KERNEL); + spihid->tx_buf = devm_kmalloc( + &spi->dev, sizeof(struct spihid_transfer_packet), GFP_KERNEL); + spihid->status_buf = devm_kmalloc( + &spi->dev, sizeof(spi_hid_apple_status_ok), GFP_KERNEL); + + if (!spihid->rx_buf || !spihid->tx_buf || !spihid->status_buf) + return -ENOMEM; + + spihid->report.buf = + devm_kmalloc(dev, SPIHID_MAX_INPUT_REPORT_SIZE, GFP_KERNEL); + + spihid->kbd.hid_desc = devm_kmalloc(dev, SPIHID_DESC_MAX, GFP_KERNEL); + spihid->tp.hid_desc = devm_kmalloc(dev, SPIHID_DESC_MAX, GFP_KERNEL); + + if (!spihid->report.buf || !spihid->kbd.hid_desc || + !spihid->tp.hid_desc) + return -ENOMEM; + + init_waitqueue_head(&spihid->wait); + + mutex_init(&spihid->tx_lock); + + /* Init spi transfer buffers and power device on */ + err = spihid_apple_setup_spi(spihid); + if (err < 0) + goto error; + + /* enable HID irq */ + spihid->ops->enable_irq(spihid->ops); + + // wait for boot message + err = wait_event_interruptible_timeout(spihid->wait, + spihid->status_booted, + msecs_to_jiffies(1000)); + if (err == 0) + err = -ENODEV; + if (err < 0) { + dev_err(dev, "waiting for device boot failed: %d", err); + goto error; + } + + /* request device information */ + dev_dbg(dev, "request device info"); + spihid_apple_request(spihid, 0xd0, 0x20, 0x01, 0xd0, 0, NULL, 0); + err = wait_event_interruptible_timeout(spihid->wait, spihid->vendor_id, + SPIHID_DEF_WAIT); + if (err == 0) + err = -ENODEV; + if (err < 0) { + dev_err(dev, "waiting for device info failed: %d", err); + goto error; + } + + /* request interface information */ + for (i = 0; i < spihid->num_devices; i++) { + struct spihid_interface *iface = spihid_get_iface(spihid, i); + if (!iface) + continue; + dev_dbg(dev, "request interface info 0x%02x", i); + spihid_apple_request(spihid, 0xd0, 0x20, 0x02, i, + SPIHID_DESC_MAX, NULL, 0); + err = wait_event_interruptible_timeout( + spihid->wait, iface->max_input_report_len, + SPIHID_DEF_WAIT); + } + + /* request HID report descriptors */ + for (i = 1; i < spihid->num_devices; i++) { + struct spihid_interface *iface = spihid_get_iface(spihid, i); + if (!iface) + continue; + dev_dbg(dev, "request hid report desc 0x%02x", i); + spihid_apple_request(spihid, 0xd0, 0x20, 0x10, i, + SPIHID_DESC_MAX, NULL, 0); + wait_event_interruptible_timeout( + spihid->wait, iface->hid_desc_len, SPIHID_DEF_WAIT); + } + + return 0; +error: + return err; +} +EXPORT_SYMBOL_GPL(spihid_apple_core_probe); + +void spihid_apple_core_remove(struct spi_device *spi) +{ + struct spihid_apple *spihid = spi_get_drvdata(spi); + + /* destroy input devices */ + + spihid_destroy_hid_device(&spihid->tp); + spihid_destroy_hid_device(&spihid->kbd); + + /* disable irq */ + spihid->ops->disable_irq(spihid->ops); + + /* power SPI device down */ 
+ spihid->ops->power_off(spihid->ops); +} +EXPORT_SYMBOL_GPL(spihid_apple_core_remove); + +void spihid_apple_core_shutdown(struct spi_device *spi) +{ + struct spihid_apple *spihid = spi_get_drvdata(spi); + + /* disable irq */ + spihid->ops->disable_irq(spihid->ops); + + /* power SPI device down */ + spihid->ops->power_off(spihid->ops); +} +EXPORT_SYMBOL_GPL(spihid_apple_core_shutdown); + +MODULE_DESCRIPTION("Apple SPI HID transport driver"); +MODULE_AUTHOR("Janne Grunau <j@jannau.net>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hid/spi-hid/spi-hid-apple-of.c b/drivers/hid/spi-hid/spi-hid-apple-of.c new file mode 100644 index 000000000000..db76774eea7e --- /dev/null +++ b/drivers/hid/spi-hid/spi-hid-apple-of.c @@ -0,0 +1,138 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Apple SPI HID transport driver - Open Firmware + * + * Copyright (C) The Asahi Linux Contributors + */ + +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/of.h> +#include <linux/of_irq.h> + +#include "spi-hid-apple.h" + + +struct spihid_apple_of { + struct spihid_apple_ops ops; + + struct gpio_desc *enable_gpio; + int irq; +}; + +int spihid_apple_of_power_on(struct spihid_apple_ops *ops) +{ + struct spihid_apple_of *sh_of = container_of(ops, struct spihid_apple_of, ops); + + /* reset the controller on boot */ + gpiod_direction_output(sh_of->enable_gpio, 1); + msleep(5); + gpiod_direction_output(sh_of->enable_gpio, 0); + msleep(5); + /* turn SPI device on */ + gpiod_direction_output(sh_of->enable_gpio, 1); + msleep(50); + + return 0; +} + +int spihid_apple_of_power_off(struct spihid_apple_ops *ops) +{ + struct spihid_apple_of *sh_of = container_of(ops, struct spihid_apple_of, ops); + + /* turn SPI device off */ + gpiod_direction_output(sh_of->enable_gpio, 0); + + return 0; +} + +int spihid_apple_of_enable_irq(struct spihid_apple_ops *ops) +{ + struct spihid_apple_of *sh_of = container_of(ops, struct spihid_apple_of, ops); + + enable_irq(sh_of->irq); + + return 0; +} + +int spihid_apple_of_disable_irq(struct spihid_apple_ops *ops) +{ + struct spihid_apple_of *sh_of = container_of(ops, struct spihid_apple_of, ops); + + disable_irq(sh_of->irq); + + return 0; +} + +static int spihid_apple_of_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct spihid_apple_of *spihid_of; + int err; + + dev_warn(dev, "%s:%d", __func__, __LINE__); + + spihid_of = devm_kzalloc(dev, sizeof(*spihid_of), GFP_KERNEL); + if (!spihid_of) + return -ENOMEM; + + spihid_of->ops.power_on = spihid_apple_of_power_on; + spihid_of->ops.power_off = spihid_apple_of_power_off; + spihid_of->ops.enable_irq = spihid_apple_of_enable_irq; + spihid_of->ops.disable_irq = spihid_apple_of_disable_irq; + + spihid_of->enable_gpio = devm_gpiod_get_index(dev, "spien", 0, 0); + if (IS_ERR(spihid_of->enable_gpio)) { + err = PTR_ERR(spihid_of->enable_gpio); + dev_err(dev, "failed to get 'spien' gpio pin: %d", err); + return err; + } + + spihid_of->irq = of_irq_get(dev->of_node, 0); + if (spihid_of->irq < 0) { + err = spihid_of->irq; + dev_err(dev, "failed to get 'extended-irq': %d", err); + return err; + } + err = devm_request_threaded_irq(dev, spihid_of->irq, NULL, + spihid_apple_core_irq, IRQF_ONESHOT | IRQF_NO_AUTOEN, + "spi-hid-apple-irq", spi); + if (err < 0) { + dev_err(dev, "failed to request extended-irq %d: %d", + spihid_of->irq, err); + return err; + } + + return spihid_apple_core_probe(spi, &spihid_of->ops); +} + +static const struct of_device_id spihid_apple_of_match[] = { + { .compatible = "apple,spi-hid-transport" 
}, + {}, +}; +MODULE_DEVICE_TABLE(of, spihid_apple_of_match); + +static struct spi_device_id spihid_apple_of_id[] = { + { "spi-hid-transport", 0 }, + {} +}; +MODULE_DEVICE_TABLE(spi, spihid_apple_of_id); + +static struct spi_driver spihid_apple_of_driver = { + .driver = { + .name = "spi-hid-apple-of", + //.pm = &spi_hid_apple_of_pm, + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(spihid_apple_of_match), + }, + + .id_table = spihid_apple_of_id, + .probe = spihid_apple_of_probe, + .remove = spihid_apple_core_remove, + .shutdown = spihid_apple_core_shutdown, +}; + +module_spi_driver(spihid_apple_of_driver); + +MODULE_LICENSE("GPL"); diff --git a/drivers/hid/spi-hid/spi-hid-apple.h b/drivers/hid/spi-hid/spi-hid-apple.h new file mode 100644 index 000000000000..2d9554e8a5f8 --- /dev/null +++ b/drivers/hid/spi-hid/spi-hid-apple.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ + +#ifndef SPI_HID_APPLE_H +#define SPI_HID_APPLE_H + +#include <linux/interrupt.h> +#include <linux/spi/spi.h> + +/** + * struct spihid_apple_ops - Ops to control the device from the core driver. + * + * @power_on: reset and power the device on. + * @power_off: power the device off. + * @enable_irq: enable irq or ACPI gpe. + * @disable_irq: disable irq or ACPI gpe. + */ + +struct spihid_apple_ops { + int (*power_on)(struct spihid_apple_ops *ops); + int (*power_off)(struct spihid_apple_ops *ops); + int (*enable_irq)(struct spihid_apple_ops *ops); + int (*disable_irq)(struct spihid_apple_ops *ops); +}; + +irqreturn_t spihid_apple_core_irq(int irq, void *data); + +int spihid_apple_core_probe(struct spi_device *spi, struct spihid_apple_ops *ops); +void spihid_apple_core_remove(struct spi_device *spi); +void spihid_apple_core_shutdown(struct spi_device *spi); + +#endif /* SPI_HID_APPLE_H */ diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index a18ab7358d8f..08e681473ae8 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -902,4 +902,16 @@ config INPUT_STPMIC1_ONKEY To compile this driver as a module, choose M here: the module will be called stpmic1_onkey. +config INPUT_MACSMC_HID + tristate "Apple Mac SMC lid/buttons" + depends on APPLE_SMC + default ARCH_APPLE + help + Say Y here if you want to use the input events delivered via the + SMC controller on Apple Mac machines using the macsmc driver. + This includes lid open/close and the power button. + + To compile this driver as a module, choose M here: the + module will be called macsmc-hid. + endif diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index 28dfc444f0a9..a607c6472d0a 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -48,6 +48,7 @@ obj-$(CONFIG_INPUT_IQS7222) += iqs7222.o obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o +obj-$(CONFIG_INPUT_MACSMC_HID) += macsmc-hid.o obj-$(CONFIG_INPUT_MAX77650_ONKEY) += max77650-onkey.o obj-$(CONFIG_INPUT_MAX77693_HAPTIC) += max77693-haptic.o obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o diff --git a/drivers/input/misc/macsmc-hid.c b/drivers/input/misc/macsmc-hid.c new file mode 100644 index 000000000000..c4c7440d4ee6 --- /dev/null +++ b/drivers/input/misc/macsmc-hid.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Apple SMC input event driver + * Copyright The Asahi Linux Contributors + * + * This driver exposes HID events from the SMC as an input device. 
+ * This includes the lid open/close and power button notifications. + */ + +#include <linux/device.h> +#include <linux/input.h> +#include <linux/mfd/core.h> +#include <linux/mfd/macsmc.h> +#include <linux/reboot.h> + +struct macsmc_hid { + struct device *dev; + struct apple_smc *smc; + struct input_dev *input; + struct notifier_block nb; + bool wakeup_mode; +}; + +#define SMC_EV_BTN 0x7201 +#define SMC_EV_LID 0x7203 + +#define BTN_POWER 0x06 +#define BTN_POWER_HELD1 0xfe +#define BTN_POWER_HELD2 0x00 + +static int macsmc_hid_event(struct notifier_block *nb, unsigned long event, void *data) +{ + struct macsmc_hid *smchid = container_of(nb, struct macsmc_hid, nb); + u16 type = event >> 16; + u8 d1 = (event >> 8) & 0xff; + u8 d2 = event & 0xff; + + switch (type) { + case SMC_EV_BTN: + switch (d1) { + case BTN_POWER: + if (smchid->wakeup_mode) { + if (d2) { + dev_info(smchid->dev, "Button wakeup\n"); + pm_wakeup_hard_event(smchid->dev); + } + } else { + input_report_key(smchid->input, KEY_POWER, d2); + input_sync(smchid->input); + } + break; + case BTN_POWER_HELD1: + /* + * TODO: is this pre-warning useful? + */ + if (d2) + dev_warn(smchid->dev, "Power button held down\n"); + break; + case BTN_POWER_HELD2: + /* + * If we get here, we have about 4 seconds before forced shutdown. + * Try to do an emergency shutdown to make sure the NVMe cache is + * flushed. macOS actually does this by panicing (!)... + */ + if (d2) { + dev_crit(smchid->dev, "Triggering forced shutdown!\n"); + if (kernel_can_power_off()) + kernel_power_off(); + else /* Missing macsmc-reboot driver? */ + kernel_restart("SMC power button triggered restart"); + } + break; + default: + dev_info(smchid->dev, "Unknown SMC button event: %02x %02x\n", d1, d2); + break; + } + return NOTIFY_OK; + case SMC_EV_LID: + if (smchid->wakeup_mode && !d1) { + dev_info(smchid->dev, "Lid wakeup\n"); + pm_wakeup_hard_event(smchid->dev); + } + input_report_switch(smchid->input, SW_LID, d1); + input_sync(smchid->input); + return NOTIFY_OK; + } + + return NOTIFY_DONE; +} + +static int macsmc_hid_probe(struct platform_device *pdev) +{ + struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent); + struct macsmc_hid *smchid; + bool have_lid, have_power; + int ret; + + have_lid = apple_smc_key_exists(smc, SMC_KEY(MSLD)); + have_power = apple_smc_key_exists(smc, SMC_KEY(bHLD)); + + if (!have_lid && !have_power) + return -ENODEV; + + smchid = devm_kzalloc(&pdev->dev, sizeof(*smchid), GFP_KERNEL); + if (!smchid) + return -ENOMEM; + + smchid->dev = &pdev->dev; + smchid->smc = smc; + platform_set_drvdata(pdev, smchid); + + smchid->input = devm_input_allocate_device(&pdev->dev); + if (!smchid->input) + return -ENOMEM; + + smchid->input->phys = "macsmc-hid (0)"; + smchid->input->name = "Apple SMC power/lid events"; + + if (have_lid) + input_set_capability(smchid->input, EV_SW, SW_LID); + if (have_power) + input_set_capability(smchid->input, EV_KEY, KEY_POWER); + + ret = input_register_device(smchid->input); + if (ret) { + dev_err(&pdev->dev, "Failed to register input device: %d\n", ret); + return ret; + } + + if (have_lid) { + u8 val; + + ret = apple_smc_read_u8(smc, SMC_KEY(MSLD), &val); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to read initial lid state\n"); + } else { + input_report_switch(smchid->input, SW_LID, val); + } + } + if (have_power) { + u32 val; + + ret = apple_smc_read_u32(smc, SMC_KEY(bHLD), &val); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to read initial power button state\n"); + } else { + input_report_key(smchid->input, KEY_POWER, val 
& 1); + } + } + + input_sync(smchid->input); + + smchid->nb.notifier_call = macsmc_hid_event; + apple_smc_register_notifier(smc, &smchid->nb); + + device_init_wakeup(&pdev->dev, 1); + + return 0; +} + +static int macsmc_hid_pm_prepare(struct device *dev) +{ + struct macsmc_hid *smchid = dev_get_drvdata(dev); + + smchid->wakeup_mode = true; + return 0; +} + +static void macsmc_hid_pm_complete(struct device *dev) +{ + struct macsmc_hid *smchid = dev_get_drvdata(dev); + + smchid->wakeup_mode = false; +} + +static const struct dev_pm_ops macsmc_hid_pm_ops = { + .prepare = macsmc_hid_pm_prepare, + .complete = macsmc_hid_pm_complete, +}; + +static struct platform_driver macsmc_hid_driver = { + .driver = { + .name = "macsmc-hid", + .owner = THIS_MODULE, + .pm = &macsmc_hid_pm_ops, + }, + .probe = macsmc_hid_probe, +}; +module_platform_driver(macsmc_hid_driver); + +MODULE_AUTHOR("Hector Martin <marcan@marcan.st>"); +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_DESCRIPTION("Apple SMC GPIO driver"); +MODULE_ALIAS("platform:macsmc-hid"); diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index c79a0df090c0..58603124dd42 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -67,6 +67,15 @@ config IOMMU_IO_PGTABLE_ARMV7S_SELFTEST If unsure, say N here. +config IOMMU_IO_PGTABLE_DART + bool "Apple DART Formats" + select IOMMU_IO_PGTABLE + depends on ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64) + help + Enable support for the Apple DART pagetable formats. These include + the t8020 and t6000/t8110 DART formats used in Apple M1/M2 family + SoCs. + endmenu config IOMMU_DEBUGFS @@ -294,7 +303,7 @@ config APPLE_DART tristate "Apple DART IOMMU Support" depends on ARCH_APPLE || (COMPILE_TEST && !GENERIC_ATOMIC64) select IOMMU_API - select IOMMU_IO_PGTABLE_LPAE + select IOMMU_IO_PGTABLE_DART default ARCH_APPLE help Support for Apple DART (Device Address Resolution Table) IOMMUs diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 44475a9b3eea..cc9f381013c3 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o +obj-$(CONFIG_IOMMU_IO_PGTABLE_DART) += io-pgtable-dart.o obj-$(CONFIG_IOASID) += ioasid.o obj-$(CONFIG_IOMMU_IOVA) += iova.o obj-$(CONFIG_OF_IOMMU) += of_iommu.o diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c index 8af0242a90d9..249798e26e5d 100644 --- a/drivers/iommu/apple-dart.c +++ b/drivers/iommu/apple-dart.c @@ -33,58 +33,162 @@ #include <linux/swab.h> #include <linux/types.h> -#define DART_MAX_STREAMS 16 +#define DART_MAX_STREAMS 256 #define DART_MAX_TTBR 4 #define MAX_DARTS_PER_DEVICE 2 -#define DART_STREAM_ALL 0xffff +/* Common registers */ #define DART_PARAMS1 0x00 -#define DART_PARAMS_PAGE_SHIFT GENMASK(27, 24) +#define DART_PARAMS1_PAGE_SHIFT GENMASK(27, 24) #define DART_PARAMS2 0x04 -#define DART_PARAMS_BYPASS_SUPPORT BIT(0) +#define DART_PARAMS2_BYPASS_SUPPORT BIT(0) -#define DART_STREAM_COMMAND 0x20 -#define DART_STREAM_COMMAND_BUSY BIT(2) -#define DART_STREAM_COMMAND_INVALIDATE BIT(20) +/* T8020/T6000 registers */ -#define DART_STREAM_SELECT 0x34 +#define DART_T8020_STREAM_COMMAND 0x20 +#define DART_T8020_STREAM_COMMAND_BUSY BIT(2) +#define DART_T8020_STREAM_COMMAND_INVALIDATE BIT(20) -#define DART_ERROR 0x40 -#define DART_ERROR_STREAM GENMASK(27, 24) -#define DART_ERROR_CODE GENMASK(11, 0) -#define DART_ERROR_FLAG 
BIT(31) +#define DART_T8020_STREAM_SELECT 0x34 -#define DART_ERROR_READ_FAULT BIT(4) -#define DART_ERROR_WRITE_FAULT BIT(3) -#define DART_ERROR_NO_PTE BIT(2) -#define DART_ERROR_NO_PMD BIT(1) -#define DART_ERROR_NO_TTBR BIT(0) +#define DART_T8020_ERROR 0x40 +#define DART_T8020_ERROR_STREAM GENMASK(27, 24) +#define DART_T8020_ERROR_CODE GENMASK(11, 0) +#define DART_T8020_ERROR_FLAG BIT(31) -#define DART_CONFIG 0x60 -#define DART_CONFIG_LOCK BIT(15) +#define DART_T8020_ERROR_READ_FAULT BIT(4) +#define DART_T8020_ERROR_WRITE_FAULT BIT(3) +#define DART_T8020_ERROR_NO_PTE BIT(2) +#define DART_T8020_ERROR_NO_PMD BIT(1) +#define DART_T8020_ERROR_NO_TTBR BIT(0) + +#define DART_T8020_CONFIG 0x60 +#define DART_T8020_CONFIG_LOCK BIT(15) #define DART_STREAM_COMMAND_BUSY_TIMEOUT 100 -#define DART_ERROR_ADDR_HI 0x54 -#define DART_ERROR_ADDR_LO 0x50 +#define DART_T8020_ERROR_ADDR_HI 0x54 +#define DART_T8020_ERROR_ADDR_LO 0x50 + +#define DART_T8020_STREAMS_ENABLE 0xfc + +#define DART_T8020_TCR 0x100 +#define DART_T8020_TCR_TRANSLATE_ENABLE BIT(7) +#define DART_T8020_TCR_BYPASS_DART BIT(8) +#define DART_T8020_TCR_BYPASS_DAPF BIT(12) + +#define DART_T8020_TTBR 0x200 +#define DART_T8020_TTBR_VALID BIT(31) +#define DART_T8020_TTBR_ADDR_OFF 0 +#define DART_T8020_TTBR_SHIFT 12 + +/* T8110 registers */ + +#define DART_T8110_PARAMS3 0x08 +#define DART_T8110_PARAMS3_PA_WIDTH GENMASK(29, 24) +#define DART_T8110_PARAMS3_VA_WIDTH GENMASK(21, 16) +#define DART_T8110_PARAMS3_VER_MAJ GENMASK(15, 8) +#define DART_T8110_PARAMS3_VER_MIN GENMASK(7, 0) + +#define DART_T8110_PARAMS4 0x0c +#define DART_T8110_PARAMS4_NUM_CLIENTS GENMASK(24, 16) +#define DART_T8110_PARAMS4_NUM_SIDS GENMASK(8, 0) + +#define DART_T8110_TLB_CMD 0x80 +#define DART_T8110_TLB_CMD_BUSY BIT(31) +#define DART_T8110_TLB_CMD_OP GENMASK(10, 8) +#define DART_T8110_TLB_CMD_OP_FLUSH_ALL 0 +#define DART_T8110_TLB_CMD_OP_FLUSH_SID 1 +#define DART_T8110_TLB_CMD_STREAM GENMASK(7, 0) + +#define DART_T8110_ERROR 0x100 +#define DART_T8110_ERROR_STREAM GENMASK(27, 20) +#define DART_T8110_ERROR_CODE GENMASK(14, 0) +#define DART_T8110_ERROR_FLAG BIT(31) + +#define DART_T8110_ERROR_MASK 0x104 + +#define DART_T8110_ERROR_READ_FAULT BIT(4) +#define DART_T8110_ERROR_WRITE_FAULT BIT(3) +#define DART_T8110_ERROR_NO_PTE BIT(3) +#define DART_T8110_ERROR_NO_PMD BIT(2) +#define DART_T8110_ERROR_NO_PGD BIT(1) +#define DART_T8110_ERROR_NO_TTBR BIT(0) + +#define DART_T8110_ERROR_ADDR_LO 0x170 +#define DART_T8110_ERROR_ADDR_HI 0x174 + +#define DART_T8110_PROTECT 0x200 +#define DART_T8110_UNPROTECT 0x204 +#define DART_T8110_PROTECT_LOCK 0x208 +#define DART_T8110_PROTECT_TTBR_TCR BIT(0) + +#define DART_T8110_ENABLE_STREAMS 0xc00 +#define DART_T8110_DISABLE_STREAMS 0xc20 + +#define DART_T8110_TCR 0x1000 +#define DART_T8110_TCR_REMAP GENMASK(11, 8) +#define DART_T8110_TCR_REMAP_EN BIT(7) +#define DART_T8110_TCR_BYPASS_DAPF BIT(2) +#define DART_T8110_TCR_BYPASS_DART BIT(1) +#define DART_T8110_TCR_TRANSLATE_ENABLE BIT(0) + +#define DART_T8110_TTBR 0x1400 +#define DART_T8110_TTBR_VALID BIT(0) +#define DART_T8110_TTBR_ADDR_OFF 2 +#define DART_T8110_TTBR_SHIFT 14 + +#define DART_TCR(dart, sid) ((dart)->hw->tcr + ((sid) << 2)) + +#define DART_TTBR(dart, sid, idx) ((dart)->hw->ttbr + \ + (((dart)->hw->ttbr_count * (sid)) << 2) + \ + ((idx) << 2)) + +struct apple_dart_stream_map; + +enum dart_type { + DART_T8020, + DART_T6000, + DART_T8110, +}; + +struct apple_dart_hw { + enum dart_type type; + irqreturn_t (*irq_handler)(int irq, void *dev); + int (*invalidate_tlb)(struct apple_dart_stream_map 
*stream_map); + + u32 oas; + enum io_pgtable_fmt fmt; -#define DART_STREAMS_ENABLE 0xfc + int max_sid_count; -#define DART_TCR(sid) (0x100 + 4 * (sid)) -#define DART_TCR_TRANSLATE_ENABLE BIT(7) -#define DART_TCR_BYPASS0_ENABLE BIT(8) -#define DART_TCR_BYPASS1_ENABLE BIT(12) + u64 lock; + u64 lock_bit; -#define DART_TTBR(sid, idx) (0x200 + 16 * (sid) + 4 * (idx)) -#define DART_TTBR_VALID BIT(31) -#define DART_TTBR_SHIFT 12 + u64 error; + + u64 enable_streams; + u64 disable_streams; + + u64 tcr; + u64 tcr_enabled; + u64 tcr_disabled; + u64 tcr_bypass; + + u64 ttbr; + u64 ttbr_valid; + u64 ttbr_addr_off; + u64 ttbr_shift; + int ttbr_count; +}; /* * Private structure associated with each DART device. * * @dev: device struct + * @hw: SoC-specific hardware data * @regs: mapped MMIO region * @irq: interrupt number, can be shared with other DARTs * @clks: clocks associated with this DART @@ -98,6 +202,7 @@ */ struct apple_dart { struct device *dev; + const struct apple_dart_hw *hw; void __iomem *regs; @@ -107,12 +212,17 @@ struct apple_dart { spinlock_t lock; + u32 oas; u32 pgsize; + u32 num_streams; u32 supports_bypass : 1; u32 force_bypass : 1; struct iommu_group *sid2group[DART_MAX_STREAMS]; struct iommu_device iommu; + + u32 save_tcr[DART_MAX_STREAMS]; + u32 save_ttbr[DART_MAX_STREAMS][DART_MAX_TTBR]; }; /* @@ -132,11 +242,11 @@ struct apple_dart { */ struct apple_dart_stream_map { struct apple_dart *dart; - unsigned long sidmap; + DECLARE_BITMAP(sidmap, DART_MAX_STREAMS); }; struct apple_dart_atomic_stream_map { struct apple_dart *dart; - atomic64_t sidmap; + atomic_long_t sidmap[BITS_TO_LONGS(DART_MAX_STREAMS)]; }; /* @@ -194,50 +304,55 @@ static struct apple_dart_domain *to_dart_domain(struct iommu_domain *dom) static void apple_dart_hw_enable_translation(struct apple_dart_stream_map *stream_map) { + struct apple_dart *dart = stream_map->dart; int sid; - for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) - writel(DART_TCR_TRANSLATE_ENABLE, - stream_map->dart->regs + DART_TCR(sid)); + for_each_set_bit(sid, stream_map->sidmap, dart->num_streams) + writel(dart->hw->tcr_enabled, dart->regs + DART_TCR(dart, sid)); } static void apple_dart_hw_disable_dma(struct apple_dart_stream_map *stream_map) { + struct apple_dart *dart = stream_map->dart; int sid; - for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) - writel(0, stream_map->dart->regs + DART_TCR(sid)); + for_each_set_bit(sid, stream_map->sidmap, dart->num_streams) + writel(dart->hw->tcr_disabled, dart->regs + DART_TCR(dart, sid)); } static void apple_dart_hw_enable_bypass(struct apple_dart_stream_map *stream_map) { + struct apple_dart *dart = stream_map->dart; int sid; WARN_ON(!stream_map->dart->supports_bypass); - for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) - writel(DART_TCR_BYPASS0_ENABLE | DART_TCR_BYPASS1_ENABLE, - stream_map->dart->regs + DART_TCR(sid)); + for_each_set_bit(sid, stream_map->sidmap, dart->num_streams) + writel(dart->hw->tcr_bypass, + dart->regs + DART_TCR(dart, sid)); } static void apple_dart_hw_set_ttbr(struct apple_dart_stream_map *stream_map, u8 idx, phys_addr_t paddr) { + struct apple_dart *dart = stream_map->dart; int sid; - WARN_ON(paddr & ((1 << DART_TTBR_SHIFT) - 1)); - for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) - writel(DART_TTBR_VALID | (paddr >> DART_TTBR_SHIFT), - stream_map->dart->regs + DART_TTBR(sid, idx)); + WARN_ON(paddr & ((1 << dart->hw->ttbr_shift) - 1)); + for_each_set_bit(sid, stream_map->sidmap, dart->num_streams) + writel(dart->hw->ttbr_valid | + 
(paddr >> dart->hw->ttbr_shift) << dart->hw->ttbr_addr_off, + dart->regs + DART_TTBR(dart, sid, idx)); } static void apple_dart_hw_clear_ttbr(struct apple_dart_stream_map *stream_map, u8 idx) { + struct apple_dart *dart = stream_map->dart; int sid; - for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) - writel(0, stream_map->dart->regs + DART_TTBR(sid, idx)); + for_each_set_bit(sid, stream_map->sidmap, dart->num_streams) + writel(0, dart->regs + DART_TTBR(dart, sid, idx)); } static void @@ -245,12 +360,12 @@ apple_dart_hw_clear_all_ttbrs(struct apple_dart_stream_map *stream_map) { int i; - for (i = 0; i < DART_MAX_TTBR; ++i) + for (i = 0; i < stream_map->dart->hw->ttbr_count; ++i) apple_dart_hw_clear_ttbr(stream_map, i); } static int -apple_dart_hw_stream_command(struct apple_dart_stream_map *stream_map, +apple_dart_t8020_hw_stream_command(struct apple_dart_stream_map *stream_map, u32 command) { unsigned long flags; @@ -259,12 +374,12 @@ apple_dart_hw_stream_command(struct apple_dart_stream_map *stream_map, spin_lock_irqsave(&stream_map->dart->lock, flags); - writel(stream_map->sidmap, stream_map->dart->regs + DART_STREAM_SELECT); - writel(command, stream_map->dart->regs + DART_STREAM_COMMAND); + writel(stream_map->sidmap[0], stream_map->dart->regs + DART_T8020_STREAM_SELECT); + writel(command, stream_map->dart->regs + DART_T8020_STREAM_COMMAND); ret = readl_poll_timeout_atomic( - stream_map->dart->regs + DART_STREAM_COMMAND, command_reg, - !(command_reg & DART_STREAM_COMMAND_BUSY), 1, + stream_map->dart->regs + DART_T8020_STREAM_COMMAND, command_reg, + !(command_reg & DART_T8020_STREAM_COMMAND_BUSY), 1, DART_STREAM_COMMAND_BUSY_TIMEOUT); spin_unlock_irqrestore(&stream_map->dart->lock, flags); @@ -272,7 +387,7 @@ apple_dart_hw_stream_command(struct apple_dart_stream_map *stream_map, if (ret) { dev_err(stream_map->dart->dev, "busy bit did not clear after command %x for streams %lx\n", - command, stream_map->sidmap); + command, stream_map->sidmap[0]); return ret; } @@ -280,48 +395,102 @@ apple_dart_hw_stream_command(struct apple_dart_stream_map *stream_map, } static int -apple_dart_hw_invalidate_tlb(struct apple_dart_stream_map *stream_map) +apple_dart_t8110_hw_tlb_command(struct apple_dart_stream_map *stream_map, + u32 command) { - return apple_dart_hw_stream_command(stream_map, - DART_STREAM_COMMAND_INVALIDATE); + struct apple_dart *dart = stream_map->dart; + unsigned long flags; + int ret = 0; + int sid; + + spin_lock_irqsave(&dart->lock, flags); + + for_each_set_bit(sid, stream_map->sidmap, dart->num_streams) { + u32 val = FIELD_PREP(DART_T8110_TLB_CMD_OP, command) | + FIELD_PREP(DART_T8110_TLB_CMD_STREAM, sid); + writel(val, dart->regs + DART_T8110_TLB_CMD); + + ret = readl_poll_timeout_atomic( + dart->regs + DART_T8110_TLB_CMD, val, + !(val & DART_T8110_TLB_CMD_BUSY), 1, + DART_STREAM_COMMAND_BUSY_TIMEOUT); + + if (ret) + break; + + } + + spin_unlock_irqrestore(&dart->lock, flags); + + if (ret) { + dev_err(stream_map->dart->dev, + "busy bit did not clear after command %x for stream %d\n", + command, sid); + return ret; + } + + return 0; +} + +static int +apple_dart_t8020_hw_invalidate_tlb(struct apple_dart_stream_map *stream_map) +{ + return apple_dart_t8020_hw_stream_command( + stream_map, DART_T8020_STREAM_COMMAND_INVALIDATE); +} + +static int +apple_dart_t8110_hw_invalidate_tlb(struct apple_dart_stream_map *stream_map) +{ + return apple_dart_t8110_hw_tlb_command( + stream_map, DART_T8110_TLB_CMD_OP_FLUSH_SID); } static int apple_dart_hw_reset(struct apple_dart *dart) { u32 
config; struct apple_dart_stream_map stream_map; + int i; - config = readl(dart->regs + DART_CONFIG); - if (config & DART_CONFIG_LOCK) { + config = readl(dart->regs + dart->hw->lock); + if (config & dart->hw->lock_bit) { dev_err(dart->dev, "DART is locked down until reboot: %08x\n", config); return -EINVAL; } stream_map.dart = dart; - stream_map.sidmap = DART_STREAM_ALL; + bitmap_zero(stream_map.sidmap, DART_MAX_STREAMS); + bitmap_set(stream_map.sidmap, 0, dart->num_streams); apple_dart_hw_disable_dma(&stream_map); apple_dart_hw_clear_all_ttbrs(&stream_map); /* enable all streams globally since TCR is used to control isolation */ - writel(DART_STREAM_ALL, dart->regs + DART_STREAMS_ENABLE); + for (i = 0; i < BITS_TO_U32(dart->num_streams); i++) + writel(U32_MAX, dart->regs + dart->hw->enable_streams); /* clear any pending errors before the interrupt is unmasked */ - writel(readl(dart->regs + DART_ERROR), dart->regs + DART_ERROR); + writel(readl(dart->regs + dart->hw->error), dart->regs + dart->hw->error); + + if (dart->hw->type == DART_T8110) + writel(0, dart->regs + DART_T8110_ERROR_MASK); - return apple_dart_hw_invalidate_tlb(&stream_map); + return dart->hw->invalidate_tlb(&stream_map); } static void apple_dart_domain_flush_tlb(struct apple_dart_domain *domain) { - int i; + int i, j; struct apple_dart_atomic_stream_map *domain_stream_map; struct apple_dart_stream_map stream_map; for_each_stream_map(i, domain, domain_stream_map) { stream_map.dart = domain_stream_map->dart; - stream_map.sidmap = atomic64_read(&domain_stream_map->sidmap); - apple_dart_hw_invalidate_tlb(&stream_map); + + for (j = 0; j < BITS_TO_LONGS(stream_map.dart->num_streams); j++) + stream_map.sidmap[j] = atomic64_read(&domain_stream_map->sidmap[j]); + + stream_map.dart->hw->invalidate_tlb(&stream_map); } } @@ -391,11 +560,11 @@ apple_dart_setup_translation(struct apple_dart_domain *domain, for (i = 0; i < pgtbl_cfg->apple_dart_cfg.n_ttbrs; ++i) apple_dart_hw_set_ttbr(stream_map, i, pgtbl_cfg->apple_dart_cfg.ttbr[i]); - for (; i < DART_MAX_TTBR; ++i) + for (; i < stream_map->dart->hw->ttbr_count; ++i) apple_dart_hw_clear_ttbr(stream_map, i); apple_dart_hw_enable_translation(stream_map); - apple_dart_hw_invalidate_tlb(stream_map); + stream_map->dart->hw->invalidate_tlb(stream_map); } static int apple_dart_finalize_domain(struct iommu_domain *domain, @@ -405,7 +574,7 @@ static int apple_dart_finalize_domain(struct iommu_domain *domain, struct apple_dart *dart = cfg->stream_maps[0].dart; struct io_pgtable_cfg pgtbl_cfg; int ret = 0; - int i; + int i, j; mutex_lock(&dart_domain->init_lock); @@ -414,20 +583,21 @@ static int apple_dart_finalize_domain(struct iommu_domain *domain, for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) { dart_domain->stream_maps[i].dart = cfg->stream_maps[i].dart; - atomic64_set(&dart_domain->stream_maps[i].sidmap, - cfg->stream_maps[i].sidmap); + for (j = 0; j < BITS_TO_LONGS(dart->num_streams); j++) + atomic64_set(&dart_domain->stream_maps[i].sidmap[j], + cfg->stream_maps[i].sidmap[j]); } pgtbl_cfg = (struct io_pgtable_cfg){ .pgsize_bitmap = dart->pgsize, .ias = 32, - .oas = 36, + .oas = dart->oas, .coherent_walk = 1, .iommu_dev = dart->dev, }; dart_domain->pgtbl_ops = - alloc_io_pgtable_ops(APPLE_DART, &pgtbl_cfg, domain); + alloc_io_pgtable_ops(dart->hw->fmt, &pgtbl_cfg, domain); if (!dart_domain->pgtbl_ops) { ret = -ENOMEM; goto done; @@ -450,7 +620,7 @@ apple_dart_mod_streams(struct apple_dart_atomic_stream_map *domain_maps, struct apple_dart_stream_map *master_maps, bool add_streams) { - int i; + int 
i, j; for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) { if (domain_maps[i].dart != master_maps[i].dart) @@ -460,12 +630,14 @@ apple_dart_mod_streams(struct apple_dart_atomic_stream_map *domain_maps, for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) { if (!domain_maps[i].dart) break; - if (add_streams) - atomic64_or(master_maps[i].sidmap, - &domain_maps[i].sidmap); - else - atomic64_and(~master_maps[i].sidmap, - &domain_maps[i].sidmap); + for (j = 0; j < BITS_TO_LONGS(domain_maps[i].dart->num_streams); j++) { + if (add_streams) + atomic64_or(master_maps[i].sidmap[j], + &domain_maps[i].sidmap[j]); + else + atomic64_and(~master_maps[i].sidmap[j], + &domain_maps[i].sidmap[j]); + } } return 0; @@ -632,14 +804,14 @@ static int apple_dart_of_xlate(struct device *dev, struct of_phandle_args *args) for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) { if (cfg->stream_maps[i].dart == dart) { - cfg->stream_maps[i].sidmap |= 1 << sid; + set_bit(sid, cfg->stream_maps[i].sidmap); return 0; } } for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) { if (!cfg->stream_maps[i].dart) { cfg->stream_maps[i].dart = dart; - cfg->stream_maps[i].sidmap = 1 << sid; + set_bit(sid, cfg->stream_maps[i].sidmap); return 0; } } @@ -658,7 +830,7 @@ static void apple_dart_release_group(void *iommu_data) mutex_lock(&apple_dart_groups_lock); for_each_stream_map(i, group_master_cfg, stream_map) - for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) + for_each_set_bit(sid, stream_map->sidmap, stream_map->dart->num_streams) stream_map->dart->sid2group[sid] = NULL; kfree(iommu_data); @@ -677,7 +849,7 @@ static struct iommu_group *apple_dart_device_group(struct device *dev) mutex_lock(&apple_dart_groups_lock); for_each_stream_map(i, cfg, stream_map) { - for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) { + for_each_set_bit(sid, stream_map->sidmap, stream_map->dart->num_streams) { struct iommu_group *stream_group = stream_map->dart->sid2group[sid]; @@ -716,7 +888,7 @@ static struct iommu_group *apple_dart_device_group(struct device *dev) apple_dart_release_group); for_each_stream_map(i, cfg, stream_map) - for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) + for_each_set_bit(sid, stream_map->sidmap, stream_map->dart->num_streams) stream_map->dart->sid2group[sid] = group; res = group; @@ -787,30 +959,30 @@ static const struct iommu_ops apple_dart_iommu_ops = { } }; -static irqreturn_t apple_dart_irq(int irq, void *dev) +static irqreturn_t apple_dart_t8020_irq(int irq, void *dev) { struct apple_dart *dart = dev; const char *fault_name = NULL; - u32 error = readl(dart->regs + DART_ERROR); - u32 error_code = FIELD_GET(DART_ERROR_CODE, error); - u32 addr_lo = readl(dart->regs + DART_ERROR_ADDR_LO); - u32 addr_hi = readl(dart->regs + DART_ERROR_ADDR_HI); + u32 error = readl(dart->regs + DART_T8020_ERROR); + u32 error_code = FIELD_GET(DART_T8020_ERROR_CODE, error); + u32 addr_lo = readl(dart->regs + DART_T8020_ERROR_ADDR_LO); + u32 addr_hi = readl(dart->regs + DART_T8020_ERROR_ADDR_HI); u64 addr = addr_lo | (((u64)addr_hi) << 32); - u8 stream_idx = FIELD_GET(DART_ERROR_STREAM, error); + u8 stream_idx = FIELD_GET(DART_T8020_ERROR_STREAM, error); - if (!(error & DART_ERROR_FLAG)) + if (!(error & DART_T8020_ERROR_FLAG)) return IRQ_NONE; /* there should only be a single bit set but let's use == to be sure */ - if (error_code == DART_ERROR_READ_FAULT) + if (error_code == DART_T8020_ERROR_READ_FAULT) fault_name = "READ FAULT"; - else if (error_code == DART_ERROR_WRITE_FAULT) + else if (error_code == DART_T8020_ERROR_WRITE_FAULT) fault_name = 
"WRITE FAULT"; - else if (error_code == DART_ERROR_NO_PTE) + else if (error_code == DART_T8020_ERROR_NO_PTE) fault_name = "NO PTE FOR IOVA"; - else if (error_code == DART_ERROR_NO_PMD) + else if (error_code == DART_T8020_ERROR_NO_PMD) fault_name = "NO PMD FOR IOVA"; - else if (error_code == DART_ERROR_NO_TTBR) + else if (error_code == DART_T8020_ERROR_NO_TTBR) fault_name = "NO TTBR FOR IOVA"; else fault_name = "unknown"; @@ -820,7 +992,47 @@ static irqreturn_t apple_dart_irq(int irq, void *dev) "translation fault: status:0x%x stream:%d code:0x%x (%s) at 0x%llx", error, stream_idx, error_code, fault_name, addr); - writel(error, dart->regs + DART_ERROR); + writel(error, dart->regs + DART_T8020_ERROR); + return IRQ_HANDLED; +} + + +static irqreturn_t apple_dart_t8110_irq(int irq, void *dev) +{ + struct apple_dart *dart = dev; + const char *fault_name = NULL; + u32 error = readl(dart->regs + DART_T8110_ERROR); + u32 error_code = FIELD_GET(DART_T8110_ERROR_CODE, error); + u32 addr_lo = readl(dart->regs + DART_T8110_ERROR_ADDR_LO); + u32 addr_hi = readl(dart->regs + DART_T8110_ERROR_ADDR_HI); + u64 addr = addr_lo | (((u64)addr_hi) << 32); + u8 stream_idx = FIELD_GET(DART_T8110_ERROR_STREAM, error); + + if (!(error & DART_T8110_ERROR_FLAG)) + return IRQ_NONE; + + /* there should only be a single bit set but let's use == to be sure */ + if (error_code == DART_T8110_ERROR_READ_FAULT) + fault_name = "READ FAULT"; + else if (error_code == DART_T8110_ERROR_WRITE_FAULT) + fault_name = "WRITE FAULT"; + else if (error_code == DART_T8110_ERROR_NO_PTE) + fault_name = "NO PTE FOR IOVA"; + else if (error_code == DART_T8110_ERROR_NO_PMD) + fault_name = "NO PMD FOR IOVA"; + else if (error_code == DART_T8110_ERROR_NO_PGD) + fault_name = "NO PGD FOR IOVA"; + else if (error_code == DART_T8110_ERROR_NO_TTBR) + fault_name = "NO TTBR FOR IOVA"; + else + fault_name = "unknown"; + + dev_err_ratelimited( + dart->dev, + "translation fault: status:0x%x stream:%d code:0x%x (%s) at 0x%llx", + error, stream_idx, error_code, fault_name, addr); + + writel(error, dart->regs + DART_T8110_ERROR); return IRQ_HANDLED; } @@ -848,7 +1060,7 @@ static int apple_dart_set_bus_ops(const struct iommu_ops *ops) static int apple_dart_probe(struct platform_device *pdev) { int ret; - u32 dart_params[2]; + u32 dart_params[4]; struct resource *res; struct apple_dart *dart; struct device *dev = &pdev->dev; @@ -858,6 +1070,7 @@ static int apple_dart_probe(struct platform_device *pdev) return -ENOMEM; dart->dev = dev; + dart->hw = of_device_get_match_data(dev); spin_lock_init(&dart->lock); dart->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); @@ -882,17 +1095,39 @@ static int apple_dart_probe(struct platform_device *pdev) if (ret) return ret; - ret = apple_dart_hw_reset(dart); - if (ret) - goto err_clk_disable; - dart_params[0] = readl(dart->regs + DART_PARAMS1); dart_params[1] = readl(dart->regs + DART_PARAMS2); - dart->pgsize = 1 << FIELD_GET(DART_PARAMS_PAGE_SHIFT, dart_params[0]); - dart->supports_bypass = dart_params[1] & DART_PARAMS_BYPASS_SUPPORT; + dart->pgsize = 1 << FIELD_GET(DART_PARAMS1_PAGE_SHIFT, dart_params[0]); + dart->supports_bypass = dart_params[1] & DART_PARAMS2_BYPASS_SUPPORT; + + switch (dart->hw->type) { + case DART_T8020: + case DART_T6000: + dart->oas = dart->hw->oas; + dart->num_streams = dart->hw->max_sid_count; + break; + + case DART_T8110: + dart_params[2] = readl(dart->regs + DART_T8110_PARAMS3); + dart_params[3] = readl(dart->regs + DART_T8110_PARAMS4); + dart->oas = 
FIELD_GET(DART_T8110_PARAMS3_PA_WIDTH, dart_params[2]); + dart->num_streams = FIELD_GET(DART_T8110_PARAMS4_NUM_SIDS, dart_params[3]); + break; + } + + if (dart->num_streams > DART_MAX_STREAMS) { + dev_err(&pdev->dev, "Too many streams (%d > %d)\n", + dart->num_streams, DART_MAX_STREAMS); + goto err_clk_disable; + } + dart->force_bypass = dart->pgsize > PAGE_SIZE; - ret = request_irq(dart->irq, apple_dart_irq, IRQF_SHARED, + ret = apple_dart_hw_reset(dart); + if (ret) + goto err_clk_disable; + + ret = request_irq(dart->irq, dart->hw->irq_handler, IRQF_SHARED, "apple-dart fault handler", dart); if (ret) goto err_clk_disable; @@ -914,8 +1149,8 @@ static int apple_dart_probe(struct platform_device *pdev) dev_info( &pdev->dev, - "DART [pagesize %x, bypass support: %d, bypass forced: %d] initialized\n", - dart->pgsize, dart->supports_bypass, dart->force_bypass); + "DART [pagesize %x, %d streams, bypass support: %d, bypass forced: %d] initialized\n", + dart->pgsize, dart->num_streams, dart->supports_bypass, dart->force_bypass); return 0; err_sysfs_remove: @@ -946,8 +1181,131 @@ static int apple_dart_remove(struct platform_device *pdev) return 0; } +static const struct apple_dart_hw apple_dart_hw_t8103 = { + .type = DART_T8020, + .irq_handler = apple_dart_t8020_irq, + .invalidate_tlb = apple_dart_t8020_hw_invalidate_tlb, + .oas = 36, + .fmt = APPLE_DART, + .max_sid_count = 16, + + .enable_streams = DART_T8020_STREAMS_ENABLE, + .lock = DART_T8020_CONFIG, + .lock_bit = DART_T8020_CONFIG_LOCK, + + .error = DART_T8020_ERROR, + + .tcr = DART_T8020_TCR, + .tcr_enabled = DART_T8020_TCR_TRANSLATE_ENABLE, + .tcr_disabled = 0, + .tcr_bypass = DART_T8020_TCR_BYPASS_DAPF | DART_T8020_TCR_BYPASS_DART, + + .ttbr = DART_T8020_TTBR, + .ttbr_valid = DART_T8020_TTBR_VALID, + .ttbr_addr_off = DART_T8020_TTBR_ADDR_OFF, + .ttbr_shift = DART_T8020_TTBR_SHIFT, + .ttbr_count = 4, +}; +static const struct apple_dart_hw apple_dart_hw_t6000 = { + .type = DART_T6000, + .irq_handler = apple_dart_t8020_irq, + .invalidate_tlb = apple_dart_t8020_hw_invalidate_tlb, + .oas = 42, + .fmt = APPLE_DART2, + .max_sid_count = 16, + + .enable_streams = DART_T8020_STREAMS_ENABLE, + .lock = DART_T8020_CONFIG, + .lock_bit = DART_T8020_CONFIG_LOCK, + + .error = DART_T8020_ERROR, + + .tcr = DART_T8020_TCR, + .tcr_enabled = DART_T8020_TCR_TRANSLATE_ENABLE, + .tcr_disabled = 0, + .tcr_bypass = DART_T8020_TCR_BYPASS_DAPF | DART_T8020_TCR_BYPASS_DART, + + .ttbr = DART_T8020_TTBR, + .ttbr_valid = DART_T8020_TTBR_VALID, + .ttbr_addr_off = DART_T8020_TTBR_ADDR_OFF, + .ttbr_shift = DART_T8020_TTBR_SHIFT, + .ttbr_count = 4, +}; + +static const struct apple_dart_hw apple_dart_hw_t8110 = { + .type = DART_T8110, + .irq_handler = apple_dart_t8110_irq, + .invalidate_tlb = apple_dart_t8110_hw_invalidate_tlb, + .fmt = APPLE_DART2, + .max_sid_count = 256, + + .enable_streams = DART_T8110_ENABLE_STREAMS, + .disable_streams = DART_T8110_DISABLE_STREAMS, + .lock = DART_T8110_PROTECT, + .lock_bit = DART_T8110_PROTECT_TTBR_TCR, + + .error = DART_T8110_ERROR, + + .tcr = DART_T8110_TCR, + .tcr_enabled = DART_T8110_TCR_TRANSLATE_ENABLE, + .tcr_disabled = 0, + .tcr_bypass = DART_T8110_TCR_BYPASS_DAPF | DART_T8110_TCR_BYPASS_DART, + + .ttbr = DART_T8110_TTBR, + .ttbr_valid = DART_T8110_TTBR_VALID, + .ttbr_addr_off = DART_T8110_TTBR_ADDR_OFF, + .ttbr_shift = DART_T8110_TTBR_SHIFT, + .ttbr_count = 1, +}; + +#ifdef CONFIG_PM_SLEEP +static int apple_dart_suspend(struct device *dev) +{ + struct apple_dart *dart = dev_get_drvdata(dev); + unsigned int sid, idx; + + for (sid = 
0; sid < dart->num_streams; sid++) { + dart->save_tcr[sid] = readl_relaxed(dart->regs + DART_TCR(dart, sid)); + for (idx = 0; idx < dart->hw->ttbr_count; idx++) + dart->save_ttbr[sid][idx] = + readl_relaxed(dart->regs + DART_TTBR(dart, sid, idx)); + } + + return 0; +} + +static int apple_dart_resume(struct device *dev) +{ + struct apple_dart *dart = dev_get_drvdata(dev); + unsigned int sid, idx; + int ret; + + ret = apple_dart_hw_reset(dart); + if (ret) { + dev_err(dev, "Failed to reset DART on resume\n"); + return ret; + } + + for (sid = 0; sid < dart->num_streams; sid++) { + for (idx = 0; idx < dart->hw->ttbr_count; idx++) + writel_relaxed(dart->save_ttbr[sid][idx], + dart->regs + DART_TTBR(dart, sid, idx)); + writel_relaxed(dart->save_tcr[sid], dart->regs + DART_TCR(dart, sid)); + } + + return 0; +} + +static const struct dev_pm_ops apple_dart_pm_ops = { + .suspend = apple_dart_suspend, + .resume = apple_dart_resume, +}; +#endif + static const struct of_device_id apple_dart_of_match[] = { - { .compatible = "apple,t8103-dart", .data = NULL }, + { .compatible = "apple,t8110-dart", .data = &apple_dart_hw_t8110 }, + { .compatible = "apple,t8103-dart", .data = &apple_dart_hw_t8103 }, + { .compatible = "apple,t6000-dart", .data = &apple_dart_hw_t6000 }, {}, }; MODULE_DEVICE_TABLE(of, apple_dart_of_match); @@ -957,6 +1315,9 @@ static struct platform_driver apple_dart_driver = { .name = "apple-dart", .of_match_table = apple_dart_of_match, .suppress_bind_attrs = true, +#ifdef CONFIG_PM_SLEEP + .pm = &apple_dart_pm_ops, +#endif }, .probe = apple_dart_probe, .remove = apple_dart_remove, diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 94ff319ae8ac..d7f5e23da643 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -130,9 +130,6 @@ #define ARM_MALI_LPAE_MEMATTR_IMP_DEF 0x88ULL #define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL -#define APPLE_DART_PTE_PROT_NO_WRITE (1<<7) -#define APPLE_DART_PTE_PROT_NO_READ (1<<8) - /* IOPTE accessors */ #define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d)) @@ -406,15 +403,6 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, { arm_lpae_iopte pte; - if (data->iop.fmt == APPLE_DART) { - pte = 0; - if (!(prot & IOMMU_WRITE)) - pte |= APPLE_DART_PTE_PROT_NO_WRITE; - if (!(prot & IOMMU_READ)) - pte |= APPLE_DART_PTE_PROT_NO_READ; - return pte; - } - if (data->iop.fmt == ARM_64_LPAE_S1 || data->iop.fmt == ARM_32_LPAE_S1) { pte = ARM_LPAE_PTE_nG; @@ -1107,52 +1095,6 @@ out_free_data: return NULL; } -static struct io_pgtable * -apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie) -{ - struct arm_lpae_io_pgtable *data; - int i; - - if (cfg->oas > 36) - return NULL; - - data = arm_lpae_alloc_pgtable(cfg); - if (!data) - return NULL; - - /* - * The table format itself always uses two levels, but the total VA - * space is mapped by four separate tables, making the MMIO registers - * an effective "level 1". For simplicity, though, we treat this - * equivalently to LPAE stage 2 concatenation at level 2, with the - * additional TTBRs each just pointing at consecutive pages. 
- */ - if (data->start_level < 1) - goto out_free_data; - if (data->start_level == 1 && data->pgd_bits > 2) - goto out_free_data; - if (data->start_level > 1) - data->pgd_bits = 0; - data->start_level = 2; - cfg->apple_dart_cfg.n_ttbrs = 1 << data->pgd_bits; - data->pgd_bits += data->bits_per_level; - - data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL, - cfg); - if (!data->pgd) - goto out_free_data; - - for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) - cfg->apple_dart_cfg.ttbr[i] = - virt_to_phys(data->pgd + i * ARM_LPAE_GRANULE(data)); - - return &data->iop; - -out_free_data: - kfree(data); - return NULL; -} - struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = { .alloc = arm_64_lpae_alloc_pgtable_s1, .free = arm_lpae_free_pgtable, @@ -1178,11 +1120,6 @@ struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = { .free = arm_lpae_free_pgtable, }; -struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns = { - .alloc = apple_dart_alloc_pgtable, - .free = arm_lpae_free_pgtable, -}; - #ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST static struct io_pgtable_cfg *cfg_cookie __initdata; diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c new file mode 100644 index 000000000000..9c3c2505f3dc --- /dev/null +++ b/drivers/iommu/io-pgtable-dart.c @@ -0,0 +1,623 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Apple DART page table allocator. + * + * Copyright (C) 2022 The Asahi Linux Contributors + * + * Based on io-pgtable-arm. + * + * Copyright (C) 2014 ARM Limited + * + * Author: Will Deacon <will.deacon@arm.com> + */ + +#define pr_fmt(fmt) "dart io-pgtable: " fmt + +#include <linux/atomic.h> +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/io-pgtable.h> +#include <linux/kernel.h> +#include <linux/sizes.h> +#include <linux/slab.h> +#include <linux/types.h> + +#include <asm/barrier.h> + +#define DART_MAX_ADDR_BITS 52 +#define DART_MAX_LEVELS 3 + +/* Struct accessors */ +#define io_pgtable_to_data(x) \ + container_of((x), struct dart_io_pgtable, iop) + +#define io_pgtable_ops_to_data(x) \ + io_pgtable_to_data(io_pgtable_ops_to_pgtable(x)) + +/* + * Calculate the right shift amount to get to the portion describing level l + * in a virtual address mapped by the pagetable in d. + */ +#define DART_LVL_SHIFT(l, d) \ + (((DART_MAX_LEVELS - (l)) * (d)->bits_per_level) + \ + ilog2(sizeof(dart_iopte))) + +#define DART_GRANULE(d) \ + (sizeof(dart_iopte) << (d)->bits_per_level) +#define DART_PGD_SIZE(d) \ + (sizeof(dart_iopte) << (d)->pgd_bits) + +#define DART_PTES_PER_TABLE(d) \ + (DART_GRANULE(d) >> ilog2(sizeof(dart_iopte))) + +/* + * Calculate the index at level l used to map virtual address a using the + * pagetable in d. + */ +#define DART_PGD_IDX(l, d) \ + ((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0) + +#define DART_LVL_IDX(a, l, d) \ + (((u64)(a) >> DART_LVL_SHIFT(l, d)) & \ + ((1 << ((d)->bits_per_level + DART_PGD_IDX(l, d))) - 1)) + +/* Calculate the block/page mapping size at level l for pagetable in d. 
*/ +#define DART_BLOCK_SIZE(l, d) (1ULL << DART_LVL_SHIFT(l, d)) + +#define APPLE_DART_PTE_SUBPAGE_START GENMASK_ULL(63, 52) +#define APPLE_DART_PTE_SUBPAGE_END GENMASK_ULL(51, 40) + +#define APPLE_DART1_PADDR_MASK GENMASK_ULL(35, 12) +#define APPLE_DART2_PADDR_MASK GENMASK_ULL(37, 10) +#define APPLE_DART2_PADDR_SHIFT (4) + +/* Apple DART1 protection bits */ +#define APPLE_DART1_PTE_PROT_NO_READ BIT(8) +#define APPLE_DART1_PTE_PROT_NO_WRITE BIT(7) +#define APPLE_DART1_PTE_PROT_SP_DIS BIT(1) + +/* Apple DART2 protection bits */ +#define APPLE_DART2_PTE_PROT_NO_READ BIT(3) +#define APPLE_DART2_PTE_PROT_NO_WRITE BIT(2) +#define APPLE_DART2_PTE_PROT_NO_CACHE BIT(1) + +/* marks PTE as valid */ +#define APPLE_DART_PTE_VALID BIT(0) + +/* IOPTE accessors */ +#define iopte_deref(pte, d) __va(iopte_to_paddr(pte, d)) + +struct dart_io_pgtable { + struct io_pgtable iop; + + int pgd_bits; + int start_level; + int bits_per_level; + + void *pgd; +}; + +typedef u64 dart_iopte; + +static inline bool iopte_leaf(dart_iopte pte, int lvl, + enum io_pgtable_fmt fmt) +{ + return (lvl == (DART_MAX_LEVELS - 1)) && (pte & APPLE_DART_PTE_VALID); +} + +static dart_iopte paddr_to_iopte(phys_addr_t paddr, + struct dart_io_pgtable *data) +{ + dart_iopte pte; + + if (data->iop.fmt == APPLE_DART) + return paddr & APPLE_DART1_PADDR_MASK; + + /* format is APPLE_DART2 */ + pte = paddr >> APPLE_DART2_PADDR_SHIFT; + pte &= APPLE_DART2_PADDR_MASK; + + return pte; +} + +static phys_addr_t iopte_to_paddr(dart_iopte pte, + struct dart_io_pgtable *data) +{ + u64 paddr; + + if (data->iop.fmt == APPLE_DART) + return pte & APPLE_DART1_PADDR_MASK; + + /* format is APPLE_DART2 */ + paddr = pte & APPLE_DART2_PADDR_MASK; + paddr <<= APPLE_DART2_PADDR_SHIFT; + + return paddr; +} + +static void *__dart_alloc_pages(size_t size, gfp_t gfp, + struct io_pgtable_cfg *cfg) +{ + struct device *dev = cfg->iommu_dev; + int order = get_order(size); + struct page *p; + + VM_BUG_ON((gfp & __GFP_HIGHMEM)); + p = alloc_pages_node(dev ? 
dev_to_node(dev) : NUMA_NO_NODE, + gfp | __GFP_ZERO, order); + if (!p) + return NULL; + + return page_address(p); +} + +static void __dart_free_pages(void *pages, size_t size) +{ + free_pages((unsigned long)pages, get_order(size)); +} + +static void __dart_init_pte(struct dart_io_pgtable *data, + phys_addr_t paddr, dart_iopte prot, + int lvl, int num_entries, dart_iopte *ptep) +{ + dart_iopte pte = prot; + size_t sz = DART_BLOCK_SIZE(lvl, data); + int i; + + if (lvl == DART_MAX_LEVELS - 1 && data->iop.fmt == APPLE_DART) + pte |= APPLE_DART1_PTE_PROT_SP_DIS; + + pte |= APPLE_DART_PTE_VALID; + + /* subpage protection: always allow access to the entire page */ + pte |= FIELD_PREP(APPLE_DART_PTE_SUBPAGE_START, 0); + pte |= FIELD_PREP(APPLE_DART_PTE_SUBPAGE_END, 0xfff); + + for (i = 0; i < num_entries; i++) + ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data); +} + +static int dart_init_pte(struct dart_io_pgtable *data, + unsigned long iova, phys_addr_t paddr, + dart_iopte prot, int lvl, int num_entries, + dart_iopte *ptep) +{ + int i; + + for (i = 0; i < num_entries; i++) + if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) { + /* We require an unmap first */ + WARN_ON(iopte_leaf(ptep[i], lvl, data->iop.fmt)); + return -EEXIST; + } + + __dart_init_pte(data, paddr, prot, lvl, num_entries, ptep); + return 0; +} + +static dart_iopte dart_install_table(dart_iopte *table, + dart_iopte *ptep, + dart_iopte curr, + struct dart_io_pgtable *data) +{ + dart_iopte old, new; + + new = paddr_to_iopte(__pa(table), data) | APPLE_DART_PTE_VALID; + + /* + * Ensure the table itself is visible before its PTE can be. + * Whilst we could get away with cmpxchg64_release below, this + * doesn't have any ordering semantics when !CONFIG_SMP. + */ + dma_wmb(); + + old = cmpxchg64_relaxed(ptep, curr, new); + + return old; +} + +static int __dart_map(struct dart_io_pgtable *data, unsigned long iova, + phys_addr_t paddr, size_t size, size_t pgcount, + dart_iopte prot, int lvl, dart_iopte *ptep, + gfp_t gfp, size_t *mapped) +{ + dart_iopte *cptep, pte; + size_t block_size = DART_BLOCK_SIZE(lvl, data); + size_t tblsz = DART_GRANULE(data); + struct io_pgtable_cfg *cfg = &data->iop.cfg; + int ret = 0, num_entries, max_entries, map_idx_start; + + /* Find our entry at the current level */ + map_idx_start = DART_LVL_IDX(iova, lvl, data); + ptep += map_idx_start; + + /* If we can install a leaf entry at this level, then do so */ + if (size == block_size) { + max_entries = DART_PTES_PER_TABLE(data) - map_idx_start; + num_entries = min_t(int, pgcount, max_entries); + ret = dart_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep); + if (!ret && mapped) + *mapped += num_entries * size; + + return ret; + } + + /* We can't allocate tables at the final level */ + if (WARN_ON(lvl >= DART_MAX_LEVELS - 1)) + return -EINVAL; + + /* Grab a pointer to the next level */ + pte = READ_ONCE(*ptep); + if (!pte) { + cptep = __dart_alloc_pages(tblsz, gfp, cfg); + if (!cptep) + return -ENOMEM; + + pte = dart_install_table(cptep, ptep, 0, data); + if (pte) + __dart_free_pages(cptep, tblsz); + } + + if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) { + cptep = iopte_deref(pte, data); + } else if (pte) { + /* We require an unmap first */ + WARN_ON(pte); + return -EEXIST; + } + + /* Rinse, repeat */ + return __dart_map(data, iova, paddr, size, pgcount, prot, lvl + 1, + cptep, gfp, mapped); +} + +static dart_iopte dart_prot_to_pte(struct dart_io_pgtable *data, + int prot) +{ + dart_iopte pte = 0; + + if (data->iop.fmt == APPLE_DART) { + if (!(prot & 
IOMMU_WRITE)) + pte |= APPLE_DART1_PTE_PROT_NO_WRITE; + if (!(prot & IOMMU_READ)) + pte |= APPLE_DART1_PTE_PROT_NO_READ; + } + if (data->iop.fmt == APPLE_DART2) { + if (!(prot & IOMMU_WRITE)) + pte |= APPLE_DART2_PTE_PROT_NO_WRITE; + if (!(prot & IOMMU_READ)) + pte |= APPLE_DART2_PTE_PROT_NO_READ; + if (!(prot & IOMMU_CACHE)) + pte |= APPLE_DART2_PTE_PROT_NO_CACHE; + } + + return pte; +} + +static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova, + phys_addr_t paddr, size_t pgsize, size_t pgcount, + int iommu_prot, gfp_t gfp, size_t *mapped) +{ + struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops); + struct io_pgtable_cfg *cfg = &data->iop.cfg; + dart_iopte *ptep = data->pgd; + int ret, lvl = data->start_level; + dart_iopte prot; + long iaext = (s64)iova >> cfg->ias; + + if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize)) + return -EINVAL; + + if (WARN_ON(iaext || paddr >> cfg->oas)) + return -ERANGE; + + /* If no access, then nothing to do */ + if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) + return 0; + + prot = dart_prot_to_pte(data, iommu_prot); + ret = __dart_map(data, iova, paddr, pgsize, pgcount, prot, lvl, + ptep, gfp, mapped); + /* + * Synchronise all PTE updates for the new mapping before there's + * a chance for anything to kick off a table walk for the new iova. + */ + wmb(); + + return ret; +} + +static int dart_map(struct io_pgtable_ops *ops, unsigned long iova, + phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp) +{ + return dart_map_pages(ops, iova, paddr, size, 1, iommu_prot, gfp, + NULL); +} + +static void __dart_free_pgtable(struct dart_io_pgtable *data, int lvl, + dart_iopte *ptep) +{ + dart_iopte *start, *end; + unsigned long table_size; + + if (lvl == data->start_level) + table_size = DART_PGD_SIZE(data); + else + table_size = DART_GRANULE(data); + + start = ptep; + + /* Only leaf entries at the last level */ + if (lvl == DART_MAX_LEVELS - 1) + end = ptep; + else + end = (void *)ptep + table_size; + + while (ptep != end) { + dart_iopte pte = *ptep++; + + if (!pte || iopte_leaf(pte, lvl, data->iop.fmt)) + continue; + + __dart_free_pgtable(data, lvl + 1, iopte_deref(pte, data)); + } + + __dart_free_pages(start, table_size); +} + +static size_t __dart_unmap(struct dart_io_pgtable *data, + struct iommu_iotlb_gather *gather, + unsigned long iova, size_t size, size_t pgcount, + int lvl, dart_iopte *ptep) +{ + dart_iopte pte; + struct io_pgtable *iop = &data->iop; + int i = 0, num_entries, max_entries, unmap_idx_start; + + /* Something went horribly wrong and we ran out of page table */ + if (WARN_ON(lvl == DART_MAX_LEVELS)) + return 0; + + unmap_idx_start = DART_LVL_IDX(iova, lvl, data); + ptep += unmap_idx_start; + pte = READ_ONCE(*ptep); + if (WARN_ON(!pte)) + return 0; + + /* If the size matches this level, we're in the right place */ + if (size == DART_BLOCK_SIZE(lvl, data)) { + max_entries = DART_PTES_PER_TABLE(data) - unmap_idx_start; + num_entries = min_t(int, pgcount, max_entries); + + while (i < num_entries) { + pte = READ_ONCE(*ptep); + if (WARN_ON(!pte)) + break; + + /* clear pte */ + *ptep = 0; + + if (!iopte_leaf(pte, lvl, iop->fmt)) { + /* Also flush any partial walks */ + io_pgtable_tlb_flush_walk(iop, iova + i * size, size, + DART_GRANULE(data)); + __dart_free_pgtable(data, lvl + 1, iopte_deref(pte, data)); + } else if (!iommu_iotlb_gather_queued(gather)) { + io_pgtable_tlb_add_page(iop, gather, iova + i * size, size); + } + + ptep++; + i++; + } + + return i * size; + } + + /* Keep on walkin' */ + ptep = 
iopte_deref(pte, data); + return __dart_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep); +} + +static size_t dart_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova, + size_t pgsize, size_t pgcount, + struct iommu_iotlb_gather *gather) +{ + struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops); + struct io_pgtable_cfg *cfg = &data->iop.cfg; + dart_iopte *ptep = data->pgd; + long iaext = (s64)iova >> cfg->ias; + + if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount)) + return 0; + + if (WARN_ON(iaext)) + return 0; + + return __dart_unmap(data, gather, iova, pgsize, pgcount, + data->start_level, ptep); +} + +static size_t dart_unmap(struct io_pgtable_ops *ops, unsigned long iova, + size_t size, struct iommu_iotlb_gather *gather) +{ + return dart_unmap_pages(ops, iova, size, 1, gather); +} + +static phys_addr_t dart_iova_to_phys(struct io_pgtable_ops *ops, + unsigned long iova) +{ + struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops); + dart_iopte pte, *ptep = data->pgd; + int lvl = data->start_level; + + do { + /* Valid IOPTE pointer? */ + if (!ptep) + return 0; + + /* Grab the IOPTE we're interested in */ + ptep += DART_LVL_IDX(iova, lvl, data); + pte = READ_ONCE(*ptep); + + /* Valid entry? */ + if (!pte) + return 0; + + /* Leaf entry? */ + if (iopte_leaf(pte, lvl, data->iop.fmt)) + goto found_translation; + + /* Take it to the next level */ + ptep = iopte_deref(pte, data); + } while (++lvl < DART_MAX_LEVELS); + + /* Ran out of page tables to walk */ + return 0; + +found_translation: + iova &= (DART_BLOCK_SIZE(lvl, data) - 1); + return iopte_to_paddr(pte, data) | iova; +} + +static void dart_restrict_pgsizes(struct io_pgtable_cfg *cfg) +{ + unsigned long granule, page_sizes; + unsigned int max_addr_bits = 48; + + /* + * We need to restrict the supported page sizes to match the + * translation regime for a particular granule. Aim to match + * the CPU page size if possible, otherwise prefer smaller sizes. + * While we're at it, restrict the block sizes to match the + * chosen granule. 
+ */ + if (cfg->pgsize_bitmap & PAGE_SIZE) + granule = PAGE_SIZE; + else if (cfg->pgsize_bitmap & ~PAGE_MASK) + granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK); + else if (cfg->pgsize_bitmap & PAGE_MASK) + granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK); + else + granule = 0; + + switch (granule) { + case SZ_4K: + page_sizes = (SZ_4K | SZ_2M | SZ_1G); + break; + case SZ_16K: + page_sizes = (SZ_16K | SZ_32M); + break; + default: + page_sizes = 0; + } + + cfg->pgsize_bitmap &= page_sizes; + cfg->ias = min(cfg->ias, max_addr_bits); + cfg->oas = min(cfg->oas, max_addr_bits); +} + +static struct dart_io_pgtable * +dart_alloc_pgtable(struct io_pgtable_cfg *cfg) +{ + struct dart_io_pgtable *data; + int bits_per_level, levels, va_bits, pg_shift; + + dart_restrict_pgsizes(cfg); + + if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K))) + return NULL; + + if (cfg->ias > DART_MAX_ADDR_BITS) + return NULL; + + if (cfg->oas > DART_MAX_ADDR_BITS) + return NULL; + + pg_shift = __ffs(cfg->pgsize_bitmap); + bits_per_level = pg_shift - ilog2(sizeof(dart_iopte)); + + va_bits = cfg->ias - pg_shift; + levels = DIV_ROUND_UP(va_bits, bits_per_level); + if (levels > DART_MAX_LEVELS) + return NULL; + + data = kmalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return NULL; + + data->bits_per_level = bits_per_level; + data->start_level = DART_MAX_LEVELS - levels; + + /* Calculate the actual size of our pgd (without concatenation) */ + data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1)); + + data->iop.ops = (struct io_pgtable_ops) { + .map = dart_map, + .map_pages = dart_map_pages, + .unmap = dart_unmap, + .unmap_pages = dart_unmap_pages, + .iova_to_phys = dart_iova_to_phys, + }; + + return data; +} + +static struct io_pgtable * +apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie) +{ + struct dart_io_pgtable *data; + int i; + + if (!cfg->coherent_walk) + return NULL; + + if (cfg->oas != 36 && cfg->oas != 42) + return NULL; + + data = dart_alloc_pgtable(cfg); + if (!data) + return NULL; + + /* + * The table format itself always uses two levels, but the total VA + * space is mapped by four separate tables, making the MMIO registers + * an effective "level 1". For simplicity, though, we treat this + * equivalently to LPAE stage 2 concatenation at level 2, with the + * additional TTBRs each just pointing at consecutive pages. 
+ */ + if (data->start_level == 0 && data->pgd_bits > 2) + goto out_free_data; + if (data->start_level > 0) + data->pgd_bits = 0; + data->start_level = 1; + cfg->apple_dart_cfg.n_ttbrs = 1 << data->pgd_bits; + data->pgd_bits += data->bits_per_level; + + data->pgd = __dart_alloc_pages(DART_PGD_SIZE(data), GFP_KERNEL, + cfg); + if (!data->pgd) + goto out_free_data; + + for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) + cfg->apple_dart_cfg.ttbr[i] = + virt_to_phys(data->pgd + i * DART_GRANULE(data)); + + return &data->iop; + +out_free_data: + kfree(data); + return NULL; +} + +static void apple_dart_free_pgtable(struct io_pgtable *iop) +{ + struct dart_io_pgtable *data = io_pgtable_to_data(iop); + + __dart_free_pgtable(data, data->start_level, data->pgd); + kfree(data); +} + +struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns = { + .alloc = apple_dart_alloc_pgtable, + .free = apple_dart_free_pgtable, +}; diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c index f4bfcef98297..49f46e1eabf7 100644 --- a/drivers/iommu/io-pgtable.c +++ b/drivers/iommu/io-pgtable.c @@ -20,7 +20,10 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = { [ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns, [ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns, [ARM_MALI_LPAE] = &io_pgtable_arm_mali_lpae_init_fns, +#endif +#ifdef CONFIG_IOMMU_IO_PGTABLE_DART [APPLE_DART] = &io_pgtable_apple_dart_init_fns, + [APPLE_DART2] = &io_pgtable_apple_dart_init_fns, #endif #ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S [ARM_V7S] = &io_pgtable_arm_v7s_init_fns, diff --git a/drivers/mailbox/apple-mailbox.c b/drivers/mailbox/apple-mailbox.c index 496c4951ccb1..06c7ad3b1c03 100644 --- a/drivers/mailbox/apple-mailbox.c +++ b/drivers/mailbox/apple-mailbox.c @@ -17,6 +17,7 @@ */ #include <linux/apple-mailbox.h> +#include <linux/delay.h> #include <linux/device.h> #include <linux/gfp.h> #include <linux/interrupt.h> @@ -25,6 +26,7 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> +#include <linux/spinlock.h> #include <linux/types.h> #define APPLE_ASC_MBOX_CONTROL_FULL BIT(16) @@ -100,6 +102,9 @@ struct apple_mbox { struct device *dev; struct mbox_controller controller; + spinlock_t rx_lock; + spinlock_t tx_lock; + bool tx_pending; }; static const struct of_device_id apple_mbox_of_match[]; @@ -112,6 +117,14 @@ static bool apple_mbox_hw_can_send(struct apple_mbox *apple_mbox) return !(mbox_ctrl & apple_mbox->hw->control_full); } +static bool apple_mbox_hw_send_empty(struct apple_mbox *apple_mbox) +{ + u32 mbox_ctrl = + readl_relaxed(apple_mbox->regs + apple_mbox->hw->a2i_control); + + return mbox_ctrl & apple_mbox->hw->control_empty; +} + static int apple_mbox_hw_send(struct apple_mbox *apple_mbox, struct apple_mbox_msg *msg) { @@ -155,11 +168,15 @@ static int apple_mbox_chan_send_data(struct mbox_chan *chan, void *data) { struct apple_mbox *apple_mbox = chan->con_priv; struct apple_mbox_msg *msg = data; + unsigned long flags; int ret; + spin_lock_irqsave(&apple_mbox->tx_lock, flags); + WARN_ON(apple_mbox->tx_pending); + ret = apple_mbox_hw_send(apple_mbox, msg); if (ret) - return ret; + goto err_unlock; /* * The interrupt is level triggered and will keep firing as long as the @@ -174,9 +191,13 @@ static int apple_mbox_chan_send_data(struct mbox_chan *chan, void *data) writel_relaxed(apple_mbox->hw->irq_bit_send_empty, apple_mbox->regs + apple_mbox->hw->irq_ack); } + apple_mbox->tx_pending = true; enable_irq(apple_mbox->irq_send_empty); - return 0; +err_unlock: + 
spin_unlock_irqrestore(&apple_mbox->tx_lock, flags); + + return ret; } static irqreturn_t apple_mbox_send_empty_irq(int irq, void *data) @@ -191,17 +212,27 @@ static irqreturn_t apple_mbox_send_empty_irq(int irq, void *data) * it at the main controller again. */ disable_irq_nosync(apple_mbox->irq_send_empty); - mbox_chan_txdone(&apple_mbox->chan, 0); + spin_lock(&apple_mbox->tx_lock); + if (apple_mbox->tx_pending) { + apple_mbox->tx_pending = false; + spin_unlock(&apple_mbox->tx_lock); + mbox_chan_txdone(&apple_mbox->chan, 0); + } else { + spin_unlock(&apple_mbox->tx_lock); + } return IRQ_HANDLED; } -static irqreturn_t apple_mbox_recv_irq(int irq, void *data) +static int apple_mbox_poll(struct apple_mbox *apple_mbox) { - struct apple_mbox *apple_mbox = data; + struct apple_mbox_msg msg; + int ret = 0; - while (apple_mbox_hw_recv(apple_mbox, &msg) == 0) + while (apple_mbox_hw_recv(apple_mbox, &msg) == 0) { mbox_chan_received_data(&apple_mbox->chan, (void *)&msg); + ret++; + } /* * The interrupt will keep firing even if there are no more messages @@ -216,9 +247,57 @@ static irqreturn_t apple_mbox_recv_irq(int irq, void *data) apple_mbox->regs + apple_mbox->hw->irq_ack); } + return ret; +} + +static irqreturn_t apple_mbox_recv_irq(int irq, void *data) +{ + struct apple_mbox *apple_mbox = data; + + spin_lock(&apple_mbox->rx_lock); + apple_mbox_poll(apple_mbox); + spin_unlock(&apple_mbox->rx_lock); + return IRQ_HANDLED; } +static bool apple_mbox_chan_peek_data(struct mbox_chan *chan) +{ + struct apple_mbox *apple_mbox = chan->con_priv; + unsigned long flags; + int ret; + + spin_lock_irqsave(&apple_mbox->rx_lock, flags); + ret = apple_mbox_poll(apple_mbox); + spin_unlock_irqrestore(&apple_mbox->rx_lock, flags); + + return ret > 0; +} + +static int apple_mbox_chan_flush(struct mbox_chan *chan, unsigned long timeout) +{ + struct apple_mbox *apple_mbox = chan->con_priv; + unsigned long deadline = jiffies + msecs_to_jiffies(timeout); + unsigned long flags; + + while (time_before(jiffies, deadline)) { + if (apple_mbox_hw_send_empty(apple_mbox)) { + spin_lock_irqsave(&apple_mbox->tx_lock, flags); + if (apple_mbox->tx_pending) { + apple_mbox->tx_pending = false; + disable_irq_nosync(apple_mbox->irq_send_empty); + } + /* Mailbox subsystem will call txdone for us */ + spin_unlock_irqrestore(&apple_mbox->tx_lock, flags); + return 0; + } + + udelay(1); + } + + return -ETIME; +} + static int apple_mbox_chan_startup(struct mbox_chan *chan) { struct apple_mbox *apple_mbox = chan->con_priv; @@ -250,6 +329,8 @@ static void apple_mbox_chan_shutdown(struct mbox_chan *chan) static const struct mbox_chan_ops apple_mbox_ops = { .send_data = apple_mbox_chan_send_data, + .peek_data = apple_mbox_chan_peek_data, + .flush = apple_mbox_chan_flush, .startup = apple_mbox_chan_startup, .shutdown = apple_mbox_chan_shutdown, }; @@ -304,6 +385,8 @@ static int apple_mbox_probe(struct platform_device *pdev) mbox->controller.txdone_irq = true; mbox->controller.of_xlate = apple_mbox_of_xlate; mbox->chan.con_priv = mbox; + spin_lock_init(&mbox->rx_lock); + spin_lock_init(&mbox->tx_lock); irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-recv", dev_name(dev)); if (!irqname) @@ -311,8 +394,8 @@ static int apple_mbox_probe(struct platform_device *pdev) ret = devm_request_threaded_irq(dev, mbox->irq_recv_not_empty, NULL, apple_mbox_recv_irq, - IRQF_NO_AUTOEN | IRQF_ONESHOT, irqname, - mbox); + IRQF_NO_AUTOEN | IRQF_ONESHOT | IRQF_NO_SUSPEND, + irqname, mbox); if (ret) return ret; @@ -320,9 +403,8 @@ static int apple_mbox_probe(struct 
platform_device *pdev) if (!irqname) return -ENOMEM; - ret = devm_request_irq(dev, mbox->irq_send_empty, - apple_mbox_send_empty_irq, IRQF_NO_AUTOEN, - irqname, mbox); + ret = devm_request_irq(dev, mbox->irq_send_empty, apple_mbox_send_empty_irq, + IRQF_NO_AUTOEN | IRQF_NO_SUSPEND, irqname, mbox); if (ret) return ret; diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c index 4229b9b5da98..e27500e8cba9 100644 --- a/drivers/mailbox/mailbox.c +++ b/drivers/mailbox/mailbox.c @@ -310,7 +310,7 @@ int mbox_flush(struct mbox_chan *chan, unsigned long timeout) return -ENOTSUPP; ret = chan->mbox->ops->flush(chan, timeout); - if (ret < 0) + if (ret >= 0) tx_tick(chan, ret); return ret; diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 3b59456f5545..e675aaf00bc8 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -51,6 +51,21 @@ config MFD_ACT8945A linear regulators, along with a complete ActivePath battery charger. +config MFD_APPLE_SPMI_PMU + tristate "Apple SPMI PMUs" + depends on SPMI + depends on ARCH_APPLE || COMPILE_TEST + default ARCH_APPLE + select MFD_SIMPLE_MFD_SPMI + help + Say yes here to enable support for Apple PMUs attached via the + SPMI bus. These can be found on Apple devices such as Apple + Silicon Macs. + + This driver itself only attaches to the core device, and relies + on subsystem drivers for individual device functions. You must + enable those for it to be useful. + config MFD_SUN4I_GPADC tristate "Allwinner sunxi platforms' GPADC MFD driver" select MFD_CORE @@ -1214,6 +1229,19 @@ config MFD_SIMPLE_MFD_I2C sub-devices represented by child nodes in Device Tree will be subsequently registered. +config MFD_SIMPLE_MFD_SPMI + tristate + depends on SPMI + select MFD_CORE + select REGMAP_SPMI + help + This driver creates a single register map with the intention for it + to be shared by all sub-devices. + + Once the register map has been successfully initialised, any + sub-devices represented by child nodes in Device Tree will be + subsequently registered. 
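
To illustrate the "shared register map" model described in the MFD_SIMPLE_MFD_SPMI help text above, here is a minimal, hypothetical sub-device sketch (not part of this series; the compatible string, register offset and driver name are made up): a child platform driver populated from a Device Tree sub-node can fetch the regmap that the simple MFD registered on its parent SPMI device via dev_get_regmap().

        /*
         * Hypothetical PMU sub-function driver: the regmap was created by the
         * parent simple-mfd-spmi device, so the child only looks it up.
         * Register offset 0x1234 and the compatible string are placeholders.
         */
        #include <linux/module.h>
        #include <linux/of.h>
        #include <linux/platform_device.h>
        #include <linux/regmap.h>

        static int example_pmu_child_probe(struct platform_device *pdev)
        {
                struct regmap *regmap;
                unsigned int val;
                int ret;

                /* The shared regmap is registered on the parent SPMI device */
                regmap = dev_get_regmap(pdev->dev.parent, NULL);
                if (!regmap)
                        return -ENODEV;

                ret = regmap_read(regmap, 0x1234, &val);
                if (ret)
                        return ret;

                dev_info(&pdev->dev, "reg 0x1234 = 0x%02x\n", val);
                return 0;
        }

        static const struct of_device_id example_pmu_child_of_match[] = {
                { .compatible = "apple,example-pmu-child" },
                {}
        };
        MODULE_DEVICE_TABLE(of, example_pmu_child_of_match);

        static struct platform_driver example_pmu_child_driver = {
                .probe = example_pmu_child_probe,
                .driver = {
                        .name = "example-pmu-child",
                        .of_match_table = example_pmu_child_of_match,
                },
        };
        module_platform_driver(example_pmu_child_driver);
        MODULE_LICENSE("GPL");

Because the regmap lives on the parent, all sub-function drivers can share a single SPMI register space without opening the bus themselves, which is exactly the split the help text describes.
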
+ config MFD_SL28CPLD tristate "Kontron sl28cpld Board Management Controller" depends on I2C diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 858cacf659d6..8d8abcd68351 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -266,6 +266,7 @@ obj-$(CONFIG_MFD_QCOM_PM8008) += qcom-pm8008.o obj-$(CONFIG_SGI_MFD_IOC3) += ioc3.o obj-$(CONFIG_MFD_SIMPLE_MFD_I2C) += simple-mfd-i2c.o +obj-$(CONFIG_MFD_SIMPLE_MFD_SPMI) += simple-mfd-spmi.o obj-$(CONFIG_MFD_INTEL_M10_BMC) += intel-m10-bmc.o obj-$(CONFIG_MFD_ATC260X) += atc260x-core.o diff --git a/drivers/mfd/simple-mfd-spmi.c b/drivers/mfd/simple-mfd-spmi.c new file mode 100644 index 000000000000..99f25751000a --- /dev/null +++ b/drivers/mfd/simple-mfd-spmi.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Simple MFD - SPMI + * + * Copyright The Asahi Linux Contributors + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/regmap.h> +#include <linux/spmi.h> +#include <linux/of_platform.h> + +static const struct regmap_config spmi_regmap_config = { + .reg_bits = 16, + .val_bits = 8, + .max_register = 0xffff, +}; + +static int simple_spmi_probe(struct spmi_device *sdev) +{ + struct regmap *regmap; + + regmap = devm_regmap_init_spmi_ext(sdev, &spmi_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + return devm_of_platform_populate(&sdev->dev); +} + +static const struct of_device_id simple_spmi_id_table[] = { + { .compatible = "apple,spmi-pmu" }, + {} +}; +MODULE_DEVICE_TABLE(of, simple_spmi_id_table); + +static struct spmi_driver pmic_spmi_driver = { + .probe = simple_spmi_probe, + .driver = { + .name = "simple-mfd-spmi", + .owner = THIS_MODULE, + .of_match_table = simple_spmi_id_table, + }, +}; +module_spmi_driver(pmic_spmi_driver); + +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_DESCRIPTION("Simple MFD - SPMI driver"); +MODULE_AUTHOR("Hector Martin <marcan@marcan.st>"); diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index ed53276f6ad9..2fd4221b24aa 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -2015,6 +2015,7 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot( struct sdhci_host *host; int ret, bar = first_bar + slotno; size_t priv_size = chip->fixes ? chip->fixes->priv_size : 0; + u32 cd_debounce_delay_ms; if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "BAR %d is not iomem. 
Aborting.\n", bar); @@ -2081,6 +2082,10 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot( if (host->mmc->caps & MMC_CAP_CD_WAKE) device_init_wakeup(&pdev->dev, true); + if (device_property_read_u32(&pdev->dev, "cd-debounce-delay-ms", + &cd_debounce_delay_ms)) + cd_debounce_delay_ms = 200; + if (slot->cd_idx >= 0) { ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx, slot->cd_override_level, 0); @@ -2088,7 +2093,7 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot( ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx, slot->cd_override_level, - 0); + cd_debounce_delay_ms * 1000); if (ret == -EPROBE_DEFER) goto remove; @@ -2096,6 +2101,16 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot( dev_warn(&pdev->dev, "failed to setup card detect gpio\n"); slot->cd_idx = -1; } + } else if (is_of_node(pdev->dev.fwnode)) { + /* Allow all OF systems to use a CD GPIO if provided */ + + ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, + slot->cd_override_level, + cd_debounce_delay_ms * 1000); + if (ret == -EPROBE_DEFER) + goto remove; + else if (ret == 0) + slot->cd_idx = 0; } if (chip->fixes && chip->fixes->add_host) diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index ac2d400d1d6c..aeb0294385ed 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1965,55 +1965,55 @@ static const struct driver_info at_umc2000sp_info = { static const struct usb_device_id products[] = { { /* ASIX AX88179 10/100/1000 */ - USB_DEVICE(0x0b95, 0x1790), + USB_DEVICE_AND_INTERFACE_INFO(0x0b95, 0x1790, 0xff, 0xff, 0), .driver_info = (unsigned long)&ax88179_info, }, { /* ASIX AX88178A 10/100/1000 */ - USB_DEVICE(0x0b95, 0x178a), + USB_DEVICE_AND_INTERFACE_INFO(0x0b95, 0x178a, 0xff, 0xff, 0), .driver_info = (unsigned long)&ax88178a_info, }, { /* Cypress GX3 SuperSpeed to Gigabit Ethernet Bridge Controller */ - USB_DEVICE(0x04b4, 0x3610), + USB_DEVICE_AND_INTERFACE_INFO(0x04b4, 0x3610, 0xff, 0xff, 0), .driver_info = (unsigned long)&cypress_GX3_info, }, { /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */ - USB_DEVICE(0x2001, 0x4a00), + USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x4a00, 0xff, 0xff, 0), .driver_info = (unsigned long)&dlink_dub1312_info, }, { /* Sitecom USB 3.0 to Gigabit Adapter */ - USB_DEVICE(0x0df6, 0x0072), + USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0072, 0xff, 0xff, 0), .driver_info = (unsigned long)&sitecom_info, }, { /* Samsung USB Ethernet Adapter */ - USB_DEVICE(0x04e8, 0xa100), + USB_DEVICE_AND_INTERFACE_INFO(0x04e8, 0xa100, 0xff, 0xff, 0), .driver_info = (unsigned long)&samsung_info, }, { /* Lenovo OneLinkDock Gigabit LAN */ - USB_DEVICE(0x17ef, 0x304b), + USB_DEVICE_AND_INTERFACE_INFO(0x17ef, 0x304b, 0xff, 0xff, 0), .driver_info = (unsigned long)&lenovo_info, }, { /* Belkin B2B128 USB 3.0 Hub + Gigabit Ethernet Adapter */ - USB_DEVICE(0x050d, 0x0128), + USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x0128, 0xff, 0xff, 0), .driver_info = (unsigned long)&belkin_info, }, { /* Toshiba USB 3.0 GBit Ethernet Adapter */ - USB_DEVICE(0x0930, 0x0a13), + USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x0a13, 0xff, 0xff, 0), .driver_info = (unsigned long)&toshiba_info, }, { /* Magic Control Technology U3-A9003 USB 3.0 Gigabit Ethernet Adapter */ - USB_DEVICE(0x0711, 0x0179), + USB_DEVICE_AND_INTERFACE_INFO(0x0711, 0x0179, 0xff, 0xff, 0), .driver_info = (unsigned long)&mct_info, }, { /* Allied Telesis AT-UMC2000 USB 3.0/USB 3.1 Gen 1 to Gigabit Ethernet Adapter */ - USB_DEVICE(0x07c9, 0x000e), + USB_DEVICE_AND_INTERFACE_INFO(0x07c9, 0x000e, 0xff, 0xff, 
0), .driver_info = (unsigned long)&at_umc2000_info, }, { /* Allied Telesis AT-UMC200 USB 3.0/USB 3.1 Gen 1 to Fast Ethernet Adapter */ - USB_DEVICE(0x07c9, 0x000f), + USB_DEVICE_AND_INTERFACE_INFO(0x07c9, 0x000f, 0xff, 0xff, 0), .driver_info = (unsigned long)&at_umc200_info, }, { /* Allied Telesis AT-UMC2000/SP USB 3.0/USB 3.1 Gen 1 to Gigabit Ethernet Adapter */ - USB_DEVICE(0x07c9, 0x0010), + USB_DEVICE_AND_INTERFACE_INFO(0x07c9, 0x0010, 0xff, 0xff, 0), .driver_info = (unsigned long)&at_umc2000sp_info, }, { }, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile index 13c13504a6e8..19009eb9db93 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile @@ -47,3 +47,5 @@ brcmfmac-$(CONFIG_OF) += \ of.o brcmfmac-$(CONFIG_DMI) += \ dmi.o +brcmfmac-$(CONFIG_ACPI) += \ + acpi.o diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/acpi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/acpi.c new file mode 100644 index 000000000000..dec6a83d13b1 --- /dev/null +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/acpi.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright The Asahi Linux Contributors + */ + +#include <linux/acpi.h> +#include "debug.h" +#include "core.h" +#include "common.h" + +void brcmf_acpi_probe(struct device *dev, enum brcmf_bus_type bus_type, + struct brcmf_mp_device *settings) +{ + acpi_status status; + const union acpi_object *o; + struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL}; + struct acpi_device *adev = ACPI_COMPANION(dev); + + if (!adev) + return; + + if (!ACPI_FAILURE(acpi_dev_get_property(adev, "module-instance", + ACPI_TYPE_STRING, &o))) { + brcmf_dbg(INFO, "ACPI module-instance=%s\n", o->string.pointer); + settings->board_type = devm_kasprintf(dev, GFP_KERNEL, + "apple,%s", + o->string.pointer); + } else { + brcmf_dbg(INFO, "No ACPI module-instance\n"); + } + + status = acpi_evaluate_object(adev->handle, "RWCV", NULL, &buf); + o = buf.pointer; + if (!ACPI_FAILURE(status) && o && o->type == ACPI_TYPE_BUFFER && + o->buffer.length >= 2) { + char *antenna_sku = devm_kzalloc(dev, 3, GFP_KERNEL); + + if (!antenna_sku) { + brcmf_err("Failed to allocate antenna-sku"); + } else { + memcpy(antenna_sku, o->buffer.pointer, 2); + brcmf_dbg(INFO, "ACPI RWCV data=%*phN antenna-sku=%s\n", + (int)o->buffer.length, o->buffer.pointer, + antenna_sku); + settings->antenna_sku = antenna_sku; + } + + kfree(buf.pointer); + } else { + brcmf_dbg(INFO, "No ACPI antenna-sku\n"); + } +} diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h index 3f5da3bb6aa5..f4bd98da9761 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h @@ -7,6 +7,8 @@ #define BRCMFMAC_BUS_H #include "debug.h" +#include <linux/kernel.h> +#include <linux/firmware.h> /* IDs of the 6 default common rings of msgbuf protocol */ #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT 0 @@ -34,6 +36,12 @@ enum brcmf_bus_protocol_type { BRCMF_PROTO_MSGBUF }; +/* Firmware blobs that may be available */ +enum brcmf_blob_type { + BRCMF_BLOB_CLM, + BRCMF_BLOB_TXCAP, +}; + struct brcmf_mp_device; struct brcmf_bus_dcmd { @@ -60,7 +68,7 @@ struct brcmf_bus_dcmd { * @wowl_config: specify if dongle is configured for wowl when going to suspend * @get_ramsize: obtain size of device memory. 
* @get_memdump: obtain device memory dump in provided buffer. - * @get_fwname: obtain firmware name. + * @get_blob: obtain a firmware blob. * * This structure provides an abstract interface towards the * bus specific driver. For control messages to common driver @@ -77,8 +85,8 @@ struct brcmf_bus_ops { void (*wowl_config)(struct device *dev, bool enabled); size_t (*get_ramsize)(struct device *dev); int (*get_memdump)(struct device *dev, void *data, size_t len); - int (*get_fwname)(struct device *dev, const char *ext, - unsigned char *fw_name); + int (*get_blob)(struct device *dev, const struct firmware **fw, + enum brcmf_blob_type type); void (*debugfs_create)(struct device *dev); int (*reset)(struct device *dev); }; @@ -220,10 +228,10 @@ int brcmf_bus_get_memdump(struct brcmf_bus *bus, void *data, size_t len) } static inline -int brcmf_bus_get_fwname(struct brcmf_bus *bus, const char *ext, - unsigned char *fw_name) +int brcmf_bus_get_blob(struct brcmf_bus *bus, const struct firmware **fw, + enum brcmf_blob_type type) { - return bus->ops->get_fwname(bus->dev, ext, fw_name); + return bus->ops->get_blob(bus->dev, fw, type); } static inline diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 605206abe424..63ecbbe11af1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -770,12 +770,50 @@ void brcmf_set_mpc(struct brcmf_if *ifp, int mpc) } } +static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg, + struct brcmf_scan_params_v2_le *params_le, + struct cfg80211_scan_request *request); + +static void brcmf_scan_params_v2_to_v1(struct brcmf_scan_params_v2_le *params_v2_le, + struct brcmf_scan_params_le *params_le) +{ + size_t params_size; + u32 ch; + int n_channels, n_ssids; + + memcpy(¶ms_le->ssid_le, ¶ms_v2_le->ssid_le, + sizeof(params_le->ssid_le)); + memcpy(¶ms_le->bssid, ¶ms_v2_le->bssid, + sizeof(params_le->bssid)); + + params_le->bss_type = params_v2_le->bss_type; + params_le->scan_type = params_v2_le->scan_type; + params_le->nprobes = params_v2_le->nprobes; + params_le->active_time = params_v2_le->active_time; + params_le->passive_time = params_v2_le->passive_time; + params_le->home_time = params_v2_le->home_time; + params_le->channel_num = params_v2_le->channel_num; + + ch = le32_to_cpu(params_v2_le->channel_num); + n_channels = ch & BRCMF_SCAN_PARAMS_COUNT_MASK; + n_ssids = ch >> BRCMF_SCAN_PARAMS_NSSID_SHIFT; + + params_size = sizeof(u16) * n_channels; + if (n_ssids > 0) { + params_size = roundup(params_size, sizeof(u32)); + params_size += sizeof(struct brcmf_ssid_le) * n_ssids; + } + + memcpy(¶ms_le->channel_list[0], + ¶ms_v2_le->channel_list[0], params_size); +} + s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp, bool aborted, bool fw_abort) { struct brcmf_pub *drvr = cfg->pub; - struct brcmf_scan_params_le params_le; + struct brcmf_scan_params_v2_le params_v2_le; struct cfg80211_scan_request *scan_request; u64 reqid; u32 bucket; @@ -794,20 +832,23 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, if (fw_abort) { /* Do a scan abort to stop the driver's scan engine */ brcmf_dbg(SCAN, "ABORT scan in firmware\n"); - memset(¶ms_le, 0, sizeof(params_le)); - eth_broadcast_addr(params_le.bssid); - params_le.bss_type = DOT11_BSSTYPE_ANY; - params_le.scan_type = 0; - params_le.channel_num = cpu_to_le32(1); - params_le.nprobes = cpu_to_le32(1); - 
params_le.active_time = cpu_to_le32(-1); - params_le.passive_time = cpu_to_le32(-1); - params_le.home_time = cpu_to_le32(-1); - /* Scan is aborted by setting channel_list[0] to -1 */ - params_le.channel_list[0] = cpu_to_le16(-1); + + brcmf_escan_prep(cfg, ¶ms_v2_le, NULL); + /* E-Scan (or anyother type) can be aborted by SCAN */ - err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN, - ¶ms_le, sizeof(params_le)); + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_SCAN_V2)) { + err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN, + ¶ms_v2_le, + sizeof(params_v2_le)); + } else { + struct brcmf_scan_params_le params_le; + + brcmf_scan_params_v2_to_v1(¶ms_v2_le, ¶ms_le); + err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN, + ¶ms_le, + sizeof(params_le)); + } + if (err) bphy_err(drvr, "Scan abort failed\n"); } @@ -1027,7 +1068,7 @@ done: } static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg, - struct brcmf_scan_params_le *params_le, + struct brcmf_scan_params_v2_le *params_le, struct cfg80211_scan_request *request) { u32 n_ssids; @@ -1036,9 +1077,14 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg, s32 offset; u16 chanspec; char *ptr; + int length; struct brcmf_ssid_le ssid_le; eth_broadcast_addr(params_le->bssid); + + length = BRCMF_SCAN_PARAMS_V2_FIXED_SIZE; + + params_le->version = BRCMF_SCAN_PARAMS_VERSION_V2; params_le->bss_type = DOT11_BSSTYPE_ANY; params_le->scan_type = BRCMF_SCANTYPE_ACTIVE; params_le->channel_num = 0; @@ -1048,6 +1094,15 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg, params_le->home_time = cpu_to_le32(-1); memset(¶ms_le->ssid_le, 0, sizeof(params_le->ssid_le)); + /* Scan abort */ + if (!request) { + length += sizeof(u16); + params_le->channel_num = cpu_to_le32(1); + params_le->channel_list[0] = cpu_to_le16(-1); + params_le->length = cpu_to_le16(length); + return; + } + n_ssids = request->n_ssids; n_channels = request->n_channels; @@ -1055,6 +1110,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg, brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n", n_channels); if (n_channels > 0) { + length += roundup(sizeof(u16) * n_channels, sizeof(u32)); for (i = 0; i < n_channels; i++) { chanspec = channel_to_chanspec(&cfg->d11inf, request->channels[i]); @@ -1065,12 +1121,14 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg, } else { brcmf_dbg(SCAN, "Scanning all channels\n"); } + /* Copy ssid array if applicable */ brcmf_dbg(SCAN, "### List of SSIDs to scan ### %d\n", n_ssids); if (n_ssids > 0) { - offset = offsetof(struct brcmf_scan_params_le, channel_list) + + offset = offsetof(struct brcmf_scan_params_v2_le, channel_list) + n_channels * sizeof(u16); offset = roundup(offset, sizeof(u32)); + length += sizeof(ssid_le) * n_ssids, ptr = (char *)params_le + offset; for (i = 0; i < n_ssids; i++) { memset(&ssid_le, 0, sizeof(ssid_le)); @@ -1090,6 +1148,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg, brcmf_dbg(SCAN, "Performing passive scan\n"); params_le->scan_type = BRCMF_SCANTYPE_PASSIVE; } + params_le->length = cpu_to_le16(length); /* Adding mask to channel numbers */ params_le->channel_num = cpu_to_le32((n_ssids << BRCMF_SCAN_PARAMS_NSSID_SHIFT) | @@ -1101,8 +1160,8 @@ brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp, struct cfg80211_scan_request *request) { struct brcmf_pub *drvr = cfg->pub; - s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE + - offsetof(struct brcmf_escan_params_le, params_le); + s32 params_size = BRCMF_SCAN_PARAMS_V2_FIXED_SIZE + + offsetof(struct 
brcmf_escan_params_le, params_v2_le); struct brcmf_escan_params_le *params; s32 err = 0; @@ -1122,8 +1181,22 @@ brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp, goto exit; } BUG_ON(params_size + sizeof("escan") >= BRCMF_DCMD_MEDLEN); - brcmf_escan_prep(cfg, ¶ms->params_le, request); - params->version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION); + brcmf_escan_prep(cfg, ¶ms->params_v2_le, request); + + params->version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION_V2); + + if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_SCAN_V2)) { + struct brcmf_escan_params_le *params_v1; + + params_size -= BRCMF_SCAN_PARAMS_V2_FIXED_SIZE; + params_size += BRCMF_SCAN_PARAMS_FIXED_SIZE; + params_v1 = kzalloc(params_size, GFP_KERNEL); + params_v1->version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION); + brcmf_scan_params_v2_to_v1(¶ms->params_v2_le, ¶ms_v1->params_le); + kfree(params); + params = params_v1; + } + params->action = cpu_to_le16(WL_ESCAN_ACTION_START); params->sync_id = cpu_to_le16(0x1234); @@ -1344,51 +1417,44 @@ static u16 brcmf_map_fw_linkdown_reason(const struct brcmf_event_msg *e) return reason; } -static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len) +static int brcmf_set_wsec(struct brcmf_if *ifp, const u8 *key, u16 key_len, u16 flags) { struct brcmf_pub *drvr = ifp->drvr; struct brcmf_wsec_pmk_le pmk; - int i, err; + int err; + + if (key_len > sizeof(pmk.key)) { + bphy_err(drvr, "key must be less than %zu bytes\n", + sizeof(pmk.key)); + return -EINVAL; + } + + memset(&pmk, 0, sizeof(pmk)); - /* convert to firmware key format */ - pmk.key_len = cpu_to_le16(pmk_len << 1); - pmk.flags = cpu_to_le16(BRCMF_WSEC_PASSPHRASE); - for (i = 0; i < pmk_len; i++) - snprintf(&pmk.key[2 * i], 3, "%02x", pmk_data[i]); + /* pass key material directly */ + pmk.key_len = cpu_to_le16(key_len); + pmk.flags = cpu_to_le16(flags); + memcpy(pmk.key, key, key_len); - /* store psk in firmware */ + /* store key material in firmware */ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_WSEC_PMK, &pmk, sizeof(pmk)); if (err < 0) bphy_err(drvr, "failed to change PSK in firmware (len=%u)\n", - pmk_len); + key_len); return err; } +static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len) +{ + return brcmf_set_wsec(ifp, pmk_data, pmk_len, 0); +} + static int brcmf_set_sae_password(struct brcmf_if *ifp, const u8 *pwd_data, u16 pwd_len) { - struct brcmf_pub *drvr = ifp->drvr; - struct brcmf_wsec_sae_pwd_le sae_pwd; - int err; - - if (pwd_len > BRCMF_WSEC_MAX_SAE_PASSWORD_LEN) { - bphy_err(drvr, "sae_password must be less than %d\n", - BRCMF_WSEC_MAX_SAE_PASSWORD_LEN); - return -EINVAL; - } - - sae_pwd.key_len = cpu_to_le16(pwd_len); - memcpy(sae_pwd.key, pwd_data, pwd_len); - - err = brcmf_fil_iovar_data_set(ifp, "sae_password", &sae_pwd, - sizeof(sae_pwd)); - if (err < 0) - bphy_err(drvr, "failed to set SAE password in firmware (len=%u)\n", - pwd_len); - - return err; + return brcmf_set_wsec(ifp, pwd_data, pwd_len, BRCMF_WSEC_PASSPHRASE); } static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason, @@ -3978,6 +4044,37 @@ exit: return 0; } +static s32 +brcmf_pmksa_v3_op(struct brcmf_if *ifp, struct cfg80211_pmksa *pmksa, + bool alive) +{ + struct brcmf_pmk_op_v3_le *pmk_op; + int length = offsetof(struct brcmf_pmk_op_v3_le, pmk); + int ret; + + pmk_op = kzalloc(sizeof(*pmk_op), GFP_KERNEL); + pmk_op->version = cpu_to_le16(BRCMF_PMKSA_VER_3); + + if (!pmksa) { + /* Flush operation, operate on entire list */ + pmk_op->count = cpu_to_le16(0); + } else { + /* Single PMK 
operation */ + pmk_op->count = cpu_to_le16(1); + length += sizeof(struct brcmf_pmksa_v3); + memcpy(pmk_op->pmk[0].bssid, pmksa->bssid, ETH_ALEN); + memcpy(pmk_op->pmk[0].pmkid, pmksa->pmkid, WLAN_PMKID_LEN); + pmk_op->pmk[0].pmkid_len = WLAN_PMKID_LEN; + pmk_op->pmk[0].time_left = alive ? BRCMF_PMKSA_NO_EXPIRY : 0; + } + + pmk_op->length = cpu_to_le16(length); + + ret = brcmf_fil_iovar_data_set(ifp, "pmkid_info", pmk_op, sizeof(*pmk_op)); + kfree(pmk_op); + return ret; +} + static __used s32 brcmf_update_pmklist(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp) { @@ -4014,6 +4111,14 @@ brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev, if (!check_vif_up(ifp->vif)) return -EIO; + brcmf_dbg(CONN, "set_pmksa - PMK bssid: %pM =\n", pmksa->bssid); + brcmf_dbg(CONN, "%*ph\n", WLAN_PMKID_LEN, pmksa->pmkid); + + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PMKID_V3)) + return brcmf_pmksa_v3_op(ifp, pmksa, true); + + /* TODO: implement PMKID_V2 */ + npmk = le32_to_cpu(cfg->pmk_list.npmk); for (i = 0; i < npmk; i++) if (!memcmp(pmksa->bssid, pmk[i].bssid, ETH_ALEN)) @@ -4030,9 +4135,6 @@ brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev, return -EINVAL; } - brcmf_dbg(CONN, "set_pmksa - PMK bssid: %pM =\n", pmk[npmk].bssid); - brcmf_dbg(CONN, "%*ph\n", WLAN_PMKID_LEN, pmk[npmk].pmkid); - err = brcmf_update_pmklist(cfg, ifp); brcmf_dbg(TRACE, "Exit\n"); @@ -4056,6 +4158,11 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev, brcmf_dbg(CONN, "del_pmksa - PMK bssid = %pM\n", pmksa->bssid); + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PMKID_V3)) + return brcmf_pmksa_v3_op(ifp, pmksa, false); + + /* TODO: implement PMKID_V2 */ + npmk = le32_to_cpu(cfg->pmk_list.npmk); for (i = 0; i < npmk; i++) if (!memcmp(pmksa->bssid, pmk[i].bssid, ETH_ALEN)) @@ -4092,6 +4199,11 @@ brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev) if (!check_vif_up(ifp->vif)) return -EIO; + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PMKID_V3)) + return brcmf_pmksa_v3_op(ifp, NULL, false); + + /* TODO: implement PMKID_V2 */ + memset(&cfg->pmk_list, 0, sizeof(cfg->pmk_list)); err = brcmf_update_pmklist(cfg, ifp); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c index 4ec7773b6906..99502de31d56 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c @@ -212,8 +212,8 @@ struct sbsocramregs { #define ARMCR4_TCBANB_MASK 0xf #define ARMCR4_TCBANB_SHIFT 0 -#define ARMCR4_BSZ_MASK 0x3f -#define ARMCR4_BSZ_MULT 8192 +#define ARMCR4_BSZ_MASK 0x7f +#define ARMCR4_BLK_1K_MASK 0x200 struct brcmf_core_priv { struct brcmf_core pub; @@ -675,7 +675,8 @@ static u32 brcmf_chip_sysmem_ramsize(struct brcmf_core_priv *sysmem) } /** Return the TCM-RAM size of the ARMCR4 core. 
*/ -static u32 brcmf_chip_tcm_ramsize(struct brcmf_core_priv *cr4) +static u32 brcmf_chip_tcm_ramsize(struct brcmf_chip_priv *ci, + struct brcmf_core_priv *cr4) { u32 corecap; u32 memsize = 0; @@ -683,6 +684,7 @@ static u32 brcmf_chip_tcm_ramsize(struct brcmf_core_priv *cr4) u32 nbb; u32 totb; u32 bxinfo; + u32 blksize; u32 idx; corecap = brcmf_chip_core_read32(cr4, ARMCR4_CAP); @@ -694,7 +696,12 @@ static u32 brcmf_chip_tcm_ramsize(struct brcmf_core_priv *cr4) for (idx = 0; idx < totb; idx++) { brcmf_chip_core_write32(cr4, ARMCR4_BANKIDX, idx); bxinfo = brcmf_chip_core_read32(cr4, ARMCR4_BANKINFO); - memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT; + if (bxinfo & ARMCR4_BLK_1K_MASK) + blksize = 1024; + else + blksize = 8192; + + memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * blksize; } return memsize; @@ -727,11 +734,17 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci) return 0x200000; case BRCM_CC_4359_CHIP_ID: return (ci->pub.chiprev < 9) ? 0x180000 : 0x160000; + case BRCM_CC_4355_CHIP_ID: case BRCM_CC_4364_CHIP_ID: case CY_CC_4373_CHIP_ID: return 0x160000; case CY_CC_43752_CHIP_ID: + case BRCM_CC_4377_CHIP_ID: return 0x170000; + case BRCM_CC_4378_CHIP_ID: + return 0x352000; + case BRCM_CC_4387_CHIP_ID: + return 0x740000; default: brcmf_err("unknown chip: %s\n", ci->pub.name); break; @@ -749,7 +762,7 @@ int brcmf_chip_get_raminfo(struct brcmf_chip *pub) mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_ARM_CR4); if (mem) { mem_core = container_of(mem, struct brcmf_core_priv, pub); - ci->pub.ramsize = brcmf_chip_tcm_ramsize(mem_core); + ci->pub.ramsize = brcmf_chip_tcm_ramsize(ci, mem_core); ci->pub.rambase = brcmf_chip_tcm_rambase(ci); if (ci->pub.rambase == INVALID_RAMBASE) { brcmf_err("RAM base not provided with ARM CR4 core\n"); @@ -1286,15 +1299,18 @@ static bool brcmf_chip_cm3_set_active(struct brcmf_chip_priv *chip) static inline void brcmf_chip_cr4_set_passive(struct brcmf_chip_priv *chip) { + int i; struct brcmf_core *core; brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4); - core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211); - brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET | - D11_BCMA_IOCTL_PHYCLOCKEN, - D11_BCMA_IOCTL_PHYCLOCKEN, - D11_BCMA_IOCTL_PHYCLOCKEN); + /* Disable the cores only and let the firmware enable them. + * Releasing reset ourselves breaks BCM4387 in weird ways. 
+ */ + for (i = 0; (core = brcmf_chip_get_d11core(&chip->pub, i)); i++) + brcmf_chip_coredisable(core, D11_BCMA_IOCTL_PHYRESET | + D11_BCMA_IOCTL_PHYCLOCKEN, + D11_BCMA_IOCTL_PHYCLOCKEN); } static bool brcmf_chip_cr4_set_active(struct brcmf_chip_priv *chip, u32 rstvec) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index fe01da9e620d..280ca0f1ba18 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -101,7 +101,7 @@ void brcmf_c_set_joinpref_default(struct brcmf_if *ifp) static int brcmf_c_download(struct brcmf_if *ifp, u16 flag, struct brcmf_dload_data_le *dload_buf, - u32 len) + u32 len, const char *var) { s32 err; @@ -112,18 +112,17 @@ static int brcmf_c_download(struct brcmf_if *ifp, u16 flag, dload_buf->crc = cpu_to_le32(0); len = sizeof(*dload_buf) + len - 1; - err = brcmf_fil_iovar_data_set(ifp, "clmload", dload_buf, len); + err = brcmf_fil_iovar_data_set(ifp, var, dload_buf, len); return err; } -static int brcmf_c_process_clm_blob(struct brcmf_if *ifp) +static int brcmf_c_download_blob(struct brcmf_if *ifp, + const void *data, size_t size, + const char *loadvar, const char *statvar) { struct brcmf_pub *drvr = ifp->drvr; - struct brcmf_bus *bus = drvr->bus_if; struct brcmf_dload_data_le *chunk_buf; - const struct firmware *clm = NULL; - u8 clm_name[BRCMF_FW_NAME_LEN]; u32 chunk_len; u32 datalen; u32 cumulative_len; @@ -133,27 +132,11 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp) brcmf_dbg(TRACE, "Enter\n"); - memset(clm_name, 0, sizeof(clm_name)); - err = brcmf_bus_get_fwname(bus, ".clm_blob", clm_name); - if (err) { - bphy_err(drvr, "get CLM blob file name failed (%d)\n", err); - return err; - } - - err = firmware_request_nowarn(&clm, clm_name, bus->dev); - if (err) { - brcmf_info("no clm_blob available (err=%d), device may have limited channels available\n", - err); - return 0; - } - chunk_buf = kzalloc(sizeof(*chunk_buf) + MAX_CHUNK_LEN - 1, GFP_KERNEL); - if (!chunk_buf) { - err = -ENOMEM; - goto done; - } + if (!chunk_buf) + return -ENOMEM; - datalen = clm->size; + datalen = size; cumulative_len = 0; do { if (datalen > MAX_CHUNK_LEN) { @@ -162,9 +145,10 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp) chunk_len = datalen; dl_flag |= DL_END; } - memcpy(chunk_buf->data, clm->data + cumulative_len, chunk_len); + memcpy(chunk_buf->data, data + cumulative_len, chunk_len); - err = brcmf_c_download(ifp, dl_flag, chunk_buf, chunk_len); + err = brcmf_c_download(ifp, dl_flag, chunk_buf, chunk_len, + loadvar); dl_flag &= ~DL_BEGIN; @@ -173,20 +157,81 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp) } while ((datalen > 0) && (err == 0)); if (err) { - bphy_err(drvr, "clmload (%zu byte file) failed (%d)\n", - clm->size, err); - /* Retrieve clmload_status and print */ - err = brcmf_fil_iovar_int_get(ifp, "clmload_status", &status); + bphy_err(drvr, "%s (%zu byte file) failed (%d)\n", + loadvar, size, err); + /* Retrieve status and print */ + err = brcmf_fil_iovar_int_get(ifp, statvar, &status); if (err) - bphy_err(drvr, "get clmload_status failed (%d)\n", err); + bphy_err(drvr, "get %s failed (%d)\n", statvar, err); else - brcmf_dbg(INFO, "clmload_status=%d\n", status); + brcmf_dbg(INFO, "%s=%d\n", statvar, status); err = -EIO; } kfree(chunk_buf); -done: - release_firmware(clm); + return err; +} + +static int brcmf_c_process_clm_blob(struct brcmf_if *ifp) +{ + struct brcmf_pub *drvr = 
ifp->drvr; + struct brcmf_bus *bus = drvr->bus_if; + const struct firmware *fw = NULL; + s32 err; + + brcmf_dbg(TRACE, "Enter\n"); + + err = brcmf_bus_get_blob(bus, &fw, BRCMF_BLOB_CLM); + if (err || !fw) { + brcmf_info("no clm_blob available (err=%d), device may have limited channels available\n", + err); + return 0; + } + + err = brcmf_c_download_blob(ifp, fw->data, fw->size, + "clmload", "clmload_status"); + + release_firmware(fw); + return err; +} + +static int brcmf_c_process_txcap_blob(struct brcmf_if *ifp) +{ + struct brcmf_pub *drvr = ifp->drvr; + struct brcmf_bus *bus = drvr->bus_if; + const struct firmware *fw = NULL; + s32 err; + + brcmf_dbg(TRACE, "Enter\n"); + + err = brcmf_bus_get_blob(bus, &fw, BRCMF_BLOB_TXCAP); + if (err || !fw) { + brcmf_info("no txcap_blob available (err=%d)\n", err); + return 0; + } + + brcmf_info("TxCap blob found, loading\n"); + err = brcmf_c_download_blob(ifp, fw->data, fw->size, + "txcapload", "txcapload_status"); + + release_firmware(fw); + return err; +} + +static int brcmf_c_process_cal_blob(struct brcmf_if *ifp) +{ + struct brcmf_pub *drvr = ifp->drvr; + struct brcmf_mp_device *settings = drvr->settings; + s32 err; + + brcmf_dbg(TRACE, "Enter\n"); + + if (!settings->cal_blob || !settings->cal_size) + return 0; + + brcmf_info("Calibration blob provided by platform, loading\n"); + err = brcmf_c_download_blob(ifp, settings->cal_blob, settings->cal_size, + "calload", "calload_status"); return err; } @@ -267,6 +312,20 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) goto done; } + /* Do TxCap downloading, if needed */ + err = brcmf_c_process_txcap_blob(ifp); + if (err < 0) { + bphy_err(drvr, "download TxCap blob file failed, %d\n", err); + goto done; + } + + /* Download external calibration blob, if available */ + err = brcmf_c_process_cal_blob(ifp); + if (err < 0) { + bphy_err(drvr, "download calibration blob file failed, %d\n", err); + goto done; + } + /* query for 'ver' to get version info from firmware */ memset(buf, 0, sizeof(buf)); err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf)); @@ -456,6 +515,7 @@ struct brcmf_mp_device *brcmf_get_module_param(struct device *dev, /* No platform data for this device, try OF and DMI data */ brcmf_dmi_probe(settings, chip, chiprev); brcmf_of_probe(dev, bus_type, settings); + brcmf_acpi_probe(dev, bus_type, settings); } return settings; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h index 15accc88d5c0..39e8abcf0522 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h @@ -51,6 +51,9 @@ struct brcmf_mp_device { struct brcmfmac_pd_cc *country_codes; const char *board_type; unsigned char mac[ETH_ALEN]; + const char *antenna_sku; + const void *cal_blob; + int cal_size; union { struct brcmfmac_sdio_pd sdio; } bus; @@ -73,6 +76,15 @@ static inline void brcmf_dmi_probe(struct brcmf_mp_device *settings, u32 chip, u32 chiprev) {} #endif +#ifdef CONFIG_ACPI +void brcmf_acpi_probe(struct device *dev, enum brcmf_bus_type bus_type, + struct brcmf_mp_device *settings); +#else +static inline void brcmf_acpi_probe(struct device *dev, + enum brcmf_bus_type bus_type, + struct brcmf_mp_device *settings) {} +#endif + u8 brcmf_map_prio_to_prec(void *cfg, u8 prio); u8 brcmf_map_prio_to_aci(void *cfg, u8 prio); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index 
d2ac844e1e9f..ac873677343c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -126,6 +126,53 @@ static void brcmf_feat_firmware_overrides(struct brcmf_pub *drv) drv->feat_flags |= feat_flags; } +struct brcmf_feat_wlcfeat { + u16 min_ver_major; + u16 min_ver_minor; + u32 feat_flags; +}; + +static const struct brcmf_feat_wlcfeat brcmf_feat_wlcfeat_map[] = { + { 12, 0, BIT(BRCMF_FEAT_PMKID_V2) }, + { 13, 0, BIT(BRCMF_FEAT_PMKID_V3) }, +}; + +static void brcmf_feat_wlc_version_overrides(struct brcmf_pub *drv) +{ + struct brcmf_if *ifp = brcmf_get_ifp(drv, 0); + const struct brcmf_feat_wlcfeat *e; + struct brcmf_wlc_version_le ver; + u32 feat_flags = 0; + int i, err, major, minor; + + err = brcmf_fil_iovar_data_get(ifp, "wlc_ver", &ver, sizeof(ver)); + if (err) + return; + + major = le16_to_cpu(ver.wlc_ver_major); + minor = le16_to_cpu(ver.wlc_ver_minor); + + brcmf_dbg(INFO, "WLC version: %d.%d\n", major, minor); + + for (i = 0; i < ARRAY_SIZE(brcmf_feat_wlcfeat_map); i++) { + e = &brcmf_feat_wlcfeat_map[i]; + if (major > e->min_ver_major || + (major == e->min_ver_major && + minor >= e->min_ver_minor)) { + feat_flags |= e->feat_flags; + } + } + + if (!feat_flags) + return; + + for (i = 0; i < BRCMF_FEAT_LAST; i++) + if (feat_flags & BIT(i)) + brcmf_dbg(INFO, "enabling firmware feature: %s\n", + brcmf_feat_names[i]); + drv->feat_flags |= feat_flags; +} + /** * brcmf_feat_iovar_int_get() - determine feature through iovar query. * @@ -288,6 +335,7 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) ifp->drvr->feat_flags |= BIT(BRCMF_FEAT_SCAN_RANDOM_MAC); brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa"); + brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_SCAN_V2, "scan_ver"); if (drvr->settings->feature_disable) { brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n", @@ -296,6 +344,7 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) ifp->drvr->feat_flags &= ~drvr->settings->feature_disable; } + brcmf_feat_wlc_version_overrides(drvr); brcmf_feat_firmware_overrides(drvr); /* set chip related quirks */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h index d1f4257af696..becbcc50d57a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h @@ -29,6 +29,7 @@ * DOT11H: firmware supports 802.11h * SAE: simultaneous authentication of equals * FWAUTH: Firmware authenticator + * SCAN_V2: Version 2 scan params */ #define BRCMF_FEAT_LIST \ BRCMF_FEAT_DEF(MBSS) \ @@ -51,7 +52,10 @@ BRCMF_FEAT_DEF(MONITOR_FMT_HW_RX_HDR) \ BRCMF_FEAT_DEF(DOT11H) \ BRCMF_FEAT_DEF(SAE) \ - BRCMF_FEAT_DEF(FWAUTH) + BRCMF_FEAT_DEF(FWAUTH) \ + BRCMF_FEAT_DEF(SCAN_V2) \ + BRCMF_FEAT_DEF(PMKID_V2) \ + BRCMF_FEAT_DEF(PMKID_V3) /* * Quirks: diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index dcbe55b56e43..fbfc9458d240 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -21,6 +21,8 @@ #define BRCMF_FW_NVRAM_DEVPATH_LEN 19 /* devpath0=pcie/1/4/ */ #define BRCMF_FW_NVRAM_PCIEDEV_LEN 10 /* pcie/1/4/ + \0 */ #define BRCMF_FW_DEFAULT_BOARDREV "boardrev=0xff" +#define BRCMF_FW_MACADDR_FMT "macaddr=%pM" +#define BRCMF_FW_MACADDR_LEN (7 + ETH_ALEN * 3) enum nvram_parser_state { IDLE, @@ -57,6 +59,7 @@ struct nvram_parser 
{ bool multi_dev_v1; bool multi_dev_v2; bool boardrev_found; + bool strip_mac; }; /* @@ -121,6 +124,10 @@ static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp) nvp->multi_dev_v2 = true; if (strncmp(&nvp->data[nvp->entry], "boardrev", 8) == 0) nvp->boardrev_found = true; + /* strip macaddr if platform MAC overrides */ + if (nvp->strip_mac && + strncmp(&nvp->data[nvp->entry], "macaddr", 7) == 0) + st = COMMENT; } else if (!is_nvram_char(c) || c == ' ') { brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n", nvp->line, nvp->column); @@ -209,6 +216,7 @@ static int brcmf_init_nvram_parser(struct nvram_parser *nvp, size = data_len; /* Add space for properties we may add */ size += strlen(BRCMF_FW_DEFAULT_BOARDREV) + 1; + size += BRCMF_FW_MACADDR_LEN + 1; /* Alloc for extra 0 byte + roundup by 4 + length field */ size += 1 + 3 + sizeof(u32); nvp->nvram = kzalloc(size, GFP_KERNEL); @@ -368,22 +376,34 @@ static void brcmf_fw_add_defaults(struct nvram_parser *nvp) nvp->nvram_len++; } +static void brcmf_fw_add_macaddr(struct nvram_parser *nvp, u8 *mac) +{ + BUG_ON(snprintf(&nvp->nvram[nvp->nvram_len], BRCMF_FW_MACADDR_LEN + 1, + BRCMF_FW_MACADDR_FMT, mac) != BRCMF_FW_MACADDR_LEN); + nvp->nvram_len += BRCMF_FW_MACADDR_LEN + 1; +} + /* brcmf_nvram_strip :Takes a buffer of "<var>=<value>\n" lines read from a fil * and ending in a NUL. Removes carriage returns, empty lines, comment lines, * and converts newlines to NULs. Shortens buffer as needed and pads with NULs. * End of buffer is completed with token identifying length of buffer. */ static void *brcmf_fw_nvram_strip(const u8 *data, size_t data_len, - u32 *new_length, u16 domain_nr, u16 bus_nr) + u32 *new_length, u16 domain_nr, u16 bus_nr, + struct device *dev) { struct nvram_parser nvp; u32 pad; u32 token; __le32 token_le; + u8 mac[ETH_ALEN]; if (brcmf_init_nvram_parser(&nvp, data, data_len) < 0) return NULL; + if (eth_platform_get_mac_address(dev, mac) == 0) + nvp.strip_mac = true; + while (nvp.pos < data_len) { nvp.state = nv_parser_states[nvp.state](&nvp); if (nvp.state == END) @@ -404,6 +424,9 @@ static void *brcmf_fw_nvram_strip(const u8 *data, size_t data_len, brcmf_fw_add_defaults(&nvp); + if (nvp.strip_mac) + brcmf_fw_add_macaddr(&nvp, mac); + pad = nvp.nvram_len; *new_length = roundup(nvp.nvram_len + 1, 4); while (pad != *new_length) { @@ -430,6 +453,7 @@ struct brcmf_fw { struct device *dev; struct brcmf_fw_request *req; u32 curpos; + unsigned int board_index; void (*done)(struct device *dev, int err, struct brcmf_fw_request *req); }; @@ -546,7 +570,8 @@ static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) if (data) nvram = brcmf_fw_nvram_strip(data, data_len, &nvram_length, fwctx->req->domain_nr, - fwctx->req->bus_nr); + fwctx->req->bus_nr, + fwctx->dev); if (free_bcm47xx_nvram) bcm47xx_nvram_release_contents(data); @@ -596,39 +621,50 @@ static int brcmf_fw_complete_request(const struct firmware *fw, static char *brcm_alt_fw_path(const char *path, const char *board_type) { - char alt_path[BRCMF_FW_NAME_LEN]; - char suffix[5]; + char base[BRCMF_FW_NAME_LEN]; + const char *suffix; + char *ret; + + if (!board_type) + return NULL; - strscpy(alt_path, path, BRCMF_FW_NAME_LEN); - /* At least one character + suffix */ - if (strlen(alt_path) < 5) + suffix = strrchr(path, '.'); + if (!suffix || suffix == path) return NULL; - /* strip .txt or .bin at the end */ - strscpy(suffix, alt_path + strlen(alt_path) - 4, 5); - alt_path[strlen(alt_path) - 4] = 0; - strlcat(alt_path, 
".", BRCMF_FW_NAME_LEN); - strlcat(alt_path, board_type, BRCMF_FW_NAME_LEN); - strlcat(alt_path, suffix, BRCMF_FW_NAME_LEN); + /* strip extension at the end */ + strscpy(base, path, BRCMF_FW_NAME_LEN); + base[suffix - path] = 0; - return kstrdup(alt_path, GFP_KERNEL); + ret = kasprintf(GFP_KERNEL, "%s.%s%s", base, board_type, suffix); + if (!ret) + brcmf_err("out of memory allocating firmware path for '%s'\n", + path); + + brcmf_dbg(TRACE, "FW alt path: %s\n", ret); + + return ret; } static int brcmf_fw_request_firmware(const struct firmware **fw, struct brcmf_fw *fwctx) { struct brcmf_fw_item *cur = &fwctx->req->items[fwctx->curpos]; + unsigned int i; int ret; - /* Files can be board-specific, first try a board-specific path */ - if (cur->type == BRCMF_FW_TYPE_NVRAM && fwctx->req->board_type) { + /* Files can be board-specific, first try board-specific paths */ + for (i = 0; i < ARRAY_SIZE(fwctx->req->board_types); i++) { char *alt_path; - alt_path = brcm_alt_fw_path(cur->path, fwctx->req->board_type); + if (!fwctx->req->board_types[i]) + goto fallback; + alt_path = brcm_alt_fw_path(cur->path, + fwctx->req->board_types[i]); if (!alt_path) goto fallback; - ret = request_firmware(fw, alt_path, fwctx->dev); + ret = firmware_request_nowarn(fw, alt_path, fwctx->dev); kfree(alt_path); if (ret == 0) return ret; @@ -662,15 +698,40 @@ static void brcmf_fw_request_done_alt_path(const struct firmware *fw, void *ctx) { struct brcmf_fw *fwctx = ctx; struct brcmf_fw_item *first = &fwctx->req->items[0]; + const char *board_type, *alt_path; int ret = 0; - /* Fall back to canonical path if board firmware not found */ - if (!fw) - ret = request_firmware_nowait(THIS_MODULE, true, first->path, + if (fw) { + brcmf_fw_request_done(fw, ctx); + return; + } + + /* Try next board firmware */ + if (fwctx->board_index < ARRAY_SIZE(fwctx->req->board_types)) { + board_type = fwctx->req->board_types[fwctx->board_index++]; + if (!board_type) + goto fallback; + alt_path = brcm_alt_fw_path(first->path, board_type); + if (!alt_path) + goto fallback; + + ret = request_firmware_nowait(THIS_MODULE, true, alt_path, fwctx->dev, GFP_KERNEL, fwctx, - brcmf_fw_request_done); + brcmf_fw_request_done_alt_path); + kfree(alt_path); + + if (ret < 0) + brcmf_fw_request_done(fw, ctx); + return; + } - if (fw || ret < 0) +fallback: + /* Fall back to canonical path if board firmware not found */ + ret = request_firmware_nowait(THIS_MODULE, true, first->path, + fwctx->dev, GFP_KERNEL, fwctx, + brcmf_fw_request_done); + + if (ret < 0) brcmf_fw_request_done(fw, ctx); } @@ -714,10 +775,11 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req, fwctx->done = fw_cb; /* First try alternative board-specific path if any */ - if (fwctx->req->board_type) + if (fwctx->req->board_types[0]) alt_path = brcm_alt_fw_path(first->path, - fwctx->req->board_type); + fwctx->req->board_types[0]); if (alt_path) { + fwctx->board_index++; ret = request_firmware_nowait(THIS_MODULE, true, alt_path, fwctx->dev, GFP_KERNEL, fwctx, brcmf_fw_request_done_alt_path); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h index e290dec9c53d..1266cbaee072 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h @@ -11,6 +11,8 @@ #define BRCMF_FW_DEFAULT_PATH "brcm/" +#define BRCMF_FW_MAX_BOARD_TYPES 8 + /** * struct brcmf_firmware_mapping - Used to map chipid/revmask to firmware * filename and 
nvram filename. Each bus type implementation should create @@ -66,7 +68,7 @@ struct brcmf_fw_request { u16 domain_nr; u16 bus_nr; u32 n_items; - const char *board_type; + const char *board_types[BRCMF_FW_MAX_BOARD_TYPES]; struct brcmf_fw_item items[]; }; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index c87b829adb0d..1d406649eca2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h @@ -48,6 +48,10 @@ /* size of brcmf_scan_params not including variable length array */ #define BRCMF_SCAN_PARAMS_FIXED_SIZE 64 +#define BRCMF_SCAN_PARAMS_V2_FIXED_SIZE 72 + +/* version of brcmf_scan_params structure */ +#define BRCMF_SCAN_PARAMS_VERSION_V2 2 /* masks for channel and ssid count */ #define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff @@ -67,6 +71,7 @@ #define BRCMF_PRIMARY_KEY (1 << 1) #define DOT11_BSSTYPE_ANY 2 #define BRCMF_ESCAN_REQ_VERSION 1 +#define BRCMF_ESCAN_REQ_VERSION_V2 2 #define BRCMF_MAXRATES_IN_SET 16 /* max # of rates in rateset */ @@ -169,6 +174,10 @@ #define BRCMF_HE_CAP_MCS_MAP_NSS_MAX 8 +#define BRCMF_PMKSA_VER_2 2 +#define BRCMF_PMKSA_VER_3 3 +#define BRCMF_PMKSA_NO_EXPIRY 0xffffffff + /* MAX_CHUNK_LEN is the maximum length for data passing to firmware in each * ioctl. It is relatively small because firmware has small maximum size input * playload restriction for ioctls. @@ -350,6 +359,12 @@ struct brcmf_ssid_le { unsigned char SSID[IEEE80211_MAX_SSID_LEN]; }; +/* Alternate SSID structure used in some places... */ +struct brcmf_ssid8_le { + u8 SSID_len; + unsigned char SSID[IEEE80211_MAX_SSID_LEN]; +}; + struct brcmf_scan_params_le { struct brcmf_ssid_le ssid_le; /* default: {0, ""} */ u8 bssid[ETH_ALEN]; /* default: bcast */ @@ -386,6 +401,45 @@ struct brcmf_scan_params_le { __le16 channel_list[1]; /* list of chanspecs */ }; +struct brcmf_scan_params_v2_le { + __le16 version; /* structure version */ + __le16 length; /* structure length */ + struct brcmf_ssid_le ssid_le; /* default: {0, ""} */ + u8 bssid[ETH_ALEN]; /* default: bcast */ + s8 bss_type; /* default: any, + * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT + */ + u8 pad; + __le32 scan_type; /* flags, 0 use default */ + __le32 nprobes; /* -1 use default, number of probes per channel */ + __le32 active_time; /* -1 use default, dwell time per channel for + * active scanning + */ + __le32 passive_time; /* -1 use default, dwell time per channel + * for passive scanning + */ + __le32 home_time; /* -1 use default, dwell time for the + * home channel between channel scans + */ + __le32 channel_num; /* count of channels and ssids that follow + * + * low half is count of channels in + * channel_list, 0 means default (use all + * available channels) + * + * high half is entries in struct brcmf_ssid + * array that follows channel_list, aligned for + * s32 (4 bytes) meaning an odd channel count + * implies a 2-byte pad between end of + * channel_list and first ssid + * + * if ssid count is zero, single ssid in the + * fixed parameter portion is assumed, otherwise + * ssid in the fixed portion is ignored + */ + __le16 channel_list[1]; /* list of chanspecs */ +}; + struct brcmf_scan_results { u32 buflen; u32 version; @@ -397,7 +451,10 @@ struct brcmf_escan_params_le { __le32 version; __le16 action; __le16 sync_id; - struct brcmf_scan_params_le params_le; + union { + struct brcmf_scan_params_le params_le; + struct brcmf_scan_params_v2_le params_v2_le; + }; 
}; struct brcmf_escan_result_le { @@ -517,7 +574,7 @@ struct brcmf_wsec_key_le { struct brcmf_wsec_pmk_le { __le16 key_len; __le16 flags; - u8 key[2 * BRCMF_WSEC_MAX_PSK_LEN + 1]; + u8 key[BRCMF_WSEC_MAX_SAE_PASSWORD_LEN]; }; /** @@ -742,6 +799,31 @@ struct brcmf_rev_info_le { }; /** + * struct brcmf_wlc_version_le - firmware revision info. + * + * @version: structure version. + * @length: structure length. + * @epi_ver_major: EPI major version + * @epi_ver_minor: EPI minor version + * @epi_ver_rc: EPI rc version + * @epi_ver_incr: EPI increment version + * @wlc_ver_major: WLC major version + * @wlc_ver_minor: WLC minor version + */ +struct brcmf_wlc_version_le { + __le16 version; + __le16 length; + + __le16 epi_ver_major; + __le16 epi_ver_minor; + __le16 epi_ver_rc; + __le16 epi_ver_incr; + + __le16 wlc_ver_major; + __le16 wlc_ver_minor; +}; + +/** * struct brcmf_assoclist_le - request assoc list. * * @count: indicates number of stations. @@ -804,6 +886,51 @@ struct brcmf_pmksa { }; /** + * struct brcmf_pmksa_v2 - PMK Security Association + * + * @length: Length of the structure. + * @bssid: The AP's BSSID. + * @pmkid: The PMK ID. + * @pmk: PMK material for FILS key derivation. + * @pmk_len: Length of PMK data. + * @ssid: The AP's SSID. + * @fils_cache_id: FILS cache identifier + */ +struct brcmf_pmksa_v2 { + __le16 length; + u8 bssid[ETH_ALEN]; + u8 pmkid[WLAN_PMKID_LEN]; + u8 pmk[WLAN_PMK_LEN_SUITE_B_192]; + __le16 pmk_len; + struct brcmf_ssid8_le ssid; + u16 fils_cache_id; +}; + +/** + * struct brcmf_pmksa_v3 - PMK Security Association + * + * @bssid: The AP's BSSID. + * @pmkid: The PMK ID. + * @pmkid_len: The length of the PMK ID. + * @pmk: PMK material for FILS key derivation. + * @pmk_len: Length of PMK data. + * @fils_cache_id: FILS cache identifier + * @ssid: The AP's SSID. + * @time_left: Remaining time until expiry. 0 = expired, ~0 = no expiry. + */ +struct brcmf_pmksa_v3 { + u8 bssid[ETH_ALEN]; + u8 pmkid[WLAN_PMKID_LEN]; + u8 pmkid_len; + u8 pmk[WLAN_PMK_LEN_SUITE_B_192]; + u8 pmk_len; + __le16 fils_cache_id; + u8 pad; + struct brcmf_ssid8_le ssid; + __le32 time_left; +}; + +/** * struct brcmf_pmk_list_le - List of pmksa's. * * @npmk: Number of pmksa's. @@ -815,6 +942,34 @@ struct brcmf_pmk_list_le { }; /** + * struct brcmf_pmk_list_v2_le - List of pmksa's. + * + * @version: Request version. + * @length: Length of this structure. + * @pmk: PMK SA information. + */ +struct brcmf_pmk_list_v2_le { + __le16 version; + __le16 length; + struct brcmf_pmksa_v2 pmk[BRCMF_MAXPMKID]; +}; + +/** + * struct brcmf_pmk_op_v3_le - Operation on PMKSA list. + * + * @version: Request version. + * @length: Length of this structure. + * @pmk: PMK SA information. + */ +struct brcmf_pmk_op_v3_le { + __le16 version; + __le16 length; + __le16 count; + __le16 pad; + struct brcmf_pmksa_v3 pmk[BRCMF_MAXPMKID]; +}; + +/** * struct brcmf_pno_param_le - PNO scan configuration parameters * * @version: PNO parameters version. 
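The firmware.c and firmware.h changes above replace the single board_type with a board_types[] array: for each requested file the driver now walks every non-NULL entry in order, splicing the board type between the basename and the extension of the canonical path, and only then falls back to the canonical name (using firmware_request_nowarn() so the misses stay quiet). A minimal userspace-style sketch of that name derivation; the helper name and the sample strings are illustrative, not taken from the driver:

    #include <stdio.h>
    #include <string.h>

    /* Mirrors the "%s.%s%s" construction in brcm_alt_fw_path(): strip the
     * extension, insert the board type, re-append the extension.
     */
    static int alt_fw_path(char *out, size_t outsz, const char *path,
                           const char *board_type)
    {
        const char *suffix = strrchr(path, '.');

        if (!suffix || suffix == path)
            return -1;

        return snprintf(out, outsz, "%.*s.%s%s",
                        (int)(suffix - path), path, board_type, suffix);
    }

    int main(void)
    {
        char buf[128];

        /* "brcm/brcmfmac4378b1-pcie.bin" + "apple,honshu" (made-up board type)
         *   -> "brcm/brcmfmac4378b1-pcie.apple,honshu.bin"
         */
        alt_fw_path(buf, sizeof(buf), "brcm/brcmfmac4378b1-pcie.bin",
                    "apple,honshu");
        puts(buf);
        return 0;
    }

The same splicing now applies to every item in the request (NVRAM, CLM and TXCAP blobs included), since the board-specific loop is no longer restricted to BRCMF_FW_TYPE_NVRAM entries.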
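The new brcmf_scan_params_v2_le and the union in brcmf_escan_params_le let one request carry either layout: the caller sets the escan version to BRCMF_ESCAN_REQ_VERSION or BRCMF_ESCAN_REQ_VERSION_V2 and fills the matching union member, the v2 member additionally carrying its own version/length header. A hedged sketch of filling the v2 fixed portion with the defaults documented in the structure comments (the helper is illustrative, not the driver's cfg80211 code, and assumes the fwil_types.h definitions above):

    #include <linux/etherdevice.h>
    #include <linux/string.h>

    /* Illustrative only: populate the fixed part of a v2 scan request with the
     * "use firmware default" values described in the structure comments.
     */
    static void example_fill_scan_params_v2(struct brcmf_scan_params_v2_le *p)
    {
        memset(p, 0, sizeof(*p));

        p->version = cpu_to_le16(BRCMF_SCAN_PARAMS_VERSION_V2);
        p->length = cpu_to_le16(BRCMF_SCAN_PARAMS_V2_FIXED_SIZE);
        eth_broadcast_addr(p->bssid);        /* default: broadcast BSSID */
        p->bss_type = DOT11_BSSTYPE_ANY;
        p->scan_type = cpu_to_le32(0);       /* 0 = default scan flags */
        p->nprobes = cpu_to_le32(-1);        /* -1 = use firmware default */
        p->active_time = cpu_to_le32(-1);
        p->passive_time = cpu_to_le32(-1);
        p->home_time = cpu_to_le32(-1);
        p->channel_num = cpu_to_le32(0);     /* 0 = all channels, no extra SSIDs */
    }

As in the v1 layout, a single SSID in the fixed portion (ssid_le) is assumed when the SSID count in the high half of channel_num is zero.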
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h index 2e322edbb907..6a849f4a94dd 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h @@ -8,10 +8,10 @@ #ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 64 -#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 512 +#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 1024 #define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM 64 #define BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM 1024 -#define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM 512 +#define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM 1024 #define BRCMF_H2D_TXFLOWRING_MAX_ITEM 512 #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE 40 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c index 083ac58f466d..60a97277d798 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c @@ -64,14 +64,31 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type, { struct brcmfmac_sdio_pd *sdio = &settings->bus.sdio; struct device_node *root, *np = dev->of_node; + const char *prop; int irq; int err; u32 irqf; u32 val; + /* Apple ARM64 platforms have their own idea of board type, passed in + * via the device tree. They also have an antenna SKU parameter + */ + if (!of_property_read_string(np, "brcm,board-type", &prop)) + settings->board_type = prop; + + if (!of_property_read_string(np, "apple,antenna-sku", &prop)) + settings->antenna_sku = prop; + + /* The WLAN calibration blob is normally stored in SROM, but Apple + * ARM64 platforms pass it via the DT instead. 
+ */ + prop = of_get_property(np, "brcm,cal-blob", &settings->cal_size); + if (prop && settings->cal_size) + settings->cal_blob = prop; + /* Set board-type to the first string of the machine compatible prop */ root = of_find_node_by_path("/"); - if (root) { + if (root && !settings->board_type) { int i; char *board_type; const char *tmp; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 97f0f13dfe50..d26a412c9ca3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -13,6 +13,7 @@ #include <linux/bcma/bcma.h> #include <linux/sched.h> #include <linux/io.h> +#include <linux/random.h> #include <asm/unaligned.h> #include <soc.h> @@ -49,16 +50,22 @@ enum brcmf_pcie_state { BRCMF_FW_DEF(43602, "brcmfmac43602-pcie"); BRCMF_FW_DEF(4350, "brcmfmac4350-pcie"); BRCMF_FW_DEF(4350C, "brcmfmac4350c2-pcie"); +BRCMF_FW_CLM_DEF(4355C1, "brcmfmac4355c1-pcie"); BRCMF_FW_CLM_DEF(4356, "brcmfmac4356-pcie"); BRCMF_FW_CLM_DEF(43570, "brcmfmac43570-pcie"); BRCMF_FW_DEF(4358, "brcmfmac4358-pcie"); BRCMF_FW_DEF(4359, "brcmfmac4359-pcie"); -BRCMF_FW_DEF(4364, "brcmfmac4364-pcie"); +BRCMF_FW_CLM_DEF(4364B2, "brcmfmac4364b2-pcie"); +BRCMF_FW_CLM_DEF(4364B3, "brcmfmac4364b3-pcie"); BRCMF_FW_DEF(4365B, "brcmfmac4365b-pcie"); BRCMF_FW_DEF(4365C, "brcmfmac4365c-pcie"); BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie"); BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie"); BRCMF_FW_DEF(4371, "brcmfmac4371-pcie"); +BRCMF_FW_CLM_DEF(4377B3, "brcmfmac4377b3-pcie"); +BRCMF_FW_CLM_DEF(4378B1, "brcmfmac4378b1-pcie"); +BRCMF_FW_CLM_DEF(4378B3, "brcmfmac4378b3-pcie"); +BRCMF_FW_CLM_DEF(4387C2, "brcmfmac4387c2-pcie"); /* firmware config files */ MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.txt"); @@ -66,6 +73,8 @@ MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txt"); /* per-board firmware binaries */ MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.bin"); +MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.clm_blob"); +MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txcap_blob"); static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602), @@ -73,13 +82,15 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C), BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350), BRCMF_FW_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C), + BRCMF_FW_ENTRY(BRCM_CC_4355_CHIP_ID, 0xFFFFFFFF, 4355C1), /* 12 */ BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356), BRCMF_FW_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570), BRCMF_FW_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570), BRCMF_FW_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570), BRCMF_FW_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358), BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359), - BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0xFFFFFFFF, 4364), + BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0x0000000F, 4364B2), /* 3 */ + BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0xFFFFFFF0, 4364B3), /* 4 */ BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B), BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C), BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B), @@ -87,6 +98,10 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C), BRCMF_FW_ENTRY(BRCM_CC_43666_CHIP_ID, 0xFFFFFFF0, 4366C), BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 
0xFFFFFFFF, 4371), + BRCMF_FW_ENTRY(BRCM_CC_4377_CHIP_ID, 0xFFFFFFFF, 4377B3), /* 4 */ + BRCMF_FW_ENTRY(BRCM_CC_4378_CHIP_ID, 0x0000000F, 4378B1), /* 3 */ + BRCMF_FW_ENTRY(BRCM_CC_4378_CHIP_ID, 0xFFFFFFE0, 4378B3), /* 5 */ + BRCMF_FW_ENTRY(BRCM_CC_4387_CHIP_ID, 0xFFFFFFFF, 4387C2), /* 7 */ }; #define BRCMF_PCIE_FW_UP_TIMEOUT 5000 /* msec */ @@ -118,6 +133,12 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { #define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0 0x140 #define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1 0x144 +#define BRCMF_PCIE_64_PCIE2REG_INTMASK 0xC14 +#define BRCMF_PCIE_64_PCIE2REG_MAILBOXINT 0xC30 +#define BRCMF_PCIE_64_PCIE2REG_MAILBOXMASK 0xC34 +#define BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_0 0xA20 +#define BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_1 0xA24 + #define BRCMF_PCIE2_INTA 0x01 #define BRCMF_PCIE2_INTB 0x02 @@ -137,6 +158,8 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { #define BRCMF_PCIE_MB_INT_D2H3_DB0 0x400000 #define BRCMF_PCIE_MB_INT_D2H3_DB1 0x800000 +#define BRCMF_PCIE_MB_INT_FN0 (BRCMF_PCIE_MB_INT_FN0_0 | \ + BRCMF_PCIE_MB_INT_FN0_1) #define BRCMF_PCIE_MB_INT_D2H_DB (BRCMF_PCIE_MB_INT_D2H0_DB0 | \ BRCMF_PCIE_MB_INT_D2H0_DB1 | \ BRCMF_PCIE_MB_INT_D2H1_DB0 | \ @@ -146,6 +169,40 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { BRCMF_PCIE_MB_INT_D2H3_DB0 | \ BRCMF_PCIE_MB_INT_D2H3_DB1) +#define BRCMF_PCIE_64_MB_INT_D2H0_DB0 0x1 +#define BRCMF_PCIE_64_MB_INT_D2H0_DB1 0x2 +#define BRCMF_PCIE_64_MB_INT_D2H1_DB0 0x4 +#define BRCMF_PCIE_64_MB_INT_D2H1_DB1 0x8 +#define BRCMF_PCIE_64_MB_INT_D2H2_DB0 0x10 +#define BRCMF_PCIE_64_MB_INT_D2H2_DB1 0x20 +#define BRCMF_PCIE_64_MB_INT_D2H3_DB0 0x40 +#define BRCMF_PCIE_64_MB_INT_D2H3_DB1 0x80 +#define BRCMF_PCIE_64_MB_INT_D2H4_DB0 0x100 +#define BRCMF_PCIE_64_MB_INT_D2H4_DB1 0x200 +#define BRCMF_PCIE_64_MB_INT_D2H5_DB0 0x400 +#define BRCMF_PCIE_64_MB_INT_D2H5_DB1 0x800 +#define BRCMF_PCIE_64_MB_INT_D2H6_DB0 0x1000 +#define BRCMF_PCIE_64_MB_INT_D2H6_DB1 0x2000 +#define BRCMF_PCIE_64_MB_INT_D2H7_DB0 0x4000 +#define BRCMF_PCIE_64_MB_INT_D2H7_DB1 0x8000 + +#define BRCMF_PCIE_64_MB_INT_D2H_DB (BRCMF_PCIE_64_MB_INT_D2H0_DB0 | \ + BRCMF_PCIE_64_MB_INT_D2H0_DB1 | \ + BRCMF_PCIE_64_MB_INT_D2H1_DB0 | \ + BRCMF_PCIE_64_MB_INT_D2H1_DB1 | \ + BRCMF_PCIE_64_MB_INT_D2H2_DB0 | \ + BRCMF_PCIE_64_MB_INT_D2H2_DB1 | \ + BRCMF_PCIE_64_MB_INT_D2H3_DB0 | \ + BRCMF_PCIE_64_MB_INT_D2H3_DB1 | \ + BRCMF_PCIE_64_MB_INT_D2H4_DB0 | \ + BRCMF_PCIE_64_MB_INT_D2H4_DB1 | \ + BRCMF_PCIE_64_MB_INT_D2H5_DB0 | \ + BRCMF_PCIE_64_MB_INT_D2H5_DB1 | \ + BRCMF_PCIE_64_MB_INT_D2H6_DB0 | \ + BRCMF_PCIE_64_MB_INT_D2H6_DB1 | \ + BRCMF_PCIE_64_MB_INT_D2H7_DB0 | \ + BRCMF_PCIE_64_MB_INT_D2H7_DB1) + #define BRCMF_PCIE_SHARED_VERSION_7 7 #define BRCMF_PCIE_MIN_SHARED_VERSION 5 #define BRCMF_PCIE_MAX_SHARED_VERSION BRCMF_PCIE_SHARED_VERSION_7 @@ -255,12 +312,26 @@ struct brcmf_pcie_core_info { u32 wrapbase; }; +#define BRCMF_OTP_MAX_PARAM_LEN 16 + +struct brcmf_otp_params { + char module[BRCMF_OTP_MAX_PARAM_LEN]; + char vendor[BRCMF_OTP_MAX_PARAM_LEN]; + char version[BRCMF_OTP_MAX_PARAM_LEN]; + bool valid; +}; + struct brcmf_pciedev_info { enum brcmf_pcie_state state; bool in_irq; struct pci_dev *pdev; char fw_name[BRCMF_FW_NAME_LEN]; char nvram_name[BRCMF_FW_NAME_LEN]; + char clm_name[BRCMF_FW_NAME_LEN]; + char txcap_name[BRCMF_FW_NAME_LEN]; + const struct firmware *clm_fw; + const struct firmware *txcap_fw; + const struct brcmf_pcie_reginfo *reginfo; void __iomem *regs; void __iomem *tcm; u32 ram_base; @@ -280,6 +351,7 @@ struct 
brcmf_pciedev_info { void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset, u16 value); struct brcmf_mp_device *settings; + struct brcmf_otp_params otp; }; struct brcmf_pcie_ringbuf { @@ -346,11 +418,49 @@ static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = { BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE }; +struct brcmf_pcie_reginfo { + u32 intmask; + u32 mailboxint; + u32 mailboxmask; + u32 h2d_mailbox_0; + u32 h2d_mailbox_1; + u32 int_d2h_db; + u32 int_fn0; +}; + +static const struct brcmf_pcie_reginfo brcmf_reginfo_default = { + .intmask = BRCMF_PCIE_PCIE2REG_INTMASK, + .mailboxint = BRCMF_PCIE_PCIE2REG_MAILBOXINT, + .mailboxmask = BRCMF_PCIE_PCIE2REG_MAILBOXMASK, + .h2d_mailbox_0 = BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0, + .h2d_mailbox_1 = BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1, + .int_d2h_db = BRCMF_PCIE_MB_INT_D2H_DB, + .int_fn0 = BRCMF_PCIE_MB_INT_FN0, +}; + +static const struct brcmf_pcie_reginfo brcmf_reginfo_64 = { + .intmask = BRCMF_PCIE_64_PCIE2REG_INTMASK, + .mailboxint = BRCMF_PCIE_64_PCIE2REG_MAILBOXINT, + .mailboxmask = BRCMF_PCIE_64_PCIE2REG_MAILBOXMASK, + .h2d_mailbox_0 = BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_0, + .h2d_mailbox_1 = BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_1, + .int_d2h_db = BRCMF_PCIE_64_MB_INT_D2H_DB, + .int_fn0 = 0, +}; + static void brcmf_pcie_setup(struct device *dev, int ret, struct brcmf_fw_request *fwreq); static struct brcmf_fw_request * brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo); +static u16 +brcmf_pcie_read_reg16(struct brcmf_pciedev_info *devinfo, u32 reg_offset) +{ + void __iomem *address = devinfo->regs + reg_offset; + + return ioread16(address); +} + static u32 brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset) { @@ -496,6 +606,8 @@ brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset, } +#define READCC32(devinfo, reg) brcmf_pcie_read_reg32(devinfo, \ + CHIPCREGOFFS(reg)) #define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \ CHIPCREGOFFS(reg), value) @@ -779,30 +891,29 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo, static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo) { - brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, 0); + brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxmask, 0); } static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo) { - brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, - BRCMF_PCIE_MB_INT_D2H_DB | - BRCMF_PCIE_MB_INT_FN0_0 | - BRCMF_PCIE_MB_INT_FN0_1); + brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxmask, + devinfo->reginfo->int_d2h_db | + devinfo->reginfo->int_fn0); } static void brcmf_pcie_hostready(struct brcmf_pciedev_info *devinfo) { if (devinfo->shared.flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1) brcmf_pcie_write_reg32(devinfo, - BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1, 1); + devinfo->reginfo->h2d_mailbox_1, 1); } static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg) { struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg; - if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) { + if (brcmf_pcie_read_reg32(devinfo, devinfo->reginfo->mailboxint)) { brcmf_pcie_intr_disable(devinfo); brcmf_dbg(PCIE, "Enter\n"); return IRQ_WAKE_THREAD; @@ -817,15 +928,14 @@ static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg) u32 status; devinfo->in_irq = true; - status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT); + status = brcmf_pcie_read_reg32(devinfo, 
devinfo->reginfo->mailboxint); brcmf_dbg(PCIE, "Enter %x\n", status); if (status) { - brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, + brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxint, status); - if (status & (BRCMF_PCIE_MB_INT_FN0_0 | - BRCMF_PCIE_MB_INT_FN0_1)) + if (status & devinfo->reginfo->int_fn0) brcmf_pcie_handle_mb_data(devinfo); - if (status & BRCMF_PCIE_MB_INT_D2H_DB) { + if (status & devinfo->reginfo->int_d2h_db) { if (devinfo->state == BRCMFMAC_PCIE_STATE_UP) brcmf_proto_msgbuf_rx_trigger( &devinfo->pdev->dev); @@ -884,8 +994,8 @@ static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo) if (devinfo->in_irq) brcmf_err(bus, "Still in IRQ (processing) !!!\n"); - status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT); - brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, status); + status = brcmf_pcie_read_reg32(devinfo, devinfo->reginfo->mailboxint); + brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxint, status); devinfo->irq_allocated = false; } @@ -937,7 +1047,7 @@ static int brcmf_pcie_ring_mb_ring_bell(void *ctx) brcmf_dbg(PCIE, "RING !\n"); /* Any arbitrary value will do, lets use 1 */ - brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0, 1); + brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->h2d_mailbox_0, 1); return 0; } @@ -1382,23 +1492,29 @@ static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len) return 0; } -static -int brcmf_pcie_get_fwname(struct device *dev, const char *ext, u8 *fw_name) +static int brcmf_pcie_get_blob(struct device *dev, const struct firmware **fw, + enum brcmf_blob_type type) { struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_fw_request *fwreq; - struct brcmf_fw_name fwnames[] = { - { ext, fw_name }, - }; + struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie; + struct brcmf_pciedev_info *devinfo = buspub->devinfo; - fwreq = brcmf_fw_alloc_request(bus_if->chip, bus_if->chiprev, - brcmf_pcie_fwnames, - ARRAY_SIZE(brcmf_pcie_fwnames), - fwnames, ARRAY_SIZE(fwnames)); - if (!fwreq) - return -ENOMEM; + switch (type) { + case BRCMF_BLOB_CLM: + *fw = devinfo->clm_fw; + devinfo->clm_fw = NULL; + break; + case BRCMF_BLOB_TXCAP: + *fw = devinfo->txcap_fw; + devinfo->txcap_fw = NULL; + break; + default: + return -ENOENT; + } + + if (!*fw) + return -ENOENT; - kfree(fwreq); return 0; } @@ -1445,7 +1561,7 @@ static const struct brcmf_bus_ops brcmf_pcie_bus_ops = { .wowl_config = brcmf_pcie_wowl_config, .get_ramsize = brcmf_pcie_get_ramsize, .get_memdump = brcmf_pcie_get_memdump, - .get_fwname = brcmf_pcie_get_fwname, + .get_blob = brcmf_pcie_get_blob, .reset = brcmf_pcie_reset, }; @@ -1527,6 +1643,13 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo, return 0; } +struct brcmf_random_seed_footer { + __le32 length; + __le32 magic; +}; + +#define BRCMF_RANDOM_SEED_MAGIC 0xfeedc0de +#define BRCMF_RANDOM_SEED_LENGTH 0x100 static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo, const struct firmware *fw, void *nvram, @@ -1558,11 +1681,32 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo, brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0); if (nvram) { + size_t rand_len = BRCMF_RANDOM_SEED_LENGTH; + struct brcmf_random_seed_footer footer = { + .length = cpu_to_le32(rand_len), + .magic = cpu_to_le32(BRCMF_RANDOM_SEED_MAGIC), + }; + void *randbuf; + brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name); address = devinfo->ci->rambase + 
devinfo->ci->ramsize - nvram_len; memcpy_toio(devinfo->tcm + address, nvram, nvram_len); brcmf_fw_nvram_free(nvram); + + /* Some Apple chips/firmwares expect a buffer of random data + * to be present before NVRAM + */ + brcmf_dbg(PCIE, "Download random seed\n"); + + address -= sizeof(footer); + memcpy_toio(devinfo->tcm + address, &footer, sizeof(footer)); + + address -= rand_len; + randbuf = kzalloc(rand_len, GFP_KERNEL); + get_random_bytes(randbuf, rand_len); + memcpy_toio(devinfo->tcm + address, randbuf, rand_len); + kfree(randbuf); } else { brcmf_dbg(PCIE, "No matching NVRAM file found %s\n", devinfo->nvram_name); @@ -1698,15 +1842,22 @@ static int brcmf_pcie_buscoreprep(void *ctx) static int brcmf_pcie_buscore_reset(void *ctx, struct brcmf_chip *chip) { struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx; - u32 val; + struct brcmf_core *core; + u32 val, reg; devinfo->ci = chip; brcmf_pcie_reset_device(devinfo); - val = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT); + /* reginfo is not ready yet */ + core = brcmf_chip_get_core(chip, BCMA_CORE_PCIE2); + if (core->rev >= 64) + reg = BRCMF_PCIE_64_PCIE2REG_MAILBOXINT; + else + reg = BRCMF_PCIE_PCIE2REG_MAILBOXINT; + + val = brcmf_pcie_read_reg32(devinfo, reg); if (val != 0xffffffff) - brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, - val); + brcmf_pcie_write_reg32(devinfo, reg, val); return 0; } @@ -1729,8 +1880,223 @@ static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = { .write32 = brcmf_pcie_buscore_write32, }; +#define BRCMF_OTP_SYS_VENDOR 0x15 +#define BRCMF_OTP_BRCM_CIS 0x80 + +#define BRCMF_OTP_VENDOR_HDR 0x00000008 + +static int +brcmf_pcie_parse_otp_sys_vendor(struct brcmf_pciedev_info *devinfo, + u8 *data, size_t size) +{ + int idx = 4; + const char *chip_params; + const char *board_params; + const char *p; + + /* 4-byte header and two empty strings */ + if (size < 6) + return -EINVAL; + + if (get_unaligned_le32(data) != BRCMF_OTP_VENDOR_HDR) + return -EINVAL; + + chip_params = &data[idx]; + + /* Skip first string, including terminator */ + idx += strnlen(chip_params, size - idx) + 1; + if (idx >= size) + return -EINVAL; + + board_params = &data[idx]; + + /* Skip to terminator of second string */ + idx += strnlen(board_params, size - idx); + if (idx >= size) + return -EINVAL; + + /* At this point both strings are guaranteed NUL-terminated */ + brcmf_dbg(PCIE, "OTP: chip_params='%s' board_params='%s'\n", + chip_params, board_params); + + p = skip_spaces(board_params); + while (*p) { + char tag = *p++; + const char *end; + size_t len; + + if (*p++ != '=') /* implicit NUL check */ + return -EINVAL; + + /* *p might be NUL here, if so end == p and len == 0 */ + end = strchrnul(p, ' '); + len = end - p; + + /* leave 1 byte for NUL in destination string */ + if (len > (BRCMF_OTP_MAX_PARAM_LEN - 1)) + return -EINVAL; + + /* Copy len characters plus a NUL terminator */ + switch (tag) { + case 'M': + strscpy(devinfo->otp.module, p, len + 1); + break; + case 'V': + strscpy(devinfo->otp.vendor, p, len + 1); + break; + case 'm': + strscpy(devinfo->otp.version, p, len + 1); + break; + } + + /* Skip to next arg, if any */ + p = skip_spaces(end); + } + + brcmf_dbg(PCIE, "OTP: module=%s vendor=%s version=%s\n", + devinfo->otp.module, devinfo->otp.vendor, + devinfo->otp.version); + + if (!devinfo->otp.module[0] || + !devinfo->otp.vendor[0] || + !devinfo->otp.version[0]) + return -EINVAL; + + devinfo->otp.valid = true; + return 0; +} + +static int +brcmf_pcie_parse_otp(struct 
brcmf_pciedev_info *devinfo, u8 *otp, size_t size) +{ + int p = 0; + int ret = -EINVAL; + + brcmf_dbg(PCIE, "parse_otp size=%ld\n", size); + + while (p < (size - 1)) { + u8 type = otp[p]; + u8 length = otp[p + 1]; + + if (type == 0) + break; + + if ((p + 2 + length) > size) + break; + + switch (type) { + case BRCMF_OTP_SYS_VENDOR: + brcmf_dbg(PCIE, "OTP @ 0x%x (%d): SYS_VENDOR\n", + p, length); + ret = brcmf_pcie_parse_otp_sys_vendor(devinfo, + &otp[p + 2], + length); + break; + case BRCMF_OTP_BRCM_CIS: + brcmf_dbg(PCIE, "OTP @ 0x%x (%d): BRCM_CIS\n", + p, length); + break; + default: + brcmf_dbg(PCIE, "OTP @ 0x%x (%d): Unknown type 0x%x\n", + p, length, type); + break; + } + + p += 2 + length; + } + + return ret; +} + +static int brcmf_pcie_read_otp(struct brcmf_pciedev_info *devinfo) +{ + const struct pci_dev *pdev = devinfo->pdev; + struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev); + u32 coreid, base, words, idx, sromctl; + u16 *otp; + struct brcmf_core *core; + int ret; + + switch (devinfo->ci->chip) { + case BRCM_CC_4355_CHIP_ID: + coreid = BCMA_CORE_CHIPCOMMON; + base = 0x8c0; + words = 0xb2; + break; + case BRCM_CC_4364_CHIP_ID: + coreid = BCMA_CORE_CHIPCOMMON; + base = 0x8c0; + words = 0x1a0; + break; + case BRCM_CC_4377_CHIP_ID: + case BRCM_CC_4378_CHIP_ID: + coreid = BCMA_CORE_GCI; + base = 0x1120; + words = 0x170; + break; + case BRCM_CC_4387_CHIP_ID: + coreid = BCMA_CORE_GCI; + base = 0x113c; + words = 0x170; + break; + default: + /* OTP not supported on this chip */ + return 0; + } + + core = brcmf_chip_get_core(devinfo->ci, coreid); + if (!core) { + brcmf_err(bus, "No OTP core\n"); + return -ENODEV; + } + + if (coreid == BCMA_CORE_CHIPCOMMON) { + /* Chips with OTP accessed via ChipCommon need additional + * handling to access the OTP + */ + brcmf_pcie_select_core(devinfo, coreid); + sromctl = READCC32(devinfo, sromcontrol); + + if (!(sromctl & BCMA_CC_SROM_CONTROL_OTP_PRESENT)) { + /* Chip lacks OTP, try without it... 
*/ + brcmf_err(bus, + "OTP unavailable, using default firmware\n"); + return 0; + } + + /* Map OTP to shadow area */ + WRITECC32(devinfo, sromcontrol, + sromctl | BCMA_CC_SROM_CONTROL_OTPSEL); + } + + otp = kcalloc(words, sizeof(u16), GFP_KERNEL); + if (!otp) + return -ENOMEM; + + /* Map bus window to SROM/OTP shadow area in core */ + base = brcmf_pcie_buscore_prep_addr(devinfo->pdev, base + core->base); + + brcmf_dbg(PCIE, "OTP data:\n"); + for (idx = 0; idx < words; idx++) { + otp[idx] = brcmf_pcie_read_reg16(devinfo, base + 2 * idx); + brcmf_dbg(PCIE, "[%8x] 0x%04x\n", base + 2 * idx, otp[idx]); + } + + if (coreid == BCMA_CORE_CHIPCOMMON) { + brcmf_pcie_select_core(devinfo, coreid); + WRITECC32(devinfo, sromcontrol, sromctl); + } + + ret = brcmf_pcie_parse_otp(devinfo, (u8 *)otp, 2 * words); + kfree(otp); + + return ret; +} + #define BRCMF_PCIE_FW_CODE 0 #define BRCMF_PCIE_FW_NVRAM 1 +#define BRCMF_PCIE_FW_CLM 2 +#define BRCMF_PCIE_FW_TXCAP 3 static void brcmf_pcie_setup(struct device *dev, int ret, struct brcmf_fw_request *fwreq) @@ -1755,6 +2121,8 @@ static void brcmf_pcie_setup(struct device *dev, int ret, fw = fwreq->items[BRCMF_PCIE_FW_CODE].binary; nvram = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.data; nvram_len = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.len; + devinfo->clm_fw = fwreq->items[BRCMF_PCIE_FW_CLM].binary; + devinfo->txcap_fw = fwreq->items[BRCMF_PCIE_FW_TXCAP].binary; kfree(fwreq); ret = brcmf_chip_get_raminfo(devinfo->ci); @@ -1830,6 +2198,8 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo) struct brcmf_fw_name fwnames[] = { { ".bin", devinfo->fw_name }, { ".txt", devinfo->nvram_name }, + { ".clm_blob", devinfo->clm_name }, + { ".txcap_blob", devinfo->txcap_name }, }; fwreq = brcmf_fw_alloc_request(devinfo->ci->chip, devinfo->ci->chiprev, @@ -1842,11 +2212,54 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo) fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY; fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM; fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL; - fwreq->board_type = devinfo->settings->board_type; + fwreq->items[BRCMF_PCIE_FW_CLM].type = BRCMF_FW_TYPE_BINARY; + fwreq->items[BRCMF_PCIE_FW_CLM].flags = BRCMF_FW_REQF_OPTIONAL; + fwreq->items[BRCMF_PCIE_FW_TXCAP].type = BRCMF_FW_TYPE_BINARY; + fwreq->items[BRCMF_PCIE_FW_TXCAP].flags = BRCMF_FW_REQF_OPTIONAL; /* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */ fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1; fwreq->bus_nr = devinfo->pdev->bus->number; + /* Apple platforms with fancy firmware/NVRAM selection */ + if (devinfo->settings->board_type && + devinfo->settings->antenna_sku && + devinfo->otp.valid) { + const struct brcmf_otp_params *otp = &devinfo->otp; + struct device *dev = &devinfo->pdev->dev; + const char **bt = fwreq->board_types; + + brcmf_dbg(PCIE, "Apple board: %s\n", + devinfo->settings->board_type); + + /* Example: apple,shikoku-RASP-m-6.11-X3 */ + bt[0] = devm_kasprintf(dev, GFP_KERNEL, "%s-%s-%s-%s-%s", + devinfo->settings->board_type, + otp->module, otp->vendor, otp->version, + devinfo->settings->antenna_sku); + bt[1] = devm_kasprintf(dev, GFP_KERNEL, "%s-%s-%s-%s", + devinfo->settings->board_type, + otp->module, otp->vendor, otp->version); + bt[2] = devm_kasprintf(dev, GFP_KERNEL, "%s-%s-%s", + devinfo->settings->board_type, + otp->module, otp->vendor); + bt[3] = devm_kasprintf(dev, GFP_KERNEL, "%s-%s", + devinfo->settings->board_type, + otp->module); + bt[4] = devm_kasprintf(dev, GFP_KERNEL, 
"%s-%s", + devinfo->settings->board_type, + devinfo->settings->antenna_sku); + bt[5] = devinfo->settings->board_type; + + if (!bt[0] || !bt[1] || !bt[2] || !bt[3] || !bt[4]) { + kfree(fwreq); + return NULL; + } + + } else { + brcmf_dbg(PCIE, "Board: %s\n", devinfo->settings->board_type); + fwreq->board_types[0] = devinfo->settings->board_type; + } + return fwreq; } @@ -1858,6 +2271,7 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) struct brcmf_pciedev_info *devinfo; struct brcmf_pciedev *pcie_bus_dev; struct brcmf_bus *bus; + struct brcmf_core *core; brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device); @@ -1876,6 +2290,12 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto fail; } + core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2); + if (core->rev >= 64) + devinfo->reginfo = &brcmf_reginfo_64; + else + devinfo->reginfo = &brcmf_reginfo_default; + pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL); if (pcie_bus_dev == NULL) { ret = -ENOMEM; @@ -1918,6 +2338,12 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto fail_bus; + ret = brcmf_pcie_read_otp(devinfo); + if (ret) { + brcmf_err(bus, "failed to parse OTP\n"); + goto fail_brcmf; + } + fwreq = brcmf_pcie_prepare_fw_request(devinfo); if (!fwreq) { ret = -ENOMEM; @@ -1981,6 +2407,8 @@ brcmf_pcie_remove(struct pci_dev *pdev) brcmf_pcie_release_ringbuffers(devinfo); brcmf_pcie_reset_device(devinfo); brcmf_pcie_release_resource(devinfo); + release_firmware(devinfo->clm_fw); + release_firmware(devinfo->txcap_fw); if (devinfo->ci) brcmf_chip_detach(devinfo->ci); @@ -2038,7 +2466,7 @@ static int brcmf_pcie_pm_leave_D3(struct device *dev) brcmf_dbg(PCIE, "Enter, dev=%p, bus=%p\n", dev, bus); /* Check if device is still up and running, if so we are ready */ - if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0) { + if (brcmf_pcie_read_reg32(devinfo, devinfo->reginfo->intmask) != 0) { brcmf_dbg(PCIE, "Try to wakeup device....\n"); if (brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM)) goto cleanup; @@ -2086,6 +2514,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = { BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID), BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355), BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_RAW_DEVICE_ID), + BRCMF_PCIE_DEVICE(BRCM_PCIE_4355_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID), @@ -2105,6 +2534,9 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = { BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID), + BRCMF_PCIE_DEVICE(BRCM_PCIE_4377_DEVICE_ID), + BRCMF_PCIE_DEVICE(BRCM_PCIE_4378_DEVICE_ID), + BRCMF_PCIE_DEVICE(BRCM_PCIE_4387_DEVICE_ID), { /* end: all zeroes */ } }; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 212fbbe1cd7e..2b71991f7d9b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -4130,23 +4130,24 @@ brcmf_sdio_watchdog(struct timer_list *t) } } -static -int brcmf_sdio_get_fwname(struct device *dev, const char *ext, u8 *fw_name) +static int brcmf_sdio_get_blob(struct device *dev, const struct firmware **fw, + enum brcmf_blob_type type) { struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct 
brcmf_fw_request *fwreq; - struct brcmf_fw_name fwnames[] = { - { ext, fw_name }, - }; + struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; - fwreq = brcmf_fw_alloc_request(bus_if->chip, bus_if->chiprev, - brcmf_sdio_fwnames, - ARRAY_SIZE(brcmf_sdio_fwnames), - fwnames, ARRAY_SIZE(fwnames)); - if (!fwreq) - return -ENOMEM; + switch (type) { + case BRCMF_BLOB_CLM: + *fw = sdiodev->clm_fw; + sdiodev->clm_fw = NULL; + break; + default: + return -ENOENT; + } + + if (!*fw) + return -ENOENT; - kfree(fwreq); return 0; } @@ -4189,13 +4190,14 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = { .wowl_config = brcmf_sdio_wowl_config, .get_ramsize = brcmf_sdio_bus_get_ramsize, .get_memdump = brcmf_sdio_bus_get_memdump, - .get_fwname = brcmf_sdio_get_fwname, + .get_blob = brcmf_sdio_get_blob, .debugfs_create = brcmf_sdio_debugfs_create, .reset = brcmf_sdio_bus_reset }; #define BRCMF_SDIO_FW_CODE 0 #define BRCMF_SDIO_FW_NVRAM 1 +#define BRCMF_SDIO_FW_CLM 2 static void brcmf_sdio_firmware_callback(struct device *dev, int err, struct brcmf_fw_request *fwreq) @@ -4218,6 +4220,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, code = fwreq->items[BRCMF_SDIO_FW_CODE].binary; nvram = fwreq->items[BRCMF_SDIO_FW_NVRAM].nv_data.data; nvram_len = fwreq->items[BRCMF_SDIO_FW_NVRAM].nv_data.len; + sdiod->clm_fw = fwreq->items[BRCMF_SDIO_FW_CLM].binary; kfree(fwreq); /* try to download image and nvram to the dongle */ @@ -4416,6 +4419,7 @@ brcmf_sdio_prepare_fw_request(struct brcmf_sdio *bus) struct brcmf_fw_name fwnames[] = { { ".bin", bus->sdiodev->fw_name }, { ".txt", bus->sdiodev->nvram_name }, + { ".clm_blob", bus->sdiodev->clm_name }, }; fwreq = brcmf_fw_alloc_request(bus->ci->chip, bus->ci->chiprev, @@ -4427,7 +4431,9 @@ brcmf_sdio_prepare_fw_request(struct brcmf_sdio *bus) fwreq->items[BRCMF_SDIO_FW_CODE].type = BRCMF_FW_TYPE_BINARY; fwreq->items[BRCMF_SDIO_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM; - fwreq->board_type = bus->sdiodev->settings->board_type; + fwreq->items[BRCMF_SDIO_FW_CLM].type = BRCMF_FW_TYPE_BINARY; + fwreq->items[BRCMF_SDIO_FW_CLM].flags = BRCMF_FW_REQF_OPTIONAL; + fwreq->board_types[0] = bus->sdiodev->settings->board_type; return fwreq; } @@ -4583,6 +4589,8 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus) if (bus->sdiodev->settings) brcmf_release_module_param(bus->sdiodev->settings); + release_firmware(bus->sdiodev->clm_fw); + bus->sdiodev->clm_fw = NULL; kfree(bus->rxbuf); kfree(bus->hdrbuf); kfree(bus); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h index 15d2c02fa3ec..7b74c295e4c9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h @@ -186,9 +186,11 @@ struct brcmf_sdio_dev { struct sg_table sgtable; char fw_name[BRCMF_FW_NAME_LEN]; char nvram_name[BRCMF_FW_NAME_LEN]; + char clm_name[BRCMF_FW_NAME_LEN]; bool wowl_enabled; enum brcmf_sdiod_state state; struct brcmf_sdiod_freezer *freezer; + const struct firmware *clm_fw; }; /* sdio core registers */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index 9fb68c2dc7e3..85e18fb9c497 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -1154,24 +1154,11 @@ error: return NULL; } -static -int brcmf_usb_get_fwname(struct device *dev, const char *ext, u8 *fw_name) +static int brcmf_usb_get_blob(struct 
device *dev, const struct firmware **fw, + enum brcmf_blob_type type) { - struct brcmf_bus *bus = dev_get_drvdata(dev); - struct brcmf_fw_request *fwreq; - struct brcmf_fw_name fwnames[] = { - { ext, fw_name }, - }; - - fwreq = brcmf_fw_alloc_request(bus->chip, bus->chiprev, - brcmf_usb_fwnames, - ARRAY_SIZE(brcmf_usb_fwnames), - fwnames, ARRAY_SIZE(fwnames)); - if (!fwreq) - return -ENOMEM; - - kfree(fwreq); - return 0; + /* No blobs for USB devices... */ + return -ENOENT; } static const struct brcmf_bus_ops brcmf_usb_bus_ops = { @@ -1180,7 +1167,7 @@ static const struct brcmf_bus_ops brcmf_usb_bus_ops = { .txdata = brcmf_usb_tx, .txctl = brcmf_usb_tx_ctlpkt, .rxctl = brcmf_usb_rx_ctlpkt, - .get_fwname = brcmf_usb_get_fwname, + .get_blob = brcmf_usb_get_blob, }; #define BRCMF_USB_FW_CODE 0 diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h index ed0b707f0cdf..7e863cea344f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h @@ -37,6 +37,7 @@ #define BRCM_CC_4350_CHIP_ID 0x4350 #define BRCM_CC_43525_CHIP_ID 43525 #define BRCM_CC_4354_CHIP_ID 0x4354 +#define BRCM_CC_4355_CHIP_ID 0x4355 #define BRCM_CC_4356_CHIP_ID 0x4356 #define BRCM_CC_43566_CHIP_ID 43566 #define BRCM_CC_43567_CHIP_ID 43567 @@ -51,6 +52,9 @@ #define BRCM_CC_43664_CHIP_ID 43664 #define BRCM_CC_43666_CHIP_ID 43666 #define BRCM_CC_4371_CHIP_ID 0x4371 +#define BRCM_CC_4377_CHIP_ID 0x4377 +#define BRCM_CC_4378_CHIP_ID 0x4378 +#define BRCM_CC_4387_CHIP_ID 0x4387 #define CY_CC_4373_CHIP_ID 0x4373 #define CY_CC_43012_CHIP_ID 43012 #define CY_CC_43752_CHIP_ID 43752 @@ -69,6 +73,7 @@ #define BRCM_PCIE_4350_DEVICE_ID 0x43a3 #define BRCM_PCIE_4354_DEVICE_ID 0x43df #define BRCM_PCIE_4354_RAW_DEVICE_ID 0x4354 +#define BRCM_PCIE_4355_DEVICE_ID 0x43dc #define BRCM_PCIE_4356_DEVICE_ID 0x43ec #define BRCM_PCIE_43567_DEVICE_ID 0x43d3 #define BRCM_PCIE_43570_DEVICE_ID 0x43d9 @@ -87,6 +92,9 @@ #define BRCM_PCIE_4366_2G_DEVICE_ID 0x43c4 #define BRCM_PCIE_4366_5G_DEVICE_ID 0x43c5 #define BRCM_PCIE_4371_DEVICE_ID 0x440d +#define BRCM_PCIE_4377_DEVICE_ID 0x4488 +#define BRCM_PCIE_4378_DEVICE_ID 0x4425 +#define BRCM_PCIE_4387_DEVICE_ID 0x4433 /* brcmsmac IDs */ diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c index d702d7d60235..67cef4dda24c 100644 --- a/drivers/nvme/host/apple.c +++ b/drivers/nvme/host/apple.c @@ -195,8 +195,20 @@ struct apple_nvme { int irq; spinlock_t lock; + + /* + * Delayed cache flush handling state + */ + struct nvme_ns *flush_ns; + unsigned long flush_interval; + unsigned long last_flush; + struct delayed_work flush_dwork; }; +unsigned int flush_interval = 1000; +module_param(flush_interval, uint, 0644); +MODULE_PARM_DESC(flush_interval, "Grace period in msecs between flushes"); + static_assert(sizeof(struct nvme_command) == 64); static_assert(sizeof(struct apple_nvmmu_tcb) == 128); @@ -729,6 +741,26 @@ static int apple_nvme_remove_sq(struct apple_nvme *anv) return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0); } +static bool apple_nvme_delayed_flush(struct apple_nvme *anv, struct nvme_ns *ns, + struct request *req) +{ + if (!anv->flush_interval || req_op(req) != REQ_OP_FLUSH) + return false; + if (delayed_work_pending(&anv->flush_dwork)) + return true; + if (time_before(jiffies, anv->last_flush + anv->flush_interval)) { + kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &anv->flush_dwork, + anv->flush_interval); + if 
(WARN_ON_ONCE(anv->flush_ns && anv->flush_ns != ns)) + goto out; + anv->flush_ns = ns; + return true; + } +out: + anv->last_flush = jiffies; + return false; +} + static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { @@ -764,6 +796,12 @@ static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx, } blk_mq_start_request(req); + + if (apple_nvme_delayed_flush(anv, ns, req)) { + blk_mq_complete_request(req); + return BLK_STS_OK; + } + apple_nvme_submit_cmd(q, cmnd); return BLK_STS_OK; @@ -1366,6 +1404,28 @@ static int apple_nvme_attach_genpd(struct apple_nvme *anv) return 0; } +static void apple_nvme_flush_work(struct work_struct *work) +{ + struct nvme_command c = { }; + struct apple_nvme *anv; + struct nvme_ns *ns; + int err; + + anv = container_of(work, struct apple_nvme, flush_dwork.work); + ns = anv->flush_ns; + if (WARN_ON_ONCE(!ns)) + return; + + c.common.opcode = nvme_cmd_flush; + c.common.nsid = cpu_to_le32(anv->flush_ns->head->ns_id); + err = nvme_submit_sync_cmd(ns->queue, &c, NULL, 0); + if (err) { + dev_err(anv->dev, "Deferred flush failed: %d\n", err); + } else { + anv->last_flush = jiffies; + } +} + static int apple_nvme_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1508,12 +1568,21 @@ static int apple_nvme_probe(struct platform_device *pdev) goto put_dev; } + if (flush_interval) { + anv->flush_interval = msecs_to_jiffies(flush_interval); + anv->flush_ns = NULL; + anv->last_flush = jiffies - anv->flush_interval; + } + + INIT_DELAYED_WORK(&anv->flush_dwork, apple_nvme_flush_work); + nvme_reset_ctrl(&anv->ctrl); async_schedule(apple_nvme_async_probe, anv); return 0; put_dev: + apple_nvme_detach_genpd(anv); put_device(anv->dev); return ret; } @@ -1541,6 +1610,7 @@ static void apple_nvme_shutdown(struct platform_device *pdev) { struct apple_nvme *anv = platform_get_drvdata(pdev); + flush_delayed_work(&anv->flush_dwork); apple_nvme_disable(anv, true); if (apple_rtkit_is_running(anv->rtk)) apple_rtkit_shutdown(anv->rtk); diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig index 967d0084800e..577984390149 100644 --- a/drivers/nvmem/Kconfig +++ b/drivers/nvmem/Kconfig @@ -172,6 +172,19 @@ config NVMEM_BCM_OCOTP This driver can also be built as a module. If so, the module will be called nvmem-bcm-ocotp. +config NVMEM_SPMI_MFD + tristate "Generic SPMI MFD NVMEM" + depends on MFD_SIMPLE_MFD_SPMI || COMPILE_TEST + default ARCH_APPLE + help + Say y here to build a generic driver to expose an SPMI MFD device + as a NVMEM provider. This can be used for PMIC/PMU devices which + are used to store power and RTC-related settings on certain + platforms, such as Apple Silicon Macs. + + This driver can also be built as a module. If so, the module + will be called nvmem-spmi-mfd. 
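The apple.c hunks above coalesce flush requests: a REQ_OP_FLUSH that arrives within flush_interval of the last flush actually sent to the device is completed immediately, and a single deferred flush is queued to cover it. A stripped-down, kernel-independent model of that policy (plain tick counters instead of jiffies, and without the per-namespace WARN_ON_ONCE or kblockd plumbing):

    #include <stdbool.h>

    struct flush_state {
        unsigned long last_flush;   /* time the last real flush was issued */
        bool work_pending;          /* a deferred flush is already queued */
    };

    /* Returns true if the request can be completed right away because a
     * deferred flush will cover it, false if a real flush must go out now.
     */
    static bool coalesce_flush(struct flush_state *s, unsigned long now,
                               unsigned long interval)
    {
        if (!interval)
            return false;                /* coalescing disabled */
        if (s->work_pending)
            return true;                 /* already covered by queued work */
        if (now < s->last_flush + interval) {
            s->work_pending = true;      /* queue one deferred flush */
            return true;
        }
        s->last_flush = now;             /* grace period over: flush now */
        return false;
    }

When the deferred work finally runs it sends one nvme_cmd_flush and refreshes last_flush, as apple_nvme_flush_work() does above; setting the flush_interval module parameter to 0 disables the mechanism entirely, and the shutdown path flushes any pending work before disabling the controller.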
+ config NVMEM_STM32_ROMEM tristate "STMicroelectronics STM32 factory-programmed memory support" depends on ARCH_STM32 || COMPILE_TEST diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile index 00e136a0a123..026eac4da53d 100644 --- a/drivers/nvmem/Makefile +++ b/drivers/nvmem/Makefile @@ -38,6 +38,8 @@ nvmem-rockchip-otp-y := rockchip-otp.o obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o nvmem_stm32_romem-y := stm32-romem.o obj-$(CONFIG_NVMEM_STM32_ROMEM) += nvmem_stm32_romem.o +nvmem_spmi_mfd-y := spmi-mfd-nvmem.o +obj-$(CONFIG_NVMEM_SPMI_MFD) += nvmem_spmi_mfd.o nvmem_sunxi_sid-y := sunxi_sid.o obj-$(CONFIG_UNIPHIER_EFUSE) += nvmem-uniphier-efuse.o nvmem-uniphier-efuse-y := uniphier-efuse.o diff --git a/drivers/nvmem/spmi-mfd-nvmem.c b/drivers/nvmem/spmi-mfd-nvmem.c new file mode 100644 index 000000000000..284c93be2e18 --- /dev/null +++ b/drivers/nvmem/spmi-mfd-nvmem.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Generic SPMI MFD NVMEM driver + * + * Copyright The Asahi Linux Contributors + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +struct spmi_mfd_nvmem { + struct regmap *regmap; + unsigned int base; +}; + +static int spmi_mfd_nvmem_read(void *priv, unsigned int offset, + void *val, size_t bytes) +{ + struct spmi_mfd_nvmem *nvmem = priv; + + return regmap_bulk_read(nvmem->regmap, nvmem->base + offset, val, bytes); +} + +static int spmi_mfd_nvmem_write(void *priv, unsigned int offset, + void *val, size_t bytes) +{ + struct spmi_mfd_nvmem *nvmem = priv; + + return regmap_bulk_write(nvmem->regmap, nvmem->base + offset, val, bytes); +} + +static int spmi_mfd_nvmem_probe(struct platform_device *pdev) +{ + struct spmi_mfd_nvmem *nvmem; + const __be32 *addr; + int len; + struct nvmem_config nvmem_cfg = { + .dev = &pdev->dev, + .name = "spmi_mfd_nvmem", + .id = NVMEM_DEVID_AUTO, + .word_size = 1, + .stride = 1, + .reg_read = spmi_mfd_nvmem_read, + .reg_write = spmi_mfd_nvmem_write, + }; + + nvmem = devm_kzalloc(&pdev->dev, sizeof(*nvmem), GFP_KERNEL); + if (!nvmem) + return -ENOMEM; + + nvmem_cfg.priv = nvmem; + + nvmem->regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!nvmem->regmap) { + dev_err(&pdev->dev, "Parent regmap unavailable.\n"); + return -ENXIO; + } + + addr = of_get_property(pdev->dev.of_node, "reg", &len); + if (!addr) { + dev_err(&pdev->dev, "no reg property\n"); + return -EINVAL; + } + if (len != 2 * sizeof(u32)) { + dev_err(&pdev->dev, "invalid reg property\n"); + return -EINVAL; + } + + nvmem->base = be32_to_cpup(&addr[0]); + nvmem_cfg.size = be32_to_cpup(&addr[1]); + + return PTR_ERR_OR_ZERO(devm_nvmem_register(&pdev->dev, &nvmem_cfg)); +} + +static const struct of_device_id spmi_mfd_nvmem_id_table[] = { + { .compatible = "apple,spmi-pmu-nvmem" }, + { .compatible = "spmi-mfd-nvmem" }, + { }, +}; +MODULE_DEVICE_TABLE(of, spmi_mfd_nvmem_id_table); + +static struct platform_driver spmi_mfd_nvmem_driver = { + .probe = spmi_mfd_nvmem_probe, + .driver = { + .name = "spmi-mfd-nvmem", + .owner = THIS_MODULE, + .of_match_table = spmi_mfd_nvmem_id_table, + }, +}; + +module_platform_driver(spmi_mfd_nvmem_driver); + +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_AUTHOR("Hector Martin <marcan@marcan.st>"); +MODULE_DESCRIPTION("SPMI MFD NVMEM driver"); diff --git a/drivers/of/address.c b/drivers/of/address.c index 94f017d808c4..68f54ec92496 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -538,7 
+538,7 @@ static u64 __of_translate_address(struct device_node *dev, pbus = of_match_bus(parent); pbus->count_cells(dev, &pna, &pns); if (!OF_CHECK_COUNTS(pna, pns)) { - pr_err("Bad cell count for %pOF\n", dev); + pr_debug("Bad cell count for %pOF\n", dev); break; } diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c index a2c3c207a04b..f26a617ea5c5 100644 --- a/drivers/pci/controller/pcie-apple.c +++ b/drivers/pci/controller/pcie-apple.c @@ -507,20 +507,53 @@ static u32 apple_pcie_rid2sid_write(struct apple_pcie_port *port, return readl_relaxed(port->base + PORT_RID2SID(idx)); } +static int apple_pcie_probe_port(struct device_node *np) +{ + struct gpio_desc *gd; + + gd = gpiod_get_from_of_node(np, "reset-gpios", 0, + GPIOD_OUT_LOW, "PERST#"); + if (IS_ERR(gd)) { + return PTR_ERR(gd); + } + + gpiod_put(gd); + + gd = gpiod_get_from_of_node(np, "pwren-gpios", 0, + GPIOD_OUT_LOW, "PWREN"); + if (IS_ERR(gd)) { + if (PTR_ERR(gd) != -ENOENT) + return PTR_ERR(gd); + } else { + gpiod_put(gd); + } + + return 0; +} + static int apple_pcie_setup_port(struct apple_pcie *pcie, struct device_node *np) { struct platform_device *platform = to_platform_device(pcie->dev); struct apple_pcie_port *port; - struct gpio_desc *reset; + struct gpio_desc *reset, *pwren = NULL; u32 stat, idx; int ret, i; - reset = gpiod_get_from_of_node(np, "reset-gpios", 0, - GPIOD_OUT_LOW, "PERST#"); + reset = devm_gpiod_get_from_of_node(pcie->dev, np, "reset-gpios", 0, + GPIOD_OUT_LOW, "PERST#"); if (IS_ERR(reset)) return PTR_ERR(reset); + pwren = devm_gpiod_get_from_of_node(pcie->dev, np, "pwren-gpios", 0, + GPIOD_OUT_LOW, "PWREN"); + if (IS_ERR(pwren)) { + if (PTR_ERR(pwren) == -ENOENT) + pwren = NULL; + else + return PTR_ERR(pwren); + } + port = devm_kzalloc(pcie->dev, sizeof(*port), GFP_KERNEL); if (!port) return -ENOMEM; @@ -541,18 +574,27 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie, rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK); /* Assert PERST# before setting up the clock */ - gpiod_set_value(reset, 1); + gpiod_set_value_cansleep(reset, 1); + + /* Power on the device if required */ + gpiod_set_value_cansleep(pwren, 1); ret = apple_pcie_setup_refclk(pcie, port); if (ret < 0) return ret; - /* The minimal Tperst-clk value is 100us (PCIe CEM r5.0, 2.9.2) */ - usleep_range(100, 200); + /* + * The minimal Tperst-clk value is 100us (PCIe CEM r5.0, 2.9.2) + * If powering up, the minimal Tpvperl is 100ms + */ + if (pwren) + msleep(100); + else + usleep_range(100, 200); /* Deassert PERST# */ rmw_set(PORT_PERST_OFF, port->base + PORT_PERST); - gpiod_set_value(reset, 0); + gpiod_set_value_cansleep(reset, 0); /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */ msleep(100); @@ -797,8 +839,18 @@ static int apple_pcie_init(struct pci_config_window *cfg) static int apple_pcie_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; + struct device_node *of_port; int ret; + /* Check for probe dependencies for all ports first */ + for_each_child_of_node(dev->of_node, of_port) { + ret = apple_pcie_probe_port(of_port); + of_node_put(of_port); + if (ret) + return dev_err_probe(dev, ret, "Port %pOF probe fail\n", of_port); + } + ret = bus_register_notifier(&pci_bus_type, &apple_pcie_nb); if (ret) return ret; diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig index 18fc6a08569e..d683a75fc972 100644 --- a/drivers/platform/Kconfig +++ b/drivers/platform/Kconfig @@ -15,3 +15,5 @@ source "drivers/platform/mellanox/Kconfig" source 
"drivers/platform/olpc/Kconfig" source "drivers/platform/surface/Kconfig" + +source "drivers/platform/apple/Kconfig" diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile index 4de08ef4ec9d..3e5d5039a28c 100644 --- a/drivers/platform/Makefile +++ b/drivers/platform/Makefile @@ -10,3 +10,4 @@ obj-$(CONFIG_OLPC_EC) += olpc/ obj-$(CONFIG_GOLDFISH) += goldfish/ obj-$(CONFIG_CHROME_PLATFORMS) += chrome/ obj-$(CONFIG_SURFACE_PLATFORMS) += surface/ +obj-$(CONFIG_APPLE_PLATFORMS) += apple/ diff --git a/drivers/platform/apple/Kconfig b/drivers/platform/apple/Kconfig new file mode 100644 index 000000000000..42525aa9fbbe --- /dev/null +++ b/drivers/platform/apple/Kconfig @@ -0,0 +1,49 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Apple Platform-Specific Drivers +# + +menuconfig APPLE_PLATFORMS + bool "Apple Mac Platform-Specific Device Drivers" + default y + help + Say Y here to get to see options for platform-specific device drivers + for Apple devices. This option alone does not add any kernel code. + + If you say N, all options in this submenu will be skipped and disabled. + +if APPLE_PLATFORMS + +config APPLE_SMC + tristate "Apple SMC Driver" + depends on ARCH_APPLE || COMPILE_TEST + default ARCH_APPLE + select MFD_CORE + help + Build support for the Apple System Management Controller present in + Apple Macs. This driver currently supports the SMC in Apple Silicon + Macs. For x86 Macs, see the applesmc driver (SENSORS_APPLESMC). + + Say Y here if you have an Apple Silicon Mac. + + To compile this driver as a module, choose M here: the module will + be called macsmc. + +if APPLE_SMC + +config APPLE_SMC_RTKIT + tristate "RTKit (Apple Silicon) backend" + depends on ARCH_APPLE || COMPILE_TEST + depends on APPLE_RTKIT + default ARCH_APPLE + help + Build support for SMC communications via the RTKit backend. This is + required for Apple Silicon Macs. + + Say Y here if you have an Apple Silicon Mac. + + To compile this driver as a module, choose M here: the module will + be called macsmc-rtkit. 
+ +endif +endif diff --git a/drivers/platform/apple/Makefile b/drivers/platform/apple/Makefile new file mode 100644 index 000000000000..79fac195398b --- /dev/null +++ b/drivers/platform/apple/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for linux/drivers/platform/apple +# Apple Platform-Specific Drivers +# + +macsmc-y += smc_core.o +macsmc-rtkit-y += smc_rtkit.o + +obj-$(CONFIG_APPLE_SMC) += macsmc.o +obj-$(CONFIG_APPLE_SMC_RTKIT) += macsmc-rtkit.o diff --git a/drivers/platform/apple/smc.h b/drivers/platform/apple/smc.h new file mode 100644 index 000000000000..8ae51887b2c5 --- /dev/null +++ b/drivers/platform/apple/smc.h @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Apple SMC internal core definitions + * Copyright (C) The Asahi Linux Contributors + */ + +#ifndef _SMC_H +#define _SMC_H + +#include <linux/mfd/macsmc.h> + +struct apple_smc_backend_ops { + int (*read_key)(void *cookie, smc_key key, void *buf, size_t size); + int (*write_key)(void *cookie, smc_key key, void *buf, size_t size); + int (*write_key_atomic)(void *cookie, smc_key key, void *buf, size_t size); + int (*rw_key)(void *cookie, smc_key key, void *wbuf, size_t wsize, + void *rbuf, size_t rsize); + int (*get_key_by_index)(void *cookie, int index, smc_key *key); + int (*get_key_info)(void *cookie, smc_key key, struct apple_smc_key_info *info); +}; + +struct apple_smc *apple_smc_probe(struct device *dev, const struct apple_smc_backend_ops *ops, + void *cookie); +void *apple_smc_get_cookie(struct apple_smc *smc); +int apple_smc_remove(struct apple_smc *smc); +void apple_smc_event_received(struct apple_smc *smc, uint32_t event); + +#endif diff --git a/drivers/platform/apple/smc_core.c b/drivers/platform/apple/smc_core.c new file mode 100644 index 000000000000..daf029cd072f --- /dev/null +++ b/drivers/platform/apple/smc_core.c @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Apple SMC core framework + * Copyright The Asahi Linux Contributors + */ + +#include <linux/device.h> +#include <linux/mfd/core.h> +#include <linux/mutex.h> +#include <linux/notifier.h> +#include "smc.h" + +struct apple_smc { + struct device *dev; + + void *be_cookie; + const struct apple_smc_backend_ops *be; + + struct mutex mutex; + + u32 key_count; + smc_key first_key; + smc_key last_key; + + struct blocking_notifier_head event_handlers; +}; + +static const struct mfd_cell apple_smc_devs[] = { + { + .name = "macsmc-gpio", + }, + { + .name = "macsmc-hid", + }, + { + .name = "macsmc-power", + }, + { + .name = "macsmc-reboot", + }, + { + .name = "macsmc-rtc", + }, +}; + +int apple_smc_read(struct apple_smc *smc, smc_key key, void *buf, size_t size) +{ + int ret; + + mutex_lock(&smc->mutex); + ret = smc->be->read_key(smc->be_cookie, key, buf, size); + mutex_unlock(&smc->mutex); + + return ret; +} +EXPORT_SYMBOL(apple_smc_read); + +int apple_smc_write(struct apple_smc *smc, smc_key key, void *buf, size_t size) +{ + int ret; + + mutex_lock(&smc->mutex); + ret = smc->be->write_key(smc->be_cookie, key, buf, size); + mutex_unlock(&smc->mutex); + + return ret; +} +EXPORT_SYMBOL(apple_smc_write); + +int apple_smc_write_atomic(struct apple_smc *smc, smc_key key, void *buf, size_t size) +{ + int ret; + + /* + * Will fail if SMC is busy. This is only used by SMC reboot/poweroff + * final calls, so it doesn't really matter at that point. 
+ */ + if (!mutex_trylock(&smc->mutex)) + return -EBUSY; + + ret = smc->be->write_key_atomic(smc->be_cookie, key, buf, size); + mutex_unlock(&smc->mutex); + + return ret; +} +EXPORT_SYMBOL(apple_smc_write_atomic); + +int apple_smc_rw(struct apple_smc *smc, smc_key key, void *wbuf, size_t wsize, + void *rbuf, size_t rsize) +{ + int ret; + + mutex_lock(&smc->mutex); + ret = smc->be->rw_key(smc->be_cookie, key, wbuf, wsize, rbuf, rsize); + mutex_unlock(&smc->mutex); + + return ret; +} +EXPORT_SYMBOL(apple_smc_rw); + +int apple_smc_get_key_by_index(struct apple_smc *smc, int index, smc_key *key) +{ + int ret; + + mutex_lock(&smc->mutex); + ret = smc->be->get_key_by_index(smc->be_cookie, index, key); + mutex_unlock(&smc->mutex); + + return ret; +} +EXPORT_SYMBOL(apple_smc_get_key_by_index); + +int apple_smc_get_key_info(struct apple_smc *smc, smc_key key, struct apple_smc_key_info *info) +{ + int ret; + + mutex_lock(&smc->mutex); + ret = smc->be->get_key_info(smc->be_cookie, key, info); + mutex_unlock(&smc->mutex); + + return ret; +} +EXPORT_SYMBOL(apple_smc_get_key_info); + +int apple_smc_find_first_key_index(struct apple_smc *smc, smc_key key) +{ + int start = 0, count = smc->key_count; + int ret; + + if (key <= smc->first_key) + return 0; + if (key > smc->last_key) + return smc->key_count; + + while (count > 1) { + int pivot = start + ((count - 1) >> 1); + smc_key pkey; + + ret = apple_smc_get_key_by_index(smc, pivot, &pkey); + if (ret < 0) + return ret; + + if (pkey == key) + return pivot; + + pivot++; + + if (pkey < key) { + count -= pivot - start; + start = pivot; + } else { + count = pivot - start; + } + } + + return start; +} +EXPORT_SYMBOL(apple_smc_find_first_key_index); + +int apple_smc_get_key_count(struct apple_smc *smc) +{ + return smc->key_count; +} +EXPORT_SYMBOL(apple_smc_get_key_count); + +void apple_smc_event_received(struct apple_smc *smc, uint32_t event) +{ + dev_dbg(smc->dev, "Event: 0x%08x\n", event); + blocking_notifier_call_chain(&smc->event_handlers, event, NULL); +} +EXPORT_SYMBOL(apple_smc_event_received); + +int apple_smc_register_notifier(struct apple_smc *smc, struct notifier_block *n) +{ + return blocking_notifier_chain_register(&smc->event_handlers, n); +} +EXPORT_SYMBOL(apple_smc_register_notifier); + +int apple_smc_unregister_notifier(struct apple_smc *smc, struct notifier_block *n) +{ + return blocking_notifier_chain_unregister(&smc->event_handlers, n); +} +EXPORT_SYMBOL(apple_smc_unregister_notifier); + +void *apple_smc_get_cookie(struct apple_smc *smc) +{ + return smc->be_cookie; +} +EXPORT_SYMBOL(apple_smc_get_cookie); + +struct apple_smc *apple_smc_probe(struct device *dev, const struct apple_smc_backend_ops *ops, void *cookie) +{ + struct apple_smc *smc; + u32 count; + int ret; + + smc = devm_kzalloc(dev, sizeof(*smc), GFP_KERNEL); + if (!smc) + return ERR_PTR(-ENOMEM); + + smc->dev = dev; + smc->be_cookie = cookie; + smc->be = ops; + mutex_init(&smc->mutex); + BLOCKING_INIT_NOTIFIER_HEAD(&smc->event_handlers); + + ret = apple_smc_read_u32(smc, SMC_KEY(#KEY), &count); + if (ret) + return ERR_PTR(dev_err_probe(dev, ret, "Failed to get key count")); + smc->key_count = be32_to_cpu(count); + + ret = apple_smc_get_key_by_index(smc, 0, &smc->first_key); + if (ret) + return ERR_PTR(dev_err_probe(dev, ret, "Failed to get first key")); + + ret = apple_smc_get_key_by_index(smc, smc->key_count - 1, &smc->last_key); + if (ret) + return ERR_PTR(dev_err_probe(dev, ret, "Failed to get last key")); + + /* Enable notifications */ + apple_smc_write_flag(smc, 
SMC_KEY(NTAP), 1); + + dev_info(dev, "Initialized (%d keys %p4ch..%p4ch)\n", + smc->key_count, &smc->first_key, &smc->last_key); + + dev_set_drvdata(dev, smc); + + ret = mfd_add_devices(dev, -1, apple_smc_devs, ARRAY_SIZE(apple_smc_devs), NULL, 0, NULL); + if (ret) + return ERR_PTR(dev_err_probe(dev, ret, "Subdevice initialization failed")); + + return smc; +} +EXPORT_SYMBOL(apple_smc_probe); + +int apple_smc_remove(struct apple_smc *smc) +{ + mfd_remove_devices(smc->dev); + + /* Disable notifications */ + apple_smc_write_flag(smc, SMC_KEY(NTAP), 1); + + return 0; +} +EXPORT_SYMBOL(apple_smc_remove); + +MODULE_AUTHOR("Hector Martin <marcan@marcan.st>"); +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_DESCRIPTION("Apple SMC core"); diff --git a/drivers/platform/apple/smc_rtkit.c b/drivers/platform/apple/smc_rtkit.c new file mode 100644 index 000000000000..5b7c4c475bbb --- /dev/null +++ b/drivers/platform/apple/smc_rtkit.c @@ -0,0 +1,451 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Apple SMC RTKit backend + * Copyright The Asahi Linux Contributors + */ + +#include <asm/unaligned.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/soc/apple/rtkit.h> +#include "smc.h" + +#define SMC_ENDPOINT 0x20 + +/* Guess */ +#define SMC_SHMEM_SIZE 0x1000 + +#define SMC_MSG_READ_KEY 0x10 +#define SMC_MSG_WRITE_KEY 0x11 +#define SMC_MSG_GET_KEY_BY_INDEX 0x12 +#define SMC_MSG_GET_KEY_INFO 0x13 +#define SMC_MSG_INITIALIZE 0x17 +#define SMC_MSG_NOTIFICATION 0x18 +#define SMC_MSG_RW_KEY 0x20 + +#define SMC_DATA GENMASK(63, 32) +#define SMC_WSIZE GENMASK(31, 24) +#define SMC_SIZE GENMASK(23, 16) +#define SMC_ID GENMASK(15, 12) +#define SMC_MSG GENMASK(7, 0) +#define SMC_RESULT SMC_MSG + +#define SMC_RECV_TIMEOUT 100 + +struct apple_smc_rtkit { + struct device *dev; + struct apple_smc *core; + struct apple_rtkit *rtk; + + struct completion init_done; + bool initialized; + bool alive; + + struct resource *sram; + void __iomem *sram_base; + struct apple_rtkit_shmem shmem; + + unsigned int msg_id; + + bool atomic_pending; + struct completion cmd_done; + u64 cmd_ret; +}; + +static int apple_smc_rtkit_write_key_atomic(void *cookie, smc_key key, void *buf, size_t size) +{ + struct apple_smc_rtkit *smc = cookie; + int ret; + u64 msg; + u8 result; + + if (size > SMC_SHMEM_SIZE || size == 0) + return -EINVAL; + + if (!smc->alive) + return -EIO; + + memcpy_toio(smc->shmem.iomem, buf, size); + smc->msg_id = (smc->msg_id + 1) & 0xf; + msg = (FIELD_PREP(SMC_MSG, SMC_MSG_WRITE_KEY) | + FIELD_PREP(SMC_SIZE, size) | + FIELD_PREP(SMC_ID, smc->msg_id) | + FIELD_PREP(SMC_DATA, key)); + smc->atomic_pending = true; + + ret = apple_rtkit_send_message(smc->rtk, SMC_ENDPOINT, msg, NULL, true); + if (ret < 0) { + dev_err(smc->dev, "Failed to send command (%d)\n", ret); + return ret; + } + + while (smc->atomic_pending) { + ret = apple_rtkit_poll(smc->rtk); + if (ret < 0) { + dev_err(smc->dev, "RTKit poll failed (%llx)", msg); + return ret; + } + udelay(100); + } + + if (FIELD_GET(SMC_ID, smc->cmd_ret) != smc->msg_id) { + dev_err(smc->dev, "Command sequence mismatch (expected %d, got %d)\n", + smc->msg_id, (unsigned int)FIELD_GET(SMC_ID, smc->cmd_ret)); + return -EIO; + } + + result = FIELD_GET(SMC_RESULT, smc->cmd_ret); + if (result != 0) + return -result; + + return FIELD_GET(SMC_SIZE, smc->cmd_ret); +} + +static int apple_smc_cmd(struct apple_smc_rtkit *smc, u64 cmd, u64 arg, + u64 size, u64 wsize, u32 
*ret_data) +{ + int ret; + u64 msg; + u8 result; + + if (!smc->alive) + return -EIO; + + reinit_completion(&smc->cmd_done); + + smc->msg_id = (smc->msg_id + 1) & 0xf; + msg = (FIELD_PREP(SMC_MSG, cmd) | + FIELD_PREP(SMC_SIZE, size) | + FIELD_PREP(SMC_WSIZE, wsize) | + FIELD_PREP(SMC_ID, smc->msg_id) | + FIELD_PREP(SMC_DATA, arg)); + + ret = apple_rtkit_send_message(smc->rtk, SMC_ENDPOINT, msg, NULL, false); + if (ret < 0) { + dev_err(smc->dev, "Failed to send command\n"); + return ret; + } + + do { + if (wait_for_completion_timeout(&smc->cmd_done, + msecs_to_jiffies(SMC_RECV_TIMEOUT)) == 0) { + dev_err(smc->dev, "Command timed out (%llx)", msg); + return -ETIMEDOUT; + } + if (FIELD_GET(SMC_ID, smc->cmd_ret) == smc->msg_id) + break; + dev_err(smc->dev, "Command sequence mismatch (expected %d, got %d)\n", + smc->msg_id, (unsigned int)FIELD_GET(SMC_ID, smc->cmd_ret)); + } while(1); + + result = FIELD_GET(SMC_RESULT, smc->cmd_ret); + if (result != 0) + return -result; + + if (ret_data) + *ret_data = FIELD_GET(SMC_DATA, smc->cmd_ret); + + return FIELD_GET(SMC_SIZE, smc->cmd_ret); +} + +static int _apple_smc_rtkit_read_key(struct apple_smc_rtkit *smc, smc_key key, + void *buf, size_t size, size_t wsize) +{ + int ret; + u32 rdata; + u64 cmd; + + if (size > SMC_SHMEM_SIZE || size == 0) + return -EINVAL; + + cmd = wsize ? SMC_MSG_RW_KEY : SMC_MSG_READ_KEY; + + ret = apple_smc_cmd(smc, cmd, key, size, wsize, &rdata); + if (ret < 0) + return ret; + + if (size <= 4) + memcpy(buf, &rdata, size); + else + memcpy_fromio(buf, smc->shmem.iomem, size); + + return ret; +} + +static int apple_smc_rtkit_read_key(void *cookie, smc_key key, void *buf, size_t size) +{ + return _apple_smc_rtkit_read_key(cookie, key, buf, size, 0); +} + +static int apple_smc_rtkit_write_key(void *cookie, smc_key key, void *buf, size_t size) +{ + struct apple_smc_rtkit *smc = cookie; + + if (size > SMC_SHMEM_SIZE || size == 0) + return -EINVAL; + + memcpy_toio(smc->shmem.iomem, buf, size); + return apple_smc_cmd(smc, SMC_MSG_WRITE_KEY, key, size, 0, NULL); +} + +static int apple_smc_rtkit_rw_key(void *cookie, smc_key key, + void *wbuf, size_t wsize, void *rbuf, size_t rsize) +{ + struct apple_smc_rtkit *smc = cookie; + + if (wsize > SMC_SHMEM_SIZE || wsize == 0) + return -EINVAL; + + memcpy_toio(smc->shmem.iomem, wbuf, wsize); + return _apple_smc_rtkit_read_key(smc, key, rbuf, rsize, wsize); +} + +static int apple_smc_rtkit_get_key_by_index(void *cookie, int index, smc_key *key) +{ + struct apple_smc_rtkit *smc = cookie; + int ret; + + ret = apple_smc_cmd(smc, SMC_MSG_GET_KEY_BY_INDEX, index, 0, 0, key); + + *key = swab32(*key); + return ret; +} + +static int apple_smc_rtkit_get_key_info(void *cookie, smc_key key, struct apple_smc_key_info *info) +{ + struct apple_smc_rtkit *smc = cookie; + u8 key_info[6]; + int ret; + + ret = apple_smc_cmd(smc, SMC_MSG_GET_KEY_INFO, key, 0, 0, NULL); + if (ret >= 0 && info) { + info->size = key_info[0]; + info->type_code = get_unaligned_be32(&key_info[1]); + info->flags = key_info[5]; + } + return ret; +} + +static const struct apple_smc_backend_ops apple_smc_rtkit_be_ops = { + .read_key = apple_smc_rtkit_read_key, + .write_key = apple_smc_rtkit_write_key, + .write_key_atomic = apple_smc_rtkit_write_key_atomic, + .rw_key = apple_smc_rtkit_rw_key, + .get_key_by_index = apple_smc_rtkit_get_key_by_index, + .get_key_info = apple_smc_rtkit_get_key_info, +}; + +static void apple_smc_rtkit_crashed(void *cookie) +{ + struct apple_smc_rtkit *smc = cookie; + + dev_err(smc->dev, "SMC crashed! 
Your system will reboot in a few seconds...\n"); + smc->alive = false; +} + +static int apple_smc_rtkit_shmem_setup(void *cookie, struct apple_rtkit_shmem *bfr) +{ + struct apple_smc_rtkit *smc = cookie; + struct resource res = { + .start = bfr->iova, + .end = bfr->iova + bfr->size - 1, + .name = "rtkit_map", + .flags = smc->sram->flags, + }; + + if (!bfr->iova) { + dev_err(smc->dev, "RTKit wants a RAM buffer\n"); + return -EIO; + } + + if (res.end < res.start || !resource_contains(smc->sram, &res)) { + dev_err(smc->dev, + "RTKit buffer request outside SRAM region: %pR", &res); + return -EFAULT; + } + + bfr->iomem = smc->sram_base + (res.start - smc->sram->start); + bfr->is_mapped = true; + + return 0; +} + +static void apple_smc_rtkit_shmem_destroy(void *cookie, struct apple_rtkit_shmem *bfr) +{ + // no-op +} + +static bool apple_smc_rtkit_recv_early(void *cookie, u8 endpoint, u64 message) +{ + struct apple_smc_rtkit *smc = cookie; + + if (endpoint != SMC_ENDPOINT) { + dev_err(smc->dev, "Received message for unknown endpoint 0x%x\n", endpoint); + return false; + } + + if (!smc->initialized) { + int ret; + + smc->shmem.iova = message; + smc->shmem.size = SMC_SHMEM_SIZE; + ret = apple_smc_rtkit_shmem_setup(smc, &smc->shmem); + if (ret < 0) + dev_err(smc->dev, "Failed to initialize shared memory\n"); + else + smc->alive = true; + smc->initialized = true; + complete(&smc->init_done); + } else if (FIELD_GET(SMC_MSG, message) == SMC_MSG_NOTIFICATION) { + /* Handle these in the RTKit worker thread */ + return false; + } else { + smc->cmd_ret = message; + if (smc->atomic_pending) { + smc->atomic_pending = false; + } else { + complete(&smc->cmd_done); + } + } + + return true; +} + +static void apple_smc_rtkit_recv(void *cookie, u8 endpoint, u64 message) +{ + struct apple_smc_rtkit *smc = cookie; + + if (endpoint != SMC_ENDPOINT) { + dev_err(smc->dev, "Received message for unknown endpoint 0x%x\n", endpoint); + return; + } + + if (FIELD_GET(SMC_MSG, message) != SMC_MSG_NOTIFICATION) { + dev_err(smc->dev, "Received unknown message from worker: 0x%llx\n", message); + return; + } + + apple_smc_event_received(smc->core, FIELD_GET(SMC_DATA, message)); +} + +static const struct apple_rtkit_ops apple_smc_rtkit_ops = { + .crashed = apple_smc_rtkit_crashed, + .recv_message = apple_smc_rtkit_recv, + .recv_message_early = apple_smc_rtkit_recv_early, + .shmem_setup = apple_smc_rtkit_shmem_setup, + .shmem_destroy = apple_smc_rtkit_shmem_destroy, +}; + +static int apple_smc_rtkit_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct apple_smc_rtkit *smc; + int ret; + + smc = devm_kzalloc(dev, sizeof(*smc), GFP_KERNEL); + if (!smc) + return -ENOMEM; + + smc->dev = dev; + + smc->sram = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram"); + if (!smc->sram) + return dev_err_probe(dev, EIO, + "No SRAM region"); + + smc->sram_base = devm_ioremap_resource(dev, smc->sram); + if (IS_ERR(smc->sram_base)) + return dev_err_probe(dev, PTR_ERR(smc->sram_base), + "Failed to map SRAM region"); + + smc->rtk = + devm_apple_rtkit_init(dev, smc, NULL, 0, &apple_smc_rtkit_ops); + if (IS_ERR(smc->rtk)) + return dev_err_probe(dev, PTR_ERR(smc->rtk), + "Failed to intialize RTKit"); + + ret = apple_rtkit_wake(smc->rtk); + if (ret != 0) + return dev_err_probe(dev, ret, + "Failed to wake up SMC"); + + ret = apple_rtkit_start_ep(smc->rtk, SMC_ENDPOINT); + if (ret != 0) { + dev_err(dev, "Failed to start endpoint"); + goto cleanup; + } + + init_completion(&smc->init_done); + 
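	/*
	 * Note on the handshake that follows: SMC_MSG_INITIALIZE (sent below)
	 * makes the firmware reply on the SMC endpoint with the IOVA of its
	 * shared memory buffer. apple_smc_rtkit_recv_early() catches that
	 * first message, maps the buffer inside the SRAM window via
	 * apple_smc_rtkit_shmem_setup(), marks the backend alive and
	 * completes init_done, which the wait below blocks on.
	 */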
init_completion(&smc->cmd_done); + + ret = apple_rtkit_send_message(smc->rtk, SMC_ENDPOINT, + FIELD_PREP(SMC_MSG, SMC_MSG_INITIALIZE), NULL, false); + if (ret < 0) + return dev_err_probe(dev, ret, + "Failed to send init message"); + + if (wait_for_completion_timeout(&smc->init_done, + msecs_to_jiffies(SMC_RECV_TIMEOUT)) == 0) { + ret = -ETIMEDOUT; + dev_err(dev, "Timed out initializing SMC"); + goto cleanup; + } + + if (!smc->alive) { + ret = -EIO; + goto cleanup; + } + + smc->core = apple_smc_probe(dev, &apple_smc_rtkit_be_ops, smc); + if (IS_ERR(smc->core)) { + ret = PTR_ERR(smc->core); + goto cleanup; + } + + return 0; + +cleanup: + /* Try to shut down RTKit, if it's not completely wedged */ + if (apple_rtkit_is_running(smc->rtk)) + apple_rtkit_quiesce(smc->rtk); + + return ret; +} + +static int apple_smc_rtkit_remove(struct platform_device *pdev) +{ + struct apple_smc *core = platform_get_drvdata(pdev); + struct apple_smc_rtkit *smc = apple_smc_get_cookie(core); + + apple_smc_remove(core); + + if (apple_rtkit_is_running(smc->rtk)) + apple_rtkit_quiesce(smc->rtk); + + return 0; +} + +static const struct of_device_id apple_smc_rtkit_of_match[] = { + { .compatible = "apple,smc" }, + {}, +}; +MODULE_DEVICE_TABLE(of, apple_smc_rtkit_of_match); + +static struct platform_driver apple_smc_rtkit_driver = { + .driver = { + .name = "macsmc-rtkit", + .owner = THIS_MODULE, + .of_match_table = apple_smc_rtkit_of_match, + }, + .probe = apple_smc_rtkit_probe, + .remove = apple_smc_rtkit_remove, +}; +module_platform_driver(apple_smc_rtkit_driver); + +MODULE_AUTHOR("Hector Martin <marcan@marcan.st>"); +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_DESCRIPTION("Apple SMC RTKit backend driver"); diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index 4b563db3ab3e..94eeb910e5f9 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig @@ -117,6 +117,18 @@ config POWER_RESET_LINKSTATION Say Y here if you have a Buffalo LinkStation LS421D/E. +config POWER_RESET_MACSMC + tristate "Apple SMC reset/power-off driver" + depends on ARCH_APPLE || COMPILE_TEST + depends on APPLE_SMC + depends on OF + default ARCH_APPLE + help + This driver supports reset and power-off on Apple Mac machines + that implement this functionality via the SMC. + + Say Y here if you have an Apple Silicon Mac. 
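The reset/power-off driver selected here issues its final SMC writes from contexts that must not sleep, which is what the atomic write path in smc_core.c/smc_rtkit.c above exists for (mutex_trylock in the core, mailbox busy-polling in the RTKit backend). A rough sketch of that usage, assuming the apple_smc_write_u32_atomic() helper from <linux/mfd/macsmc.h> that macsmc-reboot.c below relies on; the example function is illustrative only.

#include <linux/mfd/macsmc.h>
#include <linux/printk.h>

static void example_smc_power_off(struct apple_smc *smc)
{
	/*
	 * The atomic variant trylocks the core mutex and busy-polls the
	 * RTKit mailbox instead of sleeping on a completion, so it can be
	 * called late in the power-off path.
	 */
	if (apple_smc_write_u32_atomic(smc, SMC_KEY(MBSE), SMC_KEY(off1)) < 0)
		pr_err("example: MBSE = off1 failed\n");
}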
+ config POWER_RESET_MSM bool "Qualcomm MSM power-off driver" depends on ARCH_QCOM diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile index f606a2f60539..ba49b8c0d0c0 100644 --- a/drivers/power/reset/Makefile +++ b/drivers/power/reset/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o obj-$(CONFIG_POWER_RESET_LINKSTATION) += linkstation-poweroff.o +obj-$(CONFIG_POWER_RESET_MACSMC) += macsmc-reboot.o obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o obj-$(CONFIG_POWER_RESET_MT6323) += mt6323-poweroff.o obj-$(CONFIG_POWER_RESET_OXNAS) += oxnas-restart.o diff --git a/drivers/power/reset/macsmc-reboot.c b/drivers/power/reset/macsmc-reboot.c new file mode 100644 index 000000000000..c33ba2a7852d --- /dev/null +++ b/drivers/power/reset/macsmc-reboot.c @@ -0,0 +1,336 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Apple SMC Reboot/Poweroff Handler + * Copyright The Asahi Linux Contributors + */ + +#include <linux/delay.h> +#include <linux/mfd/core.h> +#include <linux/mfd/macsmc.h> +#include <linux/module.h> +#include <linux/nvmem-consumer.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/reboot.h> +#include <linux/slab.h> + +struct macsmc_reboot_nvmem { + struct nvmem_cell *shutdown_flag; + struct nvmem_cell *pm_setting; + struct nvmem_cell *boot_stage; + struct nvmem_cell *boot_error_count; + struct nvmem_cell *panic_count; +}; + +static const char *nvmem_names[] = { + "shutdown_flag", + "pm_setting", + "boot_stage", + "boot_error_count", + "panic_count", +}; + +enum boot_stage { + BOOT_STAGE_SHUTDOWN = 0x00, /* Clean shutdown */ + BOOT_STAGE_IBOOT_DONE = 0x2f, /* Last stage of bootloader */ + BOOT_STAGE_KERNEL_STARTED = 0x30, /* Normal OS booting */ +}; + +enum pm_setting { + PM_SETTING_AC_POWER_RESTORE = 0x02, + PM_SETTING_AC_POWER_OFF = 0x03, +}; + +static const char *ac_power_modes[] = { "off", "restore" }; + +static int ac_power_mode_map[] = { + PM_SETTING_AC_POWER_OFF, + PM_SETTING_AC_POWER_RESTORE, +}; + +struct macsmc_reboot { + struct device *dev; + struct apple_smc *smc; + struct notifier_block reboot_notify; + + union { + struct macsmc_reboot_nvmem nvm; + struct nvmem_cell *nvm_cells[ARRAY_SIZE(nvmem_names)]; + }; +}; + +/* Helpers to read/write a u8 given a struct nvmem_cell */ +static int nvmem_cell_get_u8(struct nvmem_cell *cell) +{ + size_t len; + u8 val; + void *ret = nvmem_cell_read(cell, &len); + + if (IS_ERR(ret)) + return PTR_ERR(ret); + + if (len < 1) { + kfree(ret); + return -EINVAL; + } + + val = *(u8 *)ret; + kfree(ret); + return val; +} + +static int nvmem_cell_set_u8(struct nvmem_cell *cell, u8 val) +{ + return nvmem_cell_write(cell, &val, sizeof(val)); +} + +static ssize_t macsmc_ac_power_mode_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t n) +{ + struct macsmc_reboot *reboot = dev_get_drvdata(dev); + int mode; + int ret; + + mode = sysfs_match_string(ac_power_modes, buf); + if (mode < 0) + return mode; + + ret = nvmem_cell_set_u8(reboot->nvm.pm_setting, ac_power_mode_map[mode]); + if (ret < 0) + return ret; + + return n; +} + +static ssize_t macsmc_ac_power_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct macsmc_reboot *reboot = dev_get_drvdata(dev); + int len = 0; + int i; + int mode = nvmem_cell_get_u8(reboot->nvm.pm_setting); + + if (mode < 0) + return mode; + + for (i = 0; i < 
ARRAY_SIZE(ac_power_mode_map); i++) + if (mode == ac_power_mode_map[i]) + len += scnprintf(buf+len, PAGE_SIZE-len, + "[%s] ", ac_power_modes[i]); + else + len += scnprintf(buf+len, PAGE_SIZE-len, + "%s ", ac_power_modes[i]); + buf[len-1] = '\n'; + return len; +} +static DEVICE_ATTR(ac_power_mode, 0644, macsmc_ac_power_mode_show, + macsmc_ac_power_mode_store); + +/* + * SMC 'MBSE' key actions: + * + * 'offw' - shutdown warning + * 'slpw' - sleep warning + * 'rest' - restart warning + * 'off1' - shutdown (needs PMU bit set to stay on) + * 'susp' - suspend + * 'phra' - restart ("PE Halt Restart Action"?) + * 'panb' - panic beginning + * 'pane' - panic end + */ + +static int macsmc_power_off(struct sys_off_data *data) +{ + struct macsmc_reboot *reboot = data->cb_data; + + dev_info(reboot->dev, "Issuing power off (off1)\n"); + + if (apple_smc_write_u32_atomic(reboot->smc, SMC_KEY(MBSE), SMC_KEY(off1)) < 0) { + dev_err(reboot->dev, "Failed to issue MBSE = off1 (power_off)\n"); + } else { + mdelay(100); + WARN_ON(1); + } + + return NOTIFY_OK; +} + +static int macsmc_restart(struct sys_off_data *data) +{ + struct macsmc_reboot *reboot = data->cb_data; + + dev_info(reboot->dev, "Issuing restart (phra)\n"); + + if (apple_smc_write_u32_atomic(reboot->smc, SMC_KEY(MBSE), SMC_KEY(phra)) < 0) { + dev_err(reboot->dev, "Failed to issue MBSE = phra (restart)\n"); + } else { + mdelay(100); + WARN_ON(1); + } + + return NOTIFY_OK; +} + +static int macsmc_reboot_notify(struct notifier_block *this, unsigned long action, void *data) +{ + struct macsmc_reboot *reboot = container_of(this, struct macsmc_reboot, reboot_notify); + u32 val; + u8 shutdown_flag; + + switch (action) { + case SYS_RESTART: + val = SMC_KEY(rest); + shutdown_flag = 0; + break; + case SYS_POWER_OFF: + val = SMC_KEY(offw); + shutdown_flag = 1; + break; + default: + return NOTIFY_DONE; + } + + dev_info(reboot->dev, "Preparing for reboot (%p4ch)\n", &val); + + /* On the Mac Mini, this will turn off the LED for power off */ + if (apple_smc_write_u32(reboot->smc, SMC_KEY(MBSE), val) < 0) + dev_err(reboot->dev, "Failed to issue MBSE = %p4ch (reboot_prepare)\n", &val); + + /* Set the boot_stage to 0, which means we're doing a clean shutdown/reboot. */ + if (reboot->nvm.boot_stage && + nvmem_cell_set_u8(reboot->nvm.boot_stage, BOOT_STAGE_SHUTDOWN) < 0) + dev_err(reboot->dev, "Failed to write boot_stage\n"); + + /* + * Set the PMU flag to actually reboot into the off state. + * Without this, the device will just reboot. We make it optional in case it is no longer + * necessary on newer hardware. 
+ */ + if (reboot->nvm.shutdown_flag && + nvmem_cell_set_u8(reboot->nvm.shutdown_flag, shutdown_flag) < 0) + dev_err(reboot->dev, "Failed to write shutdown_flag\n"); + + return NOTIFY_OK; +} + +static void macsmc_power_init_error_counts(struct macsmc_reboot *reboot) +{ + int boot_error_count, panic_count; + + if (!reboot->nvm.boot_error_count || !reboot->nvm.panic_count) + return; + + boot_error_count = nvmem_cell_get_u8(reboot->nvm.boot_error_count); + if (boot_error_count < 0) { + dev_err(reboot->dev, "Failed to read boot_error_count (%d)\n", boot_error_count); + return; + } + + panic_count = nvmem_cell_get_u8(reboot->nvm.panic_count); + if (panic_count < 0) { + dev_err(reboot->dev, "Failed to read panic_count (%d)\n", panic_count); + return; + } + + if (!boot_error_count && !panic_count) + return; + + dev_warn(reboot->dev, "PMU logged %d boot error(s) and %d panic(s)\n", + boot_error_count, panic_count); + + if (nvmem_cell_set_u8(reboot->nvm.panic_count, 0) < 0) + dev_err(reboot->dev, "Failed to reset panic_count\n"); + if (nvmem_cell_set_u8(reboot->nvm.boot_error_count, 0) < 0) + dev_err(reboot->dev, "Failed to reset boot_error_count\n"); +} + +static int macsmc_reboot_probe(struct platform_device *pdev) +{ + struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent); + struct macsmc_reboot *reboot; + int ret, i; + + /* Ignore devices without this functionality */ + if (!apple_smc_key_exists(smc, SMC_KEY(MBSE))) + return -ENODEV; + + reboot = devm_kzalloc(&pdev->dev, sizeof(*reboot), GFP_KERNEL); + if (!reboot) + return -ENOMEM; + + reboot->dev = &pdev->dev; + reboot->smc = smc; + + platform_set_drvdata(pdev, reboot); + + pdev->dev.of_node = of_get_child_by_name(pdev->dev.parent->of_node, "reboot"); + + for (i = 0; i < ARRAY_SIZE(nvmem_names); i++) { + struct nvmem_cell *cell; + cell = devm_nvmem_cell_get(&pdev->dev, + nvmem_names[i]); + if (IS_ERR(cell)) { + if (PTR_ERR(cell) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_warn(&pdev->dev, "Missing NVMEM cell %s (%ld)\n", + nvmem_names[i], PTR_ERR(cell)); + /* Non fatal, we'll deal with it */ + cell = NULL; + } + reboot->nvm_cells[i] = cell; + } + + /* Set the boot_stage to indicate we're running the OS kernel */ + if (reboot->nvm.boot_stage && + nvmem_cell_set_u8(reboot->nvm.boot_stage, BOOT_STAGE_KERNEL_STARTED) < 0) + dev_err(reboot->dev, "Failed to write boot_stage\n"); + + /* Display and clear the error counts */ + macsmc_power_init_error_counts(reboot); + + reboot->reboot_notify.notifier_call = macsmc_reboot_notify; + + ret = devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_HIGH, + macsmc_power_off, reboot); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Failed to register power-off handler\n"); + + ret = devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_RESTART, SYS_OFF_PRIO_HIGH, + macsmc_restart, reboot); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Failed to register restart handler\n"); + + ret = devm_register_reboot_notifier(&pdev->dev, &reboot->reboot_notify); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Failed to register reboot notifier\n"); + + dev_info(&pdev->dev, "Handling reboot and poweroff requests via SMC\n"); + + if (device_create_file(&pdev->dev, &dev_attr_ac_power_mode)) + dev_warn(&pdev->dev, "could not create sysfs file\n"); + + return 0; +} + +static int macsmc_reboot_remove(struct platform_device *pdev) +{ + device_remove_file(&pdev->dev, &dev_attr_ac_power_mode); + + return 0; +} + + +static struct platform_driver macsmc_reboot_driver = { + .driver = { 
+ .name = "macsmc-reboot", + .owner = THIS_MODULE, + }, + .probe = macsmc_reboot_probe, + .remove = macsmc_reboot_remove, +}; +module_platform_driver(macsmc_reboot_driver); + +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_DESCRIPTION("Apple SMC reboot/poweroff driver"); +MODULE_AUTHOR("Hector Martin <marcan@marcan.st>"); +MODULE_ALIAS("platform:macsmc-reboot"); diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig index 1aa8323ad9f6..780941701795 100644 --- a/drivers/power/supply/Kconfig +++ b/drivers/power/supply/Kconfig @@ -897,4 +897,11 @@ config BATTERY_UG3105 device is off or suspended, the functionality of this driver is limited to reporting capacity only. +config CHARGER_MACSMC + tristate "Apple SMC Charger / Battery support" + depends on APPLE_SMC + help + Say Y here to enable support for the charger and battery controls on + Apple SMC controllers, as used on Apple Silicon Macs. + endif # POWER_SUPPLY diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile index 7f02f36aea55..bba0c6ad4101 100644 --- a/drivers/power/supply/Makefile +++ b/drivers/power/supply/Makefile @@ -108,3 +108,4 @@ obj-$(CONFIG_BATTERY_ACER_A500) += acer_a500_battery.o obj-$(CONFIG_BATTERY_SURFACE) += surface_battery.o obj-$(CONFIG_CHARGER_SURFACE) += surface_charger.o obj-$(CONFIG_BATTERY_UG3105) += ug3105_battery.o +obj-$(CONFIG_CHARGER_MACSMC) += macsmc_power.o diff --git a/drivers/power/supply/macsmc_power.c b/drivers/power/supply/macsmc_power.c new file mode 100644 index 000000000000..dee48ee8b5a4 --- /dev/null +++ b/drivers/power/supply/macsmc_power.c @@ -0,0 +1,516 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Apple SMC Power/Battery Management + * Copyright The Asahi Linux Contributors + */ + +#include <linux/ctype.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/mfd/core.h> +#include <linux/mfd/macsmc.h> +#include <linux/power_supply.h> + +#define MAX_STRING_LENGTH 256 + +struct macsmc_power { + struct device *dev; + struct apple_smc *smc; + + struct power_supply *batt; + char model_name[MAX_STRING_LENGTH]; + char serial_number[MAX_STRING_LENGTH]; + char mfg_date[MAX_STRING_LENGTH]; + + struct power_supply *ac; + + struct notifier_block nb; +}; + +#define CHNC_BATTERY_FULL BIT(0) +#define CHNC_NO_CHARGER BIT(7) +#define CHNC_NOCHG_CH0C BIT(14) +#define CHNC_NOCHG_CH0B_CH0K BIT(15) +#define CHNC_BATTERY_FULL_2 BIT(18) +#define CHNC_BMS_BUSY BIT(23) +#define CHNC_NOAC_CH0J BIT(53) +#define CHNC_NOAC_CH0I BIT(54) + +#define CH0R_LOWER_FLAGS GENMASK(15, 0) +#define CH0R_NOAC_CH0I BIT(0) +#define CH0R_NOAC_CH0J BIT(5) +#define CH0R_BMS_BUSY BIT(8) +#define CH0R_NOAC_CH0K BIT(9) + +#define CH0X_CH0C BIT(0) +#define CH0X_CH0B BIT(1) + +static int macsmc_battery_get_status(struct macsmc_power *power) +{ + u64 nocharge_flags; + u32 nopower_flags; + u16 ac_current; + int ret; + + /* + * Note: there are fallbacks in case some of these SMC keys disappear in the future + * or are not present on some machines. We treat the absence of the CHCE/CHCC/BSFC/CHSC + * flags as an error, since they are quite fundamental and simple booleans. + */ + + /* + * If power input is inhibited, we are definitely discharging. + * However, if the only reason is the BMS is doing a balancing cycle, + * go ahead and ignore that one to avoid spooking users. 
+ */ + ret = apple_smc_read_u32(power->smc, SMC_KEY(CH0R), &nopower_flags); + if (!ret && (nopower_flags & CH0R_LOWER_FLAGS & ~CH0R_BMS_BUSY)) + return POWER_SUPPLY_STATUS_DISCHARGING; + + /* If no charger is present, we are definitely discharging. */ + ret = apple_smc_read_flag(power->smc, SMC_KEY(CHCE)); + if (ret < 0) + return ret; + else if (!ret) + return POWER_SUPPLY_STATUS_DISCHARGING; + + /* If AC is not charge capable, we are definitely discharging. */ + ret = apple_smc_read_flag(power->smc, SMC_KEY(CHCC)); + if (ret < 0) + return ret; + else if (!ret) + return POWER_SUPPLY_STATUS_DISCHARGING; + + /* + * If the AC input current limit is tiny or 0, we are discharging no matter + * how much the BMS believes it can charge. + */ + ret = apple_smc_read_u16(power->smc, SMC_KEY(AC-i), &ac_current); + if (!ret && ac_current < 100) + return POWER_SUPPLY_STATUS_DISCHARGING; + + /* If the battery is full, report it as such. */ + ret = apple_smc_read_flag(power->smc, SMC_KEY(BSFC)); + if (ret < 0) + return ret; + else if (ret) + return POWER_SUPPLY_STATUS_FULL; + + /* If there are reasons we aren't charging... */ + ret = apple_smc_read_u64(power->smc, SMC_KEY(CHNC), &nocharge_flags); + if (!ret) { + /* Perhaps the battery is full after all */ + if (nocharge_flags & CHNC_BATTERY_FULL) + return POWER_SUPPLY_STATUS_FULL; + /* Or maybe the BMS is just busy doing something, if so call it charging anyway */ + else if (nocharge_flags == CHNC_BMS_BUSY) + return POWER_SUPPLY_STATUS_CHARGING; + /* If we have other reasons we aren't charging, say we aren't */ + else if (nocharge_flags) + return POWER_SUPPLY_STATUS_NOT_CHARGING; + /* Else we're either charging or about to charge */ + else + return POWER_SUPPLY_STATUS_CHARGING; + } + + /* As a fallback, use the system charging flag. */ + ret = apple_smc_read_flag(power->smc, SMC_KEY(CHSC)); + if (ret < 0) + return ret; + if (!ret) + return POWER_SUPPLY_STATUS_NOT_CHARGING; + else + return POWER_SUPPLY_STATUS_CHARGING; +} + +static int macsmc_battery_get_charge_behaviour(struct macsmc_power *power) +{ + int ret; + u8 val; + + /* CH0I returns a bitmask like the low byte of CH0R */ + ret = apple_smc_read_u8(power->smc, SMC_KEY(CH0I), &val); + if (ret) + return ret; + if (val & CH0R_NOAC_CH0I) + return POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE; + + /* CH0C returns a bitmask containing CH0B/CH0C flags */ + ret = apple_smc_read_u8(power->smc, SMC_KEY(CH0C), &val); + if (ret) + return ret; + if (val & CH0X_CH0C) + return POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE; + else + return POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO; +} + +static int macsmc_battery_set_charge_behaviour(struct macsmc_power *power, int val) +{ + u8 ch0i, ch0c; + int ret; + + /* + * CH0I/CH0C are "hard" controls that will allow the battery to run down to 0. + * CH0K/CH0B are "soft" controls that are reset to 0 when SOC drops below 50%; + * we don't expose these yet. 
+ */ + + switch (val) { + case POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO: + ch0i = ch0c = 0; + break; + case POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE: + ch0i = 0; + ch0c = 1; + break; + case POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE: + ch0i = 1; + ch0c = 0; + break; + default: + return -EINVAL; + } + + ret = apple_smc_write_u8(power->smc, SMC_KEY(CH0I), ch0i); + if (ret) + return ret; + return apple_smc_write_u8(power->smc, SMC_KEY(CH0C), ch0c); +} + +static int macsmc_battery_get_date(const char *s, int *out) +{ + if (!isdigit(s[0]) || !isdigit(s[1])) + return -ENOTSUPP; + + *out = (s[0] - '0') * 10 + s[1] - '0'; + return 0; +} + +static int macsmc_battery_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct macsmc_power *power = power_supply_get_drvdata(psy); + int ret = 0; + u8 vu8; + u16 vu16; + u32 vu32; + s16 vs16; + s32 vs32; + s64 vs64; + + switch (psp) { + case POWER_SUPPLY_PROP_STATUS: + val->intval = macsmc_battery_get_status(power); + ret = val->intval < 0 ? val->intval : 0; + break; + case POWER_SUPPLY_PROP_PRESENT: + val->intval = 1; + break; + case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR: + val->intval = macsmc_battery_get_charge_behaviour(power); + ret = val->intval < 0 ? val->intval : 0; + break; + case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW: + ret = apple_smc_read_u16(power->smc, SMC_KEY(B0TE), &vu16); + val->intval = vu16 == 0xffff ? 0 : vu16 * 60; + break; + case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: + ret = apple_smc_read_u16(power->smc, SMC_KEY(B0TF), &vu16); + val->intval = vu16 == 0xffff ? 0 : vu16 * 60; + break; + case POWER_SUPPLY_PROP_CAPACITY: + ret = apple_smc_read_u8(power->smc, SMC_KEY(BUIC), &vu8); + val->intval = vu8; + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + ret = apple_smc_read_u16(power->smc, SMC_KEY(B0AV), &vu16); + val->intval = vu16 * 1000; + break; + case POWER_SUPPLY_PROP_CURRENT_NOW: + ret = apple_smc_read_s16(power->smc, SMC_KEY(B0AC), &vs16); + val->intval = vs16 * 1000; + break; + case POWER_SUPPLY_PROP_POWER_NOW: + ret = apple_smc_read_s32(power->smc, SMC_KEY(B0AP), &vs32); + val->intval = vs32 * 1000; + break; + case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: + ret = apple_smc_read_u16(power->smc, SMC_KEY(BITV), &vu16); + val->intval = vu16 * 1000; + break; + case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT: + ret = apple_smc_read_u16(power->smc, SMC_KEY(B0RC), &vu16); + val->intval = vu16 * 1000; + break; + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: + ret = apple_smc_read_u32(power->smc, SMC_KEY(CSIL), &vu32); + val->intval = vu32 * 1000; + break; + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: + ret = apple_smc_read_u16(power->smc, SMC_KEY(B0RI), &vu16); + val->intval = vu16 * 1000; + break; + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: + ret = apple_smc_read_u16(power->smc, SMC_KEY(B0RV), &vu16); + val->intval = vu16 * 1000; + break; + case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: + ret = apple_smc_read_u16(power->smc, SMC_KEY(B0DC), &vu16); + val->intval = vu16 * 1000; + break; + case POWER_SUPPLY_PROP_CHARGE_FULL: + ret = apple_smc_read_u16(power->smc, SMC_KEY(B0FC), &vu16); + val->intval = vu16 * 1000; + break; + case POWER_SUPPLY_PROP_CHARGE_NOW: + ret = apple_smc_read_u16(power->smc, SMC_KEY(B0RM), &vu16); + val->intval = swab16(vu16) * 1000; + break; + case POWER_SUPPLY_PROP_TEMP: + ret = apple_smc_read_u16(power->smc, SMC_KEY(B0AT), &vu16); + val->intval = vu16 - 2732; + break; + case POWER_SUPPLY_PROP_CHARGE_COUNTER: + ret = apple_smc_read_s64(power->smc, 
SMC_KEY(BAAC), &vs64); + val->intval = vs64; + break; + case POWER_SUPPLY_PROP_CYCLE_COUNT: + ret = apple_smc_read_u16(power->smc, SMC_KEY(B0CT), &vu16); + val->intval = vu16; + break; + case POWER_SUPPLY_PROP_SCOPE: + val->intval = POWER_SUPPLY_SCOPE_SYSTEM; + break; + case POWER_SUPPLY_PROP_HEALTH: + ret = apple_smc_read_flag(power->smc, SMC_KEY(BBAD)); + val->intval = ret == 1 ? POWER_SUPPLY_HEALTH_DEAD : POWER_SUPPLY_HEALTH_GOOD; + ret = ret < 0 ? ret : 0; + break; + case POWER_SUPPLY_PROP_MODEL_NAME: + val->strval = power->model_name; + break; + case POWER_SUPPLY_PROP_SERIAL_NUMBER: + val->strval = power->serial_number; + break; + case POWER_SUPPLY_PROP_MANUFACTURE_YEAR: + ret = macsmc_battery_get_date(&power->mfg_date[0], &val->intval); + val->intval += 2000 - 8; /* -8 is a fixup for a firmware bug... */ + break; + case POWER_SUPPLY_PROP_MANUFACTURE_MONTH: + ret = macsmc_battery_get_date(&power->mfg_date[2], &val->intval); + break; + case POWER_SUPPLY_PROP_MANUFACTURE_DAY: + ret = macsmc_battery_get_date(&power->mfg_date[4], &val->intval); + break; + default: + return -EINVAL; + } + + return ret; +} + +static int macsmc_battery_set_property(struct power_supply *psy, + enum power_supply_property psp, + const union power_supply_propval *val) +{ + struct macsmc_power *power = power_supply_get_drvdata(psy); + + switch (psp) { + case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR: + return macsmc_battery_set_charge_behaviour(power, val->intval); + default: + return -EINVAL; + } +} + +static int macsmc_battery_property_is_writeable(struct power_supply *psy, + enum power_supply_property psp) +{ + switch (psp) { + case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR: + return true; + default: + return false; + } +} + +static enum power_supply_property macsmc_battery_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_POWER_NOW, + POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, + POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, + POWER_SUPPLY_PROP_CHARGE_FULL, + POWER_SUPPLY_PROP_CHARGE_NOW, + POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_CHARGE_COUNTER, + POWER_SUPPLY_PROP_CYCLE_COUNT, + POWER_SUPPLY_PROP_SCOPE, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_MODEL_NAME, + POWER_SUPPLY_PROP_SERIAL_NUMBER, + POWER_SUPPLY_PROP_MANUFACTURE_YEAR, + POWER_SUPPLY_PROP_MANUFACTURE_MONTH, + POWER_SUPPLY_PROP_MANUFACTURE_DAY, +}; + +static const struct power_supply_desc macsmc_battery_desc = { + .name = "macsmc-battery", + .type = POWER_SUPPLY_TYPE_BATTERY, + .get_property = macsmc_battery_get_property, + .set_property = macsmc_battery_set_property, + .property_is_writeable = macsmc_battery_property_is_writeable, + .properties = macsmc_battery_props, + .num_properties = ARRAY_SIZE(macsmc_battery_props), +}; + +static int macsmc_ac_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct macsmc_power *power = power_supply_get_drvdata(psy); + int ret = 0; + u16 vu16; + u32 vu32; + + switch (psp) { + case POWER_SUPPLY_PROP_ONLINE: + ret = apple_smc_read_u32(power->smc, SMC_KEY(CHIS), &vu32); + val->intval = !!vu32; + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + ret = 
apple_smc_read_u16(power->smc, SMC_KEY(AC-n), &vu16); + val->intval = vu16 * 1000; + break; + case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: + ret = apple_smc_read_u16(power->smc, SMC_KEY(AC-i), &vu16); + val->intval = vu16 * 1000; + break; + case POWER_SUPPLY_PROP_INPUT_POWER_LIMIT: + ret = apple_smc_read_u32(power->smc, SMC_KEY(ACPW), &vu32); + val->intval = vu32 * 1000; + break; + default: + return -EINVAL; + } + + return ret; +} + +static enum power_supply_property macsmc_ac_props[] = { + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, + POWER_SUPPLY_PROP_INPUT_POWER_LIMIT, +}; + +static const struct power_supply_desc macsmc_ac_desc = { + .name = "macsmc-ac", + .type = POWER_SUPPLY_TYPE_MAINS, + .get_property = macsmc_ac_get_property, + .properties = macsmc_ac_props, + .num_properties = ARRAY_SIZE(macsmc_ac_props), +}; + +static int macsmc_power_event(struct notifier_block *nb, unsigned long event, void *data) +{ + struct macsmc_power *power = container_of(nb, struct macsmc_power, nb); + + if ((event & 0xffffff00) == 0x71010100) { + bool charging = (event & 0xff) != 0; + + dev_info(power->dev, "Charging: %d\n", charging); + power_supply_changed(power->batt); + power_supply_changed(power->ac); + + return NOTIFY_OK; + } + + return NOTIFY_DONE; +} + +static int macsmc_power_probe(struct platform_device *pdev) +{ + struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent); + struct power_supply_config psy_cfg = {}; + struct macsmc_power *power; + int ret; + + power = devm_kzalloc(&pdev->dev, sizeof(*power), GFP_KERNEL); + if (!power) + return -ENOMEM; + + power->dev = &pdev->dev; + power->smc = smc; + dev_set_drvdata(&pdev->dev, power); + + /* Ignore devices without a charger/battery */ + if (macsmc_battery_get_status(power) <= POWER_SUPPLY_STATUS_UNKNOWN) + return -ENODEV; + + /* Fetch string properties */ + apple_smc_read(smc, SMC_KEY(BMDN), power->model_name, sizeof(power->model_name) - 1); + apple_smc_read(smc, SMC_KEY(BMSN), power->serial_number, sizeof(power->serial_number) - 1); + apple_smc_read(smc, SMC_KEY(BMDT), power->mfg_date, sizeof(power->mfg_date) - 1); + + /* Turn off the "optimized battery charging" flags, in case macOS left them on */ + apple_smc_write_u8(power->smc, SMC_KEY(CH0K), 0); + apple_smc_write_u8(power->smc, SMC_KEY(CH0B), 0); + + psy_cfg.drv_data = power; + power->batt = devm_power_supply_register(&pdev->dev, &macsmc_battery_desc, &psy_cfg); + if (IS_ERR(power->batt)) { + dev_err(&pdev->dev, "Failed to register battery\n"); + ret = PTR_ERR(power->batt); + return ret; + } + + power->ac = devm_power_supply_register(&pdev->dev, &macsmc_ac_desc, &psy_cfg); + if (IS_ERR(power->ac)) { + dev_err(&pdev->dev, "Failed to register AC adapter\n"); + ret = PTR_ERR(power->ac); + return ret; + } + + power->nb.notifier_call = macsmc_power_event; + apple_smc_register_notifier(power->smc, &power->nb); + + return 0; +} + +static int macsmc_power_remove(struct platform_device *pdev) +{ + struct macsmc_power *power = dev_get_drvdata(&pdev->dev); + + apple_smc_unregister_notifier(power->smc, &power->nb); + + return 0; +} + +static struct platform_driver macsmc_power_driver = { + .driver = { + .name = "macsmc-power", + .owner = THIS_MODULE, + }, + .probe = macsmc_power_probe, + .remove = macsmc_power_remove, +}; +module_platform_driver(macsmc_power_driver); + +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_DESCRIPTION("Apple SMC battery and power management driver"); +MODULE_AUTHOR("Hector Martin <marcan@marcan.st>"); 
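The charger handling in macsmc_power_event() above is the only consumer of the SMC notifier chain in this patch. A condensed sketch of the registration pattern, reusing the 0x710101xx event class observed there (other event codes are firmware-defined and not spelled out in this series; the example names are made up):

#include <linux/mfd/macsmc.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int example_smc_event(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	/* The upper bytes select the event class, the low byte is the payload */
	if ((event & 0xffffff00) == 0x71010100) {
		pr_info("charger %sconnected\n", (event & 0xff) ? "" : "dis");
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_smc_event,
};

/* Typically registered from a sub-driver probe against the core instance */
static int example_register(struct apple_smc *smc)
{
	return apple_smc_register_notifier(smc, &example_nb);
}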
+MODULE_ALIAS("platform:macsmc-power"); diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index a00f901b5c1d..8717f2d9381c 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -1973,4 +1973,17 @@ config RTC_DRV_MSC313 This driver can also be built as a module, if so, the module will be called "rtc-msc313". +config RTC_DRV_MACSMC + tristate "Apple Mac SMC RTC" + depends on ARCH_APPLE || COMPILE_TEST + depends on APPLE_SMC + depends on OF + default ARCH_APPLE + help + If you say yes here you get support for RTC functions + inside Apple SPMI PMUs. + + To compile this driver as a module, choose M here: the + module will be called rtc-macsmc. + endif # RTC_CLASS diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index fb04467b652d..68519801a320 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -89,6 +89,7 @@ obj-$(CONFIG_RTC_DRV_M41T94) += rtc-m41t94.o obj-$(CONFIG_RTC_DRV_M48T35) += rtc-m48t35.o obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o +obj-$(CONFIG_RTC_DRV_MACSMC) += rtc-macsmc.o obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o obj-$(CONFIG_RTC_DRV_MAX6916) += rtc-max6916.o diff --git a/drivers/rtc/rtc-macsmc.c b/drivers/rtc/rtc-macsmc.c new file mode 100644 index 000000000000..34730c925248 --- /dev/null +++ b/drivers/rtc/rtc-macsmc.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Apple SMC RTC driver + * Copyright The Asahi Linux Contributors + */ + +#include <linux/bitops.h> +#include <linux/mfd/core.h> +#include <linux/mfd/macsmc.h> +#include <linux/module.h> +#include <linux/nvmem-consumer.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/rtc.h> +#include <linux/slab.h> + +/* 48-bit RTC */ +#define RTC_BYTES 6 +#define RTC_BITS (8 * RTC_BYTES) + +/* 32768 Hz clock */ +#define RTC_SEC_SHIFT 15 + +struct macsmc_rtc { + struct device *dev; + struct apple_smc *smc; + struct rtc_device *rtc_dev; + struct nvmem_cell *rtc_offset; +}; + +static int macsmc_rtc_get_time(struct device *dev, struct rtc_time *tm) +{ + struct macsmc_rtc *rtc = dev_get_drvdata(dev); + u64 ctr = 0, off = 0; + time64_t now; + void *p_off; + size_t len; + int ret; + + ret = apple_smc_read(rtc->smc, SMC_KEY(CLKM), &ctr, RTC_BYTES); + if (ret != RTC_BYTES) + return ret < 0 ? ret : -EIO; + + p_off = nvmem_cell_read(rtc->rtc_offset, &len); + if (IS_ERR(p_off)) + return PTR_ERR(p_off); + if (len < RTC_BYTES) { + kfree(p_off); + return -EIO; + } + + memcpy(&off, p_off, RTC_BYTES); + kfree(p_off); + + /* Sign extend from 48 to 64 bits, then arithmetic shift right 15 bits to get seconds */ + now = sign_extend64(ctr + off, RTC_BITS - 1) >> RTC_SEC_SHIFT; + rtc_time64_to_tm(now, tm); + + return ret; +} + +static int macsmc_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct macsmc_rtc *rtc = dev_get_drvdata(dev); + u64 ctr = 0, off = 0; + int ret; + + ret = apple_smc_read(rtc->smc, SMC_KEY(CLKM), &ctr, RTC_BYTES); + if (ret != RTC_BYTES) + return ret < 0 ? 
ret : -EIO; + + /* This sets the offset such that the set second begins now */ + off = (rtc_tm_to_time64(tm) << RTC_SEC_SHIFT) - ctr; + return nvmem_cell_write(rtc->rtc_offset, &off, RTC_BYTES); +} + +static const struct rtc_class_ops macsmc_rtc_ops = { + .read_time = macsmc_rtc_get_time, + .set_time = macsmc_rtc_set_time, +}; + +static int macsmc_rtc_probe(struct platform_device *pdev) +{ + struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent); + struct macsmc_rtc *rtc; + + /* Ignore devices without this functionality */ + if (!apple_smc_key_exists(smc, SMC_KEY(CLKM))) + return -ENODEV; + + rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL); + if (!rtc) + return -ENOMEM; + + rtc->dev = &pdev->dev; + rtc->smc = smc; + + pdev->dev.of_node = of_get_child_by_name(pdev->dev.parent->of_node, "rtc"); + + rtc->rtc_offset = devm_nvmem_cell_get(&pdev->dev, "rtc_offset"); + if (IS_ERR(rtc->rtc_offset)) + return dev_err_probe(&pdev->dev, PTR_ERR(rtc->rtc_offset), + "Failed to get rtc_offset NVMEM cell\n"); + + rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(rtc->rtc_dev)) + return PTR_ERR(rtc->rtc_dev); + + rtc->rtc_dev->ops = &macsmc_rtc_ops; + rtc->rtc_dev->range_min = S64_MIN >> (RTC_SEC_SHIFT + (64 - RTC_BITS)); + rtc->rtc_dev->range_max = S64_MAX >> (RTC_SEC_SHIFT + (64 - RTC_BITS)); + + platform_set_drvdata(pdev, rtc); + + return devm_rtc_register_device(rtc->rtc_dev); +} + +static struct platform_driver macsmc_rtc_driver = { + .driver = { + .name = "macsmc-rtc", + .owner = THIS_MODULE, + }, + .probe = macsmc_rtc_probe, +}; +module_platform_driver(macsmc_rtc_driver); + +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_DESCRIPTION("Apple SMC RTC driver"); +MODULE_AUTHOR("Hector Martin <marcan@marcan.st>"); +MODULE_ALIAS("platform:macsmc-rtc"); diff --git a/drivers/soc/apple/Kconfig b/drivers/soc/apple/Kconfig index a1596fefacff..694a44968f76 100644 --- a/drivers/soc/apple/Kconfig +++ b/drivers/soc/apple/Kconfig @@ -30,6 +30,20 @@ config APPLE_RTKIT Say 'y' here if you have an Apple SoC. +config APPLE_RTKIT_HELPER + tristate "Apple Generic RTKit helper co-processor" + depends on APPLE_RTKIT + depends on ARCH_APPLE || COMPILE_TEST + default ARCH_APPLE + help + Apple SoCs such as the M1 come with various co-processors running + their proprietary RTKit operating system. This option enables support + for a generic co-processor that does not implement any additional + in-band communications. It can be used for testing purposes, or for + coprocessors such as MTP that communicate over a different interface. + + Say 'y' here if you have an Apple SoC. + config APPLE_SART tristate "Apple SART DMA address filter" depends on ARCH_APPLE || COMPILE_TEST @@ -41,6 +55,16 @@ config APPLE_SART Say 'y' here if you have an Apple SoC. +config APPLE_DOCKCHANNEL + tristate "Apple DockChannel FIFO" + depends on ARCH_APPLE || COMPILE_TEST + default ARCH_APPLE + help + DockChannel is a simple FIFO used on Apple SoCs for debug and inter-processor + communications. + + Say 'y' here if you have an Apple SoC. 
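The DockChannel FIFO enabled here is implemented in drivers/soc/apple/dockchannel.c below. A hedged sketch of how a consumer is expected to use it, assuming the dockchannel_init()/dockchannel_send()/dockchannel_recv() prototypes in <linux/soc/apple/dockchannel.h> match the definitions below; the payload, buffer sizes and function name are made up:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/soc/apple/dockchannel.h>

static int example_dockchannel_xfer(struct platform_device *pdev)
{
	struct dockchannel *dc;
	u8 cmd[4] = { 0x01, 0x02, 0x03, 0x04 };	/* made-up payload */
	u8 reply[16];
	int ret;

	/* Maps the "config"/"data" MMIO windows and requests both FIFO IRQs */
	dc = dockchannel_init(pdev);
	if (IS_ERR(dc))
		return PTR_ERR(dc);

	/* Both calls block, with a 1 s timeout, until the FIFO drains/fills */
	ret = dockchannel_send(dc, cmd, sizeof(cmd));
	if (ret < 0)
		return ret;

	ret = dockchannel_recv(dc, reply, sizeof(reply));
	return ret < 0 ? ret : 0;
}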
+ endmenu endif diff --git a/drivers/soc/apple/Makefile b/drivers/soc/apple/Makefile index e293770cf66d..07a95a883d3d 100644 --- a/drivers/soc/apple/Makefile +++ b/drivers/soc/apple/Makefile @@ -4,5 +4,11 @@ obj-$(CONFIG_APPLE_PMGR_PWRSTATE) += apple-pmgr-pwrstate.o obj-$(CONFIG_APPLE_RTKIT) += apple-rtkit.o apple-rtkit-y = rtkit.o rtkit-crashlog.o +obj-$(CONFIG_APPLE_RTKIT_HELPER) += apple-rtkit-helper.o +apple-rtkit-helper-y = rtkit-helper.o + obj-$(CONFIG_APPLE_SART) += apple-sart.o apple-sart-y = sart.o + +obj-$(CONFIG_APPLE_DOCKCHANNEL) += apple-dockchannel.o +apple-dockchannel-y = dockchannel.o diff --git a/drivers/soc/apple/dockchannel.c b/drivers/soc/apple/dockchannel.c new file mode 100644 index 000000000000..4711b3364bfe --- /dev/null +++ b/drivers/soc/apple/dockchannel.c @@ -0,0 +1,407 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Apple DockChannel FIFO driver + * Copyright The Asahi Linux Contributors + */ + +#include <asm/unaligned.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/irq.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/soc/apple/dockchannel.h> +#include <linux/of.h> +#include <linux/of_platform.h> + +#define DOCKCHANNEL_MAX_IRQ 32 + +#define DOCKCHANNEL_TX_TIMEOUT_MS 1000 +#define DOCKCHANNEL_RX_TIMEOUT_MS 1000 + +#define IRQ_MASK 0x0 +#define IRQ_FLAG 0x4 + +#define IRQ_TX BIT(0) +#define IRQ_RX BIT(1) + +#define CONFIG_TX_THRESH 0x0 +#define CONFIG_RX_THRESH 0x4 + +#define DATA_TX8 0x4 +#define DATA_TX16 0x8 +#define DATA_TX24 0xc +#define DATA_TX32 0x10 +#define DATA_TX_FREE 0x14 +#define DATA_RX8 0x18 +#define DATA_RX16 0x20 +#define DATA_RX24 0x24 +#define DATA_RX32 0x28 +#define DATA_RX_COUNT 0x2c + +struct dockchannel { + struct device *dev; + int tx_irq; + int rx_irq; + + void __iomem *config_base; + void __iomem *data_base; + + u32 fifo_size; + bool awaiting; + struct completion tx_comp; + struct completion rx_comp; + + void *cookie; + void (*data_available)(void *cookie, size_t avail); +}; + +struct dockchannel_common { + struct device *dev; + struct irq_domain *domain; + int irq; + + void __iomem *irq_base; +}; + +/* Dockchannel FIFO functions */ + +static irqreturn_t dockchannel_tx_irq(int irq, void *data) +{ + struct dockchannel *dockchannel = data; + + disable_irq_nosync(irq); + complete(&dockchannel->tx_comp); + + return IRQ_HANDLED; +} + +static irqreturn_t dockchannel_rx_irq(int irq, void *data) +{ + struct dockchannel *dockchannel = data; + + disable_irq_nosync(irq); + + if (dockchannel->awaiting) { + return IRQ_WAKE_THREAD; + } else { + complete(&dockchannel->rx_comp); + return IRQ_HANDLED; + } +} + +static irqreturn_t dockchannel_rx_irq_thread(int irq, void *data) +{ + struct dockchannel *dockchannel = data; + size_t avail = readl_relaxed(dockchannel->data_base + DATA_RX_COUNT); + + dockchannel->awaiting = false; + dockchannel->data_available(dockchannel->cookie, avail); + + return IRQ_HANDLED; +} + +int dockchannel_send(struct dockchannel *dockchannel, const void *buf, size_t count) +{ + size_t left = count; + const u8 *p = buf; + + while (left > 0) { + size_t avail = readl_relaxed(dockchannel->data_base + DATA_TX_FREE); + size_t block = min(left, avail); + + if (avail == 0) { + size_t threshold = min((size_t)(dockchannel->fifo_size / 2), left); + + writel_relaxed(threshold, dockchannel->config_base + CONFIG_TX_THRESH); + reinit_completion(&dockchannel->tx_comp); + enable_irq(dockchannel->tx_irq); + + if 
(!wait_for_completion_timeout(&dockchannel->tx_comp, + msecs_to_jiffies(DOCKCHANNEL_TX_TIMEOUT_MS))) { + disable_irq(dockchannel->tx_irq); + return -ETIMEDOUT; + } + + continue; + } + + while (block >= 4) { + writel_relaxed(get_unaligned_le32(p), dockchannel->data_base + DATA_TX32); + p += 4; + left -= 4; + block -= 4; + } + while (block > 0) { + writeb_relaxed(*p++, dockchannel->data_base + DATA_TX8); + left--; + block--; + } + } + + return count; +} +EXPORT_SYMBOL(dockchannel_send); + +int dockchannel_recv(struct dockchannel *dockchannel, void *buf, size_t count) +{ + size_t left = count; + u8 *p = buf; + + while (left > 0) { + size_t avail = readl_relaxed(dockchannel->data_base + DATA_RX_COUNT); + size_t block = min(left, avail); + + if (avail == 0) { + size_t threshold = min((size_t)(dockchannel->fifo_size / 2), left); + + writel_relaxed(threshold, dockchannel->config_base + CONFIG_RX_THRESH); + reinit_completion(&dockchannel->rx_comp); + enable_irq(dockchannel->rx_irq); + + if (!wait_for_completion_timeout(&dockchannel->rx_comp, + msecs_to_jiffies(DOCKCHANNEL_RX_TIMEOUT_MS))) { + disable_irq(dockchannel->rx_irq); + return -ETIMEDOUT; + } + + continue; + } + + while (block >= 4) { + put_unaligned_le32(readl_relaxed(dockchannel->data_base + DATA_RX32), p); + p += 4; + left -= 4; + block -= 4; + } + while (block > 0) { + *p++ = readl_relaxed(dockchannel->data_base + DATA_RX8) >> 8; + left--; + block--; + } + } + + return count; +} +EXPORT_SYMBOL(dockchannel_recv); + +int dockchannel_await(struct dockchannel *dockchannel, + void (*callback)(void *cookie, size_t avail), + void *cookie, size_t count) +{ + size_t threshold = min((size_t)dockchannel->fifo_size, count); + + if (!count) { + dockchannel->awaiting = false; + disable_irq(dockchannel->rx_irq); + return 0; + } + + dockchannel->data_available = callback; + dockchannel->cookie = cookie; + dockchannel->awaiting = true; + writel_relaxed(threshold, dockchannel->config_base + CONFIG_RX_THRESH); + enable_irq(dockchannel->rx_irq); + + return threshold; +} +EXPORT_SYMBOL(dockchannel_await); + +struct dockchannel *dockchannel_init(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dockchannel *dockchannel; + int ret; + + dockchannel = devm_kzalloc(dev, sizeof(*dockchannel), GFP_KERNEL); + if (!dockchannel) + return ERR_PTR(-ENOMEM); + + dockchannel->dev = dev; + dockchannel->config_base = devm_platform_ioremap_resource_byname(pdev, "config"); + if (IS_ERR(dockchannel->config_base)) + return (void *)dockchannel->config_base; + + dockchannel->data_base = devm_platform_ioremap_resource_byname(pdev, "data"); + if (IS_ERR(dockchannel->data_base)) + return (void *)dockchannel->data_base; + + ret = of_property_read_u32(dev->of_node, "apple,fifo-size", &dockchannel->fifo_size); + if (ret) + return ERR_PTR(dev_err_probe(dev, ret, "Missing apple,fifo-size property")); + + init_completion(&dockchannel->tx_comp); + init_completion(&dockchannel->rx_comp); + + dockchannel->tx_irq = platform_get_irq_byname(pdev, "tx"); + if (dockchannel->tx_irq <= 0) { + return ERR_PTR(dev_err_probe(dev, dockchannel->tx_irq, + "Failed to get TX IRQ")); + } + + dockchannel->rx_irq = platform_get_irq_byname(pdev, "rx"); + if (dockchannel->rx_irq <= 0) { + return ERR_PTR(dev_err_probe(dev, dockchannel->rx_irq, + "Failed to get RX IRQ")); + } + + ret = devm_request_irq(dev, dockchannel->tx_irq, dockchannel_tx_irq, IRQF_NO_AUTOEN, + "apple-dockchannel-tx", dockchannel); + if (ret) + return ERR_PTR(dev_err_probe(dev, ret, "Failed to request TX IRQ")); + 
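	/*
	 * Both FIFO interrupts are requested with IRQF_NO_AUTOEN: they stay
	 * disabled until dockchannel_send()/dockchannel_recv()/
	 * dockchannel_await() arm them for a specific FIFO threshold, and
	 * the handlers disable them again with disable_irq_nosync() once
	 * they fire.
	 */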
+ ret = devm_request_threaded_irq(dev, dockchannel->rx_irq, dockchannel_rx_irq, + dockchannel_rx_irq_thread, IRQF_NO_AUTOEN, + "apple-dockchannel-rx", dockchannel); + if (ret) + return ERR_PTR(dev_err_probe(dev, ret, "Failed to request RX IRQ")); + + return dockchannel; +} +EXPORT_SYMBOL(dockchannel_init); + + +/* Dockchannel IRQchip */ + +static void dockchannel_irq(struct irq_desc *desc) +{ + unsigned int irq = irq_desc_get_irq(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + struct dockchannel_common *dcc = irq_get_handler_data(irq); + unsigned long flags = readl_relaxed(dcc->irq_base + IRQ_FLAG); + int bit; + + chained_irq_enter(chip, desc); + + for_each_set_bit(bit, &flags, DOCKCHANNEL_MAX_IRQ) + generic_handle_domain_irq(dcc->domain, bit); + + chained_irq_exit(chip, desc); +} + +static void dockchannel_irq_ack(struct irq_data *data) +{ + struct dockchannel_common *dcc = irq_data_get_irq_chip_data(data); + unsigned int hwirq = data->hwirq; + + writel_relaxed(BIT(hwirq), dcc->irq_base + IRQ_FLAG); +} + +static void dockchannel_irq_mask(struct irq_data *data) +{ + struct dockchannel_common *dcc = irq_data_get_irq_chip_data(data); + unsigned int hwirq = data->hwirq; + u32 val = readl_relaxed(dcc->irq_base + IRQ_MASK); + + writel_relaxed(val & ~BIT(hwirq), dcc->irq_base + IRQ_MASK); +} + +static void dockchannel_irq_unmask(struct irq_data *data) +{ + struct dockchannel_common *dcc = irq_data_get_irq_chip_data(data); + unsigned int hwirq = data->hwirq; + u32 val = readl_relaxed(dcc->irq_base + IRQ_MASK); + + writel_relaxed(val | BIT(hwirq), dcc->irq_base + IRQ_MASK); +} + +static const struct irq_chip dockchannel_irqchip = { + .name = "dockchannel-irqc", + .irq_ack = dockchannel_irq_ack, + .irq_mask = dockchannel_irq_mask, + .irq_unmask = dockchannel_irq_unmask, +}; + +static int dockchannel_irq_domain_map(struct irq_domain *d, unsigned int virq, + irq_hw_number_t hw) +{ + irq_set_chip_data(virq, d->host_data); + irq_set_chip_and_handler(virq, &dockchannel_irqchip, handle_level_irq); + + return 0; +} + +static const struct irq_domain_ops dockchannel_irq_domain_ops = { + .xlate = irq_domain_xlate_twocell, + .map = dockchannel_irq_domain_map, +}; + +static int dockchannel_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dockchannel_common *dcc; + struct device_node *child; + + dcc = devm_kzalloc(dev, sizeof(*dcc), GFP_KERNEL); + if (!dcc) + return -ENOMEM; + + dcc->dev = dev; + platform_set_drvdata(pdev, dcc); + + dcc->irq_base = devm_platform_ioremap_resource_byname(pdev, "irq"); + if (IS_ERR(dcc->irq_base)) + return PTR_ERR(dcc->irq_base); + + writel_relaxed(0, dcc->irq_base + IRQ_MASK); + writel_relaxed(~0, dcc->irq_base + IRQ_FLAG); + + dcc->domain = irq_domain_add_linear(dev->of_node, DOCKCHANNEL_MAX_IRQ, + &dockchannel_irq_domain_ops, dcc); + if (!dcc->domain) + return -ENOMEM; + + dcc->irq = platform_get_irq(pdev, 0); + if (dcc->irq <= 0) + return dev_err_probe(dev, dcc->irq, "Failed to get IRQ"); + + irq_set_handler_data(dcc->irq, dcc); + irq_set_chained_handler(dcc->irq, dockchannel_irq); + + for_each_child_of_node(dev->of_node, child) + of_platform_device_create(child, NULL, dev); + + return 0; +} + +static int dockchannel_remove(struct platform_device *pdev) +{ + struct dockchannel_common *dcc = platform_get_drvdata(pdev); + int hwirq; + + device_for_each_child(&pdev->dev, NULL, of_platform_device_destroy); + + irq_set_chained_handler_and_data(dcc->irq, NULL, NULL); + + for (hwirq = 0; hwirq < DOCKCHANNEL_MAX_IRQ; hwirq++) + 
irq_dispose_mapping(irq_find_mapping(dcc->domain, hwirq)); + + irq_domain_remove(dcc->domain); + + writel_relaxed(0, dcc->irq_base + IRQ_MASK); + writel_relaxed(~0, dcc->irq_base + IRQ_FLAG); + + return 0; +} + +static const struct of_device_id dockchannel_of_match[] = { + { .compatible = "apple,dockchannel" }, + {}, +}; +MODULE_DEVICE_TABLE(of, dockchannel_of_match); + +static struct platform_driver dockchannel_driver = { + .driver = { + .name = "dockchannel", + .of_match_table = dockchannel_of_match, + }, + .probe = dockchannel_probe, + .remove = dockchannel_remove, +}; +module_platform_driver(dockchannel_driver); + +MODULE_AUTHOR("Hector Martin <marcan@marcan.st>"); +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_DESCRIPTION("Apple DockChannel driver"); diff --git a/drivers/soc/apple/rtkit-helper.c b/drivers/soc/apple/rtkit-helper.c new file mode 100644 index 000000000000..01c7f30e41e6 --- /dev/null +++ b/drivers/soc/apple/rtkit-helper.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Apple Generic RTKit helper coprocessor + * Copyright The Asahi Linux Contributors + */ + +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/soc/apple/rtkit.h> + +#define APPLE_ASC_CPU_CONTROL 0x44 +#define APPLE_ASC_CPU_CONTROL_RUN BIT(4) + +struct apple_rtkit_helper { + struct device *dev; + struct apple_rtkit *rtk; + + void __iomem *asc_base; + + struct resource *sram; + void __iomem *sram_base; +}; + +static int apple_rtkit_helper_shmem_setup(void *cookie, struct apple_rtkit_shmem *bfr) +{ + struct apple_rtkit_helper *helper = cookie; + struct resource res = { + .start = bfr->iova, + .end = bfr->iova + bfr->size - 1, + .name = "rtkit_map", + }; + + if (!bfr->iova) { + bfr->buffer = dma_alloc_coherent(helper->dev, bfr->size, + &bfr->iova, GFP_KERNEL); + if (!bfr->buffer) + return -ENOMEM; + return 0; + } + + if (!helper->sram) { + dev_err(helper->dev, + "RTKit buffer request with no SRAM region: %pR", &res); + return -EFAULT; + } + + res.flags = helper->sram->flags; + if (res.end < res.start || !resource_contains(helper->sram, &res)) { + dev_err(helper->dev, + "RTKit buffer request outside SRAM region: %pR", &res); + return -EFAULT; + } + + bfr->iomem = helper->sram_base + (res.start - helper->sram->start); + bfr->is_mapped = true; + + return 0; +} + +static void apple_rtkit_helper_shmem_destroy(void *cookie, struct apple_rtkit_shmem *bfr) +{ + // no-op +} + +static const struct apple_rtkit_ops apple_rtkit_helper_ops = { + .shmem_setup = apple_rtkit_helper_shmem_setup, + .shmem_destroy = apple_rtkit_helper_shmem_destroy, +}; + +static int apple_rtkit_helper_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct apple_rtkit_helper *helper; + int ret; + + helper = devm_kzalloc(dev, sizeof(*helper), GFP_KERNEL); + if (!helper) + return -ENOMEM; + + helper->dev = dev; + platform_set_drvdata(pdev, helper); + + helper->asc_base = devm_platform_ioremap_resource_byname(pdev, "asc"); + if (IS_ERR(helper->asc_base)) + return PTR_ERR(helper->asc_base); + + helper->sram = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram"); + if (helper->sram) { + helper->sram_base = devm_ioremap_resource(dev, helper->sram); + if (IS_ERR(helper->sram_base)) + return dev_err_probe(dev, PTR_ERR(helper->sram_base), + "Failed to map SRAM region"); + } + + helper->rtk = + devm_apple_rtkit_init(dev, helper, NULL, 0, &apple_rtkit_helper_ops); + if 
(IS_ERR(helper->rtk)) + return dev_err_probe(dev, PTR_ERR(helper->rtk), + "Failed to initialize RTKit"); + + writel_relaxed(APPLE_ASC_CPU_CONTROL_RUN, + helper->asc_base + APPLE_ASC_CPU_CONTROL); + + /* Works for both wake and boot */ + ret = apple_rtkit_wake(helper->rtk); + if (ret != 0) + return dev_err_probe(dev, ret, "Failed to wake up coprocessor"); + + return 0; +} + +static int apple_rtkit_helper_remove(struct platform_device *pdev) +{ + struct apple_rtkit_helper *helper = platform_get_drvdata(pdev); + + if (apple_rtkit_is_running(helper->rtk)) + apple_rtkit_quiesce(helper->rtk); + + writel_relaxed(0, helper->asc_base + APPLE_ASC_CPU_CONTROL); + + return 0; +} + +static const struct of_device_id apple_rtkit_helper_of_match[] = { + { .compatible = "apple,rtk-helper-asc4" }, + {}, +}; +MODULE_DEVICE_TABLE(of, apple_rtkit_helper_of_match); + +static struct platform_driver apple_rtkit_helper_driver = { + .driver = { + .name = "rtkit-helper", + .of_match_table = apple_rtkit_helper_of_match, + }, + .probe = apple_rtkit_helper_probe, + .remove = apple_rtkit_helper_remove, +}; +module_platform_driver(apple_rtkit_helper_driver); + +MODULE_AUTHOR("Hector Martin <marcan@marcan.st>"); +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_DESCRIPTION("Apple RTKit helper driver"); diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c index cf1129e9f76b..0a449e95c22b 100644 --- a/drivers/soc/apple/rtkit.c +++ b/drivers/soc/apple/rtkit.c @@ -628,37 +628,11 @@ int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message, } EXPORT_SYMBOL_GPL(apple_rtkit_send_message); -int apple_rtkit_send_message_wait(struct apple_rtkit *rtk, u8 ep, u64 message, - unsigned long timeout, bool atomic) +int apple_rtkit_poll(struct apple_rtkit *rtk) { - DECLARE_COMPLETION_ONSTACK(completion); - int ret; - long t; - - ret = apple_rtkit_send_message(rtk, ep, message, &completion, atomic); - if (ret < 0) - return ret; - - if (atomic) { - ret = mbox_flush(rtk->mbox_chan, timeout); - if (ret < 0) - return ret; - - if (try_wait_for_completion(&completion)) - return 0; - - return -ETIME; - } else { - t = wait_for_completion_interruptible_timeout( - &completion, msecs_to_jiffies(timeout)); - if (t < 0) - return t; - else if (t == 0) - return -ETIME; - return 0; - } + return mbox_client_peek_data(rtk->mbox_chan); } -EXPORT_SYMBOL_GPL(apple_rtkit_send_message_wait); +EXPORT_SYMBOL_GPL(apple_rtkit_poll); int apple_rtkit_start_ep(struct apple_rtkit *rtk, u8 endpoint) { diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 3b1044ebc400..863e215a5127 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -79,6 +79,14 @@ config SPI_ALTERA_DFL Altera SPI master controller. The SPI master is connected to a SPI slave to Avalon bridge in a Intel MAX BMC. +config SPI_APPLE + tristate "Apple SoC SPI Controller platform driver" + depends on ARCH_APPLE || COMPILE_TEST + help + This enables support for the SPI controller present on + many Apple SoCs, including the t8103 (M1) and t600x + (M1 Pro/Max). 
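The rtkit.c hunk above replaces the blocking apple_rtkit_send_message_wait() with apple_rtkit_poll(), which only asks the mailbox framework to deliver anything already pending (mbox_client_peek_data()). A minimal sketch, not part of this patch, of how a caller in atomic context might busy-wait on a driver-specific flag that an RTKit message handler sets; the function name, flag and timeout policy are hypothetical:

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/soc/apple/rtkit.h>
#include <linux/types.h>

/* Hypothetical helper: spin until *flag (set from an RTKit message handler)
 * becomes true, pumping pending mailbox messages via apple_rtkit_poll(). */
static int example_rtkit_poll_for_flag(struct apple_rtkit *rtk, bool *flag,
				       unsigned long timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	while (!READ_ONCE(*flag)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		apple_rtkit_poll(rtk);	/* deliver any queued messages */
		udelay(10);
	}

	return 0;
}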
+ config SPI_AR934X tristate "Qualcomm Atheros AR934X/QCA95XX SPI controller driver" depends on ATH79 || COMPILE_TEST diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 0f44eb6083a5..f86ba8634f77 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o obj-$(CONFIG_SPI_ALTERA) += spi-altera-platform.o obj-$(CONFIG_SPI_ALTERA_CORE) += spi-altera-core.o obj-$(CONFIG_SPI_ALTERA_DFL) += spi-altera-dfl.o +obj-$(CONFIG_SPI_APPLE) += spi-apple.o obj-$(CONFIG_SPI_AR934X) += spi-ar934x.o obj-$(CONFIG_SPI_ARMADA_3700) += spi-armada-3700.o obj-$(CONFIG_SPI_ASPEED_SMC) += spi-aspeed-smc.o diff --git a/drivers/spi/spi-apple.c b/drivers/spi/spi-apple.c new file mode 100644 index 000000000000..c483ad3f69ef --- /dev/null +++ b/drivers/spi/spi-apple.c @@ -0,0 +1,544 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Apple SoC SPI device driver + * + * Copyright The Asahi Linux Contributors + * + * Based on spi-sifive.c, Copyright 2018 SiFive, Inc. + */ + +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/spi/spi.h> + +#define APPLE_SPI_CTRL 0x000 +#define APPLE_SPI_CTRL_RUN BIT(0) +#define APPLE_SPI_CTRL_TX_RESET BIT(2) +#define APPLE_SPI_CTRL_RX_RESET BIT(3) + +#define APPLE_SPI_CFG 0x004 +#define APPLE_SPI_CFG_CPHA BIT(1) +#define APPLE_SPI_CFG_CPOL BIT(2) +#define APPLE_SPI_CFG_MODE GENMASK(6, 5) +#define APPLE_SPI_CFG_MODE_POLLED 0 +#define APPLE_SPI_CFG_MODE_IRQ 1 +#define APPLE_SPI_CFG_MODE_DMA 2 +#define APPLE_SPI_CFG_IE_RXCOMPLETE BIT(7) +#define APPLE_SPI_CFG_IE_TXRXTHRESH BIT(8) +#define APPLE_SPI_CFG_LSB_FIRST BIT(13) +#define APPLE_SPI_CFG_WORD_SIZE GENMASK(16, 15) +#define APPLE_SPI_CFG_WORD_SIZE_8B 0 +#define APPLE_SPI_CFG_WORD_SIZE_16B 1 +#define APPLE_SPI_CFG_WORD_SIZE_32B 2 +#define APPLE_SPI_CFG_FIFO_THRESH GENMASK(18, 17) +#define APPLE_SPI_CFG_FIFO_THRESH_8B 0 +#define APPLE_SPI_CFG_FIFO_THRESH_4B 1 +#define APPLE_SPI_CFG_FIFO_THRESH_1B 2 +#define APPLE_SPI_CFG_IE_TXCOMPLETE BIT(21) + +#define APPLE_SPI_STATUS 0x008 +#define APPLE_SPI_STATUS_RXCOMPLETE BIT(0) +#define APPLE_SPI_STATUS_TXRXTHRESH BIT(1) +#define APPLE_SPI_STATUS_TXCOMPLETE BIT(2) + +#define APPLE_SPI_PIN 0x00c +#define APPLE_SPI_PIN_KEEP_MOSI BIT(0) +#define APPLE_SPI_PIN_CS BIT(1) + +#define APPLE_SPI_TXDATA 0x010 +#define APPLE_SPI_RXDATA 0x020 +#define APPLE_SPI_CLKDIV 0x030 +#define APPLE_SPI_CLKDIV_MAX 0x7ff +#define APPLE_SPI_RXCNT 0x034 +#define APPLE_SPI_WORD_DELAY 0x038 +#define APPLE_SPI_TXCNT 0x04c + +#define APPLE_SPI_FIFOSTAT 0x10c +#define APPLE_SPI_FIFOSTAT_TXFULL BIT(4) +#define APPLE_SPI_FIFOSTAT_LEVEL_TX GENMASK(15, 8) +#define APPLE_SPI_FIFOSTAT_RXEMPTY BIT(20) +#define APPLE_SPI_FIFOSTAT_LEVEL_RX GENMASK(31, 24) + +#define APPLE_SPI_IE_XFER 0x130 +#define APPLE_SPI_IF_XFER 0x134 +#define APPLE_SPI_XFER_RXCOMPLETE BIT(0) +#define APPLE_SPI_XFER_TXCOMPLETE BIT(1) + +#define APPLE_SPI_IE_FIFO 0x138 +#define APPLE_SPI_IF_FIFO 0x13c +#define APPLE_SPI_FIFO_RXTHRESH BIT(4) +#define APPLE_SPI_FIFO_TXTHRESH BIT(5) +#define APPLE_SPI_FIFO_RXFULL BIT(8) +#define APPLE_SPI_FIFO_TXEMPTY BIT(9) +#define APPLE_SPI_FIFO_RXUNDERRUN BIT(16) +#define APPLE_SPI_FIFO_TXOVERFLOW BIT(17) + +#define APPLE_SPI_SHIFTCFG 0x150 +#define APPLE_SPI_SHIFTCFG_CLK_ENABLE BIT(0) +#define APPLE_SPI_SHIFTCFG_CS_ENABLE BIT(1) +#define 
APPLE_SPI_SHIFTCFG_AND_CLK_DATA BIT(8) +#define APPLE_SPI_SHIFTCFG_CS_AS_DATA BIT(9) +#define APPLE_SPI_SHIFTCFG_TX_ENABLE BIT(10) +#define APPLE_SPI_SHIFTCFG_RX_ENABLE BIT(11) +#define APPLE_SPI_SHIFTCFG_BITS GENMASK(21, 16) +#define APPLE_SPI_SHIFTCFG_OVERRIDE_CS BIT(24) + +#define APPLE_SPI_PINCFG 0x154 +#define APPLE_SPI_PINCFG_KEEP_CLK BIT(0) +#define APPLE_SPI_PINCFG_KEEP_CS BIT(1) +#define APPLE_SPI_PINCFG_KEEP_MOSI BIT(2) +#define APPLE_SPI_PINCFG_CLK_IDLE_VAL BIT(8) +#define APPLE_SPI_PINCFG_CS_IDLE_VAL BIT(9) +#define APPLE_SPI_PINCFG_MOSI_IDLE_VAL BIT(10) + +#define APPLE_SPI_DELAY_PRE 0x160 +#define APPLE_SPI_DELAY_POST 0x168 +#define APPLE_SPI_DELAY_ENABLE BIT(0) +#define APPLE_SPI_DELAY_NO_INTERBYTE BIT(1) +#define APPLE_SPI_DELAY_SET_SCK BIT(4) +#define APPLE_SPI_DELAY_SET_MOSI BIT(6) +#define APPLE_SPI_DELAY_SCK_VAL BIT(8) +#define APPLE_SPI_DELAY_MOSI_VAL BIT(12) + +#define APPLE_SPI_FIFO_DEPTH 16 + +/* + * The slowest refclock available is 24MHz, the highest divider is 0x7ff, + * the largest word size is 32 bits, the FIFO depth is 16, the maximum + * intra-word delay is 0xffff refclocks. So the maximum time a transfer + * cycle can take is: + * + * (0x7ff * 32 + 0xffff) * 16 / 24e6 Hz ~= 87ms + * + * Double it and round it up to 200ms for good measure. + */ +#define APPLE_SPI_TIMEOUT_MS 200 + +struct apple_spi { + void __iomem *regs; /* MMIO register address */ + struct clk *clk; /* bus clock */ + struct completion done; /* wake-up from interrupt */ +}; + +static inline void reg_write(struct apple_spi *spi, int offset, u32 value) +{ + writel_relaxed(value, spi->regs + offset); +} + +static inline u32 reg_read(struct apple_spi *spi, int offset) +{ + return readl_relaxed(spi->regs + offset); +} + +static inline void reg_mask(struct apple_spi *spi, int offset, u32 clear, u32 set) +{ + u32 val = reg_read(spi, offset); + + val &= ~clear; + val |= set; + reg_write(spi, offset, val); +} + +static void apple_spi_init(struct apple_spi *spi) +{ + /* Set CS high (inactive) and disable override and auto-CS */ + reg_write(spi, APPLE_SPI_PIN, APPLE_SPI_PIN_CS); + reg_mask(spi, APPLE_SPI_SHIFTCFG, APPLE_SPI_SHIFTCFG_OVERRIDE_CS, 0); + reg_mask(spi, APPLE_SPI_PINCFG, APPLE_SPI_PINCFG_CS_IDLE_VAL, APPLE_SPI_PINCFG_KEEP_CS); + + /* Reset FIFOs */ + reg_write(spi, APPLE_SPI_CTRL, APPLE_SPI_CTRL_RX_RESET | APPLE_SPI_CTRL_TX_RESET); + + /* Configure defaults */ + reg_write(spi, APPLE_SPI_CFG, + FIELD_PREP(APPLE_SPI_CFG_FIFO_THRESH, APPLE_SPI_CFG_FIFO_THRESH_8B) | + FIELD_PREP(APPLE_SPI_CFG_MODE, APPLE_SPI_CFG_MODE_IRQ) | + FIELD_PREP(APPLE_SPI_CFG_WORD_SIZE, APPLE_SPI_CFG_WORD_SIZE_8B)); + + /* Disable IRQs */ + reg_write(spi, APPLE_SPI_IE_FIFO, 0); + reg_write(spi, APPLE_SPI_IE_XFER, 0); + + /* Disable delays */ + reg_write(spi, APPLE_SPI_DELAY_PRE, 0); + reg_write(spi, APPLE_SPI_DELAY_POST, 0); +} + +static int apple_spi_prepare_message(struct spi_controller *ctlr, struct spi_message *msg) +{ + struct apple_spi *spi = spi_controller_get_devdata(ctlr); + struct spi_device *device = msg->spi; + + u32 cfg = ((device->mode & SPI_CPHA ? APPLE_SPI_CFG_CPHA : 0) | + (device->mode & SPI_CPOL ? APPLE_SPI_CFG_CPOL : 0) | + (device->mode & SPI_LSB_FIRST ? 
APPLE_SPI_CFG_LSB_FIRST : 0)); + + /* Update core config */ + reg_mask(spi, APPLE_SPI_CFG, + APPLE_SPI_CFG_CPHA | APPLE_SPI_CFG_CPOL | APPLE_SPI_CFG_LSB_FIRST, cfg); + + return 0; +} + +static void apple_spi_set_cs(struct spi_device *device, bool is_high) +{ + struct apple_spi *spi = spi_controller_get_devdata(device->controller); + + reg_mask(spi, APPLE_SPI_PIN, APPLE_SPI_PIN_CS, is_high ? APPLE_SPI_PIN_CS : 0); +} + +static bool apple_spi_prep_transfer(struct apple_spi *spi, struct spi_transfer *t) +{ + u32 cr, fifo_threshold; + + /* Calculate and program the clock rate */ + cr = DIV_ROUND_UP(clk_get_rate(spi->clk), t->speed_hz); + reg_write(spi, APPLE_SPI_CLKDIV, min_t(u32, cr, APPLE_SPI_CLKDIV_MAX)); + + /* Update bits per word */ + reg_mask(spi, APPLE_SPI_SHIFTCFG, APPLE_SPI_SHIFTCFG_BITS, + FIELD_PREP(APPLE_SPI_SHIFTCFG_BITS, t->bits_per_word)); + + /* We will want to poll if the time we need to wait is + * less than the context switching time. + * Let's call that threshold 5us. The operation will take: + * bits_per_word * fifo_threshold / hz <= 5 * 10^-6 + * 200000 * bits_per_word * fifo_threshold <= hz + */ + fifo_threshold = APPLE_SPI_FIFO_DEPTH / 2; + return (200000 * t->bits_per_word * fifo_threshold) <= t->speed_hz; +} + +static irqreturn_t apple_spi_irq(int irq, void *dev_id) +{ + struct apple_spi *spi = dev_id; + u32 fifo = reg_read(spi, APPLE_SPI_IF_FIFO) & reg_read(spi, APPLE_SPI_IE_FIFO); + u32 xfer = reg_read(spi, APPLE_SPI_IF_XFER) & reg_read(spi, APPLE_SPI_IE_XFER); + + if (fifo || xfer) { + /* Disable interrupts until next transfer */ + reg_write(spi, APPLE_SPI_IE_XFER, 0); + reg_write(spi, APPLE_SPI_IE_FIFO, 0); + complete(&spi->done); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int apple_spi_wait(struct apple_spi *spi, u32 fifo_bit, u32 xfer_bit, int poll) +{ + int ret = 0; + + if (poll) { + u32 fifo, xfer; + unsigned long timeout = jiffies + APPLE_SPI_TIMEOUT_MS * HZ / 1000; + + do { + fifo = reg_read(spi, APPLE_SPI_IF_FIFO); + xfer = reg_read(spi, APPLE_SPI_IF_XFER); + if (time_after(jiffies, timeout)) { + ret = -ETIMEDOUT; + break; + } + } while (!((fifo & fifo_bit) || (xfer & xfer_bit))); + } else { + reinit_completion(&spi->done); + reg_write(spi, APPLE_SPI_IE_XFER, xfer_bit); + reg_write(spi, APPLE_SPI_IE_FIFO, fifo_bit); + + if (!wait_for_completion_timeout(&spi->done, + msecs_to_jiffies(APPLE_SPI_TIMEOUT_MS))) + ret = -ETIMEDOUT; + + reg_write(spi, APPLE_SPI_IE_XFER, 0); + reg_write(spi, APPLE_SPI_IE_FIFO, 0); + } + + return ret; +} + +static void apple_spi_tx(struct apple_spi *spi, const void **tx_ptr, u32 *left, + unsigned int bytes_per_word) +{ + u32 inuse, words, wrote; + + if (!*tx_ptr) + return; + + inuse = FIELD_GET(APPLE_SPI_FIFOSTAT_LEVEL_TX, reg_read(spi, APPLE_SPI_FIFOSTAT)); + words = wrote = min_t(u32, *left, APPLE_SPI_FIFO_DEPTH - inuse); + + if (!words) + return; + + *left -= words; + + switch (bytes_per_word) { + case 1: { + const u8 *p = *tx_ptr; + + while (words--) + reg_write(spi, APPLE_SPI_TXDATA, *p++); + break; + } + case 2: { + const u16 *p = *tx_ptr; + + while (words--) + reg_write(spi, APPLE_SPI_TXDATA, *p++); + break; + } + case 4: { + const u32 *p = *tx_ptr; + + while (words--) + reg_write(spi, APPLE_SPI_TXDATA, *p++); + break; + } + default: + WARN_ON(1); + } + + *tx_ptr = ((u8 *)*tx_ptr) + bytes_per_word * wrote; +} + +static void apple_spi_rx(struct apple_spi *spi, void **rx_ptr, u32 *left, + unsigned int bytes_per_word) +{ + u32 words, read; + + if (!*rx_ptr) + return; + + words = read = 
FIELD_GET(APPLE_SPI_FIFOSTAT_LEVEL_RX, reg_read(spi, APPLE_SPI_FIFOSTAT)); + WARN_ON(words > *left); + + if (!words) + return; + + *left -= min_t(u32, *left, words); + + switch (bytes_per_word) { + case 1: { + u8 *p = *rx_ptr; + + while (words--) + *p++ = reg_read(spi, APPLE_SPI_RXDATA); + break; + } + case 2: { + u16 *p = *rx_ptr; + + while (words--) + *p++ = reg_read(spi, APPLE_SPI_RXDATA); + break; + } + case 4: { + u32 *p = *rx_ptr; + + while (words--) + *p++ = reg_read(spi, APPLE_SPI_RXDATA); + break; + } + default: + WARN_ON(1); + } + + *rx_ptr = ((u8 *)*rx_ptr) + bytes_per_word * read; +} + +static int apple_spi_transfer_one(struct spi_controller *ctlr, struct spi_device *device, + struct spi_transfer *t) +{ + struct apple_spi *spi = spi_controller_get_devdata(ctlr); + bool poll = apple_spi_prep_transfer(spi, t); + const void *tx_ptr = t->tx_buf; + void *rx_ptr = t->rx_buf; + unsigned int bytes_per_word; + u32 words, remaining_tx, remaining_rx; + u32 xfer_flags = 0; + u32 fifo_flags; + int retries = 100; + int ret = 0; + + if (t->bits_per_word > 16) + bytes_per_word = 4; + else if (t->bits_per_word > 8) + bytes_per_word = 2; + else + bytes_per_word = 1; + + words = t->len / bytes_per_word; + remaining_tx = tx_ptr ? words : 0; + remaining_rx = rx_ptr ? words : 0; + + /* Reset FIFOs */ + reg_write(spi, APPLE_SPI_CTRL, APPLE_SPI_CTRL_RX_RESET | APPLE_SPI_CTRL_TX_RESET); + + /* Clear IRQ flags */ + reg_write(spi, APPLE_SPI_IF_XFER, ~0); + reg_write(spi, APPLE_SPI_IF_FIFO, ~0); + + /* Determine transfer completion flags we wait for */ + if (tx_ptr) + xfer_flags |= APPLE_SPI_XFER_TXCOMPLETE; + if (rx_ptr) + xfer_flags |= APPLE_SPI_XFER_RXCOMPLETE; + + /* Set transfer length */ + reg_write(spi, APPLE_SPI_TXCNT, remaining_tx); + reg_write(spi, APPLE_SPI_RXCNT, remaining_rx); + + /* Prime transmit FIFO */ + apple_spi_tx(spi, &tx_ptr, &remaining_tx, bytes_per_word); + + /* Start transfer */ + reg_write(spi, APPLE_SPI_CTRL, APPLE_SPI_CTRL_RUN); + + /* TX again since a few words get popped off immediately */ + apple_spi_tx(spi, &tx_ptr, &remaining_tx, bytes_per_word); + + while (xfer_flags) { + fifo_flags = 0; + + if (remaining_tx) + fifo_flags |= APPLE_SPI_FIFO_TXTHRESH; + if (remaining_rx) + fifo_flags |= APPLE_SPI_FIFO_RXTHRESH; + + /* Wait for anything to happen */ + ret = apple_spi_wait(spi, fifo_flags, xfer_flags, poll); + if (ret) { + dev_err(&ctlr->dev, "transfer timed out (remaining %d tx, %d rx)\n", + remaining_tx, remaining_rx); + goto err; + } + + /* Stop waiting on transfer halves once they complete */ + xfer_flags &= ~reg_read(spi, APPLE_SPI_IF_XFER); + + /* Transmit and receive everything we can */ + apple_spi_tx(spi, &tx_ptr, &remaining_tx, bytes_per_word); + apple_spi_rx(spi, &rx_ptr, &remaining_rx, bytes_per_word); + } + + /* + * Sometimes the transfer completes before the last word is in the RX FIFO. + * Normally one retry is all it takes to get the last word out. 
+ */ + while (remaining_rx && retries--) + apple_spi_rx(spi, &rx_ptr, &remaining_rx, bytes_per_word); + + if (remaining_tx) + dev_err(&ctlr->dev, "transfer completed with %d words left to transmit\n", + remaining_tx); + if (remaining_rx) + dev_err(&ctlr->dev, "transfer completed with %d words left to receive\n", + remaining_rx); + +err: + fifo_flags = reg_read(spi, APPLE_SPI_IF_FIFO); + WARN_ON(fifo_flags & APPLE_SPI_FIFO_TXOVERFLOW); + WARN_ON(fifo_flags & APPLE_SPI_FIFO_RXUNDERRUN); + + /* Stop transfer */ + reg_write(spi, APPLE_SPI_CTRL, 0); + + return ret; +} + +static void apple_spi_clk_disable_unprepare(void *data) +{ + clk_disable_unprepare(data); +} + +static int apple_spi_probe(struct platform_device *pdev) +{ + struct apple_spi *spi; + int ret, irq; + struct spi_controller *ctlr; + + ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct apple_spi)); + if (!ctlr) + return dev_err_probe(&pdev->dev, -ENOMEM, "out of memory\n"); + + spi = spi_controller_get_devdata(ctlr); + init_completion(&spi->done); + platform_set_drvdata(pdev, ctlr); + + spi->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(spi->regs)) + return PTR_ERR(spi->regs); + + spi->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(spi->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(spi->clk), "Unable to find bus clock\n"); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + return irq; + } + + ret = devm_request_irq(&pdev->dev, irq, apple_spi_irq, 0, + dev_name(&pdev->dev), spi); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Unable to bind to interrupt\n"); + + ret = clk_prepare_enable(spi->clk); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Unable to enable bus clock\n"); + + ret = devm_add_action_or_reset(&pdev->dev, apple_spi_clk_disable_unprepare, spi->clk); + if (ret) + return ret; + + ctlr->dev.of_node = pdev->dev.of_node; + ctlr->bus_num = pdev->id; + ctlr->num_chipselect = 1; + ctlr->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST; + ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); + ctlr->flags = 0; + ctlr->prepare_message = apple_spi_prepare_message; + ctlr->set_cs = apple_spi_set_cs; + ctlr->transfer_one = apple_spi_transfer_one; + ctlr->auto_runtime_pm = true; + + pm_runtime_set_active(&pdev->dev); + devm_pm_runtime_enable(&pdev->dev); + + apple_spi_init(spi); + + ret = devm_spi_register_controller(&pdev->dev, ctlr); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, "devm_spi_register_controller failed\n"); + + return 0; +} + +static const struct of_device_id apple_spi_of_match[] = { + { .compatible = "apple,spi", }, + {} +}; +MODULE_DEVICE_TABLE(of, apple_spi_of_match); + +static struct platform_driver apple_spi_driver = { + .probe = apple_spi_probe, + .driver = { + .name = "apple-spi", + .owner = THIS_MODULE, + .of_match_table = apple_spi_of_match, + }, +}; +module_platform_driver(apple_spi_driver); + +MODULE_AUTHOR("Hector Martin <marcan@marcan.st>"); +MODULE_DESCRIPTION("Apple SoC SPI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index ea09d1b42bf6..c8342b950783 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -2059,6 +2059,22 @@ void spi_flush_queue(struct spi_controller *ctlr) /*-------------------------------------------------------------------------*/ #if defined(CONFIG_OF) +static void of_spi_parse_dt_cs_delay(struct device_node *nc, + struct spi_delay *delay, const char *prop) +{ + u32 value; + + if (!of_property_read_u32(nc, prop, &value)) { + if (value > U16_MAX) { + delay->value = DIV_ROUND_UP(value, 1000); + 
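+		/* spi_delay.value is only a u16, so delays above U16_MAX ns are rounded up to whole microseconds here, e.g. a 70000 ns property becomes DIV_ROUND_UP(70000, 1000) = 70 us */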
delay->unit = SPI_DELAY_UNIT_USECS; + } else { + delay->value = value; + delay->unit = SPI_DELAY_UNIT_NSECS; + } + } +} + static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, struct device_node *nc) { @@ -2148,6 +2164,11 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, if (!of_property_read_u32(nc, "spi-max-frequency", &value)) spi->max_speed_hz = value; + /* Device CS delays */ + of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns"); + of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns"); + of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns"); + return 0; } diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig index 737802046314..96c73c5b5720 100644 --- a/drivers/spmi/Kconfig +++ b/drivers/spmi/Kconfig @@ -45,4 +45,12 @@ config SPMI_MTK_PMIF This is required for communicating with Mediatek PMICs and other devices that have the SPMI interface. +config SPMI_APPLE + tristate "Apple SoC SPMI Controller platform driver" + depends on ARCH_APPLE || COMPILE_TEST + help + This enables basic support for the SPMI controller present on + many Apple SoCs, including the t8103 (M1) and t600x + (M1 Pro/Max). + endif diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile index 9d974424c8c1..989b84bbca60 100644 --- a/drivers/spmi/Makefile +++ b/drivers/spmi/Makefile @@ -7,3 +7,4 @@ obj-$(CONFIG_SPMI) += spmi.o obj-$(CONFIG_SPMI_HISI3670) += hisi-spmi-controller.o obj-$(CONFIG_SPMI_MSM_PMIC_ARB) += spmi-pmic-arb.o obj-$(CONFIG_SPMI_MTK_PMIF) += spmi-mtk-pmif.o +obj-$(CONFIG_SPMI_APPLE) += spmi-apple-controller.o diff --git a/drivers/spmi/spmi-apple-controller.c b/drivers/spmi/spmi-apple-controller.c new file mode 100644 index 000000000000..5b49d6c609f5 --- /dev/null +++ b/drivers/spmi/spmi-apple-controller.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Apple SoC SPMI device driver + * + * Copyright The Asahi Linux Contributors + * + * Inspired by: + * OpenBSD support Copyright (c) 2021 Mark Kettenis <kettenis@openbsd.org> + * Corellium support Copyright (C) 2021 Corellium LLC + * hisi-spmi-controller.c + * spmi-pmic-arb.c Copyright (c) 2021, The Linux Foundation. 
+ */ + +#include <linux/bits.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/spmi.h> + + +/* SPMI Controller Registers */ +#define SPMI_STATUS_REG 0 +#define SPMI_CMD_REG 0x4 +#define SPMI_RSP_REG 0x8 + +#define SPMI_RX_FIFO_EMPTY BIT(24) +#define SPMI_TX_FIFO_EMPTY BIT(8) + +/* Apple SPMI controller */ +struct apple_spmi { + void __iomem *regs; + struct spmi_controller *ctrl; +}; + +static inline u32 read_reg(struct apple_spmi *spmi, int offset) +{ + return(readl_relaxed(spmi->regs + offset)); +} + +static inline void write_reg(u32 value, struct apple_spmi *spmi, int offset) +{ + writel_relaxed(value, spmi->regs + offset); +} + +static int spmi_read_cmd(struct spmi_controller *ctrl, + u8 opc, u8 slave_id, u16 slave_addr, u8 *__buf, size_t bc) +{ + struct apple_spmi *spmi; + u32 spmi_cmd = opc|slave_id<<8|slave_addr<<16|(bc-1)|(1<<15); + u32 rsp; + volatile u32 status; + size_t len_to_read; + u8 i; + + spmi = spmi_controller_get_drvdata(ctrl); + + write_reg(spmi_cmd, spmi, SPMI_CMD_REG); + + /* Wait for Rx FIFO to have something */ + /* Quite ugly msleep, need to find a better way to do it */ + i=0; + do { + status=read_reg(spmi, SPMI_STATUS_REG); + msleep(10); + i+=1; + } while ((status & SPMI_RX_FIFO_EMPTY) && i<5); + + if(i>=5){ + dev_err(&ctrl->dev, "spmi_read_cmd: took too long to get the status\n"); + return -ETIMEDOUT; + } + + /* Read SPMI reply status */ + rsp=read_reg(spmi, SPMI_RSP_REG); + + len_to_read = 0; + /* Read SPMI data reply */ + while (!( status & SPMI_RX_FIFO_EMPTY ) && (len_to_read < bc )) { + rsp=read_reg(spmi, SPMI_RSP_REG); + i=0; + while ((len_to_read<bc)&&(i<4)) { + __buf[len_to_read++]=((0xff<<(8*i))&rsp)>>(8*i); + i+=1; + } + } + + return 0; +} + +static int spmi_write_cmd(struct spmi_controller *ctrl, + u8 opc, u8 slave_id, u16 slave_addr, const u8 *__buf, size_t bc) +{ + struct apple_spmi *spmi; + u32 spmi_cmd = opc|slave_id<<8|slave_addr<<16|(bc-1)|(1<<15); + volatile u32 rsp; + volatile u32 status; + size_t i=0,j; + + spmi = spmi_controller_get_drvdata(ctrl); + + write_reg(spmi_cmd, spmi, SPMI_CMD_REG); + + while (i<bc) { + j=0; + spmi_cmd=0; + while ((j<4)&&(i<bc)) { + spmi_cmd |= __buf[i++]<<(j++*8); + } + write_reg(spmi_cmd, spmi, SPMI_CMD_REG); + } + + /* Wait for Rx FIFO to have something */ + /* Quite ugly msleep, need to find a better way to do it */ + i=0; + do { + status=read_reg(spmi, SPMI_STATUS_REG); + msleep(10); + i+=1; + } while ((status & SPMI_RX_FIFO_EMPTY) && i<5); + + if(i>=5){ + dev_err(&ctrl->dev, "spmi_write_cmd: took too long to get the status\n"); + return -ETIMEDOUT; + } + + rsp = read_reg(spmi, SPMI_RSP_REG); + + return 0; +} + +static int spmi_controller_probe(struct platform_device *pdev) +{ + struct apple_spmi *spmi; + struct spmi_controller *ctrl; + int ret; + + ctrl = spmi_controller_alloc(&pdev->dev, sizeof(struct apple_spmi)); + if (!ctrl) { + dev_err(&pdev->dev, "Can't allocate spmi_controller data\n"); + return -ENOMEM; + } + + spmi = spmi_controller_get_drvdata(ctrl); + spmi->ctrl=ctrl; + platform_set_drvdata(pdev, ctrl); + + spmi->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(spmi->regs)) { + dev_err_probe(&pdev->dev, PTR_ERR(spmi->regs), "Can't get ioremap regs.\n"); + return PTR_ERR(spmi->regs); + } + + ctrl->dev.of_node = of_node_get(pdev->dev.of_node); + + /* Callbacks */ + ctrl->read_cmd = spmi_read_cmd; + ctrl->write_cmd = spmi_write_cmd; + + ret = 
spmi_controller_add(ctrl); + if (ret) { + dev_err(&pdev->dev, "spmi_controller_add failed with error %d!\n", ret); + goto err_put_controller; + } + + /* Let's look for other nodes in device tree like the rtc */ + ret = devm_of_platform_populate(&pdev->dev); + if (ret) { + dev_err(&pdev->dev, "spmi_controller_probe: devm_of_platform_populate failed with error %d!\n", ret); + goto err_devm_of_platform_populate; + } + + return 0; + +err_put_controller: + spmi_controller_put(ctrl); +err_devm_of_platform_populate: + return ret; +} + +static int spmi_del_controller(struct platform_device *pdev) +{ + struct spmi_controller *ctrl = platform_get_drvdata(pdev); + + spmi_controller_remove(ctrl); + spmi_controller_put(ctrl); + return 0; +} + +static const struct of_device_id spmi_controller_match_table[] = { + {.compatible = "apple,spmi",}, + {} +}; +MODULE_DEVICE_TABLE(of, spmi_controller_match_table); + +static struct platform_driver spmi_controller_driver = { + .probe = spmi_controller_probe, + .remove = spmi_del_controller, + .driver = { + .name = "apple-spmi", + .owner = THIS_MODULE, + .of_match_table = spmi_controller_match_table, + }, +}; +module_platform_driver(spmi_controller_driver); + +MODULE_AUTHOR("Jean-Francois Bortolotti <jeff@borto.fr>"); +MODULE_DESCRIPTION("Apple SoC SPMI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c index d5ca904def34..6f721bfc7a61 100644 --- a/drivers/tty/serial/samsung_tty.c +++ b/drivers/tty/serial/samsung_tty.c @@ -40,6 +40,7 @@ #include <linux/clk.h> #include <linux/cpufreq.h> #include <linux/of.h> +#include <linux/pm_runtime.h> #include <asm/irq.h> /* UART name and device definitions */ @@ -1355,30 +1356,49 @@ static int apple_s5l_serial_startup(struct uart_port *port) /* power power management control */ +static int __maybe_unused s3c24xx_serial_runtime_suspend(struct device *dev) +{ + struct uart_port *port = dev_get_drvdata(dev); + struct s3c24xx_uart_port *ourport = to_ourport(port); + int timeout = 10000; + + while (--timeout && !s3c24xx_serial_txempty_nofifo(port)) + udelay(100); + + if (!IS_ERR(ourport->baudclk)) + clk_disable_unprepare(ourport->baudclk); + + clk_disable_unprepare(ourport->clk); + return 0; +}; + +static int __maybe_unused s3c24xx_serial_runtime_resume(struct device *dev) +{ + struct uart_port *port = dev_get_drvdata(dev); + struct s3c24xx_uart_port *ourport = to_ourport(port); + + clk_prepare_enable(ourport->clk); + + if (!IS_ERR(ourport->baudclk)) + clk_prepare_enable(ourport->baudclk); + return 0; +}; + static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level, unsigned int old) { struct s3c24xx_uart_port *ourport = to_ourport(port); - int timeout = 10000; ourport->pm_level = level; switch (level) { - case 3: - while (--timeout && !s3c24xx_serial_txempty_nofifo(port)) - udelay(100); - - if (!IS_ERR(ourport->baudclk)) - clk_disable_unprepare(ourport->baudclk); - - clk_disable_unprepare(ourport->clk); + case UART_PM_STATE_OFF: + pm_runtime_mark_last_busy(port->dev); + pm_runtime_put_sync(port->dev); break; - case 0: - clk_prepare_enable(ourport->clk); - - if (!IS_ERR(ourport->baudclk)) - clk_prepare_enable(ourport->baudclk); + case UART_PM_STATE_ON: + pm_runtime_get_sync(port->dev); break; default: dev_err(port->dev, "s3c24xx_serial: unknown pm %d\n", level); @@ -2249,18 +2269,15 @@ static int s3c24xx_serial_probe(struct platform_device *pdev) } } + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + 
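+	/* The UART clocks are still enabled from s3c24xx_serial_init_port(); with an initial usage reference held and the device marked active, the pm_runtime_put_sync() after uart_add_one_port() hands clock control over to the new runtime PM callbacks. */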
pm_runtime_enable(&pdev->dev); + dev_dbg(&pdev->dev, "%s: adding port\n", __func__); uart_add_one_port(&s3c24xx_uart_drv, &ourport->port); platform_set_drvdata(pdev, &ourport->port); - /* - * Deactivate the clock enabled in s3c24xx_serial_init_port here, - * so that a potential re-enablement through the pm-callback overlaps - * and keeps the clock enabled in this case. - */ - clk_disable_unprepare(ourport->clk); - if (!IS_ERR(ourport->baudclk)) - clk_disable_unprepare(ourport->baudclk); + pm_runtime_put_sync(&pdev->dev); ret = s3c24xx_serial_cpufreq_register(ourport); if (ret < 0) @@ -2274,10 +2291,21 @@ static int s3c24xx_serial_probe(struct platform_device *pdev) static int s3c24xx_serial_remove(struct platform_device *dev) { struct uart_port *port = s3c24xx_dev_to_port(&dev->dev); + struct s3c24xx_uart_port *ourport = to_ourport(port); if (port) { + pm_runtime_get_sync(&dev->dev); + s3c24xx_serial_cpufreq_deregister(to_ourport(port)); uart_remove_one_port(&s3c24xx_uart_drv, port); + + clk_disable_unprepare(ourport->clk); + if (!IS_ERR(ourport->baudclk)) + clk_disable_unprepare(ourport->baudclk); + + pm_runtime_disable(&dev->dev); + pm_runtime_set_suspended(&dev->dev); + pm_runtime_put_noidle(&dev->dev); } uart_unregister_driver(&s3c24xx_uart_drv); @@ -2286,8 +2314,8 @@ static int s3c24xx_serial_remove(struct platform_device *dev) } /* UART power management code */ -#ifdef CONFIG_PM_SLEEP -static int s3c24xx_serial_suspend(struct device *dev) + +static int __maybe_unused s3c24xx_serial_suspend(struct device *dev) { struct uart_port *port = s3c24xx_dev_to_port(dev); @@ -2297,7 +2325,7 @@ static int s3c24xx_serial_suspend(struct device *dev) return 0; } -static int s3c24xx_serial_resume(struct device *dev) +static int __maybe_unused s3c24xx_serial_resume(struct device *dev) { struct uart_port *port = s3c24xx_dev_to_port(dev); struct s3c24xx_uart_port *ourport = to_ourport(port); @@ -2317,7 +2345,7 @@ static int s3c24xx_serial_resume(struct device *dev) return 0; } -static int s3c24xx_serial_resume_noirq(struct device *dev) +static int __maybe_unused s3c24xx_serial_resume_noirq(struct device *dev) { struct uart_port *port = s3c24xx_dev_to_port(dev); struct s3c24xx_uart_port *ourport = to_ourport(port); @@ -2387,16 +2415,14 @@ static int s3c24xx_serial_resume_noirq(struct device *dev) } static const struct dev_pm_ops s3c24xx_serial_pm_ops = { +#ifdef CONFIG_PM_SLEEP .suspend = s3c24xx_serial_suspend, .resume = s3c24xx_serial_resume, .resume_noirq = s3c24xx_serial_resume_noirq, +#endif + SET_RUNTIME_PM_OPS(s3c24xx_serial_runtime_suspend, + s3c24xx_serial_runtime_resume, NULL) }; -#define SERIAL_SAMSUNG_PM_OPS (&s3c24xx_serial_pm_ops) - -#else /* !CONFIG_PM_SLEEP */ - -#define SERIAL_SAMSUNG_PM_OPS NULL -#endif /* CONFIG_PM_SLEEP */ /* Console code */ @@ -2937,7 +2963,7 @@ static struct platform_driver samsung_serial_driver = { .id_table = s3c24xx_serial_driver_ids, .driver = { .name = "samsung-uart", - .pm = SERIAL_SAMSUNG_PM_OPS, + .pm = &s3c24xx_serial_pm_ops, .of_match_table = of_match_ptr(s3c24xx_uart_dt_match), }, }; diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 573421984948..b4c58deccabb 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -116,6 +116,9 @@ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode) dwc->current_dr_role = mode; } +static void dwc3_core_exit(struct dwc3 *dwc); +static int dwc3_core_init_for_resume(struct dwc3 *dwc); + static void __dwc3_set_mode(struct work_struct *work) { struct dwc3 *dwc = work_to_dwc(work); @@ -130,10 
+133,11 @@ static void __dwc3_set_mode(struct work_struct *work) if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG) dwc3_otg_update(dwc, 0); - if (!dwc->desired_dr_role) + if (!dwc->desired_dr_role && !dwc->role_switch_reset_quirk) goto out; - if (dwc->desired_dr_role == dwc->current_dr_role) + if (dwc->desired_dr_role == dwc->current_dr_role && + !dwc->role_switch_reset_quirk) goto out; if (dwc->desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev) @@ -158,6 +162,34 @@ static void __dwc3_set_mode(struct work_struct *work) break; } + if (dwc->role_switch_reset_quirk) { + if (dwc->current_dr_role) { + dwc->current_dr_role = 0; + dwc3_core_exit(dwc); + } + + if (dwc->desired_dr_role) { + /* + * the first call to __dwc3_set_mode comes from + * dwc3_drd_init. In that case dwc3_core_init has been + * called but dwc->current_dr_role is zero such that + * we must not reinitialize the core again here. + */ + if (dwc->role_switch_reset_quirk_initialized) { + ret = dwc3_core_init_for_resume(dwc); + if (ret) { + dev_err(dwc->dev, + "failed to reinitialize core\n"); + goto out; + } + } + + dwc->role_switch_reset_quirk_initialized = 1; + } else { + goto out; + } + } + /* For DRD host or device mode only */ if (dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG) { reg = dwc3_readl(dwc->regs, DWC3_GCTL); @@ -1764,6 +1796,9 @@ static int dwc3_probe(struct platform_device *pdev) return dev_err_probe(dev, PTR_ERR(dwc->susp_clk), "could not get suspend clock\n"); } + + if (of_device_is_compatible(dev->of_node, "apple,dwc3")) + dwc->role_switch_reset_quirk = true; } ret = reset_control_deassert(dwc->reset); @@ -1900,7 +1935,6 @@ static int dwc3_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM static int dwc3_core_init_for_resume(struct dwc3 *dwc) { int ret; @@ -1927,6 +1961,7 @@ assert_reset: return ret; } +#ifdef CONFIG_PM static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) { unsigned long flags; diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 81c486b3941c..ee0225ce2234 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -1103,6 +1103,9 @@ struct dwc3_scratchpad_array { * 3 - Reserved * @dis_metastability_quirk: set to disable metastability quirk. * @dis_split_quirk: set to disable split boundary. + * @role_switch_reset_quirk: set to force reinitialization after any role switch + * @role_switch_reset_quirk_initialized: set to true after the first role switch + * which is triggered from dwc3_drd_init directly * @imod_interval: set the interrupt moderation interval in 250ns * increments or 0 to disable. * @max_cfg_eps: current max number of IN eps used across all USB configs. @@ -1318,6 +1321,9 @@ struct dwc3 { unsigned dis_split_quirk:1; unsigned async_callbacks:1; + unsigned role_switch_reset_quirk:1; + unsigned role_switch_reset_quirk_initialized:1; + u16 imod_interval; int max_cfg_eps; diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c index 039bf241769a..4579505cac1f 100644 --- a/drivers/usb/dwc3/drd.c +++ b/drivers/usb/dwc3/drd.c @@ -461,6 +461,9 @@ static int dwc3_usb_role_switch_set(struct usb_role_switch *sw, break; } + if (dwc->role_switch_reset_quirk && role == USB_ROLE_NONE) + mode = 0; + dwc3_set_mode(dwc, mode); return 0; } @@ -489,6 +492,10 @@ static enum usb_role dwc3_usb_role_switch_get(struct usb_role_switch *sw) role = USB_ROLE_DEVICE; break; } + + if (dwc->role_switch_reset_quirk && !dwc->current_dr_role) + role = USB_ROLE_NONE; + spin_unlock_irqrestore(&dwc->lock, flags); return role; } |
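The role_switch_reset_quirk above changes __dwc3_set_mode() from skipping redundant role transitions to tearing the controller core down and reinitializing it around every switch. A standalone, hypothetical restatement of that decision logic (names invented, locking and register programming omitted), as read from the diff:

#include <linux/types.h>

/* Hypothetical sketch, not taken from the driver: what __dwc3_set_mode()
 * decides to do with and without the role-switch reset quirk. */
enum example_mode_action {
	EXAMPLE_SKIP,		/* leave the core untouched */
	EXAMPLE_REPROGRAM,	/* normal path: reprogram PRTCAP only */
	EXAMPLE_EXIT,		/* quirk, role cleared: power the core down */
	EXAMPLE_EXIT_REINIT,	/* quirk: power down if running, reinit, reprogram */
};

static enum example_mode_action
example_set_mode_action(unsigned int current_role, unsigned int desired_role,
			bool reset_quirk)
{
	if (!reset_quirk) {
		/* Without the quirk, no-op transitions are skipped */
		if (!desired_role || desired_role == current_role)
			return EXAMPLE_SKIP;
		return EXAMPLE_REPROGRAM;
	}

	/*
	 * With the quirk, even a repeated role is serviced with a full core
	 * reset; the one exception in the diff is the very first call from
	 * dwc3_drd_init(), where the freshly initialized core is reused.
	 */
	if (!desired_role)
		return current_role ? EXAMPLE_EXIT : EXAMPLE_SKIP;

	return EXAMPLE_EXIT_REINIT;
}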