git: 5c1def83a4cc - main - ath12k: import driver from upstream

From: Bjoern A. Zeeb <bz_at_FreeBSD.org>
Date: Mon, 21 Aug 2023 01:36:50 UTC
The branch main has been updated by bz:

URL: https://cgit.FreeBSD.org/src/commit/?id=5c1def83a4cc2eb3f828600dfd786f8c5788fb7d

commit 5c1def83a4cc2eb3f828600dfd786f8c5788fb7d
Author:     Bjoern A. Zeeb <bz@FreeBSD.org>
AuthorDate: 2023-06-11 00:23:44 +0000
Commit:     Bjoern A. Zeeb <bz@FreeBSD.org>
CommitDate: 2023-08-21 01:33:04 +0000

    ath12k: import driver from upstream
    
    Import BSD-3-Clause-Clear ath12k driver based on wireless-testing
    (wt-2023-05-11) 711dca0ca3d77414f8f346e564e9c8640147f40d (after v6.4-rc1)
    with further updates based on
    (wt-2023-06-09) 7bd20e011626ccc3ad53e57873452b1716fcfaaa (after v6.4-rc5),
    (wt-2023-07-24) 62e409149b62a285e89018e49b2e115757fb9022 (after v6.5-rc3),
    (wt-2023-08-06) 2a220a15be657a24868368892e3e2caba2115283 (after v6.5-rc4),
    (wt-2023-08-13) 81e147b1317ee7cde8b624ee8c0501b470d7e91c (after v6.5-rc5).
    
    Complement the driver to make it compile on FreeBSD
    using LinuxKPI, with changes covered by #ifdef (__FreeBSD__).
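
    For illustration, such guards typically take the following form;
    the header and function names in this sketch are hypothetical and
    not taken from the imported files:

	/*
	 * Minimal sketch of the #ifdef (__FreeBSD__) guard pattern;
	 * identifiers here are illustrative only.
	 */
	#if defined(__FreeBSD__)
	#include <linux/delay.h>	/* extra LinuxKPI header pulled in on FreeBSD */
	#endif

	static void ath12k_guard_example(void)
	{
	#if defined(__linux__)
		/* upstream code path, left unchanged */
	#elif defined(__FreeBSD__)
		/* LinuxKPI-based replacement used when building on FreeBSD */
	#endif
	}

    Keeping local changes behind such guards keeps the diff against
    upstream small and makes future updates easier to merge.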
    
    Add the module build framework but keep it disconnected from the
    build for now.
    The current driver (or rather LinuxKPI) lacks support for some
    "qcom" bits needed to get things working (as ath11k does).
    Various people have expressed interest in enhancing this support
    further for ath11k, which will equally benefit ath12k.
    
    Given the lack of full license texts in the files, this is
    imported under the draft policy for handling SPDX files (D29226)
    and with approval for BSD-3-Clause-Clear. [1]
    
    Approved by:    core (jhb, 2023-05-11) [1]
    MFC after:      20 days
---
 sys/contrib/dev/athk/ath12k/Kconfig    |   34 +
 sys/contrib/dev/athk/ath12k/Makefile   |   27 +
 sys/contrib/dev/athk/ath12k/ce.c       |  964 ++++
 sys/contrib/dev/athk/ath12k/ce.h       |  184 +
 sys/contrib/dev/athk/ath12k/core.c     |  968 +++++
 sys/contrib/dev/athk/ath12k/core.h     |  879 ++++
 sys/contrib/dev/athk/ath12k/dbring.c   |  357 ++
 sys/contrib/dev/athk/ath12k/dbring.h   |   80 +
 sys/contrib/dev/athk/ath12k/debug.c    |  161 +
 sys/contrib/dev/athk/ath12k/debug.h    |   67 +
 sys/contrib/dev/athk/ath12k/dp.c       | 1587 +++++++
 sys/contrib/dev/athk/ath12k/dp.h       | 1820 ++++++++
 sys/contrib/dev/athk/ath12k/dp_mon.c   | 2597 +++++++++++
 sys/contrib/dev/athk/ath12k/dp_mon.h   |  106 +
 sys/contrib/dev/athk/ath12k/dp_rx.c    | 4274 ++++++++++++++++++
 sys/contrib/dev/athk/ath12k/dp_rx.h    |  145 +
 sys/contrib/dev/athk/ath12k/dp_tx.c    | 1225 ++++++
 sys/contrib/dev/athk/ath12k/dp_tx.h    |   41 +
 sys/contrib/dev/athk/ath12k/hal.c      | 2235 ++++++++++
 sys/contrib/dev/athk/ath12k/hal.h      | 1142 +++++
 sys/contrib/dev/athk/ath12k/hal_desc.h | 2961 +++++++++++++
 sys/contrib/dev/athk/ath12k/hal_rx.c   |  850 ++++
 sys/contrib/dev/athk/ath12k/hal_rx.h   |  704 +++
 sys/contrib/dev/athk/ath12k/hal_tx.c   |  145 +
 sys/contrib/dev/athk/ath12k/hal_tx.h   |  194 +
 sys/contrib/dev/athk/ath12k/hif.h      |  144 +
 sys/contrib/dev/athk/ath12k/htc.c      |  789 ++++
 sys/contrib/dev/athk/ath12k/htc.h      |  316 ++
 sys/contrib/dev/athk/ath12k/hw.c       | 1047 +++++
 sys/contrib/dev/athk/ath12k/hw.h       |  314 ++
 sys/contrib/dev/athk/ath12k/mac.c      | 7489 ++++++++++++++++++++++++++++++++
 sys/contrib/dev/athk/ath12k/mac.h      |   76 +
 sys/contrib/dev/athk/ath12k/mhi.c      |  619 +++
 sys/contrib/dev/athk/ath12k/mhi.h      |   46 +
 sys/contrib/dev/athk/ath12k/pci.c      | 1449 ++++++
 sys/contrib/dev/athk/ath12k/pci.h      |  141 +
 sys/contrib/dev/athk/ath12k/peer.c     |  342 ++
 sys/contrib/dev/athk/ath12k/peer.h     |   67 +
 sys/contrib/dev/athk/ath12k/qmi.c      | 3114 +++++++++++++
 sys/contrib/dev/athk/ath12k/qmi.h      |  570 +++
 sys/contrib/dev/athk/ath12k/reg.c      |  745 ++++
 sys/contrib/dev/athk/ath12k/reg.h      |   95 +
 sys/contrib/dev/athk/ath12k/rx_desc.h  | 1441 ++++++
 sys/contrib/dev/athk/ath12k/trace.c    |   10 +
 sys/contrib/dev/athk/ath12k/trace.h    |  152 +
 sys/contrib/dev/athk/ath12k/wmi.c      | 7147 ++++++++++++++++++++++++++++++
 sys/contrib/dev/athk/ath12k/wmi.h      | 4918 +++++++++++++++++++++
 sys/modules/ath12k/Makefile            |   37 +
 48 files changed, 54815 insertions(+)

diff --git a/sys/contrib/dev/athk/ath12k/Kconfig b/sys/contrib/dev/athk/ath12k/Kconfig
new file mode 100644
index 000000000000..4f9c514c13e7
--- /dev/null
+++ b/sys/contrib/dev/athk/ath12k/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+config ATH12K
+	tristate "Qualcomm Technologies Wi-Fi 7 support (ath12k)"
+	depends on MAC80211 && HAS_DMA && PCI
+	depends on CRYPTO_MICHAEL_MIC
+	select QCOM_QMI_HELPERS
+	select MHI_BUS
+	select QRTR
+	select QRTR_MHI
+	help
+	  Enable support for Qualcomm Technologies Wi-Fi 7 (IEEE
+	  802.11be) family of chipsets, for example WCN7850 and
+	  QCN9274.
+
+	  If you choose to build a module, it'll be called ath12k.
+
+config ATH12K_DEBUG
+	bool "ath12k debugging"
+	depends on ATH12K
+	help
+	  Enable debug support, for example debug messages which must
+	  be enabled separately using the debug_mask module parameter.
+
+	  If unsure, say Y to make it easier to debug problems. But if
+	  you want optimal performance choose N.
+
+config ATH12K_TRACING
+	bool "ath12k tracing support"
+	depends on ATH12K && EVENT_TRACING
+	help
+	  Enable ath12k tracing infrastructure.
+
+	  If unsure, say Y to make it easier to debug problems. But if
+	  you want optimal performance choose N.
diff --git a/sys/contrib/dev/athk/ath12k/Makefile b/sys/contrib/dev/athk/ath12k/Makefile
new file mode 100644
index 000000000000..62c52e733b5e
--- /dev/null
+++ b/sys/contrib/dev/athk/ath12k/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+obj-$(CONFIG_ATH12K) += ath12k.o
+ath12k-y += core.o \
+	    hal.o \
+	    hal_tx.o \
+	    hal_rx.o \
+	    wmi.o \
+	    mac.o \
+	    reg.o \
+	    htc.o \
+	    qmi.o \
+	    dp.o  \
+	    dp_tx.o \
+	    dp_rx.o \
+	    debug.o \
+	    ce.o \
+	    peer.o \
+	    dbring.o \
+	    hw.o \
+	    mhi.o \
+	    pci.o \
+	    dp_mon.o
+
+ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/sys/contrib/dev/athk/ath12k/ce.c b/sys/contrib/dev/athk/ath12k/ce.c
new file mode 100644
index 000000000000..be0d669d31fc
--- /dev/null
+++ b/sys/contrib/dev/athk/ath12k/ce.c
@@ -0,0 +1,964 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "dp_rx.h"
+#include "debug.h"
+#include "hif.h"
+
+const struct ce_attr ath12k_host_ce_config_qcn9274[] = {
+	/* CE0: host->target HTC control and raw streams */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 16,
+		.src_sz_max = 2048,
+		.dest_nentries = 0,
+	},
+
+	/* CE1: target->host HTT + HTC control */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 512,
+		.recv_cb = ath12k_htc_rx_completion_handler,
+	},
+
+	/* CE2: target->host WMI */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 128,
+		.recv_cb = ath12k_htc_rx_completion_handler,
+	},
+
+	/* CE3: host->target WMI (mac0) */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 32,
+		.src_sz_max = 2048,
+		.dest_nentries = 0,
+	},
+
+	/* CE4: host->target HTT */
+	{
+		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+		.src_nentries = 2048,
+		.src_sz_max = 256,
+		.dest_nentries = 0,
+	},
+
+	/* CE5: target->host pktlog */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 512,
+		.recv_cb = ath12k_dp_htt_htc_t2h_msg_handler,
+	},
+
+	/* CE6: target autonomous hif_memcpy */
+	{
+		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+
+	/* CE7: host->target WMI (mac1) */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 32,
+		.src_sz_max = 2048,
+		.dest_nentries = 0,
+	},
+
+	/* CE8: target autonomous hif_memcpy */
+	{
+		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+
+	/* CE9: MHI */
+	{
+		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+
+	/* CE10: MHI */
+	{
+		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+
+	/* CE11: MHI */
+	{
+		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+
+	/* CE12: CV Prefetch */
+	{
+		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+
+	/* CE13: CV Prefetch */
+	{
+		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+
+	/* CE14: target->host dbg log */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 512,
+		.recv_cb = ath12k_htc_rx_completion_handler,
+	},
+
+	/* CE15: reserved for future use */
+	{
+		.flags = (CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+};
+
+const struct ce_attr ath12k_host_ce_config_wcn7850[] = {
+	/* CE0: host->target HTC control and raw streams */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 16,
+		.src_sz_max = 2048,
+		.dest_nentries = 0,
+	},
+
+	/* CE1: target->host HTT + HTC control */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 512,
+		.recv_cb = ath12k_htc_rx_completion_handler,
+	},
+
+	/* CE2: target->host WMI */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 64,
+		.recv_cb = ath12k_htc_rx_completion_handler,
+	},
+
+	/* CE3: host->target WMI (mac0) */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 32,
+		.src_sz_max = 2048,
+		.dest_nentries = 0,
+	},
+
+	/* CE4: host->target HTT */
+	{
+		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+		.src_nentries = 2048,
+		.src_sz_max = 256,
+		.dest_nentries = 0,
+	},
+
+	/* CE5: target->host pktlog */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+
+	/* CE6: target autonomous hif_memcpy */
+	{
+		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+
+	/* CE7: host->target WMI (mac1) */
+	{
+		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 0,
+	},
+
+	/* CE8: target autonomous hif_memcpy */
+	{
+		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+
+};
+
+static int ath12k_ce_rx_buf_enqueue_pipe(struct ath12k_ce_pipe *pipe,
+					 struct sk_buff *skb, dma_addr_t paddr)
+{
+	struct ath12k_base *ab = pipe->ab;
+	struct ath12k_ce_ring *ring = pipe->dest_ring;
+	struct hal_srng *srng;
+	unsigned int write_index;
+	unsigned int nentries_mask = ring->nentries_mask;
+	struct hal_ce_srng_dest_desc *desc;
+	int ret;
+
+	lockdep_assert_held(&ab->ce.ce_lock);
+
+	write_index = ring->write_index;
+
+	srng = &ab->hal.srng_list[ring->hal_ring_id];
+
+	spin_lock_bh(&srng->lock);
+
+	ath12k_hal_srng_access_begin(ab, srng);
+
+	if (unlikely(ath12k_hal_srng_src_num_free(ab, srng, false) < 1)) {
+		ret = -ENOSPC;
+		goto exit;
+	}
+
+	desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
+	if (!desc) {
+		ret = -ENOSPC;
+		goto exit;
+	}
+
+	ath12k_hal_ce_dst_set_desc(desc, paddr);
+
+	ring->skb[write_index] = skb;
+	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+	ring->write_index = write_index;
+
+	pipe->rx_buf_needed--;
+
+	ret = 0;
+exit:
+	ath12k_hal_srng_access_end(ab, srng);
+
+	spin_unlock_bh(&srng->lock);
+
+	return ret;
+}
+
+static int ath12k_ce_rx_post_pipe(struct ath12k_ce_pipe *pipe)
+{
+	struct ath12k_base *ab = pipe->ab;
+	struct sk_buff *skb;
+	dma_addr_t paddr;
+	int ret = 0;
+
+	if (!(pipe->dest_ring || pipe->status_ring))
+		return 0;
+
+	spin_lock_bh(&ab->ce.ce_lock);
+	while (pipe->rx_buf_needed) {
+		skb = dev_alloc_skb(pipe->buf_sz);
+		if (!skb) {
+			ret = -ENOMEM;
+			goto exit;
+		}
+
+		WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
+
+		paddr = dma_map_single(ab->dev, skb->data,
+				       skb->len + skb_tailroom(skb),
+				       DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(ab->dev, paddr))) {
+			ath12k_warn(ab, "failed to dma map ce rx buf\n");
+			dev_kfree_skb_any(skb);
+			ret = -EIO;
+			goto exit;
+		}
+
+		ATH12K_SKB_RXCB(skb)->paddr = paddr;
+
+		ret = ath12k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
+		if (ret) {
+			ath12k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
+			dma_unmap_single(ab->dev, paddr,
+					 skb->len + skb_tailroom(skb),
+					 DMA_FROM_DEVICE);
+			dev_kfree_skb_any(skb);
+			goto exit;
+		}
+	}
+
+exit:
+	spin_unlock_bh(&ab->ce.ce_lock);
+	return ret;
+}
+
+static int ath12k_ce_completed_recv_next(struct ath12k_ce_pipe *pipe,
+					 struct sk_buff **skb, int *nbytes)
+{
+	struct ath12k_base *ab = pipe->ab;
+	struct hal_ce_srng_dst_status_desc *desc;
+	struct hal_srng *srng;
+	unsigned int sw_index;
+	unsigned int nentries_mask;
+	int ret = 0;
+
+	spin_lock_bh(&ab->ce.ce_lock);
+
+	sw_index = pipe->dest_ring->sw_index;
+	nentries_mask = pipe->dest_ring->nentries_mask;
+
+	srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];
+
+	spin_lock_bh(&srng->lock);
+
+	ath12k_hal_srng_access_begin(ab, srng);
+
+	desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
+	if (!desc) {
+		ret = -EIO;
+		goto err;
+	}
+
+	*nbytes = ath12k_hal_ce_dst_status_get_length(desc);
+	if (*nbytes == 0) {
+		ret = -EIO;
+		goto err;
+	}
+
+	*skb = pipe->dest_ring->skb[sw_index];
+	pipe->dest_ring->skb[sw_index] = NULL;
+
+	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+	pipe->dest_ring->sw_index = sw_index;
+
+	pipe->rx_buf_needed++;
+err:
+	ath12k_hal_srng_access_end(ab, srng);
+
+	spin_unlock_bh(&srng->lock);
+
+	spin_unlock_bh(&ab->ce.ce_lock);
+
+	return ret;
+}
+
+static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe)
+{
+	struct ath12k_base *ab = pipe->ab;
+	struct sk_buff *skb;
+	struct sk_buff_head list;
+	unsigned int nbytes, max_nbytes;
+	int ret;
+
+	__skb_queue_head_init(&list);
+	while (ath12k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
+		max_nbytes = skb->len + skb_tailroom(skb);
+		dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
+				 max_nbytes, DMA_FROM_DEVICE);
+
+		if (unlikely(max_nbytes < nbytes)) {
+			ath12k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
+				    nbytes, max_nbytes);
+			dev_kfree_skb_any(skb);
+			continue;
+		}
+
+		skb_put(skb, nbytes);
+		__skb_queue_tail(&list, skb);
+	}
+
+	while ((skb = __skb_dequeue(&list))) {
+		ath12k_dbg(ab, ATH12K_DBG_AHB, "rx ce pipe %d len %d\n",
+			   pipe->pipe_num, skb->len);
+		pipe->recv_cb(ab, skb);
+	}
+
+	ret = ath12k_ce_rx_post_pipe(pipe);
+	if (ret && ret != -ENOSPC) {
+		ath12k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
+			    pipe->pipe_num, ret);
+		mod_timer(&ab->rx_replenish_retry,
+			  jiffies + ATH12K_CE_RX_POST_RETRY_JIFFIES);
+	}
+}
+
+static struct sk_buff *ath12k_ce_completed_send_next(struct ath12k_ce_pipe *pipe)
+{
+	struct ath12k_base *ab = pipe->ab;
+	struct hal_ce_srng_src_desc *desc;
+	struct hal_srng *srng;
+	unsigned int sw_index;
+	unsigned int nentries_mask;
+	struct sk_buff *skb;
+
+	spin_lock_bh(&ab->ce.ce_lock);
+
+	sw_index = pipe->src_ring->sw_index;
+	nentries_mask = pipe->src_ring->nentries_mask;
+
+	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
+
+	spin_lock_bh(&srng->lock);
+
+	ath12k_hal_srng_access_begin(ab, srng);
+
+	desc = ath12k_hal_srng_src_reap_next(ab, srng);
+	if (!desc) {
+		skb = ERR_PTR(-EIO);
+		goto err_unlock;
+	}
+
+	skb = pipe->src_ring->skb[sw_index];
+
+	pipe->src_ring->skb[sw_index] = NULL;
+
+	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+	pipe->src_ring->sw_index = sw_index;
+
+err_unlock:
+	spin_unlock_bh(&srng->lock);
+
+	spin_unlock_bh(&ab->ce.ce_lock);
+
+	return skb;
+}
+
+static void ath12k_ce_send_done_cb(struct ath12k_ce_pipe *pipe)
+{
+	struct ath12k_base *ab = pipe->ab;
+	struct sk_buff *skb;
+
+	while (!IS_ERR(skb = ath12k_ce_completed_send_next(pipe))) {
+		if (!skb)
+			continue;
+
+		dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr, skb->len,
+				 DMA_TO_DEVICE);
+		dev_kfree_skb_any(skb);
+	}
+}
+
+static void ath12k_ce_srng_msi_ring_params_setup(struct ath12k_base *ab, u32 ce_id,
+						 struct hal_srng_params *ring_params)
+{
+	u32 msi_data_start;
+	u32 msi_data_count, msi_data_idx;
+	u32 msi_irq_start;
+	u32 addr_lo;
+	u32 addr_hi;
+	int ret;
+
+	ret = ath12k_hif_get_user_msi_vector(ab, "CE",
+					     &msi_data_count, &msi_data_start,
+					     &msi_irq_start);
+
+	if (ret)
+		return;
+
+	ath12k_hif_get_msi_address(ab, &addr_lo, &addr_hi);
+	ath12k_hif_get_ce_msi_idx(ab, ce_id, &msi_data_idx);
+
+	ring_params->msi_addr = addr_lo;
+	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
+	ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
+	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
+}
+
+static int ath12k_ce_init_ring(struct ath12k_base *ab,
+			       struct ath12k_ce_ring *ce_ring,
+			       int ce_id, enum hal_ring_type type)
+{
+	struct hal_srng_params params = { 0 };
+	int ret;
+
+	params.ring_base_paddr = ce_ring->base_addr_ce_space;
+	params.ring_base_vaddr = ce_ring->base_addr_owner_space;
+	params.num_entries = ce_ring->nentries;
+
+	if (!(CE_ATTR_DIS_INTR & ab->hw_params->host_ce_config[ce_id].flags))
+		ath12k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);
+
+	switch (type) {
+	case HAL_CE_SRC:
+		if (!(CE_ATTR_DIS_INTR & ab->hw_params->host_ce_config[ce_id].flags))
+			params.intr_batch_cntr_thres_entries = 1;
+		break;
+	case HAL_CE_DST:
+		params.max_buffer_len = ab->hw_params->host_ce_config[ce_id].src_sz_max;
+		if (!(ab->hw_params->host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
+			params.intr_timer_thres_us = 1024;
+			params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+			params.low_threshold = ce_ring->nentries - 3;
+		}
+		break;
+	case HAL_CE_DST_STATUS:
+		if (!(ab->hw_params->host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
+			params.intr_batch_cntr_thres_entries = 1;
+			params.intr_timer_thres_us = 0x1000;
+		}
+		break;
+	default:
+		ath12k_warn(ab, "Invalid CE ring type %d\n", type);
+		return -EINVAL;
+	}
+
+	/* TODO: Init other params needed by HAL to init the ring */
+
+	ret = ath12k_hal_srng_setup(ab, type, ce_id, 0, &params);
+	if (ret < 0) {
+		ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n",
+			    ret, ce_id);
+		return ret;
+	}
+
+	ce_ring->hal_ring_id = ret;
+
+	return 0;
+}
+
+static struct ath12k_ce_ring *
+ath12k_ce_alloc_ring(struct ath12k_base *ab, int nentries, int desc_sz)
+{
+	struct ath12k_ce_ring *ce_ring;
+	dma_addr_t base_addr;
+
+	ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
+	if (!ce_ring)
+		return ERR_PTR(-ENOMEM);
+
+	ce_ring->nentries = nentries;
+	ce_ring->nentries_mask = nentries - 1;
+
+	/* Legacy platforms that do not support cache
+	 * coherent DMA are unsupported
+	 */
+	ce_ring->base_addr_owner_space_unaligned =
+		dma_alloc_coherent(ab->dev,
+				   nentries * desc_sz + CE_DESC_RING_ALIGN,
+				   &base_addr, GFP_KERNEL);
+	if (!ce_ring->base_addr_owner_space_unaligned) {
+		kfree(ce_ring);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ce_ring->base_addr_ce_space_unaligned = base_addr;
+
+	ce_ring->base_addr_owner_space =
+		PTR_ALIGN(ce_ring->base_addr_owner_space_unaligned,
+			  CE_DESC_RING_ALIGN);
+
+	ce_ring->base_addr_ce_space = ALIGN(ce_ring->base_addr_ce_space_unaligned,
+					    CE_DESC_RING_ALIGN);
+
+	return ce_ring;
+}
+
+static int ath12k_ce_alloc_pipe(struct ath12k_base *ab, int ce_id)
+{
+	struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
+	const struct ce_attr *attr = &ab->hw_params->host_ce_config[ce_id];
+	struct ath12k_ce_ring *ring;
+	int nentries;
+	int desc_sz;
+
+	pipe->attr_flags = attr->flags;
+
+	if (attr->src_nentries) {
+		pipe->send_cb = ath12k_ce_send_done_cb;
+		nentries = roundup_pow_of_two(attr->src_nentries);
+		desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
+		ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
+		if (IS_ERR(ring))
+			return PTR_ERR(ring);
+		pipe->src_ring = ring;
+	}
+
+	if (attr->dest_nentries) {
+		pipe->recv_cb = attr->recv_cb;
+		nentries = roundup_pow_of_two(attr->dest_nentries);
+		desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
+		ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
+		if (IS_ERR(ring))
+			return PTR_ERR(ring);
+		pipe->dest_ring = ring;
+
+		desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
+		ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
+		if (IS_ERR(ring))
+			return PTR_ERR(ring);
+		pipe->status_ring = ring;
+	}
+
+	return 0;
+}
+
+void ath12k_ce_per_engine_service(struct ath12k_base *ab, u16 ce_id)
+{
+	struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
+
+	if (pipe->send_cb)
+		pipe->send_cb(pipe);
+
+	if (pipe->recv_cb)
+		ath12k_ce_recv_process_cb(pipe);
+}
+
+void ath12k_ce_poll_send_completed(struct ath12k_base *ab, u8 pipe_id)
+{
+	struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
+
+	if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
+		pipe->send_cb(pipe);
+}
+
+int ath12k_ce_send(struct ath12k_base *ab, struct sk_buff *skb, u8 pipe_id,
+		   u16 transfer_id)
+{
+	struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
+	struct hal_ce_srng_src_desc *desc;
+	struct hal_srng *srng;
+	unsigned int write_index, sw_index;
+	unsigned int nentries_mask;
+	int ret = 0;
+	u8 byte_swap_data = 0;
+	int num_used;
+
+	/* Check if some entries could be regained by handling tx completion if
+	 * the CE has interrupts disabled and the used entries is more than the
+	 * defined usage threshold.
+	 */
+	if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
+		spin_lock_bh(&ab->ce.ce_lock);
+		write_index = pipe->src_ring->write_index;
+
+		sw_index = pipe->src_ring->sw_index;
+
+		if (write_index >= sw_index)
+			num_used = write_index - sw_index;
+		else
+			num_used = pipe->src_ring->nentries - sw_index +
+				   write_index;
+
+		spin_unlock_bh(&ab->ce.ce_lock);
+
+		if (num_used > ATH12K_CE_USAGE_THRESHOLD)
+			ath12k_ce_poll_send_completed(ab, pipe->pipe_num);
+	}
+
+	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+		return -ESHUTDOWN;
+
+	spin_lock_bh(&ab->ce.ce_lock);
+
+	write_index = pipe->src_ring->write_index;
+	nentries_mask = pipe->src_ring->nentries_mask;
+
+	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
+
+	spin_lock_bh(&srng->lock);
+
+	ath12k_hal_srng_access_begin(ab, srng);
+
+	if (unlikely(ath12k_hal_srng_src_num_free(ab, srng, false) < 1)) {
+		ath12k_hal_srng_access_end(ab, srng);
+		ret = -ENOBUFS;
+		goto unlock;
+	}
+
+	desc = ath12k_hal_srng_src_get_next_reaped(ab, srng);
+	if (!desc) {
+		ath12k_hal_srng_access_end(ab, srng);
+		ret = -ENOBUFS;
+		goto unlock;
+	}
+
+	if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
+		byte_swap_data = 1;
+
+	ath12k_hal_ce_src_set_desc(desc, ATH12K_SKB_CB(skb)->paddr,
+				   skb->len, transfer_id, byte_swap_data);
+
+	pipe->src_ring->skb[write_index] = skb;
+	pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
+						       write_index);
+
+	ath12k_hal_srng_access_end(ab, srng);
+
+unlock:
+	spin_unlock_bh(&srng->lock);
+
+	spin_unlock_bh(&ab->ce.ce_lock);
+
+	return ret;
+}
+
+static void ath12k_ce_rx_pipe_cleanup(struct ath12k_ce_pipe *pipe)
+{
+	struct ath12k_base *ab = pipe->ab;
+	struct ath12k_ce_ring *ring = pipe->dest_ring;
+	struct sk_buff *skb;
+	int i;
+
+	if (!(ring && pipe->buf_sz))
+		return;
+
+	for (i = 0; i < ring->nentries; i++) {
+		skb = ring->skb[i];
+		if (!skb)
+			continue;
+
+		ring->skb[i] = NULL;
+		dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
+				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
+	}
+}
+
+void ath12k_ce_cleanup_pipes(struct ath12k_base *ab)
+{
+	struct ath12k_ce_pipe *pipe;
+	int pipe_num;
+
+	for (pipe_num = 0; pipe_num < ab->hw_params->ce_count; pipe_num++) {
+		pipe = &ab->ce.ce_pipe[pipe_num];
+		ath12k_ce_rx_pipe_cleanup(pipe);
+
+		/* Cleanup any src CE's which have interrupts disabled */
+		ath12k_ce_poll_send_completed(ab, pipe_num);
+
+		/* NOTE: Should we also clean up tx buffer in all pipes? */
+	}
+}
+
+void ath12k_ce_rx_post_buf(struct ath12k_base *ab)
+{
+	struct ath12k_ce_pipe *pipe;
+	int i;
+	int ret;
+
+	for (i = 0; i < ab->hw_params->ce_count; i++) {
+		pipe = &ab->ce.ce_pipe[i];
+		ret = ath12k_ce_rx_post_pipe(pipe);
+		if (ret) {
+			if (ret == -ENOSPC)
+				continue;
+
+			ath12k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
+				    i, ret);
+			mod_timer(&ab->rx_replenish_retry,
+				  jiffies + ATH12K_CE_RX_POST_RETRY_JIFFIES);
+
+			return;
+		}
+	}
+}
+
+void ath12k_ce_rx_replenish_retry(struct timer_list *t)
+{
+	struct ath12k_base *ab = from_timer(ab, t, rx_replenish_retry);
+
+	ath12k_ce_rx_post_buf(ab);
+}
+
+static void ath12k_ce_shadow_config(struct ath12k_base *ab)
+{
+	int i;
+
+	for (i = 0; i < ab->hw_params->ce_count; i++) {
+		if (ab->hw_params->host_ce_config[i].src_nentries)
+			ath12k_hal_srng_update_shadow_config(ab, HAL_CE_SRC, i);
+
+		if (ab->hw_params->host_ce_config[i].dest_nentries) {
+			ath12k_hal_srng_update_shadow_config(ab, HAL_CE_DST, i);
+			ath12k_hal_srng_update_shadow_config(ab, HAL_CE_DST_STATUS, i);
+		}
+	}
+}
+
+void ath12k_ce_get_shadow_config(struct ath12k_base *ab,
+				 u32 **shadow_cfg, u32 *shadow_cfg_len)
+{
+	if (!ab->hw_params->supports_shadow_regs)
+		return;
+
+	ath12k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
+
+	/* shadow is already configured */
+	if (*shadow_cfg_len)
+		return;
+
+	/* shadow isn't configured yet, configure now.
+	 * non-CE srngs are configured firstly, then
+	 * all CE srngs.
+	 */
+	ath12k_hal_srng_shadow_config(ab);
+	ath12k_ce_shadow_config(ab);
+
*** 54204 LINES SKIPPED ***