Date:   Sun, 23 Oct 2022 20:28:08 +0200
From:   Lorenzo Bianconi <lorenzo@...nel.org>
To:     netdev@...r.kernel.org
Cc:     nbd@....name, john@...ozen.org, sean.wang@...iatek.com,
        Mark-MC.Lee@...iatek.com, davem@...emloft.net, edumazet@...gle.com,
        kuba@...nel.org, pabeni@...hat.com, matthias.bgg@...il.com,
        linux-mediatek@...ts.infradead.org, lorenzo.bianconi@...hat.com,
        Bo.Jiao@...iatek.com, sujuan.chen@...iatek.com,
        ryder.Lee@...iatek.com, evelyn.tsai@...iatek.com,
        devicetree@...r.kernel.org, robh@...nel.org, daniel@...rotopia.org
Subject: [PATCH v2 net-next 4/6] net: ethernet: mtk_wed: introduce wed wo support

Introduce WO chip support to the mtk_wed driver. The MTK WED WO module
is used to implement RX Wireless Ethernet Dispatch and to offload
traffic received by the WLAN NIC onto the wired interface.
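
A minimal usage sketch of the control path this enables (WO_CMD_CFG is
a hypothetical command ID used for illustration only; the actual
command set is defined by the WO firmware interface, not by this
patch):

    /* send a configuration blob to the WO firmware and wait for the
     * reply; mtk_wed_mcu_send_msg() wraps the payload in an mcu header
     * and hands the resulting skb to mtk_wed_wo_queue_tx_skb()
     */
    static int wo_send_cfg(struct mtk_wed_wo *wo, void *data, int len)
    {
            return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
                                        WO_CMD_CFG, data, len, true);
    }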

Tested-by: Daniel Golle <daniel@...rotopia.org>
Co-developed-by: Sujuan Chen <sujuan.chen@...iatek.com>
Signed-off-by: Sujuan Chen <sujuan.chen@...iatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@...nel.org>
---
 drivers/net/ethernet/mediatek/Makefile      |   2 +-
 drivers/net/ethernet/mediatek/mtk_wed.c     |   7 +-
 drivers/net/ethernet/mediatek/mtk_wed.h     |   2 +
 drivers/net/ethernet/mediatek/mtk_wed_mcu.c |   3 +-
 drivers/net/ethernet/mediatek/mtk_wed_wo.c  | 545 ++++++++++++++++++++
 drivers/net/ethernet/mediatek/mtk_wed_wo.h  | 107 ++++
 6 files changed, 662 insertions(+), 4 deletions(-)
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_wo.c

diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index d4bdefa77159..8e0c61c33ff8 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -5,7 +5,7 @@
 
 obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
 mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
-mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o mtk_wed_wo.o
 ifdef CONFIG_DEBUG_FS
 mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
 endif
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 65e01bf4b4d2..9c9dd17332b6 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -16,6 +16,7 @@
 #include "mtk_wed_regs.h"
 #include "mtk_wed.h"
 #include "mtk_ppe.h"
+#include "mtk_wed_wo.h"
 
 #define MTK_PCIE_BASE(n)		(0x1a143000 + (n) * 0x2000)
 
@@ -355,6 +356,8 @@ mtk_wed_detach(struct mtk_wed_device *dev)
 
 	mtk_wed_free_buffer(dev);
 	mtk_wed_free_tx_rings(dev);
+	if (hw->version != 1)
+		mtk_wed_wo_deinit(hw);
 
 	if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
 		struct device_node *wlan_node;
@@ -878,9 +881,11 @@ mtk_wed_attach(struct mtk_wed_device *dev)
 	}
 
 	mtk_wed_hw_init_early(dev);
-	if (hw->hifsys)
+	if (hw->version == 1)
 		regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
 				   BIT(hw->index), 0);
+	else
+		ret = mtk_wed_wo_init(hw);
 
 out:
 	mutex_unlock(&hw_lock);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
index ae420ca01a48..af656fd31ff9 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -10,6 +10,7 @@
 #include <linux/netdevice.h>
 
 struct mtk_eth;
+struct mtk_wed_wo;
 
 struct mtk_wed_hw {
 	struct device_node *node;
@@ -22,6 +23,7 @@ struct mtk_wed_hw {
 	struct regmap *mirror;
 	struct dentry *debugfs_dir;
 	struct mtk_wed_device *wed_dev;
+	struct mtk_wed_wo *wed_wo;
 	u32 debugfs_reg;
 	u32 num_flows;
 	u8 version;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
index a3180336d242..e58beda37152 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
@@ -122,8 +122,7 @@ mtk_wed_mcu_skb_send_msg(struct mtk_wed_wo *wo, struct sk_buff *skb,
 	if (id == MTK_WED_MODULE_ID_WO)
 		hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_FROM_TO_WO);
 
-	dev_kfree_skb(skb);
-	return 0;
+	return mtk_wed_wo_queue_tx_skb(wo, &wo->q_tx, skb);
 }
 
 static int
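The hunk above stops dropping the message and hands it to the new WO
TX queue. Note that mtk_wed_wo_queue_tx_skb() below copies the message
into a preallocated, permanently mapped DMA buffer rather than mapping
the skb itself. A standalone sketch of that streaming-DMA bounce
pattern (dev, addr, buf and len stand in for the queue entry fields):

    /* give the long-lived DMA_TO_DEVICE mapping back to the CPU,
     * fill it, then hand ownership back to the device
     */
    dma_sync_single_for_cpu(dev, addr, len, DMA_TO_DEVICE);
    memcpy(buf, skb->data, len);
    dma_sync_single_for_device(dev, addr, len, DMA_TO_DEVICE);
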
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
new file mode 100644
index 000000000000..81c4be26a57c
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2022 Lorenzo Bianconi <lorenzo@...nel.org>  */
+
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_irq.h>
+#include <linux/bitfield.h>
+
+#include "mtk_wed.h"
+#include "mtk_wed_regs.h"
+#include "mtk_wed_wo.h"
+
+static u32
+mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
+{
+	u32 val;
+
+	if (regmap_read(wo->mmio.regs, reg, &val))
+		val = ~0;
+
+	return val;
+}
+
+static void
+mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
+{
+	regmap_write(wo->mmio.regs, reg, val);
+}
+
+static u32
+mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
+{
+	u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
+
+	return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
+}
+
+static void
+mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
+{
+	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
+}
+
+static void
+mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
+{
+	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
+}
+
+static void
+mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wo->mmio.lock, flags);
+	wo->mmio.irq_mask &= ~mask;
+	wo->mmio.irq_mask |= val;
+	if (set)
+		mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
+	spin_unlock_irqrestore(&wo->mmio.lock, flags);
+}
+
+static void
+mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
+{
+	mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
+	tasklet_schedule(&wo->mmio.irq_tasklet);
+}
+
+static void
+mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
+{
+	mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
+}
+
+static void
+mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
+{
+	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
+	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
+}
+
+static void
+mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+		      u32 val)
+{
+	wmb();
+	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
+}
+
+static void *
+mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
+		   bool flush)
+{
+	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
+	int index = (q->tail + 1) % q->n_desc;
+	struct mtk_wed_wo_queue_entry *entry;
+	struct mtk_wed_wo_queue_desc *desc;
+	void *buf;
+
+	if (!q->queued)
+		return NULL;
+
+	if (flush)
+		q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
+	else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
+		return NULL;
+
+	q->tail = index;
+	q->queued--;
+
+	desc = &q->desc[index];
+	entry = &q->entry[index];
+	buf = entry->buf;
+	if (len)
+		*len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
+				 le32_to_cpu(READ_ONCE(desc->ctrl)));
+	if (buf)
+		dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
+				 DMA_FROM_DEVICE);
+	entry->buf = NULL;
+
+	return buf;
+}
+
+static int
+mtk_wed_wo_queue_rx_skb(struct mtk_wed_wo *wo, struct sk_buff *skb)
+{
+	struct mtk_wed_mcu_hdr *hdr = (struct mtk_wed_mcu_hdr *)skb->data;
+	int ret;
+
+	ret = mtk_wed_mcu_check_msg(wo, skb);
+	if (ret) {
+		dev_kfree_skb(skb);
+		return ret;
+	}
+
+	if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
+		mtk_wed_mcu_rx_event(wo, skb);
+	else
+		mtk_wed_mcu_rx_unsolicited_event(wo, skb);
+
+	return 0;
+}
+
+static int
+mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+			gfp_t gfp, bool rx)
+{
+	enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+	int n_buf = 0;
+
+	spin_lock_bh(&q->lock);
+	while (q->queued < q->n_desc) {
+		void *buf = page_frag_alloc(&q->cache, q->buf_size, gfp);
+		struct mtk_wed_wo_queue_entry *entry;
+		dma_addr_t addr;
+
+		if (!buf)
+			break;
+
+		addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
+		if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
+			skb_free_frag(buf);
+			break;
+		}
+
+		q->head = (q->head + 1) % q->n_desc;
+		entry = &q->entry[q->head];
+		entry->addr = addr;
+		entry->len = q->buf_size;
+		q->entry[q->head].buf = buf;
+
+		if (rx) {
+			struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
+			u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
+				   FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
+					      entry->len);
+
+			WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
+			WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+		}
+		q->queued++;
+		n_buf++;
+	}
+	spin_unlock_bh(&q->lock);
+
+	return n_buf;
+}
+
+static void
+mtk_wed_wo_poll_complete(struct mtk_wed_wo *wo)
+{
+	mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
+	mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
+}
+
+static int
+mtk_wed_wo_rx_process(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+		      int budget)
+{
+	int done = 0;
+
+	while (done < budget) {
+		struct sk_buff *skb;
+		void *data;
+		u32 len;
+
+		data = mtk_wed_wo_dequeue(wo, q, &len, false);
+		if (!data)
+			break;
+
+		skb = build_skb(data, q->buf_size);
+		if (!skb) {
+			skb_free_frag(data);
+			continue;
+		}
+
+		__skb_put(skb, len);
+		mtk_wed_wo_queue_rx_skb(wo, skb);
+		done++;
+	}
+
+	if (mtk_wed_wo_queue_refill(wo, q, GFP_ATOMIC, true)) {
+		u32 index = (q->head - 1) % q->n_desc;
+
+		mtk_wed_wo_queue_kick(wo, q, index);
+	}
+
+	return done;
+}
+
+static int
+mtk_wed_wo_rx_poll(struct napi_struct *napi, int budget)
+{
+	struct mtk_wed_wo *wo = container_of(napi->dev, struct mtk_wed_wo,
+					     mmio.napi_dev);
+	int done = 0, cur;
+
+	rcu_read_lock();
+	do {
+		cur = mtk_wed_wo_rx_process(wo, &wo->q_rx, budget - done);
+		/* count processed rx packets toward the budget */
+		done += cur;
+	} while (cur && done < budget);
+	rcu_read_unlock();
+
+	if (done < budget && napi_complete(napi))
+		mtk_wed_wo_poll_complete(wo);
+
+	return done;
+}
+
+static irqreturn_t
+mtk_wed_wo_irq_handler(int irq, void *data)
+{
+	struct mtk_wed_wo *wo = data;
+
+	mtk_wed_wo_set_isr(wo, 0);
+	tasklet_schedule(&wo->mmio.irq_tasklet);
+
+	return IRQ_HANDLED;
+}
+
+static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
+{
+	struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
+	u32 intr, mask;
+
+	/* disable interrupts */
+	mtk_wed_wo_set_isr(wo, 0);
+
+	intr = mtk_wed_wo_get_isr(wo);
+	intr &= wo->mmio.irq_mask;
+	mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
+	mtk_wed_wo_irq_disable(wo, mask);
+
+	if (intr & MTK_WED_WO_RXCH_INT_MASK)
+		napi_schedule(&wo->mmio.napi);
+}
+
+/* mtk wed wo hw queues */
+
+static int
+mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+		       int n_desc, int buf_size, int index,
+		       struct mtk_wed_wo_queue_regs *regs)
+{
+	spin_lock_init(&q->lock);
+	q->regs = *regs;
+	q->n_desc = n_desc;
+	q->buf_size = buf_size;
+
+	q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
+				      &q->desc_dma, GFP_KERNEL);
+	if (!q->desc)
+		return -ENOMEM;
+
+	q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
+				GFP_KERNEL);
+	if (!q->entry)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void
+mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
+	dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
+			  q->desc_dma);
+}
+
+static void
+mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	struct page *page;
+	int i;
+
+	spin_lock_bh(&q->lock);
+	for (i = 0; i < q->n_desc; i++) {
+		struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
+
+		dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
+				 DMA_TO_DEVICE);
+		skb_free_frag(entry->buf);
+		entry->buf = NULL;
+	}
+	spin_unlock_bh(&q->lock);
+
+	if (!q->cache.va)
+		return;
+
+	page = virt_to_page(q->cache.va);
+	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
+	memset(&q->cache, 0, sizeof(q->cache));
+}
+
+static void
+mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	struct page *page;
+
+	spin_lock_bh(&q->lock);
+	for (;;) {
+		void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
+
+		if (!buf)
+			break;
+
+		skb_free_frag(buf);
+	}
+	spin_unlock_bh(&q->lock);
+
+	if (!q->cache.va)
+		return;
+
+	page = virt_to_page(q->cache.va);
+	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
+	memset(&q->cache, 0, sizeof(q->cache));
+}
+
+static void
+mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
+	mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
+	mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
+}
+
+int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+			    struct sk_buff *skb)
+{
+	struct mtk_wed_wo_queue_entry *entry;
+	struct mtk_wed_wo_queue_desc *desc;
+	int ret = 0, index;
+	u32 ctrl;
+
+	spin_lock_bh(&q->lock);
+
+	q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
+	index = (q->head + 1) % q->n_desc;
+	if (q->tail == index) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	entry = &q->entry[index];
+	if (skb->len > entry->len) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	desc = &q->desc[index];
+	q->head = index;
+
+	dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
+				DMA_TO_DEVICE);
+	memcpy(entry->buf, skb->data, skb->len);
+	dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
+				   DMA_TO_DEVICE);
+
+	ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
+	       MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
+	WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
+	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+
+	mtk_wed_wo_queue_kick(wo, q, q->head);
+	mtk_wed_wo_kickout(wo);
+out:
+	spin_unlock_bh(&q->lock);
+
+	dev_kfree_skb(skb);
+
+	return ret;
+}
+
+static int
+mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
+{
+	return 0;
+}
+
+static int
+mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
+{
+	struct mtk_wed_wo_queue_regs regs;
+	struct device_node *np;
+	int ret;
+
+	np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
+	if (!np)
+		return -ENODEV;
+
+	wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
+	if (IS_ERR_OR_NULL(wo->mmio.regs))
+		return PTR_ERR(wo->mmio.regs);
+
+	wo->mmio.irq = irq_of_parse_and_map(np, 0);
+	wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
+	spin_lock_init(&wo->mmio.lock);
+	tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);
+
+	ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
+			       mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
+			       KBUILD_MODNAME, wo);
+	if (ret)
+		goto error;
+
+	regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
+	regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
+	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
+	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
+
+	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
+				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
+				     &regs);
+	if (ret)
+		goto error;
+
+	mtk_wed_wo_queue_refill(wo, &wo->q_tx, GFP_KERNEL, false);
+	mtk_wed_wo_queue_reset(wo, &wo->q_tx);
+
+	regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
+	regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
+	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
+	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
+
+	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
+				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
+				     &regs);
+	if (ret)
+		goto error;
+
+	init_dummy_netdev(&wo->mmio.napi_dev);
+	snprintf(wo->mmio.napi_dev.name, sizeof(wo->mmio.napi_dev.name), "%s",
+		 "mtk_wed_wo");
+	netif_napi_add(&wo->mmio.napi_dev, &wo->mmio.napi, mtk_wed_wo_rx_poll);
+	mtk_wed_wo_queue_refill(wo, &wo->q_rx, GFP_KERNEL, true);
+	mtk_wed_wo_queue_reset(wo, &wo->q_rx);
+	napi_enable(&wo->mmio.napi);
+
+	/* rx queue irqmask */
+	mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
+
+	return 0;
+
+error:
+	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
+
+	return ret;
+}
+
+static void
+mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
+{
+	/* disable interrupts */
+	mtk_wed_wo_set_isr(wo, 0);
+
+	tasklet_disable(&wo->mmio.irq_tasklet);
+	netif_napi_del(&wo->mmio.napi);
+
+	disable_irq(wo->mmio.irq);
+	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
+
+	mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
+	mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
+	mtk_wed_wo_queue_free(wo, &wo->q_tx);
+	mtk_wed_wo_queue_free(wo, &wo->q_rx);
+}
+
+int mtk_wed_wo_init(struct mtk_wed_hw *hw)
+{
+	struct mtk_wed_wo *wo;
+	int ret;
+
+	wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
+	if (!wo)
+		return -ENOMEM;
+
+	hw->wed_wo = wo;
+	wo->hw = hw;
+
+	ret = mtk_wed_wo_hardware_init(wo);
+	if (ret)
+		return ret;
+
+	ret = mtk_wed_mcu_init(wo);
+	if (ret)
+		return ret;
+
+	return mtk_wed_wo_exception_init(wo);
+}
+
+void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
+{
+	struct mtk_wed_wo *wo = hw->wed_wo;
+
+	mtk_wed_wo_hw_deinit(wo);
+}
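
For reference, the ring-index discipline the WO queues rely on,
extracted into standalone helpers (a sketch, not part of the patch):

    /* head is the producer index, tail the consumer index; one slot
     * is kept empty so a full ring is distinguishable from an empty
     * one. mtk_wed_wo_queue_tx_skb() checks fullness this way, while
     * the rx side tracks occupancy with the separate q->queued count.
     */
    static bool wo_ring_full(int head, int tail, int n_desc)
    {
            return (head + 1) % n_desc == tail;
    }

    static bool wo_ring_empty(int head, int tail)
    {
            return head == tail;
    }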
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
index 86d102a064ca..18c0ffcd8176 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
@@ -79,6 +79,54 @@ enum mtk_wed_dummy_cr_idx {
 #define MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK	BIT(5)
 #define MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK	BIT(0)
 
+#define MTK_WED_WO_RING_SIZE	256
+#define MTK_WED_WO_CMD_LEN	1504
+
+#define MTK_WED_WO_TXCH_NUM		0
+#define MTK_WED_WO_RXCH_NUM		1
+#define MTK_WED_WO_RXCH_WO_EXCEPTION	7
+
+#define MTK_WED_WO_TXCH_INT_MASK	BIT(0)
+#define MTK_WED_WO_RXCH_INT_MASK	BIT(1)
+#define MTK_WED_WO_EXCEPTION_INT_MASK	BIT(7)
+#define MTK_WED_WO_ALL_INT_MASK		(MTK_WED_WO_RXCH_INT_MASK | \
+					 MTK_WED_WO_EXCEPTION_INT_MASK)
+
+#define MTK_WED_WO_CCIF_BUSY		0x004
+#define MTK_WED_WO_CCIF_START		0x008
+#define MTK_WED_WO_CCIF_TCHNUM		0x00c
+#define MTK_WED_WO_CCIF_RCHNUM		0x010
+#define MTK_WED_WO_CCIF_RCHNUM_MASK	GENMASK(7, 0)
+
+#define MTK_WED_WO_CCIF_ACK		0x014
+#define MTK_WED_WO_CCIF_IRQ0_MASK	0x018
+#define MTK_WED_WO_CCIF_IRQ1_MASK	0x01c
+#define MTK_WED_WO_CCIF_DUMMY1		0x020
+#define MTK_WED_WO_CCIF_DUMMY2		0x024
+#define MTK_WED_WO_CCIF_DUMMY3		0x028
+#define MTK_WED_WO_CCIF_DUMMY4		0x02c
+#define MTK_WED_WO_CCIF_SHADOW1		0x030
+#define MTK_WED_WO_CCIF_SHADOW2		0x034
+#define MTK_WED_WO_CCIF_SHADOW3		0x038
+#define MTK_WED_WO_CCIF_SHADOW4		0x03c
+#define MTK_WED_WO_CCIF_DUMMY5		0x050
+#define MTK_WED_WO_CCIF_DUMMY6		0x054
+#define MTK_WED_WO_CCIF_DUMMY7		0x058
+#define MTK_WED_WO_CCIF_DUMMY8		0x05c
+#define MTK_WED_WO_CCIF_SHADOW5		0x060
+#define MTK_WED_WO_CCIF_SHADOW6		0x064
+#define MTK_WED_WO_CCIF_SHADOW7		0x068
+#define MTK_WED_WO_CCIF_SHADOW8		0x06c
+
+#define MTK_WED_WO_CTL_SD_LEN1		GENMASK(13, 0)
+#define MTK_WED_WO_CTL_LAST_SEC1	BIT(14)
+#define MTK_WED_WO_CTL_BURST		BIT(15)
+#define MTK_WED_WO_CTL_SD_LEN0_SHIFT	16
+#define MTK_WED_WO_CTL_SD_LEN0		GENMASK(29, 16)
+#define MTK_WED_WO_CTL_LAST_SEC0	BIT(30)
+#define MTK_WED_WO_CTL_DMA_DONE		BIT(31)
+#define MTK_WED_WO_INFO_WINFO		GENMASK(15, 0)
+
 struct mtk_wed_wo_memory_region {
 	const char *prefix;
 	void __iomem *addr;
@@ -111,10 +159,53 @@ struct mtk_wed_fw_trailer {
 	u32 crc;
 };
 
+struct mtk_wed_wo_queue_regs {
+	u32 desc_base;
+	u32 ring_size;
+	u32 cpu_idx;
+	u32 dma_idx;
+};
+
+struct mtk_wed_wo_queue_desc {
+	__le32 buf0;
+	__le32 ctrl;
+	__le32 buf1;
+	__le32 info;
+	__le32 reserved[4];
+} __packed __aligned(32);
+
+struct mtk_wed_wo_queue_entry {
+	dma_addr_t addr;
+	void *buf;
+	u32 len;
+};
+
+struct mtk_wed_wo_queue {
+	struct mtk_wed_wo_queue_regs regs;
+
+	struct page_frag_cache cache;
+	spinlock_t lock;
+
+	struct mtk_wed_wo_queue_desc *desc;
+	dma_addr_t desc_dma;
+
+	struct mtk_wed_wo_queue_entry *entry;
+
+	u16 head;
+	u16 tail;
+	int n_desc;
+	int queued;
+	int buf_size;
+
+};
+
 struct mtk_wed_wo {
 	struct mtk_wed_hw *hw;
 	struct regmap *boot;
 
+	struct mtk_wed_wo_queue q_tx;
+	struct mtk_wed_wo_queue q_rx;
+
 	struct {
 		struct mutex mutex;
 		int timeout;
@@ -123,6 +214,18 @@ struct mtk_wed_wo {
 		struct sk_buff_head res_q;
 		wait_queue_head_t wait;
 	} mcu;
+
+	struct {
+		struct regmap *regs;
+
+		spinlock_t lock;
+		struct tasklet_struct irq_tasklet;
+		int irq;
+		u32 irq_mask;
+
+		struct net_device napi_dev;
+		struct napi_struct napi;
+	} mmio;
 };
 
 static inline int
@@ -148,5 +251,9 @@ void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo,
 int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd,
 			 const void *data, int len, bool wait_resp);
 int mtk_wed_mcu_init(struct mtk_wed_wo *wo);
+int mtk_wed_wo_init(struct mtk_wed_hw *hw);
+void mtk_wed_wo_deinit(struct mtk_wed_hw *hw);
+int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *dev, struct mtk_wed_wo_queue *q,
+			    struct sk_buff *skb);
 
 #endif /* __MTK_WED_WO_H */
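
As a worked example of the descriptor layout above: a single-segment
128-byte TX message packs its ctrl word as

    u32 ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, 128) | /* bits 29:16 */
               MTK_WED_WO_CTL_LAST_SEC0 |                /* BIT(30) */
               MTK_WED_WO_CTL_DMA_DONE;                  /* BIT(31) */
    /* == (128 << 16) | BIT(30) | BIT(31) == 0xc0800000 */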
-- 
2.37.3
