generic: 6.18: mtk_eth_soc: improve non-MTK tag_8021q DSA

Add patches to improve support for using 3rd-party DSA switches
like MaxLinear MxL862xx with MediaTek's mtk_eth_soc being the
conduit. This involves reorganizing hardware queues to avoid
overlap (currently dp->index is used -- if there is more than one
DSA switch this is problematic), and correctly programming flows
of the non-MTK DSA user ports in the PPE offloading engine.

Signed-off-by: Daniel Golle <daniel@makrotopia.org>
This commit is contained in:
Daniel Golle 2026-04-23 18:58:52 +01:00
parent c9a87914a9
commit dbd8eab75d
12 changed files with 914 additions and 17 deletions

View File

@ -37,7 +37,7 @@ Signed-off-by: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
.glo_cfg = 0x4604,
.rst_idx = 0x4608,
.delay_irq = 0x460c,
@@ -4125,6 +4128,56 @@ static void mtk_set_mcr_max_rx(struct mt
@@ -4178,6 +4181,56 @@ static void mtk_set_mcr_max_rx(struct mt
mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
}
@ -63,7 +63,7 @@ Signed-off-by: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
+ mtk_hw_dump_reg(eth, "FE", 0x1400, 0x300);
+ mtk_hw_dump_reg(eth, "ADMA", reg_map->pdma.rx_ptr, 0x300);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ for (id = 0; id < MTK_QDMA_NUM_QUEUES / 16; id++) {
+ for (id = 0; id < eth->soc->num_tx_queues / MTK_QTX_PER_PAGE; id++) {
+ mtk_w32(eth, id, reg_map->qdma.page);
+ pr_info("\nQDMA PAGE:%x ", mtk_r32(eth, reg_map->qdma.page));
+ mtk_hw_dump_reg(eth, "QDMA", reg_map->qdma.qtx_cfg, 0x100);
@ -94,7 +94,7 @@ Signed-off-by: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
static void mtk_hw_reset(struct mtk_eth *eth)
{
u32 val;
@@ -4604,6 +4657,8 @@ static void mtk_pending_work(struct work
@@ -4657,6 +4710,8 @@ static void mtk_pending_work(struct work
rtnl_lock();
set_bit(MTK_RESETTING, &eth->state);
@ -105,7 +105,7 @@ Signed-off-by: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
/* Run again reset preliminary configuration in order to avoid any
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -1192,6 +1192,7 @@ struct mtk_reg_map {
@@ -1200,6 +1200,7 @@ struct mtk_reg_map {
u32 rx_ptr; /* rx base pointer */
u32 rx_cnt_cfg; /* rx max count configuration */
u32 qcrx_ptr; /* rx cpu pointer */

View File

@ -0,0 +1,68 @@
From 75baf4f561029dc48a5b244320c50773356f9f3e Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Thu, 23 Apr 2026 15:08:11 +0100
Subject: [PATCH 1/9] net: ethernet: mtk_ppe_offload: use
rhashtable_lookup_fast in flow ops
mtk_flow_offload_replace(), mtk_flow_offload_destroy() and
mtk_flow_offload_stats() look up the flow hash table with
rhashtable_lookup(), which is annotated __must_hold_shared(RCU)
and, per kerneldoc, "must only be called under the RCU read lock."
All three call sites run under mtk_flow_offload_mutex but without
an explicit rcu_read_lock(), so CONFIG_PROVE_RCU trips:
suspicious rcu_dereference_check() usage!
include/linux/rhashtable.h:632 ...
Call trace:
mtk_flow_offload_replace+0x...
mtk_flow_offload_cmd+0x...
...
Switch to rhashtable_lookup_fast(), which takes the RCU read lock
around the lookup internally. The driver's mutex keeps the entry
pointer alive past the rhashtable_lookup_fast() call -- exactly the
"other mechanism guaranteeing that the object won't go away" that
rhashtable_lookup_fast()'s kerneldoc requires.
No functional change; this silences a lockdep warning that is
harmless in practice, since the mutex already pins the entry.
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -261,7 +261,8 @@ mtk_flow_offload_replace(struct mtk_eth
int err = 0;
int i;
- if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
+ if (rhashtable_lookup_fast(&eth->flow_table, &f->cookie,
+ mtk_flow_ht_params))
return -EEXIST;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
@@ -503,8 +504,8 @@ mtk_flow_offload_destroy(struct mtk_eth
{
struct mtk_flow_entry *entry;
- entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
- mtk_flow_ht_params);
+ entry = rhashtable_lookup_fast(&eth->flow_table, &f->cookie,
+ mtk_flow_ht_params);
if (!entry)
return -ENOENT;
@@ -525,8 +526,8 @@ mtk_flow_offload_stats(struct mtk_eth *e
u64 packets, bytes;
int idle;
- entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
- mtk_flow_ht_params);
+ entry = rhashtable_lookup_fast(&eth->flow_table, &f->cookie,
+ mtk_flow_ht_params);
if (!entry)
return -ENOENT;

View File

@ -0,0 +1,72 @@
From daa37761de13976153502332f2fa675bd3761cc4 Mon Sep 17 00:00:00 2001
From: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
Date: Thu, 23 Apr 2026 13:56:30 +0100
Subject: [PATCH 2/9] net: ethernet: mtk_ppe: set tport_idx on netsys_v3 for
QoS
On MT7988 (netsys_v3) the PPE MAC info block carries a transport
port index field. When not set, ETH-to-ETH and WiFi-to-ETH hardware-
offloaded flows do not enter QDMA for scheduling, even with
IB2_PSE_QOS programmed on the entry -- the packet bypasses the
QDMA scheduler entirely, so any per-queue shaping installed
downstream has no effect.
Populate the netsys_v3 tport/tinfo fields in struct mtk_foe_mac_info
and set TPORT_IDX=1 from mtk_foe_entry_set_queue() on v3+.
No functional change on netsys_v1 or v2.
The hard-coded value 1 matches MediaTek's reference driver; the
datasheet may define additional transport modes, but none are in use
by in-tree consumers.
Signed-off-by: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
[Daniel Golle: hoist l2 declaration to function top per kernel C89 style]
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
drivers/net/ethernet/mediatek/mtk_ppe.c | 7 +++++++
drivers/net/ethernet/mediatek/mtk_ppe.h | 4 ++++
2 files changed, 11 insertions(+)
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -479,6 +479,7 @@ int mtk_foe_entry_set_queue(struct mtk_e
unsigned int queue)
{
u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
+ struct mtk_foe_mac_info *l2;
if (mtk_is_netsys_v2_or_greater(eth)) {
*ib2 &= ~MTK_FOE_IB2_QID_V2;
@@ -490,6 +491,12 @@ int mtk_foe_entry_set_queue(struct mtk_e
*ib2 |= MTK_FOE_IB2_PSE_QOS;
}
+ if (mtk_is_netsys_v3_or_greater(eth)) {
+ l2 = mtk_foe_entry_l2(eth, entry);
+ l2->tport &= ~MTK_FOE_TPORT_IDX;
+ l2->tport |= FIELD_PREP(MTK_FOE_TPORT_IDX, 1);
+ }
+
return 0;
}
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -96,6 +96,8 @@ enum {
#define MTK_FOE_WINFO_AMSDU_HF BIT(23)
#define MTK_FOE_WINFO_AMSDU_EN BIT(24)
+#define MTK_FOE_TPORT_IDX GENMASK(3, 0)
+
enum {
MTK_FOE_STATE_INVALID,
MTK_FOE_STATE_UNBIND,
@@ -124,6 +126,8 @@ struct mtk_foe_mac_info {
/* netsys_v3 */
u32 w3info;
u32 amsdu;
+ u16 tinfo;
+ u16 tport;
};
/* software-only entry type */

View File

@ -0,0 +1,59 @@
From eef4108f42f1b6e41274be5220253b2cd37ae8d1 Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Thu, 23 Apr 2026 13:57:37 +0100
Subject: [PATCH 3/9] net: ethernet: mtk_ppe_offload: set output device before
VLAN/PPPoE push
In mtk_flow_offload_replace(), run mtk_flow_set_output_device() and
mtk_wed_flow_add() before the VLAN push loop and PPPoE push, rather
than after them.
Today this is a no-op: set_output_device() on the DSA_TAG_PROTO_MTK
path writes the special-tag etype into the L2 block, not a VLAN, so
the relative order of VLAN push and output-device setup does not
matter. The WED path likewise does not stack any VLANs.
This prepares the ground for taggers whose output-device setup
pushes an outer 802.1Q tag (e.g. DSA_TAG_PROTO_MXL862_8021Q) which
must land in vlan1 so that subsequent user VLANs stack into vlan2.
Getting that order wrong would put the DSA outer tag in vlan2 and
the user VLAN in vlan1, which the switch catchall strip rule would
then remove from the wrong layer.
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -453,6 +453,14 @@ mtk_flow_offload_replace(struct mtk_eth
return err;
}
+ err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
+ &wed_index);
+ if (err)
+ return err;
+
+ if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
+ return err;
+
if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
foe.bridge.vlan = data.vlan_in;
@@ -462,14 +470,6 @@ mtk_flow_offload_replace(struct mtk_eth
if (data.pppoe.num == 1)
mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);
- err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
- &wed_index);
- if (err)
- return err;
-
- if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
- return err;
-
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;

View File

@ -0,0 +1,192 @@
From 629c7910dade8866807358e245af808d9a282cd7 Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Thu, 23 Apr 2026 14:03:20 +0100
Subject: [PATCH 4/9] net: ethernet: mtk_eth_soc: per-SoC QDMA TX queue count with
register paging
MT7988 (netsys_v3) has 32 QDMA TX queues but the register window only
exposes 16 at a time; accessing the upper 16 requires selecting the
register page via the qdma.page register first. On netsys_v1 and
netsys_v2 the hardware still has 16 queues and the page register,
though present at a fixed offset in the reg_map, is effectively only
ever written to with page 0.
Replace the global MTK_QDMA_NUM_QUEUES define with a per-SoC
num_tx_queues field in struct mtk_soc_data. Keep the count at 16 for
V1 and V2, bump it to 32 only for MT7988 (V3). This matches the
current behavior for every SoC that was already working -- including
the ramips MT7621 that shares the driver -- while allowing V3 to use
the full queue count.
Add a qdma.page field to struct mtk_reg_map. Populate it for all
three QDMA reg maps:
mtk_reg_map (V1) = 0x19f0
mt7986_reg_map (V2) = 0x45f0
mt7988_reg_map (V3) = 0x45f0
Add MTK_QTX_PER_PAGE (= 16), the number of queues whose config
registers fit in one register page window. In mtk_set_queue_speed()
and the queue init loop in mtk_tx_alloc(), write the page register
before touching qtx_cfg / qtx_sch, using idx / MTK_QTX_PER_PAGE as
page and idx % MTK_QTX_PER_PAGE as in-page offset. Other call sites
(mtk_dma_free(), DSA user-port bound check, alloc_etherdev_mqs())
switch to eth->soc->num_tx_queues.
MT7628 / RT5350 predate QDMA and are guarded by the existing
MTK_HAS_CAPS(MTK_QDMA) checks; num_tx_queues stays 0 for them and
those paths remain unreachable.
Based on work by Bo-Cun Chen for MediaTek's downstream tree
("net: ethernet: mtk_eth_soc: change qdma txq num to 32"), reshaped
to avoid bumping the global define for older SoCs.
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 26 +++++++++++++++------
drivers/net/ethernet/mediatek/mtk_eth_soc.h | 5 ++--
2 files changed, 22 insertions(+), 9 deletions(-)
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -846,7 +846,8 @@ static void mtk_set_queue_speed(struct m
}
}
- ofs = MTK_QTX_OFFSET * idx;
+ mtk_w32(eth, idx / MTK_QTX_PER_PAGE, soc->reg_map->qdma.page);
+ ofs = MTK_QTX_OFFSET * (idx % MTK_QTX_PER_PAGE);
mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
}
@@ -2929,7 +2930,11 @@ static int mtk_tx_alloc(struct mtk_eth *
soc->reg_map->qdma.crx_ptr);
mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
- for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
+ for (i = 0; i < soc->num_tx_queues; i++) {
+ mtk_w32(eth, i / MTK_QTX_PER_PAGE,
+ soc->reg_map->qdma.page);
+ ofs = MTK_QTX_OFFSET * (i % MTK_QTX_PER_PAGE);
+
val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
@@ -2941,7 +2946,6 @@ static int mtk_tx_alloc(struct mtk_eth *
if (mtk_is_netsys_v1(eth))
val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
- ofs += MTK_QTX_OFFSET;
}
val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
@@ -3506,7 +3510,7 @@ static void mtk_dma_free(struct mtk_eth
int i, j, txqs = 1;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- txqs = MTK_QDMA_NUM_QUEUES;
+ txqs = soc->num_tx_queues;
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
@@ -3778,7 +3782,7 @@ found:
return NOTIFY_DONE;
dp = dsa_port_from_netdev(dev);
- if (dp->index >= MTK_QDMA_NUM_QUEUES)
+ if (dp->index >= eth->soc->num_tx_queues)
return NOTIFY_DONE;
if (mac->speed > 0 && mac->speed <= s.base.speed)
@@ -5028,7 +5032,7 @@ static int mtk_add_mac(struct mtk_eth *e
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- txqs = MTK_QDMA_NUM_QUEUES;
+ txqs = eth->soc->num_tx_queues;
eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
if (!eth->netdev[id]) {
@@ -5673,6 +5677,7 @@ static const struct mtk_soc_data mt2701_
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
.version = 1,
+ .num_tx_queues = 16,
.tx = {
DESC_SIZE(struct mtk_tx_dma),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
@@ -5700,6 +5705,7 @@ static const struct mtk_soc_data mt7621_
.offload_version = 1,
.ppe_num = 1,
.hash_offset = 2,
+ .num_tx_queues = 16,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.tx = {
DESC_SIZE(struct mtk_tx_dma),
@@ -5730,6 +5736,7 @@ static const struct mtk_soc_data mt7622_
.ppe_num = 1,
.hash_offset = 2,
.has_accounting = true,
+ .num_tx_queues = 16,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.tx = {
DESC_SIZE(struct mtk_tx_dma),
@@ -5758,6 +5765,7 @@ static const struct mtk_soc_data mt7623_
.offload_version = 1,
.ppe_num = 1,
.hash_offset = 2,
+ .num_tx_queues = 16,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.disable_pll_modes = true,
.tx = {
@@ -5786,6 +5794,7 @@ static const struct mtk_soc_data mt7629_
.required_pctl = false,
.has_accounting = true,
.version = 1,
+ .num_tx_queues = 16,
.tx = {
DESC_SIZE(struct mtk_tx_dma),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
@@ -5815,6 +5824,7 @@ static const struct mtk_soc_data mt7981_
.ppe_num = 2,
.hash_offset = 4,
.has_accounting = true,
+ .num_tx_queues = 16,
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
.tx = {
DESC_SIZE(struct mtk_tx_dma_v2),
@@ -5845,6 +5855,7 @@ static const struct mtk_soc_data mt7986_
.ppe_num = 2,
.hash_offset = 4,
.has_accounting = true,
+ .num_tx_queues = 16,
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
.tx = {
DESC_SIZE(struct mtk_tx_dma_v2),
@@ -5875,6 +5886,7 @@ static const struct mtk_soc_data mt7988_
.ppe_num = 3,
.hash_offset = 4,
.has_accounting = true,
+ .num_tx_queues = 32,
.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
.tx = {
DESC_SIZE(struct mtk_tx_dma_v2),
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -25,7 +25,7 @@
#define MTK_MAX_DSA_PORTS 7
#define MTK_DSA_PORT_MASK GENMASK(2, 0)
-#define MTK_QDMA_NUM_QUEUES 16
+#define MTK_QTX_PER_PAGE 16
#define MTK_QDMA_PAGE_SIZE 2048
#define MTK_MAX_RX_LENGTH 1536
#define MTK_MAX_RX_LENGTH_2K 2048
@@ -1248,6 +1248,7 @@ struct mtk_soc_data {
u8 hash_offset;
u8 version;
u8 ppe_num;
+ u16 num_tx_queues;
u16 foe_entry_size;
netdev_features_t hw_features;
bool has_accounting;

View File

@ -0,0 +1,168 @@
From e404de9968c4010b1f5fea4fba78ddbb220ba97a Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Thu, 23 Apr 2026 15:19:04 +0100
Subject: [PATCH 5/9] net: ethernet: mtk_eth_soc: add per-conduit DSA user-port
queue map
Today the driver maps every DSA user port to QDMA TX queue
dp->index + 3 at three call sites (link-speed notifier, SW TX
select_queue, and PPE flow offload). The mapping is correct for a
single DSA switch per conduit with contiguous port indices, which
covers every current MediaTek SoC paired with MT7530/MT7531/built-in
switch.
It falls apart in two scenarios that start to matter:
1) Multiple DSA switches on one MT7988 (built-in MT7530 on GMAC2
plus an external switch on a SerDes GMAC). Each switch has its
own dp->index space starting at 0, so port 0 of switch A and
port 0 of switch B both map to QDMA queue 3 and stomp each
other's shaping and offload state.
2) Switches with non-contiguous user-port indices (CPU port in the
middle of the range, some indices reserved), where "queue =
dp->index + 3" leaves gaps or overshoots num_tx_queues.
Introduce a per-conduit queue map so those three call sites can
convert dp->index to a collision-free queue ID:
- mac->dsa_queue_base: queue index where this conduit's DSA user
ports start. Zeroth conduit uses MTK_MAX_DEVS (skipping the
queues reserved for non-DSA egress), later conduits start after
the previous conduit's user-port range.
- mac->dsa_port_rank[dp->index]: rank (0..N-1) of dp within its
switch's user-port sequence, so sparse dp->index layouts become
dense queue assignments.
mtk_update_dsa_queue_map() walks every conduit and rebuilds both.
It is called from the existing DSA user-port NETDEV_CHANGE notifier
so link-up of any DSA user port triggers a recompute. Initial
values at mac creation reproduce the old "queue = dp->index + 3"
layout, so behavior does not change until the next patches switch
call sites over.
MTK_DSA_USER_PORT_MAX is 32, comfortably covering the 16-port
MxL862xx; bump it if a larger switch needs per-port queuing.
This patch only adds infrastructure; no call sites are converted
yet so there is no functional change.
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 56 ++++++++++++++++++++-
drivers/net/ethernet/mediatek/mtk_eth_soc.h | 10 ++++
2 files changed, 65 insertions(+), 1 deletion(-)
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -3751,6 +3751,48 @@ static bool mtk_uses_dsa(struct net_devi
#endif
}
+/*
+ * Recompute the per-conduit DSA user-port queue map. Each conduit's mac
+ * gets a queue base (queues 0..MTK_MAX_DEVS-1 are reserved for non-DSA
+ * egress via GDM{1,2,3}), followed by a contiguous rank for every user
+ * port of the DSA switch attached to that conduit. dsa_port_rank[]
+ * translates dp->index (which may be non-contiguous and may collide
+ * across switches) into that rank.
+ *
+ * Call this on DSA topology changes. Idempotent.
+ */
+static void mtk_update_dsa_queue_map(struct mtk_eth *eth)
+{
+ struct net_device *conduit;
+ struct dsa_switch *ds;
+ struct mtk_mac *mac;
+ struct dsa_port *dp;
+ u8 base = MTK_MAX_DEVS;
+ u8 rank;
+ int i;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ conduit = eth->netdev[i];
+ if (!conduit)
+ continue;
+
+ mac = netdev_priv(conduit);
+ mac->dsa_queue_base = base;
+
+ if (!netdev_uses_dsa(conduit))
+ continue;
+
+ ds = conduit->dsa_ptr->ds;
+ rank = 0;
+ dsa_switch_for_each_user_port(dp, ds) {
+ if (dp->index < MTK_DSA_USER_PORT_MAX)
+ mac->dsa_port_rank[dp->index] = rank;
+ rank++;
+ }
+ base += rank;
+ }
+}
+
static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
{
struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
@@ -3775,6 +3817,8 @@ found:
if (!dsa_user_dev_check(dev))
return NOTIFY_DONE;
+ mtk_update_dsa_queue_map(eth);
+
if (__ethtool_get_link_ksettings(dev, &s))
return NOTIFY_DONE;
@@ -5010,7 +5054,7 @@ static int mtk_add_mac(struct mtk_eth *e
phy_interface_t phy_mode;
struct phylink *phylink;
struct mtk_mac *mac;
- int id, err, count;
+ int id, err, count, i;
unsigned int sid;
int txqs = 1;
u32 val;
@@ -5045,6 +5089,16 @@ static int mtk_add_mac(struct mtk_eth *e
mac->hw = eth;
mac->of_node = np;
+ /*
+ * Initialize the DSA user-port queue map to an identity mapping
+ * starting at MTK_MAX_DEVS, so that a mac without any DSA switch
+ * attached keeps the historic "queue = dp->index + 3" layout.
+ * mtk_update_dsa_queue_map() rewrites this once DSA state is up.
+ */
+ mac->dsa_queue_base = MTK_MAX_DEVS;
+ for (i = 0; i < MTK_DSA_USER_PORT_MAX; i++)
+ mac->dsa_port_rank[i] = i;
+
memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
mac->hwlro_ip_cnt = 0;
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -25,6 +25,14 @@
#define MTK_MAX_DSA_PORTS 7
#define MTK_DSA_PORT_MASK GENMASK(2, 0)
+/*
+ * Upper bound on dp->index across all DSA switches we support attaching
+ * to an MT7988 / MT7986 / MT7622 / MT7621 conduit. Sized to cover the
+ * 16-port MaxLinear MxL862xx with room to spare; increase when a larger
+ * switch needs per-port queue mapping.
+ */
+#define MTK_DSA_USER_PORT_MAX 32
+
#define MTK_QTX_PER_PAGE 16
#define MTK_QDMA_PAGE_SIZE 2048
#define MTK_MAX_RX_LENGTH 1536
@@ -1414,6 +1422,8 @@ struct mtk_mac {
int hwlro_ip_cnt;
unsigned int syscfg0;
struct notifier_block device_notifier;
+ u8 dsa_queue_base;
+ u8 dsa_port_rank[MTK_DSA_USER_PORT_MAX];
};
/* the struct describing the SoC. these are declared in the soc_xyz.c files */

View File

@ -0,0 +1,85 @@
From 199cb88e7127b469d3a4b17346129dcea2b719d9 Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Thu, 23 Apr 2026 15:19:58 +0100
Subject: [PATCH 6/9] net: ethernet: mtk_eth_soc: use DSA queue map in TX paths
Convert the two SW TX paths that map DSA user ports to QDMA queues
to use the per-conduit queue map introduced by the previous patch:
- mtk_device_event(), the DSA user-port link-speed notifier, now
computes the queue ID as mac->dsa_queue_base +
mac->dsa_port_rank[dp->index] instead of dp->index + 3.
- mtk_select_queue() does the same for SW xmit, keying on
skb_get_queue_mapping(skb) which DSA taggers set to dp->index.
The default identity map set up at mac creation keeps the resulting
queue IDs identical to the old formula as long as only one DSA
switch with contiguous dp->index is attached to the conduit -- the
current common case. When mtk_update_dsa_queue_map() runs on DSA
attach, multi-switch trees (e.g. MT7988 + built-in MT7530 + external
MxL862xx) and non-contiguous dp->index layouts (e.g. MxL862xx with
CPU port 0 and user ports 1..15) get collision-free queue IDs.
Also tighten the bound check: the old code rejected
"dp->index >= soc->num_tx_queues", which on V1 with a 16-port
switch could still compute queue=18 and write out of range.
Now reject on dp->index >= MTK_DSA_USER_PORT_MAX first (the array
bound), then on queue >= soc->num_tx_queues (the HW bound).
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 20 +++++++++++++++-----
1 file changed, 15 insertions(+), 5 deletions(-)
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -3802,6 +3802,7 @@ static int mtk_device_event(struct notif
struct net_device *ldev;
struct list_head *iter;
struct dsa_port *dp;
+ unsigned int queue;
if (event != NETDEV_CHANGE)
return NOTIFY_DONE;
@@ -3826,13 +3827,17 @@ found:
return NOTIFY_DONE;
dp = dsa_port_from_netdev(dev);
- if (dp->index >= eth->soc->num_tx_queues)
+ if (dp->index >= MTK_DSA_USER_PORT_MAX)
+ return NOTIFY_DONE;
+
+ queue = mac->dsa_queue_base + mac->dsa_port_rank[dp->index];
+ if (queue >= eth->soc->num_tx_queues)
return NOTIFY_DONE;
if (mac->speed > 0 && mac->speed <= s.base.speed)
s.base.speed = 0;
- mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
+ mtk_set_queue_speed(eth, queue, s.base.speed);
return NOTIFY_DONE;
}
@@ -4994,12 +4999,17 @@ static u16 mtk_select_queue(struct net_d
struct net_device *sb_dev)
{
struct mtk_mac *mac = netdev_priv(dev);
+ unsigned int dp_idx;
unsigned int queue = 0;
- if (netdev_uses_dsa(dev))
- queue = skb_get_queue_mapping(skb) + 3;
- else
+ if (netdev_uses_dsa(dev)) {
+ dp_idx = skb_get_queue_mapping(skb);
+ if (dp_idx < MTK_DSA_USER_PORT_MAX)
+ queue = mac->dsa_queue_base +
+ mac->dsa_port_rank[dp_idx];
+ } else {
queue = mac->id;
+ }
if (queue >= dev->num_tx_queues)
queue = 0;

View File

@ -0,0 +1,54 @@
From 6916564f8a24970e36102b56bcec577f5129cfe1 Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Thu, 23 Apr 2026 15:20:54 +0100
Subject: [PATCH 7/9] net: ethernet: mtk_ppe_offload: use DSA queue map in flow
offload path
Convert mtk_flow_set_output_device() to use the per-conduit DSA
queue map for PPE-offloaded flows targeting a DSA user port,
matching the two SW TX paths already converted in the previous
patch.
mtk_flow_get_dsa_port() substitutes *dev with the DSA conduit's
netdev on success, so by the time we pick the QDMA queue for the
offloaded flow we have the conduit's struct mtk_mac and can
compute queue = mac->dsa_queue_base + mac->dsa_port_rank[dsa_port]
just like SW xmit does.
Behavior is unchanged for any existing configuration (single DSA
switch per conduit with contiguous dp->index); the allocator only
produces different results for multi-switch trees and for switches
with sparse dp->index layouts. The dsa_port < MTK_DSA_USER_PORT_MAX
guard matches the SW TX path and is purely defensive -- no shipped
DSA driver today has a dp->index at or above that bound.
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -193,6 +193,7 @@ mtk_flow_set_output_device(struct mtk_et
int *wed_index)
{
struct mtk_wdma_info info = {};
+ struct mtk_mac *mac;
int pse_port, dsa_port, queue;
if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
@@ -230,9 +231,12 @@ mtk_flow_set_output_device(struct mtk_et
else
return -EOPNOTSUPP;
- if (dsa_port >= 0) {
+ if (dsa_port >= 0)
mtk_foe_entry_set_dsa(eth, foe, dsa_port);
- queue = 3 + dsa_port;
+
+ if (dsa_port >= 0 && dsa_port < MTK_DSA_USER_PORT_MAX) {
+ mac = netdev_priv(dev);
+ queue = mac->dsa_queue_base + mac->dsa_port_rank[dsa_port];
} else {
queue = pse_port - 1;
}

View File

@ -0,0 +1,45 @@
From dc26f882f63c34a4d510fc4326c958da52c6856b Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Thu, 23 Apr 2026 15:23:20 +0100
Subject: [PATCH 8/9] net: dsa: tag_mxl862xx_8021q: set skb queue_mapping to
DSA port index
Conduit drivers that want to assign a dedicated TX queue per DSA
user port read skb->queue_mapping in their ndo_select_queue and
map it to a HW queue. The tag_mxl862xx_8021q xmit path currently
leaves queue_mapping untouched at whatever the stack / qdisc set
it to, which is typically 0. That collapses every user port to
the same conduit queue.
Overwrite queue_mapping with dp->index before handing the skb to
dsa_8021q_xmit(). The original value is read first to derive the
PCP from the user port's TC map, so per-priority egress still
works as designed.
The same pattern is already used implicitly by the MTK native
tag -- DSA cores it on behalf of the tagger -- and is expected
by mtk_eth_soc's mtk_select_queue().
Signed-off-by: Chad Monroe <chad@monroe.io>
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
net/dsa/tag_mxl862xx_8021q.c | 8 ++++++++
1 file changed, 8 insertions(+)
--- a/net/dsa/tag_mxl862xx_8021q.c
+++ b/net/dsa/tag_mxl862xx_8021q.c
@@ -23,6 +23,14 @@ static struct sk_buff *mxl862_8021q_xmit
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
+ /*
+ * Re-key skb->queue_mapping to the DSA user port index so that
+ * conduit drivers (e.g. mtk_eth_soc) can map it to a per-port
+ * QDMA TX queue. Must happen after reading the original
+ * queue_mapping for PCP derivation.
+ */
+ skb_set_queue_mapping(skb, dp->index);
+
return dsa_8021q_xmit(skb, netdev, ETH_P_8021Q,
(pcp << VLAN_PRIO_SHIFT) | tx_vid);
}

View File

@ -0,0 +1,153 @@
From 031ec521141aaa7fe13a937e0f942319f501cc4d Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Thu, 23 Apr 2026 15:24:32 +0100
Subject: [PATCH] net: ethernet: mtk_ppe: offload flows to MxL862xx switches in
tag_8021q mode
Extend the PPE flow-offload output path to handle egress through
an MxL862xx switch running in tag_8021q mode
(DSA_TAG_PROTO_MXL862_8021Q), in addition to the existing support
for MediaTek's native 8-byte DSA tag (DSA_TAG_PROTO_MTK).
In tag_8021q mode MxL862xx expects every ingress frame on the CPU port
to carry an outer 802.1Q tag whose VID identifies the target user
port; the switch's per-port egress catchall rule then strips that
outer tag. The VID to use is the one produced by
dsa_tag_8021q_standalone_vid(dp), which bakes the tag_8021q reserved
marker (RSV = GENMASK(11, 10)), the DSA tree's switch ID, and the user
port's dp->index into the low 12 bits of the VID.
Change mtk_flow_get_dsa_port() to return, alongside dp->index, the VID
the PPE should push on the offloaded frame, in a new push_vid
out-parameter:
- DSA_TAG_PROTO_MTK -> push_vid = 0;
caller uses mtk_foe_entry_set_dsa()
(magic etype) as before.
- DSA_TAG_PROTO_MXL862_8021Q -> push_vid = standalone VID;
caller pushes it as a VLAN via
mtk_foe_entry_set_vlan().
The switch statement is intentionally scoped: other tag_8021q-based
taggers (sja1105, ocelot_8021q, ...) use different tag layers
(CTAG-in-range rather than Q-in-Q) and/or bridge-scoped VIDs and are
not interchangeable here.
The prerequisite reorder commit ("mtk_ppe_offload: set output device
before VLAN/PPPoE push") put mtk_flow_set_output_device() ahead of the
user VLAN push loop, so the outer MxL862xx VID lands in vlan1 and a
subsequent user VLAN stacks into vlan2. Queue selection stays with the
DSA queue map introduced in the preceding patch.
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
---
.../net/ethernet/mediatek/mtk_ppe_offload.c | 54 +++++++++++++++++--
1 file changed, 49 insertions(+), 5 deletions(-)
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -10,6 +10,7 @@
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include <net/dsa.h>
+#include <linux/dsa/8021q.h>
#include "mtk_eth_soc.h"
#include "mtk_wed.h"
@@ -166,8 +167,25 @@ mtk_flow_mangle_ipv4(const struct flow_a
return 0;
}
+/*
+ * Inspect *dev as a DSA user netdev. On success, substitute *dev with
+ * the conduit netdev, return the DSA user port's dp->index, and in
+ * *push_vid hand back either 0 (native MTK tag; the PPE injects its
+ * magic etype later via mtk_foe_entry_set_dsa()) or the outer 802.1Q
+ * VID the PPE should push on the egressing frame (tag_8021q taggers
+ * that use an outer Q-in-Q tag with a standalone per-port VID, i.e.
+ * the MxL862xx tag_8021q tagger).
+ *
+ * push_vid may be NULL for callers that only want the idev->conduit
+ * substitution (e.g. ingress-side ppe_idx selection, where no VID
+ * is to be pushed on the flow entry).
+ *
+ * Other tag_8021q-based taggers (sja1105, ocelot_8021q, ...) use
+ * different tag layers and/or bridge-scoped VIDs; do not extend the
+ * switch below to cover them without per-tagger review.
+ */
static int
-mtk_flow_get_dsa_port(struct net_device **dev)
+mtk_flow_get_dsa_port(struct net_device **dev, u16 *push_vid)
{
#if IS_ENABLED(CONFIG_NET_DSA)
struct dsa_port *dp;
@@ -176,8 +194,18 @@ mtk_flow_get_dsa_port(struct net_device
if (IS_ERR(dp))
return -ENODEV;
- if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
+ switch (dp->cpu_dp->tag_ops->proto) {
+ case DSA_TAG_PROTO_MTK:
+ if (push_vid)
+ *push_vid = 0;
+ break;
+ case DSA_TAG_PROTO_MXL862_8021Q:
+ if (push_vid)
+ *push_vid = dsa_tag_8021q_standalone_vid(dp);
+ break;
+ default:
return -ENODEV;
+ }
*dev = dsa_port_to_conduit(dp);
@@ -195,6 +223,7 @@ mtk_flow_set_output_device(struct mtk_et
struct mtk_wdma_info info = {};
struct mtk_mac *mac;
int pse_port, dsa_port, queue;
+ u16 push_vid = 0;
if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
@@ -220,7 +249,7 @@ mtk_flow_set_output_device(struct mtk_et
goto out;
}
- dsa_port = mtk_flow_get_dsa_port(&dev);
+ dsa_port = mtk_flow_get_dsa_port(&dev, &push_vid);
if (dev == eth->netdev[0])
pse_port = PSE_GDM1_PORT;
@@ -231,8 +260,12 @@ mtk_flow_set_output_device(struct mtk_et
else
return -EOPNOTSUPP;
- if (dsa_port >= 0)
- mtk_foe_entry_set_dsa(eth, foe, dsa_port);
+ if (dsa_port >= 0) {
+ if (push_vid)
+ mtk_foe_entry_set_vlan(eth, foe, push_vid);
+ else
+ mtk_foe_entry_set_dsa(eth, foe, dsa_port);
+ }
if (dsa_port >= 0 && dsa_port < MTK_DSA_USER_PORT_MAX) {
mac = netdev_priv(dev);
@@ -275,6 +308,17 @@ mtk_flow_offload_replace(struct mtk_eth
flow_rule_match_meta(rule, &match);
if (mtk_is_netsys_v2_or_greater(eth)) {
idev = __dev_get_by_index(&init_net, match.key->ingress_ifindex);
+ /*
+ * If idev is a DSA user netdev, substitute it with
+ * its conduit so the valid-idev check below passes
+ * and ppe_index picks up the conduit's PPE engine.
+ * Without this, upstream flows from a DSA user port
+ * (e.g. MxL862xx lanN) install on ppe[0] while the
+ * conduit's GDMA routes ingress through its own
+ * ppe[mac->ppe_idx]; the lookup misses and the
+ * flow never binds in HW.
+ */
+ mtk_flow_get_dsa_port(&idev, NULL);
if (idev && idev->netdev_ops == eth->netdev[0]->netdev_ops) {
struct mtk_mac *mac = netdev_priv(idev);

View File

@@ -97,7 +97,7 @@ Signed-off-by: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
} else {
switch (speed) {
case SPEED_10:
@@ -904,7 +934,7 @@ static void mtk_xgdm_mac_link_up(struct
@@ -905,7 +935,7 @@ static void mtk_xgdm_mac_link_up(struct
return;
/* Eliminate the interference(before link-up) caused by PHY noise */
@@ -106,7 +106,7 @@ Signed-off-by: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
mdelay(20);
mtk_m32(mac->hw, XMAC_GLB_CNTCLR, XMAC_GLB_CNTCLR,
MTK_XMAC_CNT_CTRL(mac->id));
@@ -2937,10 +2967,16 @@ static int mtk_tx_alloc(struct mtk_eth *
@@ -2942,10 +2972,16 @@ static int mtk_tx_alloc(struct mtk_eth *
mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
val = MTK_QTX_SCH_MIN_RATE_EN |
@@ -126,7 +126,7 @@ Signed-off-by: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
if (mtk_is_netsys_v1(eth))
val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
@@ -5918,6 +5954,36 @@ static const struct mtk_soc_data mt7986_
@@ -5993,6 +6029,37 @@ static const struct mtk_soc_data mt7986_
},
};
@@ -142,6 +142,7 @@ Signed-off-by: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
+ .ppe_num = 2,
+ .hash_offset = 4,
+ .has_accounting = true,
+ .num_tx_queues = 32,
+ .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
+ .tx = {
+ DESC_SIZE(struct mtk_tx_dma_v2),
@@ -163,7 +164,7 @@ Signed-off-by: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
static const struct mtk_soc_data mt7988_data = {
.reg_map = &mt7988_reg_map,
.ana_rgc3 = 0x128,
@@ -5979,6 +6045,7 @@ const struct of_device_id of_mtk_match[]
@@ -6055,6 +6122,7 @@ const struct of_device_id of_mtk_match[]
{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
{ .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
@@ -173,7 +174,7 @@ Signed-off-by: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
{},
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -263,6 +263,13 @@
@@ -271,6 +271,13 @@
#define MTK_QTX_SCH_MAX_RATE_MAN GENMASK(10, 4)
#define MTK_QTX_SCH_MAX_RATE_EXP GENMASK(3, 0)
@@ -187,7 +188,7 @@ Signed-off-by: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
/* QDMA TX Scheduler Rate Control Register */
#define MTK_QDMA_TX_SCH_MAX_WFQ BIT(15)
@@ -538,9 +545,23 @@
@@ -546,9 +553,23 @@
#define XMAC_MCR_FORCE_RX_FC BIT(4)
/* XFI Mac logic reset registers */
@@ -212,7 +213,7 @@ Signed-off-by: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
/* XFI Mac count global control */
#define MTK_XMAC_CNT_CTRL(x) (MTK_XMAC_BASE(x) + 0x100)
#define XMAC_GLB_CNTCLR BIT(0)
@@ -841,6 +862,17 @@ enum mtk_clks_map {
@@ -849,6 +870,17 @@ enum mtk_clks_map {
BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
BIT_ULL(MTK_CLK_SGMII2_CDR_FB))
@@ -230,7 +231,7 @@ Signed-off-by: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
#define MT7988_CLKS_BITMAP (BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_ESW) | \
BIT_ULL(MTK_CLK_GP1) | BIT_ULL(MTK_CLK_GP2) | \
BIT_ULL(MTK_CLK_GP3) | BIT_ULL(MTK_CLK_XGP1) | \
@@ -997,12 +1029,14 @@ enum mkt_eth_capabilities {
@@ -1005,12 +1037,14 @@ enum mkt_eth_capabilities {
MTK_RSTCTRL_PPE2_BIT,
MTK_U3_COPHY_V2_BIT,
MTK_SRAM_BIT,
@@ -246,7 +247,7 @@ Signed-off-by: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
MTK_ETH_MUX_GMAC2_TO_2P5GPHY_BIT,
MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT,
MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT,
@@ -1044,14 +1078,16 @@ enum mkt_eth_capabilities {
@@ -1052,14 +1086,16 @@ enum mkt_eth_capabilities {
#define MTK_RSTCTRL_PPE2 BIT_ULL(MTK_RSTCTRL_PPE2_BIT)
#define MTK_U3_COPHY_V2 BIT_ULL(MTK_U3_COPHY_V2_BIT)
#define MTK_SRAM BIT_ULL(MTK_SRAM_BIT)
@@ -265,7 +266,7 @@ Signed-off-by: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
#define MTK_ETH_MUX_GMAC2_TO_2P5GPHY \
BIT_ULL(MTK_ETH_MUX_GMAC2_TO_2P5GPHY_BIT)
#define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
@@ -1083,12 +1119,13 @@ enum mkt_eth_capabilities {
@@ -1091,12 +1127,13 @@ enum mkt_eth_capabilities {
#define MTK_GMAC2_RGMII (MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII)
#define MTK_GMAC2_SGMII (MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII)
#define MTK_GMAC2_GEPHY (MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY)
@@ -283,7 +284,7 @@ Signed-off-by: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
/* MUXes present on SoCs */
/* 0: GDM1 -> GMAC1, 1: GDM1 -> ESW */
@@ -1098,9 +1135,9 @@ enum mkt_eth_capabilities {
@@ -1106,9 +1143,9 @@ enum mkt_eth_capabilities {
#define MTK_MUX_GMAC2_GMAC0_TO_GEPHY \
(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY | MTK_MUX | MTK_INFRA)
@@ -296,7 +297,7 @@ Signed-off-by: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
/* 2: GMAC1 -> SGMII, 3: GMAC2 -> SGMII */
#define MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
@@ -1140,18 +1177,24 @@ enum mkt_eth_capabilities {
@@ -1148,18 +1185,24 @@ enum mkt_eth_capabilities {
#define MT7629_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
MTK_GDM1_ESW | MTK_MUX_GDM1_TO_GMAC1_ESW | \
MTK_MUX_GMAC2_GMAC0_TO_GEPHY | \

View File

@@ -15,7 +15,7 @@ Signed-off-by: Bo-Cun Chen <bc-bocun.chen@mediatek.com>
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -4517,27 +4517,40 @@ static int mtk_hw_init(struct mtk_eth *e
@@ -4570,27 +4570,40 @@ static int mtk_hw_init(struct mtk_eth *e
mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) | PSE_DUMMY_WORK_GDM(2) |
PSE_DUMMY_WORK_GDM(3) | DUMMY_PAGE_THR, PSE_DUMY_REQ);