Merge patch series "drivers/net/airoha_eth: fixes"

Mikhail Kshevetskiy <mikhail.kshevetskiy@iopsys.eu> says:

Several fixes for the Airoha Ethernet driver.

Link: https://lore.kernel.org/r/20250709092810.4032971-1-mikhail.kshevetskiy@iopsys.eu
commit 2af7f4a456
Committer: Tom Rini
Date: 2025-07-15 09:56:08 -06:00


@@ -97,6 +97,7 @@
 	 (_n) == 2 ? GDM2_BASE : GDM1_BASE)
 #define REG_GDM_FWD_CFG(_n)	GDM_BASE(_n)
+#define GDM_PAD_EN		BIT(28)
 #define GDM_DROP_CRC_ERR	BIT(23)
 #define GDM_IP4_CKSUM		BIT(22)
 #define GDM_TCP_CKSUM		BIT(21)
@@ -354,13 +355,37 @@ static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
 #define airoha_switch_wr(eth, offset, val)	\
 	airoha_wr((eth)->switch_regs, (offset), (val))
 
+static inline dma_addr_t dma_map_unaligned(void *vaddr, size_t len,
+					   enum dma_data_direction dir)
+{
+	uintptr_t start, end;
+
+	start = ALIGN_DOWN((uintptr_t)vaddr, ARCH_DMA_MINALIGN);
+	end = ALIGN((uintptr_t)(vaddr + len), ARCH_DMA_MINALIGN);
+	return dma_map_single((void *)start, end - start, dir);
+}
+
+static inline void dma_unmap_unaligned(dma_addr_t addr, size_t len,
+				       enum dma_data_direction dir)
+{
+	uintptr_t start, end;
+
+	start = ALIGN_DOWN((uintptr_t)addr, ARCH_DMA_MINALIGN);
+	end = ALIGN((uintptr_t)(addr + len), ARCH_DMA_MINALIGN);
+	dma_unmap_single(start, end - start, dir);
+}
+
 static void airoha_fe_maccr_init(struct airoha_eth *eth)
 {
 	int p;
 
 	for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
-		/* Disable any kind of CRC drop or offload */
-		airoha_fe_wr(eth, REG_GDM_FWD_CFG(p), 0);
+		/*
+		 * Disable any kind of CRC drop or offload.
+		 * Enable padding of short TX packets to 60 bytes.
+		 */
+		airoha_fe_wr(eth, REG_GDM_FWD_CFG(p), GDM_PAD_EN);
 	}
 }
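
The two helpers above widen a mapping out to whole cachelines so that dma_map_single()/dma_unmap_single() never operate on a partial cacheline. A worked example with illustrative values (the 64-byte ARCH_DMA_MINALIGN and the descriptor address/length are assumptions, not taken from this patch):

	/* Assumed: ARCH_DMA_MINALIGN == 64, buffer at 0x1010, len == 32 */
	start = ALIGN_DOWN(0x1010, 64);	/* 0x1000 */
	end = ALIGN(0x1010 + 32, 64);	/* 0x1040 */
	/* dma_map_single() then covers 0x40 bytes, i.e. one full cacheline,
	 * so a flush/invalidate can never clip data sharing that line. */
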
@@ -371,13 +396,14 @@ static int airoha_fe_init(struct airoha_eth *eth)
 	return 0;
 }
 
-static void airoha_qdma_reset_rx_desc(struct airoha_queue *q, int index,
-				      uchar *rx_packet)
+static void airoha_qdma_reset_rx_desc(struct airoha_queue *q, int index)
 {
 	struct airoha_qdma_desc *desc;
+	uchar *rx_packet;
 	u32 val;
 
 	desc = &q->desc[index];
+	rx_packet = net_rx_packets[index];
 	index = (index + 1) % q->ndesc;
 	dma_map_single(rx_packet, PKTSIZE_ALIGN, DMA_TO_DEVICE);
@@ -391,7 +417,7 @@ static void airoha_qdma_reset_rx_desc(struct airoha_queue *q, int index,
 	val = FIELD_PREP(QDMA_DESC_LEN_MASK, PKTSIZE_ALIGN);
 	WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
 
-	dma_map_single(desc, sizeof(*desc), DMA_TO_DEVICE);
+	dma_map_unaligned(desc, sizeof(*desc), DMA_TO_DEVICE);
 }
 
 static void airoha_qdma_init_rx_desc(struct airoha_queue *q)
@@ -399,7 +425,7 @@ static void airoha_qdma_init_rx_desc(struct airoha_queue *q)
 	int i;
 
 	for (i = 0; i < q->ndesc; i++)
-		airoha_qdma_reset_rx_desc(q, i, net_rx_packets[i]);
+		airoha_qdma_reset_rx_desc(q, i);
 }
 
 static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
@@ -423,10 +449,14 @@
 			RX_RING_SIZE_MASK,
 			FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
 
+	/*
+	 * See arht_eth_free_pkt() for the reasoning behind the value
+	 * written to the REG_RX_CPU_IDX(qid) register.
+	 */
 	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
 			FIELD_PREP(RX_RING_THR_MASK, 0));
 	airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
-			FIELD_PREP(RX_RING_CPU_IDX_MASK, q->ndesc - 1));
+			FIELD_PREP(RX_RING_CPU_IDX_MASK, q->ndesc - 3));
 	airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
 			FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
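
A minimal sketch of the resulting ring occupancy, using a hypothetical 16-descriptor ring (the size is illustrative only): initialising the CPU index to ndesc - 3 instead of ndesc - 1 keeps two descriptors in reserve for the two-descriptor cacheline flushes described in arht_eth_free_pkt(), so at most ndesc - 2 descriptors are ever offered to the hardware.

	/* Hypothetical ring: ndesc == 16, q->head == 0 */
	cpu_idx = 16 - 3;	/* 13: slots 14 and 15 stay reserved */
	usable = 16 - 2;	/* 14 descriptors available for reception */
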
@@ -804,6 +834,11 @@ static int airoha_eth_send(struct udevice *dev, void *packet, int length)
 	u32 val;
 	int i;
 
+	/*
+	 * There is no need to pad short TX packets to 60 bytes since the
+	 * GDM_PAD_EN bit is set in the corresponding REG_GDM_FWD_CFG(n)
+	 * register.
+	 */
 	dma_addr = dma_map_single(packet, length, DMA_TO_DEVICE);
 
 	qid = 0;
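
For reference, without GDM_PAD_EN the driver would have to pad runt frames in software before mapping them. A hedged sketch of that avoided work (not part of this patch; Ethernet frames must carry at least 60 bytes before the FCS):

	/* Hypothetical software padding, made unnecessary by GDM_PAD_EN */
	if (length < 60) {
		memset(packet + length, 0, 60 - length);
		length = 60;
	}
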
@@ -826,14 +861,14 @@ static int airoha_eth_send(struct udevice *dev, void *packet, int length)
 	WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
 	WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
 
-	dma_map_single(desc, sizeof(*desc), DMA_TO_DEVICE);
+	dma_map_unaligned(desc, sizeof(*desc), DMA_TO_DEVICE);
 
 	airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
 			FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
 
 	for (i = 0; i < 100; i++) {
-		dma_unmap_single(virt_to_phys(desc), sizeof(*desc),
-				 DMA_FROM_DEVICE);
+		dma_unmap_unaligned(virt_to_phys(desc), sizeof(*desc),
+				    DMA_FROM_DEVICE);
 		if (desc->ctrl & QDMA_DESC_DONE_MASK)
 			break;
@@ -864,8 +899,8 @@ static int airoha_eth_recv(struct udevice *dev, int flags, uchar **packetp)
 	q = &qdma->q_rx[qid];
 	desc = &q->desc[q->head];
-	dma_unmap_single(virt_to_phys(desc), sizeof(*desc),
-			 DMA_FROM_DEVICE);
+	dma_unmap_unaligned(virt_to_phys(desc), sizeof(*desc),
+			    DMA_FROM_DEVICE);
 
 	if (!(desc->ctrl & QDMA_DESC_DONE_MASK))
 		return -EAGAIN;
@@ -885,6 +920,7 @@ static int arht_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
 	struct airoha_qdma *qdma = &eth->qdma[0];
 	struct airoha_queue *q;
 	int qid;
+	u16 prev, pprev;
 
 	if (!packet)
 		return 0;
@@ -892,14 +928,25 @@ static int arht_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
 	qid = 0;
 	q = &qdma->q_rx[qid];
 
-	dma_map_single(packet, length, DMA_TO_DEVICE);
-	airoha_qdma_reset_rx_desc(q, q->head, packet);
-
-	airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
-			FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
+	/*
+	 * Due to CPU cache alignment, airoha_qdma_reset_rx_desc() always
+	 * touches two descriptors:
+	 *   - if the current descriptor is even, the previous descriptor
+	 *     and the one before it are touched (previous cacheline)
+	 *   - if the current descriptor is odd, only the current and
+	 *     previous descriptors are touched (current cacheline)
+	 *
+	 * Thus, to avoid corrupting the RX queue, only (q->ndesc - 2)
+	 * descriptors may be used for receiving packets.
+	 */
+	prev = (q->head + q->ndesc - 1) % q->ndesc;
+	pprev = (q->head + q->ndesc - 2) % q->ndesc;
 	q->head = (q->head + 1) % q->ndesc;
 
+	airoha_qdma_reset_rx_desc(q, prev);
+	airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
+			FIELD_PREP(RX_RING_CPU_IDX_MASK, pprev));
 
 	return 0;
 }
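
A worked example of the cacheline arithmetic in the comment above, assuming (hypothetically) sizeof(struct airoha_qdma_desc) == 32 and ARCH_DMA_MINALIGN == 64, so two descriptors share each cacheline:

	/* Assumed sizes: 32-byte descriptors, 64-byte cachelines */
	uintptr_t line = ALIGN_DOWN((uintptr_t)&q->desc[q->head], 64);
	/* line holds desc[head & ~1] and desc[(head & ~1) + 1]: flushing one
	 * descriptor always writes back its neighbour, so the index handed
	 * back to the hardware (pprev) must trail the descriptor being
	 * reset (prev) by a full slot. */
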
@@ -926,6 +973,7 @@ static int arht_eth_write_hwaddr(struct udevice *dev)
 static const struct udevice_id airoha_eth_ids[] = {
 	{ .compatible = "airoha,en7581-eth" },
+	{ }
 };
 
 static const struct eth_ops airoha_eth_ops = {