[PATCH] staging: mt7621-eth: Refactor RX ring resource allocation and cleanup

Kamal Heib kamalheib1 at gmail.com
Fri May 18 17:33:27 UTC 2018


Simplify the code that allocates and cleans up RX ring resources by using
helper functions. Also make sure to free the already-allocated resources
in case of an allocation failure.

Signed-off-by: Kamal Heib <kamalheib1 at gmail.com>
---
 drivers/staging/mt7621-eth/mtk_eth_soc.c | 122 ++++++++++++++++++++-----------
 1 file changed, 81 insertions(+), 41 deletions(-)

diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c
index 2c7a2e666bfb..16cd514e9790 100644
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.c
+++ b/drivers/staging/mt7621-eth/mtk_eth_soc.c
@@ -278,38 +278,87 @@ static inline void mtk_set_txd_pdma(struct mtk_tx_dma *txd,
 	WRITE_ONCE(dma_txd->txd2, txd->txd2);
 }
 
-static void mtk_clean_rx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
+static void mtk_rx_unmap_dma(struct mtk_eth *eth, struct mtk_rx_ring *ring)
 {
 	int i;
 
-	if (ring->rx_data && ring->rx_dma) {
-		for (i = 0; i < ring->rx_ring_size; i++) {
-			if (!ring->rx_data[i])
-				continue;
-			if (!ring->rx_dma[i].rxd1)
-				continue;
+	for (i = 0; i < ring->rx_ring_size; i++) {
+		if (ring->rx_dma[i].rxd1)
 			dma_unmap_single(eth->dev,
 					 ring->rx_dma[i].rxd1,
 					 ring->rx_buf_size,
 					 DMA_FROM_DEVICE);
+	}
+}
+
+/* Map every RX data frag for DMA and program the descriptors.
+ * On any mapping failure, unmaps everything mapped so far.
+ * Returns 0 on success, -ENOMEM on dma_map_single() failure.
+ */
+static int mtk_rx_map_dma(struct mtk_eth *eth, struct mtk_rx_ring *ring)
+{
+	int i, pad = 0;
+
+	if (!eth->soc->rx_2b_offset)
+		pad = NET_IP_ALIGN;
+
+	for (i = 0; i < ring->rx_ring_size; i++) {
+		dma_addr_t dma_addr = dma_map_single(eth->dev,
+				ring->rx_data[i] + NET_SKB_PAD + pad,
+				ring->rx_buf_size,
+				DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+			goto unmap_dma;
+		ring->rx_dma[i].rxd1 = (unsigned int)dma_addr;
+
+		if (eth->soc->rx_sg_dma)
+			ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
+		else
+			ring->rx_dma[i].rxd2 = RX_DMA_LSO;
+	}
+
+	return 0;
+
+unmap_dma:
+	mtk_rx_unmap_dma(eth, ring);
+	return -ENOMEM;
+}
+
+static void mtk_rx_free_frags(struct mtk_rx_ring *ring)
+{
+	int i;
+
+	for (i = 0; i < ring->rx_ring_size; i++) {
+		if (ring->rx_data[i])
 			skb_free_frag(ring->rx_data[i]);
-		}
-		kfree(ring->rx_data);
-		ring->rx_data = NULL;
 	}
+}
 
-	if (ring->rx_dma) {
-		dma_free_coherent(eth->dev,
-				  ring->rx_ring_size * sizeof(*ring->rx_dma),
-				  ring->rx_dma,
-				  ring->rx_phys);
-		ring->rx_dma = NULL;
+static int mtk_rx_alloc_frags(struct mtk_rx_ring *ring)
+{
+	int i;
+
+	for (i = 0; i < ring->rx_ring_size; i++) {
+		ring->rx_data[i] = netdev_alloc_frag(ring->frag_size);
+		if (!ring->rx_data[i])
+			goto free_frags;
 	}
+
+	return 0;
+
+free_frags:
+	mtk_rx_free_frags(ring);
+	return -ENOMEM;
+}
+
+static void mtk_clean_rx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
+{
+	mtk_rx_unmap_dma(eth, ring);
+	mtk_rx_free_frags(ring);
+	dma_free_coherent(eth->dev, ring->rx_ring_size * sizeof(*ring->rx_dma),
+			  ring->rx_dma, ring->rx_phys);
+	kfree(ring->rx_data);
 }
 
 static int mtk_dma_rx_alloc(struct mtk_eth *eth, struct mtk_rx_ring *ring)
 {
-	int i, pad = 0;
+	int err;
 
 	ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
 	ring->rx_buf_size = mtk_max_buf_size(ring->frag_size);
@@ -317,38 +366,23 @@ static int mtk_dma_rx_alloc(struct mtk_eth *eth, struct mtk_rx_ring *ring)
 	ring->rx_data = kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data),
 				GFP_KERNEL);
 	if (!ring->rx_data)
-		goto no_rx_mem;
+		return -ENOMEM;
 
-	for (i = 0; i < ring->rx_ring_size; i++) {
-		ring->rx_data[i] = netdev_alloc_frag(ring->frag_size);
-		if (!ring->rx_data[i])
-			goto no_rx_mem;
-	}
+	err = mtk_rx_alloc_frags(ring);
+	if (err)
+		goto free_rx_data;
 
 	ring->rx_dma =
 		dma_alloc_coherent(eth->dev,
 				   ring->rx_ring_size * sizeof(*ring->rx_dma),
 				   &ring->rx_phys, GFP_ATOMIC | __GFP_ZERO);
 	if (!ring->rx_dma)
-		goto no_rx_mem;
-
-	if (!eth->soc->rx_2b_offset)
-		pad = NET_IP_ALIGN;
+		goto free_frags;
 
-	for (i = 0; i < ring->rx_ring_size; i++) {
-		dma_addr_t dma_addr = dma_map_single(eth->dev,
-				ring->rx_data[i] + NET_SKB_PAD + pad,
-				ring->rx_buf_size,
-				DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
-			goto no_rx_mem;
-		ring->rx_dma[i].rxd1 = (unsigned int)dma_addr;
+	err = mtk_rx_map_dma(eth, ring);
+	if (err)
+		goto free_dma;
 
-		if (eth->soc->rx_sg_dma)
-			ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
-		else
-			ring->rx_dma[i].rxd2 = RX_DMA_LSO;
-	}
 	ring->rx_calc_idx = ring->rx_ring_size - 1;
 	/* make sure that all changes to the dma ring are flushed before we
 	 * continue
@@ -357,7 +391,13 @@ static int mtk_dma_rx_alloc(struct mtk_eth *eth, struct mtk_rx_ring *ring)
 
 	return 0;
 
-no_rx_mem:
+free_dma:
+	dma_free_coherent(eth->dev, ring->rx_ring_size * sizeof(*ring->rx_dma),
+			  ring->rx_dma, ring->rx_phys);
+free_frags:
+	mtk_rx_free_frags(ring);
+free_rx_data:
+	kfree(ring->rx_data);
 	return -ENOMEM;
 }
 
-- 
2.14.3



More information about the devel mailing list