[PATCH v5 4/7] staging: et131x: drop packet when error occurs in et131x_tx

ZHAO Gang gamerh2o at gmail.com
Fri Nov 22 16:36:00 UTC 2013


As the TODO file suggests, drop the packet instead of returning
NETDEV_TX_BUSY when tx fails.
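
(For context: the convention for ndo_start_xmit handlers is that
NETDEV_TX_BUSY asks the core to requeue and retry the same skb, while
NETDEV_TX_OK means the driver has consumed it, whether it was sent or
dropped. Below is a minimal sketch of that drop-on-error pattern;
my_priv, my_hw_queue_full() and my_hw_send() are hypothetical
placeholders, not et131x code, and the sketch assumes my_hw_send()
does not consume the skb on failure.)

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helpers, for illustration only. */
struct my_priv;
bool my_hw_queue_full(struct my_priv *priv);
int my_hw_send(struct my_priv *priv, struct sk_buff *skb);

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct my_priv *priv = netdev_priv(netdev);

	/* On any failure, free the skb, count the drop, and report
	 * NETDEV_TX_OK so the core does not keep resubmitting the
	 * same packet.
	 */
	if (my_hw_queue_full(priv) || !netif_carrier_ok(netdev) ||
	    my_hw_send(priv, skb) != 0) {
		dev_kfree_skb_any(skb);
		netdev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	return NETDEV_TX_OK;
}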

et131x_tx calls et131x_send_packets; I moved the work of
et131x_send_packets directly into et131x_tx and made some changes to
make the code more readable.

Signed-off-by: ZHAO Gang <gamerh2o at gmail.com>
---
v3 -> v4: no change
v4 -> v5: simplify code as suggested by Dan

 drivers/staging/et131x/et131x.c | 84 +++++++++++------------------------------
 1 file changed, 22 insertions(+), 62 deletions(-)

diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index 6df7145..83d29811 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -3128,55 +3128,6 @@ static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
 	return 0;
 }
 
-/* et131x_send_packets - This function is called by the OS to send packets
- * @skb: the packet(s) to send
- * @netdev:device on which to TX the above packet(s)
- *
- * Return 0 in almost all cases; non-zero value in extreme hard failure only
- */
-static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
-{
-	int status = 0;
-	struct et131x_adapter *adapter = netdev_priv(netdev);
-
-	/* Send these packets
-	 *
-	 * NOTE: The Linux Tx entry point is only given one packet at a time
-	 * to Tx, so the PacketCount and it's array used makes no sense here
-	 */
-
-	/* TCB is not available */
-	if (adapter->tx_ring.used >= NUM_TCB) {
-		/* NOTE: If there's an error on send, no need to queue the
-		 * packet under Linux; if we just send an error up to the
-		 * netif layer, it will resend the skb to us.
-		 */
-		status = -ENOMEM;
-	} else {
-		/* We need to see if the link is up; if it's not, make the
-		 * netif layer think we're good and drop the packet
-		 */
-		if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) ||
-					!netif_carrier_ok(netdev)) {
-			dev_kfree_skb_any(skb);
-			skb = NULL;
-
-			adapter->net_stats.tx_dropped++;
-		} else {
-			status = send_packet(skb, adapter);
-			if (status != 0 && status != -ENOMEM) {
-				/* On any other error, make netif think we're
-				 * OK and drop the packet
-				 */
-				dev_kfree_skb_any(skb);
-				skb = NULL;
-				adapter->net_stats.tx_dropped++;
-			}
-		}
-	}
-	return status;
-}
-
 /* free_send_packet - Recycle a struct tcb
  * @adapter: pointer to our adapter
  * @tcb: pointer to struct tcb
@@ -4542,10 +4493,8 @@ static void et131x_multicast(struct net_device *netdev)
 /* et131x_tx - The handler to tx a packet on the device
  * @skb: data to be Tx'd
  * @netdev: device on which data is to be Tx'd
- *
- * Returns 0 on success, errno on failure (as defined in errno.h)
  */
-static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
 {
 	int status = 0;
 	struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -4558,17 +4507,28 @@ static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
 	/* Save the timestamp for the TX timeout watchdog */
 	netdev->trans_start = jiffies;
 
-	/* Call the device-specific data Tx routine */
-	status = et131x_send_packets(skb, netdev);
+	/* TCB is not available */
+	if (adapter->tx_ring.used >= NUM_TCB)
+		goto drop;
 
-	/* Check status and manage the netif queue if necessary */
-	if (status != 0) {
-		if (status == -ENOMEM)
-			status = NETDEV_TX_BUSY;
-		else
-			status = NETDEV_TX_OK;
-	}
-	return status;
+	/* We need to see if the link is up; if it's not, make the
+	 * netif layer think we're good and drop the packet
+	 */
+	if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) ||
+	    !netif_carrier_ok(netdev))
+		goto drop;
+
+	status = send_packet(skb, adapter);
+	if (status)
+		goto drop;
+
+	return NETDEV_TX_OK;
+
+drop:
+	dev_kfree_skb_any(skb);
+	adapter->net_stats.tx_dropped++;
+	/* return success to make netif layer happy */
+	return NETDEV_TX_OK;
 }
 
 /* et131x_tx_timeout - Timeout handler
-- 
1.8.3.1
