patch-2.4.26 linux-2.4.26/drivers/net/sungem.c

diff -urN linux-2.4.25/drivers/net/sungem.c linux-2.4.26/drivers/net/sungem.c
@@ -651,6 +651,7 @@
 	cluster_start = curr = (gp->rx_new & ~(4 - 1));
 	count = 0;
 	kick = -1;
+	wmb();
 	while (curr != limit) {
 		curr = NEXT_RX(curr);
 		if (++count == 4) {
@@ -667,13 +668,16 @@
 			count = 0;
 		}
 	}
-	if (kick >= 0)
+	if (kick >= 0) {
+		mb();
 		writel(kick, gp->regs + RXDMA_KICK);
+	}
 }
 
 static void gem_rx(struct gem *gp)
 {
 	int entry, drops;
+	u32 done;
 
 	if (netif_msg_intr(gp))
 		printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
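
The first two hunks pair a wmb() ahead of the descriptor scan with an
mb() in front of the RXDMA_KICK write: the chip must not see the
doorbell before the descriptor stores that justify it have reached
memory. Below is a minimal sketch of that producer-side contract, not
sungem code; struct ring_desc, ring_post, KICK_REG and regs are
hypothetical names for illustration only.

	#include <linux/types.h>
	#include <asm/byteorder.h>	/* cpu_to_le64 */
	#include <asm/system.h>		/* wmb(), mb() in the 2.4 tree */
	#include <asm/io.h>		/* writel() */

	#define KICK_REG	0x0100	/* hypothetical register offset */

	struct ring_desc {
		u64	status_word;	/* OWN bit hands the slot to hw */
		u64	buffer;		/* DMA address of the data */
	};

	static void ring_post(struct ring_desc *d, unsigned long regs,
			      dma_addr_t buf, u64 own_len, int new_head)
	{
		d->buffer = cpu_to_le64(buf);
		wmb();	/* buffer address visible before ownership flips */
		d->status_word = cpu_to_le64(own_len);

		mb();	/* all descriptor stores before the doorbell */
		writel(new_head, regs + KICK_REG);
	}
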
@@ -681,6 +685,7 @@
 
 	entry = gp->rx_new;
 	drops = 0;
+	done = readl(gp->regs + RXDMA_DONE);
 	for (;;) {
 		struct gem_rxd *rxd = &gp->init_block->rxd[entry];
 		struct sk_buff *skb;
@@ -691,6 +696,19 @@
 		if ((status & RXDCTRL_OWN) != 0)
 			break;
 
+		/* When writing back RX descriptor, GEM writes status
+	 * then buffer address, possibly in separate transactions.
+		 * If we don't wait for the chip to write both, we could
+	 * post a new buffer to this descriptor then have GEM stomp
+		 * on the buffer address.  We sync on the RX completion
+		 * register to prevent this from happening.
+		 */
+		if (entry == done) {
+			done = readl(gp->regs + RXDMA_DONE);
+			if (entry == done)
+				break;
+		}
+
 		skb = gp->rx_skbs[entry];
 
 		len = (status & RXDCTRL_BUFSZ) >> 16;
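
Here a clear OWN bit alone is not trusted: as the new comment says,
GEM may write the status and the buffer address back in separate PCI
transactions, so a descriptor can look free while its write-back is
still in flight. The previous hunk primes 'done' from RXDMA_DONE, and
this one cross-checks the entry against it, re-reading the register
once before giving up on the entry. The same logic as a standalone
helper (rx_writeback_complete is a hypothetical name; the driver
open-codes this in gem_rx()):

	/* Returns nonzero once it is safe to recycle 'entry'.  '*done'
	 * caches the last RXDMA_DONE value read, so the fast path does
	 * not pay an MMIO read per descriptor. */
	static int rx_writeback_complete(struct gem *gp, int entry, u32 *done)
	{
		if (entry != *done)
			return 1;	/* chip already moved past us */

		/* Entry matches the cached DONE pointer: refresh it.
		 * If the chip still reports this entry, its write-back
		 * may be incomplete even though OWN reads as clear. */
		*done = readl(gp->regs + RXDMA_DONE);
		return entry != *done;
	}
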
@@ -877,6 +895,7 @@
 		if (gem_intme(entry))
 			ctrl |= TXDCTRL_INTME;
 		txd->buffer = cpu_to_le64(mapping);
+		wmb();
 		txd->control_word = cpu_to_le64(ctrl);
 		entry = NEXT_TX(entry);
 	} else {
@@ -916,6 +935,7 @@
 			
 			txd = &gp->init_block->txd[entry];
 			txd->buffer = cpu_to_le64(mapping);
+			wmb();
 			txd->control_word = cpu_to_le64(this_ctrl | len);
 
 			if (gem_intme(entry))
@@ -925,6 +945,7 @@
 		}
 		txd = &gp->init_block->txd[first_entry];
 		txd->buffer = cpu_to_le64(first_mapping);
+		wmb();
 		txd->control_word =
 			cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
 	}
@@ -936,6 +957,7 @@
 	if (netif_msg_tx_queued(gp))
 		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
 		       dev->name, entry, skb->len);
+	mb();
 	writel(gp->tx_new, gp->regs + TXDMA_KICK);
 	spin_unlock_irq(&gp->lock);
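
The TX hunks enforce the same publish order per descriptor (buffer
pointer, wmb(), then control word), and for multi-fragment skbs the
first descriptor's control word, the one carrying TXDCTRL_SOF, is
written last, so the chip can never start on a half-built chain. A
condensed sketch of that order, where first, nfrags, frag_dma[],
frag_len[], ring and new_tx_head are hypothetical stand-ins for the
driver's locals:

	struct gem_txd *txd;
	int f;

	/* Publish fragments 1..nfrags-1 first; each slot's control
	 * word is written only after its buffer pointer is visible. */
	for (f = 1; f < nfrags; f++) {
		txd = &ring[(first + f) & (TX_RING_SIZE - 1)];
		txd->buffer = cpu_to_le64(frag_dma[f]);
		wmb();
		txd->control_word = cpu_to_le64(frag_len[f]);
	}

	/* The SOF descriptor goes last: until this store lands, the
	 * chip has no reason to touch any of the slots above. */
	txd = &ring[first];
	txd->buffer = cpu_to_le64(frag_dma[0]);
	wmb();
	txd->control_word = cpu_to_le64(TXDCTRL_SOF | frag_len[0]);

	mb();	/* descriptor stores before the doorbell */
	writel(new_tx_head, gp->regs + TXDMA_KICK);
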
 
@@ -1430,6 +1452,7 @@
 			gp->rx_skbs[i] = NULL;
 		}
 		rxd->status_word = 0;
+		wmb();
 		rxd->buffer = 0;
 	}
 
@@ -1491,6 +1514,7 @@
 					RX_BUF_ALLOC_SIZE(gp),
 					PCI_DMA_FROMDEVICE);
 		rxd->buffer = cpu_to_le64(dma_addr);
+		wmb();
 		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
 		skb_reserve(skb, RX_OFFSET);
 	}
@@ -1499,8 +1523,10 @@
 		struct gem_txd *txd = &gb->txd[i];
 
 		txd->control_word = 0;
+		wmb();
 		txd->buffer = 0;
 	}
+	wmb();
 }
 
 /* Must be invoked under gp->lock. */
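
The final hunks apply the same discipline to ring teardown and
re-initialization, in mirror image: to take a slot away from the
chip, neutralize the word it polls first, wmb(), then clear the
buffer pointer; to hand one over, install the buffer before the
status word, with a trailing wmb() fencing the whole ring before DMA
is re-enabled. In sketch form (rx_slot_retract and rx_slot_arm are
hypothetical helper names; the driver open-codes both):

	/* Retract a slot from the hardware: disarm first, then clear. */
	static void rx_slot_retract(struct gem_rxd *rxd)
	{
		rxd->status_word = 0;	/* chip no longer owns the slot */
		wmb();
		rxd->buffer = 0;	/* chip will not fetch this now */
	}

	/* Hand a freshly mapped receive buffer to the hardware. */
	static void rx_slot_arm(struct gem *gp, struct gem_rxd *rxd,
				dma_addr_t dma_addr)
	{
		rxd->buffer = cpu_to_le64(dma_addr);
		wmb();
		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
	}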
