patch-2.4.4 linux/drivers/net/sungem.c
- Lines: 848
- Date: Thu Apr 26 22:17:25 2001
- Orig file: v2.4.3/linux/drivers/net/sungem.c
- Orig date: Sun Mar 25 18:14:21 2001
diff -u --recursive --new-file v2.4.3/linux/drivers/net/sungem.c linux/drivers/net/sungem.c
@@ -1,4 +1,4 @@
-/* $Id: sungem.c,v 1.8 2001/03/22 22:48:51 davem Exp $
+/* $Id: sungem.c,v 1.13 2001/04/20 08:16:28 davem Exp $
* sungem.c: Sun GEM ethernet driver.
*
* Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com)
@@ -36,6 +36,11 @@
#include <asm/pbm.h>
#endif
+#ifdef __powerpc__
+#include <asm/pci-bridge.h>
+#include <asm/prom.h>
+#endif
+
#include "sungem.h"
static char version[] __devinitdata =
@@ -64,11 +69,8 @@
*/
{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-#if 0
- /* Need to figure this one out. */
- { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_PPC_GEM,
+ { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-#endif
{0, }
};
@@ -159,13 +161,20 @@
dev->name);
}
- if (pcs_miistat & PCS_MIISTAT_LS)
+ if (pcs_miistat & PCS_MIISTAT_LS) {
printk(KERN_INFO "%s: PCS link is now up.\n",
dev->name);
- else
+ } else {
printk(KERN_INFO "%s: PCS link is now down.\n",
dev->name);
+ /* If this happens and the link timer is not running,
+ * reset so we re-negotiate.
+ */
+ if (!timer_pending(&gp->link_timer))
+ return 1;
+ }
+
return 0;
}
@@ -181,13 +190,13 @@
return 0;
if (txmac_stat & MAC_TXSTAT_URUN) {
- printk("%s: TX MAC xmit underrun.\n",
+ printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
dev->name);
gp->net_stats.tx_fifo_errors++;
}
if (txmac_stat & MAC_TXSTAT_MPE) {
- printk("%s: TX MAC max packet size error.\n",
+ printk(KERN_ERR "%s: TX MAC max packet size error.\n",
dev->name);
gp->net_stats.tx_errors++;
}
@@ -219,7 +228,7 @@
u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
if (rxmac_stat & MAC_RXSTAT_OFLW) {
- printk("%s: RX MAC fifo overflow.\n",
+ printk(KERN_ERR "%s: RX MAC fifo overflow.\n",
dev->name);
gp->net_stats.rx_over_errors++;
gp->net_stats.rx_fifo_errors++;
@@ -274,7 +283,8 @@
{
u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);
- if (gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
+ if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
+ gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
printk(KERN_ERR "%s: PCI error [%04x] ",
dev->name, pci_estat);
@@ -405,21 +415,41 @@
while (entry != limit) {
struct sk_buff *skb;
struct gem_txd *txd;
- u32 dma_addr;
+ u32 dma_addr, dma_len;
+ int frag;
- txd = &gp->init_block->txd[entry];
skb = gp->tx_skbs[entry];
- dma_addr = (u32) le64_to_cpu(txd->buffer);
- pci_unmap_single(gp->pdev, dma_addr,
- skb->len, PCI_DMA_TODEVICE);
- gp->tx_skbs[entry] = NULL;
+ if (skb_shinfo(skb)->nr_frags) {
+ int last = entry + skb_shinfo(skb)->nr_frags;
+ int walk = entry;
+ int incomplete = 0;
+ last &= (TX_RING_SIZE - 1);
+ for (;;) {
+ walk = NEXT_TX(walk);
+ if (walk == limit)
+ incomplete = 1;
+ if (walk == last)
+ break;
+ }
+ if (incomplete)
+ break;
+ }
+ gp->tx_skbs[entry] = NULL;
gp->net_stats.tx_bytes += skb->len;
- gp->net_stats.tx_packets++;
- dev_kfree_skb_irq(skb);
+ for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
+ txd = &gp->init_block->txd[entry];
- entry = NEXT_TX(entry);
+ dma_addr = (u32) le64_to_cpu(txd->buffer);
+ dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;
+
+ pci_unmap_single(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
+ entry = NEXT_TX(entry);
+ }
+
+ gp->net_stats.tx_packets++;
+ dev_kfree_skb_irq(skb);
}
gp->tx_old = entry;
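The fragment walk added above keeps completion from tearing an skb apart: before anything is unmapped, it verifies that every descriptor the skb occupies (head chunk plus nr_frags fragments) already lies inside the window the chip has completed, and bails out of the loop otherwise. The same ring arithmetic as a standalone sketch (TX_RING_SIZE and NEXT_TX as in sungem.h; the ring size here is illustrative):

    #define TX_RING_SIZE 128                     /* power of two; illustrative */
    #define NEXT_TX(N)   (((N) + 1) & (TX_RING_SIZE - 1))

    /* Non-zero if descriptors [entry, entry + nfrags] all precede 'limit'
     * on the ring, i.e. the chip has finished the whole skb.  Only called
     * for nfrags > 0, as in the driver.
     */
    static int gem_skb_done(int entry, int nfrags, int limit)
    {
            int last = (entry + nfrags) & (TX_RING_SIZE - 1);
            int walk = entry;

            for (;;) {
                    walk = NEXT_TX(walk);
                    if (walk == limit)
                            return 0;       /* hit the producer edge first */
                    if (walk == last)
                            return 1;       /* reached the skb's final descriptor */
            }
    }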
@@ -441,7 +471,7 @@
struct gem_rxd *rxd =
&gp->init_block->rxd[cluster_start];
for (;;) {
- rxd->status_word = cpu_to_le64(RXDCTRL_FRESH);
+ rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
rxd++;
cluster_start = NEXT_RX(cluster_start);
if (cluster_start == curr)
@@ -491,19 +521,19 @@
if (len > RX_COPY_THRESHOLD) {
struct sk_buff *new_skb;
- new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
+ new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
if (new_skb == NULL) {
drops++;
goto drop_it;
}
pci_unmap_single(gp->pdev, dma_addr,
- RX_BUF_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+ RX_BUF_ALLOC_SIZE(gp), PCI_DMA_FROMDEVICE);
gp->rx_skbs[entry] = new_skb;
new_skb->dev = gp->dev;
skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET));
rxd->buffer = cpu_to_le64(pci_map_single(gp->pdev,
new_skb->data,
- RX_BUF_ALLOC_SIZE,
+ RX_BUF_ALLOC_SIZE(gp),
PCI_DMA_FROMDEVICE));
skb_reserve(new_skb, RX_OFFSET);
@@ -527,6 +557,8 @@
skb = copy_skb;
}
+ skb->csum = ((status & RXDCTRL_TCPCSUM) ^ 0xffff);
+ skb->ip_summed = CHECKSUM_HW;
skb->protocol = eth_type_trans(skb, gp->dev);
netif_rx(skb);
@@ -548,8 +580,8 @@
static void gem_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
- struct net_device *dev = (struct net_device *) dev_id;
- struct gem *gp = (struct gem *) dev->priv;
+ struct net_device *dev = dev_id;
+ struct gem *gp = dev->priv;
u32 gem_status = readl(gp->regs + GREG_STAT);
spin_lock(&gp->lock);
@@ -567,33 +599,114 @@
spin_unlock(&gp->lock);
}
+static void gem_tx_timeout(struct net_device *dev)
+{
+ struct gem *gp = dev->priv;
+
+ printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+ printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n",
+ dev->name,
+ readl(gp->regs + TXDMA_CFG),
+ readl(gp->regs + MAC_TXSTAT),
+ readl(gp->regs + MAC_TXCFG));
+ printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
+ dev->name,
+ readl(gp->regs + RXDMA_CFG),
+ readl(gp->regs + MAC_RXSTAT),
+ readl(gp->regs + MAC_RXCFG));
+
+ spin_lock_irq(&gp->lock);
+
+ gem_stop(gp, gp->regs);
+ gem_init_rings(gp, 1);
+ gem_init_hw(gp);
+
+ spin_unlock_irq(&gp->lock);
+
+ netif_wake_queue(dev);
+}
+
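(The net core's TX watchdog is what calls dev->tx_timeout: once netif_stop_queue() has left the queue stopped for longer than dev->watchdog_timeo, the handler above runs. Both hooks are wired up in gem_init_one further down, with a 5 second timeout.)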
static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct gem *gp = (struct gem *) dev->priv;
- long len;
- int entry, avail;
- u32 mapping;
+ struct gem *gp = dev->priv;
+ int entry;
+ u64 ctrl;
- len = skb->len;
- mapping = pci_map_single(gp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ ctrl = 0;
+ if (skb->ip_summed == CHECKSUM_HW) {
+ u64 csum_start_off, csum_stuff_off;
+
+ csum_start_off = (u64) (skb->h.raw - skb->data);
+ csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);
+
+ ctrl = (TXDCTRL_CENAB |
+ (csum_start_off << 15) |
+ (csum_stuff_off << 21));
+ }
spin_lock_irq(&gp->lock);
- entry = gp->tx_new;
- gp->tx_skbs[entry] = skb;
- gp->tx_new = NEXT_TX(entry);
- avail = TX_BUFFS_AVAIL(gp);
- if (avail <= 0)
+ if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
+ spin_unlock_irq(&gp->lock);
+ return 1;
+ }
- {
+ entry = gp->tx_new;
+ gp->tx_skbs[entry] = skb;
+
+ if (skb_shinfo(skb)->nr_frags == 0) {
struct gem_txd *txd = &gp->init_block->txd[entry];
- u64 ctrl = (len & TXDCTRL_BUFSZ) | TXDCTRL_EOF | TXDCTRL_SOF;
+ u32 mapping, len;
- txd->control_word = cpu_to_le64(ctrl);
+ len = skb->len;
+ mapping = pci_map_single(gp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
txd->buffer = cpu_to_le64(mapping);
+ txd->control_word = cpu_to_le64(ctrl);
+ entry = NEXT_TX(entry);
+ } else {
+ struct gem_txd *txd;
+ u32 first_len, first_mapping;
+ int frag, first_entry = entry;
+
+ /* We must give this initial chunk to the device last.
+ * Otherwise we could race with the device.
+ */
+ first_len = skb->len - skb->data_len;
+ first_mapping = pci_map_single(gp->pdev, skb->data,
+ first_len, PCI_DMA_TODEVICE);
+ entry = NEXT_TX(entry);
+
+ for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+ skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
+ u32 len, mapping;
+ u64 this_ctrl;
+
+ len = this_frag->size;
+ mapping = pci_map_single(gp->pdev,
+ ((void *) page_address(this_frag->page) +
+ this_frag->page_offset),
+ len, PCI_DMA_TODEVICE);
+ this_ctrl = ctrl;
+ if (frag == skb_shinfo(skb)->nr_frags - 1)
+ this_ctrl |= TXDCTRL_EOF;
+
+ txd = &gp->init_block->txd[entry];
+ txd->buffer = cpu_to_le64(mapping);
+ txd->control_word = cpu_to_le64(this_ctrl | len);
+
+ entry = NEXT_TX(entry);
+ }
+ txd = &gp->init_block->txd[first_entry];
+ txd->buffer = cpu_to_le64(first_mapping);
+ txd->control_word = cpu_to_le64(ctrl | TXDCTRL_SOF | first_len);
}
+ gp->tx_new = entry;
+ if (TX_BUFFS_AVAIL(gp) <= 0)
+ netif_stop_queue(dev);
+
writel(gp->tx_new, gp->regs + TXDMA_KICK);
spin_unlock_irq(&gp->lock);
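Two details in this hunk deserve spelling out. For fragmented skbs, the SOF descriptor covering the head chunk is written last: the chip starts consuming descriptors as soon as their control words look valid, so publishing the first one only after every fragment descriptor is in place closes the race the in-code comment mentions. For the checksum fields, skb->h.raw - skb->data is where hardware checksumming starts, and under CHECKSUM_HW in 2.4 skb->csum is the offset of the checksum field within that header. A worked example with assumed values for a plain TCP/IPv4 frame (14-byte Ethernet header, 20-byte IP header):

    /* skb->h.raw - skb->data == 34: checksumming starts at the TCP header.
     * skb->csum              == 16: offset of tcphdr.check within it.
     */
    u64 csum_start_off = 34;
    u64 csum_stuff_off = 34 + 16;   /* 50: where the result gets stored */

    u64 ctrl = TXDCTRL_CENAB
             | (csum_start_off << 15)   /* start-offset field of the control word */
             | (csum_stuff_off << 21);  /* stuff-offset field */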
@@ -602,6 +715,30 @@
return 0;
}
+/* Jumbo-grams don't seem to work :-( */
+#if 1
+#define MAX_MTU 1500
+#else
+#define MAX_MTU 9000
+#endif
+
+static int gem_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct gem *gp = dev->priv;
+
+ if (new_mtu < 0 || new_mtu > MAX_MTU)
+ return -EINVAL;
+
+ spin_lock_irq(&gp->lock);
+ gem_stop(gp, gp->regs);
+ dev->mtu = new_mtu;
+ gem_init_rings(gp, 1);
+ gem_init_hw(gp);
+ spin_unlock_irq(&gp->lock);
+
+ return 0;
+}
+
#define STOP_TRIES 32
static void gem_stop(struct gem *gp, unsigned long regs)
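gem_change_mtu() above is installed as dev->change_mtu later in this patch (in gem_init_one), which in 2.4 is the hook the SIOCSIFMTU ioctl path calls, so every MTU change funnels through the driver. Roughly, and only as a sketch of the core's side (not part of this patch):

    if (dev->change_mtu)
            err = dev->change_mtu(dev, new_mtu);    /* gem_change_mtu() here */
    else
            dev->mtu = new_mtu;

With MAX_MTU pinned at 1500 while jumbo frames misbehave, a request like mtu 9000 just returns -EINVAL.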
@@ -676,8 +813,29 @@
} else if (full_duplex) {
val |= MAC_XIFCFG_FLED;
}
+
+ if (speed == 1000)
+ val |= (MAC_XIFCFG_GMII);
+
writel(val, gp->regs + MAC_XIFCFG);
+ /* If gigabit and half-duplex, enable carrier extension
+ * mode. Else, disable it.
+ */
+ if (speed == 1000 && !full_duplex) {
+ val = readl(gp->regs + MAC_TXCFG);
+ writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);
+
+ val = readl(gp->regs + MAC_RXCFG);
+ writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
+ } else {
+ val = readl(gp->regs + MAC_TXCFG);
+ writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);
+
+ val = readl(gp->regs + MAC_RXCFG);
+ writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
+ }
+
if (gp->phy_type == phy_serialink ||
gp->phy_type == phy_serdes) {
u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
@@ -689,10 +847,6 @@
val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
writel(val, gp->regs + MAC_MCCFG);
- /* XXX Set up PCS MII Control and Serialink Control
- * XXX registers.
- */
-
if (!full_duplex)
writel(512, gp->regs + MAC_STIME);
else
@@ -767,7 +921,16 @@
restart_timer = gem_mdio_link_not_up(gp);
}
} else {
- /* XXX Code PCS support... XXX */
+ u32 val = readl(gp->regs + PCS_MIISTAT);
+
+ if (!(val & PCS_MIISTAT_LS))
+ val = readl(gp->regs + PCS_MIISTAT);
+
+ if ((val & PCS_MIISTAT_LS) == 0) {
+ restart_timer = 1;
+ } else {
+ gem_set_link_modes(gp);
+ }
}
if (restart_timer) {
@@ -792,7 +955,7 @@
skb = gp->rx_skbs[i];
dma_addr = (u32) le64_to_cpu(rxd->buffer);
pci_unmap_single(gp->pdev, dma_addr,
- RX_BUF_ALLOC_SIZE,
+ RX_BUF_ALLOC_SIZE(gp),
PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
gp->rx_skbs[i] = NULL;
@@ -804,14 +967,22 @@
for (i = 0; i < TX_RING_SIZE; i++) {
if (gp->tx_skbs[i] != NULL) {
struct gem_txd *txd;
+ int frag;
skb = gp->tx_skbs[i];
- txd = &gb->txd[i];
- dma_addr = (u32) le64_to_cpu(txd->buffer);
- pci_unmap_single(gp->pdev, dma_addr,
- skb->len, PCI_DMA_TODEVICE);
- dev_kfree_skb_any(skb);
gp->tx_skbs[i] = NULL;
+
+ for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
+ txd = &gb->txd[i];
+ dma_addr = (u32) le64_to_cpu(txd->buffer);
+ pci_unmap_single(gp->pdev, dma_addr,
+ le64_to_cpu(txd->control_word) &
+ TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);
+
+ if (frag != skb_shinfo(skb)->nr_frags)
+ i++;
+ }
+ dev_kfree_skb_any(skb);
}
}
}
@@ -834,7 +1005,7 @@
struct sk_buff *skb;
struct gem_rxd *rxd = &gb->rxd[i];
- skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags);
+ skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), gfp_flags);
if (!skb) {
rxd->buffer = 0;
rxd->status_word = 0;
@@ -845,10 +1016,10 @@
skb->dev = dev;
skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET));
dma_addr = pci_map_single(gp->pdev, skb->data,
- RX_BUF_ALLOC_SIZE,
+ RX_BUF_ALLOC_SIZE(gp),
PCI_DMA_FROMDEVICE);
rxd->buffer = cpu_to_le64(dma_addr);
- rxd->status_word = cpu_to_le64(RXDCTRL_FRESH);
+ rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
skb_reserve(skb, RX_OFFSET);
}
@@ -862,16 +1033,21 @@
static void gem_init_phy(struct gem *gp)
{
- if (gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
+ if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
+ gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
+ u32 val;
+
/* Init datapath mode register. */
if (gp->phy_type == phy_mii_mdio0 ||
gp->phy_type == phy_mii_mdio1) {
- writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
+ val = PCS_DMODE_MGM;
} else if (gp->phy_type == phy_serialink) {
- writel(PCS_DMODE_SM, gp->regs + PCS_DMODE);
+ val = PCS_DMODE_SM | PCS_DMODE_GMOE;
} else {
- writel(PCS_DMODE_ESM, gp->regs + PCS_DMODE);
+ val = PCS_DMODE_ESM;
}
+
+ writel(val, gp->regs + PCS_DMODE);
}
if (gp->phy_type == phy_mii_mdio0 ||
@@ -899,7 +1075,59 @@
val |= (PHY_CTRL_ANRES | PHY_CTRL_ANENAB);
phy_write(gp, PHY_CTRL, val);
} else {
- /* XXX Implement me XXX */
+ u32 val;
+ int limit;
+
+ /* Reset PCS unit. */
+ val = readl(gp->regs + PCS_MIICTRL);
+ val |= PCS_MIICTRL_RST;
+ writeb(val, gp->regs + PCS_MIICTRL);
+
+ limit = 32;
+ while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
+ udelay(100);
+ if (limit-- <= 0)
+ break;
+ }
+ if (limit <= 0)
+ printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
+ gp->dev->name);
+
+ /* Make sure PCS is disabled while changing advertisement
+ * configuration.
+ */
+ val = readl(gp->regs + PCS_CFG);
+ val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
+ writel(val, gp->regs + PCS_CFG);
+
+ /* Advertise all capabilities. */
+ val = readl(gp->regs + PCS_MIIADV);
+ val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
+ PCS_MIIADV_SP | PCS_MIIADV_AP);
+ writel(val, gp->regs + PCS_MIIADV);
+
+ /* Enable and restart auto-negotiation, disable wrapback/loopback,
+ * and re-enable PCS.
+ */
+ val = readl(gp->regs + PCS_MIICTRL);
+ val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
+ val &= ~PCS_MIICTRL_WB;
+ writel(val, gp->regs + PCS_MIICTRL);
+
+ val = readl(gp->regs + PCS_CFG);
+ val |= PCS_CFG_ENABLE;
+ writel(val, gp->regs + PCS_CFG);
+
+ /* Make sure serialink loopback is off. The meaning
+ * of this bit is logically inverted based upon whether
+ * you are in Serialink or SERDES mode.
+ */
+ val = readl(gp->regs + PCS_SCTRL);
+ if (gp->phy_type == phy_serialink)
+ val &= ~PCS_SCTRL_LOOP;
+ else
+ val |= PCS_SCTRL_LOOP;
+ writel(val, gp->regs + PCS_SCTRL);
}
}
@@ -907,7 +1135,7 @@
{
u32 val;
- val = (TXDMA_CFG_BASE | (0x4ff << 10) | TXDMA_CFG_PMODE);
+ val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE);
writel(val, gp->regs + TXDMA_CFG);
writel(0, gp->regs + TXDMA_DBHI);
@@ -916,7 +1144,7 @@
writel(0, gp->regs + TXDMA_KICK);
val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
- ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+ ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_512);
writel(val, gp->regs + RXDMA_CFG);
writel(0, gp->regs + RXDMA_DBHI);
@@ -931,12 +1159,12 @@
writel(val, gp->regs + RXDMA_PTHRESH);
if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
- writel(((5 & RXDMA_BLANK_IPKTS) |
- ((8 << 12) & RXDMA_BLANK_ITIME)),
+ writel(((6 & RXDMA_BLANK_IPKTS) |
+ ((4 << 12) & RXDMA_BLANK_ITIME)),
gp->regs + RXDMA_BLANK);
else
- writel(((5 & RXDMA_BLANK_IPKTS) |
- ((4 << 12) & RXDMA_BLANK_ITIME)),
+ writel(((6 & RXDMA_BLANK_IPKTS) |
+ ((2 << 12) & RXDMA_BLANK_ITIME)),
gp->regs + RXDMA_BLANK);
}
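The blanking register batches receive interrupts: the chip holds the interrupt until either IPKTS packets have arrived or ITIME has elapsed since the first unserviced packet. This hunk raises the packet threshold from 5 to 6 and retunes the time values per bus speed; the time field counts bus-clock-derived ticks, which is why the 66 MHz value is double the 33 MHz one for about the same wall-clock delay. A back-of-envelope sketch, assuming (an assumption, not stated in this patch) one ITIME tick equals 2048 PCI clocks:

    /* Approximate RX blanking delay in microseconds; tick size assumed. */
    static unsigned int blank_usec(unsigned int itime, unsigned int pci_mhz)
    {
            return (itime * 2048) / pci_mhz;   /* clocks / (clocks per usec) */
    }

    /* blank_usec(4, 66) ~= 124   (new 66 MHz setting)
     * blank_usec(2, 33) ~= 124   (new 33 MHz setting)
     */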
@@ -947,7 +1175,8 @@
unsigned char *e = &gp->dev->dev_addr[0];
u32 rxcfg;
- if (gp->pdev->device == PCI_DEVICE_ID_SUN_GEM)
+ if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
+ gp->pdev->device == PCI_DEVICE_ID_SUN_GEM)
writel(0x1bf0, gp->regs + MAC_SNDPAUSE);
writel(0x00, gp->regs + MAC_IPG0);
@@ -955,7 +1184,7 @@
writel(0x04, gp->regs + MAC_IPG2);
writel(0x40, gp->regs + MAC_STIME);
writel(0x40, gp->regs + MAC_MINFSZ);
- writel(0x5ee, gp->regs + MAC_MAXFSZ);
+ writel(0x20000000 | (gp->dev->mtu + 18), gp->regs + MAC_MAXFSZ);
writel(0x07, gp->regs + MAC_PASIZE);
writel(0x04, gp->regs + MAC_JAMSIZE);
writel(0x10, gp->regs + MAC_ATTLIM);
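The replaced constant 0x5ee is 1518, i.e. the default 1500-byte MTU plus 14 bytes of Ethernet header plus 4 bytes of FCS; the new expression computes the same quantity from the live MTU so gem_change_mtu() actually takes effect:

    /* gp->dev->mtu + 18 == mtu + ETH_HLEN (14) + FCS (4);
     * for mtu 1500 that is 1518 == 0x5ee, matching the old constant.
     * The ORed-in 0x20000000 appears to land in the register's separate
     * max-burst-size field (an assumption from the sungem.h register
     * layout, not from this patch).
     */
    unsigned int max_frame = gp->dev->mtu + ETH_HLEN + 4;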
@@ -1097,7 +1326,7 @@
static int gem_open(struct net_device *dev)
{
- struct gem *gp = (struct gem *) dev->priv;
+ struct gem *gp = dev->priv;
unsigned long regs = gp->regs;
del_timer(&gp->link_timer);
@@ -1117,6 +1346,9 @@
{
struct gem *gp = dev->priv;
+ del_timer(&gp->link_timer);
+ gem_stop(gp, gp->regs);
+ gem_clean_rings(gp);
free_irq(gp->pdev->irq, (void *)dev);
return 0;
}
@@ -1244,14 +1476,10 @@
struct pci_dev *pdev = gp->pdev;
u32 mif_cfg = readl(gp->regs + MIF_CFG);
- if (pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM
-#if 0
- || pdev->device == PCI_DEVICE_ID_SUN_PPC_GEM
-#endif
- ) {
+ if (pdev->vendor == PCI_VENDOR_ID_SUN &&
+ pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) {
/* One of the MII PHYs _must_ be present
- * as these chip versions have no gigabit
- * PHY.
+ * as this chip has no gigabit PHY.
*/
if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
printk(KERN_ERR PFX "RIO GEM lacks MII phy, mif_cfg[%08x]\n",
@@ -1263,13 +1491,17 @@
/* Determine initial PHY interface type guess. MDIO1 is the
* external PHY and thus takes precedence over MDIO0.
*/
- if (mif_cfg & MIF_CFG_MDI1)
+ if (mif_cfg & MIF_CFG_MDI1) {
gp->phy_type = phy_mii_mdio1;
- else if (mif_cfg & MIF_CFG_MDI0)
+ mif_cfg |= MIF_CFG_PSELECT;
+ writel(mif_cfg, gp->regs + MIF_CFG);
+ } else if (mif_cfg & MIF_CFG_MDI0) {
gp->phy_type = phy_mii_mdio0;
- else
+ mif_cfg &= ~MIF_CFG_PSELECT;
+ writel(mif_cfg, gp->regs + MIF_CFG);
+ } else {
gp->phy_type = phy_serialink;
-
+ }
if (gp->phy_type == phy_mii_mdio1 ||
gp->phy_type == phy_mii_mdio0) {
int i;
@@ -1279,25 +1511,34 @@
if (phy_read(gp, PHY_CTRL) != 0xffff)
break;
}
+ if (i == 32) {
+ if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
+ printk(KERN_ERR PFX "RIO MII phy will not respond.\n");
+ return -1;
+ }
+ gp->phy_type = phy_serdes;
+ }
}
/* Fetch the FIFO configurations now too. */
gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
- if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
- if (gp->tx_fifo_sz != (9 * 1024) ||
- gp->rx_fifo_sz != (20 * 1024)) {
- printk(KERN_ERR PFX "GEM has bogus fifo sizes tx(%d) rx(%d)\n",
- gp->tx_fifo_sz, gp->rx_fifo_sz);
- return -1;
- }
- } else {
- if (gp->tx_fifo_sz != (2 * 1024) ||
- gp->rx_fifo_sz != (2 * 1024)) {
- printk(KERN_ERR PFX "RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
- gp->tx_fifo_sz, gp->rx_fifo_sz);
- return -1;
+ if (pdev->vendor == PCI_VENDOR_ID_SUN) {
+ if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
+ if (gp->tx_fifo_sz != (9 * 1024) ||
+ gp->rx_fifo_sz != (20 * 1024)) {
+ printk(KERN_ERR PFX "GEM has bogus fifo sizes tx(%d) rx(%d)\n",
+ gp->tx_fifo_sz, gp->rx_fifo_sz);
+ return -1;
+ }
+ } else {
+ if (gp->tx_fifo_sz != (2 * 1024) ||
+ gp->rx_fifo_sz != (2 * 1024)) {
+ printk(KERN_ERR PFX "RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
+ gp->tx_fifo_sz, gp->rx_fifo_sz);
+ return -1;
+ }
}
}
@@ -1309,23 +1550,64 @@
if (gp->rx_fifo_sz <= (2 * 1024)) {
gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
} else {
- int off = ((gp->rx_fifo_sz * 3) / 4);
- int on = off - (1 * 1024);
+ int off = (gp->rx_fifo_sz - (5 * 1024));
+ int on = off - 1024;
gp->rx_pause_off = off;
gp->rx_pause_on = on;
}
{
- u32 bifcfg = readl(gp->regs + GREG_BIFCFG);
+ u32 cfg = readl(gp->regs + GREG_BIFCFG);
+
+ cfg |= GREG_BIFCFG_B64DIS;
+ writel(cfg, gp->regs + GREG_BIFCFG);
- bifcfg |= GREG_BIFCFG_B64DIS;
- writel(bifcfg, gp->regs + GREG_BIFCFG);
+ cfg = GREG_CFG_IBURST;
+ cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
+ cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
+ writel(cfg, gp->regs + GREG_CFG);
}
return 0;
}
+static int __devinit gem_get_device_address(struct gem *gp)
+{
+ struct net_device *dev = gp->dev;
+ struct pci_dev *pdev = gp->pdev;
+
+#ifdef __sparc__
+ struct pcidev_cookie *pcp = pdev->sysdata;
+ int node = -1;
+
+ if (pcp != NULL) {
+ node = pcp->prom_node;
+ if (prom_getproplen(node, "local-mac-address") == 6)
+ prom_getproperty(node, "local-mac-address",
+ dev->dev_addr, 6);
+ else
+ node = -1;
+ }
+ if (node == -1)
+ memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+#endif
+#ifdef __powerpc__
+ struct device_node *gem_node;
+ unsigned char *addr;
+
+ gem_node = pci_device_to_OF_node(pdev);
+ addr = get_property(gem_node, "local-mac-address", NULL);
+ if (addr == NULL) {
+ printk("\n");
+ printk(KERN_ERR "%s: can't get mac-address\n", dev->name);
+ return -1;
+ }
+ memcpy(dev->dev_addr, addr, MAX_ADDR_LEN);
+#endif
+ return 0;
+}
+
static int __devinit gem_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -1338,6 +1620,9 @@
if (gem_version_printed++ == 0)
printk(KERN_INFO "%s", version);
+ pci_enable_device(pdev);
+ pci_set_master(pdev);
+
gemreg_base = pci_resource_start(pdev, 0);
gemreg_len = pci_resource_len(pdev, 0);
@@ -1352,6 +1637,7 @@
printk(KERN_ERR PFX "Etherdev init failed, aborting.\n");
return -ENOMEM;
}
+ SET_MODULE_OWNER(dev);
if (!request_mem_region(gemreg_base, gemreg_len, dev->name)) {
printk(KERN_ERR PFX "MMIO resource (0x%lx@0x%lx) unavailable, "
@@ -1372,6 +1658,7 @@
gp->pdev = pdev;
dev->base_addr = (long) pdev;
+ gp->dev = dev;
spin_lock_init(&gp->lock);
@@ -1402,9 +1689,8 @@
printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet ",
dev->name);
-#ifdef __sparc__
- memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
-#endif
+ if (gem_get_device_address(gp))
+ goto err_out_iounmap;
for (i = 0; i < 6; i++)
printk("%2.2x%c", dev->dev_addr[i],
@@ -1415,15 +1701,20 @@
gp->link_timer.function = gem_link_timer;
gp->link_timer.data = (unsigned long) gp;
- gp->dev = dev;
dev->open = gem_open;
dev->stop = gem_close;
dev->hard_start_xmit = gem_start_xmit;
dev->get_stats = gem_get_stats;
dev->set_multicast_list = gem_set_multicast;
dev->do_ioctl = gem_ioctl;
+ dev->tx_timeout = gem_tx_timeout;
+ dev->watchdog_timeo = 5 * HZ;
+ dev->change_mtu = gem_change_mtu;
dev->irq = pdev->irq;
dev->dma = 0;
+
+ /* GEM can do it all... */
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
return 0;