X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fnet%2Fsunqe.c;h=e811331d4608a86abc8514848cdacddf73b83123;hb=697c269610179051cf19e45566fee3dcebbb1e93;hp=1b65ae8a1c7c6e3ef778d727fc6f0be83c40c9b3;hpb=2f41fc806434f8466bb361570589a3f6099ca65d;p=linux-2.6-omap-h63xx.git

diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index 1b65ae8a1c7..e811331d460 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -260,31 +260,31 @@ static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
 
 	if (qe_status & CREG_STAT_EDEFER) {
 		printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
-		qep->net_stats.tx_errors++;
+		dev->stats.tx_errors++;
 	}
 
 	if (qe_status & CREG_STAT_CLOSS) {
 		printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
-		qep->net_stats.tx_errors++;
-		qep->net_stats.tx_carrier_errors++;
+		dev->stats.tx_errors++;
+		dev->stats.tx_carrier_errors++;
 	}
 
 	if (qe_status & CREG_STAT_ERETRIES) {
 		printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
-		qep->net_stats.tx_errors++;
+		dev->stats.tx_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_LCOLL) {
 		printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
-		qep->net_stats.tx_errors++;
-		qep->net_stats.collisions++;
+		dev->stats.tx_errors++;
+		dev->stats.collisions++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_FUFLOW) {
 		printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
-		qep->net_stats.tx_errors++;
+		dev->stats.tx_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
@@ -297,104 +297,104 @@ static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
 	}
 
 	if (qe_status & CREG_STAT_CCOFLOW) {
-		qep->net_stats.tx_errors += 256;
-		qep->net_stats.collisions += 256;
+		dev->stats.tx_errors += 256;
+		dev->stats.collisions += 256;
 	}
 
 	if (qe_status & CREG_STAT_TXDERROR) {
 		printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
-		qep->net_stats.tx_errors++;
-		qep->net_stats.tx_aborted_errors++;
+		dev->stats.tx_errors++;
+		dev->stats.tx_aborted_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_TXLERR) {
 		printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
-		qep->net_stats.tx_errors++;
+		dev->stats.tx_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_TXPERR) {
 		printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
-		qep->net_stats.tx_errors++;
-		qep->net_stats.tx_aborted_errors++;
+		dev->stats.tx_errors++;
+		dev->stats.tx_aborted_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_TXSERR) {
 		printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
-		qep->net_stats.tx_errors++;
-		qep->net_stats.tx_aborted_errors++;
+		dev->stats.tx_errors++;
+		dev->stats.tx_aborted_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_RCCOFLOW) {
-		qep->net_stats.rx_errors += 256;
-		qep->net_stats.collisions += 256;
+		dev->stats.rx_errors += 256;
+		dev->stats.collisions += 256;
 	}
 
 	if (qe_status & CREG_STAT_RUOFLOW) {
-		qep->net_stats.rx_errors += 256;
-		qep->net_stats.rx_over_errors += 256;
+		dev->stats.rx_errors += 256;
+		dev->stats.rx_over_errors += 256;
 	}
 
 	if (qe_status & CREG_STAT_MCOFLOW) {
-		qep->net_stats.rx_errors += 256;
-		qep->net_stats.rx_missed_errors += 256;
+		dev->stats.rx_errors += 256;
+		dev->stats.rx_missed_errors += 256;
 	}
 
 	if (qe_status & CREG_STAT_RXFOFLOW) {
 		printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
-		qep->net_stats.rx_errors++;
-		qep->net_stats.rx_over_errors++;
+		dev->stats.rx_errors++;
+		dev->stats.rx_over_errors++;
 	}
 
 	if (qe_status & CREG_STAT_RLCOLL) {
 		printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
-		qep->net_stats.rx_errors++;
-		qep->net_stats.collisions++;
+		dev->stats.rx_errors++;
+		dev->stats.collisions++;
 	}
 
 	if (qe_status & CREG_STAT_FCOFLOW) {
-		qep->net_stats.rx_errors += 256;
-		qep->net_stats.rx_frame_errors += 256;
+		dev->stats.rx_errors += 256;
+		dev->stats.rx_frame_errors += 256;
 	}
 
 	if (qe_status & CREG_STAT_CECOFLOW) {
-		qep->net_stats.rx_errors += 256;
-		qep->net_stats.rx_crc_errors += 256;
+		dev->stats.rx_errors += 256;
+		dev->stats.rx_crc_errors += 256;
 	}
 
 	if (qe_status & CREG_STAT_RXDROP) {
 		printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
-		qep->net_stats.rx_errors++;
-		qep->net_stats.rx_dropped++;
-		qep->net_stats.rx_missed_errors++;
+		dev->stats.rx_errors++;
+		dev->stats.rx_dropped++;
+		dev->stats.rx_missed_errors++;
 	}
 
 	if (qe_status & CREG_STAT_RXSMALL) {
 		printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
-		qep->net_stats.rx_errors++;
-		qep->net_stats.rx_length_errors++;
+		dev->stats.rx_errors++;
+		dev->stats.rx_length_errors++;
 	}
 
 	if (qe_status & CREG_STAT_RXLERR) {
 		printk(KERN_ERR "%s: Receive late error.\n", dev->name);
-		qep->net_stats.rx_errors++;
+		dev->stats.rx_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_RXPERR) {
 		printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
-		qep->net_stats.rx_errors++;
-		qep->net_stats.rx_missed_errors++;
+		dev->stats.rx_errors++;
+		dev->stats.rx_missed_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_RXSERR) {
 		printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
-		qep->net_stats.rx_errors++;
-		qep->net_stats.rx_missed_errors++;
+		dev->stats.rx_errors++;
+		dev->stats.rx_missed_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
@@ -409,6 +409,7 @@ static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
 static void qe_rx(struct sunqe *qep)
 {
 	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
+	struct net_device *dev = qep->dev;
 	struct qe_rxd *this;
 	struct sunqe_buffers *qbufs = qep->buffers;
 	__u32 qbufs_dvma = qep->buffers_dvma;
@@ -428,14 +429,14 @@ static void qe_rx(struct sunqe *qep)
 
 		/* Check for errors. */
 		if (len < ETH_ZLEN) {
-			qep->net_stats.rx_errors++;
-			qep->net_stats.rx_length_errors++;
-			qep->net_stats.rx_dropped++;
+			dev->stats.rx_errors++;
+			dev->stats.rx_length_errors++;
+			dev->stats.rx_dropped++;
 		} else {
 			skb = dev_alloc_skb(len + 2);
 			if (skb == NULL) {
 				drops++;
-				qep->net_stats.rx_dropped++;
+				dev->stats.rx_dropped++;
 			} else {
 				skb_reserve(skb, 2);
 				skb_put(skb, len);
@@ -444,8 +445,8 @@ static void qe_rx(struct sunqe *qep)
 				skb->protocol = eth_type_trans(skb, qep->dev);
 				netif_rx(skb);
 				qep->dev->last_rx = jiffies;
-				qep->net_stats.rx_packets++;
-				qep->net_stats.rx_bytes += len;
+				dev->stats.rx_packets++;
+				dev->stats.rx_bytes += len;
 			}
 		}
 		end_rxd->rx_addr = this_qbuf_dvma;
@@ -603,8 +604,8 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dev->trans_start = jiffies;
 	sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);
 
-	qep->net_stats.tx_packets++;
-	qep->net_stats.tx_bytes += len;
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += len;
 
 	if (TX_BUFFS_AVAIL(qep) <= 0) {
 		/* Halt the net queue and enable tx interrupts.
@@ -622,13 +623,6 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return 0;
 }
 
-static struct net_device_stats *qe_get_stats(struct net_device *dev)
-{
-	struct sunqe *qep = (struct sunqe *) dev->priv;
-
-	return &qep->net_stats;
-}
-
 static void qe_set_multicast(struct net_device *dev)
 {
 	struct sunqe *qep = (struct sunqe *) dev->priv;
@@ -753,7 +747,7 @@ static inline void qec_init_once(struct sunqec *qecp, struct sbus_dev *qsdev)
 		    qecp->gregs + GLOB_RSIZE);
 }
 
-static u8 __init qec_get_burst(struct device_node *dp)
+static u8 __devinit qec_get_burst(struct device_node *dp)
 {
 	u8 bsizes, bsizes_more;
 
@@ -773,7 +767,7 @@ static u8 __init qec_get_burst(struct device_node *dp)
 	return bsizes;
 }
 
-static struct sunqec * __init get_qec(struct sbus_dev *child_sdev)
+static struct sunqec * __devinit get_qec(struct sbus_dev *child_sdev)
 {
 	struct sbus_dev *qec_sdev = child_sdev->parent;
 	struct sunqec *qecp;
@@ -829,7 +823,7 @@ fail:
 	return NULL;
 }
 
-static int __init qec_ether_init(struct sbus_dev *sdev)
+static int __devinit qec_ether_init(struct sbus_dev *sdev)
 {
 	static unsigned version_printed;
 	struct net_device *dev;
@@ -898,13 +892,11 @@ static int __init qec_ether_init(struct sbus_dev *sdev)
 	/* Stop this QE. */
 	qe_stop(qe);
 
-	SET_MODULE_OWNER(dev);
 	SET_NETDEV_DEV(dev, &sdev->ofdev.dev);
 
 	dev->open = qe_open;
 	dev->stop = qe_close;
 	dev->hard_start_xmit = qe_start_xmit;
-	dev->get_stats = qe_get_stats;
 	dev->set_multicast_list = qe_set_multicast;
 	dev->tx_timeout = qe_tx_timeout;
 	dev->watchdog_timeo = 5*HZ;