X-Git-Url: http://pilppa.org/gitweb/?a=blobdiff_plain;f=drivers%2Fnet%2Fgianfar.c;h=7fcc3af3f86bd2ad696145520b71e6d6122b1288;hb=41efea5a34caa76c11e56458db21eb259d5c6384;hp=3e611a69df132b7a57ddec797a1d785a849383f0;hpb=5a5efed4817ef931f648b118aeb9222e53122d2b;p=linux-2.6-omap-h63xx.git

diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 3e611a69df1..7fcc3af3f86 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -304,8 +304,9 @@ static int gfar_probe(struct of_device *ofdev,
 	u32 tempval;
 	struct net_device *dev = NULL;
 	struct gfar_private *priv = NULL;
-	int err = 0;
 	DECLARE_MAC_BUF(mac);
+	int err = 0;
+	int len_devname;
 
 	/* Create an ethernet device instance */
 	dev = alloc_etherdev(sizeof (*priv));
@@ -368,7 +369,7 @@ static int gfar_probe(struct of_device *ofdev,
 
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
 		priv->rx_csum_enable = 1;
-		dev->features |= NETIF_F_IP_CSUM;
+		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
 	} else
 		priv->rx_csum_enable = 0;
 
@@ -426,6 +427,7 @@ static int gfar_probe(struct of_device *ofdev,
 	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
 	priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
 	priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
+	priv->num_txbdfree = DEFAULT_TX_RING_SIZE;
 
 	priv->txcoalescing = DEFAULT_TX_COALESCE;
 	priv->txic = DEFAULT_TXIC;
@@ -446,6 +448,23 @@ static int gfar_probe(struct of_device *ofdev,
 		goto register_fail;
 	}
 
+	/* fill out IRQ number and name fields */
+	len_devname = strlen(dev->name);
+	strncpy(&priv->int_name_tx[0], dev->name, len_devname);
+	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+		strncpy(&priv->int_name_tx[len_devname],
+			"_tx", sizeof("_tx") + 1);
+
+		strncpy(&priv->int_name_rx[0], dev->name, len_devname);
+		strncpy(&priv->int_name_rx[len_devname],
+			"_rx", sizeof("_rx") + 1);
+
+		strncpy(&priv->int_name_er[0], dev->name, len_devname);
+		strncpy(&priv->int_name_er[len_devname],
+			"_er", sizeof("_er") + 1);
+	} else
+		priv->int_name_tx[len_devname] = '\0';
+
 	/* Create all the sysfs files */
 	gfar_init_sysfs(dev);
 
@@ -819,22 +838,26 @@ static void free_skb_resources(struct gfar_private *priv)
 {
 	struct rxbd8 *rxbdp;
 	struct txbd8 *txbdp;
-	int i;
+	int i, j;
 
 	/* Go through all the buffer descriptors and free their data buffers */
 	txbdp = priv->tx_bd_base;
 
 	for (i = 0; i < priv->tx_ring_size; i++) {
+		if (!priv->tx_skbuff[i])
+			continue;
 
-		if (priv->tx_skbuff[i]) {
-			dma_unmap_single(&priv->dev->dev, txbdp->bufPtr,
-					txbdp->length,
-					DMA_TO_DEVICE);
-			dev_kfree_skb_any(priv->tx_skbuff[i]);
-			priv->tx_skbuff[i] = NULL;
+		dma_unmap_single(&priv->dev->dev, txbdp->bufPtr,
+				txbdp->length, DMA_TO_DEVICE);
+		txbdp->lstatus = 0;
+		for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) {
+			txbdp++;
+			dma_unmap_page(&priv->dev->dev, txbdp->bufPtr,
+					txbdp->length, DMA_TO_DEVICE);
 		}
-
 		txbdp++;
+		dev_kfree_skb_any(priv->tx_skbuff[i]);
+		priv->tx_skbuff[i] = NULL;
 	}
 
 	kfree(priv->tx_skbuff);
@@ -967,6 +990,7 @@ int startup_gfar(struct net_device *dev)
 		priv->rx_skbuff[i] = NULL;
 
 	/* Initialize some variables in our dev structure */
+	priv->num_txbdfree = priv->tx_ring_size;
 	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
 	priv->cur_rx = priv->rx_bd_base;
 	priv->skb_curtx = priv->skb_dirtytx = 0;
@@ -1014,7 +1038,7 @@
 	/* Install our interrupt handlers for Error,
 	 * Transmit, and Receive */
 	if (request_irq(priv->interruptError, gfar_error,
-			0, "enet_error", dev) < 0) {
+			0, priv->int_name_er, dev) < 0) {
 		if (netif_msg_intr(priv))
 			printk(KERN_ERR "%s: Can't get IRQ %d\n",
 				dev->name, priv->interruptError);
@@ -1024,7 +1048,7 @@
 	}
 
 	if (request_irq(priv->interruptTransmit, gfar_transmit,
-			0, "enet_tx", dev) < 0) {
+			0, priv->int_name_tx, dev) < 0) {
 		if (netif_msg_intr(priv))
 			printk(KERN_ERR "%s: Can't get IRQ %d\n",
 				dev->name, priv->interruptTransmit);
@@ -1035,7 +1059,7 @@
 	}
 
 	if (request_irq(priv->interruptReceive, gfar_receive,
-			0, "enet_rx", dev) < 0) {
+			0, priv->int_name_rx, dev) < 0) {
 		if (netif_msg_intr(priv))
 			printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
 				dev->name, priv->interruptReceive);
@@ -1045,10 +1069,10 @@
 		}
 	} else {
 		if (request_irq(priv->interruptTransmit, gfar_interrupt,
-				0, "gfar_interrupt", dev) < 0) {
+				0, priv->int_name_tx, dev) < 0) {
 			if (netif_msg_intr(priv))
 				printk(KERN_ERR "%s: Can't get IRQ %d\n",
-					dev->name, priv->interruptError);
+					dev->name, priv->interruptTransmit);
 			err = -1;
 			goto err_irq_fail;
 		}
@@ -1207,28 +1231,84 @@ void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
 	fcb->vlctl = vlan_tx_tag_get(skb);
 }
 
+static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
+			       struct txbd8 *base, int ring_size)
+{
+	struct txbd8 *new_bd = bdp + stride;
+
+	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
+}
+
+static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
+		int ring_size)
+{
+	return skip_txbd(bdp, 1, base, ring_size);
+}
+
 /* This is called by the kernel when a frame is ready for transmission. */
 /* It is pointed to by the dev->hard_start_xmit function pointer */
 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 	struct txfcb *fcb = NULL;
-	struct txbd8 *txbdp, *base;
+	struct txbd8 *txbdp, *txbdp_start, *base;
 	u32 lstatus;
+	int i;
+	u32 bufaddr;
 	unsigned long flags;
+	unsigned int nr_frags, length;
+
+	base = priv->tx_bd_base;
+
+	/* total number of fragments in the SKB */
+	nr_frags = skb_shinfo(skb)->nr_frags;
+
+	spin_lock_irqsave(&priv->txlock, flags);
+
+	/* check if there is space to queue this packet */
+	if (nr_frags > priv->num_txbdfree) {
+		/* no space, stop the queue */
+		netif_stop_queue(dev);
+		dev->stats.tx_fifo_errors++;
+		spin_unlock_irqrestore(&priv->txlock, flags);
+		return NETDEV_TX_BUSY;
+	}
 
 	/* Update transmit stats */
 	dev->stats.tx_bytes += skb->len;
 
-	/* Lock priv now */
-	spin_lock_irqsave(&priv->txlock, flags);
+	txbdp = txbdp_start = priv->cur_tx;
 
-	/* Point at the first free tx descriptor */
-	txbdp = priv->cur_tx;
-	base = priv->tx_bd_base;
+	if (nr_frags == 0) {
+		lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+	} else {
+		/* Place the fragment addresses and lengths into the TxBDs */
+		for (i = 0; i < nr_frags; i++) {
+			/* Point at the next BD, wrapping as needed */
+			txbdp = next_txbd(txbdp, base, priv->tx_ring_size);
+
+			length = skb_shinfo(skb)->frags[i].size;
 
-	/* Clear all but the WRAP status flags */
-	lstatus = txbdp->lstatus & BD_LFLAG(TXBD_WRAP);
+			lstatus = txbdp->lstatus | length |
+				BD_LFLAG(TXBD_READY);
+
+			/* Handle the last BD specially */
+			if (i == nr_frags - 1)
+				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+
+			bufaddr = dma_map_page(&dev->dev,
+					skb_shinfo(skb)->frags[i].page,
+					skb_shinfo(skb)->frags[i].page_offset,
+					length,
+					DMA_TO_DEVICE);
+
+			/* set the TxBD length and buffer pointer */
+			txbdp->bufPtr = bufaddr;
+			txbdp->lstatus = lstatus;
+		}
+
+		lstatus = txbdp_start->lstatus;
+	}
 
 	/* Set up checksumming */
 	if (CHECKSUM_PARTIAL == skb->ip_summed) {
@@ -1246,48 +1326,45 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		gfar_tx_vlan(skb, fcb);
 	}
 
-	/* Set buffer length and pointer */
-	txbdp->bufPtr = dma_map_single(&dev->dev, skb->data,
-			skb->len, DMA_TO_DEVICE);
-
-	/* Save the skb pointer so we can free it later */
+	/* setup the TxBD length and buffer pointer for the first BD */
 	priv->tx_skbuff[priv->skb_curtx] = skb;
+	txbdp_start->bufPtr = dma_map_single(&dev->dev, skb->data,
+			skb_headlen(skb), DMA_TO_DEVICE);
 
-	/* Update the current skb pointer (wrapping if this was the last) */
-	priv->skb_curtx =
-	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
-
-	/* Flag the BD as ready, interrupt-causing, last, and in need of CRC */
-	lstatus |=
-	    BD_LFLAG(TXBD_READY | TXBD_LAST | TXBD_CRC | TXBD_INTERRUPT) |
-	    skb->len;
+	lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
 
-	dev->trans_start = jiffies;
-
-	/* The powerpc-specific eieio() is used, as wmb() has too strong
+	/*
+	 * The powerpc-specific eieio() is used, as wmb() has too strong
 	 * semantics (it requires synchronization between cacheable and
 	 * uncacheable mappings, which eieio doesn't provide and which we
 	 * don't need), thus requiring a more expensive sync instruction.  At
 	 * some point, the set of architecture-independent barrier functions
 	 * should be expanded to include weaker barriers.
 	 */
-
 	eieio();
-	txbdp->lstatus = lstatus;
 
-	txbdp = next_bd(txbdp, base, priv->tx_ring_size);
+	txbdp_start->lstatus = lstatus;
+
+	/* Update the current skb pointer to the next entry we will use
+	 * (wrapping if necessary) */
+	priv->skb_curtx = (priv->skb_curtx + 1) &
+		TX_RING_MOD_MASK(priv->tx_ring_size);
+
+	priv->cur_tx = next_txbd(txbdp, base, priv->tx_ring_size);
+
+	/* reduce TxBD free count */
+	priv->num_txbdfree -= (nr_frags + 1);
+
+	dev->trans_start = jiffies;
 
 	/* If the next BD still needs to be cleaned up, then the bds
 	   are full.  We need to tell the kernel to stop sending us stuff. */
-	if (txbdp == priv->dirty_tx) {
+	if (!priv->num_txbdfree) {
 		netif_stop_queue(dev);
-
 		dev->stats.tx_fifo_errors++;
 	}
 
-	/* Update the current txbd to the next one */
-	priv->cur_tx = txbdp;
-
 	/* Tell the DMA to go go go */
 	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
 
@@ -1461,79 +1538,85 @@ static void gfar_timeout(struct net_device *dev)
 /* Interrupt Handler for Transmit complete */
 static int gfar_clean_tx_ring(struct net_device *dev)
 {
-	struct txbd8 *bdp, *base;
 	struct gfar_private *priv = netdev_priv(dev);
+	struct txbd8 *bdp;
+	struct txbd8 *lbdp = NULL;
+	struct txbd8 *base = priv->tx_bd_base;
+	struct sk_buff *skb;
+	int skb_dirtytx;
+	int tx_ring_size = priv->tx_ring_size;
+	int frags = 0;
+	int i;
 	int howmany = 0;
+	u32 lstatus;
 
 	bdp = priv->dirty_tx;
-	base = priv->tx_bd_base;
+	skb_dirtytx = priv->skb_dirtytx;
 
-	while ((bdp->status & TXBD_READY) == 0) {
-		/* If dirty_tx and cur_tx are the same, then either the */
-		/* ring is empty or full now (it could only be full in the beginning, */
-		/* obviously).  If it is empty, we are done. */
-		if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
-			break;
+	while ((skb = priv->tx_skbuff[skb_dirtytx])) {
+		frags = skb_shinfo(skb)->nr_frags;
+		lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
 
-		howmany++;
+		lstatus = lbdp->lstatus;
 
-		/* Deferred means some collisions occurred during transmit, */
-		/* but we eventually sent the packet. */
-		if (bdp->status & TXBD_DEF)
-			dev->stats.collisions++;
+		/* Only clean completed frames */
+		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
+				(lstatus & BD_LENGTH_MASK))
+			break;
 
-		/* Unmap the DMA memory */
-		dma_unmap_single(&priv->dev->dev, bdp->bufPtr,
-				bdp->length, DMA_TO_DEVICE);
+		dma_unmap_single(&dev->dev,
+				bdp->bufPtr,
+				bdp->length,
+				DMA_TO_DEVICE);
 
-		/* Free the sk buffer associated with this TxBD */
-		dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
+		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+		bdp = next_txbd(bdp, base, tx_ring_size);
 
-		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
-		priv->skb_dirtytx =
-		    (priv->skb_dirtytx +
-		     1) & TX_RING_MOD_MASK(priv->tx_ring_size);
+		for (i = 0; i < frags; i++) {
+			dma_unmap_page(&dev->dev,
+					bdp->bufPtr,
+					bdp->length,
+					DMA_TO_DEVICE);
+			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+			bdp = next_txbd(bdp, base, tx_ring_size);
+		}
 
-		/* Clean BD length for empty detection */
-		bdp->length = 0;
+		dev_kfree_skb_any(skb);
+		priv->tx_skbuff[skb_dirtytx] = NULL;
 
-		bdp = next_bd(bdp, base, priv->tx_ring_size);
+		skb_dirtytx = (skb_dirtytx + 1) &
+			TX_RING_MOD_MASK(tx_ring_size);
 
-		/* Move dirty_tx to be the next bd */
-		priv->dirty_tx = bdp;
+		howmany++;
+		priv->num_txbdfree += frags + 1;
+	}
 
-		/* We freed a buffer, so now we can restart transmission */
-		if (netif_queue_stopped(dev))
-			netif_wake_queue(dev);
-	} /* while ((bdp->status & TXBD_READY) == 0) */
+	/* If we freed a buffer, we can restart transmission, if necessary */
+	if (netif_queue_stopped(dev) && priv->num_txbdfree)
+		netif_wake_queue(dev);
+
+	/* Update dirty indicators */
+	priv->skb_dirtytx = skb_dirtytx;
+	priv->dirty_tx = bdp;
 
 	dev->stats.tx_packets += howmany;
 
 	return howmany;
 }
 
-/* Interrupt Handler for Transmit complete */
-static irqreturn_t gfar_transmit(int irq, void *dev_id)
+static void gfar_schedule_cleanup(struct net_device *dev)
 {
-	struct net_device *dev = (struct net_device *) dev_id;
 	struct gfar_private *priv = netdev_priv(dev);
-
-	/* Clear IEVENT */
-	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
-
-	/* Lock priv */
-	spin_lock(&priv->txlock);
-
-	gfar_clean_tx_ring(dev);
-
-	/* If we are coalescing the interrupts, reset the timer */
-	/* Otherwise, clear it */
-	if (likely(priv->txcoalescing)) {
-		gfar_write(&priv->regs->txic, 0);
-		gfar_write(&priv->regs->txic, priv->txic);
+	if (netif_rx_schedule_prep(&priv->napi)) {
+		gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
+		__netif_rx_schedule(&priv->napi);
 	}
+}
 
-	spin_unlock(&priv->txlock);
-
+/* Interrupt Handler for Transmit complete */
+static irqreturn_t gfar_transmit(int irq, void *dev_id)
+{
+	gfar_schedule_cleanup((struct net_device *)dev_id);
 	return IRQ_HANDLED;
 }
 
@@ -1620,28 +1703,7 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
 
 irqreturn_t gfar_receive(int irq, void *dev_id)
 {
-	struct net_device *dev = (struct net_device *) dev_id;
-	struct gfar_private *priv = netdev_priv(dev);
-	u32 tempval;
-
-	/* support NAPI */
-	/* Clear IEVENT, so interrupts aren't called again
-	 * because of the packets that have already arrived */
-	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
-
-	if (netif_rx_schedule_prep(dev, &priv->napi)) {
-		tempval = gfar_read(&priv->regs->imask);
-		tempval &= IMASK_RTX_DISABLED;
-		gfar_write(&priv->regs->imask, tempval);
-
-		__netif_rx_schedule(dev, &priv->napi);
-	} else {
-		if (netif_msg_rx_err(priv))
-			printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
-				dev->name, gfar_read(&priv->regs->ievent),
-				gfar_read(&priv->regs->imask));
-	}
-
+	gfar_schedule_cleanup((struct net_device *)dev_id);
 	return IRQ_HANDLED;
 }
 
@@ -1732,8 +1794,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 
 		if (unlikely(!newskb))
 			newskb = skb;
-
-		if (skb)
+		else if (skb)
 			dev_kfree_skb_any(skb);
 	} else {
 		/* Increment the number of packets */
@@ -1782,19 +1843,27 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 {
 	struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
 	struct net_device *dev = priv->dev;
-	int howmany;
+	int tx_cleaned = 0;
+	int rx_cleaned = 0;
 	unsigned long flags;
 
+	/* Clear IEVENT, so interrupts aren't called again
+	 * because of the packets that have already arrived */
+	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
+
 	/* If we fail to get the lock, don't bother with the TX BDs */
 	if (spin_trylock_irqsave(&priv->txlock, flags)) {
-		gfar_clean_tx_ring(dev);
+		tx_cleaned = gfar_clean_tx_ring(dev);
 		spin_unlock_irqrestore(&priv->txlock, flags);
 	}
 
-	howmany = gfar_clean_rx_ring(dev, budget);
+	rx_cleaned = gfar_clean_rx_ring(dev, budget);
+
+	if (tx_cleaned)
+		return budget;
 
-	if (howmany < budget) {
-		netif_rx_complete(dev, napi);
+	if (rx_cleaned < budget) {
+		netif_rx_complete(napi);
 
 		/* Clear the halt bit in RSTAT */
 		gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
@@ -1807,9 +1876,13 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 			gfar_write(&priv->regs->rxic, 0);
 			gfar_write(&priv->regs->rxic, priv->rxic);
 		}
+		if (likely(priv->txcoalescing)) {
+			gfar_write(&priv->regs->txic, 0);
+			gfar_write(&priv->regs->txic, priv->txic);
+		}
 	}
 
-	return howmany;
+	return rx_cleaned;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1900,6 +1973,8 @@ static void adjust_link(struct net_device *dev)
 			case 1000:
 				tempval =
 				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
+
+				ecntrl &= ~(ECNTRL_R100);
 				break;
 			case 100:
 			case 10: