u32 tempval;
struct net_device *dev = NULL;
struct gfar_private *priv = NULL;
- int err = 0;
DECLARE_MAC_BUF(mac);
+ int err = 0;
+ int len_devname;
/* Create an ethernet device instance */
dev = alloc_etherdev(sizeof (*priv));
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
priv->rx_csum_enable = 1;
- dev->features |= NETIF_F_IP_CSUM;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
} else
priv->rx_csum_enable = 0;
priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
+ priv->num_txbdfree = DEFAULT_TX_RING_SIZE;
priv->txcoalescing = DEFAULT_TX_COALESCE;
priv->txic = DEFAULT_TXIC;
goto register_fail;
}
+ /* fill out IRQ number and name fields */
+ len_devname = strlen(dev->name);
+ strncpy(&priv->int_name_tx[0], dev->name, len_devname);
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+ strncpy(&priv->int_name_tx[len_devname],
+ "_tx", sizeof("_tx") + 1);
+
+ strncpy(&priv->int_name_rx[0], dev->name, len_devname);
+ strncpy(&priv->int_name_rx[len_devname],
+ "_rx", sizeof("_rx") + 1);
+
+ strncpy(&priv->int_name_er[0], dev->name, len_devname);
+ strncpy(&priv->int_name_er[len_devname],
+ "_er", sizeof("_er") + 1);
+ } else
+ priv->int_name_tx[len_devname] = '\0';
+
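
For a device named "eth0", the code above yields the IRQ names "eth0_tx", "eth0_rx" and "eth0_er" (or plain "eth0" on single-interrupt parts). A minimal user-space sketch of the same two-step strncpy layout; the buffer size is an assumption for illustration, not quoted from gianfar.h:

	#include <stdio.h>
	#include <string.h>

	#define INT_NAME_MAX 20			/* assumed: IFNAMSIZ + sizeof("_xx") */

	int main(void)
	{
		char int_name_tx[INT_NAME_MAX];
		const char *devname = "eth0";	/* hypothetical interface name */
		size_t len = strlen(devname);

		/* copy the base name, then append the suffix including its NUL */
		strncpy(int_name_tx, devname, len);
		strncpy(&int_name_tx[len], "_tx", sizeof("_tx"));

		printf("%s\n", int_name_tx);	/* prints "eth0_tx" */
		return 0;
	}
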
/* Create all the sysfs files */
gfar_init_sysfs(dev);
{
struct rxbd8 *rxbdp;
struct txbd8 *txbdp;
- int i;
+ int i, j;
/* Go through all the buffer descriptors and free their data buffers */
txbdp = priv->tx_bd_base;
for (i = 0; i < priv->tx_ring_size; i++) {
+ if (!priv->tx_skbuff[i])
+ continue;
- if (priv->tx_skbuff[i]) {
- dma_unmap_single(&priv->dev->dev, txbdp->bufPtr,
- txbdp->length,
- DMA_TO_DEVICE);
- dev_kfree_skb_any(priv->tx_skbuff[i]);
- priv->tx_skbuff[i] = NULL;
+ dma_unmap_single(&priv->dev->dev, txbdp->bufPtr,
+ txbdp->length, DMA_TO_DEVICE);
+ txbdp->lstatus = 0;
+ for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) {
+ txbdp++;
+ dma_unmap_page(&priv->dev->dev, txbdp->bufPtr,
+ txbdp->length, DMA_TO_DEVICE);
}
-
txbdp++;
+ dev_kfree_skb_any(priv->tx_skbuff[i]);
+ priv->tx_skbuff[i] = NULL;
}
kfree(priv->tx_skbuff);
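
Note the bookkeeping the new loop has to do: a NULL tx_skbuff slot owns no descriptors, while a queued skb owns a head BD plus one BD per page fragment, so txbdp only advances, by nr_frags + 1, for occupied slots. A user-space sketch of that walk, with stand-in types rather than the driver's:

	struct bd { unsigned int bufptr, length; };
	struct fake_skb { int nr_frags; };

	/* Walk the tx ring the way the new loop does: a NULL slot owns no
	 * descriptors, a queued skb owns one head BD plus one BD per page
	 * fragment.  Returns the number of descriptors released. */
	static int release_ring(struct bd *bdp, struct fake_skb **skbs, int ring_size)
	{
		int i, j, freed = 0;

		for (i = 0; i < ring_size; i++) {
			if (!skbs[i])
				continue;
			for (j = 0; j < skbs[i]->nr_frags; j++)
				bdp++;			/* fragment descriptors */
			bdp++;				/* past the head descriptor */
			freed += skbs[i]->nr_frags + 1;
			skbs[i] = NULL;
		}
		return freed;
	}

	int main(void)
	{
		struct bd ring[8] = { { 0, 0 } };
		struct fake_skb one = { 2 }, two = { 0 };
		struct fake_skb *skbs[8] = { &one, &two };

		/* one skb with 2 frags (3 BDs) + one linear skb (1 BD) = 4 */
		return release_ring(ring, skbs, 8) == 4 ? 0 : 1;
	}
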
priv->rx_skbuff[i] = NULL;
/* Initialize some variables in our dev structure */
+ priv->num_txbdfree = priv->tx_ring_size;
priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
priv->cur_rx = priv->rx_bd_base;
priv->skb_curtx = priv->skb_dirtytx = 0;
/* Install our interrupt handlers for Error,
* Transmit, and Receive */
if (request_irq(priv->interruptError, gfar_error,
- 0, "enet_error", dev) < 0) {
+ 0, priv->int_name_er, dev) < 0) {
if (netif_msg_intr(priv))
printk(KERN_ERR "%s: Can't get IRQ %d\n",
dev->name, priv->interruptError);
}
if (request_irq(priv->interruptTransmit, gfar_transmit,
- 0, "enet_tx", dev) < 0) {
+ 0, priv->int_name_tx, dev) < 0) {
if (netif_msg_intr(priv))
printk(KERN_ERR "%s: Can't get IRQ %d\n",
dev->name, priv->interruptTransmit);
}
if (request_irq(priv->interruptReceive, gfar_receive,
- 0, "enet_rx", dev) < 0) {
+ 0, priv->int_name_rx, dev) < 0) {
if (netif_msg_intr(priv))
printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
dev->name, priv->interruptReceive);
}
} else {
if (request_irq(priv->interruptTransmit, gfar_interrupt,
- 0, "gfar_interrupt", dev) < 0) {
+ 0, priv->int_name_tx, dev) < 0) {
if (netif_msg_intr(priv))
printk(KERN_ERR "%s: Can't get IRQ %d\n",
- dev->name, priv->interruptError);
+ dev->name, priv->interruptTransmit);
err = -1;
goto err_irq_fail;
fcb->vlctl = vlan_tx_tag_get(skb);
}
+static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
+ struct txbd8 *base, int ring_size)
+{
+ struct txbd8 *new_bd = bdp + stride;
+
+ return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
+}
+
+static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
+ int ring_size)
+{
+ return skip_txbd(bdp, 1, base, ring_size);
+}
+
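
skip_txbd() is plain pointer arithmetic with a single manual wrap, so it is only valid for strides smaller than the ring size. A quick user-space check of the wrap behaviour, with txbd8 reduced to a stub:

	#include <assert.h>

	struct txbd8 { unsigned int lstatus; unsigned int bufPtr; };

	static struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
				       struct txbd8 *base, int ring_size)
	{
		struct txbd8 *new_bd = bdp + stride;

		return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
	}

	int main(void)
	{
		struct txbd8 ring[8];

		/* stepping past the end wraps to the start of the ring */
		assert(skip_txbd(&ring[6], 3, ring, 8) == &ring[1]);
		/* a stride inside the ring is a plain increment */
		assert(skip_txbd(&ring[2], 1, ring, 8) == &ring[3]);
		return 0;
	}
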
/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct txfcb *fcb = NULL;
- struct txbd8 *txbdp, *base;
+ struct txbd8 *txbdp, *txbdp_start, *base;
u32 lstatus;
+ int i;
+ u32 bufaddr;
unsigned long flags;
+ unsigned int nr_frags, length;
+
+ base = priv->tx_bd_base;
+
+ /* total number of fragments in the SKB */
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ spin_lock_irqsave(&priv->txlock, flags);
+
+ /* check if there is space to queue this packet: the frame needs
+ * one TxBD for the linear part plus one per fragment */
+ if (nr_frags + 1 > priv->num_txbdfree) {
+ /* no space, stop the queue */
+ netif_stop_queue(dev);
+ dev->stats.tx_fifo_errors++;
+ spin_unlock_irqrestore(&priv->txlock, flags);
+ return NETDEV_TX_BUSY;
+ }
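
num_txbdfree turns the old dirty_tx/cur_tx comparison into a simple descriptor budget: xmit reserves nr_frags + 1 BDs and the cleanup path returns them. A sketch of that invariant, mirroring the corrected space check above (the harness is hypothetical; only the field name is the driver's):

	#include <assert.h>

	static int num_txbdfree;

	/* returns 0 on success, -1 when the caller should stop the queue */
	static int reserve_bds(int nr_frags)
	{
		if (nr_frags + 1 > num_txbdfree)
			return -1;		/* no room: NETDEV_TX_BUSY */
		num_txbdfree -= nr_frags + 1;	/* head BD + one per fragment */
		return 0;
	}

	static void release_bds(int nr_frags)
	{
		num_txbdfree += nr_frags + 1;
	}

	int main(void)
	{
		num_txbdfree = 4;
		assert(reserve_bds(3) == 0);	/* uses all 4 descriptors */
		assert(reserve_bds(0) == -1);	/* ring full, queue must stop */
		release_bds(3);
		assert(reserve_bds(0) == 0);	/* cleaned, can transmit again */
		return 0;
	}
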
/* Update transmit stats */
dev->stats.tx_bytes += skb->len;
- /* Lock priv now */
- spin_lock_irqsave(&priv->txlock, flags);
+ txbdp = txbdp_start = priv->cur_tx;
- /* Point at the first free tx descriptor */
- txbdp = priv->cur_tx;
- base = priv->tx_bd_base;
+ if (nr_frags == 0) {
+ lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+ } else {
+ /* Place the fragment addresses and lengths into the TxBDs */
+ for (i = 0; i < nr_frags; i++) {
+ /* Point at the next BD, wrapping as needed */
+ txbdp = next_txbd(txbdp, base, priv->tx_ring_size);
+
+ length = skb_shinfo(skb)->frags[i].size;
- /* Clear all but the WRAP status flags */
- lstatus = txbdp->lstatus & BD_LFLAG(TXBD_WRAP);
+ lstatus = txbdp->lstatus | length |
+ BD_LFLAG(TXBD_READY);
+
+ /* Handle the last BD specially */
+ if (i == nr_frags - 1)
+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+
+ bufaddr = dma_map_page(&dev->dev,
+ skb_shinfo(skb)->frags[i].page,
+ skb_shinfo(skb)->frags[i].page_offset,
+ length,
+ DMA_TO_DEVICE);
+
+ /* set the TxBD length and buffer pointer */
+ txbdp->bufPtr = bufaddr;
+ txbdp->lstatus = lstatus;
+ }
+
+ lstatus = txbdp_start->lstatus;
+ }
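
lstatus packs the BD status flags and the buffer length into one 32-bit word, which is why BD_LFLAG() can be OR-ed together with a raw length. An illustration of that packing; the 16/16 split and the flag values here are assumptions for demonstration, not quoted from gianfar.h:

	#include <assert.h>
	#include <stdint.h>

	/* assumption: flags live in the upper 16 bits of lstatus,
	 * the buffer length in the lower 16 bits */
	#define BD_LFLAG(flags)	((uint32_t)(flags) << 16)
	#define TXBD_READY	0x8000
	#define TXBD_LAST	0x0800
	#define TXBD_INTERRUPT	0x1000

	int main(void)
	{
		uint32_t lstatus = 0;
		uint16_t length = 1514;	/* hypothetical frame length */

		lstatus |= BD_LFLAG(TXBD_READY | TXBD_LAST | TXBD_INTERRUPT) | length;

		assert((lstatus & 0xffff) == 1514);	/* length intact */
		assert(lstatus & BD_LFLAG(TXBD_READY));	/* flags intact */
		return 0;
	}
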
/* Set up checksumming */
if (CHECKSUM_PARTIAL == skb->ip_summed) {
gfar_tx_vlan(skb, fcb);
}
- /* Set buffer length and pointer */
- txbdp->bufPtr = dma_map_single(&dev->dev, skb->data,
- skb->len, DMA_TO_DEVICE);
-
- /* Save the skb pointer so we can free it later */
+ /* setup the TxBD length and buffer pointer for the first BD */
priv->tx_skbuff[priv->skb_curtx] = skb;
+ txbdp_start->bufPtr = dma_map_single(&dev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
- /* Update the current skb pointer (wrapping if this was the last) */
- priv->skb_curtx =
- (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
-
- /* Flag the BD as ready, interrupt-causing, last, and in need of CRC */
- lstatus |=
- BD_LFLAG(TXBD_READY | TXBD_LAST | TXBD_CRC | TXBD_INTERRUPT) |
- skb->len;
+ lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
- dev->trans_start = jiffies;
-
- /* The powerpc-specific eieio() is used, as wmb() has too strong
+ /*
+ * The powerpc-specific eieio() is used, as wmb() has too strong
* semantics (it requires synchronization between cacheable and
* uncacheable mappings, which eieio doesn't provide and which we
* don't need), thus requiring a more expensive sync instruction. At
* some point, the set of architecture-independent barrier functions
* should be expanded to include weaker barriers.
*/
-
eieio();
- txbdp->lstatus = lstatus;
- txbdp = next_bd(txbdp, base, priv->tx_ring_size);
+ txbdp_start->lstatus = lstatus;
+
+ /* Update the current skb pointer to the next entry we will use
+ * (wrapping if necessary) */
+ priv->skb_curtx = (priv->skb_curtx + 1) &
+ TX_RING_MOD_MASK(priv->tx_ring_size);
+
+ priv->cur_tx = next_txbd(txbdp, base, priv->tx_ring_size);
+
+ /* reduce TxBD free count */
+ priv->num_txbdfree -= (nr_frags + 1);
+
+ dev->trans_start = jiffies;
- /* If the next BD still needs to be cleaned up, then the bds
- are full. We need to tell the kernel to stop sending us stuff. */
- if (txbdp == priv->dirty_tx) {
+ /* If there are no free TxBDs left, the ring is full; tell the
+ * kernel to stop sending us frames until some are cleaned */
+ if (!priv->num_txbdfree) {
netif_stop_queue(dev);
dev->stats.tx_fifo_errors++;
}
- /* Update the current txbd to the next one */
- priv->cur_tx = txbdp;
-
/* Tell the DMA to go go go */
gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
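
The publication order above matters: every fragment BD is marked READY inside the loop, and only after eieio() does the first BD's READY bit go live, so the controller can never start on a half-built chain. A user-space model of the same idiom, using a C11 release store in place of eieio():

	#include <stdatomic.h>
	#include <stdint.h>

	struct bd {
		_Atomic uint32_t lstatus;
		uint32_t bufPtr;
	};

	#define TXBD_READY 0x80000000u	/* illustrative bit position */

	/* Publish a two-descriptor chain: the fragment BD is made READY
	 * first, then a release store marks the head BD READY, mirroring
	 * the driver's "fragments, eieio(), then first BD" sequence. */
	static void publish(struct bd *head, struct bd *frag,
			    uint32_t head_word, uint32_t frag_word)
	{
		atomic_store_explicit(&frag->lstatus, frag_word | TXBD_READY,
				      memory_order_relaxed);
		atomic_store_explicit(&head->lstatus, head_word | TXBD_READY,
				      memory_order_release);
	}

	int main(void)
	{
		static struct bd ring[2];	/* zeroed head + fragment */

		publish(&ring[0], &ring[1], 64, 1450);
		return 0;
	}
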
/* Interrupt Handler for Transmit complete */
static int gfar_clean_tx_ring(struct net_device *dev)
{
- struct txbd8 *bdp, *base;
struct gfar_private *priv = netdev_priv(dev);
+ struct txbd8 *bdp;
+ struct txbd8 *lbdp = NULL;
+ struct txbd8 *base = priv->tx_bd_base;
+ struct sk_buff *skb;
+ int skb_dirtytx;
+ int tx_ring_size = priv->tx_ring_size;
+ int frags = 0;
+ int i;
int howmany = 0;
+ u32 lstatus;
bdp = priv->dirty_tx;
- base = priv->tx_bd_base;
- while ((bdp->status & TXBD_READY) == 0) {
- /* If dirty_tx and cur_tx are the same, then either the */
- /* ring is empty or full now (it could only be full in the beginning, */
- /* obviously). If it is empty, we are done. */
- if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
- break;
+ skb_dirtytx = priv->skb_dirtytx;
- howmany++;
+ while ((skb = priv->tx_skbuff[skb_dirtytx])) {
+ frags = skb_shinfo(skb)->nr_frags;
+ lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
- /* Deferred means some collisions occurred during transmit, */
- /* but we eventually sent the packet. */
- if (bdp->status & TXBD_DEF)
- dev->stats.collisions++;
+ lstatus = lbdp->lstatus;
- /* Unmap the DMA memory */
- dma_unmap_single(&priv->dev->dev, bdp->bufPtr,
- bdp->length, DMA_TO_DEVICE);
+ /* Only clean completed frames */
+ if ((lstatus & BD_LFLAG(TXBD_READY)) &&
+ (lstatus & BD_LENGTH_MASK))
+ break;
+
+ dma_unmap_single(&dev->dev,
+ bdp->bufPtr,
+ bdp->length,
+ DMA_TO_DEVICE);
- /* Free the sk buffer associated with this TxBD */
- dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
+ bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+ bdp = next_txbd(bdp, base, tx_ring_size);
- priv->tx_skbuff[priv->skb_dirtytx] = NULL;
- priv->skb_dirtytx =
- (priv->skb_dirtytx +
- 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
+ for (i = 0; i < frags; i++) {
+ dma_unmap_page(&dev->dev,
+ bdp->bufPtr,
+ bdp->length,
+ DMA_TO_DEVICE);
+ bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+ bdp = next_txbd(bdp, base, tx_ring_size);
+ }
- /* Clean BD length for empty detection */
- bdp->length = 0;
+ dev_kfree_skb_any(skb);
+ priv->tx_skbuff[skb_dirtytx] = NULL;
- bdp = next_bd(bdp, base, priv->tx_ring_size);
+ skb_dirtytx = (skb_dirtytx + 1) &
+ TX_RING_MOD_MASK(tx_ring_size);
- /* Move dirty_tx to be the next bd */
- priv->dirty_tx = bdp;
+ howmany++;
+ priv->num_txbdfree += frags + 1;
+ }
- /* We freed a buffer, so now we can restart transmission */
- if (netif_queue_stopped(dev))
- netif_wake_queue(dev);
- } /* while ((bdp->status & TXBD_READY) == 0) */
+ /* If we freed a buffer, we can restart transmission, if necessary */
+ if (netif_queue_stopped(dev) && priv->num_txbdfree)
+ netif_wake_queue(dev);
+
+ /* Update dirty indicators */
+ priv->skb_dirtytx = skb_dirtytx;
+ priv->dirty_tx = bdp;
dev->stats.tx_packets += howmany;
return howmany;
}
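
Completion is now detected on the last BD of each chain rather than per descriptor: a set READY bit together with a non-zero length means the hardware still owns the frame. A small check function modelling that test; the bit layout is assumed, as in the earlier sketch:

	#include <assert.h>
	#include <stdint.h>

	#define TXBD_READY	0x8000
	#define BD_LFLAG(f)	((uint32_t)(f) << 16)	/* assumed 16/16 split */
	#define BD_LENGTH_MASK	0x0000ffff

	/* A frame is finished once the controller has cleared READY on its
	 * last descriptor; READY plus a non-zero length means the hardware
	 * still owns the chain, so cleaning must stop there. */
	static int frame_done(uint32_t last_lstatus)
	{
		return !((last_lstatus & BD_LFLAG(TXBD_READY)) &&
			 (last_lstatus & BD_LENGTH_MASK));
	}

	int main(void)
	{
		assert(!frame_done(BD_LFLAG(TXBD_READY) | 1514)); /* in flight */
		assert(frame_done(1514));	/* READY cleared: done */
		return 0;
	}
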
-/* Interrupt Handler for Transmit complete */
-static irqreturn_t gfar_transmit(int irq, void *dev_id)
+static void gfar_schedule_cleanup(struct net_device *dev)
{
- struct net_device *dev = (struct net_device *) dev_id;
struct gfar_private *priv = netdev_priv(dev);
-
- /* Clear IEVENT */
- gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
-
- /* Lock priv */
- spin_lock(&priv->txlock);
-
- gfar_clean_tx_ring(dev);
-
- /* If we are coalescing the interrupts, reset the timer */
- /* Otherwise, clear it */
- if (likely(priv->txcoalescing)) {
- gfar_write(&priv->regs->txic, 0);
- gfar_write(&priv->regs->txic, priv->txic);
+ if (netif_rx_schedule_prep(&priv->napi)) {
+ gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
+ __netif_rx_schedule(&priv->napi);
}
+}
- spin_unlock(&priv->txlock);
-
+/* Interrupt Handler for Transmit complete */
+static irqreturn_t gfar_transmit(int irq, void *dev_id)
+{
+ gfar_schedule_cleanup((struct net_device *)dev_id);
return IRQ_HANDLED;
}
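
Both the TX and RX interrupt handlers now funnel into gfar_schedule_cleanup(), which masks the device and schedules the NAPI poll at most once. A user-space model of that prep/schedule pairing, with an atomic flag standing in for the NAPI scheduled bit:

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_flag napi_scheduled = ATOMIC_FLAG_INIT;

	/* Model of the prep/schedule pair: only the first caller between
	 * polls wins, so TX and RX interrupts can share one entry point
	 * without double-scheduling the poll routine. */
	static bool schedule_cleanup(void)
	{
		if (atomic_flag_test_and_set(&napi_scheduled))
			return false;	/* already scheduled: nothing to do */
		/* here the driver masks interrupts and queues the NAPI poll */
		return true;
	}

	static void poll_complete(void)
	{
		atomic_flag_clear(&napi_scheduled);	/* netif_rx_complete() */
	}

	int main(void)
	{
		schedule_cleanup();	/* first IRQ schedules the poll */
		schedule_cleanup();	/* second IRQ is a no-op */
		poll_complete();
		return 0;
	}
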
irqreturn_t gfar_receive(int irq, void *dev_id)
{
- struct net_device *dev = (struct net_device *) dev_id;
- struct gfar_private *priv = netdev_priv(dev);
- u32 tempval;
-
- /* support NAPI */
- /* Clear IEVENT, so interrupts aren't called again
- * because of the packets that have already arrived */
- gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
-
- if (netif_rx_schedule_prep(dev, &priv->napi)) {
- tempval = gfar_read(&priv->regs->imask);
- tempval &= IMASK_RTX_DISABLED;
- gfar_write(&priv->regs->imask, tempval);
-
- __netif_rx_schedule(dev, &priv->napi);
- } else {
- if (netif_msg_rx_err(priv))
- printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
- dev->name, gfar_read(&priv->regs->ievent),
- gfar_read(&priv->regs->imask));
- }
-
+ gfar_schedule_cleanup((struct net_device *)dev_id);
return IRQ_HANDLED;
}
if (unlikely(!newskb))
newskb = skb;
-
- if (skb)
+ else if (skb)
dev_kfree_skb_any(skb);
} else {
/* Increment the number of packets */
{
struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
struct net_device *dev = priv->dev;
- int howmany;
+ int tx_cleaned = 0;
+ int rx_cleaned = 0;
unsigned long flags;
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived */
+ gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
+
/* If we fail to get the lock, don't bother with the TX BDs */
if (spin_trylock_irqsave(&priv->txlock, flags)) {
- gfar_clean_tx_ring(dev);
+ tx_cleaned = gfar_clean_tx_ring(dev);
spin_unlock_irqrestore(&priv->txlock, flags);
}
- howmany = gfar_clean_rx_ring(dev, budget);
+ rx_cleaned = gfar_clean_rx_ring(dev, budget);
+
+ if (tx_cleaned)
+ return budget;
- if (howmany < budget) {
- netif_rx_complete(dev, napi);
+ if (rx_cleaned < budget) {
+ netif_rx_complete(napi);
/* Clear the halt bit in RSTAT */
gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
gfar_write(&priv->regs->rxic, 0);
gfar_write(&priv->regs->rxic, priv->rxic);
}
+ if (likely(priv->txcoalescing)) {
+ gfar_write(&priv->regs->txic, 0);
+ gfar_write(&priv->regs->txic, priv->txic);
+ }
}
- return howmany;
+ return rx_cleaned;
}
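
The return value implements the NAPI contract: reporting the full budget keeps the poll scheduled while TX work remains, while anything below budget lets the core complete NAPI and unmask interrupts. A compact sketch of that decision:

	#include <assert.h>

	/* Report "budget" to stay scheduled while TX work remains,
	 * otherwise report the RX count so the core can complete NAPI
	 * and re-enable interrupts once it is below budget. */
	static int poll_result(int tx_cleaned, int rx_cleaned, int budget)
	{
		if (tx_cleaned)
			return budget;		/* more work: poll again */
		return rx_cleaned;		/* < budget means we are done */
	}

	int main(void)
	{
		assert(poll_result(1, 0, 64) == 64);	/* TX work: stay scheduled */
		assert(poll_result(0, 10, 64) == 10);	/* done: NAPI completes */
		return 0;
	}
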
#ifdef CONFIG_NET_POLL_CONTROLLER
case 1000:
tempval =
((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
+
+ ecntrl &= ~(ECNTRL_R100);
break;
case 100:
case 10:
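
(The hunk is cut off here; upstream handles the 100/10 split by setting or clearing ECNTRL_R100 as well.) At gigabit speed the R100 bit must be cleared alongside selecting GMII mode. A hypothetical helper mirroring that switch; all register bit values below are assumptions for illustration, not quoted from gianfar.h:

	#include <stdint.h>

	#define MACCFG2_IF	0x00000300u	/* assumed interface-mode field */
	#define MACCFG2_GMII	0x00000200u	/* assumed GMII/gigabit encoding */
	#define ECNTRL_R100	0x00000008u	/* assumed reduced-100Mb bit */

	/* Pick the MACCFG2 interface mode and fix up ECNTRL's 100Mb bit
	 * for the given link speed. */
	static void gfar_speed_regs(int speed, uint32_t *maccfg2, uint32_t *ecntrl)
	{
		switch (speed) {
		case 1000:
			*maccfg2 = (*maccfg2 & ~MACCFG2_IF) | MACCFG2_GMII;
			*ecntrl &= ~ECNTRL_R100;	/* gigabit: R100 off */
			break;
		case 100:
			*ecntrl |= ECNTRL_R100;
			break;
		}
	}

	int main(void)
	{
		uint32_t maccfg2 = 0, ecntrl = ECNTRL_R100;

		gfar_speed_regs(1000, &maccfg2, &ecntrl);
		return (ecntrl & ECNTRL_R100) ? 1 : 0;	/* expect cleared */
	}
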