#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_clean_tx_ring(struct net_device *dev);
-static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
+static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
+ int amount_pull);
static void gfar_vlan_rx_register(struct net_device *netdev,
struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
- return (priv->vlan_enable || priv->rx_csum_enable);
+ return priv->vlgrp || priv->rx_csum_enable;
}
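For context, the result of gfar_uses_fcb() decides how many bytes the RX path later strips from the front of every frame before eth_type_trans(); a minimal sketch of that relationship (gfar_rx_pull_len() is a hypothetical helper — the driver computes this inline as amount_pull in gfar_clean_rx_ring()):

static inline unsigned int gfar_rx_pull_len(struct gfar_private *priv)
{
	/* the FCB, if present, precedes the frame; alignment padding follows */
	return (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) + priv->padding;
}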
static int gfar_of_init(struct net_device *dev)
FSL_GIANFAR_DEV_HAS_COALESCE |
FSL_GIANFAR_DEV_HAS_RMON |
FSL_GIANFAR_DEV_HAS_MULTI_INTR |
+ FSL_GIANFAR_DEV_HAS_PADDING |
FSL_GIANFAR_DEV_HAS_CSUM |
FSL_GIANFAR_DEV_HAS_VLAN |
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
u32 tempval;
struct net_device *dev = NULL;
struct gfar_private *priv = NULL;
- int err = 0;
DECLARE_MAC_BUF(mac);
+ int err = 0;
+ int len_devname;
/* Create an ethernet device instance */
dev = alloc_etherdev(sizeof (*priv));
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
priv->rx_csum_enable = 1;
- dev->features |= NETIF_F_IP_CSUM;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
} else
priv->rx_csum_enable = 0;
dev->vlan_rx_register = gfar_vlan_rx_register;
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-
- priv->vlan_enable = 1;
}
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
+ priv->num_txbdfree = DEFAULT_TX_RING_SIZE;
priv->txcoalescing = DEFAULT_TX_COALESCE;
priv->txic = DEFAULT_TXIC;
goto register_fail;
}
+ /* fill out IRQ number and name fields */
+ len_devname = strlen(dev->name);
+ strncpy(&priv->int_name_tx[0], dev->name, len_devname);
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+ strncpy(&priv->int_name_tx[len_devname],
+ "_tx", sizeof("_tx") + 1);
+
+ strncpy(&priv->int_name_rx[0], dev->name, len_devname);
+ strncpy(&priv->int_name_rx[len_devname],
+ "_rx", sizeof("_rx") + 1);
+
+ strncpy(&priv->int_name_er[0], dev->name, len_devname);
+ strncpy(&priv->int_name_er[len_devname],
+ "_er", sizeof("_er") + 1);
+ } else
+ priv->int_name_tx[len_devname] = '\0';
+
/* Create all the sysfs files */
gfar_init_sysfs(dev);
{
struct rxbd8 *rxbdp;
struct txbd8 *txbdp;
- int i;
+ int i, j;
/* Go through all the buffer descriptors and free their data buffers */
txbdp = priv->tx_bd_base;
for (i = 0; i < priv->tx_ring_size; i++) {
-
- if (priv->tx_skbuff[i]) {
- dma_unmap_single(&priv->dev->dev, txbdp->bufPtr,
- txbdp->length,
- DMA_TO_DEVICE);
- dev_kfree_skb_any(priv->tx_skbuff[i]);
- priv->tx_skbuff[i] = NULL;
+ if (!priv->tx_skbuff[i])
+ continue;
+
+ dma_unmap_single(&priv->dev->dev, txbdp->bufPtr,
+ txbdp->length, DMA_TO_DEVICE);
+ txbdp->lstatus = 0;
+ for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) {
+ txbdp++;
+ dma_unmap_page(&priv->dev->dev, txbdp->bufPtr,
+ txbdp->length, DMA_TO_DEVICE);
}
-
txbdp++;
+ dev_kfree_skb_any(priv->tx_skbuff[i]);
+ priv->tx_skbuff[i] = NULL;
}
kfree(priv->tx_skbuff);
priv->rx_skbuff[i] = NULL;
}
- rxbdp->status = 0;
- rxbdp->length = 0;
+ rxbdp->lstatus = 0;
rxbdp->bufPtr = 0;
rxbdp++;
/* Unmask the interrupts we look for */
gfar_write(®s->imask, IMASK_DEFAULT);
+
+ dev->trans_start = jiffies;
}
/* Bring the controller up and running */
priv->rx_skbuff[i] = NULL;
/* Initialize some variables in our dev structure */
+ priv->num_txbdfree = priv->tx_ring_size;
priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
priv->cur_rx = priv->rx_bd_base;
priv->skb_curtx = priv->skb_dirtytx = 0;
/* Initialize Transmit Descriptor Ring */
txbdp = priv->tx_bd_base;
for (i = 0; i < priv->tx_ring_size; i++) {
- txbdp->status = 0;
- txbdp->length = 0;
+ txbdp->lstatus = 0;
txbdp->bufPtr = 0;
txbdp++;
}
/* Install our interrupt handlers for Error,
* Transmit, and Receive */
if (request_irq(priv->interruptError, gfar_error,
- 0, "enet_error", dev) < 0) {
+ 0, priv->int_name_er, dev) < 0) {
if (netif_msg_intr(priv))
printk(KERN_ERR "%s: Can't get IRQ %d\n",
dev->name, priv->interruptError);
}
if (request_irq(priv->interruptTransmit, gfar_transmit,
- 0, "enet_tx", dev) < 0) {
+ 0, priv->int_name_tx, dev) < 0) {
if (netif_msg_intr(priv))
printk(KERN_ERR "%s: Can't get IRQ %d\n",
dev->name, priv->interruptTransmit);
}
if (request_irq(priv->interruptReceive, gfar_receive,
- 0, "enet_rx", dev) < 0) {
+ 0, priv->int_name_rx, dev) < 0) {
if (netif_msg_intr(priv))
printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
dev->name, priv->interruptReceive);
}
} else {
if (request_irq(priv->interruptTransmit, gfar_interrupt,
- 0, "gfar_interrupt", dev) < 0) {
+ 0, priv->int_name_tx, dev) < 0) {
if (netif_msg_intr(priv))
printk(KERN_ERR "%s: Can't get IRQ %d\n",
- dev->name, priv->interruptError);
+ dev->name, priv->interruptTransmit);
err = -1;
goto err_irq_fail;
rctrl |= RCTRL_EMEN;
}
- if (priv->vlan_enable)
- rctrl |= RCTRL_VLAN;
-
if (priv->padding) {
rctrl &= ~RCTRL_PAL_MASK;
rctrl |= RCTRL_PADDING(priv->padding);
return err;
}
-static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
+static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
struct txfcb *fcb = (struct txfcb *)skb_push (skb, GMAC_FCB_LEN);
- memset(fcb, 0, GMAC_FCB_LEN);
+ cacheable_memzero(fcb, GMAC_FCB_LEN);
return fcb;
}
fcb->vlctl = vlan_tx_tag_get(skb);
}
+static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
+ struct txbd8 *base, int ring_size)
+{
+ struct txbd8 *new_bd = bdp + stride;
+
+ return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
+}
+
+static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
+ int ring_size)
+{
+ return skip_txbd(bdp, 1, base, ring_size);
+}
+
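The two helpers above reduce BD traversal to modular arithmetic over the descriptor array; the wrap rule can be checked in isolation with a stand-alone sketch (user-space test harness, not driver code):

#include <assert.h>

/* mirrors skip_txbd(): advance an index by stride, wrapping at ring_size */
static int skip_idx(int idx, int stride, int ring_size)
{
	int next = idx + stride;

	return (next >= ring_size) ? next - ring_size : next;
}

int main(void)
{
	assert(skip_idx(2, 1, 8) == 3); /* plain increment */
	assert(skip_idx(6, 3, 8) == 1); /* wraps past the end */
	assert(skip_idx(7, 1, 8) == 0); /* next_txbd() from the last BD */
	return 0;
}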
/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct txfcb *fcb = NULL;
- struct txbd8 *txbdp;
- u16 status;
+ struct txbd8 *txbdp, *txbdp_start, *base;
+ u32 lstatus;
+ int i;
+ u32 bufaddr;
unsigned long flags;
+ unsigned int nr_frags, length;
+
+ base = priv->tx_bd_base;
+
+ /* total number of fragments in the SKB */
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ spin_lock_irqsave(&priv->txlock, flags);
+
+ /* check if there is space to queue this packet: the frame
+ * needs nr_frags + 1 BDs, one for the linear part plus one
+ * per page fragment */
+ if (nr_frags + 1 > priv->num_txbdfree) {
+ /* no space, stop the queue */
+ netif_stop_queue(dev);
+ dev->stats.tx_fifo_errors++;
+ spin_unlock_irqrestore(&priv->txlock, flags);
+ return NETDEV_TX_BUSY;
+ }
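Under that rule a frame always costs nr_frags + 1 descriptors; for example (illustrative numbers, not taken from the driver):

/* skb with two page fragments -> 3 BDs:
 *   BD0: linear part (skb_headlen() bytes), TXBD_CRC | TXBD_READY
 *   BD1: frags[0]
 *   BD2: frags[1], additionally TXBD_LAST | TXBD_INTERRUPT
 * with num_txbdfree == 2 the queue is stopped, since 2 + 1 > 2 */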
/* Update transmit stats */
dev->stats.tx_bytes += skb->len;
- /* Lock priv now */
- spin_lock_irqsave(&priv->txlock, flags);
+ txbdp = txbdp_start = priv->cur_tx;
+
+ if (nr_frags == 0) {
+ lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+ } else {
+ /* Place the fragment addresses and lengths into the TxBDs */
+ for (i = 0; i < nr_frags; i++) {
+ /* Point at the next BD, wrapping as needed */
+ txbdp = next_txbd(txbdp, base, priv->tx_ring_size);
+
+ length = skb_shinfo(skb)->frags[i].size;
+
+ lstatus = txbdp->lstatus | length |
+ BD_LFLAG(TXBD_READY);
+
+ /* Handle the last BD specially */
+ if (i == nr_frags - 1)
+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+
+ bufaddr = dma_map_page(&dev->dev,
+ skb_shinfo(skb)->frags[i].page,
+ skb_shinfo(skb)->frags[i].page_offset,
+ length,
+ DMA_TO_DEVICE);
- /* Point at the first free tx descriptor */
- txbdp = priv->cur_tx;
+ /* set the TxBD length and buffer pointer */
+ txbdp->bufPtr = bufaddr;
+ txbdp->lstatus = lstatus;
+ }
- /* Clear all but the WRAP status flags */
- status = txbdp->status & TXBD_WRAP;
+ lstatus = txbdp_start->lstatus;
+ }
/* Set up checksumming */
- if (likely((dev->features & NETIF_F_IP_CSUM)
- && (CHECKSUM_PARTIAL == skb->ip_summed))) {
- fcb = gfar_add_fcb(skb, txbdp);
- status |= TXBD_TOE;
+ if (CHECKSUM_PARTIAL == skb->ip_summed) {
+ fcb = gfar_add_fcb(skb);
+ lstatus |= BD_LFLAG(TXBD_TOE);
gfar_tx_checksum(skb, fcb);
}
- if (priv->vlan_enable &&
- unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
+ if (priv->vlgrp && vlan_tx_tag_present(skb)) {
if (unlikely(NULL == fcb)) {
- fcb = gfar_add_fcb(skb, txbdp);
- status |= TXBD_TOE;
+ fcb = gfar_add_fcb(skb);
+ lstatus |= BD_LFLAG(TXBD_TOE);
}
gfar_tx_vlan(skb, fcb);
}
- /* Set buffer length and pointer */
- txbdp->length = skb->len;
- txbdp->bufPtr = dma_map_single(&dev->dev, skb->data,
- skb->len, DMA_TO_DEVICE);
-
- /* Save the skb pointer so we can free it later */
+ /* setup the TxBD length and buffer pointer for the first BD */
priv->tx_skbuff[priv->skb_curtx] = skb;
+ txbdp_start->bufPtr = dma_map_single(&dev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
- /* Update the current skb pointer (wrapping if this was the last) */
- priv->skb_curtx =
- (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
-
- /* Flag the BD as interrupt-causing */
- status |= TXBD_INTERRUPT;
-
- /* Flag the BD as ready to go, last in frame, and */
- /* in need of CRC */
- status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);
+ lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
- dev->trans_start = jiffies;
-
- /* The powerpc-specific eieio() is used, as wmb() has too strong
+ /*
+ * The powerpc-specific eieio() is used, as wmb() has too strong
* semantics (it requires synchronization between cacheable and
* uncacheable mappings, which eieio doesn't provide and which we
* don't need), thus requiring a more expensive sync instruction. At
* some point, the set of architecture-independent barrier functions
* should be expanded to include weaker barriers.
*/
-
eieio();
- txbdp->status = status;
- /* If this was the last BD in the ring, the next one */
- /* is at the beginning of the ring */
- if (txbdp->status & TXBD_WRAP)
- txbdp = priv->tx_bd_base;
- else
- txbdp++;
+ txbdp_start->lstatus = lstatus;
+
+ /* Update the current skb pointer to the next entry we will use
+ * (wrapping if necessary) */
+ priv->skb_curtx = (priv->skb_curtx + 1) &
+ TX_RING_MOD_MASK(priv->tx_ring_size);
+
+ priv->cur_tx = next_txbd(txbdp, base, priv->tx_ring_size);
+
+ /* reduce TxBD free count */
+ priv->num_txbdfree -= (nr_frags + 1);
+
+ dev->trans_start = jiffies;
- /* If the next BD still needs to be cleaned up, then the bds
- are full. We need to tell the kernel to stop sending us stuff. */
- if (txbdp == priv->dirty_tx) {
+ /* If there are no free BDs left, tell the kernel to stop
+ * sending us frames until some have been cleaned */
+ if (!priv->num_txbdfree) {
netif_stop_queue(dev);
dev->stats.tx_fifo_errors++;
}
- /* Update the current txbd to the next one */
- priv->cur_tx = txbdp;
-
/* Tell the DMA to go go go */
gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
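To recap the handoff that the eieio() barrier above protects (an illustrative summary of the code just shown, not additional driver logic):

/* 1. fragment BDs are filled in, READY bits included; the controller
 *    walks the ring in order, so it cannot reach them yet
 * 2. eieio() orders those stores against what follows
 * 3. the store to txbdp_start->lstatus sets TXBD_READY in the first
 *    BD, publishing the whole chain to hardware at once */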
{
struct gfar_private *priv = netdev_priv(dev);
unsigned long flags;
+ struct vlan_group *old_grp;
u32 tempval;
spin_lock_irqsave(&priv->rxlock, flags);
- priv->vlgrp = grp;
+ old_grp = priv->vlgrp;
+
+ if (old_grp == grp) {
+ spin_unlock_irqrestore(&priv->rxlock, flags);
+ return;
+ }
+
+ priv->vlgrp = grp;
if (grp) {
/* Enable VLAN tag insertion */
/* Enable VLAN tag extraction */
tempval = gfar_read(&priv->regs->rctrl);
- tempval |= RCTRL_VLEX;
+ tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
gfar_write(&priv->regs->rctrl, tempval);
} else {
/* Disable VLAN tag insertion */
/* Disable VLAN tag extraction */
tempval = gfar_read(&priv->regs->rctrl);
tempval &= ~RCTRL_VLEX;
+ /* If parse is no longer required, then disable parser */
+ if (tempval & RCTRL_REQ_PARSER)
+ tempval |= RCTRL_PRSDEP_INIT;
+ else
+ tempval &= ~RCTRL_PRSDEP_INIT;
gfar_write(&priv->regs->rctrl, tempval);
}
+ gfar_change_mtu(dev, dev->mtu);
+
spin_unlock_irqrestore(&priv->rxlock, flags);
}
int oldsize = priv->rx_buffer_size;
int frame_size = new_mtu + ETH_HLEN;
- if (priv->vlan_enable)
+ if (priv->vlgrp)
frame_size += VLAN_HLEN;
- if (gfar_uses_fcb(priv))
- frame_size += GMAC_FCB_LEN;
-
- frame_size += priv->padding;
-
if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
if (netif_msg_drv(priv))
printk(KERN_ERR "%s: Invalid MTU setting\n",
return -EINVAL;
}
+ if (gfar_uses_fcb(priv))
+ frame_size += GMAC_FCB_LEN;
+
+ frame_size += priv->padding;
+
tempsize =
(frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
INCREMENTAL_BUFFER_SIZE;
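Assuming INCREMENTAL_BUFFER_SIZE is 512 (its value in gianfar.h), the expression above rounds frame_size up to the next increment boundary, e.g.:

/* frame_size = 1522: (1522 & ~511) + 512 = 1024 + 512 = 1536
 * frame_size = 1536: (1536 & ~511) + 512 = 1536 + 512 = 2048
 * (an exact multiple still advances by a full increment) */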
/* Interrupt Handler for Transmit complete */
static int gfar_clean_tx_ring(struct net_device *dev)
{
- struct txbd8 *bdp;
struct gfar_private *priv = netdev_priv(dev);
+ struct txbd8 *bdp;
+ struct txbd8 *lbdp = NULL;
+ struct txbd8 *base = priv->tx_bd_base;
+ struct sk_buff *skb;
+ int skb_dirtytx;
+ int tx_ring_size = priv->tx_ring_size;
+ int frags = 0;
+ int i;
int howmany = 0;
+ u32 lstatus;
bdp = priv->dirty_tx;
- while ((bdp->status & TXBD_READY) == 0) {
- /* If dirty_tx and cur_tx are the same, then either the */
- /* ring is empty or full now (it could only be full in the beginning, */
- /* obviously). If it is empty, we are done. */
- if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
- break;
+ skb_dirtytx = priv->skb_dirtytx;
- howmany++;
+ while ((skb = priv->tx_skbuff[skb_dirtytx])) {
+ frags = skb_shinfo(skb)->nr_frags;
+ lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
- /* Deferred means some collisions occurred during transmit, */
- /* but we eventually sent the packet. */
- if (bdp->status & TXBD_DEF)
- dev->stats.collisions++;
+ lstatus = lbdp->lstatus;
- /* Unmap the DMA memory */
- dma_unmap_single(&priv->dev->dev, bdp->bufPtr,
- bdp->length, DMA_TO_DEVICE);
+ /* Only clean completed frames */
+ if ((lstatus & BD_LFLAG(TXBD_READY)) &&
+ (lstatus & BD_LENGTH_MASK))
+ break;
- /* Free the sk buffer associated with this TxBD */
- dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
+ dma_unmap_single(&dev->dev,
+ bdp->bufPtr,
+ bdp->length,
+ DMA_TO_DEVICE);
- priv->tx_skbuff[priv->skb_dirtytx] = NULL;
- priv->skb_dirtytx =
- (priv->skb_dirtytx +
- 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
+ bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+ bdp = next_txbd(bdp, base, tx_ring_size);
- /* Clean BD length for empty detection */
- bdp->length = 0;
+ for (i = 0; i < frags; i++) {
+ dma_unmap_page(&dev->dev,
+ bdp->bufPtr,
+ bdp->length,
+ DMA_TO_DEVICE);
+ bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
+ bdp = next_txbd(bdp, base, tx_ring_size);
+ }
- /* update bdp to point at next bd in the ring (wrapping if necessary) */
- if (bdp->status & TXBD_WRAP)
- bdp = priv->tx_bd_base;
- else
- bdp++;
+ dev_kfree_skb_any(skb);
+ priv->tx_skbuff[skb_dirtytx] = NULL;
+
+ skb_dirtytx = (skb_dirtytx + 1) &
+ TX_RING_MOD_MASK(tx_ring_size);
+
+ howmany++;
+ priv->num_txbdfree += frags + 1;
+ }
- /* Move dirty_tx to be the next bd */
- priv->dirty_tx = bdp;
+ /* If we freed a buffer, we can restart transmission, if necessary */
+ if (netif_queue_stopped(dev) && priv->num_txbdfree)
+ netif_wake_queue(dev);
- /* We freed a buffer, so now we can restart transmission */
- if (netif_queue_stopped(dev))
- netif_wake_queue(dev);
- } /* while ((bdp->status & TXBD_READY) == 0) */
+ /* Update dirty indicators */
+ priv->skb_dirtytx = skb_dirtytx;
+ priv->dirty_tx = bdp;
dev->stats.tx_packets += howmany;
return howmany;
}
-/* Interrupt Handler for Transmit complete */
-static irqreturn_t gfar_transmit(int irq, void *dev_id)
+static void gfar_schedule_cleanup(struct net_device *dev)
{
- struct net_device *dev = (struct net_device *) dev_id;
struct gfar_private *priv = netdev_priv(dev);
-
- /* Clear IEVENT */
- gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
-
- /* Lock priv */
- spin_lock(&priv->txlock);
-
- gfar_clean_tx_ring(dev);
-
- /* If we are coalescing the interrupts, reset the timer */
- /* Otherwise, clear it */
- if (likely(priv->txcoalescing)) {
- gfar_write(&priv->regs->txic, 0);
- gfar_write(&priv->regs->txic, priv->txic);
+ if (netif_rx_schedule_prep(&priv->napi)) {
+ gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
+ __netif_rx_schedule(&priv->napi);
}
+}
- spin_unlock(&priv->txlock);
-
+/* Interrupt Handler for Transmit complete */
+static irqreturn_t gfar_transmit(int irq, void *dev_id)
+{
+ gfar_schedule_cleanup((struct net_device *)dev_id);
return IRQ_HANDLED;
}
struct sk_buff *skb)
{
struct gfar_private *priv = netdev_priv(dev);
- u32 * status_len = (u32 *)bdp;
- u16 flags;
+ u32 lstatus;
bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
priv->rx_buffer_size, DMA_FROM_DEVICE);
- flags = RXBD_EMPTY | RXBD_INTERRUPT;
+ lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
- flags |= RXBD_WRAP;
+ lstatus |= BD_LFLAG(RXBD_WRAP);
eieio();
- *status_len = (u32)flags << 16;
+ bdp->lstatus = lstatus;
}
irqreturn_t gfar_receive(int irq, void *dev_id)
{
- struct net_device *dev = (struct net_device *) dev_id;
- struct gfar_private *priv = netdev_priv(dev);
- u32 tempval;
-
- /* support NAPI */
- /* Clear IEVENT, so interrupts aren't called again
- * because of the packets that have already arrived */
- gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
-
- if (netif_rx_schedule_prep(dev, &priv->napi)) {
- tempval = gfar_read(&priv->regs->imask);
- tempval &= IMASK_RTX_DISABLED;
- gfar_write(&priv->regs->imask, tempval);
-
- __netif_rx_schedule(dev, &priv->napi);
- } else {
- if (netif_msg_rx_err(priv))
- printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
- dev->name, gfar_read(&priv->regs->ievent),
- gfar_read(&priv->regs->imask));
- }
-
+ gfar_schedule_cleanup((struct net_device *)dev_id);
return IRQ_HANDLED;
}
}
-static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
-{
- struct rxfcb *fcb = (struct rxfcb *)skb->data;
-
- /* Remove the FCB from the skb */
- skb_pull(skb, GMAC_FCB_LEN);
-
- return fcb;
-}
-
/* gfar_process_frame() -- handle one incoming packet; the caller
 * guarantees skb is non-NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
- int length)
+ int amount_pull)
{
struct gfar_private *priv = netdev_priv(dev);
struct rxfcb *fcb = NULL;
- if (NULL == skb) {
- if (netif_msg_rx_err(priv))
- printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name);
- dev->stats.rx_dropped++;
- priv->extra_stats.rx_skbmissing++;
- } else {
- int ret;
+ int ret;
- /* Prep the skb for the packet */
- skb_put(skb, length);
+ /* the fcb is at the beginning of the frame, if it exists */
+ fcb = (struct rxfcb *)skb->data;
- /* Grab the FCB if there is one */
- if (gfar_uses_fcb(priv))
- fcb = gfar_get_fcb(skb);
-
- /* Remove the padded bytes, if there are any */
- if (priv->padding)
- skb_pull(skb, priv->padding);
+ /* Remove the FCB and any padding bytes, if present */
+ if (amount_pull)
+ skb_pull(skb, amount_pull);
- if (priv->rx_csum_enable)
- gfar_rx_checksum(skb, fcb);
+ if (priv->rx_csum_enable)
+ gfar_rx_checksum(skb, fcb);
- /* Tell the skb what kind of packet this is */
- skb->protocol = eth_type_trans(skb, dev);
+ /* Tell the skb what kind of packet this is */
+ skb->protocol = eth_type_trans(skb, dev);
- /* Send the packet up the stack */
- if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN))) {
- ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp,
- fcb->vlctl);
- } else
- ret = netif_receive_skb(skb);
+ /* Send the packet up the stack */
+ if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
+ ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
+ else
+ ret = netif_receive_skb(skb);
- if (NET_RX_DROP == ret)
- priv->extra_stats.kernel_dropped++;
- }
+ if (NET_RX_DROP == ret)
+ priv->extra_stats.kernel_dropped++;
return 0;
}
*/
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
{
- struct rxbd8 *bdp;
+ struct rxbd8 *bdp, *base;
struct sk_buff *skb;
- u16 pkt_len;
+ int pkt_len;
+ int amount_pull;
int howmany = 0;
struct gfar_private *priv = netdev_priv(dev);
/* Get the first full descriptor */
bdp = priv->cur_rx;
+ base = priv->rx_bd_base;
+
+ amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
+ priv->padding;
while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
struct sk_buff *newskb;
if (unlikely(!newskb))
newskb = skb;
-
- if (skb)
+ else if (skb)
dev_kfree_skb_any(skb);
} else {
/* Increment the number of packets */
dev->stats.rx_packets++;
howmany++;
- /* Remove the FCS from the packet length */
- pkt_len = bdp->length - 4;
+ if (likely(skb)) {
+ /* Remove the FCS from the packet length */
+ pkt_len = bdp->length - ETH_FCS_LEN;
+ skb_put(skb, pkt_len);
+ dev->stats.rx_bytes += pkt_len;
- gfar_process_frame(dev, skb, pkt_len);
+ gfar_process_frame(dev, skb, amount_pull);
+
+ } else {
+ if (netif_msg_rx_err(priv))
+ printk(KERN_WARNING
+ "%s: Missing skb!\n", dev->name);
+ dev->stats.rx_dropped++;
+ priv->extra_stats.rx_skbmissing++;
+ }
- dev->stats.rx_bytes += pkt_len;
}
priv->rx_skbuff[priv->skb_currx] = newskb;
gfar_new_rxbdp(dev, bdp, newskb);
/* Update to the next pointer */
- if (bdp->status & RXBD_WRAP)
- bdp = priv->rx_bd_base;
- else
- bdp++;
+ bdp = next_bd(bdp, base, priv->rx_ring_size);
/* update to point at the next skb */
priv->skb_currx =
{
struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
struct net_device *dev = priv->dev;
- int howmany;
+ int tx_cleaned = 0;
+ int rx_cleaned = 0;
unsigned long flags;
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived */
+ gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
+
/* If we fail to get the lock, don't bother with the TX BDs */
if (spin_trylock_irqsave(&priv->txlock, flags)) {
- gfar_clean_tx_ring(dev);
+ tx_cleaned = gfar_clean_tx_ring(dev);
spin_unlock_irqrestore(&priv->txlock, flags);
}
- howmany = gfar_clean_rx_ring(dev, budget);
+ rx_cleaned = gfar_clean_rx_ring(dev, budget);
- if (howmany < budget) {
- netif_rx_complete(dev, napi);
+ if (tx_cleaned)
+ return budget;
+
+ if (rx_cleaned < budget) {
+ netif_rx_complete(napi);
/* Clear the halt bit in RSTAT */
gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
gfar_write(&priv->regs->rxic, 0);
gfar_write(&priv->regs->rxic, priv->rxic);
}
+ if (likely(priv->txcoalescing)) {
+ gfar_write(&priv->regs->txic, 0);
+ gfar_write(&priv->regs->txic, priv->txic);
+ }
}
- return howmany;
+ return rx_cleaned;
}
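The rewritten poll routine leans on the NAPI return-value contract of this kernel generation; schematically (clean_rx() and enable_irqs() are hypothetical stand-ins, netif_rx_complete() is the old-API name):

static int example_poll(struct napi_struct *napi, int budget)
{
	int done = clean_rx(napi, budget); /* hypothetical RX work */

	if (done < budget) {
		/* all done: leave polled mode, let interrupts resume */
		netif_rx_complete(napi);
		enable_irqs(); /* hypothetical, e.g. write IMASK_DEFAULT */
	}

	/* returning budget keeps the core polling with interrupts masked,
	 * which is why gfar_poll() returns budget while TX work remains */
	return done;
}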
#ifdef CONFIG_NET_POLL_CONTROLLER
case 1000:
tempval =
((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
+
+ ecntrl &= ~(ECNTRL_R100);
break;
case 100:
case 10: