/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 *
 * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
 * Copyright (c) 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *  RA 11 31 24.2
 *  Dec +69 19 52
 *  V 3.84
 *  B-V +1.62
 *
 *  Theory of operation
 *
 *  The driver is initialized through platform_device.  Structures which
 *  define the configuration needed by the board are defined in a
 *  board structure in arch/ppc/platforms (though I do not
 *  discount the possibility that other architectures could one
 *  day be supported.)
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
 *  last descriptor of the ring.
 *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames or amount of time have passed).  In NAPI, the
 *  interrupt handler will signal there is work to be done, and
 *  exit.  Without NAPI, the packet(s) will be handled
 *  immediately.  Both methods will start at the last known empty
 *  descriptor, and process every subsequent descriptor until there
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>

#include "gianfar.h"
#include "gianfar_mii.h"

#define TX_TIMEOUT      (1*HZ)
#define SKB_ALLOC_TIMEOUT 1000000
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

#ifdef CONFIG_GFAR_NAPI
#define RECEIVE(x) netif_receive_skb(x)
#else
#define RECEIVE(x) netif_rx(x)
#endif

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp);
static struct net_device_stats *gfar_get_stats(struct net_device *dev);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *pdev);
static int gfar_remove(struct platform_device *pdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct net_device *dev, int *budget);
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
static void gfar_vlan_rx_register(struct net_device *netdev,
                                struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);

extern const struct ethtool_ops gfar_ethtool_ops;

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
        return (priv->vlan_enable || priv->rx_csum_enable);
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *pdev)
{
        u32 tempval;
        struct net_device *dev = NULL;
        struct gfar_private *priv = NULL;
        struct gianfar_platform_data *einfo;
        struct resource *r;
        int idx;
        int err = 0;

        einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;

        if (NULL == einfo) {
                printk(KERN_ERR "gfar %d: Missing additional data!\n",
                       pdev->id);

                return -ENODEV;
        }

        /* Create an ethernet device instance */
        dev = alloc_etherdev(sizeof (*priv));

        if (NULL == dev)
                return -ENOMEM;

        priv = netdev_priv(dev);

        /* Set the info in the priv to the current info */
        priv->einfo = einfo;

        /* fill out IRQ fields */
        if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                priv->interruptTransmit = platform_get_irq_byname(pdev, "tx");
                priv->interruptReceive = platform_get_irq_byname(pdev, "rx");
                priv->interruptError = platform_get_irq_byname(pdev, "error");
                if (priv->interruptTransmit < 0 || priv->interruptReceive < 0 || priv->interruptError < 0)
                        goto regs_fail;
        } else {
                priv->interruptTransmit = platform_get_irq(pdev, 0);
                if (priv->interruptTransmit < 0)
                        goto regs_fail;
        }

        /* get a pointer to the register memory */
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->regs = ioremap(r->start, sizeof (struct gfar));

        if (NULL == priv->regs) {
                err = -ENOMEM;
                goto regs_fail;
        }

        spin_lock_init(&priv->txlock);
        spin_lock_init(&priv->rxlock);

        platform_set_drvdata(pdev, dev);

        /* Stop the DMA engine now, in case it was running before */
        /* (The firmware could have used it, and left it running). */
        /* To do this, we write Graceful Receive Stop and Graceful */
        /* Transmit Stop, and then wait until the corresponding bits */
        /* in IEVENT indicate the stops have completed. */
        tempval = gfar_read(&priv->regs->dmactrl);
        tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
        gfar_write(&priv->regs->dmactrl, tempval);

        tempval = gfar_read(&priv->regs->dmactrl);
        tempval |= (DMACTRL_GRS | DMACTRL_GTS);
        gfar_write(&priv->regs->dmactrl, tempval);

        while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
                cpu_relax();

        /* Reset MAC layer */
        gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);

        tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
        gfar_write(&priv->regs->maccfg1, tempval);

        /* Initialize MACCFG2. */
        gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);

        /* Initialize ECNTRL */
        gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);

        /* Copy the station address into the dev structure, */
        memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);

        /* Set the dev->base_addr to the gfar reg region */
        dev->base_addr = (unsigned long) (priv->regs);

        SET_MODULE_OWNER(dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

        /* Fill in the dev structure */
        dev->open = gfar_enet_open;
        dev->hard_start_xmit = gfar_start_xmit;
        dev->tx_timeout = gfar_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_GFAR_NAPI
        dev->poll = gfar_poll;
        dev->weight = GFAR_DEV_WEIGHT;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = gfar_netpoll;
#endif
        dev->stop = gfar_close;
        dev->get_stats = gfar_get_stats;
        dev->change_mtu = gfar_change_mtu;
        dev->mtu = 1500;
        dev->set_multicast_list = gfar_set_multi;

        dev->ethtool_ops = &gfar_ethtool_ops;

        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
                priv->rx_csum_enable = 1;
                dev->features |= NETIF_F_IP_CSUM;
        } else
                priv->rx_csum_enable = 0;

        priv->vlgrp = NULL;

        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
                dev->vlan_rx_register = gfar_vlan_rx_register;

                dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

                priv->vlan_enable = 1;
        }

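        /* Choose the hash table layout: extended hash spreads a 9-bit
         * hash across all 16 hash registers (igaddr0-7 plus gaddr0-7,
         * 512 bins); otherwise only the 8 gaddr registers are used,
         * giving an 8-bit hash and 256 bins. */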
        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
                priv->extended_hash = 1;
                priv->hash_width = 9;

                priv->hash_regs[0] = &priv->regs->igaddr0;
                priv->hash_regs[1] = &priv->regs->igaddr1;
                priv->hash_regs[2] = &priv->regs->igaddr2;
                priv->hash_regs[3] = &priv->regs->igaddr3;
                priv->hash_regs[4] = &priv->regs->igaddr4;
                priv->hash_regs[5] = &priv->regs->igaddr5;
                priv->hash_regs[6] = &priv->regs->igaddr6;
                priv->hash_regs[7] = &priv->regs->igaddr7;
                priv->hash_regs[8] = &priv->regs->gaddr0;
                priv->hash_regs[9] = &priv->regs->gaddr1;
                priv->hash_regs[10] = &priv->regs->gaddr2;
                priv->hash_regs[11] = &priv->regs->gaddr3;
                priv->hash_regs[12] = &priv->regs->gaddr4;
                priv->hash_regs[13] = &priv->regs->gaddr5;
                priv->hash_regs[14] = &priv->regs->gaddr6;
                priv->hash_regs[15] = &priv->regs->gaddr7;

        } else {
                priv->extended_hash = 0;
                priv->hash_width = 8;

                priv->hash_regs[0] = &priv->regs->gaddr0;
                priv->hash_regs[1] = &priv->regs->gaddr1;
                priv->hash_regs[2] = &priv->regs->gaddr2;
                priv->hash_regs[3] = &priv->regs->gaddr3;
                priv->hash_regs[4] = &priv->regs->gaddr4;
                priv->hash_regs[5] = &priv->regs->gaddr5;
                priv->hash_regs[6] = &priv->regs->gaddr6;
                priv->hash_regs[7] = &priv->regs->gaddr7;
        }

        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
                priv->padding = DEFAULT_PADDING;
        else
                priv->padding = 0;

        if (dev->features & NETIF_F_IP_CSUM)
                dev->hard_header_len += GMAC_FCB_LEN;

        priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
        priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
        priv->rx_ring_size = DEFAULT_RX_RING_SIZE;

        priv->txcoalescing = DEFAULT_TX_COALESCE;
        priv->txcount = DEFAULT_TXCOUNT;
        priv->txtime = DEFAULT_TXTIME;
        priv->rxcoalescing = DEFAULT_RX_COALESCE;
        priv->rxcount = DEFAULT_RXCOUNT;
        priv->rxtime = DEFAULT_RXTIME;

        /* Enable most messages by default */
        priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

        err = register_netdev(dev);

        if (err) {
                printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
                                dev->name);
                goto register_fail;
        }

        /* Create all the sysfs files */
        gfar_init_sysfs(dev);

        /* Print out the device info */
        printk(KERN_INFO DEVICE_NAME, dev->name);
        for (idx = 0; idx < 6; idx++)
                printk("%2.2x%c", dev->dev_addr[idx], idx == 5 ? ' ' : ':');
        printk("\n");

        /* Even more device info helps when determining which kernel */
        /* provided which set of benchmarks. */
#ifdef CONFIG_GFAR_NAPI
        printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
#else
        printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name);
#endif
        printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
               dev->name, priv->rx_ring_size, priv->tx_ring_size);

        return 0;

register_fail:
        iounmap(priv->regs);
regs_fail:
        free_netdev(dev);
        return err;
}

static int gfar_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);
        struct gfar_private *priv = netdev_priv(dev);

        platform_set_drvdata(pdev, NULL);

        iounmap(priv->regs);
        free_netdev(dev);

        return 0;
}


/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        u32 ecntrl = gfar_read(&priv->regs->ecntrl);

        if (ecntrl & ECNTRL_SGMII_MODE)
                return PHY_INTERFACE_MODE_SGMII;

        if (ecntrl & ECNTRL_TBI_MODE) {
                if (ecntrl & ECNTRL_REDUCED_MODE)
                        return PHY_INTERFACE_MODE_RTBI;
                else
                        return PHY_INTERFACE_MODE_TBI;
        }

        if (ecntrl & ECNTRL_REDUCED_MODE) {
                if (ecntrl & ECNTRL_REDUCED_MII_MODE)
                        return PHY_INTERFACE_MODE_RMII;
                else
                        return PHY_INTERFACE_MODE_RGMII;
        }

        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
                return PHY_INTERFACE_MODE_GMII;

        return PHY_INTERFACE_MODE_MII;
}


/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        uint gigabit_support =
                priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
                SUPPORTED_1000baseT_Full : 0;
        struct phy_device *phydev;
        char phy_id[BUS_ID_SIZE];
        phy_interface_t interface;

        priv->oldlink = 0;
        priv->oldspeed = 0;
        priv->oldduplex = -1;

        snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id);

        interface = gfar_get_interface(dev);

        phydev = phy_connect(dev, phy_id, &adjust_link, 0, interface);

        if (IS_ERR(phydev)) {
                printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
                return PTR_ERR(phydev);
        }

        /* Remove any features not supported by the controller */
        phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
        phydev->advertising = phydev->supported;

        priv->phydev = phydev;

        return 0;
}

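/* Program the controller's registers to a known initial state:
 * clear pending events, mask interrupts, zero the hash tables and
 * (if present) the RMON MIB counters, and set frame-length limits. */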
static void init_registers(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

        /* Clear IEVENT */
        gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);

        /* Initialize IMASK */
        gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);

        /* Init hash registers to zero */
        gfar_write(&priv->regs->igaddr0, 0);
        gfar_write(&priv->regs->igaddr1, 0);
        gfar_write(&priv->regs->igaddr2, 0);
        gfar_write(&priv->regs->igaddr3, 0);
        gfar_write(&priv->regs->igaddr4, 0);
        gfar_write(&priv->regs->igaddr5, 0);
        gfar_write(&priv->regs->igaddr6, 0);
        gfar_write(&priv->regs->igaddr7, 0);

        gfar_write(&priv->regs->gaddr0, 0);
        gfar_write(&priv->regs->gaddr1, 0);
        gfar_write(&priv->regs->gaddr2, 0);
        gfar_write(&priv->regs->gaddr3, 0);
        gfar_write(&priv->regs->gaddr4, 0);
        gfar_write(&priv->regs->gaddr5, 0);
        gfar_write(&priv->regs->gaddr6, 0);
        gfar_write(&priv->regs->gaddr7, 0);

        /* Zero out the rmon mib registers if it has them */
        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
                memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));

                /* Mask off the CAM interrupts */
                gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
                gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
        }

        /* Initialize the max receive buffer length */
        gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);

        /* Initialize the Minimum Frame Length Register */
        gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);

        /* Assign the TBI an address which won't conflict with the PHYs */
        gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
}


/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        u32 tempval;

        /* Mask all interrupts */
        gfar_write(&regs->imask, IMASK_INIT_CLEAR);

        /* Clear all interrupts */
        gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

        /* Stop the DMA, and wait for it to stop */
        tempval = gfar_read(&priv->regs->dmactrl);
        if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
            != (DMACTRL_GRS | DMACTRL_GTS)) {
                tempval |= (DMACTRL_GRS | DMACTRL_GTS);
                gfar_write(&priv->regs->dmactrl, tempval);

                while (!(gfar_read(&priv->regs->ievent) &
                         (IEVENT_GRSC | IEVENT_GTSC)))
                        cpu_relax();
        }

        /* Disable Rx and Tx */
        tempval = gfar_read(&regs->maccfg1);
        tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
        gfar_write(&regs->maccfg1, tempval);
}

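/* Take the interface fully down: stop the PHY and the DMA engine,
 * release the IRQs, and free the skbs and buffer descriptor rings. */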
void stop_gfar(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        unsigned long flags;

        phy_stop(priv->phydev);

        /* Lock it down */
        spin_lock_irqsave(&priv->txlock, flags);
        spin_lock(&priv->rxlock);

        gfar_halt(dev);

        spin_unlock(&priv->rxlock);
        spin_unlock_irqrestore(&priv->txlock, flags);

        /* Free the IRQs */
        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                free_irq(priv->interruptError, dev);
                free_irq(priv->interruptTransmit, dev);
                free_irq(priv->interruptReceive, dev);
        } else {
                free_irq(priv->interruptTransmit, dev);
        }

        free_skb_resources(priv);

        dma_free_coherent(NULL,
                        sizeof(struct txbd8)*priv->tx_ring_size
                        + sizeof(struct rxbd8)*priv->rx_ring_size,
                        priv->tx_bd_base,
                        gfar_read(&regs->tbase0));
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
        struct rxbd8 *rxbdp;
        struct txbd8 *txbdp;
        int i;

        /* Go through all the buffer descriptors and free their data buffers */
        txbdp = priv->tx_bd_base;

        for (i = 0; i < priv->tx_ring_size; i++) {

                if (priv->tx_skbuff[i]) {
                        dma_unmap_single(NULL, txbdp->bufPtr,
                                        txbdp->length,
                                        DMA_TO_DEVICE);
                        dev_kfree_skb_any(priv->tx_skbuff[i]);
                        priv->tx_skbuff[i] = NULL;
                }

                /* Advance to the next descriptor; without this the same
                 * BD was unmapped on every iteration */
                txbdp++;
        }

        kfree(priv->tx_skbuff);

        rxbdp = priv->rx_bd_base;

        /* rx_skbuff is not guaranteed to be allocated, so only
         * free it and its contents if it is allocated */
        if (priv->rx_skbuff != NULL) {
                for (i = 0; i < priv->rx_ring_size; i++) {
                        if (priv->rx_skbuff[i]) {
                                dma_unmap_single(NULL, rxbdp->bufPtr,
                                                priv->rx_buffer_size,
                                                DMA_FROM_DEVICE);

                                dev_kfree_skb_any(priv->rx_skbuff[i]);
                                priv->rx_skbuff[i] = NULL;
                        }

                        rxbdp->status = 0;
                        rxbdp->length = 0;
                        rxbdp->bufPtr = 0;

                        rxbdp++;
                }

                kfree(priv->rx_skbuff);
        }
}

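/* Bring the controller out of the halted state: re-enable Rx/Tx in
 * MACCFG1, restart DMA polling, and unmask the interrupts we use. */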
void gfar_start(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        u32 tempval;

        /* Enable Rx and Tx in MACCFG1 */
        tempval = gfar_read(&regs->maccfg1);
        tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
        gfar_write(&regs->maccfg1, tempval);

        /* Initialize DMACTRL to have WWR and WOP */
        tempval = gfar_read(&priv->regs->dmactrl);
        tempval |= DMACTRL_INIT_SETTINGS;
        gfar_write(&priv->regs->dmactrl, tempval);

        /* Make sure we aren't stopped */
        tempval = gfar_read(&priv->regs->dmactrl);
        tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
        gfar_write(&priv->regs->dmactrl, tempval);

        /* Clear THLT/RHLT, so that the DMA starts polling now */
        gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
        gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

        /* Unmask the interrupts we look for */
        gfar_write(&regs->imask, IMASK_DEFAULT);
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *dev)
{
        struct txbd8 *txbdp;
        struct rxbd8 *rxbdp;
        dma_addr_t addr;
        unsigned long vaddr;
        int i;
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        int err = 0;
        u32 rctrl = 0;
        u32 attrs = 0;

        gfar_write(&regs->imask, IMASK_INIT_CLEAR);

        /* Allocate memory for the buffer descriptors */
        vaddr = (unsigned long) dma_alloc_coherent(NULL,
                        sizeof (struct txbd8) * priv->tx_ring_size +
                        sizeof (struct rxbd8) * priv->rx_ring_size,
                        &addr, GFP_KERNEL);

        if (vaddr == 0) {
                if (netif_msg_ifup(priv))
                        printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
                                        dev->name);
                return -ENOMEM;
        }

        priv->tx_bd_base = (struct txbd8 *) vaddr;

        /* enet DMA only understands physical addresses */
        gfar_write(&regs->tbase0, addr);

        /* Start the rx descriptor ring where the tx ring leaves off */
        addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
        vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
        priv->rx_bd_base = (struct rxbd8 *) vaddr;
        gfar_write(&regs->rbase0, addr);

        /* Setup the skbuff rings */
        priv->tx_skbuff =
            (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
                                        priv->tx_ring_size, GFP_KERNEL);

        if (NULL == priv->tx_skbuff) {
                if (netif_msg_ifup(priv))
                        printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
                                        dev->name);
                err = -ENOMEM;
                goto tx_skb_fail;
        }

        for (i = 0; i < priv->tx_ring_size; i++)
                priv->tx_skbuff[i] = NULL;

        priv->rx_skbuff =
            (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
                                        priv->rx_ring_size, GFP_KERNEL);

        if (NULL == priv->rx_skbuff) {
                if (netif_msg_ifup(priv))
                        printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
                                        dev->name);
                err = -ENOMEM;
                goto rx_skb_fail;
        }

        for (i = 0; i < priv->rx_ring_size; i++)
                priv->rx_skbuff[i] = NULL;

        /* Initialize some variables in our dev structure */
        priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
        priv->cur_rx = priv->rx_bd_base;
        priv->skb_curtx = priv->skb_dirtytx = 0;
        priv->skb_currx = 0;

        /* Initialize Transmit Descriptor Ring */
        txbdp = priv->tx_bd_base;
        for (i = 0; i < priv->tx_ring_size; i++) {
                txbdp->status = 0;
                txbdp->length = 0;
                txbdp->bufPtr = 0;
                txbdp++;
        }

        /* Set the last descriptor in the ring to indicate wrap */
        txbdp--;
        txbdp->status |= TXBD_WRAP;

        rxbdp = priv->rx_bd_base;
        for (i = 0; i < priv->rx_ring_size; i++) {
                struct sk_buff *skb = NULL;

                rxbdp->status = 0;

                skb = gfar_new_skb(dev, rxbdp);

                priv->rx_skbuff[i] = skb;

                rxbdp++;
        }

        /* Set the last descriptor in the ring to wrap */
        rxbdp--;
        rxbdp->status |= RXBD_WRAP;

        /* If the device has multiple interrupts, register for
         * them.  Otherwise, only register for the one */
        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                /* Install our interrupt handlers for Error,
                 * Transmit, and Receive */
                if (request_irq(priv->interruptError, gfar_error,
                                0, "enet_error", dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
                                        dev->name, priv->interruptError);

                        err = -1;
                        goto err_irq_fail;
                }

                if (request_irq(priv->interruptTransmit, gfar_transmit,
                                0, "enet_tx", dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
                                        dev->name, priv->interruptTransmit);

                        err = -1;

                        goto tx_irq_fail;
                }

                if (request_irq(priv->interruptReceive, gfar_receive,
                                0, "enet_rx", dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
                                                dev->name, priv->interruptReceive);

                        err = -1;
                        goto rx_irq_fail;
                }
        } else {
                if (request_irq(priv->interruptTransmit, gfar_interrupt,
                                0, "gfar_interrupt", dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
                                        dev->name, priv->interruptTransmit);

                        err = -1;
                        goto err_irq_fail;
                }
        }

        phy_start(priv->phydev);

        /* Configure the coalescing support */
        if (priv->txcoalescing)
                gfar_write(&regs->txic,
                           mk_ic_value(priv->txcount, priv->txtime));
        else
                gfar_write(&regs->txic, 0);

        if (priv->rxcoalescing)
                gfar_write(&regs->rxic,
                           mk_ic_value(priv->rxcount, priv->rxtime));
        else
                gfar_write(&regs->rxic, 0);

        if (priv->rx_csum_enable)
                rctrl |= RCTRL_CHECKSUMMING;

        if (priv->extended_hash) {
                rctrl |= RCTRL_EXTHASH;

                gfar_clear_exact_match(dev);
                rctrl |= RCTRL_EMEN;
        }

        if (priv->vlan_enable)
                rctrl |= RCTRL_VLAN;

        if (priv->padding) {
                rctrl &= ~RCTRL_PAL_MASK;
                rctrl |= RCTRL_PADDING(priv->padding);
        }

        /* Init rctrl based on our settings */
        gfar_write(&priv->regs->rctrl, rctrl);

        if (dev->features & NETIF_F_IP_CSUM)
                gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);

        /* Set the extraction length and index */
        attrs = ATTRELI_EL(priv->rx_stash_size) |
                ATTRELI_EI(priv->rx_stash_index);

        gfar_write(&priv->regs->attreli, attrs);

        /* Start with defaults, and add stashing or locking
         * depending on the appropriate variables */
        attrs = ATTR_INIT_SETTINGS;

        if (priv->bd_stash_en)
                attrs |= ATTR_BDSTASH;

        if (priv->rx_stash_size != 0)
                attrs |= ATTR_BUFSTASH;

        gfar_write(&priv->regs->attr, attrs);

        gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
        gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
        gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);

        /* Start the controller */
        gfar_start(dev);

        return 0;

rx_irq_fail:
        free_irq(priv->interruptTransmit, dev);
tx_irq_fail:
        free_irq(priv->interruptError, dev);
err_irq_fail:
rx_skb_fail:
        free_skb_resources(priv);
tx_skb_fail:
        dma_free_coherent(NULL,
                        sizeof(struct txbd8)*priv->tx_ring_size
                        + sizeof(struct rxbd8)*priv->rx_ring_size,
                        priv->tx_bd_base,
                        gfar_read(&regs->tbase0));

        return err;
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
        int err;

        /* Initialize a bunch of registers */
        init_registers(dev);

        gfar_set_mac_address(dev);

        err = init_phy(dev);

        if (err)
                return err;

        err = startup_gfar(dev);

        netif_start_queue(dev);

        return err;
}

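/* Push a Frame Control Block onto the front of the skb and zero it;
 * the caller fills in the checksum and/or VLAN fields. */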
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
{
        struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

        memset(fcb, 0, GMAC_FCB_LEN);

        return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
        u8 flags = 0;

        /* If we're here, it's an IP packet with a TCP or UDP
         * payload.  We set it to checksum, using a pseudo-header
         * we provide
         */
        flags = TXFCB_DEFAULT;

        /* Tell the controller what the protocol is */
        /* And provide the already calculated phcs */
        if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
                flags |= TXFCB_UDP;
                fcb->phcs = udp_hdr(skb)->check;
        } else
                fcb->phcs = tcp_hdr(skb)->check;

        /* l3os is the distance between the start of the
         * frame (skb->data) and the start of the IP hdr.
         * l4os is the distance between the start of the
         * l3 hdr and the l4 hdr */
        fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
        fcb->l4os = skb_network_header_len(skb);

        fcb->flags = flags;
}

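/* Record the VLAN tag in the FCB so the controller inserts it into
 * the outgoing frame */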
inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
        fcb->flags |= TXFCB_VLN;
        fcb->vlctl = vlan_tx_tag_get(skb);
}

/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct txfcb *fcb = NULL;
        struct txbd8 *txbdp;
        u16 status;
        unsigned long flags;

        /* Update transmit stats */
        priv->stats.tx_bytes += skb->len;

        /* Lock priv now */
        spin_lock_irqsave(&priv->txlock, flags);

        /* Point at the first free tx descriptor */
        txbdp = priv->cur_tx;

        /* Clear all but the WRAP status flags */
        status = txbdp->status & TXBD_WRAP;

        /* Set up checksumming */
        if (likely((dev->features & NETIF_F_IP_CSUM)
                        && (CHECKSUM_PARTIAL == skb->ip_summed))) {
                fcb = gfar_add_fcb(skb, txbdp);
                status |= TXBD_TOE;
                gfar_tx_checksum(skb, fcb);
        }

        if (priv->vlan_enable &&
                        unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
                if (unlikely(NULL == fcb)) {
                        fcb = gfar_add_fcb(skb, txbdp);
                        status |= TXBD_TOE;
                }

                gfar_tx_vlan(skb, fcb);
        }

        /* Set buffer length and pointer */
        txbdp->length = skb->len;
        txbdp->bufPtr = dma_map_single(NULL, skb->data,
                        skb->len, DMA_TO_DEVICE);

        /* Save the skb pointer so we can free it later */
        priv->tx_skbuff[priv->skb_curtx] = skb;

        /* Update the current skb pointer (wrapping if this was the last) */
        priv->skb_curtx =
            (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);

        /* Flag the BD as interrupt-causing */
        status |= TXBD_INTERRUPT;

        /* Flag the BD as ready to go, last in frame, and  */
        /* in need of CRC */
        status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);

        dev->trans_start = jiffies;

        /* The powerpc-specific eieio() is used, as wmb() has too strong
         * semantics (it requires synchronization between cacheable and
         * uncacheable mappings, which eieio doesn't provide and which we
         * don't need), thus requiring a more expensive sync instruction.  At
         * some point, the set of architecture-independent barrier functions
         * should be expanded to include weaker barriers.
         */

        eieio();
        txbdp->status = status;

        /* If this was the last BD in the ring, the next one */
        /* is at the beginning of the ring */
        if (txbdp->status & TXBD_WRAP)
                txbdp = priv->tx_bd_base;
        else
                txbdp++;

        /* If the next BD still needs to be cleaned up, then the bds
           are full.  We need to tell the kernel to stop sending us stuff. */
        if (txbdp == priv->dirty_tx) {
                netif_stop_queue(dev);

                priv->stats.tx_fifo_errors++;
        }

        /* Update the current txbd to the next one */
        priv->cur_tx = txbdp;

        /* Tell the DMA to go go go */
        gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);

        /* Unlock priv */
        spin_unlock_irqrestore(&priv->txlock, flags);

        return 0;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        stop_gfar(dev);

        /* Disconnect from the PHY */
        phy_disconnect(priv->phydev);
        priv->phydev = NULL;

        netif_stop_queue(dev);

        return 0;
}

/* returns a net_device_stats structure pointer */
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

        return &(priv->stats);
}

/* Changes the mac address if the controller is not running. */
int gfar_set_mac_address(struct net_device *dev)
{
        gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

        return 0;
}


/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
                struct vlan_group *grp)
{
        struct gfar_private *priv = netdev_priv(dev);
        unsigned long flags;
        u32 tempval;

        spin_lock_irqsave(&priv->rxlock, flags);

        priv->vlgrp = grp;

        if (grp) {
                /* Enable VLAN tag insertion */
                tempval = gfar_read(&priv->regs->tctrl);
                tempval |= TCTRL_VLINS;

                gfar_write(&priv->regs->tctrl, tempval);

                /* Enable VLAN tag extraction */
                tempval = gfar_read(&priv->regs->rctrl);
                tempval |= RCTRL_VLEX;
                gfar_write(&priv->regs->rctrl, tempval);
        } else {
                /* Disable VLAN tag insertion */
                tempval = gfar_read(&priv->regs->tctrl);
                tempval &= ~TCTRL_VLINS;
                gfar_write(&priv->regs->tctrl, tempval);

                /* Disable VLAN tag extraction */
                tempval = gfar_read(&priv->regs->rctrl);
                tempval &= ~RCTRL_VLEX;
                gfar_write(&priv->regs->rctrl, tempval);
        }

        spin_unlock_irqrestore(&priv->rxlock, flags);
}

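/* Recompute the rx buffer size for the new MTU, rounding up to the
 * next INCREMENTAL_BUFFER_SIZE boundary, and restart the controller
 * if the interface is up */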
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
        int tempsize, tempval;
        struct gfar_private *priv = netdev_priv(dev);
        int oldsize = priv->rx_buffer_size;
        int frame_size = new_mtu + ETH_HLEN;

        if (priv->vlan_enable)
                frame_size += VLAN_ETH_HLEN;

        if (gfar_uses_fcb(priv))
                frame_size += GMAC_FCB_LEN;

        frame_size += priv->padding;

        if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
                if (netif_msg_drv(priv))
                        printk(KERN_ERR "%s: Invalid MTU setting\n",
                                        dev->name);
                return -EINVAL;
        }

        tempsize =
            (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
            INCREMENTAL_BUFFER_SIZE;

        /* Only stop and start the controller if it isn't already
         * stopped, and we changed something */
        if ((oldsize != tempsize) && (dev->flags & IFF_UP))
                stop_gfar(dev);

        priv->rx_buffer_size = tempsize;

        dev->mtu = new_mtu;

        gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
        gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);

        /* If the mtu is larger than the max size for standard
         * ethernet frames (ie, a jumbo frame), then set maccfg2
         * to allow huge frames, and to check the length */
        tempval = gfar_read(&priv->regs->maccfg2);

        if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
                tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
        else
                tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

        gfar_write(&priv->regs->maccfg2, tempval);

        if ((oldsize != tempsize) && (dev->flags & IFF_UP))
                startup_gfar(dev);

        return 0;
}

/* gfar_timeout gets called when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem. */
static void gfar_timeout(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

        priv->stats.tx_errors++;

        if (dev->flags & IFF_UP) {
                stop_gfar(dev);
                startup_gfar(dev);
        }

        netif_schedule(dev);
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct gfar_private *priv = netdev_priv(dev);
        struct txbd8 *bdp;

        /* Clear IEVENT */
        gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);

        /* Lock priv */
        spin_lock(&priv->txlock);
        bdp = priv->dirty_tx;
        while ((bdp->status & TXBD_READY) == 0) {
                /* If dirty_tx and cur_tx are the same, then either the */
                /* ring is empty or full now (it could only be full in the beginning, */
                /* obviously).  If it is empty, we are done. */
                if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
                        break;

                priv->stats.tx_packets++;

                /* Deferred means some collisions occurred during transmit, */
                /* but we eventually sent the packet. */
                if (bdp->status & TXBD_DEF)
                        priv->stats.collisions++;

                /* Free the sk buffer associated with this TxBD */
                dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
                priv->tx_skbuff[priv->skb_dirtytx] = NULL;
                priv->skb_dirtytx =
                    (priv->skb_dirtytx +
                     1) & TX_RING_MOD_MASK(priv->tx_ring_size);

                /* update bdp to point at next bd in the ring (wrapping if necessary) */
                if (bdp->status & TXBD_WRAP)
                        bdp = priv->tx_bd_base;
                else
                        bdp++;

                /* Move dirty_tx to be the next bd */
                priv->dirty_tx = bdp;

                /* We freed a buffer, so now we can restart transmission */
                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
        } /* while ((bdp->status & TXBD_READY) == 0) */

        /* If we are coalescing the interrupts, reset the timer */
        /* Otherwise, clear it */
        if (priv->txcoalescing)
                gfar_write(&priv->regs->txic,
                           mk_ic_value(priv->txcount, priv->txtime));
        else
                gfar_write(&priv->regs->txic, 0);

        spin_unlock(&priv->txlock);

        return IRQ_HANDLED;
}

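/* Allocate a replacement rx skb, align its data buffer, map it for
 * DMA, and mark the buffer descriptor empty again */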
struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
{
        unsigned int alignamount;
        struct gfar_private *priv = netdev_priv(dev);
        struct sk_buff *skb = NULL;
        unsigned int timeout = SKB_ALLOC_TIMEOUT;

        /* We have to allocate the skb, so keep trying till we succeed */
        while ((!skb) && timeout--)
                skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);

        if (NULL == skb)
                return NULL;

        alignamount = RXBUF_ALIGNMENT -
                (((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1));

        /* We need the data buffer to be aligned properly.  We will reserve
         * as many bytes as needed to align the data properly
         */
        skb_reserve(skb, alignamount);

        bdp->bufPtr = dma_map_single(NULL, skb->data,
                        priv->rx_buffer_size, DMA_FROM_DEVICE);

        bdp->length = 0;

        /* Mark the buffer empty */
        eieio();
        bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);

        return skb;
}

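/* Translate the error bits from a receive buffer descriptor into
 * netdev and driver statistics */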
static inline void count_errors(unsigned short status, struct gfar_private *priv)
{
        struct net_device_stats *stats = &priv->stats;
        struct gfar_extra_stats *estats = &priv->extra_stats;

        /* If the packet was truncated, none of the other errors
         * matter */
        if (status & RXBD_TRUNCATED) {
                stats->rx_length_errors++;

                estats->rx_trunc++;

                return;
        }
        /* Count the errors, if there were any */
        if (status & (RXBD_LARGE | RXBD_SHORT)) {
                stats->rx_length_errors++;

                if (status & RXBD_LARGE)
                        estats->rx_large++;
                else
                        estats->rx_short++;
        }
        if (status & RXBD_NONOCTET) {
                stats->rx_frame_errors++;
                estats->rx_nonoctet++;
        }
        if (status & RXBD_CRCERR) {
                estats->rx_crcerr++;
                stats->rx_crc_errors++;
        }
        if (status & RXBD_OVERRUN) {
                estats->rx_overrun++;
                stats->rx_crc_errors++;
        }
}

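/* Receive interrupt handler: schedules NAPI polling when it is
 * enabled, otherwise cleans the rx ring directly in interrupt
 * context */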
irqreturn_t gfar_receive(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct gfar_private *priv = netdev_priv(dev);
#ifdef CONFIG_GFAR_NAPI
        u32 tempval;
#else
        unsigned long flags;
#endif

        /* Clear IEVENT, so rx interrupt isn't called again
         * because of this interrupt */
        gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);

        /* support NAPI */
#ifdef CONFIG_GFAR_NAPI
        if (netif_rx_schedule_prep(dev)) {
                tempval = gfar_read(&priv->regs->imask);
                tempval &= IMASK_RX_DISABLED;
                gfar_write(&priv->regs->imask, tempval);

                __netif_rx_schedule(dev);
        } else {
                if (netif_msg_rx_err(priv))
                        printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
                                dev->name, gfar_read(&priv->regs->ievent),
                                gfar_read(&priv->regs->imask));
        }
#else

        spin_lock_irqsave(&priv->rxlock, flags);
        gfar_clean_rx_ring(dev, priv->rx_ring_size);

        /* If we are coalescing interrupts, update the timer */
        /* Otherwise, clear it */
        if (priv->rxcoalescing)
                gfar_write(&priv->regs->rxic,
                           mk_ic_value(priv->rxcount, priv->rxtime));
        else
                gfar_write(&priv->regs->rxic, 0);

        spin_unlock_irqrestore(&priv->rxlock, flags);
#endif

        return IRQ_HANDLED;
}

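/* Hand a VLAN-tagged frame to the stack through the appropriate
 * hardware-accelerated VLAN receive path */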
static inline int gfar_rx_vlan(struct sk_buff *skb,
                struct vlan_group *vlgrp, unsigned short vlctl)
{
#ifdef CONFIG_GFAR_NAPI
        return vlan_hwaccel_receive_skb(skb, vlgrp, vlctl);
#else
        return vlan_hwaccel_rx(skb, vlgrp, vlctl);
#endif
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
        /* If valid headers were found, and valid sums
         * were verified, then we tell the kernel that no
         * checksumming is necessary.  Otherwise, it is */
        if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb->ip_summed = CHECKSUM_NONE;
}


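/* Strip the Frame Control Block from the front of the skb and
 * return a pointer to it */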
static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
{
        struct rxfcb *fcb = (struct rxfcb *)skb->data;

        /* Remove the FCB from the skb */
        skb_pull(skb, GMAC_FCB_LEN);

        return fcb;
}

/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL.  */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
                int length)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct rxfcb *fcb = NULL;

        if (NULL == skb) {
                if (netif_msg_rx_err(priv))
                        printk(KERN_WARNING "%s: Missing skb!\n", dev->name);
1430                 priv->stats.rx_dropped++;
1431                 priv->extra_stats.rx_skbmissing++;
1432         } else {
1433                 int ret;
1434
1435                 /* Prep the skb for the packet */
1436                 skb_put(skb, length);
1437
1438                 /* Grab the FCB if there is one */
1439                 if (gfar_uses_fcb(priv))
1440                         fcb = gfar_get_fcb(skb);
1441
1442                 /* Remove the padded bytes, if there are any */
1443                 if (priv->padding)
1444                         skb_pull(skb, priv->padding);
1445
1446                 if (priv->rx_csum_enable)
1447                         gfar_rx_checksum(skb, fcb);
1448
1449                 /* Tell the skb what kind of packet this is */
1450                 skb->protocol = eth_type_trans(skb, dev);
1451
1452                 /* Send the packet up the stack */
1453                 if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
1454                         ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl);
1455                 else
1456                         ret = RECEIVE(skb);
1457
1458                 if (NET_RX_DROP == ret)
1459                         priv->extra_stats.kernel_dropped++;
1460         }
1461
1462         return 0;
1463 }

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 *   until the budget/quota has been reached. Returns the number
 *   of frames handled
 */
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
{
        struct rxbd8 *bdp;
        struct sk_buff *skb;
        u16 pkt_len;
        int howmany = 0;
        struct gfar_private *priv = netdev_priv(dev);

        /* Get the first full descriptor */
        bdp = priv->cur_rx;

        while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
                rmb();
                skb = priv->rx_skbuff[priv->skb_currx];

                if (!(bdp->status &
                      (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
                       | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) {
                        /* Increment the number of packets */
                        priv->stats.rx_packets++;
                        howmany++;

                        /* Remove the FCS from the packet length */
                        pkt_len = bdp->length - 4;

                        gfar_process_frame(dev, skb, pkt_len);

                        priv->stats.rx_bytes += pkt_len;
                } else {
                        count_errors(bdp->status, priv);

                        if (skb)
                                dev_kfree_skb_any(skb);

                        priv->rx_skbuff[priv->skb_currx] = NULL;
                }

                dev->last_rx = jiffies;

                /* Clear the status flags for this buffer */
                bdp->status &= ~RXBD_STATS;

                /* Add another skb for the future */
                skb = gfar_new_skb(dev, bdp);
                priv->rx_skbuff[priv->skb_currx] = skb;

                /* Update to the next pointer */
                if (bdp->status & RXBD_WRAP)
                        bdp = priv->rx_bd_base;
                else
                        bdp++;

                /* update to point at the next skb */
                priv->skb_currx =
                    (priv->skb_currx + 1) & RX_RING_MOD_MASK(priv->rx_ring_size);
        }

        /* Update the current rxbd pointer to be the next one */
        priv->cur_rx = bdp;

        return howmany;
}
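
/* Editor's worked example, not original driver text: the ring size is
 * a power of two and RX_RING_MOD_MASK(size) is (size - 1), so the AND
 * above is a cheap modulo.  With rx_ring_size = 256:
 *
 *     (42 + 1)  & 0xff == 43    advance to the next skb slot
 *     (255 + 1) & 0xff == 0     wrap back to the first skb slot
 */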

#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct net_device *dev, int *budget)
{
        int howmany;
        struct gfar_private *priv = netdev_priv(dev);
        int rx_work_limit = *budget;

        if (rx_work_limit > dev->quota)
                rx_work_limit = dev->quota;

        howmany = gfar_clean_rx_ring(dev, rx_work_limit);

        dev->quota -= howmany;
        rx_work_limit -= howmany;
        *budget -= howmany;

        if (rx_work_limit > 0) {
                netif_rx_complete(dev);

                /* Clear the halt bit in RSTAT */
                gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);

                gfar_write(&priv->regs->imask, IMASK_DEFAULT);

                /* If we are coalescing interrupts, update the timer */
                /* Otherwise, clear it */
                if (priv->rxcoalescing)
                        gfar_write(&priv->regs->rxic,
                                   mk_ic_value(priv->rxcount, priv->rxtime));
                else
                        gfar_write(&priv->regs->rxic, 0);
        }

        /* Return 1 if there's more work to do */
        return (rx_work_limit > 0) ? 0 : 1;
}
#endif
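
/* Editor's worked example, not original driver text: in the old
 * two-counter NAPI API used here, *budget is the global quota and
 * dev->quota the per-device one.  Say *budget = 64 and the ring held
 * 10 frames: gfar_clean_rx_ring() returns 10, both counters drop by
 * 10, rx_work_limit ends at 54 > 0, so the poll completes, RX is
 * restarted, interrupts are unmasked, and 0 is returned.  Only when
 * the limit is exhausted does gfar_poll() return 1 so it gets polled
 * again.
 */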

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

        /* If the device has multiple interrupts, run tx/rx */
        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                disable_irq(priv->interruptTransmit);
                disable_irq(priv->interruptReceive);
                disable_irq(priv->interruptError);
                gfar_interrupt(priv->interruptTransmit, dev);
                enable_irq(priv->interruptError);
                enable_irq(priv->interruptReceive);
                enable_irq(priv->interruptTransmit);
        } else {
                disable_irq(priv->interruptTransmit);
                gfar_interrupt(priv->interruptTransmit, dev);
                enable_irq(priv->interruptTransmit);
        }
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct gfar_private *priv = netdev_priv(dev);

        /* Save ievent for future reference */
        u32 events = gfar_read(&priv->regs->ievent);

        /* Check for reception */
        if (events & IEVENT_RX_MASK)
                gfar_receive(irq, dev_id);

        /* Check for transmit completion */
        if (events & IEVENT_TX_MASK)
                gfar_transmit(irq, dev_id);

        /* Check for errors */
        if (events & IEVENT_ERR_MASK)
                gfar_error(irq, dev_id);

        return IRQ_HANDLED;
}
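
/* Editor's note, not original driver text: gfar_interrupt() itself
 * never writes IEVENT; each handler it dispatches to acks only its
 * own cause bits (gfar_error(), for instance, writes IEVENT_ERR_MASK
 * below).  Since IEVENT is write-one-to-clear, events of the other
 * classes stay pending across each handler's ack.
 */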

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        unsigned long flags;
        struct phy_device *phydev = priv->phydev;
        int new_state = 0;

        spin_lock_irqsave(&priv->txlock, flags);
        if (phydev->link) {
                u32 tempval = gfar_read(&regs->maccfg2);
                u32 ecntrl = gfar_read(&regs->ecntrl);

                /* Now we make sure that we can be in full duplex mode.
                 * If not, we operate in half-duplex mode. */
                if (phydev->duplex != priv->oldduplex) {
                        new_state = 1;
                        if (!(phydev->duplex))
                                tempval &= ~(MACCFG2_FULL_DUPLEX);
                        else
                                tempval |= MACCFG2_FULL_DUPLEX;

                        priv->oldduplex = phydev->duplex;
                }

                if (phydev->speed != priv->oldspeed) {
                        new_state = 1;
                        switch (phydev->speed) {
                        case SPEED_1000:
                                tempval =
                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
                                break;
                        case SPEED_100:
                        case SPEED_10:
                                tempval =
                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

                                /* Reduced mode distinguishes
                                 * between 10 and 100 */
                                if (phydev->speed == SPEED_100)
                                        ecntrl |= ECNTRL_R100;
                                else
                                        ecntrl &= ~(ECNTRL_R100);
                                break;
                        default:
                                if (netif_msg_link(priv))
                                        printk(KERN_WARNING
                                                "%s: Ack!  Speed (%d) is not 10/100/1000!\n",
                                                dev->name, phydev->speed);
                                break;
                        }

                        priv->oldspeed = phydev->speed;
                }

                gfar_write(&regs->maccfg2, tempval);
                gfar_write(&regs->ecntrl, ecntrl);

                if (!priv->oldlink) {
                        new_state = 1;
                        priv->oldlink = 1;
                        netif_schedule(dev);
                }
        } else if (priv->oldlink) {
                new_state = 1;
                priv->oldlink = 0;
                priv->oldspeed = 0;
                priv->oldduplex = -1;
        }

        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);

        spin_unlock_irqrestore(&priv->txlock, flags);
}
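
/* Editor's sketch of the read-modify-write idiom adjust_link() uses
 * on the MACCFG2 interface-mode field: clear the whole field, then OR
 * in the new value.  The helper name is illustrative only; the driver
 * open-codes this pattern.
 */
static inline u32 gfar_field_replace(u32 reg, u32 field_mask, u32 new_bits)
{
        /* drop the old field contents, keep every other bit */
        reg &= ~field_mask;
        /* install the new field contents */
        return reg | (new_bits & field_mask);
}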

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed) */
static void gfar_set_multi(struct net_device *dev)
{
        struct dev_mc_list *mc_ptr;
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        u32 tempval;

        if (dev->flags & IFF_PROMISC) {
                /* Set RCTRL to PROM */
                tempval = gfar_read(&regs->rctrl);
                tempval |= RCTRL_PROM;
                gfar_write(&regs->rctrl, tempval);
        } else {
                /* Set RCTRL to not PROM */
                tempval = gfar_read(&regs->rctrl);
                tempval &= ~(RCTRL_PROM);
                gfar_write(&regs->rctrl, tempval);
        }

        if (dev->flags & IFF_ALLMULTI) {
                /* Set the hash to rx all multicast frames */
                gfar_write(&regs->igaddr0, 0xffffffff);
                gfar_write(&regs->igaddr1, 0xffffffff);
                gfar_write(&regs->igaddr2, 0xffffffff);
                gfar_write(&regs->igaddr3, 0xffffffff);
                gfar_write(&regs->igaddr4, 0xffffffff);
                gfar_write(&regs->igaddr5, 0xffffffff);
                gfar_write(&regs->igaddr6, 0xffffffff);
                gfar_write(&regs->igaddr7, 0xffffffff);
                gfar_write(&regs->gaddr0, 0xffffffff);
                gfar_write(&regs->gaddr1, 0xffffffff);
                gfar_write(&regs->gaddr2, 0xffffffff);
                gfar_write(&regs->gaddr3, 0xffffffff);
                gfar_write(&regs->gaddr4, 0xffffffff);
                gfar_write(&regs->gaddr5, 0xffffffff);
                gfar_write(&regs->gaddr6, 0xffffffff);
                gfar_write(&regs->gaddr7, 0xffffffff);
        } else {
                int em_num;
                int idx;

                /* zero out the hash */
                gfar_write(&regs->igaddr0, 0x0);
                gfar_write(&regs->igaddr1, 0x0);
                gfar_write(&regs->igaddr2, 0x0);
                gfar_write(&regs->igaddr3, 0x0);
                gfar_write(&regs->igaddr4, 0x0);
                gfar_write(&regs->igaddr5, 0x0);
                gfar_write(&regs->igaddr6, 0x0);
                gfar_write(&regs->igaddr7, 0x0);
                gfar_write(&regs->gaddr0, 0x0);
                gfar_write(&regs->gaddr1, 0x0);
                gfar_write(&regs->gaddr2, 0x0);
                gfar_write(&regs->gaddr3, 0x0);
                gfar_write(&regs->gaddr4, 0x0);
                gfar_write(&regs->gaddr5, 0x0);
                gfar_write(&regs->gaddr6, 0x0);
                gfar_write(&regs->gaddr7, 0x0);

                /* If we have extended hash tables, we need to
                 * clear the exact match registers to prepare for
                 * setting them */
                if (priv->extended_hash) {
                        em_num = GFAR_EM_NUM + 1;
                        gfar_clear_exact_match(dev);
                        idx = 1;
                } else {
                        idx = 0;
                        em_num = 0;
                }

                if (dev->mc_count == 0)
                        return;

                /* Parse the list, and set the appropriate bits */
                for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
                        if (idx < em_num) {
                                gfar_set_mac_for_addr(dev, idx,
                                                mc_ptr->dmi_addr);
                                idx++;
                        } else
                                gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
                }
        }
}
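
/* Editor's note, not original driver text: on parts with the extended
 * hash, the first GFAR_EM_NUM multicast addresses land in exact-match
 * registers (idx starts at 1 because register pair 0 holds the
 * station's own MAC address), and only the overflow falls back to the
 * imprecise group-hash filter set up above.
 */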

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
        int idx;
        u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

        for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
                gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
}

/* Set the appropriate hash bit for the given addr.
 *
 * The algorithm works like so:
 * 1) Take the Destination Address (i.e. the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits of the
 * hash index indicate which gaddr register to use, and the 5 other
 * bits indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
        u32 tempval;
        struct gfar_private *priv = netdev_priv(dev);
        u32 result = ether_crc(MAC_ADDR_LEN, addr);
        int width = priv->hash_width;
        u8 whichbit = (result >> (32 - width)) & 0x1f;
        u8 whichreg = result >> (32 - width + 5);
        u32 value = (1 << (31 - whichbit));

        tempval = gfar_read(priv->hash_regs[whichreg]);
        tempval |= value;
        gfar_write(priv->hash_regs[whichreg], tempval);
}
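
/* Editor's worked example, not original driver text: assume an 8-bit
 * hash_width and ether_crc() returning 0xE5xxxxxx for some address.
 * The hash value is the top byte, 0xE5 = 0b11100101 = entry 229:
 *
 *     whichreg = 0xE5 >> 5     = 7            (top 3 bits)
 *     whichbit = 0xE5 & 0x1f   = 5            (low 5 bits)
 *     value    = 1 << (31 - 5) = 0x04000000   (IBM bit 5 of hash_regs[7])
 *
 * i.e. entry 229 maps to register 229 / 32, bit 229 % 32, counted
 * from the register's MSB.
 */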

/* There are multiple MAC Address register pairs on some controllers
 * This function sets the num'th pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
{
        struct gfar_private *priv = netdev_priv(dev);
        int idx;
        /* 8 bytes, not MAC_ADDR_LEN: the second register write below
         * reads bytes 4-7, so the buffer must cover them (the last
         * two stay zero) */
        char tmpbuf[2 * sizeof(u32)] = { 0 };
        u32 tempval;
        u32 __iomem *macptr = &priv->regs->macstnaddr1;

        macptr += num*2;

        /* Copy the address into the mac registers backwards, since
         * the hardware wants the bytes in little-endian order */
        for (idx = 0; idx < MAC_ADDR_LEN; idx++)
                tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

        gfar_write(macptr, *((u32 *) (tmpbuf)));

        tempval = *((u32 *) (tmpbuf + 4));

        gfar_write(macptr+1, tempval);
}
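
/* Editor's worked example, not original driver text (illustrative
 * address, big-endian CPU as on the 85xx/83xx): for
 * addr = 00:04:9f:02:03:04 the loop above builds
 *
 *     tmpbuf[] = { 04, 03, 02, 9f, 04, 00, 00, 00 }
 *
 * so pair num gets macptr[0] = 0x0403029f and macptr[1] = 0x04000000,
 * with the two zero pad bytes filling the unused low half of the
 * second register.
 */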

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct gfar_private *priv = netdev_priv(dev);

        /* Save ievent for future reference */
        u32 events = gfar_read(&priv->regs->ievent);

        /* Clear IEVENT */
        gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK);

        /* Report the raw event and mask values if error messaging is on */
        if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
                printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
                       dev->name, events, gfar_read(&priv->regs->imask));

        /* Update the error counters */
        if (events & IEVENT_TXE) {
                priv->stats.tx_errors++;

                if (events & IEVENT_LC)
                        priv->stats.tx_window_errors++;
                if (events & IEVENT_CRL)
                        priv->stats.tx_aborted_errors++;
                if (events & IEVENT_XFUN) {
                        if (netif_msg_tx_err(priv))
                                printk(KERN_DEBUG "%s: TX FIFO underrun, "
                                       "packet dropped.\n", dev->name);
                        priv->stats.tx_dropped++;
                        priv->extra_stats.tx_underrun++;

                        /* Reactivate the Tx Queues */
                        gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
                }
                if (netif_msg_tx_err(priv))
                        printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
        }
        if (events & IEVENT_BSY) {
                priv->stats.rx_errors++;
                priv->extra_stats.rx_bsy++;

                gfar_receive(irq, dev_id);

#ifndef CONFIG_GFAR_NAPI
                /* Clear the halt bit in RSTAT */
                gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
#endif

                if (netif_msg_rx_err(priv))
                        printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
                               dev->name, gfar_read(&priv->regs->rstat));
        }
        if (events & IEVENT_BABR) {
                priv->stats.rx_errors++;
                priv->extra_stats.rx_babr++;

                if (netif_msg_rx_err(priv))
                        printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
        }
        if (events & IEVENT_EBERR) {
                priv->extra_stats.eberr++;
                if (netif_msg_rx_err(priv))
                        printk(KERN_DEBUG "%s: bus error\n", dev->name);
        }
        if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
                printk(KERN_DEBUG "%s: control frame\n", dev->name);

        if (events & IEVENT_BABT) {
                priv->extra_stats.tx_babt++;
                if (netif_msg_tx_err(priv))
                        printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
        }
        return IRQ_HANDLED;
}

/* The platform driver; the platform bus binds it to devices
 * registered under the name "fsl-gianfar" */
static struct platform_driver gfar_driver = {
        .probe = gfar_probe,
        .remove = gfar_remove,
        .driver = {
                .name = "fsl-gianfar",
        },
};

static int __init gfar_init(void)
{
        int err = gfar_mdio_init();

        if (err)
                return err;

        err = platform_driver_register(&gfar_driver);

        if (err)
                gfar_mdio_exit();

        return err;
}

static void __exit gfar_exit(void)
{
        platform_driver_unregister(&gfar_driver);
        gfar_mdio_exit();
}

module_init(gfar_init);
module_exit(gfar_exit);