2 * drivers/net/ibm_newemac/core.c
4 * Driver for PowerPC 4xx on-chip ethernet controller.
6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
9 * Based on original work by
10 * Matt Porter <mporter@kernel.crashing.org>
11 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
12 * Armin Kuster <akuster@mvista.com>
13 * Johnnie Peters <jpeters@mvista.com>
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
22 #include <linux/sched.h>
23 #include <linux/string.h>
24 #include <linux/errno.h>
25 #include <linux/delay.h>
26 #include <linux/types.h>
27 #include <linux/pci.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/crc32.h>
31 #include <linux/ethtool.h>
32 #include <linux/mii.h>
33 #include <linux/bitops.h>
34 #include <linux/workqueue.h>
36 #include <asm/processor.h>
39 #include <asm/uaccess.h>
44 * Lack of dma_unmap_???? calls is intentional.
46 * API-correct usage requires additional support state information to be
47 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
48 * EMAC design (e.g. TX buffer passed from network stack can be split into
49 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
50 * maintaining such information will add additional overhead.
51 * Current DMA API implementation for 4xx processors only ensures cache coherency
52 * and dma_unmap_???? routines are empty and are likely to stay this way.
53 * I decided to omit dma_unmap_??? calls because I don't want to add additional
54 * complexity just for the sake of following some abstract API, when it doesn't
55 * add any real benefit to the driver. I understand that this decision may be
56 * controversial, but I really tried to make code API-correct and efficient
57 * at the same time and didn't come up with code I liked :(. --ebs
/* Driver identification strings, exported through modinfo and log output. */
60 #define DRV_NAME "emac"
61 #define DRV_VERSION "3.54"
62 #define DRV_DESC "PPC 4xx OCP EMAC driver"
64 MODULE_DESCRIPTION(DRV_DESC);
/* NOTE(review): the line below looks like the argument list of a
 * MODULE_AUTHOR() invocation whose opening line is not visible in this
 * chunk -- confirm against the full file. */
66 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
67 MODULE_LICENSE("GPL");
/* Fallback: map cacheable_memcpy() to plain memcpy() where the
 * optimized routine is unavailable. */
70 * PPC64 doesn't (yet) have a cacheable_memcpy
73 #define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
76 /* minimum number of free TX descriptors required to wake up TX process */
77 #define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
79 /* If packet size is less than this number, we allocate small skb and copy packet
80 * contents into it instead of just sending original big skb up
82 #define EMAC_RX_COPY_THRESH CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
84 /* Since multiple EMACs share MDIO lines in various ways, we need
85 * to avoid re-using the same PHY ID in cases where the arch didn't
86 * setup precise phy_map entries
88 * XXX This is something that needs to be reworked as we can have multiple
89 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
90 * probably require in that case to have explicit PHY IDs in the device-tree
/* Bitmap of PHY addresses already claimed by an EMAC instance;
 * guarded by emac_phy_map_lock. */
92 static u32 busy_phy_map;
93 static DEFINE_MUTEX(emac_phy_map_lock);
95 /* This is the wait queue used to wait on any event related to probe, that
96 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
98 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
100 /* Having stable interface names is a doomed idea. However, it would be nice
101 * if we didn't have completely random interface names at boot too :-) It's
102 * just a matter of making everybody's life easier. Since we are doing
103 * threaded probing, it's a bit harder though. The base idea here is that
104 * we make up a list of all emacs in the device-tree before we register the
105 * driver. Every emac will then wait for the previous one in the list to
106 * initialize before itself. We should also keep that list ordered by
108 * That list is only 4 entries long, meaning that additional EMACs don't
109 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
112 #define EMAC_BOOT_LIST_SIZE 4
113 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
115 /* How long should I wait for dependent devices ? */
116 #define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
118 /* I don't want to litter system log with timeout errors
119 * when we have brain-damaged PHY.
/* Log a hardware-timeout error for this instance.
 * NOTE(review): the parameter line declaring the error-string argument
 * is not visible in this chunk; presumably `const char *error` -- confirm. */
121 static inline void emac_report_timeout_error(struct emac_instance *dev,
125 printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
128 /* PHY polling intervals */
129 #define PHY_POLL_LINK_ON HZ
130 #define PHY_POLL_LINK_OFF (HZ / 5)
132 /* Graceful stop timeouts in us.
133 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
/* Per-speed wait budget for emac_tx_disable()/emac_rx_disable():
 * slower links need longer to drain one in-flight frame. */
135 #define STOP_TIMEOUT_10 1230
136 #define STOP_TIMEOUT_100 124
137 #define STOP_TIMEOUT_1000 13
138 #define STOP_TIMEOUT_1000_JUMBO 73
140 /* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
/* ethtool gstrings table; entry order is ABI for `ethtool -S`.
 * NOTE(review): "tx_bd_multple_collisions" is misspelled but must stay
 * as-is -- userspace tools may already match this exact string. */
141 static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
142 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
143 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
144 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
145 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
146 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
147 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
148 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
149 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
150 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
151 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
152 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
153 "tx_bd_excessive_collisions", "tx_bd_late_collision",
154 "tx_bd_multple_collisions", "tx_bd_single_collision",
155 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
/* Forward declarations for routines referenced before their definitions. */
159 static irqreturn_t emac_irq(int irq, void *dev_instance);
160 static void emac_clean_tx_ring(struct emac_instance *dev);
161 static void __emac_set_multicast_list(struct emac_instance *dev);
163 static inline int emac_phy_supports_gige(int phy_mode)
165 return phy_mode == PHY_MODE_GMII ||
166 phy_mode == PHY_MODE_RGMII ||
167 phy_mode == PHY_MODE_TBI ||
168 phy_mode == PHY_MODE_RTBI;
171 static inline int emac_phy_gpcs(int phy_mode)
173 return phy_mode == PHY_MODE_TBI ||
174 phy_mode == PHY_MODE_RTBI;
177 static inline void emac_tx_enable(struct emac_instance *dev)
179 struct emac_regs __iomem *p = dev->emacp;
182 DBG(dev, "tx_enable" NL);
184 r = in_be32(&p->mr0);
185 if (!(r & EMAC_MR0_TXE))
186 out_be32(&p->mr0, r | EMAC_MR0_TXE);
/* Disable the EMAC transmitter: clear MR0[TXE], then busy-wait up to
 * dev->stop_timeout iterations for the hardware TX-idle bit (MR0[TXI]);
 * logs a timeout error if the MAC never goes idle. */
189 static void emac_tx_disable(struct emac_instance *dev)
191 struct emac_regs __iomem *p = dev->emacp;
194 DBG(dev, "tx_disable" NL);
196 r = in_be32(&p->mr0);
197 if (r & EMAC_MR0_TXE) {
198 int n = dev->stop_timeout;
199 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
/* Poll for TXI (transmit idle) set by hardware once drained. */
200 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
205 emac_report_timeout_error(dev, "TX disable timeout");
/* Enable the EMAC receiver unless the MAL channel is administratively
 * stopped (MAL_COMMAC_RX_STOPPED). If an asynchronous RX disable is still
 * draining (MR0[RXI] clear), wait for it to finish first. */
209 static void emac_rx_enable(struct emac_instance *dev)
211 struct emac_regs __iomem *p = dev->emacp;
214 if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
217 DBG(dev, "rx_enable" NL);
219 r = in_be32(&p->mr0);
220 if (!(r & EMAC_MR0_RXE)) {
221 if (unlikely(!(r & EMAC_MR0_RXI))) {
222 /* Wait if previous async disable is still in progress */
223 int n = dev->stop_timeout;
/* NOTE(review): `!(r = in_be32(&p->mr0) & EMAC_MR0_RXI)` binds as
 * r = (value & RXI) because & has higher precedence than =, so r is
 * clobbered with just the masked bit before being OR-ed with RXE
 * below -- verify against the full file whether this is intended. */
224 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
229 emac_report_timeout_error(dev,
230 "RX disable timeout");
232 out_be32(&p->mr0, r | EMAC_MR0_RXE);
/* Disable the EMAC receiver synchronously: clear MR0[RXE] and busy-wait
 * up to dev->stop_timeout iterations for MR0[RXI] (receive idle). */
238 static void emac_rx_disable(struct emac_instance *dev)
240 struct emac_regs __iomem *p = dev->emacp;
243 DBG(dev, "rx_disable" NL);
245 r = in_be32(&p->mr0);
246 if (r & EMAC_MR0_RXE) {
247 int n = dev->stop_timeout;
248 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
249 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
254 emac_report_timeout_error(dev, "RX disable timeout");
/* Quiesce the network interface: take the TX lock to fence out
 * in-flight transmitters, refresh trans_start so the watchdog does not
 * fire, stop NAPI polling, then stop the TX queue. */
258 static inline void emac_netif_stop(struct emac_instance *dev)
260 netif_tx_lock_bh(dev->ndev);
262 netif_tx_unlock_bh(dev->ndev);
263 dev->ndev->trans_start = jiffies; /* prevent tx timeout */
264 mal_poll_disable(dev->mal, &dev->commac);
265 netif_tx_disable(dev->ndev);
/* Counterpart of emac_netif_stop(): flush any multicast update that was
 * deferred while the interface was quiesced, wake the TX queue and
 * re-enable NAPI polling. */
268 static inline void emac_netif_start(struct emac_instance *dev)
270 netif_tx_lock_bh(dev->ndev);
272 if (dev->mcast_pending && netif_running(dev->ndev))
273 __emac_set_multicast_list(dev);
274 netif_tx_unlock_bh(dev->ndev);
276 netif_wake_queue(dev->ndev);
278 /* NOTE: unconditional netif_wake_queue is only appropriate
279 * so long as all callers are assured to have free tx slots
280 * (taken from tg3... though the case where that is wrong is
281 * not terribly harmful)
283 mal_poll_enable(dev->mal, &dev->commac);
/* Fire-and-forget receiver disable: clear MR0[RXE] without waiting for
 * RX idle. emac_rx_enable() later waits for the drain to complete. */
286 static inline void emac_rx_disable_async(struct emac_instance *dev)
288 struct emac_regs __iomem *p = dev->emacp;
291 DBG(dev, "rx_disable_async" NL);
293 r = in_be32(&p->mr0);
294 if (r & EMAC_MR0_RXE)
295 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
/* Soft-reset the EMAC core via MR0[SRST] and poll for the bit to
 * self-clear. Tracks success in dev->reset_failed so a previously failed
 * reset skips the RX/TX quiesce step on retry. */
298 static int emac_reset(struct emac_instance *dev)
300 struct emac_regs __iomem *p = dev->emacp;
303 DBG(dev, "reset" NL);
305 if (!dev->reset_failed) {
306 /* 40x erratum suggests stopping RX channel before reset,
309 emac_rx_disable(dev);
310 emac_tx_disable(dev);
/* SRST self-clears when the internal reset sequence completes. */
313 out_be32(&p->mr0, EMAC_MR0_SRST);
314 while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
318 dev->reset_failed = 0;
321 emac_report_timeout_error(dev, "reset timeout");
322 dev->reset_failed = 1;
/* Program the four 16-bit group-address hash table registers (GAHT1-4)
 * from the device multicast list. The hash index is derived from the top
 * 6 bits of the Ethernet CRC of each multicast address. */
327 static void emac_hash_mc(struct emac_instance *dev)
329 struct emac_regs __iomem *p = dev->emacp;
331 struct dev_mc_list *dmi;
333 DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);
335 for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
337 DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
338 dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
339 dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);
/* 64 hash bins: bit index = 63 - CRC[31:26]; bin b sets bit (15 - b%16)
 * of register b/16. */
341 bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
342 gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
344 out_be32(&p->gaht1, gaht[0]);
345 out_be32(&p->gaht2, gaht[1]);
346 out_be32(&p->gaht3, gaht[2]);
347 out_be32(&p->gaht4, gaht[3]);
/* Translate the net_device IFF_* flags into an EMAC receive-mode
 * register (RMR) value: base bits plus promiscuous / multicast-all /
 * multicast-hash selection. */
350 static inline u32 emac_iff2rmr(struct net_device *ndev)
352 struct emac_instance *dev = netdev_priv(ndev);
355 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
357 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
362 if (ndev->flags & IFF_PROMISC)
/* More than 32 groups: cheaper to accept all multicast than hash. */
364 else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
366 else if (ndev->mc_count > 0)
372 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
374 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
376 DBG2(dev, "__emac_calc_base_mr1" NL);
380 ret |= EMAC_MR1_TFS_2K;
383 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
384 dev->ndev->name, tx_size);
389 ret |= EMAC_MR1_RFS_16K;
392 ret |= EMAC_MR1_RFS_4K;
395 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
396 dev->ndev->name, rx_size);
402 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
404 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
405 EMAC4_MR1_OBCI(dev->opb_bus_freq);
407 DBG2(dev, "__emac4_calc_base_mr1" NL);
411 ret |= EMAC4_MR1_TFS_4K;
414 ret |= EMAC4_MR1_TFS_2K;
417 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
418 dev->ndev->name, tx_size);
423 ret |= EMAC4_MR1_RFS_16K;
426 ret |= EMAC4_MR1_RFS_4K;
429 ret |= EMAC4_MR1_RFS_2K;
432 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
433 dev->ndev->name, rx_size);
439 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
441 return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
442 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
443 __emac_calc_base_mr1(dev, tx_size, rx_size);
446 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
448 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
449 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
451 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
454 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
455 unsigned int low, unsigned int high)
457 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
458 return (low << 22) | ( (high & 0x3ff) << 6);
460 return (low << 23) | ( (high & 0x1ff) << 7);
/* Full hardware (re)configuration after a core reset: programs MR1 from
 * the negotiated PHY speed/duplex, MAC address, VLAN TPID, receive mode,
 * FIFO thresholds, PAUSE water marks and the interrupt enable mask. */
463 static int emac_configure(struct emac_instance *dev)
465 struct emac_regs __iomem *p = dev->emacp;
466 struct net_device *ndev = dev->ndev;
467 int tx_size, rx_size;
470 DBG(dev, "configure" NL);
472 if (emac_reset(dev) < 0)
475 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
476 tah_reset(dev->tah_dev);
478 DBG(dev, " duplex = %d, pause = %d, asym_pause = %d\n",
479 dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);
481 /* Default fifo sizes */
482 tx_size = dev->tx_fifo_size;
483 rx_size = dev->rx_fifo_size;
485 /* Check for full duplex */
486 if (dev->phy.duplex == DUPLEX_FULL)
487 mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
489 /* Adjust fifo sizes, mr1 and timeouts based on link speed */
490 dev->stop_timeout = STOP_TIMEOUT_10;
491 switch (dev->phy.speed) {
493 if (emac_phy_gpcs(dev->phy.mode)) {
494 mr1 |= EMAC_MR1_MF_1000GPCS |
495 EMAC_MR1_MF_IPPA(dev->phy.address);
497 /* Put some arbitrary OUI, Manuf & Rev IDs so we can
498 * identify this GPCS PHY later.
500 out_be32(&p->ipcr, 0xdeadbeef);
502 mr1 |= EMAC_MR1_MF_1000;
504 /* Extended fifo sizes */
505 tx_size = dev->tx_fifo_size_gige;
506 rx_size = dev->rx_fifo_size_gige;
508 if (dev->ndev->mtu > ETH_DATA_LEN) {
509 mr1 |= EMAC_MR1_JPSM;
510 dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
512 dev->stop_timeout = STOP_TIMEOUT_1000;
515 mr1 |= EMAC_MR1_MF_100;
516 dev->stop_timeout = STOP_TIMEOUT_100;
518 default: /* make gcc happy */
522 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
523 rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
525 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
526 zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
528 /* on 40x erratum forces us to NOT use integrated flow control,
529 * let's hope it works on 44x ;)
531 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
532 dev->phy.duplex == DUPLEX_FULL) {
534 mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
535 else if (dev->phy.asym_pause)
539 /* Add base settings & fifo sizes & program MR1 */
540 mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
541 out_be32(&p->mr1, mr1);
543 /* Set individual MAC address */
544 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
545 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
546 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
549 /* VLAN Tag Protocol ID */
550 out_be32(&p->vtpid, 0x8100);
552 /* Receive mode register */
553 r = emac_iff2rmr(ndev);
554 if (r & EMAC_RMR_MAE)
556 out_be32(&p->rmr, r);
558 /* FIFOs thresholds */
559 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
560 r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
561 tx_size / 2 / dev->fifo_entry_size);
563 r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
564 tx_size / 2 / dev->fifo_entry_size);
565 out_be32(&p->tmr1, r);
566 out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
568 /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
569 there should be still enough space in FIFO to allow our link
570 partner time to process this frame and also time to send PAUSE
573 Here is the worst case scenario for the RX FIFO "headroom"
574 (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
576 1) One maximum-length frame on TX 1522 bytes
577 2) One PAUSE frame time 64 bytes
578 3) PAUSE frame decode time allowance 64 bytes
579 4) One maximum-length frame on RX 1522 bytes
580 5) Round-trip propagation delay of the link (100Mb) 15 bytes
584 I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
585 low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
587 r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
588 rx_size / 4 / dev->fifo_entry_size);
589 out_be32(&p->rwmr, r);
591 /* Set PAUSE timer to the maximum */
592 out_be32(&p->ptr, 0xffff);
/* Error/event interrupt enable mask. */
595 r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
596 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
597 EMAC_ISR_IRE | EMAC_ISR_TE;
598 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
599 r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
601 out_be32(&p->iser, r);
603 /* We need to take GPCS PHY out of isolate mode after EMAC reset */
604 if (emac_phy_gpcs(dev->phy.mode))
605 emac_mii_reset_phy(&dev->phy);
/* Stop the interface, reprogram the hardware via emac_configure(), and
 * restart the interface only if configuration succeeded. */
610 static void emac_reinitialize(struct emac_instance *dev)
612 DBG(dev, "reinitialize" NL);
614 emac_netif_stop(dev);
615 if (!emac_configure(dev)) {
619 emac_netif_start(dev);
/* Hard recovery of the transmit path: disable TX, stop the MAL channel,
 * drop everything queued in the TX ring, reset ring indices, then bring
 * the channel back up. */
622 static void emac_full_tx_reset(struct emac_instance *dev)
624 DBG(dev, "full_tx_reset" NL);
626 emac_tx_disable(dev);
627 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
628 emac_clean_tx_ring(dev);
629 dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
633 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
/* Deferred reset handler scheduled from emac_tx_timeout(); performs the
 * full TX reset under link_lock in process context. */
638 static void emac_reset_work(struct work_struct *work)
640 struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
642 DBG(dev, "reset_work" NL);
644 mutex_lock(&dev->link_lock);
646 emac_netif_stop(dev);
647 emac_full_tx_reset(dev);
648 emac_netif_start(dev);
650 mutex_unlock(&dev->link_lock);
/* net_device watchdog callback: runs in atomic context, so defer the
 * actual recovery to emac_reset_work(). */
653 static void emac_tx_timeout(struct net_device *ndev)
655 struct emac_instance *dev = netdev_priv(ndev);
657 DBG(dev, "tx_timeout" NL);
659 schedule_work(&dev->reset_work);
663 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
665 int done = !!(stacr & EMAC_STACR_OC);
667 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
/* Read PHY register @reg of PHY @id over the MDIO/STA interface.
 * Serialized by dev->mdio_lock; routes the MDIO lines through ZMII/RGMII
 * bridges when present. Returns the 16-bit value or -ETIMEDOUT on bus
 * timeout (also bails out on STACR_PHYE PHY error). */
673 static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
675 struct emac_regs __iomem *p = dev->emacp;
677 int n, err = -ETIMEDOUT;
679 mutex_lock(&dev->mdio_lock);
681 DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);
683 /* Enable proper MDIO port */
684 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
685 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
686 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
687 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
689 /* Wait for management interface to become idle */
691 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
694 DBG2(dev, " -> timeout wait idle\n");
699 /* Issue read command */
700 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
701 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
703 r = EMAC_STACR_BASE(dev->opb_bus_freq);
704 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
706 if (emac_has_feature(dev, EMAC_FTR_HAS_AXON_STACR))
707 r |= EMACX_STACR_STAC_READ;
709 r |= EMAC_STACR_STAC_READ;
710 r |= (reg & EMAC_STACR_PRA_MASK)
711 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
712 out_be32(&p->stacr, r);
714 /* Wait for read to complete */
716 while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
719 DBG2(dev, " -> timeout wait complete\n");
724 if (unlikely(r & EMAC_STACR_PHYE)) {
725 DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
/* Extract the 16-bit data field from STACR. */
730 r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
732 DBG2(dev, "mdio_read -> %04x" NL, r);
/* Release the MDIO lines in reverse acquisition order. */
735 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
736 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
737 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
738 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
739 mutex_unlock(&dev->mdio_lock);
741 return err == 0 ? r : err;
/* Write @val to PHY register @reg of PHY @id over the MDIO/STA
 * interface. Same locking and ZMII/RGMII routing as __emac_mdio_read();
 * timeouts are logged via DBG2 but not reported to the caller. */
744 static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
747 struct emac_regs __iomem *p = dev->emacp;
749 int n, err = -ETIMEDOUT;
751 mutex_lock(&dev->mdio_lock);
753 DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
755 /* Enable proper MDIO port */
756 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
757 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
758 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
759 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
761 /* Wait for management interface to be idle */
763 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
766 DBG2(dev, " -> timeout wait idle\n");
771 /* Issue write command */
772 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
773 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
775 r = EMAC_STACR_BASE(dev->opb_bus_freq);
776 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
778 if (emac_has_feature(dev, EMAC_FTR_HAS_AXON_STACR))
779 r |= EMACX_STACR_STAC_WRITE;
781 r |= EMAC_STACR_STAC_WRITE;
782 r |= (reg & EMAC_STACR_PRA_MASK) |
783 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
784 (val << EMAC_STACR_PHYD_SHIFT);
785 out_be32(&p->stacr, r);
787 /* Wait for write to complete */
789 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
792 DBG2(dev, " -> timeout wait complete\n");
/* Release the MDIO lines in reverse acquisition order. */
798 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
799 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
800 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
801 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
802 mutex_unlock(&dev->mdio_lock);
/* mii_if_info read hook: delegates to __emac_mdio_read() on the EMAC
 * instance that actually owns the MDIO bus (mdio_instance), falling back
 * to this instance when the bus is its own. */
805 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
807 struct emac_instance *dev = netdev_priv(ndev);
810 res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
/* mii_if_info write hook: delegates to __emac_mdio_write() on the
 * MDIO-bus-owning instance (see emac_mdio_read). */
815 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
817 struct emac_instance *dev = netdev_priv(ndev);
819 __emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
820 (u8) id, (u8) reg, (u16) val);
/* Apply the current multicast/promiscuous configuration: stop RX, update
 * the hash tables when the new RMR enables multicast-hash (MAE), then
 * program RMR. Called with the TX lock held (see callers). */
824 static void __emac_set_multicast_list(struct emac_instance *dev)
826 struct emac_regs __iomem *p = dev->emacp;
827 u32 rmr = emac_iff2rmr(dev->ndev);
829 DBG(dev, "__multicast %08x" NL, rmr);
831 /* I decided to relax register access rules here to avoid
834 * There is a real problem with EMAC4 core if we use MWSW_001 bit
835 * in MR1 register and do a full EMAC reset.
836 * One TX BD status update is delayed and, after EMAC reset, it
837 * never happens, resulting in TX hung (it'll be recovered by TX
838 * timeout handler eventually, but this is just gross).
839 * So we either have to do full TX reset or try to cheat here :)
841 * The only required change is to RX mode register, so I *think* all
842 * we need is just to stop RX channel. This seems to work on all
845 * If we need the full reset, we might just trigger the workqueue
846 * and do it async... a bit nasty but should work --BenH
848 dev->mcast_pending = 0;
849 emac_rx_disable(dev);
850 if (rmr & EMAC_RMR_MAE)
852 out_be32(&p->rmr, rmr);
/* net_device set_multicast_list hook. Either defers the update via
 * mcast_pending (picked up by emac_netif_start) or applies it now. */
857 static void emac_set_multicast_list(struct net_device *ndev)
859 struct emac_instance *dev = netdev_priv(ndev);
861 DBG(dev, "multicast" NL);
863 BUG_ON(!netif_running(dev->ndev));
866 dev->mcast_pending = 1;
869 __emac_set_multicast_list(dev);
/* Rebuild the RX ring for a new MTU: quiesce RX, drop any half-assembled
 * scatter/gather skb, re-arm every BD, and reallocate skbs only when the
 * new MTU needs bigger buffers. Also flips the MR1 jumbo bit via a full
 * reconfiguration when crossing ETH_DATA_LEN. Runs under link_lock. */
872 static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
874 int rx_sync_size = emac_rx_sync_size(new_mtu);
875 int rx_skb_size = emac_rx_skb_size(new_mtu);
878 mutex_lock(&dev->link_lock);
879 emac_netif_stop(dev);
880 emac_rx_disable(dev);
881 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
883 if (dev->rx_sg_skb) {
884 ++dev->estats.rx_dropped_resize;
885 dev_kfree_skb(dev->rx_sg_skb);
886 dev->rx_sg_skb = NULL;
889 /* Make a first pass over RX ring and mark BDs ready, dropping
890 * non-processed packets on the way. We need this as a separate pass
891 * to simplify error recovery in the case of allocation failure later.
893 for (i = 0; i < NUM_RX_BUFF; ++i) {
894 if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
895 ++dev->estats.rx_dropped_resize;
897 dev->rx_desc[i].data_len = 0;
898 dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
899 (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
902 /* Reallocate RX ring only if bigger skb buffers are required */
903 if (rx_skb_size <= dev->rx_skb_size)
906 /* Second pass, allocate new skbs */
907 for (i = 0; i < NUM_RX_BUFF; ++i) {
908 struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
914 BUG_ON(!dev->rx_skb[i]);
915 dev_kfree_skb(dev->rx_skb[i]);
/* The +2/-2 dance keeps the IP header 4-byte aligned after the
 * 14-byte Ethernet header. */
917 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
918 dev->rx_desc[i].data_ptr =
919 dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
920 DMA_FROM_DEVICE) + 2;
921 dev->rx_skb[i] = skb;
924 /* Check if we need to change "Jumbo" bit in MR1 */
925 if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
926 /* This is to prevent starting RX channel in emac_rx_enable() */
927 set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
929 dev->ndev->mtu = new_mtu;
930 emac_full_tx_reset(dev);
933 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
936 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
938 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
940 emac_netif_start(dev);
941 mutex_unlock(&dev->link_lock);
946 /* Process ctx, rtnl_lock semaphore */
/* net_device change_mtu hook: validates the range, resizes the RX ring
 * only if the new MTU actually needs a different skb size, and caches
 * the derived skb/sync sizes. */
947 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
949 struct emac_instance *dev = netdev_priv(ndev);
952 if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
955 DBG(dev, "change_mtu(%d)" NL, new_mtu);
957 if (netif_running(ndev)) {
958 /* Check if we really need to reinitalize RX ring */
959 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
960 ret = emac_resize_rx_ring(dev, new_mtu);
965 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
966 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
/* Free every skb still attached to the TX ring and zero the descriptors;
 * descriptors still marked READY count as dropped frames. Callers must
 * have stopped the TX channel first. */
972 static void emac_clean_tx_ring(struct emac_instance *dev)
976 for (i = 0; i < NUM_TX_BUFF; ++i) {
977 if (dev->tx_skb[i]) {
978 dev_kfree_skb(dev->tx_skb[i]);
979 dev->tx_skb[i] = NULL;
980 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
981 ++dev->estats.tx_dropped;
983 dev->tx_desc[i].ctrl = 0;
984 dev->tx_desc[i].data_ptr = 0;
/* Free all RX ring skbs and any partially reassembled scatter/gather
 * skb, clearing the descriptors. RX must be stopped by the caller. */
988 static void emac_clean_rx_ring(struct emac_instance *dev)
992 for (i = 0; i < NUM_RX_BUFF; ++i)
993 if (dev->rx_skb[i]) {
994 dev->rx_desc[i].ctrl = 0;
995 dev_kfree_skb(dev->rx_skb[i]);
996 dev->rx_skb[i] = NULL;
997 dev->rx_desc[i].data_ptr = 0;
1000 if (dev->rx_sg_skb) {
1001 dev_kfree_skb(dev->rx_sg_skb);
1002 dev->rx_sg_skb = NULL;
/* Allocate and DMA-map one RX skb for ring slot @slot, then mark the
 * descriptor EMPTY (hardware-owned). Returns non-zero on allocation
 * failure (slot left untouched past the failed alloc). */
1006 static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
1009 struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
1013 dev->rx_skb[slot] = skb;
1014 dev->rx_desc[slot].data_len = 0;
/* +2/-2 keeps the IP header 4-byte aligned behind the 14-byte
 * Ethernet header. */
1016 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1017 dev->rx_desc[slot].data_ptr =
1018 dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
1019 DMA_FROM_DEVICE) + 2;
1021 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1022 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
/* Log the current carrier state, and when up, the negotiated speed,
 * duplex and pause configuration. */
1027 static void emac_print_link_status(struct emac_instance *dev)
1029 if (netif_carrier_ok(dev->ndev))
1030 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1031 dev->ndev->name, dev->phy.speed,
1032 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1033 dev->phy.pause ? ", pause enabled" :
1034 dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1036 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1039 /* Process ctx, rtnl_lock semaphore */
/* net_device open hook: request the error IRQ, populate the RX ring,
 * reset ring bookkeeping, kick off PHY link polling (or force carrier on
 * for PHY-less setups), configure the hardware and start MAL/TX/RX.
 * On failure the RX ring and IRQ are released. */
1040 static int emac_open(struct net_device *ndev)
1042 struct emac_instance *dev = netdev_priv(ndev);
1045 DBG(dev, "open" NL);
1047 /* Setup error IRQ handler */
1048 err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
1050 printk(KERN_ERR "%s: failed to request IRQ %d\n",
1051 ndev->name, dev->emac_irq);
1055 /* Allocate RX ring */
1056 for (i = 0; i < NUM_RX_BUFF; ++i)
1057 if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
1058 printk(KERN_ERR "%s: failed to allocate RX ring\n",
1063 dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
1064 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1065 dev->rx_sg_skb = NULL;
1067 mutex_lock(&dev->link_lock);
1070 /* Start PHY polling now.
/* phy.address < 0 means no PHY: carrier is simply forced on below. */
1072 if (dev->phy.address >= 0) {
1073 int link_poll_interval;
1074 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1075 dev->phy.def->ops->read_link(&dev->phy);
1076 netif_carrier_on(dev->ndev);
1077 link_poll_interval = PHY_POLL_LINK_ON;
1079 netif_carrier_off(dev->ndev);
1080 link_poll_interval = PHY_POLL_LINK_OFF;
1082 dev->link_polling = 1;
1084 schedule_delayed_work(&dev->link_work, link_poll_interval);
1085 emac_print_link_status(dev);
1087 netif_carrier_on(dev->ndev);
1089 emac_configure(dev);
1090 mal_poll_add(dev->mal, &dev->commac);
1091 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
1092 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
1093 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1094 emac_tx_enable(dev);
1095 emac_rx_enable(dev);
1096 emac_netif_start(dev);
1098 mutex_unlock(&dev->link_lock);
/* Error path: undo RX ring allocation and IRQ registration. */
1102 emac_clean_rx_ring(dev);
1103 free_irq(dev->emac_irq, dev);
/* Compare the speed/duplex/pause configuration currently programmed in
 * MR1 against the freshly read PHY state; non-zero means the hardware
 * needs reconfiguration. */
1110 static int emac_link_differs(struct emac_instance *dev)
1112 u32 r = in_be32(&dev->emacp->mr1);
1114 int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
1115 int speed, pause, asym_pause;
1117 if (r & EMAC_MR1_MF_1000)
1119 else if (r & EMAC_MR1_MF_100)
/* Decode flow control from the EIFC/APP bit pair. */
1124 switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
1125 case (EMAC_MR1_EIFC | EMAC_MR1_APP):
1134 pause = asym_pause = 0;
1136 return speed != dev->phy.speed || duplex != dev->phy.duplex ||
1137 pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
/* Periodic PHY link poll (delayed work). On link-up transition it reads
 * the new link parameters and does a full TX reset/reconfigure; on
 * link-down it reinitializes and stops TX. Reschedules itself at a rate
 * depending on carrier state. */
1141 static void emac_link_timer(struct work_struct *work)
1143 struct emac_instance *dev =
1144 container_of((struct delayed_work *)work,
1145 struct emac_instance, link_work);
1146 int link_poll_interval;
1148 mutex_lock(&dev->link_lock);
1149 DBG2(dev, "link timer" NL);
1154 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1155 if (!netif_carrier_ok(dev->ndev)) {
1156 /* Get new link parameters */
1157 dev->phy.def->ops->read_link(&dev->phy);
1159 netif_carrier_on(dev->ndev);
1160 emac_netif_stop(dev);
1161 emac_full_tx_reset(dev);
1162 emac_netif_start(dev);
1163 emac_print_link_status(dev);
1165 link_poll_interval = PHY_POLL_LINK_ON;
1167 if (netif_carrier_ok(dev->ndev)) {
1168 emac_reinitialize(dev);
1169 netif_carrier_off(dev->ndev);
1170 netif_tx_disable(dev->ndev);
1171 emac_print_link_status(dev);
1173 link_poll_interval = PHY_POLL_LINK_OFF;
1175 schedule_delayed_work(&dev->link_work, link_poll_interval);
1177 mutex_unlock(&dev->link_lock);
/* Force the link state machine to re-evaluate soon: drop carrier, cancel
 * any pending poll and reschedule it at the fast (link-off) interval.
 * The second link_polling check guards against a concurrent close. */
1180 static void emac_force_link_update(struct emac_instance *dev)
1182 netif_carrier_off(dev->ndev);
1184 if (dev->link_polling) {
1185 cancel_rearming_delayed_work(&dev->link_work);
1186 if (dev->link_polling)
1187 schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
1191 /* Process ctx, rtnl_lock semaphore */
/* net_device stop hook: cancel link polling, quiesce the interface and
 * MAC, detach from MAL, drain both rings and release the error IRQ. */
1192 static int emac_close(struct net_device *ndev)
1194 struct emac_instance *dev = netdev_priv(ndev);
1196 DBG(dev, "close" NL);
1198 if (dev->phy.address >= 0) {
1199 dev->link_polling = 0;
1200 cancel_rearming_delayed_work(&dev->link_work);
1202 mutex_lock(&dev->link_lock);
1203 emac_netif_stop(dev);
1205 mutex_unlock(&dev->link_lock);
1207 emac_rx_disable(dev);
1208 emac_tx_disable(dev);
1209 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1210 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
1211 mal_poll_del(dev->mal, &dev->commac);
1213 emac_clean_tx_ring(dev);
1214 emac_clean_rx_ring(dev);
1216 free_irq(dev->emac_irq, dev);
1221 static inline u16 emac_tx_csum(struct emac_instance *dev,
1222 struct sk_buff *skb)
1224 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH &&
1225 skb->ip_summed == CHECKSUM_PARTIAL)) {
1226 ++dev->stats.tx_packets_csum;
1227 return EMAC_TX_CTRL_TAH_CSUM;
/* Common tail of the xmit paths: kick the transmitter via TMR0, stop the
 * queue when the ring is now full, refresh trans_start and account the
 * transmitted packet/bytes. */
1232 static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1234 struct emac_regs __iomem *p = dev->emacp;
1235 struct net_device *ndev = dev->ndev;
1237 /* Send the packet out. If the if makes a significant perf
1238 * difference, then we can store the TMR0 value in "dev"
1241 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1242 out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
1244 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1246 if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1247 netif_stop_queue(ndev);
1248 DBG2(dev, "stopped TX queue" NL);
1251 ndev->trans_start = jiffies;
1252 ++dev->stats.tx_packets;
1253 dev->stats.tx_bytes += len;
/* hard_start_xmit for non-TAH EMACs (single-descriptor frames): claim
 * the next TX slot, DMA-map the linear skb data and hand the descriptor
 * to hardware, then finish via emac_xmit_finish(). Runs under the TX
 * lock with BHs disabled. */
1259 static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1261 struct emac_instance *dev = netdev_priv(ndev);
1262 unsigned int len = skb->len;
1265 u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1266 MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1268 slot = dev->tx_slot++;
1269 if (dev->tx_slot == NUM_TX_BUFF) {
1271 ctrl |= MAL_TX_CTRL_WRAP;
1274 DBG2(dev, "xmit(%u) %d" NL, len, slot);
1276 dev->tx_skb[slot] = skb;
1277 dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
1280 dev->tx_desc[slot].data_len = (u16) len;
/* Setting ctrl (with MAL_TX_CTRL_READY) last hands the BD to hardware. */
1282 dev->tx_desc[slot].ctrl = ctrl;
1284 return emac_xmit_finish(dev, len);
#ifdef CONFIG_IBM_NEW_EMAC_TAH
/* Split a DMA region of @len bytes starting at @pd into additional
 * MAL_MAX_TX_SIZE-sized BDs following @slot; @last marks the final
 * fragment so the LAST bit lands on the terminating chunk. */
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);

		slot = (slot + 1) % NUM_TX_BUFF;

			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		/* Intermediate chunks carry no skb: only the final BD of a
		 * packet owns the skb (see emac_start_xmit_sg). */
		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;

	/* This is common "fast" path: linear skb small enough for one BD. */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks. */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data: first chunk in the current slot, remainder split out. */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,

	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		/* Re-check ring space now that earlier chunks consumed slots. */
		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))

		pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out: first BD's ctrl (with READY) is written last. */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo every BD written so far and stop the queue. */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
			slot = NUM_TX_BUFF - 1;
	++dev->estats.tx_undo;

	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
/* Without TAH there is no SG/checksum offload: alias to the plain xmit. */
# define emac_start_xmit_sg	emac_start_xmit
#endif	/* !defined(CONFIG_IBM_NEW_EMAC_TAH) */
/* Decode a bad TX BD status word into the per-error-type counters. */
static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD TX error %04x" NL, ctrl);

	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
/* Reclaim completed TX BDs (MAL poll_tx callback).  Frees skbs attached to
 * finished slots, records BD errors, and wakes the queue once enough slots
 * drop below EMAC_TX_WAKEUP_THRESH.  Runs under netif_tx_lock_bh. */
static void emac_poll_tx(void *param)
	struct emac_instance *dev = param;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	/* TAH-equipped EMACs report a different set of fatal BD bits. */
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
		int slot = dev->ack_slot, n = 0;
		ctrl = dev->tx_desc[slot].ctrl;
		/* READY cleared => hardware is done with this BD. */
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
				dev->tx_skb[slot] = NULL;
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

		dev->ack_slot = slot;
		if (netif_queue_stopped(dev->ndev) &&
		    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
			netif_wake_queue(dev->ndev);

		DBG2(dev, "tx %d pkts" NL, n);
	netif_tx_unlock_bh(dev->ndev);
/* Hand an RX skb back to the hardware: re-sync its DMA mapping and rewrite
 * the BD as EMPTY (with WRAP on the last ring slot).  The "- 2" / "+ 2"
 * account for the 2-byte IP-alignment headroom used by this driver. */
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	/* ctrl written last so EMPTY publishes a fully initialized BD. */
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
/* Decode a bad RX BD status word into the per-error-type counters. */
static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
	struct emac_error_stats *st = &dev->estats;

	DBG(dev, "BD RX error %04x" NL, ctrl);

	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
/* Mark @skb as checksum-verified when the TAH reported a clean packet
 * (ctrl == 0 after masking) on a TAH-equipped EMAC; no-op otherwise. */
static inline void emac_rx_csum(struct emac_instance *dev,
				struct sk_buff *skb, u16 ctrl)
#ifdef CONFIG_IBM_NEW_EMAC_TAH
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
/* Append the data of RX @slot to the in-progress scatter/gather skb
 * (dev->rx_sg_skb).  Drops the whole SG packet if it would exceed
 * rx_skb_size; in every path the slot's skb is recycled to the ring. */
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		/* +2 = IP-alignment headroom reserved in the ring skbs. */
		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);

	emac_recycle_rx_skb(dev, slot, 0);
/* NAPI poll context */
/* Receive up to @budget packets from the RX ring.  Handles three cases:
 * single-BD packets (fast path, with small-packet copybreak), multi-BD
 * scatter/gather packets, and error BDs.  Also restarts a stopped RX
 * channel once the ring has free space again. */
static int emac_poll_rx(void *param, int budget)
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

	while (budget > 0) {
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		/* EMPTY => hardware still owns this BD; nothing more to do. */
		if (ctrl & MAL_RX_CTRL_EMPTY)

		skb = dev->rx_skb[slot];
		len = dev->rx_desc[slot].data_len;

		/* Multi-BD packet: fall through to the SG handling below. */
		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);

		/* Copybreak: small packets are copied into a fresh skb so the
		 * large ring skb can be recycled immediately. */
		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
			emac_recycle_rx_skb(dev, slot, len);
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))

		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
		++dev->stats.rx_packets;
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;

		/* Scatter/gather: FIRST starts a new rx_sg_skb, later BDs are
		 * appended; LAST completes the packet and re-enters the
		 * normal delivery path above. */
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
				dev->rx_sg_skb = skb;
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {
			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;

		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);

	DBG2(dev, "rx %d BDs" NL, received);
	dev->rx_slot = slot;

	/* RX channel was stopped (e.g. descriptor error); restart it now
	 * that we have budget left and the ring has been serviced. */
	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);
/* NAPI poll context */
/* Return non-zero when the next RX BD holds a completed packet. */
static int emac_peek_rx(void *param)
	struct emac_instance *dev = param;

	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
/* NAPI poll context */
/* SG variant of emac_peek_rx: scan forward until we find either an EMPTY
 * BD (no complete packet yet) or a LAST BD (a full packet is ready). */
static int emac_peek_rx_sg(void *param)
	struct emac_instance *dev = param;

	int slot = dev->rx_slot;
		u16 ctrl = dev->rx_desc[slot].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
		else if (ctrl & MAL_RX_CTRL_LAST)

		slot = (slot + 1) % NUM_RX_BUFF;

		/* I'm just being paranoid here :) — bail if we wrapped the
		 * whole ring without finding EMPTY or LAST. */
		if (unlikely(slot == dev->rx_slot))
/* MAL "RX descriptor error" callback: count the stall and schedule an
 * asynchronous RX disable (poll_rx will restart the channel later). */
static void emac_rxde(void *param)
	struct emac_instance *dev = param;

	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
/* EMAC interrupt handler: ack the ISR and translate each status bit into
 * its error counter.  Runs under dev->lock. */
static irqreturn_t emac_irq(int irq, void *dev_instance)
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;

	spin_lock(&dev->lock);

	/* Read-then-write-back acknowledges all pending interrupt bits. */
	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	if (isr & EMAC4_ISR_TXPE)
	if (isr & EMAC4_ISR_RXPE)
	if (isr & EMAC4_ISR_TXUE)
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
	if (isr & EMAC_ISR_SQE)
	if (isr & EMAC_ISR_TE)

	spin_unlock(&dev->lock);
/* get_stats callback: fold the driver's 64-bit hardware/software counters
 * into the "legacy" struct net_device_stats, under dev->lock so a
 * concurrent IRQ cannot update counters mid-snapshot. */
static struct net_device_stats *emac_stats(struct net_device *ndev)
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
/* MAL callback tables: the _sg variant differs only in peek_rx, which must
 * scan past multi-BD (scatter/gather) packets. */
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,

static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
/* Ethtool support */
/* Report PHY capabilities and current link parameters; the mutable link
 * fields are read under link_lock for a consistent snapshot. */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
	struct emac_instance *dev = netdev_priv(ndev);

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	/* address < 0 means PHY-less (internal) configuration. */
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	mutex_lock(&dev->link_lock);
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	mutex_unlock(&dev->link_lock);
/* Validate and apply new link settings.  Forced speed/duplex combinations
 * are checked against the PHY's supported feature mask before being
 * programmed; autoneg re-advertises only supported modes. */
static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
	struct emac_instance *dev = netdev_priv(ndev);
	u32 f = dev->phy.features;

	DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)

	if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (cmd->speed) {
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_10baseT_Half))
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_10baseT_Full))
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_100baseT_Half))
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_100baseT_Full))
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_1000baseT_Half))
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_1000baseT_Full))

		mutex_lock(&dev->link_lock);
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
		mutex_unlock(&dev->link_lock);

		if (!(f & SUPPORTED_Autoneg))

		mutex_lock(&dev->link_lock);
		/* Advertise the intersection of requested and supported
		 * modes, preserving the current pause advertisement. */
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       ADVERTISED_Asym_Pause)));
		mutex_unlock(&dev->link_lock);
	emac_force_link_update(dev);
/* Ring sizes are compile-time constants, so current == maximum. */
static void emac_ethtool_get_ringparam(struct net_device *ndev,
				       struct ethtool_ringparam *rp)
	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
/* Report flow-control state derived from PHY autoneg/pause advertisement
 * and the negotiated duplex; read under link_lock. */
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
	struct emac_instance *dev = netdev_priv(ndev);

	mutex_lock(&dev->link_lock);
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))

	/* Pause only applies on full-duplex links. */
	if (dev->phy.duplex == DUPLEX_FULL) {
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
	mutex_unlock(&dev->link_lock);
/* RX checksum offload is available exactly when a TAH is attached. */
static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
	struct emac_instance *dev = netdev_priv(ndev);

	return dev->tah_dev != NULL;
/* Size of this EMAC's register-dump section (subheader + register block),
 * which differs between EMAC4 and classic EMAC. */
static int emac_get_regs_len(struct emac_instance *dev)
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC4_ETHTOOL_REGS_SIZE;
		return sizeof(struct emac_ethtool_regs_subhdr) +
			EMAC_ETHTOOL_REGS_SIZE;
/* Total register-dump size: header + EMAC + MAL plus any optional
 * ZMII/RGMII/TAH sections present on this instance. */
static int emac_ethtool_get_regs_len(struct net_device *ndev)
	struct emac_instance *dev = netdev_priv(ndev);

	size = sizeof(struct emac_ethtool_regs_hdr) +
		emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		size += zmii_get_regs_len(dev->zmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		size += rgmii_get_regs_len(dev->rgmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		size += tah_get_regs_len(dev->tah_dev);
/* Copy this EMAC's registers into @buf after a subheader; returns the
 * first byte past the section so sections can be chained. */
static void *emac_dump_regs(struct emac_instance *dev, void *buf)
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->index = dev->cell_index;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
		hdr->version = EMAC4_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE);
		return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE);
		hdr->version = EMAC_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
		return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
/* ethtool get_regs: write a header, then chain the MAL, EMAC and optional
 * ZMII/RGMII/TAH register sections into @buf, flagging each present
 * component in hdr->components. */
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;

	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
/* Restart PHY autonegotiation (fails for PHY-less configs or when
 * autoneg is currently disabled). */
static int emac_ethtool_nway_reset(struct net_device *ndev)
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "nway_reset" NL);

	if (dev->phy.address < 0)

	mutex_lock(&dev->link_lock);
	if (!dev->phy.autoneg) {

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
	mutex_unlock(&dev->link_lock);
	emac_force_link_update(dev);
/* Number of u64 counters exported via ethtool -S. */
static int emac_ethtool_get_stats_count(struct net_device *ndev)
	return EMAC_ETHTOOL_STATS_COUNT;

/* Copy the static stat-name table for ETH_SS_STATS. */
static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
	if (stringset == ETH_SS_STATS)
		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));

/* Dump dev->stats followed by dev->estats as a flat u64 array; the order
 * must match emac_stats_keys. */
static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
	struct emac_instance *dev = netdev_priv(ndev);

	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
/* Fill ethtool driver info: name, version, and a bus string built from the
 * cell index and the device-tree node path. */
static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
	struct emac_instance *dev = netdev_priv(ndev);

	strcpy(info->driver, "ibm_emac");
	strcpy(info->version, DRV_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
		dev->cell_index, dev->ofdev->node->full_name);
	info->n_stats = emac_ethtool_get_stats_count(ndev);
	info->regdump_len = emac_ethtool_get_regs_len(ndev);
/* ethtool entry points for this driver. */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
/* Legacy MII ioctls (SIOCDEVPRIVATE..+2): report PHY address, read a PHY
 * register, or (privileged) write one. */
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
	struct emac_instance *dev = netdev_priv(ndev);
	uint16_t *data = (uint16_t *) & rq->ifr_ifru;

	DBG(dev, "ioctl %08x" NL, cmd);

	/* No PHY: nothing to talk MII to. */
	if (dev->phy.address < 0)

	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
	case SIOCDEVPRIVATE + 1:
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);

	case SIOCDEVPRIVATE + 2:
		/* Register writes are restricted to CAP_NET_ADMIN. */
		if (!capable(CAP_NET_ADMIN))
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
/* One device-tree dependency (MAL, ZMII, RGMII, TAH, MDIO, or the
 * previously probed EMAC) tracked while waiting for its driver to bind. */
struct emac_depentry {
	struct device_node *node;
	struct of_device *ofdev;

/* Indices into the emac_check_deps()/emac_wait_deps() dependency array. */
#define	EMAC_DEP_MAL_IDX	0
#define	EMAC_DEP_ZMII_IDX	1
#define	EMAC_DEP_RGMII_IDX	2
#define	EMAC_DEP_TAH_IDX	3
#define	EMAC_DEP_MDIO_IDX	4
#define	EMAC_DEP_PREV_IDX	5
#define	EMAC_DEP_COUNT		6
/* Resolve each pending dependency one step further (phandle -> node ->
 * of_device -> bound driver data).  Returns true once every dependency
 * is fully resolved; used as the wait_event condition in emac_wait_deps. */
static int __devinit emac_check_deps(struct emac_instance *dev,
				     struct emac_depentry *deps)
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, allright */
		if (deps[i].phandle == 0) {

		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			np = *(dev->blist - 1);
				deps[i].phandle = 0;
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
		if (deps[i].drvdata != NULL)
	return (there == EMAC_DEP_COUNT);
/* Drop the references on all dependent of_devices taken by emac_wait_deps. */
static void emac_put_deps(struct emac_instance *dev)
	of_dev_put(dev->mal_dev);
	of_dev_put(dev->zmii_dev);
	of_dev_put(dev->rgmii_dev);
	of_dev_put(dev->mdio_dev);
	of_dev_put(dev->tah_dev);
/* Bus notifier: wake every probe sleeping in emac_wait_deps whenever a
 * driver binds, so dependency resolution can be re-checked. */
static int __devinit emac_of_bus_notify(struct notifier_block *nb,
					unsigned long action, void *data)
	/* We are only intereted in device addition */
	if (action == BUS_NOTIFY_BOUND_DRIVER)
		wake_up_all(&emac_probe_wait);

static struct notifier_block emac_of_bus_notifier = {
	.notifier_call = emac_of_bus_notify
/* Wait (bounded by EMAC_PROBE_DEP_TIMEOUT) until all dependent devices
 * from the device tree have bound drivers, then record their of_devices
 * in @dev.  Returns 0 on success, -ENODEV on timeout. */
static int __devinit emac_wait_deps(struct emac_instance *dev)
	struct emac_depentry deps[EMAC_DEP_COUNT];

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	/* Previous EMAC in the boot list is a pseudo-dependency; the 0xff..
	 * phandle is a sentinel resolved specially in emac_check_deps. */
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		of_node_put(deps[i].node);
		if (err && deps[i].ofdev)
			of_dev_put(deps[i].ofdev);
	dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
	dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
	dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
	dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
	dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	if (deps[EMAC_DEP_PREV_IDX].ofdev)
		of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
/* Read a u32 device-tree property into *val.  When @fatal, a missing or
 * short property is reported with printk; callers use the return value
 * to fall back to defaults for optional properties. */
static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
					 u32 *val, int fatal)
	const u32 *prop = of_get_property(np, name, &len);
	if (prop == NULL || len < sizeof(u32)) {
			printk(KERN_ERR "%s: missing %s property\n",
			       np->full_name, name);
/* Locate and initialize the PHY for this EMAC: handles PHY-less setups,
 * GPCS pseudo-PHYs, and scanning the MDIO bus for a responding address,
 * then programs initial autoneg or forced link parameters. */
static int __devinit emac_init_phy(struct emac_instance *dev)
	struct device_node *np = dev->ofdev->node;
	struct net_device *ndev = dev->ndev;

	dev->phy.dev = ndev;
	dev->phy.mode = dev->phy_mode;

	/* PHY-less configuration.
	 * XXX I probably should move these settings to the dev tree */
	if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
		/* PHY-less configuration.
		 * XXX I probably should move these settings to the dev tree */
		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;

	mutex_lock(&emac_phy_map_lock);
	phy_map = dev->phy_map | busy_phy_map;

	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

	dev->phy.mdio_read = emac_mdio_read;
	dev->phy.mdio_write = emac_mdio_write;

	/* Configure EMAC with defaults so we can at least use MDIO
	 * This is needed mostly for 440GX */
	if (emac_phy_gpcs(dev->phy.mode)) {
		/* Make GPCS PHY address equal to EMAC index.
		 * We probably should take into account busy_phy_map
		 * and/or phy_map here.
		 *
		 * Note that the busy_phy_map is currently global
		 * while it should probably be per-ASIC... */
		dev->phy.address = dev->cell_index;

	emac_configure(dev);

	/* Restrict the scan to the explicitly configured address, if any. */
	if (dev->phy_address != 0xffffffff)
		phy_map = ~(1 << dev->phy_address);

	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
		if (!(phy_map & 1)) {
			busy_phy_map |= 1 << i;

			/* Quick check if there is a PHY at the address */
			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
			if (r == 0xffff || r < 0)
			if (!emac_mii_phy_probe(&dev->phy, i))
	mutex_unlock(&emac_phy_map_lock);
		printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);

	if (dev->phy.def->ops->init)
		dev->phy.def->ops->init(&dev->phy);

	/* Disable any PHY features not supported by the platform */
	dev->phy.def->features &= ~dev->phy_feat_exc;

	/* Setup initial link parameters */
	if (dev->phy.features & SUPPORTED_Autoneg) {
		adv = dev->phy.features;
		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		/* Restart autonegotiation */
		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
		u32 f = dev->phy.def->features;
		int speed = SPEED_10, fd = DUPLEX_HALF;

		/* Select highest supported speed/duplex */
		if (f & SUPPORTED_1000baseT_Full) {
		} else if (f & SUPPORTED_1000baseT_Half)
		else if (f & SUPPORTED_100baseT_Full) {
		} else if (f & SUPPORTED_100baseT_Half)
		else if (f & SUPPORTED_10baseT_Full)

		/* Force link parameters */
		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
/* Parse this EMAC's device-tree node into @dev: MAL wiring, FIFO sizes,
 * PHY configuration, optional ZMII/RGMII/TAH/MDIO links, feature bits,
 * and the MAC address.  Optional properties fall back to defaults. */
static int __devinit emac_init_config(struct emac_instance *dev)
	struct device_node *np = dev->ofdev->node;

	const char *pm, *phy_modes[] = {
		[PHY_MODE_MII] = "mii",
		[PHY_MODE_RMII] = "rmii",
		[PHY_MODE_SMII] = "smii",
		[PHY_MODE_RGMII] = "rgmii",
		[PHY_MODE_TBI] = "tbi",
		[PHY_MODE_GMII] = "gmii",
		[PHY_MODE_RTBI] = "rtbi",
		[PHY_MODE_SGMII] = "sgmii",

	/* Read config from device-tree */
	if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
	if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
	if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
	if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
	if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
		dev->max_mtu = 1500;
	if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
		dev->rx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
		dev->tx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
		dev->rx_fifo_size_gige = dev->rx_fifo_size;
	if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
		dev->tx_fifo_size_gige = dev->tx_fifo_size;
	if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
		dev->phy_address = 0xffffffff;
	if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
		dev->phy_map = 0xffffffff;
	if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
	if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
	if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
	if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
	if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
	if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
		dev->zmii_port = 0xffffffff;;	/* NOTE(review): stray second ';' */
	if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
	if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
		dev->rgmii_port = 0xffffffff;;	/* NOTE(review): stray second ';' */
	if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
		dev->fifo_entry_size = 16;
	if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
		dev->mal_burst_size = 256;

	/* PHY mode needs some decoding */
	dev->phy_mode = PHY_MODE_NA;
	pm = of_get_property(np, "phy-mode", &plen);
		for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
			if (!strcasecmp(pm, phy_modes[i])) {

	/* Backward compat with non-final DT: phy-mode as a raw u32. */
	if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
		u32 nmode = *(const u32 *)pm;
		if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
			dev->phy_mode = nmode;

	/* Check EMAC version */
	if (of_device_is_compatible(np, "ibm,emac4"))
		dev->features |= EMAC_FTR_EMAC4;
	if (of_device_is_compatible(np, "ibm,emac-axon")
	    || of_device_is_compatible(np, "ibm,emac-440epx"))
		dev->features |= EMAC_FTR_HAS_AXON_STACR
			| EMAC_FTR_STACR_OC_INVERT;
	if (of_device_is_compatible(np, "ibm,emac-440spe"))
		dev->features |= EMAC_FTR_STACR_OC_INVERT;

	/* Fixup some feature bits based on the device tree and verify
	 * we have support for them compiled in */
	if (dev->tah_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_TAH
		dev->features |= EMAC_FTR_HAS_TAH;
		printk(KERN_ERR "%s: TAH support not enabled !\n",

	if (dev->zmii_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_ZMII
		dev->features |= EMAC_FTR_HAS_ZMII;
		printk(KERN_ERR "%s: ZMII support not enabled !\n",

	if (dev->rgmii_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_RGMII
		dev->features |= EMAC_FTR_HAS_RGMII;
		printk(KERN_ERR "%s: RGMII support not enabled !\n",

	/* Read MAC-address */
	p = of_get_property(np, "local-mac-address", NULL);
		printk(KERN_ERR "%s: Can't find local-mac-address property\n",
	memcpy(dev->ndev->dev_addr, p, 6);

	DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
	DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
	DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
	DBG(dev, "max_mtu      : %d\n", dev->max_mtu);
	DBG(dev, "OPB freq     : %d\n", dev->opb_bus_freq);
/*
 * emac_probe - of_platform probe entry point for one EMAC instance.
 *
 * Allocates the net_device, parses the device tree for configuration,
 * maps registers and interrupts, waits for and attaches to dependent
 * devices (MAL, ZMII/RGMII/TAH, MDIO), initializes the PHY, fills the
 * net_device ops, and registers with the network stack.
 *
 * NOTE(review): this excerpt is extraction-garbled — original kernel
 * line numbers are fused into the text and many lines (opening/closing
 * braces, error labels such as err_free/err_irq_unmap, return
 * statements) are missing. Do not treat it as compilable as-is;
 * compare against the upstream driver before editing.
 */
2533 static int __devinit emac_probe(struct of_device *ofdev,
2534 const struct of_device_id *match)
2536 struct net_device *ndev;
2537 struct emac_instance *dev;
2538 struct device_node *np = ofdev->node;
2539 struct device_node **blist = NULL;
2542 /* Find ourselves in the bootlist if we are there */
2543 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2544 if (emac_boot_list[i] == np)
2545 blist = &emac_boot_list[i];
2547 /* Allocate our net_device structure */
2549 ndev = alloc_etherdev(sizeof(struct emac_instance));
/* NOTE(review): the NULL-check branch body and error path for
 * alloc_etherdev() are only partially visible below. */
2551 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2555 dev = netdev_priv(ndev);
2559 SET_NETDEV_DEV(ndev, &ofdev->dev);
2561 /* Initialize some embedded data structures */
2562 mutex_init(&dev->mdio_lock);
2563 mutex_init(&dev->link_lock);
2564 spin_lock_init(&dev->lock);
2565 INIT_WORK(&dev->reset_work, emac_reset_work);
2567 /* Init various config data based on device-tree */
2568 err = emac_init_config(dev);
2572 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2573 dev->emac_irq = irq_of_parse_and_map(np, 0);
2574 dev->wol_irq = irq_of_parse_and_map(np, 1);
2575 if (dev->emac_irq == NO_IRQ) {
2576 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2579 ndev->irq = dev->emac_irq;
/* Map the register block described by "reg" property 0. */
2582 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2583 printk(KERN_ERR "%s: Can't get registers address\n",
2587 // TODO : request_mem_region
2588 dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs));
2589 if (dev->emacp == NULL) {
2590 printk(KERN_ERR "%s: Can't map device registers!\n",
2596 /* Wait for dependent devices */
2597 err = emac_wait_deps(dev);
2600 "%s: Timeout waiting for dependent devices\n",
2602 /* display more info about what's missing ? */
2605 dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2606 if (dev->mdio_dev != NULL)
2607 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2609 /* Register with MAL */
2610 dev->commac.ops = &emac_commac_ops;
2611 dev->commac.dev = dev;
2612 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2613 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2614 err = mal_register_commac(dev->mal, &dev->commac);
2616 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2617 np->full_name, dev->mal_dev->node->full_name);
2620 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2621 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2623 /* Get pointers to BD rings */
/* NOTE(review): assignments to dev->tx_desc / dev->rx_desc appear to
 * have lost their left-hand sides in extraction. */
2625 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2627 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2629 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2630 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
/* Clean rings: descriptors must start out zeroed before MAL use. */
2633 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2634 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2636 /* Attach to ZMII, if needed */
2637 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2638 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2639 goto err_unreg_commac;
2641 /* Attach to RGMII, if needed */
2642 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2643 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2644 goto err_detach_zmii;
2646 /* Attach to TAH, if needed */
2647 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2648 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2649 goto err_detach_rgmii;
2651 /* Set some link defaults before we can find out real parameters */
2652 dev->phy.speed = SPEED_100;
2653 dev->phy.duplex = DUPLEX_FULL;
2654 dev->phy.autoneg = AUTONEG_DISABLE;
2655 dev->phy.pause = dev->phy.asym_pause = 0;
2656 dev->stop_timeout = STOP_TIMEOUT_100;
2657 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2659 /* Find PHY if any */
2660 err = emac_init_phy(dev);
2662 goto err_detach_tah;
2664 /* Fill in the driver function table */
2665 ndev->open = &emac_open;
/* With TAH present the HW can do checksum offload + SG xmit. */
2666 #ifdef CONFIG_IBM_NEW_EMAC_TAH
2668 ndev->hard_start_xmit = &emac_start_xmit_sg;
2669 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
/* NOTE(review): the #else / #endif of this conditional block were lost
 * in extraction; 2672 below is the non-TAH xmit assignment. */
2672 ndev->hard_start_xmit = &emac_start_xmit;
2673 ndev->tx_timeout = &emac_tx_timeout;
2674 ndev->watchdog_timeo = 5 * HZ;
2675 ndev->stop = &emac_close;
2676 ndev->get_stats = &emac_stats;
2677 ndev->set_multicast_list = &emac_set_multicast_list;
2678 ndev->do_ioctl = &emac_ioctl;
2679 if (emac_phy_supports_gige(dev->phy_mode)) {
2680 ndev->change_mtu = &emac_change_mtu;
2681 dev->commac.ops = &emac_commac_sg_ops;
2683 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
/* Not up yet: no carrier, queue stopped until emac_open(). */
2685 netif_carrier_off(ndev);
2686 netif_stop_queue(ndev);
2688 err = register_netdev(ndev);
2690 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2691 np->full_name, err);
2692 goto err_detach_tah;
2695 /* Set our drvdata last as we don't want them visible until we are
2699 dev_set_drvdata(&ofdev->dev, dev);
2701 /* There's a new kid in town ! Let's tell everybody */
2702 wake_up_all(&emac_probe_wait);
2706 "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2707 ndev->name, dev->cell_index, np->full_name,
2708 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2709 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2711 if (dev->phy.address >= 0)
2712 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2713 dev->phy.def->name, dev->phy.address);
2715 emac_dbg_register(dev);
/* Error unwind: detach in strict reverse order of attachment.
 * NOTE(review): the err_* label lines themselves were lost in
 * extraction; the cleanup bodies below are what remains. */
2720 /* I have a bad feeling about this ... */
2723 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2724 tah_detach(dev->tah_dev, dev->tah_port);
2726 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2727 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2729 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2730 zmii_detach(dev->zmii_dev, dev->zmii_port);
2732 mal_unregister_commac(dev->mal, &dev->commac);
2736 iounmap(dev->emacp);
2738 if (dev->wol_irq != NO_IRQ)
2739 irq_dispose_mapping(dev->wol_irq);
2740 if (dev->emac_irq != NO_IRQ)
2741 irq_dispose_mapping(dev->emac_irq);
2745 /* if we were on the bootlist, remove us as we won't show up and
2746 * wake up all waiters to notify them in case they were waiting
2751 wake_up_all(&emac_probe_wait);
/*
 * emac_remove - of_platform remove entry point.
 *
 * Tears down one EMAC instance in reverse order of emac_probe():
 * unregister the net_device, flush pending work (reset/link timers),
 * detach from TAH/RGMII/ZMII, unregister from MAL, drop debug state,
 * unmap registers and dispose of the IRQ mappings.
 *
 * NOTE(review): extraction-garbled excerpt — braces, free_netdev()
 * and the final return were lost; verify against upstream.
 */
2756 static int __devexit emac_remove(struct of_device *ofdev)
2758 struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2760 DBG(dev, "remove" NL);
/* Make the instance invisible before tearing it down. */
2762 dev_set_drvdata(&ofdev->dev, NULL);
2764 unregister_netdev(dev->ndev);
/* Ensure reset_work / link_work are not still running. */
2766 flush_scheduled_work();
2768 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2769 tah_detach(dev->tah_dev, dev->tah_port);
2770 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2771 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2772 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2773 zmii_detach(dev->zmii_dev, dev->zmii_port);
2775 mal_unregister_commac(dev->mal, &dev->commac);
2778 emac_dbg_unregister(dev);
2779 iounmap(dev->emacp);
2781 if (dev->wol_irq != NO_IRQ)
2782 irq_dispose_mapping(dev->wol_irq)
2783 if (dev->emac_irq != NO_IRQ)
2784 irq_dispose_mapping(dev->emac_irq);
2791 /* XXX Features in here should be replaced by properties... */
/*
 * Device-tree match table: this driver binds to "ibm,emac" and
 * "ibm,emac4" compatible nodes.
 * NOTE(review): the initializer braces and the terminating empty
 * entry were lost in extraction.
 */
2792 static struct of_device_id emac_match[] =
2796 .compatible = "ibm,emac",
2800 .compatible = "ibm,emac4",
/* of_platform driver glue tying the match table to probe/remove.
 * NOTE(review): the .name field and closing brace are missing from
 * this extraction. */
2805 static struct of_platform_driver emac_driver = {
2807 .match_table = emac_match,
2809 .probe = emac_probe,
2810 .remove = emac_remove,
/*
 * emac_make_bootlist - collect all matching EMAC nodes at module init.
 *
 * Walks every device-tree node, skips nodes that don't match the
 * driver or are marked "unused", records each node (with a reference
 * via of_node_get) into emac_boot_list[], then sorts the list by the
 * "cell-index" property so probing order follows hardware numbering.
 *
 * NOTE(review): extraction-garbled — loop/brace lines, the `max`
 * assignment and `continue`/`break` statements are missing.
 */
2813 static void __init emac_make_bootlist(void)
2815 struct device_node *np = NULL;
2816 int j, max, i = 0, k;
2817 int cell_indices[EMAC_BOOT_LIST_SIZE];
2820 while((np = of_find_all_nodes(np)) != NULL) {
2823 if (of_match_node(emac_match, np) == NULL)
2825 if (of_get_property(np, "unused", NULL))
2827 idx = of_get_property(np, "cell-index", NULL);
2830 cell_indices[i] = *idx;
/* Hold a reference for each node kept on the boot list. */
2831 emac_boot_list[i++] = of_node_get(np);
2832 if (i >= EMAC_BOOT_LIST_SIZE) {
2839 /* Bubble sort them (doh, what a creative algorithm :-) */
2840 for (i = 0; max > 1 && (i < (max - 1)); i++)
2841 for (j = i; j < max; j++) {
2842 if (cell_indices[i] > cell_indices[j]) {
/* Swap both the node pointers and their cell indices. */
2843 np = emac_boot_list[i];
2844 emac_boot_list[i] = emac_boot_list[j];
2845 emac_boot_list[j] = np;
2846 k = cell_indices[i];
2847 cell_indices[i] = cell_indices[j];
2848 cell_indices[j] = k;
/*
 * emac_init - module init: banner, debug init, boot list, submodule
 * init (lines lost in extraction), then register the platform driver.
 * NOTE(review): the error-unwind paths between lines 2865 and 2878
 * are missing from this excerpt.
 */
2853 static int __init emac_init(void)
2857 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2859 /* Init debug stuff */
2862 /* Build EMAC boot list */
2863 emac_make_bootlist();
2865 /* Init submodules */
2878 rc = of_register_platform_driver(&emac_driver);
/*
 * emac_exit - module exit: unregister the platform driver (submodule
 * teardown lines are missing from this excerpt) and drop the node
 * references taken by emac_make_bootlist().
 */
2896 static void __exit emac_exit(void)
2900 of_unregister_platform_driver(&emac_driver);
2908 /* Destroy EMAC boot list */
2909 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2910 if (emac_boot_list[i])
/* Balance the of_node_get() done when the list was built. */
2911 of_node_put(emac_boot_list[i]);
/* Standard module entry/exit registration. */
2914 module_init(emac_init);
2915 module_exit(emac_exit);