pilppa.org Git - linux-2.6-omap-h63xx.git/blob - drivers/net/tg3.c
tg3: Add 5761S support
[linux-2.6-omap-h63xx.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
38 #include <linux/ip.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43
44 #include <net/checksum.h>
45 #include <net/ip.h>
46
47 #include <asm/system.h>
48 #include <asm/io.h>
49 #include <asm/byteorder.h>
50 #include <asm/uaccess.h>
51
52 #ifdef CONFIG_SPARC
53 #include <asm/idprom.h>
54 #include <asm/prom.h>
55 #endif
56
57 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
58 #define TG3_VLAN_TAG_USED 1
59 #else
60 #define TG3_VLAN_TAG_USED 0
61 #endif
62
63 #define TG3_TSO_SUPPORT 1
64
65 #include "tg3.h"
66
#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.94"
#define DRV_MODULE_RELDATE      "August 14, 2008"

/* Default MAC/RX/TX mode register values; the operating modes are
 * programmed later during chip initialization.
 */
#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

/* Byte sizes of the descriptor rings, derived from the entry counts
 * above; used when allocating DMA-coherent ring memory.
 */
#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
/* Ring sizes are powers of two, so advance-with-wrap is a mask. */
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* Receive buffer sizes: max frame + rx_offset alignment + slack. */
#define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST            6

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
148
149 static struct pci_device_id tg3_pci_tbl[] = {
150         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
151         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
152         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
204         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
205         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
206         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
207         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
208         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
209         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
210         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
211         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
212         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
213         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
214         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
215         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
216         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
217         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
218         {}
219 };
220
221 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
222
/* Names reported for ETHTOOL_GSTATS.  The order must match the u64
 * layout of struct tg3_ethtool_stats (TG3_NUM_STATS entries).
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};
303
/* Names reported for ETHTOOL_TEST.  The order must match the result
 * slots filled by the self-test code (TG3_NUM_TEST entries).
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};
314
/* Write a register through the direct MMIO window (posted write,
 * no read-back flush).
 */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}
319
320 static u32 tg3_read32(struct tg3 *tp, u32 off)
321 {
322         return (readl(tp->regs + off));
323 }
324
/* Write a register in the separate APE (management processor)
 * register BAR.
 */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}
329
330 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
331 {
332         return (readl(tp->aperegs + off));
333 }
334
/* Write a register through the PCI config-space indirect window.
 * The BASE_ADDR/DATA pair must be written back-to-back, so
 * indirect_lock serializes all users of the window.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
344
/* MMIO register write followed by a read-back of the same register,
 * which forces the posted write out to the chip before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}
350
/* Read a register through the PCI config-space indirect window;
 * counterpart of tg3_write_indirect_reg32().
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
362
/* Write a mailbox register when direct MMIO cannot be used.
 * Two hot mailboxes (RX return consumer, standard ring producer) have
 * dedicated config-space aliases and bypass the indirect window; all
 * others go through BASE_ADDR/DATA at off + 0x5600 (the mailbox region
 * in the indirect address space).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
392
/* Read a mailbox register through the indirect window (mailboxes live
 * at off + 0x5600 in the indirect address space).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
404
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method: flush with a read, waiting both before
                 * and after the read (see trailing udelay below).
                 */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
429
/* Mailbox write plus a flushing read-back, except on chips where the
 * mailbox read is unsafe or unnecessary (write-reorder and ICH
 * workaround configurations).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}
437
/* TX mailbox write with chip-bug workarounds: the value is written
 * twice on parts with the TXD mailbox hardware bug, and read back to
 * flush on parts prone to mailbox write reordering.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}
447
448 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
449 {
450         return (readl(tp->regs + off + GRCMBOX_BASE));
451 }
452
/* 5906: mailbox writes go through the GRC mailbox shadow region. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}
457
/* Register/mailbox access shorthands.  They dispatch through the
 * function pointers chosen at probe time (direct MMIO vs. indirect
 * config-space vs. workaround paths) and expect a local variable
 * named 'tp' (struct tg3 *) to be in scope at the call site.
 */
#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)
468
/* Write a word into NIC on-board SRAM through the memory window.
 * On 5906 the stats-block range of SRAM is not accessible, so writes
 * there are silently dropped.  The window base register is always
 * restored to zero afterwards.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
493
/* Read a word from NIC on-board SRAM through the memory window;
 * counterpart of tg3_write_mem().  The inaccessible 5906 stats-block
 * range reads back as zero.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
520
/* Release all eight APE hardware locks by writing the driver's grant
 * code, so no stale lock from a previous driver instance survives.
 */
static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;

        /* Make sure the driver hasn't any stale locks. */
        for (i = 0; i < 8; i++)
                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
                                APE_LOCK_GRANT_DRIVER);
}
530
/* Acquire one of the APE hardware locks shared with the management
 * firmware.  Requests the lock, then polls the grant register for up
 * to ~1 ms.  Returns 0 on success (or when APE is not enabled),
 * -EINVAL for an unknown lock number, -EBUSY on timeout (in which
 * case the request is revoked before returning).
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status;

        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                return 0;

        switch (locknum) {
                case TG3_APE_LOCK_GRC:
                case TG3_APE_LOCK_MEM:
                        break;
                default:
                        return -EINVAL;
        }

        /* Lock registers are arrays of u32, indexed by lock number. */
        off = 4 * locknum;

        tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
                if (status == APE_LOCK_GRANT_DRIVER)
                        break;
                udelay(10);
        }

        if (status != APE_LOCK_GRANT_DRIVER) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
                                APE_LOCK_GRANT_DRIVER);

                ret = -EBUSY;
        }

        return ret;
}
570
/* Release an APE hardware lock previously taken with tg3_ape_lock().
 * No-op when APE is not enabled or the lock number is unknown.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        int off;

        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                return;

        switch (locknum) {
                case TG3_APE_LOCK_GRC:
                case TG3_APE_LOCK_MEM:
                        break;
                default:
                        return;
        }

        off = 4 * locknum;
        tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}
589
/* Disable chip interrupts: mask the PCI interrupt in MISC_HOST_CTRL,
 * then write 1 to the interrupt mailbox to deassert any pending one.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
596
/* Conditionally force an interrupt so pending work is not lost while
 * interrupts were off: if not using tagged status and the status block
 * was updated, assert an interrupt via GRC SETINT; otherwise kick the
 * coalescing engine to run now.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
606
/* Re-enable chip interrupts: clear irq_sync (the wmb orders this
 * before the mailbox write becomes visible), unmask the PCI interrupt
 * and acknowledge up to last_tag.  With 1-shot MSI the mailbox is
 * written a second time (presumably to re-arm the one-shot interrupt;
 * confirm against chip docs).  Finishes by forcing any pending work
 * to generate an interrupt.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                       (tp->last_tag << 24));
        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               (tp->last_tag << 24));
        tg3_cond_int(tp);
}
621
622 static inline unsigned int tg3_has_work(struct tg3 *tp)
623 {
624         struct tg3_hw_status *sblk = tp->hw_status;
625         unsigned int work_exists = 0;
626
627         /* check for phy events */
628         if (!(tp->tg3_flags &
629               (TG3_FLAG_USE_LINKCHG_REG |
630                TG3_FLAG_POLL_SERDES))) {
631                 if (sblk->status & SD_STATUS_LINK_CHG)
632                         work_exists = 1;
633         }
634         /* check for RX/TX work to do */
635         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
636             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
637                 work_exists = 1;
638
639         return work_exists;
640 }
641
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                     tp->last_tag << 24);
        /* Order the mailbox write against subsequent MMIO writes. */
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tp))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
662
/* Quiesce the data path: stop NAPI polling and disable the TX queue.
 * trans_start is refreshed first so the watchdog does not see a stale
 * timestamp and fire a spurious tx timeout while we are stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        napi_disable(&tp->napi);
        netif_tx_disable(tp->dev);
}
669
/* Restart the data path after tg3_netif_stop(): wake the TX queue,
 * re-enable NAPI, mark the status block updated and re-enable chip
 * interrupts so any work that accrued while stopped is processed.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        netif_wake_queue(tp->dev);
        /* NOTE: unconditional netif_wake_queue is only appropriate
         * so long as all callers are assured to have free tx slots
         * (such as after tg3_init_hw)
         */
        napi_enable(&tp->napi);
        tp->hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
681
/* Switch the chip core clock source via TG3PCI_CLOCK_CTRL, caching the
 * result in tp->pci_clock_ctrl.  Skipped entirely on CPMU-equipped and
 * 5780-class parts.  5705+ parts only need the 625_CORE bit preserved;
 * older parts require the documented two-step ALTCLK sequence before
 * the final value is written.  Each write uses a 40 usec settle time.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
        u32 orig_clock_ctrl;

        if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                return;

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
713
#define PHY_BUSY_LOOPS  5000

/* Read a PHY register over the MII management interface by issuing a
 * read frame through MAC_MI_COM and polling for MI_COM_BUSY to clear
 * (up to PHY_BUSY_LOOPS * 10 usec).  Hardware auto-polling is disabled
 * for the duration and restored afterwards.  On success *val holds the
 * 16-bit register value and 0 is returned; on timeout *val is 0 and
 * -EBUSY is returned.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        /* Build the MI read frame: PHY address, register, read command. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        /* Re-read after a short settle to pick up data. */
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
764
/* Write a PHY register over the MII management interface; mirror image
 * of tg3_readphy().  On 5906, writes to MII_TG3_CTRL and
 * MII_TG3_AUX_CTRL are silently ignored (returns 0 without touching
 * the hardware).  Returns 0 on success, -EBUSY on busy-poll timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        /* Build the MI write frame: address, register, data, command. */
        frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
813
814 static int tg3_bmcr_reset(struct tg3 *tp)
815 {
816         u32 phy_control;
817         int limit, err;
818
819         /* OK, reset it, and poll the BMCR_RESET bit until it
820          * clears or we time out.
821          */
822         phy_control = BMCR_RESET;
823         err = tg3_writephy(tp, MII_BMCR, phy_control);
824         if (err != 0)
825                 return -EBUSY;
826
827         limit = 5000;
828         while (limit--) {
829                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
830                 if (err != 0)
831                         return -EBUSY;
832
833                 if ((phy_control & BMCR_RESET) == 0) {
834                         udelay(40);
835                         break;
836                 }
837                 udelay(10);
838         }
839         if (limit <= 0)
840                 return -EBUSY;
841
842         return 0;
843 }
844
845 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
846 {
847         struct tg3 *tp = (struct tg3 *)bp->priv;
848         u32 val;
849
850         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
851                 return -EAGAIN;
852
853         if (tg3_readphy(tp, reg, &val))
854                 return -EIO;
855
856         return val;
857 }
858
859 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
860 {
861         struct tg3 *tp = (struct tg3 *)bp->priv;
862
863         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
864                 return -EAGAIN;
865
866         if (tg3_writephy(tp, reg, val))
867                 return -EIO;
868
869         return 0;
870 }
871
/* mii_bus ->reset hook: nothing to do, the PHY is reset elsewhere
 * (see tg3_bmcr_reset()); always reports success.
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
876
/* Program the MAC-side RGMII configuration registers to match the
 * in-band / out-of-band status settings captured in tg3_flags3.
 * No-op unless the attached PHY is in RGMII mode.
 */
static void tg3_mdio_config(struct tg3 *tp)
{
	u32 val;

	if (tp->mdio_bus->phy_map[PHY_ADDR]->interface !=
	    PHY_INTERFACE_MODE_RGMII)
		return;

	/* Extended RX decode / send-status bits only apply when the
	 * standard in-band mechanism is disabled.
	 */
	val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
				    MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);

	/* In-band signalling stays enabled unless explicitly disabled. */
	val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
		val |= MAC_PHYCFG2_INBAND_ENABLE;
	tw32(MAC_PHYCFG2, val);

	/* Clear every RGMII RX/TX mode bit, then set only those
	 * selected by the external in-band flags.
	 */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
921
/* Resume MDIO accesses: clear the PAUSED flag under the bus lock,
 * turn off the MAC's MII auto-polling (manual MDIO transactions
 * require it off), and reapply the RGMII MAC configuration if the
 * phylib bus has been registered.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}

	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED)
		tg3_mdio_config(tp);
}
937
/* Pause MDIO bus accesses: after this, tg3_mdio_read()/write()
 * return -EAGAIN until tg3_mdio_start() clears the flag.  The flag
 * is flipped under the bus lock so an in-flight transaction
 * completes first.
 */
static void tg3_mdio_stop(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}
}
946
947 static int tg3_mdio_init(struct tg3 *tp)
948 {
949         int i;
950         u32 reg;
951         struct phy_device *phydev;
952
953         tg3_mdio_start(tp);
954
955         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
956             (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
957                 return 0;
958
959         tp->mdio_bus = mdiobus_alloc();
960         if (tp->mdio_bus == NULL)
961                 return -ENOMEM;
962
963         tp->mdio_bus->name     = "tg3 mdio bus";
964         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
965                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
966         tp->mdio_bus->priv     = tp;
967         tp->mdio_bus->parent   = &tp->pdev->dev;
968         tp->mdio_bus->read     = &tg3_mdio_read;
969         tp->mdio_bus->write    = &tg3_mdio_write;
970         tp->mdio_bus->reset    = &tg3_mdio_reset;
971         tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
972         tp->mdio_bus->irq      = &tp->mdio_irq[0];
973
974         for (i = 0; i < PHY_MAX_ADDR; i++)
975                 tp->mdio_bus->irq[i] = PHY_POLL;
976
977         /* The bus registration will look for all the PHYs on the mdio bus.
978          * Unfortunately, it does not ensure the PHY is powered up before
979          * accessing the PHY ID registers.  A chip reset is the
980          * quickest way to bring the device back to an operational state..
981          */
982         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
983                 tg3_bmcr_reset(tp);
984
985         i = mdiobus_register(tp->mdio_bus);
986         if (i) {
987                 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
988                         tp->dev->name, i);
989                 return i;
990         }
991
992         tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
993
994         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
995
996         switch (phydev->phy_id) {
997         case TG3_PHY_ID_BCM50610:
998                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
999                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
1000                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1001                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1002                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1003                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1004                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1005                 break;
1006         case TG3_PHY_ID_BCMAC131:
1007                 phydev->interface = PHY_INTERFACE_MODE_MII;
1008                 break;
1009         }
1010
1011         tg3_mdio_config(tp);
1012
1013         return 0;
1014 }
1015
1016 static void tg3_mdio_fini(struct tg3 *tp)
1017 {
1018         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1019                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1020                 mdiobus_unregister(tp->mdio_bus);
1021                 mdiobus_free(tp->mdio_bus);
1022                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1023         }
1024 }
1025
/* tp->lock is held. */
/* Signal the on-chip firmware that a driver event is pending by
 * setting GRC_RX_CPU_DRIVER_EVENT, and record the time so
 * tg3_wait_for_event_ack() can bound its wait for the ack.
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
1037
1038 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1039
/* tp->lock is held. */
/* Wait, bounded by TG3_FW_EVENT_TIMEOUT_USEC, for the firmware to
 * acknowledge the previous driver event (i.e. clear
 * GRC_RX_CPU_DRIVER_EVENT).  Returns immediately if the timeout has
 * already elapsed since the last event.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in ~8 usec slices; +1 guarantees at least one check. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1066
/* tp->lock is held. */
/* Report the current link state to the ASF/management firmware on
 * 5780-class devices by copying the key MII registers into the
 * firmware command mailbox and raising a driver event.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	/* Only relevant when ASF management firmware is running on a
	 * 5780-class chip.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	/* NOTE(review): length 14 with four 32-bit data words written
	 * below — presumably a firmware-interface constant; confirm
	 * against the NICDRV_LINK_UPDATE command spec.
	 */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	/* Word 0: BMCR in the high half, BMSR in the low half. */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	/* Word 1: local advertisement / link partner ability. */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	/* Word 2: 1000BASE-T control/status (copper only). */
	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	/* Word 3: PHY address register in the high half. */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
1114
/* Log the current link state (speed, duplex, flow control) when
 * link messages are enabled, and forward the state to the
 * management firmware via tg3_ump_link_report().
 */
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}
1142
1143 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1144 {
1145         u16 miireg;
1146
1147         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1148                 miireg = ADVERTISE_PAUSE_CAP;
1149         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1150                 miireg = ADVERTISE_PAUSE_ASYM;
1151         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1152                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1153         else
1154                 miireg = 0;
1155
1156         return miireg;
1157 }
1158
1159 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1160 {
1161         u16 miireg;
1162
1163         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1164                 miireg = ADVERTISE_1000XPAUSE;
1165         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1166                 miireg = ADVERTISE_1000XPSE_ASYM;
1167         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1168                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1169         else
1170                 miireg = 0;
1171
1172         return miireg;
1173 }
1174
1175 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1176 {
1177         u8 cap = 0;
1178
1179         if (lcladv & ADVERTISE_PAUSE_CAP) {
1180                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1181                         if (rmtadv & LPA_PAUSE_CAP)
1182                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1183                         else if (rmtadv & LPA_PAUSE_ASYM)
1184                                 cap = TG3_FLOW_CTRL_RX;
1185                 } else {
1186                         if (rmtadv & LPA_PAUSE_CAP)
1187                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1188                 }
1189         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1190                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1191                         cap = TG3_FLOW_CTRL_TX;
1192         }
1193
1194         return cap;
1195 }
1196
1197 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1198 {
1199         u8 cap = 0;
1200
1201         if (lcladv & ADVERTISE_1000XPAUSE) {
1202                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1203                         if (rmtadv & LPA_1000XPAUSE)
1204                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1205                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1206                                 cap = TG3_FLOW_CTRL_RX;
1207                 } else {
1208                         if (rmtadv & LPA_1000XPAUSE)
1209                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1210                 }
1211         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1212                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1213                         cap = TG3_FLOW_CTRL_TX;
1214         }
1215
1216         return cap;
1217 }
1218
/* Apply the resolved (or forced) flow-control settings to the MAC's
 * RX/TX mode registers.  @lcladv/@rmtadv are the pause bits used for
 * autoneg resolution; they are ignored when pause autoneg is off.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	/* With phylib, autoneg state lives in the phy_device. */
	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		/* Serdes links use the 1000BASE-X pause encoding. */
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & TG3_FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	/* Only touch the hardware registers when something changed. */
	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & TG3_FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1258
/* phylib link-change callback: reprogram the MAC (port mode, duplex,
 * flow control, TX slot timing) to follow the PHY's new state, and
 * emit a link message when the state actually changed.  Runs with
 * tp->lock taken here; may be called from phylib's state machine.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	spin_lock(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* 10/100 use the MII port mode; 1000 uses GMII. */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: build pause advertisement words
			 * from our config and the partner's reported
			 * pause/asym_pause bits for resolution below.
			 */
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	/* 1000/half needs a longer slot time per hardware erratum;
	 * otherwise use the standard timing.
	 */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report only on a genuine transition in link/speed/duplex/
	 * flow-control state.
	 */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
	    linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock(&tp->lock);

	/* tg3_link_report() logs and talks to firmware; keep it
	 * outside the spinlock.
	 */
	if (linkmesg)
		tg3_link_report(tp);
}
1330
/* Connect the MAC to its PHY through phylib: reset the PHY to a
 * known state, attach with tg3_adjust_link() as the link-change
 * handler, and restrict the advertised features to what the MAC
 * supports.  Idempotent: returns 0 if already connected.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	/* Mask with MAC supported features. */
	phydev->supported &= (PHY_GBIT_FEATURES |
			      SUPPORTED_Pause |
			      SUPPORTED_Asym_Pause);

	phydev->advertising = phydev->supported;

	printk(KERN_INFO
	       "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
	       tp->dev->name, phydev->drv->name, phydev->dev.bus_id);

	return 0;
}
1366
/* Start (or restart) the connected PHY.  If the link was previously
 * put into low-power mode, first restore the saved speed/duplex/
 * autoneg/advertising settings before kicking off autonegotiation.
 * No-op if the PHY is not connected.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
1388
1389 static void tg3_phy_stop(struct tg3 *tp)
1390 {
1391         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1392                 return;
1393
1394         phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
1395 }
1396
1397 static void tg3_phy_fini(struct tg3 *tp)
1398 {
1399         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1400                 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1401                 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1402         }
1403 }
1404
/* Write @val to PHY DSP register @reg using the indirect
 * address/data register pair.
 */
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
1410
/* Enable or disable automatic MDI/MDI-X crossover on copper PHYs.
 * Uses the EPHY shadow registers on 5906, the AUX control misc
 * shadow on other 5705+ chips; no-op for pre-5705 or serdes parts.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		/* Expose the shadow register bank, flip the MDIX bit,
		 * then restore the original test register value.
		 */
		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		/* Read-modify-write the AUX control misc shadow; the
		 * WREN bit commits the change.
		 */
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
1448
/* Enable the PHY's "ethernet wirespeed" feature (fall back to a
 * lower speed on marginal cabling) unless the chip flags forbid it.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	/* Select AUX control shadow 0x7007, then set bits 15 and 4 —
	 * presumably the wirespeed enable bits; magic values carried
	 * over from vendor code (TODO: confirm against PHY datasheet).
	 */
	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
1461
/* Program PHY DSP coefficients from the one-time-programmable (OTP)
 * values cached in tp->phy_otp.  Each field is extracted from the
 * packed OTP word and written to its DSP register.  No-op when no
 * OTP data is present.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
1504
1505 static int tg3_wait_macro_done(struct tg3 *tp)
1506 {
1507         int limit = 100;
1508
1509         while (limit--) {
1510                 u32 tmp32;
1511
1512                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1513                         if ((tmp32 & 0x1000) == 0)
1514                                 break;
1515                 }
1516         }
1517         if (limit <= 0)
1518                 return -EBUSY;
1519
1520         return 0;
1521 }
1522
/* Write a fixed test pattern into each of the four PHY DSP channels,
 * read it back, and verify it.  On mismatch the DSP is put into a
 * known state and -EBUSY is returned; *@resetp is set to 1 when the
 * caller should reset the PHY before retrying.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel test words: three (value, mask-bits) pairs. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select the channel's DSP block and enter write mode
		 * via register 0x16 (macro control).
		 */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Commit the writes and wait for the macro to finish. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Switch the same channel into read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back low/high word pairs and compare against
		 * the pattern (masked to the significant bits).
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: park the DSP in a safe
				 * state before reporting failure.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
1588
/* Zero the test pattern in all four PHY DSP channels, waiting for
 * the write macro to complete after each channel.  Returns 0 on
 * success or -EBUSY on macro timeout.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
1608
/* Workaround reset sequence for 5703/5704/5705 PHYs: repeatedly
 * reset the PHY and exercise the DSP test-pattern path until it
 * verifies, then restore normal operation.  Returns 0 on success or
 * a negative errno.
 *
 * NOTE(review): if the MII_TG3_EXT_CTRL or MII_TG3_CTRL reads fail on
 * every retry (each `continue` below), phy9_orig is used at the end
 * without ever being assigned, and `err` carries a stale value —
 * looks like a latent uninitialized-use path; confirm against later
 * upstream fixes before relying on this code.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* Clear the channel test pattern before restoring state. */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the original master/slave configuration. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
1684
/* This will reset the tigon3 PHY and apply the required
 * chip-specific workarounds after the reset completes.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 cpmuctrl;
        u32 phy_status;
        int err;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val;

                /* Take the 5906 internal ephy out of IDDQ (low-power)
                 * mode before touching it.
                 */
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
        }
        /* Read BMSR twice: its status bits are latched, so the second
         * read reflects current state and proves the PHY responds.
         */
        err  = tg3_readphy(tp, MII_BMSR, &phy_status);
        err |= tg3_readphy(tp, MII_BMSR, &phy_status);
        if (err != 0)
                return -EBUSY;

        /* The reset will drop any existing link; report it now. */
        if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
                netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        /* 5703/5704/5705 need the DSP re-training reset sequence. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        /* On 5784 (non-AX revs), temporarily lift the CPMU 10Mb
         * RX-only restriction while the reset runs.
         */
        cpmuctrl = 0;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
            GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
                cpmuctrl = tr32(TG3_CPMU_CTRL);
                if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
                        tw32(TG3_CPMU_CTRL,
                             cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

        /* Restore the CPMU setting saved above. */
        if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
                u32 phy;

                phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
                tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

                tw32(TG3_CPMU_CTRL, cpmuctrl);
        }

        if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
                u32 val;

                /* Undo the 12.5MHz MAC clock that tg3_power_down_phy()
                 * forces while in low-power mode.
                 */
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
                    CPMU_LSPD_1000MB_MACCLK_12_5) {
                        val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                        udelay(40);
                        tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
                }

                /* Disable GPHY autopowerdown. */
                tg3_writephy(tp, MII_TG3_MISC_SHDW,
                             MII_TG3_MISC_SHDW_WREN |
                             MII_TG3_MISC_SHDW_APD_SEL |
                             MII_TG3_MISC_SHDW_APD_WKTM_84MS);
        }

        tg3_phy_apply_otp(tp);

out:
        /* Post-reset, per-bug PHY DSP workarounds (magic register
         * values come from Broadcom; do not reorder these writes).
         */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8d68);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     MII_TG3_TEST1_TRIM_EN | 0x4);
                } else
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
        } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                /* Set bit 14 with read-modify-write to preserve other bits */
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
        if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
                    tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* adjust output voltage */
                tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
        }

        tg3_phy_toggle_automdix(tp, 1);
        tg3_phy_set_wirespeed(tp);
        return 0;
}
1830
/* Configure the GRC local-control GPIOs that switch auxiliary power.
 * Aux power is kept up when this device -- or, on dual-port
 * 5704/5714, its peer function -- needs it for WOL or ASF; otherwise
 * it is turned off with a chip-specific GPIO sequence.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
        struct tg3 *tp_peer = tp;

        /* Only NIC configurations (TG3_FLG2_IS_NIC) manage aux power. */
        if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
                return;

        /* On dual-port chips, find the other port so its WOL/ASF
         * requirements are honored too.
         */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
                struct net_device *dev_peer;

                dev_peer = pci_get_drvdata(tp->pdev_peer);
                /* remove_one() may have been run on the peer. */
                if (!dev_peer)
                        tp_peer = tp;
                else
                        tp_peer = netdev_priv(dev_peer);
        }

        /* Keep aux power on if either port wants WOL or ASF. */
        if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE0 |
                                     GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OE2 |
                                     GRC_LCLCTRL_GPIO_OUTPUT0 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1),
                                    100);
                } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
                        /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
                        u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
                                             GRC_LCLCTRL_GPIO_OE1 |
                                             GRC_LCLCTRL_GPIO_OE2 |
                                             GRC_LCLCTRL_GPIO_OUTPUT0 |
                                             GRC_LCLCTRL_GPIO_OUTPUT1 |
                                             tp->grc_local_ctrl;
                        tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
                        tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

                        grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
                        tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
                } else {
                        u32 no_gpio2;
                        u32 grc_local_ctrl = 0;

                        /* Let the peer do the sequencing if it has
                         * already been initialized.
                         */
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        /* Workaround to prevent overdrawing Amps. */
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
                                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }

                        /* On 5753 and variants, GPIO2 cannot be used. */
                        no_gpio2 = tp->nic_sram_data_cfg &
                                    NIC_SRAM_DATA_CFG_NO_GPIO2;

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                         GRC_LCLCTRL_GPIO_OE1 |
                                         GRC_LCLCTRL_GPIO_OE2 |
                                         GRC_LCLCTRL_GPIO_OUTPUT1 |
                                         GRC_LCLCTRL_GPIO_OUTPUT2;
                        if (no_gpio2) {
                                grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                                    GRC_LCLCTRL_GPIO_OUTPUT2);
                        }
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        if (!no_gpio2) {
                                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }
                }
        } else {
                /* Neither port needs aux power; turn it off via GPIO1
                 * (not applicable to 5700/5701).
                 */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    GRC_LCLCTRL_GPIO_OE1, 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
                }
        }
}
1941
1942 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1943 {
1944         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1945                 return 1;
1946         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1947                 if (speed != SPEED_10)
1948                         return 1;
1949         } else if (speed == SPEED_10)
1950                 return 1;
1951
1952         return 0;
1953 }
1954
1955 static int tg3_setup_phy(struct tg3 *, int);
1956
1957 #define RESET_KIND_SHUTDOWN     0
1958 #define RESET_KIND_INIT         1
1959 #define RESET_KIND_SUSPEND      2
1960
1961 static void tg3_write_sig_post_reset(struct tg3 *, int);
1962 static int tg3_halt_cpu(struct tg3 *, u32);
1963 static int tg3_nvram_lock(struct tg3 *);
1964 static void tg3_nvram_unlock(struct tg3 *);
1965
/* Put the PHY (or serdes) into its lowest safe power state before the
 * chip enters a low-power PCI state.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
                        u32 serdes_cfg = tr32(MAC_SERDES_CFG);

                        /* Hold the 5704 serdes in soft reset with HW
                         * autoneg selected.
                         */
                        sg_dig_ctrl |=
                                SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
                        tw32(SG_DIG_CTRL, sg_dig_ctrl);
                        tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
                }
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* 5906 internal ephy: reset, then enter IDDQ
                 * low-power mode (undone in tg3_phy_reset()).
                 */
                tg3_bmcr_reset(tp);
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
                return;
        } else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
                /* Force the LEDs off while the PHY is powered down. */
                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_FORCE_LED_OFF);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
        }

        /* The PHY should not be powered down on some chips because
         * of bugs.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
             (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
                return;

        if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
                /* Drop the 1000MB MAC clock to 12.5MHz while powered
                 * down (restored in tg3_phy_reset()).
                 */
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                val |= CPMU_LSPD_1000MB_MACCLK_12_5;
                tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
        }

        /* Finally assert the standard MII power-down bit. */
        tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2013
/* Transition the chip to the requested PCI power state.
 *
 * PCI_D0 simply wakes the device and switches it out of Vaux.  For
 * D1-D3hot the function renegotiates the link down for WOL, arms the
 * WOL mailbox and MAC magic-packet mode, slows or gates the core
 * clocks, optionally powers down the PHY, frobs aux power, and only
 * then sets the PCI power state.
 *
 * Returns 0 on success or -EINVAL for an unsupported state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
        u32 misc_host_ctrl;

        /* Make sure register accesses (indirect or otherwise)
         * will function correctly.
         */
        pci_write_config_dword(tp->pdev,
                               TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        switch (state) {
        case PCI_D0:
                /* Full-power path: wake up and return early. */
                pci_enable_wake(tp->pdev, state, false);
                pci_set_power_state(tp->pdev, PCI_D0);

                /* Switch out of Vaux if it is a NIC */
                if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

                return 0;

        case PCI_D1:
        case PCI_D2:
        case PCI_D3hot:
                break;

        default:
                printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
                        tp->dev->name, state);
                return -EINVAL;
        }
        /* Everything below prepares the chip for a low-power state.
         * Mask PCI interrupts while we reconfigure.
         */
        misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
        tw32(TG3PCI_MISC_HOST_CTRL,
             misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

        /* Save the current link settings and renegotiate down to the
         * low-speed modes needed for WOL/ASF wakeup.
         */
        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
                if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
                    !tp->link_config.phy_is_low_power) {
                        struct phy_device *phydev;
                        u32 advertising;

                        phydev = tp->mdio_bus->phy_map[PHY_ADDR];

                        tp->link_config.phy_is_low_power = 1;

                        /* Remember the full-power settings so they can
                         * be restored on resume.
                         */
                        tp->link_config.orig_speed = phydev->speed;
                        tp->link_config.orig_duplex = phydev->duplex;
                        tp->link_config.orig_autoneg = phydev->autoneg;
                        tp->link_config.orig_advertising = phydev->advertising;

                        advertising = ADVERTISED_TP |
                                      ADVERTISED_Pause |
                                      ADVERTISED_Autoneg |
                                      ADVERTISED_10baseT_Half;

                        if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
                            (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
                                if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
                                        advertising |=
                                                ADVERTISED_100baseT_Half |
                                                ADVERTISED_100baseT_Full |
                                                ADVERTISED_10baseT_Full;
                                else
                                        advertising |= ADVERTISED_10baseT_Full;
                        }

                        phydev->advertising = advertising;

                        phy_start_aneg(phydev);
                }
        } else {
                if (tp->link_config.phy_is_low_power == 0) {
                        tp->link_config.phy_is_low_power = 1;
                        tp->link_config.orig_speed = tp->link_config.speed;
                        tp->link_config.orig_duplex = tp->link_config.duplex;
                        tp->link_config.orig_autoneg = tp->link_config.autoneg;
                }

                /* Force 10/half on copper; serdes links are left alone. */
                if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
                        tp->link_config.speed = SPEED_10;
                        tp->link_config.duplex = DUPLEX_HALF;
                        tp->link_config.autoneg = AUTONEG_ENABLE;
                        tg3_setup_phy(tp, 0);
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val;

                /* 5906: hand WOL handling over via the VCPU control
                 * register rather than the firmware mailbox below.
                 */
                val = tr32(GRC_VCPU_EXT_CTRL);
                tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
        } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
                int i;
                u32 val;

                /* Poll the firmware status mailbox (up to ~200 ms)
                 * until it reports the expected magic value.
                 */
                for (i = 0; i < 200; i++) {
                        tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
                        if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                                break;
                        msleep(1);
                }
        }
        /* Tell the firmware (via SRAM mailbox) that the driver is
         * shutting down with WOL armed.
         */
        if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
                tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
                                                     WOL_DRV_STATE_SHUTDOWN |
                                                     WOL_DRV_WOL |
                                                     WOL_SET_MAGIC_PKT);

        /* Configure the MAC to keep receiving so magic packets can be
         * detected while suspended.
         */
        if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
                u32 mac_mode;

                if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
                        if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
                                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
                                udelay(40);
                        }

                        if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                                mac_mode = MAC_MODE_PORT_MODE_GMII;
                        else
                                mac_mode = MAC_MODE_PORT_MODE_MII;

                        mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5700) {
                                u32 speed = (tp->tg3_flags &
                                             TG3_FLAG_WOL_SPEED_100MB) ?
                                             SPEED_100 : SPEED_10;
                                if (tg3_5700_link_polarity(tp, speed))
                                        mac_mode |= MAC_MODE_LINK_POLARITY;
                                else
                                        mac_mode &= ~MAC_MODE_LINK_POLARITY;
                        }
                } else {
                        mac_mode = MAC_MODE_PORT_MODE_TBI;
                }

                if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
                        tw32(MAC_LED_CTRL, tp->led_ctrl);

                if (pci_pme_capable(tp->pdev, state) &&
                     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE))
                        mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

                /* Preserve APE passthrough enables if the APE owns
                 * part of the MAC.
                 */
                if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
                        mac_mode |= tp->mac_mode &
                                    (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
                        if (mac_mode & MAC_MODE_APE_TX_EN)
                                mac_mode |= MAC_MODE_TDE_ENABLE;
                }

                tw32_f(MAC_MODE, mac_mode);
                udelay(100);

                tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
                udelay(10);
        }

        /* Slow down / gate the core clocks, per chip generation. */
        if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 base_val;

                base_val = tp->pci_clock_ctrl;
                base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
                             CLOCK_CTRL_TXCLK_DISABLE);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
                            CLOCK_CTRL_PWRDOWN_PLL133, 40);
        } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
                   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
                   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
                /* do nothing */
        } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
                     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
                u32 newbits1, newbits2;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
                                    CLOCK_CTRL_TXCLK_DISABLE |
                                    CLOCK_CTRL_ALTCLK);
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                        newbits1 = CLOCK_CTRL_625_CORE;
                        newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
                } else {
                        newbits1 = CLOCK_CTRL_ALTCLK;
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                }

                /* Apply the clock changes in two steps. */
                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
                            40);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
                            40);

                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                        u32 newbits3;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                                newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
                                            CLOCK_CTRL_TXCLK_DISABLE |
                                            CLOCK_CTRL_44MHZ_CORE);
                        } else {
                                newbits3 = CLOCK_CTRL_44MHZ_CORE;
                        }

                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    tp->pci_clock_ctrl | newbits3, 40);
                }
        }

        /* Power the PHY down only if nothing (WOL/ASF/APE) needs it. */
        if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                tg3_power_down_phy(tp);

        tg3_frob_aux_power(tp);

        /* Workaround for unstable PLL clock */
        if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
            (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
                u32 val = tr32(0x7d00);

                val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
                tw32(0x7d00, val);
                if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
                        int err;

                        err = tg3_nvram_lock(tp);
                        tg3_halt_cpu(tp, RX_CPU_BASE);
                        if (!err)
                                tg3_nvram_unlock(tp);
                }
        }

        /* Notify firmware of the shutdown. */
        tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

        if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
                pci_enable_wake(tp->pdev, state, true);

        /* Finally, set the new power state. */
        pci_set_power_state(tp->pdev, state);

        return 0;
}
2263
2264 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2265 {
2266         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2267         case MII_TG3_AUX_STAT_10HALF:
2268                 *speed = SPEED_10;
2269                 *duplex = DUPLEX_HALF;
2270                 break;
2271
2272         case MII_TG3_AUX_STAT_10FULL:
2273                 *speed = SPEED_10;
2274                 *duplex = DUPLEX_FULL;
2275                 break;
2276
2277         case MII_TG3_AUX_STAT_100HALF:
2278                 *speed = SPEED_100;
2279                 *duplex = DUPLEX_HALF;
2280                 break;
2281
2282         case MII_TG3_AUX_STAT_100FULL:
2283                 *speed = SPEED_100;
2284                 *duplex = DUPLEX_FULL;
2285                 break;
2286
2287         case MII_TG3_AUX_STAT_1000HALF:
2288                 *speed = SPEED_1000;
2289                 *duplex = DUPLEX_HALF;
2290                 break;
2291
2292         case MII_TG3_AUX_STAT_1000FULL:
2293                 *speed = SPEED_1000;
2294                 *duplex = DUPLEX_FULL;
2295                 break;
2296
2297         default:
2298                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2299                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2300                                  SPEED_10;
2301                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2302                                   DUPLEX_HALF;
2303                         break;
2304                 }
2305                 *speed = SPEED_INVALID;
2306                 *duplex = DUPLEX_INVALID;
2307                 break;
2308         }
2309 }
2310
/* Program the copper PHY's advertisement registers and (re)start the
 * link according to tp->link_config.
 *
 * Three advertisement cases are handled:
 *   1. PHY entering low-power mode: advertise 10Mb modes only (plus
 *      100Mb modes if Wake-on-LAN requires them).
 *   2. No forced speed (autoneg): advertise every mode requested in
 *      link_config.advertising, plus the requested flow control.
 *   3. Forced speed/duplex: advertise only the requested mode.
 *
 * Finally the BMCR is programmed: forced settings are written directly
 * (after dropping the link via loopback), otherwise autonegotiation is
 * enabled and restarted.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* Autonegotiation: advertise everything requested, except
		 * gigabit modes on 10/100-only hardware.
		 */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = ADVERTISE_CSMA;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;

		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 are made to prefer link-master mode --
			 * presumably a chip erratum; TODO confirm against
			 * Broadcom errata docs.
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			/* Same 5701 A0/B0 link-master setting as in the
			 * autoneg path above.
			 */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			/* No gigabit advertisement in forced 10/100 mode. */
			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Build the forced-mode BMCR value. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Drop the link by putting the PHY in loopback,
			 * then wait (up to ~15ms) for link-down before
			 * applying the new forced settings.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR link-down is latched; read twice to
				 * see the current state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
2448
2449 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2450 {
2451         int err;
2452
2453         /* Turn off tap power management. */
2454         /* Set Extended packet length bit */
2455         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2456
2457         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2458         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2459
2460         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2461         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2462
2463         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2464         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2465
2466         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2467         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2468
2469         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2470         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2471
2472         udelay(40);
2473
2474         return err;
2475 }
2476
2477 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2478 {
2479         u32 adv_reg, all_mask = 0;
2480
2481         if (mask & ADVERTISED_10baseT_Half)
2482                 all_mask |= ADVERTISE_10HALF;
2483         if (mask & ADVERTISED_10baseT_Full)
2484                 all_mask |= ADVERTISE_10FULL;
2485         if (mask & ADVERTISED_100baseT_Half)
2486                 all_mask |= ADVERTISE_100HALF;
2487         if (mask & ADVERTISED_100baseT_Full)
2488                 all_mask |= ADVERTISE_100FULL;
2489
2490         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2491                 return 0;
2492
2493         if ((adv_reg & all_mask) != all_mask)
2494                 return 0;
2495         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2496                 u32 tg3_ctrl;
2497
2498                 all_mask = 0;
2499                 if (mask & ADVERTISED_1000baseT_Half)
2500                         all_mask |= ADVERTISE_1000HALF;
2501                 if (mask & ADVERTISED_1000baseT_Full)
2502                         all_mask |= ADVERTISE_1000FULL;
2503
2504                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2505                         return 0;
2506
2507                 if ((tg3_ctrl & all_mask) != all_mask)
2508                         return 0;
2509         }
2510         return 1;
2511 }
2512
/* Check whether the local pause advertisement matches the flow control
 * settings requested in tp->link_config.flowctrl.
 *
 * @lcladv: receives the local MII advertisement word.
 * @rmtadv: receives the link partner's advertisement word, but only on
 *          a full-duplex link with autonegotiated pause.
 *
 * Returns 1 if the caller may treat the link as usable.  Returns 0
 * only on a full-duplex link whose advertised pause bits are stale;
 * the caller (tg3_setup_copper_phy) then leaves current_link_up at 0,
 * which leads to renegotiation.
 */
static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
{
	u32 curadv, reqadv;

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return 1;

	/* Pause bits currently advertised vs. the ones we want. */
	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		if (curadv != reqadv)
			return 0;

		if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
			tg3_readphy(tp, MII_LPA, rmtadv);
	} else {
		/* Reprogram the advertisement register, even if it
		 * does not affect the current link.  If the link
		 * gets renegotiated in the future, we can save an
		 * additional renegotiation cycle by advertising
		 * it correctly in the first place.
		 */
		if (curadv != reqadv) {
			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
				     ADVERTISE_PAUSE_ASYM);
			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
		}
	}

	return 1;
}
2545
/* Bring up / monitor the link on a copper PHY and program the MAC to
 * match the negotiated (or forced) speed and duplex.
 *
 * @force_reset: if non-zero, reset the PHY before probing the link.
 *
 * Returns 0 on success; the only error returns come from the BCM5401
 * DSP setup / PHY reset path.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Quiesce MAC link-event generation and clear stale status bits. */
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Stop the MAC's automatic MII polling while we access the PHY
	 * directly below.
	 */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* NOTE(review): magic AUX_CTRL value -- purpose not evident from
	 * this file; confirm against the PHY datasheet.
	 */
	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR link-down is latched; read twice for current state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link down on a 5401: (re)load the DSP fixups,
			 * then wait up to ~10ms for link to return.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 rev B0 at gigabit: if the link did not come
			 * back, a full PHY reset plus DSP reload is needed.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	/* Unmask only the link-change PHY interrupt when MI interrupts
	 * are in use; otherwise mask them all (except on the 5906).
	 */
	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	/* Select the PHY LED mode on 5700/5701. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Ensure bit 10 of the AUX_CTRL read-back is set; if we
		 * had to set it, skip straight to renegotiation.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll (up to ~4ms) for link up. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait (up to ~20ms) for the PHY's aux status to become
		 * non-zero, then decode the resolved speed/duplex from it.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Poll for a stable BMCR value, skipping 0 and 0x7fff --
		 * presumably transient values during a PHY update; TODO
		 * confirm against the PHY datasheet.
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Autoneg link counts only if the PHY is actually
			 * autonegotiating, advertises every requested mode,
			 * and the pause advertisement is up to date.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
								  &rmt_adv))
					current_link_up = 1;
			}
		} else {
			/* Forced mode: the PHY must match the requested
			 * settings exactly.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	/* No usable link (or waking from low power): reprogram the PHY
	 * advertisements and take the link immediately if it is already up.
	 */
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC port mode: MII for 10/100, GMII otherwise. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	/* 5700 needs the link-polarity bit adjusted per link speed. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / high-speed PCI: re-clear link status
	 * and notify the on-chip firmware via its mailbox.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate any link state change to the net stack and log it. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2804
/* Software 1000BASE-X autonegotiation state, driven one tick at a
 * time by tg3_fiber_aneg_smachine() for fiber devices.
 */
struct tg3_fiber_aneginfo {
	int state;              /* current ANEG_STATE_* of the machine */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;              /* MR_* control and link-partner bits */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters: cur_time advances once per state-machine call;
	 * link_time records the tick at which the current phase began.
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;  /* last rx config word being matched */
	int ability_match_count; /* consecutive identical rx config words */

	/* One-tick match indicators derived from the MAC rx state. */
	char ability_match, idle_match, ack_match;

	/* Transmitted / received 1000BASE-X config words (ANEG_CFG_*). */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks a phase must remain stable before the machine advances. */
#define ANEG_STATE_SETTLE_TIME  10000
2868
/* Execute one tick of the software 1000BASE-X autonegotiation state
 * machine (per IEEE 802.3 Clause 37 -- TODO confirm exact conformance).
 *
 * Each call samples the received config word from the MAC, updates the
 * ability/ack/idle match tracking in @ap, then advances @ap->state.
 *
 * Returns ANEG_OK (keep ticking), ANEG_TIMER_ENAB (keep ticking, a
 * settle timer is running), ANEG_DONE (negotiation finished) or
 * ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First tick: zero all tracking state. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		/* ability_match is set once the same non-idle config word
		 * has been seen more than once in a row.
		 */
		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word received: the partner is sending idles. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			/* Restart negotiation from scratch. */
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Transmit an all-zero config word to restart the partner. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Hold the restart state for the settle time. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Start transmitting our abilities: full duplex plus the
		 * requested pause bits.
		 */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait for a stable, non-idle config word from the partner. */
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge the partner's config word. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Both sides see each other's (acked) config word. */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Partner went back to idle: renegotiate. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		/* Reserved bits set in the config word are fatal. */
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Latch the link partner's advertised abilities into flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is not implemented, so
				 * negotiation fails when either side wants it.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words; switch to idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
3122
3123 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3124 {
3125         int res = 0;
3126         struct tg3_fiber_aneginfo aninfo;
3127         int status = ANEG_FAILED;
3128         unsigned int tick;
3129         u32 tmp;
3130
3131         tw32_f(MAC_TX_AUTO_NEG, 0);
3132
3133         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3134         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3135         udelay(40);
3136
3137         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3138         udelay(40);
3139
3140         memset(&aninfo, 0, sizeof(aninfo));
3141         aninfo.flags |= MR_AN_ENABLE;
3142         aninfo.state = ANEG_STATE_UNKNOWN;
3143         aninfo.cur_time = 0;
3144         tick = 0;
3145         while (++tick < 195000) {
3146                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3147                 if (status == ANEG_DONE || status == ANEG_FAILED)
3148                         break;
3149
3150                 udelay(1);
3151         }
3152
3153         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3154         tw32_f(MAC_MODE, tp->mac_mode);
3155         udelay(40);
3156
3157         *txflags = aninfo.txconfig;
3158         *rxflags = aninfo.flags;
3159
3160         if (status == ANEG_DONE &&
3161             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3162                              MR_LP_ADV_FULL_DUPLEX)))
3163                 res = 1;
3164
3165         return res;
3166 }
3167
/* One-time init sequence for the BCM8002 SerDes PHY.  The register
 * numbers written below (0x10, 0x11, 0x13, 0x16, 0x18) are
 * vendor-private BCM8002 registers, programmed per Broadcom's init
 * recipe -- values are opaque magic from the reference driver.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link.
	 * If init already completed and the PCS has lost sync (no link),
	 * skip the whole sequence.
	 */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
3217
/* Link setup for fiber devices whose 1000BASE-X autoneg is handled by
 * the on-chip SG_DIG hardware engine (e.g. 5704S).  @mac_status is a
 * snapshot of MAC_STATUS taken by the caller.
 * Returns 1 if the link should be considered up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All revisions except 5704 A0/A1 need the MAC_SERDES_CFG
	 * writes applied around SG_DIG reconfiguration below.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: tear down HW autoneg if it was active. */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				/* Magic per-port SerDes config values from
				 * the reference driver.
				 */
				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	/* Fold the configured pause settings into the expected
	 * SG_DIG advertisement.
	 */
	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* If we're in the parallel-detect grace period with PCS
		 * sync and no incoming config words, keep the link up a
		 * little longer before restarting autoneg.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse soft reset while programming the new SG_DIG setup. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Autoneg finished: translate the pause bits into
			 * MII-style advertisements for the flow control
			 * resolver.
			 */
			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			/* Autoneg still pending: burn down the timeout,
			 * then fall back to parallel detection.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
3359
/* Link setup for fiber devices driven by the software 1000BASE-X
 * autoneg state machine (fiber_autoneg) instead of SG_DIG hardware.
 * @mac_status is a snapshot of MAC_STATUS taken by the caller.
 * Returns 1 if the link should be considered up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Without PCS sync there is no link to negotiate. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate the negotiated config-word pause bits
			 * into MII-style advertisements for the flow
			 * control resolver.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config change events until they settle
		 * (bounded at 30 attempts).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed, but PCS is synced and the partner is
		 * not sending config words: treat the link as up via
		 * parallel detection.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
3421
/* Main link setup entry point for TBI/fiber devices.  Selects between
 * the SG_DIG hardware autoneg engine and the software state machine,
 * then updates carrier state, LEDs, and link reporting.
 * Always returns 0; force_reset is accepted for signature parity with
 * the other tg3_setup_*_phy() routines but is not used here.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember the pre-setup link parameters so we can report
	 * changes even when carrier state itself does not flip.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, carrier already up, init done,
	 * and MAC status shows a clean, stable link -- just ack the
	 * change bits and return.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC port into TBI (fiber) mode. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Mark the status block updated, clearing any stale
	 * link-change indication.
	 */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack sync/config/link-state change events until they stop
	 * arriving (bounded at 100 attempts).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Briefly pulse SEND_CONFIGS to prod the link
			 * partner into renegotiating.
			 */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		/* Fiber links only ever run 1000 Mb/s full duplex. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		/* Carrier unchanged, but report anyway if pause, speed
		 * or duplex settings moved under us.
		 */
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
3529
/* Link setup for fiber parts (e.g. 5714S) whose SerDes is driven
 * through MII-style registers: 1000BASE-X autoneg via MII_ADVERTISE /
 * MII_BMCR rather than the SG_DIG engine.
 * Returns the OR-accumulated error status of all PHY reads.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any pending link state events before probing. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched-low; read twice to get the
	 * current state.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5714: override BMSR link bit with the MAC's view. */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000BASE-X advertisement from the
		 * configured flow control and speed/duplex settings.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or autoneg was off):
			 * restart autoneg and let the poll timer pick up
			 * the result later.
			 */
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: build the desired BMCR value. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Clear the advertisement and restart
				 * autoneg so the partner drops the link.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched-low: double read for fresh link status. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the intersection of our
			 * advertisement and the partner's abilities; no
			 * common 1000BASE-X mode means no usable link.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			}
			else
				current_link_up = 0;
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
3699
/* Poll-time helper for MII-SerDes parts: bring the link up by parallel
 * detection when 1000BASE-X autoneg has stalled, and re-enable autoneg
 * once the partner starts sending config code words again.  The 0x1c /
 * 0x17 / 0x15 accesses are vendor shadow/expansion registers.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* NOTE(review): the double read of 0x15 appears
			 * deliberate, presumably to clear a latched status
			 * bit -- confirm against the PHY data sheet.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				/* Force 1000FD and disable autoneg. */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3757
3758 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3759 {
3760         int err;
3761
3762         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3763                 err = tg3_setup_fiber_phy(tp, force_reset);
3764         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3765                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3766         } else {
3767                 err = tg3_setup_copper_phy(tp, force_reset);
3768         }
3769
3770         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3771             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
3772                 u32 val, scale;
3773
3774                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3775                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3776                         scale = 65;
3777                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3778                         scale = 6;
3779                 else
3780                         scale = 12;
3781
3782                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3783                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3784                 tw32(GRC_MISC_CFG, val);
3785         }
3786
3787         if (tp->link_config.active_speed == SPEED_1000 &&
3788             tp->link_config.active_duplex == DUPLEX_HALF)
3789                 tw32(MAC_TX_LENGTHS,
3790                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3791                       (6 << TX_LENGTHS_IPG_SHIFT) |
3792                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3793         else
3794                 tw32(MAC_TX_LENGTHS,
3795                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3796                       (6 << TX_LENGTHS_IPG_SHIFT) |
3797                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3798
3799         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3800                 if (netif_carrier_ok(tp->dev)) {
3801                         tw32(HOSTCC_STAT_COAL_TICKS,
3802                              tp->coal.stats_block_coalesce_usecs);
3803                 } else {
3804                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3805                 }
3806         }
3807
3808         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3809                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3810                 if (!netif_carrier_ok(tp->dev))
3811                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3812                               tp->pwrmgmt_thresh;
3813                 else
3814                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3815                 tw32(PCIE_PWR_MGMT_THRESH, val);
3816         }
3817
3818         return err;
3819 }
3820
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already active, or mailbox writes
	 * already use the indirect path, a bogus completion means
	 * something else is wrong -- treat it as fatal.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the condition; the actual chip reset happens later in
	 * the workqueue context.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3841
3842 static inline u32 tg3_tx_avail(struct tg3 *tp)
3843 {
3844         smp_mb();
3845         return (tp->tx_pending -
3846                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3847 }
3848
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	/* Reap every descriptor the hardware has consumed so far. */
	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completion for an empty slot is a bogus tx completion;
		 * see tg3_tx_recover() for the reordering explanation.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Walk the fragment slots; they must be skb-free and must
		 * not run past the hardware consumer index.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;
			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		/* Re-check under the tx lock to avoid racing a concurrent
		 * queue stop in the transmit path before waking it.
		 */
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3907
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	/* Select the descriptor ring, bookkeeping array, and buffer size
	 * for the producer ring identified by the opaque cookie.
	 */
	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tp->rx_std[dest_idx];
		map = &tp->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_std_buffers[src_idx];
		skb_size = tp->rx_pkt_buf_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tp->rx_jumbo[dest_idx];
		map = &tp->rx_jumbo_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_jumbo_buffers[src_idx];
		skb_size = RX_JUMBO_PKT_BUF_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	/* NOTE(review): the mapping result is not checked with
	 * pci_dma_mapping_error(); a failed mapping would be programmed
	 * into the descriptor as-is.  Worth confirming against the
	 * platforms this driver must support.
	 */
	mapping = pci_map_single(tp->pdev, skb->data,
				 skb_size - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	/* Publish the new buffer address to the hardware descriptor. */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
3979
3980 /* We only need to move over in the address because the other
3981  * members of the RX descriptor are invariant.  See notes above
3982  * tg3_alloc_rx_skb for full details.
3983  */
3984 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3985                            int src_idx, u32 dest_idx_unmasked)
3986 {
3987         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3988         struct ring_info *src_map, *dest_map;
3989         int dest_idx;
3990
3991         switch (opaque_key) {
3992         case RXD_OPAQUE_RING_STD:
3993                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3994                 dest_desc = &tp->rx_std[dest_idx];
3995                 dest_map = &tp->rx_std_buffers[dest_idx];
3996                 src_desc = &tp->rx_std[src_idx];
3997                 src_map = &tp->rx_std_buffers[src_idx];
3998                 break;
3999
4000         case RXD_OPAQUE_RING_JUMBO:
4001                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4002                 dest_desc = &tp->rx_jumbo[dest_idx];
4003                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4004                 src_desc = &tp->rx_jumbo[src_idx];
4005                 src_map = &tp->rx_jumbo_buffers[src_idx];
4006                 break;
4007
4008         default:
4009                 return;
4010         }
4011
4012         dest_map->skb = src_map->skb;
4013         pci_unmap_addr_set(dest_map, mapping,
4014                            pci_unmap_addr(src_map, mapping));
4015         dest_desc->addr_hi = src_desc->addr_hi;
4016         dest_desc->addr_lo = src_desc->addr_lo;
4017
4018         src_map->skb = NULL;
4019 }
4020
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged frame to the stack via the hw-accel receive path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
4027
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie says which producer ring (std/jumbo)
		 * the buffer came from and its index within that ring.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			/* Unknown ring cookie: skip this status entry. */
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			/* Put the buffer back on its producer ring. */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large frame: post a fresh replacement buffer and
			 * hand the mapped buffer up the stack directly.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small frame: recycle the original buffer and pass
			 * up a right-sized copy instead.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			/* 2-byte offset keeps the IP header aligned. */
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust hardware checksumming only when the chip validated
		 * the TCP/UDP checksum (result 0xffff).
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically tell the chip about std buffers we have
		 * reposted so it does not run dry during long bursts.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	/* Order the mailbox writes against subsequent MMIO. */
	mmiowb();

	return received;
}
4207
/* One pass of the NAPI poll loop: service link-change events, the TX
 * completion ring, and up to (budget - work_done) RX packets.
 * Returns the updated work_done count.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit while keeping the
			 * status block marked as updated.
			 */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
				/* phylib handles the link; just ack the
				 * MAC status bits here.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		/* Bogus completion seen: stop here so tg3_poll() can
		 * hand off to the reset task.
		 */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}
4249
/* NAPI poll callback: run tg3_poll_work() until the budget is spent or
 * no work remains, then leave polling mode and re-enable interrupts.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		/* A bogus TX completion was detected; bail out and let
		 * the reset task recover the chip.
		 */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		/* No pending work: exit polling and unmask the IRQ. */
		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
4290
/* Stop the IRQ handlers from scheduling new NAPI work and wait for any
 * handler already running to finish.  The caller clears tp->irq_sync
 * later to resume normal interrupt processing.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();	/* irq_sync must be visible before we wait */

	synchronize_irq(tp->pdev->irq);
}
4300
/* Nonzero while interrupts are quiesced (see tg3_irq_quiesce). */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
4305
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	/* _bh variant: presumably because the NAPI poll path takes
	 * tp->lock from softirq context -- see tg3_poll_work().
	 */
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
4317
/* Release the lock taken by tg3_full_lock().  Note this does not undo
 * an IRQ quiesce; tp->irq_sync is cleared elsewhere (e.g. in
 * tg3_restart_hw).
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
4322
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the caches for the status block and next RX status entry. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Skip scheduling NAPI while tg3_irq_quiesce() is draining irqs. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
4339
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the caches for the status block and next RX status entry. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/* Skip scheduling NAPI while tg3_irq_quiesce() is draining irqs. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
4364
/* INTx interrupt handler for chips not using tagged status blocks. */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Not our interrupt (or chip is resetting). */
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
4413
/* INTx interrupt handler for chips using tagged status blocks. */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Not our interrupt (or chip is resetting). */
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
4461
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	/* Claim the interrupt if the status block was updated or the
	 * chip reports its interrupt line asserted; mask further
	 * interrupts so the test observes exactly one.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
4476
4477 static int tg3_init_hw(struct tg3 *, int);
4478 static int tg3_halt(struct tg3 *, int, int);
4479
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		/* Init failed: shut the chip down and close the device.
		 * The lock is dropped around del_timer_sync()/dev_close()
		 * (see __releases/__acquires above), presumably because
		 * they may sleep while tp->lock is a spinlock.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
4503
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the interrupt handler directly so the device
 * can be serviced with normal interrupt delivery unavailable.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
4512
/* Workqueue handler that halts and fully re-initializes the chip; it is
 * scheduled by tg3_tx_timeout() and by the TX-recovery path in
 * tg3_poll().
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* Nothing to do if the interface was brought down meanwhile. */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	/* Re-take the lock, this time also quiescing the IRQ handler. */
	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* Engage the mailbox-write-reorder workaround flagged by
		 * tg3_tx_recover(): switch the mailbox write handlers.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);
}
4560
/* Dump a few MAC/DMA status registers to aid TX-timeout debugging. */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
4568
/* net_device watchdog callback: log chip state and schedule a reset. */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	/* The actual reset happens in tg3_reset_task(). */
	schedule_work(&tp->reset_task);
}
4581
4582 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4583 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4584 {
4585         u32 base = (u32) mapping & 0xffffffff;
4586
4587         return ((base > 0xffffdcc0) &&
4588                 (base + len + 8 < base));
4589 }
4590
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* NOTE(review): compiled in only for 64-bit HIGHMEM configs,
	 * presumably because other configs cannot produce bus addresses
	 * beyond 40 bits -- confirm before relying on this elsewhere.
	 */
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_40BIT_MASK);
	return 0;
#else
	return 0;
#endif
}
4603
4604 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4605
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * The offending skb is copied into a fresh linear skb, remapped, and
 * placed into a single descriptor; the descriptor slots previously
 * claimed for the original skb (up to last_plus_one) are reclaimed.
 * Returns 0 on success, -1 if the packet had to be dropped.  In both
 * cases the original skb is unmapped and freed.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb;
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	/* On 5701 the copy also gains headroom so the data can start on
	 * a 4-byte boundary; other chips take a plain copy.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
		new_addr = skb_shinfo(new_skb)->dma_maps[0];

		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
			if (!ret)
				skb_dma_unmap(&tp->pdev->dev, new_skb,
					      DMA_TO_DEVICE);
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			/* One descriptor carries the whole linear copy. */
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		if (i == 0) {
			/* First slot owns new_skb (NULL if dropped). */
			tp->tx_buffers[entry].skb = new_skb;
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	/* The original (offending) skb is always unmapped and freed. */
	skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	return ret;
}
4668
4669 static void tg3_set_txd(struct tg3 *tp, int entry,
4670                         dma_addr_t mapping, int len, u32 flags,
4671                         u32 mss_and_is_end)
4672 {
4673         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4674         int is_end = (mss_and_is_end & 0x1);
4675         u32 mss = (mss_and_is_end >> 1);
4676         u32 vlan_tag = 0;
4677
4678         if (is_end)
4679                 flags |= TXD_FLAG_END;
4680         if (flags & TXD_FLAG_VLAN) {
4681                 vlan_tag = flags >> 16;
4682                 flags &= 0xffff;
4683         }
4684         vlan_tag |= (mss << TXD_MSS_SHIFT);
4685
4686         txd->addr_hi = ((u64) mapping >> 32);
4687         txd->addr_lo = ((u64) mapping & 0xffffffff);
4688         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4689         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4690 }
4691
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Fast transmit path: DMA-maps the skb (head plus page frags), fills one
 * TX descriptor per segment, then rings the send-host producer mailbox.
 * Returns NETDEV_TX_OK (the skb is consumed, even on the drop paths) or
 * NETDEV_TX_BUSY when the ring is unexpectedly full.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss;
        struct skb_shared_info *sp;
        dma_addr_t mapping;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        mss = 0;
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                int tcp_opt_len, ip_tcp_len;

                /* The TSO path rewrites the IP/TCP headers below, so we
                 * must own them outright.
                 */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
                else {
                        struct iphdr *iph = ip_hdr(skb);

                        tcp_opt_len = tcp_optlen(skb);
                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                        /* Zeroed here and tot_len set to the per-segment
                         * length; presumably the hardware finalizes the
                         * checksum per emitted segment.
                         */
                        iph->check = 0;
                        iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        /* Header length is packed into the upper bits of
                         * the mss word for the descriptor format.
                         */
                        mss |= (ip_tcp_len + tcp_opt_len) << 9;
                }

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                /* HW TSO supplies the TCP checksum. */
                tcp_hdr(skb)->check = 0;

        }
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
                /* Mapping failed; silently drop the packet. */
                dev_kfree_skb(skb);
                goto out_unlock;
        }

        sp = skb_shinfo(skb);

        mapping = sp->dma_maps[0];

        /* Only the first ring slot of a packet records the skb; TX
         * reclaim uses it to find and unmap the whole packet.
         */
        tp->tx_buffers[entry].skb = skb;

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = sp->dma_maps[i + 1];
                        tp->tx_buffers[entry].skb = NULL;

                        tg3_set_txd(tp, entry, mapping, len,
                                    base_flags, (i == last) | (mss << 1));

                        entry = NEXT_TX(entry);
                }
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
                /* Re-check after stopping to close the race against the
                 * TX reclaim path freeing entries concurrently.
                 */
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        /* Order the mailbox write before unlock on weakly-ordered I/O. */
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
4811
4812 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4813
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Software-segments @skb and feeds each resulting packet back through
 * tg3_start_xmit_dma_bug() (with TSO masked out of the feature set).
 * The original @skb is consumed on every path except the early
 * NETDEV_TX_BUSY return.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
        struct sk_buff *segs, *nskb;

        /* Estimate the number of fragments in the worst case */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
                netif_stop_queue(tp->dev);
                if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
                        return NETDEV_TX_BUSY;

                /* Reclaim made room between the two checks; resume. */
                netif_wake_queue(tp->dev);
        }

        segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
        if (IS_ERR(segs))
                /* Segmentation failed; drop the original packet. */
                goto tg3_tso_bug_end;

        /* Walk the segment list, detaching and transmitting each one. */
        do {
                nskb = segs;
                segs = segs->next;
                nskb->next = NULL;
                tg3_start_xmit_dma_bug(nskb, tp->dev);
        } while (segs);

tg3_tso_bug_end:
        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}
4846
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Like tg3_start_xmit() but additionally screens every DMA mapping for
 * the 4GB-boundary and 40-bit address errata; if any descriptor would
 * trip a hardware bug the whole packet is re-posted through
 * tigon3_dma_hwbug_workaround().  Returns NETDEV_TX_OK (skb consumed)
 * or NETDEV_TX_BUSY when the ring is unexpectedly full.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss;
        struct skb_shared_info *sp;
        int would_hit_hwbug;
        dma_addr_t mapping;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
        mss = 0;
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                struct iphdr *iph;
                int tcp_opt_len, ip_tcp_len, hdr_len;

                /* Headers are rewritten below; make sure we own them. */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                tcp_opt_len = tcp_optlen(skb);
                ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                /* Oversized TSO headers trip a chip bug on some parts;
                 * fall back to software GSO segmentation.
                 */
                hdr_len = ip_tcp_len + tcp_opt_len;
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                             (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
                        return (tg3_tso_bug(tp, skb));

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                iph = ip_hdr(skb);
                iph->check = 0;
                iph->tot_len = htons(mss + hdr_len);
                if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                        /* HW TSO supplies the TCP checksum itself. */
                        tcp_hdr(skb)->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                } else
                        /* Firmware TSO wants the pseudo-header checksum
                         * pre-seeded.
                         */
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);

                /* Encode IP/TCP option lengths (in 32-bit words) where
                 * this chip variant expects them: in the mss word for
                 * HW TSO and 5705, in base_flags otherwise.
                 */
                if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                mss |= (tsflags << 11);
                        }
                } else {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                base_flags |= tsflags << 12;
                        }
                }
        }
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
                /* Mapping failed; silently drop the packet. */
                dev_kfree_skb(skb);
                goto out_unlock;
        }

        sp = skb_shinfo(skb);

        mapping = sp->dma_maps[0];

        tp->tx_buffers[entry].skb = skb;

        would_hit_hwbug = 0;

        /* 5701 DMA erratum forces the workaround for every packet. */
        if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
                would_hit_hwbug = 1;
        else if (tg3_4g_overflow_test(mapping, len))
                would_hit_hwbug = 1;

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = sp->dma_maps[i + 1];

                        tp->tx_buffers[entry].skb = NULL;

                        /* Screen each frag mapping for both errata. */
                        if (tg3_4g_overflow_test(mapping, len))
                                would_hit_hwbug = 1;

                        if (tg3_40bit_overflow_test(tp, mapping, len))
                                would_hit_hwbug = 1;

                        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last)|(mss << 1));
                        else
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last));

                        entry = NEXT_TX(entry);
                }
        }

        if (would_hit_hwbug) {
                u32 last_plus_one = entry;
                u32 start;

                /* Rewind to the first ring slot this packet used. */
                start = entry - 1 - skb_shinfo(skb)->nr_frags;
                start &= (TG3_TX_RING_SIZE - 1);

                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
                if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
                                                &start, base_flags, mss))
                        goto out_unlock;

                entry = start;
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
                /* Re-check to close the race with concurrent reclaim. */
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        /* Order the mailbox write before unlock on weakly-ordered I/O. */
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
5024
5025 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5026                                int new_mtu)
5027 {
5028         dev->mtu = new_mtu;
5029
5030         if (new_mtu > ETH_DATA_LEN) {
5031                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5032                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5033                         ethtool_op_set_tso(dev, 0);
5034                 }
5035                 else
5036                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5037         } else {
5038                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5039                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5040                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5041         }
5042 }
5043
/* net_device change_mtu handler.
 *
 * Validates @new_mtu against the chip limits, then — if the interface is
 * running — halts the chip, applies the new MTU and restarts the
 * hardware.  Returns 0 on success or a negative errno.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                tg3_set_mtu(dev, tp, new_mtu);
                return 0;
        }

        /* Quiesce the PHY and the data path before resetting the chip. */
        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

        tg3_set_mtu(dev, tp, new_mtu);

        err = tg3_restart_hw(tp, 0);

        /* Only resume the data path / PHY if the restart succeeded. */
        if (!err)
                tg3_netif_start(tp);

        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);

        return err;
}
5082
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
        struct ring_info *rxp;
        int i;

        /* Standard RX ring: unmap and free every posted buffer. */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                rxp = &tp->rx_std_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 tp->rx_pkt_buf_sz - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* Jumbo RX ring: same, with the jumbo buffer size. */
        for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                rxp = &tp->rx_jumbo_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* TX ring: only the first slot of a packet holds the skb; skip
         * over the frag slots it occupies after unmapping/freeing it.
         */
        for (i = 0; i < TG3_TX_RING_SIZE; ) {
                struct tx_ring_info *txp;
                struct sk_buff *skb;

                txp = &tp->tx_buffers[i];
                skb = txp->skb;

                if (skb == NULL) {
                        i++;
                        continue;
                }

                skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);

                txp->skb = NULL;

                /* One slot for the head plus one per page fragment. */
                i += skb_shinfo(skb)->nr_frags + 1;

                dev_kfree_skb_any(skb);
        }
}
5142
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM if not even a single RX buffer could be
 * allocated; on partial allocation failure the pending counts are
 * shrunk to what succeeded.
 */
static int tg3_init_rings(struct tg3 *tp)
{
        u32 i;

        /* Free up all the SKBs. */
        tg3_free_rings(tp);

        /* Zero out all descriptors. */
        memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
        memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
        memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
        memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

        /* 5780-class chips use the jumbo buffer size in the standard
         * ring when running with a jumbo MTU.
         */
        tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
        if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
            (tp->dev->mtu > ETH_DATA_LEN))
                tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

        /* Initialize invariants of the rings, we only set this
         * stuff once.  This works because the card does not
         * write into the rx buffer posting rings.
         */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tp->rx_std[i];
                rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
                        << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                /* The opaque field lets RX completion identify which
                 * ring and index a returned buffer came from.
                 */
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                        struct tg3_rx_buffer_desc *rxd;

                        rxd = &tp->rx_jumbo[i];
                        rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
                                << RXD_LEN_SHIFT;
                        rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
                                RXD_FLAG_JUMBO;
                        rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
                }
        }

        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
                if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
                        printk(KERN_WARNING PFX
                               "%s: Using a smaller RX standard ring, "
                               "only %d out of %d buffers were allocated "
                               "successfully.\n",
                               tp->dev->name, i, tp->rx_pending);
                        if (i == 0)
                                return -ENOMEM;
                        /* Run with however many buffers we got. */
                        tp->rx_pending = i;
                        break;
                }
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < tp->rx_jumbo_pending; i++) {
                        if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
                                             -1, i) < 0) {
                                printk(KERN_WARNING PFX
                                       "%s: Using a smaller RX jumbo ring, "
                                       "only %d out of %d buffers were "
                                       "allocated successfully.\n",
                                       tp->dev->name, i, tp->rx_jumbo_pending);
                                if (i == 0) {
                                        /* Jumbo total failure: release the
                                         * std buffers allocated above too.
                                         */
                                        tg3_free_rings(tp);
                                        return -ENOMEM;
                                }
                                tp->rx_jumbo_pending = i;
                                break;
                        }
                }
        }
        return 0;
}
5232
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Releases everything tg3_alloc_consistent() obtained; safe to call on a
 * partially-initialized tp (each pointer is checked and NULLed).
 */
static void tg3_free_consistent(struct tg3 *tp)
{
        /* rx_std_buffers is one kzalloc covering rx_std_buffers,
         * rx_jumbo_buffers and tx_buffers (see tg3_alloc_consistent);
         * this single kfree releases all three.
         */
        kfree(tp->rx_std_buffers);
        tp->rx_std_buffers = NULL;
        if (tp->rx_std) {
                pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
                                    tp->rx_std, tp->rx_std_mapping);
                tp->rx_std = NULL;
        }
        if (tp->rx_jumbo) {
                pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
                                    tp->rx_jumbo, tp->rx_jumbo_mapping);
                tp->rx_jumbo = NULL;
        }
        if (tp->rx_rcb) {
                pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
                                    tp->rx_rcb, tp->rx_rcb_mapping);
                tp->rx_rcb = NULL;
        }
        if (tp->tx_ring) {
                pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
                        tp->tx_ring, tp->tx_desc_mapping);
                tp->tx_ring = NULL;
        }
        if (tp->hw_status) {
                pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
                                    tp->hw_status, tp->status_mapping);
                tp->hw_status = NULL;
        }
        if (tp->hw_stats) {
                pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
                                    tp->hw_stats, tp->stats_mapping);
                tp->hw_stats = NULL;
        }
}
5272
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the sw ring-info arrays plus all DMA-coherent rings and
 * status/stats blocks.  Returns 0 on success or -ENOMEM, in which case
 * everything already allocated has been freed again.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
        /* One allocation carved into three arrays: std RX ring info,
         * jumbo RX ring info, then TX ring info.  tg3_free_consistent()
         * frees it via the rx_std_buffers pointer alone.
         */
        tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
                                      (TG3_RX_RING_SIZE +
                                       TG3_RX_JUMBO_RING_SIZE)) +
                                     (sizeof(struct tx_ring_info) *
                                      TG3_TX_RING_SIZE),
                                     GFP_KERNEL);
        if (!tp->rx_std_buffers)
                return -ENOMEM;

        tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
        tp->tx_buffers = (struct tx_ring_info *)
                &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

        tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
                                          &tp->rx_std_mapping);
        if (!tp->rx_std)
                goto err_out;

        tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
                                            &tp->rx_jumbo_mapping);

        if (!tp->rx_jumbo)
                goto err_out;

        tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
                                          &tp->rx_rcb_mapping);
        if (!tp->rx_rcb)
                goto err_out;

        tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
                                           &tp->tx_desc_mapping);
        if (!tp->tx_ring)
                goto err_out;

        tp->hw_status = pci_alloc_consistent(tp->pdev,
                                             TG3_HW_STATUS_SIZE,
                                             &tp->status_mapping);
        if (!tp->hw_status)
                goto err_out;

        tp->hw_stats = pci_alloc_consistent(tp->pdev,
                                            sizeof(struct tg3_hw_stats),
                                            &tp->stats_mapping);
        if (!tp->hw_stats)
                goto err_out;

        /* pci_alloc_consistent memory is not zeroed; clear the blocks
         * the chip and driver read before they are first written.
         */
        memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return 0;

err_out:
        /* Unwind whatever succeeded; free_consistent NULL-checks each. */
        tg3_free_consistent(tp);
        return -ENOMEM;
}
5334
5335 #define MAX_WAIT_CNT 1000
5336
/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 *
 * @ofs: register offset of the block's mode register.
 * @enable_bit: the enable bit to clear and wait on.
 * @silent: suppress the timeout message.
 *
 * Returns 0 on success (or for blocks the 5705+ chips cannot disable),
 * -ENODEV if the enable bit fails to clear within MAX_WAIT_CNT polls.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
        unsigned int i;
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                switch (ofs) {
                case RCVLSC_MODE:
                case DMAC_MODE:
                case MBFREE_MODE:
                case BUFMGR_MODE:
                case MEMARB_MODE:
                        /* We can't enable/disable these bits of the
                         * 5705/5750, just say success.
                         */
                        return 0;

                default:
                        break;
                }
        }

        /* Clear the enable bit with a posted (flushed) write ... */
        val = tr32(ofs);
        val &= ~enable_bit;
        tw32_f(ofs, val);

        /* ... then poll up to MAX_WAIT_CNT * 100us for it to drop. */
        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                val = tr32(ofs);
                if ((val & enable_bit) == 0)
                        break;
        }

        if (i == MAX_WAIT_CNT && !silent) {
                printk(KERN_ERR PFX "tg3_stop_block timed out, "
                       "ofs=%lx enable_bit=%x\n",
                       ofs, enable_bit);
                return -ENODEV;
        }

        return 0;
}
5382
/* tp->lock is held.
 *
 * Shuts down the chip's RX and TX engines block by block (receive side
 * first, then send side, then the host-coherency/buffer-manager blocks),
 * and clears the host status and statistics blocks.  Individual block
 * timeouts are OR-ed into the return value; @silent suppresses their
 * log messages.  Returns 0 if every block stopped cleanly.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
        int i, err;

        tg3_disable_ints(tp);

        /* Stop accepting new RX traffic before stopping RX blocks. */
        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        /* MAC_TX_MODE has no tg3_stop_block() helper; poll it here. */
        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
                       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
                       tp->dev->name, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        /* Pulse the FTQ reset register. */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

        /* Clear the host-visible status/stats blocks if allocated. */
        if (tp->hw_status)
                memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        if (tp->hw_stats)
                memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return err;
}
5445
5446 /* tp->lock is held. */
5447 static int tg3_nvram_lock(struct tg3 *tp)
5448 {
5449         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5450                 int i;
5451
5452                 if (tp->nvram_lock_cnt == 0) {
5453                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5454                         for (i = 0; i < 8000; i++) {
5455                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5456                                         break;
5457                                 udelay(20);
5458                         }
5459                         if (i == 8000) {
5460                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5461                                 return -ENODEV;
5462                         }
5463                 }
5464                 tp->nvram_lock_cnt++;
5465         }
5466         return 0;
5467 }
5468
5469 /* tp->lock is held. */
5470 static void tg3_nvram_unlock(struct tg3 *tp)
5471 {
5472         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5473                 if (tp->nvram_lock_cnt > 0)
5474                         tp->nvram_lock_cnt--;
5475                 if (tp->nvram_lock_cnt == 0)
5476                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5477         }
5478 }
5479
5480 /* tp->lock is held. */
5481 static void tg3_enable_nvram_access(struct tg3 *tp)
5482 {
5483         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5484             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5485                 u32 nvaccess = tr32(NVRAM_ACCESS);
5486
5487                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5488         }
5489 }
5490
5491 /* tp->lock is held. */
5492 static void tg3_disable_nvram_access(struct tg3 *tp)
5493 {
5494         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5495             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5496                 u32 nvaccess = tr32(NVRAM_ACCESS);
5497
5498                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5499         }
5500 }
5501
/* Post an event code to the APE management firmware's event-status
 * word and ring its doorbell.  Silently does nothing if no ready APE
 * firmware is detected or a previously posted event is never consumed.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* Only proceed if the APE shared segment carries its signature
	 * magic, i.e. APE firmware is actually present.
	 */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	/* ... and the APE firmware reports itself ready. */
	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Previous event consumed: post ours (with the pending
		 * bit set) while we still hold the memory lock.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the doorbell only if our event was actually posted above
	 * (apedata still holds the pre-write status sample).
	 */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
5537
5538 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5539 {
5540         u32 event;
5541         u32 apedata;
5542
5543         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5544                 return;
5545
5546         switch (kind) {
5547                 case RESET_KIND_INIT:
5548                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5549                                         APE_HOST_SEG_SIG_MAGIC);
5550                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5551                                         APE_HOST_SEG_LEN_MAGIC);
5552                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5553                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5554                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5555                                         APE_HOST_DRIVER_ID_MAGIC);
5556                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5557                                         APE_HOST_BEHAV_NO_PHYLOCK);
5558
5559                         event = APE_EVENT_STATUS_STATE_START;
5560                         break;
5561                 case RESET_KIND_SHUTDOWN:
5562                         event = APE_EVENT_STATUS_STATE_UNLOAD;
5563                         break;
5564                 case RESET_KIND_SUSPEND:
5565                         event = APE_EVENT_STATUS_STATE_SUSPEND;
5566                         break;
5567                 default:
5568                         return;
5569         }
5570
5571         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5572
5573         tg3_ape_send_event(tp, event);
5574 }
5575
5576 /* tp->lock is held. */
5577 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5578 {
5579         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5580                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5581
5582         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5583                 switch (kind) {
5584                 case RESET_KIND_INIT:
5585                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5586                                       DRV_STATE_START);
5587                         break;
5588
5589                 case RESET_KIND_SHUTDOWN:
5590                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5591                                       DRV_STATE_UNLOAD);
5592                         break;
5593
5594                 case RESET_KIND_SUSPEND:
5595                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5596                                       DRV_STATE_SUSPEND);
5597                         break;
5598
5599                 default:
5600                         break;
5601                 }
5602         }
5603
5604         if (kind == RESET_KIND_INIT ||
5605             kind == RESET_KIND_SUSPEND)
5606                 tg3_ape_driver_state_change(tp, kind);
5607 }
5608
5609 /* tp->lock is held. */
5610 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5611 {
5612         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5613                 switch (kind) {
5614                 case RESET_KIND_INIT:
5615                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5616                                       DRV_STATE_START_DONE);
5617                         break;
5618
5619                 case RESET_KIND_SHUTDOWN:
5620                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5621                                       DRV_STATE_UNLOAD_DONE);
5622                         break;
5623
5624                 default:
5625                         break;
5626                 }
5627         }
5628
5629         if (kind == RESET_KIND_SHUTDOWN)
5630                 tg3_ape_driver_state_change(tp, kind);
5631 }
5632
5633 /* tp->lock is held. */
5634 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5635 {
5636         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5637                 switch (kind) {
5638                 case RESET_KIND_INIT:
5639                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5640                                       DRV_STATE_START);
5641                         break;
5642
5643                 case RESET_KIND_SHUTDOWN:
5644                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5645                                       DRV_STATE_UNLOAD);
5646                         break;
5647
5648                 case RESET_KIND_SUSPEND:
5649                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5650                                       DRV_STATE_SUSPEND);
5651                         break;
5652
5653                 default:
5654                         break;
5655                 }
5656         }
5657 }
5658
5659 static int tg3_poll_fw(struct tg3 *tp)
5660 {
5661         int i;
5662         u32 val;
5663
5664         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5665                 /* Wait up to 20ms for init done. */
5666                 for (i = 0; i < 200; i++) {
5667                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5668                                 return 0;
5669                         udelay(100);
5670                 }
5671                 return -ENODEV;
5672         }
5673
5674         /* Wait for firmware initialization to complete. */
5675         for (i = 0; i < 100000; i++) {
5676                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5677                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5678                         break;
5679                 udelay(10);
5680         }
5681
5682         /* Chip might not be fitted with firmware.  Some Sun onboard
5683          * parts are configured like that.  So don't signal the timeout
5684          * of the above loop as an error, but do report the lack of
5685          * running firmware once.
5686          */
5687         if (i >= 100000 &&
5688             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5689                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5690
5691                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5692                        tp->dev->name);
5693         }
5694
5695         return 0;
5696 }
5697
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* The GRC core-clock reset can clear the memory-enable bit in
	 * PCI_COMMAND (see the comment in tg3_chip_reset()); keep a
	 * copy so tg3_restore_pci_state() can write it back.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5703
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Put back the PCI_COMMAND value captured by
	 * tg3_save_pci_state() before the reset.
	 */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
		pcie_set_readrq(tp->pdev, 4096);
	else {
		/* Conventional PCI/PCI-X: restore the saved cacheline
		 * size and latency timer values.
		 */
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->pcix_cap) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			/* Also re-enable MSI mode in the chip itself. */
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
5765
5766 static void tg3_stop_fw(struct tg3 *);
5767
/* tp->lock is held.
 *
 * Perform a full GRC core-clock reset of the chip and bring the
 * register interface back to a usable state: save PCI config state,
 * issue the reset, restore PCI state, re-enable the memory arbiter
 * and MAC, then wait for firmware boot and re-probe ASF state.
 * Returns 0 on success or the error from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	tg3_mdio_stop(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	smp_mb();
	synchronize_irq(tp->pdev->irq);

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* NOTE(review): register 0x7e2c and bit 29 below are
		 * undocumented PCIe workarounds inherited from vendor
		 * code; meaning unknown — do not change the values.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Tell the 5906 VCPU a driver reset is coming and make
		 * sure its CPU is not halted.
		 */
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			/* NOTE(review): config offset 0xc4 bit 15 —
			 * undocumented workaround for 5750 A0.
			 */
			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	tg3_restore_pci_state(tp);

	/* PCI config space is usable again: let the irq handler
	 * touch registers once more.
	 */
	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Reprogram MAC_MODE according to the PHY interface in use. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		/* Preserve only the APE TX/RX enables; give the MAC
		 * TX path back if the APE had it enabled.
		 */
		tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
		if (tp->mac_mode & MAC_MODE_APE_TX_EN)
			tp->mac_mode |= MAC_MODE_TDE_ENABLE;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	tg3_mdio_start(tp);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		/* NOTE(review): 0x7c00 bit 25 — another undocumented
		 * PCIe register tweak from vendor code.
		 */
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			tp->last_event_jiffies = jiffies;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
5971
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	/* Ask the ASF management firmware to pause (FWCMD_NICDRV_PAUSE_FW).
	 * Skipped when the APE is enabled — management traffic is handled
	 * there instead.
	 */
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	   !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
5988
/* Fully halt the chip: pause firmware, write the pre-reset signature,
 * abort in-flight DMA, reset the chip and write the post-reset
 * signatures.  tp->lock is held.  Returns the tg3_chip_reset() result.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
6009
6010 #define TG3_FW_RELEASE_MAJOR    0x0
6011 #define TG3_FW_RELASE_MINOR     0x0
6012 #define TG3_FW_RELEASE_FIX      0x0
6013 #define TG3_FW_START_ADDR       0x08000000
6014 #define TG3_FW_TEXT_ADDR        0x08000000
6015 #define TG3_FW_TEXT_LEN         0x9c0
6016 #define TG3_FW_RODATA_ADDR      0x080009c0
6017 #define TG3_FW_RODATA_LEN       0x60
6018 #define TG3_FW_DATA_ADDR        0x08000a40
6019 #define TG3_FW_DATA_LEN         0x20
6020 #define TG3_FW_SBSS_ADDR        0x08000a60
6021 #define TG3_FW_SBSS_LEN         0xc
6022 #define TG3_FW_BSS_ADDR         0x08000a70
6023 #define TG3_FW_BSS_LEN          0x10
6024
/* Firmware .text image loaded into the on-chip CPU (layout described
 * by the TG3_FW_* constants above).  Raw instruction words — never
 * edit by hand.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
6118
/* Firmware .rodata section — the words appear to be packed ASCII tags
 * (e.g. "fatalErr", "MainCpuB") used by the image above.  Raw data —
 * never edit by hand.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
6126
6127 #if 0 /* All zeros, don't eat up space with it. */
6128 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6129         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6130         0x00000000, 0x00000000, 0x00000000, 0x00000000
6131 };
6132 #endif
6133
6134 #define RX_CPU_SCRATCH_BASE     0x30000
6135 #define RX_CPU_SCRATCH_SIZE     0x04000
6136 #define TX_CPU_SCRATCH_BASE     0x34000
6137 #define TX_CPU_SCRATCH_SIZE     0x04000
6138
6139 /* tp->lock is held. */
6140 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6141 {
6142         int i;
6143
6144         BUG_ON(offset == TX_CPU_BASE &&
6145             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6146
6147         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6148                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6149
6150                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6151                 return 0;
6152         }
6153         if (offset == RX_CPU_BASE) {
6154                 for (i = 0; i < 10000; i++) {
6155                         tw32(offset + CPU_STATE, 0xffffffff);
6156                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6157                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6158                                 break;
6159                 }
6160
6161                 tw32(offset + CPU_STATE, 0xffffffff);
6162                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
6163                 udelay(10);
6164         } else {
6165                 for (i = 0; i < 10000; i++) {
6166                         tw32(offset + CPU_STATE, 0xffffffff);
6167                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6168                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6169                                 break;
6170                 }
6171         }
6172
6173         if (i >= 10000) {
6174                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6175                        "and %s CPU\n",
6176                        tp->dev->name,
6177                        (offset == RX_CPU_BASE ? "RX" : "TX"));
6178                 return -ENODEV;
6179         }
6180
6181         /* Clear firmware's nvram arbitration. */
6182         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6183                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6184         return 0;
6185 }
6186
/* Describes one firmware image to load into an on-chip CPU's scratch
 * memory: the load address, byte length and data words of each section.
 * A NULL *_data pointer means the section is zero-filled (see
 * tg3_load_firmware_cpu()).
 */
struct fw_info {
	unsigned int text_base;		/* .text load address */
	unsigned int text_len;		/* .text length, bytes */
	const u32 *text_data;		/* .text words, or NULL for zeros */
	unsigned int rodata_base;	/* .rodata load address */
	unsigned int rodata_len;	/* .rodata length, bytes */
	const u32 *rodata_data;		/* .rodata words, or NULL for zeros */
	unsigned int data_base;		/* .data load address */
	unsigned int data_len;		/* .data length, bytes */
	const u32 *data_data;		/* .data words, or NULL for zeros */
};
6198
6199 /* tp->lock is held. */
6200 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6201                                  int cpu_scratch_size, struct fw_info *info)
6202 {
6203         int err, lock_err, i;
6204         void (*write_op)(struct tg3 *, u32, u32);
6205
6206         if (cpu_base == TX_CPU_BASE &&
6207             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6208                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6209                        "TX cpu firmware on %s which is 5705.\n",
6210                        tp->dev->name);
6211                 return -EINVAL;
6212         }
6213
6214         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6215                 write_op = tg3_write_mem;
6216         else
6217                 write_op = tg3_write_indirect_reg32;
6218
6219         /* It is possible that bootcode is still loading at this point.
6220          * Get the nvram lock first before halting the cpu.
6221          */
6222         lock_err = tg3_nvram_lock(tp);
6223         err = tg3_halt_cpu(tp, cpu_base);
6224         if (!lock_err)
6225                 tg3_nvram_unlock(tp);
6226         if (err)
6227                 goto out;
6228
6229         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6230                 write_op(tp, cpu_scratch_base + i, 0);
6231         tw32(cpu_base + CPU_STATE, 0xffffffff);
6232         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6233         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6234                 write_op(tp, (cpu_scratch_base +
6235                               (info->text_base & 0xffff) +
6236                               (i * sizeof(u32))),
6237                          (info->text_data ?
6238                           info->text_data[i] : 0));
6239         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6240                 write_op(tp, (cpu_scratch_base +
6241                               (info->rodata_base & 0xffff) +
6242                               (i * sizeof(u32))),
6243                          (info->rodata_data ?
6244                           info->rodata_data[i] : 0));
6245         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6246                 write_op(tp, (cpu_scratch_base +
6247                               (info->data_base & 0xffff) +
6248                               (i * sizeof(u32))),
6249                          (info->data_data ?
6250                           info->data_data[i] : 0));
6251
6252         err = 0;
6253
6254 out:
6255         return err;
6256 }
6257
/* Load the 5701 A0 fixup firmware (tg3FwText/tg3FwRodata, all-zero
 * .data) into both the RX and TX CPU scratch areas, then start only
 * the RX CPU at TG3_FW_TEXT_ADDR.
 *
 * Returns 0 on success, a tg3_load_firmware_cpu() error, or -ENODEV if
 * the RX CPU's program counter cannot be set.
 *
 * tp->lock is held.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	int err, i;

	info.text_base = TG3_FW_TEXT_ADDR;
	info.text_len = TG3_FW_TEXT_LEN;
	info.text_data = &tg3FwText[0];
	info.rodata_base = TG3_FW_RODATA_ADDR;
	info.rodata_len = TG3_FW_RODATA_LEN;
	info.rodata_data = &tg3FwRodata[0];
	info.data_base = TG3_FW_DATA_ADDR;
	info.data_len = TG3_FW_DATA_LEN;
	/* NULL: .data is all zeros, tg3_load_firmware_cpu() zero-fills. */
	info.data_data = NULL;

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);

	/* Retry up to 5 times: re-halt and re-write the PC until it
	 * reads back as the firmware entry point.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
		       "to set RX CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
		       TG3_FW_TEXT_ADDR);
		return -ENODEV;
	}
	/* Release the RX CPU from halt so it starts executing. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
6310
6311
6312 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
6313 #define TG3_TSO_FW_RELASE_MINOR         0x6
6314 #define TG3_TSO_FW_RELEASE_FIX          0x0
6315 #define TG3_TSO_FW_START_ADDR           0x08000000
6316 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
6317 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
6318 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
6319 #define TG3_TSO_FW_RODATA_LEN           0x60
6320 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
6321 #define TG3_TSO_FW_DATA_LEN             0x30
6322 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
6323 #define TG3_TSO_FW_SBSS_LEN             0x2c
6324 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
6325 #define TG3_TSO_FW_BSS_LEN              0x894
6326
6327 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
6328         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6329         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6330         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6331         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6332         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6333         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6334         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6335         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6336         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6337         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6338         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6339         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6340         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6341         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6342         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6343         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6344         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6345         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6346         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6347         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6348         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6349         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6350         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6351         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6352         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6353         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6354         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6355         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6356         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6357         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6358         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6359         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6360         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6361         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6362         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6363         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6364         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6365         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6366         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6367         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6368         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6369         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6370         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6371         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6372         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6373         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6374         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6375         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6376         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6377         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6378         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6379         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6380         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6381         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6382         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6383         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6384         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6385         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6386         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6387         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6388         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6389         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6390         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6391         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6392         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6393         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6394         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6395         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6396         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6397         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6398         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6399         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6400         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6401         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6402         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6403         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6404         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6405         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6406         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6407         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6408         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6409         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6410         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6411         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6412         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6413         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6414         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6415         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6416         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6417         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6418         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6419         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6420         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6421         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6422         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6423         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6424         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6425         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6426         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6427         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6428         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6429         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6430         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6431         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6432         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6433         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6434         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6435         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6436         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6437         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6438         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6439         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6440         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6441         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6442         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6443         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6444         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6445         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6446         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6447         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6448         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6449         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6450         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6451         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6452         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6453         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6454         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6455         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6456         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6457         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6458         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6459         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6460         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6461         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6462         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6463         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6464         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6465         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6466         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6467         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6468         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6469         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6470         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6471         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6472         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6473         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6474         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6475         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6476         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6477         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6478         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6479         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6480         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6481         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6482         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6483         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6484         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6485         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6486         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6487         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6488         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6489         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6490         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6491         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6492         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6493         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6494         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6495         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6496         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6497         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6498         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6499         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6500         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6501         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6502         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6503         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6504         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6505         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6506         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6507         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6508         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6509         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6510         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6511         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6512         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6513         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6514         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6515         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6516         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6517         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6518         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6519         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6520         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6521         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6522         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6523         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6524         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6525         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6526         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6527         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6528         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6529         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6530         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6531         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6532         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6533         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6534         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6535         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6536         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6537         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6538         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6539         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6540         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6541         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6542         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6543         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6544         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6545         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6546         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6547         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6548         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6549         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6550         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6551         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6552         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6553         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6554         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6555         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6556         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6557         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6558         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6559         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6560         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6561         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6562         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6563         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6564         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6565         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6566         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6567         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6568         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6569         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6570         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6571         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6572         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6573         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6574         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6575         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6576         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6577         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6578         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6579         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6580         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6581         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6582         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6583         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6584         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6585         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6586         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6587         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6588         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6589         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6590         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6591         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6592         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6593         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6594         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6595         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6596         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6597         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6598         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6599         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6600         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6601         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6602         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6603         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6604         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6605         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6606         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6607         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6608         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6609         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6610         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6611         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6612 };
6613
/* Read-only data section of the TSO firmware.  Raw image words (they
 * appear to encode ASCII diagnostic strings such as "MainCpuB" and
 * "fatalErr"); do not modify -- the layout must match
 * TG3_TSO_FW_RODATA_ADDR/TG3_TSO_FW_RODATA_LEN.
 */
static const u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
6621
/* Initialized data segment of the TSO firmware image; the ASCII content
 * decodes to the firmware version tag "stkoffld_v1.6.0".  Do not edit by hand.
 */
static const u32 tg3TsoFwData[] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000,
};
6627
/* 5705 needs a special version of the TSO firmware.  */
/* Firmware image layout for the 5705 TSO firmware: version identifiers,
 * followed by the NIC SRAM addresses/lengths of the text, rodata, data,
 * sbss and bss segments, all placed starting at 0x00010000.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR       0x1
/* NOTE(review): "RELASE" below is a typo for "RELEASE"; kept as-is because
 * renaming the macro could break out-of-view references to this exact name.
 */
#define TG3_TSO5_FW_RELASE_MINOR        0x2
#define TG3_TSO5_FW_RELEASE_FIX         0x0
#define TG3_TSO5_FW_START_ADDR          0x00010000
#define TG3_TSO5_FW_TEXT_ADDR           0x00010000
#define TG3_TSO5_FW_TEXT_LEN            0xe90
#define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
#define TG3_TSO5_FW_RODATA_LEN          0x50
#define TG3_TSO5_FW_DATA_ADDR           0x00010f00
#define TG3_TSO5_FW_DATA_LEN            0x20
#define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
#define TG3_TSO5_FW_SBSS_LEN            0x28
#define TG3_TSO5_FW_BSS_ADDR            0x00010f50
#define TG3_TSO5_FW_BSS_LEN             0x88
6643
/* Text (code) segment of the 5705-specific TSO firmware, stored as raw
 * 32-bit instruction words (the encodings look like MIPS — e.g. the
 * recurring 0x27bdffe0 / 0x03e00008 prologue/return pattern — TODO confirm).
 * Machine-generated; do not edit by hand.
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
        0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
        0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
        0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
        0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
        0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
        0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
        0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
        0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
        0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
        0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
        0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
        0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
        0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
        0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
        0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
        0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
        0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
        0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
        0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
        0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
        0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
        0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
        0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
        0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
        0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
        0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
        0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
        0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
        0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
        0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
        0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
        0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
        0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
        0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
        0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
        0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
        0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
        0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
        0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
        0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
        0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
        0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
        0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
        0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
        0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
        0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
        0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
        0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
        0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
        0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
        0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
        0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
        0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
        0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
        0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
        0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
        0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
        0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
        0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
        0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
        0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
        0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
        0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
        0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
        0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
        0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
        0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
        0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
        0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
        0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
        0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
        0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
        0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
        0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
        0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
        0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
        0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
        0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
        0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
        0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
        0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
        0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
        0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
        0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
        0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
        0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
        0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
        0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
        0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
        0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
        0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
        0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
        0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
        0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
        0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
        0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
        0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
        0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
        0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
        0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
        0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
        0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
        0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
        0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
        0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
        0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
        0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
        0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
        0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
        0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
        0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
        0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
        0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
        0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
        0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
        0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
        0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
        0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
        0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
        0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
        0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
        0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
        0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
        0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
        0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
        0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
        0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
        0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
        0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
        0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
        0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
        0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
        0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
        0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
        0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
        0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
        0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
        0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
        0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
        0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
        0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
        0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
        0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
        0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
        0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
        0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
        0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
        0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
        0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
        0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
        0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
        0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
        0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
        0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
        0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
        0x00000000, 0x00000000, 0x00000000,
};
6802
/* Read-only data segment of the 5705 TSO firmware; mostly ASCII strings
 * (e.g. "MainCpuB", "stkoffld", "fatalErr").  Do not edit by hand.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
        0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
        0x00000000, 0x00000000, 0x00000000,
};
6809
/* Initialized data segment of the 5705 TSO firmware; the ASCII content
 * decodes to the version tag "stkoffld_v1.2.0".  Do not edit by hand.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000,
};
6814
6815 /* tp->lock is held. */
6816 static int tg3_load_tso_firmware(struct tg3 *tp)
6817 {
6818         struct fw_info info;
6819         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6820         int err, i;
6821
6822         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6823                 return 0;
6824
6825         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6826                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6827                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6828                 info.text_data = &tg3Tso5FwText[0];
6829                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6830                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6831                 info.rodata_data = &tg3Tso5FwRodata[0];
6832                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6833                 info.data_len = TG3_TSO5_FW_DATA_LEN;
6834                 info.data_data = &tg3Tso5FwData[0];
6835                 cpu_base = RX_CPU_BASE;
6836                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6837                 cpu_scratch_size = (info.text_len +
6838                                     info.rodata_len +
6839                                     info.data_len +
6840                                     TG3_TSO5_FW_SBSS_LEN +
6841                                     TG3_TSO5_FW_BSS_LEN);
6842         } else {
6843                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6844                 info.text_len = TG3_TSO_FW_TEXT_LEN;
6845                 info.text_data = &tg3TsoFwText[0];
6846                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6847                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6848                 info.rodata_data = &tg3TsoFwRodata[0];
6849                 info.data_base = TG3_TSO_FW_DATA_ADDR;
6850                 info.data_len = TG3_TSO_FW_DATA_LEN;
6851                 info.data_data = &tg3TsoFwData[0];
6852                 cpu_base = TX_CPU_BASE;
6853                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6854                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6855         }
6856
6857         err = tg3_load_firmware_cpu(tp, cpu_base,
6858                                     cpu_scratch_base, cpu_scratch_size,
6859                                     &info);
6860         if (err)
6861                 return err;
6862
6863         /* Now startup the cpu. */
6864         tw32(cpu_base + CPU_STATE, 0xffffffff);
6865         tw32_f(cpu_base + CPU_PC,    info.text_base);
6866
6867         for (i = 0; i < 5; i++) {
6868                 if (tr32(cpu_base + CPU_PC) == info.text_base)
6869                         break;
6870                 tw32(cpu_base + CPU_STATE, 0xffffffff);
6871                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
6872                 tw32_f(cpu_base + CPU_PC,    info.text_base);
6873                 udelay(1000);
6874         }
6875         if (i >= 5) {
6876                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6877                        "to set CPU PC, is %08x should be %08x\n",
6878                        tp->dev->name, tr32(cpu_base + CPU_PC),
6879                        info.text_base);
6880                 return -ENODEV;
6881         }
6882         tw32(cpu_base + CPU_STATE, 0xffffffff);
6883         tw32_f(cpu_base + CPU_MODE,  0x00000000);
6884         return 0;
6885 }
6886
6887
6888 /* tp->lock is held. */
6889 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6890 {
6891         u32 addr_high, addr_low;
6892         int i;
6893
6894         addr_high = ((tp->dev->dev_addr[0] << 8) |
6895                      tp->dev->dev_addr[1]);
6896         addr_low = ((tp->dev->dev_addr[2] << 24) |
6897                     (tp->dev->dev_addr[3] << 16) |
6898                     (tp->dev->dev_addr[4] <<  8) |
6899                     (tp->dev->dev_addr[5] <<  0));
6900         for (i = 0; i < 4; i++) {
6901                 if (i == 1 && skip_mac_1)
6902                         continue;
6903                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6904                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6905         }
6906
6907         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6908             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6909                 for (i = 0; i < 12; i++) {
6910                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6911                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6912                 }
6913         }
6914
6915         addr_high = (tp->dev->dev_addr[0] +
6916                      tp->dev->dev_addr[1] +
6917                      tp->dev->dev_addr[2] +
6918                      tp->dev->dev_addr[3] +
6919                      tp->dev->dev_addr[4] +
6920                      tp->dev->dev_addr[5]) &
6921                 TX_BACKOFF_SEED_MASK;
6922         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6923 }
6924
6925 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6926 {
6927         struct tg3 *tp = netdev_priv(dev);
6928         struct sockaddr *addr = p;
6929         int err = 0, skip_mac_1 = 0;
6930
6931         if (!is_valid_ether_addr(addr->sa_data))
6932                 return -EINVAL;
6933
6934         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6935
6936         if (!netif_running(dev))
6937                 return 0;
6938
6939         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6940                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6941
6942                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6943                 addr0_low = tr32(MAC_ADDR_0_LOW);
6944                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6945                 addr1_low = tr32(MAC_ADDR_1_LOW);
6946
6947                 /* Skip MAC addr 1 if ASF is using it. */
6948                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6949                     !(addr1_high == 0 && addr1_low == 0))
6950                         skip_mac_1 = 1;
6951         }
6952         spin_lock_bh(&tp->lock);
6953         __tg3_set_mac_addr(tp, skip_mac_1);
6954         spin_unlock_bh(&tp->lock);
6955
6956         return err;
6957 }
6958
6959 /* tp->lock is held. */
6960 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6961                            dma_addr_t mapping, u32 maxlen_flags,
6962                            u32 nic_addr)
6963 {
6964         tg3_write_mem(tp,
6965                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6966                       ((u64) mapping >> 32));
6967         tg3_write_mem(tp,
6968                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6969                       ((u64) mapping & 0xffffffff));
6970         tg3_write_mem(tp,
6971                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6972                        maxlen_flags);
6973
6974         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6975                 tg3_write_mem(tp,
6976                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6977                               nic_addr);
6978 }
6979
6980 static void __tg3_set_rx_mode(struct net_device *);
6981 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6982 {
6983         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6984         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6985         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6986         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6987         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6988                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6989                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6990         }
6991         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6992         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6993         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6994                 u32 val = ec->stats_block_coalesce_usecs;
6995
6996                 if (!netif_carrier_ok(tp->dev))
6997                         val = 0;
6998
6999                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7000         }
7001 }
7002
7003 /* tp->lock is held. */
7004 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7005 {
7006         u32 val, rdmac_mode;
7007         int i, err, limit;
7008
7009         tg3_disable_ints(tp);
7010
7011         tg3_stop_fw(tp);
7012
7013         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7014
7015         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
7016                 tg3_abort_hw(tp, 1);
7017         }
7018
7019         if (reset_phy &&
7020             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7021                 tg3_phy_reset(tp);
7022
7023         err = tg3_chip_reset(tp);
7024         if (err)
7025                 return err;
7026
7027         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7028
7029         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
7030             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
7031                 val = tr32(TG3_CPMU_CTRL);
7032                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7033                 tw32(TG3_CPMU_CTRL, val);
7034
7035                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7036                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7037                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7038                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7039
7040                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7041                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7042                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7043                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7044
7045                 val = tr32(TG3_CPMU_HST_ACC);
7046                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7047                 val |= CPMU_HST_ACC_MACCLK_6_25;
7048                 tw32(TG3_CPMU_HST_ACC, val);
7049         }
7050
7051         /* This works around an issue with Athlon chipsets on
7052          * B3 tigon3 silicon.  This bit has no effect on any
7053          * other revision.  But do not set this on PCI Express
7054          * chips and don't even touch the clocks if the CPMU is present.
7055          */
7056         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7057                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7058                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7059                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7060         }
7061
7062         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7063             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7064                 val = tr32(TG3PCI_PCISTATE);
7065                 val |= PCISTATE_RETRY_SAME_DMA;
7066                 tw32(TG3PCI_PCISTATE, val);
7067         }
7068
7069         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7070                 /* Allow reads and writes to the
7071                  * APE register and memory space.
7072                  */
7073                 val = tr32(TG3PCI_PCISTATE);
7074                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7075                        PCISTATE_ALLOW_APE_SHMEM_WR;
7076                 tw32(TG3PCI_PCISTATE, val);
7077         }
7078
7079         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7080                 /* Enable some hw fixes.  */
7081                 val = tr32(TG3PCI_MSI_DATA);
7082                 val |= (1 << 26) | (1 << 28) | (1 << 29);
7083                 tw32(TG3PCI_MSI_DATA, val);
7084         }
7085
7086         /* Descriptor ring init may make accesses to the
7087          * NIC SRAM area to setup the TX descriptors, so we
7088          * can only do this after the hardware has been
7089          * successfully reset.
7090          */
7091         err = tg3_init_rings(tp);
7092         if (err)
7093                 return err;
7094
7095         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7096             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
7097             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7098                 /* This value is determined during the probe time DMA
7099                  * engine test, tg3_test_dma.
7100                  */
7101                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7102         }
7103
7104         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7105                           GRC_MODE_4X_NIC_SEND_RINGS |
7106                           GRC_MODE_NO_TX_PHDR_CSUM |
7107                           GRC_MODE_NO_RX_PHDR_CSUM);
7108         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7109
7110         /* Pseudo-header checksum is done by hardware logic and not
7111          * the offload processers, so make the chip do the pseudo-
7112          * header checksums on receive.  For transmit it is more
7113          * convenient to do the pseudo-header checksum in software
7114          * as Linux does that on transmit for us in all cases.
7115          */
7116         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7117
7118         tw32(GRC_MODE,
7119              tp->grc_mode |
7120              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7121
7122         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
7123         val = tr32(GRC_MISC_CFG);
7124         val &= ~0xff;
7125         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7126         tw32(GRC_MISC_CFG, val);
7127
7128         /* Initialize MBUF/DESC pool. */
7129         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7130                 /* Do nothing.  */
7131         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7132                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7133                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7134                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7135                 else
7136                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7137                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7138                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7139         }
7140         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7141                 int fw_len;
7142
7143                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7144                           TG3_TSO5_FW_RODATA_LEN +
7145                           TG3_TSO5_FW_DATA_LEN +
7146                           TG3_TSO5_FW_SBSS_LEN +
7147                           TG3_TSO5_FW_BSS_LEN);
7148                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7149                 tw32(BUFMGR_MB_POOL_ADDR,
7150                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7151                 tw32(BUFMGR_MB_POOL_SIZE,
7152                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7153         }
7154
7155         if (tp->dev->mtu <= ETH_DATA_LEN) {
7156                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7157                      tp->bufmgr_config.mbuf_read_dma_low_water);
7158                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7159                      tp->bufmgr_config.mbuf_mac_rx_low_water);
7160                 tw32(BUFMGR_MB_HIGH_WATER,
7161                      tp->bufmgr_config.mbuf_high_water);
7162         } else {
7163                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7164                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7165                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7166                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7167                 tw32(BUFMGR_MB_HIGH_WATER,
7168                      tp->bufmgr_config.mbuf_high_water_jumbo);
7169         }
7170         tw32(BUFMGR_DMA_LOW_WATER,
7171              tp->bufmgr_config.dma_low_water);
7172         tw32(BUFMGR_DMA_HIGH_WATER,
7173              tp->bufmgr_config.dma_high_water);
7174
7175         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7176         for (i = 0; i < 2000; i++) {
7177                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7178                         break;
7179                 udelay(10);
7180         }
7181         if (i >= 2000) {
7182                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7183                        tp->dev->name);
7184                 return -ENODEV;
7185         }
7186
7187         /* Setup replenish threshold. */
7188         val = tp->rx_pending / 8;
7189         if (val == 0)
7190                 val = 1;
7191         else if (val > tp->rx_std_max_post)
7192                 val = tp->rx_std_max_post;
7193         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7194                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7195                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7196
7197                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7198                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7199         }
7200
7201         tw32(RCVBDI_STD_THRESH, val);
7202
7203         /* Initialize TG3_BDINFO's at:
7204          *  RCVDBDI_STD_BD:     standard eth size rx ring
7205          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
7206          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
7207          *
7208          * like so:
7209          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
7210          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
7211          *                              ring attribute flags
7212          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
7213          *
7214          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7215          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7216          *
7217          * The size of each ring is fixed in the firmware, but the location is
7218          * configurable.
7219          */
7220         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7221              ((u64) tp->rx_std_mapping >> 32));
7222         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7223              ((u64) tp->rx_std_mapping & 0xffffffff));
7224         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7225              NIC_SRAM_RX_BUFFER_DESC);
7226
7227         /* Don't even try to program the JUMBO/MINI buffer descriptor
7228          * configs on 5705.
7229          */
7230         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7231                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7232                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7233         } else {
7234                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7235                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7236
7237                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7238                      BDINFO_FLAGS_DISABLED);
7239
7240                 /* Setup replenish threshold. */
7241                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7242
7243                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7244                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7245                              ((u64) tp->rx_jumbo_mapping >> 32));
7246                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7247                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7248                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7249                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7250                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7251                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7252                 } else {
7253                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7254                              BDINFO_FLAGS_DISABLED);
7255                 }
7256
7257         }
7258
7259         /* There is only one send ring on 5705/5750, no need to explicitly
7260          * disable the others.
7261          */
7262         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7263                 /* Clear out send RCB ring in SRAM. */
7264                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7265                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7266                                       BDINFO_FLAGS_DISABLED);
7267         }
7268
7269         tp->tx_prod = 0;
7270         tp->tx_cons = 0;
7271         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7272         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7273
7274         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7275                        tp->tx_desc_mapping,
7276                        (TG3_TX_RING_SIZE <<
7277                         BDINFO_FLAGS_MAXLEN_SHIFT),
7278                        NIC_SRAM_TX_BUFFER_DESC);
7279
7280         /* There is only one receive return ring on 5705/5750, no need
7281          * to explicitly disable the others.
7282          */
7283         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7284                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7285                      i += TG3_BDINFO_SIZE) {
7286                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7287                                       BDINFO_FLAGS_DISABLED);
7288                 }
7289         }
7290
7291         tp->rx_rcb_ptr = 0;
7292         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7293
7294         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7295                        tp->rx_rcb_mapping,
7296                        (TG3_RX_RCB_RING_SIZE(tp) <<
7297                         BDINFO_FLAGS_MAXLEN_SHIFT),
7298                        0);
7299
7300         tp->rx_std_ptr = tp->rx_pending;
7301         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7302                      tp->rx_std_ptr);
7303
7304         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7305                                                 tp->rx_jumbo_pending : 0;
7306         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7307                      tp->rx_jumbo_ptr);
7308
7309         /* Initialize MAC address and backoff seed. */
7310         __tg3_set_mac_addr(tp, 0);
7311
7312         /* MTU + ethernet header + FCS + optional VLAN tag */
7313         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7314
7315         /* The slot time is changed by tg3_setup_phy if we
7316          * run at gigabit with half duplex.
7317          */
7318         tw32(MAC_TX_LENGTHS,
7319              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7320              (6 << TX_LENGTHS_IPG_SHIFT) |
7321              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7322
7323         /* Receive rules. */
7324         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7325         tw32(RCVLPC_CONFIG, 0x0181);
7326
7327         /* Calculate RDMAC_MODE setting early, we need it to determine
7328          * the RCVLPC_STATE_ENABLE mask.
7329          */
7330         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7331                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7332                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7333                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7334                       RDMAC_MODE_LNGREAD_ENAB);
7335
7336         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7337             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7338                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7339                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7340                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7341
7342         /* If statement applies to 5705 and 5750 PCI devices only */
7343         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7344              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7345             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7346                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7347                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7348                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7349                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7350                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7351                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7352                 }
7353         }
7354
7355         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7356                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7357
7358         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7359                 rdmac_mode |= (1 << 27);
7360
7361         /* Receive/send statistics. */
7362         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7363                 val = tr32(RCVLPC_STATS_ENABLE);
7364                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7365                 tw32(RCVLPC_STATS_ENABLE, val);
7366         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7367                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7368                 val = tr32(RCVLPC_STATS_ENABLE);
7369                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7370                 tw32(RCVLPC_STATS_ENABLE, val);
7371         } else {
7372                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7373         }
7374         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7375         tw32(SNDDATAI_STATSENAB, 0xffffff);
7376         tw32(SNDDATAI_STATSCTRL,
7377              (SNDDATAI_SCTRL_ENABLE |
7378               SNDDATAI_SCTRL_FASTUPD));
7379
7380         /* Setup host coalescing engine. */
7381         tw32(HOSTCC_MODE, 0);
7382         for (i = 0; i < 2000; i++) {
7383                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7384                         break;
7385                 udelay(10);
7386         }
7387
7388         __tg3_set_coalesce(tp, &tp->coal);
7389
7390         /* set status block DMA address */
7391         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7392              ((u64) tp->status_mapping >> 32));
7393         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7394              ((u64) tp->status_mapping & 0xffffffff));
7395
7396         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7397                 /* Status/statistics block address.  See tg3_timer,
7398                  * the tg3_periodic_fetch_stats call there, and
7399                  * tg3_get_stats to see how this works for 5705/5750 chips.
7400                  */
7401                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7402                      ((u64) tp->stats_mapping >> 32));
7403                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7404                      ((u64) tp->stats_mapping & 0xffffffff));
7405                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7406                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7407         }
7408
7409         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7410
7411         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7412         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7413         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7414                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7415
7416         /* Clear statistics/status block in chip, and status block in ram. */
7417         for (i = NIC_SRAM_STATS_BLK;
7418              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7419              i += sizeof(u32)) {
7420                 tg3_write_mem(tp, i, 0);
7421                 udelay(40);
7422         }
7423         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7424
7425         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7426                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7427                 /* reset to prevent losing 1st rx packet intermittently */
7428                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7429                 udelay(10);
7430         }
7431
7432         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7433                 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7434         else
7435                 tp->mac_mode = 0;
7436         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7437                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7438         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7439             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7440             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7441                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7442         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7443         udelay(40);
7444
7445         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7446          * If TG3_FLG2_IS_NIC is zero, we should read the
7447          * register to preserve the GPIO settings for LOMs. The GPIOs,
7448          * whether used as inputs or outputs, are set by boot code after
7449          * reset.
7450          */
7451         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7452                 u32 gpio_mask;
7453
7454                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7455                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7456                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7457
7458                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7459                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7460                                      GRC_LCLCTRL_GPIO_OUTPUT3;
7461
7462                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7463                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7464
7465                 tp->grc_local_ctrl &= ~gpio_mask;
7466                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7467
7468                 /* GPIO1 must be driven high for eeprom write protect */
7469                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7470                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7471                                                GRC_LCLCTRL_GPIO_OUTPUT1);
7472         }
7473         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7474         udelay(100);
7475
7476         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7477         tp->last_tag = 0;
7478
7479         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7480                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7481                 udelay(40);
7482         }
7483
7484         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7485                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7486                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7487                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7488                WDMAC_MODE_LNGREAD_ENAB);
7489
7490         /* If statement applies to 5705 and 5750 PCI devices only */
7491         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7492              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7493             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7494                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7495                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7496                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7497                         /* nothing */
7498                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7499                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7500                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7501                         val |= WDMAC_MODE_RX_ACCEL;
7502                 }
7503         }
7504
7505         /* Enable host coalescing bug fix */
7506         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7507             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7508             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7509             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7510             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
7511                 val |= WDMAC_MODE_STATUS_TAG_FIX;
7512
7513         tw32_f(WDMAC_MODE, val);
7514         udelay(40);
7515
7516         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7517                 u16 pcix_cmd;
7518
7519                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7520                                      &pcix_cmd);
7521                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7522                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7523                         pcix_cmd |= PCI_X_CMD_READ_2K;
7524                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7525                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7526                         pcix_cmd |= PCI_X_CMD_READ_2K;
7527                 }
7528                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7529                                       pcix_cmd);
7530         }
7531
7532         tw32_f(RDMAC_MODE, rdmac_mode);
7533         udelay(40);
7534
7535         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7536         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7537                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7538
7539         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7540                 tw32(SNDDATAC_MODE,
7541                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7542         else
7543                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7544
7545         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7546         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7547         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7548         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7549         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7550                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7551         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7552         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7553
7554         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7555                 err = tg3_load_5701_a0_firmware_fix(tp);
7556                 if (err)
7557                         return err;
7558         }
7559
7560         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7561                 err = tg3_load_tso_firmware(tp);
7562                 if (err)
7563                         return err;
7564         }
7565
7566         tp->tx_mode = TX_MODE_ENABLE;
7567         tw32_f(MAC_TX_MODE, tp->tx_mode);
7568         udelay(100);
7569
7570         tp->rx_mode = RX_MODE_ENABLE;
7571         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7572             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7573             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7574             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7575                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7576
7577         tw32_f(MAC_RX_MODE, tp->rx_mode);
7578         udelay(10);
7579
7580         tw32(MAC_LED_CTRL, tp->led_ctrl);
7581
7582         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7583         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7584                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7585                 udelay(10);
7586         }
7587         tw32_f(MAC_RX_MODE, tp->rx_mode);
7588         udelay(10);
7589
7590         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7591                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7592                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7593                         /* Set drive transmission level to 1.2V  */
7594                         /* only if the signal pre-emphasis bit is not set  */
7595                         val = tr32(MAC_SERDES_CFG);
7596                         val &= 0xfffff000;
7597                         val |= 0x880;
7598                         tw32(MAC_SERDES_CFG, val);
7599                 }
7600                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7601                         tw32(MAC_SERDES_CFG, 0x616000);
7602         }
7603
7604         /* Prevent chip from dropping frames when flow control
7605          * is enabled.
7606          */
7607         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7608
7609         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7610             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7611                 /* Use hardware link auto-negotiation */
7612                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7613         }
7614
7615         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7616             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7617                 u32 tmp;
7618
7619                 tmp = tr32(SERDES_RX_CTRL);
7620                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7621                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7622                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7623                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7624         }
7625
7626         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7627                 if (tp->link_config.phy_is_low_power) {
7628                         tp->link_config.phy_is_low_power = 0;
7629                         tp->link_config.speed = tp->link_config.orig_speed;
7630                         tp->link_config.duplex = tp->link_config.orig_duplex;
7631                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
7632                 }
7633
7634                 err = tg3_setup_phy(tp, 0);
7635                 if (err)
7636                         return err;
7637
7638                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7639                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7640                         u32 tmp;
7641
7642                         /* Clear CRC stats. */
7643                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7644                                 tg3_writephy(tp, MII_TG3_TEST1,
7645                                              tmp | MII_TG3_TEST1_CRC_EN);
7646                                 tg3_readphy(tp, 0x14, &tmp);
7647                         }
7648                 }
7649         }
7650
7651         __tg3_set_rx_mode(tp->dev);
7652
7653         /* Initialize receive rules. */
7654         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7655         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7656         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7657         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7658
7659         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7660             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7661                 limit = 8;
7662         else
7663                 limit = 16;
7664         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7665                 limit -= 4;
7666         switch (limit) {
7667         case 16:
7668                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7669         case 15:
7670                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7671         case 14:
7672                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7673         case 13:
7674                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7675         case 12:
7676                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7677         case 11:
7678                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7679         case 10:
7680                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7681         case 9:
7682                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7683         case 8:
7684                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7685         case 7:
7686                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7687         case 6:
7688                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7689         case 5:
7690                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7691         case 4:
7692                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7693         case 3:
7694                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7695         case 2:
7696         case 1:
7697
7698         default:
7699                 break;
7700         }
7701
7702         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7703                 /* Write our heartbeat update interval to APE. */
7704                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7705                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7706
7707         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7708
7709         return 0;
7710 }
7711
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns 0 on success or a negative errno (e.g. -ENODEV) propagated
 * from tg3_reset_hw().
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	/* NOTE(review): tg3_switch_clocks() is defined elsewhere in this
	 * file; assumed to put the core clocks into a usable state before
	 * the reset sequence below touches the rest of the chip.
	 */
	tg3_switch_clocks(tp);

	/* Reset the PCI memory window base back to offset 0 of NIC SRAM. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
7723
/* Fold the 32-bit hardware counter at register REG into the 64-bit
 * software counter PSTAT (a struct with .low/.high u32 halves).
 * Wrap-around of the low half is detected by the unsigned sum coming
 * out smaller than the previous low value, in which case a carry is
 * propagated into the high half.
 */
#define TG3_STAT_ADD32(PSTAT, REG)			\
do {							\
	u32 __delta = tr32(REG);			\
	u32 __old_low = (PSTAT)->low;			\
							\
	(PSTAT)->low = __old_low + __delta;		\
	if ((PSTAT)->low < __old_low)			\
		(PSTAT)->high++;			\
} while (0)
7730
/* Fold the chip's 32-bit statistics counters into the 64-bit software
 * copies in tp->hw_stats via TG3_STAT_ADD32().  Invoked from the
 * once-per-second section of tg3_timer() on 5705-plus chips.  Skipped
 * entirely while the carrier is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	/* MAC transmit statistics. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* MAC receive statistics. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement counters. */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7771
/* Periodic maintenance timer.  Runs every tp->timer_offset jiffies and
 * always re-arms itself at the bottom.  Under tp->lock it: pokes the
 * chip when non-tagged IRQ status may have been lost, detects a dead
 * write-DMA engine and schedules a reset, runs the once-per-second
 * link/statistics poll, and sends the ASF firmware heartbeat.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* tp->irq_sync set: skip all work this tick, but still re-arm
	 * the timer below so it resumes once synchronization ends.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block was updated: force an interrupt
			 * so the update is not lost.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Otherwise ask the coalescing engine for an
			 * immediate status block update.
			 */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* Write-DMA engine no longer enabled: hand a full chip
		 * reset off to process context.  The timer is not
		 * re-armed here; TG3_FLG2_RESTART_TIMER tells the reset
		 * task to restart it.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			/* Poll MAC_STATUS for a link change (either via
			 * the MI interrupt bit or the link-state-changed
			 * bit) and re-run PHY setup if one occurred.
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up but state changed, or link was down
			 * and the SERDES now reports sync/signal: redo
			 * link setup.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Briefly clear the port mode bits
					 * before restoring them -- NOTE
					 * (review): presumably to reset the
					 * link-state machine; confirm
					 * against hardware docs.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
		    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
7891
7892 static int tg3_request_irq(struct tg3 *tp)
7893 {
7894         irq_handler_t fn;
7895         unsigned long flags;
7896         struct net_device *dev = tp->dev;
7897
7898         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7899                 fn = tg3_msi;
7900                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7901                         fn = tg3_msi_1shot;
7902                 flags = IRQF_SAMPLE_RANDOM;
7903         } else {
7904                 fn = tg3_interrupt;
7905                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7906                         fn = tg3_interrupt_tagged;
7907                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7908         }
7909         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7910 }
7911
7912 static int tg3_test_interrupt(struct tg3 *tp)
7913 {
7914         struct net_device *dev = tp->dev;
7915         int err, i, intr_ok = 0;
7916
7917         if (!netif_running(dev))
7918                 return -ENODEV;
7919
7920         tg3_disable_ints(tp);
7921
7922         free_irq(tp->pdev->irq, dev);
7923
7924         err = request_irq(tp->pdev->irq, tg3_test_isr,
7925                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7926         if (err)
7927                 return err;
7928
7929         tp->hw_status->status &= ~SD_STATUS_UPDATED;
7930         tg3_enable_ints(tp);
7931
7932         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7933                HOSTCC_MODE_NOW);
7934
7935         for (i = 0; i < 5; i++) {
7936                 u32 int_mbox, misc_host_ctrl;
7937
7938                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7939                                         TG3_64BIT_REG_LOW);
7940                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7941
7942                 if ((int_mbox != 0) ||
7943                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7944                         intr_ok = 1;
7945                         break;
7946                 }
7947
7948                 msleep(10);
7949         }
7950
7951         tg3_disable_ints(tp);
7952
7953         free_irq(tp->pdev->irq, dev);
7954
7955         err = tg3_request_irq(tp);
7956
7957         if (err)
7958                 return err;
7959
7960         if (intr_ok)
7961                 return 0;
7962
7963         return -EIO;
7964 }
7965
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err;
        u16 pci_cmd;

        /* Nothing to test if the device is not using MSI. */
        if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
                return 0;

        /* Turn off SERR reporting in case MSI terminates with Master
         * Abort.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(tp->pdev, PCI_COMMAND,
                              pci_cmd & ~PCI_COMMAND_SERR);

        err = tg3_test_interrupt(tp);

        /* Restore the original PCI command word. */
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        if (!err)
                return 0;

        /* other failures */
        if (err != -EIO)
                return err;

        /* MSI test failed, go back to INTx mode */
        printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
               "switching to INTx mode. Please report this failure to "
               "the PCI maintainer and include system chipset information.\n",
                       tp->dev->name);

        free_irq(tp->pdev->irq, dev);
        pci_disable_msi(tp->pdev);

        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

        /* Re-request the IRQ as a legacy INTx line. */
        err = tg3_request_irq(tp);
        if (err)
                return err;

        /* Need to reset the chip because the MSI cycle may have terminated
         * with Master Abort.
         */
        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp, 1);

        tg3_full_unlock(tp);

        /* On failure no working IRQ remains; release the line here. */
        if (err)
                free_irq(tp->pdev->irq, dev);

        return err;
}
8026
/* net_device open hook: power up the chip, allocate DMA rings,
 * attach the interrupt handler (MSI when available, verified by a
 * live test with INTx fallback), program the hardware and start the
 * periodic timer before opening the TX queue.
 */
static int tg3_open(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        netif_carrier_off(tp->dev);

        /* Bring the device to full power before touching registers. */
        err = tg3_set_power_state(tp, PCI_D0);
        if (err)
                return err;

        tg3_full_lock(tp, 0);

        tg3_disable_ints(tp);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

        tg3_full_unlock(tp);

        /* The placement of this call is tied
         * to the setup and use of Host TX descriptors.
         */
        err = tg3_alloc_consistent(tp);
        if (err)
                return err;

        if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
                /* All MSI supporting chips should support tagged
                 * status.  Assert that this is the case.
                 */
                if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                        printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
                               "Not using MSI.\n", tp->dev->name);
                } else if (pci_enable_msi(tp->pdev) == 0) {
                        u32 msi_mode;

                        msi_mode = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
                        tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
                }
        }
        err = tg3_request_irq(tp);

        if (err) {
                /* Undo MSI enable and the ring allocation on failure. */
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        napi_enable(&tp->napi);

        tg3_full_lock(tp, 0);

        err = tg3_init_hw(tp, 1);
        if (err) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_free_rings(tp);
        } else {
                /* Tagged status needs only a 1 Hz timer; otherwise
                 * run the timer at 10 Hz.
                 */
                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
                        tp->timer_offset = HZ;
                else
                        tp->timer_offset = HZ / 10;

                BUG_ON(tp->timer_offset > HZ);
                tp->timer_counter = tp->timer_multiplier =
                        (HZ / tp->timer_offset);
                /* ASF heartbeat interval is twice the timer period
                 * (sent once every 2 seconds — see tg3_timer).
                 */
                tp->asf_counter = tp->asf_multiplier =
                        ((HZ / tp->timer_offset) * 2);

                init_timer(&tp->timer);
                tp->timer.expires = jiffies + tp->timer_offset;
                tp->timer.data = (unsigned long) tp;
                tp->timer.function = tg3_timer;
        }

        tg3_full_unlock(tp);

        if (err) {
                napi_disable(&tp->napi);
                free_irq(tp->pdev->irq, dev);
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                /* Verify MSI actually delivers; tg3_test_msi() falls
                 * back to INTx internally on a recoverable failure.
                 */
                err = tg3_test_msi(tp);

                if (err) {
                        tg3_full_lock(tp, 0);

                        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                                pci_disable_msi(tp->pdev);
                                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                        }
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        tg3_free_rings(tp);
                        tg3_free_consistent(tp);

                        tg3_full_unlock(tp);

                        napi_disable(&tp->napi);

                        return err;
                }

                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
                                u32 val = tr32(PCIE_TRANSACTION_CFG);

                                tw32(PCIE_TRANSACTION_CFG,
                                     val | PCIE_TRANS_CFG_1SHOT_MSI);
                        }
                }
        }

        tg3_phy_start(tp);

        tg3_full_lock(tp, 0);

        /* Everything is in place: arm the timer and enable interrupts
         * before opening the TX queue.
         */
        add_timer(&tp->timer);
        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
        tg3_enable_ints(tp);

        tg3_full_unlock(tp);

        netif_start_queue(dev);

        return 0;
}
8162
#if 0
/* Debug-only register/SRAM dump for diagnosing hardware hangs.
 * Compiled out by default (the only call site, in tg3_close(), is
 * also under #if 0); kept as a reference for bring-up work.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
        u32 val32, val32_2, val32_3, val32_4, val32_5;
        u16 val16;
        int i;

        pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
        pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
        printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
               val16, val32);

        /* MAC block */
        printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
               tr32(MAC_MODE), tr32(MAC_STATUS));
        printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
               tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
        printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
               tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
        printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
               tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

        /* Send data initiator control block */
        printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
               tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
        printk("       SNDDATAI_STATSCTRL[%08x]\n",
               tr32(SNDDATAI_STATSCTRL));

        /* Send data completion control block */
        printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

        /* Send BD ring selector block */
        printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
               tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

        /* Send BD initiator control block */
        printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
               tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

        /* Send BD completion control block */
        printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

        /* Receive list placement control block */
        printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
               tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
        printk("       RCVLPC_STATSCTRL[%08x]\n",
               tr32(RCVLPC_STATSCTRL));

        /* Receive data and receive BD initiator control block */
        printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
               tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

        /* Receive data completion control block */
        printk("DEBUG: RCVDCC_MODE[%08x]\n",
               tr32(RCVDCC_MODE));

        /* Receive BD initiator control block */
        printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
               tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

        /* Receive BD completion control block */
        printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
               tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

        /* Receive list selector control block */
        printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
               tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

        /* Mbuf cluster free block */
        printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
               tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

        /* Host coalescing control block */
        printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
               tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
        printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
               tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
               tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
        printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
               tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
               tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
        printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
               tr32(HOSTCC_STATS_BLK_NIC_ADDR));
        printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
               tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

        /* Memory arbiter control block */
        printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
               tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

        /* Buffer manager control block */
        printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
               tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
        printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
               tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
        printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
               "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
               tr32(BUFMGR_DMA_DESC_POOL_ADDR),
               tr32(BUFMGR_DMA_DESC_POOL_SIZE));

        /* Read DMA control block */
        printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
               tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

        /* Write DMA control block */
        printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
               tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

        /* DMA completion block */
        printk("DEBUG: DMAC_MODE[%08x]\n",
               tr32(DMAC_MODE));

        /* GRC block */
        printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
               tr32(GRC_MODE), tr32(GRC_MISC_CFG));
        printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
               tr32(GRC_LOCAL_CTRL));

        /* TG3_BDINFOs */
        printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_JUMBO_BD + 0x0),
               tr32(RCVDBDI_JUMBO_BD + 0x4),
               tr32(RCVDBDI_JUMBO_BD + 0x8),
               tr32(RCVDBDI_JUMBO_BD + 0xc));
        printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_STD_BD + 0x0),
               tr32(RCVDBDI_STD_BD + 0x4),
               tr32(RCVDBDI_STD_BD + 0x8),
               tr32(RCVDBDI_STD_BD + 0xc));
        printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_MINI_BD + 0x0),
               tr32(RCVDBDI_MINI_BD + 0x4),
               tr32(RCVDBDI_MINI_BD + 0x8),
               tr32(RCVDBDI_MINI_BD + 0xc));

        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
        printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4);

        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
        printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4);

        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
        printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4, val32_5);

        /* SW status block */
        printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
               tp->hw_status->status,
               tp->hw_status->status_tag,
               tp->hw_status->rx_jumbo_consumer,
               tp->hw_status->rx_consumer,
               tp->hw_status->rx_mini_consumer,
               tp->hw_status->idx[0].rx_producer,
               tp->hw_status->idx[0].tx_consumer);

        /* SW statistics block */
        printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
               ((u32 *)tp->hw_stats)[0],
               ((u32 *)tp->hw_stats)[1],
               ((u32 *)tp->hw_stats)[2],
               ((u32 *)tp->hw_stats)[3]);

        /* Mailboxes */
        printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
               tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
               tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
               tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
               tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

        /* NIC side send descriptors. */
        for (i = 0; i < 6; i++) {
                unsigned long txd;

                txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
                        + (i * sizeof(struct tg3_tx_buffer_desc));
                printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(txd + 0x0), readl(txd + 0x4),
                       readl(txd + 0x8), readl(txd + 0xc));
        }

        /* NIC side RX descriptors. */
        for (i = 0; i < 6; i++) {
                unsigned long rxd;

                rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
                        + (i * sizeof(struct tg3_rx_buffer_desc));
                printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
                rxd += (4 * sizeof(u32));
                printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
        }

        for (i = 0; i < 6; i++) {
                unsigned long rxd;

                rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
                        + (i * sizeof(struct tg3_rx_buffer_desc));
                printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
                rxd += (4 * sizeof(u32));
                printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
        }
}
#endif
8390
8391 static struct net_device_stats *tg3_get_stats(struct net_device *);
8392 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8393
/* net_device stop hook: the mirror of tg3_open().  Quiesce NAPI and
 * the periodic timer, halt the chip, release the IRQ, snapshot the
 * statistics, free DMA memory and drop the device to low power.
 */
static int tg3_close(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        napi_disable(&tp->napi);
        /* Make sure no reset task is still running during teardown. */
        cancel_work_sync(&tp->reset_task);

        netif_stop_queue(dev);

        del_timer_sync(&tp->timer);

        tg3_full_lock(tp, 1);
#if 0
        tg3_dump_state(tp);
#endif

        tg3_disable_ints(tp);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

        tg3_full_unlock(tp);

        free_irq(tp->pdev->irq, dev);
        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                pci_disable_msi(tp->pdev);
                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
        }

        /* Save the final counter totals so statistics stay monotonic
         * across a close/open cycle; the hw_stats block is freed just
         * below by tg3_free_consistent().
         */
        memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
               sizeof(tp->net_stats_prev));
        memcpy(&tp->estats_prev, tg3_get_estats(tp),
               sizeof(tp->estats_prev));

        tg3_free_consistent(tp);

        tg3_set_power_state(tp, PCI_D3hot);

        netif_carrier_off(tp->dev);

        return 0;
}
8437
8438 static inline unsigned long get_stat64(tg3_stat64_t *val)
8439 {
8440         unsigned long ret;
8441
8442 #if (BITS_PER_LONG == 32)
8443         ret = val->low;
8444 #else
8445         ret = ((u64)val->high << 32) | ((u64)val->low);
8446 #endif
8447         return ret;
8448 }
8449
8450 static inline u64 get_estat64(tg3_stat64_t *val)
8451 {
8452        return ((u64)val->high << 32) | ((u64)val->low);
8453 }
8454
/* Return the cumulative RX CRC error count.  On 5700/5701 copper
 * devices the count is read from the PHY's test register instead of
 * the MAC statistics block and accumulated in tp->phy_crc_errors
 * (NOTE(review): presumably the PHY counter resets on read — the
 * software accumulator suggests so; confirm against the PHY data
 * sheet).  All other devices use the hardware rx_fcs_errors counter.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 val;

                spin_lock_bh(&tp->lock);
                if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
                        /* Enable the PHY CRC counter, then read it
                         * from PHY register 0x14.  If TEST1 could not
                         * be read, count nothing this round.
                         */
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     val | MII_TG3_TEST1_CRC_EN);
                        tg3_readphy(tp, 0x14, &val);
                } else
                        val = 0;
                spin_unlock_bh(&tp->lock);

                tp->phy_crc_errors += val;

                return tp->phy_crc_errors;
        }

        return get_stat64(&hw_stats->rx_fcs_errors);
}
8480
/* Add the current hardware counter for @member to the total saved at
 * last close (estats_prev), keeping ethtool statistics monotonic
 * across close/open cycles.  Expects estats, old_estats and hw_stats
 * in scope at the expansion site.
 */
#define ESTAT_ADD(member) \
        estats->member =        old_estats->member + \
                                get_estat64(&hw_stats->member)
8484
/* Fill tp->estats with the totals for every ethtool statistic:
 * the pre-close snapshot plus the live hardware counter block.
 * Returns the snapshot unchanged when the hardware stats block is
 * not mapped (device closed).
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
        struct tg3_ethtool_stats *estats = &tp->estats;
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_estats;

        ESTAT_ADD(rx_octets);
        ESTAT_ADD(rx_fragments);
        ESTAT_ADD(rx_ucast_packets);
        ESTAT_ADD(rx_mcast_packets);
        ESTAT_ADD(rx_bcast_packets);
        ESTAT_ADD(rx_fcs_errors);
        ESTAT_ADD(rx_align_errors);
        ESTAT_ADD(rx_xon_pause_rcvd);
        ESTAT_ADD(rx_xoff_pause_rcvd);
        ESTAT_ADD(rx_mac_ctrl_rcvd);
        ESTAT_ADD(rx_xoff_entered);
        ESTAT_ADD(rx_frame_too_long_errors);
        ESTAT_ADD(rx_jabbers);
        ESTAT_ADD(rx_undersize_packets);
        ESTAT_ADD(rx_in_length_errors);
        ESTAT_ADD(rx_out_length_errors);
        ESTAT_ADD(rx_64_or_less_octet_packets);
        ESTAT_ADD(rx_65_to_127_octet_packets);
        ESTAT_ADD(rx_128_to_255_octet_packets);
        ESTAT_ADD(rx_256_to_511_octet_packets);
        ESTAT_ADD(rx_512_to_1023_octet_packets);
        ESTAT_ADD(rx_1024_to_1522_octet_packets);
        ESTAT_ADD(rx_1523_to_2047_octet_packets);
        ESTAT_ADD(rx_2048_to_4095_octet_packets);
        ESTAT_ADD(rx_4096_to_8191_octet_packets);
        ESTAT_ADD(rx_8192_to_9022_octet_packets);

        ESTAT_ADD(tx_octets);
        ESTAT_ADD(tx_collisions);
        ESTAT_ADD(tx_xon_sent);
        ESTAT_ADD(tx_xoff_sent);
        ESTAT_ADD(tx_flow_control);
        ESTAT_ADD(tx_mac_errors);
        ESTAT_ADD(tx_single_collisions);
        ESTAT_ADD(tx_mult_collisions);
        ESTAT_ADD(tx_deferred);
        ESTAT_ADD(tx_excessive_collisions);
        ESTAT_ADD(tx_late_collisions);
        ESTAT_ADD(tx_collide_2times);
        ESTAT_ADD(tx_collide_3times);
        ESTAT_ADD(tx_collide_4times);
        ESTAT_ADD(tx_collide_5times);
        ESTAT_ADD(tx_collide_6times);
        ESTAT_ADD(tx_collide_7times);
        ESTAT_ADD(tx_collide_8times);
        ESTAT_ADD(tx_collide_9times);
        ESTAT_ADD(tx_collide_10times);
        ESTAT_ADD(tx_collide_11times);
        ESTAT_ADD(tx_collide_12times);
        ESTAT_ADD(tx_collide_13times);
        ESTAT_ADD(tx_collide_14times);
        ESTAT_ADD(tx_collide_15times);
        ESTAT_ADD(tx_ucast_packets);
        ESTAT_ADD(tx_mcast_packets);
        ESTAT_ADD(tx_bcast_packets);
        ESTAT_ADD(tx_carrier_sense_errors);
        ESTAT_ADD(tx_discards);
        ESTAT_ADD(tx_errors);

        ESTAT_ADD(dma_writeq_full);
        ESTAT_ADD(dma_write_prioq_full);
        ESTAT_ADD(rxbds_empty);
        ESTAT_ADD(rx_discards);
        ESTAT_ADD(rx_errors);
        ESTAT_ADD(rx_threshold_hit);

        ESTAT_ADD(dma_readq_full);
        ESTAT_ADD(dma_read_prioq_full);
        ESTAT_ADD(tx_comp_queue_full);

        ESTAT_ADD(ring_set_send_prod_index);
        ESTAT_ADD(ring_status_update);
        ESTAT_ADD(nic_irqs);
        ESTAT_ADD(nic_avoided_irqs);
        ESTAT_ADD(nic_tx_threshold_hit);

        return estats;
}
8572
/* net_device get_stats hook: fold the live hardware counters into
 * the totals saved at last close and return the aggregate.  Returns
 * the saved totals unchanged when the hardware stats block is not
 * mapped (device closed).
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        struct net_device_stats *stats = &tp->net_stats;
        struct net_device_stats *old_stats = &tp->net_stats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_stats;

        /* Packet totals are the sum of the per-class MAC counters. */
        stats->rx_packets = old_stats->rx_packets +
                get_stat64(&hw_stats->rx_ucast_packets) +
                get_stat64(&hw_stats->rx_mcast_packets) +
                get_stat64(&hw_stats->rx_bcast_packets);

        stats->tx_packets = old_stats->tx_packets +
                get_stat64(&hw_stats->tx_ucast_packets) +
                get_stat64(&hw_stats->tx_mcast_packets) +
                get_stat64(&hw_stats->tx_bcast_packets);

        stats->rx_bytes = old_stats->rx_bytes +
                get_stat64(&hw_stats->rx_octets);
        stats->tx_bytes = old_stats->tx_bytes +
                get_stat64(&hw_stats->tx_octets);

        stats->rx_errors = old_stats->rx_errors +
                get_stat64(&hw_stats->rx_errors);
        stats->tx_errors = old_stats->tx_errors +
                get_stat64(&hw_stats->tx_errors) +
                get_stat64(&hw_stats->tx_mac_errors) +
                get_stat64(&hw_stats->tx_carrier_sense_errors) +
                get_stat64(&hw_stats->tx_discards);

        stats->multicast = old_stats->multicast +
                get_stat64(&hw_stats->rx_mcast_packets);
        stats->collisions = old_stats->collisions +
                get_stat64(&hw_stats->tx_collisions);

        stats->rx_length_errors = old_stats->rx_length_errors +
                get_stat64(&hw_stats->rx_frame_too_long_errors) +
                get_stat64(&hw_stats->rx_undersize_packets);

        stats->rx_over_errors = old_stats->rx_over_errors +
                get_stat64(&hw_stats->rxbds_empty);
        stats->rx_frame_errors = old_stats->rx_frame_errors +
                get_stat64(&hw_stats->rx_align_errors);
        stats->tx_aborted_errors = old_stats->tx_aborted_errors +
                get_stat64(&hw_stats->tx_discards);
        stats->tx_carrier_errors = old_stats->tx_carrier_errors +
                get_stat64(&hw_stats->tx_carrier_sense_errors);

        /* CRC errors need special handling on some chips; see
         * calc_crc_errors().
         */
        stats->rx_crc_errors = old_stats->rx_crc_errors +
                calc_crc_errors(tp);

        stats->rx_missed_errors = old_stats->rx_missed_errors +
                get_stat64(&hw_stats->rx_discards);

        return stats;
}
8632
/* Bitwise standard CRC-32 (reflected, polynomial 0xedb88320), seeded
 * with all-ones and inverted on return.  Used to derive the MAC
 * multicast hash-filter bit for an Ethernet address.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
        u32 crc = 0xffffffff;
        int i, bit;

        for (i = 0; i < len; i++) {
                crc ^= buf[i];

                for (bit = 0; bit < 8; bit++)
                        crc = (crc >> 1) ^ ((crc & 0x01) ? 0xedb88320 : 0);
        }

        return ~crc;
}
8657
8658 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8659 {
8660         /* accept or reject all multicast frames */
8661         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8662         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8663         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8664         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8665 }
8666
/* Program the MAC RX filters — promiscuous mode, the 128-bit
 * multicast hash filter and VLAN tag stripping — from dev->flags and
 * the device multicast list.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 rx_mode;

        rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
                                  RX_MODE_KEEP_VLAN_TAG);

        /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
         * flag clear.
         */
#if TG3_VLAN_TAG_USED
        if (!tp->vlgrp &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
        /* By definition, VLAN is disabled always in this
         * case.
         */
        if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= RX_MODE_PROMISC;
        } else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast. */
                tg3_set_multi (tp, 1);
        } else if (dev->mc_count < 1) {
                /* Reject all multicast. */
                tg3_set_multi (tp, 0);
        } else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                unsigned int i;
                u32 mc_filter[4] = { 0, };
                u32 regidx;
                u32 bit;
                u32 crc;

                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        /* The low 7 bits of the inverted CRC select
                         * one of 128 filter bits: bits 5-6 pick the
                         * register, bits 0-4 pick the bit within it.
                         */
                        crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
                        bit = ~crc & 0x7f;
                        regidx = (bit & 0x60) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                tw32(MAC_HASH_REG_0, mc_filter[0]);
                tw32(MAC_HASH_REG_1, mc_filter[1]);
                tw32(MAC_HASH_REG_2, mc_filter[2]);
                tw32(MAC_HASH_REG_3, mc_filter[3]);
        }

        /* Only touch the hardware RX mode register when something
         * actually changed.
         */
        if (rx_mode != tp->rx_mode) {
                tp->rx_mode = rx_mode;
                tw32_f(MAC_RX_MODE, rx_mode);
                udelay(10);
        }
}
8730
/* net_device entry point: refresh the RX filters under the full lock.
 * A no-op while the interface is down; the filters are reprogrammed
 * from scratch on open anyway.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8742
8743 #define TG3_REGDUMP_LEN         (32 * 1024)
8744
/* ethtool: size in bytes of the register dump tg3_get_regs() fills. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8749
/* ethtool register dump: copy the interesting chip register ranges into
 * the TG3_REGDUMP_LEN-byte buffer at their native offsets; gaps between
 * ranges stay zero from the memset.  The chip is not touched while the
 * PHY is in low-power mode.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Helpers: reposition the output cursor to the register's own offset in
 * the dump buffer, then copy one register (or a len-byte block starting
 * at base) from the chip.
 */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers are only dumped when NVRAM is in use. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8822
/* ethtool -e: report the NVRAM size in bytes (tp->nvram_size). */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
8829
8830 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8831 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8832 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8833
/* ethtool -e: read eeprom->len bytes of NVRAM starting at eeprom->offset.
 * NVRAM is accessed in 4-byte words, so a word-unaligned head or tail is
 * fetched via a whole-word read and copied out piecemeal.  eeprom->len is
 * updated to reflect how much was actually read, even on error.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__le32 val;

	/* NVRAM cannot be accessed while the PHY is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_le(tp, offset + i, &val);
		if (ret) {
			/* Report the partial progress before failing. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_le(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8893
8894 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8895
/* ethtool -E: write eeprom->len bytes to NVRAM at eeprom->offset.  NVRAM
 * writes are whole 4-byte words, so a word-unaligned head or tail is
 * widened by reading back the bordering words and merging the caller's
 * data into a temporary buffer before the block write.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__le32 start, end;

	/* NVRAM cannot be accessed while the PHY is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	/* Userspace must echo back the magic from tg3_get_eeprom(). */
	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_le(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Merge the preserved head/tail words with the new data. */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8953
/* ethtool get_settings: report supported/advertised link modes and,
 * when the interface is up, the active speed and duplex.  PHYs managed
 * by phylib are delegated to the PHY layer entirely.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	/* Gigabit modes, unless this is a 10/100-only device. */
	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper ports also support the 10/100 modes; SERDES is fibre. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
8994
/* ethtool set_settings: validate the requested link parameters, store
 * them in link_config and renegotiate if the interface is up.  PHYs
 * managed by phylib are handed to the PHY layer.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
		/* These are the only valid advertisement bits allowed.  */
		if (cmd->autoneg == AUTONEG_ENABLE &&
		    (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
					  ADVERTISED_1000baseT_Full |
					  ADVERTISED_Autoneg |
					  ADVERTISED_FIBRE)))
			return -EINVAL;
		/* Fiber can only do SPEED_1000.  */
		else if ((cmd->autoneg != AUTONEG_ENABLE) &&
			 (cmd->speed != SPEED_1000))
			return -EINVAL;
	/* Copper cannot force SPEED_1000.  */
	} else if ((cmd->autoneg != AUTONEG_ENABLE) &&
		   (cmd->speed == SPEED_1000))
		return -EINVAL;
	else if ((cmd->speed == SPEED_1000) &&
		 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		return -EINVAL;

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Autoneg: advertise the requested modes; forced speed and
		 * duplex become don't-cares.
		 */
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		/* Forced mode: no advertisement, fixed speed/duplex. */
		tp->link_config.advertising = 0;
		tp->link_config.speed = cmd->speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* Remember the requested configuration as the new baseline. */
	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
9050
9051 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9052 {
9053         struct tg3 *tp = netdev_priv(dev);
9054
9055         strcpy(info->driver, DRV_MODULE_NAME);
9056         strcpy(info->version, DRV_MODULE_VERSION);
9057         strcpy(info->fw_version, tp->fw_ver);
9058         strcpy(info->bus_info, pci_name(tp->pdev));
9059 }
9060
9061 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9062 {
9063         struct tg3 *tp = netdev_priv(dev);
9064
9065         if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9066             device_can_wakeup(&tp->pdev->dev))
9067                 wol->supported = WAKE_MAGIC;
9068         else
9069                 wol->supported = 0;
9070         wol->wolopts = 0;
9071         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
9072                 wol->wolopts = WAKE_MAGIC;
9073         memset(&wol->sopass, 0, sizeof(wol->sopass));
9074 }
9075
9076 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9077 {
9078         struct tg3 *tp = netdev_priv(dev);
9079         struct device *dp = &tp->pdev->dev;
9080
9081         if (wol->wolopts & ~WAKE_MAGIC)
9082                 return -EINVAL;
9083         if ((wol->wolopts & WAKE_MAGIC) &&
9084             !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9085                 return -EINVAL;
9086
9087         spin_lock_bh(&tp->lock);
9088         if (wol->wolopts & WAKE_MAGIC) {
9089                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9090                 device_set_wakeup_enable(dp, true);
9091         } else {
9092                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9093                 device_set_wakeup_enable(dp, false);
9094         }
9095         spin_unlock_bh(&tp->lock);
9096
9097         return 0;
9098 }
9099
/* ethtool: get the driver message-level bitmask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
9105
/* ethtool: set the driver message-level bitmask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
9111
/* ethtool: toggle TSO.  Non-capable chips only accept "off".  On chips
 * with hardware TSO v2 (other than the 5906), NETIF_F_TSO6 — and on
 * 5761/5784 (non-AX)/5785 also NETIF_F_TSO_ECN — is toggled in lockstep
 * with the base TSO feature.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
			     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}
	return ethtool_op_set_tso(dev, value);
}
9135
/* ethtool nway-reset: restart link autonegotiation.  Requires a running
 * interface; rejected outright on SERDES PHYs.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		/* phylib-managed PHY: let the PHY layer restart aneg. */
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice and the first result is
		 * discarded — presumably to flush a stale value; confirm
		 * before simplifying to a single read.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
			/* Restart aneg; this also re-enables it if the link
			 * came up via parallel detection.
			 */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
9169
9170 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9171 {
9172         struct tg3 *tp = netdev_priv(dev);
9173
9174         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9175         ering->rx_mini_max_pending = 0;
9176         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9177                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9178         else
9179                 ering->rx_jumbo_max_pending = 0;
9180
9181         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9182
9183         ering->rx_pending = tp->rx_pending;
9184         ering->rx_mini_pending = 0;
9185         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9186                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9187         else
9188                 ering->rx_jumbo_pending = 0;
9189
9190         ering->tx_pending = tp->tx_pending;
9191 }
9192
/* ethtool -G: change ring sizes.  New sizes only take effect after a
 * full chip re-initialization, so a running interface is stopped,
 * reprogrammed and restarted.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	/* Reject out-of-range sizes; the TX ring must exceed
	 * MAX_SKB_FRAGS (x3 on chips with the TSO bug workaround) so a
	 * maximally fragmented packet always fits.
	 */
	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		/* Halt and fully re-init the chip with the new sizes. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
9236
9237 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9238 {
9239         struct tg3 *tp = netdev_priv(dev);
9240
9241         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9242
9243         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9244                 epause->rx_pause = 1;
9245         else
9246                 epause->rx_pause = 0;
9247
9248         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9249                 epause->tx_pause = 1;
9250         else
9251                 epause->tx_pause = 0;
9252 }
9253
/* ethtool -A: configure flow control.  With phylib, autonegotiated
 * pause is expressed through the PHY's Pause/Asym_Pause advertisement;
 * otherwise the flags are stored and the MAC is halted and re-inited
 * so the new settings take effect.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;

		if (epause->autoneg) {
			u32 newadv;
			struct phy_device *phydev;

			phydev = tp->mdio_bus->phy_map[PHY_ADDR];

			/* Map the rx/tx pause request onto the 802.3
			 * Pause / Asym_Pause advertisement bits.
			 */
			if (epause->rx_pause) {
				if (epause->tx_pause)
					newadv = ADVERTISED_Pause;
				else
					newadv = ADVERTISED_Pause |
						 ADVERTISED_Asym_Pause;
			} else if (epause->tx_pause) {
				newadv = ADVERTISED_Asym_Pause;
			} else
				newadv = 0;

			if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
				/* PHY attached: update its advertisement
				 * and renegotiate only if it changed.
				 */
				u32 oldadv = phydev->advertising &
					     (ADVERTISED_Pause |
					      ADVERTISED_Asym_Pause);
				if (oldadv != newadv) {
					phydev->advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
					phydev->advertising |= newadv;
					err = phy_start_aneg(phydev);
				}
			} else {
				/* Not attached yet: stash the bits in
				 * link_config for later.
				 */
				tp->link_config.advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
				tp->link_config.advertising |= newadv;
			}
		} else {
			/* Forced flow control: record it and apply now
			 * if the interface is up.
			 */
			if (epause->rx_pause)
				tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
			else
				tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;

			if (epause->tx_pause)
				tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
			else
				tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;

			if (netif_running(dev))
				tg3_setup_flow_control(tp, 0, 0);
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
		else
			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
		if (epause->rx_pause)
			tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;

		if (netif_running(dev)) {
			/* Re-init the chip so the MAC picks up the new
			 * flow-control configuration.
			 */
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
9346
/* ethtool: report whether RX checksum offload is enabled. */
static u32 tg3_get_rx_csum(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
}
9352
9353 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9354 {
9355         struct tg3 *tp = netdev_priv(dev);
9356
9357         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9358                 if (data != 0)
9359                         return -EINVAL;
9360                 return 0;
9361         }
9362
9363         spin_lock_bh(&tp->lock);
9364         if (data)
9365                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9366         else
9367                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9368         spin_unlock_bh(&tp->lock);
9369
9370         return 0;
9371 }
9372
9373 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9374 {
9375         struct tg3 *tp = netdev_priv(dev);
9376
9377         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9378                 if (data != 0)
9379                         return -EINVAL;
9380                 return 0;
9381         }
9382
9383         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9384             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9385             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9386             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9387             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9388                 ethtool_op_set_tx_ipv6_csum(dev, data);
9389         else
9390                 ethtool_op_set_tx_csum(dev, data);
9391
9392         return 0;
9393 }
9394
9395 static int tg3_get_sset_count (struct net_device *dev, int sset)
9396 {
9397         switch (sset) {
9398         case ETH_SS_TEST:
9399                 return TG3_NUM_TEST;
9400         case ETH_SS_STATS:
9401                 return TG3_NUM_STATS;
9402         default:
9403                 return -EOPNOTSUPP;
9404         }
9405 }
9406
9407 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9408 {
9409         switch (stringset) {
9410         case ETH_SS_STATS:
9411                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9412                 break;
9413         case ETH_SS_TEST:
9414                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9415                 break;
9416         default:
9417                 WARN_ON(1);     /* we need a WARN() */
9418                 break;
9419         }
9420 }
9421
9422 static int tg3_phys_id(struct net_device *dev, u32 data)
9423 {
9424         struct tg3 *tp = netdev_priv(dev);
9425         int i;
9426
9427         if (!netif_running(tp->dev))
9428                 return -EAGAIN;
9429
9430         if (data == 0)
9431                 data = UINT_MAX / 2;
9432
9433         for (i = 0; i < (data * 2); i++) {
9434                 if ((i % 2) == 0)
9435                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9436                                            LED_CTRL_1000MBPS_ON |
9437                                            LED_CTRL_100MBPS_ON |
9438                                            LED_CTRL_10MBPS_ON |
9439                                            LED_CTRL_TRAFFIC_OVERRIDE |
9440                                            LED_CTRL_TRAFFIC_BLINK |
9441                                            LED_CTRL_TRAFFIC_LED);
9442
9443                 else
9444                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9445                                            LED_CTRL_TRAFFIC_OVERRIDE);
9446
9447                 if (msleep_interruptible(500))
9448                         break;
9449         }
9450         tw32(MAC_LED_CTRL, tp->led_ctrl);
9451         return 0;
9452 }
9453
/* ethtool -S: copy the freshly updated driver statistics block out. */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
9460
9461 #define NVRAM_TEST_SIZE 0x100
9462 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
9463 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
9464 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
9465 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9466 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9467
/* Validate the NVRAM image.
 *
 * The magic word at offset 0 selects how much of the NVRAM to read and
 * which integrity check applies:
 *   - TG3_EEPROM_MAGIC: legacy image; CRC-check the bootstrap header and
 *     the manufacturing block.
 *   - selfboot firmware (TG3_EEPROM_MAGIC_FW): 8-bit checksum over the
 *     image (rev-2 images exclude the 4-byte MBA word).
 *   - selfboot hardware (TG3_EEPROM_MAGIC_HW): per-byte parity check.
 *
 * Returns 0 on success (or for unrecognized selfboot revisions, which are
 * deliberately not checked), -EIO on read failure or integrity mismatch,
 * -ENOMEM if the staging buffer cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
        u32 csum, magic;
        __le32 *buf;
        int i, j, k, err = 0, size;

        if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
                return -EIO;

        /* Determine how many bytes to read based on the image format. */
        if (magic == TG3_EEPROM_MAGIC)
                size = NVRAM_TEST_SIZE;
        else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
                if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
                    TG3_EEPROM_SB_FORMAT_1) {
                        switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
                        case TG3_EEPROM_SB_REVISION_0:
                                size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_2:
                                size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_3:
                                size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
                                break;
                        default:
                                /* Unknown revision: nothing to verify. */
                                return 0;
                        }
                } else
                        return 0;
        } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
                size = NVRAM_SELFBOOT_HW_SIZE;
        else
                return -EIO;

        buf = kmalloc(size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;

        err = -EIO;
        for (i = 0, j = 0; i < size; i += 4, j++) {
                if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
                        break;
        }
        /* The read loop stopped early on a failed word: propagate err. */
        if (i < size)
                goto out;

        /* Selfboot format */
        magic = swab32(le32_to_cpu(buf[0]));
        if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
            TG3_EEPROM_MAGIC_FW) {
                u8 *buf8 = (u8 *) buf, csum8 = 0;

                /* The image is valid when the bytes sum to zero mod 256. */
                if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
                    TG3_EEPROM_SB_REVISION_2) {
                        /* For rev 2, the csum doesn't include the MBA. */
                        for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
                                csum8 += buf8[i];
                        for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
                                csum8 += buf8[i];
                } else {
                        for (i = 0; i < size; i++)
                                csum8 += buf8[i];
                }

                if (csum8 == 0) {
                        err = 0;
                        goto out;
                }

                err = -EIO;
                goto out;
        }

        if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
            TG3_EEPROM_MAGIC_HW) {
                u8 data[NVRAM_SELFBOOT_DATA_SIZE];
                u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
                u8 *buf8 = (u8 *) buf;

                /* Separate the parity bits and the data bytes.  */
                /* Parity bits are packed into bytes 0, 8, and 16-17 of the
                 * image; the remaining bytes are payload data.  The bumps
                 * of 'i' inside the body skip over those parity bytes.
                 */
                for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
                        if ((i == 0) || (i == 8)) {
                                int l;
                                u8 msk;

                                /* Only 7 of the 8 bits hold parity here. */
                                for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        else if (i == 16) {
                                int l;
                                u8 msk;

                                /* 6 parity bits in byte 16 ... */
                                for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;

                                /* ... and 8 more in byte 17. */
                                for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        data[j++] = buf8[i];
                }

                err = -EIO;
                for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
                        u8 hw8 = hweight8(data[i]);

                        /* Each data byte plus its parity bit must have odd
                         * combined parity; both failure arms below catch
                         * the even-parity combinations.
                         */
                        if ((hw8 & 0x1) && parity[i])
                                goto out;
                        else if (!(hw8 & 0x1) && !parity[i])
                                goto out;
                }
                err = 0;
                goto out;
        }

        /* Legacy image: two CRC-protected regions. */
        /* Bootstrap checksum at offset 0x10 */
        csum = calc_crc((unsigned char *) buf, 0x10);
        if(csum != le32_to_cpu(buf[0x10/4]))
                goto out;

        /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
        csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
        if (csum != le32_to_cpu(buf[0xfc/4]))
                 goto out;

        err = 0;

out:
        kfree(buf);
        return err;
}
9601
9602 #define TG3_SERDES_TIMEOUT_SEC  2
9603 #define TG3_COPPER_TIMEOUT_SEC  6
9604
9605 static int tg3_test_link(struct tg3 *tp)
9606 {
9607         int i, max;
9608
9609         if (!netif_running(tp->dev))
9610                 return -ENODEV;
9611
9612         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9613                 max = TG3_SERDES_TIMEOUT_SEC;
9614         else
9615                 max = TG3_COPPER_TIMEOUT_SEC;
9616
9617         for (i = 0; i < max; i++) {
9618                 if (netif_carrier_ok(tp->dev))
9619                         return 0;
9620
9621                 if (msleep_interruptible(1000))
9622                         break;
9623         }
9624
9625         return -EIO;
9626 }
9627
9628 /* Only test the commonly used registers */
9629 static int tg3_test_registers(struct tg3 *tp)
9630 {
9631         int i, is_5705, is_5750;
9632         u32 offset, read_mask, write_mask, val, save_val, read_val;
9633         static struct {
9634                 u16 offset;
9635                 u16 flags;
9636 #define TG3_FL_5705     0x1
9637 #define TG3_FL_NOT_5705 0x2
9638 #define TG3_FL_NOT_5788 0x4
9639 #define TG3_FL_NOT_5750 0x8
9640                 u32 read_mask;
9641                 u32 write_mask;
9642         } reg_tbl[] = {
9643                 /* MAC Control Registers */
9644                 { MAC_MODE, TG3_FL_NOT_5705,
9645                         0x00000000, 0x00ef6f8c },
9646                 { MAC_MODE, TG3_FL_5705,
9647                         0x00000000, 0x01ef6b8c },
9648                 { MAC_STATUS, TG3_FL_NOT_5705,
9649                         0x03800107, 0x00000000 },
9650                 { MAC_STATUS, TG3_FL_5705,
9651                         0x03800100, 0x00000000 },
9652                 { MAC_ADDR_0_HIGH, 0x0000,
9653                         0x00000000, 0x0000ffff },
9654                 { MAC_ADDR_0_LOW, 0x0000,
9655                         0x00000000, 0xffffffff },
9656                 { MAC_RX_MTU_SIZE, 0x0000,
9657                         0x00000000, 0x0000ffff },
9658                 { MAC_TX_MODE, 0x0000,
9659                         0x00000000, 0x00000070 },
9660                 { MAC_TX_LENGTHS, 0x0000,
9661                         0x00000000, 0x00003fff },
9662                 { MAC_RX_MODE, TG3_FL_NOT_5705,
9663                         0x00000000, 0x000007fc },
9664                 { MAC_RX_MODE, TG3_FL_5705,
9665                         0x00000000, 0x000007dc },
9666                 { MAC_HASH_REG_0, 0x0000,
9667                         0x00000000, 0xffffffff },
9668                 { MAC_HASH_REG_1, 0x0000,
9669                         0x00000000, 0xffffffff },
9670                 { MAC_HASH_REG_2, 0x0000,
9671                         0x00000000, 0xffffffff },
9672                 { MAC_HASH_REG_3, 0x0000,
9673                         0x00000000, 0xffffffff },
9674
9675                 /* Receive Data and Receive BD Initiator Control Registers. */
9676                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9677                         0x00000000, 0xffffffff },
9678                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9679                         0x00000000, 0xffffffff },
9680                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9681                         0x00000000, 0x00000003 },
9682                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9683                         0x00000000, 0xffffffff },
9684                 { RCVDBDI_STD_BD+0, 0x0000,
9685                         0x00000000, 0xffffffff },
9686                 { RCVDBDI_STD_BD+4, 0x0000,
9687                         0x00000000, 0xffffffff },
9688                 { RCVDBDI_STD_BD+8, 0x0000,
9689                         0x00000000, 0xffff0002 },
9690                 { RCVDBDI_STD_BD+0xc, 0x0000,
9691                         0x00000000, 0xffffffff },
9692
9693                 /* Receive BD Initiator Control Registers. */
9694                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9695                         0x00000000, 0xffffffff },
9696                 { RCVBDI_STD_THRESH, TG3_FL_5705,
9697                         0x00000000, 0x000003ff },
9698                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9699                         0x00000000, 0xffffffff },
9700
9701                 /* Host Coalescing Control Registers. */
9702                 { HOSTCC_MODE, TG3_FL_NOT_5705,
9703                         0x00000000, 0x00000004 },
9704                 { HOSTCC_MODE, TG3_FL_5705,
9705                         0x00000000, 0x000000f6 },
9706                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9707                         0x00000000, 0xffffffff },
9708                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9709                         0x00000000, 0x000003ff },
9710                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9711                         0x00000000, 0xffffffff },
9712                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9713                         0x00000000, 0x000003ff },
9714                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9715                         0x00000000, 0xffffffff },
9716                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9717                         0x00000000, 0x000000ff },
9718                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9719                         0x00000000, 0xffffffff },
9720                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9721                         0x00000000, 0x000000ff },
9722                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9723                         0x00000000, 0xffffffff },
9724                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9725                         0x00000000, 0xffffffff },
9726                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9727                         0x00000000, 0xffffffff },
9728                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9729                         0x00000000, 0x000000ff },
9730                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9731                         0x00000000, 0xffffffff },
9732                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9733                         0x00000000, 0x000000ff },
9734                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9735                         0x00000000, 0xffffffff },
9736                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9737                         0x00000000, 0xffffffff },
9738                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9739                         0x00000000, 0xffffffff },
9740                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9741                         0x00000000, 0xffffffff },
9742                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9743                         0x00000000, 0xffffffff },
9744                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9745                         0xffffffff, 0x00000000 },
9746                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9747                         0xffffffff, 0x00000000 },
9748
9749                 /* Buffer Manager Control Registers. */
9750                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9751                         0x00000000, 0x007fff80 },
9752                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9753                         0x00000000, 0x007fffff },
9754                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9755                         0x00000000, 0x0000003f },
9756                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9757                         0x00000000, 0x000001ff },
9758                 { BUFMGR_MB_HIGH_WATER, 0x0000,
9759                         0x00000000, 0x000001ff },
9760                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9761                         0xffffffff, 0x00000000 },
9762                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9763                         0xffffffff, 0x00000000 },
9764
9765                 /* Mailbox Registers */
9766                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9767                         0x00000000, 0x000001ff },
9768                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9769                         0x00000000, 0x000001ff },
9770                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9771                         0x00000000, 0x000007ff },
9772                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9773                         0x00000000, 0x000001ff },
9774
9775                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9776         };
9777
9778         is_5705 = is_5750 = 0;
9779         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9780                 is_5705 = 1;
9781                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9782                         is_5750 = 1;
9783         }
9784
9785         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9786                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9787                         continue;
9788
9789                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9790                         continue;
9791
9792                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9793                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
9794                         continue;
9795
9796                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9797                         continue;
9798
9799                 offset = (u32) reg_tbl[i].offset;
9800                 read_mask = reg_tbl[i].read_mask;
9801                 write_mask = reg_tbl[i].write_mask;
9802
9803                 /* Save the original register content */
9804                 save_val = tr32(offset);
9805
9806                 /* Determine the read-only value. */
9807                 read_val = save_val & read_mask;
9808
9809                 /* Write zero to the register, then make sure the read-only bits
9810                  * are not changed and the read/write bits are all zeros.
9811                  */
9812                 tw32(offset, 0);
9813
9814                 val = tr32(offset);
9815
9816                 /* Test the read-only and read/write bits. */
9817                 if (((val & read_mask) != read_val) || (val & write_mask))
9818                         goto out;
9819
9820                 /* Write ones to all the bits defined by RdMask and WrMask, then
9821                  * make sure the read-only bits are not changed and the
9822                  * read/write bits are all ones.
9823                  */
9824                 tw32(offset, read_mask | write_mask);
9825
9826                 val = tr32(offset);
9827
9828                 /* Test the read-only bits. */
9829                 if ((val & read_mask) != read_val)
9830                         goto out;
9831
9832                 /* Test the read/write bits. */
9833                 if ((val & write_mask) != write_mask)
9834                         goto out;
9835
9836                 tw32(offset, save_val);
9837         }
9838
9839         return 0;
9840
9841 out:
9842         if (netif_msg_hw(tp))
9843                 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9844                        offset);
9845         tw32(offset, save_val);
9846         return -EIO;
9847 }
9848
9849 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9850 {
9851         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9852         int i;
9853         u32 j;
9854
9855         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9856                 for (j = 0; j < len; j += 4) {
9857                         u32 val;
9858
9859                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9860                         tg3_read_mem(tp, offset + j, &val);
9861                         if (val != test_pattern[i])
9862                                 return -EIO;
9863                 }
9864         }
9865         return 0;
9866 }
9867
9868 static int tg3_test_memory(struct tg3 *tp)
9869 {
9870         static struct mem_entry {
9871                 u32 offset;
9872                 u32 len;
9873         } mem_tbl_570x[] = {
9874                 { 0x00000000, 0x00b50},
9875                 { 0x00002000, 0x1c000},
9876                 { 0xffffffff, 0x00000}
9877         }, mem_tbl_5705[] = {
9878                 { 0x00000100, 0x0000c},
9879                 { 0x00000200, 0x00008},
9880                 { 0x00004000, 0x00800},
9881                 { 0x00006000, 0x01000},
9882                 { 0x00008000, 0x02000},
9883                 { 0x00010000, 0x0e000},
9884                 { 0xffffffff, 0x00000}
9885         }, mem_tbl_5755[] = {
9886                 { 0x00000200, 0x00008},
9887                 { 0x00004000, 0x00800},
9888                 { 0x00006000, 0x00800},
9889                 { 0x00008000, 0x02000},
9890                 { 0x00010000, 0x0c000},
9891                 { 0xffffffff, 0x00000}
9892         }, mem_tbl_5906[] = {
9893                 { 0x00000200, 0x00008},
9894                 { 0x00004000, 0x00400},
9895                 { 0x00006000, 0x00400},
9896                 { 0x00008000, 0x01000},
9897                 { 0x00010000, 0x01000},
9898                 { 0xffffffff, 0x00000}
9899         };
9900         struct mem_entry *mem_tbl;
9901         int err = 0;
9902         int i;
9903
9904         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9905                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9906                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9907                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9908                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9909                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9910                         mem_tbl = mem_tbl_5755;
9911                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9912                         mem_tbl = mem_tbl_5906;
9913                 else
9914                         mem_tbl = mem_tbl_5705;
9915         } else
9916                 mem_tbl = mem_tbl_570x;
9917
9918         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9919                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9920                     mem_tbl[i].len)) != 0)
9921                         break;
9922         }
9923
9924         return err;
9925 }
9926
9927 #define TG3_MAC_LOOPBACK        0
9928 #define TG3_PHY_LOOPBACK        1
9929
/* Run one loopback iteration: configure either MAC-internal or PHY
 * loopback, transmit a single 1514-byte test frame, and verify that the
 * same frame arrives on the standard receive ring with an intact payload.
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if the test
 * skb cannot be allocated, and -EIO if the frame is not echoed correctly.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
        u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
        u32 desc_idx;
        struct sk_buff *skb, *rx_skb;
        u8 *tx_data;
        dma_addr_t map;
        int num_pkts, tx_len, rx_len, i, err;
        struct tg3_rx_buffer_desc *desc;

        if (loopback_mode == TG3_MAC_LOOPBACK) {
                /* HW errata - mac loopback fails in some cases on 5780.
                 * Normal traffic and PHY loopback are not affected by
                 * errata.
                 */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
                        return 0;

                mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
                           MAC_MODE_PORT_INT_LPBACK;
                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                        mac_mode |= MAC_MODE_LINK_POLARITY;
                if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;
                tw32(MAC_MODE, mac_mode);
        } else if (loopback_mode == TG3_PHY_LOOPBACK) {
                u32 val;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                        u32 phytest;

                        /* Tweak shadow register 0x1b through the EPHY test
                         * window, restoring the test register afterwards.
                         */
                        if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
                                u32 phy;

                                tg3_writephy(tp, MII_TG3_EPHY_TEST,
                                             phytest | MII_TG3_EPHY_SHADOW_EN);
                                if (!tg3_readphy(tp, 0x1b, &phy))
                                        tg3_writephy(tp, 0x1b, phy & ~0x20);
                                tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
                        }
                        val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
                } else
                        val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

                tg3_phy_toggle_automdix(tp, 0);

                tg3_writephy(tp, MII_BMCR, val);
                udelay(40);

                mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                        tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                } else
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;

                /* reset to prevent losing 1st rx packet intermittently */
                if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                        tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                        udelay(10);
                        tw32_f(MAC_RX_MODE, tp->rx_mode);
                }
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
                                mac_mode &= ~MAC_MODE_LINK_POLARITY;
                        else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
                                mac_mode |= MAC_MODE_LINK_POLARITY;
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                }
                tw32(MAC_MODE, mac_mode);
        }
        else
                return -EINVAL;

        err = -EIO;

        /* Build the test frame: destination = our own MAC address,
         * zeroed source/type bytes, then an incrementing byte pattern.
         */
        tx_len = 1514;
        skb = netdev_alloc_skb(tp->dev, tx_len);
        if (!skb)
                return -ENOMEM;

        tx_data = skb_put(skb, tx_len);
        memcpy(tx_data, tp->dev->dev_addr, 6);
        memset(tx_data + 6, 0x0, 8);

        tw32(MAC_RX_MTU_SIZE, tx_len + 4);

        for (i = 14; i < tx_len; i++)
                tx_data[i] = (u8) (i & 0xff);

        map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
             HOSTCC_MODE_NOW);

        udelay(10);

        /* Snapshot the rx producer index before transmitting. */
        rx_start_idx = tp->hw_status->idx[0].rx_producer;

        num_pkts = 0;

        tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

        tp->tx_prod++;
        num_pkts++;

        /* Ring the doorbell and flush with a mailbox read-back. */
        tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
                     tp->tx_prod);
        tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

        udelay(10);

        /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
        for (i = 0; i < 25; i++) {
                tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
                       HOSTCC_MODE_NOW);

                udelay(10);

                tx_idx = tp->hw_status->idx[0].tx_consumer;
                rx_idx = tp->hw_status->idx[0].rx_producer;
                /* Done once the frame was consumed on tx and echoed on rx. */
                if ((tx_idx == tp->tx_prod) &&
                    (rx_idx == (rx_start_idx + num_pkts)))
                        break;
        }

        pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        if (tx_idx != tp->tx_prod)
                goto out;

        if (rx_idx != rx_start_idx + num_pkts)
                goto out;

        /* Inspect the receive return descriptor for our frame. */
        desc = &tp->rx_rcb[rx_start_idx];
        desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
        opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
        if (opaque_key != RXD_OPAQUE_RING_STD)
                goto out;

        if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
            (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
                goto out;

        /* Received length excludes the 4-byte FCS. */
        rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
        if (rx_len != tx_len)
                goto out;

        rx_skb = tp->rx_std_buffers[desc_idx].skb;

        map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
        pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

        /* Verify the payload pattern byte-for-byte. */
        for (i = 14; i < tx_len; i++) {
                if (*(rx_skb->data + i) != (u8) (i & 0xff))
                        goto out;
        }
        err = 0;

        /* tg3_free_rings will unmap and free the rx_skb */
out:
        return err;
}
10097
10098 #define TG3_MAC_LOOPBACK_FAILED         1
10099 #define TG3_PHY_LOOPBACK_FAILED         2
10100 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
10101                                          TG3_PHY_LOOPBACK_FAILED)
10102
/* Run the MAC and (for non-serdes, non-phylib devices) PHY loopback
 * tests after a fresh hardware reset.  On 5784/5761/5785 the CPMU mutex
 * is taken and link-based power management is disabled around the tests.
 * Returns 0 on success or a bitmask of TG3_*_LOOPBACK_FAILED flags.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
        int err = 0;
        u32 cpmuctrl = 0;

        if (!netif_running(tp->dev))
                return TG3_LOOPBACK_FAILED;

        err = tg3_reset_hw(tp, 1);
        if (err)
                return TG3_LOOPBACK_FAILED;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                int i;
                u32 status;

                tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

                /* Wait for up to 40 microseconds to acquire lock. */
                for (i = 0; i < 4; i++) {
                        status = tr32(TG3_CPMU_MUTEX_GNT);
                        if (status == CPMU_MUTEX_GNT_DRIVER)
                                break;
                        udelay(10);
                }

                if (status != CPMU_MUTEX_GNT_DRIVER)
                        return TG3_LOOPBACK_FAILED;

                /* Turn off link-based power management. */
                cpmuctrl = tr32(TG3_CPMU_CTRL);
                tw32(TG3_CPMU_CTRL,
                     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
                                  CPMU_CTRL_LINK_AWARE_MODE));
        }

        if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
                err |= TG3_MAC_LOOPBACK_FAILED;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                /* Restore the saved CPMU control value. */
                tw32(TG3_CPMU_CTRL, cpmuctrl);

                /* Release the mutex */
                tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
        }

        /* PHY loopback is skipped for serdes and phylib-managed devices. */
        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
            !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
                if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
                        err |= TG3_PHY_LOOPBACK_FAILED;
        }

        return err;
}
10161
/* ethtool self-test entry point.  Results land in data[0..5]:
 * nvram, link, registers, memory, loopback, interrupt — a non-zero slot
 * marks that test as failed, and ETH_TEST_FL_FAILED is set in
 * etest->flags on any failure.  The offline tests halt the chip and
 * restart it (when the interface was running) afterwards.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                          u64 *data)
{
        struct tg3 *tp = netdev_priv(dev);

        /* Wake the device if it was put in a low-power state. */
        if (tp->link_config.phy_is_low_power)
                tg3_set_power_state(tp, PCI_D0);

        memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

        if (tg3_test_nvram(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[0] = 1;
        }
        if (tg3_test_link(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[1] = 1;
        }
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int err, err2 = 0, irq_sync = 0;

                /* Quiesce the device before the disruptive tests. */
                if (netif_running(dev)) {
                        tg3_phy_stop(tp);
                        tg3_netif_stop(tp);
                        irq_sync = 1;
                }

                tg3_full_lock(tp, irq_sync);

                /* Halt the chip and its internal CPUs; hold the NVRAM
                 * lock across the CPU halts.
                 */
                tg3_halt(tp, RESET_KIND_SUSPEND, 1);
                err = tg3_nvram_lock(tp);
                tg3_halt_cpu(tp, RX_CPU_BASE);
                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                        tg3_halt_cpu(tp, TX_CPU_BASE);
                if (!err)
                        tg3_nvram_unlock(tp);

                if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_phy_reset(tp);

                if (tg3_test_registers(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[2] = 1;
                }
                if (tg3_test_memory(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[3] = 1;
                }
                /* data[4] records the loopback failure bitmask directly. */
                if ((data[4] = tg3_test_loopback(tp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* The interrupt test runs without the full lock held. */
                tg3_full_unlock(tp);

                if (tg3_test_interrupt(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[5] = 1;
                }

                tg3_full_lock(tp, 0);

                /* Reset and, if the interface was up, restart the chip. */
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                if (netif_running(dev)) {
                        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
                        err2 = tg3_restart_hw(tp, 1);
                        if (!err2)
                                tg3_netif_start(tp);
                }

                tg3_full_unlock(tp);

                if (irq_sync && !err2)
                        tg3_phy_start(tp);
        }
        /* Return the device to its previous low-power state. */
        if (tp->link_config.phy_is_low_power)
                tg3_set_power_state(tp, PCI_D3hot);

}
10239
/* net_device ioctl handler for the MII ioctls (SIOCGMIIPHY, SIOCGMIIREG,
 * SIOCSMIIREG).  When phylib manages the PHY, the request is forwarded to
 * phy_mii_ioctl(); otherwise the PHY register is accessed directly under
 * tp->lock.  Returns -EOPNOTSUPP for unsupported commands and for serdes
 * devices (which have no MII PHY), -EAGAIN while in low-power state.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
                if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
                        return -EAGAIN;
                return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
        }

        switch(cmd) {
        case SIOCGMIIPHY:
                data->phy_id = PHY_ADDR;

                /* fallthru */
        case SIOCGMIIREG: {
                u32 mii_regval;

                if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
                        break;                  /* We have no PHY */

                if (tp->link_config.phy_is_low_power)
                        return -EAGAIN;

                /* Serialize PHY register access against other users. */
                spin_lock_bh(&tp->lock);
                err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
                spin_unlock_bh(&tp->lock);

                data->val_out = mii_regval;

                return err;
        }

        case SIOCSMIIREG:
                if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
                        break;                  /* We have no PHY */

                /* Writing PHY registers requires admin privilege. */
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (tp->link_config.phy_is_low_power)
                        return -EAGAIN;

                spin_lock_bh(&tp->lock);
                err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
                spin_unlock_bh(&tp->lock);

                return err;

        default:
                /* do nothing */
                break;
        }
        return -EOPNOTSUPP;
}
10297
#if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new vlan_group and reprogram the
 * receive mode so the chip keeps or strips VLAN tags accordingly.  The
 * interface is quiesced around the update when it is running.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_running(dev))
                tg3_netif_stop(tp);

        tg3_full_lock(tp, 0);

        tp->vlgrp = grp;

        /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
        __tg3_set_rx_mode(dev);

        if (netif_running(dev))
                tg3_netif_start(tp);

        tg3_full_unlock(tp);
}
#endif
10319
10320 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10321 {
10322         struct tg3 *tp = netdev_priv(dev);
10323
10324         memcpy(ec, &tp->coal, sizeof(*ec));
10325         return 0;
10326 }
10327
/* ethtool set_coalesce handler: validate the requested coalescing
 * parameters against the chip's limits, cache them in tp->coal, and
 * program the hardware if the interface is up.  Returns 0 or -EINVAL.
 */
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
        u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

        /* On 5705-plus chips these limits stay zero, so any nonzero
         * irq-coalescing or stats-block request fails the range check
         * below.
         */
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
                max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
                max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
                max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
                min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
        }

        if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
            (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
            (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
            (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
            (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
            (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
            (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
            (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
            (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
            (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
                return -EINVAL;

        /* No rx interrupts will be generated if both are zero */
        if ((ec->rx_coalesce_usecs == 0) &&
            (ec->rx_max_coalesced_frames == 0))
                return -EINVAL;

        /* No tx interrupts will be generated if both are zero */
        if ((ec->tx_coalesce_usecs == 0) &&
            (ec->tx_max_coalesced_frames == 0))
                return -EINVAL;

        /* Only copy relevant parameters, ignore all others. */
        tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
        tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
        tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
        tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
        tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
        tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
        tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
        tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
        tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

        /* Apply immediately only when the device is up; otherwise the
         * cached values are programmed at the next tg3_init_hw().
         */
        if (netif_running(dev)) {
                tg3_full_lock(tp, 0);
                __tg3_set_coalesce(tp, &tp->coal);
                tg3_full_unlock(tp);
        }
        return 0;
}
10381
/* ethtool operations exported by this driver; attached to the
 * net_device at probe time.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
        .get_settings           = tg3_get_settings,
        .set_settings           = tg3_set_settings,
        .get_drvinfo            = tg3_get_drvinfo,
        .get_regs_len           = tg3_get_regs_len,
        .get_regs               = tg3_get_regs,
        .get_wol                = tg3_get_wol,
        .set_wol                = tg3_set_wol,
        .get_msglevel           = tg3_get_msglevel,
        .set_msglevel           = tg3_set_msglevel,
        .nway_reset             = tg3_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = tg3_get_eeprom_len,
        .get_eeprom             = tg3_get_eeprom,
        .set_eeprom             = tg3_set_eeprom,
        .get_ringparam          = tg3_get_ringparam,
        .set_ringparam          = tg3_set_ringparam,
        .get_pauseparam         = tg3_get_pauseparam,
        .set_pauseparam         = tg3_set_pauseparam,
        .get_rx_csum            = tg3_get_rx_csum,
        .set_rx_csum            = tg3_set_rx_csum,
        .set_tx_csum            = tg3_set_tx_csum,
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = tg3_set_tso,
        .self_test              = tg3_self_test,
        .get_strings            = tg3_get_strings,
        .phys_id                = tg3_phys_id,
        .get_ethtool_stats      = tg3_get_ethtool_stats,
        .get_coalesce           = tg3_get_coalesce,
        .set_coalesce           = tg3_set_coalesce,
        .get_sset_count         = tg3_get_sset_count,
};
10414
/* Probe the size of a seeprom-style part by looking for the point at
 * which addressing wraps back to the magic signature at offset 0.
 * Leaves tp->nvram_size at the EEPROM_CHIP_SIZE default if the magic
 * cannot be read or is unrecognized.
 */
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
        u32 cursize, val, magic;

        tp->nvram_size = EEPROM_CHIP_SIZE;

        if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
                return;

        /* Only proceed for signatures we know how to size. */
        if ((magic != TG3_EEPROM_MAGIC) &&
            ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
            ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
                return;

        /*
         * Size the chip by reading offsets at increasing powers of two.
         * When we encounter our validation signature, we know the addressing
         * has wrapped around, and thus have our chip size.
         */
        cursize = 0x10;

        while (cursize < tp->nvram_size) {
                if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
                        return;

                if (val == magic)
                        break;

                cursize <<= 1;
        }

        tp->nvram_size = cursize;
}
10448
10449 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10450 {
10451         u32 val;
10452
10453         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
10454                 return;
10455
10456         /* Selfboot format */
10457         if (val != TG3_EEPROM_MAGIC) {
10458                 tg3_get_eeprom_size(tp);
10459                 return;
10460         }
10461
10462         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10463                 if (val != 0) {
10464                         tp->nvram_size = (val >> 16) * 1024;
10465                         return;
10466                 }
10467         }
10468         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10469 }
10470
/* Decode NVRAM_CFG1 for pre-5752 chips and record the flash vendor,
 * page size, and buffered/flash flags in tp.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);
        if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
                tp->tg3_flags2 |= TG3_FLG2_FLASH;
        }
        else {
                /* No flash interface: clear compatibility bypass so the
                 * seeprom state machine is used.
                 */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }

        /* Only 5750 and the 5780 class encode a vendor field here. */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
                switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
                        case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
                                break;
                        case FLASH_VENDOR_ATMEL_EEPROM:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_ST:
                                tp->nvram_jedecnum = JEDEC_ST;
                                tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_SAIFUN:
                                tp->nvram_jedecnum = JEDEC_SAIFUN;
                                tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
                                break;
                        case FLASH_VENDOR_SST_SMALL:
                        case FLASH_VENDOR_SST_LARGE:
                                tp->nvram_jedecnum = JEDEC_SST;
                                tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
                                break;
                }
        }
        else {
                /* Default for other chips: buffered Atmel flash. */
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
        }
}
10523
/* Decode NVRAM_CFG1 for 5752 chips: vendor, page size, and the
 * TPM protection flag.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27))
                tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
                case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        break;
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        break;
        }

        if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
                /* Flash parts report their page size in CFG1. */
                switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
                        case FLASH_5752PAGE_SIZE_256:
                                tp->nvram_pagesize = 256;
                                break;
                        case FLASH_5752PAGE_SIZE_512:
                                tp->nvram_pagesize = 512;
                                break;
                        case FLASH_5752PAGE_SIZE_1K:
                                tp->nvram_pagesize = 1024;
                                break;
                        case FLASH_5752PAGE_SIZE_2K:
                                tp->nvram_pagesize = 2048;
                                break;
                        case FLASH_5752PAGE_SIZE_4K:
                                tp->nvram_pagesize = 4096;
                                break;
                        case FLASH_5752PAGE_SIZE_264:
                                tp->nvram_pagesize = 264;
                                break;
                }
        }
        else {
                /* For eeprom, set pagesize to maximum eeprom size */
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }
}
10584
/* Decode NVRAM_CFG1 for 5755 chips.  Besides vendor and page size,
 * this also derives the part's total size, which is smaller when the
 * TPM protection bit reserves the top of the device.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
                case FLASH_5755VENDOR_ATMEL_FLASH_1:
                case FLASH_5755VENDOR_ATMEL_FLASH_2:
                case FLASH_5755VENDOR_ATMEL_FLASH_3:
                case FLASH_5755VENDOR_ATMEL_FLASH_5:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 264;
                        if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
                            nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
                                tp->nvram_size = (protect ? 0x3e200 :
                                                  TG3_NVRAM_SIZE_512KB);
                        else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
                                tp->nvram_size = (protect ? 0x1f200 :
                                                  TG3_NVRAM_SIZE_256KB);
                        else
                                tp->nvram_size = (protect ? 0x1f200 :
                                                  TG3_NVRAM_SIZE_128KB);
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 256;
                        if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
                                tp->nvram_size = (protect ?
                                                  TG3_NVRAM_SIZE_64KB :
                                                  TG3_NVRAM_SIZE_128KB);
                        else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
                                tp->nvram_size = (protect ?
                                                  TG3_NVRAM_SIZE_64KB :
                                                  TG3_NVRAM_SIZE_256KB);
                        else
                                tp->nvram_size = (protect ?
                                                  TG3_NVRAM_SIZE_128KB :
                                                  TG3_NVRAM_SIZE_512KB);
                        break;
        }
}
10640
/* Decode NVRAM_CFG1 for 5787-family chips (also used for 5784/5785):
 * vendor, page size, and buffered/flash flags.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
                case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
                case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
                case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                        /* EEPROM parts: disable compatibility bypass. */
                        nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                        tw32(NVRAM_CFG1, nvcfg1);
                        break;
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                case FLASH_5755VENDOR_ATMEL_FLASH_1:
                case FLASH_5755VENDOR_ATMEL_FLASH_2:
                case FLASH_5755VENDOR_ATMEL_FLASH_3:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 264;
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 256;
                        break;
        }
}
10678
/* Decode NVRAM_CFG1 for 5761 chips.  When TPM protection is active the
 * usable size comes from the NVRAM_ADDR_LOCKOUT register; otherwise it
 * is derived from the flash part's vendor/density code.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
                case FLASH_5761VENDOR_ATMEL_ADB021D:
                case FLASH_5761VENDOR_ATMEL_ADB041D:
                case FLASH_5761VENDOR_ATMEL_ADB081D:
                case FLASH_5761VENDOR_ATMEL_ADB161D:
                case FLASH_5761VENDOR_ATMEL_MDB021D:
                case FLASH_5761VENDOR_ATMEL_MDB041D:
                case FLASH_5761VENDOR_ATMEL_MDB081D:
                case FLASH_5761VENDOR_ATMEL_MDB161D:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        /* These Atmel parts use linear addressing; skip the
                         * AT45DB0X1B page/offset address translation.
                         */
                        tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
                        tp->nvram_pagesize = 256;
                        break;
                case FLASH_5761VENDOR_ST_A_M45PE20:
                case FLASH_5761VENDOR_ST_A_M45PE40:
                case FLASH_5761VENDOR_ST_A_M45PE80:
                case FLASH_5761VENDOR_ST_A_M45PE16:
                case FLASH_5761VENDOR_ST_M_M45PE20:
                case FLASH_5761VENDOR_ST_M_M45PE40:
                case FLASH_5761VENDOR_ST_M_M45PE80:
                case FLASH_5761VENDOR_ST_M_M45PE16:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 256;
                        break;
        }

        if (protect) {
                tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
        } else {
                switch (nvcfg1) {
                        case FLASH_5761VENDOR_ATMEL_ADB161D:
                        case FLASH_5761VENDOR_ATMEL_MDB161D:
                        case FLASH_5761VENDOR_ST_A_M45PE16:
                        case FLASH_5761VENDOR_ST_M_M45PE16:
                                tp->nvram_size = TG3_NVRAM_SIZE_2MB;
                                break;
                        case FLASH_5761VENDOR_ATMEL_ADB081D:
                        case FLASH_5761VENDOR_ATMEL_MDB081D:
                        case FLASH_5761VENDOR_ST_A_M45PE80:
                        case FLASH_5761VENDOR_ST_M_M45PE80:
                                tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                                break;
                        case FLASH_5761VENDOR_ATMEL_ADB041D:
                        case FLASH_5761VENDOR_ATMEL_MDB041D:
                        case FLASH_5761VENDOR_ST_A_M45PE40:
                        case FLASH_5761VENDOR_ST_M_M45PE40:
                                tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                                break;
                        case FLASH_5761VENDOR_ATMEL_ADB021D:
                        case FLASH_5761VENDOR_ATMEL_MDB021D:
                        case FLASH_5761VENDOR_ST_A_M45PE20:
                        case FLASH_5761VENDOR_ST_M_M45PE20:
                                tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                                break;
                }
        }
}
10753
10754 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10755 {
10756         tp->nvram_jedecnum = JEDEC_ATMEL;
10757         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10758         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10759 }
10760
10761 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10762 static void __devinit tg3_nvram_init(struct tg3 *tp)
10763 {
10764         tw32_f(GRC_EEPROM_ADDR,
10765              (EEPROM_ADDR_FSM_RESET |
10766               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10767                EEPROM_ADDR_CLKPERD_SHIFT)));
10768
10769         msleep(1);
10770
10771         /* Enable seeprom accesses. */
10772         tw32_f(GRC_LOCAL_CTRL,
10773              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10774         udelay(100);
10775
10776         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10777             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10778                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10779
10780                 if (tg3_nvram_lock(tp)) {
10781                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10782                                "tg3_nvram_init failed.\n", tp->dev->name);
10783                         return;
10784                 }
10785                 tg3_enable_nvram_access(tp);
10786
10787                 tp->nvram_size = 0;
10788
10789                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10790                         tg3_get_5752_nvram_info(tp);
10791                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10792                         tg3_get_5755_nvram_info(tp);
10793                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10794                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10795                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10796                         tg3_get_5787_nvram_info(tp);
10797                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10798                         tg3_get_5761_nvram_info(tp);
10799                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10800                         tg3_get_5906_nvram_info(tp);
10801                 else
10802                         tg3_get_nvram_info(tp);
10803
10804                 if (tp->nvram_size == 0)
10805                         tg3_get_nvram_size(tp);
10806
10807                 tg3_disable_nvram_access(tp);
10808                 tg3_nvram_unlock(tp);
10809
10810         } else {
10811                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10812
10813                 tg3_get_eeprom_size(tp);
10814         }
10815 }
10816
/* Read one 32-bit word at @offset through the GRC seeprom interface
 * (used when the chip has no NVRAM block).  @offset must be 32-bit
 * aligned and within EEPROM_ADDR_ADDR_MASK.  Returns 0 on success,
 * -EINVAL for a bad offset, or -EBUSY on completion timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
                                        u32 offset, u32 *val)
{
        u32 tmp;
        int i;

        if (offset > EEPROM_ADDR_ADDR_MASK ||
            (offset % 4) != 0)
                return -EINVAL;

        /* Preserve untouched GRC_EEPROM_ADDR bits while inserting the
         * address, device id 0, and the READ + START commands.
         */
        tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
                                        EEPROM_ADDR_DEVID_MASK |
                                        EEPROM_ADDR_READ);
        tw32(GRC_EEPROM_ADDR,
             tmp |
             (0 << EEPROM_ADDR_DEVID_SHIFT) |
             ((offset << EEPROM_ADDR_ADDR_SHIFT) &
              EEPROM_ADDR_ADDR_MASK) |
             EEPROM_ADDR_READ | EEPROM_ADDR_START);

        /* Poll for completion, up to ~1 second. */
        for (i = 0; i < 1000; i++) {
                tmp = tr32(GRC_EEPROM_ADDR);

                if (tmp & EEPROM_ADDR_COMPLETE)
                        break;
                msleep(1);
        }
        if (!(tmp & EEPROM_ADDR_COMPLETE))
                return -EBUSY;

        *val = tr32(GRC_EEPROM_DATA);
        return 0;
}
10850
10851 #define NVRAM_CMD_TIMEOUT 10000
10852
10853 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10854 {
10855         int i;
10856
10857         tw32(NVRAM_CMD, nvram_cmd);
10858         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10859                 udelay(10);
10860                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10861                         udelay(10);
10862                         break;
10863                 }
10864         }
10865         if (i == NVRAM_CMD_TIMEOUT) {
10866                 return -EBUSY;
10867         }
10868         return 0;
10869 }
10870
10871 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10872 {
10873         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10874             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10875             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10876            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10877             (tp->nvram_jedecnum == JEDEC_ATMEL))
10878
10879                 addr = ((addr / tp->nvram_pagesize) <<
10880                         ATMEL_AT45DB0X1B_PAGE_POS) +
10881                        (addr % tp->nvram_pagesize);
10882
10883         return addr;
10884 }
10885
10886 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10887 {
10888         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10889             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10890             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10891            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10892             (tp->nvram_jedecnum == JEDEC_ATMEL))
10893
10894                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10895                         tp->nvram_pagesize) +
10896                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10897
10898         return addr;
10899 }
10900
/* Read one 32-bit word at @offset from NVRAM.  Falls back to the GRC
 * seeprom interface on chips without an NVRAM block.  Acquires the
 * NVRAM hardware lock and access enable around the transaction.
 * Returns 0 on success or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
        int ret;

        if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
                return tg3_nvram_read_using_eeprom(tp, offset, val);

        /* Translate to the flash part's physical address format. */
        offset = tg3_nvram_phys_addr(tp, offset);

        if (offset > NVRAM_ADDR_MSK)
                return -EINVAL;

        ret = tg3_nvram_lock(tp);
        if (ret)
                return ret;

        tg3_enable_nvram_access(tp);

        tw32(NVRAM_ADDR, offset);
        ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
                NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

        /* Data register is byte-swapped relative to host order. */
        if (ret == 0)
                *val = swab32(tr32(NVRAM_RDDATA));

        tg3_disable_nvram_access(tp);

        tg3_nvram_unlock(tp);

        return ret;
}
10932
10933 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10934 {
10935         u32 v;
10936         int res = tg3_nvram_read(tp, offset, &v);
10937         if (!res)
10938                 *val = cpu_to_le32(v);
10939         return res;
10940 }
10941
10942 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10943 {
10944         int err;
10945         u32 tmp;
10946
10947         err = tg3_nvram_read(tp, offset, &tmp);
10948         *val = swab32(tmp);
10949         return err;
10950 }
10951
/* Write @len bytes from @buf to the seeprom starting at @offset, one
 * 32-bit word at a time, through the GRC EEPROM interface.  @offset
 * and @len are expected to be dword aligned (see callers).  Returns 0
 * on success or -EBUSY if a word write times out.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
                                    u32 offset, u32 len, u8 *buf)
{
        int i, j, rc = 0;
        u32 val;

        for (i = 0; i < len; i += 4) {
                u32 addr;
                __le32 data;

                addr = offset + i;

                /* buf may be unaligned; copy rather than cast. */
                memcpy(&data, buf + i, 4);

                tw32(GRC_EEPROM_DATA, le32_to_cpu(data));

                /* Writing EEPROM_ADDR_COMPLETE back clears the
                 * (write-one-to-clear) completion status.
                 */
                val = tr32(GRC_EEPROM_ADDR);
                tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

                val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
                        EEPROM_ADDR_READ);
                tw32(GRC_EEPROM_ADDR, val |
                        (0 << EEPROM_ADDR_DEVID_SHIFT) |
                        (addr & EEPROM_ADDR_ADDR_MASK) |
                        EEPROM_ADDR_START |
                        EEPROM_ADDR_WRITE);

                /* Poll for completion, up to ~1 second per word. */
                for (j = 0; j < 1000; j++) {
                        val = tr32(GRC_EEPROM_ADDR);

                        if (val & EEPROM_ADDR_COMPLETE)
                                break;
                        msleep(1);
                }
                if (!(val & EEPROM_ADDR_COMPLETE)) {
                        rc = -EBUSY;
                        break;
                }
        }

        return rc;
}
10994
10995 /* offset and length are dword aligned */
10996 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10997                 u8 *buf)
10998 {
10999         int ret = 0;
11000         u32 pagesize = tp->nvram_pagesize;
11001         u32 pagemask = pagesize - 1;
11002         u32 nvram_cmd;
11003         u8 *tmp;
11004
11005         tmp = kmalloc(pagesize, GFP_KERNEL);
11006         if (tmp == NULL)
11007                 return -ENOMEM;
11008
11009         while (len) {
11010                 int j;
11011                 u32 phy_addr, page_off, size;
11012
11013                 phy_addr = offset & ~pagemask;
11014
11015                 for (j = 0; j < pagesize; j += 4) {
11016                         if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
11017                                                 (__le32 *) (tmp + j))))
11018                                 break;
11019                 }
11020                 if (ret)
11021                         break;
11022
11023                 page_off = offset & pagemask;
11024                 size = pagesize;
11025                 if (len < size)
11026                         size = len;
11027
11028                 len -= size;
11029
11030                 memcpy(tmp + page_off, buf, size);
11031
11032                 offset = offset + (pagesize - page_off);
11033
11034                 tg3_enable_nvram_access(tp);
11035
11036                 /*
11037                  * Before we can erase the flash page, we need
11038                  * to issue a special "write enable" command.
11039                  */
11040                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11041
11042                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11043                         break;
11044
11045                 /* Erase the target page */
11046                 tw32(NVRAM_ADDR, phy_addr);
11047
11048                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11049                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11050
11051                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11052                         break;
11053
11054                 /* Issue another write enable to start the write. */
11055                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11056
11057                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11058                         break;
11059
11060                 for (j = 0; j < pagesize; j += 4) {
11061                         __be32 data;
11062
11063                         data = *((__be32 *) (tmp + j));
11064                         /* swab32(le32_to_cpu(data)), actually */
11065                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
11066
11067                         tw32(NVRAM_ADDR, phy_addr + j);
11068
11069                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11070                                 NVRAM_CMD_WR;
11071
11072                         if (j == 0)
11073                                 nvram_cmd |= NVRAM_CMD_FIRST;
11074                         else if (j == (pagesize - 4))
11075                                 nvram_cmd |= NVRAM_CMD_LAST;
11076
11077                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11078                                 break;
11079                 }
11080                 if (ret)
11081                         break;
11082         }
11083
11084         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11085         tg3_nvram_exec_cmd(tp, nvram_cmd);
11086
11087         kfree(tmp);
11088
11089         return ret;
11090 }
11091
/* offset and length are dword aligned */
/* Write 'len' bytes from 'buf' to buffered flash one 32-bit word at a
 * time, tagging words on page boundaries with NVRAM_CMD_FIRST/LAST so
 * the controller's internal page buffer is flushed correctly.
 * Returns 0 on success or a negative errno from tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST at the start of each page and at the start of
		 * the transfer; LAST at the end of each page and at the
		 * end of the transfer.
		 */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST-brand parts (except on the listed ASIC revs) need
		 * an explicit write-enable before each page's first word.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
11146
11147 /* offset and length are dword aligned */
11148 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11149 {
11150         int ret;
11151
11152         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11153                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11154                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
11155                 udelay(40);
11156         }
11157
11158         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11159                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11160         }
11161         else {
11162                 u32 grc_mode;
11163
11164                 ret = tg3_nvram_lock(tp);
11165                 if (ret)
11166                         return ret;
11167
11168                 tg3_enable_nvram_access(tp);
11169                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11170                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
11171                         tw32(NVRAM_WRITE1, 0x406);
11172
11173                 grc_mode = tr32(GRC_MODE);
11174                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11175
11176                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11177                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11178
11179                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
11180                                 buf);
11181                 }
11182                 else {
11183                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11184                                 buf);
11185                 }
11186
11187                 grc_mode = tr32(GRC_MODE);
11188                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11189
11190                 tg3_disable_nvram_access(tp);
11191                 tg3_nvram_unlock(tp);
11192         }
11193
11194         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11195                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11196                 udelay(40);
11197         }
11198
11199         return ret;
11200 }
11201
/* Maps a PCI (subsystem vendor, subsystem device) pair to the PHY ID
 * known to be on that board; consulted by lookup_by_subsys() when the
 * PHY cannot be identified from hardware or EEPROM.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
11206
/* Hardcoded board table used as a last-resort PHY identification.
 * A phy_id of 0 marks boards with no copper PHY (treated as serdes
 * by tg3_phy_probe()).
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
11244
11245 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11246 {
11247         int i;
11248
11249         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11250                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11251                      tp->pdev->subsystem_vendor) &&
11252                     (subsys_id_to_phy_id[i].subsys_devid ==
11253                      tp->pdev->subsystem_device))
11254                         return &subsys_id_to_phy_id[i];
11255         }
11256         return NULL;
11257 }
11258
/* Parse the board configuration that bootcode left in NIC SRAM (or,
 * on 5906, in the VCPU shadow register) and set up tp->phy_id,
 * tp->led_ctrl and the tg3_flags* bits that depend on it: write
 * protect, WOL capability/enable, ASF/APE, serdes, ASPM workaround
 * and RGMII in-band signalling.  If the SRAM signature is absent,
 * only the defaults set early in this function apply.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;

	/* 5906 keeps its config in the VCPU shadow register rather than
	 * NIC SRAM; handle it and return early.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
		    device_may_wakeup(&tp->pdev->dev))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
		return;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 only exists on newer chips with a sane bootcode
		 * version; CFG_4 only on 5785.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Repack the SRAM's two PHY ID halves into the driver's
		 * internal PHY ID layout.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		/* Decode the LED mode; 5750+ chips carry extra "Shasta"
		 * LED modes in CFG_2.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Board-specific LED overrides. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		/* EEPROM write protect implies an onboard (LOM) device,
		 * except on the listed Arima boards.
		 */
		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
		} else {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
			tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;

		if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE) &&
		    device_may_wakeup(&tp->pdev->dev))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;

		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		}

		if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
	}
}
11461
11462 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11463 {
11464         int i;
11465         u32 val;
11466
11467         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11468         tw32(OTP_CTRL, cmd);
11469
11470         /* Wait for up to 1 ms for command to execute. */
11471         for (i = 0; i < 100; i++) {
11472                 val = tr32(OTP_STATUS);
11473                 if (val & OTP_STATUS_CMD_DONE)
11474                         break;
11475                 udelay(10);
11476         }
11477
11478         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11479 }
11480
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 * Returns 0 if any OTP command fails.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register interface. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	/* Word containing the top half of the config value. */
	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	/* Word containing the bottom half. */
	bhalf_otp = tr32(OTP_READ_DATA);

	/* Merge: low 16 bits of the first word become the high half,
	 * high 16 bits of the second word become the low half.
	 */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
11510
/* Identify the attached PHY and record its ID in tp->phy_id, falling
 * back from a live MII read to the ID cached from EEPROM by
 * tg3_get_eeprom_hw_cfg(), and finally to the hardcoded subsystem-ID
 * table.  For copper PHYs not managed by ASF/APE this may also reset
 * the PHY and restart autonegotiation.  Returns 0 on success or a
 * negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* If phylib manages the PHY, delegate entirely. */
	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firwmare access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Repack MII_PHYSID1/2 into the driver's internal PHY
		 * ID layout (same packing used for the EEPROM ID).
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* phy_id 0 in the table denotes a serdes board. */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR is read twice — presumably to clear the latched
		 * link-status bit before testing it (TODO confirm).
		 * If the link is already up, skip the reset below.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* Early 5701 revs must advertise as master. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		/* If the PHY is not already advertising everything we
		 * want, rewrite the advertisement registers and restart
		 * autonegotiation.
		 */
		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		/* NOTE(review): advertisement registers are rewritten
		 * unconditionally here even when the block above already
		 * wrote them — looks redundant but harmless; confirm
		 * against upstream history before removing.
		 */
		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): tg3_init_5401phy_dsp() is invoked a second time
	 * here for the same PHY.  Possibly a deliberate retry of the DSP
	 * patch — confirm before consolidating.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	/* Serdes links only advertise gigabit/fibre modes. */
	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
11641
11642 static void __devinit tg3_read_partno(struct tg3 *tp)
11643 {
11644         unsigned char vpd_data[256];
11645         unsigned int i;
11646         u32 magic;
11647
11648         if (tg3_nvram_read_swab(tp, 0x0, &magic))
11649                 goto out_not_found;
11650
11651         if (magic == TG3_EEPROM_MAGIC) {
11652                 for (i = 0; i < 256; i += 4) {
11653                         u32 tmp;
11654
11655                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11656                                 goto out_not_found;
11657
11658                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
11659                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
11660                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11661                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11662                 }
11663         } else {
11664                 int vpd_cap;
11665
11666                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11667                 for (i = 0; i < 256; i += 4) {
11668                         u32 tmp, j = 0;
11669                         __le32 v;
11670                         u16 tmp16;
11671
11672                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11673                                               i);
11674                         while (j++ < 100) {
11675                                 pci_read_config_word(tp->pdev, vpd_cap +
11676                                                      PCI_VPD_ADDR, &tmp16);
11677                                 if (tmp16 & 0x8000)
11678                                         break;
11679                                 msleep(1);
11680                         }
11681                         if (!(tmp16 & 0x8000))
11682                                 goto out_not_found;
11683
11684                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11685                                               &tmp);
11686                         v = cpu_to_le32(tmp);
11687                         memcpy(&vpd_data[i], &v, 4);
11688                 }
11689         }
11690
11691         /* Now parse and find the part number. */
11692         for (i = 0; i < 254; ) {
11693                 unsigned char val = vpd_data[i];
11694                 unsigned int block_end;
11695
11696                 if (val == 0x82 || val == 0x91) {
11697                         i = (i + 3 +
11698                              (vpd_data[i + 1] +
11699                               (vpd_data[i + 2] << 8)));
11700                         continue;
11701                 }
11702
11703                 if (val != 0x90)
11704                         goto out_not_found;
11705
11706                 block_end = (i + 3 +
11707                              (vpd_data[i + 1] +
11708                               (vpd_data[i + 2] << 8)));
11709                 i += 3;
11710
11711                 if (block_end > 256)
11712                         goto out_not_found;
11713
11714                 while (i < (block_end - 2)) {
11715                         if (vpd_data[i + 0] == 'P' &&
11716                             vpd_data[i + 1] == 'N') {
11717                                 int partno_len = vpd_data[i + 2];
11718
11719                                 i += 3;
11720                                 if (partno_len > 24 || (partno_len + i) > 256)
11721                                         goto out_not_found;
11722
11723                                 memcpy(tp->board_part_number,
11724                                        &vpd_data[i], partno_len);
11725
11726                                 /* Success. */
11727                                 return;
11728                         }
11729                         i += 3 + vpd_data[i + 2];
11730                 }
11731
11732                 /* Part number not found. */
11733                 goto out_not_found;
11734         }
11735
11736 out_not_found:
11737         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11738                 strcpy(tp->board_part_number, "BCM95906");
11739         else
11740                 strcpy(tp->board_part_number, "none");
11741 }
11742
11743 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11744 {
11745         u32 val;
11746
11747         if (tg3_nvram_read_swab(tp, offset, &val) ||
11748             (val & 0xfc000000) != 0x0c000000 ||
11749             tg3_nvram_read_swab(tp, offset + 4, &val) ||
11750             val != 0)
11751                 return 0;
11752
11753         return 1;
11754 }
11755
/* Read the firmware version string(s) out of NVRAM into tp->fw_ver.
 *
 * First the bootcode version string is copied in; then, when ASF is
 * enabled (and the APE is not), the ASF firmware version is located
 * via the NVRAM directory and appended as ", <ver>".  Any NVRAM read
 * failure or format mismatch aborts silently, leaving whatever has
 * been written to tp->fw_ver so far.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
        u32 val, offset, start;
        u32 ver_offset;
        int i, bcnt;

        /* NVRAM must begin with the expected signature word. */
        if (tg3_nvram_read_swab(tp, 0, &val))
                return;

        if (val != TG3_EEPROM_MAGIC)
                return;

        /* Word 0xc: address of the bootcode image.  Word 0x4: the
         * image's load address, used below to rebase file offsets.
         * (Assumed from the read pattern -- layout is Broadcom NVRAM
         * format, not documented here.)
         */
        if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
            tg3_nvram_read_swab(tp, 0x4, &start))
                return;

        offset = tg3_nvram_logical_addr(tp, offset);

        /* Validate the image header, then fetch the version-string
         * pointer stored at image offset 8.
         */
        if (!tg3_fw_img_is_valid(tp, offset) ||
            tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
                return;

        /* ver_offset is relative to the load address; convert it to a
         * physical NVRAM offset.
         */
        offset = offset + ver_offset - start;
        /* Copy up to 16 bytes of version string, 4 bytes per NVRAM read. */
        for (i = 0; i < 16; i += 4) {
                __le32 v;
                if (tg3_nvram_read_le(tp, offset + i, &v))
                        return;

                memcpy(tp->fw_ver + i, &v, 4);
        }

        /* The ASF firmware version is only appended when ASF manages
         * the device directly (no APE).
         */
        if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
             (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                return;

        /* Scan the NVRAM directory for the ASF init-code entry. */
        for (offset = TG3_NVM_DIR_START;
             offset < TG3_NVM_DIR_END;
             offset += TG3_NVM_DIRENT_SIZE) {
                if (tg3_nvram_read_swab(tp, offset, &val))
                        return;

                if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
                        break;
        }

        /* Loop ran to completion without a match -- no ASF entry. */
        if (offset == TG3_NVM_DIR_END)
                return;

        /* Pre-5705 chips use a fixed load address; later chips store
         * it in the word preceding the directory entry.
         */
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                start = 0x08000000;
        else if (tg3_nvram_read_swab(tp, offset - 4, &start))
                return;

        /* Follow the directory entry to the ASF image, validate it,
         * and read the version-string pointer at image offset 8.
         */
        if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
            !tg3_fw_img_is_valid(tp, offset) ||
            tg3_nvram_read_swab(tp, offset + 8, &val))
                return;

        offset += val - start;

        /* Append ", " then the ASF version after the bootcode string. */
        bcnt = strlen(tp->fw_ver);

        tp->fw_ver[bcnt++] = ',';
        tp->fw_ver[bcnt++] = ' ';

        for (i = 0; i < 4; i++) {
                __le32 v;
                if (tg3_nvram_read_le(tp, offset, &v))
                        return;

                offset += sizeof(v);

                /* Not enough room for a full 4-byte chunk: copy what
                 * fits and stop.
                 */
                if (bcnt > TG3_VER_SIZE - sizeof(v)) {
                        memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
                        break;
                }

                memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
                bcnt += sizeof(v);
        }

        /* Guarantee NUL termination regardless of how the copy ended. */
        tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
11839
11840 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11841
11842 static int __devinit tg3_get_invariants(struct tg3 *tp)
11843 {
11844         static struct pci_device_id write_reorder_chipsets[] = {
11845                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11846                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11847                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11848                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11849                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11850                              PCI_DEVICE_ID_VIA_8385_0) },
11851                 { },
11852         };
11853         u32 misc_ctrl_reg;
11854         u32 cacheline_sz_reg;
11855         u32 pci_state_reg, grc_misc_cfg;
11856         u32 val;
11857         u16 pci_cmd;
11858         int err, pcie_cap;
11859
11860         /* Force memory write invalidate off.  If we leave it on,
11861          * then on 5700_BX chips we have to enable a workaround.
11862          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11863          * to match the cacheline size.  The Broadcom driver have this
11864          * workaround but turns MWI off all the times so never uses
11865          * it.  This seems to suggest that the workaround is insufficient.
11866          */
11867         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11868         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11869         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11870
11871         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11872          * has the register indirect write enable bit set before
11873          * we try to access any of the MMIO registers.  It is also
11874          * critical that the PCI-X hw workaround situation is decided
11875          * before that as well.
11876          */
11877         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11878                               &misc_ctrl_reg);
11879
11880         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11881                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11882         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11883                 u32 prod_id_asic_rev;
11884
11885                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11886                                       &prod_id_asic_rev);
11887                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11888         }
11889
11890         /* Wrong chip ID in 5752 A0. This code can be removed later
11891          * as A0 is not in production.
11892          */
11893         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11894                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11895
11896         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11897          * we need to disable memory and use config. cycles
11898          * only to access all registers. The 5702/03 chips
11899          * can mistakenly decode the special cycles from the
11900          * ICH chipsets as memory write cycles, causing corruption
11901          * of register and memory space. Only certain ICH bridges
11902          * will drive special cycles with non-zero data during the
11903          * address phase which can fall within the 5703's address
11904          * range. This is not an ICH bug as the PCI spec allows
11905          * non-zero address during special cycles. However, only
11906          * these ICH bridges are known to drive non-zero addresses
11907          * during special cycles.
11908          *
11909          * Since special cycles do not cross PCI bridges, we only
11910          * enable this workaround if the 5703 is on the secondary
11911          * bus of these ICH bridges.
11912          */
11913         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11914             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11915                 static struct tg3_dev_id {
11916                         u32     vendor;
11917                         u32     device;
11918                         u32     rev;
11919                 } ich_chipsets[] = {
11920                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11921                           PCI_ANY_ID },
11922                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11923                           PCI_ANY_ID },
11924                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11925                           0xa },
11926                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11927                           PCI_ANY_ID },
11928                         { },
11929                 };
11930                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11931                 struct pci_dev *bridge = NULL;
11932
11933                 while (pci_id->vendor != 0) {
11934                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11935                                                 bridge);
11936                         if (!bridge) {
11937                                 pci_id++;
11938                                 continue;
11939                         }
11940                         if (pci_id->rev != PCI_ANY_ID) {
11941                                 if (bridge->revision > pci_id->rev)
11942                                         continue;
11943                         }
11944                         if (bridge->subordinate &&
11945                             (bridge->subordinate->number ==
11946                              tp->pdev->bus->number)) {
11947
11948                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11949                                 pci_dev_put(bridge);
11950                                 break;
11951                         }
11952                 }
11953         }
11954
11955         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11956                 static struct tg3_dev_id {
11957                         u32     vendor;
11958                         u32     device;
11959                 } bridge_chipsets[] = {
11960                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11961                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11962                         { },
11963                 };
11964                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11965                 struct pci_dev *bridge = NULL;
11966
11967                 while (pci_id->vendor != 0) {
11968                         bridge = pci_get_device(pci_id->vendor,
11969                                                 pci_id->device,
11970                                                 bridge);
11971                         if (!bridge) {
11972                                 pci_id++;
11973                                 continue;
11974                         }
11975                         if (bridge->subordinate &&
11976                             (bridge->subordinate->number <=
11977                              tp->pdev->bus->number) &&
11978                             (bridge->subordinate->subordinate >=
11979                              tp->pdev->bus->number)) {
11980                                 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11981                                 pci_dev_put(bridge);
11982                                 break;
11983                         }
11984                 }
11985         }
11986
11987         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11988          * DMA addresses > 40-bit. This bridge may have other additional
11989          * 57xx devices behind it in some 4-port NIC designs for example.
11990          * Any tg3 device found behind the bridge will also need the 40-bit
11991          * DMA workaround.
11992          */
11993         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11994             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11995                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11996                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11997                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11998         }
11999         else {
12000                 struct pci_dev *bridge = NULL;
12001
12002                 do {
12003                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12004                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
12005                                                 bridge);
12006                         if (bridge && bridge->subordinate &&
12007                             (bridge->subordinate->number <=
12008                              tp->pdev->bus->number) &&
12009                             (bridge->subordinate->subordinate >=
12010                              tp->pdev->bus->number)) {
12011                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12012                                 pci_dev_put(bridge);
12013                                 break;
12014                         }
12015                 } while (bridge);
12016         }
12017
12018         /* Initialize misc host control in PCI block. */
12019         tp->misc_host_ctrl |= (misc_ctrl_reg &
12020                                MISC_HOST_CTRL_CHIPREV);
12021         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12022                                tp->misc_host_ctrl);
12023
12024         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12025                               &cacheline_sz_reg);
12026
12027         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
12028         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
12029         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
12030         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
12031
12032         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12033             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12034                 tp->pdev_peer = tg3_find_peer(tp);
12035
12036         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12037             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12038             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12039             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12040             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12041             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12042             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12043             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12044             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12045                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12046
12047         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12048             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12049                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12050
12051         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12052                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12053                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12054                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12055                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12056                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12057                      tp->pdev_peer == tp->pdev))
12058                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12059
12060                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12061                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12062                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12063                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12064                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12065                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12066                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12067                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12068                 } else {
12069                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12070                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12071                                 ASIC_REV_5750 &&
12072                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12073                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12074                 }
12075         }
12076
12077         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12078              (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12079                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12080
12081         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12082         if (pcie_cap != 0) {
12083                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12084
12085                 pcie_set_readrq(tp->pdev, 4096);
12086
12087                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12088                         u16 lnkctl;
12089
12090                         pci_read_config_word(tp->pdev,
12091                                              pcie_cap + PCI_EXP_LNKCTL,
12092                                              &lnkctl);
12093                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12094                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12095                 }
12096         }
12097
12098         /* If we have an AMD 762 or VIA K8T800 chipset, write
12099          * reordering to the mailbox registers done by the host
12100          * controller can cause major troubles.  We read back from
12101          * every mailbox register write to force the writes to be
12102          * posted to the chip in order.
12103          */
12104         if (pci_dev_present(write_reorder_chipsets) &&
12105             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12106                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12107
12108         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12109             tp->pci_lat_timer < 64) {
12110                 tp->pci_lat_timer = 64;
12111
12112                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
12113                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
12114                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
12115                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
12116
12117                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12118                                        cacheline_sz_reg);
12119         }
12120
12121         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12122             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12123                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12124                 if (!tp->pcix_cap) {
12125                         printk(KERN_ERR PFX "Cannot find PCI-X "
12126                                             "capability, aborting.\n");
12127                         return -EIO;
12128                 }
12129         }
12130
12131         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12132                               &pci_state_reg);
12133
12134         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
12135                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12136
12137                 /* If this is a 5700 BX chipset, and we are in PCI-X
12138                  * mode, enable register write workaround.
12139                  *
12140                  * The workaround is to use indirect register accesses
12141                  * for all chip writes not to mailbox registers.
12142                  */
12143                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12144                         u32 pm_reg;
12145
12146                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12147
12148                         /* The chip can have it's power management PCI config
12149                          * space registers clobbered due to this bug.
12150                          * So explicitly force the chip into D0 here.
12151                          */
12152                         pci_read_config_dword(tp->pdev,
12153                                               tp->pm_cap + PCI_PM_CTRL,
12154                                               &pm_reg);
12155                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12156                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12157                         pci_write_config_dword(tp->pdev,
12158                                                tp->pm_cap + PCI_PM_CTRL,
12159                                                pm_reg);
12160
12161                         /* Also, force SERR#/PERR# in PCI command. */
12162                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12163                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12164                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12165                 }
12166         }
12167
12168         /* 5700 BX chips need to have their TX producer index mailboxes
12169          * written twice to workaround a bug.
12170          */
12171         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12172                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12173
12174         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12175                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12176         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12177                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12178
12179         /* Chip-specific fixup from Broadcom driver */
12180         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12181             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12182                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12183                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12184         }
12185
12186         /* Default fast path register access methods */
12187         tp->read32 = tg3_read32;
12188         tp->write32 = tg3_write32;
12189         tp->read32_mbox = tg3_read32;
12190         tp->write32_mbox = tg3_write32;
12191         tp->write32_tx_mbox = tg3_write32;
12192         tp->write32_rx_mbox = tg3_write32;
12193
12194         /* Various workaround register access methods */
12195         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12196                 tp->write32 = tg3_write_indirect_reg32;
12197         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12198                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12199                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12200                 /*
12201                  * Back to back register writes can cause problems on these
12202                  * chips, the workaround is to read back all reg writes
12203                  * except those to mailbox regs.
12204                  *
12205                  * See tg3_write_indirect_reg32().
12206                  */
12207                 tp->write32 = tg3_write_flush_reg32;
12208         }
12209
12210
12211         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12212             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12213                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12214                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12215                         tp->write32_rx_mbox = tg3_write_flush_reg32;
12216         }
12217
12218         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12219                 tp->read32 = tg3_read_indirect_reg32;
12220                 tp->write32 = tg3_write_indirect_reg32;
12221                 tp->read32_mbox = tg3_read_indirect_mbox;
12222                 tp->write32_mbox = tg3_write_indirect_mbox;
12223                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12224                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12225
12226                 iounmap(tp->regs);
12227                 tp->regs = NULL;
12228
12229                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12230                 pci_cmd &= ~PCI_COMMAND_MEMORY;
12231                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12232         }
12233         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12234                 tp->read32_mbox = tg3_read32_mbox_5906;
12235                 tp->write32_mbox = tg3_write32_mbox_5906;
12236                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12237                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12238         }
12239
12240         if (tp->write32 == tg3_write_indirect_reg32 ||
12241             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12242              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12243               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12244                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12245
12246         /* Get eeprom hw config before calling tg3_set_power_state().
12247          * In particular, the TG3_FLG2_IS_NIC flag must be
12248          * determined before calling tg3_set_power_state() so that
12249          * we know whether or not to switch out of Vaux power.
12250          * When the flag is set, it means that GPIO1 is used for eeprom
12251          * write protect and also implies that it is a LOM where GPIOs
12252          * are not used to switch power.
12253          */
12254         tg3_get_eeprom_hw_cfg(tp);
12255
12256         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12257                 /* Allow reads and writes to the
12258                  * APE register and memory space.
12259                  */
12260                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12261                                  PCISTATE_ALLOW_APE_SHMEM_WR;
12262                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12263                                        pci_state_reg);
12264         }
12265
12266         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12267             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12268             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12269                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12270
12271                 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
12272                     tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
12273                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
12274                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
12275                         tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
12276         }
12277
12278         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12279          * GPIO1 driven high will bring 5700's external PHY out of reset.
12280          * It is also used as eeprom write protect on LOMs.
12281          */
12282         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12283         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12284             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12285                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12286                                        GRC_LCLCTRL_GPIO_OUTPUT1);
12287         /* Unused GPIO3 must be driven as output on 5752 because there
12288          * are no pull-up resistors on unused GPIO pins.
12289          */
12290         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12291                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12292
12293         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12294                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12295
12296         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12297                 /* Turn off the debug UART. */
12298                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12299                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12300                         /* Keep VMain power. */
12301                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12302                                               GRC_LCLCTRL_GPIO_OUTPUT0;
12303         }
12304
12305         /* Force the chip into D0. */
12306         err = tg3_set_power_state(tp, PCI_D0);
12307         if (err) {
12308                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12309                        pci_name(tp->pdev));
12310                 return err;
12311         }
12312
12313         /* 5700 B0 chips do not support checksumming correctly due
12314          * to hardware bugs.
12315          */
12316         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12317                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12318
12319         /* Derive initial jumbo mode from MTU assigned in
12320          * ether_setup() via the alloc_etherdev() call
12321          */
12322         if (tp->dev->mtu > ETH_DATA_LEN &&
12323             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12324                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12325
12326         /* Determine WakeOnLan speed to use. */
12327         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12328             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12329             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12330             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12331                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12332         } else {
12333                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12334         }
12335
12336         /* A few boards don't want Ethernet@WireSpeed phy feature */
12337         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12338             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12339              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12340              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12341             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
12342             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12343                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12344
12345         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12346             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12347                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12348         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12349                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12350
12351         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12352                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12353                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12354                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12355                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12356                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12357                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12358                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12359                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12360                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12361                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12362                            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
12363                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12364         }
12365
12366         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12367             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12368                 tp->phy_otp = tg3_read_otp_phycfg(tp);
12369                 if (tp->phy_otp == 0)
12370                         tp->phy_otp = TG3_OTP_DEFAULT;
12371         }
12372
12373         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12374                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12375         else
12376                 tp->mi_mode = MAC_MI_MODE_BASE;
12377
12378         tp->coalesce_mode = 0;
12379         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12380             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12381                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12382
12383         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12384                 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12385
12386         err = tg3_mdio_init(tp);
12387         if (err)
12388                 return err;
12389
12390         /* Initialize data/descriptor byte/word swapping. */
12391         val = tr32(GRC_MODE);
12392         val &= GRC_MODE_HOST_STACKUP;
12393         tw32(GRC_MODE, val | tp->grc_mode);
12394
12395         tg3_switch_clocks(tp);
12396
12397         /* Clear this out for sanity. */
12398         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12399
12400         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12401                               &pci_state_reg);
12402         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12403             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12404                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12405
12406                 if (chiprevid == CHIPREV_ID_5701_A0 ||
12407                     chiprevid == CHIPREV_ID_5701_B0 ||
12408                     chiprevid == CHIPREV_ID_5701_B2 ||
12409                     chiprevid == CHIPREV_ID_5701_B5) {
12410                         void __iomem *sram_base;
12411
12412                         /* Write some dummy words into the SRAM status block
12413                          * area, see if it reads back correctly.  If the return
12414                          * value is bad, force enable the PCIX workaround.
12415                          */
12416                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12417
12418                         writel(0x00000000, sram_base);
12419                         writel(0x00000000, sram_base + 4);
12420                         writel(0xffffffff, sram_base + 4);
12421                         if (readl(sram_base) != 0x00000000)
12422                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12423                 }
12424         }
12425
12426         udelay(50);
12427         tg3_nvram_init(tp);
12428
12429         grc_misc_cfg = tr32(GRC_MISC_CFG);
12430         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12431
12432         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12433             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12434              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12435                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12436
12437         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12438             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12439                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12440         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12441                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12442                                       HOSTCC_MODE_CLRTICK_TXBD);
12443
12444                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12445                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12446                                        tp->misc_host_ctrl);
12447         }
12448
12449         /* Preserve the APE MAC_MODE bits */
12450         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12451                 tp->mac_mode = tr32(MAC_MODE) |
12452                                MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12453         else
12454                 tp->mac_mode = TG3_DEF_MAC_MODE;
12455
12456         /* these are limited to 10/100 only */
12457         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12458              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12459             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12460              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12461              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12462               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12463               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12464             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12465              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12466               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12467               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12468             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12469                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12470
12471         err = tg3_phy_probe(tp);
12472         if (err) {
12473                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12474                        pci_name(tp->pdev), err);
12475                 /* ... but do not return immediately ... */
12476                 tg3_mdio_fini(tp);
12477         }
12478
12479         tg3_read_partno(tp);
12480         tg3_read_fw_ver(tp);
12481
12482         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12483                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12484         } else {
12485                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12486                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12487                 else
12488                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12489         }
12490
12491         /* 5700 {AX,BX} chips have a broken status block link
12492          * change bit implementation, so we must use the
12493          * status register in those cases.
12494          */
12495         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12496                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12497         else
12498                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12499
12500         /* The led_ctrl is set during tg3_phy_probe, here we might
12501          * have to force the link status polling mechanism based
12502          * upon subsystem IDs.
12503          */
12504         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12505             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12506             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12507                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12508                                   TG3_FLAG_USE_LINKCHG_REG);
12509         }
12510
12511         /* For all SERDES we poll the MAC status register. */
12512         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12513                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12514         else
12515                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12516
12517         /* All chips before 5787 can get confused if TX buffers
12518          * straddle the 4GB address boundary in some cases.
12519          */
12520         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12521             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12522             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12523             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12524             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12525             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12526                 tp->dev->hard_start_xmit = tg3_start_xmit;
12527         else
12528                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
12529
12530         tp->rx_offset = 2;
12531         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12532             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12533                 tp->rx_offset = 0;
12534
12535         tp->rx_std_max_post = TG3_RX_RING_SIZE;
12536
12537         /* Increment the rx prod index on the rx std ring by at most
12538          * 8 for these chips to workaround hw errata.
12539          */
12540         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12541             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12542             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12543                 tp->rx_std_max_post = 8;
12544
12545         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12546                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12547                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
12548
12549         return err;
12550 }
12551
12552 #ifdef CONFIG_SPARC
12553 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12554 {
12555         struct net_device *dev = tp->dev;
12556         struct pci_dev *pdev = tp->pdev;
12557         struct device_node *dp = pci_device_to_OF_node(pdev);
12558         const unsigned char *addr;
12559         int len;
12560
12561         addr = of_get_property(dp, "local-mac-address", &len);
12562         if (addr && len == 6) {
12563                 memcpy(dev->dev_addr, addr, 6);
12564                 memcpy(dev->perm_addr, dev->dev_addr, 6);
12565                 return 0;
12566         }
12567         return -ENODEV;
12568 }
12569
12570 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12571 {
12572         struct net_device *dev = tp->dev;
12573
12574         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12575         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12576         return 0;
12577 }
12578 #endif
12579
/* Determine the permanent MAC address of the NIC, trying sources in
 * decreasing order of preference:
 *   1. SPARC OpenFirmware device tree (SPARC builds only),
 *   2. the MAC address mailbox in NIC SRAM (tag 0x484b == "HK"),
 *   3. NVRAM at a chip-dependent offset,
 *   4. the MAC_ADDR_0 hardware registers,
 *   5. the SPARC IDPROM as a final fallback.
 * Returns 0 on success, -EINVAL if no valid address could be found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Default NVRAM offset of the MAC address. */
	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* Dual-MAC parts: the second function's address lives
		 * at 0xcc instead.
		 */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* If the NVRAM arbitration lock cannot be taken, reset
		 * the NVRAM command engine; otherwise just release the
		 * lock we acquired.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM.  Note the NVRAM byte order differs
		 * from the SRAM mailbox layout above.
		 */
		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
			dev->dev_addr[0] = ((hi >> 16) & 0xff);
			dev->dev_addr[1] = ((hi >> 24) & 0xff);
			dev->dev_addr[2] = ((lo >>  0) & 0xff);
			dev->dev_addr[3] = ((lo >>  8) & 0xff);
			dev->dev_addr[4] = ((lo >> 16) & 0xff);
			dev->dev_addr[5] = ((lo >> 24) & 0xff);
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
12654
12655 #define BOUNDARY_SINGLE_CACHELINE       1
12656 #define BOUNDARY_MULTI_CACHELINE        2
12657
/* Fold DMA read/write boundary bits into @val (a DMA_RW_CTRL image)
 * based on the host cacheline size and the bus type.  The boundary
 * limits how far a single DMA burst may run, which matters on RISC
 * PCI controllers that disconnect at cacheline boundaries.  Returns
 * the updated register value; @val is returned unchanged when the
 * chip or platform does not need boundary tuning.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI_CACHE_LINE_SIZE is in units of 4-byte dwords; 0 means
	 * "not set", treated here as the 1024-byte worst case.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

	/* Per-architecture policy: stop bursts at one cacheline, at a
	 * small multiple of cachelines, or (0) don't constrain at all.
	 */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		/* PCI-X: only 128/256/384-byte boundary encodings exist. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* PCIe: only the write boundary is controllable. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Conventional PCI: each case breaks out when a single
		 * cacheline is the goal, otherwise falls through until
		 * the 256-byte (multi-cacheline) entry is reached.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
12794
/* Run one DMA transfer between host memory and NIC SRAM by hand-feeding
 * a single internal buffer descriptor to the read (to_device != 0) or
 * write DMA engine, then polling the matching completion FIFO.  @buf /
 * @buf_dma is the host-coherent buffer, @size its length in bytes.
 * Returns 0 when the completion shows up within ~4 ms, -ENODEV on
 * timeout.  Note @buf itself is unused here; the engine works purely
 * from @buf_dma.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the DMA engines: drain completion FIFOs, clear
	 * status, and reset the buffer manager and FTQs.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build the internal descriptor: host DMA address, a fixed
	 * NIC-side mbuf address (0x2100), and the transfer length.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into the NIC SRAM descriptor pool one
	 * 32-bit word at a time via the PCI memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick the transfer by enqueueing the descriptor address on
	 * the appropriate DMA-high FTQ.
	 */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll up to 40 * 100us for our descriptor to appear on the
	 * completion FIFO.
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
12875
12876 #define TEST_BUFFER_SIZE        0x2000
12877
12878 static int __devinit tg3_test_dma(struct tg3 *tp)
12879 {
12880         dma_addr_t buf_dma;
12881         u32 *buf, saved_dma_rwctrl;
12882         int ret;
12883
12884         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12885         if (!buf) {
12886                 ret = -ENOMEM;
12887                 goto out_nofree;
12888         }
12889
12890         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12891                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12892
12893         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12894
12895         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12896                 /* DMA read watermark not used on PCIE */
12897                 tp->dma_rwctrl |= 0x00180000;
12898         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12899                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12900                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12901                         tp->dma_rwctrl |= 0x003f0000;
12902                 else
12903                         tp->dma_rwctrl |= 0x003f000f;
12904         } else {
12905                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12906                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12907                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12908                         u32 read_water = 0x7;
12909
12910                         /* If the 5704 is behind the EPB bridge, we can
12911                          * do the less restrictive ONE_DMA workaround for
12912                          * better performance.
12913                          */
12914                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12915                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12916                                 tp->dma_rwctrl |= 0x8000;
12917                         else if (ccval == 0x6 || ccval == 0x7)
12918                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12919
12920                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12921                                 read_water = 4;
12922                         /* Set bit 23 to enable PCIX hw bug fix */
12923                         tp->dma_rwctrl |=
12924                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12925                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12926                                 (1 << 23);
12927                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12928                         /* 5780 always in PCIX mode */
12929                         tp->dma_rwctrl |= 0x00144000;
12930                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12931                         /* 5714 always in PCIX mode */
12932                         tp->dma_rwctrl |= 0x00148000;
12933                 } else {
12934                         tp->dma_rwctrl |= 0x001b000f;
12935                 }
12936         }
12937
12938         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12939             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12940                 tp->dma_rwctrl &= 0xfffffff0;
12941
12942         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12943             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12944                 /* Remove this if it causes problems for some boards. */
12945                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12946
12947                 /* On 5700/5701 chips, we need to set this bit.
12948                  * Otherwise the chip will issue cacheline transactions
12949                  * to streamable DMA memory with not all the byte
12950                  * enables turned on.  This is an error on several
12951                  * RISC PCI controllers, in particular sparc64.
12952                  *
12953                  * On 5703/5704 chips, this bit has been reassigned
12954                  * a different meaning.  In particular, it is used
12955                  * on those chips to enable a PCI-X workaround.
12956                  */
12957                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12958         }
12959
12960         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12961
12962 #if 0
12963         /* Unneeded, already done by tg3_get_invariants.  */
12964         tg3_switch_clocks(tp);
12965 #endif
12966
12967         ret = 0;
12968         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12969             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12970                 goto out;
12971
12972         /* It is best to perform DMA test with maximum write burst size
12973          * to expose the 5700/5701 write DMA bug.
12974          */
12975         saved_dma_rwctrl = tp->dma_rwctrl;
12976         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12977         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12978
12979         while (1) {
12980                 u32 *p = buf, i;
12981
12982                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12983                         p[i] = i;
12984
12985                 /* Send the buffer to the chip. */
12986                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12987                 if (ret) {
12988                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
12989                         break;
12990                 }
12991
12992 #if 0
12993                 /* validate data reached card RAM correctly. */
12994                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12995                         u32 val;
12996                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
12997                         if (le32_to_cpu(val) != p[i]) {
12998                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
12999                                 /* ret = -ENODEV here? */
13000                         }
13001                         p[i] = 0;
13002                 }
13003 #endif
13004                 /* Now read it back. */
13005                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13006                 if (ret) {
13007                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
13008
13009                         break;
13010                 }
13011
13012                 /* Verify it. */
13013                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13014                         if (p[i] == i)
13015                                 continue;
13016
13017                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13018                             DMA_RWCTRL_WRITE_BNDRY_16) {
13019                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13020                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13021                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13022                                 break;
13023                         } else {
13024                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13025                                 ret = -ENODEV;
13026                                 goto out;
13027                         }
13028                 }
13029
13030                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13031                         /* Success. */
13032                         ret = 0;
13033                         break;
13034                 }
13035         }
13036         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13037             DMA_RWCTRL_WRITE_BNDRY_16) {
13038                 static struct pci_device_id dma_wait_state_chipsets[] = {
13039                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13040                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13041                         { },
13042                 };
13043
13044                 /* DMA test passed without adjusting DMA boundary,
13045                  * now look for chipsets that are known to expose the
13046                  * DMA bug without failing the test.
13047                  */
13048                 if (pci_dev_present(dma_wait_state_chipsets)) {
13049                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13050                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13051                 }
13052                 else
13053                         /* Safe to use the calculated DMA boundary. */
13054                         tp->dma_rwctrl = saved_dma_rwctrl;
13055
13056                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13057         }
13058
13059 out:
13060         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13061 out_nofree:
13062         return ret;
13063 }
13064
13065 static void __devinit tg3_init_link_config(struct tg3 *tp)
13066 {
13067         tp->link_config.advertising =
13068                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13069                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13070                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13071                  ADVERTISED_Autoneg | ADVERTISED_MII);
13072         tp->link_config.speed = SPEED_INVALID;
13073         tp->link_config.duplex = DUPLEX_INVALID;
13074         tp->link_config.autoneg = AUTONEG_ENABLE;
13075         tp->link_config.active_speed = SPEED_INVALID;
13076         tp->link_config.active_duplex = DUPLEX_INVALID;
13077         tp->link_config.phy_is_low_power = 0;
13078         tp->link_config.orig_speed = SPEED_INVALID;
13079         tp->link_config.orig_duplex = DUPLEX_INVALID;
13080         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13081 }
13082
13083 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13084 {
13085         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13086                 tp->bufmgr_config.mbuf_read_dma_low_water =
13087                         DEFAULT_MB_RDMA_LOW_WATER_5705;
13088                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13089                         DEFAULT_MB_MACRX_LOW_WATER_5705;
13090                 tp->bufmgr_config.mbuf_high_water =
13091                         DEFAULT_MB_HIGH_WATER_5705;
13092                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13093                         tp->bufmgr_config.mbuf_mac_rx_low_water =
13094                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
13095                         tp->bufmgr_config.mbuf_high_water =
13096                                 DEFAULT_MB_HIGH_WATER_5906;
13097                 }
13098
13099                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13100                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13101                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13102                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13103                 tp->bufmgr_config.mbuf_high_water_jumbo =
13104                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13105         } else {
13106                 tp->bufmgr_config.mbuf_read_dma_low_water =
13107                         DEFAULT_MB_RDMA_LOW_WATER;
13108                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13109                         DEFAULT_MB_MACRX_LOW_WATER;
13110                 tp->bufmgr_config.mbuf_high_water =
13111                         DEFAULT_MB_HIGH_WATER;
13112
13113                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13114                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13115                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13116                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13117                 tp->bufmgr_config.mbuf_high_water_jumbo =
13118                         DEFAULT_MB_HIGH_WATER_JUMBO;
13119         }
13120
13121         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13122         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13123 }
13124
13125 static char * __devinit tg3_phy_string(struct tg3 *tp)
13126 {
13127         switch (tp->phy_id & PHY_ID_MASK) {
13128         case PHY_ID_BCM5400:    return "5400";
13129         case PHY_ID_BCM5401:    return "5401";
13130         case PHY_ID_BCM5411:    return "5411";
13131         case PHY_ID_BCM5701:    return "5701";
13132         case PHY_ID_BCM5703:    return "5703";
13133         case PHY_ID_BCM5704:    return "5704";
13134         case PHY_ID_BCM5705:    return "5705";
13135         case PHY_ID_BCM5750:    return "5750";
13136         case PHY_ID_BCM5752:    return "5752";
13137         case PHY_ID_BCM5714:    return "5714";
13138         case PHY_ID_BCM5780:    return "5780";
13139         case PHY_ID_BCM5755:    return "5755";
13140         case PHY_ID_BCM5787:    return "5787";
13141         case PHY_ID_BCM5784:    return "5784";
13142         case PHY_ID_BCM5756:    return "5722/5756";
13143         case PHY_ID_BCM5906:    return "5906";
13144         case PHY_ID_BCM5761:    return "5761";
13145         case PHY_ID_BCM8002:    return "8002/serdes";
13146         case 0:                 return "serdes";
13147         default:                return "unknown";
13148         }
13149 }
13150
13151 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13152 {
13153         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13154                 strcpy(str, "PCI Express");
13155                 return str;
13156         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13157                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13158
13159                 strcpy(str, "PCIX:");
13160
13161                 if ((clock_ctrl == 7) ||
13162                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13163                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13164                         strcat(str, "133MHz");
13165                 else if (clock_ctrl == 0)
13166                         strcat(str, "33MHz");
13167                 else if (clock_ctrl == 2)
13168                         strcat(str, "50MHz");
13169                 else if (clock_ctrl == 4)
13170                         strcat(str, "66MHz");
13171                 else if (clock_ctrl == 6)
13172                         strcat(str, "100MHz");
13173         } else {
13174                 strcpy(str, "PCI:");
13175                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13176                         strcat(str, "66MHz");
13177                 else
13178                         strcat(str, "33MHz");
13179         }
13180         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13181                 strcat(str, ":32-bit");
13182         else
13183                 strcat(str, ":64-bit");
13184         return str;
13185 }
13186
/* tg3_find_peer() - locate the sibling function of a dual-port chip.
 *
 * Scans the other PCI functions of this device's slot for a function
 * that is not us.  Returns tp->pdev itself when no peer is found
 * (e.g. a 5704 configured in single-port mode).  The returned pointer
 * is deliberately NOT reference-counted; see the comment below.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	/* Mask off the function bits to get function 0 of our slot. */
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* Not a peer: drop the get_slot reference (NULL is a
		 * no-op for pci_dev_put).
		 */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
13214
13215 static void __devinit tg3_init_coal(struct tg3 *tp)
13216 {
13217         struct ethtool_coalesce *ec = &tp->coal;
13218
13219         memset(ec, 0, sizeof(*ec));
13220         ec->cmd = ETHTOOL_GCOALESCE;
13221         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13222         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13223         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13224         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13225         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13226         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13227         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13228         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13229         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13230
13231         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13232                                  HOSTCC_MODE_CLRTICK_TXBD)) {
13233                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13234                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13235                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13236                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13237         }
13238
13239         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13240                 ec->rx_coalesce_usecs_irq = 0;
13241                 ec->tx_coalesce_usecs_irq = 0;
13242                 ec->stats_block_coalesce_usecs = 0;
13243         }
13244 }
13245
/* tg3_init_one() - PCI probe entry point.
 *
 * Enables the PCI device, maps the register BAR, reads the chip
 * invariants, negotiates DMA masks, configures offload features and
 * registers the net_device.  Returns 0 on success or a negative
 * errno; error paths unwind via the err_out_* labels in reverse
 * order of acquisition.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	resource_size_t tg3reg_base;
	unsigned long tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	/* Print the driver banner only once, on the first probe. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
#endif

	/* Wire up the private state before anything can use it. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Hook up the net_device operations. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to 32-bit DMA if the wide mask was refused. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* Classify TSO capability: hardware TSO chips are always
	 * capable; a short list of chips (and ASF-enabled boards)
	 * cannot do TSO at all; everything else gets firmware TSO
	 * with the TSO_BUG workaround flag set.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
			GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			dev->features |= NETIF_F_TSO_ECN;
	}


	/* Shrink the RX ring on slow-bus 5705 A1 without TSO. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* Map the APE register window (BAR 2) when present. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
			printk(KERN_ERR PFX "Cannot find proper PCI device "
			       "base address for APE, aborting.\n");
			err = -ENODEV;
			goto err_out_iounmap;
		}

		tg3reg_base = pci_resource_start(pdev, 2);
		tg3reg_len = pci_resource_len(pdev, 2);

		tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
		if (!tp->aperegs) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			dev->features |= NETIF_F_IPV6_CSUM;

		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}

	/* Announce the device and its detected capabilities. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
	       "(%s) %s Ethernet %pM\n",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
		((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
		 "10/100/1000Base-T")),
	       dev->dev_addr);

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
13590
13591 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13592 {
13593         struct net_device *dev = pci_get_drvdata(pdev);
13594
13595         if (dev) {
13596                 struct tg3 *tp = netdev_priv(dev);
13597
13598                 flush_scheduled_work();
13599
13600                 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13601                         tg3_phy_fini(tp);
13602                         tg3_mdio_fini(tp);
13603                 }
13604
13605                 unregister_netdev(dev);
13606                 if (tp->aperegs) {
13607                         iounmap(tp->aperegs);
13608                         tp->aperegs = NULL;
13609                 }
13610                 if (tp->regs) {
13611                         iounmap(tp->regs);
13612                         tp->regs = NULL;
13613                 }
13614                 free_netdev(dev);
13615                 pci_release_regions(pdev);
13616                 pci_disable_device(pdev);
13617                 pci_set_drvdata(pdev, NULL);
13618         }
13619 }
13620
/* tg3_suspend() - PCI suspend callback.
 *
 * Saves PCI config space unconditionally, then, if the interface is
 * up, quiesces the data path, halts the chip and drops it into the
 * target power state.  If the power transition fails, the hardware
 * is restarted so the device remains usable.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	pci_power_t target_state;
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	/* Quiesce deferred work and the data path before touching hw. */
	flush_scheduled_work();
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	/* Without a PM capability the only safe target is D3hot. */
	target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;

	err = tg3_set_power_state(tp, target_state);
	if (err) {
		int err2;

		/* Power transition failed: restart the hardware so the
		 * interface is left in a working state.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
13682
/* tg3_resume() - PCI resume callback.
 *
 * Restores PCI config space and, if the interface was up at suspend
 * time, powers the chip back to D0, restarts the hardware, re-arms
 * the driver timer and restarts the data path.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	/* Bring the chip to full power before touching its registers. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	/* Re-arm the periodic driver timer. */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
13720
/* PCI driver glue: probe/remove plus legacy suspend/resume hooks. */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
13729
/* Module load: register the PCI driver with the PCI core. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
13734
/* Module unload: unregister the PCI driver (removes all devices). */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
13739
/* Register the module entry and exit points with the kernel. */
module_init(tg3_init);
module_exit(tg3_cleanup);